xref: /illumos-gate/usr/src/uts/common/io/pciex/pcie_fault.c (revision 8509e9caaaa43d21ab1a18a2aa45b43322c378ac)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 #include <sys/sysmacros.h>
26 #include <sys/types.h>
27 #include <sys/kmem.h>
28 #include <sys/modctl.h>
29 #include <sys/ddi.h>
30 #include <sys/sunddi.h>
31 #include <sys/sunndi.h>
32 #include <sys/fm/protocol.h>
33 #include <sys/fm/util.h>
34 #include <sys/fm/io/ddi.h>
35 #include <sys/fm/io/pci.h>
36 #include <sys/promif.h>
37 #include <sys/disp.h>
38 #include <sys/atomic.h>
39 #include <sys/pcie.h>
40 #include <sys/pci_cap.h>
41 #include <sys/pcie_impl.h>
42 
/* PCIe Device Status bits indicating a bridge detected/forwarded an error. */
#define	PF_PCIE_BDG_ERR (PCIE_DEVSTS_FE_DETECTED | PCIE_DEVSTS_NFE_DETECTED | \
	PCIE_DEVSTS_CE_DETECTED)

/* PCI secondary status bits indicating a bridge saw an error. */
#define	PF_PCI_BDG_ERR (PCI_STAT_S_SYSERR | PCI_STAT_S_TARG_AB | \
	PCI_STAT_R_MAST_AB | PCI_STAT_R_TARG_AB | PCI_STAT_S_PERROR)

/* AER uncorrectable errors treated as fatal vs. non-fatal by this module. */
#define	PF_AER_FATAL_ERR (PCIE_AER_UCE_DLP | PCIE_AER_UCE_SD |\
	PCIE_AER_UCE_FCP | PCIE_AER_UCE_RO | PCIE_AER_UCE_MTLP)
#define	PF_AER_NON_FATAL_ERR (PCIE_AER_UCE_PTLP | PCIE_AER_UCE_TO | \
	PCIE_AER_UCE_CA | PCIE_AER_UCE_ECRC | PCIE_AER_UCE_UR)

/* Secondary (PCIe-to-PCI bridge) AER errors: fatal vs. non-fatal split. */
#define	PF_SAER_FATAL_ERR (PCIE_AER_SUCE_USC_MSG_DATA_ERR | \
	PCIE_AER_SUCE_UC_ATTR_ERR | PCIE_AER_SUCE_UC_ADDR_ERR | \
	PCIE_AER_SUCE_SERR_ASSERT)
#define	PF_SAER_NON_FATAL_ERR (PCIE_AER_SUCE_TA_ON_SC | \
	PCIE_AER_SUCE_MA_ON_SC | PCIE_AER_SUCE_RCVD_TA | \
	PCIE_AER_SUCE_RCVD_MA | PCIE_AER_SUCE_USC_ERR | \
	PCIE_AER_SUCE_UC_DATA_ERR | PCIE_AER_SUCE_TIMER_EXPIRED | \
	PCIE_AER_SUCE_PERR_ASSERT | PCIE_AER_SUCE_INTERNAL_ERR)

/* Parity error bits in the PCI status/secondary status registers. */
#define	PF_PCI_PARITY_ERR (PCI_STAT_S_PERROR | PCI_STAT_PERROR)

/*
 * True if "bit" matches the First Error Pointer in the AER control
 * register, i.e. this error was the first one the device logged and the
 * AER header logs therefore describe it.
 */
#define	PF_FIRST_AER_ERR(bit, adv) \
	(bit & (1 << (adv->pcie_adv_ctl & PCIE_AER_CTL_FST_ERR_PTR_MASK)))

/* Device has AER and "bit" was the first error logged. */
#define	HAS_AER_LOGS(pfd_p, bit) \
	(PCIE_HAS_AER(pfd_p->pe_bus_p) && \
	PF_FIRST_AER_ERR(bit, PCIE_ADV_REG(pfd_p)))

/* As PF_FIRST_AER_ERR, but for the secondary (bridge) AER error logs. */
#define	PF_FIRST_SAER_ERR(bit, adv) \
	(bit & (1 << (adv->pcie_sue_ctl & PCIE_AER_SCTL_FST_ERR_PTR_MASK)))

/* Device has AER and "bit" was the first secondary error logged. */
#define	HAS_SAER_LOGS(pfd_p, bit) \
	(PCIE_HAS_AER(pfd_p->pe_bus_p) && \
	PF_FIRST_SAER_ERR(bit, PCIE_ADV_BDG_REG(pfd_p)))

/* Extract the transaction command from the secondary AER header log. */
#define	GET_SAER_CMD(pfd_p) \
	((PCIE_ADV_BDG_HDR(pfd_p, 1) >> \
	PCIE_AER_SUCE_HDR_CMD_LWR_SHIFT) & PCIE_AER_SUCE_HDR_CMD_LWR_MASK)

/* Correctable error status has the "Advisory Non-Fatal" bit set. */
#define	CE_ADVISORY(pfd_p) \
	(PCIE_ADV_REG(pfd_p)->pcie_ce_status & PCIE_AER_CE_AD_NFE)
85 
/* PCIe Fault Fabric Error analysis table */
typedef struct pf_fab_err_tbl {
	uint32_t	bit;		/* Error bit */
	int		(*handler)();	/* Error handling function */
	uint16_t	affected_flags; /* Primary affected flag */
	/*
	 * Secondary affected flag, effective when the information
	 * indicated by the primary flag is not available, eg.
	 * PF_AFFECTED_AER/SAER/ADDR
	 */
	uint16_t	sec_affected_flags;
} pf_fab_err_tbl_t;
98 
99 static pcie_bus_t *pf_is_ready(dev_info_t *);
100 /* Functions for scanning errors */
101 static int pf_default_hdl(dev_info_t *, pf_impl_t *);
102 static int pf_dispatch(dev_info_t *, pf_impl_t *, boolean_t);
103 static boolean_t pf_in_addr_range(pcie_bus_t *, uint64_t);
104 
105 /* Functions for gathering errors */
106 static void pf_pcix_ecc_regs_gather(pf_pcix_ecc_regs_t *pcix_ecc_regs,
107     pcie_bus_t *bus_p, boolean_t bdg);
108 static void pf_pcix_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p);
109 static void pf_pcie_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p);
110 static void pf_pci_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p);
111 static int pf_dummy_cb(dev_info_t *, ddi_fm_error_t *, const void *);
112 static void pf_en_dq(pf_data_t *pfd_p, pf_impl_t *impl_p);
113 
114 /* Functions for analysing errors */
115 static int pf_analyse_error(ddi_fm_error_t *, pf_impl_t *);
116 static void pf_adjust_for_no_aer(pf_data_t *);
117 static void pf_adjust_for_no_saer(pf_data_t *);
118 static pf_data_t *pf_get_pcie_bridge(pf_data_t *, pcie_req_id_t);
119 static pf_data_t *pf_get_parent_pcie_bridge(pf_data_t *);
120 static boolean_t pf_matched_in_rc(pf_data_t *, pf_data_t *,
121     uint32_t);
122 static int pf_analyse_error_tbl(ddi_fm_error_t *, pf_impl_t *,
123     pf_data_t *, const pf_fab_err_tbl_t *, uint32_t);
124 static int pf_analyse_ca_ur(ddi_fm_error_t *, uint32_t,
125     pf_data_t *, pf_data_t *);
126 static int pf_analyse_ma_ta(ddi_fm_error_t *, uint32_t,
127     pf_data_t *, pf_data_t *);
128 static int pf_analyse_pci(ddi_fm_error_t *, uint32_t,
129     pf_data_t *, pf_data_t *);
130 static int pf_analyse_perr_assert(ddi_fm_error_t *, uint32_t,
131     pf_data_t *, pf_data_t *);
132 static int pf_analyse_ptlp(ddi_fm_error_t *, uint32_t,
133     pf_data_t *, pf_data_t *);
134 static int pf_analyse_sc(ddi_fm_error_t *, uint32_t,
135     pf_data_t *, pf_data_t *);
136 static int pf_analyse_to(ddi_fm_error_t *, uint32_t,
137     pf_data_t *, pf_data_t *);
138 static int pf_analyse_uc(ddi_fm_error_t *, uint32_t,
139     pf_data_t *, pf_data_t *);
140 static int pf_analyse_uc_data(ddi_fm_error_t *, uint32_t,
141     pf_data_t *, pf_data_t *);
142 static int pf_no_panic(ddi_fm_error_t *, uint32_t,
143     pf_data_t *, pf_data_t *);
144 static int pf_panic(ddi_fm_error_t *, uint32_t,
145     pf_data_t *, pf_data_t *);
146 static void pf_send_ereport(ddi_fm_error_t *, pf_impl_t *);
147 static int pf_fm_callback(dev_info_t *dip, ddi_fm_error_t *derr);
148 
149 /* PCIe Fabric Handle Lookup Support Functions. */
150 static int pf_hdl_child_lookup(dev_info_t *, ddi_fm_error_t *, uint32_t,
151     uint64_t, pcie_req_id_t);
152 static int pf_hdl_compare(dev_info_t *, ddi_fm_error_t *, uint32_t, uint64_t,
153     pcie_req_id_t, ndi_fmc_t *);
154 static int pf_log_hdl_lookup(dev_info_t *, ddi_fm_error_t *, pf_data_t *,
155 	boolean_t);
156 
157 static int pf_handler_enter(dev_info_t *, pf_impl_t *);
158 static void pf_handler_exit(dev_info_t *);
159 static void pf_reset_pfd(pf_data_t *);
160 
161 boolean_t pcie_full_scan = B_FALSE;	/* Force to always do a full scan */
162 int pcie_disable_scan = 0;		/* Disable fabric scan */
163 
/* Inform interested parties that error handling is about to begin. */
/* ARGSUSED */
void
pf_eh_enter(pcie_bus_t *bus_p)
{
	/*
	 * Intentionally empty.  This exists as a hook point for symmetry
	 * with pf_eh_exit(); nothing currently needs pre-handling notice.
	 */
}
170 
/* Inform interested parties that error handling has ended. */
void
pf_eh_exit(pcie_bus_t *bus_p)
{
	pcie_bus_t *rbus_p = PCIE_DIP2BUS(bus_p->bus_rp_dip);
	pf_data_t *root_pfd_p = PCIE_BUS2PFD(rbus_p);
	pf_data_t *pfd_p;
	uint_t intr_type = PCIE_ROOT_EH_SRC(root_pfd_p)->intr_type;

	/* Notify virtualization/IOV consumers that handling is complete. */
	pciev_eh_exit(root_pfd_p, intr_type);

	/* Clear affected device info and INTR SRC */
	for (pfd_p = root_pfd_p; pfd_p; pfd_p = pfd_p->pe_next) {
		PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags = 0;
		PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;
		if (PCIE_IS_ROOT(PCIE_PFD2BUS(pfd_p))) {
			/* Only root pfds carry interrupt source state. */
			PCIE_ROOT_EH_SRC(pfd_p)->intr_type = PF_INTR_TYPE_NONE;
			PCIE_ROOT_EH_SRC(pfd_p)->intr_data = NULL;
		}
	}
}
192 
/*
 * Scan Fabric is the entry point for PCI/PCIe IO fabric errors.  The
 * caller may create a local pf_data_t with the "root fault"
 * information populated to either do a precise or full scan.  More
 * than one pf_data_t may be linked together if there are multiple
 * errors.  Only a PCIe compliant Root Port device may pass in NULL
 * for the root_pfd_p.
 *
 * "Root Complexes" such as NPE and PX should call scan_fabric using itself as
 * the rdip.  PCIe Root ports should call pf_scan_fabric using its parent as
 * the rdip.
 *
 * Scan fabric initiated from RCs are likely due to a fabric message, traps or
 * any RC detected errors that propagated to/from the fabric.
 *
 * This code assumes that by the time pf_scan_fabric is
 * called, pf_handler_enter has NOT been called on the rdip.
 *
 * Returns a bitmask of PF_ERR_* analysis flags.
 */
int
pf_scan_fabric(dev_info_t *rdip, ddi_fm_error_t *derr, pf_data_t *root_pfd_p)
{
	pf_impl_t	impl;
	pf_data_t	*pfd_p, *pfd_head_p, *pfd_tail_p;
	int		scan_flag = PF_SCAN_SUCCESS;
	int		analyse_flag = PF_ERR_NO_ERROR;
	boolean_t	full_scan = pcie_full_scan;

	/* Fabric scanning can be administratively disabled. */
	if (pcie_disable_scan)
		return (analyse_flag);

	/* Find the head and tail of this link list */
	pfd_head_p = root_pfd_p;
	for (pfd_tail_p = root_pfd_p; pfd_tail_p && pfd_tail_p->pe_next;
	    pfd_tail_p = pfd_tail_p->pe_next)
		;

	/* Save head/tail */
	impl.pf_total = 0;
	impl.pf_derr = derr;
	impl.pf_dq_head_p = pfd_head_p;
	impl.pf_dq_tail_p = pfd_tail_p;

	/* If scan is initiated from RP then RP itself must be scanned. */
	if (PCIE_IS_RP(PCIE_DIP2BUS(rdip)) && pf_is_ready(rdip) &&
	    !root_pfd_p) {
		scan_flag = pf_handler_enter(rdip, &impl);
		if (scan_flag & PF_SCAN_DEADLOCK)
			goto done;

		scan_flag = pf_default_hdl(rdip, &impl);
		if (scan_flag & PF_SCAN_NO_ERR_IN_CHILD)
			goto done;
	}

	/*
	 * Scan the fabric using the scan_bdf and scan_addr in error q.
	 * scan_bdf will be valid in the following cases:
	 *	- Fabric message
	 *	- Poisoned TLP
	 *	- Signaled UR/CA
	 *	- Received UR/CA
	 *	- PIO load failures
	 */
	for (pfd_p = impl.pf_dq_head_p; pfd_p && PFD_IS_ROOT(pfd_p);
	    pfd_p = pfd_p->pe_next) {
		impl.pf_fault = PCIE_ROOT_FAULT(pfd_p);

		if (PFD_IS_RC(pfd_p))
			impl.pf_total++;

		if (impl.pf_fault->full_scan)
			full_scan = B_TRUE;

		/* Dispatch only when there is something to look for. */
		if (full_scan ||
		    PCIE_CHECK_VALID_BDF(impl.pf_fault->scan_bdf) ||
		    impl.pf_fault->scan_addr)
			scan_flag |= pf_dispatch(rdip, &impl, full_scan);

		/* A full scan covers everything; no need to iterate more. */
		if (full_scan)
			break;
	}

done:
	/*
	 * If this is due to safe access, don't analyze the errors and return
	 * success regardless of how scan fabric went.
	 */
	if (derr->fme_flag != DDI_FM_ERR_UNEXPECTED) {
		analyse_flag = PF_ERR_NO_PANIC;
	} else {
		analyse_flag = pf_analyse_error(derr, &impl);
	}

	pf_send_ereport(derr, &impl);

	/*
	 * Check if any hardened driver's callback reported a panic.
	 * If so panic.
	 */
	if (scan_flag & PF_SCAN_CB_FAILURE)
		analyse_flag |= PF_ERR_PANIC;

	/*
	 * If a deadlock was detected, panic the system as error analysis has
	 * been compromised.
	 */
	if (scan_flag & PF_SCAN_DEADLOCK)
		analyse_flag |= PF_ERR_PANIC_DEADLOCK;

	derr->fme_status = PF_ERR2DDIFM_ERR(scan_flag);

	return (analyse_flag);
}
306 
/*
 * Force every subsequent pf_scan_fabric() call to walk the entire
 * fabric instead of only the devices in the fault path.
 */
void
pcie_force_fullscan(void)
{
	pcie_full_scan = B_TRUE;
}
312 
/*
 * pf_dispatch walks the device tree and calls the pf_default_hdl if the device
 * falls in the error path.
 *
 * Returns PF_SCAN_* flags
 */
static int
pf_dispatch(dev_info_t *pdip, pf_impl_t *impl, boolean_t full_scan)
{
	dev_info_t	*dip;
	pcie_req_id_t	rid = impl->pf_fault->scan_bdf;
	pcie_bus_t	*bus_p;
	int		scan_flag = PF_SCAN_SUCCESS;

	for (dip = ddi_get_child(pdip); dip; dip = ddi_get_next_sibling(dip)) {
		/* Make sure dip is attached and ready */
		if (!(bus_p = pf_is_ready(dip)))
			continue;

		scan_flag |= pf_handler_enter(dip, impl);
		if (scan_flag & PF_SCAN_DEADLOCK)
			break;

		/*
		 * Handle this device if it is a:
		 * o Full Scan
		 * o PCI/PCI-X Device
		 * o Fault BDF = Device BDF
		 * o BDF/ADDR is in range of the Bridge/Switch
		 */
		if (full_scan ||
		    (bus_p->bus_bdf == rid) ||
		    pf_in_bus_range(bus_p, rid) ||
		    pf_in_addr_range(bus_p, impl->pf_fault->scan_addr)) {
			int hdl_flag = pf_default_hdl(dip, impl);
			scan_flag |= hdl_flag;

			/*
			 * A bridge may have detected no errors in which case
			 * there is no need to scan further down.
			 */
			if (hdl_flag & PF_SCAN_NO_ERR_IN_CHILD)
				continue;
		} else {
			/* Not in the error path; release and move on. */
			pf_handler_exit(dip);
			continue;
		}

		/* match or in bridge bus-range */
		switch (bus_p->bus_dev_type) {
		case PCIE_PCIECAP_DEV_TYPE_PCIE2PCI:
		case PCIE_PCIECAP_DEV_TYPE_PCI2PCIE:
			/* Always scan everything below a protocol bridge. */
			scan_flag |= pf_dispatch(dip, impl, B_TRUE);
			break;
		case PCIE_PCIECAP_DEV_TYPE_UP:
		case PCIE_PCIECAP_DEV_TYPE_DOWN:
		case PCIE_PCIECAP_DEV_TYPE_ROOT:
		{
			pf_data_t *pfd_p = PCIE_BUS2PFD(bus_p);
			pf_pci_err_regs_t *err_p = PCI_ERR_REG(pfd_p);
			pf_pci_bdg_err_regs_t *serr_p = PCI_BDG_ERR_REG(pfd_p);
			/*
			 * Continue if the fault BDF != the switch or there is a
			 * parity error
			 */
			if ((bus_p->bus_bdf != rid) ||
			    (err_p->pci_err_status & PF_PCI_PARITY_ERR) ||
			    (serr_p->pci_bdg_sec_stat & PF_PCI_PARITY_ERR))
				scan_flag |= pf_dispatch(dip, impl, full_scan);
			break;
		}
		case PCIE_PCIECAP_DEV_TYPE_PCIE_DEV:
		case PCIE_PCIECAP_DEV_TYPE_PCI_DEV:
			/*
			 * Reached a PCIe end point so stop. Note dev_type
			 * PCI_DEV is just a PCIe device that requires IO Space
			 */
			break;
		case PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO:
			if (PCIE_IS_BDG(bus_p))
				scan_flag |= pf_dispatch(dip, impl, B_TRUE);
			break;
		default:
			/* Unknown device type; should not happen. */
			ASSERT(B_FALSE);
		}
	}
	return (scan_flag);
}
401 
402 /* Returns whether the "bdf" is in the bus range of a switch/bridge */
403 boolean_t
404 pf_in_bus_range(pcie_bus_t *bus_p, pcie_req_id_t bdf)
405 {
406 	pci_bus_range_t *br_p = &bus_p->bus_bus_range;
407 	uint8_t		bus_no = (bdf & PCIE_REQ_ID_BUS_MASK) >>
408 	    PCIE_REQ_ID_BUS_SHIFT;
409 
410 	/* check if given bdf falls within bridge's bus range */
411 	if (PCIE_IS_BDG(bus_p) &&
412 	    ((bus_no >= br_p->lo) && (bus_no <= br_p->hi)))
413 		return (B_TRUE);
414 	else
415 		return (B_FALSE);
416 }
417 
418 /*
419  * Return whether the "addr" is in the assigned addr of a device.
420  */
421 boolean_t
422 pf_in_assigned_addr(pcie_bus_t *bus_p, uint64_t addr)
423 {
424 	uint_t		i;
425 	uint64_t	low, hi;
426 	pci_regspec_t	*assign_p = bus_p->bus_assigned_addr;
427 
428 	for (i = 0; i < bus_p->bus_assigned_entries; i++, assign_p++) {
429 		low = assign_p->pci_phys_low;
430 		hi = low + assign_p->pci_size_low;
431 		if ((addr < hi) && (addr >= low))
432 			return (B_TRUE);
433 	}
434 	return (B_FALSE);
435 }
436 
437 /*
438  * Returns whether the "addr" is in the addr range of a switch/bridge, or if the
439  * "addr" is in the assigned addr of a device.
440  */
441 static boolean_t
442 pf_in_addr_range(pcie_bus_t *bus_p, uint64_t addr)
443 {
444 	uint_t		i;
445 	uint64_t	low, hi;
446 	ppb_ranges_t	*ranges_p = bus_p->bus_addr_ranges;
447 
448 	if (!addr)
449 		return (B_FALSE);
450 
451 	/* check if given address belongs to this device */
452 	if (pf_in_assigned_addr(bus_p, addr))
453 		return (B_TRUE);
454 
455 	/* check if given address belongs to a child below this device */
456 	if (!PCIE_IS_BDG(bus_p))
457 		return (B_FALSE);
458 
459 	for (i = 0; i < bus_p->bus_addr_entries; i++, ranges_p++) {
460 		switch (ranges_p->child_high & PCI_ADDR_MASK) {
461 		case PCI_ADDR_IO:
462 		case PCI_ADDR_MEM32:
463 			low = ranges_p->child_low;
464 			hi = ranges_p->size_low + low;
465 			if ((addr < hi) && (addr >= low))
466 				return (B_TRUE);
467 			break;
468 		case PCI_ADDR_MEM64:
469 			low = ((uint64_t)ranges_p->child_mid << 32) |
470 			    (uint64_t)ranges_p->child_low;
471 			hi = (((uint64_t)ranges_p->size_high << 32) |
472 			    (uint64_t)ranges_p->size_low) + low;
473 			if ((addr < hi) && (addr >= low))
474 				return (B_TRUE);
475 			break;
476 		}
477 	}
478 	return (B_FALSE);
479 }
480 
481 static pcie_bus_t *
482 pf_is_ready(dev_info_t *dip)
483 {
484 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
485 	if (!bus_p)
486 		return (NULL);
487 
488 	if (!(bus_p->bus_fm_flags & PF_FM_READY))
489 		return (NULL);
490 	return (bus_p);
491 }
492 
493 static void
494 pf_pcix_ecc_regs_gather(pf_pcix_ecc_regs_t *pcix_ecc_regs,
495     pcie_bus_t *bus_p, boolean_t bdg)
496 {
497 	if (bdg) {
498 		pcix_ecc_regs->pcix_ecc_ctlstat = PCIX_CAP_GET(32, bus_p,
499 		    PCI_PCIX_BDG_ECC_STATUS);
500 		pcix_ecc_regs->pcix_ecc_fstaddr = PCIX_CAP_GET(32, bus_p,
501 		    PCI_PCIX_BDG_ECC_FST_AD);
502 		pcix_ecc_regs->pcix_ecc_secaddr = PCIX_CAP_GET(32, bus_p,
503 		    PCI_PCIX_BDG_ECC_SEC_AD);
504 		pcix_ecc_regs->pcix_ecc_attr = PCIX_CAP_GET(32, bus_p,
505 		    PCI_PCIX_BDG_ECC_ATTR);
506 	} else {
507 		pcix_ecc_regs->pcix_ecc_ctlstat = PCIX_CAP_GET(32, bus_p,
508 		    PCI_PCIX_ECC_STATUS);
509 		pcix_ecc_regs->pcix_ecc_fstaddr = PCIX_CAP_GET(32, bus_p,
510 		    PCI_PCIX_ECC_FST_AD);
511 		pcix_ecc_regs->pcix_ecc_secaddr = PCIX_CAP_GET(32, bus_p,
512 		    PCI_PCIX_ECC_SEC_AD);
513 		pcix_ecc_regs->pcix_ecc_attr = PCIX_CAP_GET(32, bus_p,
514 		    PCI_PCIX_ECC_ATTR);
515 	}
516 }
517 
518 
519 static void
520 pf_pcix_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p)
521 {
522 	/*
523 	 * For PCI-X device PCI-X Capability only exists for Type 0 Headers.
524 	 * PCI-X Bridge Capability only exists for Type 1 Headers.
525 	 * Both capabilities do not exist at the same time.
526 	 */
527 	if (PCIE_IS_BDG(bus_p)) {
528 		pf_pcix_bdg_err_regs_t *pcix_bdg_regs;
529 
530 		pcix_bdg_regs = PCIX_BDG_ERR_REG(pfd_p);
531 
532 		pcix_bdg_regs->pcix_bdg_sec_stat = PCIX_CAP_GET(16, bus_p,
533 		    PCI_PCIX_SEC_STATUS);
534 		pcix_bdg_regs->pcix_bdg_stat = PCIX_CAP_GET(32, bus_p,
535 		    PCI_PCIX_BDG_STATUS);
536 
537 		if (PCIX_ECC_VERSION_CHECK(bus_p)) {
538 			/*
539 			 * PCI Express to PCI-X bridges only implement the
540 			 * secondary side of the PCI-X ECC registers, bit one is
541 			 * read-only so we make sure we do not write to it.
542 			 */
543 			if (!PCIE_IS_PCIE_BDG(bus_p)) {
544 				PCIX_CAP_PUT(32, bus_p, PCI_PCIX_BDG_ECC_STATUS,
545 				    0);
546 				pf_pcix_ecc_regs_gather(
547 				    PCIX_BDG_ECC_REG(pfd_p, 0), bus_p, B_TRUE);
548 				PCIX_CAP_PUT(32, bus_p, PCI_PCIX_BDG_ECC_STATUS,
549 				    1);
550 			}
551 			pf_pcix_ecc_regs_gather(PCIX_BDG_ECC_REG(pfd_p, 0),
552 			    bus_p, B_TRUE);
553 		}
554 	} else {
555 		pf_pcix_err_regs_t *pcix_regs = PCIX_ERR_REG(pfd_p);
556 
557 		pcix_regs->pcix_command = PCIX_CAP_GET(16, bus_p,
558 		    PCI_PCIX_COMMAND);
559 		pcix_regs->pcix_status = PCIX_CAP_GET(32, bus_p,
560 		    PCI_PCIX_STATUS);
561 		if (PCIX_ECC_VERSION_CHECK(bus_p))
562 			pf_pcix_ecc_regs_gather(PCIX_ECC_REG(pfd_p), bus_p,
563 			    B_TRUE);
564 	}
565 }
566 
/*
 * Snapshot the PCI Express error registers into pfd_p: device
 * status/control, root port registers when applicable, and the AER
 * capability registers (UE/CE status, header logs, bridge secondary
 * AER, and root port AER) when the device implements AER.
 */
static void
pf_pcie_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p)
{
	pf_pcie_err_regs_t *pcie_regs = PCIE_ERR_REG(pfd_p);
	pf_pcie_adv_err_regs_t *pcie_adv_regs = PCIE_ADV_REG(pfd_p);

	pcie_regs->pcie_err_status = PCIE_CAP_GET(16, bus_p, PCIE_DEVSTS);
	pcie_regs->pcie_err_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
	pcie_regs->pcie_dev_cap = PCIE_CAP_GET(32, bus_p, PCIE_DEVCAP);

	/* A PCIe bridge with a PCI-X secondary bus also has PCI-X state. */
	if (PCIE_IS_BDG(bus_p) && PCIE_IS_PCIX(bus_p))
		pf_pcix_regs_gather(pfd_p, bus_p);

	if (PCIE_IS_ROOT(bus_p)) {
		pf_pcie_rp_err_regs_t *pcie_rp_regs = PCIE_RP_REG(pfd_p);

		pcie_rp_regs->pcie_rp_status = PCIE_CAP_GET(32, bus_p,
		    PCIE_ROOTSTS);
		pcie_rp_regs->pcie_rp_ctl = PCIE_CAP_GET(16, bus_p,
		    PCIE_ROOTCTL);
	}

	/* Everything below requires the AER capability. */
	if (!PCIE_HAS_AER(bus_p))
		return;

	/* Gather UE AERs */
	pcie_adv_regs->pcie_adv_ctl = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_CTL);
	pcie_adv_regs->pcie_ue_status = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_UCE_STS);
	pcie_adv_regs->pcie_ue_mask = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_UCE_MASK);
	pcie_adv_regs->pcie_ue_sev = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_UCE_SERV);
	/* Header log: four consecutive 32-bit words describing the TLP. */
	PCIE_ADV_HDR(pfd_p, 0) = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_HDR_LOG);
	PCIE_ADV_HDR(pfd_p, 1) = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_HDR_LOG + 0x4);
	PCIE_ADV_HDR(pfd_p, 2) = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_HDR_LOG + 0x8);
	PCIE_ADV_HDR(pfd_p, 3) = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_HDR_LOG + 0xc);

	/* Gather CE AERs */
	pcie_adv_regs->pcie_ce_status = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_CE_STS);
	pcie_adv_regs->pcie_ce_mask = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_CE_MASK);

	/*
	 * If pci express to pci bridge then grab the bridge
	 * error registers.
	 */
	if (PCIE_IS_PCIE_BDG(bus_p)) {
		pf_pcie_adv_bdg_err_regs_t *pcie_bdg_regs =
		    PCIE_ADV_BDG_REG(pfd_p);

		pcie_bdg_regs->pcie_sue_ctl = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SCTL);
		pcie_bdg_regs->pcie_sue_status = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SUCE_STS);
		pcie_bdg_regs->pcie_sue_mask = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SUCE_MASK);
		pcie_bdg_regs->pcie_sue_sev = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SUCE_SERV);
		/* Secondary header log, also four 32-bit words. */
		PCIE_ADV_BDG_HDR(pfd_p, 0) = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SHDR_LOG);
		PCIE_ADV_BDG_HDR(pfd_p, 1) = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SHDR_LOG + 0x4);
		PCIE_ADV_BDG_HDR(pfd_p, 2) = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SHDR_LOG + 0x8);
		PCIE_ADV_BDG_HDR(pfd_p, 3) = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SHDR_LOG + 0xc);
	}

	/*
	 * If PCI Express root port then grab the root port
	 * error registers.
	 */
	if (PCIE_IS_ROOT(bus_p)) {
		pf_pcie_adv_rp_err_regs_t *pcie_rp_regs =
		    PCIE_ADV_RP_REG(pfd_p);

		pcie_rp_regs->pcie_rp_err_cmd = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_RE_CMD);
		pcie_rp_regs->pcie_rp_err_status = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_RE_STS);
		pcie_rp_regs->pcie_rp_ce_src_id = PCIE_AER_GET(16, bus_p,
		    PCIE_AER_CE_SRC_ID);
		pcie_rp_regs->pcie_rp_ue_src_id = PCIE_AER_GET(16, bus_p,
		    PCIE_AER_ERR_SRC_ID);
	}
}
660 
661 static void
662 pf_pci_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p)
663 {
664 	pf_pci_err_regs_t *pci_regs = PCI_ERR_REG(pfd_p);
665 
666 	/*
667 	 * Start by reading all the error registers that are available for
668 	 * pci and pci express and for leaf devices and bridges/switches
669 	 */
670 	pci_regs->pci_err_status = PCIE_GET(16, bus_p, PCI_CONF_STAT);
671 	pci_regs->pci_cfg_comm = PCIE_GET(16, bus_p, PCI_CONF_COMM);
672 
673 	/*
674 	 * If pci-pci bridge grab PCI bridge specific error registers.
675 	 */
676 	if (PCIE_IS_BDG(bus_p)) {
677 		pf_pci_bdg_err_regs_t *pci_bdg_regs = PCI_BDG_ERR_REG(pfd_p);
678 		pci_bdg_regs->pci_bdg_sec_stat =
679 		    PCIE_GET(16, bus_p, PCI_BCNF_SEC_STATUS);
680 		pci_bdg_regs->pci_bdg_ctrl =
681 		    PCIE_GET(16, bus_p, PCI_BCNF_BCNTRL);
682 	}
683 
684 	/*
685 	 * If pci express device grab pci express error registers and
686 	 * check for advanced error reporting features and grab them if
687 	 * available.
688 	 */
689 	if (PCIE_IS_PCIE(bus_p))
690 		pf_pcie_regs_gather(pfd_p, bus_p);
691 	else if (PCIE_IS_PCIX(bus_p))
692 		pf_pcix_regs_gather(pfd_p, bus_p);
693 
694 }
695 
/*
 * Clear the PCI-X error state previously gathered into pfd_p by writing
 * the captured status values back to the hardware (the error bits clear
 * when the set bits are written back).
 */
static void
pf_pcix_regs_clear(pf_data_t *pfd_p, pcie_bus_t *bus_p)
{
	if (PCIE_IS_BDG(bus_p)) {
		pf_pcix_bdg_err_regs_t *pcix_bdg_regs;

		pcix_bdg_regs = PCIX_BDG_ERR_REG(pfd_p);

		PCIX_CAP_PUT(16, bus_p, PCI_PCIX_SEC_STATUS,
		    pcix_bdg_regs->pcix_bdg_sec_stat);

		PCIX_CAP_PUT(32, bus_p, PCI_PCIX_BDG_STATUS,
		    pcix_bdg_regs->pcix_bdg_stat);

		if (PCIX_ECC_VERSION_CHECK(bus_p)) {
			pf_pcix_ecc_regs_t *pcix_bdg_ecc_regs;
			/*
			 * PCI Express to PCI-X bridges only implement the
			 * secondary side of the PCI-X ECC registers.  For
			 * clearing, there is no need to "select" the ECC
			 * register, just write what was originally read.
			 */
			if (!PCIE_IS_PCIE_BDG(bus_p)) {
				pcix_bdg_ecc_regs = PCIX_BDG_ECC_REG(pfd_p, 0);
				PCIX_CAP_PUT(32, bus_p, PCI_PCIX_BDG_ECC_STATUS,
				    pcix_bdg_ecc_regs->pcix_ecc_ctlstat);

			}
			pcix_bdg_ecc_regs = PCIX_BDG_ECC_REG(pfd_p, 1);
			PCIX_CAP_PUT(32, bus_p, PCI_PCIX_BDG_ECC_STATUS,
			    pcix_bdg_ecc_regs->pcix_ecc_ctlstat);
		}
	} else {
		pf_pcix_err_regs_t *pcix_regs = PCIX_ERR_REG(pfd_p);

		PCIX_CAP_PUT(32, bus_p, PCI_PCIX_STATUS,
		    pcix_regs->pcix_status);

		if (PCIX_ECC_VERSION_CHECK(bus_p)) {
			pf_pcix_ecc_regs_t *pcix_ecc_regs = PCIX_ECC_REG(pfd_p);

			PCIX_CAP_PUT(32, bus_p, PCI_PCIX_ECC_STATUS,
			    pcix_ecc_regs->pcix_ecc_ctlstat);
		}
	}
}
742 
/*
 * Clear the PCIe and AER error state previously gathered into pfd_p by
 * writing the captured status values back (writing the set bits back
 * clears them).
 */
static void
pf_pcie_regs_clear(pf_data_t *pfd_p, pcie_bus_t *bus_p)
{
	pf_pcie_err_regs_t *pcie_regs = PCIE_ERR_REG(pfd_p);
	pf_pcie_adv_err_regs_t *pcie_adv_regs = PCIE_ADV_REG(pfd_p);

	PCIE_CAP_PUT(16, bus_p, PCIE_DEVSTS, pcie_regs->pcie_err_status);

	/* Mirror the gather path: bridges with PCI-X state clear it too. */
	if (PCIE_IS_BDG(bus_p) && PCIE_IS_PCIX(bus_p))
		pf_pcix_regs_clear(pfd_p, bus_p);

	/* Nothing more to do without the AER capability. */
	if (!PCIE_HAS_AER(bus_p))
		return;

	PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_STS,
	    pcie_adv_regs->pcie_ue_status);

	PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_STS,
	    pcie_adv_regs->pcie_ce_status);

	if (PCIE_IS_PCIE_BDG(bus_p)) {
		pf_pcie_adv_bdg_err_regs_t *pcie_bdg_regs =
		    PCIE_ADV_BDG_REG(pfd_p);

		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_STS,
		    pcie_bdg_regs->pcie_sue_status);
	}

	/*
	 * If PCI Express root complex then clear the root complex
	 * error registers.
	 */
	if (PCIE_IS_ROOT(bus_p)) {
		pf_pcie_adv_rp_err_regs_t *pcie_rp_regs;

		pcie_rp_regs = PCIE_ADV_RP_REG(pfd_p);

		PCIE_AER_PUT(32, bus_p, PCIE_AER_RE_STS,
		    pcie_rp_regs->pcie_rp_err_status);
	}
}
784 
/*
 * Clear all error state previously gathered into pfd_p: the capability
 * specific registers (PCIe or PCI-X) first, then the generic PCI status
 * and, for bridges, the secondary status.
 */
static void
pf_pci_regs_clear(pf_data_t *pfd_p, pcie_bus_t *bus_p)
{
	if (PCIE_IS_PCIE(bus_p))
		pf_pcie_regs_clear(pfd_p, bus_p);
	else if (PCIE_IS_PCIX(bus_p))
		pf_pcix_regs_clear(pfd_p, bus_p);

	PCIE_PUT(16, bus_p, PCI_CONF_STAT, pfd_p->pe_pci_regs->pci_err_status);

	if (PCIE_IS_BDG(bus_p)) {
		pf_pci_bdg_err_regs_t *pci_bdg_regs = PCI_BDG_ERR_REG(pfd_p);
		PCIE_PUT(16, bus_p, PCI_BCNF_SEC_STATUS,
		    pci_bdg_regs->pci_bdg_sec_stat);
	}
}
801 
/*
 * Gather and then clear all error register state for "dip", leaving the
 * device's error registers in a known-clean state.
 */
/* ARGSUSED */
void
pcie_clear_errors(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	pf_data_t *pfd_p = PCIE_DIP2PFD(dip);

	ASSERT(bus_p);

	/* Gather first so the clear writes back exactly the bits that are set. */
	pf_pci_regs_gather(pfd_p, bus_p);
	pf_pci_regs_clear(pfd_p, bus_p);
}
814 
/* Find the fault BDF, fault Addr or full scan on a PCIe Root Port. */
static void
pf_pci_find_rp_fault(pf_data_t *pfd_p, pcie_bus_t *bus_p)
{
	pf_root_fault_t *root_fault = PCIE_ROOT_FAULT(pfd_p);
	pf_pcie_adv_rp_err_regs_t *rp_regs = PCIE_ADV_RP_REG(pfd_p);
	uint32_t root_err = rp_regs->pcie_rp_err_status;
	uint32_t ue_err = PCIE_ADV_REG(pfd_p)->pcie_ue_status;
	int num_faults = 0;

	/* Since this data structure is reused, make sure to reset it */
	root_fault->full_scan = B_FALSE;
	root_fault->scan_bdf = PCIE_INVALID_BDF;
	root_fault->scan_addr = 0;

	/* Without AER there is no source info; any bridge error = full scan. */
	if (!PCIE_HAS_AER(bus_p) &&
	    (PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat & PF_PCI_BDG_ERR)) {
		PCIE_ROOT_FAULT(pfd_p)->full_scan = B_TRUE;
		return;
	}

	/*
	 * Check to see if an error has been received that
	 * requires a scan of the fabric.  Count the number of
	 * faults seen.  If MUL CE/FE_NFE that counts for
	 * at least 2 faults, so just return with full_scan.
	 */
	if ((root_err & PCIE_AER_RE_STS_MUL_CE_RCVD) ||
	    (root_err & PCIE_AER_RE_STS_MUL_FE_NFE_RCVD)) {
		PCIE_ROOT_FAULT(pfd_p)->full_scan = B_TRUE;
		return;
	}

	if (root_err & PCIE_AER_RE_STS_CE_RCVD)
		num_faults++;

	if (root_err & PCIE_AER_RE_STS_FE_NFE_RCVD)
		num_faults++;

	if (ue_err & PCIE_AER_UCE_CA)
		num_faults++;

	if (ue_err & PCIE_AER_UCE_UR)
		num_faults++;

	/* If no faults just return */
	if (num_faults == 0)
		return;

	/* If faults > 1 do full scan */
	if (num_faults > 1) {
		PCIE_ROOT_FAULT(pfd_p)->full_scan = B_TRUE;
		return;
	}

	/* By this point, there is only 1 fault detected */
	if (root_err & PCIE_AER_RE_STS_CE_RCVD) {
		/* Correctable: the source ID register names the sender. */
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = rp_regs->pcie_rp_ce_src_id;
		num_faults--;
	} else if (root_err & PCIE_AER_RE_STS_FE_NFE_RCVD) {
		/* Fatal/non-fatal: likewise use the UE source ID. */
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = rp_regs->pcie_rp_ue_src_id;
		num_faults--;
	} else if ((HAS_AER_LOGS(pfd_p, PCIE_AER_UCE_CA) ||
	    HAS_AER_LOGS(pfd_p, PCIE_AER_UCE_UR)) &&
	    (pf_tlp_decode(PCIE_PFD2BUS(pfd_p), PCIE_ADV_REG(pfd_p)) ==
	    DDI_SUCCESS)) {
		/* CA/UR: decode the logged TLP to recover the target addr. */
		PCIE_ROOT_FAULT(pfd_p)->scan_addr =
		    PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_addr;
		num_faults--;
	}

	/*
	 * This means an error did occur, but we couldn't extract the fault BDF
	 */
	if (num_faults > 0)
		PCIE_ROOT_FAULT(pfd_p)->full_scan = B_TRUE;

}
893 
894 
/*
 * Load PCIe Fault Data for PCI/PCIe devices into PCIe Fault Data Queue
 *
 * Returns a scan flag.
 * o PF_SCAN_SUCCESS - Error gathered and cleared sucessfuly, data added to
 *   Fault Q
 * o PF_SCAN_BAD_RESPONSE - Unable to talk to device, item added to fault Q
 * o PF_SCAN_CB_FAILURE - A hardened device deemed that the error was fatal.
 * o PF_SCAN_NO_ERR_IN_CHILD - Only applies to bridge to prevent further
 *   unnecessary scanning
 * o PF_SCAN_IN_DQ - This device has already been scanned; it was skipped this
 *   time.
 */
static int
pf_default_hdl(dev_info_t *dip, pf_impl_t *impl)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	pf_data_t *pfd_p = PCIE_DIP2PFD(dip);
	int cb_sts, scan_flag = PF_SCAN_SUCCESS;

	/* Make sure this device hasn't already been snapshotted and cleared */
	if (pfd_p->pe_valid == B_TRUE) {
		scan_flag |= PF_SCAN_IN_DQ;
		goto done;
	}

	/*
	 * Read vendor/device ID and check with cached data, if it doesn't match
	 * could very well be a device that isn't responding anymore.  Just
	 * stop.  Save the basic info in the error q for post mortem debugging
	 * purposes.
	 */
	if (PCIE_GET(32, bus_p, PCI_CONF_VENID) != bus_p->bus_dev_ven_id) {
		char buf[FM_MAX_CLASS];

		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
		    PCI_ERROR_SUBCLASS, PCI_NR);
		ddi_fm_ereport_post(dip, buf, fm_ena_generate(0, FM_ENA_FMT1),
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, NULL);

		/*
		 * For IOV/Hotplug purposes skip gathering info for this device,
		 * but populate affected info and severity.  Clear out any data
		 * that may have been saved in the last fabric scan.
		 */
		pf_reset_pfd(pfd_p);
		pfd_p->pe_severity_flags = PF_ERR_PANIC_BAD_RESPONSE;
		PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags = PF_AFFECTED_SELF;

		/* Add the snapshot to the error q */
		pf_en_dq(pfd_p, impl);
		pfd_p->pe_valid = B_TRUE;

		return (PF_SCAN_BAD_RESPONSE);
	}

	/* Snapshot then clear this device's error registers. */
	pf_pci_regs_gather(pfd_p, bus_p);
	pf_pci_regs_clear(pfd_p, bus_p);
	/* Root ports additionally determine the fault BDF/addr/full-scan. */
	if (PCIE_IS_RP(bus_p))
		pf_pci_find_rp_fault(pfd_p, bus_p);

	/* Give the (possibly hardened) driver a chance to assess the error. */
	cb_sts = pf_fm_callback(dip, impl->pf_derr);

	if (cb_sts == DDI_FM_FATAL || cb_sts == DDI_FM_UNKNOWN)
		scan_flag |= PF_SCAN_CB_FAILURE;

	/* Add the snapshot to the error q */
	pf_en_dq(pfd_p, impl);

done:
	/*
	 * If a bridge does not have any error no need to scan any further down.
	 * For PCIe devices, check the PCIe device status and PCI secondary
	 * status.
	 * - Some non-compliant PCIe devices do not utilize PCIe
	 *   error registers.  If so rely on legacy PCI error registers.
	 * For PCI devices, check the PCI secondary status.
	 */
	if (PCIE_IS_PCIE_BDG(bus_p) &&
	    !(PCIE_ERR_REG(pfd_p)->pcie_err_status & PF_PCIE_BDG_ERR) &&
	    !(PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat & PF_PCI_BDG_ERR))
		scan_flag |= PF_SCAN_NO_ERR_IN_CHILD;

	if (PCIE_IS_PCI_BDG(bus_p) &&
	    !(PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat & PF_PCI_BDG_ERR))
		scan_flag |= PF_SCAN_NO_ERR_IN_CHILD;

	pfd_p->pe_valid = B_TRUE;
	return (scan_flag);
}
985 
986 /*
987  * Called during postattach to initialize a device's error handling
988  * capabilities.  If the devices has already been hardened, then there isn't
989  * much needed.  Otherwise initialize the device's default FMA capabilities.
990  *
991  * In a future project where PCIe support is removed from pcifm, several
992  * "properties" that are setup in ddi_fm_init and pci_ereport_setup need to be
993  * created here so that the PCI/PCIe eversholt rules will work properly.
994  */
995 void
996 pf_init(dev_info_t *dip, ddi_iblock_cookie_t ibc, ddi_attach_cmd_t cmd)
997 {
998 	pcie_bus_t		*bus_p = PCIE_DIP2BUS(dip);
999 	struct i_ddi_fmhdl	*fmhdl = DEVI(dip)->devi_fmhdl;
1000 	boolean_t		need_cb_register = B_FALSE;
1001 
1002 	if (!bus_p) {
1003 		cmn_err(CE_WARN, "devi_bus information is not set for %s%d.\n",
1004 		    ddi_driver_name(dip), ddi_get_instance(dip));
1005 		return;
1006 	}
1007 
1008 	if (fmhdl) {
1009 		/*
1010 		 * If device is only ereport capable and not callback capable
1011 		 * make it callback capable. The only downside is that the
1012 		 * "fm-errcb-capable" property is not created for this device
1013 		 * which should be ok since it's not used anywhere.
1014 		 */
1015 		if (!(fmhdl->fh_cap & DDI_FM_ERRCB_CAPABLE))
1016 			need_cb_register = B_TRUE;
1017 	} else {
1018 		int cap;
1019 		/*
1020 		 * fm-capable in driver.conf can be used to set fm_capabilities.
1021 		 * If fm-capable is not defined, set the default
1022 		 * DDI_FM_EREPORT_CAPABLE and DDI_FM_ERRCB_CAPABLE.
1023 		 */
1024 		cap = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1025 		    DDI_PROP_DONTPASS, "fm-capable",
1026 		    DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1027 		cap &= (DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1028 
1029 		bus_p->bus_fm_flags |= PF_FM_IS_NH;
1030 
1031 		if (cmd == DDI_ATTACH) {
1032 			ddi_fm_init(dip, &cap, &ibc);
1033 			pci_ereport_setup(dip);
1034 		}
1035 
1036 		if (cap & DDI_FM_ERRCB_CAPABLE)
1037 			need_cb_register = B_TRUE;
1038 
1039 		fmhdl = DEVI(dip)->devi_fmhdl;
1040 	}
1041 
1042 	/* If ddi_fm_init fails for any reason RETURN */
1043 	if (!fmhdl) {
1044 		bus_p->bus_fm_flags = 0;
1045 		return;
1046 	}
1047 
1048 	fmhdl->fh_cap |=  DDI_FM_ERRCB_CAPABLE;
1049 	if (cmd == DDI_ATTACH) {
1050 		if (need_cb_register)
1051 			ddi_fm_handler_register(dip, pf_dummy_cb, NULL);
1052 	}
1053 
1054 	bus_p->bus_fm_flags |= PF_FM_READY;
1055 }
1056 
/*
 * Undo the FMA setup done by pf_init(); called at predetach.  Clears
 * PF_FM_READY first so no new error handling starts on this device, then
 * synchronizes with any in-flight handler before tearing down.
 */
void
pf_fini(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	if (!bus_p)
		return;

	/* Don't fini anything if device isn't FM Ready */
	if (!(bus_p->bus_fm_flags & PF_FM_READY))
		return;

	/* no other code should set the flag to false */
	bus_p->bus_fm_flags &= ~PF_FM_READY;

	/*
	 * Grab the mutex to make sure device isn't in the middle of
	 * error handling.  Setting the bus_fm_flag to ~PF_FM_READY
	 * should prevent this device from being error handled after
	 * the mutex has been released.
	 */
	(void) pf_handler_enter(dip, NULL);
	pf_handler_exit(dip);

	/* undo non-hardened drivers */
	if (bus_p->bus_fm_flags & PF_FM_IS_NH) {
		if (cmd == DDI_DETACH) {
			bus_p->bus_fm_flags &= ~PF_FM_IS_NH;
			pci_ereport_teardown(dip);
			/*
			 * ddi_fm_fini itself calls ddi_fm_handler_unregister,
			 * so no need to explicitly call unregister.
			 */
			ddi_fm_fini(dip);
		}
	}
}
1095 
/*
 * Placeholder error callback registered by pf_init() for devices that did
 * not register their own.  Always reports DDI_FM_OK so the fabric scan,
 * not the driver, determines the error severity.
 */
/*ARGSUSED*/
static int
pf_dummy_cb(dev_info_t *dip, ddi_fm_error_t *derr, const void *not_used)
{
	return (DDI_FM_OK);
}
1102 
1103 /*
1104  * Add PFD to queue.  If it is an RC add it to the beginning,
1105  * otherwise add it to the end.
1106  */
1107 static void
1108 pf_en_dq(pf_data_t *pfd_p, pf_impl_t *impl)
1109 {
1110 	pf_data_t *head_p = impl->pf_dq_head_p;
1111 	pf_data_t *tail_p = impl->pf_dq_tail_p;
1112 
1113 	impl->pf_total++;
1114 
1115 	if (!head_p) {
1116 		ASSERT(PFD_IS_ROOT(pfd_p));
1117 		impl->pf_dq_head_p = pfd_p;
1118 		impl->pf_dq_tail_p = pfd_p;
1119 		pfd_p->pe_prev = NULL;
1120 		pfd_p->pe_next = NULL;
1121 		return;
1122 	}
1123 
1124 	/* Check if this is a Root Port eprt */
1125 	if (PFD_IS_ROOT(pfd_p)) {
1126 		pf_data_t *root_p, *last_p = NULL;
1127 
1128 		/* The first item must be a RP */
1129 		root_p = head_p;
1130 		for (last_p = head_p; last_p && PFD_IS_ROOT(last_p);
1131 		    last_p = last_p->pe_next)
1132 			root_p = last_p;
1133 
1134 		/* root_p is the last RP pfd. last_p is the first non-RP pfd. */
1135 		root_p->pe_next = pfd_p;
1136 		pfd_p->pe_prev = root_p;
1137 		pfd_p->pe_next = last_p;
1138 
1139 		if (last_p)
1140 			last_p->pe_prev = pfd_p;
1141 		else
1142 			tail_p = pfd_p;
1143 	} else {
1144 		tail_p->pe_next = pfd_p;
1145 		pfd_p->pe_prev = tail_p;
1146 		pfd_p->pe_next = NULL;
1147 		tail_p = pfd_p;
1148 	}
1149 
1150 	impl->pf_dq_head_p = head_p;
1151 	impl->pf_dq_tail_p = tail_p;
1152 }
1153 
1154 /*
1155  * Ignore:
1156  * - TRAINING: as leaves do not have children
1157  * - SD: as leaves do not have children
1158  */
1159 const pf_fab_err_tbl_t pcie_pcie_tbl[] = {
1160 	{PCIE_AER_UCE_DLP,	pf_panic,
1161 	    PF_AFFECTED_PARENT, 0},
1162 
1163 	{PCIE_AER_UCE_PTLP,	pf_analyse_ptlp,
1164 	    PF_AFFECTED_SELF, 0},
1165 
1166 	{PCIE_AER_UCE_FCP,	pf_panic,
1167 	    PF_AFFECTED_PARENT, 0},
1168 
1169 	{PCIE_AER_UCE_TO,	pf_analyse_to,
1170 	    PF_AFFECTED_SELF, 0},
1171 
1172 	{PCIE_AER_UCE_CA,	pf_analyse_ca_ur,
1173 	    PF_AFFECTED_SELF, 0},
1174 
1175 	{PCIE_AER_UCE_UC,	pf_analyse_uc,
1176 	    0, 0},
1177 
1178 	{PCIE_AER_UCE_RO,	pf_panic,
1179 	    PF_AFFECTED_PARENT, 0},
1180 
1181 	{PCIE_AER_UCE_MTLP,	pf_panic,
1182 	    PF_AFFECTED_PARENT, 0},
1183 
1184 	{PCIE_AER_UCE_ECRC,	pf_panic,
1185 	    PF_AFFECTED_SELF, 0},
1186 
1187 	{PCIE_AER_UCE_UR,	pf_analyse_ca_ur,
1188 	    PF_AFFECTED_SELF, 0},
1189 
1190 	{0, NULL, 0, 0}
1191 };
1192 
/*
 * Error analysis table for PCIe Root Ports (also used for pseudo Root
 * Complexes; see pf_analyse_error()).  Same layout as pcie_pcie_tbl.
 */
const pf_fab_err_tbl_t pcie_rp_tbl[] = {
	{PCIE_AER_UCE_TRAINING,	pf_no_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_DLP,	pf_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_SD,	pf_no_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_PTLP,	pf_analyse_ptlp,
	    PF_AFFECTED_AER, PF_AFFECTED_CHILDREN},

	{PCIE_AER_UCE_FCP,	pf_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_TO,	pf_panic,
	    PF_AFFECTED_ADDR, PF_AFFECTED_CHILDREN},

	{PCIE_AER_UCE_CA,	pf_no_panic,
	    PF_AFFECTED_AER, PF_AFFECTED_CHILDREN},

	{PCIE_AER_UCE_UC,	pf_analyse_uc,
	    0, 0},

	{PCIE_AER_UCE_RO,	pf_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_MTLP,	pf_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_AER,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},

	{PCIE_AER_UCE_ECRC,	pf_panic,
	    PF_AFFECTED_AER, PF_AFFECTED_CHILDREN},

	{PCIE_AER_UCE_UR,	pf_no_panic,
	    PF_AFFECTED_AER, PF_AFFECTED_CHILDREN},

	{0, NULL, 0, 0}
};
1233 
/*
 * Error analysis table for PCIe switch upstream/downstream ports.  Same
 * layout as pcie_pcie_tbl.
 */
const pf_fab_err_tbl_t pcie_sw_tbl[] = {
	{PCIE_AER_UCE_TRAINING,	pf_no_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_DLP,	pf_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_SD,	pf_no_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_PTLP,	pf_analyse_ptlp,
	    PF_AFFECTED_AER, PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},

	{PCIE_AER_UCE_FCP,	pf_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_TO,	pf_analyse_to,
	    PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_CA,	pf_analyse_ca_ur,
	    PF_AFFECTED_AER, PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},

	{PCIE_AER_UCE_UC,	pf_analyse_uc,
	    0, 0},

	{PCIE_AER_UCE_RO,	pf_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_MTLP,	pf_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_AER,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},

	{PCIE_AER_UCE_ECRC,	pf_panic,
	    PF_AFFECTED_AER, PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},

	{PCIE_AER_UCE_UR,	pf_analyse_ca_ur,
	    PF_AFFECTED_AER, PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},

	{0, NULL, 0, 0}
};
1274 
/*
 * Error analysis table for PCIe-to-PCI bridges, keyed off the secondary
 * (SAER) uncorrectable status bits.  Same layout as pcie_pcie_tbl.
 */
const pf_fab_err_tbl_t pcie_pcie_bdg_tbl[] = {
	{PCIE_AER_SUCE_TA_ON_SC,	pf_analyse_sc,
	    0, 0},

	{PCIE_AER_SUCE_MA_ON_SC,	pf_analyse_sc,
	    0, 0},

	{PCIE_AER_SUCE_RCVD_TA,		pf_analyse_ma_ta,
	    0, 0},

	{PCIE_AER_SUCE_RCVD_MA,		pf_analyse_ma_ta,
	    0, 0},

	{PCIE_AER_SUCE_USC_ERR,		pf_panic,
	    PF_AFFECTED_SAER, PF_AFFECTED_CHILDREN},

	{PCIE_AER_SUCE_USC_MSG_DATA_ERR, pf_analyse_ma_ta,
	    PF_AFFECTED_SAER, PF_AFFECTED_CHILDREN},

	{PCIE_AER_SUCE_UC_DATA_ERR,	pf_analyse_uc_data,
	    PF_AFFECTED_SAER, PF_AFFECTED_CHILDREN},

	{PCIE_AER_SUCE_UC_ATTR_ERR,	pf_panic,
	    PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_SUCE_UC_ADDR_ERR,	pf_panic,
	    PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_SUCE_TIMER_EXPIRED,	pf_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_SUCE_PERR_ASSERT,	pf_analyse_perr_assert,
	    0, 0},

	{PCIE_AER_SUCE_SERR_ASSERT,	pf_no_panic,
	    0, 0},

	{PCIE_AER_SUCE_INTERNAL_ERR,	pf_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{0, NULL, 0, 0}
};
1317 
/*
 * Error analysis table for the secondary status of PCI-PCI bridges, keyed
 * off legacy PCI status bits.  Same layout as pcie_pcie_tbl.
 */
const pf_fab_err_tbl_t pcie_pci_bdg_tbl[] = {
	{PCI_STAT_PERROR,	pf_analyse_pci,
	    PF_AFFECTED_SELF, 0},

	{PCI_STAT_S_PERROR,	pf_analyse_pci,
	    PF_AFFECTED_SELF, 0},

	{PCI_STAT_S_SYSERR,	pf_panic,
	    PF_AFFECTED_SELF, 0},

	{PCI_STAT_R_MAST_AB,	pf_analyse_pci,
	    PF_AFFECTED_SELF, 0},

	{PCI_STAT_R_TARG_AB,	pf_analyse_pci,
	    PF_AFFECTED_SELF, 0},

	{PCI_STAT_S_TARG_AB,	pf_analyse_pci,
	    PF_AFFECTED_SELF, 0},

	{0, NULL, 0, 0}
};
1339 
/*
 * Error analysis table for legacy PCI devices, keyed off the PCI status
 * register bits.  Same layout as pcie_pcie_tbl.
 */
const pf_fab_err_tbl_t pcie_pci_tbl[] = {
	{PCI_STAT_PERROR,	pf_analyse_pci,
	    PF_AFFECTED_SELF, 0},

	{PCI_STAT_S_PERROR,	pf_analyse_pci,
	    PF_AFFECTED_SELF, 0},

	{PCI_STAT_S_SYSERR,	pf_panic,
	    PF_AFFECTED_SELF, 0},

	{PCI_STAT_R_MAST_AB,	pf_analyse_pci,
	    PF_AFFECTED_SELF, 0},

	{PCI_STAT_R_TARG_AB,	pf_analyse_pci,
	    PF_AFFECTED_SELF, 0},

	{PCI_STAT_S_TARG_AB,	pf_analyse_pci,
	    PF_AFFECTED_SELF, 0},

	{0, NULL, 0, 0}
};
1361 
/*
 * UE status with masked bits removed: status & ~mask.
 * (XOR with 0xFFFFFFFF is the bitwise complement of the 32-bit mask.)
 */
#define	PF_MASKED_AER_ERR(pfd_p) \
	(PCIE_ADV_REG(pfd_p)->pcie_ue_status & \
	    ((PCIE_ADV_REG(pfd_p)->pcie_ue_mask) ^ 0xFFFFFFFF))
/* Same, for a bridge's secondary (SAER) status/mask pair. */
#define	PF_MASKED_SAER_ERR(pfd_p) \
	(PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_status & \
	    ((PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_mask) ^ 0xFFFFFFFF))
1368 /*
1369  * Analyse all the PCIe Fault Data (erpt) gathered during dispatch in the erpt
1370  * Queue.
1371  */
1372 static int
1373 pf_analyse_error(ddi_fm_error_t *derr, pf_impl_t *impl)
1374 {
1375 	int		sts_flags, error_flags = 0;
1376 	pf_data_t	*pfd_p;
1377 
1378 	for (pfd_p = impl->pf_dq_head_p; pfd_p; pfd_p = pfd_p->pe_next) {
1379 		sts_flags = 0;
1380 
1381 		/* skip analysing error when no error info is gathered */
1382 		if (pfd_p->pe_severity_flags == PF_ERR_PANIC_BAD_RESPONSE)
1383 			goto done;
1384 
1385 		switch (PCIE_PFD2BUS(pfd_p)->bus_dev_type) {
1386 		case PCIE_PCIECAP_DEV_TYPE_PCIE_DEV:
1387 		case PCIE_PCIECAP_DEV_TYPE_PCI_DEV:
1388 			if (PCIE_DEVSTS_CE_DETECTED &
1389 			    PCIE_ERR_REG(pfd_p)->pcie_err_status)
1390 				sts_flags |= PF_ERR_CE;
1391 
1392 			pf_adjust_for_no_aer(pfd_p);
1393 			sts_flags |= pf_analyse_error_tbl(derr, impl,
1394 			    pfd_p, pcie_pcie_tbl, PF_MASKED_AER_ERR(pfd_p));
1395 			break;
1396 		case PCIE_PCIECAP_DEV_TYPE_ROOT:
1397 			pf_adjust_for_no_aer(pfd_p);
1398 			sts_flags |= pf_analyse_error_tbl(derr, impl,
1399 			    pfd_p, pcie_rp_tbl, PF_MASKED_AER_ERR(pfd_p));
1400 			break;
1401 		case PCIE_PCIECAP_DEV_TYPE_RC_PSEUDO:
1402 			/* no adjust_for_aer for pseudo RC */
1403 			/* keep the severity passed on from RC if any */
1404 			sts_flags |= pfd_p->pe_severity_flags;
1405 			sts_flags |= pf_analyse_error_tbl(derr, impl, pfd_p,
1406 			    pcie_rp_tbl, PF_MASKED_AER_ERR(pfd_p));
1407 			break;
1408 		case PCIE_PCIECAP_DEV_TYPE_UP:
1409 		case PCIE_PCIECAP_DEV_TYPE_DOWN:
1410 			if (PCIE_DEVSTS_CE_DETECTED &
1411 			    PCIE_ERR_REG(pfd_p)->pcie_err_status)
1412 				sts_flags |= PF_ERR_CE;
1413 
1414 			pf_adjust_for_no_aer(pfd_p);
1415 			sts_flags |= pf_analyse_error_tbl(derr, impl,
1416 			    pfd_p, pcie_sw_tbl, PF_MASKED_AER_ERR(pfd_p));
1417 			break;
1418 		case PCIE_PCIECAP_DEV_TYPE_PCIE2PCI:
1419 			if (PCIE_DEVSTS_CE_DETECTED &
1420 			    PCIE_ERR_REG(pfd_p)->pcie_err_status)
1421 				sts_flags |= PF_ERR_CE;
1422 
1423 			pf_adjust_for_no_aer(pfd_p);
1424 			pf_adjust_for_no_saer(pfd_p);
1425 			sts_flags |= pf_analyse_error_tbl(derr,
1426 			    impl, pfd_p, pcie_pcie_tbl,
1427 			    PF_MASKED_AER_ERR(pfd_p));
1428 			sts_flags |= pf_analyse_error_tbl(derr,
1429 			    impl, pfd_p, pcie_pcie_bdg_tbl,
1430 			    PF_MASKED_SAER_ERR(pfd_p));
1431 			/*
1432 			 * Some non-compliant PCIe devices do not utilize PCIe
1433 			 * error registers.  So fallthrough and rely on legacy
1434 			 * PCI error registers.
1435 			 */
1436 			if ((PCIE_DEVSTS_NFE_DETECTED | PCIE_DEVSTS_FE_DETECTED)
1437 			    & PCIE_ERR_REG(pfd_p)->pcie_err_status)
1438 				break;
1439 			/* FALLTHROUGH */
1440 		case PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO:
1441 			sts_flags |= pf_analyse_error_tbl(derr, impl,
1442 			    pfd_p, pcie_pci_tbl,
1443 			    PCI_ERR_REG(pfd_p)->pci_err_status);
1444 
1445 			if (!PCIE_IS_BDG(PCIE_PFD2BUS(pfd_p)))
1446 				break;
1447 
1448 			sts_flags |= pf_analyse_error_tbl(derr,
1449 			    impl, pfd_p, pcie_pci_bdg_tbl,
1450 			    PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat);
1451 		}
1452 
1453 		pfd_p->pe_severity_flags = sts_flags;
1454 
1455 done:
1456 		pfd_p->pe_orig_severity_flags = pfd_p->pe_severity_flags;
1457 		/* Have pciev_eh adjust the severity */
1458 		pfd_p->pe_severity_flags = pciev_eh(pfd_p, impl);
1459 
1460 		error_flags |= pfd_p->pe_severity_flags;
1461 	}
1462 
1463 	return (error_flags);
1464 }
1465 
1466 static int
1467 pf_analyse_error_tbl(ddi_fm_error_t *derr, pf_impl_t *impl,
1468     pf_data_t *pfd_p, const pf_fab_err_tbl_t *tbl, uint32_t err_reg)
1469 {
1470 	const pf_fab_err_tbl_t *row;
1471 	int err = 0;
1472 	uint16_t flags;
1473 	uint32_t bit;
1474 
1475 	for (row = tbl; err_reg && (row->bit != 0); row++) {
1476 		bit = row->bit;
1477 		if (!(err_reg & bit))
1478 			continue;
1479 		err |= row->handler(derr, bit, impl->pf_dq_head_p, pfd_p);
1480 
1481 		flags = row->affected_flags;
1482 		/*
1483 		 * check if the primary flag is valid;
1484 		 * if not, use the secondary flag
1485 		 */
1486 		if (flags & PF_AFFECTED_AER) {
1487 			if (!HAS_AER_LOGS(pfd_p, bit)) {
1488 				flags = row->sec_affected_flags;
1489 			}
1490 		} else if (flags & PF_AFFECTED_SAER) {
1491 			if (!HAS_SAER_LOGS(pfd_p, bit)) {
1492 				flags = row->sec_affected_flags;
1493 			}
1494 		} else if (flags & PF_AFFECTED_ADDR) {
1495 			/* only Root has this flag */
1496 			if (PCIE_ROOT_FAULT(pfd_p)->scan_addr == 0) {
1497 				flags = row->sec_affected_flags;
1498 			}
1499 		}
1500 
1501 		PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags |= flags;
1502 	}
1503 
1504 	if (!err)
1505 		err = PF_ERR_NO_ERROR;
1506 
1507 	return (err);
1508 }
1509 
1510 /*
1511  * PCIe Completer Abort and Unsupport Request error analyser.  If a PCIe device
1512  * issues a CA/UR a corresponding Received CA/UR should have been seen in the
1513  * PCIe root complex.  Check to see if RC did indeed receive a CA/UR, if so then
1514  * this error may be safely ignored.  If not check the logs and see if an
1515  * associated handler for this transaction can be found.
1516  */
1517 /* ARGSUSED */
1518 static int
1519 pf_analyse_ca_ur(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1520     pf_data_t *pfd_p)
1521 {
1522 	uint32_t	abort_type;
1523 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1524 
1525 	/* If UR's are masked forgive this error */
1526 	if ((pcie_get_aer_uce_mask() & PCIE_AER_UCE_UR) &&
1527 	    (bit == PCIE_AER_UCE_UR))
1528 		return (PF_ERR_NO_PANIC);
1529 
1530 	/*
1531 	 * If a RP has an CA/UR it means a leaf sent a bad request to the RP
1532 	 * such as a config read or a bad DMA address.
1533 	 */
1534 	if (PCIE_IS_RP(PCIE_PFD2BUS(pfd_p)))
1535 		goto handle_lookup;
1536 
1537 	if (bit == PCIE_AER_UCE_UR)
1538 		abort_type = PCI_STAT_R_MAST_AB;
1539 	else
1540 		abort_type = PCI_STAT_R_TARG_AB;
1541 
1542 	if (pf_matched_in_rc(dq_head_p, pfd_p, abort_type))
1543 		return (PF_ERR_MATCHED_RC);
1544 
1545 handle_lookup:
1546 	if (HAS_AER_LOGS(pfd_p, bit) &&
1547 	    pf_log_hdl_lookup(rpdip, derr, pfd_p, B_TRUE) == PF_HDL_FOUND)
1548 			return (PF_ERR_MATCHED_DEVICE);
1549 
1550 	return (PF_ERR_PANIC);
1551 }
1552 
1553 /*
1554  * PCIe-PCI Bridge Received Master Abort and Target error analyser.  If a PCIe
1555  * Bridge receives a MA/TA a corresponding sent CA/UR should have been seen in
1556  * the PCIe root complex.  Check to see if RC did indeed receive a CA/UR, if so
1557  * then this error may be safely ignored.  If not check the logs and see if an
1558  * associated handler for this transaction can be found.
1559  */
1560 /* ARGSUSED */
1561 static int
1562 pf_analyse_ma_ta(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1563     pf_data_t *pfd_p)
1564 {
1565 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1566 	uint32_t	abort_type;
1567 
1568 	/* If UR's are masked forgive this error */
1569 	if ((pcie_get_aer_uce_mask() & PCIE_AER_UCE_UR) &&
1570 	    (bit == PCIE_AER_SUCE_RCVD_MA))
1571 		return (PF_ERR_NO_PANIC);
1572 
1573 	if (bit == PCIE_AER_SUCE_RCVD_MA)
1574 		abort_type = PCI_STAT_R_MAST_AB;
1575 	else
1576 		abort_type = PCI_STAT_R_TARG_AB;
1577 
1578 	if (pf_matched_in_rc(dq_head_p, pfd_p, abort_type))
1579 		return (PF_ERR_MATCHED_RC);
1580 
1581 	if (!HAS_SAER_LOGS(pfd_p, bit))
1582 		return (PF_ERR_PANIC);
1583 
1584 	if (pf_log_hdl_lookup(rpdip, derr, pfd_p, B_FALSE) == PF_HDL_FOUND)
1585 		return (PF_ERR_MATCHED_DEVICE);
1586 
1587 	return (PF_ERR_PANIC);
1588 }
1589 
1590 /*
1591  * Generic PCI error analyser.  This function is used for Parity Errors,
1592  * Received Master Aborts, Received Target Aborts, and Signaled Target Aborts.
1593  * In general PCI devices do not have error logs, it is very difficult to figure
1594  * out what transaction caused the error.  Instead find the nearest PCIe-PCI
1595  * Bridge and check to see if it has logs and if it has an error associated with
1596  * this PCI Device.
1597  */
1598 /* ARGSUSED */
1599 static int
1600 pf_analyse_pci(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1601     pf_data_t *pfd_p)
1602 {
1603 	pf_data_t	*parent_pfd_p;
1604 	uint16_t	cmd;
1605 	uint32_t	aer_ue_status;
1606 	pcie_bus_t	*bus_p = PCIE_PFD2BUS(pfd_p);
1607 	pf_pcie_adv_bdg_err_regs_t *parent_saer_p;
1608 
1609 	if (PCI_ERR_REG(pfd_p)->pci_err_status & PCI_STAT_S_SYSERR)
1610 		return (PF_ERR_PANIC);
1611 
1612 	/* If UR's are masked forgive this error */
1613 	if ((pcie_get_aer_uce_mask() & PCIE_AER_UCE_UR) &&
1614 	    (bit == PCI_STAT_R_MAST_AB))
1615 		return (PF_ERR_NO_PANIC);
1616 
1617 
1618 	if (bit & (PCI_STAT_PERROR | PCI_STAT_S_PERROR)) {
1619 		aer_ue_status = PCIE_AER_SUCE_PERR_ASSERT;
1620 	} else {
1621 		aer_ue_status = (PCIE_AER_SUCE_TA_ON_SC |
1622 		    PCIE_AER_SUCE_MA_ON_SC | PCIE_AER_SUCE_RCVD_TA |
1623 		    PCIE_AER_SUCE_RCVD_MA);
1624 	}
1625 
1626 	parent_pfd_p = pf_get_parent_pcie_bridge(pfd_p);
1627 	if (parent_pfd_p == NULL)
1628 		return (PF_ERR_PANIC);
1629 
1630 	/* Check if parent bridge has seen this error */
1631 	parent_saer_p = PCIE_ADV_BDG_REG(parent_pfd_p);
1632 	if (!(parent_saer_p->pcie_sue_status & aer_ue_status) ||
1633 	    !HAS_SAER_LOGS(parent_pfd_p, aer_ue_status))
1634 		return (PF_ERR_PANIC);
1635 
1636 	/*
1637 	 * If the addr or bdf from the parent PCIe bridge logs belong to this
1638 	 * PCI device, assume the PCIe bridge's error handling has already taken
1639 	 * care of this PCI device's error.
1640 	 */
1641 	if (pf_pci_decode(parent_pfd_p, &cmd) != DDI_SUCCESS)
1642 		return (PF_ERR_PANIC);
1643 
1644 	if ((parent_saer_p->pcie_sue_tgt_bdf == bus_p->bus_bdf) ||
1645 	    pf_in_addr_range(bus_p, parent_saer_p->pcie_sue_tgt_addr))
1646 		return (PF_ERR_MATCHED_PARENT);
1647 
1648 	/*
1649 	 * If this device is a PCI-PCI bridge, check if the bdf in the parent
1650 	 * PCIe bridge logs is in the range of this PCI-PCI Bridge's bus ranges.
1651 	 * If they are, then assume the PCIe bridge's error handling has already
1652 	 * taken care of this PCI-PCI bridge device's error.
1653 	 */
1654 	if (PCIE_IS_BDG(bus_p) &&
1655 	    pf_in_bus_range(bus_p, parent_saer_p->pcie_sue_tgt_bdf))
1656 		return (PF_ERR_MATCHED_PARENT);
1657 
1658 	return (PF_ERR_PANIC);
1659 }
1660 
1661 /*
1662  * PCIe Bridge transactions associated with PERR.
1663  * o Bridge received a poisoned Non-Posted Write (CFG Writes) from PCIe
1664  * o Bridge received a poisoned Posted Write from (MEM Writes) from PCIe
1665  * o Bridge received a poisoned Completion on a Split Transction from PCIe
1666  * o Bridge received a poisoned Completion on a Delayed Transction from PCIe
1667  *
1668  * Check for non-poisoned PCIe transactions that got forwarded to the secondary
1669  * side and detects a PERR#.  Except for delayed read completions, a poisoned
1670  * TLP will be forwarded to the secondary bus and PERR# will be asserted.
1671  */
1672 /* ARGSUSED */
1673 static int
1674 pf_analyse_perr_assert(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1675     pf_data_t *pfd_p)
1676 {
1677 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1678 	uint16_t	cmd;
1679 	int		hdl_sts = PF_HDL_NOTFOUND;
1680 	int		err = PF_ERR_NO_ERROR;
1681 	pf_pcie_adv_bdg_err_regs_t *saer_p;
1682 
1683 
1684 	if (HAS_SAER_LOGS(pfd_p, bit)) {
1685 		saer_p = PCIE_ADV_BDG_REG(pfd_p);
1686 		if (pf_pci_decode(pfd_p, &cmd) != DDI_SUCCESS)
1687 			return (PF_ERR_PANIC);
1688 
1689 cmd_switch:
1690 		switch (cmd) {
1691 		case PCI_PCIX_CMD_IOWR:
1692 		case PCI_PCIX_CMD_MEMWR:
1693 		case PCI_PCIX_CMD_MEMWR_BL:
1694 		case PCI_PCIX_CMD_MEMWRBL:
1695 			/* Posted Writes Transactions */
1696 			if (saer_p->pcie_sue_tgt_trans == PF_ADDR_PIO)
1697 				hdl_sts = pf_log_hdl_lookup(rpdip, derr, pfd_p,
1698 				    B_FALSE);
1699 			break;
1700 		case PCI_PCIX_CMD_CFWR:
1701 			/*
1702 			 * Check to see if it is a non-posted write.  If so, a
1703 			 * UR Completion would have been sent.
1704 			 */
1705 			if (pf_matched_in_rc(dq_head_p, pfd_p,
1706 			    PCI_STAT_R_MAST_AB)) {
1707 				hdl_sts = PF_HDL_FOUND;
1708 				err = PF_ERR_MATCHED_RC;
1709 				goto done;
1710 			}
1711 			hdl_sts = pf_log_hdl_lookup(rpdip, derr, pfd_p,
1712 			    B_FALSE);
1713 			break;
1714 		case PCI_PCIX_CMD_SPL:
1715 			hdl_sts = pf_log_hdl_lookup(rpdip, derr, pfd_p,
1716 			    B_FALSE);
1717 			break;
1718 		case PCI_PCIX_CMD_DADR:
1719 			cmd = (PCIE_ADV_BDG_HDR(pfd_p, 1) >>
1720 			    PCIE_AER_SUCE_HDR_CMD_UP_SHIFT) &
1721 			    PCIE_AER_SUCE_HDR_CMD_UP_MASK;
1722 			if (cmd != PCI_PCIX_CMD_DADR)
1723 				goto cmd_switch;
1724 			/* FALLTHROUGH */
1725 		default:
1726 			/* Unexpected situation, panic */
1727 			hdl_sts = PF_HDL_NOTFOUND;
1728 		}
1729 
1730 		if (hdl_sts == PF_HDL_FOUND)
1731 			err = PF_ERR_MATCHED_DEVICE;
1732 		else
1733 			err = PF_ERR_PANIC;
1734 	} else {
1735 		/*
1736 		 * Check to see if it is a non-posted write.  If so, a UR
1737 		 * Completion would have been sent.
1738 		 */
1739 		if ((PCIE_ERR_REG(pfd_p)->pcie_err_status &
1740 		    PCIE_DEVSTS_UR_DETECTED) &&
1741 		    pf_matched_in_rc(dq_head_p, pfd_p, PCI_STAT_R_MAST_AB))
1742 			err = PF_ERR_MATCHED_RC;
1743 
1744 		/* Check for posted writes.  Transaction is lost. */
1745 		if (PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat &
1746 		    PCI_STAT_S_PERROR)
1747 			err = PF_ERR_PANIC;
1748 
1749 		/*
1750 		 * All other scenarios are due to read completions.  Check for
1751 		 * PERR on the primary side.  If found the primary side error
1752 		 * handling will take care of this error.
1753 		 */
1754 		if (err == PF_ERR_NO_ERROR) {
1755 			if (PCI_ERR_REG(pfd_p)->pci_err_status &
1756 			    PCI_STAT_PERROR)
1757 				err = PF_ERR_MATCHED_PARENT;
1758 			else
1759 				err = PF_ERR_PANIC;
1760 		}
1761 	}
1762 
1763 done:
1764 	return (err);
1765 }
1766 
1767 /*
1768  * PCIe Poisoned TLP error analyser.  If a PCIe device receives a Poisoned TLP,
1769  * check the logs and see if an associated handler for this transaction can be
1770  * found.
1771  */
1772 /* ARGSUSED */
1773 static int
1774 pf_analyse_ptlp(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1775     pf_data_t *pfd_p)
1776 {
1777 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1778 
1779 	/*
1780 	 * If AERs are supported find the logs in this device, otherwise look in
1781 	 * it's parent's logs.
1782 	 */
1783 	if (HAS_AER_LOGS(pfd_p, bit)) {
1784 		pcie_tlp_hdr_t *hdr = (pcie_tlp_hdr_t *)&PCIE_ADV_HDR(pfd_p, 0);
1785 
1786 		/*
1787 		 * Double check that the log contains a poisoned TLP.
1788 		 * Some devices like PLX switch do not log poison TLP headers.
1789 		 */
1790 		if (hdr->ep) {
1791 			if (pf_log_hdl_lookup(rpdip, derr, pfd_p, B_TRUE) ==
1792 			    PF_HDL_FOUND)
1793 				return (PF_ERR_MATCHED_DEVICE);
1794 		}
1795 
1796 		/*
1797 		 * If an address is found and hdl lookup failed panic.
1798 		 * Otherwise check parents to see if there was enough
1799 		 * information recover.
1800 		 */
1801 		if (PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_addr)
1802 			return (PF_ERR_PANIC);
1803 	}
1804 
1805 	/*
1806 	 * Check to see if the rc has already handled this error or a parent has
1807 	 * already handled this error.
1808 	 *
1809 	 * If the error info in the RC wasn't enough to find the fault device,
1810 	 * such as if the faulting device lies behind a PCIe-PCI bridge from a
1811 	 * poisoned completion, check to see if the PCIe-PCI bridge has enough
1812 	 * info to recover.  For completion TLP's, the AER header logs only
1813 	 * contain the faulting BDF in the Root Port.  For PCIe device the fault
1814 	 * BDF is the fault device.  But if the fault device is behind a
1815 	 * PCIe-PCI bridge the fault BDF could turn out just to be a PCIe-PCI
1816 	 * bridge's secondary bus number.
1817 	 */
1818 	if (!PFD_IS_ROOT(pfd_p)) {
1819 		dev_info_t *pdip = ddi_get_parent(PCIE_PFD2DIP(pfd_p));
1820 		pf_data_t *parent_pfd_p;
1821 
1822 		if (PCIE_PFD2BUS(pfd_p)->bus_rp_dip == pdip) {
1823 			if (pf_matched_in_rc(dq_head_p, pfd_p, PCI_STAT_PERROR))
1824 				return (PF_ERR_MATCHED_RC);
1825 		}
1826 
1827 		parent_pfd_p = PCIE_DIP2PFD(pdip);
1828 
1829 		if (HAS_AER_LOGS(parent_pfd_p, bit))
1830 			return (PF_ERR_MATCHED_PARENT);
1831 	} else {
1832 		pf_data_t *bdg_pfd_p;
1833 		pcie_req_id_t secbus;
1834 
1835 		/*
1836 		 * Looking for a pcie bridge only makes sense if the BDF
1837 		 * Dev/Func = 0/0
1838 		 */
1839 		if (!PCIE_HAS_AER(PCIE_PFD2BUS(pfd_p)))
1840 			goto done;
1841 
1842 		secbus = PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf;
1843 
1844 		if (!PCIE_CHECK_VALID_BDF(secbus) || (secbus & 0xFF))
1845 			goto done;
1846 
1847 		bdg_pfd_p = pf_get_pcie_bridge(pfd_p, secbus);
1848 
1849 		if (bdg_pfd_p && HAS_SAER_LOGS(bdg_pfd_p,
1850 		    PCIE_AER_SUCE_PERR_ASSERT)) {
1851 			return pf_analyse_perr_assert(derr,
1852 			    PCIE_AER_SUCE_PERR_ASSERT, dq_head_p, pfd_p);
1853 		}
1854 	}
1855 done:
1856 	return (PF_ERR_PANIC);
1857 }
1858 
1859 /*
1860  * PCIe-PCI Bridge Received Master and Target abort error analyser on Split
1861  * Completions.  If a PCIe Bridge receives a MA/TA check logs and see if an
1862  * associated handler for this transaction can be found.
1863  */
1864 /* ARGSUSED */
1865 static int
1866 pf_analyse_sc(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1867     pf_data_t *pfd_p)
1868 {
1869 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1870 	uint16_t	cmd;
1871 	int		sts = PF_HDL_NOTFOUND;
1872 
1873 	if (!HAS_SAER_LOGS(pfd_p, bit))
1874 		return (PF_ERR_PANIC);
1875 
1876 	if (pf_pci_decode(pfd_p, &cmd) != DDI_SUCCESS)
1877 		return (PF_ERR_PANIC);
1878 
1879 	if (cmd == PCI_PCIX_CMD_SPL)
1880 		sts = pf_log_hdl_lookup(rpdip, derr, pfd_p, B_FALSE);
1881 
1882 	if (sts == PF_HDL_FOUND)
1883 		return (PF_ERR_MATCHED_DEVICE);
1884 
1885 	return (PF_ERR_PANIC);
1886 }
1887 
1888 /*
1889  * PCIe Timeout error analyser.  This error can be forgiven if it is marked as
1890  * CE Advisory.  If it is marked as advisory, this means the HW can recover
1891  * and/or retry the transaction automatically.
1892  */
1893 /* ARGSUSED */
1894 static int
1895 pf_analyse_to(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1896     pf_data_t *pfd_p)
1897 {
1898 	if (HAS_AER_LOGS(pfd_p, bit) && CE_ADVISORY(pfd_p))
1899 		return (PF_ERR_NO_PANIC);
1900 
1901 	return (PF_ERR_PANIC);
1902 }
1903 
1904 /*
1905  * PCIe Unexpected Completion.  Check to see if this TLP was misrouted by
1906  * matching the device BDF with the TLP Log.  If misrouting panic, otherwise
1907  * don't panic.
1908  */
1909 /* ARGSUSED */
1910 static int
1911 pf_analyse_uc(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1912     pf_data_t *pfd_p)
1913 {
1914 	if (HAS_AER_LOGS(pfd_p, bit) &&
1915 	    (PCIE_PFD2BUS(pfd_p)->bus_bdf == (PCIE_ADV_HDR(pfd_p, 2) >> 16)))
1916 		return (PF_ERR_NO_PANIC);
1917 
1918 	/*
1919 	 * This is a case of mis-routing. Any of the switches above this
1920 	 * device could be at fault.
1921 	 */
1922 	PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags = PF_AFFECTED_ROOT;
1923 
1924 	return (PF_ERR_PANIC);
1925 }
1926 
1927 /*
1928  * PCIe-PCI Bridge Uncorrectable Data error analyser.  All Uncorrectable Data
1929  * errors should have resulted in a PCIe Poisoned TLP to the RC, except for
1930  * Posted Writes.  Check the logs for Posted Writes and if the RC did not see a
1931  * Poisoned TLP.
1932  *
1933  * Non-Posted Writes will also generate a UR in the completion status, which the
1934  * RC should also see.
1935  */
1936 /* ARGSUSED */
1937 static int
1938 pf_analyse_uc_data(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1939     pf_data_t *pfd_p)
1940 {
1941 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1942 
1943 	if (!HAS_SAER_LOGS(pfd_p, bit))
1944 		return (PF_ERR_PANIC);
1945 
1946 	if (pf_matched_in_rc(dq_head_p, pfd_p, PCI_STAT_PERROR))
1947 		return (PF_ERR_MATCHED_RC);
1948 
1949 	if (pf_log_hdl_lookup(rpdip, derr, pfd_p, B_FALSE) == PF_HDL_FOUND)
1950 		return (PF_ERR_MATCHED_DEVICE);
1951 
1952 	return (PF_ERR_PANIC);
1953 }
1954 
/*
 * Trivial analyser for error bits that never warrant a panic; always
 * returns PF_ERR_NO_PANIC regardless of the logged state.
 */
/* ARGSUSED */
static int
pf_no_panic(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
    pf_data_t *pfd_p)
{
	return (PF_ERR_NO_PANIC);
}
1962 
/*
 * Trivial analyser for error bits that always warrant a panic; always
 * returns PF_ERR_PANIC regardless of the logged state.
 */
/* ARGSUSED */
static int
pf_panic(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
    pf_data_t *pfd_p)
{
	return (PF_ERR_PANIC);
}
1970 
/*
 * If a PCIe device does not support AER, assume all AER statuses have been set,
 * unless other registers do not indicate a certain error occurring.
 *
 * Synthesizes pcie_ue_status from the generic PCIe/PCI status registers so
 * the rest of the analysis code can treat AER-less devices uniformly.
 */
static void
pf_adjust_for_no_aer(pf_data_t *pfd_p)
{
	uint32_t	aer_ue = 0;
	uint16_t	status;

	/* Devices with real AER logs need no synthesis. */
	if (PCIE_HAS_AER(PCIE_PFD2BUS(pfd_p)))
		return;

	if (PCIE_ERR_REG(pfd_p)->pcie_err_status & PCIE_DEVSTS_FE_DETECTED)
		aer_ue = PF_AER_FATAL_ERR;

	/*
	 * NOTE(review): when both FE and NFE are detected, the assignment
	 * below replaces the fatal mask with the non-fatal one rather than
	 * OR-ing them together — confirm this is intended.
	 */
	if (PCIE_ERR_REG(pfd_p)->pcie_err_status & PCIE_DEVSTS_NFE_DETECTED) {
		aer_ue = PF_AER_NON_FATAL_ERR;
		status = PCI_ERR_REG(pfd_p)->pci_err_status;

		/* Check if the device received a PTLP */
		if (!(status & PCI_STAT_PERROR))
			aer_ue &= ~PCIE_AER_UCE_PTLP;

		/* Check if the device signaled a CA */
		if (!(status & PCI_STAT_S_TARG_AB))
			aer_ue &= ~PCIE_AER_UCE_CA;

		/* Check if the device sent a UR */
		if (!(PCIE_ERR_REG(pfd_p)->pcie_err_status &
		    PCIE_DEVSTS_UR_DETECTED))
			aer_ue &= ~PCIE_AER_UCE_UR;

		/*
		 * Ignore ECRCs as it is optional and will manifest itself as
		 * another error like PTLP and MFP
		 */
		aer_ue &= ~PCIE_AER_UCE_ECRC;

		/*
		 * Generally if NFE is set, SERR should also be set. Exception:
		 * When certain non-fatal errors are masked, and some of them
		 * happened to be the cause of the NFE, SERR will not be set and
		 * they can not be the source of this interrupt.
		 *
		 * On x86, URs are masked (NFE + UR can be set), if any other
		 * non-fatal errors (i.e, PTLP, CTO, CA, UC, ECRC, ACS) did
		 * occur, SERR should be set since they are not masked. So if
		 * SERR is not set, none of them occurred.
		 */
		if (!(status & PCI_STAT_S_SYSERR))
			aer_ue &= ~PCIE_AER_UCE_TO;
	}

	/* Link-training and surprise-down only apply to bridges. */
	if (!PCIE_IS_BDG(PCIE_PFD2BUS(pfd_p))) {
		aer_ue &= ~PCIE_AER_UCE_TRAINING;
		aer_ue &= ~PCIE_AER_UCE_SD;
	}

	PCIE_ADV_REG(pfd_p)->pcie_ue_status = aer_ue;
}
2032 
/*
 * Secondary-side analogue of pf_adjust_for_no_aer(): for PCIe-PCI bridges
 * without AER, synthesize pcie_sue_status from the PCIe status and the
 * bridge's secondary PCI status register.
 */
static void
pf_adjust_for_no_saer(pf_data_t *pfd_p)
{
	uint32_t	s_aer_ue = 0;
	uint16_t	status;

	/* Bridges with real AER logs need no synthesis. */
	if (PCIE_HAS_AER(PCIE_PFD2BUS(pfd_p)))
		return;

	if (PCIE_ERR_REG(pfd_p)->pcie_err_status & PCIE_DEVSTS_FE_DETECTED)
		s_aer_ue = PF_SAER_FATAL_ERR;

	/*
	 * NOTE(review): as in pf_adjust_for_no_aer(), when both FE and NFE
	 * are detected the non-fatal mask replaces the fatal one — confirm
	 * this is intended.
	 */
	if (PCIE_ERR_REG(pfd_p)->pcie_err_status & PCIE_DEVSTS_NFE_DETECTED) {
		s_aer_ue = PF_SAER_NON_FATAL_ERR;
		status = PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat;

		/* Check if the device received a UC_DATA */
		if (!(status & PCI_STAT_PERROR))
			s_aer_ue &= ~PCIE_AER_SUCE_UC_DATA_ERR;

		/* Check if the device received a RCVD_MA/MA_ON_SC */
		if (!(status & (PCI_STAT_R_MAST_AB))) {
			s_aer_ue &= ~PCIE_AER_SUCE_RCVD_MA;
			s_aer_ue &= ~PCIE_AER_SUCE_MA_ON_SC;
		}

		/* Check if the device received a RCVD_TA/TA_ON_SC */
		if (!(status & (PCI_STAT_R_TARG_AB))) {
			s_aer_ue &= ~PCIE_AER_SUCE_RCVD_TA;
			s_aer_ue &= ~PCIE_AER_SUCE_TA_ON_SC;
		}
	}

	PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_status = s_aer_ue;
}
2068 
2069 /* Find the PCIe-PCI bridge based on secondary bus number */
2070 static pf_data_t *
2071 pf_get_pcie_bridge(pf_data_t *pfd_p, pcie_req_id_t secbus)
2072 {
2073 	pf_data_t *bdg_pfd_p;
2074 
2075 	/* Search down for the PCIe-PCI device. */
2076 	for (bdg_pfd_p = pfd_p->pe_next; bdg_pfd_p;
2077 	    bdg_pfd_p = bdg_pfd_p->pe_next) {
2078 		if (PCIE_IS_PCIE_BDG(PCIE_PFD2BUS(bdg_pfd_p)) &&
2079 		    PCIE_PFD2BUS(bdg_pfd_p)->bus_bdg_secbus == secbus)
2080 			return (bdg_pfd_p);
2081 	}
2082 
2083 	return (NULL);
2084 }
2085 
/*
 * Find the PCIe-PCI bridge of a PCI device by walking up the devinfo tree.
 * Returns the bridge's pf_data_t, or NULL if the device is not PCI or no
 * PCIe-PCI bridge is found below the root port.
 */
static pf_data_t *
pf_get_parent_pcie_bridge(pf_data_t *pfd_p)
{
	dev_info_t	*dip, *rp_dip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;

	/* This only makes sense if the device is a PCI device */
	if (!PCIE_IS_PCI(PCIE_PFD2BUS(pfd_p)))
		return (NULL);

	/*
	 * Search up for the PCIe-PCI device.  Watch out for x86 where pci
	 * devices hang directly off of NPE.
	 *
	 * Reaching rp_dip sets dip to NULL, which ends the walk after one
	 * more macro evaluation.  NOTE(review): that final iteration passes
	 * NULL to PCIE_DIP2BUS — presumably safe for a NULL dip; confirm
	 * against the macro's definition.
	 */
	for (dip = PCIE_PFD2DIP(pfd_p); dip; dip = ddi_get_parent(dip)) {
		if (dip == rp_dip)
			dip = NULL;

		if (PCIE_IS_PCIE_BDG(PCIE_DIP2BUS(dip)))
			return (PCIE_DIP2PFD(dip));
	}

	return (NULL);
}
2110 
2111 /*
2112  * See if a leaf error was bubbled up to the Root Complex (RC) and handled.
2113  * As of right now only RC's have enough information to have errors found in the
2114  * fabric to be matched to the RC.  Note that Root Port's (RP) do not carry
2115  * enough information.  Currently known RC's are SPARC Fire architecture and
2116  * it's equivalents, and x86's NPE.
2117  * SPARC Fire architectures have a plethora of error registers, while currently
2118  * NPE only have the address of a failed load.
2119  *
2120  * Check if the RC logged an error with the appropriate status type/abort type.
2121  * Ex: Parity Error, Received Master/Target Abort
2122  * Check if either the fault address found in the rc matches the device's
2123  * assigned address range (PIO's only) or the fault BDF in the rc matches the
2124  * device's BDF or Secondary Bus/Bus Range.
2125  */
static boolean_t
pf_matched_in_rc(pf_data_t *dq_head_p, pf_data_t *pfd_p,
    uint32_t abort_type)
{
	pcie_bus_t	*bus_p = PCIE_PFD2BUS(pfd_p);
	pf_data_t	*rc_pfd_p;
	pcie_req_id_t	fault_bdf;

	/*
	 * Root entries sit at the head of the fault queue; the loop
	 * condition deliberately stops at the first non-root entry.
	 */
	for (rc_pfd_p = dq_head_p; PFD_IS_ROOT(rc_pfd_p);
	    rc_pfd_p = rc_pfd_p->pe_next) {
		/* Only root complex's have enough information to match */
		if (!PCIE_IS_RC(PCIE_PFD2BUS(rc_pfd_p)))
			continue;

		/* If device and rc abort type does not match continue */
		if (!(PCI_BDG_ERR_REG(rc_pfd_p)->pci_bdg_sec_stat & abort_type))
			continue;

		fault_bdf = PCIE_ROOT_FAULT(rc_pfd_p)->scan_bdf;

		/* The Fault BDF = Device's BDF */
		if (fault_bdf == bus_p->bus_bdf)
			return (B_TRUE);

		/* The Fault Addr is in device's address range */
		if (pf_in_addr_range(bus_p,
		    PCIE_ROOT_FAULT(rc_pfd_p)->scan_addr))
			return (B_TRUE);

		/* The Fault BDF is from PCIe-PCI Bridge's secondary bus */
		if (PCIE_IS_PCIE_BDG(bus_p) &&
		    pf_in_bus_range(bus_p, fault_bdf))
			return (B_TRUE);
	}

	return (B_FALSE);
}
2163 
2164 /*
2165  * Check the RP and see if the error is PIO/DMA.  If the RP also has a PERR then
2166  * it is a DMA, otherwise it's a PIO
2167  */
2168 static void
2169 pf_pci_find_trans_type(pf_data_t *pfd_p, uint64_t *addr, uint32_t *trans_type,
2170     pcie_req_id_t *bdf)
2171 {
2172 	pf_data_t *rc_pfd_p;
2173 
2174 	/* Could be DMA or PIO.  Find out by look at error type. */
2175 	switch (PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_status) {
2176 	case PCIE_AER_SUCE_TA_ON_SC:
2177 	case PCIE_AER_SUCE_MA_ON_SC:
2178 		*trans_type = PF_ADDR_DMA;
2179 		return;
2180 	case PCIE_AER_SUCE_RCVD_TA:
2181 	case PCIE_AER_SUCE_RCVD_MA:
2182 		*bdf = PCIE_INVALID_BDF;
2183 		*trans_type = PF_ADDR_PIO;
2184 		return;
2185 	case PCIE_AER_SUCE_USC_ERR:
2186 	case PCIE_AER_SUCE_UC_DATA_ERR:
2187 	case PCIE_AER_SUCE_PERR_ASSERT:
2188 		break;
2189 	default:
2190 		*addr = 0;
2191 		*bdf = PCIE_INVALID_BDF;
2192 		*trans_type = 0;
2193 		return;
2194 	}
2195 
2196 	*bdf = PCIE_INVALID_BDF;
2197 	*trans_type = PF_ADDR_PIO;
2198 	for (rc_pfd_p = pfd_p->pe_prev; rc_pfd_p;
2199 	    rc_pfd_p = rc_pfd_p->pe_prev) {
2200 		if (PFD_IS_ROOT(rc_pfd_p) &&
2201 		    (PCI_BDG_ERR_REG(rc_pfd_p)->pci_bdg_sec_stat &
2202 		    PCI_STAT_PERROR)) {
2203 			*trans_type = PF_ADDR_DMA;
2204 			return;
2205 		}
2206 	}
2207 }
2208 
2209 /*
2210  * pf_pci_decode function decodes the secondary aer transaction logs in
2211  * PCIe-PCI bridges.
2212  *
2213  * The log is 128 bits long and arranged in this manner.
2214  * [0:35]   Transaction Attribute	(s_aer_h0-saer_h1)
2215  * [36:39]  Transaction lower command	(saer_h1)
2216  * [40:43]  Transaction upper command	(saer_h1)
2217  * [44:63]  Reserved
2218  * [64:127] Address			(saer_h2-saer_h3)
2219  */
/* ARGSUSED */
int
pf_pci_decode(pf_data_t *pfd_p, uint16_t *cmd)
{
	pcix_attr_t	*attr;
	uint64_t	addr;
	uint32_t	trans_type;
	pcie_req_id_t	bdf = PCIE_INVALID_BDF;

	/* The attribute phase (header 0/1) carries the requester ID. */
	attr = (pcix_attr_t *)&PCIE_ADV_BDG_HDR(pfd_p, 0);
	*cmd = GET_SAER_CMD(pfd_p);

cmd_switch:
	switch (*cmd) {
	case PCI_PCIX_CMD_IORD:
	case PCI_PCIX_CMD_IOWR:
		/* IO Access should always be down stream */
		addr = PCIE_ADV_BDG_HDR(pfd_p, 2);
		bdf = attr->rid;
		trans_type = PF_ADDR_PIO;
		break;
	case PCI_PCIX_CMD_MEMRD_DW:
	case PCI_PCIX_CMD_MEMRD_BL:
	case PCI_PCIX_CMD_MEMRDBL:
	case PCI_PCIX_CMD_MEMWR:
	case PCI_PCIX_CMD_MEMWR_BL:
	case PCI_PCIX_CMD_MEMWRBL:
		/* Reassemble the 64-bit address from headers 2 and 3. */
		addr = ((uint64_t)PCIE_ADV_BDG_HDR(pfd_p, 3) <<
		    PCIE_AER_SUCE_HDR_ADDR_SHIFT) | PCIE_ADV_BDG_HDR(pfd_p, 2);
		bdf = attr->rid;

		/* Memory accesses can be either DMA or PIO; go figure out. */
		pf_pci_find_trans_type(pfd_p, &addr, &trans_type, &bdf);
		break;
	case PCI_PCIX_CMD_CFRD:
	case PCI_PCIX_CMD_CFWR:
		/*
		 * CFG Access should always be down stream.  Match the BDF in
		 * the address phase.
		 */
		addr = 0;
		bdf = attr->rid;
		trans_type = PF_ADDR_CFG;
		break;
	case PCI_PCIX_CMD_SPL:
		/*
		 * Check for DMA read completions.  The requesting BDF is in the
		 * Address phase.
		 */
		addr = 0;
		bdf = attr->rid;
		trans_type = PF_ADDR_DMA;
		break;
	case PCI_PCIX_CMD_DADR:
		/*
		 * For Dual Address Cycles the transaction command is in the 2nd
		 * address phase.  Re-dispatch on the upper command unless it
		 * is another DADR (which would loop forever).
		 */
		*cmd = (PCIE_ADV_BDG_HDR(pfd_p, 1) >>
		    PCIE_AER_SUCE_HDR_CMD_UP_SHIFT) &
		    PCIE_AER_SUCE_HDR_CMD_UP_MASK;
		if (*cmd != PCI_PCIX_CMD_DADR)
			goto cmd_switch;
		/* FALLTHROUGH */
	default:
		/* Unrecognized command: clear the decoded target state. */
		PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_trans = 0;
		PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf = PCIE_INVALID_BDF;
		PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_addr = 0;
		return (DDI_FAILURE);
	}
	/* Publish the decoded transaction for downstream handle lookup. */
	PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_trans = trans_type;
	PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf = bdf;
	PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_addr = addr;
	return (DDI_SUCCESS);
}
2294 
2295 /*
2296  * Based on either the BDF/ADDR find and mark the faulting DMA/ACC handler.
2297  * Returns either PF_HDL_NOTFOUND or PF_HDL_FOUND.
2298  */
2299 int
2300 pf_hdl_lookup(dev_info_t *dip, uint64_t ena, uint32_t flag, uint64_t addr,
2301     pcie_req_id_t bdf)
2302 {
2303 	ddi_fm_error_t		derr;
2304 
2305 	/* If we don't know the addr or rid just return with NOTFOUND */
2306 	if ((addr == 0) && !PCIE_CHECK_VALID_BDF(bdf))
2307 		return (PF_HDL_NOTFOUND);
2308 
2309 	/*
2310 	 * Disable DMA handle lookup until DMA errors can be handled and
2311 	 * reported synchronously.  When enabled again, check for the
2312 	 * PF_ADDR_DMA flag
2313 	 */
2314 	if (!(flag & (PF_ADDR_PIO | PF_ADDR_CFG))) {
2315 		return (PF_HDL_NOTFOUND);
2316 	}
2317 
2318 	bzero(&derr, sizeof (ddi_fm_error_t));
2319 	derr.fme_version = DDI_FME_VERSION;
2320 	derr.fme_flag = DDI_FM_ERR_UNEXPECTED;
2321 	derr.fme_ena = ena;
2322 
2323 	return (pf_hdl_child_lookup(dip, &derr, flag, addr, bdf));
2324 }
2325 
/*
 * Recursively search dip and its children for the ACC/DMA handle that
 * matches the given address/BDF, marking it failed when found.  Returns
 * PF_HDL_FOUND or PF_HDL_NOTFOUND.
 */
static int
pf_hdl_child_lookup(dev_info_t *dip, ddi_fm_error_t *derr, uint32_t flag,
    uint64_t addr, pcie_req_id_t bdf)
{
	int			status = PF_HDL_NOTFOUND;
	ndi_fmc_t		*fcp = NULL;
	struct i_ddi_fmhdl	*fmhdl = DEVI(dip)->devi_fmhdl;
	pcie_req_id_t		dip_bdf;
	boolean_t		have_lock = B_FALSE;
	pcie_bus_t		*bus_p;
	dev_info_t		*cdip;

	/* Skip devices whose bus state is not ready for fault handling. */
	if (!(bus_p = pf_is_ready(dip))) {
		return (status);
	}

	ASSERT(fmhdl);
	if (!i_ddi_fm_handler_owned(dip)) {
		/*
		 * pf_handler_enter always returns SUCCESS if the 'impl' arg is
		 * NULL.
		 */
		(void) pf_handler_enter(dip, NULL);
		have_lock = B_TRUE;
	}

	dip_bdf = PCI_GET_BDF(dip);

	/* Check if dip and BDF match, if not recurse to its children. */
	if (!PCIE_IS_RC(bus_p) && (!PCIE_CHECK_VALID_BDF(bdf) ||
	    dip_bdf == bdf)) {
		/* Try the DMA handle cache first, if capable. */
		if ((flag & PF_ADDR_DMA) && DDI_FM_DMA_ERR_CAP(fmhdl->fh_cap))
			fcp = fmhdl->fh_dma_cache;
		else
			fcp = NULL;

		if (fcp)
			status = pf_hdl_compare(dip, derr, DMA_HANDLE, addr,
			    bdf, fcp);


		/* Then the access handle cache for PIO/CFG faults. */
		if (((flag & PF_ADDR_PIO) || (flag & PF_ADDR_CFG)) &&
		    DDI_FM_ACC_ERR_CAP(fmhdl->fh_cap))
			fcp = fmhdl->fh_acc_cache;
		else
			fcp = NULL;

		if (fcp)
			status = pf_hdl_compare(dip, derr, ACC_HANDLE, addr,
			    bdf, fcp);
	}

	/* If we found the handler or know it's this device, we're done */
	if (!PCIE_IS_RC(bus_p) && ((dip_bdf == bdf) ||
	    (status == PF_HDL_FOUND)))
		goto done;

	/*
	 * If the current device is a PCIe-PCI bridge we need to check for
	 * special cases:
	 *
	 * If it is a PIO and we don't have an address or this is a DMA, check
	 * to see if the BDF = secondary bus.  If so stop.  The BDF isn't a real
	 * BDF and the fault device could have come from any device in the PCI
	 * bus.
	 */
	if (PCIE_IS_PCIE_BDG(bus_p) &&
	    ((flag & PF_ADDR_DMA || flag & PF_ADDR_PIO)) &&
	    ((bus_p->bus_bdg_secbus << PCIE_REQ_ID_BUS_SHIFT) == bdf))
		goto done;


	/* If we can't find the handler check its children */
	for (cdip = ddi_get_child(dip); cdip;
	    cdip = ddi_get_next_sibling(cdip)) {
		if ((bus_p = PCIE_DIP2BUS(cdip)) == NULL)
			continue;

		/* Only descend into children that could own the fault. */
		if (pf_in_bus_range(bus_p, bdf) ||
		    pf_in_addr_range(bus_p, addr))
			status = pf_hdl_child_lookup(cdip, derr, flag, addr,
			    bdf);

		if (status == PF_HDL_FOUND)
			goto done;
	}

done:
	if (have_lock == B_TRUE)
		pf_handler_exit(dip);

	return (status);
}
2419 
2420 static int
2421 pf_hdl_compare(dev_info_t *dip, ddi_fm_error_t *derr, uint32_t flag,
2422     uint64_t addr, pcie_req_id_t bdf, ndi_fmc_t *fcp)
2423 {
2424 	ndi_fmcentry_t	*fep;
2425 	int		found = 0;
2426 	int		status;
2427 
2428 	mutex_enter(&fcp->fc_lock);
2429 	for (fep = fcp->fc_head; fep != NULL; fep = fep->fce_next) {
2430 		ddi_fmcompare_t compare_func;
2431 
2432 		/*
2433 		 * Compare captured error state with handle
2434 		 * resources.  During the comparison and
2435 		 * subsequent error handling, we block
2436 		 * attempts to free the cache entry.
2437 		 */
2438 		if (flag == ACC_HANDLE) {
2439 			compare_func =
2440 			    i_ddi_fm_acc_err_cf_get((ddi_acc_handle_t)
2441 			    fep->fce_resource);
2442 		} else {
2443 			compare_func =
2444 			    i_ddi_fm_dma_err_cf_get((ddi_dma_handle_t)
2445 			    fep->fce_resource);
2446 		}
2447 
2448 		if (compare_func == NULL) /* unbound or not FLAGERR */
2449 			continue;
2450 
2451 		status = compare_func(dip, fep->fce_resource,
2452 		    (void *)&addr, (void *)&bdf);
2453 
2454 		if (status == DDI_FM_NONFATAL) {
2455 			found++;
2456 
2457 			/* Set the error for this resource handle */
2458 			if (flag == ACC_HANDLE) {
2459 				ddi_acc_handle_t ap = fep->fce_resource;
2460 
2461 				i_ddi_fm_acc_err_set(ap, derr->fme_ena, status,
2462 				    DDI_FM_ERR_UNEXPECTED);
2463 				ddi_fm_acc_err_get(ap, derr, DDI_FME_VERSION);
2464 				derr->fme_acc_handle = ap;
2465 			} else {
2466 				ddi_dma_handle_t dp = fep->fce_resource;
2467 
2468 				i_ddi_fm_dma_err_set(dp, derr->fme_ena, status,
2469 				    DDI_FM_ERR_UNEXPECTED);
2470 				ddi_fm_dma_err_get(dp, derr, DDI_FME_VERSION);
2471 				derr->fme_dma_handle = dp;
2472 			}
2473 		}
2474 	}
2475 	mutex_exit(&fcp->fc_lock);
2476 
2477 	/*
2478 	 * If a handler isn't found and we know this is the right device mark
2479 	 * them all failed.
2480 	 */
2481 	if ((addr != 0) && PCIE_CHECK_VALID_BDF(bdf) && (found == 0)) {
2482 		status = pf_hdl_compare(dip, derr, flag, addr, bdf, fcp);
2483 		if (status == PF_HDL_FOUND)
2484 			found++;
2485 	}
2486 
2487 	return ((found) ? PF_HDL_FOUND : PF_HDL_NOTFOUND);
2488 }
2489 
2490 /*
2491  * Automatically decode AER header logs and does a handling look up based on the
2492  * AER header decoding.
2493  *
2494  * For this function only the Primary/Secondary AER Header Logs need to be valid
2495  * in the pfd (PCIe Fault Data) arg.
2496  *
2497  * Returns either PF_HDL_NOTFOUND or PF_HDL_FOUND.
2498  */
/* ARGSUSED */
static int
pf_log_hdl_lookup(dev_info_t *rpdip, ddi_fm_error_t *derr, pf_data_t *pfd_p,
    boolean_t is_primary)
{
	/*
	 * Disabling this function temporarily until errors can be handled
	 * synchronously.
	 *
	 * This function is currently only called during the middle of a fabric
	 * scan.  If the fabric scan is called synchronously with an error seen
	 * in the RP/RC, then the related errors in the fabric will have a
	 * PF_ERR_MATCHED_RC error severity.  pf_log_hdl_lookup code will be
	 * bypassed when the severity is PF_ERR_MATCHED_RC.  Handle lookup would
	 * have already happened in RP/RC error handling in a synchronous
	 * manner.  Errors unrelated should panic, because they are being
	 * handled asynchronously.
	 *
	 * If fabric scan is called asynchronously from any RP/RC error, then
	 * DMA/PIO UE errors seen in the fabric should panic.  pf_log_hdl_lookup
	 * will return PF_HDL_NOTFOUND to ensure that the system panics.
	 */
	return (PF_HDL_NOTFOUND);
}
2523 
2524 /*
2525  * Decodes the TLP and returns the BDF of the handler, address and transaction
2526  * type if known.
2527  *
2528  * Types of TLP logs seen in RC, and what to extract:
2529  *
2530  * Memory(DMA) - Requester BDF, address, PF_DMA_ADDR
2531  * Memory(PIO) - address, PF_PIO_ADDR
2532  * CFG - Should not occur and result in UR
2533  * Completion(DMA) - Requester BDF, PF_DMA_ADDR
2534  * Completion(PIO) - Requester BDF, PF_PIO_ADDR
2535  *
2536  * Types of TLP logs seen in SW/Leaf, and what to extract:
2537  *
2538  * Memory(DMA) - Requester BDF, address, PF_DMA_ADDR
2539  * Memory(PIO) - address, PF_PIO_ADDR
2540  * CFG - Destined BDF, address, PF_CFG_ADDR
2541  * Completion(DMA) - Requester BDF, PF_DMA_ADDR
2542  * Completion(PIO) - Requester BDF, PF_PIO_ADDR
2543  *
2544  * The adv_reg_p must be passed in separately for use with SPARC RPs.  A
2545  * SPARC RP could have multiple AER header logs which cannot be directly
2546  * accessed via the bus_p.
2547  */
int
pf_tlp_decode(pcie_bus_t *bus_p, pf_pcie_adv_err_regs_t *adv_reg_p)
{
	pcie_tlp_hdr_t	*tlp_hdr = (pcie_tlp_hdr_t *)adv_reg_p->pcie_ue_hdr;
	pcie_req_id_t	my_bdf, tlp_bdf, flt_bdf = PCIE_INVALID_BDF;
	uint64_t	flt_addr = 0;
	uint32_t	flt_trans_type = 0;

	/* Reset the decoded target state before attempting the decode. */
	adv_reg_p->pcie_ue_tgt_addr = 0;
	adv_reg_p->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;
	adv_reg_p->pcie_ue_tgt_trans = 0;

	my_bdf = bus_p->bus_bdf;
	switch (tlp_hdr->type) {
	case PCIE_TLP_TYPE_IO:
	case PCIE_TLP_TYPE_MEM:
	case PCIE_TLP_TYPE_MEMLK:
		/* Grab the 32/64bit fault address */
		if (tlp_hdr->fmt & 0x1) {
			flt_addr = ((uint64_t)adv_reg_p->pcie_ue_hdr[2] << 32);
			flt_addr |= adv_reg_p->pcie_ue_hdr[3];
		} else {
			flt_addr = adv_reg_p->pcie_ue_hdr[2];
		}

		/* Requester ID is the upper half of header word 1. */
		tlp_bdf = (pcie_req_id_t)(adv_reg_p->pcie_ue_hdr[1] >> 16);

		/*
		 * If the req bdf >= this.bdf, then it means the request is this
		 * device or came from a device below it.  Unless this device is
		 * a PCIe root port then it means is a DMA, otherwise PIO.
		 */
		if ((tlp_bdf >= my_bdf) && !PCIE_IS_ROOT(bus_p)) {
			flt_trans_type = PF_ADDR_DMA;
			flt_bdf = tlp_bdf;
		} else if (PCIE_IS_ROOT(bus_p) &&
		    (PF_FIRST_AER_ERR(PCIE_AER_UCE_PTLP, adv_reg_p) ||
		    (PF_FIRST_AER_ERR(PCIE_AER_UCE_CA, adv_reg_p)))) {
			flt_trans_type = PF_ADDR_DMA;
			flt_bdf = tlp_bdf;
		} else {
			flt_trans_type = PF_ADDR_PIO;
			flt_bdf = PCIE_INVALID_BDF;
		}
		break;
	case PCIE_TLP_TYPE_CFG0:
	case PCIE_TLP_TYPE_CFG1:
		/* CFG TLP: destination BDF is in the upper half of header 2. */
		flt_addr = 0;
		flt_bdf = (pcie_req_id_t)(adv_reg_p->pcie_ue_hdr[2] >> 16);
		flt_trans_type = PF_ADDR_CFG;
		break;
	case PCIE_TLP_TYPE_CPL:
	case PCIE_TLP_TYPE_CPLLK:
	{
		pcie_cpl_t *cpl_tlp = (pcie_cpl_t *)&adv_reg_p->pcie_ue_hdr[1];

		/* Attribute the fault to the numerically larger of rid/cid. */
		flt_addr = 0;
		flt_bdf = (cpl_tlp->rid > cpl_tlp->cid) ? cpl_tlp->rid :
		    cpl_tlp->cid;

		/*
		 * A requester ID greater than the completer ID means the
		 * request came from a device below, i.e. a DMA; otherwise
		 * treat the completion as PIO/CFG.
		 */
		if (cpl_tlp->rid > cpl_tlp->cid) {
			flt_trans_type = PF_ADDR_DMA;
		} else {
			flt_trans_type = PF_ADDR_PIO | PF_ADDR_CFG;
		}
		break;
	}
	default:
		return (DDI_FAILURE);
	}

	/* Publish the decoded fault target. */
	adv_reg_p->pcie_ue_tgt_addr = flt_addr;
	adv_reg_p->pcie_ue_tgt_bdf = flt_bdf;
	adv_reg_p->pcie_ue_tgt_trans = flt_trans_type;

	return (DDI_SUCCESS);
}
2630 
2631 #define	PCIE_EREPORT	DDI_IO_CLASS "." PCI_ERROR_SUBCLASS "." PCIEX_FABRIC
2632 static int
2633 pf_ereport_setup(dev_info_t *dip, uint64_t ena, nvlist_t **ereport,
2634     nvlist_t **detector, errorq_elem_t **eqep)
2635 {
2636 	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;
2637 	char device_path[MAXPATHLEN];
2638 	nv_alloc_t *nva;
2639 
2640 	*eqep = errorq_reserve(fmhdl->fh_errorq);
2641 	if (*eqep == NULL) {
2642 		atomic_inc_64(&fmhdl->fh_kstat.fek_erpt_dropped.value.ui64);
2643 		return (DDI_FAILURE);
2644 	}
2645 
2646 	*ereport = errorq_elem_nvl(fmhdl->fh_errorq, *eqep);
2647 	nva = errorq_elem_nva(fmhdl->fh_errorq, *eqep);
2648 
2649 	ASSERT(*ereport);
2650 	ASSERT(nva);
2651 
2652 	/*
2653 	 * Use the dev_path/devid for this device instance.
2654 	 */
2655 	*detector = fm_nvlist_create(nva);
2656 	if (dip == ddi_root_node()) {
2657 		device_path[0] = '/';
2658 		device_path[1] = '\0';
2659 	} else {
2660 		(void) ddi_pathname(dip, device_path);
2661 	}
2662 
2663 	fm_fmri_dev_set(*detector, FM_DEV_SCHEME_VERSION, NULL,
2664 	    device_path, NULL, NULL);
2665 
2666 	if (ena == 0)
2667 		ena = fm_ena_generate(0, FM_ENA_FMT1);
2668 
2669 	fm_ereport_set(*ereport, 0, PCIE_EREPORT, ena, *detector, NULL);
2670 
2671 	return (DDI_SUCCESS);
2672 }
2673 
/*
 * Commit a previously reserved ereport (see pf_ereport_setup) to the
 * error queue for asynchronous delivery.
 */
/* ARGSUSED */
static void
pf_ereport_post(dev_info_t *dip, nvlist_t **ereport, nvlist_t **detector,
    errorq_elem_t **eqep)
{
	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;

	errorq_commit(fmhdl->fh_errorq, *eqep, ERRORQ_ASYNC);
}
2683 
2684 static void
2685 pf_send_ereport(ddi_fm_error_t *derr, pf_impl_t *impl)
2686 {
2687 	nvlist_t	*ereport;
2688 	nvlist_t	*detector;
2689 	errorq_elem_t	*eqep;
2690 	pcie_bus_t	*bus_p;
2691 	pf_data_t	*pfd_p;
2692 	uint32_t	total = impl->pf_total;
2693 
2694 	/*
2695 	 * Ereports need to be sent in a top down fashion. The fabric translator
2696 	 * expects the ereports from the Root first. This is needed to tell if
2697 	 * the system contains a PCIe complaint RC/RP.
2698 	 */
2699 	for (pfd_p = impl->pf_dq_head_p; pfd_p; pfd_p = pfd_p->pe_next) {
2700 		bus_p = PCIE_PFD2BUS(pfd_p);
2701 		pfd_p->pe_valid = B_FALSE;
2702 
2703 		if (derr->fme_flag != DDI_FM_ERR_UNEXPECTED ||
2704 		    !DDI_FM_EREPORT_CAP(ddi_fm_capable(PCIE_PFD2DIP(pfd_p))))
2705 			continue;
2706 
2707 		if (pf_ereport_setup(PCIE_BUS2DIP(bus_p), derr->fme_ena,
2708 		    &ereport, &detector, &eqep) != DDI_SUCCESS)
2709 			continue;
2710 
2711 		if (PFD_IS_RC(pfd_p)) {
2712 			fm_payload_set(ereport,
2713 			    "scan_bdf", DATA_TYPE_UINT16,
2714 			    PCIE_ROOT_FAULT(pfd_p)->scan_bdf,
2715 			    "scan_addr", DATA_TYPE_UINT64,
2716 			    PCIE_ROOT_FAULT(pfd_p)->scan_addr,
2717 			    "intr_src", DATA_TYPE_UINT16,
2718 			    PCIE_ROOT_EH_SRC(pfd_p)->intr_type,
2719 			    NULL);
2720 			goto generic;
2721 		}
2722 
2723 		/* Generic PCI device information */
2724 		fm_payload_set(ereport,
2725 		    "bdf", DATA_TYPE_UINT16, bus_p->bus_bdf,
2726 		    "device_id", DATA_TYPE_UINT16,
2727 		    (bus_p->bus_dev_ven_id >> 16),
2728 		    "vendor_id", DATA_TYPE_UINT16,
2729 		    (bus_p->bus_dev_ven_id & 0xFFFF),
2730 		    "rev_id", DATA_TYPE_UINT8, bus_p->bus_rev_id,
2731 		    "dev_type", DATA_TYPE_UINT16, bus_p->bus_dev_type,
2732 		    "pcie_off", DATA_TYPE_UINT16, bus_p->bus_pcie_off,
2733 		    "pcix_off", DATA_TYPE_UINT16, bus_p->bus_pcix_off,
2734 		    "aer_off", DATA_TYPE_UINT16, bus_p->bus_aer_off,
2735 		    "ecc_ver", DATA_TYPE_UINT16, bus_p->bus_ecc_ver,
2736 		    NULL);
2737 
2738 		/* PCI registers */
2739 		fm_payload_set(ereport,
2740 		    "pci_status", DATA_TYPE_UINT16,
2741 		    PCI_ERR_REG(pfd_p)->pci_err_status,
2742 		    "pci_command", DATA_TYPE_UINT16,
2743 		    PCI_ERR_REG(pfd_p)->pci_cfg_comm,
2744 		    NULL);
2745 
2746 		/* PCI bridge registers */
2747 		if (PCIE_IS_BDG(bus_p)) {
2748 			fm_payload_set(ereport,
2749 			    "pci_bdg_sec_status", DATA_TYPE_UINT16,
2750 			    PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat,
2751 			    "pci_bdg_ctrl", DATA_TYPE_UINT16,
2752 			    PCI_BDG_ERR_REG(pfd_p)->pci_bdg_ctrl,
2753 			    NULL);
2754 		}
2755 
2756 		/* PCIx registers */
2757 		if (PCIE_IS_PCIX(bus_p) && !PCIE_IS_BDG(bus_p)) {
2758 			fm_payload_set(ereport,
2759 			    "pcix_status", DATA_TYPE_UINT32,
2760 			    PCIX_ERR_REG(pfd_p)->pcix_status,
2761 			    "pcix_command", DATA_TYPE_UINT16,
2762 			    PCIX_ERR_REG(pfd_p)->pcix_command,
2763 			    NULL);
2764 		}
2765 
2766 		/* PCIx ECC Registers */
2767 		if (PCIX_ECC_VERSION_CHECK(bus_p)) {
2768 			pf_pcix_ecc_regs_t *ecc_bdg_reg;
2769 			pf_pcix_ecc_regs_t *ecc_reg;
2770 
2771 			if (PCIE_IS_BDG(bus_p))
2772 				ecc_bdg_reg = PCIX_BDG_ECC_REG(pfd_p, 0);
2773 			ecc_reg = PCIX_ECC_REG(pfd_p);
2774 			fm_payload_set(ereport,
2775 			    "pcix_ecc_control_0", DATA_TYPE_UINT16,
2776 			    PCIE_IS_BDG(bus_p) ?
2777 			    (ecc_bdg_reg->pcix_ecc_ctlstat >> 16) :
2778 			    (ecc_reg->pcix_ecc_ctlstat >> 16),
2779 			    "pcix_ecc_status_0", DATA_TYPE_UINT16,
2780 			    PCIE_IS_BDG(bus_p) ?
2781 			    (ecc_bdg_reg->pcix_ecc_ctlstat & 0xFFFF) :
2782 			    (ecc_reg->pcix_ecc_ctlstat & 0xFFFF),
2783 			    "pcix_ecc_fst_addr_0", DATA_TYPE_UINT32,
2784 			    PCIE_IS_BDG(bus_p) ?
2785 			    ecc_bdg_reg->pcix_ecc_fstaddr :
2786 			    ecc_reg->pcix_ecc_fstaddr,
2787 			    "pcix_ecc_sec_addr_0", DATA_TYPE_UINT32,
2788 			    PCIE_IS_BDG(bus_p) ?
2789 			    ecc_bdg_reg->pcix_ecc_secaddr :
2790 			    ecc_reg->pcix_ecc_secaddr,
2791 			    "pcix_ecc_attr_0", DATA_TYPE_UINT32,
2792 			    PCIE_IS_BDG(bus_p) ?
2793 			    ecc_bdg_reg->pcix_ecc_attr :
2794 			    ecc_reg->pcix_ecc_attr,
2795 			    NULL);
2796 		}
2797 
2798 		/* PCIx ECC Bridge Registers */
2799 		if (PCIX_ECC_VERSION_CHECK(bus_p) && PCIE_IS_BDG(bus_p)) {
2800 			pf_pcix_ecc_regs_t *ecc_bdg_reg;
2801 
2802 			ecc_bdg_reg = PCIX_BDG_ECC_REG(pfd_p, 1);
2803 			fm_payload_set(ereport,
2804 			    "pcix_ecc_control_1", DATA_TYPE_UINT16,
2805 			    (ecc_bdg_reg->pcix_ecc_ctlstat >> 16),
2806 			    "pcix_ecc_status_1", DATA_TYPE_UINT16,
2807 			    (ecc_bdg_reg->pcix_ecc_ctlstat & 0xFFFF),
2808 			    "pcix_ecc_fst_addr_1", DATA_TYPE_UINT32,
2809 			    ecc_bdg_reg->pcix_ecc_fstaddr,
2810 			    "pcix_ecc_sec_addr_1", DATA_TYPE_UINT32,
2811 			    ecc_bdg_reg->pcix_ecc_secaddr,
2812 			    "pcix_ecc_attr_1", DATA_TYPE_UINT32,
2813 			    ecc_bdg_reg->pcix_ecc_attr,
2814 			    NULL);
2815 		}
2816 
2817 		/* PCIx Bridge */
2818 		if (PCIE_IS_PCIX(bus_p) && PCIE_IS_BDG(bus_p)) {
2819 			fm_payload_set(ereport,
2820 			    "pcix_bdg_status", DATA_TYPE_UINT32,
2821 			    PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_stat,
2822 			    "pcix_bdg_sec_status", DATA_TYPE_UINT16,
2823 			    PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_sec_stat,
2824 			    NULL);
2825 		}
2826 
2827 		/* PCIe registers */
2828 		if (PCIE_IS_PCIE(bus_p)) {
2829 			fm_payload_set(ereport,
2830 			    "pcie_status", DATA_TYPE_UINT16,
2831 			    PCIE_ERR_REG(pfd_p)->pcie_err_status,
2832 			    "pcie_command", DATA_TYPE_UINT16,
2833 			    PCIE_ERR_REG(pfd_p)->pcie_err_ctl,
2834 			    "pcie_dev_cap", DATA_TYPE_UINT32,
2835 			    PCIE_ERR_REG(pfd_p)->pcie_dev_cap,
2836 			    NULL);
2837 		}
2838 
2839 		/* PCIe AER registers */
2840 		if (PCIE_HAS_AER(bus_p)) {
2841 			fm_payload_set(ereport,
2842 			    "pcie_adv_ctl", DATA_TYPE_UINT32,
2843 			    PCIE_ADV_REG(pfd_p)->pcie_adv_ctl,
2844 			    "pcie_ue_status", DATA_TYPE_UINT32,
2845 			    PCIE_ADV_REG(pfd_p)->pcie_ue_status,
2846 			    "pcie_ue_mask", DATA_TYPE_UINT32,
2847 			    PCIE_ADV_REG(pfd_p)->pcie_ue_mask,
2848 			    "pcie_ue_sev", DATA_TYPE_UINT32,
2849 			    PCIE_ADV_REG(pfd_p)->pcie_ue_sev,
2850 			    "pcie_ue_hdr0", DATA_TYPE_UINT32,
2851 			    PCIE_ADV_REG(pfd_p)->pcie_ue_hdr[0],
2852 			    "pcie_ue_hdr1", DATA_TYPE_UINT32,
2853 			    PCIE_ADV_REG(pfd_p)->pcie_ue_hdr[1],
2854 			    "pcie_ue_hdr2", DATA_TYPE_UINT32,
2855 			    PCIE_ADV_REG(pfd_p)->pcie_ue_hdr[2],
2856 			    "pcie_ue_hdr3", DATA_TYPE_UINT32,
2857 			    PCIE_ADV_REG(pfd_p)->pcie_ue_hdr[3],
2858 			    "pcie_ce_status", DATA_TYPE_UINT32,
2859 			    PCIE_ADV_REG(pfd_p)->pcie_ce_status,
2860 			    "pcie_ce_mask", DATA_TYPE_UINT32,
2861 			    PCIE_ADV_REG(pfd_p)->pcie_ce_mask,
2862 			    NULL);
2863 		}
2864 
2865 		/* PCIe AER decoded header */
2866 		if (HAS_AER_LOGS(pfd_p, PCIE_ADV_REG(pfd_p)->pcie_ue_status)) {
2867 			fm_payload_set(ereport,
2868 			    "pcie_ue_tgt_trans", DATA_TYPE_UINT32,
2869 			    PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_trans,
2870 			    "pcie_ue_tgt_addr", DATA_TYPE_UINT64,
2871 			    PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_addr,
2872 			    "pcie_ue_tgt_bdf", DATA_TYPE_UINT16,
2873 			    PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf,
2874 			    NULL);
2875 			/* Clear these values as they no longer valid */
2876 			PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_trans = 0;
2877 			PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_addr = 0;
2878 			PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;
2879 		}
2880 
2881 		/* PCIe BDG AER registers */
2882 		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_HAS_AER(bus_p)) {
2883 			fm_payload_set(ereport,
2884 			    "pcie_sue_adv_ctl", DATA_TYPE_UINT32,
2885 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_ctl,
2886 			    "pcie_sue_status", DATA_TYPE_UINT32,
2887 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_status,
2888 			    "pcie_sue_mask", DATA_TYPE_UINT32,
2889 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_mask,
2890 			    "pcie_sue_sev", DATA_TYPE_UINT32,
2891 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_sev,
2892 			    "pcie_sue_hdr0", DATA_TYPE_UINT32,
2893 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_hdr[0],
2894 			    "pcie_sue_hdr1", DATA_TYPE_UINT32,
2895 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_hdr[1],
2896 			    "pcie_sue_hdr2", DATA_TYPE_UINT32,
2897 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_hdr[2],
2898 			    "pcie_sue_hdr3", DATA_TYPE_UINT32,
2899 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_hdr[3],
2900 			    NULL);
2901 		}
2902 
2903 		/* PCIe BDG AER decoded header */
2904 		if (PCIE_IS_PCIE_BDG(bus_p) && HAS_SAER_LOGS(pfd_p,
2905 		    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_status)) {
2906 			fm_payload_set(ereport,
2907 			    "pcie_sue_tgt_trans", DATA_TYPE_UINT32,
2908 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_trans,
2909 			    "pcie_sue_tgt_addr", DATA_TYPE_UINT64,
2910 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_addr,
2911 			    "pcie_sue_tgt_bdf", DATA_TYPE_UINT16,
2912 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf,
2913 			    NULL);
			/* Clear these values as they are no longer valid */
2915 			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_trans = 0;
2916 			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_addr = 0;
2917 			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
2918 			    PCIE_INVALID_BDF;
2919 		}
2920 
2921 		/* PCIe RP registers */
2922 		if (PCIE_IS_RP(bus_p)) {
2923 			fm_payload_set(ereport,
2924 			    "pcie_rp_status", DATA_TYPE_UINT32,
2925 			    PCIE_RP_REG(pfd_p)->pcie_rp_status,
2926 			    "pcie_rp_control", DATA_TYPE_UINT16,
2927 			    PCIE_RP_REG(pfd_p)->pcie_rp_ctl,
2928 			    NULL);
2929 		}
2930 
2931 		/* PCIe RP AER registers */
2932 		if (PCIE_IS_RP(bus_p) && PCIE_HAS_AER(bus_p)) {
2933 			fm_payload_set(ereport,
2934 			    "pcie_adv_rp_status", DATA_TYPE_UINT32,
2935 			    PCIE_ADV_RP_REG(pfd_p)->pcie_rp_err_status,
2936 			    "pcie_adv_rp_command", DATA_TYPE_UINT32,
2937 			    PCIE_ADV_RP_REG(pfd_p)->pcie_rp_err_cmd,
2938 			    "pcie_adv_rp_ce_src_id", DATA_TYPE_UINT16,
2939 			    PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id,
2940 			    "pcie_adv_rp_ue_src_id", DATA_TYPE_UINT16,
2941 			    PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id,
2942 			    NULL);
2943 		}
2944 
2945 generic:
2946 		/* IOV related information */
2947 		if (!PCIE_BDG_IS_UNASSIGNED(PCIE_PFD2BUS(impl->pf_dq_head_p))) {
2948 			fm_payload_set(ereport,
2949 			    "pcie_aff_flags", DATA_TYPE_UINT16,
2950 			    PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags,
2951 			    "pcie_aff_bdf", DATA_TYPE_UINT16,
2952 			    PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf,
2953 			    "orig_sev", DATA_TYPE_UINT32,
2954 			    pfd_p->pe_orig_severity_flags,
2955 			    NULL);
2956 		}
2957 
2958 		/* Misc ereport information */
2959 		fm_payload_set(ereport,
2960 		    "remainder", DATA_TYPE_UINT32, --total,
2961 		    "severity", DATA_TYPE_UINT32, pfd_p->pe_severity_flags,
2962 		    NULL);
2963 
2964 		pf_ereport_post(PCIE_BUS2DIP(bus_p), &ereport, &detector,
2965 		    &eqep);
2966 	}
2967 
2968 	/* Unlock all the devices in the queue */
2969 	for (pfd_p = impl->pf_dq_tail_p; pfd_p; pfd_p = pfd_p->pe_prev) {
2970 		if (pfd_p->pe_lock) {
2971 			pf_handler_exit(PCIE_PFD2DIP(pfd_p));
2972 		}
2973 	}
2974 }
2975 
2976 /*
 * pf_handler_enter must be called to serialize access to each device's
 * pf_data_t.
2978  * Once error handling is finished with the device call pf_handler_exit to allow
2979  * other threads to access it.  The same thread may call pf_handler_enter
2980  * several times without any consequences.
2981  *
2982  * The "impl" variable is passed in during scan fabric to double check that
2983  * there is not a recursive algorithm and to ensure only one thread is doing a
2984  * fabric scan at all times.
2985  *
2986  * In some cases "impl" is not available, such as "child lookup" being called
2987  * from outside of scan fabric, just pass in NULL for this variable and this
2988  * extra check will be skipped.
2989  */
2990 static int
2991 pf_handler_enter(dev_info_t *dip, pf_impl_t *impl)
2992 {
2993 	pf_data_t *pfd_p = PCIE_DIP2PFD(dip);
2994 
2995 	ASSERT(pfd_p);
2996 
2997 	/*
2998 	 * Check to see if the lock has already been taken by this
2999 	 * thread.  If so just return and don't take lock again.
3000 	 */
3001 	if (!pfd_p->pe_lock || !impl) {
3002 		i_ddi_fm_handler_enter(dip);
3003 		pfd_p->pe_lock = B_TRUE;
3004 		return (PF_SCAN_SUCCESS);
3005 	}
3006 
3007 	/* Check to see that this dip is already in the "impl" error queue */
3008 	for (pfd_p = impl->pf_dq_head_p; pfd_p; pfd_p = pfd_p->pe_next) {
3009 		if (PCIE_PFD2DIP(pfd_p) == dip) {
3010 			return (PF_SCAN_SUCCESS);
3011 		}
3012 	}
3013 
3014 	return (PF_SCAN_DEADLOCK);
3015 }
3016 
3017 static void
3018 pf_handler_exit(dev_info_t *dip)
3019 {
3020 	pf_data_t *pfd_p = PCIE_DIP2PFD(dip);
3021 
3022 	ASSERT(pfd_p);
3023 
3024 	ASSERT(pfd_p->pe_lock == B_TRUE);
3025 	i_ddi_fm_handler_exit(dip);
3026 	pfd_p->pe_lock = B_FALSE;
3027 }
3028 
3029 /*
3030  * This function calls the driver's callback function (if it's FMA hardened
3031  * and callback capable). This function relies on the current thread already
3032  * owning the driver's fmhdl lock.
3033  */
3034 static int
3035 pf_fm_callback(dev_info_t *dip, ddi_fm_error_t *derr)
3036 {
3037 	int cb_sts = DDI_FM_OK;
3038 
3039 	if (DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
3040 		dev_info_t *pdip = ddi_get_parent(dip);
3041 		struct i_ddi_fmhdl *hdl = DEVI(pdip)->devi_fmhdl;
3042 		struct i_ddi_fmtgt *tgt = hdl->fh_tgts;
3043 		struct i_ddi_errhdl *errhdl;
3044 		while (tgt != NULL) {
3045 			if (dip == tgt->ft_dip) {
3046 				errhdl = tgt->ft_errhdl;
3047 				cb_sts = errhdl->eh_func(dip, derr,
3048 				    errhdl->eh_impl);
3049 				break;
3050 			}
3051 			tgt = tgt->ft_next;
3052 		}
3053 	}
3054 	return (cb_sts);
3055 }
3056 
/*
 * Reset a pf_data_t back to its idle state so it can be reused for a
 * subsequent fabric scan.  All captured error-register snapshots that
 * apply to this device's type are cleared and cached target/source BDFs
 * are marked invalid.
 */
static void
pf_reset_pfd(pf_data_t *pfd_p)
{
	pcie_bus_t	*bus_p = PCIE_PFD2BUS(pfd_p);

	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_orig_severity_flags = 0;
	/* pe_lock and pe_valid were reset in pf_send_ereport */

	PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags = 0;
	PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;

	/* Root nodes additionally track the fault source and scan hints */
	if (PCIE_IS_ROOT(bus_p)) {
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
		PCIE_ROOT_FAULT(pfd_p)->scan_addr = 0;
		PCIE_ROOT_FAULT(pfd_p)->full_scan = B_FALSE;
		PCIE_ROOT_EH_SRC(pfd_p)->intr_type = PF_INTR_TYPE_NONE;
		PCIE_ROOT_EH_SRC(pfd_p)->intr_data = NULL;
	}

	if (PCIE_IS_BDG(bus_p)) {
		bzero(PCI_BDG_ERR_REG(pfd_p), sizeof (pf_pci_bdg_err_regs_t));
	}

	/* Plain PCI status/command state is kept for every device */
	PCI_ERR_REG(pfd_p)->pci_err_status = 0;
	PCI_ERR_REG(pfd_p)->pci_cfg_comm = 0;

	if (PCIE_IS_PCIE(bus_p)) {
		/* Root: RP and RP-AER registers, plus CE/UE source ids */
		if (PCIE_IS_ROOT(bus_p)) {
			bzero(PCIE_RP_REG(pfd_p),
			    sizeof (pf_pcie_rp_err_regs_t));
			bzero(PCIE_ADV_RP_REG(pfd_p),
			    sizeof (pf_pcie_adv_rp_err_regs_t));
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id =
			    PCIE_INVALID_BDF;
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id =
			    PCIE_INVALID_BDF;
		} else if (PCIE_IS_PCIE_BDG(bus_p)) {
			/* PCIe bridge: secondary (SUE) AER registers */
			bzero(PCIE_ADV_BDG_REG(pfd_p),
			    sizeof (pf_pcie_adv_bdg_err_regs_t));
			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
			    PCIE_INVALID_BDF;
		}

		/* PCIe-to-PCI-X bridge: both ECC register banks (0 and 1) */
		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				bzero(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				bzero(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}
			PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_sec_stat = 0;
			PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_stat = 0;
		}

		/* AER registers, including the four logged UE header words */
		PCIE_ADV_REG(pfd_p)->pcie_adv_ctl = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_status = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_mask = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_sev = 0;
		PCIE_ADV_HDR(pfd_p, 0) = 0;
		PCIE_ADV_HDR(pfd_p, 1) = 0;
		PCIE_ADV_HDR(pfd_p, 2) = 0;
		PCIE_ADV_HDR(pfd_p, 3) = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ce_status = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ce_mask = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_trans = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_addr = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;

		PCIE_ERR_REG(pfd_p)->pcie_err_status = 0;
		PCIE_ERR_REG(pfd_p)->pcie_err_ctl = 0;
		PCIE_ERR_REG(pfd_p)->pcie_dev_cap = 0;

	} else if (PCIE_IS_PCIX(bus_p)) {
		/* Pure PCI-X device: bridge vs. non-bridge register sets */
		if (PCIE_IS_BDG(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				bzero(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				bzero(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}
			PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_sec_stat = 0;
			PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_stat = 0;
		} else {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				bzero(PCIX_ECC_REG(pfd_p),
				    sizeof (pf_pcix_ecc_regs_t));
			}
			PCIX_ERR_REG(pfd_p)->pcix_command = 0;
			PCIX_ERR_REG(pfd_p)->pcix_status = 0;
		}
	}

	/* Detach from the fault queue */
	pfd_p->pe_prev = NULL;
	pfd_p->pe_next = NULL;
	pfd_p->pe_rber_fatal = B_FALSE;
}
3154 
3155 pcie_bus_t *
3156 pf_find_busp_by_bdf(pf_impl_t *impl, pcie_req_id_t bdf)
3157 {
3158 	pcie_bus_t *temp_bus_p;
3159 	pf_data_t *temp_pfd_p;
3160 
3161 	for (temp_pfd_p = impl->pf_dq_head_p;
3162 	    temp_pfd_p;
3163 	    temp_pfd_p = temp_pfd_p->pe_next) {
3164 		temp_bus_p = PCIE_PFD2BUS(temp_pfd_p);
3165 
3166 		if (bdf == temp_bus_p->bus_bdf) {
3167 			return (temp_bus_p);
3168 		}
3169 	}
3170 
3171 	return (NULL);
3172 }
3173 
3174 pcie_bus_t *
3175 pf_find_busp_by_addr(pf_impl_t *impl, uint64_t addr)
3176 {
3177 	pcie_bus_t *temp_bus_p;
3178 	pf_data_t *temp_pfd_p;
3179 
3180 	for (temp_pfd_p = impl->pf_dq_head_p;
3181 	    temp_pfd_p;
3182 	    temp_pfd_p = temp_pfd_p->pe_next) {
3183 		temp_bus_p = PCIE_PFD2BUS(temp_pfd_p);
3184 
3185 		if (pf_in_assigned_addr(temp_bus_p, addr)) {
3186 			return (temp_bus_p);
3187 		}
3188 	}
3189 
3190 	return (NULL);
3191 }
3192 
3193 pcie_bus_t *
3194 pf_find_busp_by_aer(pf_impl_t *impl, pf_data_t *pfd_p)
3195 {
3196 	pf_pcie_adv_err_regs_t *reg_p = PCIE_ADV_REG(pfd_p);
3197 	pcie_bus_t *temp_bus_p = NULL;
3198 	pcie_req_id_t bdf;
3199 	uint64_t addr;
3200 	pcie_tlp_hdr_t *tlp_hdr = (pcie_tlp_hdr_t *)reg_p->pcie_ue_hdr;
3201 	uint32_t trans_type = reg_p->pcie_ue_tgt_trans;
3202 
3203 	if ((tlp_hdr->type == PCIE_TLP_TYPE_CPL) ||
3204 	    (tlp_hdr->type == PCIE_TLP_TYPE_CPLLK)) {
3205 		pcie_cpl_t *cpl_tlp = (pcie_cpl_t *)&reg_p->pcie_ue_hdr[1];
3206 
3207 		bdf = (cpl_tlp->rid > cpl_tlp->cid) ? cpl_tlp->rid :
3208 		    cpl_tlp->cid;
3209 		temp_bus_p = pf_find_busp_by_bdf(impl, bdf);
3210 	} else if (trans_type == PF_ADDR_PIO) {
3211 		addr = reg_p->pcie_ue_tgt_addr;
3212 		temp_bus_p = pf_find_busp_by_addr(impl, addr);
3213 	} else {
3214 		/* PF_ADDR_DMA type */
3215 		bdf = reg_p->pcie_ue_tgt_bdf;
3216 		temp_bus_p = pf_find_busp_by_bdf(impl, bdf);
3217 	}
3218 
3219 	return (temp_bus_p);
3220 }
3221 
3222 pcie_bus_t *
3223 pf_find_busp_by_saer(pf_impl_t *impl, pf_data_t *pfd_p)
3224 {
3225 	pf_pcie_adv_bdg_err_regs_t *reg_p = PCIE_ADV_BDG_REG(pfd_p);
3226 	pcie_bus_t *temp_bus_p = NULL;
3227 	pcie_req_id_t bdf;
3228 	uint64_t addr;
3229 
3230 	addr = reg_p->pcie_sue_tgt_addr;
3231 	bdf = reg_p->pcie_sue_tgt_bdf;
3232 
3233 	if (addr != 0) {
3234 		temp_bus_p = pf_find_busp_by_addr(impl, addr);
3235 	} else if (PCIE_CHECK_VALID_BDF(bdf)) {
3236 		temp_bus_p = pf_find_busp_by_bdf(impl, bdf);
3237 	}
3238 
3239 	return (temp_bus_p);
3240 }
3241