xref: /illumos-gate/usr/src/uts/common/io/pciex/pcie_fault.c (revision 5328fc53d11d7151861fa272e4fb0248b8f0e145)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2017, Joyent, Inc.
24  */
25 
26 #include <sys/sysmacros.h>
27 #include <sys/types.h>
28 #include <sys/kmem.h>
29 #include <sys/modctl.h>
30 #include <sys/ddi.h>
31 #include <sys/sunddi.h>
32 #include <sys/sunndi.h>
33 #include <sys/fm/protocol.h>
34 #include <sys/fm/util.h>
35 #include <sys/fm/io/ddi.h>
36 #include <sys/fm/io/pci.h>
37 #include <sys/promif.h>
38 #include <sys/disp.h>
39 #include <sys/atomic.h>
40 #include <sys/pcie.h>
41 #include <sys/pci_cap.h>
42 #include <sys/pcie_impl.h>
43 
44 #define	PF_PCIE_BDG_ERR (PCIE_DEVSTS_FE_DETECTED | PCIE_DEVSTS_NFE_DETECTED | \
45 	PCIE_DEVSTS_CE_DETECTED)
46 
47 #define	PF_PCI_BDG_ERR (PCI_STAT_S_SYSERR | PCI_STAT_S_TARG_AB | \
48 	PCI_STAT_R_MAST_AB | PCI_STAT_R_TARG_AB | PCI_STAT_S_PERROR)
49 
50 #define	PF_AER_FATAL_ERR (PCIE_AER_UCE_DLP | PCIE_AER_UCE_SD |\
51 	PCIE_AER_UCE_FCP | PCIE_AER_UCE_RO | PCIE_AER_UCE_MTLP)
52 #define	PF_AER_NON_FATAL_ERR (PCIE_AER_UCE_PTLP | PCIE_AER_UCE_TO | \
53 	PCIE_AER_UCE_CA | PCIE_AER_UCE_ECRC | PCIE_AER_UCE_UR)
54 
55 #define	PF_SAER_FATAL_ERR (PCIE_AER_SUCE_USC_MSG_DATA_ERR | \
56 	PCIE_AER_SUCE_UC_ATTR_ERR | PCIE_AER_SUCE_UC_ADDR_ERR | \
57 	PCIE_AER_SUCE_SERR_ASSERT)
58 #define	PF_SAER_NON_FATAL_ERR (PCIE_AER_SUCE_TA_ON_SC | \
59 	PCIE_AER_SUCE_MA_ON_SC | PCIE_AER_SUCE_RCVD_TA | \
60 	PCIE_AER_SUCE_RCVD_MA | PCIE_AER_SUCE_USC_ERR | \
61 	PCIE_AER_SUCE_UC_DATA_ERR | PCIE_AER_SUCE_TIMER_EXPIRED | \
62 	PCIE_AER_SUCE_PERR_ASSERT | PCIE_AER_SUCE_INTERNAL_ERR)
63 
64 #define	PF_PCI_PARITY_ERR (PCI_STAT_S_PERROR | PCI_STAT_PERROR)
65 
66 #define	PF_FIRST_AER_ERR(bit, adv) \
67 	(bit & (1 << (adv->pcie_adv_ctl & PCIE_AER_CTL_FST_ERR_PTR_MASK)))
68 
69 #define	HAS_AER_LOGS(pfd_p, bit) \
70 	(PCIE_HAS_AER(pfd_p->pe_bus_p) && \
71 	PF_FIRST_AER_ERR(bit, PCIE_ADV_REG(pfd_p)))
72 
73 #define	PF_FIRST_SAER_ERR(bit, adv) \
74 	(bit & (1 << (adv->pcie_sue_ctl & PCIE_AER_SCTL_FST_ERR_PTR_MASK)))
75 
76 #define	HAS_SAER_LOGS(pfd_p, bit) \
77 	(PCIE_HAS_AER(pfd_p->pe_bus_p) && \
78 	PF_FIRST_SAER_ERR(bit, PCIE_ADV_BDG_REG(pfd_p)))
79 
80 #define	GET_SAER_CMD(pfd_p) \
81 	((PCIE_ADV_BDG_HDR(pfd_p, 1) >> \
82 	PCIE_AER_SUCE_HDR_CMD_LWR_SHIFT) & PCIE_AER_SUCE_HDR_CMD_LWR_MASK)
83 
84 #define	CE_ADVISORY(pfd_p) \
85 	(PCIE_ADV_REG(pfd_p)->pcie_ce_status & PCIE_AER_CE_AD_NFE)
86 
/*
 * PCIe Fault Fabric Error analysis table entry.  Tables of these map a
 * single error status bit to the routine that analyses it and to flags
 * describing which device(s) the error affects.
 */
typedef struct pf_fab_err_tbl {
	uint32_t	bit;		/* Error bit being analysed */
	int		(*handler)();	/* Error handling function */
	uint16_t	affected_flags; /* Primary affected flag */
	/*
	 * Secondary affected flag, effective when the information
	 * indicated by the primary flag is not available, eg.
	 * PF_AFFECTED_AER/SAER/ADDR
	 */
	uint16_t	sec_affected_flags;
} pf_fab_err_tbl_t;
99 
100 static pcie_bus_t *pf_is_ready(dev_info_t *);
101 /* Functions for scanning errors */
102 static int pf_default_hdl(dev_info_t *, pf_impl_t *);
103 static int pf_dispatch(dev_info_t *, pf_impl_t *, boolean_t);
104 static boolean_t pf_in_addr_range(pcie_bus_t *, uint64_t);
105 
106 /* Functions for gathering errors */
107 static void pf_pcix_ecc_regs_gather(pf_pcix_ecc_regs_t *pcix_ecc_regs,
108     pcie_bus_t *bus_p, boolean_t bdg);
109 static void pf_pcix_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p);
110 static void pf_pcie_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p);
111 static void pf_pci_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p);
112 static int pf_dummy_cb(dev_info_t *, ddi_fm_error_t *, const void *);
113 static void pf_en_dq(pf_data_t *pfd_p, pf_impl_t *impl_p);
114 
115 /* Functions for analysing errors */
116 static int pf_analyse_error(ddi_fm_error_t *, pf_impl_t *);
117 static void pf_adjust_for_no_aer(pf_data_t *);
118 static void pf_adjust_for_no_saer(pf_data_t *);
119 static pf_data_t *pf_get_pcie_bridge(pf_data_t *, pcie_req_id_t);
120 static pf_data_t *pf_get_parent_pcie_bridge(pf_data_t *);
121 static boolean_t pf_matched_in_rc(pf_data_t *, pf_data_t *,
122     uint32_t);
123 static int pf_analyse_error_tbl(ddi_fm_error_t *, pf_impl_t *,
124     pf_data_t *, const pf_fab_err_tbl_t *, uint32_t);
125 static int pf_analyse_ca_ur(ddi_fm_error_t *, uint32_t,
126     pf_data_t *, pf_data_t *);
127 static int pf_analyse_ma_ta(ddi_fm_error_t *, uint32_t,
128     pf_data_t *, pf_data_t *);
129 static int pf_analyse_pci(ddi_fm_error_t *, uint32_t,
130     pf_data_t *, pf_data_t *);
131 static int pf_analyse_perr_assert(ddi_fm_error_t *, uint32_t,
132     pf_data_t *, pf_data_t *);
133 static int pf_analyse_ptlp(ddi_fm_error_t *, uint32_t,
134     pf_data_t *, pf_data_t *);
135 static int pf_analyse_sc(ddi_fm_error_t *, uint32_t,
136     pf_data_t *, pf_data_t *);
137 static int pf_analyse_to(ddi_fm_error_t *, uint32_t,
138     pf_data_t *, pf_data_t *);
139 static int pf_analyse_uc(ddi_fm_error_t *, uint32_t,
140     pf_data_t *, pf_data_t *);
141 static int pf_analyse_uc_data(ddi_fm_error_t *, uint32_t,
142     pf_data_t *, pf_data_t *);
143 static int pf_no_panic(ddi_fm_error_t *, uint32_t,
144     pf_data_t *, pf_data_t *);
145 static int pf_panic(ddi_fm_error_t *, uint32_t,
146     pf_data_t *, pf_data_t *);
147 static void pf_send_ereport(ddi_fm_error_t *, pf_impl_t *);
148 static int pf_fm_callback(dev_info_t *dip, ddi_fm_error_t *derr);
149 
150 /* PCIe Fabric Handle Lookup Support Functions. */
151 static int pf_hdl_child_lookup(dev_info_t *, ddi_fm_error_t *, uint32_t,
152     uint64_t, pcie_req_id_t);
153 static int pf_hdl_compare(dev_info_t *, ddi_fm_error_t *, uint32_t, uint64_t,
154     pcie_req_id_t, ndi_fmc_t *);
155 static int pf_log_hdl_lookup(dev_info_t *, ddi_fm_error_t *, pf_data_t *,
156 	boolean_t);
157 
158 static int pf_handler_enter(dev_info_t *, pf_impl_t *);
159 static void pf_handler_exit(dev_info_t *);
160 static void pf_reset_pfd(pf_data_t *);
161 
162 boolean_t pcie_full_scan = B_FALSE;	/* Force to always do a full scan */
163 int pcie_disable_scan = 0;		/* Disable fabric scan */
164 
/*
 * Inform interested parties that error handling is about to begin.
 * Currently an empty hook; it exists as the counterpart to pf_eh_exit().
 */
/* ARGSUSED */
void
pf_eh_enter(pcie_bus_t *bus_p)
{
}
171 
172 /* Inform interested parties that error handling has ended. */
173 void
174 pf_eh_exit(pcie_bus_t *bus_p)
175 {
176 	pcie_bus_t *rbus_p = PCIE_DIP2BUS(bus_p->bus_rp_dip);
177 	pf_data_t *root_pfd_p = PCIE_BUS2PFD(rbus_p);
178 	pf_data_t *pfd_p;
179 	uint_t intr_type = PCIE_ROOT_EH_SRC(root_pfd_p)->intr_type;
180 
181 	pciev_eh_exit(root_pfd_p, intr_type);
182 
183 	/* Clear affected device info and INTR SRC */
184 	for (pfd_p = root_pfd_p; pfd_p; pfd_p = pfd_p->pe_next) {
185 		PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags = 0;
186 		PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;
187 		if (PCIE_IS_ROOT(PCIE_PFD2BUS(pfd_p))) {
188 			PCIE_ROOT_EH_SRC(pfd_p)->intr_type = PF_INTR_TYPE_NONE;
189 			PCIE_ROOT_EH_SRC(pfd_p)->intr_data = NULL;
190 		}
191 	}
192 }
193 
194 /*
195  * Scan Fabric is the entry point for PCI/PCIe IO fabric errors.  The
196  * caller may create a local pf_data_t with the "root fault"
197  * information populated to either do a precise or full scan.  More
198  * than one pf_data_t maybe linked together if there are multiple
199  * errors.  Only a PCIe compliant Root Port device may pass in NULL
200  * for the root_pfd_p.
201  *
202  * "Root Complexes" such as NPE and PX should call scan_fabric using itself as
203  * the rdip.  PCIe Root ports should call pf_scan_fabric using it's parent as
204  * the rdip.
205  *
206  * Scan fabric initiated from RCs are likely due to a fabric message, traps or
207  * any RC detected errors that propagated to/from the fabric.
208  *
209  * This code assumes that by the time pf_scan_fabric is
210  * called, pf_handler_enter has NOT been called on the rdip.
211  */
int
pf_scan_fabric(dev_info_t *rdip, ddi_fm_error_t *derr, pf_data_t *root_pfd_p)
{
	pf_impl_t	impl;
	pf_data_t	*pfd_p, *pfd_head_p, *pfd_tail_p;
	int		scan_flag = PF_SCAN_SUCCESS;
	int		analyse_flag = PF_ERR_NO_ERROR;
	boolean_t	full_scan = pcie_full_scan;

	/* Fabric scanning administratively disabled: report no error. */
	if (pcie_disable_scan)
		return (analyse_flag);

	/* Find the head and tail of this link list */
	pfd_head_p = root_pfd_p;
	for (pfd_tail_p = root_pfd_p; pfd_tail_p && pfd_tail_p->pe_next;
	    pfd_tail_p = pfd_tail_p->pe_next)
		;

	/* Save head/tail */
	impl.pf_total = 0;
	impl.pf_derr = derr;
	impl.pf_dq_head_p = pfd_head_p;
	impl.pf_dq_tail_p = pfd_tail_p;

	/* If scan is initiated from RP then RP itself must be scanned. */
	if (PCIE_IS_RP(PCIE_DIP2BUS(rdip)) && pf_is_ready(rdip) &&
	    !root_pfd_p) {
		scan_flag = pf_handler_enter(rdip, &impl);
		if (scan_flag & PF_SCAN_DEADLOCK)
			goto done;

		scan_flag = pf_default_hdl(rdip, &impl);
		if (scan_flag & PF_SCAN_NO_ERR_IN_CHILD)
			goto done;
	}

	/*
	 * Scan the fabric using the scan_bdf and scan_addr in error q.
	 * scan_bdf will be valid in the following cases:
	 *	- Fabric message
	 *	- Poisoned TLP
	 *	- Signaled UR/CA
	 *	- Received UR/CA
	 *	- PIO load failures
	 */
	for (pfd_p = impl.pf_dq_head_p; pfd_p && PFD_IS_ROOT(pfd_p);
	    pfd_p = pfd_p->pe_next) {
		impl.pf_fault = PCIE_ROOT_FAULT(pfd_p);

		if (PFD_IS_RC(pfd_p))
			impl.pf_total++;

		/* Any root fault may escalate this pass to a full scan. */
		if (impl.pf_fault->full_scan)
			full_scan = B_TRUE;

		/* Only dispatch when there is something to look for. */
		if (full_scan ||
		    PCIE_CHECK_VALID_BDF(impl.pf_fault->scan_bdf) ||
		    impl.pf_fault->scan_addr)
			scan_flag |= pf_dispatch(rdip, &impl, full_scan);

		/* A full scan already visited everything below rdip. */
		if (full_scan)
			break;
	}

done:
	/*
	 * If this is due to safe access, don't analyze the errors and return
	 * success regardless of how scan fabric went.
	 */
	if (derr->fme_flag != DDI_FM_ERR_UNEXPECTED) {
		analyse_flag = PF_ERR_NO_PANIC;
	} else {
		analyse_flag = pf_analyse_error(derr, &impl);
	}

	pf_send_ereport(derr, &impl);

	/*
	 * Check if any hardened driver's callback reported a panic.
	 * If so panic.
	 */
	if (scan_flag & PF_SCAN_CB_FAILURE)
		analyse_flag |= PF_ERR_PANIC;

	/*
	 * If a deadlock was detected, panic the system as error analysis has
	 * been compromised.
	 */
	if (scan_flag & PF_SCAN_DEADLOCK)
		analyse_flag |= PF_ERR_PANIC_DEADLOCK;

	derr->fme_status = PF_ERR2DDIFM_ERR(scan_flag);

	return (analyse_flag);
}
307 
/*
 * Arrange for every subsequent pf_scan_fabric() call to walk the entire
 * fabric instead of only the devices implicated by the fault BDF/address
 * (sets the pcie_full_scan global that pf_scan_fabric() samples).
 */
void
pcie_force_fullscan(void)
{
	pcie_full_scan = B_TRUE;
}
313 
314 /*
315  * pf_dispatch walks the device tree and calls the pf_default_hdl if the device
316  * falls in the error path.
317  *
318  * Returns PF_SCAN_* flags
319  */
static int
pf_dispatch(dev_info_t *pdip, pf_impl_t *impl, boolean_t full_scan)
{
	dev_info_t	*dip;
	pcie_req_id_t	rid = impl->pf_fault->scan_bdf;
	pcie_bus_t	*bus_p;
	int		scan_flag = PF_SCAN_SUCCESS;

	for (dip = ddi_get_child(pdip); dip; dip = ddi_get_next_sibling(dip)) {
		/* Make sure dip is attached and ready */
		if (!(bus_p = pf_is_ready(dip)))
			continue;

		scan_flag |= pf_handler_enter(dip, impl);
		if (scan_flag & PF_SCAN_DEADLOCK)
			break;

		/*
		 * Handle this device if it is a:
		 * o Full Scan
		 * o PCI/PCI-X Device
		 * o Fault BDF = Device BDF
		 * o BDF/ADDR is in range of the Bridge/Switch
		 */
		if (full_scan ||
		    (bus_p->bus_bdf == rid) ||
		    pf_in_bus_range(bus_p, rid) ||
		    pf_in_addr_range(bus_p, impl->pf_fault->scan_addr)) {
			int hdl_flag = pf_default_hdl(dip, impl);
			scan_flag |= hdl_flag;

			/*
			 * A bridge may have detected no errors in which case
			 * there is no need to scan further down.
			 */
			if (hdl_flag & PF_SCAN_NO_ERR_IN_CHILD)
				continue;
		} else {
			/* Not in the fault path; release the handler lock. */
			pf_handler_exit(dip);
			continue;
		}

		/* match or in bridge bus-range */
		switch (bus_p->bus_dev_type) {
		case PCIE_PCIECAP_DEV_TYPE_PCIE2PCI:
		case PCIE_PCIECAP_DEV_TYPE_PCI2PCIE:
			/*
			 * Crossing a PCIe/PCI(-X) bridge: the fault BDF/addr
			 * may not be meaningful on the far side, so scan
			 * everything below it.
			 */
			scan_flag |= pf_dispatch(dip, impl, B_TRUE);
			break;
		case PCIE_PCIECAP_DEV_TYPE_UP:
		case PCIE_PCIECAP_DEV_TYPE_DOWN:
		case PCIE_PCIECAP_DEV_TYPE_ROOT:
		{
			pf_data_t *pfd_p = PCIE_BUS2PFD(bus_p);
			pf_pci_err_regs_t *err_p = PCI_ERR_REG(pfd_p);
			pf_pci_bdg_err_regs_t *serr_p = PCI_BDG_ERR_REG(pfd_p);
			/*
			 * Continue if the fault BDF != the switch or there is a
			 * parity error
			 */
			if ((bus_p->bus_bdf != rid) ||
			    (err_p->pci_err_status & PF_PCI_PARITY_ERR) ||
			    (serr_p->pci_bdg_sec_stat & PF_PCI_PARITY_ERR))
				scan_flag |= pf_dispatch(dip, impl, full_scan);
			break;
		}
		case PCIE_PCIECAP_DEV_TYPE_PCIE_DEV:
		case PCIE_PCIECAP_DEV_TYPE_PCI_DEV:
			/*
			 * Reached a PCIe end point so stop. Note dev_type
			 * PCI_DEV is just a PCIe device that requires IO Space
			 */
			break;
		case PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO:
			if (PCIE_IS_BDG(bus_p))
				scan_flag |= pf_dispatch(dip, impl, B_TRUE);
			break;
		default:
			ASSERT(B_FALSE);
		}
	}
	return (scan_flag);
}
402 
403 /* Returns whether the "bdf" is in the bus range of a switch/bridge */
404 boolean_t
405 pf_in_bus_range(pcie_bus_t *bus_p, pcie_req_id_t bdf)
406 {
407 	pci_bus_range_t *br_p = &bus_p->bus_bus_range;
408 	uint8_t		bus_no = (bdf & PCIE_REQ_ID_BUS_MASK) >>
409 	    PCIE_REQ_ID_BUS_SHIFT;
410 
411 	/* check if given bdf falls within bridge's bus range */
412 	if (PCIE_IS_BDG(bus_p) &&
413 	    ((bus_no >= br_p->lo) && (bus_no <= br_p->hi)))
414 		return (B_TRUE);
415 	else
416 		return (B_FALSE);
417 }
418 
419 /*
420  * Return whether the "addr" is in the assigned addr of a device.
421  */
422 boolean_t
423 pf_in_assigned_addr(pcie_bus_t *bus_p, uint64_t addr)
424 {
425 	uint_t		i;
426 	uint64_t	low, hi;
427 	pci_regspec_t	*assign_p = bus_p->bus_assigned_addr;
428 
429 	for (i = 0; i < bus_p->bus_assigned_entries; i++, assign_p++) {
430 		low = assign_p->pci_phys_low;
431 		hi = low + assign_p->pci_size_low;
432 		if ((addr < hi) && (addr >= low))
433 			return (B_TRUE);
434 	}
435 	return (B_FALSE);
436 }
437 
438 /*
439  * Returns whether the "addr" is in the addr range of a switch/bridge, or if the
440  * "addr" is in the assigned addr of a device.
441  */
442 static boolean_t
443 pf_in_addr_range(pcie_bus_t *bus_p, uint64_t addr)
444 {
445 	uint_t		i;
446 	uint64_t	low, hi;
447 	ppb_ranges_t	*ranges_p = bus_p->bus_addr_ranges;
448 
449 	if (!addr)
450 		return (B_FALSE);
451 
452 	/* check if given address belongs to this device */
453 	if (pf_in_assigned_addr(bus_p, addr))
454 		return (B_TRUE);
455 
456 	/* check if given address belongs to a child below this device */
457 	if (!PCIE_IS_BDG(bus_p))
458 		return (B_FALSE);
459 
460 	for (i = 0; i < bus_p->bus_addr_entries; i++, ranges_p++) {
461 		switch (ranges_p->child_high & PCI_ADDR_MASK) {
462 		case PCI_ADDR_IO:
463 		case PCI_ADDR_MEM32:
464 			low = ranges_p->child_low;
465 			hi = ranges_p->size_low + low;
466 			if ((addr < hi) && (addr >= low))
467 				return (B_TRUE);
468 			break;
469 		case PCI_ADDR_MEM64:
470 			low = ((uint64_t)ranges_p->child_mid << 32) |
471 			    (uint64_t)ranges_p->child_low;
472 			hi = (((uint64_t)ranges_p->size_high << 32) |
473 			    (uint64_t)ranges_p->size_low) + low;
474 			if ((addr < hi) && (addr >= low))
475 				return (B_TRUE);
476 			break;
477 		}
478 	}
479 	return (B_FALSE);
480 }
481 
482 static pcie_bus_t *
483 pf_is_ready(dev_info_t *dip)
484 {
485 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
486 	if (!bus_p)
487 		return (NULL);
488 
489 	if (!(bus_p->bus_fm_flags & PF_FM_READY))
490 		return (NULL);
491 	return (bus_p);
492 }
493 
494 static void
495 pf_pcix_ecc_regs_gather(pf_pcix_ecc_regs_t *pcix_ecc_regs,
496     pcie_bus_t *bus_p, boolean_t bdg)
497 {
498 	if (bdg) {
499 		pcix_ecc_regs->pcix_ecc_ctlstat = PCIX_CAP_GET(32, bus_p,
500 		    PCI_PCIX_BDG_ECC_STATUS);
501 		pcix_ecc_regs->pcix_ecc_fstaddr = PCIX_CAP_GET(32, bus_p,
502 		    PCI_PCIX_BDG_ECC_FST_AD);
503 		pcix_ecc_regs->pcix_ecc_secaddr = PCIX_CAP_GET(32, bus_p,
504 		    PCI_PCIX_BDG_ECC_SEC_AD);
505 		pcix_ecc_regs->pcix_ecc_attr = PCIX_CAP_GET(32, bus_p,
506 		    PCI_PCIX_BDG_ECC_ATTR);
507 	} else {
508 		pcix_ecc_regs->pcix_ecc_ctlstat = PCIX_CAP_GET(32, bus_p,
509 		    PCI_PCIX_ECC_STATUS);
510 		pcix_ecc_regs->pcix_ecc_fstaddr = PCIX_CAP_GET(32, bus_p,
511 		    PCI_PCIX_ECC_FST_AD);
512 		pcix_ecc_regs->pcix_ecc_secaddr = PCIX_CAP_GET(32, bus_p,
513 		    PCI_PCIX_ECC_SEC_AD);
514 		pcix_ecc_regs->pcix_ecc_attr = PCIX_CAP_GET(32, bus_p,
515 		    PCI_PCIX_ECC_ATTR);
516 	}
517 }
518 
519 
520 static void
521 pf_pcix_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p)
522 {
523 	/*
524 	 * For PCI-X device PCI-X Capability only exists for Type 0 Headers.
525 	 * PCI-X Bridge Capability only exists for Type 1 Headers.
526 	 * Both capabilities do not exist at the same time.
527 	 */
528 	if (PCIE_IS_BDG(bus_p)) {
529 		pf_pcix_bdg_err_regs_t *pcix_bdg_regs;
530 
531 		pcix_bdg_regs = PCIX_BDG_ERR_REG(pfd_p);
532 
533 		pcix_bdg_regs->pcix_bdg_sec_stat = PCIX_CAP_GET(16, bus_p,
534 		    PCI_PCIX_SEC_STATUS);
535 		pcix_bdg_regs->pcix_bdg_stat = PCIX_CAP_GET(32, bus_p,
536 		    PCI_PCIX_BDG_STATUS);
537 
538 		if (PCIX_ECC_VERSION_CHECK(bus_p)) {
539 			/*
540 			 * PCI Express to PCI-X bridges only implement the
541 			 * secondary side of the PCI-X ECC registers, bit one is
542 			 * read-only so we make sure we do not write to it.
543 			 */
544 			if (!PCIE_IS_PCIE_BDG(bus_p)) {
545 				PCIX_CAP_PUT(32, bus_p, PCI_PCIX_BDG_ECC_STATUS,
546 				    0);
547 				pf_pcix_ecc_regs_gather(
548 				    PCIX_BDG_ECC_REG(pfd_p, 0), bus_p, B_TRUE);
549 				PCIX_CAP_PUT(32, bus_p, PCI_PCIX_BDG_ECC_STATUS,
550 				    1);
551 			}
552 			pf_pcix_ecc_regs_gather(PCIX_BDG_ECC_REG(pfd_p, 0),
553 			    bus_p, B_TRUE);
554 		}
555 	} else {
556 		pf_pcix_err_regs_t *pcix_regs = PCIX_ERR_REG(pfd_p);
557 
558 		pcix_regs->pcix_command = PCIX_CAP_GET(16, bus_p,
559 		    PCI_PCIX_COMMAND);
560 		pcix_regs->pcix_status = PCIX_CAP_GET(32, bus_p,
561 		    PCI_PCIX_STATUS);
562 		if (PCIX_ECC_VERSION_CHECK(bus_p))
563 			pf_pcix_ecc_regs_gather(PCIX_ECC_REG(pfd_p), bus_p,
564 			    B_TRUE);
565 	}
566 }
567 
/*
 * Snapshot the PCIe capability error registers (and, when the device has
 * the AER extended capability, the AER registers) into pfd_p.
 */
static void
pf_pcie_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p)
{
	pf_pcie_err_regs_t *pcie_regs = PCIE_ERR_REG(pfd_p);
	pf_pcie_adv_err_regs_t *pcie_adv_regs = PCIE_ADV_REG(pfd_p);

	pcie_regs->pcie_err_status = PCIE_CAP_GET(16, bus_p, PCIE_DEVSTS);
	pcie_regs->pcie_err_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
	pcie_regs->pcie_dev_cap = PCIE_CAP_GET(32, bus_p, PCIE_DEVCAP);

	/* A PCIe bridge that is also PCI-X carries PCI-X error state too. */
	if (PCIE_IS_BDG(bus_p) && PCIE_IS_PCIX(bus_p))
		pf_pcix_regs_gather(pfd_p, bus_p);

	if (PCIE_IS_ROOT(bus_p)) {
		pf_pcie_rp_err_regs_t *pcie_rp_regs = PCIE_RP_REG(pfd_p);

		pcie_rp_regs->pcie_rp_status = PCIE_CAP_GET(32, bus_p,
		    PCIE_ROOTSTS);
		pcie_rp_regs->pcie_rp_ctl = PCIE_CAP_GET(16, bus_p,
		    PCIE_ROOTCTL);
	}

	/* Everything below requires the AER extended capability. */
	if (!PCIE_HAS_AER(bus_p))
		return;

	/* Gather UE AERs */
	pcie_adv_regs->pcie_adv_ctl = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_CTL);
	pcie_adv_regs->pcie_ue_status = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_UCE_STS);
	pcie_adv_regs->pcie_ue_mask = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_UCE_MASK);
	pcie_adv_regs->pcie_ue_sev = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_UCE_SERV);
	/* The four header log DWORDs hold the TLP header of the first UE. */
	PCIE_ADV_HDR(pfd_p, 0) = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_HDR_LOG);
	PCIE_ADV_HDR(pfd_p, 1) = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_HDR_LOG + 0x4);
	PCIE_ADV_HDR(pfd_p, 2) = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_HDR_LOG + 0x8);
	PCIE_ADV_HDR(pfd_p, 3) = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_HDR_LOG + 0xc);

	/* Gather CE AERs */
	pcie_adv_regs->pcie_ce_status = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_CE_STS);
	pcie_adv_regs->pcie_ce_mask = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_CE_MASK);

	/*
	 * If pci express to pci bridge then grab the bridge
	 * error registers.
	 */
	if (PCIE_IS_PCIE_BDG(bus_p)) {
		pf_pcie_adv_bdg_err_regs_t *pcie_bdg_regs =
		    PCIE_ADV_BDG_REG(pfd_p);

		pcie_bdg_regs->pcie_sue_ctl = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SCTL);
		pcie_bdg_regs->pcie_sue_status = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SUCE_STS);
		pcie_bdg_regs->pcie_sue_mask = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SUCE_MASK);
		pcie_bdg_regs->pcie_sue_sev = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SUCE_SERV);
		PCIE_ADV_BDG_HDR(pfd_p, 0) = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SHDR_LOG);
		PCIE_ADV_BDG_HDR(pfd_p, 1) = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SHDR_LOG + 0x4);
		PCIE_ADV_BDG_HDR(pfd_p, 2) = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SHDR_LOG + 0x8);
		PCIE_ADV_BDG_HDR(pfd_p, 3) = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SHDR_LOG + 0xc);
	}

	/*
	 * If PCI Express root port then grab the root port
	 * error registers.
	 */
	if (PCIE_IS_ROOT(bus_p)) {
		pf_pcie_adv_rp_err_regs_t *pcie_rp_regs =
		    PCIE_ADV_RP_REG(pfd_p);

		pcie_rp_regs->pcie_rp_err_cmd = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_RE_CMD);
		pcie_rp_regs->pcie_rp_err_status = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_RE_STS);
		/* Source IDs identify the requester that sent the error msg */
		pcie_rp_regs->pcie_rp_ce_src_id = PCIE_AER_GET(16, bus_p,
		    PCIE_AER_CE_SRC_ID);
		pcie_rp_regs->pcie_rp_ue_src_id = PCIE_AER_GET(16, bus_p,
		    PCIE_AER_ERR_SRC_ID);
	}
}
661 
662 static void
663 pf_pci_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p)
664 {
665 	pf_pci_err_regs_t *pci_regs = PCI_ERR_REG(pfd_p);
666 
667 	/*
668 	 * Start by reading all the error registers that are available for
669 	 * pci and pci express and for leaf devices and bridges/switches
670 	 */
671 	pci_regs->pci_err_status = PCIE_GET(16, bus_p, PCI_CONF_STAT);
672 	pci_regs->pci_cfg_comm = PCIE_GET(16, bus_p, PCI_CONF_COMM);
673 
674 	/*
675 	 * If pci-pci bridge grab PCI bridge specific error registers.
676 	 */
677 	if (PCIE_IS_BDG(bus_p)) {
678 		pf_pci_bdg_err_regs_t *pci_bdg_regs = PCI_BDG_ERR_REG(pfd_p);
679 		pci_bdg_regs->pci_bdg_sec_stat =
680 		    PCIE_GET(16, bus_p, PCI_BCNF_SEC_STATUS);
681 		pci_bdg_regs->pci_bdg_ctrl =
682 		    PCIE_GET(16, bus_p, PCI_BCNF_BCNTRL);
683 	}
684 
685 	/*
686 	 * If pci express device grab pci express error registers and
687 	 * check for advanced error reporting features and grab them if
688 	 * available.
689 	 */
690 	if (PCIE_IS_PCIE(bus_p))
691 		pf_pcie_regs_gather(pfd_p, bus_p);
692 	else if (PCIE_IS_PCIX(bus_p))
693 		pf_pcix_regs_gather(pfd_p, bus_p);
694 
695 }
696 
/*
 * Clear the PCI-X error registers by writing back the status values that
 * pf_pcix_regs_gather() snapshotted (the status bits clear when written
 * with the set bits).
 */
static void
pf_pcix_regs_clear(pf_data_t *pfd_p, pcie_bus_t *bus_p)
{
	if (PCIE_IS_BDG(bus_p)) {
		pf_pcix_bdg_err_regs_t *pcix_bdg_regs;

		pcix_bdg_regs = PCIX_BDG_ERR_REG(pfd_p);

		PCIX_CAP_PUT(16, bus_p, PCI_PCIX_SEC_STATUS,
		    pcix_bdg_regs->pcix_bdg_sec_stat);

		PCIX_CAP_PUT(32, bus_p, PCI_PCIX_BDG_STATUS,
		    pcix_bdg_regs->pcix_bdg_stat);

		if (PCIX_ECC_VERSION_CHECK(bus_p)) {
			pf_pcix_ecc_regs_t *pcix_bdg_ecc_regs;
			/*
			 * PCI Express to PCI-X bridges only implement the
			 * secondary side of the PCI-X ECC registers.  For
			 * clearing, there is no need to "select" the ECC
			 * register, just write what was originally read.
			 *
			 * Index 0 holds the primary-side snapshot (pure
			 * PCI-X bridges only); index 1 holds the
			 * secondary-side snapshot.
			 */
			if (!PCIE_IS_PCIE_BDG(bus_p)) {
				pcix_bdg_ecc_regs = PCIX_BDG_ECC_REG(pfd_p, 0);
				PCIX_CAP_PUT(32, bus_p, PCI_PCIX_BDG_ECC_STATUS,
				    pcix_bdg_ecc_regs->pcix_ecc_ctlstat);

			}
			pcix_bdg_ecc_regs = PCIX_BDG_ECC_REG(pfd_p, 1);
			PCIX_CAP_PUT(32, bus_p, PCI_PCIX_BDG_ECC_STATUS,
			    pcix_bdg_ecc_regs->pcix_ecc_ctlstat);
		}
	} else {
		pf_pcix_err_regs_t *pcix_regs = PCIX_ERR_REG(pfd_p);

		PCIX_CAP_PUT(32, bus_p, PCI_PCIX_STATUS,
		    pcix_regs->pcix_status);

		if (PCIX_ECC_VERSION_CHECK(bus_p)) {
			pf_pcix_ecc_regs_t *pcix_ecc_regs = PCIX_ECC_REG(pfd_p);

			PCIX_CAP_PUT(32, bus_p, PCI_PCIX_ECC_STATUS,
			    pcix_ecc_regs->pcix_ecc_ctlstat);
		}
	}
}
743 
/*
 * Clear the PCIe and AER error status registers by writing back the
 * snapshots taken by pf_pcie_regs_gather().  The status bits are
 * write-one-to-clear, so this clears exactly the bits that were seen set.
 */
static void
pf_pcie_regs_clear(pf_data_t *pfd_p, pcie_bus_t *bus_p)
{
	pf_pcie_err_regs_t *pcie_regs = PCIE_ERR_REG(pfd_p);
	pf_pcie_adv_err_regs_t *pcie_adv_regs = PCIE_ADV_REG(pfd_p);

	PCIE_CAP_PUT(16, bus_p, PCIE_DEVSTS, pcie_regs->pcie_err_status);

	if (PCIE_IS_BDG(bus_p) && PCIE_IS_PCIX(bus_p))
		pf_pcix_regs_clear(pfd_p, bus_p);

	/* Everything below requires the AER extended capability. */
	if (!PCIE_HAS_AER(bus_p))
		return;

	PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_STS,
	    pcie_adv_regs->pcie_ue_status);

	PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_STS,
	    pcie_adv_regs->pcie_ce_status);

	if (PCIE_IS_PCIE_BDG(bus_p)) {
		pf_pcie_adv_bdg_err_regs_t *pcie_bdg_regs =
		    PCIE_ADV_BDG_REG(pfd_p);

		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_STS,
		    pcie_bdg_regs->pcie_sue_status);
	}

	/*
	 * If PCI Express root complex then clear the root complex
	 * error registers.
	 */
	if (PCIE_IS_ROOT(bus_p)) {
		pf_pcie_adv_rp_err_regs_t *pcie_rp_regs;

		pcie_rp_regs = PCIE_ADV_RP_REG(pfd_p);

		PCIE_AER_PUT(32, bus_p, PCIE_AER_RE_STS,
		    pcie_rp_regs->pcie_rp_err_status);
	}
}
785 
786 static void
787 pf_pci_regs_clear(pf_data_t *pfd_p, pcie_bus_t *bus_p)
788 {
789 	if (PCIE_IS_PCIE(bus_p))
790 		pf_pcie_regs_clear(pfd_p, bus_p);
791 	else if (PCIE_IS_PCIX(bus_p))
792 		pf_pcix_regs_clear(pfd_p, bus_p);
793 
794 	PCIE_PUT(16, bus_p, PCI_CONF_STAT, pfd_p->pe_pci_regs->pci_err_status);
795 
796 	if (PCIE_IS_BDG(bus_p)) {
797 		pf_pci_bdg_err_regs_t *pci_bdg_regs = PCI_BDG_ERR_REG(pfd_p);
798 		PCIE_PUT(16, bus_p, PCI_BCNF_SEC_STATUS,
799 		    pci_bdg_regs->pci_bdg_sec_stat);
800 	}
801 }
802 
803 /* ARGSUSED */
804 void
805 pcie_clear_errors(dev_info_t *dip)
806 {
807 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
808 	pf_data_t *pfd_p = PCIE_DIP2PFD(dip);
809 
810 	ASSERT(bus_p);
811 
812 	pf_pci_regs_gather(pfd_p, bus_p);
813 	pf_pci_regs_clear(pfd_p, bus_p);
814 }
815 
/* Find the fault BDF, fault Addr or full scan on a PCIe Root Port. */
static void
pf_pci_find_rp_fault(pf_data_t *pfd_p, pcie_bus_t *bus_p)
{
	pf_root_fault_t *root_fault = PCIE_ROOT_FAULT(pfd_p);
	pf_pcie_adv_rp_err_regs_t *rp_regs = PCIE_ADV_RP_REG(pfd_p);
	uint32_t root_err = rp_regs->pcie_rp_err_status;
	uint32_t ue_err = PCIE_ADV_REG(pfd_p)->pcie_ue_status;
	int num_faults = 0;

	/* Since this data structure is reused, make sure to reset it */
	root_fault->full_scan = B_FALSE;
	root_fault->scan_bdf = PCIE_INVALID_BDF;
	root_fault->scan_addr = 0;

	/*
	 * Without AER there is no source information at all; a set
	 * secondary status forces a full fabric scan.
	 */
	if (!PCIE_HAS_AER(bus_p) &&
	    (PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat & PF_PCI_BDG_ERR)) {
		PCIE_ROOT_FAULT(pfd_p)->full_scan = B_TRUE;
		return;
	}

	/*
	 * Check to see if an error has been received that
	 * requires a scan of the fabric.  Count the number of
	 * faults seen.  If MUL CE/FE_NFE that counts for
	 * atleast 2 faults, so just return with full_scan.
	 */
	if ((root_err & PCIE_AER_RE_STS_MUL_CE_RCVD) ||
	    (root_err & PCIE_AER_RE_STS_MUL_FE_NFE_RCVD)) {
		PCIE_ROOT_FAULT(pfd_p)->full_scan = B_TRUE;
		return;
	}

	if (root_err & PCIE_AER_RE_STS_CE_RCVD)
		num_faults++;

	if (root_err & PCIE_AER_RE_STS_FE_NFE_RCVD)
		num_faults++;

	if (ue_err & PCIE_AER_UCE_CA)
		num_faults++;

	if (ue_err & PCIE_AER_UCE_UR)
		num_faults++;

	/* If no faults just return */
	if (num_faults == 0)
		return;

	/* If faults > 1 do full scan */
	if (num_faults > 1) {
		PCIE_ROOT_FAULT(pfd_p)->full_scan = B_TRUE;
		return;
	}

	/* By this point, there is only 1 fault detected */
	if (root_err & PCIE_AER_RE_STS_CE_RCVD) {
		/* CE message received: the RP latched the source BDF. */
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = rp_regs->pcie_rp_ce_src_id;
		num_faults--;
	} else if (root_err & PCIE_AER_RE_STS_FE_NFE_RCVD) {
		/* FE/NFE message received: use the UE source BDF. */
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = rp_regs->pcie_rp_ue_src_id;
		num_faults--;
	} else if ((HAS_AER_LOGS(pfd_p, PCIE_AER_UCE_CA) ||
	    HAS_AER_LOGS(pfd_p, PCIE_AER_UCE_UR)) &&
	    (pf_tlp_decode(PCIE_PFD2BUS(pfd_p), PCIE_ADV_REG(pfd_p)) ==
	    DDI_SUCCESS)) {
		/*
		 * CA/UR seen by the RP itself: decode the logged TLP header
		 * to recover the target address of the failed transaction.
		 */
		PCIE_ROOT_FAULT(pfd_p)->scan_addr =
		    PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_addr;
		num_faults--;
	}

	/*
	 * This means an error did occur, but we couldn't extract the fault BDF
	 */
	if (num_faults > 0)
		PCIE_ROOT_FAULT(pfd_p)->full_scan = B_TRUE;

}
894 
895 
896 /*
897  * Load PCIe Fault Data for PCI/PCIe devices into PCIe Fault Data Queue
898  *
899  * Returns a scan flag.
900  * o PF_SCAN_SUCCESS - Error gathered and cleared sucessfuly, data added to
901  *   Fault Q
902  * o PF_SCAN_BAD_RESPONSE - Unable to talk to device, item added to fault Q
903  * o PF_SCAN_CB_FAILURE - A hardened device deemed that the error was fatal.
904  * o PF_SCAN_NO_ERR_IN_CHILD - Only applies to bridge to prevent further
905  *   unnecessary scanning
906  * o PF_SCAN_IN_DQ - This device has already been scanned; it was skipped this
907  *   time.
908  */
static int
pf_default_hdl(dev_info_t *dip, pf_impl_t *impl)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	pf_data_t *pfd_p = PCIE_DIP2PFD(dip);
	int cb_sts, scan_flag = PF_SCAN_SUCCESS;

	/* Make sure this device hasn't already been snapshotted and cleared */
	if (pfd_p->pe_valid == B_TRUE) {
		scan_flag |= PF_SCAN_IN_DQ;
		goto done;
	}

	/*
	 * If this is a device used for PCI passthrough into a virtual machine,
	 * don't let any error it caused panic the system.
	 */
	if (bus_p->bus_fm_flags & PF_FM_IS_PASSTHRU)
		pfd_p->pe_severity_mask |= PF_ERR_PANIC;

	/*
	 * Read vendor/device ID and check with cached data; if it doesn't
	 * match, it could very well mean that the device is no longer
	 * responding.  In this case, we return PF_SCAN_BAD_RESPONSE; should
	 * the caller choose to panic in this case, we will have the basic
	 * info in the error queue for the purposes of postmortem debugging.
	 */
	if (PCIE_GET(32, bus_p, PCI_CONF_VENID) != bus_p->bus_dev_ven_id) {
		char buf[FM_MAX_CLASS];

		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
		    PCI_ERROR_SUBCLASS, PCI_NR);
		ddi_fm_ereport_post(dip, buf, fm_ena_generate(0, FM_ENA_FMT1),
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, NULL);

		/*
		 * For IOV/Hotplug purposes skip gathering info for this device,
		 * but populate affected info and severity.  Clear out any data
		 * that maybe been saved in the last fabric scan.
		 */
		pf_reset_pfd(pfd_p);
		pfd_p->pe_severity_flags = PF_ERR_BAD_RESPONSE;
		PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags = PF_AFFECTED_SELF;

		/* Add the snapshot to the error q */
		pf_en_dq(pfd_p, impl);
		pfd_p->pe_valid = B_TRUE;

		return (PF_SCAN_BAD_RESPONSE);
	}

	/* Snapshot then clear this device's error registers. */
	pf_pci_regs_gather(pfd_p, bus_p);
	pf_pci_regs_clear(pfd_p, bus_p);

	/* Root Ports: derive the fault BDF/addr that directs the scan. */
	if (PCIE_IS_RP(bus_p))
		pf_pci_find_rp_fault(pfd_p, bus_p);

	/* Give any hardened driver its FM error callback. */
	cb_sts = pf_fm_callback(dip, impl->pf_derr);

	if (cb_sts == DDI_FM_FATAL || cb_sts == DDI_FM_UNKNOWN)
		scan_flag |= PF_SCAN_CB_FAILURE;

	/* Add the snapshot to the error q */
	pf_en_dq(pfd_p, impl);

done:
	/*
	 * If a bridge does not have any error no need to scan any further down.
	 * For PCIe devices, check the PCIe device status and PCI secondary
	 * status.
	 * - Some non-compliant PCIe devices do not utilize PCIe
	 *   error registers.  If so rely on legacy PCI error registers.
	 * For PCI devices, check the PCI secondary status.
	 */
	if (PCIE_IS_PCIE_BDG(bus_p) &&
	    !(PCIE_ERR_REG(pfd_p)->pcie_err_status & PF_PCIE_BDG_ERR) &&
	    !(PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat & PF_PCI_BDG_ERR))
		scan_flag |= PF_SCAN_NO_ERR_IN_CHILD;

	if (PCIE_IS_PCI_BDG(bus_p) &&
	    !(PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat & PF_PCI_BDG_ERR))
		scan_flag |= PF_SCAN_NO_ERR_IN_CHILD;

	pfd_p->pe_valid = B_TRUE;
	return (scan_flag);
}
995 
996 /*
997  * Set the passthru flag on a device bus_p. Called by passthru drivers to
998  * indicate when a device is or is no longer under passthru control.
999  */
1000 void
1001 pf_set_passthru(dev_info_t *dip, boolean_t is_passthru)
1002 {
1003 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
1004 
1005 	if (is_passthru) {
1006 		atomic_or_uint(&bus_p->bus_fm_flags, PF_FM_IS_PASSTHRU);
1007 	} else {
1008 		atomic_and_uint(&bus_p->bus_fm_flags, ~PF_FM_IS_PASSTHRU);
1009 	}
1010 }
1011 
1012 /*
1013  * Called during postattach to initialize a device's error handling
1014  * capabilities.  If the devices has already been hardened, then there isn't
1015  * much needed.  Otherwise initialize the device's default FMA capabilities.
1016  *
1017  * In a future project where PCIe support is removed from pcifm, several
1018  * "properties" that are setup in ddi_fm_init and pci_ereport_setup need to be
1019  * created here so that the PCI/PCIe eversholt rules will work properly.
1020  */
void
pf_init(dev_info_t *dip, ddi_iblock_cookie_t ibc, ddi_attach_cmd_t cmd)
{
	pcie_bus_t		*bus_p = PCIE_DIP2BUS(dip);
	struct i_ddi_fmhdl	*fmhdl = DEVI(dip)->devi_fmhdl;
	boolean_t		need_cb_register = B_FALSE;

	if (!bus_p) {
		cmn_err(CE_WARN, "devi_bus information is not set for %s%d.\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return;
	}

	if (fmhdl) {
		/*
		 * If device is only ereport capable and not callback capable
		 * make it callback capable. The only downside is that the
		 * "fm-errcb-capable" property is not created for this device
		 * which should be ok since it's not used anywhere.
		 */
		if (!(fmhdl->fh_cap & DDI_FM_ERRCB_CAPABLE))
			need_cb_register = B_TRUE;
	} else {
		int cap;
		/*
		 * fm-capable in driver.conf can be used to set fm_capabilities.
		 * If fm-capable is not defined, set the default
		 * DDI_FM_EREPORT_CAPABLE and DDI_FM_ERRCB_CAPABLE.
		 */
		cap = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "fm-capable",
		    DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE);
		cap &= (DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE);

		/* No fm handle yet: mark the device as non-hardened */
		atomic_or_uint(&bus_p->bus_fm_flags, PF_FM_IS_NH);

		if (cmd == DDI_ATTACH) {
			ddi_fm_init(dip, &cap, &ibc);
			pci_ereport_setup(dip);
		}

		if (cap & DDI_FM_ERRCB_CAPABLE)
			need_cb_register = B_TRUE;

		/* ddi_fm_init above may have created the handle; re-fetch */
		fmhdl = DEVI(dip)->devi_fmhdl;
	}

	/* If ddi_fm_init fails for any reason RETURN */
	if (!fmhdl) {
		(void) atomic_swap_uint(&bus_p->bus_fm_flags, 0);
		return;
	}

	fmhdl->fh_cap |=  DDI_FM_ERRCB_CAPABLE;
	if (cmd == DDI_ATTACH) {
		/* Register a no-op callback so the device appears hardened */
		if (need_cb_register)
			ddi_fm_handler_register(dip, pf_dummy_cb, NULL);
	}

	/* Device may now participate in fabric error handling */
	atomic_or_uint(&bus_p->bus_fm_flags, PF_FM_READY);
}
1082 
1083 /* undo FMA lock, called at predetach */
void
pf_fini(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	if (!bus_p)
		return;

	/* Don't fini anything if device isn't FM Ready */
	if (!(bus_p->bus_fm_flags & PF_FM_READY))
		return;

	/* no other code should set the flag to false */
	atomic_and_uint(&bus_p->bus_fm_flags, ~PF_FM_READY);

	/*
	 * Grab the mutex to make sure device isn't in the middle of
	 * error handling.  Setting the bus_fm_flag to ~PF_FM_READY
	 * should prevent this device from being error handled after
	 * the mutex has been released.
	 */
	(void) pf_handler_enter(dip, NULL);
	pf_handler_exit(dip);

	/* undo non-hardened drivers */
	if (bus_p->bus_fm_flags & PF_FM_IS_NH) {
		if (cmd == DDI_DETACH) {
			atomic_and_uint(&bus_p->bus_fm_flags, ~PF_FM_IS_NH);
			pci_ereport_teardown(dip);
			/*
			 * ddi_fm_fini itself calls ddi_fm_handler_unregister,
			 * so no need to explicitly call unregister.
			 */
			ddi_fm_fini(dip);
		}
	}
}
1121 
/*
 * No-op error callback registered on behalf of non-hardened drivers in
 * pf_init() so the framework treats them as callback capable; it always
 * reports DDI_FM_OK.
 */
/*ARGSUSED*/
static int
pf_dummy_cb(dev_info_t *dip, ddi_fm_error_t *derr, const void *not_used)
{
	return (DDI_FM_OK);
}
1128 
1129 /*
1130  * Add PFD to queue.  If it is an RC add it to the beginning,
1131  * otherwise add it to the end.
1132  */
static void
pf_en_dq(pf_data_t *pfd_p, pf_impl_t *impl)
{
	pf_data_t *head_p = impl->pf_dq_head_p;
	pf_data_t *tail_p = impl->pf_dq_tail_p;

	impl->pf_total++;

	/* An empty queue is seeded with a root (RC/RP) entry */
	if (!head_p) {
		ASSERT(PFD_IS_ROOT(pfd_p));
		impl->pf_dq_head_p = pfd_p;
		impl->pf_dq_tail_p = pfd_p;
		pfd_p->pe_prev = NULL;
		pfd_p->pe_next = NULL;
		return;
	}

	/* Check if this is a Root Port erpt */
	if (PFD_IS_ROOT(pfd_p)) {
		pf_data_t *root_p, *last_p = NULL;

		/* The first item must be a RP */
		root_p = head_p;
		for (last_p = head_p; last_p && PFD_IS_ROOT(last_p);
		    last_p = last_p->pe_next)
			root_p = last_p;

		/* root_p is the last RP pfd. last_p is the first non-RP pfd. */
		root_p->pe_next = pfd_p;
		pfd_p->pe_prev = root_p;
		pfd_p->pe_next = last_p;

		/* If there is no non-RP entry, the new RP becomes the tail */
		if (last_p)
			last_p->pe_prev = pfd_p;
		else
			tail_p = pfd_p;
	} else {
		/* Non-root entries are appended at the tail */
		tail_p->pe_next = pfd_p;
		pfd_p->pe_prev = tail_p;
		pfd_p->pe_next = NULL;
		tail_p = pfd_p;
	}

	impl->pf_dq_head_p = head_p;
	impl->pf_dq_tail_p = tail_p;
}
1179 
1180 /*
1181  * Ignore:
1182  * - TRAINING: as leaves do not have children
1183  * - SD: as leaves do not have children
1184  */
/*
 * Fault table for PCIe endpoints (and PCI devices on a PCIe link), used by
 * pf_analyse_error() against the unmasked AER UE status.  Each entry:
 * { UE status bit, analysis handler, affected flags, secondary flags }.
 */
const pf_fab_err_tbl_t pcie_pcie_tbl[] = {
	{PCIE_AER_UCE_DLP,	pf_panic,
	    PF_AFFECTED_PARENT, 0},

	{PCIE_AER_UCE_PTLP,	pf_analyse_ptlp,
	    PF_AFFECTED_SELF, 0},

	{PCIE_AER_UCE_FCP,	pf_panic,
	    PF_AFFECTED_PARENT, 0},

	{PCIE_AER_UCE_TO,	pf_analyse_to,
	    PF_AFFECTED_SELF, 0},

	{PCIE_AER_UCE_CA,	pf_analyse_ca_ur,
	    PF_AFFECTED_SELF, 0},

	{PCIE_AER_UCE_UC,	pf_analyse_uc,
	    0, 0},

	{PCIE_AER_UCE_RO,	pf_panic,
	    PF_AFFECTED_PARENT, 0},

	{PCIE_AER_UCE_MTLP,	pf_panic,
	    PF_AFFECTED_PARENT, 0},

	{PCIE_AER_UCE_ECRC,	pf_panic,
	    PF_AFFECTED_SELF, 0},

	{PCIE_AER_UCE_UR,	pf_analyse_ca_ur,
	    PF_AFFECTED_SELF, 0},

	{0, NULL, 0, 0}
};
1218 
/*
 * Fault table for Root Ports (and pseudo Root Complexes), used by
 * pf_analyse_error() against the unmasked AER UE status.  Entries whose
 * primary flag is PF_AFFECTED_AER/ADDR fall back to the secondary flags
 * when the corresponding logs are absent (see pf_analyse_error_tbl()).
 */
const pf_fab_err_tbl_t pcie_rp_tbl[] = {
	{PCIE_AER_UCE_TRAINING,	pf_no_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_DLP,	pf_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_SD,	pf_no_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_PTLP,	pf_analyse_ptlp,
	    PF_AFFECTED_AER, PF_AFFECTED_CHILDREN},

	{PCIE_AER_UCE_FCP,	pf_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_TO,	pf_panic,
	    PF_AFFECTED_ADDR, PF_AFFECTED_CHILDREN},

	{PCIE_AER_UCE_CA,	pf_no_panic,
	    PF_AFFECTED_AER, PF_AFFECTED_CHILDREN},

	{PCIE_AER_UCE_UC,	pf_analyse_uc,
	    0, 0},

	{PCIE_AER_UCE_RO,	pf_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_MTLP,	pf_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_AER,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},

	{PCIE_AER_UCE_ECRC,	pf_panic,
	    PF_AFFECTED_AER, PF_AFFECTED_CHILDREN},

	{PCIE_AER_UCE_UR,	pf_no_panic,
	    PF_AFFECTED_AER, PF_AFFECTED_CHILDREN},

	{0, NULL, 0, 0}
};
1259 
/*
 * Fault table for switch upstream/downstream ports, used by
 * pf_analyse_error() against the unmasked AER UE status.
 */
const pf_fab_err_tbl_t pcie_sw_tbl[] = {
	{PCIE_AER_UCE_TRAINING,	pf_no_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_DLP,	pf_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_SD,	pf_no_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_PTLP,	pf_analyse_ptlp,
	    PF_AFFECTED_AER, PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},

	{PCIE_AER_UCE_FCP,	pf_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_TO,	pf_analyse_to,
	    PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_CA,	pf_analyse_ca_ur,
	    PF_AFFECTED_AER, PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},

	{PCIE_AER_UCE_UC,	pf_analyse_uc,
	    0, 0},

	{PCIE_AER_UCE_RO,	pf_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_UCE_MTLP,	pf_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_AER,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},

	{PCIE_AER_UCE_ECRC,	pf_panic,
	    PF_AFFECTED_AER, PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},

	{PCIE_AER_UCE_UR,	pf_analyse_ca_ur,
	    PF_AFFECTED_AER, PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},

	{0, NULL, 0, 0}
};
1300 
/*
 * Fault table for PCIe-to-PCI bridges, used by pf_analyse_error() against
 * the unmasked secondary (SAER) UE status.
 */
const pf_fab_err_tbl_t pcie_pcie_bdg_tbl[] = {
	{PCIE_AER_SUCE_TA_ON_SC,	pf_analyse_sc,
	    0, 0},

	{PCIE_AER_SUCE_MA_ON_SC,	pf_analyse_sc,
	    0, 0},

	{PCIE_AER_SUCE_RCVD_TA,		pf_analyse_ma_ta,
	    0, 0},

	{PCIE_AER_SUCE_RCVD_MA,		pf_analyse_ma_ta,
	    0, 0},

	{PCIE_AER_SUCE_USC_ERR,		pf_panic,
	    PF_AFFECTED_SAER, PF_AFFECTED_CHILDREN},

	{PCIE_AER_SUCE_USC_MSG_DATA_ERR, pf_analyse_ma_ta,
	    PF_AFFECTED_SAER, PF_AFFECTED_CHILDREN},

	{PCIE_AER_SUCE_UC_DATA_ERR,	pf_analyse_uc_data,
	    PF_AFFECTED_SAER, PF_AFFECTED_CHILDREN},

	{PCIE_AER_SUCE_UC_ATTR_ERR,	pf_panic,
	    PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_SUCE_UC_ADDR_ERR,	pf_panic,
	    PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_SUCE_TIMER_EXPIRED,	pf_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{PCIE_AER_SUCE_PERR_ASSERT,	pf_analyse_perr_assert,
	    0, 0},

	{PCIE_AER_SUCE_SERR_ASSERT,	pf_no_panic,
	    0, 0},

	{PCIE_AER_SUCE_INTERNAL_ERR,	pf_panic,
	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},

	{0, NULL, 0, 0}
};
1343 
/*
 * Fault table for legacy PCI bridges, used by pf_analyse_error() against
 * the PCI secondary status register.
 */
const pf_fab_err_tbl_t pcie_pci_bdg_tbl[] = {
	{PCI_STAT_PERROR,	pf_analyse_pci,
	    PF_AFFECTED_SELF, 0},

	{PCI_STAT_S_PERROR,	pf_analyse_pci,
	    PF_AFFECTED_SELF, 0},

	{PCI_STAT_S_SYSERR,	pf_panic,
	    PF_AFFECTED_SELF, 0},

	{PCI_STAT_R_MAST_AB,	pf_analyse_pci,
	    PF_AFFECTED_SELF, 0},

	{PCI_STAT_R_TARG_AB,	pf_analyse_pci,
	    PF_AFFECTED_SELF, 0},

	{PCI_STAT_S_TARG_AB,	pf_analyse_pci,
	    PF_AFFECTED_SELF, 0},

	{0, NULL, 0, 0}
};
1365 
/*
 * Fault table for legacy PCI devices, used by pf_analyse_error() against
 * the PCI status register.
 */
const pf_fab_err_tbl_t pcie_pci_tbl[] = {
	{PCI_STAT_PERROR,	pf_analyse_pci,
	    PF_AFFECTED_SELF, 0},

	{PCI_STAT_S_PERROR,	pf_analyse_pci,
	    PF_AFFECTED_SELF, 0},

	{PCI_STAT_S_SYSERR,	pf_panic,
	    PF_AFFECTED_SELF, 0},

	{PCI_STAT_R_MAST_AB,	pf_analyse_pci,
	    PF_AFFECTED_SELF, 0},

	{PCI_STAT_R_TARG_AB,	pf_analyse_pci,
	    PF_AFFECTED_SELF, 0},

	{PCI_STAT_S_TARG_AB,	pf_analyse_pci,
	    PF_AFFECTED_SELF, 0},

	{0, NULL, 0, 0}
};
1387 
/*
 * Currently unmasked (secondary) AER uncorrectable error bits: the status
 * register ANDed with the complement of the mask register (XOR with
 * 0xFFFFFFFF inverts the 32-bit mask).
 */
#define	PF_MASKED_AER_ERR(pfd_p) \
	(PCIE_ADV_REG(pfd_p)->pcie_ue_status & \
	    ((PCIE_ADV_REG(pfd_p)->pcie_ue_mask) ^ 0xFFFFFFFF))
#define	PF_MASKED_SAER_ERR(pfd_p) \
	(PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_status & \
	    ((PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_mask) ^ 0xFFFFFFFF))
1394 /*
1395  * Analyse all the PCIe Fault Data (erpt) gathered during dispatch in the erpt
1396  * Queue.
1397  */
static int
pf_analyse_error(ddi_fm_error_t *derr, pf_impl_t *impl)
{
	int		sts_flags, error_flags = 0;
	pf_data_t	*pfd_p;

	/* Walk every fault-data entry gathered during the fabric scan */
	for (pfd_p = impl->pf_dq_head_p; pfd_p; pfd_p = pfd_p->pe_next) {
		sts_flags = 0;

		/* skip analysing error when no error info is gathered */
		if (pfd_p->pe_severity_flags == PF_ERR_BAD_RESPONSE)
			goto done;

		/* Dispatch to the fault table(s) matching the device type */
		switch (PCIE_PFD2BUS(pfd_p)->bus_dev_type) {
		case PCIE_PCIECAP_DEV_TYPE_PCIE_DEV:
		case PCIE_PCIECAP_DEV_TYPE_PCI_DEV:
			if (PCIE_DEVSTS_CE_DETECTED &
			    PCIE_ERR_REG(pfd_p)->pcie_err_status)
				sts_flags |= PF_ERR_CE;

			pf_adjust_for_no_aer(pfd_p);
			sts_flags |= pf_analyse_error_tbl(derr, impl,
			    pfd_p, pcie_pcie_tbl, PF_MASKED_AER_ERR(pfd_p));
			break;
		case PCIE_PCIECAP_DEV_TYPE_ROOT:
			pf_adjust_for_no_aer(pfd_p);
			sts_flags |= pf_analyse_error_tbl(derr, impl,
			    pfd_p, pcie_rp_tbl, PF_MASKED_AER_ERR(pfd_p));
			break;
		case PCIE_PCIECAP_DEV_TYPE_RC_PSEUDO:
			/* no adjust_for_aer for pseudo RC */
			/* keep the severity passed on from RC if any */
			sts_flags |= pfd_p->pe_severity_flags;
			sts_flags |= pf_analyse_error_tbl(derr, impl, pfd_p,
			    pcie_rp_tbl, PF_MASKED_AER_ERR(pfd_p));
			break;
		case PCIE_PCIECAP_DEV_TYPE_UP:
		case PCIE_PCIECAP_DEV_TYPE_DOWN:
			if (PCIE_DEVSTS_CE_DETECTED &
			    PCIE_ERR_REG(pfd_p)->pcie_err_status)
				sts_flags |= PF_ERR_CE;

			pf_adjust_for_no_aer(pfd_p);
			sts_flags |= pf_analyse_error_tbl(derr, impl,
			    pfd_p, pcie_sw_tbl, PF_MASKED_AER_ERR(pfd_p));
			break;
		case PCIE_PCIECAP_DEV_TYPE_PCIE2PCI:
			if (PCIE_DEVSTS_CE_DETECTED &
			    PCIE_ERR_REG(pfd_p)->pcie_err_status)
				sts_flags |= PF_ERR_CE;

			/* Bridges are analysed on both primary and secondary */
			pf_adjust_for_no_aer(pfd_p);
			pf_adjust_for_no_saer(pfd_p);
			sts_flags |= pf_analyse_error_tbl(derr,
			    impl, pfd_p, pcie_pcie_tbl,
			    PF_MASKED_AER_ERR(pfd_p));
			sts_flags |= pf_analyse_error_tbl(derr,
			    impl, pfd_p, pcie_pcie_bdg_tbl,
			    PF_MASKED_SAER_ERR(pfd_p));
			/*
			 * Some non-compliant PCIe devices do not utilize PCIe
			 * error registers.  So fallthrough and rely on legacy
			 * PCI error registers.
			 */
			if ((PCIE_DEVSTS_NFE_DETECTED | PCIE_DEVSTS_FE_DETECTED)
			    & PCIE_ERR_REG(pfd_p)->pcie_err_status)
				break;
			/* FALLTHROUGH */
		case PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO:
			sts_flags |= pf_analyse_error_tbl(derr, impl,
			    pfd_p, pcie_pci_tbl,
			    PCI_ERR_REG(pfd_p)->pci_err_status);

			if (!PCIE_IS_BDG(PCIE_PFD2BUS(pfd_p)))
				break;

			sts_flags |= pf_analyse_error_tbl(derr,
			    impl, pfd_p, pcie_pci_bdg_tbl,
			    PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat);
		}

		pfd_p->pe_severity_flags = sts_flags;

done:
		pfd_p->pe_orig_severity_flags = pfd_p->pe_severity_flags;
		/* Have pciev_eh adjust the severity */
		pfd_p->pe_severity_flags = pciev_eh(pfd_p, impl);

		/* Drop any severities the device's mask forgives */
		pfd_p->pe_severity_flags &= ~pfd_p->pe_severity_mask;

		error_flags |= pfd_p->pe_severity_flags;
	}

	return (error_flags);
}
1493 
1494 static int
1495 pf_analyse_error_tbl(ddi_fm_error_t *derr, pf_impl_t *impl,
1496     pf_data_t *pfd_p, const pf_fab_err_tbl_t *tbl, uint32_t err_reg)
1497 {
1498 	const pf_fab_err_tbl_t *row;
1499 	int err = 0;
1500 	uint16_t flags;
1501 	uint32_t bit;
1502 
1503 	for (row = tbl; err_reg && (row->bit != 0); row++) {
1504 		bit = row->bit;
1505 		if (!(err_reg & bit))
1506 			continue;
1507 		err |= row->handler(derr, bit, impl->pf_dq_head_p, pfd_p);
1508 
1509 		flags = row->affected_flags;
1510 		/*
1511 		 * check if the primary flag is valid;
1512 		 * if not, use the secondary flag
1513 		 */
1514 		if (flags & PF_AFFECTED_AER) {
1515 			if (!HAS_AER_LOGS(pfd_p, bit)) {
1516 				flags = row->sec_affected_flags;
1517 			}
1518 		} else if (flags & PF_AFFECTED_SAER) {
1519 			if (!HAS_SAER_LOGS(pfd_p, bit)) {
1520 				flags = row->sec_affected_flags;
1521 			}
1522 		} else if (flags & PF_AFFECTED_ADDR) {
1523 			/* only Root has this flag */
1524 			if (PCIE_ROOT_FAULT(pfd_p)->scan_addr == 0) {
1525 				flags = row->sec_affected_flags;
1526 			}
1527 		}
1528 
1529 		PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags |= flags;
1530 	}
1531 
1532 	if (!err)
1533 		err = PF_ERR_NO_ERROR;
1534 
1535 	return (err);
1536 }
1537 
1538 /*
 * PCIe Completer Abort and Unsupported Request error analyser.  If a PCIe
 * device
1540  * issues a CA/UR a corresponding Received CA/UR should have been seen in the
1541  * PCIe root complex.  Check to see if RC did indeed receive a CA/UR, if so then
1542  * this error may be safely ignored.  If not check the logs and see if an
1543  * associated handler for this transaction can be found.
1544  */
1545 /* ARGSUSED */
1546 static int
1547 pf_analyse_ca_ur(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1548     pf_data_t *pfd_p)
1549 {
1550 	uint32_t	abort_type;
1551 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1552 
1553 	/* If UR's are masked forgive this error */
1554 	if ((pcie_get_aer_uce_mask() & PCIE_AER_UCE_UR) &&
1555 	    (bit == PCIE_AER_UCE_UR))
1556 		return (PF_ERR_NO_PANIC);
1557 
1558 	/*
1559 	 * If a RP has an CA/UR it means a leaf sent a bad request to the RP
1560 	 * such as a config read or a bad DMA address.
1561 	 */
1562 	if (PCIE_IS_RP(PCIE_PFD2BUS(pfd_p)))
1563 		goto handle_lookup;
1564 
1565 	if (bit == PCIE_AER_UCE_UR)
1566 		abort_type = PCI_STAT_R_MAST_AB;
1567 	else
1568 		abort_type = PCI_STAT_R_TARG_AB;
1569 
1570 	if (pf_matched_in_rc(dq_head_p, pfd_p, abort_type))
1571 		return (PF_ERR_MATCHED_RC);
1572 
1573 handle_lookup:
1574 	if (HAS_AER_LOGS(pfd_p, bit) &&
1575 	    pf_log_hdl_lookup(rpdip, derr, pfd_p, B_TRUE) == PF_HDL_FOUND)
1576 			return (PF_ERR_MATCHED_DEVICE);
1577 
1578 	return (PF_ERR_PANIC);
1579 }
1580 
1581 /*
1582  * PCIe-PCI Bridge Received Master Abort and Target error analyser.  If a PCIe
1583  * Bridge receives a MA/TA a corresponding sent CA/UR should have been seen in
1584  * the PCIe root complex.  Check to see if RC did indeed receive a CA/UR, if so
1585  * then this error may be safely ignored.  If not check the logs and see if an
1586  * associated handler for this transaction can be found.
1587  */
1588 /* ARGSUSED */
1589 static int
1590 pf_analyse_ma_ta(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1591     pf_data_t *pfd_p)
1592 {
1593 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1594 	uint32_t	abort_type;
1595 
1596 	/* If UR's are masked forgive this error */
1597 	if ((pcie_get_aer_uce_mask() & PCIE_AER_UCE_UR) &&
1598 	    (bit == PCIE_AER_SUCE_RCVD_MA))
1599 		return (PF_ERR_NO_PANIC);
1600 
1601 	if (bit == PCIE_AER_SUCE_RCVD_MA)
1602 		abort_type = PCI_STAT_R_MAST_AB;
1603 	else
1604 		abort_type = PCI_STAT_R_TARG_AB;
1605 
1606 	if (pf_matched_in_rc(dq_head_p, pfd_p, abort_type))
1607 		return (PF_ERR_MATCHED_RC);
1608 
1609 	if (!HAS_SAER_LOGS(pfd_p, bit))
1610 		return (PF_ERR_PANIC);
1611 
1612 	if (pf_log_hdl_lookup(rpdip, derr, pfd_p, B_FALSE) == PF_HDL_FOUND)
1613 		return (PF_ERR_MATCHED_DEVICE);
1614 
1615 	return (PF_ERR_PANIC);
1616 }
1617 
1618 /*
1619  * Generic PCI error analyser.  This function is used for Parity Errors,
1620  * Received Master Aborts, Received Target Aborts, and Signaled Target Aborts.
1621  * In general PCI devices do not have error logs, it is very difficult to figure
1622  * out what transaction caused the error.  Instead find the nearest PCIe-PCI
1623  * Bridge and check to see if it has logs and if it has an error associated with
1624  * this PCI Device.
1625  */
1626 /* ARGSUSED */
static int
pf_analyse_pci(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
    pf_data_t *pfd_p)
{
	pf_data_t	*parent_pfd_p;
	uint16_t	cmd;
	uint32_t	aer_ue_status;
	pcie_bus_t	*bus_p = PCIE_PFD2BUS(pfd_p);
	pf_pcie_adv_bdg_err_regs_t *parent_saer_p;

	/* A signaled system error is treated as fatal */
	if (PCI_ERR_REG(pfd_p)->pci_err_status & PCI_STAT_S_SYSERR)
		return (PF_ERR_PANIC);

	/* If UR's are masked forgive this error */
	if ((pcie_get_aer_uce_mask() & PCIE_AER_UCE_UR) &&
	    (bit == PCI_STAT_R_MAST_AB))
		return (PF_ERR_NO_PANIC);


	/* Map the PCI status bit onto the SAER bits a bridge could log */
	if (bit & (PCI_STAT_PERROR | PCI_STAT_S_PERROR)) {
		aer_ue_status = PCIE_AER_SUCE_PERR_ASSERT;
	} else {
		aer_ue_status = (PCIE_AER_SUCE_TA_ON_SC |
		    PCIE_AER_SUCE_MA_ON_SC | PCIE_AER_SUCE_RCVD_TA |
		    PCIE_AER_SUCE_RCVD_MA);
	}

	/* Without a parent PCIe bridge there are no logs to consult */
	parent_pfd_p = pf_get_parent_pcie_bridge(pfd_p);
	if (parent_pfd_p == NULL)
		return (PF_ERR_PANIC);

	/* Check if parent bridge has seen this error */
	parent_saer_p = PCIE_ADV_BDG_REG(parent_pfd_p);
	if (!(parent_saer_p->pcie_sue_status & aer_ue_status) ||
	    !HAS_SAER_LOGS(parent_pfd_p, aer_ue_status))
		return (PF_ERR_PANIC);

	/*
	 * If the addr or bdf from the parent PCIe bridge logs belong to this
	 * PCI device, assume the PCIe bridge's error handling has already taken
	 * care of this PCI device's error.
	 */
	if (pf_pci_decode(parent_pfd_p, &cmd) != DDI_SUCCESS)
		return (PF_ERR_PANIC);

	if ((parent_saer_p->pcie_sue_tgt_bdf == bus_p->bus_bdf) ||
	    pf_in_addr_range(bus_p, parent_saer_p->pcie_sue_tgt_addr))
		return (PF_ERR_MATCHED_PARENT);

	/*
	 * If this device is a PCI-PCI bridge, check if the bdf in the parent
	 * PCIe bridge logs is in the range of this PCI-PCI Bridge's bus ranges.
	 * If they are, then assume the PCIe bridge's error handling has already
	 * taken care of this PCI-PCI bridge device's error.
	 */
	if (PCIE_IS_BDG(bus_p) &&
	    pf_in_bus_range(bus_p, parent_saer_p->pcie_sue_tgt_bdf))
		return (PF_ERR_MATCHED_PARENT);

	return (PF_ERR_PANIC);
}
1688 
1689 /*
1690  * PCIe Bridge transactions associated with PERR.
1691  * o Bridge received a poisoned Non-Posted Write (CFG Writes) from PCIe
1692  * o Bridge received a poisoned Posted Write from (MEM Writes) from PCIe
 * o Bridge received a poisoned Completion on a Split Transaction from PCIe
 * o Bridge received a poisoned Completion on a Delayed Transaction from PCIe
1695  *
1696  * Check for non-poisoned PCIe transactions that got forwarded to the secondary
1697  * side and detects a PERR#.  Except for delayed read completions, a poisoned
1698  * TLP will be forwarded to the secondary bus and PERR# will be asserted.
1699  */
1700 /* ARGSUSED */
static int
pf_analyse_perr_assert(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
    pf_data_t *pfd_p)
{
	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
	uint16_t	cmd;
	int		hdl_sts = PF_HDL_NOTFOUND;
	int		err = PF_ERR_NO_ERROR;
	pf_pcie_adv_bdg_err_regs_t *saer_p;


	if (HAS_SAER_LOGS(pfd_p, bit)) {
		saer_p = PCIE_ADV_BDG_REG(pfd_p);
		if (pf_pci_decode(pfd_p, &cmd) != DDI_SUCCESS)
			return (PF_ERR_PANIC);

		/* Analyse by the PCI/PCI-X command captured in the logs */
cmd_switch:
		switch (cmd) {
		case PCI_PCIX_CMD_IOWR:
		case PCI_PCIX_CMD_MEMWR:
		case PCI_PCIX_CMD_MEMWR_BL:
		case PCI_PCIX_CMD_MEMWRBL:
			/* Posted Writes Transactions */
			if (saer_p->pcie_sue_tgt_trans == PF_ADDR_PIO)
				hdl_sts = pf_log_hdl_lookup(rpdip, derr, pfd_p,
				    B_FALSE);
			break;
		case PCI_PCIX_CMD_CFWR:
			/*
			 * Check to see if it is a non-posted write.  If so, a
			 * UR Completion would have been sent.
			 */
			if (pf_matched_in_rc(dq_head_p, pfd_p,
			    PCI_STAT_R_MAST_AB)) {
				hdl_sts = PF_HDL_FOUND;
				err = PF_ERR_MATCHED_RC;
				goto done;
			}
			hdl_sts = pf_log_hdl_lookup(rpdip, derr, pfd_p,
			    B_FALSE);
			break;
		case PCI_PCIX_CMD_SPL:
			hdl_sts = pf_log_hdl_lookup(rpdip, derr, pfd_p,
			    B_FALSE);
			break;
		case PCI_PCIX_CMD_DADR:
			/* Dual address cycle: re-decode the upper command */
			cmd = (PCIE_ADV_BDG_HDR(pfd_p, 1) >>
			    PCIE_AER_SUCE_HDR_CMD_UP_SHIFT) &
			    PCIE_AER_SUCE_HDR_CMD_UP_MASK;
			if (cmd != PCI_PCIX_CMD_DADR)
				goto cmd_switch;
			/* FALLTHROUGH */
		default:
			/* Unexpected situation, panic */
			hdl_sts = PF_HDL_NOTFOUND;
		}

		if (hdl_sts == PF_HDL_FOUND)
			err = PF_ERR_MATCHED_DEVICE;
		else
			err = PF_ERR_PANIC;
	} else {
		/*
		 * Check to see if it is a non-posted write.  If so, a UR
		 * Completion would have been sent.
		 */
		if ((PCIE_ERR_REG(pfd_p)->pcie_err_status &
		    PCIE_DEVSTS_UR_DETECTED) &&
		    pf_matched_in_rc(dq_head_p, pfd_p, PCI_STAT_R_MAST_AB))
			err = PF_ERR_MATCHED_RC;

		/* Check for posted writes.  Transaction is lost. */
		if (PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat &
		    PCI_STAT_S_PERROR)
			err = PF_ERR_PANIC;

		/*
		 * All other scenarios are due to read completions.  Check for
		 * PERR on the primary side.  If found the primary side error
		 * handling will take care of this error.
		 */
		if (err == PF_ERR_NO_ERROR) {
			if (PCI_ERR_REG(pfd_p)->pci_err_status &
			    PCI_STAT_PERROR)
				err = PF_ERR_MATCHED_PARENT;
			else
				err = PF_ERR_PANIC;
		}
	}

done:
	return (err);
}
1794 
1795 /*
1796  * PCIe Poisoned TLP error analyser.  If a PCIe device receives a Poisoned TLP,
1797  * check the logs and see if an associated handler for this transaction can be
1798  * found.
1799  */
1800 /* ARGSUSED */
1801 static int
1802 pf_analyse_ptlp(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1803     pf_data_t *pfd_p)
1804 {
1805 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1806 
1807 	/*
1808 	 * If AERs are supported find the logs in this device, otherwise look in
1809 	 * it's parent's logs.
1810 	 */
1811 	if (HAS_AER_LOGS(pfd_p, bit)) {
1812 		pcie_tlp_hdr_t *hdr = (pcie_tlp_hdr_t *)&PCIE_ADV_HDR(pfd_p, 0);
1813 
1814 		/*
1815 		 * Double check that the log contains a poisoned TLP.
1816 		 * Some devices like PLX switch do not log poison TLP headers.
1817 		 */
1818 		if (hdr->ep) {
1819 			if (pf_log_hdl_lookup(rpdip, derr, pfd_p, B_TRUE) ==
1820 			    PF_HDL_FOUND)
1821 				return (PF_ERR_MATCHED_DEVICE);
1822 		}
1823 
1824 		/*
1825 		 * If an address is found and hdl lookup failed panic.
1826 		 * Otherwise check parents to see if there was enough
1827 		 * information recover.
1828 		 */
1829 		if (PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_addr)
1830 			return (PF_ERR_PANIC);
1831 	}
1832 
1833 	/*
1834 	 * Check to see if the rc has already handled this error or a parent has
1835 	 * already handled this error.
1836 	 *
1837 	 * If the error info in the RC wasn't enough to find the fault device,
1838 	 * such as if the faulting device lies behind a PCIe-PCI bridge from a
1839 	 * poisoned completion, check to see if the PCIe-PCI bridge has enough
1840 	 * info to recover.  For completion TLP's, the AER header logs only
1841 	 * contain the faulting BDF in the Root Port.  For PCIe device the fault
1842 	 * BDF is the fault device.  But if the fault device is behind a
1843 	 * PCIe-PCI bridge the fault BDF could turn out just to be a PCIe-PCI
1844 	 * bridge's secondary bus number.
1845 	 */
1846 	if (!PFD_IS_ROOT(pfd_p)) {
1847 		dev_info_t *pdip = ddi_get_parent(PCIE_PFD2DIP(pfd_p));
1848 		pf_data_t *parent_pfd_p;
1849 
1850 		if (PCIE_PFD2BUS(pfd_p)->bus_rp_dip == pdip) {
1851 			if (pf_matched_in_rc(dq_head_p, pfd_p, PCI_STAT_PERROR))
1852 				return (PF_ERR_MATCHED_RC);
1853 		}
1854 
1855 		parent_pfd_p = PCIE_DIP2PFD(pdip);
1856 
1857 		if (HAS_AER_LOGS(parent_pfd_p, bit))
1858 			return (PF_ERR_MATCHED_PARENT);
1859 	} else {
1860 		pf_data_t *bdg_pfd_p;
1861 		pcie_req_id_t secbus;
1862 
1863 		/*
1864 		 * Looking for a pcie bridge only makes sense if the BDF
1865 		 * Dev/Func = 0/0
1866 		 */
1867 		if (!PCIE_HAS_AER(PCIE_PFD2BUS(pfd_p)))
1868 			goto done;
1869 
1870 		secbus = PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf;
1871 
1872 		if (!PCIE_CHECK_VALID_BDF(secbus) || (secbus & 0xFF))
1873 			goto done;
1874 
1875 		bdg_pfd_p = pf_get_pcie_bridge(pfd_p, secbus);
1876 
1877 		if (bdg_pfd_p && HAS_SAER_LOGS(bdg_pfd_p,
1878 		    PCIE_AER_SUCE_PERR_ASSERT)) {
1879 			return pf_analyse_perr_assert(derr,
1880 			    PCIE_AER_SUCE_PERR_ASSERT, dq_head_p, pfd_p);
1881 		}
1882 	}
1883 done:
1884 	return (PF_ERR_PANIC);
1885 }
1886 
1887 /*
1888  * PCIe-PCI Bridge Received Master and Target abort error analyser on Split
1889  * Completions.  If a PCIe Bridge receives a MA/TA check logs and see if an
1890  * associated handler for this transaction can be found.
1891  */
1892 /* ARGSUSED */
1893 static int
1894 pf_analyse_sc(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1895     pf_data_t *pfd_p)
1896 {
1897 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1898 	uint16_t	cmd;
1899 	int		sts = PF_HDL_NOTFOUND;
1900 
1901 	if (!HAS_SAER_LOGS(pfd_p, bit))
1902 		return (PF_ERR_PANIC);
1903 
1904 	if (pf_pci_decode(pfd_p, &cmd) != DDI_SUCCESS)
1905 		return (PF_ERR_PANIC);
1906 
1907 	if (cmd == PCI_PCIX_CMD_SPL)
1908 		sts = pf_log_hdl_lookup(rpdip, derr, pfd_p, B_FALSE);
1909 
1910 	if (sts == PF_HDL_FOUND)
1911 		return (PF_ERR_MATCHED_DEVICE);
1912 
1913 	return (PF_ERR_PANIC);
1914 }
1915 
1916 /*
1917  * PCIe Timeout error analyser.  This error can be forgiven if it is marked as
1918  * CE Advisory.  If it is marked as advisory, this means the HW can recover
1919  * and/or retry the transaction automatically.
1920  */
1921 /* ARGSUSED */
1922 static int
1923 pf_analyse_to(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1924     pf_data_t *pfd_p)
1925 {
1926 	if (HAS_AER_LOGS(pfd_p, bit) && CE_ADVISORY(pfd_p))
1927 		return (PF_ERR_NO_PANIC);
1928 
1929 	return (PF_ERR_PANIC);
1930 }
1931 
1932 /*
1933  * PCIe Unexpected Completion.  Check to see if this TLP was misrouted by
1934  * matching the device BDF with the TLP Log.  If misrouting panic, otherwise
1935  * don't panic.
1936  */
1937 /* ARGSUSED */
1938 static int
1939 pf_analyse_uc(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1940     pf_data_t *pfd_p)
1941 {
1942 	if (HAS_AER_LOGS(pfd_p, bit) &&
1943 	    (PCIE_PFD2BUS(pfd_p)->bus_bdf == (PCIE_ADV_HDR(pfd_p, 2) >> 16)))
1944 		return (PF_ERR_NO_PANIC);
1945 
1946 	/*
1947 	 * This is a case of mis-routing. Any of the switches above this
1948 	 * device could be at fault.
1949 	 */
1950 	PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags = PF_AFFECTED_ROOT;
1951 
1952 	return (PF_ERR_PANIC);
1953 }
1954 
1955 /*
1956  * PCIe-PCI Bridge Uncorrectable Data error analyser.  All Uncorrectable Data
1957  * errors should have resulted in a PCIe Poisoned TLP to the RC, except for
1958  * Posted Writes.  Check the logs for Posted Writes and if the RC did not see a
1959  * Poisoned TLP.
1960  *
1961  * Non-Posted Writes will also generate a UR in the completion status, which the
1962  * RC should also see.
1963  */
1964 /* ARGSUSED */
1965 static int
1966 pf_analyse_uc_data(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1967     pf_data_t *pfd_p)
1968 {
1969 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1970 
1971 	if (!HAS_SAER_LOGS(pfd_p, bit))
1972 		return (PF_ERR_PANIC);
1973 
1974 	if (pf_matched_in_rc(dq_head_p, pfd_p, PCI_STAT_PERROR))
1975 		return (PF_ERR_MATCHED_RC);
1976 
1977 	if (pf_log_hdl_lookup(rpdip, derr, pfd_p, B_FALSE) == PF_HDL_FOUND)
1978 		return (PF_ERR_MATCHED_DEVICE);
1979 
1980 	return (PF_ERR_PANIC);
1981 }
1982 
/*
 * Catch-all analyser for error bits that never warrant a panic.
 */
/* ARGSUSED */
static int
pf_no_panic(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
    pf_data_t *pfd_p)
{
	return (PF_ERR_NO_PANIC);
}
1990 
/*
 * Catch-all analyser for error bits that always warrant a panic.
 */
/* ARGSUSED */
static int
pf_panic(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
    pf_data_t *pfd_p)
{
	return (PF_ERR_PANIC);
}
1998 
/*
 * If a PCIe device does not support AER, assume all AER statuses have been set,
 * unless other registers do not indicate a certain error occurring.
 *
 * The synthesized UE status is stored in the pfd's advanced error register
 * copy so downstream analysis can treat AER and non-AER devices uniformly.
 */
static void
pf_adjust_for_no_aer(pf_data_t *pfd_p)
{
	uint32_t	aer_ue = 0;
	uint16_t	status;

	/* Devices with real AER registers need no synthesized status. */
	if (PCIE_HAS_AER(PCIE_PFD2BUS(pfd_p)))
		return;

	/* Fatal error detected: assume every fatal UE status bit. */
	if (PCIE_ERR_REG(pfd_p)->pcie_err_status & PCIE_DEVSTS_FE_DETECTED)
		aer_ue = PF_AER_FATAL_ERR;

	if (PCIE_ERR_REG(pfd_p)->pcie_err_status & PCIE_DEVSTS_NFE_DETECTED) {
		aer_ue = PF_AER_NON_FATAL_ERR;
		status = PCI_ERR_REG(pfd_p)->pci_err_status;

		/* Check if the device received a PTLP */
		if (!(status & PCI_STAT_PERROR))
			aer_ue &= ~PCIE_AER_UCE_PTLP;

		/* Check if the device signaled a CA */
		if (!(status & PCI_STAT_S_TARG_AB))
			aer_ue &= ~PCIE_AER_UCE_CA;

		/* Check if the device sent a UR */
		if (!(PCIE_ERR_REG(pfd_p)->pcie_err_status &
		    PCIE_DEVSTS_UR_DETECTED))
			aer_ue &= ~PCIE_AER_UCE_UR;

		/*
		 * Ignore ECRCs as it is optional and will manifest itself as
		 * another error like PTLP and MFP
		 */
		aer_ue &= ~PCIE_AER_UCE_ECRC;

		/*
		 * Generally if NFE is set, SERR should also be set. Exception:
		 * When certain non-fatal errors are masked, and some of them
		 * happened to be the cause of the NFE, SERR will not be set and
		 * they can not be the source of this interrupt.
		 *
		 * On x86, URs are masked (NFE + UR can be set), if any other
		 * non-fatal errors (i.e, PTLP, CTO, CA, UC, ECRC, ACS) did
		 * occur, SERR should be set since they are not masked. So if
		 * SERR is not set, none of them occurred.
		 */
		if (!(status & PCI_STAT_S_SYSERR))
			aer_ue &= ~PCIE_AER_UCE_TO;
	}

	/* Clear link (Training / Surprise Down) errors on non-bridges. */
	if (!PCIE_IS_BDG(PCIE_PFD2BUS(pfd_p))) {
		aer_ue &= ~PCIE_AER_UCE_TRAINING;
		aer_ue &= ~PCIE_AER_UCE_SD;
	}

	PCIE_ADV_REG(pfd_p)->pcie_ue_status = aer_ue;
}
2060 
/*
 * If a PCIe-PCI bridge does not support AER, synthesize the secondary
 * uncorrectable (SUE) status from the PCIe device status and the secondary
 * PCI status registers, clearing bits those registers show could not have
 * occurred.  Companion to pf_adjust_for_no_aer().
 */
static void
pf_adjust_for_no_saer(pf_data_t *pfd_p)
{
	uint32_t	s_aer_ue = 0;
	uint16_t	status;

	/* Bridges with real AER registers need no synthesized status. */
	if (PCIE_HAS_AER(PCIE_PFD2BUS(pfd_p)))
		return;

	/* Fatal error detected: assume every fatal SUE status bit. */
	if (PCIE_ERR_REG(pfd_p)->pcie_err_status & PCIE_DEVSTS_FE_DETECTED)
		s_aer_ue = PF_SAER_FATAL_ERR;

	if (PCIE_ERR_REG(pfd_p)->pcie_err_status & PCIE_DEVSTS_NFE_DETECTED) {
		s_aer_ue = PF_SAER_NON_FATAL_ERR;
		status = PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat;

		/* Check if the device received a UC_DATA */
		if (!(status & PCI_STAT_PERROR))
			s_aer_ue &= ~PCIE_AER_SUCE_UC_DATA_ERR;

		/* Check if the device received a RCVD_MA/MA_ON_SC */
		if (!(status & (PCI_STAT_R_MAST_AB))) {
			s_aer_ue &= ~PCIE_AER_SUCE_RCVD_MA;
			s_aer_ue &= ~PCIE_AER_SUCE_MA_ON_SC;
		}

		/* Check if the device received a RCVD_TA/TA_ON_SC */
		if (!(status & (PCI_STAT_R_TARG_AB))) {
			s_aer_ue &= ~PCIE_AER_SUCE_RCVD_TA;
			s_aer_ue &= ~PCIE_AER_SUCE_TA_ON_SC;
		}
	}

	PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_status = s_aer_ue;
}
2096 
2097 /* Find the PCIe-PCI bridge based on secondary bus number */
2098 static pf_data_t *
2099 pf_get_pcie_bridge(pf_data_t *pfd_p, pcie_req_id_t secbus)
2100 {
2101 	pf_data_t *bdg_pfd_p;
2102 
2103 	/* Search down for the PCIe-PCI device. */
2104 	for (bdg_pfd_p = pfd_p->pe_next; bdg_pfd_p;
2105 	    bdg_pfd_p = bdg_pfd_p->pe_next) {
2106 		if (PCIE_IS_PCIE_BDG(PCIE_PFD2BUS(bdg_pfd_p)) &&
2107 		    PCIE_PFD2BUS(bdg_pfd_p)->bus_bdg_secbus == secbus)
2108 			return (bdg_pfd_p);
2109 	}
2110 
2111 	return (NULL);
2112 }
2113 
2114 /* Find the PCIe-PCI bridge of a PCI device */
2115 static pf_data_t *
2116 pf_get_parent_pcie_bridge(pf_data_t *pfd_p)
2117 {
2118 	dev_info_t	*dip, *rp_dip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
2119 
2120 	/* This only makes sense if the device is a PCI device */
2121 	if (!PCIE_IS_PCI(PCIE_PFD2BUS(pfd_p)))
2122 		return (NULL);
2123 
2124 	/*
2125 	 * Search up for the PCIe-PCI device.  Watchout for x86 where pci
2126 	 * devices hang directly off of NPE.
2127 	 */
2128 	for (dip = PCIE_PFD2DIP(pfd_p); dip; dip = ddi_get_parent(dip)) {
2129 		if (dip == rp_dip)
2130 			dip = NULL;
2131 
2132 		if (PCIE_IS_PCIE_BDG(PCIE_DIP2BUS(dip)))
2133 			return (PCIE_DIP2PFD(dip));
2134 	}
2135 
2136 	return (NULL);
2137 }
2138 
2139 /*
2140  * See if a leaf error was bubbled up to the Root Complex (RC) and handled.
2141  * As of right now only RC's have enough information to have errors found in the
2142  * fabric to be matched to the RC.  Note that Root Port's (RP) do not carry
2143  * enough information.  Currently known RC's are SPARC Fire architecture and
2144  * it's equivalents, and x86's NPE.
2145  * SPARC Fire architectures have a plethora of error registers, while currently
2146  * NPE only have the address of a failed load.
2147  *
2148  * Check if the RC logged an error with the appropriate status type/abort type.
2149  * Ex: Parity Error, Received Master/Target Abort
2150  * Check if either the fault address found in the rc matches the device's
2151  * assigned address range (PIO's only) or the fault BDF in the rc matches the
2152  * device's BDF or Secondary Bus/Bus Range.
2153  */
2154 static boolean_t
2155 pf_matched_in_rc(pf_data_t *dq_head_p, pf_data_t *pfd_p,
2156     uint32_t abort_type)
2157 {
2158 	pcie_bus_t	*bus_p = PCIE_PFD2BUS(pfd_p);
2159 	pf_data_t	*rc_pfd_p;
2160 	pcie_req_id_t	fault_bdf;
2161 
2162 	for (rc_pfd_p = dq_head_p; PFD_IS_ROOT(rc_pfd_p);
2163 	    rc_pfd_p = rc_pfd_p->pe_next) {
2164 		/* Only root complex's have enough information to match */
2165 		if (!PCIE_IS_RC(PCIE_PFD2BUS(rc_pfd_p)))
2166 			continue;
2167 
2168 		/* If device and rc abort type does not match continue */
2169 		if (!(PCI_BDG_ERR_REG(rc_pfd_p)->pci_bdg_sec_stat & abort_type))
2170 			continue;
2171 
2172 		fault_bdf = PCIE_ROOT_FAULT(rc_pfd_p)->scan_bdf;
2173 
2174 		/* The Fault BDF = Device's BDF */
2175 		if (fault_bdf == bus_p->bus_bdf)
2176 			return (B_TRUE);
2177 
2178 		/* The Fault Addr is in device's address range */
2179 		if (pf_in_addr_range(bus_p,
2180 		    PCIE_ROOT_FAULT(rc_pfd_p)->scan_addr))
2181 			return (B_TRUE);
2182 
2183 		/* The Fault BDF is from PCIe-PCI Bridge's secondary bus */
2184 		if (PCIE_IS_PCIE_BDG(bus_p) &&
2185 		    pf_in_bus_range(bus_p, fault_bdf))
2186 			return (B_TRUE);
2187 	}
2188 
2189 	return (B_FALSE);
2190 }
2191 
/*
 * Check the RP and see if the error is PIO/DMA.  If the RP also has a PERR then
 * it is a DMA, otherwise it's a PIO
 */
static void
pf_pci_find_trans_type(pf_data_t *pfd_p, uint64_t *addr, uint32_t *trans_type,
    pcie_req_id_t *bdf)
{
	pf_data_t *rc_pfd_p;

	/* Could be DMA or PIO.  Find out by look at error type. */
	switch (PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_status) {
	case PCIE_AER_SUCE_TA_ON_SC:
	case PCIE_AER_SUCE_MA_ON_SC:
		/* Aborts on Split Completions are always DMA. */
		*trans_type = PF_ADDR_DMA;
		return;
	case PCIE_AER_SUCE_RCVD_TA:
	case PCIE_AER_SUCE_RCVD_MA:
		/* Received aborts are PIO; the target BDF is unknown. */
		*bdf = PCIE_INVALID_BDF;
		*trans_type = PF_ADDR_PIO;
		return;
	case PCIE_AER_SUCE_USC_ERR:
	case PCIE_AER_SUCE_UC_DATA_ERR:
	case PCIE_AER_SUCE_PERR_ASSERT:
		/* Ambiguous: decide below by consulting the root nodes. */
		break;
	default:
		/* Unrecognized status: nothing can be determined. */
		*addr = 0;
		*bdf = PCIE_INVALID_BDF;
		*trans_type = 0;
		return;
	}

	/* Default to PIO; upgrade to DMA if any root also logged a PERR. */
	*bdf = PCIE_INVALID_BDF;
	*trans_type = PF_ADDR_PIO;
	for (rc_pfd_p = pfd_p->pe_prev; rc_pfd_p;
	    rc_pfd_p = rc_pfd_p->pe_prev) {
		if (PFD_IS_ROOT(rc_pfd_p) &&
		    (PCI_BDG_ERR_REG(rc_pfd_p)->pci_bdg_sec_stat &
		    PCI_STAT_PERROR)) {
			*trans_type = PF_ADDR_DMA;
			return;
		}
	}
}
2236 
/*
 * pf_pci_decode function decodes the secondary aer transaction logs in
 * PCIe-PCI bridges.
 *
 * The log is 128 bits long and arranged in this manner.
 * [0:35]   Transaction Attribute	(s_aer_h0-saer_h1)
 * [36:39]  Transaction lower command	(saer_h1)
 * [40:43]  Transaction upper command	(saer_h1)
 * [44:63]  Reserved
 * [64:127] Address			(saer_h2-saer_h3)
 *
 * On success the decoded transaction type, target BDF and target address are
 * stored in the pfd's pcie_sue_tgt_* fields and DDI_SUCCESS is returned; the
 * decoded PCI-X command is returned through *cmd.  On an undecodable command
 * the fields are reset and DDI_FAILURE is returned.
 */
/* ARGSUSED */
int
pf_pci_decode(pf_data_t *pfd_p, uint16_t *cmd)
{
	pcix_attr_t	*attr;
	uint64_t	addr;
	uint32_t	trans_type;
	pcie_req_id_t	bdf = PCIE_INVALID_BDF;

	attr = (pcix_attr_t *)&PCIE_ADV_BDG_HDR(pfd_p, 0);
	*cmd = GET_SAER_CMD(pfd_p);

cmd_switch:
	switch (*cmd) {
	case PCI_PCIX_CMD_IORD:
	case PCI_PCIX_CMD_IOWR:
		/* IO Access should always be down stream */
		addr = PCIE_ADV_BDG_HDR(pfd_p, 2);
		bdf = attr->rid;
		trans_type = PF_ADDR_PIO;
		break;
	case PCI_PCIX_CMD_MEMRD_DW:
	case PCI_PCIX_CMD_MEMRD_BL:
	case PCI_PCIX_CMD_MEMRDBL:
	case PCI_PCIX_CMD_MEMWR:
	case PCI_PCIX_CMD_MEMWR_BL:
	case PCI_PCIX_CMD_MEMWRBL:
		/* Reassemble the 64-bit address from header words 3 and 2. */
		addr = ((uint64_t)PCIE_ADV_BDG_HDR(pfd_p, 3) <<
		    PCIE_AER_SUCE_HDR_ADDR_SHIFT) | PCIE_ADV_BDG_HDR(pfd_p, 2);
		bdf = attr->rid;

		/* Memory access may be DMA or PIO; ask the root nodes. */
		pf_pci_find_trans_type(pfd_p, &addr, &trans_type, &bdf);
		break;
	case PCI_PCIX_CMD_CFRD:
	case PCI_PCIX_CMD_CFWR:
		/*
		 * CFG Access should always be down stream.  Match the BDF in
		 * the address phase.
		 */
		addr = 0;
		bdf = attr->rid;
		trans_type = PF_ADDR_CFG;
		break;
	case PCI_PCIX_CMD_SPL:
		/*
		 * Check for DMA read completions.  The requesting BDF is in the
		 * Address phase.
		 */
		addr = 0;
		bdf = attr->rid;
		trans_type = PF_ADDR_DMA;
		break;
	case PCI_PCIX_CMD_DADR:
		/*
		 * For Dual Address Cycles the transaction command is in the 2nd
		 * address phase.
		 */
		*cmd = (PCIE_ADV_BDG_HDR(pfd_p, 1) >>
		    PCIE_AER_SUCE_HDR_CMD_UP_SHIFT) &
		    PCIE_AER_SUCE_HDR_CMD_UP_MASK;
		if (*cmd != PCI_PCIX_CMD_DADR)
			goto cmd_switch;
		/* FALLTHROUGH */
	default:
		/* Undecodable: reset the target fields and fail. */
		PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_trans = 0;
		PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf = PCIE_INVALID_BDF;
		PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_addr = 0;
		return (DDI_FAILURE);
	}
	PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_trans = trans_type;
	PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf = bdf;
	PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_addr = addr;
	return (DDI_SUCCESS);
}
2322 
2323 /*
2324  * Based on either the BDF/ADDR find and mark the faulting DMA/ACC handler.
2325  * Returns either PF_HDL_NOTFOUND or PF_HDL_FOUND.
2326  */
2327 int
2328 pf_hdl_lookup(dev_info_t *dip, uint64_t ena, uint32_t flag, uint64_t addr,
2329     pcie_req_id_t bdf)
2330 {
2331 	ddi_fm_error_t		derr;
2332 
2333 	/* If we don't know the addr or rid just return with NOTFOUND */
2334 	if ((addr == 0) && !PCIE_CHECK_VALID_BDF(bdf))
2335 		return (PF_HDL_NOTFOUND);
2336 
2337 	/*
2338 	 * Disable DMA handle lookup until DMA errors can be handled and
2339 	 * reported synchronously.  When enabled again, check for the
2340 	 * PF_ADDR_DMA flag
2341 	 */
2342 	if (!(flag & (PF_ADDR_PIO | PF_ADDR_CFG))) {
2343 		return (PF_HDL_NOTFOUND);
2344 	}
2345 
2346 	bzero(&derr, sizeof (ddi_fm_error_t));
2347 	derr.fme_version = DDI_FME_VERSION;
2348 	derr.fme_flag = DDI_FM_ERR_UNEXPECTED;
2349 	derr.fme_ena = ena;
2350 
2351 	return (pf_hdl_child_lookup(dip, &derr, flag, addr, bdf));
2352 }
2353 
/*
 * Recursively search dip and its children for the FM access/DMA handle that
 * matches the fault address and/or requester BDF.  The FM handler lock is
 * taken for the duration of the search if this thread does not already own
 * it.  Returns PF_HDL_FOUND or PF_HDL_NOTFOUND.
 */
static int
pf_hdl_child_lookup(dev_info_t *dip, ddi_fm_error_t *derr, uint32_t flag,
    uint64_t addr, pcie_req_id_t bdf)
{
	int			status = PF_HDL_NOTFOUND;
	ndi_fmc_t		*fcp = NULL;
	struct i_ddi_fmhdl	*fmhdl = DEVI(dip)->devi_fmhdl;
	pcie_req_id_t		dip_bdf;
	boolean_t		have_lock = B_FALSE;
	pcie_bus_t		*bus_p;
	dev_info_t		*cdip;

	/* A device that is not ready cannot have its caches searched. */
	if (!(bus_p = pf_is_ready(dip))) {
		return (status);
	}

	ASSERT(fmhdl);
	if (!i_ddi_fm_handler_owned(dip)) {
		/*
		 * pf_handler_enter always returns SUCCESS if the 'impl' arg is
		 * NULL.
		 */
		(void) pf_handler_enter(dip, NULL);
		have_lock = B_TRUE;
	}

	dip_bdf = PCI_GET_BDF(dip);

	/* Check if dip and BDF match, if not recurse to its children. */
	if (!PCIE_IS_RC(bus_p) && (!PCIE_CHECK_VALID_BDF(bdf) ||
	    dip_bdf == bdf)) {
		/* Search the DMA handle cache first, if capable. */
		if ((flag & PF_ADDR_DMA) && DDI_FM_DMA_ERR_CAP(fmhdl->fh_cap))
			fcp = fmhdl->fh_dma_cache;
		else
			fcp = NULL;

		if (fcp)
			status = pf_hdl_compare(dip, derr, DMA_HANDLE, addr,
			    bdf, fcp);


		/* Then the access handle cache for PIO/CFG faults. */
		if (((flag & PF_ADDR_PIO) || (flag & PF_ADDR_CFG)) &&
		    DDI_FM_ACC_ERR_CAP(fmhdl->fh_cap))
			fcp = fmhdl->fh_acc_cache;
		else
			fcp = NULL;

		if (fcp)
			status = pf_hdl_compare(dip, derr, ACC_HANDLE, addr,
			    bdf, fcp);
	}

	/* If we found the handler or know it's this device, we're done */
	if (!PCIE_IS_RC(bus_p) && ((dip_bdf == bdf) ||
	    (status == PF_HDL_FOUND)))
		goto done;

	/*
	 * If the current device is a PCIe-PCI bridge need to check for special
	 * cases:
	 *
	 * If it is a PIO and we don't have an address or this is a DMA, check
	 * to see if the BDF = secondary bus.  If so stop.  The BDF isn't a real
	 * BDF and the fault device could have come from any device in the PCI
	 * bus.
	 */
	if (PCIE_IS_PCIE_BDG(bus_p) &&
	    ((flag & PF_ADDR_DMA || flag & PF_ADDR_PIO)) &&
	    ((bus_p->bus_bdg_secbus << PCIE_REQ_ID_BUS_SHIFT) == bdf))
		goto done;


	/* If we can't find the handler check its children */
	for (cdip = ddi_get_child(dip); cdip;
	    cdip = ddi_get_next_sibling(cdip)) {
		if ((bus_p = PCIE_DIP2BUS(cdip)) == NULL)
			continue;

		/* Only descend into children that could contain the fault. */
		if (pf_in_bus_range(bus_p, bdf) ||
		    pf_in_addr_range(bus_p, addr))
			status = pf_hdl_child_lookup(cdip, derr, flag, addr,
			    bdf);

		if (status == PF_HDL_FOUND)
			goto done;
	}

done:
	if (have_lock == B_TRUE)
		pf_handler_exit(dip);

	return (status);
}
2447 
2448 static int
2449 pf_hdl_compare(dev_info_t *dip, ddi_fm_error_t *derr, uint32_t flag,
2450     uint64_t addr, pcie_req_id_t bdf, ndi_fmc_t *fcp)
2451 {
2452 	ndi_fmcentry_t	*fep;
2453 	int		found = 0;
2454 	int		status;
2455 
2456 	mutex_enter(&fcp->fc_lock);
2457 	for (fep = fcp->fc_head; fep != NULL; fep = fep->fce_next) {
2458 		ddi_fmcompare_t compare_func;
2459 
2460 		/*
2461 		 * Compare captured error state with handle
2462 		 * resources.  During the comparison and
2463 		 * subsequent error handling, we block
2464 		 * attempts to free the cache entry.
2465 		 */
2466 		if (flag == ACC_HANDLE) {
2467 			compare_func =
2468 			    i_ddi_fm_acc_err_cf_get((ddi_acc_handle_t)
2469 			    fep->fce_resource);
2470 		} else {
2471 			compare_func =
2472 			    i_ddi_fm_dma_err_cf_get((ddi_dma_handle_t)
2473 			    fep->fce_resource);
2474 		}
2475 
2476 		if (compare_func == NULL) /* unbound or not FLAGERR */
2477 			continue;
2478 
2479 		status = compare_func(dip, fep->fce_resource,
2480 		    (void *)&addr, (void *)&bdf);
2481 
2482 		if (status == DDI_FM_NONFATAL) {
2483 			found++;
2484 
2485 			/* Set the error for this resource handle */
2486 			if (flag == ACC_HANDLE) {
2487 				ddi_acc_handle_t ap = fep->fce_resource;
2488 
2489 				i_ddi_fm_acc_err_set(ap, derr->fme_ena, status,
2490 				    DDI_FM_ERR_UNEXPECTED);
2491 				ddi_fm_acc_err_get(ap, derr, DDI_FME_VERSION);
2492 				derr->fme_acc_handle = ap;
2493 			} else {
2494 				ddi_dma_handle_t dp = fep->fce_resource;
2495 
2496 				i_ddi_fm_dma_err_set(dp, derr->fme_ena, status,
2497 				    DDI_FM_ERR_UNEXPECTED);
2498 				ddi_fm_dma_err_get(dp, derr, DDI_FME_VERSION);
2499 				derr->fme_dma_handle = dp;
2500 			}
2501 		}
2502 	}
2503 	mutex_exit(&fcp->fc_lock);
2504 
2505 	/*
2506 	 * If a handler isn't found and we know this is the right device mark
2507 	 * them all failed.
2508 	 */
2509 	if ((addr != 0) && PCIE_CHECK_VALID_BDF(bdf) && (found == 0)) {
2510 		status = pf_hdl_compare(dip, derr, flag, addr, bdf, fcp);
2511 		if (status == PF_HDL_FOUND)
2512 			found++;
2513 	}
2514 
2515 	return ((found) ? PF_HDL_FOUND : PF_HDL_NOTFOUND);
2516 }
2517 
2518 /*
2519  * Automatically decode AER header logs and does a handling look up based on the
2520  * AER header decoding.
2521  *
2522  * For this function only the Primary/Secondary AER Header Logs need to be valid
2523  * in the pfd (PCIe Fault Data) arg.
2524  *
2525  * Returns either PF_HDL_NOTFOUND or PF_HDL_FOUND.
2526  */
2527 /* ARGSUSED */
2528 static int
2529 pf_log_hdl_lookup(dev_info_t *rpdip, ddi_fm_error_t *derr, pf_data_t *pfd_p,
2530     boolean_t is_primary)
2531 {
2532 	/*
2533 	 * Disabling this function temporarily until errors can be handled
2534 	 * synchronously.
2535 	 *
2536 	 * This function is currently only called during the middle of a fabric
2537 	 * scan.  If the fabric scan is called synchronously with an error seen
2538 	 * in the RP/RC, then the related errors in the fabric will have a
2539 	 * PF_ERR_MATCHED_RC error severity.  pf_log_hdl_lookup code will be by
2540 	 * passed when the severity is PF_ERR_MATCHED_RC.  Handle lookup would
2541 	 * have already happened in RP/RC error handling in a synchronous
2542 	 * manner.  Errors unrelated should panic, because they are being
2543 	 * handled asynchronously.
2544 	 *
2545 	 * If fabric scan is called asynchronously from any RP/RC error, then
2546 	 * DMA/PIO UE errors seen in the fabric should panic.  pf_lop_hdl_lookup
2547 	 * will return PF_HDL_NOTFOUND to ensure that the system panics.
2548 	 */
2549 	return (PF_HDL_NOTFOUND);
2550 }
2551 
/*
 * Decodes the TLP and returns the BDF of the handler, address and transaction
 * type if known.
 *
 * Types of TLP logs seen in RC, and what to extract:
 *
 * Memory(DMA) - Requester BDF, address, PF_DMA_ADDR
 * Memory(PIO) - address, PF_PIO_ADDR
 * CFG - Should not occur and result in UR
 * Completion(DMA) - Requester BDF, PF_DMA_ADDR
 * Completion(PIO) - Requester BDF, PF_PIO_ADDR
 *
 * Types of TLP logs seen in SW/Leaf, and what to extract:
 *
 * Memory(DMA) - Requester BDF, address, PF_DMA_ADDR
 * Memory(PIO) - address, PF_PIO_ADDR
 * CFG - Destined BDF, address, PF_CFG_ADDR
 * Completion(DMA) - Requester BDF, PF_DMA_ADDR
 * Completion(PIO) - Requester BDF, PF_PIO_ADDR
 *
 * The adv_reg_p must be passed in separately for use with SPARC RPs.  A
 * SPARC RP could have multiple AER header logs which cannot be directly
 * accessed via the bus_p.
 */
int
pf_tlp_decode(pcie_bus_t *bus_p, pf_pcie_adv_err_regs_t *adv_reg_p)
{
	pcie_tlp_hdr_t	*tlp_hdr = (pcie_tlp_hdr_t *)adv_reg_p->pcie_ue_hdr;
	pcie_req_id_t	my_bdf, tlp_bdf, flt_bdf = PCIE_INVALID_BDF;
	uint64_t	flt_addr = 0;
	uint32_t	flt_trans_type = 0;

	/* Reset the decode results before attempting a fresh decode. */
	adv_reg_p->pcie_ue_tgt_addr = 0;
	adv_reg_p->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;
	adv_reg_p->pcie_ue_tgt_trans = 0;

	my_bdf = bus_p->bus_bdf;
	switch (tlp_hdr->type) {
	case PCIE_TLP_TYPE_IO:
	case PCIE_TLP_TYPE_MEM:
	case PCIE_TLP_TYPE_MEMLK:
		/* Grab the 32/64bit fault address */
		if (tlp_hdr->fmt & 0x1) {
			flt_addr = ((uint64_t)adv_reg_p->pcie_ue_hdr[2] << 32);
			flt_addr |= adv_reg_p->pcie_ue_hdr[3];
		} else {
			flt_addr = adv_reg_p->pcie_ue_hdr[2];
		}

		/* The requester ID is the upper half of header word 1. */
		tlp_bdf = (pcie_req_id_t)(adv_reg_p->pcie_ue_hdr[1] >> 16);

		/*
		 * If the req bdf >= this.bdf, then it means the request is this
		 * device or came from a device below it.  Unless this device is
		 * a PCIe root port then it means is a DMA, otherwise PIO.
		 */
		if ((tlp_bdf >= my_bdf) && !PCIE_IS_ROOT(bus_p)) {
			flt_trans_type = PF_ADDR_DMA;
			flt_bdf = tlp_bdf;
		} else if (PCIE_IS_ROOT(bus_p) &&
		    (PF_FIRST_AER_ERR(PCIE_AER_UCE_PTLP, adv_reg_p) ||
		    (PF_FIRST_AER_ERR(PCIE_AER_UCE_CA, adv_reg_p)))) {
			flt_trans_type = PF_ADDR_DMA;
			flt_bdf = tlp_bdf;
		} else {
			flt_trans_type = PF_ADDR_PIO;
			flt_bdf = PCIE_INVALID_BDF;
		}
		break;
	case PCIE_TLP_TYPE_CFG0:
	case PCIE_TLP_TYPE_CFG1:
		/* CFG TLPs carry the destination BDF in header word 2. */
		flt_addr = 0;
		flt_bdf = (pcie_req_id_t)(adv_reg_p->pcie_ue_hdr[2] >> 16);
		flt_trans_type = PF_ADDR_CFG;
		break;
	case PCIE_TLP_TYPE_CPL:
	case PCIE_TLP_TYPE_CPLLK:
	{
		pcie_cpl_t *cpl_tlp = (pcie_cpl_t *)&adv_reg_p->pcie_ue_hdr[1];

		flt_addr = 0;
		/* Blame the numerically larger of requester/completer ID. */
		flt_bdf = (cpl_tlp->rid > cpl_tlp->cid) ? cpl_tlp->rid :
		    cpl_tlp->cid;

		/*
		 * If the requester ID is larger than the completer ID, the
		 * request came from a device below, so treat it as a DMA;
		 * otherwise it is a PIO and/or CFG access.
		 */
		if (cpl_tlp->rid > cpl_tlp->cid) {
			flt_trans_type = PF_ADDR_DMA;
		} else {
			flt_trans_type = PF_ADDR_PIO | PF_ADDR_CFG;
		}
		break;
	}
	default:
		/* Unknown TLP type: nothing can be decoded. */
		return (DDI_FAILURE);
	}

	adv_reg_p->pcie_ue_tgt_addr = flt_addr;
	adv_reg_p->pcie_ue_tgt_bdf = flt_bdf;
	adv_reg_p->pcie_ue_tgt_trans = flt_trans_type;

	return (DDI_SUCCESS);
}
2658 
#define	PCIE_EREPORT	DDI_IO_CLASS "." PCI_ERROR_SUBCLASS "." PCIEX_FABRIC
/*
 * Reserve an errorq element and build the skeleton of a PCIEX_FABRIC ereport:
 * allocate the detector FMRI (dev scheme, device path), generate an ENA if
 * one was not supplied, and initialize the ereport header.  Returns
 * DDI_FAILURE (and bumps the dropped-ereport kstat) if no errorq element
 * could be reserved.
 */
static int
pf_ereport_setup(dev_info_t *dip, uint64_t ena, nvlist_t **ereport,
    nvlist_t **detector, errorq_elem_t **eqep)
{
	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;
	char device_path[MAXPATHLEN];
	nv_alloc_t *nva;

	*eqep = errorq_reserve(fmhdl->fh_errorq);
	if (*eqep == NULL) {
		atomic_inc_64(&fmhdl->fh_kstat.fek_erpt_dropped.value.ui64);
		return (DDI_FAILURE);
	}

	*ereport = errorq_elem_nvl(fmhdl->fh_errorq, *eqep);
	nva = errorq_elem_nva(fmhdl->fh_errorq, *eqep);

	ASSERT(*ereport);
	ASSERT(nva);

	/*
	 * Use the dev_path/devid for this device instance.
	 */
	*detector = fm_nvlist_create(nva);
	if (dip == ddi_root_node()) {
		device_path[0] = '/';
		device_path[1] = '\0';
	} else {
		(void) ddi_pathname(dip, device_path);
	}

	fm_fmri_dev_set(*detector, FM_DEV_SCHEME_VERSION, NULL,
	    device_path, NULL, NULL);

	/* Generate a fresh ENA if the caller did not supply one. */
	if (ena == 0)
		ena = fm_ena_generate(0, FM_ENA_FMT1);

	fm_ereport_set(*ereport, 0, PCIE_EREPORT, ena, *detector, NULL);

	return (DDI_SUCCESS);
}
2701 
/*
 * Commit an ereport previously built by pf_ereport_setup() to the FM error
 * queue for asynchronous dispatch.
 */
/* ARGSUSED */
static void
pf_ereport_post(dev_info_t *dip, nvlist_t **ereport, nvlist_t **detector,
    errorq_elem_t **eqep)
{
	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;

	errorq_commit(fmhdl->fh_errorq, *eqep, ERRORQ_ASYNC);
}
2711 
2712 static void
2713 pf_send_ereport(ddi_fm_error_t *derr, pf_impl_t *impl)
2714 {
2715 	nvlist_t	*ereport;
2716 	nvlist_t	*detector;
2717 	errorq_elem_t	*eqep;
2718 	pcie_bus_t	*bus_p;
2719 	pf_data_t	*pfd_p;
2720 	uint32_t	total = impl->pf_total;
2721 
2722 	/*
2723 	 * Ereports need to be sent in a top down fashion. The fabric translator
2724 	 * expects the ereports from the Root first. This is needed to tell if
2725 	 * the system contains a PCIe complaint RC/RP.
2726 	 */
2727 	for (pfd_p = impl->pf_dq_head_p; pfd_p; pfd_p = pfd_p->pe_next) {
2728 		bus_p = PCIE_PFD2BUS(pfd_p);
2729 		pfd_p->pe_valid = B_FALSE;
2730 
2731 		if (derr->fme_flag != DDI_FM_ERR_UNEXPECTED ||
2732 		    !DDI_FM_EREPORT_CAP(ddi_fm_capable(PCIE_PFD2DIP(pfd_p))))
2733 			continue;
2734 
2735 		if (pf_ereport_setup(PCIE_BUS2DIP(bus_p), derr->fme_ena,
2736 		    &ereport, &detector, &eqep) != DDI_SUCCESS)
2737 			continue;
2738 
2739 		if (PFD_IS_RC(pfd_p)) {
2740 			fm_payload_set(ereport,
2741 			    "scan_bdf", DATA_TYPE_UINT16,
2742 			    PCIE_ROOT_FAULT(pfd_p)->scan_bdf,
2743 			    "scan_addr", DATA_TYPE_UINT64,
2744 			    PCIE_ROOT_FAULT(pfd_p)->scan_addr,
2745 			    "intr_src", DATA_TYPE_UINT16,
2746 			    PCIE_ROOT_EH_SRC(pfd_p)->intr_type,
2747 			    NULL);
2748 			goto generic;
2749 		}
2750 
2751 		/* Generic PCI device information */
2752 		fm_payload_set(ereport,
2753 		    "bdf", DATA_TYPE_UINT16, bus_p->bus_bdf,
2754 		    "device_id", DATA_TYPE_UINT16,
2755 		    (bus_p->bus_dev_ven_id >> 16),
2756 		    "vendor_id", DATA_TYPE_UINT16,
2757 		    (bus_p->bus_dev_ven_id & 0xFFFF),
2758 		    "rev_id", DATA_TYPE_UINT8, bus_p->bus_rev_id,
2759 		    "dev_type", DATA_TYPE_UINT16, bus_p->bus_dev_type,
2760 		    "pcie_off", DATA_TYPE_UINT16, bus_p->bus_pcie_off,
2761 		    "pcix_off", DATA_TYPE_UINT16, bus_p->bus_pcix_off,
2762 		    "aer_off", DATA_TYPE_UINT16, bus_p->bus_aer_off,
2763 		    "ecc_ver", DATA_TYPE_UINT16, bus_p->bus_ecc_ver,
2764 		    NULL);
2765 
2766 		/* PCI registers */
2767 		fm_payload_set(ereport,
2768 		    "pci_status", DATA_TYPE_UINT16,
2769 		    PCI_ERR_REG(pfd_p)->pci_err_status,
2770 		    "pci_command", DATA_TYPE_UINT16,
2771 		    PCI_ERR_REG(pfd_p)->pci_cfg_comm,
2772 		    NULL);
2773 
2774 		/* PCI bridge registers */
2775 		if (PCIE_IS_BDG(bus_p)) {
2776 			fm_payload_set(ereport,
2777 			    "pci_bdg_sec_status", DATA_TYPE_UINT16,
2778 			    PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat,
2779 			    "pci_bdg_ctrl", DATA_TYPE_UINT16,
2780 			    PCI_BDG_ERR_REG(pfd_p)->pci_bdg_ctrl,
2781 			    NULL);
2782 		}
2783 
2784 		/* PCIx registers */
2785 		if (PCIE_IS_PCIX(bus_p) && !PCIE_IS_BDG(bus_p)) {
2786 			fm_payload_set(ereport,
2787 			    "pcix_status", DATA_TYPE_UINT32,
2788 			    PCIX_ERR_REG(pfd_p)->pcix_status,
2789 			    "pcix_command", DATA_TYPE_UINT16,
2790 			    PCIX_ERR_REG(pfd_p)->pcix_command,
2791 			    NULL);
2792 		}
2793 
2794 		/* PCIx ECC Registers */
2795 		if (PCIX_ECC_VERSION_CHECK(bus_p)) {
2796 			pf_pcix_ecc_regs_t *ecc_bdg_reg;
2797 			pf_pcix_ecc_regs_t *ecc_reg;
2798 
2799 			if (PCIE_IS_BDG(bus_p))
2800 				ecc_bdg_reg = PCIX_BDG_ECC_REG(pfd_p, 0);
2801 			ecc_reg = PCIX_ECC_REG(pfd_p);
2802 			fm_payload_set(ereport,
2803 			    "pcix_ecc_control_0", DATA_TYPE_UINT16,
2804 			    PCIE_IS_BDG(bus_p) ?
2805 			    (ecc_bdg_reg->pcix_ecc_ctlstat >> 16) :
2806 			    (ecc_reg->pcix_ecc_ctlstat >> 16),
2807 			    "pcix_ecc_status_0", DATA_TYPE_UINT16,
2808 			    PCIE_IS_BDG(bus_p) ?
2809 			    (ecc_bdg_reg->pcix_ecc_ctlstat & 0xFFFF) :
2810 			    (ecc_reg->pcix_ecc_ctlstat & 0xFFFF),
2811 			    "pcix_ecc_fst_addr_0", DATA_TYPE_UINT32,
2812 			    PCIE_IS_BDG(bus_p) ?
2813 			    ecc_bdg_reg->pcix_ecc_fstaddr :
2814 			    ecc_reg->pcix_ecc_fstaddr,
2815 			    "pcix_ecc_sec_addr_0", DATA_TYPE_UINT32,
2816 			    PCIE_IS_BDG(bus_p) ?
2817 			    ecc_bdg_reg->pcix_ecc_secaddr :
2818 			    ecc_reg->pcix_ecc_secaddr,
2819 			    "pcix_ecc_attr_0", DATA_TYPE_UINT32,
2820 			    PCIE_IS_BDG(bus_p) ?
2821 			    ecc_bdg_reg->pcix_ecc_attr :
2822 			    ecc_reg->pcix_ecc_attr,
2823 			    NULL);
2824 		}
2825 
2826 		/* PCIx ECC Bridge Registers */
2827 		if (PCIX_ECC_VERSION_CHECK(bus_p) && PCIE_IS_BDG(bus_p)) {
2828 			pf_pcix_ecc_regs_t *ecc_bdg_reg;
2829 
2830 			ecc_bdg_reg = PCIX_BDG_ECC_REG(pfd_p, 1);
2831 			fm_payload_set(ereport,
2832 			    "pcix_ecc_control_1", DATA_TYPE_UINT16,
2833 			    (ecc_bdg_reg->pcix_ecc_ctlstat >> 16),
2834 			    "pcix_ecc_status_1", DATA_TYPE_UINT16,
2835 			    (ecc_bdg_reg->pcix_ecc_ctlstat & 0xFFFF),
2836 			    "pcix_ecc_fst_addr_1", DATA_TYPE_UINT32,
2837 			    ecc_bdg_reg->pcix_ecc_fstaddr,
2838 			    "pcix_ecc_sec_addr_1", DATA_TYPE_UINT32,
2839 			    ecc_bdg_reg->pcix_ecc_secaddr,
2840 			    "pcix_ecc_attr_1", DATA_TYPE_UINT32,
2841 			    ecc_bdg_reg->pcix_ecc_attr,
2842 			    NULL);
2843 		}
2844 
2845 		/* PCIx Bridge */
2846 		if (PCIE_IS_PCIX(bus_p) && PCIE_IS_BDG(bus_p)) {
2847 			fm_payload_set(ereport,
2848 			    "pcix_bdg_status", DATA_TYPE_UINT32,
2849 			    PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_stat,
2850 			    "pcix_bdg_sec_status", DATA_TYPE_UINT16,
2851 			    PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_sec_stat,
2852 			    NULL);
2853 		}
2854 
2855 		/* PCIe registers */
2856 		if (PCIE_IS_PCIE(bus_p)) {
2857 			fm_payload_set(ereport,
2858 			    "pcie_status", DATA_TYPE_UINT16,
2859 			    PCIE_ERR_REG(pfd_p)->pcie_err_status,
2860 			    "pcie_command", DATA_TYPE_UINT16,
2861 			    PCIE_ERR_REG(pfd_p)->pcie_err_ctl,
2862 			    "pcie_dev_cap", DATA_TYPE_UINT32,
2863 			    PCIE_ERR_REG(pfd_p)->pcie_dev_cap,
2864 			    NULL);
2865 		}
2866 
2867 		/* PCIe AER registers */
2868 		if (PCIE_HAS_AER(bus_p)) {
2869 			fm_payload_set(ereport,
2870 			    "pcie_adv_ctl", DATA_TYPE_UINT32,
2871 			    PCIE_ADV_REG(pfd_p)->pcie_adv_ctl,
2872 			    "pcie_ue_status", DATA_TYPE_UINT32,
2873 			    PCIE_ADV_REG(pfd_p)->pcie_ue_status,
2874 			    "pcie_ue_mask", DATA_TYPE_UINT32,
2875 			    PCIE_ADV_REG(pfd_p)->pcie_ue_mask,
2876 			    "pcie_ue_sev", DATA_TYPE_UINT32,
2877 			    PCIE_ADV_REG(pfd_p)->pcie_ue_sev,
2878 			    "pcie_ue_hdr0", DATA_TYPE_UINT32,
2879 			    PCIE_ADV_REG(pfd_p)->pcie_ue_hdr[0],
2880 			    "pcie_ue_hdr1", DATA_TYPE_UINT32,
2881 			    PCIE_ADV_REG(pfd_p)->pcie_ue_hdr[1],
2882 			    "pcie_ue_hdr2", DATA_TYPE_UINT32,
2883 			    PCIE_ADV_REG(pfd_p)->pcie_ue_hdr[2],
2884 			    "pcie_ue_hdr3", DATA_TYPE_UINT32,
2885 			    PCIE_ADV_REG(pfd_p)->pcie_ue_hdr[3],
2886 			    "pcie_ce_status", DATA_TYPE_UINT32,
2887 			    PCIE_ADV_REG(pfd_p)->pcie_ce_status,
2888 			    "pcie_ce_mask", DATA_TYPE_UINT32,
2889 			    PCIE_ADV_REG(pfd_p)->pcie_ce_mask,
2890 			    NULL);
2891 		}
2892 
2893 		/* PCIe AER decoded header */
2894 		if (HAS_AER_LOGS(pfd_p, PCIE_ADV_REG(pfd_p)->pcie_ue_status)) {
2895 			fm_payload_set(ereport,
2896 			    "pcie_ue_tgt_trans", DATA_TYPE_UINT32,
2897 			    PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_trans,
2898 			    "pcie_ue_tgt_addr", DATA_TYPE_UINT64,
2899 			    PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_addr,
2900 			    "pcie_ue_tgt_bdf", DATA_TYPE_UINT16,
2901 			    PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf,
2902 			    NULL);
			/* Clear these values as they are no longer valid */
2904 			PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_trans = 0;
2905 			PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_addr = 0;
2906 			PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;
2907 		}
2908 
2909 		/* PCIe BDG AER registers */
2910 		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_HAS_AER(bus_p)) {
2911 			fm_payload_set(ereport,
2912 			    "pcie_sue_adv_ctl", DATA_TYPE_UINT32,
2913 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_ctl,
2914 			    "pcie_sue_status", DATA_TYPE_UINT32,
2915 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_status,
2916 			    "pcie_sue_mask", DATA_TYPE_UINT32,
2917 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_mask,
2918 			    "pcie_sue_sev", DATA_TYPE_UINT32,
2919 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_sev,
2920 			    "pcie_sue_hdr0", DATA_TYPE_UINT32,
2921 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_hdr[0],
2922 			    "pcie_sue_hdr1", DATA_TYPE_UINT32,
2923 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_hdr[1],
2924 			    "pcie_sue_hdr2", DATA_TYPE_UINT32,
2925 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_hdr[2],
2926 			    "pcie_sue_hdr3", DATA_TYPE_UINT32,
2927 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_hdr[3],
2928 			    NULL);
2929 		}
2930 
2931 		/* PCIe BDG AER decoded header */
2932 		if (PCIE_IS_PCIE_BDG(bus_p) && HAS_SAER_LOGS(pfd_p,
2933 		    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_status)) {
2934 			fm_payload_set(ereport,
2935 			    "pcie_sue_tgt_trans", DATA_TYPE_UINT32,
2936 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_trans,
2937 			    "pcie_sue_tgt_addr", DATA_TYPE_UINT64,
2938 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_addr,
2939 			    "pcie_sue_tgt_bdf", DATA_TYPE_UINT16,
2940 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf,
2941 			    NULL);
			/* Clear these values as they are no longer valid */
2943 			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_trans = 0;
2944 			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_addr = 0;
2945 			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
2946 			    PCIE_INVALID_BDF;
2947 		}
2948 
2949 		/* PCIe RP registers */
2950 		if (PCIE_IS_RP(bus_p)) {
2951 			fm_payload_set(ereport,
2952 			    "pcie_rp_status", DATA_TYPE_UINT32,
2953 			    PCIE_RP_REG(pfd_p)->pcie_rp_status,
2954 			    "pcie_rp_control", DATA_TYPE_UINT16,
2955 			    PCIE_RP_REG(pfd_p)->pcie_rp_ctl,
2956 			    NULL);
2957 		}
2958 
2959 		/* PCIe RP AER registers */
2960 		if (PCIE_IS_RP(bus_p) && PCIE_HAS_AER(bus_p)) {
2961 			fm_payload_set(ereport,
2962 			    "pcie_adv_rp_status", DATA_TYPE_UINT32,
2963 			    PCIE_ADV_RP_REG(pfd_p)->pcie_rp_err_status,
2964 			    "pcie_adv_rp_command", DATA_TYPE_UINT32,
2965 			    PCIE_ADV_RP_REG(pfd_p)->pcie_rp_err_cmd,
2966 			    "pcie_adv_rp_ce_src_id", DATA_TYPE_UINT16,
2967 			    PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id,
2968 			    "pcie_adv_rp_ue_src_id", DATA_TYPE_UINT16,
2969 			    PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id,
2970 			    NULL);
2971 		}
2972 
2973 generic:
2974 		/* IOV related information */
2975 		if (!PCIE_BDG_IS_UNASSIGNED(PCIE_PFD2BUS(impl->pf_dq_head_p))) {
2976 			fm_payload_set(ereport,
2977 			    "pcie_aff_flags", DATA_TYPE_UINT16,
2978 			    PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags,
2979 			    "pcie_aff_bdf", DATA_TYPE_UINT16,
2980 			    PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf,
2981 			    "orig_sev", DATA_TYPE_UINT32,
2982 			    pfd_p->pe_orig_severity_flags,
2983 			    NULL);
2984 		}
2985 
2986 		/* Misc ereport information */
2987 		fm_payload_set(ereport,
2988 		    "remainder", DATA_TYPE_UINT32, --total,
2989 		    "severity", DATA_TYPE_UINT32, pfd_p->pe_severity_flags,
2990 		    NULL);
2991 
2992 		pf_ereport_post(PCIE_BUS2DIP(bus_p), &ereport, &detector,
2993 		    &eqep);
2994 	}
2995 
2996 	/* Unlock all the devices in the queue */
2997 	for (pfd_p = impl->pf_dq_tail_p; pfd_p; pfd_p = pfd_p->pe_prev) {
2998 		if (pfd_p->pe_lock) {
2999 			pf_handler_exit(PCIE_PFD2DIP(pfd_p));
3000 		}
3001 	}
3002 }
3003 
3004 /*
 * pf_handler_enter must be called to serialize access to each device's
 * pf_data_t.
3006  * Once error handling is finished with the device call pf_handler_exit to allow
3007  * other threads to access it.  The same thread may call pf_handler_enter
3008  * several times without any consequences.
3009  *
3010  * The "impl" variable is passed in during scan fabric to double check that
3011  * there is not a recursive algorithm and to ensure only one thread is doing a
3012  * fabric scan at all times.
3013  *
3014  * In some cases "impl" is not available, such as "child lookup" being called
3015  * from outside of scan fabric, just pass in NULL for this variable and this
3016  * extra check will be skipped.
3017  */
3018 static int
3019 pf_handler_enter(dev_info_t *dip, pf_impl_t *impl)
3020 {
3021 	pf_data_t *pfd_p = PCIE_DIP2PFD(dip);
3022 
3023 	ASSERT(pfd_p);
3024 
3025 	/*
3026 	 * Check to see if the lock has already been taken by this
3027 	 * thread.  If so just return and don't take lock again.
3028 	 */
3029 	if (!pfd_p->pe_lock || !impl) {
3030 		i_ddi_fm_handler_enter(dip);
3031 		pfd_p->pe_lock = B_TRUE;
3032 		return (PF_SCAN_SUCCESS);
3033 	}
3034 
3035 	/* Check to see that this dip is already in the "impl" error queue */
3036 	for (pfd_p = impl->pf_dq_head_p; pfd_p; pfd_p = pfd_p->pe_next) {
3037 		if (PCIE_PFD2DIP(pfd_p) == dip) {
3038 			return (PF_SCAN_SUCCESS);
3039 		}
3040 	}
3041 
3042 	return (PF_SCAN_DEADLOCK);
3043 }
3044 
3045 static void
3046 pf_handler_exit(dev_info_t *dip)
3047 {
3048 	pf_data_t *pfd_p = PCIE_DIP2PFD(dip);
3049 
3050 	ASSERT(pfd_p);
3051 
3052 	ASSERT(pfd_p->pe_lock == B_TRUE);
3053 	i_ddi_fm_handler_exit(dip);
3054 	pfd_p->pe_lock = B_FALSE;
3055 }
3056 
3057 /*
3058  * This function calls the driver's callback function (if it's FMA hardened
3059  * and callback capable). This function relies on the current thread already
3060  * owning the driver's fmhdl lock.
3061  */
3062 static int
3063 pf_fm_callback(dev_info_t *dip, ddi_fm_error_t *derr)
3064 {
3065 	int cb_sts = DDI_FM_OK;
3066 
3067 	if (DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
3068 		dev_info_t *pdip = ddi_get_parent(dip);
3069 		struct i_ddi_fmhdl *hdl = DEVI(pdip)->devi_fmhdl;
3070 		struct i_ddi_fmtgt *tgt = hdl->fh_tgts;
3071 		struct i_ddi_errhdl *errhdl;
3072 		while (tgt != NULL) {
3073 			if (dip == tgt->ft_dip) {
3074 				errhdl = tgt->ft_errhdl;
3075 				cb_sts = errhdl->eh_func(dip, derr,
3076 				    errhdl->eh_impl);
3077 				break;
3078 			}
3079 			tgt = tgt->ft_next;
3080 		}
3081 	}
3082 	return (cb_sts);
3083 }
3084 
/*
 * Reset the contents of a pf_data_t: clear the severity and affected-device
 * state, then zero the saved error-register snapshots appropriate to the
 * device's type (root, bridge, PCIe, PCI-X), resetting any captured BDF
 * fields back to PCIE_INVALID_BDF.
 */
static void
pf_reset_pfd(pf_data_t *pfd_p)
{
	pcie_bus_t	*bus_p = PCIE_PFD2BUS(pfd_p);

	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_severity_mask = 0;
	pfd_p->pe_orig_severity_flags = 0;
	/* pe_lock and pe_valid were reset in pf_send_ereport */

	PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags = 0;
	PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;

	/* Root devices additionally track scan targets and the error source */
	if (PCIE_IS_ROOT(bus_p)) {
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
		PCIE_ROOT_FAULT(pfd_p)->scan_addr = 0;
		PCIE_ROOT_FAULT(pfd_p)->full_scan = B_FALSE;
		PCIE_ROOT_EH_SRC(pfd_p)->intr_type = PF_INTR_TYPE_NONE;
		PCIE_ROOT_EH_SRC(pfd_p)->intr_data = NULL;
	}

	if (PCIE_IS_BDG(bus_p)) {
		bzero(PCI_BDG_ERR_REG(pfd_p), sizeof (pf_pci_bdg_err_regs_t));
	}

	PCI_ERR_REG(pfd_p)->pci_err_status = 0;
	PCI_ERR_REG(pfd_p)->pci_cfg_comm = 0;

	if (PCIE_IS_PCIE(bus_p)) {
		/* Root ports carry RP and advanced (AER) RP register state */
		if (PCIE_IS_ROOT(bus_p)) {
			bzero(PCIE_RP_REG(pfd_p),
			    sizeof (pf_pcie_rp_err_regs_t));
			bzero(PCIE_ADV_RP_REG(pfd_p),
			    sizeof (pf_pcie_adv_rp_err_regs_t));
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id =
			    PCIE_INVALID_BDF;
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id =
			    PCIE_INVALID_BDF;
		} else if (PCIE_IS_PCIE_BDG(bus_p)) {
			bzero(PCIE_ADV_BDG_REG(pfd_p),
			    sizeof (pf_pcie_adv_bdg_err_regs_t));
			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
			    PCIE_INVALID_BDF;
		}

		/* PCIe-to-PCI-X bridge: clear both ECC banks and bdg status */
		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				bzero(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				bzero(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}
			PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_sec_stat = 0;
			PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_stat = 0;
		}

		/* Clear the AER register snapshot common to all PCIe devs */
		PCIE_ADV_REG(pfd_p)->pcie_adv_ctl = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_status = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_mask = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_sev = 0;
		PCIE_ADV_HDR(pfd_p, 0) = 0;
		PCIE_ADV_HDR(pfd_p, 1) = 0;
		PCIE_ADV_HDR(pfd_p, 2) = 0;
		PCIE_ADV_HDR(pfd_p, 3) = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ce_status = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ce_mask = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_trans = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_addr = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;

		PCIE_ERR_REG(pfd_p)->pcie_err_status = 0;
		PCIE_ERR_REG(pfd_p)->pcie_err_ctl = 0;
		PCIE_ERR_REG(pfd_p)->pcie_dev_cap = 0;

	} else if (PCIE_IS_PCIX(bus_p)) {
		/* Pure PCI-X device: bridge vs. non-bridge register sets */
		if (PCIE_IS_BDG(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				bzero(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				bzero(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}
			PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_sec_stat = 0;
			PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_stat = 0;
		} else {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				bzero(PCIX_ECC_REG(pfd_p),
				    sizeof (pf_pcix_ecc_regs_t));
			}
			PCIX_ERR_REG(pfd_p)->pcix_command = 0;
			PCIX_ERR_REG(pfd_p)->pcix_status = 0;
		}
	}

	/* Detach from the error queue */
	pfd_p->pe_prev = NULL;
	pfd_p->pe_next = NULL;
	pfd_p->pe_rber_fatal = B_FALSE;
}
3183 
3184 pcie_bus_t *
3185 pf_find_busp_by_bdf(pf_impl_t *impl, pcie_req_id_t bdf)
3186 {
3187 	pcie_bus_t *temp_bus_p;
3188 	pf_data_t *temp_pfd_p;
3189 
3190 	for (temp_pfd_p = impl->pf_dq_head_p;
3191 	    temp_pfd_p;
3192 	    temp_pfd_p = temp_pfd_p->pe_next) {
3193 		temp_bus_p = PCIE_PFD2BUS(temp_pfd_p);
3194 
3195 		if (bdf == temp_bus_p->bus_bdf) {
3196 			return (temp_bus_p);
3197 		}
3198 	}
3199 
3200 	return (NULL);
3201 }
3202 
3203 pcie_bus_t *
3204 pf_find_busp_by_addr(pf_impl_t *impl, uint64_t addr)
3205 {
3206 	pcie_bus_t *temp_bus_p;
3207 	pf_data_t *temp_pfd_p;
3208 
3209 	for (temp_pfd_p = impl->pf_dq_head_p;
3210 	    temp_pfd_p;
3211 	    temp_pfd_p = temp_pfd_p->pe_next) {
3212 		temp_bus_p = PCIE_PFD2BUS(temp_pfd_p);
3213 
3214 		if (pf_in_assigned_addr(temp_bus_p, addr)) {
3215 			return (temp_bus_p);
3216 		}
3217 	}
3218 
3219 	return (NULL);
3220 }
3221 
3222 pcie_bus_t *
3223 pf_find_busp_by_aer(pf_impl_t *impl, pf_data_t *pfd_p)
3224 {
3225 	pf_pcie_adv_err_regs_t *reg_p = PCIE_ADV_REG(pfd_p);
3226 	pcie_bus_t *temp_bus_p = NULL;
3227 	pcie_req_id_t bdf;
3228 	uint64_t addr;
3229 	pcie_tlp_hdr_t *tlp_hdr = (pcie_tlp_hdr_t *)reg_p->pcie_ue_hdr;
3230 	uint32_t trans_type = reg_p->pcie_ue_tgt_trans;
3231 
3232 	if ((tlp_hdr->type == PCIE_TLP_TYPE_CPL) ||
3233 	    (tlp_hdr->type == PCIE_TLP_TYPE_CPLLK)) {
3234 		pcie_cpl_t *cpl_tlp = (pcie_cpl_t *)&reg_p->pcie_ue_hdr[1];
3235 
3236 		bdf = (cpl_tlp->rid > cpl_tlp->cid) ? cpl_tlp->rid :
3237 		    cpl_tlp->cid;
3238 		temp_bus_p = pf_find_busp_by_bdf(impl, bdf);
3239 	} else if (trans_type == PF_ADDR_PIO) {
3240 		addr = reg_p->pcie_ue_tgt_addr;
3241 		temp_bus_p = pf_find_busp_by_addr(impl, addr);
3242 	} else {
3243 		/* PF_ADDR_DMA type */
3244 		bdf = reg_p->pcie_ue_tgt_bdf;
3245 		temp_bus_p = pf_find_busp_by_bdf(impl, bdf);
3246 	}
3247 
3248 	return (temp_bus_p);
3249 }
3250 
3251 pcie_bus_t *
3252 pf_find_busp_by_saer(pf_impl_t *impl, pf_data_t *pfd_p)
3253 {
3254 	pf_pcie_adv_bdg_err_regs_t *reg_p = PCIE_ADV_BDG_REG(pfd_p);
3255 	pcie_bus_t *temp_bus_p = NULL;
3256 	pcie_req_id_t bdf;
3257 	uint64_t addr;
3258 
3259 	addr = reg_p->pcie_sue_tgt_addr;
3260 	bdf = reg_p->pcie_sue_tgt_bdf;
3261 
3262 	if (addr != 0) {
3263 		temp_bus_p = pf_find_busp_by_addr(impl, addr);
3264 	} else if (PCIE_CHECK_VALID_BDF(bdf)) {
3265 		temp_bus_p = pf_find_busp_by_bdf(impl, bdf);
3266 	}
3267 
3268 	return (temp_bus_p);
3269 }
3270