xref: /illumos-gate/usr/src/uts/common/io/pciex/pcie_fault.c (revision fb4eb4e8c8161158740a5a4057e45ec942165dc2)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/sysmacros.h>
27 #include <sys/types.h>
28 #include <sys/kmem.h>
29 #include <sys/modctl.h>
30 #include <sys/ddi.h>
31 #include <sys/sunddi.h>
32 #include <sys/sunndi.h>
33 #include <sys/fm/protocol.h>
34 #include <sys/fm/util.h>
35 #include <sys/fm/io/ddi.h>
36 #include <sys/fm/io/pci.h>
37 #include <sys/promif.h>
38 #include <sys/disp.h>
39 #include <sys/atomic.h>
40 #include <sys/pcie.h>
41 #include <sys/pci_cap.h>
42 #include <sys/pcie_impl.h>
43 
44 #define	PF_PCIE_BDG_ERR (PCIE_DEVSTS_FE_DETECTED | PCIE_DEVSTS_NFE_DETECTED | \
45 	PCIE_DEVSTS_CE_DETECTED)
46 
47 #define	PF_PCI_BDG_ERR (PCI_STAT_S_SYSERR | PCI_STAT_S_TARG_AB | \
48 	PCI_STAT_R_MAST_AB | PCI_STAT_R_TARG_AB | PCI_STAT_S_PERROR)
49 
50 #define	PF_AER_FATAL_ERR (PCIE_AER_UCE_DLP | PCIE_AER_UCE_SD |\
51 	PCIE_AER_UCE_FCP | PCIE_AER_UCE_RO | PCIE_AER_UCE_MTLP)
52 #define	PF_AER_NON_FATAL_ERR (PCIE_AER_UCE_PTLP | PCIE_AER_UCE_TO | \
53 	PCIE_AER_UCE_CA | PCIE_AER_UCE_ECRC | PCIE_AER_UCE_UR)
54 
55 #define	PF_SAER_FATAL_ERR (PCIE_AER_SUCE_USC_MSG_DATA_ERR | \
56 	PCIE_AER_SUCE_UC_ATTR_ERR | PCIE_AER_SUCE_UC_ADDR_ERR | \
57 	PCIE_AER_SUCE_SERR_ASSERT)
58 #define	PF_SAER_NON_FATAL_ERR (PCIE_AER_SUCE_TA_ON_SC | \
59 	PCIE_AER_SUCE_MA_ON_SC | PCIE_AER_SUCE_RCVD_TA | \
60 	PCIE_AER_SUCE_RCVD_MA | PCIE_AER_SUCE_USC_ERR | \
61 	PCIE_AER_SUCE_UC_DATA_ERR | PCIE_AER_SUCE_TIMER_EXPIRED | \
62 	PCIE_AER_SUCE_PERR_ASSERT | PCIE_AER_SUCE_INTERNAL_ERR)
63 
64 #define	PF_PCI_PARITY_ERR (PCI_STAT_S_PERROR | PCI_STAT_PERROR)
65 
66 #define	PF_FIRST_AER_ERR(bit, adv) \
67 	(bit & (1 << (adv->pcie_adv_ctl & PCIE_AER_CTL_FST_ERR_PTR_MASK)))
68 
69 #define	HAS_AER_LOGS(pfd_p, bit) \
70 	(PCIE_HAS_AER(pfd_p->pe_bus_p) && \
71 	PF_FIRST_AER_ERR(bit, PCIE_ADV_REG(pfd_p)))
72 
73 #define	PF_FIRST_SAER_ERR(bit, adv) \
74 	(bit & (1 << (adv->pcie_sue_ctl & PCIE_AER_SCTL_FST_ERR_PTR_MASK)))
75 
76 #define	HAS_SAER_LOGS(pfd_p, bit) \
77 	(PCIE_HAS_AER(pfd_p->pe_bus_p) && \
78 	PF_FIRST_SAER_ERR(bit, PCIE_ADV_BDG_REG(pfd_p)))
79 
80 #define	GET_SAER_CMD(pfd_p) \
81 	((PCIE_ADV_BDG_HDR(pfd_p, 1) >> \
82 	PCIE_AER_SUCE_HDR_CMD_LWR_SHIFT) & PCIE_AER_SUCE_HDR_CMD_LWR_MASK)
83 
84 #define	CE_ADVISORY(pfd_p) \
85 	(PCIE_ADV_REG(pfd_p)->pcie_ce_status & PCIE_AER_CE_AD_NFE)
86 
/* PCIe Fault Fabric Error analysis table */
typedef struct pf_fab_err_tbl {
	uint32_t	bit;		/* Error bit */
	int		(*handler)();	/* Error handling function */
	uint16_t	affected_flags; /* Primary affected flag */
	/*
	 * Secondary affected flag, effective when the information
	 * indicated by the primary flag is not available, eg.
	 * PF_AFFECTED_AER/SAER/ADDR
	 */
	uint16_t	sec_affected_flags;
} pf_fab_err_tbl_t;
99 
100 static pcie_bus_t *pf_is_ready(dev_info_t *);
101 /* Functions for scanning errors */
102 static int pf_default_hdl(dev_info_t *, pf_impl_t *);
103 static int pf_dispatch(dev_info_t *, pf_impl_t *, boolean_t);
104 static boolean_t pf_in_addr_range(pcie_bus_t *, uint64_t);
105 
106 /* Functions for gathering errors */
107 static void pf_pcix_ecc_regs_gather(pf_pcix_ecc_regs_t *pcix_ecc_regs,
108     pcie_bus_t *bus_p, boolean_t bdg);
109 static void pf_pcix_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p);
110 static void pf_pcie_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p);
111 static void pf_pci_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p);
112 static int pf_dummy_cb(dev_info_t *, ddi_fm_error_t *, const void *);
113 static void pf_en_dq(pf_data_t *pfd_p, pf_impl_t *impl_p);
114 
115 /* Functions for analysing errors */
116 static int pf_analyse_error(ddi_fm_error_t *, pf_impl_t *);
117 static void pf_adjust_for_no_aer(pf_data_t *);
118 static void pf_adjust_for_no_saer(pf_data_t *);
119 static pf_data_t *pf_get_pcie_bridge(pf_data_t *, pcie_req_id_t);
120 static pf_data_t *pf_get_parent_pcie_bridge(pf_data_t *);
121 static boolean_t pf_matched_in_rc(pf_data_t *, pf_data_t *,
122     uint32_t);
123 static int pf_analyse_error_tbl(ddi_fm_error_t *, pf_impl_t *,
124     pf_data_t *, const pf_fab_err_tbl_t *, uint32_t);
125 static int pf_analyse_ca_ur(ddi_fm_error_t *, uint32_t,
126     pf_data_t *, pf_data_t *);
127 static int pf_analyse_ma_ta(ddi_fm_error_t *, uint32_t,
128     pf_data_t *, pf_data_t *);
129 static int pf_analyse_pci(ddi_fm_error_t *, uint32_t,
130     pf_data_t *, pf_data_t *);
131 static int pf_analyse_perr_assert(ddi_fm_error_t *, uint32_t,
132     pf_data_t *, pf_data_t *);
133 static int pf_analyse_ptlp(ddi_fm_error_t *, uint32_t,
134     pf_data_t *, pf_data_t *);
135 static int pf_analyse_sc(ddi_fm_error_t *, uint32_t,
136     pf_data_t *, pf_data_t *);
137 static int pf_analyse_to(ddi_fm_error_t *, uint32_t,
138     pf_data_t *, pf_data_t *);
139 static int pf_analyse_uc(ddi_fm_error_t *, uint32_t,
140     pf_data_t *, pf_data_t *);
141 static int pf_analyse_uc_data(ddi_fm_error_t *, uint32_t,
142     pf_data_t *, pf_data_t *);
143 static int pf_no_panic(ddi_fm_error_t *, uint32_t,
144     pf_data_t *, pf_data_t *);
145 static int pf_panic(ddi_fm_error_t *, uint32_t,
146     pf_data_t *, pf_data_t *);
147 static void pf_send_ereport(ddi_fm_error_t *, pf_impl_t *);
148 static int pf_fm_callback(dev_info_t *dip, ddi_fm_error_t *derr);
149 
150 /* PCIe Fabric Handle Lookup Support Functions. */
151 static int pf_hdl_child_lookup(dev_info_t *, ddi_fm_error_t *, uint32_t,
152     uint64_t, pcie_req_id_t);
153 static int pf_hdl_compare(dev_info_t *, ddi_fm_error_t *, uint32_t, uint64_t,
154     pcie_req_id_t, ndi_fmc_t *);
155 static int pf_log_hdl_lookup(dev_info_t *, ddi_fm_error_t *, pf_data_t *,
156 	boolean_t);
157 
158 static int pf_handler_enter(dev_info_t *, pf_impl_t *);
159 static void pf_handler_exit(dev_info_t *);
160 static void pf_reset_pfd(pf_data_t *);
161 
162 boolean_t pcie_full_scan = B_FALSE;	/* Force to always do a full scan */
163 int pcie_disable_scan = 0;		/* Disable fabric scan */
164 
165 /* Inform interested parties that error handling is about to begin. */
166 /* ARGSUSED */
167 void
168 pf_eh_enter(pcie_bus_t *bus_p) {
169 }
170 
/*
 * Inform interested parties that error handling has ended.
 *
 * Walks the fault data queue hanging off this fabric's root and resets the
 * per-device "affected" bookkeeping and, for root devices, the recorded
 * interrupt source, so the structures are clean for the next fabric scan.
 */
void
pf_eh_exit(pcie_bus_t *bus_p)
{
	pcie_bus_t *rbus_p = PCIE_DIP2BUS(bus_p->bus_rp_dip);
	pf_data_t *root_pfd_p = PCIE_BUS2PFD(rbus_p);
	pf_data_t *pfd_p;
	uint_t intr_type = PCIE_ROOT_EH_SRC(root_pfd_p)->intr_type;

	/* Notify pciev that error handling for this fabric is complete. */
	pciev_eh_exit(root_pfd_p, intr_type);

	/* Clear affected device info and INTR SRC */
	for (pfd_p = root_pfd_p; pfd_p; pfd_p = pfd_p->pe_next) {
		PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags = 0;
		PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;
		if (PCIE_IS_ROOT(PCIE_PFD2BUS(pfd_p))) {
			PCIE_ROOT_EH_SRC(pfd_p)->intr_type = PF_INTR_TYPE_NONE;
			PCIE_ROOT_EH_SRC(pfd_p)->intr_data = NULL;
		}
	}
}
192 
/*
 * Scan Fabric is the entry point for PCI/PCIe IO fabric errors.  The
 * caller may create a local pf_data_t with the "root fault"
 * information populated to either do a precise or full scan.  More
 * than one pf_data_t may be linked together if there are multiple
 * errors.  Only a PCIe compliant Root Port device may pass in NULL
 * for the root_pfd_p.
 *
 * "Root Complexes" such as NPE and PX should call scan_fabric using itself as
 * the rdip.  PCIe Root ports should call pf_scan_fabric using its parent as
 * the rdip.
 *
 * Scan fabric initiated from RCs are likely due to a fabric message, traps or
 * any RC detected errors that propagated to/from the fabric.
 *
 * This code assumes that by the time pf_scan_fabric is
 * called, pf_handler_enter has NOT been called on the rdip.
 *
 * Returns a bitmask of PF_ERR_* analysis flags; also records the overall
 * scan status in derr->fme_status before returning.
 */
int
pf_scan_fabric(dev_info_t *rdip, ddi_fm_error_t *derr, pf_data_t *root_pfd_p)
{
	pf_impl_t	impl;
	pf_data_t	*pfd_p, *pfd_head_p, *pfd_tail_p;
	int		scan_flag = PF_SCAN_SUCCESS;
	int		analyse_flag = PF_ERR_NO_ERROR;
	boolean_t	full_scan = pcie_full_scan;

	/* Fabric scanning may be administratively disabled. */
	if (pcie_disable_scan)
		return (analyse_flag);

	/* Find the head and tail of this link list */
	pfd_head_p = root_pfd_p;
	for (pfd_tail_p = root_pfd_p; pfd_tail_p && pfd_tail_p->pe_next;
	    pfd_tail_p = pfd_tail_p->pe_next)
		;

	/* Save head/tail */
	impl.pf_total = 0;
	impl.pf_derr = derr;
	impl.pf_dq_head_p = pfd_head_p;
	impl.pf_dq_tail_p = pfd_tail_p;

	/* If scan is initiated from RP then RP itself must be scanned. */
	if (PCIE_IS_RP(PCIE_DIP2BUS(rdip)) && pf_is_ready(rdip) &&
	    !root_pfd_p) {
		scan_flag = pf_handler_enter(rdip, &impl);
		if (scan_flag & PF_SCAN_DEADLOCK)
			goto done;

		scan_flag = pf_default_hdl(rdip, &impl);
		if (scan_flag & PF_SCAN_NO_ERR_IN_CHILD)
			goto done;
	}

	/*
	 * Scan the fabric using the scan_bdf and scan_addr in error q.
	 * scan_bdf will be valid in the following cases:
	 *	- Fabric message
	 *	- Poisoned TLP
	 *	- Signaled UR/CA
	 *	- Received UR/CA
	 *	- PIO load failures
	 */
	for (pfd_p = impl.pf_dq_head_p; pfd_p && PFD_IS_ROOT(pfd_p);
	    pfd_p = pfd_p->pe_next) {
		impl.pf_fault = PCIE_ROOT_FAULT(pfd_p);

		if (PFD_IS_RC(pfd_p))
			impl.pf_total++;

		/* A single root fault may force scanning the whole fabric. */
		if (impl.pf_fault->full_scan)
			full_scan = B_TRUE;

		if (full_scan ||
		    PCIE_CHECK_VALID_BDF(impl.pf_fault->scan_bdf) ||
		    impl.pf_fault->scan_addr)
			scan_flag |= pf_dispatch(rdip, &impl, full_scan);

		/* A full scan covers everything; no need to dispatch again. */
		if (full_scan)
			break;
	}

done:
	/*
	 * If this is due to safe access, don't analyze the errors and return
	 * success regardless of how scan fabric went.
	 */
	if (derr->fme_flag != DDI_FM_ERR_UNEXPECTED) {
		analyse_flag = PF_ERR_NO_PANIC;
	} else {
		analyse_flag = pf_analyse_error(derr, &impl);
	}

	pf_send_ereport(derr, &impl);

	/*
	 * Check if any hardened driver's callback reported a panic.
	 * If so panic.
	 */
	if (scan_flag & PF_SCAN_CB_FAILURE)
		analyse_flag |= PF_ERR_PANIC;

	/*
	 * If a deadlock was detected, panic the system as error analysis has
	 * been compromised.
	 */
	if (scan_flag & PF_SCAN_DEADLOCK)
		analyse_flag |= PF_ERR_PANIC_DEADLOCK;

	derr->fme_status = PF_ERR2DDIFM_ERR(scan_flag);

	return (analyse_flag);
}
306 
307 void
308 pcie_force_fullscan() {
309 	pcie_full_scan = B_TRUE;
310 }
311 
/*
 * pf_dispatch walks the device tree and calls the pf_default_hdl if the device
 * falls in the error path.
 *
 * Returns PF_SCAN_* flags
 */
static int
pf_dispatch(dev_info_t *pdip, pf_impl_t *impl, boolean_t full_scan)
{
	dev_info_t	*dip;
	pcie_req_id_t	rid = impl->pf_fault->scan_bdf;
	pcie_bus_t	*bus_p;
	int		scan_flag = PF_SCAN_SUCCESS;

	for (dip = ddi_get_child(pdip); dip; dip = ddi_get_next_sibling(dip)) {
		/* Make sure dip is attached and ready */
		if (!(bus_p = pf_is_ready(dip)))
			continue;

		scan_flag |= pf_handler_enter(dip, impl);
		if (scan_flag & PF_SCAN_DEADLOCK)
			break;

		/*
		 * Handle this device if it is a:
		 * o Full Scan
		 * o PCI/PCI-X Device
		 * o Fault BDF = Device BDF
		 * o BDF/ADDR is in range of the Bridge/Switch
		 */
		if (full_scan ||
		    (bus_p->bus_bdf == rid) ||
		    pf_in_bus_range(bus_p, rid) ||
		    pf_in_addr_range(bus_p, impl->pf_fault->scan_addr)) {
			int hdl_flag = pf_default_hdl(dip, impl);
			scan_flag |= hdl_flag;

			/*
			 * A bridge may have detected no errors in which case
			 * there is no need to scan further down.
			 */
			if (hdl_flag & PF_SCAN_NO_ERR_IN_CHILD)
				continue;
		} else {
			/* Device is not in the fault path; release it. */
			pf_handler_exit(dip);
			continue;
		}

		/* match or in bridge bus-range */
		switch (bus_p->bus_dev_type) {
		case PCIE_PCIECAP_DEV_TYPE_PCIE2PCI:
		case PCIE_PCIECAP_DEV_TYPE_PCI2PCIE:
			/* Always scan everything below a PCIe<->PCI bridge. */
			scan_flag |= pf_dispatch(dip, impl, B_TRUE);
			break;
		case PCIE_PCIECAP_DEV_TYPE_UP:
		case PCIE_PCIECAP_DEV_TYPE_DOWN:
		case PCIE_PCIECAP_DEV_TYPE_ROOT:
		{
			pf_data_t *pfd_p = PCIE_BUS2PFD(bus_p);
			pf_pci_err_regs_t *err_p = PCI_ERR_REG(pfd_p);
			pf_pci_bdg_err_regs_t *serr_p = PCI_BDG_ERR_REG(pfd_p);
			/*
			 * Continue if the fault BDF != the switch or there is a
			 * parity error
			 */
			if ((bus_p->bus_bdf != rid) ||
			    (err_p->pci_err_status & PF_PCI_PARITY_ERR) ||
			    (serr_p->pci_bdg_sec_stat & PF_PCI_PARITY_ERR))
				scan_flag |= pf_dispatch(dip, impl, full_scan);
			break;
		}
		case PCIE_PCIECAP_DEV_TYPE_PCIE_DEV:
		case PCIE_PCIECAP_DEV_TYPE_PCI_DEV:
			/*
			 * Reached a PCIe end point so stop. Note dev_type
			 * PCI_DEV is just a PCIe device that requires IO Space
			 */
			break;
		case PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO:
			if (PCIE_IS_BDG(bus_p))
				scan_flag |= pf_dispatch(dip, impl, B_TRUE);
			break;
		default:
			/* Unknown device type: should never happen. */
			ASSERT(B_FALSE);
		}
	}
	return (scan_flag);
}
400 
401 /* Returns whether the "bdf" is in the bus range of a switch/bridge */
402 boolean_t
403 pf_in_bus_range(pcie_bus_t *bus_p, pcie_req_id_t bdf)
404 {
405 	pci_bus_range_t *br_p = &bus_p->bus_bus_range;
406 	uint8_t		bus_no = (bdf & PCIE_REQ_ID_BUS_MASK) >>
407 	    PCIE_REQ_ID_BUS_SHIFT;
408 
409 	/* check if given bdf falls within bridge's bus range */
410 	if (PCIE_IS_BDG(bus_p) &&
411 	    ((bus_no >= br_p->lo) && (bus_no <= br_p->hi)))
412 		return (B_TRUE);
413 	else
414 		return (B_FALSE);
415 }
416 
417 /*
418  * Return whether the "addr" is in the assigned addr of a device.
419  */
420 boolean_t
421 pf_in_assigned_addr(pcie_bus_t *bus_p, uint64_t addr)
422 {
423 	uint_t		i;
424 	uint64_t	low, hi;
425 	pci_regspec_t	*assign_p = bus_p->bus_assigned_addr;
426 
427 	for (i = 0; i < bus_p->bus_assigned_entries; i++, assign_p++) {
428 		low = assign_p->pci_phys_low;
429 		hi = low + assign_p->pci_size_low;
430 		if ((addr < hi) && (addr >= low))
431 			return (B_TRUE);
432 	}
433 	return (B_FALSE);
434 }
435 
436 /*
437  * Returns whether the "addr" is in the addr range of a switch/bridge, or if the
438  * "addr" is in the assigned addr of a device.
439  */
440 static boolean_t
441 pf_in_addr_range(pcie_bus_t *bus_p, uint64_t addr)
442 {
443 	uint_t		i;
444 	uint64_t	low, hi;
445 	ppb_ranges_t	*ranges_p = bus_p->bus_addr_ranges;
446 
447 	/* check if given address belongs to this device */
448 	if (pf_in_assigned_addr(bus_p, addr))
449 		return (B_TRUE);
450 
451 	/* check if given address belongs to a child below this device */
452 	if (!PCIE_IS_BDG(bus_p))
453 		return (B_FALSE);
454 
455 	for (i = 0; i < bus_p->bus_addr_entries; i++, ranges_p++) {
456 		switch (ranges_p->child_high & PCI_ADDR_MASK) {
457 		case PCI_ADDR_IO:
458 		case PCI_ADDR_MEM32:
459 			low = ranges_p->child_low;
460 			hi = ranges_p->size_low + low;
461 			if ((addr < hi) && (addr >= low))
462 				return (B_TRUE);
463 			break;
464 		case PCI_ADDR_MEM64:
465 			low = ((uint64_t)ranges_p->child_mid << 32) |
466 			    (uint64_t)ranges_p->child_low;
467 			hi = (((uint64_t)ranges_p->size_high << 32) |
468 			    (uint64_t)ranges_p->size_low) + low;
469 			if ((addr < hi) && (addr >= low))
470 				return (B_TRUE);
471 			break;
472 		}
473 	}
474 	return (B_FALSE);
475 }
476 
477 static pcie_bus_t *
478 pf_is_ready(dev_info_t *dip)
479 {
480 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
481 	if (!bus_p)
482 		return (NULL);
483 
484 	if (!(bus_p->bus_fm_flags & PF_FM_READY))
485 		return (NULL);
486 	return (bus_p);
487 }
488 
/*
 * Snapshot the PCI-X ECC registers of "bus_p" into "pcix_ecc_regs".
 *
 * "bdg" selects which register layout to read: B_TRUE reads the bridge
 * (PCI_PCIX_BDG_ECC_*) offsets, B_FALSE the non-bridge (PCI_PCIX_ECC_*)
 * offsets.
 */
static void
pf_pcix_ecc_regs_gather(pf_pcix_ecc_regs_t *pcix_ecc_regs,
    pcie_bus_t *bus_p, boolean_t bdg)
{
	if (bdg) {
		/* Bridge (Type 1 header) ECC register offsets. */
		pcix_ecc_regs->pcix_ecc_ctlstat = PCIX_CAP_GET(32, bus_p,
		    PCI_PCIX_BDG_ECC_STATUS);
		pcix_ecc_regs->pcix_ecc_fstaddr = PCIX_CAP_GET(32, bus_p,
		    PCI_PCIX_BDG_ECC_FST_AD);
		pcix_ecc_regs->pcix_ecc_secaddr = PCIX_CAP_GET(32, bus_p,
		    PCI_PCIX_BDG_ECC_SEC_AD);
		pcix_ecc_regs->pcix_ecc_attr = PCIX_CAP_GET(32, bus_p,
		    PCI_PCIX_BDG_ECC_ATTR);
	} else {
		/* Non-bridge (Type 0 header) ECC register offsets. */
		pcix_ecc_regs->pcix_ecc_ctlstat = PCIX_CAP_GET(32, bus_p,
		    PCI_PCIX_ECC_STATUS);
		pcix_ecc_regs->pcix_ecc_fstaddr = PCIX_CAP_GET(32, bus_p,
		    PCI_PCIX_ECC_FST_AD);
		pcix_ecc_regs->pcix_ecc_secaddr = PCIX_CAP_GET(32, bus_p,
		    PCI_PCIX_ECC_SEC_AD);
		pcix_ecc_regs->pcix_ecc_attr = PCIX_CAP_GET(32, bus_p,
		    PCI_PCIX_ECC_ATTR);
	}
}
513 
514 
515 static void
516 pf_pcix_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p)
517 {
518 	/*
519 	 * For PCI-X device PCI-X Capability only exists for Type 0 Headers.
520 	 * PCI-X Bridge Capability only exists for Type 1 Headers.
521 	 * Both capabilities do not exist at the same time.
522 	 */
523 	if (PCIE_IS_BDG(bus_p)) {
524 		pf_pcix_bdg_err_regs_t *pcix_bdg_regs;
525 
526 		pcix_bdg_regs = PCIX_BDG_ERR_REG(pfd_p);
527 
528 		pcix_bdg_regs->pcix_bdg_sec_stat = PCIX_CAP_GET(16, bus_p,
529 		    PCI_PCIX_SEC_STATUS);
530 		pcix_bdg_regs->pcix_bdg_stat = PCIX_CAP_GET(32, bus_p,
531 		    PCI_PCIX_BDG_STATUS);
532 
533 		if (PCIX_ECC_VERSION_CHECK(bus_p)) {
534 			/*
535 			 * PCI Express to PCI-X bridges only implement the
536 			 * secondary side of the PCI-X ECC registers, bit one is
537 			 * read-only so we make sure we do not write to it.
538 			 */
539 			if (!PCIE_IS_PCIE_BDG(bus_p)) {
540 				PCIX_CAP_PUT(32, bus_p, PCI_PCIX_BDG_ECC_STATUS,
541 				    0);
542 				pf_pcix_ecc_regs_gather(
543 				    PCIX_BDG_ECC_REG(pfd_p, 0), bus_p, B_TRUE);
544 				PCIX_CAP_PUT(32, bus_p, PCI_PCIX_BDG_ECC_STATUS,
545 				    1);
546 			}
547 			pf_pcix_ecc_regs_gather(PCIX_BDG_ECC_REG(pfd_p, 0),
548 			    bus_p, B_TRUE);
549 		}
550 	} else {
551 		pf_pcix_err_regs_t *pcix_regs = PCIX_ERR_REG(pfd_p);
552 
553 		pcix_regs->pcix_command = PCIX_CAP_GET(16, bus_p,
554 		    PCI_PCIX_COMMAND);
555 		pcix_regs->pcix_status = PCIX_CAP_GET(32, bus_p,
556 		    PCI_PCIX_STATUS);
557 		if (PCIX_ECC_VERSION_CHECK(bus_p))
558 			pf_pcix_ecc_regs_gather(PCIX_ECC_REG(pfd_p), bus_p,
559 			    B_TRUE);
560 	}
561 }
562 
/*
 * Snapshot the PCIe error state of "bus_p" into "pfd_p": device status,
 * control and capabilities; PCI-X state for PCIe-PCI-X bridges; root port
 * status/control for root devices; and, when the device implements AER,
 * the UE/CE registers, header logs, secondary (bridge) registers and root
 * error registers as applicable.
 */
static void
pf_pcie_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p)
{
	pf_pcie_err_regs_t *pcie_regs = PCIE_ERR_REG(pfd_p);
	pf_pcie_adv_err_regs_t *pcie_adv_regs = PCIE_ADV_REG(pfd_p);

	pcie_regs->pcie_err_status = PCIE_CAP_GET(16, bus_p, PCIE_DEVSTS);
	pcie_regs->pcie_err_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
	pcie_regs->pcie_dev_cap = PCIE_CAP_GET(32, bus_p, PCIE_DEVCAP);

	if (PCIE_IS_BDG(bus_p) && PCIE_IS_PCIX(bus_p))
		pf_pcix_regs_gather(pfd_p, bus_p);

	if (PCIE_IS_ROOT(bus_p)) {
		pf_pcie_rp_err_regs_t *pcie_rp_regs = PCIE_RP_REG(pfd_p);

		pcie_rp_regs->pcie_rp_status = PCIE_CAP_GET(32, bus_p,
		    PCIE_ROOTSTS);
		pcie_rp_regs->pcie_rp_ctl = PCIE_CAP_GET(16, bus_p,
		    PCIE_ROOTCTL);
	}

	/* Without AER there is nothing further to collect. */
	if (!PCIE_HAS_AER(bus_p))
		return;

	/* Gather UE AERs */
	pcie_adv_regs->pcie_adv_ctl = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_CTL);
	pcie_adv_regs->pcie_ue_status = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_UCE_STS);
	pcie_adv_regs->pcie_ue_mask = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_UCE_MASK);
	pcie_adv_regs->pcie_ue_sev = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_UCE_SERV);
	/* The four-DWORD TLP header log of the first logged error. */
	PCIE_ADV_HDR(pfd_p, 0) = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_HDR_LOG);
	PCIE_ADV_HDR(pfd_p, 1) = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_HDR_LOG + 0x4);
	PCIE_ADV_HDR(pfd_p, 2) = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_HDR_LOG + 0x8);
	PCIE_ADV_HDR(pfd_p, 3) = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_HDR_LOG + 0xc);

	/* Gather CE AERs */
	pcie_adv_regs->pcie_ce_status = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_CE_STS);
	pcie_adv_regs->pcie_ce_mask = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_CE_MASK);

	/*
	 * If pci express to pci bridge then grab the bridge
	 * error registers.
	 */
	if (PCIE_IS_PCIE_BDG(bus_p)) {
		pf_pcie_adv_bdg_err_regs_t *pcie_bdg_regs =
		    PCIE_ADV_BDG_REG(pfd_p);

		pcie_bdg_regs->pcie_sue_ctl = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SCTL);
		pcie_bdg_regs->pcie_sue_status = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SUCE_STS);
		pcie_bdg_regs->pcie_sue_mask = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SUCE_MASK);
		pcie_bdg_regs->pcie_sue_sev = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SUCE_SERV);
		PCIE_ADV_BDG_HDR(pfd_p, 0) = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SHDR_LOG);
		PCIE_ADV_BDG_HDR(pfd_p, 1) = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SHDR_LOG + 0x4);
		PCIE_ADV_BDG_HDR(pfd_p, 2) = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SHDR_LOG + 0x8);
		PCIE_ADV_BDG_HDR(pfd_p, 3) = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SHDR_LOG + 0xc);
	}

	/*
	 * If PCI Express root port then grab the root port
	 * error registers.
	 */
	if (PCIE_IS_ROOT(bus_p)) {
		pf_pcie_adv_rp_err_regs_t *pcie_rp_regs =
		    PCIE_ADV_RP_REG(pfd_p);

		pcie_rp_regs->pcie_rp_err_cmd = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_RE_CMD);
		pcie_rp_regs->pcie_rp_err_status = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_RE_STS);
		pcie_rp_regs->pcie_rp_ce_src_id = PCIE_AER_GET(16, bus_p,
		    PCIE_AER_CE_SRC_ID);
		pcie_rp_regs->pcie_rp_ue_src_id = PCIE_AER_GET(16, bus_p,
		    PCIE_AER_ERR_SRC_ID);
	}
}
656 
657 static void
658 pf_pci_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p)
659 {
660 	pf_pci_err_regs_t *pci_regs = PCI_ERR_REG(pfd_p);
661 
662 	/*
663 	 * Start by reading all the error registers that are available for
664 	 * pci and pci express and for leaf devices and bridges/switches
665 	 */
666 	pci_regs->pci_err_status = PCIE_GET(16, bus_p, PCI_CONF_STAT);
667 	pci_regs->pci_cfg_comm = PCIE_GET(16, bus_p, PCI_CONF_COMM);
668 
669 	/*
670 	 * If pci-pci bridge grab PCI bridge specific error registers.
671 	 */
672 	if (PCIE_IS_BDG(bus_p)) {
673 		pf_pci_bdg_err_regs_t *pci_bdg_regs = PCI_BDG_ERR_REG(pfd_p);
674 		pci_bdg_regs->pci_bdg_sec_stat =
675 		    PCIE_GET(16, bus_p, PCI_BCNF_SEC_STATUS);
676 		pci_bdg_regs->pci_bdg_ctrl =
677 		    PCIE_GET(16, bus_p, PCI_BCNF_BCNTRL);
678 	}
679 
680 	/*
681 	 * If pci express device grab pci express error registers and
682 	 * check for advanced error reporting features and grab them if
683 	 * available.
684 	 */
685 	if (PCIE_IS_PCIE(bus_p))
686 		pf_pcie_regs_gather(pfd_p, bus_p);
687 	else if (PCIE_IS_PCIX(bus_p))
688 		pf_pcix_regs_gather(pfd_p, bus_p);
689 
690 }
691 
/*
 * Write the previously gathered PCI-X error state back to "bus_p" to clear
 * the logged error bits.  Mirrors pf_pcix_regs_gather().
 */
static void
pf_pcix_regs_clear(pf_data_t *pfd_p, pcie_bus_t *bus_p)
{
	if (PCIE_IS_BDG(bus_p)) {
		pf_pcix_bdg_err_regs_t *pcix_bdg_regs;

		pcix_bdg_regs = PCIX_BDG_ERR_REG(pfd_p);

		PCIX_CAP_PUT(16, bus_p, PCI_PCIX_SEC_STATUS,
		    pcix_bdg_regs->pcix_bdg_sec_stat);

		PCIX_CAP_PUT(32, bus_p, PCI_PCIX_BDG_STATUS,
		    pcix_bdg_regs->pcix_bdg_stat);

		if (PCIX_ECC_VERSION_CHECK(bus_p)) {
			pf_pcix_ecc_regs_t *pcix_bdg_ecc_regs;
			/*
			 * PCI Express to PCI-X bridges only implement the
			 * secondary side of the PCI-X ECC registers.  For
			 * clearing, there is no need to "select" the ECC
			 * register, just write what was originally read.
			 */
			if (!PCIE_IS_PCIE_BDG(bus_p)) {
				/* Clear the primary side (slot 0). */
				pcix_bdg_ecc_regs = PCIX_BDG_ECC_REG(pfd_p, 0);
				PCIX_CAP_PUT(32, bus_p, PCI_PCIX_BDG_ECC_STATUS,
				    pcix_bdg_ecc_regs->pcix_ecc_ctlstat);

			}
			/* Clear the secondary side (slot 1). */
			pcix_bdg_ecc_regs = PCIX_BDG_ECC_REG(pfd_p, 1);
			PCIX_CAP_PUT(32, bus_p, PCI_PCIX_BDG_ECC_STATUS,
			    pcix_bdg_ecc_regs->pcix_ecc_ctlstat);
		}
	} else {
		pf_pcix_err_regs_t *pcix_regs = PCIX_ERR_REG(pfd_p);

		PCIX_CAP_PUT(32, bus_p, PCI_PCIX_STATUS,
		    pcix_regs->pcix_status);

		if (PCIX_ECC_VERSION_CHECK(bus_p)) {
			pf_pcix_ecc_regs_t *pcix_ecc_regs = PCIX_ECC_REG(pfd_p);

			PCIX_CAP_PUT(32, bus_p, PCI_PCIX_ECC_STATUS,
			    pcix_ecc_regs->pcix_ecc_ctlstat);
		}
	}
}
738 
/*
 * Write the previously gathered PCIe error state back to "bus_p" to clear
 * the logged error bits.  Mirrors pf_pcie_regs_gather().
 */
static void
pf_pcie_regs_clear(pf_data_t *pfd_p, pcie_bus_t *bus_p)
{
	pf_pcie_err_regs_t *pcie_regs = PCIE_ERR_REG(pfd_p);
	pf_pcie_adv_err_regs_t *pcie_adv_regs = PCIE_ADV_REG(pfd_p);

	PCIE_CAP_PUT(16, bus_p, PCIE_DEVSTS, pcie_regs->pcie_err_status);

	if (PCIE_IS_BDG(bus_p) && PCIE_IS_PCIX(bus_p))
		pf_pcix_regs_clear(pfd_p, bus_p);

	/* Without AER there is nothing further to clear. */
	if (!PCIE_HAS_AER(bus_p))
		return;

	PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_STS,
	    pcie_adv_regs->pcie_ue_status);

	PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_STS,
	    pcie_adv_regs->pcie_ce_status);

	/* PCIe-PCI bridges also log secondary uncorrectable errors. */
	if (PCIE_IS_PCIE_BDG(bus_p)) {
		pf_pcie_adv_bdg_err_regs_t *pcie_bdg_regs =
		    PCIE_ADV_BDG_REG(pfd_p);

		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_STS,
		    pcie_bdg_regs->pcie_sue_status);
	}

	/*
	 * If PCI Express root complex then clear the root complex
	 * error registers.
	 */
	if (PCIE_IS_ROOT(bus_p)) {
		pf_pcie_adv_rp_err_regs_t *pcie_rp_regs;

		pcie_rp_regs = PCIE_ADV_RP_REG(pfd_p);

		PCIE_AER_PUT(32, bus_p, PCIE_AER_RE_STS,
		    pcie_rp_regs->pcie_rp_err_status);
	}
}
780 
781 static void
782 pf_pci_regs_clear(pf_data_t *pfd_p, pcie_bus_t *bus_p)
783 {
784 	if (PCIE_IS_PCIE(bus_p))
785 		pf_pcie_regs_clear(pfd_p, bus_p);
786 	else if (PCIE_IS_PCIX(bus_p))
787 		pf_pcix_regs_clear(pfd_p, bus_p);
788 
789 	PCIE_PUT(16, bus_p, PCI_CONF_STAT, pfd_p->pe_pci_regs->pci_err_status);
790 
791 	if (PCIE_IS_BDG(bus_p)) {
792 		pf_pci_bdg_err_regs_t *pci_bdg_regs = PCI_BDG_ERR_REG(pfd_p);
793 		PCIE_PUT(16, bus_p, PCI_BCNF_SEC_STATUS,
794 		    pci_bdg_regs->pci_bdg_sec_stat);
795 	}
796 }
797 
798 /* ARGSUSED */
799 void
800 pcie_clear_errors(dev_info_t *dip)
801 {
802 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
803 	pf_data_t *pfd_p = PCIE_DIP2PFD(dip);
804 
805 	ASSERT(bus_p);
806 
807 	pf_pci_regs_gather(pfd_p, bus_p);
808 	pf_pci_regs_clear(pfd_p, bus_p);
809 }
810 
/*
 * Find the fault BDF, fault Addr or full scan on a PCIe Root Port.
 *
 * Fills in the root fault structure of "pfd_p": either a precise scan_bdf /
 * scan_addr when exactly one fault source can be identified, or full_scan
 * when multiple faults (or none identifiable) were detected.
 */
static void
pf_pci_find_rp_fault(pf_data_t *pfd_p, pcie_bus_t *bus_p)
{
	pf_root_fault_t *root_fault = PCIE_ROOT_FAULT(pfd_p);
	pf_pcie_adv_rp_err_regs_t *rp_regs = PCIE_ADV_RP_REG(pfd_p);
	uint32_t root_err = rp_regs->pcie_rp_err_status;
	uint32_t ue_err = PCIE_ADV_REG(pfd_p)->pcie_ue_status;
	int num_faults = 0;

	/* Since this data structure is reused, make sure to reset it */
	root_fault->full_scan = B_FALSE;
	root_fault->scan_bdf = PCIE_INVALID_BDF;
	root_fault->scan_addr = 0;

	/* Without AER a secondary-status error gives no BDF; scan all. */
	if (!PCIE_HAS_AER(bus_p) &&
	    (PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat & PF_PCI_BDG_ERR)) {
		PCIE_ROOT_FAULT(pfd_p)->full_scan = B_TRUE;
		return;
	}

	/*
	 * Check to see if an error has been received that
	 * requires a scan of the fabric.  Count the number of
	 * faults seen.  If MUL CE/FE_NFE that counts for
	 * at least 2 faults, so just return with full_scan.
	 */
	if ((root_err & PCIE_AER_RE_STS_MUL_CE_RCVD) ||
	    (root_err & PCIE_AER_RE_STS_MUL_FE_NFE_RCVD)) {
		PCIE_ROOT_FAULT(pfd_p)->full_scan = B_TRUE;
		return;
	}

	if (root_err & PCIE_AER_RE_STS_CE_RCVD)
		num_faults++;

	if (root_err & PCIE_AER_RE_STS_FE_NFE_RCVD)
		num_faults++;

	if (ue_err & PCIE_AER_UCE_CA)
		num_faults++;

	if (ue_err & PCIE_AER_UCE_UR)
		num_faults++;

	/* If no faults just return */
	if (num_faults == 0)
		return;

	/* If faults > 1 do full scan */
	if (num_faults > 1) {
		PCIE_ROOT_FAULT(pfd_p)->full_scan = B_TRUE;
		return;
	}

	/* By this point, there is only 1 fault detected */
	if (root_err & PCIE_AER_RE_STS_CE_RCVD) {
		/* CE: the source id register names the reporting device. */
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = rp_regs->pcie_rp_ce_src_id;
		num_faults--;
	} else if (root_err & PCIE_AER_RE_STS_FE_NFE_RCVD) {
		/* FE/NFE: likewise, use the UE source id. */
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = rp_regs->pcie_rp_ue_src_id;
		num_faults--;
	} else if ((HAS_AER_LOGS(pfd_p, PCIE_AER_UCE_CA) ||
	    HAS_AER_LOGS(pfd_p, PCIE_AER_UCE_UR)) &&
	    (pf_tlp_decode(PCIE_PFD2BUS(pfd_p), PCIE_ADV_REG(pfd_p)) ==
	    DDI_SUCCESS)) {
		/* CA/UR: decode the target address from the TLP header log. */
		PCIE_ROOT_FAULT(pfd_p)->scan_addr =
		    PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_addr;
		num_faults--;
	}

	/*
	 * This means an error did occur, but we couldn't extract the fault BDF
	 */
	if (num_faults > 0)
		PCIE_ROOT_FAULT(pfd_p)->full_scan = B_TRUE;

}
889 
890 
/*
 * Load PCIe Fault Data for PCI/PCIe devices into PCIe Fault Data Queue
 *
 * Returns a scan flag.
 * o PF_SCAN_SUCCESS - Error gathered and cleared successfully, data added to
 *   Fault Q
 * o PF_SCAN_BAD_RESPONSE - Unable to talk to device, item added to fault Q
 * o PF_SCAN_CB_FAILURE - A hardened device deemed that the error was fatal.
 * o PF_SCAN_NO_ERR_IN_CHILD - Only applies to bridge to prevent further
 *   unnecessary scanning
 * o PF_SCAN_IN_DQ - This device has already been scanned; it was skipped this
 *   time.
 */
static int
pf_default_hdl(dev_info_t *dip, pf_impl_t *impl)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	pf_data_t *pfd_p = PCIE_DIP2PFD(dip);
	int cb_sts, scan_flag = PF_SCAN_SUCCESS;

	/* Make sure this device hasn't already been snapshotted and cleared */
	if (pfd_p->pe_valid == B_TRUE) {
		scan_flag |= PF_SCAN_IN_DQ;
		goto done;
	}

	/*
	 * Read vendor/device ID and check with cached data, if it doesn't match
	 * could very well be a device that isn't responding anymore.  Just
	 * stop.  Save the basic info in the error q for post mortem debugging
	 * purposes.
	 */
	if (PCIE_GET(32, bus_p, PCI_CONF_VENID) != bus_p->bus_dev_ven_id) {
		char buf[FM_MAX_CLASS];

		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
		    PCI_ERROR_SUBCLASS, PCI_NR);
		ddi_fm_ereport_post(dip, buf, fm_ena_generate(0, FM_ENA_FMT1),
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, NULL);

		/*
		 * For IOV/Hotplug purposes skip gathering info for this device,
		 * but populate affected info and severity.  Clear out any data
		 * that may have been saved in the last fabric scan.
		 */
		pf_reset_pfd(pfd_p);
		pfd_p->pe_severity_flags = PF_ERR_PANIC_BAD_RESPONSE;
		PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags = PF_AFFECTED_SELF;

		/* Add the snapshot to the error q */
		pf_en_dq(pfd_p, impl);
		pfd_p->pe_valid = B_TRUE;

		return (PF_SCAN_BAD_RESPONSE);
	}

	/* Snapshot and clear this device's error registers. */
	pf_pci_regs_gather(pfd_p, bus_p);
	pf_pci_regs_clear(pfd_p, bus_p);
	if (PCIE_IS_RP(bus_p))
		pf_pci_find_rp_fault(pfd_p, bus_p);

	/* Give the hardened driver's error handler a chance to respond. */
	cb_sts = pf_fm_callback(dip, impl->pf_derr);

	if (cb_sts == DDI_FM_FATAL || cb_sts == DDI_FM_UNKNOWN)
		scan_flag |= PF_SCAN_CB_FAILURE;

	/* Add the snapshot to the error q */
	pf_en_dq(pfd_p, impl);

done:
	/*
	 * If a bridge does not have any error no need to scan any further down.
	 * For PCIe devices, check the PCIe device status and PCI secondary
	 * status.
	 * - Some non-compliant PCIe devices do not utilize PCIe
	 *   error registers.  If so rely on legacy PCI error registers.
	 * For PCI devices, check the PCI secondary status.
	 */
	if (PCIE_IS_PCIE_BDG(bus_p) &&
	    !(PCIE_ERR_REG(pfd_p)->pcie_err_status & PF_PCIE_BDG_ERR) &&
	    !(PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat & PF_PCI_BDG_ERR))
		scan_flag |= PF_SCAN_NO_ERR_IN_CHILD;

	if (PCIE_IS_PCI_BDG(bus_p) &&
	    !(PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat & PF_PCI_BDG_ERR))
		scan_flag |= PF_SCAN_NO_ERR_IN_CHILD;

	pfd_p->pe_valid = B_TRUE;
	return (scan_flag);
}
981 
982 /*
983  * Called during postattach to initialize a device's error handling
984  * capabilities.  If the devices has already been hardened, then there isn't
985  * much needed.  Otherwise initialize the device's default FMA capabilities.
986  *
987  * In a future project where PCIe support is removed from pcifm, several
988  * "properties" that are setup in ddi_fm_init and pci_ereport_setup need to be
989  * created here so that the PCI/PCIe eversholt rules will work properly.
990  */
991 void
992 pf_init(dev_info_t *dip, ddi_iblock_cookie_t ibc, ddi_attach_cmd_t cmd)
993 {
994 	pcie_bus_t		*bus_p = PCIE_DIP2BUS(dip);
995 	struct i_ddi_fmhdl	*fmhdl = DEVI(dip)->devi_fmhdl;
996 	boolean_t		need_cb_register = B_FALSE;
997 
998 	if (!bus_p) {
999 		cmn_err(CE_WARN, "devi_bus information is not set for %s%d.\n",
1000 		    ddi_driver_name(dip), ddi_get_instance(dip));
1001 		return;
1002 	}
1003 
1004 	if (fmhdl) {
1005 		/*
1006 		 * If device is only ereport capable and not callback capable
1007 		 * make it callback capable. The only downside is that the
1008 		 * "fm-errcb-capable" property is not created for this device
1009 		 * which should be ok since it's not used anywhere.
1010 		 */
1011 		if (!(fmhdl->fh_cap & DDI_FM_ERRCB_CAPABLE))
1012 			need_cb_register = B_TRUE;
1013 	} else {
1014 		int cap;
1015 		/*
1016 		 * fm-capable in driver.conf can be used to set fm_capabilities.
1017 		 * If fm-capable is not defined, set the default
1018 		 * DDI_FM_EREPORT_CAPABLE and DDI_FM_ERRCB_CAPABLE.
1019 		 */
1020 		cap = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1021 		    DDI_PROP_DONTPASS, "fm-capable",
1022 		    DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1023 		cap &= (DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1024 
1025 		bus_p->bus_fm_flags |= PF_FM_IS_NH;
1026 
1027 		if (cmd == DDI_ATTACH) {
1028 			ddi_fm_init(dip, &cap, &ibc);
1029 			pci_ereport_setup(dip);
1030 		}
1031 
1032 		if (cap & DDI_FM_ERRCB_CAPABLE)
1033 			need_cb_register = B_TRUE;
1034 
1035 		fmhdl = DEVI(dip)->devi_fmhdl;
1036 	}
1037 
1038 	/* If ddi_fm_init fails for any reason RETURN */
1039 	if (!fmhdl) {
1040 		bus_p->bus_fm_flags = 0;
1041 		return;
1042 	}
1043 
1044 	fmhdl->fh_cap |=  DDI_FM_ERRCB_CAPABLE;
1045 	if (cmd == DDI_ATTACH) {
1046 		if (need_cb_register)
1047 			ddi_fm_handler_register(dip, pf_dummy_cb, NULL);
1048 	}
1049 
1050 	bus_p->bus_fm_flags |= PF_FM_READY;
1051 }
1052 
/* undo FMA setup done in pf_init, called at predetach */
void
pf_fini(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	if (!bus_p)
		return;

	/* Don't fini anything if device isn't FM Ready */
	if (!(bus_p->bus_fm_flags & PF_FM_READY))
		return;

	/* no other code should set the flag to false */
	bus_p->bus_fm_flags &= ~PF_FM_READY;

	/*
	 * Grab the mutex to make sure device isn't in the middle of
	 * error handling.  Setting the bus_fm_flag to ~PF_FM_READY
	 * should prevent this device from being error handled after
	 * the mutex has been released.
	 */
	(void) pf_handler_enter(dip, NULL);
	pf_handler_exit(dip);

	/* undo non-hardened drivers */
	if (bus_p->bus_fm_flags & PF_FM_IS_NH) {
		if (cmd == DDI_DETACH) {
			bus_p->bus_fm_flags &= ~PF_FM_IS_NH;
			pci_ereport_teardown(dip);
			/*
			 * ddi_fm_fini itself calls ddi_fm_handler_unregister,
			 * so no need to explicitly call unregister.
			 */
			ddi_fm_fini(dip);
		}
	}
}
1091 
/*
 * Dummy error callback registered by pf_init on behalf of non-hardened
 * drivers so that the FMA framework treats them as callback capable.
 * Always reports the error as OK; the real analysis is done by the
 * fabric scan.
 */
/*ARGSUSED*/
static int
pf_dummy_cb(dev_info_t *dip, ddi_fm_error_t *derr, const void *not_used)
{
	return (DDI_FM_OK);
}
1098 
1099 /*
1100  * Add PFD to queue.  If it is an RC add it to the beginning,
1101  * otherwise add it to the end.
1102  */
1103 static void
1104 pf_en_dq(pf_data_t *pfd_p, pf_impl_t *impl)
1105 {
1106 	pf_data_t *head_p = impl->pf_dq_head_p;
1107 	pf_data_t *tail_p = impl->pf_dq_tail_p;
1108 
1109 	impl->pf_total++;
1110 
1111 	if (!head_p) {
1112 		ASSERT(PFD_IS_ROOT(pfd_p));
1113 		impl->pf_dq_head_p = pfd_p;
1114 		impl->pf_dq_tail_p = pfd_p;
1115 		pfd_p->pe_prev = NULL;
1116 		pfd_p->pe_next = NULL;
1117 		return;
1118 	}
1119 
1120 	/* Check if this is a Root Port eprt */
1121 	if (PFD_IS_ROOT(pfd_p)) {
1122 		pf_data_t *root_p, *last_p = NULL;
1123 
1124 		/* The first item must be a RP */
1125 		root_p = head_p;
1126 		for (last_p = head_p; last_p && PFD_IS_ROOT(last_p);
1127 		    last_p = last_p->pe_next)
1128 			root_p = last_p;
1129 
1130 		/* root_p is the last RP pfd. last_p is the first non-RP pfd. */
1131 		root_p->pe_next = pfd_p;
1132 		pfd_p->pe_prev = root_p;
1133 		pfd_p->pe_next = last_p;
1134 
1135 		if (last_p)
1136 			last_p->pe_prev = pfd_p;
1137 		else
1138 			tail_p = pfd_p;
1139 	} else {
1140 		tail_p->pe_next = pfd_p;
1141 		pfd_p->pe_prev = tail_p;
1142 		pfd_p->pe_next = NULL;
1143 		tail_p = pfd_p;
1144 	}
1145 
1146 	impl->pf_dq_head_p = head_p;
1147 	impl->pf_dq_tail_p = tail_p;
1148 }
1149 
1150 /*
1151  * Ignore:
1152  * - TRAINING: as leaves do not have children
1153  * - SD: as leaves do not have children
1154  */
1155 const pf_fab_err_tbl_t pcie_pcie_tbl[] = {
1156 	{PCIE_AER_UCE_DLP,	pf_panic,
1157 	    PF_AFFECTED_PARENT, 0},
1158 
1159 	{PCIE_AER_UCE_PTLP,	pf_analyse_ptlp,
1160 	    PF_AFFECTED_SELF, 0},
1161 
1162 	{PCIE_AER_UCE_FCP,	pf_panic,
1163 	    PF_AFFECTED_PARENT, 0},
1164 
1165 	{PCIE_AER_UCE_TO,	pf_analyse_to,
1166 	    PF_AFFECTED_SELF, 0},
1167 
1168 	{PCIE_AER_UCE_CA,	pf_analyse_ca_ur,
1169 	    PF_AFFECTED_SELF, 0},
1170 
1171 	{PCIE_AER_UCE_UC,	pf_analyse_uc,
1172 	    0, 0},
1173 
1174 	{PCIE_AER_UCE_RO,	pf_panic,
1175 	    PF_AFFECTED_PARENT, 0},
1176 
1177 	{PCIE_AER_UCE_MTLP,	pf_panic,
1178 	    PF_AFFECTED_PARENT, 0},
1179 
1180 	{PCIE_AER_UCE_ECRC,	pf_panic,
1181 	    PF_AFFECTED_SELF, 0},
1182 
1183 	{PCIE_AER_UCE_UR,	pf_analyse_ca_ur,
1184 	    PF_AFFECTED_SELF, 0},
1185 
1186 	{NULL, NULL, NULL, NULL}
1187 };
1188 
1189 const pf_fab_err_tbl_t pcie_rp_tbl[] = {
1190 	{PCIE_AER_UCE_TRAINING,	pf_no_panic,
1191 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1192 
1193 	{PCIE_AER_UCE_DLP,	pf_panic,
1194 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1195 
1196 	{PCIE_AER_UCE_SD,	pf_no_panic,
1197 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1198 
1199 	{PCIE_AER_UCE_PTLP,	pf_analyse_ptlp,
1200 	    PF_AFFECTED_AER, PF_AFFECTED_CHILDREN},
1201 
1202 	{PCIE_AER_UCE_FCP,	pf_panic,
1203 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1204 
1205 	{PCIE_AER_UCE_TO,	pf_panic,
1206 	    PF_AFFECTED_ADDR, PF_AFFECTED_CHILDREN},
1207 
1208 	{PCIE_AER_UCE_CA,	pf_no_panic,
1209 	    PF_AFFECTED_AER, PF_AFFECTED_CHILDREN},
1210 
1211 	{PCIE_AER_UCE_UC,	pf_analyse_uc,
1212 	    0, 0},
1213 
1214 	{PCIE_AER_UCE_RO,	pf_panic,
1215 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1216 
1217 	{PCIE_AER_UCE_MTLP,	pf_panic,
1218 	    PF_AFFECTED_SELF | PF_AFFECTED_AER,
1219 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},
1220 
1221 	{PCIE_AER_UCE_ECRC,	pf_panic,
1222 	    PF_AFFECTED_AER, PF_AFFECTED_CHILDREN},
1223 
1224 	{PCIE_AER_UCE_UR,	pf_no_panic,
1225 	    PF_AFFECTED_AER, PF_AFFECTED_CHILDREN},
1226 
1227 	{NULL, NULL, NULL, NULL}
1228 };
1229 
1230 const pf_fab_err_tbl_t pcie_sw_tbl[] = {
1231 	{PCIE_AER_UCE_TRAINING,	pf_no_panic,
1232 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1233 
1234 	{PCIE_AER_UCE_DLP,	pf_panic,
1235 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1236 
1237 	{PCIE_AER_UCE_SD,	pf_no_panic,
1238 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1239 
1240 	{PCIE_AER_UCE_PTLP,	pf_analyse_ptlp,
1241 	    PF_AFFECTED_AER, PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},
1242 
1243 	{PCIE_AER_UCE_FCP,	pf_panic,
1244 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1245 
1246 	{PCIE_AER_UCE_TO,	pf_analyse_to,
1247 	    PF_AFFECTED_CHILDREN, 0},
1248 
1249 	{PCIE_AER_UCE_CA,	pf_analyse_ca_ur,
1250 	    PF_AFFECTED_AER, PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},
1251 
1252 	{PCIE_AER_UCE_UC,	pf_analyse_uc,
1253 	    0, 0},
1254 
1255 	{PCIE_AER_UCE_RO,	pf_panic,
1256 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1257 
1258 	{PCIE_AER_UCE_MTLP,	pf_panic,
1259 	    PF_AFFECTED_SELF | PF_AFFECTED_AER,
1260 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},
1261 
1262 	{PCIE_AER_UCE_ECRC,	pf_panic,
1263 	    PF_AFFECTED_AER, PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},
1264 
1265 	{PCIE_AER_UCE_UR,	pf_analyse_ca_ur,
1266 	    PF_AFFECTED_AER, PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},
1267 
1268 	{NULL, NULL, NULL, NULL}
1269 };
1270 
1271 const pf_fab_err_tbl_t pcie_pcie_bdg_tbl[] = {
1272 	{PCIE_AER_SUCE_TA_ON_SC,	pf_analyse_sc,
1273 	    0, 0},
1274 
1275 	{PCIE_AER_SUCE_MA_ON_SC,	pf_analyse_sc,
1276 	    0, 0},
1277 
1278 	{PCIE_AER_SUCE_RCVD_TA,		pf_analyse_ma_ta,
1279 	    0, 0},
1280 
1281 	{PCIE_AER_SUCE_RCVD_MA,		pf_analyse_ma_ta,
1282 	    0, 0},
1283 
1284 	{PCIE_AER_SUCE_USC_ERR,		pf_panic,
1285 	    PF_AFFECTED_SAER, PF_AFFECTED_CHILDREN},
1286 
1287 	{PCIE_AER_SUCE_USC_MSG_DATA_ERR, pf_analyse_ma_ta,
1288 	    PF_AFFECTED_SAER, PF_AFFECTED_CHILDREN},
1289 
1290 	{PCIE_AER_SUCE_UC_DATA_ERR,	pf_analyse_uc_data,
1291 	    PF_AFFECTED_SAER, PF_AFFECTED_CHILDREN},
1292 
1293 	{PCIE_AER_SUCE_UC_ATTR_ERR,	pf_panic,
1294 	    PF_AFFECTED_CHILDREN, 0},
1295 
1296 	{PCIE_AER_SUCE_UC_ADDR_ERR,	pf_panic,
1297 	    PF_AFFECTED_CHILDREN, 0},
1298 
1299 	{PCIE_AER_SUCE_TIMER_EXPIRED,	pf_panic,
1300 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1301 
1302 	{PCIE_AER_SUCE_PERR_ASSERT,	pf_analyse_perr_assert,
1303 	    0, 0},
1304 
1305 	{PCIE_AER_SUCE_SERR_ASSERT,	pf_no_panic,
1306 	    0, 0},
1307 
1308 	{PCIE_AER_SUCE_INTERNAL_ERR,	pf_panic,
1309 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1310 
1311 	{NULL, NULL, NULL, NULL}
1312 };
1313 
1314 const pf_fab_err_tbl_t pcie_pci_bdg_tbl[] = {
1315 	{PCI_STAT_PERROR,	pf_analyse_pci,
1316 	    PF_AFFECTED_SELF, 0},
1317 
1318 	{PCI_STAT_S_PERROR,	pf_analyse_pci,
1319 	    PF_AFFECTED_SELF, 0},
1320 
1321 	{PCI_STAT_S_SYSERR,	pf_panic,
1322 	    PF_AFFECTED_SELF, 0},
1323 
1324 	{PCI_STAT_R_MAST_AB,	pf_analyse_pci,
1325 	    PF_AFFECTED_SELF, 0},
1326 
1327 	{PCI_STAT_R_TARG_AB,	pf_analyse_pci,
1328 	    PF_AFFECTED_SELF, 0},
1329 
1330 	{PCI_STAT_S_TARG_AB,	pf_analyse_pci,
1331 	    PF_AFFECTED_SELF, 0},
1332 
1333 	{NULL, NULL, NULL, NULL}
1334 };
1335 
1336 const pf_fab_err_tbl_t pcie_pci_tbl[] = {
1337 	{PCI_STAT_PERROR,	pf_analyse_pci,
1338 	    PF_AFFECTED_SELF, 0},
1339 
1340 	{PCI_STAT_S_PERROR,	pf_analyse_pci,
1341 	    PF_AFFECTED_SELF, 0},
1342 
1343 	{PCI_STAT_S_SYSERR,	pf_panic,
1344 	    PF_AFFECTED_SELF, 0},
1345 
1346 	{PCI_STAT_R_MAST_AB,	pf_analyse_pci,
1347 	    PF_AFFECTED_SELF, 0},
1348 
1349 	{PCI_STAT_R_TARG_AB,	pf_analyse_pci,
1350 	    PF_AFFECTED_SELF, 0},
1351 
1352 	{PCI_STAT_S_TARG_AB,	pf_analyse_pci,
1353 	    PF_AFFECTED_SELF, 0},
1354 
1355 	{NULL, NULL, NULL, NULL}
1356 };
1357 
/*
 * PF_MASKED_AER_ERR: AER UE status bits that are not masked out by the UE
 * mask register, i.e. the uncorrectable errors actually being reported.
 */
#define	PF_MASKED_AER_ERR(pfd_p) \
	(PCIE_ADV_REG(pfd_p)->pcie_ue_status & \
	    ((PCIE_ADV_REG(pfd_p)->pcie_ue_mask) ^ 0xFFFFFFFF))
/*
 * PF_MASKED_SAER_ERR: secondary (PCIe-to-PCI bridge) UE status bits that are
 * not masked out by the SUE mask register.
 */
#define	PF_MASKED_SAER_ERR(pfd_p) \
	(PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_status & \
	    ((PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_mask) ^ 0xFFFFFFFF))
1364 /*
1365  * Analyse all the PCIe Fault Data (erpt) gathered during dispatch in the erpt
1366  * Queue.
1367  */
1368 static int
1369 pf_analyse_error(ddi_fm_error_t *derr, pf_impl_t *impl)
1370 {
1371 	int		sts_flags, error_flags = 0;
1372 	pf_data_t	*pfd_p;
1373 
1374 	for (pfd_p = impl->pf_dq_head_p; pfd_p; pfd_p = pfd_p->pe_next) {
1375 		sts_flags = 0;
1376 
1377 		/* skip analysing error when no error info is gathered */
1378 		if (pfd_p->pe_severity_flags == PF_ERR_PANIC_BAD_RESPONSE)
1379 			goto done;
1380 
1381 		switch (PCIE_PFD2BUS(pfd_p)->bus_dev_type) {
1382 		case PCIE_PCIECAP_DEV_TYPE_PCIE_DEV:
1383 		case PCIE_PCIECAP_DEV_TYPE_PCI_DEV:
1384 			if (PCIE_DEVSTS_CE_DETECTED &
1385 			    PCIE_ERR_REG(pfd_p)->pcie_err_status)
1386 				sts_flags |= PF_ERR_CE;
1387 
1388 			pf_adjust_for_no_aer(pfd_p);
1389 			sts_flags |= pf_analyse_error_tbl(derr, impl,
1390 			    pfd_p, pcie_pcie_tbl, PF_MASKED_AER_ERR(pfd_p));
1391 			break;
1392 		case PCIE_PCIECAP_DEV_TYPE_ROOT:
1393 			pf_adjust_for_no_aer(pfd_p);
1394 			sts_flags |= pf_analyse_error_tbl(derr, impl,
1395 			    pfd_p, pcie_rp_tbl, PF_MASKED_AER_ERR(pfd_p));
1396 			break;
1397 		case PCIE_PCIECAP_DEV_TYPE_RC_PSEUDO:
1398 			/* no adjust_for_aer for pseudo RC */
1399 			/* keep the severity passed on from RC if any */
1400 			sts_flags |= pfd_p->pe_severity_flags;
1401 			sts_flags |= pf_analyse_error_tbl(derr, impl, pfd_p,
1402 			    pcie_rp_tbl, PF_MASKED_AER_ERR(pfd_p));
1403 			break;
1404 		case PCIE_PCIECAP_DEV_TYPE_UP:
1405 		case PCIE_PCIECAP_DEV_TYPE_DOWN:
1406 			if (PCIE_DEVSTS_CE_DETECTED &
1407 			    PCIE_ERR_REG(pfd_p)->pcie_err_status)
1408 				sts_flags |= PF_ERR_CE;
1409 
1410 			pf_adjust_for_no_aer(pfd_p);
1411 			sts_flags |= pf_analyse_error_tbl(derr, impl,
1412 			    pfd_p, pcie_sw_tbl, PF_MASKED_AER_ERR(pfd_p));
1413 			break;
1414 		case PCIE_PCIECAP_DEV_TYPE_PCIE2PCI:
1415 			if (PCIE_DEVSTS_CE_DETECTED &
1416 			    PCIE_ERR_REG(pfd_p)->pcie_err_status)
1417 				sts_flags |= PF_ERR_CE;
1418 
1419 			pf_adjust_for_no_aer(pfd_p);
1420 			pf_adjust_for_no_saer(pfd_p);
1421 			sts_flags |= pf_analyse_error_tbl(derr,
1422 			    impl, pfd_p, pcie_pcie_tbl,
1423 			    PF_MASKED_AER_ERR(pfd_p));
1424 			sts_flags |= pf_analyse_error_tbl(derr,
1425 			    impl, pfd_p, pcie_pcie_bdg_tbl,
1426 			    PF_MASKED_SAER_ERR(pfd_p));
1427 			/*
1428 			 * Some non-compliant PCIe devices do not utilize PCIe
1429 			 * error registers.  So fallthrough and rely on legacy
1430 			 * PCI error registers.
1431 			 */
1432 			if ((PCIE_DEVSTS_NFE_DETECTED | PCIE_DEVSTS_FE_DETECTED)
1433 			    & PCIE_ERR_REG(pfd_p)->pcie_err_status)
1434 				break;
1435 			/* FALLTHROUGH */
1436 		case PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO:
1437 			sts_flags |= pf_analyse_error_tbl(derr, impl,
1438 			    pfd_p, pcie_pci_tbl,
1439 			    PCI_ERR_REG(pfd_p)->pci_err_status);
1440 
1441 			if (!PCIE_IS_BDG(PCIE_PFD2BUS(pfd_p)))
1442 				break;
1443 
1444 			sts_flags |= pf_analyse_error_tbl(derr,
1445 			    impl, pfd_p, pcie_pci_bdg_tbl,
1446 			    PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat);
1447 		}
1448 
1449 		pfd_p->pe_severity_flags = sts_flags;
1450 
1451 done:
1452 		pfd_p->pe_orig_severity_flags = pfd_p->pe_severity_flags;
1453 		/* Have pciev_eh adjust the severity */
1454 		pfd_p->pe_severity_flags = pciev_eh(pfd_p, impl);
1455 
1456 		error_flags |= pfd_p->pe_severity_flags;
1457 	}
1458 
1459 	return (error_flags);
1460 }
1461 
1462 static int
1463 pf_analyse_error_tbl(ddi_fm_error_t *derr, pf_impl_t *impl,
1464     pf_data_t *pfd_p, const pf_fab_err_tbl_t *tbl, uint32_t err_reg)
1465 {
1466 	const pf_fab_err_tbl_t *row;
1467 	int err = 0;
1468 	uint16_t flags;
1469 	uint32_t bit;
1470 
1471 	for (row = tbl; err_reg && (row->bit != NULL); row++) {
1472 		bit = row->bit;
1473 		if (!(err_reg & bit))
1474 			continue;
1475 		err |= row->handler(derr, bit, impl->pf_dq_head_p, pfd_p);
1476 
1477 		flags = row->affected_flags;
1478 		/*
1479 		 * check if the primary flag is valid;
1480 		 * if not, use the secondary flag
1481 		 */
1482 		if (flags & PF_AFFECTED_AER) {
1483 			if (!HAS_AER_LOGS(pfd_p, bit)) {
1484 				flags = row->sec_affected_flags;
1485 			}
1486 		} else if (flags & PF_AFFECTED_SAER) {
1487 			if (!HAS_SAER_LOGS(pfd_p, bit)) {
1488 				flags = row->sec_affected_flags;
1489 			}
1490 		} else if (flags & PF_AFFECTED_ADDR) {
1491 			/* only Root has this flag */
1492 			if (PCIE_ROOT_FAULT(pfd_p)->scan_addr == 0) {
1493 				flags = row->sec_affected_flags;
1494 			}
1495 		}
1496 
1497 		PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags |= flags;
1498 	}
1499 
1500 	if (!err)
1501 		err = PF_ERR_NO_ERROR;
1502 
1503 	return (err);
1504 }
1505 
1506 /*
1507  * PCIe Completer Abort and Unsupport Request error analyser.  If a PCIe device
1508  * issues a CA/UR a corresponding Received CA/UR should have been seen in the
1509  * PCIe root complex.  Check to see if RC did indeed receive a CA/UR, if so then
1510  * this error may be safely ignored.  If not check the logs and see if an
1511  * associated handler for this transaction can be found.
1512  */
1513 /* ARGSUSED */
1514 static int
1515 pf_analyse_ca_ur(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1516     pf_data_t *pfd_p)
1517 {
1518 	uint32_t	abort_type;
1519 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1520 
1521 	/* If UR's are masked forgive this error */
1522 	if ((pcie_get_aer_uce_mask() & PCIE_AER_UCE_UR) &&
1523 	    (bit == PCIE_AER_UCE_UR))
1524 		return (PF_ERR_NO_PANIC);
1525 
1526 	/*
1527 	 * If a RP has an CA/UR it means a leaf sent a bad request to the RP
1528 	 * such as a config read or a bad DMA address.
1529 	 */
1530 	if (PCIE_IS_RP(PCIE_PFD2BUS(pfd_p)))
1531 		goto handle_lookup;
1532 
1533 	if (bit == PCIE_AER_UCE_UR)
1534 		abort_type = PCI_STAT_R_MAST_AB;
1535 	else
1536 		abort_type = PCI_STAT_R_TARG_AB;
1537 
1538 	if (pf_matched_in_rc(dq_head_p, pfd_p, abort_type))
1539 		return (PF_ERR_MATCHED_RC);
1540 
1541 handle_lookup:
1542 	if (HAS_AER_LOGS(pfd_p, bit) &&
1543 	    pf_log_hdl_lookup(rpdip, derr, pfd_p, B_TRUE) == PF_HDL_FOUND)
1544 			return (PF_ERR_MATCHED_DEVICE);
1545 
1546 	return (PF_ERR_PANIC);
1547 }
1548 
1549 /*
1550  * PCIe-PCI Bridge Received Master Abort and Target error analyser.  If a PCIe
1551  * Bridge receives a MA/TA a corresponding sent CA/UR should have been seen in
1552  * the PCIe root complex.  Check to see if RC did indeed receive a CA/UR, if so
1553  * then this error may be safely ignored.  If not check the logs and see if an
1554  * associated handler for this transaction can be found.
1555  */
1556 /* ARGSUSED */
1557 static int
1558 pf_analyse_ma_ta(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1559     pf_data_t *pfd_p)
1560 {
1561 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1562 	uint32_t	abort_type;
1563 
1564 	/* If UR's are masked forgive this error */
1565 	if ((pcie_get_aer_uce_mask() & PCIE_AER_UCE_UR) &&
1566 	    (bit == PCIE_AER_SUCE_RCVD_MA))
1567 		return (PF_ERR_NO_PANIC);
1568 
1569 	if (bit == PCIE_AER_SUCE_RCVD_MA)
1570 		abort_type = PCI_STAT_R_MAST_AB;
1571 	else
1572 		abort_type = PCI_STAT_R_TARG_AB;
1573 
1574 	if (pf_matched_in_rc(dq_head_p, pfd_p, abort_type))
1575 		return (PF_ERR_MATCHED_RC);
1576 
1577 	if (!HAS_SAER_LOGS(pfd_p, bit))
1578 		return (PF_ERR_PANIC);
1579 
1580 	if (pf_log_hdl_lookup(rpdip, derr, pfd_p, B_FALSE) == PF_HDL_FOUND)
1581 		return (PF_ERR_MATCHED_DEVICE);
1582 
1583 	return (PF_ERR_PANIC);
1584 }
1585 
1586 /*
1587  * Generic PCI error analyser.  This function is used for Parity Errors,
1588  * Received Master Aborts, Received Target Aborts, and Signaled Target Aborts.
1589  * In general PCI devices do not have error logs, it is very difficult to figure
1590  * out what transaction caused the error.  Instead find the nearest PCIe-PCI
1591  * Bridge and check to see if it has logs and if it has an error associated with
1592  * this PCI Device.
1593  */
1594 /* ARGSUSED */
1595 static int
1596 pf_analyse_pci(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1597     pf_data_t *pfd_p)
1598 {
1599 	pf_data_t	*parent_pfd_p;
1600 	uint16_t	cmd;
1601 	uint32_t	aer_ue_status;
1602 	pcie_bus_t	*bus_p = PCIE_PFD2BUS(pfd_p);
1603 	pf_pcie_adv_bdg_err_regs_t *parent_saer_p;
1604 
1605 	if (PCI_ERR_REG(pfd_p)->pci_err_status & PCI_STAT_S_SYSERR)
1606 		return (PF_ERR_PANIC);
1607 
1608 	/* If UR's are masked forgive this error */
1609 	if ((pcie_get_aer_uce_mask() & PCIE_AER_UCE_UR) &&
1610 	    (bit == PCI_STAT_R_MAST_AB))
1611 		return (PF_ERR_NO_PANIC);
1612 
1613 
1614 	if (bit & (PCI_STAT_PERROR | PCI_STAT_S_PERROR)) {
1615 		aer_ue_status = PCIE_AER_SUCE_PERR_ASSERT;
1616 	} else {
1617 		aer_ue_status = (PCIE_AER_SUCE_TA_ON_SC |
1618 		    PCIE_AER_SUCE_MA_ON_SC | PCIE_AER_SUCE_RCVD_TA |
1619 		    PCIE_AER_SUCE_RCVD_MA);
1620 	}
1621 
1622 	parent_pfd_p = pf_get_parent_pcie_bridge(pfd_p);
1623 	if (parent_pfd_p == NULL)
1624 		return (PF_ERR_PANIC);
1625 
1626 	/* Check if parent bridge has seen this error */
1627 	parent_saer_p = PCIE_ADV_BDG_REG(parent_pfd_p);
1628 	if (!(parent_saer_p->pcie_sue_status & aer_ue_status) ||
1629 	    !HAS_SAER_LOGS(parent_pfd_p, aer_ue_status))
1630 		return (PF_ERR_PANIC);
1631 
1632 	/*
1633 	 * If the addr or bdf from the parent PCIe bridge logs belong to this
1634 	 * PCI device, assume the PCIe bridge's error handling has already taken
1635 	 * care of this PCI device's error.
1636 	 */
1637 	if (pf_pci_decode(parent_pfd_p, &cmd) != DDI_SUCCESS)
1638 		return (PF_ERR_PANIC);
1639 
1640 	if ((parent_saer_p->pcie_sue_tgt_bdf == bus_p->bus_bdf) ||
1641 	    pf_in_addr_range(bus_p, parent_saer_p->pcie_sue_tgt_addr))
1642 		return (PF_ERR_MATCHED_PARENT);
1643 
1644 	/*
1645 	 * If this device is a PCI-PCI bridge, check if the bdf in the parent
1646 	 * PCIe bridge logs is in the range of this PCI-PCI Bridge's bus ranges.
1647 	 * If they are, then assume the PCIe bridge's error handling has already
1648 	 * taken care of this PCI-PCI bridge device's error.
1649 	 */
1650 	if (PCIE_IS_BDG(bus_p) &&
1651 	    pf_in_bus_range(bus_p, parent_saer_p->pcie_sue_tgt_bdf))
1652 		return (PF_ERR_MATCHED_PARENT);
1653 
1654 	return (PF_ERR_PANIC);
1655 }
1656 
1657 /*
1658  * PCIe Bridge transactions associated with PERR.
1659  * o Bridge received a poisoned Non-Posted Write (CFG Writes) from PCIe
1660  * o Bridge received a poisoned Posted Write from (MEM Writes) from PCIe
1661  * o Bridge received a poisoned Completion on a Split Transction from PCIe
1662  * o Bridge received a poisoned Completion on a Delayed Transction from PCIe
1663  *
1664  * Check for non-poisoned PCIe transactions that got forwarded to the secondary
1665  * side and detects a PERR#.  Except for delayed read completions, a poisoned
1666  * TLP will be forwarded to the secondary bus and PERR# will be asserted.
1667  */
1668 /* ARGSUSED */
1669 static int
1670 pf_analyse_perr_assert(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1671     pf_data_t *pfd_p)
1672 {
1673 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1674 	uint16_t	cmd;
1675 	int		hdl_sts = PF_HDL_NOTFOUND;
1676 	int		err = PF_ERR_NO_ERROR;
1677 	pf_pcie_adv_bdg_err_regs_t *saer_p;
1678 
1679 
1680 	if (HAS_SAER_LOGS(pfd_p, bit)) {
1681 		saer_p = PCIE_ADV_BDG_REG(pfd_p);
1682 		if (pf_pci_decode(pfd_p, &cmd) != DDI_SUCCESS)
1683 			return (PF_ERR_PANIC);
1684 
1685 cmd_switch:
1686 		switch (cmd) {
1687 		case PCI_PCIX_CMD_IOWR:
1688 		case PCI_PCIX_CMD_MEMWR:
1689 		case PCI_PCIX_CMD_MEMWR_BL:
1690 		case PCI_PCIX_CMD_MEMWRBL:
1691 			/* Posted Writes Transactions */
1692 			if (saer_p->pcie_sue_tgt_trans == PF_ADDR_PIO)
1693 				hdl_sts = pf_log_hdl_lookup(rpdip, derr, pfd_p,
1694 				    B_FALSE);
1695 			break;
1696 		case PCI_PCIX_CMD_CFWR:
1697 			/*
1698 			 * Check to see if it is a non-posted write.  If so, a
1699 			 * UR Completion would have been sent.
1700 			 */
1701 			if (pf_matched_in_rc(dq_head_p, pfd_p,
1702 			    PCI_STAT_R_MAST_AB)) {
1703 				hdl_sts = PF_HDL_FOUND;
1704 				err = PF_ERR_MATCHED_RC;
1705 				goto done;
1706 			}
1707 			hdl_sts = pf_log_hdl_lookup(rpdip, derr, pfd_p,
1708 			    B_FALSE);
1709 			break;
1710 		case PCI_PCIX_CMD_SPL:
1711 			hdl_sts = pf_log_hdl_lookup(rpdip, derr, pfd_p,
1712 			    B_FALSE);
1713 			break;
1714 		case PCI_PCIX_CMD_DADR:
1715 			cmd = (PCIE_ADV_BDG_HDR(pfd_p, 1) >>
1716 			    PCIE_AER_SUCE_HDR_CMD_UP_SHIFT) &
1717 			    PCIE_AER_SUCE_HDR_CMD_UP_MASK;
1718 			if (cmd != PCI_PCIX_CMD_DADR)
1719 				goto cmd_switch;
1720 			/* FALLTHROUGH */
1721 		default:
1722 			/* Unexpected situation, panic */
1723 			hdl_sts = PF_HDL_NOTFOUND;
1724 		}
1725 
1726 		if (hdl_sts == PF_HDL_FOUND)
1727 			err = PF_ERR_MATCHED_DEVICE;
1728 		else
1729 			err = PF_ERR_PANIC;
1730 	} else {
1731 		/*
1732 		 * Check to see if it is a non-posted write.  If so, a UR
1733 		 * Completion would have been sent.
1734 		 */
1735 		if ((PCIE_ERR_REG(pfd_p)->pcie_err_status &
1736 		    PCIE_DEVSTS_UR_DETECTED) &&
1737 		    pf_matched_in_rc(dq_head_p, pfd_p, PCI_STAT_R_MAST_AB))
1738 			err = PF_ERR_MATCHED_RC;
1739 
1740 		/* Check for posted writes.  Transaction is lost. */
1741 		if (PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat &
1742 		    PCI_STAT_S_PERROR)
1743 			err = PF_ERR_PANIC;
1744 
1745 		/*
1746 		 * All other scenarios are due to read completions.  Check for
1747 		 * PERR on the primary side.  If found the primary side error
1748 		 * handling will take care of this error.
1749 		 */
1750 		if (err == PF_ERR_NO_ERROR) {
1751 			if (PCI_ERR_REG(pfd_p)->pci_err_status &
1752 			    PCI_STAT_PERROR)
1753 				err = PF_ERR_MATCHED_PARENT;
1754 			else
1755 				err = PF_ERR_PANIC;
1756 		}
1757 	}
1758 
1759 done:
1760 	return (err);
1761 }
1762 
1763 /*
1764  * PCIe Poisoned TLP error analyser.  If a PCIe device receives a Poisoned TLP,
1765  * check the logs and see if an associated handler for this transaction can be
1766  * found.
1767  */
1768 /* ARGSUSED */
1769 static int
1770 pf_analyse_ptlp(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1771     pf_data_t *pfd_p)
1772 {
1773 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1774 
1775 	/*
1776 	 * If AERs are supported find the logs in this device, otherwise look in
1777 	 * it's parent's logs.
1778 	 */
1779 	if (HAS_AER_LOGS(pfd_p, bit)) {
1780 		pcie_tlp_hdr_t *hdr = (pcie_tlp_hdr_t *)&PCIE_ADV_HDR(pfd_p, 0);
1781 
1782 		/*
1783 		 * Double check that the log contains a poisoned TLP.
1784 		 * Some devices like PLX switch do not log poison TLP headers.
1785 		 */
1786 		if (hdr->ep) {
1787 			if (pf_log_hdl_lookup(rpdip, derr, pfd_p, B_TRUE) ==
1788 			    PF_HDL_FOUND)
1789 				return (PF_ERR_MATCHED_DEVICE);
1790 		}
1791 
1792 		/*
1793 		 * If an address is found and hdl lookup failed panic.
1794 		 * Otherwise check parents to see if there was enough
1795 		 * information recover.
1796 		 */
1797 		if (PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_addr)
1798 			return (PF_ERR_PANIC);
1799 	}
1800 
1801 	/*
1802 	 * Check to see if the rc has already handled this error or a parent has
1803 	 * already handled this error.
1804 	 *
1805 	 * If the error info in the RC wasn't enough to find the fault device,
1806 	 * such as if the faulting device lies behind a PCIe-PCI bridge from a
1807 	 * poisoned completion, check to see if the PCIe-PCI bridge has enough
1808 	 * info to recover.  For completion TLP's, the AER header logs only
1809 	 * contain the faulting BDF in the Root Port.  For PCIe device the fault
1810 	 * BDF is the fault device.  But if the fault device is behind a
1811 	 * PCIe-PCI bridge the fault BDF could turn out just to be a PCIe-PCI
1812 	 * bridge's secondary bus number.
1813 	 */
1814 	if (!PFD_IS_ROOT(pfd_p)) {
1815 		dev_info_t *pdip = ddi_get_parent(PCIE_PFD2DIP(pfd_p));
1816 		pf_data_t *parent_pfd_p;
1817 
1818 		if (PCIE_PFD2BUS(pfd_p)->bus_rp_dip == pdip) {
1819 			if (pf_matched_in_rc(dq_head_p, pfd_p, PCI_STAT_PERROR))
1820 				return (PF_ERR_MATCHED_RC);
1821 		}
1822 
1823 		parent_pfd_p = PCIE_DIP2PFD(pdip);
1824 
1825 		if (HAS_AER_LOGS(parent_pfd_p, bit))
1826 			return (PF_ERR_MATCHED_PARENT);
1827 	} else {
1828 		pf_data_t *bdg_pfd_p;
1829 		pcie_req_id_t secbus;
1830 
1831 		/*
1832 		 * Looking for a pcie bridge only makes sense if the BDF
1833 		 * Dev/Func = 0/0
1834 		 */
1835 		if (!PCIE_HAS_AER(PCIE_PFD2BUS(pfd_p)))
1836 			goto done;
1837 
1838 		secbus = PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf;
1839 
1840 		if (!PCIE_CHECK_VALID_BDF(secbus) || (secbus & 0xFF))
1841 			goto done;
1842 
1843 		bdg_pfd_p = pf_get_pcie_bridge(pfd_p, secbus);
1844 
1845 		if (bdg_pfd_p && HAS_SAER_LOGS(bdg_pfd_p,
1846 		    PCIE_AER_SUCE_PERR_ASSERT)) {
1847 			return pf_analyse_perr_assert(derr,
1848 			    PCIE_AER_SUCE_PERR_ASSERT, dq_head_p, pfd_p);
1849 		}
1850 	}
1851 done:
1852 	return (PF_ERR_PANIC);
1853 }
1854 
1855 /*
1856  * PCIe-PCI Bridge Received Master and Target abort error analyser on Split
1857  * Completions.  If a PCIe Bridge receives a MA/TA check logs and see if an
1858  * associated handler for this transaction can be found.
1859  */
1860 /* ARGSUSED */
1861 static int
1862 pf_analyse_sc(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1863     pf_data_t *pfd_p)
1864 {
1865 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1866 	uint16_t	cmd;
1867 	int		sts = PF_HDL_NOTFOUND;
1868 
1869 	if (!HAS_SAER_LOGS(pfd_p, bit))
1870 		return (PF_ERR_PANIC);
1871 
1872 	if (pf_pci_decode(pfd_p, &cmd) != DDI_SUCCESS)
1873 		return (PF_ERR_PANIC);
1874 
1875 	if (cmd == PCI_PCIX_CMD_SPL)
1876 		sts = pf_log_hdl_lookup(rpdip, derr, pfd_p, B_FALSE);
1877 
1878 	if (sts == PF_HDL_FOUND)
1879 		return (PF_ERR_MATCHED_DEVICE);
1880 
1881 	return (PF_ERR_PANIC);
1882 }
1883 
1884 /*
1885  * PCIe Timeout error analyser.  This error can be forgiven if it is marked as
1886  * CE Advisory.  If it is marked as advisory, this means the HW can recover
1887  * and/or retry the transaction automatically.
1888  */
1889 /* ARGSUSED */
1890 static int
1891 pf_analyse_to(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1892     pf_data_t *pfd_p)
1893 {
1894 	if (HAS_AER_LOGS(pfd_p, bit) && CE_ADVISORY(pfd_p))
1895 		return (PF_ERR_NO_PANIC);
1896 
1897 	return (PF_ERR_PANIC);
1898 }
1899 
1900 /*
1901  * PCIe Unexpected Completion.  Check to see if this TLP was misrouted by
1902  * matching the device BDF with the TLP Log.  If misrouting panic, otherwise
1903  * don't panic.
1904  */
1905 /* ARGSUSED */
1906 static int
1907 pf_analyse_uc(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1908     pf_data_t *pfd_p)
1909 {
1910 	if (HAS_AER_LOGS(pfd_p, bit) &&
1911 	    (PCIE_PFD2BUS(pfd_p)->bus_bdf == (PCIE_ADV_HDR(pfd_p, 2) >> 16)))
1912 		return (PF_ERR_NO_PANIC);
1913 
1914 	/*
1915 	 * This is a case of mis-routing. Any of the switches above this
1916 	 * device could be at fault.
1917 	 */
1918 	PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags = PF_AFFECTED_ROOT;
1919 
1920 	return (PF_ERR_PANIC);
1921 }
1922 
1923 /*
1924  * PCIe-PCI Bridge Uncorrectable Data error analyser.  All Uncorrectable Data
1925  * errors should have resulted in a PCIe Poisoned TLP to the RC, except for
1926  * Posted Writes.  Check the logs for Posted Writes and if the RC did not see a
1927  * Poisoned TLP.
1928  *
1929  * Non-Posted Writes will also generate a UR in the completion status, which the
1930  * RC should also see.
1931  */
1932 /* ARGSUSED */
1933 static int
1934 pf_analyse_uc_data(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1935     pf_data_t *pfd_p)
1936 {
1937 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1938 
1939 	if (!HAS_SAER_LOGS(pfd_p, bit))
1940 		return (PF_ERR_PANIC);
1941 
1942 	if (pf_matched_in_rc(dq_head_p, pfd_p, PCI_STAT_PERROR))
1943 		return (PF_ERR_MATCHED_RC);
1944 
1945 	if (pf_log_hdl_lookup(rpdip, derr, pfd_p, B_FALSE) == PF_HDL_FOUND)
1946 		return (PF_ERR_MATCHED_DEVICE);
1947 
1948 	return (PF_ERR_PANIC);
1949 }
1950 
1951 /* ARGSUSED */
1952 static int
1953 pf_no_panic(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1954     pf_data_t *pfd_p)
1955 {
1956 	return (PF_ERR_NO_PANIC);
1957 }
1958 
1959 /* ARGSUSED */
1960 static int
1961 pf_panic(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1962     pf_data_t *pfd_p)
1963 {
1964 	return (PF_ERR_PANIC);
1965 }
1966 
1967 /*
1968  * If a PCIe device does not support AER, assume all AER statuses have been set,
1969  * unless other registers do not indicate a certain error occuring.
1970  */
1971 static void
1972 pf_adjust_for_no_aer(pf_data_t *pfd_p)
1973 {
1974 	uint32_t	aer_ue = 0;
1975 	uint16_t	status;
1976 
1977 	if (PCIE_HAS_AER(PCIE_PFD2BUS(pfd_p)))
1978 		return;
1979 
1980 	if (PCIE_ERR_REG(pfd_p)->pcie_err_status & PCIE_DEVSTS_FE_DETECTED)
1981 		aer_ue = PF_AER_FATAL_ERR;
1982 
1983 	if (PCIE_ERR_REG(pfd_p)->pcie_err_status & PCIE_DEVSTS_NFE_DETECTED) {
1984 		aer_ue = PF_AER_NON_FATAL_ERR;
1985 		status = PCI_ERR_REG(pfd_p)->pci_err_status;
1986 
1987 		/* Check if the device received a PTLP */
1988 		if (!(status & PCI_STAT_PERROR))
1989 			aer_ue &= ~PCIE_AER_UCE_PTLP;
1990 
1991 		/* Check if the device signaled a CA */
1992 		if (!(status & PCI_STAT_S_TARG_AB))
1993 			aer_ue &= ~PCIE_AER_UCE_CA;
1994 
1995 		/* Check if the device sent a UR */
1996 		if (!(PCIE_ERR_REG(pfd_p)->pcie_err_status &
1997 		    PCIE_DEVSTS_UR_DETECTED))
1998 			aer_ue &= ~PCIE_AER_UCE_UR;
1999 
2000 		/*
2001 		 * Ignore ECRCs as it is optional and will manefest itself as
2002 		 * another error like PTLP and MFP
2003 		 */
2004 		aer_ue &= ~PCIE_AER_UCE_ECRC;
2005 
2006 		/*
2007 		 * Generally if NFE is set, SERR should also be set. Exception:
2008 		 * When certain non-fatal errors are masked, and some of them
2009 		 * happened to be the cause of the NFE, SERR will not be set and
2010 		 * they can not be the source of this interrupt.
2011 		 *
2012 		 * On x86, URs are masked (NFE + UR can be set), if any other
2013 		 * non-fatal errors (i.e, PTLP, CTO, CA, UC, ECRC, ACS) did
2014 		 * occur, SERR should be set since they are not masked. So if
2015 		 * SERR is not set, none of them occurred.
2016 		 */
2017 		if (!(status & PCI_STAT_S_SYSERR))
2018 			aer_ue &= ~PCIE_AER_UCE_TO;
2019 	}
2020 
2021 	if (!PCIE_IS_BDG(PCIE_PFD2BUS(pfd_p))) {
2022 		aer_ue &= ~PCIE_AER_UCE_TRAINING;
2023 		aer_ue &= ~PCIE_AER_UCE_SD;
2024 	}
2025 
2026 	PCIE_ADV_REG(pfd_p)->pcie_ue_status = aer_ue;
2027 }
2028 
/*
 * Counterpart of pf_adjust_for_no_aer for the secondary side of PCIe-PCI
 * bridges: if the device lacks AER, synthesize the secondary UE (SUE) status
 * from the PCIe device status and the secondary PCI status registers.
 */
static void
pf_adjust_for_no_saer(pf_data_t *pfd_p)
{
	uint32_t	s_aer_ue = 0;
	uint16_t	status;

	/* Nothing to synthesize if the device has real AER registers. */
	if (PCIE_HAS_AER(PCIE_PFD2BUS(pfd_p)))
		return;

	/* Fatal detected: start by assuming every fatal secondary UE class. */
	if (PCIE_ERR_REG(pfd_p)->pcie_err_status & PCIE_DEVSTS_FE_DETECTED)
		s_aer_ue = PF_SAER_FATAL_ERR;

	/*
	 * Non-fatal detected: start from the full non-fatal mask, then clear
	 * bits that the secondary status register rules out.
	 * NOTE(review): this overwrites the fatal mask when both FE and NFE
	 * are detected — confirm that is intended.
	 */
	if (PCIE_ERR_REG(pfd_p)->pcie_err_status & PCIE_DEVSTS_NFE_DETECTED) {
		s_aer_ue = PF_SAER_NON_FATAL_ERR;
		status = PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat;

		/* Check if the device received a UC_DATA */
		if (!(status & PCI_STAT_PERROR))
			s_aer_ue &= ~PCIE_AER_SUCE_UC_DATA_ERR;

		/* Check if the device received a RCVD_MA/MA_ON_SC */
		if (!(status & (PCI_STAT_R_MAST_AB))) {
			s_aer_ue &= ~PCIE_AER_SUCE_RCVD_MA;
			s_aer_ue &= ~PCIE_AER_SUCE_MA_ON_SC;
		}

		/* Check if the device received a RCVD_TA/TA_ON_SC */
		if (!(status & (PCI_STAT_R_TARG_AB))) {
			s_aer_ue &= ~PCIE_AER_SUCE_RCVD_TA;
			s_aer_ue &= ~PCIE_AER_SUCE_TA_ON_SC;
		}
	}

	/* Publish the synthesized SUE status for the analysers. */
	PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_status = s_aer_ue;
}
2064 
2065 /* Find the PCIe-PCI bridge based on secondary bus number */
2066 static pf_data_t *
2067 pf_get_pcie_bridge(pf_data_t *pfd_p, pcie_req_id_t secbus)
2068 {
2069 	pf_data_t *bdg_pfd_p;
2070 
2071 	/* Search down for the PCIe-PCI device. */
2072 	for (bdg_pfd_p = pfd_p->pe_next; bdg_pfd_p;
2073 	    bdg_pfd_p = bdg_pfd_p->pe_next) {
2074 		if (PCIE_IS_PCIE_BDG(PCIE_PFD2BUS(bdg_pfd_p)) &&
2075 		    PCIE_PFD2BUS(bdg_pfd_p)->bus_bdg_secbus == secbus)
2076 			return (bdg_pfd_p);
2077 	}
2078 
2079 	return (NULL);
2080 }
2081 
2082 /* Find the PCIe-PCI bridge of a PCI device */
2083 static pf_data_t *
2084 pf_get_parent_pcie_bridge(pf_data_t *pfd_p)
2085 {
2086 	dev_info_t	*dip, *rp_dip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
2087 
2088 	/* This only makes sense if the device is a PCI device */
2089 	if (!PCIE_IS_PCI(PCIE_PFD2BUS(pfd_p)))
2090 		return (NULL);
2091 
2092 	/*
2093 	 * Search up for the PCIe-PCI device.  Watchout for x86 where pci
2094 	 * devices hang directly off of NPE.
2095 	 */
2096 	for (dip = PCIE_PFD2DIP(pfd_p); dip; dip = ddi_get_parent(dip)) {
2097 		if (dip == rp_dip)
2098 			dip = NULL;
2099 
2100 		if (PCIE_IS_PCIE_BDG(PCIE_DIP2BUS(dip)))
2101 			return (PCIE_DIP2PFD(dip));
2102 	}
2103 
2104 	return (NULL);
2105 }
2106 
2107 /*
2108  * See if a leaf error was bubbled up to the Root Complex (RC) and handled.
2109  * As of right now only RC's have enough information to have errors found in the
2110  * fabric to be matched to the RC.  Note that Root Port's (RP) do not carry
2111  * enough information.  Currently known RC's are SPARC Fire architecture and
2112  * it's equivalents, and x86's NPE.
2113  * SPARC Fire architectures have a plethora of error registers, while currently
2114  * NPE only have the address of a failed load.
2115  *
2116  * Check if the RC logged an error with the appropriate status type/abort type.
2117  * Ex: Parity Error, Received Master/Target Abort
2118  * Check if either the fault address found in the rc matches the device's
2119  * assigned address range (PIO's only) or the fault BDF in the rc matches the
2120  * device's BDF or Secondary Bus/Bus Range.
2121  */
2122 static boolean_t
2123 pf_matched_in_rc(pf_data_t *dq_head_p, pf_data_t *pfd_p,
2124     uint32_t abort_type)
2125 {
2126 	pcie_bus_t	*bus_p = PCIE_PFD2BUS(pfd_p);
2127 	pf_data_t	*rc_pfd_p;
2128 	pcie_req_id_t	fault_bdf;
2129 
2130 	for (rc_pfd_p = dq_head_p; PFD_IS_ROOT(rc_pfd_p);
2131 	    rc_pfd_p = rc_pfd_p->pe_next) {
2132 		/* Only root complex's have enough information to match */
2133 		if (!PCIE_IS_RC(PCIE_PFD2BUS(rc_pfd_p)))
2134 			continue;
2135 
2136 		/* If device and rc abort type does not match continue */
2137 		if (!(PCI_BDG_ERR_REG(rc_pfd_p)->pci_bdg_sec_stat & abort_type))
2138 			continue;
2139 
2140 		fault_bdf = PCIE_ROOT_FAULT(rc_pfd_p)->scan_bdf;
2141 
2142 		/* The Fault BDF = Device's BDF */
2143 		if (fault_bdf == bus_p->bus_bdf)
2144 			return (B_TRUE);
2145 
2146 		/* The Fault Addr is in device's address range */
2147 		if (pf_in_addr_range(bus_p,
2148 		    PCIE_ROOT_FAULT(rc_pfd_p)->scan_addr))
2149 			return (B_TRUE);
2150 
2151 		/* The Fault BDF is from PCIe-PCI Bridge's secondary bus */
2152 		if (PCIE_IS_PCIE_BDG(bus_p) &&
2153 		    pf_in_bus_range(bus_p, fault_bdf))
2154 			return (B_TRUE);
2155 	}
2156 
2157 	return (B_FALSE);
2158 }
2159 
2160 /*
2161  * Check the RP and see if the error is PIO/DMA.  If the RP also has a PERR then
2162  * it is a DMA, otherwise it's a PIO
2163  */
2164 static void
2165 pf_pci_find_trans_type(pf_data_t *pfd_p, uint64_t *addr, uint32_t *trans_type,
2166     pcie_req_id_t *bdf) {
2167 	pf_data_t *rc_pfd_p;
2168 
2169 	/* Could be DMA or PIO.  Find out by look at error type. */
2170 	switch (PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_status) {
2171 	case PCIE_AER_SUCE_TA_ON_SC:
2172 	case PCIE_AER_SUCE_MA_ON_SC:
2173 		*trans_type = PF_ADDR_DMA;
2174 		return;
2175 	case PCIE_AER_SUCE_RCVD_TA:
2176 	case PCIE_AER_SUCE_RCVD_MA:
2177 		*bdf = PCIE_INVALID_BDF;
2178 		*trans_type = PF_ADDR_PIO;
2179 		return;
2180 	case PCIE_AER_SUCE_USC_ERR:
2181 	case PCIE_AER_SUCE_UC_DATA_ERR:
2182 	case PCIE_AER_SUCE_PERR_ASSERT:
2183 		break;
2184 	default:
2185 		*addr = 0;
2186 		*bdf = PCIE_INVALID_BDF;
2187 		*trans_type = 0;
2188 		return;
2189 	}
2190 
2191 	*bdf = PCIE_INVALID_BDF;
2192 	*trans_type = PF_ADDR_PIO;
2193 	for (rc_pfd_p = pfd_p->pe_prev; rc_pfd_p;
2194 	    rc_pfd_p = rc_pfd_p->pe_prev) {
2195 		if (PFD_IS_ROOT(rc_pfd_p) &&
2196 		    (PCI_BDG_ERR_REG(rc_pfd_p)->pci_bdg_sec_stat &
2197 		    PCI_STAT_PERROR)) {
2198 			*trans_type = PF_ADDR_DMA;
2199 			return;
2200 		}
2201 	}
2202 }
2203 
2204 /*
2205  * pf_pci_decode function decodes the secondary aer transaction logs in
2206  * PCIe-PCI bridges.
2207  *
2208  * The log is 128 bits long and arranged in this manner.
2209  * [0:35]   Transaction Attribute	(s_aer_h0-saer_h1)
2210  * [36:39]  Transaction lower command	(saer_h1)
2211  * [40:43]  Transaction upper command	(saer_h1)
2212  * [44:63]  Reserved
2213  * [64:127] Address			(saer_h2-saer_h3)
2214  */
2215 /* ARGSUSED */
2216 int
2217 pf_pci_decode(pf_data_t *pfd_p, uint16_t *cmd) {
2218 	pcix_attr_t	*attr;
2219 	uint64_t	addr;
2220 	uint32_t	trans_type;
2221 	pcie_req_id_t	bdf = PCIE_INVALID_BDF;
2222 
2223 	attr = (pcix_attr_t *)&PCIE_ADV_BDG_HDR(pfd_p, 0);
2224 	*cmd = GET_SAER_CMD(pfd_p);
2225 
2226 cmd_switch:
2227 	switch (*cmd) {
2228 	case PCI_PCIX_CMD_IORD:
2229 	case PCI_PCIX_CMD_IOWR:
2230 		/* IO Access should always be down stream */
2231 		addr = PCIE_ADV_BDG_HDR(pfd_p, 2);
2232 		bdf = attr->rid;
2233 		trans_type = PF_ADDR_PIO;
2234 		break;
2235 	case PCI_PCIX_CMD_MEMRD_DW:
2236 	case PCI_PCIX_CMD_MEMRD_BL:
2237 	case PCI_PCIX_CMD_MEMRDBL:
2238 	case PCI_PCIX_CMD_MEMWR:
2239 	case PCI_PCIX_CMD_MEMWR_BL:
2240 	case PCI_PCIX_CMD_MEMWRBL:
2241 		addr = ((uint64_t)PCIE_ADV_BDG_HDR(pfd_p, 3) <<
2242 		    PCIE_AER_SUCE_HDR_ADDR_SHIFT) | PCIE_ADV_BDG_HDR(pfd_p, 2);
2243 		bdf = attr->rid;
2244 
2245 		pf_pci_find_trans_type(pfd_p, &addr, &trans_type, &bdf);
2246 		break;
2247 	case PCI_PCIX_CMD_CFRD:
2248 	case PCI_PCIX_CMD_CFWR:
2249 		/*
2250 		 * CFG Access should always be down stream.  Match the BDF in
2251 		 * the address phase.
2252 		 */
2253 		addr = 0;
2254 		bdf = attr->rid;
2255 		trans_type = PF_ADDR_CFG;
2256 		break;
2257 	case PCI_PCIX_CMD_SPL:
2258 		/*
2259 		 * Check for DMA read completions.  The requesting BDF is in the
2260 		 * Address phase.
2261 		 */
2262 		addr = 0;
2263 		bdf = attr->rid;
2264 		trans_type = PF_ADDR_DMA;
2265 		break;
2266 	case PCI_PCIX_CMD_DADR:
2267 		/*
2268 		 * For Dual Address Cycles the transaction command is in the 2nd
2269 		 * address phase.
2270 		 */
2271 		*cmd = (PCIE_ADV_BDG_HDR(pfd_p, 1) >>
2272 		    PCIE_AER_SUCE_HDR_CMD_UP_SHIFT) &
2273 		    PCIE_AER_SUCE_HDR_CMD_UP_MASK;
2274 		if (*cmd != PCI_PCIX_CMD_DADR)
2275 			goto cmd_switch;
2276 		/* FALLTHROUGH */
2277 	default:
2278 		PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_trans = 0;
2279 		PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf = PCIE_INVALID_BDF;
2280 		PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_addr = 0;
2281 		return (DDI_FAILURE);
2282 	}
2283 	PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_trans = trans_type;
2284 	PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf = bdf;
2285 	PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_addr = addr;
2286 	return (DDI_SUCCESS);
2287 }
2288 
2289 /*
2290  * Based on either the BDF/ADDR find and mark the faulting DMA/ACC handler.
2291  * Returns either PF_HDL_NOTFOUND or PF_HDL_FOUND.
2292  */
2293 int
2294 pf_hdl_lookup(dev_info_t *dip, uint64_t ena, uint32_t flag, uint64_t addr,
2295     pcie_req_id_t bdf)
2296 {
2297 	ddi_fm_error_t		derr;
2298 
2299 	/* If we don't know the addr or rid just return with NOTFOUND */
2300 	if ((addr == NULL) && !PCIE_CHECK_VALID_BDF(bdf))
2301 		return (PF_HDL_NOTFOUND);
2302 
2303 	if (!(flag & (PF_ADDR_DMA | PF_ADDR_PIO | PF_ADDR_CFG))) {
2304 		return (PF_HDL_NOTFOUND);
2305 	}
2306 
2307 	bzero(&derr, sizeof (ddi_fm_error_t));
2308 	derr.fme_version = DDI_FME_VERSION;
2309 	derr.fme_flag = DDI_FM_ERR_UNEXPECTED;
2310 	derr.fme_ena = ena;
2311 
2312 	return (pf_hdl_child_lookup(dip, &derr, flag, addr, bdf));
2313 }
2314 
/*
 * Recursively search 'dip' and its children for the access/DMA handle
 * matching the fault address/BDF and mark it failed via pf_hdl_compare().
 * Returns PF_HDL_FOUND or PF_HDL_NOTFOUND.
 */
static int
pf_hdl_child_lookup(dev_info_t *dip, ddi_fm_error_t *derr, uint32_t flag,
    uint64_t addr, pcie_req_id_t bdf)
{
	int			status = PF_HDL_NOTFOUND;
	ndi_fmc_t		*fcp = NULL;
	struct i_ddi_fmhdl	*fmhdl = DEVI(dip)->devi_fmhdl;
	pcie_req_id_t		dip_bdf;
	boolean_t		have_lock = B_FALSE;
	pcie_bus_t		*bus_p;
	dev_info_t		*cdip;

	/* Skip devices whose bus structure is not set up yet. */
	if (!(bus_p = pf_is_ready(dip))) {
		return (status);
	}

	ASSERT(fmhdl);
	if (!i_ddi_fm_handler_owned(dip)) {
		/*
		 * pf_handler_enter always returns SUCCESS if the 'impl' arg is
		 * NULL.
		 */
		(void) pf_handler_enter(dip, NULL);
		have_lock = B_TRUE;
	}

	dip_bdf = PCI_GET_BDF(dip);

	/* Check if dip and BDF match, if not recurse to it's children. */
	if (!PCIE_IS_RC(bus_p) && (!PCIE_CHECK_VALID_BDF(bdf) ||
	    dip_bdf == bdf)) {
		/* DMA faults are matched against the DMA handle cache. */
		if ((flag & PF_ADDR_DMA) && DDI_FM_DMA_ERR_CAP(fmhdl->fh_cap))
			fcp = fmhdl->fh_dma_cache;
		else
			fcp = NULL;

		if (fcp)
			status = pf_hdl_compare(dip, derr, DMA_HANDLE, addr,
			    bdf, fcp);

		/*
		 * PIO/CFG faults are matched against the access handle cache.
		 * NOTE(review): a PF_HDL_FOUND from the DMA cache above is
		 * overwritten here when 'flag' carries both DMA and PIO/CFG
		 * bits — confirm that is intended.
		 */
		if (((flag & PF_ADDR_PIO) || (flag & PF_ADDR_CFG)) &&
		    DDI_FM_ACC_ERR_CAP(fmhdl->fh_cap))
			fcp = fmhdl->fh_acc_cache;
		else
			fcp = NULL;

		if (fcp)
			status = pf_hdl_compare(dip, derr, ACC_HANDLE, addr,
			    bdf, fcp);
	}

	/* If we found the handler or know it's this device, we're done */
	if (!PCIE_IS_RC(bus_p) && ((dip_bdf == bdf) ||
	    (status == PF_HDL_FOUND)))
		goto done;

	/*
	 * If the current device is a PCIe-PCI bridge we need to check for
	 * special cases:
	 *
	 * If it is a PIO and we don't have an address or this is a DMA, check
	 * to see if the BDF = secondary bus.  If so stop.  The BDF isn't a real
	 * BDF and the fault device could have come from any device in the PCI
	 * bus.
	 */
	if (PCIE_IS_PCIE_BDG(bus_p) &&
	    ((flag & PF_ADDR_DMA || flag & PF_ADDR_PIO)) &&
	    ((bus_p->bus_bdg_secbus << PCIE_REQ_ID_BUS_SHIFT) == bdf))
		goto done;

	/* If we can't find the handler check it's children */
	for (cdip = ddi_get_child(dip); cdip;
	    cdip = ddi_get_next_sibling(cdip)) {
		if ((bus_p = PCIE_DIP2BUS(cdip)) == NULL)
			continue;

		/* Only descend into children that could own the fault. */
		if (pf_in_bus_range(bus_p, bdf) ||
		    pf_in_addr_range(bus_p, addr))
			status = pf_hdl_child_lookup(cdip, derr, flag, addr,
			    bdf);

		if (status == PF_HDL_FOUND)
			goto done;
	}

done:
	if (have_lock == B_TRUE)
		pf_handler_exit(dip);

	return (status);
}
2408 
2409 static int
2410 pf_hdl_compare(dev_info_t *dip, ddi_fm_error_t *derr, uint32_t flag,
2411     uint64_t addr, pcie_req_id_t bdf, ndi_fmc_t *fcp) {
2412 	ndi_fmcentry_t	*fep;
2413 	int		found = 0;
2414 	int		status;
2415 
2416 	mutex_enter(&fcp->fc_lock);
2417 	for (fep = fcp->fc_head; fep != NULL; fep = fep->fce_next) {
2418 		ddi_fmcompare_t compare_func;
2419 
2420 		/*
2421 		 * Compare captured error state with handle
2422 		 * resources.  During the comparison and
2423 		 * subsequent error handling, we block
2424 		 * attempts to free the cache entry.
2425 		 */
2426 		compare_func = (flag == ACC_HANDLE) ?
2427 		    i_ddi_fm_acc_err_cf_get((ddi_acc_handle_t)
2428 			fep->fce_resource) :
2429 		    i_ddi_fm_dma_err_cf_get((ddi_dma_handle_t)
2430 			fep->fce_resource);
2431 
2432 		status = compare_func(dip, fep->fce_resource,
2433 			    (void *)&addr, (void *)&bdf);
2434 
2435 		if (status == DDI_FM_NONFATAL) {
2436 			found++;
2437 
2438 			/* Set the error for this resource handle */
2439 			if (flag == ACC_HANDLE) {
2440 				ddi_acc_handle_t ap = fep->fce_resource;
2441 
2442 				i_ddi_fm_acc_err_set(ap, derr->fme_ena, status,
2443 				    DDI_FM_ERR_UNEXPECTED);
2444 				ddi_fm_acc_err_get(ap, derr, DDI_FME_VERSION);
2445 				derr->fme_acc_handle = ap;
2446 			} else {
2447 				ddi_dma_handle_t dp = fep->fce_resource;
2448 
2449 				i_ddi_fm_dma_err_set(dp, derr->fme_ena, status,
2450 				    DDI_FM_ERR_UNEXPECTED);
2451 				ddi_fm_dma_err_get(dp, derr, DDI_FME_VERSION);
2452 				derr->fme_dma_handle = dp;
2453 			}
2454 		}
2455 	}
2456 	mutex_exit(&fcp->fc_lock);
2457 
2458 	/*
2459 	 * If a handler isn't found and we know this is the right device mark
2460 	 * them all failed.
2461 	 */
2462 	if ((addr != NULL) && PCIE_CHECK_VALID_BDF(bdf) && (found == 0)) {
2463 		status = pf_hdl_compare(dip, derr, flag, addr, bdf, fcp);
2464 		if (status == PF_HDL_FOUND)
2465 			found++;
2466 	}
2467 
2468 	return ((found) ? PF_HDL_FOUND : PF_HDL_NOTFOUND);
2469 }
2470 
2471 /*
2472  * Automatically decode AER header logs and does a handling look up based on the
2473  * AER header decoding.
2474  *
2475  * For this function only the Primary/Secondary AER Header Logs need to be valid
2476  * in the pfd (PCIe Fault Data) arg.
2477  *
2478  * Returns either PF_HDL_NOTFOUND or PF_HDL_FOUND.
2479  */
2480 static int
2481 pf_log_hdl_lookup(dev_info_t *rpdip, ddi_fm_error_t *derr, pf_data_t *pfd_p,
2482 	boolean_t is_primary)
2483 {
2484 	int		lookup = PF_HDL_NOTFOUND;
2485 
2486 	if (is_primary) {
2487 		pf_pcie_adv_err_regs_t *reg_p = PCIE_ADV_REG(pfd_p);
2488 		if (pf_tlp_decode(PCIE_PFD2BUS(pfd_p), reg_p) == DDI_SUCCESS) {
2489 			lookup = pf_hdl_lookup(rpdip, derr->fme_ena,
2490 			    reg_p->pcie_ue_tgt_trans,
2491 			    reg_p->pcie_ue_tgt_addr,
2492 			    reg_p->pcie_ue_tgt_bdf);
2493 		}
2494 	} else {
2495 		pf_pcie_adv_bdg_err_regs_t *reg_p = PCIE_ADV_BDG_REG(pfd_p);
2496 		uint16_t cmd;
2497 		if (pf_pci_decode(pfd_p, &cmd) == DDI_SUCCESS) {
2498 			lookup = pf_hdl_lookup(rpdip, derr->fme_ena,
2499 			    reg_p->pcie_sue_tgt_trans,
2500 			    reg_p->pcie_sue_tgt_addr,
2501 			    reg_p->pcie_sue_tgt_bdf);
2502 		}
2503 	}
2504 
2505 	return (lookup);
2506 }
2507 
2508 /*
2509  * Decodes the TLP and returns the BDF of the handler, address and transaction
2510  * type if known.
2511  *
2512  * Types of TLP logs seen in RC, and what to extract:
2513  *
2514  * Memory(DMA) - Requester BDF, address, PF_DMA_ADDR
2515  * Memory(PIO) - address, PF_PIO_ADDR
2516  * CFG - Should not occur and result in UR
2517  * Completion(DMA) - Requester BDF, PF_DMA_ADDR
2518  * Completion(PIO) - Requester BDF, PF_PIO_ADDR
2519  *
2520  * Types of TLP logs seen in SW/Leaf, and what to extract:
2521  *
2522  * Memory(DMA) - Requester BDF, address, PF_DMA_ADDR
2523  * Memory(PIO) - address, PF_PIO_ADDR
2524  * CFG - Destined BDF, address, PF_CFG_ADDR
2525  * Completion(DMA) - Requester BDF, PF_DMA_ADDR
2526  * Completion(PIO) - Requester BDF, PF_PIO_ADDR
2527  *
2528  * The adv_reg_p must be passed in separately for use with SPARC RPs.  A
2529  * SPARC RP could have multiple AER header logs which cannot be directly
2530  * accessed via the bus_p.
2531  */
2532 int
2533 pf_tlp_decode(pcie_bus_t *bus_p, pf_pcie_adv_err_regs_t *adv_reg_p) {
2534 	pcie_tlp_hdr_t	*tlp_hdr = (pcie_tlp_hdr_t *)adv_reg_p->pcie_ue_hdr;
2535 	pcie_req_id_t	my_bdf, tlp_bdf, flt_bdf = PCIE_INVALID_BDF;
2536 	uint64_t	flt_addr = 0;
2537 	uint32_t	flt_trans_type = 0;
2538 
2539 	adv_reg_p->pcie_ue_tgt_addr = 0;
2540 	adv_reg_p->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;
2541 	adv_reg_p->pcie_ue_tgt_trans = 0;
2542 
2543 	my_bdf = bus_p->bus_bdf;
2544 	switch (tlp_hdr->type) {
2545 	case PCIE_TLP_TYPE_IO:
2546 	case PCIE_TLP_TYPE_MEM:
2547 	case PCIE_TLP_TYPE_MEMLK:
2548 		/* Grab the 32/64bit fault address */
2549 		if (tlp_hdr->fmt & 0x1) {
2550 			flt_addr = ((uint64_t)adv_reg_p->pcie_ue_hdr[2] << 32);
2551 			flt_addr |= adv_reg_p->pcie_ue_hdr[3];
2552 		} else {
2553 			flt_addr = adv_reg_p->pcie_ue_hdr[2];
2554 		}
2555 
2556 		tlp_bdf = (pcie_req_id_t)(adv_reg_p->pcie_ue_hdr[1] >> 16);
2557 
2558 		/*
2559 		 * If the req bdf >= this.bdf, then it means the request is this
2560 		 * device or came from a device below it.  Unless this device is
2561 		 * a PCIe root port then it means is a DMA, otherwise PIO.
2562 		 */
2563 		if ((tlp_bdf >= my_bdf) && !PCIE_IS_ROOT(bus_p)) {
2564 			flt_trans_type = PF_ADDR_DMA;
2565 			flt_bdf = tlp_bdf;
2566 		} else if (PCIE_IS_ROOT(bus_p) &&
2567 		    (PF_FIRST_AER_ERR(PCIE_AER_UCE_PTLP, adv_reg_p) ||
2568 			(PF_FIRST_AER_ERR(PCIE_AER_UCE_CA, adv_reg_p)))) {
2569 			flt_trans_type = PF_ADDR_DMA;
2570 			flt_bdf = tlp_bdf;
2571 		} else {
2572 			flt_trans_type = PF_ADDR_PIO;
2573 			flt_bdf = PCIE_INVALID_BDF;
2574 		}
2575 		break;
2576 	case PCIE_TLP_TYPE_CFG0:
2577 	case PCIE_TLP_TYPE_CFG1:
2578 		flt_addr = 0;
2579 		flt_bdf = (pcie_req_id_t)(adv_reg_p->pcie_ue_hdr[2] >> 16);
2580 		flt_trans_type = PF_ADDR_CFG;
2581 		break;
2582 	case PCIE_TLP_TYPE_CPL:
2583 	case PCIE_TLP_TYPE_CPLLK:
2584 	{
2585 		pcie_cpl_t *cpl_tlp = (pcie_cpl_t *)&adv_reg_p->pcie_ue_hdr[1];
2586 
2587 		flt_addr = NULL;
2588 		flt_bdf = (cpl_tlp->rid > cpl_tlp->cid) ? cpl_tlp->rid :
2589 		    cpl_tlp->cid;
2590 
2591 		/*
2592 		 * If the cpl bdf < this.bdf, then it means the request is this
2593 		 * device or came from a device below it.  Unless this device is
2594 		 * a PCIe root port then it means is a DMA, otherwise PIO.
2595 		 */
2596 		if (cpl_tlp->rid > cpl_tlp->cid) {
2597 			flt_trans_type = PF_ADDR_DMA;
2598 		} else {
2599 			flt_trans_type = PF_ADDR_PIO | PF_ADDR_CFG;
2600 		}
2601 		break;
2602 	}
2603 	default:
2604 		return (DDI_FAILURE);
2605 	}
2606 
2607 	adv_reg_p->pcie_ue_tgt_addr = flt_addr;
2608 	adv_reg_p->pcie_ue_tgt_bdf = flt_bdf;
2609 	adv_reg_p->pcie_ue_tgt_trans = flt_trans_type;
2610 
2611 	return (DDI_SUCCESS);
2612 }
2613 
#define	PCIE_EREPORT	DDI_IO_CLASS "." PCI_ERROR_SUBCLASS "." PCIEX_FABRIC

/*
 * Reserve an errorq element and construct the skeleton fabric ereport plus
 * its detector FMRI for 'dip'.  On DDI_SUCCESS the caller owns the reserved
 * element and must eventually commit it (see pf_ereport_post); on DDI_FAILURE
 * nothing is reserved and the drop is counted in the FM kstats.
 */
static int
pf_ereport_setup(dev_info_t *dip, uint64_t ena, nvlist_t **ereport,
    nvlist_t **detector, errorq_elem_t **eqep)
{
	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;
	char device_path[MAXPATHLEN];
	nv_alloc_t *nva;

	*eqep = errorq_reserve(fmhdl->fh_errorq);
	if (*eqep == NULL) {
		/* Queue exhausted: account the dropped ereport and bail. */
		atomic_add_64(&fmhdl->fh_kstat.fek_erpt_dropped.value.ui64, 1);
		return (DDI_FAILURE);
	}

	*ereport = errorq_elem_nvl(fmhdl->fh_errorq, *eqep);
	nva = errorq_elem_nva(fmhdl->fh_errorq, *eqep);

	ASSERT(*ereport);
	ASSERT(nva);

	/*
	 * Use the dev_path/devid for this device instance.
	 */
	*detector = fm_nvlist_create(nva);
	if (dip == ddi_root_node()) {
		device_path[0] = '/';
		device_path[1] = '\0';
	} else {
		(void) ddi_pathname(dip, device_path);
	}

	fm_fmri_dev_set(*detector, FM_DEV_SCHEME_VERSION, NULL,
	    device_path, NULL);

	/* Generate a fresh ENA when the caller did not supply one. */
	if (ena == 0)
		ena = fm_ena_generate(0, FM_ENA_FMT1);

	fm_ereport_set(*ereport, 0, PCIE_EREPORT, ena, *detector, NULL);

	return (DDI_SUCCESS);
}
2656 
2657 /* ARGSUSED */
2658 static void
2659 pf_ereport_post(dev_info_t *dip, nvlist_t **ereport, nvlist_t **detector,
2660     errorq_elem_t **eqep)
2661 {
2662 	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;
2663 
2664 	errorq_commit(fmhdl->fh_errorq, *eqep, ERRORQ_ASYNC);
2665 }
2666 
2667 static void
2668 pf_send_ereport(ddi_fm_error_t *derr, pf_impl_t *impl)
2669 {
2670 	nvlist_t	*ereport;
2671 	nvlist_t	*detector;
2672 	errorq_elem_t	*eqep;
2673 	pcie_bus_t	*bus_p;
2674 	pf_data_t	*pfd_p;
2675 	uint32_t	total = impl->pf_total;
2676 
2677 	/*
2678 	 * Ereports need to be sent in a top down fashion. The fabric translator
2679 	 * expects the ereports from the Root first. This is needed to tell if
2680 	 * the system contains a PCIe complaint RC/RP.
2681 	 */
2682 	for (pfd_p = impl->pf_dq_head_p; pfd_p; pfd_p = pfd_p->pe_next) {
2683 		bus_p = PCIE_PFD2BUS(pfd_p);
2684 		pfd_p->pe_valid = B_FALSE;
2685 
2686 		if (derr->fme_flag != DDI_FM_ERR_UNEXPECTED ||
2687 		    !DDI_FM_EREPORT_CAP(ddi_fm_capable(PCIE_PFD2DIP(pfd_p))))
2688 			continue;
2689 
2690 		if (pf_ereport_setup(PCIE_BUS2DIP(bus_p), derr->fme_ena,
2691 		    &ereport, &detector, &eqep) != DDI_SUCCESS)
2692 			continue;
2693 
2694 		if (PFD_IS_RC(pfd_p)) {
2695 			fm_payload_set(ereport,
2696 			    "scan_bdf", DATA_TYPE_UINT16,
2697 			    PCIE_ROOT_FAULT(pfd_p)->scan_bdf,
2698 			    "scan_addr", DATA_TYPE_UINT64,
2699 			    PCIE_ROOT_FAULT(pfd_p)->scan_addr,
2700 			    "intr_src", DATA_TYPE_UINT16,
2701 			    PCIE_ROOT_EH_SRC(pfd_p)->intr_type,
2702 			    NULL);
2703 			goto generic;
2704 		}
2705 
2706 		/* Generic PCI device information */
2707 		fm_payload_set(ereport,
2708 		    "bdf", DATA_TYPE_UINT16, bus_p->bus_bdf,
2709 		    "device_id", DATA_TYPE_UINT16,
2710 		    (bus_p->bus_dev_ven_id >> 16),
2711 		    "vendor_id", DATA_TYPE_UINT16,
2712 		    (bus_p->bus_dev_ven_id & 0xFFFF),
2713 		    "rev_id", DATA_TYPE_UINT8, bus_p->bus_rev_id,
2714 		    "dev_type", DATA_TYPE_UINT16, bus_p->bus_dev_type,
2715 		    "pcie_off", DATA_TYPE_UINT16, bus_p->bus_pcie_off,
2716 		    "pcix_off", DATA_TYPE_UINT16, bus_p->bus_pcix_off,
2717 		    "aer_off", DATA_TYPE_UINT16, bus_p->bus_aer_off,
2718 		    "ecc_ver", DATA_TYPE_UINT16, bus_p->bus_ecc_ver,
2719 		    NULL);
2720 
2721 		/* PCI registers */
2722 		fm_payload_set(ereport,
2723 		    "pci_status", DATA_TYPE_UINT16,
2724 		    PCI_ERR_REG(pfd_p)->pci_err_status,
2725 		    "pci_command", DATA_TYPE_UINT16,
2726 		    PCI_ERR_REG(pfd_p)->pci_cfg_comm,
2727 		    NULL);
2728 
2729 		/* PCI bridge registers */
2730 		if (PCIE_IS_BDG(bus_p)) {
2731 			fm_payload_set(ereport,
2732 			    "pci_bdg_sec_status", DATA_TYPE_UINT16,
2733 			    PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat,
2734 			    "pci_bdg_ctrl", DATA_TYPE_UINT16,
2735 			    PCI_BDG_ERR_REG(pfd_p)->pci_bdg_ctrl,
2736 			    NULL);
2737 		}
2738 
2739 		/* PCIx registers */
2740 		if (PCIE_IS_PCIX(bus_p) && !PCIE_IS_BDG(bus_p)) {
2741 			fm_payload_set(ereport,
2742 			    "pcix_status", DATA_TYPE_UINT32,
2743 			    PCIX_ERR_REG(pfd_p)->pcix_status,
2744 			    "pcix_command", DATA_TYPE_UINT16,
2745 			    PCIX_ERR_REG(pfd_p)->pcix_command,
2746 			    NULL);
2747 		}
2748 
2749 		/* PCIx ECC Registers */
2750 		if (PCIX_ECC_VERSION_CHECK(bus_p)) {
2751 			pf_pcix_ecc_regs_t *ecc_bdg_reg;
2752 			pf_pcix_ecc_regs_t *ecc_reg;
2753 
2754 			if (PCIE_IS_BDG(bus_p))
2755 				ecc_bdg_reg = PCIX_BDG_ECC_REG(pfd_p, 0);
2756 			ecc_reg = PCIX_ECC_REG(pfd_p);
2757 			fm_payload_set(ereport,
2758 			    "pcix_ecc_control_0", DATA_TYPE_UINT16,
2759 			    PCIE_IS_BDG(bus_p) ?
2760 			    (ecc_bdg_reg->pcix_ecc_ctlstat >> 16) :
2761 			    (ecc_reg->pcix_ecc_ctlstat >> 16),
2762 			    "pcix_ecc_status_0", DATA_TYPE_UINT16,
2763 			    PCIE_IS_BDG(bus_p) ?
2764 			    (ecc_bdg_reg->pcix_ecc_ctlstat & 0xFFFF) :
2765 			    (ecc_reg->pcix_ecc_ctlstat & 0xFFFF),
2766 			    "pcix_ecc_fst_addr_0", DATA_TYPE_UINT32,
2767 			    PCIE_IS_BDG(bus_p) ?
2768 			    ecc_bdg_reg->pcix_ecc_fstaddr :
2769 			    ecc_reg->pcix_ecc_fstaddr,
2770 			    "pcix_ecc_sec_addr_0", DATA_TYPE_UINT32,
2771 			    PCIE_IS_BDG(bus_p) ?
2772 			    ecc_bdg_reg->pcix_ecc_secaddr :
2773 			    ecc_reg->pcix_ecc_secaddr,
2774 			    "pcix_ecc_attr_0", DATA_TYPE_UINT32,
2775 			    PCIE_IS_BDG(bus_p) ?
2776 			    ecc_bdg_reg->pcix_ecc_attr :
2777 			    ecc_reg->pcix_ecc_attr,
2778 			    NULL);
2779 		}
2780 
2781 		/* PCIx ECC Bridge Registers */
2782 		if (PCIX_ECC_VERSION_CHECK(bus_p) && PCIE_IS_BDG(bus_p)) {
2783 			pf_pcix_ecc_regs_t *ecc_bdg_reg;
2784 
2785 			ecc_bdg_reg = PCIX_BDG_ECC_REG(pfd_p, 1);
2786 			fm_payload_set(ereport,
2787 			    "pcix_ecc_control_1", DATA_TYPE_UINT16,
2788 			    (ecc_bdg_reg->pcix_ecc_ctlstat >> 16),
2789 			    "pcix_ecc_status_1", DATA_TYPE_UINT16,
2790 			    (ecc_bdg_reg->pcix_ecc_ctlstat & 0xFFFF),
2791 			    "pcix_ecc_fst_addr_1", DATA_TYPE_UINT32,
2792 			    ecc_bdg_reg->pcix_ecc_fstaddr,
2793 			    "pcix_ecc_sec_addr_1", DATA_TYPE_UINT32,
2794 			    ecc_bdg_reg->pcix_ecc_secaddr,
2795 			    "pcix_ecc_attr_1", DATA_TYPE_UINT32,
2796 			    ecc_bdg_reg->pcix_ecc_attr,
2797 			    NULL);
2798 		}
2799 
2800 		/* PCIx Bridge */
2801 		if (PCIE_IS_PCIX(bus_p) && PCIE_IS_BDG(bus_p)) {
2802 			fm_payload_set(ereport,
2803 			    "pcix_bdg_status", DATA_TYPE_UINT32,
2804 			    PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_stat,
2805 			    "pcix_bdg_sec_status", DATA_TYPE_UINT16,
2806 			    PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_sec_stat,
2807 			    NULL);
2808 		}
2809 
2810 		/* PCIe registers */
2811 		if (PCIE_IS_PCIE(bus_p)) {
2812 			fm_payload_set(ereport,
2813 			    "pcie_status", DATA_TYPE_UINT16,
2814 			    PCIE_ERR_REG(pfd_p)->pcie_err_status,
2815 			    "pcie_command", DATA_TYPE_UINT16,
2816 			    PCIE_ERR_REG(pfd_p)->pcie_err_ctl,
2817 			    "pcie_dev_cap", DATA_TYPE_UINT32,
2818 			    PCIE_ERR_REG(pfd_p)->pcie_dev_cap,
2819 			    NULL);
2820 		}
2821 
2822 		/* PCIe AER registers */
2823 		if (PCIE_HAS_AER(bus_p)) {
2824 			fm_payload_set(ereport,
2825 			    "pcie_adv_ctl", DATA_TYPE_UINT32,
2826 			    PCIE_ADV_REG(pfd_p)->pcie_adv_ctl,
2827 			    "pcie_ue_status", DATA_TYPE_UINT32,
2828 			    PCIE_ADV_REG(pfd_p)->pcie_ue_status,
2829 			    "pcie_ue_mask", DATA_TYPE_UINT32,
2830 			    PCIE_ADV_REG(pfd_p)->pcie_ue_mask,
2831 			    "pcie_ue_sev", DATA_TYPE_UINT32,
2832 			    PCIE_ADV_REG(pfd_p)->pcie_ue_sev,
2833 			    "pcie_ue_hdr0", DATA_TYPE_UINT32,
2834 			    PCIE_ADV_REG(pfd_p)->pcie_ue_hdr[0],
2835 			    "pcie_ue_hdr1", DATA_TYPE_UINT32,
2836 			    PCIE_ADV_REG(pfd_p)->pcie_ue_hdr[1],
2837 			    "pcie_ue_hdr2", DATA_TYPE_UINT32,
2838 			    PCIE_ADV_REG(pfd_p)->pcie_ue_hdr[2],
2839 			    "pcie_ue_hdr3", DATA_TYPE_UINT32,
2840 			    PCIE_ADV_REG(pfd_p)->pcie_ue_hdr[3],
2841 			    "pcie_ce_status", DATA_TYPE_UINT32,
2842 			    PCIE_ADV_REG(pfd_p)->pcie_ce_status,
2843 			    "pcie_ce_mask", DATA_TYPE_UINT32,
2844 			    PCIE_ADV_REG(pfd_p)->pcie_ce_mask,
2845 			    NULL);
2846 		}
2847 
2848 		/* PCIe AER decoded header */
2849 		if (HAS_AER_LOGS(pfd_p, PCIE_ADV_REG(pfd_p)->pcie_ue_status)) {
2850 			fm_payload_set(ereport,
2851 			    "pcie_ue_tgt_trans", DATA_TYPE_UINT32,
2852 			    PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_trans,
2853 			    "pcie_ue_tgt_addr", DATA_TYPE_UINT64,
2854 			    PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_addr,
2855 			    "pcie_ue_tgt_bdf", DATA_TYPE_UINT16,
2856 			    PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf,
2857 			    NULL);
2858 			/* Clear these values as they no longer valid */
2859 			PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_trans = 0;
2860 			PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_addr = 0;
2861 			PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;
2862 		}
2863 
2864 		/* PCIe BDG AER registers */
2865 		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_HAS_AER(bus_p)) {
2866 			fm_payload_set(ereport,
2867 			    "pcie_sue_adv_ctl", DATA_TYPE_UINT32,
2868 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_ctl,
2869 			    "pcie_sue_status", DATA_TYPE_UINT32,
2870 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_status,
2871 			    "pcie_sue_mask", DATA_TYPE_UINT32,
2872 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_mask,
2873 			    "pcie_sue_sev", DATA_TYPE_UINT32,
2874 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_sev,
2875 			    "pcie_sue_hdr0", DATA_TYPE_UINT32,
2876 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_hdr[0],
2877 			    "pcie_sue_hdr1", DATA_TYPE_UINT32,
2878 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_hdr[1],
2879 			    "pcie_sue_hdr2", DATA_TYPE_UINT32,
2880 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_hdr[2],
2881 			    "pcie_sue_hdr3", DATA_TYPE_UINT32,
2882 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_hdr[3],
2883 			    NULL);
2884 		}
2885 
2886 		/* PCIe BDG AER decoded header */
2887 		if (PCIE_IS_PCIE_BDG(bus_p) && HAS_SAER_LOGS(pfd_p,
2888 		    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_status)) {
2889 			fm_payload_set(ereport,
2890 			    "pcie_sue_tgt_trans", DATA_TYPE_UINT32,
2891 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_trans,
2892 			    "pcie_sue_tgt_addr", DATA_TYPE_UINT64,
2893 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_addr,
2894 			    "pcie_sue_tgt_bdf", DATA_TYPE_UINT16,
2895 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf,
2896 			    NULL);
2897 			/* Clear these values as they no longer valid */
2898 			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_trans = 0;
2899 			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_addr = 0;
2900 			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
2901 			    PCIE_INVALID_BDF;
2902 		}
2903 
2904 		/* PCIe RP registers */
2905 		if (PCIE_IS_RP(bus_p)) {
2906 			fm_payload_set(ereport,
2907 			    "pcie_rp_status", DATA_TYPE_UINT32,
2908 			    PCIE_RP_REG(pfd_p)->pcie_rp_status,
2909 			    "pcie_rp_control", DATA_TYPE_UINT16,
2910 			    PCIE_RP_REG(pfd_p)->pcie_rp_ctl,
2911 			    NULL);
2912 		}
2913 
2914 		/* PCIe RP AER registers */
2915 		if (PCIE_IS_RP(bus_p) && PCIE_HAS_AER(bus_p)) {
2916 			fm_payload_set(ereport,
2917 			    "pcie_adv_rp_status", DATA_TYPE_UINT32,
2918 			    PCIE_ADV_RP_REG(pfd_p)->pcie_rp_err_status,
2919 			    "pcie_adv_rp_command", DATA_TYPE_UINT32,
2920 			    PCIE_ADV_RP_REG(pfd_p)->pcie_rp_err_cmd,
2921 			    "pcie_adv_rp_ce_src_id", DATA_TYPE_UINT16,
2922 			    PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id,
2923 			    "pcie_adv_rp_ue_src_id", DATA_TYPE_UINT16,
2924 			    PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id,
2925 			    NULL);
2926 		}
2927 
2928 generic:
2929 		/* IOV related information */
2930 		if (!PCIE_BDG_IS_UNASSIGNED(PCIE_PFD2BUS(impl->pf_dq_head_p))) {
2931 			fm_payload_set(ereport,
2932 			    "pcie_aff_flags", DATA_TYPE_UINT16,
2933 			    PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags,
2934 			    "pcie_aff_bdf", DATA_TYPE_UINT16,
2935 			    PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf,
2936 			    "orig_sev", DATA_TYPE_UINT32,
2937 			    pfd_p->pe_orig_severity_flags,
2938 			    NULL);
2939 		}
2940 
2941 		/* Misc ereport information */
2942 		fm_payload_set(ereport,
2943 		    "remainder", DATA_TYPE_UINT32, --total,
2944 		    "severity", DATA_TYPE_UINT32, pfd_p->pe_severity_flags,
2945 		    NULL);
2946 
2947 		pf_ereport_post(PCIE_BUS2DIP(bus_p), &ereport, &detector,
2948 		    &eqep);
2949 	}
2950 
2951 	/* Unlock all the devices in the queue */
2952 	for (pfd_p = impl->pf_dq_tail_p; pfd_p; pfd_p = pfd_p->pe_prev) {
2953 		if (pfd_p->pe_lock) {
2954 			pf_handler_exit(PCIE_PFD2DIP(pfd_p));
2955 		}
2956 	}
2957 }
2958 
2959 /*
 * pf_handler_enter must be called to serialize access to each device's
 * pf_data_t.
2961  * Once error handling is finished with the device call pf_handler_exit to allow
2962  * other threads to access it.  The same thread may call pf_handler_enter
2963  * several times without any consequences.
2964  *
2965  * The "impl" variable is passed in during scan fabric to double check that
2966  * there is not a recursive algorithm and to ensure only one thread is doing a
2967  * fabric scan at all times.
2968  *
2969  * In some cases "impl" is not available, such as "child lookup" being called
2970  * from outside of scan fabric, just pass in NULL for this variable and this
2971  * extra check will be skipped.
2972  */
2973 static int
2974 pf_handler_enter(dev_info_t *dip, pf_impl_t *impl)
2975 {
2976 	pf_data_t *pfd_p = PCIE_DIP2PFD(dip);
2977 
2978 	ASSERT(pfd_p);
2979 
2980 	/*
2981 	 * Check to see if the lock has already been taken by this
2982 	 * thread.  If so just return and don't take lock again.
2983 	 */
2984 	if (!pfd_p->pe_lock || !impl) {
2985 		i_ddi_fm_handler_enter(dip);
2986 		pfd_p->pe_lock = B_TRUE;
2987 		return (PF_SCAN_SUCCESS);
2988 	}
2989 
2990 	/* Check to see that this dip is already in the "impl" error queue */
2991 	for (pfd_p = impl->pf_dq_head_p; pfd_p; pfd_p = pfd_p->pe_next) {
2992 		if (PCIE_PFD2DIP(pfd_p) == dip) {
2993 			return (PF_SCAN_SUCCESS);
2994 		}
2995 	}
2996 
2997 	return (PF_SCAN_DEADLOCK);
2998 }
2999 
3000 static void
3001 pf_handler_exit(dev_info_t *dip)
3002 {
3003 	pf_data_t *pfd_p = PCIE_DIP2PFD(dip);
3004 
3005 	ASSERT(pfd_p);
3006 
3007 	ASSERT(pfd_p->pe_lock == B_TRUE);
3008 	i_ddi_fm_handler_exit(dip);
3009 	pfd_p->pe_lock = B_FALSE;
3010 }
3011 
3012 /*
3013  * This function calls the driver's callback function (if it's FMA hardened
3014  * and callback capable). This function relies on the current thread already
3015  * owning the driver's fmhdl lock.
3016  */
3017 static int
3018 pf_fm_callback(dev_info_t *dip, ddi_fm_error_t *derr)
3019 {
3020 	int cb_sts = DDI_FM_OK;
3021 
3022 	if (DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
3023 		dev_info_t *pdip = ddi_get_parent(dip);
3024 		struct i_ddi_fmhdl *hdl = DEVI(pdip)->devi_fmhdl;
3025 		struct i_ddi_fmtgt *tgt = hdl->fh_tgts;
3026 		struct i_ddi_errhdl *errhdl;
3027 		while (tgt != NULL) {
3028 			if (dip == tgt->ft_dip) {
3029 				errhdl = tgt->ft_errhdl;
3030 				cb_sts = errhdl->eh_func(dip, derr,
3031 				    errhdl->eh_impl);
3032 				break;
3033 			}
3034 			tgt = tgt->ft_next;
3035 		}
3036 	}
3037 	return (cb_sts);
3038 }
3039 
/*
 * Reset all saved error state in a pf_data_t so the structure can be
 * reused for the next fabric scan.  Clears severity flags, affected
 * device info, and every captured PCI/PCI-X/PCIe/AER register image
 * applicable to this device's type.
 */
static void
pf_reset_pfd(pf_data_t *pfd_p)
{
	pcie_bus_t	*bus_p = PCIE_PFD2BUS(pfd_p);

	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_orig_severity_flags = 0;
	/* pe_lock and pe_valid were reset in pf_send_ereport */

	/* No affected device is known until the next fault is analyzed */
	PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags = 0;
	PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;

	/* Root devices additionally track scan targets and interrupt source */
	if (PCIE_IS_ROOT(bus_p)) {
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
		PCIE_ROOT_FAULT(pfd_p)->scan_addr = 0;
		PCIE_ROOT_FAULT(pfd_p)->full_scan = B_FALSE;
		PCIE_ROOT_EH_SRC(pfd_p)->intr_type = PF_INTR_TYPE_NONE;
		PCIE_ROOT_EH_SRC(pfd_p)->intr_data = NULL;
	}

	if (PCIE_IS_BDG(bus_p)) {
		bzero(PCI_BDG_ERR_REG(pfd_p), sizeof (pf_pci_bdg_err_regs_t));
	}

	PCI_ERR_REG(pfd_p)->pci_err_status = 0;
	PCI_ERR_REG(pfd_p)->pci_cfg_comm = 0;

	if (PCIE_IS_PCIE(bus_p)) {
		/* Root ports carry extra RP and AER-RP register images */
		if (PCIE_IS_ROOT(bus_p)) {
			bzero(PCIE_RP_REG(pfd_p),
			    sizeof (pf_pcie_rp_err_regs_t));
			bzero(PCIE_ADV_RP_REG(pfd_p),
			    sizeof (pf_pcie_adv_rp_err_regs_t));
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id =
			    PCIE_INVALID_BDF;
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id =
			    PCIE_INVALID_BDF;
		} else if (PCIE_IS_PCIE_BDG(bus_p)) {
			/* PCIe bridges carry secondary (SUE) AER state */
			bzero(PCIE_ADV_BDG_REG(pfd_p),
			    sizeof (pf_pcie_adv_bdg_err_regs_t));
			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
			    PCIE_INVALID_BDF;
		}

		/* PCIe-to-PCI-X bridges may also capture per-side ECC state */
		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				bzero(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				bzero(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}
			PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_sec_stat = 0;
			PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_stat = 0;
		}

		/* AER registers common to all PCIe devices */
		PCIE_ADV_REG(pfd_p)->pcie_adv_ctl = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_status = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_mask = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_sev = 0;
		PCIE_ADV_HDR(pfd_p, 0) = 0;
		PCIE_ADV_HDR(pfd_p, 1) = 0;
		PCIE_ADV_HDR(pfd_p, 2) = 0;
		PCIE_ADV_HDR(pfd_p, 3) = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ce_status = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ce_mask = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_trans = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_addr = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;

		PCIE_ERR_REG(pfd_p)->pcie_err_status = 0;
		PCIE_ERR_REG(pfd_p)->pcie_err_ctl = 0;
		PCIE_ERR_REG(pfd_p)->pcie_dev_cap = 0;

	} else if (PCIE_IS_PCIX(bus_p)) {
		/* Conventional PCI-X device: bridge vs. non-bridge layout */
		if (PCIE_IS_BDG(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				bzero(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				bzero(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}
			PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_sec_stat = 0;
			PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_stat = 0;
		} else {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				bzero(PCIX_ECC_REG(pfd_p),
				    sizeof (pf_pcix_ecc_regs_t));
			}
			PCIX_ERR_REG(pfd_p)->pcix_command = 0;
			PCIX_ERR_REG(pfd_p)->pcix_status = 0;
		}
	}

	/* Detach this pfd from the error queue */
	pfd_p->pe_prev = NULL;
	pfd_p->pe_next = NULL;
	pfd_p->pe_rber_fatal = B_FALSE;
}
3137 
3138 pcie_bus_t *
3139 pf_find_busp_by_bdf(pf_impl_t *impl, pcie_req_id_t bdf)
3140 {
3141 	pcie_bus_t *temp_bus_p;
3142 	pf_data_t *temp_pfd_p;
3143 
3144 	for (temp_pfd_p = impl->pf_dq_head_p;
3145 	    temp_pfd_p;
3146 	    temp_pfd_p = temp_pfd_p->pe_next) {
3147 		temp_bus_p = PCIE_PFD2BUS(temp_pfd_p);
3148 
3149 		if (bdf == temp_bus_p->bus_bdf) {
3150 			return (temp_bus_p);
3151 		}
3152 	}
3153 
3154 	return (NULL);
3155 }
3156 
3157 pcie_bus_t *
3158 pf_find_busp_by_addr(pf_impl_t *impl, uint64_t addr)
3159 {
3160 	pcie_bus_t *temp_bus_p;
3161 	pf_data_t *temp_pfd_p;
3162 
3163 	for (temp_pfd_p = impl->pf_dq_head_p;
3164 	    temp_pfd_p;
3165 	    temp_pfd_p = temp_pfd_p->pe_next) {
3166 		temp_bus_p = PCIE_PFD2BUS(temp_pfd_p);
3167 
3168 		if (pf_in_assigned_addr(temp_bus_p, addr)) {
3169 			return (temp_bus_p);
3170 		}
3171 	}
3172 
3173 	return (NULL);
3174 }
3175 
3176 pcie_bus_t *
3177 pf_find_busp_by_aer(pf_impl_t *impl, pf_data_t *pfd_p)
3178 {
3179 	pf_pcie_adv_err_regs_t *reg_p = PCIE_ADV_REG(pfd_p);
3180 	pcie_bus_t *temp_bus_p = NULL;
3181 	pcie_req_id_t bdf;
3182 	uint64_t addr;
3183 	pcie_tlp_hdr_t *tlp_hdr = (pcie_tlp_hdr_t *)reg_p->pcie_ue_hdr;
3184 	uint32_t trans_type = reg_p->pcie_ue_tgt_trans;
3185 
3186 	if ((tlp_hdr->type == PCIE_TLP_TYPE_CPL) ||
3187 	    (tlp_hdr->type == PCIE_TLP_TYPE_CPLLK)) {
3188 		pcie_cpl_t *cpl_tlp = (pcie_cpl_t *)&reg_p->pcie_ue_hdr[1];
3189 
3190 		bdf = (cpl_tlp->rid > cpl_tlp->cid) ? cpl_tlp->rid :
3191 		    cpl_tlp->cid;
3192 		temp_bus_p = pf_find_busp_by_bdf(impl, bdf);
3193 	} else if (trans_type == PF_ADDR_PIO) {
3194 		addr = reg_p->pcie_ue_tgt_addr;
3195 		temp_bus_p = pf_find_busp_by_addr(impl, addr);
3196 	} else {
3197 		/* PF_ADDR_DMA type */
3198 		bdf = reg_p->pcie_ue_tgt_bdf;
3199 		temp_bus_p = pf_find_busp_by_bdf(impl, bdf);
3200 	}
3201 
3202 	return (temp_bus_p);
3203 }
3204 
3205 pcie_bus_t *
3206 pf_find_busp_by_saer(pf_impl_t *impl, pf_data_t *pfd_p)
3207 {
3208 	pf_pcie_adv_bdg_err_regs_t *reg_p = PCIE_ADV_BDG_REG(pfd_p);
3209 	pcie_bus_t *temp_bus_p = NULL;
3210 	pcie_req_id_t bdf;
3211 	uint64_t addr;
3212 
3213 	addr = reg_p->pcie_sue_tgt_addr;
3214 	bdf = reg_p->pcie_sue_tgt_bdf;
3215 
3216 	if (addr != NULL) {
3217 		temp_bus_p = pf_find_busp_by_addr(impl, addr);
3218 	} else if (PCIE_CHECK_VALID_BDF(bdf)) {
3219 		temp_bus_p = pf_find_busp_by_bdf(impl, bdf);
3220 	}
3221 
3222 	return (temp_bus_p);
3223 }
3224