/* xref: /illumos-gate/usr/src/uts/common/io/pciex/pcie_fault.c (revision 2dea4eed7ad1c66ae4770263aa2911815a8b86eb) */
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/sysmacros.h>
27 #include <sys/types.h>
28 #include <sys/kmem.h>
29 #include <sys/modctl.h>
30 #include <sys/ddi.h>
31 #include <sys/sunddi.h>
32 #include <sys/sunndi.h>
33 #include <sys/fm/protocol.h>
34 #include <sys/fm/util.h>
35 #include <sys/fm/io/ddi.h>
36 #include <sys/fm/io/pci.h>
37 #include <sys/promif.h>
38 #include <sys/disp.h>
39 #include <sys/atomic.h>
40 #include <sys/pcie.h>
41 #include <sys/pci_cap.h>
42 #include <sys/pcie_impl.h>
43 
44 #define	PF_PCIE_BDG_ERR (PCIE_DEVSTS_FE_DETECTED | PCIE_DEVSTS_NFE_DETECTED | \
45 	PCIE_DEVSTS_CE_DETECTED)
46 
47 #define	PF_PCI_BDG_ERR (PCI_STAT_S_SYSERR | PCI_STAT_S_TARG_AB | \
48 	PCI_STAT_R_MAST_AB | PCI_STAT_R_TARG_AB | PCI_STAT_S_PERROR)
49 
50 #define	PF_AER_FATAL_ERR (PCIE_AER_UCE_DLP | PCIE_AER_UCE_SD |\
51 	PCIE_AER_UCE_FCP | PCIE_AER_UCE_RO | PCIE_AER_UCE_MTLP)
52 #define	PF_AER_NON_FATAL_ERR (PCIE_AER_UCE_PTLP | PCIE_AER_UCE_TO | \
53 	PCIE_AER_UCE_CA | PCIE_AER_UCE_ECRC | PCIE_AER_UCE_UR)
54 
55 #define	PF_SAER_FATAL_ERR (PCIE_AER_SUCE_USC_MSG_DATA_ERR | \
56 	PCIE_AER_SUCE_UC_ATTR_ERR | PCIE_AER_SUCE_UC_ADDR_ERR | \
57 	PCIE_AER_SUCE_SERR_ASSERT)
58 #define	PF_SAER_NON_FATAL_ERR (PCIE_AER_SUCE_TA_ON_SC | \
59 	PCIE_AER_SUCE_MA_ON_SC | PCIE_AER_SUCE_RCVD_TA | \
60 	PCIE_AER_SUCE_RCVD_MA | PCIE_AER_SUCE_USC_ERR | \
61 	PCIE_AER_SUCE_UC_DATA_ERR | PCIE_AER_SUCE_TIMER_EXPIRED | \
62 	PCIE_AER_SUCE_PERR_ASSERT | PCIE_AER_SUCE_INTERNAL_ERR)
63 
64 #define	PF_PCI_PARITY_ERR (PCI_STAT_S_PERROR | PCI_STAT_PERROR)
65 
66 #define	PF_FIRST_AER_ERR(bit, adv) \
67 	(bit & (1 << (adv->pcie_adv_ctl & PCIE_AER_CTL_FST_ERR_PTR_MASK)))
68 
69 #define	HAS_AER_LOGS(pfd_p, bit) \
70 	(PCIE_HAS_AER(pfd_p->pe_bus_p) && \
71 	PF_FIRST_AER_ERR(bit, PCIE_ADV_REG(pfd_p)))
72 
73 #define	PF_FIRST_SAER_ERR(bit, adv) \
74 	(bit & (1 << (adv->pcie_sue_ctl & PCIE_AER_SCTL_FST_ERR_PTR_MASK)))
75 
76 #define	HAS_SAER_LOGS(pfd_p, bit) \
77 	(PCIE_HAS_AER(pfd_p->pe_bus_p) && \
78 	PF_FIRST_SAER_ERR(bit, PCIE_ADV_BDG_REG(pfd_p)))
79 
80 #define	GET_SAER_CMD(pfd_p) \
81 	((PCIE_ADV_BDG_HDR(pfd_p, 1) >> \
82 	PCIE_AER_SUCE_HDR_CMD_LWR_SHIFT) & PCIE_AER_SUCE_HDR_CMD_LWR_MASK)
83 
84 #define	CE_ADVISORY(pfd_p) \
85 	(PCIE_ADV_REG(pfd_p)->pcie_ce_status & PCIE_AER_CE_AD_NFE)
86 
/* PCIe Fault Fabric Error analysis table */
typedef struct pf_fab_err_tbl {
	uint32_t	bit;		/* Error bit */
	int		(*handler)();	/* Error handling function */
	uint16_t	affected_flags; /* Primary affected flag */
	/*
	 * Secondary affected flag, effective when the information
	 * indicated by the primary flag is not available, eg.
	 * PF_AFFECTED_AER/SAER/ADDR
	 */
	uint16_t	sec_affected_flags;
} pf_fab_err_tbl_t;
99 
100 static pcie_bus_t *pf_is_ready(dev_info_t *);
101 /* Functions for scanning errors */
102 static int pf_default_hdl(dev_info_t *, pf_impl_t *);
103 static int pf_dispatch(dev_info_t *, pf_impl_t *, boolean_t);
104 static boolean_t pf_in_addr_range(pcie_bus_t *, uint64_t);
105 
106 /* Functions for gathering errors */
107 static void pf_pcix_ecc_regs_gather(pf_pcix_ecc_regs_t *pcix_ecc_regs,
108     pcie_bus_t *bus_p, boolean_t bdg);
109 static void pf_pcix_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p);
110 static void pf_pcie_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p);
111 static void pf_pci_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p);
112 static int pf_dummy_cb(dev_info_t *, ddi_fm_error_t *, const void *);
113 static void pf_en_dq(pf_data_t *pfd_p, pf_impl_t *impl_p);
114 
115 /* Functions for analysing errors */
116 static int pf_analyse_error(ddi_fm_error_t *, pf_impl_t *);
117 static void pf_adjust_for_no_aer(pf_data_t *);
118 static void pf_adjust_for_no_saer(pf_data_t *);
119 static pf_data_t *pf_get_pcie_bridge(pf_data_t *, pcie_req_id_t);
120 static pf_data_t *pf_get_parent_pcie_bridge(pf_data_t *);
121 static boolean_t pf_matched_in_rc(pf_data_t *, pf_data_t *,
122     uint32_t);
123 static int pf_analyse_error_tbl(ddi_fm_error_t *, pf_impl_t *,
124     pf_data_t *, const pf_fab_err_tbl_t *, uint32_t);
125 static int pf_analyse_ca_ur(ddi_fm_error_t *, uint32_t,
126     pf_data_t *, pf_data_t *);
127 static int pf_analyse_ma_ta(ddi_fm_error_t *, uint32_t,
128     pf_data_t *, pf_data_t *);
129 static int pf_analyse_pci(ddi_fm_error_t *, uint32_t,
130     pf_data_t *, pf_data_t *);
131 static int pf_analyse_perr_assert(ddi_fm_error_t *, uint32_t,
132     pf_data_t *, pf_data_t *);
133 static int pf_analyse_ptlp(ddi_fm_error_t *, uint32_t,
134     pf_data_t *, pf_data_t *);
135 static int pf_analyse_sc(ddi_fm_error_t *, uint32_t,
136     pf_data_t *, pf_data_t *);
137 static int pf_analyse_to(ddi_fm_error_t *, uint32_t,
138     pf_data_t *, pf_data_t *);
139 static int pf_analyse_uc(ddi_fm_error_t *, uint32_t,
140     pf_data_t *, pf_data_t *);
141 static int pf_analyse_uc_data(ddi_fm_error_t *, uint32_t,
142     pf_data_t *, pf_data_t *);
143 static int pf_no_panic(ddi_fm_error_t *, uint32_t,
144     pf_data_t *, pf_data_t *);
145 static int pf_panic(ddi_fm_error_t *, uint32_t,
146     pf_data_t *, pf_data_t *);
147 static void pf_send_ereport(ddi_fm_error_t *, pf_impl_t *);
148 static int pf_fm_callback(dev_info_t *dip, ddi_fm_error_t *derr);
149 
150 /* PCIe Fabric Handle Lookup Support Functions. */
151 static int pf_hdl_child_lookup(dev_info_t *, ddi_fm_error_t *, uint32_t,
152     uint64_t, pcie_req_id_t);
153 static int pf_hdl_compare(dev_info_t *, ddi_fm_error_t *, uint32_t, uint64_t,
154     pcie_req_id_t, ndi_fmc_t *);
155 static int pf_log_hdl_lookup(dev_info_t *, ddi_fm_error_t *, pf_data_t *,
156 	boolean_t);
157 
158 static int pf_handler_enter(dev_info_t *, pf_impl_t *);
159 static void pf_handler_exit(dev_info_t *);
160 static void pf_reset_pfd(pf_data_t *);
161 
/*
 * Tunables: pcie_full_scan forces pf_scan_fabric() to walk the whole
 * fabric instead of just the suspected error path; pcie_disable_scan
 * makes pf_scan_fabric() return immediately with no error.
 */
boolean_t pcie_full_scan = B_FALSE;	/* Force to always do a full scan */
int pcie_disable_scan = 0;		/* Disable fabric scan */
164 
165 /* Inform interested parties that error handling is about to begin. */
166 /* ARGSUSED */
167 void
168 pf_eh_enter(pcie_bus_t *bus_p) {
169 }
170 
/* Inform interested parties that error handling has ended. */
void
pf_eh_exit(pcie_bus_t *bus_p)
{
	pcie_bus_t *rbus_p = PCIE_DIP2BUS(bus_p->bus_rp_dip);
	pf_data_t *root_pfd_p = PCIE_BUS2PFD(rbus_p);
	pf_data_t *pfd_p;
	uint_t intr_type = PCIE_ROOT_EH_SRC(root_pfd_p)->intr_type;

	/* Let the pciev layer run its end-of-handling hook first. */
	pciev_eh_exit(root_pfd_p, intr_type);

	/* Clear affected device info and INTR SRC */
	for (pfd_p = root_pfd_p; pfd_p; pfd_p = pfd_p->pe_next) {
		PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags = 0;
		PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;
		/* Only root pfds carry interrupt-source state to reset. */
		if (PCIE_IS_ROOT(PCIE_PFD2BUS(pfd_p))) {
			PCIE_ROOT_EH_SRC(pfd_p)->intr_type = PF_INTR_TYPE_NONE;
			PCIE_ROOT_EH_SRC(pfd_p)->intr_data = NULL;
		}
	}
}
192 
193 /*
194  * Scan Fabric is the entry point for PCI/PCIe IO fabric errors.  The
195  * caller may create a local pf_data_t with the "root fault"
196  * information populated to either do a precise or full scan.  More
197  * than one pf_data_t maybe linked together if there are multiple
198  * errors.  Only a PCIe compliant Root Port device may pass in NULL
199  * for the root_pfd_p.
200  *
201  * "Root Complexes" such as NPE and PX should call scan_fabric using itself as
202  * the rdip.  PCIe Root ports should call pf_scan_fabric using it's parent as
203  * the rdip.
204  *
205  * Scan fabric initiated from RCs are likely due to a fabric message, traps or
206  * any RC detected errors that propagated to/from the fabric.
207  *
208  * This code assumes that by the time pf_scan_fabric is
209  * called, pf_handler_enter has NOT been called on the rdip.
210  */
int
pf_scan_fabric(dev_info_t *rdip, ddi_fm_error_t *derr, pf_data_t *root_pfd_p)
{
	pf_impl_t	impl;
	pf_data_t	*pfd_p, *pfd_head_p, *pfd_tail_p;
	int		scan_flag = PF_SCAN_SUCCESS;
	int		analyse_flag = PF_ERR_NO_ERROR;
	boolean_t	full_scan = pcie_full_scan;

	/* Fabric scanning can be disabled entirely via this tunable. */
	if (pcie_disable_scan)
		return (analyse_flag);

	/* Find the head and tail of this link list */
	pfd_head_p = root_pfd_p;
	for (pfd_tail_p = root_pfd_p; pfd_tail_p && pfd_tail_p->pe_next;
	    pfd_tail_p = pfd_tail_p->pe_next)
		;

	/* Save head/tail */
	impl.pf_total = 0;
	impl.pf_derr = derr;
	impl.pf_dq_head_p = pfd_head_p;
	impl.pf_dq_tail_p = pfd_tail_p;

	/* If scan is initiated from RP then RP itself must be scanned. */
	if (PCIE_IS_RP(PCIE_DIP2BUS(rdip)) && pf_is_ready(rdip) &&
	    !root_pfd_p) {
		scan_flag = pf_handler_enter(rdip, &impl);
		if (scan_flag & PF_SCAN_DEADLOCK)
			goto done;

		/* Snapshot/clear the RP; bail if it saw no child errors. */
		scan_flag = pf_default_hdl(rdip, &impl);
		if (scan_flag & PF_SCAN_NO_ERR_IN_CHILD)
			goto done;
	}

	/*
	 * Scan the fabric using the scan_bdf and scan_addr in error q.
	 * scan_bdf will be valid in the following cases:
	 *	- Fabric message
	 *	- Poisoned TLP
	 *	- Signaled UR/CA
	 *	- Received UR/CA
	 *	- PIO load failures
	 */
	for (pfd_p = impl.pf_dq_head_p; pfd_p && PFD_IS_ROOT(pfd_p);
	    pfd_p = pfd_p->pe_next) {
		impl.pf_fault = PCIE_ROOT_FAULT(pfd_p);

		/* Any one root fault requesting a full scan wins. */
		if (impl.pf_fault->full_scan)
			full_scan = B_TRUE;

		if (full_scan ||
		    PCIE_CHECK_VALID_BDF(impl.pf_fault->scan_bdf) ||
		    impl.pf_fault->scan_addr)
			scan_flag |= pf_dispatch(rdip, &impl, full_scan);

		/* A full scan covers everything; no need to continue. */
		if (full_scan)
			break;
	}

done:
	/*
	 * If this is due to safe access, don't analyze the errors and return
	 * success regardless of how scan fabric went.
	 */
	if (derr->fme_flag != DDI_FM_ERR_UNEXPECTED) {
		analyse_flag = PF_ERR_NO_PANIC;
	} else {
		analyse_flag = pf_analyse_error(derr, &impl);
	}

	pf_send_ereport(derr, &impl);

	/*
	 * Check if any hardened driver's callback reported a panic.
	 * If so panic.
	 */
	if (scan_flag & PF_SCAN_CB_FAILURE)
		analyse_flag |= PF_ERR_PANIC;

	/*
	 * If a deadlock was detected, panic the system as error analysis has
	 * been compromised.
	 */
	if (scan_flag & PF_SCAN_DEADLOCK)
		analyse_flag |= PF_ERR_PANIC_DEADLOCK;

	derr->fme_status = PF_ERR2DDIFM_ERR(scan_flag);

	return (analyse_flag);
}
303 
304 void
305 pcie_force_fullscan() {
306 	pcie_full_scan = B_TRUE;
307 }
308 
309 /*
310  * pf_dispatch walks the device tree and calls the pf_default_hdl if the device
311  * falls in the error path.
312  *
313  * Returns PF_SCAN_* flags
314  */
static int
pf_dispatch(dev_info_t *pdip, pf_impl_t *impl, boolean_t full_scan)
{
	dev_info_t	*dip;
	pcie_req_id_t	rid = impl->pf_fault->scan_bdf;
	pcie_bus_t	*bus_p;
	int		scan_flag = PF_SCAN_SUCCESS;

	/* Walk every child of pdip; recurse below bridges as needed. */
	for (dip = ddi_get_child(pdip); dip; dip = ddi_get_next_sibling(dip)) {
		/* Make sure dip is attached and ready */
		if (!(bus_p = pf_is_ready(dip)))
			continue;

		scan_flag |= pf_handler_enter(dip, impl);
		if (scan_flag & PF_SCAN_DEADLOCK)
			break;

		/*
		 * Handle this device if it is a:
		 * o Full Scan
		 * o PCI/PCI-X Device
		 * o Fault BDF = Device BDF
		 * o BDF/ADDR is in range of the Bridge/Switch
		 */
		if (full_scan ||
		    (bus_p->bus_bdf == rid) ||
		    pf_in_bus_range(bus_p, rid) ||
		    pf_in_addr_range(bus_p, impl->pf_fault->scan_addr)) {
			int hdl_flag = pf_default_hdl(dip, impl);
			scan_flag |= hdl_flag;

			/*
			 * A bridge may have detected no errors in which case
			 * there is no need to scan further down.
			 */
			if (hdl_flag & PF_SCAN_NO_ERR_IN_CHILD)
				continue;
		} else {
			/* Not on the error path: release the handler lock. */
			pf_handler_exit(dip);
			continue;
		}

		/* match or in bridge bus-range */
		switch (bus_p->bus_dev_type) {
		case PCIE_PCIECAP_DEV_TYPE_PCIE2PCI:
		case PCIE_PCIECAP_DEV_TYPE_PCI2PCIE:
			/* Conventional PCI below: must scan everything. */
			scan_flag |= pf_dispatch(dip, impl, B_TRUE);
			break;
		case PCIE_PCIECAP_DEV_TYPE_UP:
		case PCIE_PCIECAP_DEV_TYPE_DOWN:
		case PCIE_PCIECAP_DEV_TYPE_ROOT:
		{
			pf_data_t *pfd_p = PCIE_BUS2PFD(bus_p);
			pf_pci_err_regs_t *err_p = PCI_ERR_REG(pfd_p);
			pf_pci_bdg_err_regs_t *serr_p = PCI_BDG_ERR_REG(pfd_p);
			/*
			 * Continue if the fault BDF != the switch or there is a
			 * parity error
			 */
			if ((bus_p->bus_bdf != rid) ||
			    (err_p->pci_err_status & PF_PCI_PARITY_ERR) ||
			    (serr_p->pci_bdg_sec_stat & PF_PCI_PARITY_ERR))
				scan_flag |= pf_dispatch(dip, impl, full_scan);
			break;
		}
		case PCIE_PCIECAP_DEV_TYPE_PCIE_DEV:
		case PCIE_PCIECAP_DEV_TYPE_PCI_DEV:
			/*
			 * Reached a PCIe end point so stop. Note dev_type
			 * PCI_DEV is just a PCIe device that requires IO Space
			 */
			break;
		case PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO:
			if (PCIE_IS_BDG(bus_p))
				scan_flag |= pf_dispatch(dip, impl, B_TRUE);
			break;
		default:
			/* Unknown device type: should not happen. */
			ASSERT(B_FALSE);
		}
	}
	return (scan_flag);
}
397 
398 /* Returns whether the "bdf" is in the bus range of a switch/bridge */
399 boolean_t
400 pf_in_bus_range(pcie_bus_t *bus_p, pcie_req_id_t bdf)
401 {
402 	pci_bus_range_t *br_p = &bus_p->bus_bus_range;
403 	uint8_t		bus_no = (bdf & PCIE_REQ_ID_BUS_MASK) >>
404 	    PCIE_REQ_ID_BUS_SHIFT;
405 
406 	/* check if given bdf falls within bridge's bus range */
407 	if (PCIE_IS_BDG(bus_p) &&
408 	    ((bus_no >= br_p->lo) && (bus_no <= br_p->hi)))
409 		return (B_TRUE);
410 	else
411 		return (B_FALSE);
412 }
413 
414 /*
415  * Return whether the "addr" is in the assigned addr of a device.
416  */
417 boolean_t
418 pf_in_assigned_addr(pcie_bus_t *bus_p, uint64_t addr)
419 {
420 	uint_t		i;
421 	uint64_t	low, hi;
422 	pci_regspec_t	*assign_p = bus_p->bus_assigned_addr;
423 
424 	for (i = 0; i < bus_p->bus_assigned_entries; i++, assign_p++) {
425 		low = assign_p->pci_phys_low;
426 		hi = low + assign_p->pci_size_low;
427 		if ((addr < hi) && (addr >= low))
428 			return (B_TRUE);
429 	}
430 	return (B_FALSE);
431 }
432 
433 /*
434  * Returns whether the "addr" is in the addr range of a switch/bridge, or if the
435  * "addr" is in the assigned addr of a device.
436  */
437 static boolean_t
438 pf_in_addr_range(pcie_bus_t *bus_p, uint64_t addr)
439 {
440 	uint_t		i;
441 	uint64_t	low, hi;
442 	ppb_ranges_t	*ranges_p = bus_p->bus_addr_ranges;
443 
444 	/* check if given address belongs to this device */
445 	if (pf_in_assigned_addr(bus_p, addr))
446 		return (B_TRUE);
447 
448 	/* check if given address belongs to a child below this device */
449 	if (!PCIE_IS_BDG(bus_p))
450 		return (B_FALSE);
451 
452 	for (i = 0; i < bus_p->bus_addr_entries; i++, ranges_p++) {
453 		switch (ranges_p->child_high & PCI_ADDR_MASK) {
454 		case PCI_ADDR_IO:
455 		case PCI_ADDR_MEM32:
456 			low = ranges_p->child_low;
457 			hi = ranges_p->size_low + low;
458 			if ((addr < hi) && (addr >= low))
459 				return (B_TRUE);
460 			break;
461 		case PCI_ADDR_MEM64:
462 			low = ((uint64_t)ranges_p->child_mid << 32) |
463 			    (uint64_t)ranges_p->child_low;
464 			hi = (((uint64_t)ranges_p->size_high << 32) |
465 			    (uint64_t)ranges_p->size_low) + low;
466 			if ((addr < hi) && (addr >= low))
467 				return (B_TRUE);
468 			break;
469 		}
470 	}
471 	return (B_FALSE);
472 }
473 
474 static pcie_bus_t *
475 pf_is_ready(dev_info_t *dip)
476 {
477 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
478 	if (!bus_p)
479 		return (NULL);
480 
481 	if (!(bus_p->bus_fm_flags & PF_FM_READY))
482 		return (NULL);
483 	return (bus_p);
484 }
485 
/*
 * Snapshot the PCI-X ECC registers into "pcix_ecc_regs".
 *
 * "bdg" selects the register layout to read: B_TRUE reads the bridge
 * ECC register offsets, B_FALSE reads the non-bridge ECC offsets (the
 * two capabilities do not exist at the same time -- see
 * pf_pcix_regs_gather()).
 */
static void
pf_pcix_ecc_regs_gather(pf_pcix_ecc_regs_t *pcix_ecc_regs,
    pcie_bus_t *bus_p, boolean_t bdg)
{
	if (bdg) {
		/* Bridge ECC register offsets. */
		pcix_ecc_regs->pcix_ecc_ctlstat = PCIX_CAP_GET(32, bus_p,
		    PCI_PCIX_BDG_ECC_STATUS);
		pcix_ecc_regs->pcix_ecc_fstaddr = PCIX_CAP_GET(32, bus_p,
		    PCI_PCIX_BDG_ECC_FST_AD);
		pcix_ecc_regs->pcix_ecc_secaddr = PCIX_CAP_GET(32, bus_p,
		    PCI_PCIX_BDG_ECC_SEC_AD);
		pcix_ecc_regs->pcix_ecc_attr = PCIX_CAP_GET(32, bus_p,
		    PCI_PCIX_BDG_ECC_ATTR);
	} else {
		/* Non-bridge (device) ECC register offsets. */
		pcix_ecc_regs->pcix_ecc_ctlstat = PCIX_CAP_GET(32, bus_p,
		    PCI_PCIX_ECC_STATUS);
		pcix_ecc_regs->pcix_ecc_fstaddr = PCIX_CAP_GET(32, bus_p,
		    PCI_PCIX_ECC_FST_AD);
		pcix_ecc_regs->pcix_ecc_secaddr = PCIX_CAP_GET(32, bus_p,
		    PCI_PCIX_ECC_SEC_AD);
		pcix_ecc_regs->pcix_ecc_attr = PCIX_CAP_GET(32, bus_p,
		    PCI_PCIX_ECC_ATTR);
	}
}
510 
511 
512 static void
513 pf_pcix_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p)
514 {
515 	/*
516 	 * For PCI-X device PCI-X Capability only exists for Type 0 Headers.
517 	 * PCI-X Bridge Capability only exists for Type 1 Headers.
518 	 * Both capabilities do not exist at the same time.
519 	 */
520 	if (PCIE_IS_BDG(bus_p)) {
521 		pf_pcix_bdg_err_regs_t *pcix_bdg_regs;
522 
523 		pcix_bdg_regs = PCIX_BDG_ERR_REG(pfd_p);
524 
525 		pcix_bdg_regs->pcix_bdg_sec_stat = PCIX_CAP_GET(16, bus_p,
526 		    PCI_PCIX_SEC_STATUS);
527 		pcix_bdg_regs->pcix_bdg_stat = PCIX_CAP_GET(32, bus_p,
528 		    PCI_PCIX_BDG_STATUS);
529 
530 		if (PCIX_ECC_VERSION_CHECK(bus_p)) {
531 			/*
532 			 * PCI Express to PCI-X bridges only implement the
533 			 * secondary side of the PCI-X ECC registers, bit one is
534 			 * read-only so we make sure we do not write to it.
535 			 */
536 			if (!PCIE_IS_PCIE_BDG(bus_p)) {
537 				PCIX_CAP_PUT(32, bus_p, PCI_PCIX_BDG_ECC_STATUS,
538 				    0);
539 				pf_pcix_ecc_regs_gather(
540 				    PCIX_BDG_ECC_REG(pfd_p, 0), bus_p, B_TRUE);
541 				PCIX_CAP_PUT(32, bus_p, PCI_PCIX_BDG_ECC_STATUS,
542 				    1);
543 			}
544 			pf_pcix_ecc_regs_gather(PCIX_BDG_ECC_REG(pfd_p, 0),
545 			    bus_p, B_TRUE);
546 		}
547 	} else {
548 		pf_pcix_err_regs_t *pcix_regs = PCIX_ERR_REG(pfd_p);
549 
550 		pcix_regs->pcix_command = PCIX_CAP_GET(16, bus_p,
551 		    PCI_PCIX_COMMAND);
552 		pcix_regs->pcix_status = PCIX_CAP_GET(32, bus_p,
553 		    PCI_PCIX_STATUS);
554 		if (PCIX_ECC_VERSION_CHECK(bus_p))
555 			pf_pcix_ecc_regs_gather(PCIX_ECC_REG(pfd_p), bus_p,
556 			    B_TRUE);
557 	}
558 }
559 
/*
 * Snapshot the PCIe error state of "bus_p" into "pfd_p": the PCIe
 * capability registers always; the PCI-X registers for PCIe/PCI-X
 * bridges; and, when AER is supported, the UE/CE registers plus the
 * secondary (bridge) AER set and the root port AER set where present.
 */
static void
pf_pcie_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p)
{
	pf_pcie_err_regs_t *pcie_regs = PCIE_ERR_REG(pfd_p);
	pf_pcie_adv_err_regs_t *pcie_adv_regs = PCIE_ADV_REG(pfd_p);

	pcie_regs->pcie_err_status = PCIE_CAP_GET(16, bus_p, PCIE_DEVSTS);
	pcie_regs->pcie_err_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
	pcie_regs->pcie_dev_cap = PCIE_CAP_GET(32, bus_p, PCIE_DEVCAP);

	if (PCIE_IS_BDG(bus_p) && PCIE_IS_PCIX(bus_p))
		pf_pcix_regs_gather(pfd_p, bus_p);

	if (PCIE_IS_ROOT(bus_p)) {
		pf_pcie_rp_err_regs_t *pcie_rp_regs = PCIE_RP_REG(pfd_p);

		pcie_rp_regs->pcie_rp_status = PCIE_CAP_GET(32, bus_p,
		    PCIE_ROOTSTS);
		pcie_rp_regs->pcie_rp_ctl = PCIE_CAP_GET(16, bus_p,
		    PCIE_ROOTCTL);
	}

	/* Everything below requires the AER extended capability. */
	if (!PCIE_HAS_AER(bus_p))
		return;

	/* Gather UE AERs */
	pcie_adv_regs->pcie_adv_ctl = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_CTL);
	pcie_adv_regs->pcie_ue_status = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_UCE_STS);
	pcie_adv_regs->pcie_ue_mask = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_UCE_MASK);
	pcie_adv_regs->pcie_ue_sev = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_UCE_SERV);
	/* Capture the four dwords of the AER header log. */
	PCIE_ADV_HDR(pfd_p, 0) = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_HDR_LOG);
	PCIE_ADV_HDR(pfd_p, 1) = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_HDR_LOG + 0x4);
	PCIE_ADV_HDR(pfd_p, 2) = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_HDR_LOG + 0x8);
	PCIE_ADV_HDR(pfd_p, 3) = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_HDR_LOG + 0xc);

	/* Gather CE AERs */
	pcie_adv_regs->pcie_ce_status = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_CE_STS);
	pcie_adv_regs->pcie_ce_mask = PCIE_AER_GET(32, bus_p,
	    PCIE_AER_CE_MASK);

	/*
	 * If pci express to pci bridge then grab the bridge
	 * error registers.
	 */
	if (PCIE_IS_PCIE_BDG(bus_p)) {
		pf_pcie_adv_bdg_err_regs_t *pcie_bdg_regs =
		    PCIE_ADV_BDG_REG(pfd_p);

		pcie_bdg_regs->pcie_sue_ctl = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SCTL);
		pcie_bdg_regs->pcie_sue_status = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SUCE_STS);
		pcie_bdg_regs->pcie_sue_mask = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SUCE_MASK);
		pcie_bdg_regs->pcie_sue_sev = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SUCE_SERV);
		/* Secondary AER header log, four dwords. */
		PCIE_ADV_BDG_HDR(pfd_p, 0) = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SHDR_LOG);
		PCIE_ADV_BDG_HDR(pfd_p, 1) = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SHDR_LOG + 0x4);
		PCIE_ADV_BDG_HDR(pfd_p, 2) = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SHDR_LOG + 0x8);
		PCIE_ADV_BDG_HDR(pfd_p, 3) = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_SHDR_LOG + 0xc);
	}

	/*
	 * If PCI Express root port then grab the root port
	 * error registers.
	 */
	if (PCIE_IS_ROOT(bus_p)) {
		pf_pcie_adv_rp_err_regs_t *pcie_rp_regs =
		    PCIE_ADV_RP_REG(pfd_p);

		pcie_rp_regs->pcie_rp_err_cmd = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_RE_CMD);
		pcie_rp_regs->pcie_rp_err_status = PCIE_AER_GET(32, bus_p,
		    PCIE_AER_RE_STS);
		/* Source IDs identify the device that sent the message. */
		pcie_rp_regs->pcie_rp_ce_src_id = PCIE_AER_GET(16, bus_p,
		    PCIE_AER_CE_SRC_ID);
		pcie_rp_regs->pcie_rp_ue_src_id = PCIE_AER_GET(16, bus_p,
		    PCIE_AER_ERR_SRC_ID);
	}
}
653 
/*
 * Snapshot all error registers relevant to "bus_p" into "pfd_p":
 * legacy PCI status always, bridge secondary status/control for
 * bridges, then the PCIe or PCI-X specific sets as appropriate.
 */
static void
pf_pci_regs_gather(pf_data_t *pfd_p, pcie_bus_t *bus_p)
{
	pf_pci_err_regs_t *pci_regs = PCI_ERR_REG(pfd_p);

	/*
	 * Start by reading all the error registers that are available for
	 * pci and pci express and for leaf devices and bridges/switches
	 */
	pci_regs->pci_err_status = PCIE_GET(16, bus_p, PCI_CONF_STAT);
	pci_regs->pci_cfg_comm = PCIE_GET(16, bus_p, PCI_CONF_COMM);

	/*
	 * If pci-pci bridge grab PCI bridge specific error registers.
	 */
	if (PCIE_IS_BDG(bus_p)) {
		pf_pci_bdg_err_regs_t *pci_bdg_regs = PCI_BDG_ERR_REG(pfd_p);
		pci_bdg_regs->pci_bdg_sec_stat =
		    PCIE_GET(16, bus_p, PCI_BCNF_SEC_STATUS);
		pci_bdg_regs->pci_bdg_ctrl =
		    PCIE_GET(16, bus_p, PCI_BCNF_BCNTRL);
	}

	/*
	 * If pci express device grab pci express error registers and
	 * check for advanced error reporting features and grab them if
	 * available.
	 */
	if (PCIE_IS_PCIE(bus_p))
		pf_pcie_regs_gather(pfd_p, bus_p);
	else if (PCIE_IS_PCIX(bus_p))
		pf_pcix_regs_gather(pfd_p, bus_p);

}
688 
/*
 * Write back the PCI-X error registers captured by pf_pcix_regs_gather()
 * so that the error bits that were observed are cleared.
 */
static void
pf_pcix_regs_clear(pf_data_t *pfd_p, pcie_bus_t *bus_p)
{
	if (PCIE_IS_BDG(bus_p)) {
		pf_pcix_bdg_err_regs_t *pcix_bdg_regs;

		pcix_bdg_regs = PCIX_BDG_ERR_REG(pfd_p);

		PCIX_CAP_PUT(16, bus_p, PCI_PCIX_SEC_STATUS,
		    pcix_bdg_regs->pcix_bdg_sec_stat);

		PCIX_CAP_PUT(32, bus_p, PCI_PCIX_BDG_STATUS,
		    pcix_bdg_regs->pcix_bdg_stat);

		if (PCIX_ECC_VERSION_CHECK(bus_p)) {
			pf_pcix_ecc_regs_t *pcix_bdg_ecc_regs;
			/*
			 * PCI Express to PCI-X bridges only implement the
			 * secondary side of the PCI-X ECC registers.  For
			 * clearing, there is no need to "select" the ECC
			 * register, just write what was originally read.
			 */
			if (!PCIE_IS_PCIE_BDG(bus_p)) {
				/* Primary-side ECC snapshot is in slot 0. */
				pcix_bdg_ecc_regs = PCIX_BDG_ECC_REG(pfd_p, 0);
				PCIX_CAP_PUT(32, bus_p, PCI_PCIX_BDG_ECC_STATUS,
				    pcix_bdg_ecc_regs->pcix_ecc_ctlstat);

			}
			/* Secondary-side ECC snapshot is in slot 1. */
			pcix_bdg_ecc_regs = PCIX_BDG_ECC_REG(pfd_p, 1);
			PCIX_CAP_PUT(32, bus_p, PCI_PCIX_BDG_ECC_STATUS,
			    pcix_bdg_ecc_regs->pcix_ecc_ctlstat);
		}
	} else {
		pf_pcix_err_regs_t *pcix_regs = PCIX_ERR_REG(pfd_p);

		PCIX_CAP_PUT(32, bus_p, PCI_PCIX_STATUS,
		    pcix_regs->pcix_status);

		if (PCIX_ECC_VERSION_CHECK(bus_p)) {
			pf_pcix_ecc_regs_t *pcix_ecc_regs = PCIX_ECC_REG(pfd_p);

			PCIX_CAP_PUT(32, bus_p, PCI_PCIX_ECC_STATUS,
			    pcix_ecc_regs->pcix_ecc_ctlstat);
		}
	}
}
735 
/*
 * Write back the PCIe (and, when AER is supported, the UE/CE, secondary
 * and root) error status registers captured by pf_pcie_regs_gather() so
 * that the observed error bits are cleared.
 */
static void
pf_pcie_regs_clear(pf_data_t *pfd_p, pcie_bus_t *bus_p)
{
	pf_pcie_err_regs_t *pcie_regs = PCIE_ERR_REG(pfd_p);
	pf_pcie_adv_err_regs_t *pcie_adv_regs = PCIE_ADV_REG(pfd_p);

	PCIE_CAP_PUT(16, bus_p, PCIE_DEVSTS, pcie_regs->pcie_err_status);

	if (PCIE_IS_BDG(bus_p) && PCIE_IS_PCIX(bus_p))
		pf_pcix_regs_clear(pfd_p, bus_p);

	/* Registers below only exist with the AER extended capability. */
	if (!PCIE_HAS_AER(bus_p))
		return;

	PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_STS,
	    pcie_adv_regs->pcie_ue_status);

	PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_STS,
	    pcie_adv_regs->pcie_ce_status);

	if (PCIE_IS_PCIE_BDG(bus_p)) {
		pf_pcie_adv_bdg_err_regs_t *pcie_bdg_regs =
		    PCIE_ADV_BDG_REG(pfd_p);

		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_STS,
		    pcie_bdg_regs->pcie_sue_status);
	}

	/*
	 * If PCI Express root complex then clear the root complex
	 * error registers.
	 */
	if (PCIE_IS_ROOT(bus_p)) {
		pf_pcie_adv_rp_err_regs_t *pcie_rp_regs;

		pcie_rp_regs = PCIE_ADV_RP_REG(pfd_p);

		PCIE_AER_PUT(32, bus_p, PCIE_AER_RE_STS,
		    pcie_rp_regs->pcie_rp_err_status);
	}
}
777 
/*
 * Clear the error registers captured by pf_pci_regs_gather():
 * the PCIe or PCI-X specific sets first, then the legacy PCI status
 * and, for bridges, the secondary status.
 */
static void
pf_pci_regs_clear(pf_data_t *pfd_p, pcie_bus_t *bus_p)
{
	if (PCIE_IS_PCIE(bus_p))
		pf_pcie_regs_clear(pfd_p, bus_p);
	else if (PCIE_IS_PCIX(bus_p))
		pf_pcix_regs_clear(pfd_p, bus_p);

	PCIE_PUT(16, bus_p, PCI_CONF_STAT, pfd_p->pe_pci_regs->pci_err_status);

	if (PCIE_IS_BDG(bus_p)) {
		pf_pci_bdg_err_regs_t *pci_bdg_regs = PCI_BDG_ERR_REG(pfd_p);
		PCIE_PUT(16, bus_p, PCI_BCNF_SEC_STATUS,
		    pci_bdg_regs->pci_bdg_sec_stat);
	}
}
794 
795 /* ARGSUSED */
796 void
797 pcie_clear_errors(dev_info_t *dip)
798 {
799 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
800 	pf_data_t *pfd_p = PCIE_DIP2PFD(dip);
801 
802 	ASSERT(bus_p);
803 
804 	pf_pci_regs_gather(pfd_p, bus_p);
805 	pf_pci_regs_clear(pfd_p, bus_p);
806 }
807 
/* Find the fault BDF, fault Addr or full scan on a PCIe Root Port. */
static void
pf_pci_find_rp_fault(pf_data_t *pfd_p, pcie_bus_t *bus_p)
{
	pf_root_fault_t *root_fault = PCIE_ROOT_FAULT(pfd_p);
	pf_pcie_adv_rp_err_regs_t *rp_regs = PCIE_ADV_RP_REG(pfd_p);
	uint32_t root_err = rp_regs->pcie_rp_err_status;
	uint32_t ue_err = PCIE_ADV_REG(pfd_p)->pcie_ue_status;
	int num_faults = 0;

	/* Since this data structure is reused, make sure to reset it */
	root_fault->full_scan = B_FALSE;
	root_fault->scan_bdf = PCIE_INVALID_BDF;
	root_fault->scan_addr = 0;

	/*
	 * Without AER there is no way to pinpoint the offender; if the
	 * secondary status shows any bridge error, scan everything.
	 */
	if (!PCIE_HAS_AER(bus_p) &&
	    (PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat & PF_PCI_BDG_ERR)) {
		PCIE_ROOT_FAULT(pfd_p)->full_scan = B_TRUE;
		return;
	}

	/*
	 * Check to see if an error has been received that
	 * requires a scan of the fabric.  Count the number of
	 * faults seen.  If MUL CE/FE_NFE that counts for
	 * atleast 2 faults, so just return with full_scan.
	 */
	if ((root_err & PCIE_AER_RE_STS_MUL_CE_RCVD) ||
	    (root_err & PCIE_AER_RE_STS_MUL_FE_NFE_RCVD)) {
		PCIE_ROOT_FAULT(pfd_p)->full_scan = B_TRUE;
		return;
	}

	if (root_err & PCIE_AER_RE_STS_CE_RCVD)
		num_faults++;

	if (root_err & PCIE_AER_RE_STS_FE_NFE_RCVD)
		num_faults++;

	if (ue_err & PCIE_AER_UCE_CA)
		num_faults++;

	if (ue_err & PCIE_AER_UCE_UR)
		num_faults++;

	/* If no faults just return */
	if (num_faults == 0)
		return;

	/* If faults > 1 do full scan */
	if (num_faults > 1) {
		PCIE_ROOT_FAULT(pfd_p)->full_scan = B_TRUE;
		return;
	}

	/* By this point, there is only 1 fault detected */
	if (root_err & PCIE_AER_RE_STS_CE_RCVD) {
		/* CE: the CE source ID register names the sender. */
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = rp_regs->pcie_rp_ce_src_id;
		num_faults--;
	} else if (root_err & PCIE_AER_RE_STS_FE_NFE_RCVD) {
		/* FE/NFE: likewise, use the UE source ID. */
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = rp_regs->pcie_rp_ue_src_id;
		num_faults--;
	} else if ((HAS_AER_LOGS(pfd_p, PCIE_AER_UCE_CA) ||
	    HAS_AER_LOGS(pfd_p, PCIE_AER_UCE_UR)) &&
	    (pf_tlp_decode(PCIE_PFD2BUS(pfd_p), PCIE_ADV_REG(pfd_p)) ==
	    DDI_SUCCESS)) {
		/* CA/UR: decode the logged TLP header to get the address. */
		PCIE_ROOT_FAULT(pfd_p)->scan_addr =
		    PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_addr;
		num_faults--;
	}

	/*
	 * This means an error did occur, but we couldn't extract the fault BDF
	 */
	if (num_faults > 0)
		PCIE_ROOT_FAULT(pfd_p)->full_scan = B_TRUE;

}
886 
887 
888 /*
889  * Load PCIe Fault Data for PCI/PCIe devices into PCIe Fault Data Queue
890  *
891  * Returns a scan flag.
892  * o PF_SCAN_SUCCESS - Error gathered and cleared sucessfuly, data added to
893  *   Fault Q
894  * o PF_SCAN_BAD_RESPONSE - Unable to talk to device, item added to fault Q
895  * o PF_SCAN_CB_FAILURE - A hardened device deemed that the error was fatal.
896  * o PF_SCAN_NO_ERR_IN_CHILD - Only applies to bridge to prevent further
897  *   unnecessary scanning
898  * o PF_SCAN_IN_DQ - This device has already been scanned; it was skipped this
899  *   time.
900  */
static int
pf_default_hdl(dev_info_t *dip, pf_impl_t *impl)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	pf_data_t *pfd_p = PCIE_DIP2PFD(dip);
	int cb_sts, scan_flag = PF_SCAN_SUCCESS;

	/* Make sure this device hasn't already been snapshotted and cleared */
	if (pfd_p->pe_valid == B_TRUE) {
		scan_flag |= PF_SCAN_IN_DQ;
		goto done;
	}

	/*
	 * Read vendor/device ID and check with cached data, if it doesn't match
	 * could very well be a device that isn't responding anymore.  Just
	 * stop.  Save the basic info in the error q for post mortem debugging
	 * purposes.
	 */
	if (PCIE_GET(32, bus_p, PCI_CONF_VENID) != bus_p->bus_dev_ven_id) {
		char buf[FM_MAX_CLASS];

		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
		    PCI_ERROR_SUBCLASS, PCI_NR);
		ddi_fm_ereport_post(dip, buf, fm_ena_generate(0, FM_ENA_FMT1),
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, NULL);

		/*
		 * For IOV/Hotplug purposes skip gathering info for this
		 * device, but populate affected info and severity.  Clear out
		 * any data that may have been saved in the last fabric scan.
		 */
		pf_reset_pfd(pfd_p);
		pfd_p->pe_severity_flags = PF_ERR_PANIC_BAD_RESPONSE;
		PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags = PF_AFFECTED_SELF;

		/* Add the snapshot to the error q */
		pf_en_dq(pfd_p, impl);
		pfd_p->pe_valid = B_TRUE;

		return (PF_SCAN_BAD_RESPONSE);
	}

	/* Snapshot, then clear, this device's error registers. */
	pf_pci_regs_gather(pfd_p, bus_p);
	pf_pci_regs_clear(pfd_p, bus_p);
	if (PCIE_IS_RP(bus_p))
		pf_pci_find_rp_fault(pfd_p, bus_p);

	/* Give any hardened driver's FM callback a chance to respond. */
	cb_sts = pf_fm_callback(dip, impl->pf_derr);

	if (cb_sts == DDI_FM_FATAL || cb_sts == DDI_FM_UNKNOWN)
		scan_flag |= PF_SCAN_CB_FAILURE;

	/* Add the snapshot to the error q */
	pf_en_dq(pfd_p, impl);

done:
	/*
	 * If a bridge does not have any error no need to scan any further down.
	 * For PCIe devices, check the PCIe device status and PCI secondary
	 * status.
	 * - Some non-compliant PCIe devices do not utilize PCIe
	 *   error registers.  If so rely on legacy PCI error registers.
	 * For PCI devices, check the PCI secondary status.
	 */
	if (PCIE_IS_PCIE_BDG(bus_p) &&
	    !(PCIE_ERR_REG(pfd_p)->pcie_err_status & PF_PCIE_BDG_ERR) &&
	    !(PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat & PF_PCI_BDG_ERR))
		scan_flag |= PF_SCAN_NO_ERR_IN_CHILD;

	if (PCIE_IS_PCI_BDG(bus_p) &&
	    !(PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat & PF_PCI_BDG_ERR))
		scan_flag |= PF_SCAN_NO_ERR_IN_CHILD;

	pfd_p->pe_valid = B_TRUE;
	return (scan_flag);
}
978 
979 /*
980  * Called during postattach to initialize a device's error handling
981  * capabilities.  If the devices has already been hardened, then there isn't
982  * much needed.  Otherwise initialize the device's default FMA capabilities.
983  *
984  * In a future project where PCIe support is removed from pcifm, several
985  * "properties" that are setup in ddi_fm_init and pci_ereport_setup need to be
986  * created here so that the PCI/PCIe eversholt rules will work properly.
987  */
void
pf_init(dev_info_t *dip, ddi_iblock_cookie_t ibc, ddi_attach_cmd_t cmd)
{
	pcie_bus_t		*bus_p = PCIE_DIP2BUS(dip);
	struct i_ddi_fmhdl	*fmhdl = DEVI(dip)->devi_fmhdl;
	boolean_t		need_cb_register = B_FALSE;

	if (!bus_p) {
		cmn_err(CE_WARN, "devi_bus information is not set for %s%d.\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return;
	}

	if (fmhdl) {
		/*
		 * Driver is already hardened (it called ddi_fm_init itself).
		 * If device is only ereport capable and not callback capable
		 * make it callback capable. The only downside is that the
		 * "fm-errcb-capable" property is not created for this device
		 * which should be ok since it's not used anywhere.
		 */
		if (!(fmhdl->fh_cap & DDI_FM_ERRCB_CAPABLE))
			need_cb_register = B_TRUE;
	} else {
		int cap;
		/*
		 * fm-capable in driver.conf can be used to set fm_capabilities.
		 * If fm-capable is not defined, set the default
		 * DDI_FM_EREPORT_CAPABLE and DDI_FM_ERRCB_CAPABLE.
		 */
		cap = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "fm-capable",
		    DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE);
		cap &= (DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE);

		/* Remember we hardened this non-hardened (NH) driver */
		bus_p->bus_fm_flags |= PF_FM_IS_NH;

		if (cmd == DDI_ATTACH) {
			/* ddi_fm_init must precede pci_ereport_setup */
			ddi_fm_init(dip, &cap, &ibc);
			pci_ereport_setup(dip);
		}

		if (cap & DDI_FM_ERRCB_CAPABLE)
			need_cb_register = B_TRUE;

		/* Re-fetch: ddi_fm_init above may have created the handle */
		fmhdl = DEVI(dip)->devi_fmhdl;
	}

	/* If ddi_fm_init fails for any reason RETURN */
	if (!fmhdl) {
		bus_p->bus_fm_flags = 0;
		return;
	}

	fmhdl->fh_cap |=  DDI_FM_ERRCB_CAPABLE;
	if (cmd == DDI_ATTACH) {
		/* NH drivers get a dummy callback so fabric scans proceed */
		if (need_cb_register)
			ddi_fm_handler_register(dip, pf_dummy_cb, NULL);
	}

	bus_p->bus_fm_flags |= PF_FM_READY;
}
1049 
1050 /* undo FMA lock, called at predetach */
1051 void
1052 pf_fini(dev_info_t *dip, ddi_detach_cmd_t cmd)
1053 {
1054 	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
1055 
1056 	if (!bus_p)
1057 		return;
1058 
1059 	/* Don't fini anything if device isn't FM Ready */
1060 	if (!(bus_p->bus_fm_flags & PF_FM_READY))
1061 		return;
1062 
1063 	/* no other code should set the flag to false */
1064 	bus_p->bus_fm_flags &= ~PF_FM_READY;
1065 
1066 	/*
1067 	 * Grab the mutex to make sure device isn't in the middle of
1068 	 * error handling.  Setting the bus_fm_flag to ~PF_FM_READY
1069 	 * should prevent this device from being error handled after
1070 	 * the mutex has been released.
1071 	 */
1072 	(void) pf_handler_enter(dip, NULL);
1073 	pf_handler_exit(dip);
1074 
1075 	/* undo non-hardened drivers */
1076 	if (bus_p->bus_fm_flags & PF_FM_IS_NH) {
1077 		if (cmd == DDI_DETACH) {
1078 			bus_p->bus_fm_flags &= ~PF_FM_IS_NH;
1079 			pci_ereport_teardown(dip);
1080 			/*
1081 			 * ddi_fini itself calls ddi_handler_unregister,
1082 			 * so no need to explicitly call unregister.
1083 			 */
1084 			ddi_fm_fini(dip);
1085 		}
1086 	}
1087 }
1088 
/*
 * Placeholder FM error callback registered by pf_init() on behalf of
 * non-hardened drivers; it unconditionally reports DDI_FM_OK so that the
 * fabric scan treats such devices as having no driver-detected fault.
 */
/*ARGSUSED*/
static int
pf_dummy_cb(dev_info_t *dip, ddi_fm_error_t *derr, const void *not_used)
{
	return (DDI_FM_OK);
}
1095 
1096 /*
1097  * Add PFD to queue.  If it is an RC add it to the beginning,
1098  * otherwise add it to the end.
1099  */
static void
pf_en_dq(pf_data_t *pfd_p, pf_impl_t *impl)
{
	pf_data_t *head_p = impl->pf_dq_head_p;
	pf_data_t *tail_p = impl->pf_dq_tail_p;

	impl->pf_total++;

	/* Empty queue: the first entry enqueued must be a root (RC/RP) pfd */
	if (!head_p) {
		ASSERT(PFD_IS_ROOT(pfd_p));
		impl->pf_dq_head_p = pfd_p;
		impl->pf_dq_tail_p = pfd_p;
		pfd_p->pe_prev = NULL;
		pfd_p->pe_next = NULL;
		return;
	}

	/* Check if this is a Root Port pfd */
	if (PFD_IS_ROOT(pfd_p)) {
		pf_data_t *root_p, *last_p = NULL;

		/* The first item must be a RP */
		root_p = head_p;
		/* Walk past the run of root pfds at the head of the queue */
		for (last_p = head_p; last_p && PFD_IS_ROOT(last_p);
		    last_p = last_p->pe_next)
			root_p = last_p;

		/* root_p is the last RP pfd. last_p is the first non-RP pfd. */
		root_p->pe_next = pfd_p;
		pfd_p->pe_prev = root_p;
		pfd_p->pe_next = last_p;

		if (last_p)
			last_p->pe_prev = pfd_p;
		else
			tail_p = pfd_p;	/* queue held only root pfds */
	} else {
		/* Non-root pfds are simply appended at the tail */
		tail_p->pe_next = pfd_p;
		pfd_p->pe_prev = tail_p;
		pfd_p->pe_next = NULL;
		tail_p = pfd_p;
	}

	impl->pf_dq_head_p = head_p;
	impl->pf_dq_tail_p = tail_p;
}
1146 
1147 /*
1148  * Ignore:
1149  * - TRAINING: as leaves do not have children
1150  * - SD: as leaves do not have children
1151  */
1152 const pf_fab_err_tbl_t pcie_pcie_tbl[] = {
1153 	{PCIE_AER_UCE_DLP,	pf_panic,
1154 	    PF_AFFECTED_PARENT, 0},
1155 
1156 	{PCIE_AER_UCE_PTLP,	pf_analyse_ptlp,
1157 	    PF_AFFECTED_SELF, 0},
1158 
1159 	{PCIE_AER_UCE_FCP,	pf_panic,
1160 	    PF_AFFECTED_PARENT, 0},
1161 
1162 	{PCIE_AER_UCE_TO,	pf_analyse_to,
1163 	    PF_AFFECTED_SELF, 0},
1164 
1165 	{PCIE_AER_UCE_CA,	pf_analyse_ca_ur,
1166 	    PF_AFFECTED_SELF, 0},
1167 
1168 	{PCIE_AER_UCE_UC,	pf_analyse_uc,
1169 	    0, 0},
1170 
1171 	{PCIE_AER_UCE_RO,	pf_panic,
1172 	    PF_AFFECTED_PARENT, 0},
1173 
1174 	{PCIE_AER_UCE_MTLP,	pf_panic,
1175 	    PF_AFFECTED_PARENT, 0},
1176 
1177 	{PCIE_AER_UCE_ECRC,	pf_panic,
1178 	    PF_AFFECTED_SELF, 0},
1179 
1180 	{PCIE_AER_UCE_UR,	pf_analyse_ca_ur,
1181 	    PF_AFFECTED_SELF, 0},
1182 
1183 	{NULL, NULL, NULL, NULL}
1184 };
1185 
1186 const pf_fab_err_tbl_t pcie_rp_tbl[] = {
1187 	{PCIE_AER_UCE_TRAINING,	pf_no_panic,
1188 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1189 
1190 	{PCIE_AER_UCE_DLP,	pf_panic,
1191 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1192 
1193 	{PCIE_AER_UCE_SD,	pf_no_panic,
1194 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1195 
1196 	{PCIE_AER_UCE_PTLP,	pf_analyse_ptlp,
1197 	    PF_AFFECTED_AER, PF_AFFECTED_CHILDREN},
1198 
1199 	{PCIE_AER_UCE_FCP,	pf_panic,
1200 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1201 
1202 	{PCIE_AER_UCE_TO,	pf_panic,
1203 	    PF_AFFECTED_ADDR, PF_AFFECTED_CHILDREN},
1204 
1205 	{PCIE_AER_UCE_CA,	pf_no_panic,
1206 	    PF_AFFECTED_AER, PF_AFFECTED_CHILDREN},
1207 
1208 	{PCIE_AER_UCE_UC,	pf_analyse_uc,
1209 	    0, 0},
1210 
1211 	{PCIE_AER_UCE_RO,	pf_panic,
1212 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1213 
1214 	{PCIE_AER_UCE_MTLP,	pf_panic,
1215 	    PF_AFFECTED_SELF | PF_AFFECTED_AER,
1216 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},
1217 
1218 	{PCIE_AER_UCE_ECRC,	pf_panic,
1219 	    PF_AFFECTED_AER, PF_AFFECTED_CHILDREN},
1220 
1221 	{PCIE_AER_UCE_UR,	pf_no_panic,
1222 	    PF_AFFECTED_AER, PF_AFFECTED_CHILDREN},
1223 
1224 	{NULL, NULL, NULL, NULL}
1225 };
1226 
1227 const pf_fab_err_tbl_t pcie_sw_tbl[] = {
1228 	{PCIE_AER_UCE_TRAINING,	pf_no_panic,
1229 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1230 
1231 	{PCIE_AER_UCE_DLP,	pf_panic,
1232 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1233 
1234 	{PCIE_AER_UCE_SD,	pf_no_panic,
1235 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1236 
1237 	{PCIE_AER_UCE_PTLP,	pf_analyse_ptlp,
1238 	    PF_AFFECTED_AER, PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},
1239 
1240 	{PCIE_AER_UCE_FCP,	pf_panic,
1241 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1242 
1243 	{PCIE_AER_UCE_TO,	pf_analyse_to,
1244 	    PF_AFFECTED_CHILDREN, 0},
1245 
1246 	{PCIE_AER_UCE_CA,	pf_analyse_ca_ur,
1247 	    PF_AFFECTED_AER, PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},
1248 
1249 	{PCIE_AER_UCE_UC,	pf_analyse_uc,
1250 	    0, 0},
1251 
1252 	{PCIE_AER_UCE_RO,	pf_panic,
1253 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1254 
1255 	{PCIE_AER_UCE_MTLP,	pf_panic,
1256 	    PF_AFFECTED_SELF | PF_AFFECTED_AER,
1257 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},
1258 
1259 	{PCIE_AER_UCE_ECRC,	pf_panic,
1260 	    PF_AFFECTED_AER, PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},
1261 
1262 	{PCIE_AER_UCE_UR,	pf_analyse_ca_ur,
1263 	    PF_AFFECTED_AER, PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN},
1264 
1265 	{NULL, NULL, NULL, NULL}
1266 };
1267 
1268 const pf_fab_err_tbl_t pcie_pcie_bdg_tbl[] = {
1269 	{PCIE_AER_SUCE_TA_ON_SC,	pf_analyse_sc,
1270 	    0, 0},
1271 
1272 	{PCIE_AER_SUCE_MA_ON_SC,	pf_analyse_sc,
1273 	    0, 0},
1274 
1275 	{PCIE_AER_SUCE_RCVD_TA,		pf_analyse_ma_ta,
1276 	    0, 0},
1277 
1278 	{PCIE_AER_SUCE_RCVD_MA,		pf_analyse_ma_ta,
1279 	    0, 0},
1280 
1281 	{PCIE_AER_SUCE_USC_ERR,		pf_panic,
1282 	    PF_AFFECTED_SAER, PF_AFFECTED_CHILDREN},
1283 
1284 	{PCIE_AER_SUCE_USC_MSG_DATA_ERR, pf_analyse_ma_ta,
1285 	    PF_AFFECTED_SAER, PF_AFFECTED_CHILDREN},
1286 
1287 	{PCIE_AER_SUCE_UC_DATA_ERR,	pf_analyse_uc_data,
1288 	    PF_AFFECTED_SAER, PF_AFFECTED_CHILDREN},
1289 
1290 	{PCIE_AER_SUCE_UC_ATTR_ERR,	pf_panic,
1291 	    PF_AFFECTED_CHILDREN, 0},
1292 
1293 	{PCIE_AER_SUCE_UC_ADDR_ERR,	pf_panic,
1294 	    PF_AFFECTED_CHILDREN, 0},
1295 
1296 	{PCIE_AER_SUCE_TIMER_EXPIRED,	pf_panic,
1297 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1298 
1299 	{PCIE_AER_SUCE_PERR_ASSERT,	pf_analyse_perr_assert,
1300 	    0, 0},
1301 
1302 	{PCIE_AER_SUCE_SERR_ASSERT,	pf_no_panic,
1303 	    0, 0},
1304 
1305 	{PCIE_AER_SUCE_INTERNAL_ERR,	pf_panic,
1306 	    PF_AFFECTED_SELF | PF_AFFECTED_CHILDREN, 0},
1307 
1308 	{NULL, NULL, NULL, NULL}
1309 };
1310 
1311 const pf_fab_err_tbl_t pcie_pci_bdg_tbl[] = {
1312 	{PCI_STAT_PERROR,	pf_analyse_pci,
1313 	    PF_AFFECTED_SELF, 0},
1314 
1315 	{PCI_STAT_S_PERROR,	pf_analyse_pci,
1316 	    PF_AFFECTED_SELF, 0},
1317 
1318 	{PCI_STAT_S_SYSERR,	pf_panic,
1319 	    PF_AFFECTED_SELF, 0},
1320 
1321 	{PCI_STAT_R_MAST_AB,	pf_analyse_pci,
1322 	    PF_AFFECTED_SELF, 0},
1323 
1324 	{PCI_STAT_R_TARG_AB,	pf_analyse_pci,
1325 	    PF_AFFECTED_SELF, 0},
1326 
1327 	{PCI_STAT_S_TARG_AB,	pf_analyse_pci,
1328 	    PF_AFFECTED_SELF, 0},
1329 
1330 	{NULL, NULL, NULL, NULL}
1331 };
1332 
1333 const pf_fab_err_tbl_t pcie_pci_tbl[] = {
1334 	{PCI_STAT_PERROR,	pf_analyse_pci,
1335 	    PF_AFFECTED_SELF, 0},
1336 
1337 	{PCI_STAT_S_PERROR,	pf_analyse_pci,
1338 	    PF_AFFECTED_SELF, 0},
1339 
1340 	{PCI_STAT_S_SYSERR,	pf_panic,
1341 	    PF_AFFECTED_SELF, 0},
1342 
1343 	{PCI_STAT_R_MAST_AB,	pf_analyse_pci,
1344 	    PF_AFFECTED_SELF, 0},
1345 
1346 	{PCI_STAT_R_TARG_AB,	pf_analyse_pci,
1347 	    PF_AFFECTED_SELF, 0},
1348 
1349 	{PCI_STAT_S_TARG_AB,	pf_analyse_pci,
1350 	    PF_AFFECTED_SELF, 0},
1351 
1352 	{NULL, NULL, NULL, NULL}
1353 };
1354 
/*
 * Uncorrectable error status with masked bits removed: XOR-ing the mask
 * register with all-ones inverts it, so only unmasked status bits survive.
 */
#define	PF_MASKED_AER_ERR(pfd_p) \
	(PCIE_ADV_REG(pfd_p)->pcie_ue_status & \
	    ((PCIE_ADV_REG(pfd_p)->pcie_ue_mask) ^ 0xFFFFFFFF))
/* Same computation for the bridge secondary (SUE) status/mask pair */
#define	PF_MASKED_SAER_ERR(pfd_p) \
	(PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_status & \
	    ((PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_mask) ^ 0xFFFFFFFF))
1361 /*
1362  * Analyse all the PCIe Fault Data (erpt) gathered during dispatch in the erpt
1363  * Queue.
1364  */
static int
pf_analyse_error(ddi_fm_error_t *derr, pf_impl_t *impl)
{
	int		sts_flags, error_flags = 0;
	pf_data_t	*pfd_p;

	/* Walk every snapshot gathered by the fabric scan */
	for (pfd_p = impl->pf_dq_head_p; pfd_p; pfd_p = pfd_p->pe_next) {
		sts_flags = 0;

		/* skip analysing error when no error info is gathered */
		if (pfd_p->pe_severity_flags == PF_ERR_PANIC_BAD_RESPONSE)
			goto done;

		/* Pick the dispatch table matching the device's PCIe type */
		switch (PCIE_PFD2BUS(pfd_p)->bus_dev_type) {
		case PCIE_PCIECAP_DEV_TYPE_PCIE_DEV:
		case PCIE_PCIECAP_DEV_TYPE_PCI_DEV:
			if (PCIE_DEVSTS_CE_DETECTED &
			    PCIE_ERR_REG(pfd_p)->pcie_err_status)
				sts_flags |= PF_ERR_CE;

			pf_adjust_for_no_aer(pfd_p);
			sts_flags |= pf_analyse_error_tbl(derr, impl,
			    pfd_p, pcie_pcie_tbl, PF_MASKED_AER_ERR(pfd_p));
			break;
		case PCIE_PCIECAP_DEV_TYPE_ROOT:
			pf_adjust_for_no_aer(pfd_p);
			sts_flags |= pf_analyse_error_tbl(derr, impl,
			    pfd_p, pcie_rp_tbl, PF_MASKED_AER_ERR(pfd_p));
			break;
		case PCIE_PCIECAP_DEV_TYPE_RC_PSEUDO:
			/* no adjust_for_aer for pseudo RC */
			/* keep the severity passed on from RC if any */
			sts_flags |= pfd_p->pe_severity_flags;
			sts_flags |= pf_analyse_error_tbl(derr, impl, pfd_p,
			    pcie_rp_tbl, PF_MASKED_AER_ERR(pfd_p));
			break;
		case PCIE_PCIECAP_DEV_TYPE_UP:
		case PCIE_PCIECAP_DEV_TYPE_DOWN:
			if (PCIE_DEVSTS_CE_DETECTED &
			    PCIE_ERR_REG(pfd_p)->pcie_err_status)
				sts_flags |= PF_ERR_CE;

			pf_adjust_for_no_aer(pfd_p);
			sts_flags |= pf_analyse_error_tbl(derr, impl,
			    pfd_p, pcie_sw_tbl, PF_MASKED_AER_ERR(pfd_p))
			break;
		case PCIE_PCIECAP_DEV_TYPE_PCIE2PCI:
			if (PCIE_DEVSTS_CE_DETECTED &
			    PCIE_ERR_REG(pfd_p)->pcie_err_status)
				sts_flags |= PF_ERR_CE;

			pf_adjust_for_no_aer(pfd_p);
			pf_adjust_for_no_saer(pfd_p);
			/* Bridges are analysed on both primary (AER) ... */
			sts_flags |= pf_analyse_error_tbl(derr,
			    impl, pfd_p, pcie_pcie_tbl,
			    PF_MASKED_AER_ERR(pfd_p));
			/* ... and secondary (SAER) interfaces */
			sts_flags |= pf_analyse_error_tbl(derr,
			    impl, pfd_p, pcie_pcie_bdg_tbl,
			    PF_MASKED_SAER_ERR(pfd_p));
			/*
			 * Some non-compliant PCIe devices do not utilize PCIe
			 * error registers.  So fallthrough and rely on legacy
			 * PCI error registers.
			 */
			if ((PCIE_DEVSTS_NFE_DETECTED | PCIE_DEVSTS_FE_DETECTED)
			    & PCIE_ERR_REG(pfd_p)->pcie_err_status)
				break;
			/* FALLTHROUGH */
		case PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO:
			sts_flags |= pf_analyse_error_tbl(derr, impl,
			    pfd_p, pcie_pci_tbl,
			    PCI_ERR_REG(pfd_p)->pci_err_status);

			if (!PCIE_IS_BDG(PCIE_PFD2BUS(pfd_p)))
				break;

			sts_flags |= pf_analyse_error_tbl(derr,
			    impl, pfd_p, pcie_pci_bdg_tbl,
			    PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat);
		}

		pfd_p->pe_severity_flags = sts_flags;

done:
		pfd_p->pe_orig_severity_flags = pfd_p->pe_severity_flags;
		/* Have pciev_eh adjust the severity */
		pfd_p->pe_severity_flags = pciev_eh(pfd_p, impl);

		/* Overall result is the union of all per-device severities */
		error_flags |= pfd_p->pe_severity_flags;
	}

	return (error_flags);
}
1458 
1459 static int
1460 pf_analyse_error_tbl(ddi_fm_error_t *derr, pf_impl_t *impl,
1461     pf_data_t *pfd_p, const pf_fab_err_tbl_t *tbl, uint32_t err_reg)
1462 {
1463 	const pf_fab_err_tbl_t *row;
1464 	int err = 0;
1465 	uint16_t flags;
1466 	uint32_t bit;
1467 
1468 	for (row = tbl; err_reg && (row->bit != NULL); row++) {
1469 		bit = row->bit;
1470 		if (!(err_reg & bit))
1471 			continue;
1472 		err |= row->handler(derr, bit, impl->pf_dq_head_p, pfd_p);
1473 
1474 		flags = row->affected_flags;
1475 		/*
1476 		 * check if the primary flag is valid;
1477 		 * if not, use the secondary flag
1478 		 */
1479 		if (flags & PF_AFFECTED_AER) {
1480 			if (!HAS_AER_LOGS(pfd_p, bit)) {
1481 				flags = row->sec_affected_flags;
1482 			}
1483 		} else if (flags & PF_AFFECTED_SAER) {
1484 			if (!HAS_SAER_LOGS(pfd_p, bit)) {
1485 				flags = row->sec_affected_flags;
1486 			}
1487 		} else if (flags & PF_AFFECTED_ADDR) {
1488 			/* only Root has this flag */
1489 			if (PCIE_ROOT_FAULT(pfd_p)->scan_addr == 0) {
1490 				flags = row->sec_affected_flags;
1491 			}
1492 		}
1493 
1494 		PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags |= flags;
1495 	}
1496 
1497 	if (!err)
1498 		err = PF_ERR_NO_ERROR;
1499 
1500 	return (err);
1501 }
1502 
1503 /*
1504  * PCIe Completer Abort and Unsupport Request error analyser.  If a PCIe device
1505  * issues a CA/UR a corresponding Received CA/UR should have been seen in the
1506  * PCIe root complex.  Check to see if RC did indeed receive a CA/UR, if so then
1507  * this error may be safely ignored.  If not check the logs and see if an
1508  * associated handler for this transaction can be found.
1509  */
1510 /* ARGSUSED */
1511 static int
1512 pf_analyse_ca_ur(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1513     pf_data_t *pfd_p)
1514 {
1515 	uint32_t	abort_type;
1516 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1517 
1518 	/* If UR's are masked forgive this error */
1519 	if ((pcie_get_aer_uce_mask() & PCIE_AER_UCE_UR) &&
1520 	    (bit == PCIE_AER_UCE_UR))
1521 		return (PF_ERR_NO_PANIC);
1522 
1523 	/*
1524 	 * If a RP has an CA/UR it means a leaf sent a bad request to the RP
1525 	 * such as a config read or a bad DMA address.
1526 	 */
1527 	if (PCIE_IS_RP(PCIE_PFD2BUS(pfd_p)))
1528 		goto handle_lookup;
1529 
1530 	if (bit == PCIE_AER_UCE_UR)
1531 		abort_type = PCI_STAT_R_MAST_AB;
1532 	else
1533 		abort_type = PCI_STAT_R_TARG_AB;
1534 
1535 	if (pf_matched_in_rc(dq_head_p, pfd_p, abort_type))
1536 		return (PF_ERR_MATCHED_RC);
1537 
1538 handle_lookup:
1539 	if (HAS_AER_LOGS(pfd_p, bit) &&
1540 	    pf_log_hdl_lookup(rpdip, derr, pfd_p, B_TRUE) == PF_HDL_FOUND)
1541 			return (PF_ERR_MATCHED_DEVICE);
1542 
1543 	return (PF_ERR_PANIC);
1544 }
1545 
1546 /*
1547  * PCIe-PCI Bridge Received Master Abort and Target error analyser.  If a PCIe
1548  * Bridge receives a MA/TA a corresponding sent CA/UR should have been seen in
1549  * the PCIe root complex.  Check to see if RC did indeed receive a CA/UR, if so
1550  * then this error may be safely ignored.  If not check the logs and see if an
1551  * associated handler for this transaction can be found.
1552  */
1553 /* ARGSUSED */
1554 static int
1555 pf_analyse_ma_ta(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1556     pf_data_t *pfd_p)
1557 {
1558 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1559 	uint32_t	abort_type;
1560 
1561 	/* If UR's are masked forgive this error */
1562 	if ((pcie_get_aer_uce_mask() & PCIE_AER_UCE_UR) &&
1563 	    (bit == PCIE_AER_SUCE_RCVD_MA))
1564 		return (PF_ERR_NO_PANIC);
1565 
1566 	if (bit == PCIE_AER_SUCE_RCVD_MA)
1567 		abort_type = PCI_STAT_R_MAST_AB;
1568 	else
1569 		abort_type = PCI_STAT_R_TARG_AB;
1570 
1571 	if (pf_matched_in_rc(dq_head_p, pfd_p, abort_type))
1572 		return (PF_ERR_MATCHED_RC);
1573 
1574 	if (!HAS_SAER_LOGS(pfd_p, bit))
1575 		return (PF_ERR_PANIC);
1576 
1577 	if (pf_log_hdl_lookup(rpdip, derr, pfd_p, B_FALSE) == PF_HDL_FOUND)
1578 		return (PF_ERR_MATCHED_DEVICE);
1579 
1580 	return (PF_ERR_PANIC);
1581 }
1582 
1583 /*
1584  * Generic PCI error analyser.  This function is used for Parity Errors,
1585  * Received Master Aborts, Received Target Aborts, and Signaled Target Aborts.
1586  * In general PCI devices do not have error logs, it is very difficult to figure
1587  * out what transaction caused the error.  Instead find the nearest PCIe-PCI
1588  * Bridge and check to see if it has logs and if it has an error associated with
1589  * this PCI Device.
1590  */
/* ARGSUSED */
static int
pf_analyse_pci(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
    pf_data_t *pfd_p)
{
	pf_data_t	*parent_pfd_p;
	uint16_t	cmd;
	uint32_t	aer_ue_status;
	pcie_bus_t	*bus_p = PCIE_PFD2BUS(pfd_p);
	pf_pcie_adv_bdg_err_regs_t *parent_saer_p;

	/* A signaled system error is always fatal */
	if (PCI_ERR_REG(pfd_p)->pci_err_status & PCI_STAT_S_SYSERR)
		return (PF_ERR_PANIC);

	/* If UR's are masked forgive this error */
	if ((pcie_get_aer_uce_mask() & PCIE_AER_UCE_UR) &&
	    (bit == PCI_STAT_R_MAST_AB))
		return (PF_ERR_NO_PANIC);


	/*
	 * Map the PCI status bit onto the secondary AER bits the parent
	 * PCIe-PCI bridge would have latched for the same event.
	 */
	if (bit & (PCI_STAT_PERROR | PCI_STAT_S_PERROR)) {
		aer_ue_status = PCIE_AER_SUCE_PERR_ASSERT;
	} else {
		aer_ue_status = (PCIE_AER_SUCE_TA_ON_SC |
		    PCIE_AER_SUCE_MA_ON_SC | PCIE_AER_SUCE_RCVD_TA |
		    PCIE_AER_SUCE_RCVD_MA);
	}

	parent_pfd_p = pf_get_parent_pcie_bridge(pfd_p);
	if (parent_pfd_p == NULL)
		return (PF_ERR_PANIC);

	/* Check if parent bridge has seen this error */
	parent_saer_p = PCIE_ADV_BDG_REG(parent_pfd_p);
	if (!(parent_saer_p->pcie_sue_status & aer_ue_status) ||
	    !HAS_SAER_LOGS(parent_pfd_p, aer_ue_status))
		return (PF_ERR_PANIC);

	/*
	 * If the addr or bdf from the parent PCIe bridge logs belong to this
	 * PCI device, assume the PCIe bridge's error handling has already taken
	 * care of this PCI device's error.
	 */
	if (pf_pci_decode(parent_pfd_p, &cmd) != DDI_SUCCESS)
		return (PF_ERR_PANIC);

	if ((parent_saer_p->pcie_sue_tgt_bdf == bus_p->bus_bdf) ||
	    pf_in_addr_range(bus_p, parent_saer_p->pcie_sue_tgt_addr))
		return (PF_ERR_MATCHED_PARENT);

	/*
	 * If this device is a PCI-PCI bridge, check if the bdf in the parent
	 * PCIe bridge logs is in the range of this PCI-PCI Bridge's bus ranges.
	 * If they are, then assume the PCIe bridge's error handling has already
	 * taken care of this PCI-PCI bridge device's error.
	 */
	if (PCIE_IS_BDG(bus_p) &&
	    pf_in_bus_range(bus_p, parent_saer_p->pcie_sue_tgt_bdf))
		return (PF_ERR_MATCHED_PARENT);

	return (PF_ERR_PANIC);
}
1653 
1654 /*
1655  * PCIe Bridge transactions associated with PERR.
1656  * o Bridge received a poisoned Non-Posted Write (CFG Writes) from PCIe
1657  * o Bridge received a poisoned Posted Write from (MEM Writes) from PCIe
1658  * o Bridge received a poisoned Completion on a Split Transction from PCIe
1659  * o Bridge received a poisoned Completion on a Delayed Transction from PCIe
1660  *
1661  * Check for non-poisoned PCIe transactions that got forwarded to the secondary
1662  * side and detects a PERR#.  Except for delayed read completions, a poisoned
1663  * TLP will be forwarded to the secondary bus and PERR# will be asserted.
1664  */
/* ARGSUSED */
static int
pf_analyse_perr_assert(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
    pf_data_t *pfd_p)
{
	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
	uint16_t	cmd;
	int		hdl_sts = PF_HDL_NOTFOUND;
	int		err = PF_ERR_NO_ERROR;
	pf_pcie_adv_bdg_err_regs_t *saer_p;


	if (HAS_SAER_LOGS(pfd_p, bit)) {
		saer_p = PCIE_ADV_BDG_REG(pfd_p);
		/* Decode the PCI/PCI-X command from the SAER header log */
		if (pf_pci_decode(pfd_p, &cmd) != DDI_SUCCESS)
			return (PF_ERR_PANIC);

cmd_switch:
		/* Dispatch on the transaction type that saw the PERR */
		switch (cmd) {
		case PCI_PCIX_CMD_IOWR:
		case PCI_PCIX_CMD_MEMWR:
		case PCI_PCIX_CMD_MEMWR_BL:
		case PCI_PCIX_CMD_MEMWRBL:
			/* Posted Writes Transactions */
			if (saer_p->pcie_sue_tgt_trans == PF_ADDR_PIO)
				hdl_sts = pf_log_hdl_lookup(rpdip, derr, pfd_p,
				    B_FALSE);
			break;
		case PCI_PCIX_CMD_CFWR:
			/*
			 * Check to see if it is a non-posted write.  If so, a
			 * UR Completion would have been sent.
			 */
			if (pf_matched_in_rc(dq_head_p, pfd_p,
			    PCI_STAT_R_MAST_AB)) {
				hdl_sts = PF_HDL_FOUND;
				err = PF_ERR_MATCHED_RC;
				goto done;
			}
			hdl_sts = pf_log_hdl_lookup(rpdip, derr, pfd_p,
			    B_FALSE);
			break;
		case PCI_PCIX_CMD_SPL:
			hdl_sts = pf_log_hdl_lookup(rpdip, derr, pfd_p,
			    B_FALSE);
			break;
		case PCI_PCIX_CMD_DADR:
			/*
			 * Dual-address cycle: the real command is in the
			 * upper header bits; re-dispatch on it (at most once,
			 * since a second DADR falls through to default).
			 */
			cmd = (PCIE_ADV_BDG_HDR(pfd_p, 1) >>
			    PCIE_AER_SUCE_HDR_CMD_UP_SHIFT) &
			    PCIE_AER_SUCE_HDR_CMD_UP_MASK;
			if (cmd != PCI_PCIX_CMD_DADR)
				goto cmd_switch;
			/* FALLTHROUGH */
		default:
			/* Unexpected situation, panic */
			hdl_sts = PF_HDL_NOTFOUND;
		}

		if (hdl_sts == PF_HDL_FOUND)
			err = PF_ERR_MATCHED_DEVICE;
		else
			err = PF_ERR_PANIC;
	} else {
		/*
		 * Check to see if it is a non-posted write.  If so, a UR
		 * Completion would have been sent.
		 */
		if ((PCIE_ERR_REG(pfd_p)->pcie_err_status &
		    PCIE_DEVSTS_UR_DETECTED) &&
		    pf_matched_in_rc(dq_head_p, pfd_p, PCI_STAT_R_MAST_AB))
			err = PF_ERR_MATCHED_RC;

		/* Check for posted writes.  Transaction is lost. */
		if (PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat &
		    PCI_STAT_S_PERROR)
			err = PF_ERR_PANIC;

		/*
		 * All other scenarios are due to read completions.  Check for
		 * PERR on the primary side.  If found the primary side error
		 * handling will take care of this error.
		 */
		if (err == PF_ERR_NO_ERROR) {
			if (PCI_ERR_REG(pfd_p)->pci_err_status &
			    PCI_STAT_PERROR)
				err = PF_ERR_MATCHED_PARENT;
			else
				err = PF_ERR_PANIC;
		}
	}

done:
	return (err);
}
1759 
1760 /*
1761  * PCIe Poisoned TLP error analyser.  If a PCIe device receives a Poisoned TLP,
1762  * check the logs and see if an associated handler for this transaction can be
1763  * found.
1764  */
1765 /* ARGSUSED */
1766 static int
1767 pf_analyse_ptlp(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1768     pf_data_t *pfd_p)
1769 {
1770 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1771 
1772 	/*
1773 	 * If AERs are supported find the logs in this device, otherwise look in
1774 	 * it's parent's logs.
1775 	 */
1776 	if (HAS_AER_LOGS(pfd_p, bit)) {
1777 		pcie_tlp_hdr_t *hdr = (pcie_tlp_hdr_t *)&PCIE_ADV_HDR(pfd_p, 0);
1778 
1779 		/*
1780 		 * Double check that the log contains a poisoned TLP.
1781 		 * Some devices like PLX switch do not log poison TLP headers.
1782 		 */
1783 		if (hdr->ep) {
1784 			if (pf_log_hdl_lookup(rpdip, derr, pfd_p, B_TRUE) ==
1785 			    PF_HDL_FOUND)
1786 				return (PF_ERR_MATCHED_DEVICE);
1787 		}
1788 
1789 		/*
1790 		 * If an address is found and hdl lookup failed panic.
1791 		 * Otherwise check parents to see if there was enough
1792 		 * information recover.
1793 		 */
1794 		if (PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_addr)
1795 			return (PF_ERR_PANIC);
1796 	}
1797 
1798 	/*
1799 	 * Check to see if the rc has already handled this error or a parent has
1800 	 * already handled this error.
1801 	 *
1802 	 * If the error info in the RC wasn't enough to find the fault device,
1803 	 * such as if the faulting device lies behind a PCIe-PCI bridge from a
1804 	 * poisoned completion, check to see if the PCIe-PCI bridge has enough
1805 	 * info to recover.  For completion TLP's, the AER header logs only
1806 	 * contain the faulting BDF in the Root Port.  For PCIe device the fault
1807 	 * BDF is the fault device.  But if the fault device is behind a
1808 	 * PCIe-PCI bridge the fault BDF could turn out just to be a PCIe-PCI
1809 	 * bridge's secondary bus number.
1810 	 */
1811 	if (!PFD_IS_ROOT(pfd_p)) {
1812 		dev_info_t *pdip = ddi_get_parent(PCIE_PFD2DIP(pfd_p));
1813 		pf_data_t *parent_pfd_p;
1814 
1815 		if (PCIE_PFD2BUS(pfd_p)->bus_rp_dip == pdip) {
1816 			if (pf_matched_in_rc(dq_head_p, pfd_p, PCI_STAT_PERROR))
1817 				return (PF_ERR_MATCHED_RC);
1818 		}
1819 
1820 		parent_pfd_p = PCIE_DIP2PFD(pdip);
1821 
1822 		if (HAS_AER_LOGS(parent_pfd_p, bit))
1823 			return (PF_ERR_MATCHED_PARENT);
1824 	} else {
1825 		pf_data_t *bdg_pfd_p;
1826 		pcie_req_id_t secbus;
1827 
1828 		/*
1829 		 * Looking for a pcie bridge only makes sense if the BDF
1830 		 * Dev/Func = 0/0
1831 		 */
1832 		if (!PCIE_HAS_AER(PCIE_PFD2BUS(pfd_p)))
1833 			goto done;
1834 
1835 		secbus = PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf;
1836 
1837 		if (!PCIE_CHECK_VALID_BDF(secbus) || (secbus & 0xFF))
1838 			goto done;
1839 
1840 		bdg_pfd_p = pf_get_pcie_bridge(pfd_p, secbus);
1841 
1842 		if (bdg_pfd_p && HAS_SAER_LOGS(bdg_pfd_p,
1843 		    PCIE_AER_SUCE_PERR_ASSERT)) {
1844 			return pf_analyse_perr_assert(derr,
1845 			    PCIE_AER_SUCE_PERR_ASSERT, dq_head_p, pfd_p);
1846 		}
1847 	}
1848 done:
1849 	return (PF_ERR_PANIC);
1850 }
1851 
1852 /*
1853  * PCIe-PCI Bridge Received Master and Target abort error analyser on Split
1854  * Completions.  If a PCIe Bridge receives a MA/TA check logs and see if an
1855  * associated handler for this transaction can be found.
1856  */
1857 /* ARGSUSED */
1858 static int
1859 pf_analyse_sc(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1860     pf_data_t *pfd_p)
1861 {
1862 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1863 	uint16_t	cmd;
1864 	int		sts = PF_HDL_NOTFOUND;
1865 
1866 	if (!HAS_SAER_LOGS(pfd_p, bit))
1867 		return (PF_ERR_PANIC);
1868 
1869 	if (pf_pci_decode(pfd_p, &cmd) != DDI_SUCCESS)
1870 		return (PF_ERR_PANIC);
1871 
1872 	if (cmd == PCI_PCIX_CMD_SPL)
1873 		sts = pf_log_hdl_lookup(rpdip, derr, pfd_p, B_FALSE);
1874 
1875 	if (sts == PF_HDL_FOUND)
1876 		return (PF_ERR_MATCHED_DEVICE);
1877 
1878 	return (PF_ERR_PANIC);
1879 }
1880 
1881 /*
1882  * PCIe Timeout error analyser.  This error can be forgiven if it is marked as
1883  * CE Advisory.  If it is marked as advisory, this means the HW can recover
1884  * and/or retry the transaction automatically.
1885  */
1886 /* ARGSUSED */
1887 static int
1888 pf_analyse_to(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1889     pf_data_t *pfd_p)
1890 {
1891 	if (HAS_AER_LOGS(pfd_p, bit) && CE_ADVISORY(pfd_p))
1892 		return (PF_ERR_NO_PANIC);
1893 
1894 	return (PF_ERR_PANIC);
1895 }
1896 
1897 /*
1898  * PCIe Unexpected Completion.  Check to see if this TLP was misrouted by
1899  * matching the device BDF with the TLP Log.  If misrouting panic, otherwise
1900  * don't panic.
1901  */
1902 /* ARGSUSED */
1903 static int
1904 pf_analyse_uc(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1905     pf_data_t *pfd_p)
1906 {
1907 	if (HAS_AER_LOGS(pfd_p, bit) &&
1908 	    (PCIE_PFD2BUS(pfd_p)->bus_bdf == (PCIE_ADV_HDR(pfd_p, 2) >> 16)))
1909 		return (PF_ERR_NO_PANIC);
1910 
1911 	/*
1912 	 * This is a case of mis-routing. Any of the switches above this
1913 	 * device could be at fault.
1914 	 */
1915 	PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags = PF_AFFECTED_ROOT;
1916 
1917 	return (PF_ERR_PANIC);
1918 }
1919 
1920 /*
1921  * PCIe-PCI Bridge Uncorrectable Data error analyser.  All Uncorrectable Data
1922  * errors should have resulted in a PCIe Poisoned TLP to the RC, except for
1923  * Posted Writes.  Check the logs for Posted Writes and if the RC did not see a
1924  * Poisoned TLP.
1925  *
1926  * Non-Posted Writes will also generate a UR in the completion status, which the
1927  * RC should also see.
1928  */
1929 /* ARGSUSED */
1930 static int
1931 pf_analyse_uc_data(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
1932     pf_data_t *pfd_p)
1933 {
1934 	dev_info_t	*rpdip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
1935 
1936 	if (!HAS_SAER_LOGS(pfd_p, bit))
1937 		return (PF_ERR_PANIC);
1938 
1939 	if (pf_matched_in_rc(dq_head_p, pfd_p, PCI_STAT_PERROR))
1940 		return (PF_ERR_MATCHED_RC);
1941 
1942 	if (pf_log_hdl_lookup(rpdip, derr, pfd_p, B_FALSE) == PF_HDL_FOUND)
1943 		return (PF_ERR_MATCHED_DEVICE);
1944 
1945 	return (PF_ERR_PANIC);
1946 }
1947 
/*
 * Severity callback for error bits that never warrant a panic; always
 * reports PF_ERR_NO_PANIC regardless of the captured fault state.
 */
/* ARGSUSED */
static int
pf_no_panic(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
    pf_data_t *pfd_p)
{
	return (PF_ERR_NO_PANIC);
}
1955 
/*
 * Severity callback for error bits that are always fatal; unconditionally
 * reports PF_ERR_PANIC.
 */
/* ARGSUSED */
static int
pf_panic(ddi_fm_error_t *derr, uint32_t bit, pf_data_t *dq_head_p,
    pf_data_t *pfd_p)
{
	return (PF_ERR_PANIC);
}
1963 
1964 /*
1965  * If a PCIe device does not support AER, assume all AER statuses have been set,
1966  * unless other registers do not indicate a certain error occuring.
1967  */
1968 static void
1969 pf_adjust_for_no_aer(pf_data_t *pfd_p)
1970 {
1971 	uint32_t	aer_ue = 0;
1972 	uint16_t	status;
1973 
1974 	if (PCIE_HAS_AER(PCIE_PFD2BUS(pfd_p)))
1975 		return;
1976 
1977 	if (PCIE_ERR_REG(pfd_p)->pcie_err_status & PCIE_DEVSTS_FE_DETECTED)
1978 		aer_ue = PF_AER_FATAL_ERR;
1979 
1980 	if (PCIE_ERR_REG(pfd_p)->pcie_err_status & PCIE_DEVSTS_NFE_DETECTED) {
1981 		aer_ue = PF_AER_NON_FATAL_ERR;
1982 		status = PCI_ERR_REG(pfd_p)->pci_err_status;
1983 
1984 		/* Check if the device received a PTLP */
1985 		if (!(status & PCI_STAT_PERROR))
1986 			aer_ue &= ~PCIE_AER_UCE_PTLP;
1987 
1988 		/* Check if the device signaled a CA */
1989 		if (!(status & PCI_STAT_S_TARG_AB))
1990 			aer_ue &= ~PCIE_AER_UCE_CA;
1991 
1992 		/* Check if the device sent a UR */
1993 		if (!(PCIE_ERR_REG(pfd_p)->pcie_err_status &
1994 		    PCIE_DEVSTS_UR_DETECTED))
1995 			aer_ue &= ~PCIE_AER_UCE_UR;
1996 
1997 		/*
1998 		 * Ignore ECRCs as it is optional and will manefest itself as
1999 		 * another error like PTLP and MFP
2000 		 */
2001 		aer_ue &= ~PCIE_AER_UCE_ECRC;
2002 
2003 		/*
2004 		 * Generally if NFE is set, SERR should also be set. Exception:
2005 		 * When certain non-fatal errors are masked, and some of them
2006 		 * happened to be the cause of the NFE, SERR will not be set and
2007 		 * they can not be the source of this interrupt.
2008 		 *
2009 		 * On x86, URs are masked (NFE + UR can be set), if any other
2010 		 * non-fatal errors (i.e, PTLP, CTO, CA, UC, ECRC, ACS) did
2011 		 * occur, SERR should be set since they are not masked. So if
2012 		 * SERR is not set, none of them occurred.
2013 		 */
2014 		if (!(status & PCI_STAT_S_SYSERR))
2015 			aer_ue &= ~PCIE_AER_UCE_TO;
2016 	}
2017 
2018 	if (!PCIE_IS_BDG(PCIE_PFD2BUS(pfd_p))) {
2019 		aer_ue &= ~PCIE_AER_UCE_TRAINING;
2020 		aer_ue &= ~PCIE_AER_UCE_SD;
2021 	}
2022 
2023 	PCIE_ADV_REG(pfd_p)->pcie_ue_status = aer_ue;
2024 }
2025 
2026 static void
2027 pf_adjust_for_no_saer(pf_data_t *pfd_p)
2028 {
2029 	uint32_t	s_aer_ue = 0;
2030 	uint16_t	status;
2031 
2032 	if (PCIE_HAS_AER(PCIE_PFD2BUS(pfd_p)))
2033 		return;
2034 
2035 	if (PCIE_ERR_REG(pfd_p)->pcie_err_status & PCIE_DEVSTS_FE_DETECTED)
2036 		s_aer_ue = PF_SAER_FATAL_ERR;
2037 
2038 	if (PCIE_ERR_REG(pfd_p)->pcie_err_status & PCIE_DEVSTS_NFE_DETECTED) {
2039 		s_aer_ue = PF_SAER_NON_FATAL_ERR;
2040 		status = PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat;
2041 
2042 		/* Check if the device received a UC_DATA */
2043 		if (!(status & PCI_STAT_PERROR))
2044 			s_aer_ue &= ~PCIE_AER_SUCE_UC_DATA_ERR;
2045 
2046 		/* Check if the device received a RCVD_MA/MA_ON_SC */
2047 		if (!(status & (PCI_STAT_R_MAST_AB))) {
2048 			s_aer_ue &= ~PCIE_AER_SUCE_RCVD_MA;
2049 			s_aer_ue &= ~PCIE_AER_SUCE_MA_ON_SC;
2050 		}
2051 
2052 		/* Check if the device received a RCVD_TA/TA_ON_SC */
2053 		if (!(status & (PCI_STAT_R_TARG_AB))) {
2054 			s_aer_ue &= ~PCIE_AER_SUCE_RCVD_TA;
2055 			s_aer_ue &= ~PCIE_AER_SUCE_TA_ON_SC;
2056 		}
2057 	}
2058 
2059 	PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_status = s_aer_ue;
2060 }
2061 
2062 /* Find the PCIe-PCI bridge based on secondary bus number */
2063 static pf_data_t *
2064 pf_get_pcie_bridge(pf_data_t *pfd_p, pcie_req_id_t secbus)
2065 {
2066 	pf_data_t *bdg_pfd_p;
2067 
2068 	/* Search down for the PCIe-PCI device. */
2069 	for (bdg_pfd_p = pfd_p->pe_next; bdg_pfd_p;
2070 	    bdg_pfd_p = bdg_pfd_p->pe_next) {
2071 		if (PCIE_IS_PCIE_BDG(PCIE_PFD2BUS(bdg_pfd_p)) &&
2072 		    PCIE_PFD2BUS(bdg_pfd_p)->bus_bdg_secbus == secbus)
2073 			return (bdg_pfd_p);
2074 	}
2075 
2076 	return (NULL);
2077 }
2078 
2079 /* Find the PCIe-PCI bridge of a PCI device */
2080 static pf_data_t *
2081 pf_get_parent_pcie_bridge(pf_data_t *pfd_p)
2082 {
2083 	dev_info_t	*dip, *rp_dip = PCIE_PFD2BUS(pfd_p)->bus_rp_dip;
2084 
2085 	/* This only makes sense if the device is a PCI device */
2086 	if (!PCIE_IS_PCI(PCIE_PFD2BUS(pfd_p)))
2087 		return (NULL);
2088 
2089 	/*
2090 	 * Search up for the PCIe-PCI device.  Watchout for x86 where pci
2091 	 * devices hang directly off of NPE.
2092 	 */
2093 	for (dip = PCIE_PFD2DIP(pfd_p); dip; dip = ddi_get_parent(dip)) {
2094 		if (dip == rp_dip)
2095 			dip = NULL;
2096 
2097 		if (PCIE_IS_PCIE_BDG(PCIE_DIP2BUS(dip)))
2098 			return (PCIE_DIP2PFD(dip));
2099 	}
2100 
2101 	return (NULL);
2102 }
2103 
2104 /*
2105  * See if a leaf error was bubbled up to the Root Complex (RC) and handled.
2106  * As of right now only RC's have enough information to have errors found in the
2107  * fabric to be matched to the RC.  Note that Root Port's (RP) do not carry
2108  * enough information.  Currently known RC's are SPARC Fire architecture and
 * its equivalents, and x86's NPE.
2110  * SPARC Fire architectures have a plethora of error registers, while currently
2111  * NPE only have the address of a failed load.
2112  *
2113  * Check if the RC logged an error with the appropriate status type/abort type.
2114  * Ex: Parity Error, Received Master/Target Abort
2115  * Check if either the fault address found in the rc matches the device's
2116  * assigned address range (PIO's only) or the fault BDF in the rc matches the
2117  * device's BDF or Secondary Bus/Bus Range.
2118  */
2119 static boolean_t
2120 pf_matched_in_rc(pf_data_t *dq_head_p, pf_data_t *pfd_p,
2121     uint32_t abort_type)
2122 {
2123 	pcie_bus_t	*bus_p = PCIE_PFD2BUS(pfd_p);
2124 	pf_data_t	*rc_pfd_p;
2125 	pcie_req_id_t	fault_bdf;
2126 
2127 	for (rc_pfd_p = dq_head_p; PFD_IS_ROOT(rc_pfd_p);
2128 	    rc_pfd_p = rc_pfd_p->pe_next) {
2129 		/* Only root complex's have enough information to match */
2130 		if (!PCIE_IS_RC(PCIE_PFD2BUS(rc_pfd_p)))
2131 			continue;
2132 
2133 		/* If device and rc abort type does not match continue */
2134 		if (!(PCI_BDG_ERR_REG(rc_pfd_p)->pci_bdg_sec_stat & abort_type))
2135 			continue;
2136 
2137 		fault_bdf = PCIE_ROOT_FAULT(rc_pfd_p)->scan_bdf;
2138 
2139 		/* The Fault BDF = Device's BDF */
2140 		if (fault_bdf == bus_p->bus_bdf)
2141 			return (B_TRUE);
2142 
2143 		/* The Fault Addr is in device's address range */
2144 		if (pf_in_addr_range(bus_p,
2145 		    PCIE_ROOT_FAULT(rc_pfd_p)->scan_addr))
2146 			return (B_TRUE);
2147 
2148 		/* The Fault BDF is from PCIe-PCI Bridge's secondary bus */
2149 		if (PCIE_IS_PCIE_BDG(bus_p) &&
2150 		    pf_in_bus_range(bus_p, fault_bdf))
2151 			return (B_TRUE);
2152 	}
2153 
2154 	return (B_FALSE);
2155 }
2156 
2157 /*
2158  * Check the RP and see if the error is PIO/DMA.  If the RP also has a PERR then
2159  * it is a DMA, otherwise it's a PIO
2160  */
2161 static void
2162 pf_pci_find_trans_type(pf_data_t *pfd_p, uint64_t *addr, uint32_t *trans_type,
2163     pcie_req_id_t *bdf) {
2164 	pf_data_t *rc_pfd_p;
2165 
2166 	/* Could be DMA or PIO.  Find out by look at error type. */
2167 	switch (PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_status) {
2168 	case PCIE_AER_SUCE_TA_ON_SC:
2169 	case PCIE_AER_SUCE_MA_ON_SC:
2170 		*trans_type = PF_ADDR_DMA;
2171 		return;
2172 	case PCIE_AER_SUCE_RCVD_TA:
2173 	case PCIE_AER_SUCE_RCVD_MA:
2174 		*bdf = PCIE_INVALID_BDF;
2175 		*trans_type = PF_ADDR_PIO;
2176 		return;
2177 	case PCIE_AER_SUCE_USC_ERR:
2178 	case PCIE_AER_SUCE_UC_DATA_ERR:
2179 	case PCIE_AER_SUCE_PERR_ASSERT:
2180 		break;
2181 	default:
2182 		*addr = 0;
2183 		*bdf = PCIE_INVALID_BDF;
2184 		*trans_type = 0;
2185 		return;
2186 	}
2187 
2188 	*bdf = PCIE_INVALID_BDF;
2189 	*trans_type = PF_ADDR_PIO;
2190 	for (rc_pfd_p = pfd_p->pe_prev; rc_pfd_p;
2191 	    rc_pfd_p = rc_pfd_p->pe_prev) {
2192 		if (PFD_IS_ROOT(rc_pfd_p) &&
2193 		    (PCI_BDG_ERR_REG(rc_pfd_p)->pci_bdg_sec_stat &
2194 		    PCI_STAT_PERROR)) {
2195 			*trans_type = PF_ADDR_DMA;
2196 			return;
2197 		}
2198 	}
2199 }
2200 
2201 /*
2202  * pf_pci_decode function decodes the secondary aer transaction logs in
2203  * PCIe-PCI bridges.
2204  *
2205  * The log is 128 bits long and arranged in this manner.
2206  * [0:35]   Transaction Attribute	(s_aer_h0-saer_h1)
2207  * [36:39]  Transaction lower command	(saer_h1)
2208  * [40:43]  Transaction upper command	(saer_h1)
2209  * [44:63]  Reserved
2210  * [64:127] Address			(saer_h2-saer_h3)
2211  */
/* ARGSUSED */
int
pf_pci_decode(pf_data_t *pfd_p, uint16_t *cmd) {
	pcix_attr_t	*attr;
	uint64_t	addr;
	uint32_t	trans_type;
	pcie_req_id_t	bdf = PCIE_INVALID_BDF;

	/* The requester id comes from the attribute phase of the log. */
	attr = (pcix_attr_t *)&PCIE_ADV_BDG_HDR(pfd_p, 0);
	*cmd = GET_SAER_CMD(pfd_p);

	/* Re-entered from the DADR case below with the real command. */
cmd_switch:
	switch (*cmd) {
	case PCI_PCIX_CMD_IORD:
	case PCI_PCIX_CMD_IOWR:
		/* IO Access should always be down stream */
		addr = PCIE_ADV_BDG_HDR(pfd_p, 2);
		bdf = attr->rid;
		trans_type = PF_ADDR_PIO;
		break;
	case PCI_PCIX_CMD_MEMRD_DW:
	case PCI_PCIX_CMD_MEMRD_BL:
	case PCI_PCIX_CMD_MEMRDBL:
	case PCI_PCIX_CMD_MEMWR:
	case PCI_PCIX_CMD_MEMWR_BL:
	case PCI_PCIX_CMD_MEMWRBL:
		/* 64-bit fault address split across header words 3 and 2 */
		addr = ((uint64_t)PCIE_ADV_BDG_HDR(pfd_p, 3) <<
		    PCIE_AER_SUCE_HDR_ADDR_SHIFT) | PCIE_ADV_BDG_HDR(pfd_p, 2);
		bdf = attr->rid;

		/* Memory accesses may be DMA or PIO; consult the RP logs */
		pf_pci_find_trans_type(pfd_p, &addr, &trans_type, &bdf);
		break;
	case PCI_PCIX_CMD_CFRD:
	case PCI_PCIX_CMD_CFWR:
		/*
		 * CFG Access should always be down stream.  Match the BDF in
		 * the address phase.
		 */
		addr = 0;
		bdf = attr->rid;
		trans_type = PF_ADDR_CFG;
		break;
	case PCI_PCIX_CMD_SPL:
		/*
		 * Check for DMA read completions.  The requesting BDF is in the
		 * Address phase.
		 */
		addr = 0;
		bdf = attr->rid;
		trans_type = PF_ADDR_DMA;
		break;
	case PCI_PCIX_CMD_DADR:
		/*
		 * For Dual Address Cycles the transaction command is in the 2nd
		 * address phase.
		 */
		*cmd = (PCIE_ADV_BDG_HDR(pfd_p, 1) >>
		    PCIE_AER_SUCE_HDR_CMD_UP_SHIFT) &
		    PCIE_AER_SUCE_HDR_CMD_UP_MASK;
		if (*cmd != PCI_PCIX_CMD_DADR)
			goto cmd_switch;
		/* FALLTHROUGH */
	default:
		/* Undecodable: reset the target fields and report failure. */
		PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_trans = 0;
		PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf = PCIE_INVALID_BDF;
		PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_addr = 0;
		return (DDI_FAILURE);
	}
	PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_trans = trans_type;
	PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf = bdf;
	PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_addr = addr;
	return (DDI_SUCCESS);
}
2285 
2286 /*
2287  * Based on either the BDF/ADDR find and mark the faulting DMA/ACC handler.
2288  * Returns either PF_HDL_NOTFOUND or PF_HDL_FOUND.
2289  */
2290 int
2291 pf_hdl_lookup(dev_info_t *dip, uint64_t ena, uint32_t flag, uint64_t addr,
2292     pcie_req_id_t bdf)
2293 {
2294 	ddi_fm_error_t		derr;
2295 
2296 	/* If we don't know the addr or rid just return with NOTFOUND */
2297 	if ((addr == NULL) && !PCIE_CHECK_VALID_BDF(bdf))
2298 		return (PF_HDL_NOTFOUND);
2299 
2300 	if (!(flag & (PF_ADDR_DMA | PF_ADDR_PIO | PF_ADDR_CFG))) {
2301 		return (PF_HDL_NOTFOUND);
2302 	}
2303 
2304 	bzero(&derr, sizeof (ddi_fm_error_t));
2305 	derr.fme_version = DDI_FME_VERSION;
2306 	derr.fme_flag = DDI_FM_ERR_UNEXPECTED;
2307 	derr.fme_ena = ena;
2308 
2309 	return (pf_hdl_child_lookup(dip, &derr, flag, addr, bdf));
2310 }
2311 
/*
 * Recursively search dip and its children for the access/DMA handle that
 * matches the given fault address/BDF, marking it failed via pf_hdl_compare.
 * The caller's fault-handler lock is taken here if not already owned.
 * Returns PF_HDL_FOUND or PF_HDL_NOTFOUND.
 */
static int
pf_hdl_child_lookup(dev_info_t *dip, ddi_fm_error_t *derr, uint32_t flag,
    uint64_t addr, pcie_req_id_t bdf)
{
	int			status = PF_HDL_NOTFOUND;
	ndi_fmc_t		*fcp = NULL;
	struct i_ddi_fmhdl	*fmhdl = DEVI(dip)->devi_fmhdl;
	pcie_req_id_t		dip_bdf;
	boolean_t		have_lock = B_FALSE;
	pcie_bus_t		*bus_p;
	dev_info_t		*cdip;

	/* Devices that are not fault-management ready cannot match. */
	if (!(bus_p = pf_is_ready(dip))) {
		return (status);
	}

	ASSERT(fmhdl);
	if (!i_ddi_fm_handler_owned(dip)) {
		/*
		 * pf_handler_enter always returns SUCCESS if the 'impl' arg is
		 * NULL.
		 */
		(void) pf_handler_enter(dip, NULL);
		have_lock = B_TRUE;
	}

	dip_bdf = PCI_GET_BDF(dip);

	/* Check if dip and BDF match; if not recurse to its children. */
	if (!PCIE_IS_RC(bus_p) && (!PCIE_CHECK_VALID_BDF(bdf) ||
	    dip_bdf == bdf)) {
		/* DMA faults are matched against the DMA handle cache. */
		if ((flag & PF_ADDR_DMA) && DDI_FM_DMA_ERR_CAP(fmhdl->fh_cap))
			fcp = fmhdl->fh_dma_cache;
		else
			fcp = NULL;

		if (fcp)
			status = pf_hdl_compare(dip, derr, DMA_HANDLE, addr,
			    bdf, fcp);


		/* PIO/CFG faults are matched against the access cache. */
		if (((flag & PF_ADDR_PIO) || (flag & PF_ADDR_CFG)) &&
		    DDI_FM_ACC_ERR_CAP(fmhdl->fh_cap))
			fcp = fmhdl->fh_acc_cache;
		else
			fcp = NULL;

		if (fcp)
			status = pf_hdl_compare(dip, derr, ACC_HANDLE, addr,
			    bdf, fcp);
	}

	/* If we found the handler or know it's this device, we're done */
	if (!PCIE_IS_RC(bus_p) && ((dip_bdf == bdf) ||
	    (status == PF_HDL_FOUND)))
		goto done;

	/*
	 * If the current device is a PCIe-PCI bridge we need to check for
	 * special cases:
	 *
	 * If it is a PIO and we don't have an address or this is a DMA, check
	 * to see if the BDF = secondary bus.  If so stop.  The BDF isn't a real
	 * BDF and the fault device could have come from any device in the PCI
	 * bus.
	 */
	if (PCIE_IS_PCIE_BDG(bus_p) &&
	    ((flag & PF_ADDR_DMA || flag & PF_ADDR_PIO)) &&
	    ((bus_p->bus_bdg_secbus << PCIE_REQ_ID_BUS_SHIFT) == bdf))
		goto done;


	/* If we can't find the handler check its children */
	for (cdip = ddi_get_child(dip); cdip;
	    cdip = ddi_get_next_sibling(cdip)) {
		if ((bus_p = PCIE_DIP2BUS(cdip)) == NULL)
			continue;

		/* Only descend where the BDF or address could possibly live */
		if (pf_in_bus_range(bus_p, bdf) ||
		    pf_in_addr_range(bus_p, addr))
			status = pf_hdl_child_lookup(cdip, derr, flag, addr,
			    bdf);

		if (status == PF_HDL_FOUND)
			goto done;
	}

done:
	if (have_lock == B_TRUE)
		pf_handler_exit(dip);

	return (status);
}
2405 
2406 static int
2407 pf_hdl_compare(dev_info_t *dip, ddi_fm_error_t *derr, uint32_t flag,
2408     uint64_t addr, pcie_req_id_t bdf, ndi_fmc_t *fcp) {
2409 	ndi_fmcentry_t	*fep;
2410 	int		found = 0;
2411 	int		status;
2412 
2413 	mutex_enter(&fcp->fc_lock);
2414 	for (fep = fcp->fc_head; fep != NULL; fep = fep->fce_next) {
2415 		ddi_fmcompare_t compare_func;
2416 
2417 		/*
2418 		 * Compare captured error state with handle
2419 		 * resources.  During the comparison and
2420 		 * subsequent error handling, we block
2421 		 * attempts to free the cache entry.
2422 		 */
2423 		compare_func = (flag == ACC_HANDLE) ?
2424 		    i_ddi_fm_acc_err_cf_get((ddi_acc_handle_t)
2425 			fep->fce_resource) :
2426 		    i_ddi_fm_dma_err_cf_get((ddi_dma_handle_t)
2427 			fep->fce_resource);
2428 
2429 		status = compare_func(dip, fep->fce_resource,
2430 			    (void *)&addr, (void *)&bdf);
2431 
2432 		if (status == DDI_FM_NONFATAL) {
2433 			found++;
2434 
2435 			/* Set the error for this resource handle */
2436 			if (flag == ACC_HANDLE) {
2437 				ddi_acc_handle_t ap = fep->fce_resource;
2438 
2439 				i_ddi_fm_acc_err_set(ap, derr->fme_ena, status,
2440 				    DDI_FM_ERR_UNEXPECTED);
2441 				ddi_fm_acc_err_get(ap, derr, DDI_FME_VERSION);
2442 				derr->fme_acc_handle = ap;
2443 			} else {
2444 				ddi_dma_handle_t dp = fep->fce_resource;
2445 
2446 				i_ddi_fm_dma_err_set(dp, derr->fme_ena, status,
2447 				    DDI_FM_ERR_UNEXPECTED);
2448 				ddi_fm_dma_err_get(dp, derr, DDI_FME_VERSION);
2449 				derr->fme_dma_handle = dp;
2450 			}
2451 		}
2452 	}
2453 	mutex_exit(&fcp->fc_lock);
2454 
2455 	/*
2456 	 * If a handler isn't found and we know this is the right device mark
2457 	 * them all failed.
2458 	 */
2459 	if ((addr != NULL) && PCIE_CHECK_VALID_BDF(bdf) && (found == 0)) {
2460 		status = pf_hdl_compare(dip, derr, flag, addr, bdf, fcp);
2461 		if (status == PF_HDL_FOUND)
2462 			found++;
2463 	}
2464 
2465 	return ((found) ? PF_HDL_FOUND : PF_HDL_NOTFOUND);
2466 }
2467 
2468 /*
2469  * Automatically decode AER header logs and does a handling look up based on the
2470  * AER header decoding.
2471  *
2472  * For this function only the Primary/Secondary AER Header Logs need to be valid
2473  * in the pfd (PCIe Fault Data) arg.
2474  *
2475  * Returns either PF_HDL_NOTFOUND or PF_HDL_FOUND.
2476  */
2477 static int
2478 pf_log_hdl_lookup(dev_info_t *rpdip, ddi_fm_error_t *derr, pf_data_t *pfd_p,
2479 	boolean_t is_primary)
2480 {
2481 	int		lookup = PF_HDL_NOTFOUND;
2482 
2483 	if (is_primary) {
2484 		pf_pcie_adv_err_regs_t *reg_p = PCIE_ADV_REG(pfd_p);
2485 		if (pf_tlp_decode(PCIE_PFD2BUS(pfd_p), reg_p) == DDI_SUCCESS) {
2486 			lookup = pf_hdl_lookup(rpdip, derr->fme_ena,
2487 			    reg_p->pcie_ue_tgt_trans,
2488 			    reg_p->pcie_ue_tgt_addr,
2489 			    reg_p->pcie_ue_tgt_bdf);
2490 		}
2491 	} else {
2492 		pf_pcie_adv_bdg_err_regs_t *reg_p = PCIE_ADV_BDG_REG(pfd_p);
2493 		uint16_t cmd;
2494 		if (pf_pci_decode(pfd_p, &cmd) == DDI_SUCCESS) {
2495 			lookup = pf_hdl_lookup(rpdip, derr->fme_ena,
2496 			    reg_p->pcie_sue_tgt_trans,
2497 			    reg_p->pcie_sue_tgt_addr,
2498 			    reg_p->pcie_sue_tgt_bdf);
2499 		}
2500 	}
2501 
2502 	return (lookup);
2503 }
2504 
2505 /*
2506  * Decodes the TLP and returns the BDF of the handler, address and transaction
2507  * type if known.
2508  *
2509  * Types of TLP logs seen in RC, and what to extract:
2510  *
2511  * Memory(DMA) - Requester BDF, address, PF_DMA_ADDR
2512  * Memory(PIO) - address, PF_PIO_ADDR
2513  * CFG - Should not occur and result in UR
2514  * Completion(DMA) - Requester BDF, PF_DMA_ADDR
2515  * Completion(PIO) - Requester BDF, PF_PIO_ADDR
2516  *
2517  * Types of TLP logs seen in SW/Leaf, and what to extract:
2518  *
2519  * Memory(DMA) - Requester BDF, address, PF_DMA_ADDR
2520  * Memory(PIO) - address, PF_PIO_ADDR
2521  * CFG - Destined BDF, address, PF_CFG_ADDR
2522  * Completion(DMA) - Requester BDF, PF_DMA_ADDR
2523  * Completion(PIO) - Requester BDF, PF_PIO_ADDR
2524  *
2525  * The adv_reg_p must be passed in separately for use with SPARC RPs.  A
2526  * SPARC RP could have multiple AER header logs which cannot be directly
2527  * accessed via the bus_p.
2528  */
2529 int
2530 pf_tlp_decode(pcie_bus_t *bus_p, pf_pcie_adv_err_regs_t *adv_reg_p) {
2531 	pcie_tlp_hdr_t	*tlp_hdr = (pcie_tlp_hdr_t *)adv_reg_p->pcie_ue_hdr;
2532 	pcie_req_id_t	my_bdf, tlp_bdf, flt_bdf = PCIE_INVALID_BDF;
2533 	uint64_t	flt_addr = 0;
2534 	uint32_t	flt_trans_type = 0;
2535 
2536 	adv_reg_p->pcie_ue_tgt_addr = 0;
2537 	adv_reg_p->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;
2538 	adv_reg_p->pcie_ue_tgt_trans = 0;
2539 
2540 	my_bdf = bus_p->bus_bdf;
2541 	switch (tlp_hdr->type) {
2542 	case PCIE_TLP_TYPE_IO:
2543 	case PCIE_TLP_TYPE_MEM:
2544 	case PCIE_TLP_TYPE_MEMLK:
2545 		/* Grab the 32/64bit fault address */
2546 		if (tlp_hdr->fmt & 0x1) {
2547 			flt_addr = ((uint64_t)adv_reg_p->pcie_ue_hdr[2] << 32);
2548 			flt_addr |= adv_reg_p->pcie_ue_hdr[3];
2549 		} else {
2550 			flt_addr = adv_reg_p->pcie_ue_hdr[2];
2551 		}
2552 
2553 		tlp_bdf = (pcie_req_id_t)(adv_reg_p->pcie_ue_hdr[1] >> 16);
2554 
2555 		/*
2556 		 * If the req bdf >= this.bdf, then it means the request is this
2557 		 * device or came from a device below it.  Unless this device is
2558 		 * a PCIe root port then it means is a DMA, otherwise PIO.
2559 		 */
2560 		if ((tlp_bdf >= my_bdf) && !PCIE_IS_ROOT(bus_p)) {
2561 			flt_trans_type = PF_ADDR_DMA;
2562 			flt_bdf = tlp_bdf;
2563 		} else if (PCIE_IS_ROOT(bus_p) &&
2564 		    (PF_FIRST_AER_ERR(PCIE_AER_UCE_PTLP, adv_reg_p) ||
2565 			(PF_FIRST_AER_ERR(PCIE_AER_UCE_CA, adv_reg_p)))) {
2566 			flt_trans_type = PF_ADDR_DMA;
2567 			flt_bdf = tlp_bdf;
2568 		} else {
2569 			flt_trans_type = PF_ADDR_PIO;
2570 			flt_bdf = PCIE_INVALID_BDF;
2571 		}
2572 		break;
2573 	case PCIE_TLP_TYPE_CFG0:
2574 	case PCIE_TLP_TYPE_CFG1:
2575 		flt_addr = 0;
2576 		flt_bdf = (pcie_req_id_t)(adv_reg_p->pcie_ue_hdr[2] >> 16);
2577 		flt_trans_type = PF_ADDR_CFG;
2578 		break;
2579 	case PCIE_TLP_TYPE_CPL:
2580 	case PCIE_TLP_TYPE_CPLLK:
2581 	{
2582 		pcie_cpl_t *cpl_tlp = (pcie_cpl_t *)&adv_reg_p->pcie_ue_hdr[1];
2583 
2584 		flt_addr = NULL;
2585 		flt_bdf = cpl_tlp->rid;
2586 
2587 		/*
2588 		 * If the cpl bdf < this.bdf, then it means the request is this
2589 		 * device or came from a device below it.  Unless this device is
2590 		 * a PCIe root port then it means is a DMA, otherwise PIO.
2591 		 */
2592 		if (cpl_tlp->rid > cpl_tlp->cid) {
2593 			flt_trans_type = PF_ADDR_DMA;
2594 		} else {
2595 			flt_trans_type = PF_ADDR_PIO | PF_ADDR_CFG;
2596 		}
2597 		break;
2598 	}
2599 	default:
2600 		return (DDI_FAILURE);
2601 	}
2602 
2603 	adv_reg_p->pcie_ue_tgt_addr = flt_addr;
2604 	adv_reg_p->pcie_ue_tgt_bdf = flt_bdf;
2605 	adv_reg_p->pcie_ue_tgt_trans = flt_trans_type;
2606 
2607 	return (DDI_SUCCESS);
2608 }
2609 
2610 #define	PCIE_EREPORT	DDI_IO_CLASS "." PCI_ERROR_SUBCLASS "." PCIEX_FABRIC
2611 static int
2612 pf_ereport_setup(dev_info_t *dip, uint64_t ena, nvlist_t **ereport,
2613     nvlist_t **detector, errorq_elem_t **eqep)
2614 {
2615 	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;
2616 	char device_path[MAXPATHLEN];
2617 	nv_alloc_t *nva;
2618 
2619 	*eqep = errorq_reserve(fmhdl->fh_errorq);
2620 	if (*eqep == NULL) {
2621 		atomic_add_64(&fmhdl->fh_kstat.fek_erpt_dropped.value.ui64, 1);
2622 		return (DDI_FAILURE);
2623 	}
2624 
2625 	*ereport = errorq_elem_nvl(fmhdl->fh_errorq, *eqep);
2626 	nva = errorq_elem_nva(fmhdl->fh_errorq, *eqep);
2627 
2628 	ASSERT(*ereport);
2629 	ASSERT(nva);
2630 
2631 	/*
2632 	 * Use the dev_path/devid for this device instance.
2633 	 */
2634 	*detector = fm_nvlist_create(nva);
2635 	if (dip == ddi_root_node()) {
2636 		device_path[0] = '/';
2637 		device_path[1] = '\0';
2638 	} else {
2639 		(void) ddi_pathname(dip, device_path);
2640 	}
2641 
2642 	fm_fmri_dev_set(*detector, FM_DEV_SCHEME_VERSION, NULL,
2643 	    device_path, NULL);
2644 
2645 	if (ena == 0)
2646 		ena = fm_ena_generate(0, FM_ENA_FMT1);
2647 
2648 	fm_ereport_set(*ereport, 0, PCIE_EREPORT, ena, *detector, NULL);
2649 
2650 	return (DDI_SUCCESS);
2651 }
2652 
/*
 * Commit a previously set-up ereport to the device's error queue.  The
 * ereport/detector args are unused here; their nvlists live in the errorq
 * element reserved by pf_ereport_setup and are dispatched asynchronously.
 */
/* ARGSUSED */
static void
pf_ereport_post(dev_info_t *dip, nvlist_t **ereport, nvlist_t **detector,
    errorq_elem_t **eqep)
{
	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;

	errorq_commit(fmhdl->fh_errorq, *eqep, ERRORQ_ASYNC);
}
2662 
2663 static void
2664 pf_send_ereport(ddi_fm_error_t *derr, pf_impl_t *impl)
2665 {
2666 	nvlist_t	*ereport;
2667 	nvlist_t	*detector;
2668 	errorq_elem_t	*eqep;
2669 	pcie_bus_t	*bus_p;
2670 	pf_data_t	*pfd_p;
2671 	uint32_t	total = impl->pf_total;
2672 
2673 	/*
2674 	 * Ereports need to be sent in a top down fashion. The fabric translator
2675 	 * expects the ereports from the Root first. This is needed to tell if
	 * the system contains a PCIe compliant RC/RP.
2677 	 */
2678 	for (pfd_p = impl->pf_dq_head_p; pfd_p; pfd_p = pfd_p->pe_next) {
2679 		bus_p = PCIE_PFD2BUS(pfd_p);
2680 		pfd_p->pe_valid = B_FALSE;
2681 
2682 		if (derr->fme_flag != DDI_FM_ERR_UNEXPECTED ||
2683 		    PFD_IS_RC(pfd_p) ||
2684 		    !DDI_FM_EREPORT_CAP(ddi_fm_capable(PCIE_PFD2DIP(pfd_p))))
2685 			continue;
2686 
2687 		if (pf_ereport_setup(PCIE_BUS2DIP(bus_p), derr->fme_ena,
2688 		    &ereport, &detector, &eqep) != DDI_SUCCESS)
2689 			continue;
2690 
2691 		/* Generic PCI device information */
2692 		fm_payload_set(ereport,
2693 		    "bdf", DATA_TYPE_UINT16, bus_p->bus_bdf,
2694 		    "device_id", DATA_TYPE_UINT16,
2695 		    (bus_p->bus_dev_ven_id >> 16),
2696 		    "vendor_id", DATA_TYPE_UINT16,
2697 		    (bus_p->bus_dev_ven_id & 0xFFFF),
2698 		    "rev_id", DATA_TYPE_UINT8, bus_p->bus_rev_id,
2699 		    "dev_type", DATA_TYPE_UINT16, bus_p->bus_dev_type,
2700 		    "pcie_off", DATA_TYPE_UINT16, bus_p->bus_pcie_off,
2701 		    "pcix_off", DATA_TYPE_UINT16, bus_p->bus_pcix_off,
2702 		    "aer_off", DATA_TYPE_UINT16, bus_p->bus_aer_off,
2703 		    "ecc_ver", DATA_TYPE_UINT16, bus_p->bus_ecc_ver,
2704 		    NULL);
2705 
2706 		/* PCI registers */
2707 		fm_payload_set(ereport,
2708 		    "pci_status", DATA_TYPE_UINT16,
2709 		    PCI_ERR_REG(pfd_p)->pci_err_status,
2710 		    "pci_command", DATA_TYPE_UINT16,
2711 		    PCI_ERR_REG(pfd_p)->pci_cfg_comm,
2712 		    NULL);
2713 
2714 		/* PCI bridge registers */
2715 		if (PCIE_IS_BDG(bus_p)) {
2716 			fm_payload_set(ereport,
2717 			    "pci_bdg_sec_status", DATA_TYPE_UINT16,
2718 			    PCI_BDG_ERR_REG(pfd_p)->pci_bdg_sec_stat,
2719 			    "pci_bdg_ctrl", DATA_TYPE_UINT16,
2720 			    PCI_BDG_ERR_REG(pfd_p)->pci_bdg_ctrl,
2721 			    NULL);
2722 		}
2723 
2724 		/* PCIx registers */
2725 		if (PCIE_IS_PCIX(bus_p) && !PCIE_IS_BDG(bus_p)) {
2726 			fm_payload_set(ereport,
2727 			    "pcix_status", DATA_TYPE_UINT32,
2728 			    PCIX_ERR_REG(pfd_p)->pcix_status,
2729 			    "pcix_command", DATA_TYPE_UINT16,
2730 			    PCIX_ERR_REG(pfd_p)->pcix_command,
2731 			    NULL);
2732 		}
2733 
2734 		/* PCIx ECC Registers */
2735 		if (PCIX_ECC_VERSION_CHECK(bus_p)) {
2736 			pf_pcix_ecc_regs_t *ecc_bdg_reg;
2737 			pf_pcix_ecc_regs_t *ecc_reg;
2738 
2739 			if (PCIE_IS_BDG(bus_p))
2740 				ecc_bdg_reg = PCIX_BDG_ECC_REG(pfd_p, 0);
2741 			ecc_reg = PCIX_ECC_REG(pfd_p);
2742 			fm_payload_set(ereport,
2743 			    "pcix_ecc_control_0", DATA_TYPE_UINT16,
2744 			    PCIE_IS_BDG(bus_p) ?
2745 			    (ecc_bdg_reg->pcix_ecc_ctlstat >> 16) :
2746 			    (ecc_reg->pcix_ecc_ctlstat >> 16),
2747 			    "pcix_ecc_status_0", DATA_TYPE_UINT16,
2748 			    PCIE_IS_BDG(bus_p) ?
2749 			    (ecc_bdg_reg->pcix_ecc_ctlstat & 0xFFFF) :
2750 			    (ecc_reg->pcix_ecc_ctlstat & 0xFFFF),
2751 			    "pcix_ecc_fst_addr_0", DATA_TYPE_UINT32,
2752 			    PCIE_IS_BDG(bus_p) ?
2753 			    ecc_bdg_reg->pcix_ecc_fstaddr :
2754 			    ecc_reg->pcix_ecc_fstaddr,
2755 			    "pcix_ecc_sec_addr_0", DATA_TYPE_UINT32,
2756 			    PCIE_IS_BDG(bus_p) ?
2757 			    ecc_bdg_reg->pcix_ecc_secaddr :
2758 			    ecc_reg->pcix_ecc_secaddr,
2759 			    "pcix_ecc_attr_0", DATA_TYPE_UINT32,
2760 			    PCIE_IS_BDG(bus_p) ?
2761 			    ecc_bdg_reg->pcix_ecc_attr :
2762 			    ecc_reg->pcix_ecc_attr,
2763 			    NULL);
2764 		}
2765 
2766 		/* PCIx ECC Bridge Registers */
2767 		if (PCIX_ECC_VERSION_CHECK(bus_p) && PCIE_IS_BDG(bus_p)) {
2768 			pf_pcix_ecc_regs_t *ecc_bdg_reg;
2769 
2770 			ecc_bdg_reg = PCIX_BDG_ECC_REG(pfd_p, 1);
2771 			fm_payload_set(ereport,
2772 			    "pcix_ecc_control_1", DATA_TYPE_UINT16,
2773 			    (ecc_bdg_reg->pcix_ecc_ctlstat >> 16),
2774 			    "pcix_ecc_status_1", DATA_TYPE_UINT16,
2775 			    (ecc_bdg_reg->pcix_ecc_ctlstat & 0xFFFF),
2776 			    "pcix_ecc_fst_addr_1", DATA_TYPE_UINT32,
2777 			    ecc_bdg_reg->pcix_ecc_fstaddr,
2778 			    "pcix_ecc_sec_addr_1", DATA_TYPE_UINT32,
2779 			    ecc_bdg_reg->pcix_ecc_secaddr,
2780 			    "pcix_ecc_attr_1", DATA_TYPE_UINT32,
2781 			    ecc_bdg_reg->pcix_ecc_attr,
2782 			    NULL);
2783 		}
2784 
2785 		/* PCIx Bridge */
2786 		if (PCIE_IS_PCIX(bus_p) && PCIE_IS_BDG(bus_p)) {
2787 			fm_payload_set(ereport,
2788 			    "pcix_bdg_status", DATA_TYPE_UINT32,
2789 			    PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_stat,
2790 			    "pcix_bdg_sec_status", DATA_TYPE_UINT16,
2791 			    PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_sec_stat,
2792 			    NULL);
2793 		}
2794 
2795 		/* PCIe registers */
2796 		if (PCIE_IS_PCIE(bus_p)) {
2797 			fm_payload_set(ereport,
2798 			    "pcie_status", DATA_TYPE_UINT16,
2799 			    PCIE_ERR_REG(pfd_p)->pcie_err_status,
2800 			    "pcie_command", DATA_TYPE_UINT16,
2801 			    PCIE_ERR_REG(pfd_p)->pcie_err_ctl,
2802 			    "pcie_dev_cap", DATA_TYPE_UINT32,
2803 			    PCIE_ERR_REG(pfd_p)->pcie_dev_cap,
2804 			    NULL);
2805 		}
2806 
2807 		/* PCIe AER registers */
2808 		if (PCIE_HAS_AER(bus_p)) {
2809 			fm_payload_set(ereport,
2810 			    "pcie_adv_ctl", DATA_TYPE_UINT32,
2811 			    PCIE_ADV_REG(pfd_p)->pcie_adv_ctl,
2812 			    "pcie_ue_status", DATA_TYPE_UINT32,
2813 			    PCIE_ADV_REG(pfd_p)->pcie_ue_status,
2814 			    "pcie_ue_mask", DATA_TYPE_UINT32,
2815 			    PCIE_ADV_REG(pfd_p)->pcie_ue_mask,
2816 			    "pcie_ue_sev", DATA_TYPE_UINT32,
2817 			    PCIE_ADV_REG(pfd_p)->pcie_ue_sev,
2818 			    "pcie_ue_hdr0", DATA_TYPE_UINT32,
2819 			    PCIE_ADV_REG(pfd_p)->pcie_ue_hdr[0],
2820 			    "pcie_ue_hdr1", DATA_TYPE_UINT32,
2821 			    PCIE_ADV_REG(pfd_p)->pcie_ue_hdr[1],
2822 			    "pcie_ue_hdr2", DATA_TYPE_UINT32,
2823 			    PCIE_ADV_REG(pfd_p)->pcie_ue_hdr[2],
2824 			    "pcie_ue_hdr3", DATA_TYPE_UINT32,
2825 			    PCIE_ADV_REG(pfd_p)->pcie_ue_hdr[3],
2826 			    "pcie_ce_status", DATA_TYPE_UINT32,
2827 			    PCIE_ADV_REG(pfd_p)->pcie_ce_status,
2828 			    "pcie_ce_mask", DATA_TYPE_UINT32,
2829 			    PCIE_ADV_REG(pfd_p)->pcie_ce_mask,
2830 			    NULL);
2831 		}
2832 
2833 		/* PCIe AER decoded header */
2834 		if (HAS_AER_LOGS(pfd_p, PCIE_ADV_REG(pfd_p)->pcie_ue_status)) {
2835 			fm_payload_set(ereport,
2836 			    "pcie_ue_tgt_trans", DATA_TYPE_UINT32,
2837 			    PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_trans,
2838 			    "pcie_ue_tgt_addr", DATA_TYPE_UINT64,
2839 			    PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_addr,
2840 			    "pcie_ue_tgt_bdf", DATA_TYPE_UINT16,
2841 			    PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf,
2842 			    NULL);
2843 			/* Clear these values as they no longer valid */
2844 			PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_trans = 0;
2845 			PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_addr = 0;
2846 			PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;
2847 		}
2848 
2849 		/* PCIe BDG AER registers */
2850 		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_HAS_AER(bus_p)) {
2851 			fm_payload_set(ereport,
2852 			    "pcie_sue_adv_ctl", DATA_TYPE_UINT32,
2853 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_ctl,
2854 			    "pcie_sue_status", DATA_TYPE_UINT32,
2855 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_status,
2856 			    "pcie_sue_mask", DATA_TYPE_UINT32,
2857 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_mask,
2858 			    "pcie_sue_sev", DATA_TYPE_UINT32,
2859 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_sev,
2860 			    "pcie_sue_hdr0", DATA_TYPE_UINT32,
2861 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_hdr[0],
2862 			    "pcie_sue_hdr1", DATA_TYPE_UINT32,
2863 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_hdr[1],
2864 			    "pcie_sue_hdr2", DATA_TYPE_UINT32,
2865 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_hdr[2],
2866 			    "pcie_sue_hdr3", DATA_TYPE_UINT32,
2867 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_hdr[3],
2868 			    NULL);
2869 		}
2870 
2871 		/* PCIe BDG AER decoded header */
2872 		if (PCIE_IS_PCIE_BDG(bus_p) && HAS_SAER_LOGS(pfd_p,
2873 		    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_status)) {
2874 			fm_payload_set(ereport,
2875 			    "pcie_sue_tgt_trans", DATA_TYPE_UINT32,
2876 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_trans,
2877 			    "pcie_sue_tgt_addr", DATA_TYPE_UINT64,
2878 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_addr,
2879 			    "pcie_sue_tgt_bdf", DATA_TYPE_UINT16,
2880 			    PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf,
2881 			    NULL);
			/* Clear these values as they are no longer valid */
2883 			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_trans = 0;
2884 			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_addr = 0;
2885 			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
2886 			    PCIE_INVALID_BDF;
2887 		}
2888 
2889 		/* PCIe RP registers */
2890 		if (PCIE_IS_RP(bus_p)) {
2891 			fm_payload_set(ereport,
2892 			    "pcie_rp_status", DATA_TYPE_UINT32,
2893 			    PCIE_RP_REG(pfd_p)->pcie_rp_status,
2894 			    "pcie_rp_control", DATA_TYPE_UINT16,
2895 			    PCIE_RP_REG(pfd_p)->pcie_rp_ctl,
2896 			    NULL);
2897 		}
2898 
2899 		/* PCIe RP AER registers */
2900 		if (PCIE_IS_RP(bus_p) && PCIE_HAS_AER(bus_p)) {
2901 			fm_payload_set(ereport,
2902 			    "pcie_adv_rp_status", DATA_TYPE_UINT32,
2903 			    PCIE_ADV_RP_REG(pfd_p)->pcie_rp_err_status,
2904 			    "pcie_adv_rp_command", DATA_TYPE_UINT32,
2905 			    PCIE_ADV_RP_REG(pfd_p)->pcie_rp_err_cmd,
2906 			    "pcie_adv_rp_ce_src_id", DATA_TYPE_UINT16,
2907 			    PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id,
2908 			    "pcie_adv_rp_ue_src_id", DATA_TYPE_UINT16,
2909 			    PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id,
2910 			    NULL);
2911 		}
2912 
2913 		/* IOV related information */
2914 		if (!PCIE_BDG_IS_UNASSIGNED(PCIE_PFD2BUS(impl->pf_dq_head_p))) {
2915 			fm_payload_set(ereport,
2916 			    "pcie_aff_flags", DATA_TYPE_UINT16,
2917 			    PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags,
2918 			    "pcie_aff_bdf", DATA_TYPE_UINT16,
2919 			    PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf,
2920 			    "orig_sev", DATA_TYPE_UINT32,
2921 			    pfd_p->pe_orig_severity_flags,
2922 			    NULL);
2923 		}
2924 
2925 		/* Misc ereport information */
2926 		fm_payload_set(ereport,
2927 		    "remainder", DATA_TYPE_UINT32, total--,
2928 		    "severity", DATA_TYPE_UINT32, pfd_p->pe_severity_flags,
2929 		    NULL);
2930 
2931 		pf_ereport_post(PCIE_BUS2DIP(bus_p), &ereport, &detector,
2932 		    &eqep);
2933 	}
2934 
2935 	/* Unlock all the devices in the queue */
2936 	for (pfd_p = impl->pf_dq_tail_p; pfd_p; pfd_p = pfd_p->pe_prev) {
2937 		if (pfd_p->pe_lock) {
2938 			pf_handler_exit(PCIE_PFD2DIP(pfd_p));
2939 		}
2940 	}
2941 }
2942 
2943 /*
 * pf_handler_enter must be called to serialize access to each device's
 * pf_data_t.
2945  * Once error handling is finished with the device call pf_handler_exit to allow
2946  * other threads to access it.  The same thread may call pf_handler_enter
2947  * several times without any consequences.
2948  *
2949  * The "impl" variable is passed in during scan fabric to double check that
2950  * there is not a recursive algorithm and to ensure only one thread is doing a
2951  * fabric scan at all times.
2952  *
2953  * In some cases "impl" is not available, such as "child lookup" being called
2954  * from outside of scan fabric, just pass in NULL for this variable and this
2955  * extra check will be skipped.
2956  */
2957 static int
2958 pf_handler_enter(dev_info_t *dip, pf_impl_t *impl)
2959 {
2960 	pf_data_t *pfd_p = PCIE_DIP2PFD(dip);
2961 
2962 	ASSERT(pfd_p);
2963 
2964 	/*
2965 	 * Check to see if the lock has already been taken by this
2966 	 * thread.  If so just return and don't take lock again.
2967 	 */
2968 	if (!pfd_p->pe_lock || !impl) {
2969 		i_ddi_fm_handler_enter(dip);
2970 		pfd_p->pe_lock = B_TRUE;
2971 		return (PF_SCAN_SUCCESS);
2972 	}
2973 
2974 	/* Check to see that this dip is already in the "impl" error queue */
2975 	for (pfd_p = impl->pf_dq_head_p; pfd_p; pfd_p = pfd_p->pe_next) {
2976 		if (PCIE_PFD2DIP(pfd_p) == dip) {
2977 			return (PF_SCAN_SUCCESS);
2978 		}
2979 	}
2980 
2981 	return (PF_SCAN_DEADLOCK);
2982 }
2983 
2984 static void
2985 pf_handler_exit(dev_info_t *dip)
2986 {
2987 	pf_data_t *pfd_p = PCIE_DIP2PFD(dip);
2988 
2989 	ASSERT(pfd_p);
2990 
2991 	ASSERT(pfd_p->pe_lock == B_TRUE);
2992 	i_ddi_fm_handler_exit(dip);
2993 	pfd_p->pe_lock = B_FALSE;
2994 }
2995 
2996 /*
2997  * This function calls the driver's callback function (if it's FMA hardened
2998  * and callback capable). This function relies on the current thread already
2999  * owning the driver's fmhdl lock.
3000  */
3001 static int
3002 pf_fm_callback(dev_info_t *dip, ddi_fm_error_t *derr)
3003 {
3004 	int cb_sts = DDI_FM_OK;
3005 
3006 	if (DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
3007 		dev_info_t *pdip = ddi_get_parent(dip);
3008 		struct i_ddi_fmhdl *hdl = DEVI(pdip)->devi_fmhdl;
3009 		struct i_ddi_fmtgt *tgt = hdl->fh_tgts;
3010 		struct i_ddi_errhdl *errhdl;
3011 		while (tgt != NULL) {
3012 			if (dip == tgt->ft_dip) {
3013 				errhdl = tgt->ft_errhdl;
3014 				cb_sts = errhdl->eh_func(dip, derr,
3015 				    errhdl->eh_impl);
3016 				break;
3017 			}
3018 			tgt = tgt->ft_next;
3019 		}
3020 	}
3021 	return (cb_sts);
3022 }
3023 
/*
 * Reset a pf_data_t back to its quiescent state after an error-handling
 * pass, so stale register snapshots and decoded values from this fault do
 * not leak into the next one.  Only the register groups that exist for this
 * device's type (root, bridge, PCIe, PCI-X) are cleared; invalid-BDF and
 * invalid-source sentinels are restored where a zero value would be a
 * legal BDF.
 */
static void
pf_reset_pfd(pf_data_t *pfd_p)
{
	pcie_bus_t	*bus_p = PCIE_PFD2BUS(pfd_p);

	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_orig_severity_flags = 0;
	/* pe_lock and pe_valid were reset in pf_send_ereport */

	PFD_AFFECTED_DEV(pfd_p)->pe_affected_flags = 0;
	PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;

	/* Root ports/complexes also track scan targets and the error source */
	if (PCIE_IS_ROOT(bus_p)) {
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
		PCIE_ROOT_FAULT(pfd_p)->scan_addr = 0;
		PCIE_ROOT_FAULT(pfd_p)->full_scan = B_FALSE;
		PCIE_ROOT_EH_SRC(pfd_p)->intr_type = PF_INTR_TYPE_NONE;
		PCIE_ROOT_EH_SRC(pfd_p)->intr_data = NULL;
	}

	if (PCIE_IS_BDG(bus_p)) {
		bzero(PCI_BDG_ERR_REG(pfd_p), sizeof (pf_pci_bdg_err_regs_t));
	}

	PCI_ERR_REG(pfd_p)->pci_err_status = 0;
	PCI_ERR_REG(pfd_p)->pci_cfg_comm = 0;

	if (PCIE_IS_PCIE(bus_p)) {
		if (PCIE_IS_ROOT(bus_p)) {
			bzero(PCIE_RP_REG(pfd_p),
			    sizeof (pf_pcie_rp_err_regs_t));
			bzero(PCIE_ADV_RP_REG(pfd_p),
			    sizeof (pf_pcie_adv_rp_err_regs_t));
			/* bzero left the source IDs 0, a valid BDF; fix up */
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id =
			    PCIE_INVALID_BDF;
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id =
			    PCIE_INVALID_BDF;
		} else if (PCIE_IS_PCIE_BDG(bus_p)) {
			bzero(PCIE_ADV_BDG_REG(pfd_p),
			    sizeof (pf_pcie_adv_bdg_err_regs_t));
			/* bzero left the target BDF 0, a valid BDF; fix up */
			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
			    PCIE_INVALID_BDF;
		}

		/* PCIe-to-PCI-X bridges carry PCI-X secondary-side state too */
		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				bzero(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				bzero(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}
			PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_sec_stat = 0;
			PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_stat = 0;
		}

		/* AER UE/CE registers and decoded UE target info */
		PCIE_ADV_REG(pfd_p)->pcie_adv_ctl = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_status = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_mask = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_sev = 0;
		PCIE_ADV_HDR(pfd_p, 0) = 0;
		PCIE_ADV_HDR(pfd_p, 1) = 0;
		PCIE_ADV_HDR(pfd_p, 2) = 0;
		PCIE_ADV_HDR(pfd_p, 3) = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ce_status = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ce_mask = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_trans = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_addr = 0;
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;

		PCIE_ERR_REG(pfd_p)->pcie_err_status = 0;
		PCIE_ERR_REG(pfd_p)->pcie_err_ctl = 0;
		PCIE_ERR_REG(pfd_p)->pcie_dev_cap = 0;

	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				bzero(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				bzero(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}
			PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_sec_stat = 0;
			PCIX_BDG_ERR_REG(pfd_p)->pcix_bdg_stat = 0;
		} else {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				bzero(PCIX_ECC_REG(pfd_p),
				    sizeof (pf_pcix_ecc_regs_t));
			}
			PCIX_ERR_REG(pfd_p)->pcix_command = 0;
			PCIX_ERR_REG(pfd_p)->pcix_status = 0;
		}
	}

	/* Detach from any fault queue and clear the RBER-fatal hint */
	pfd_p->pe_prev = NULL;
	pfd_p->pe_next = NULL;
	pfd_p->pe_rber_fatal = B_FALSE;
}
3121 
3122 pcie_bus_t *
3123 pf_find_busp_by_bdf(pf_impl_t *impl, pcie_req_id_t bdf)
3124 {
3125 	pcie_bus_t *temp_bus_p;
3126 	pf_data_t *temp_pfd_p;
3127 
3128 	for (temp_pfd_p = impl->pf_dq_head_p;
3129 	    temp_pfd_p;
3130 	    temp_pfd_p = temp_pfd_p->pe_next) {
3131 		temp_bus_p = PCIE_PFD2BUS(temp_pfd_p);
3132 
3133 		if (bdf == temp_bus_p->bus_bdf) {
3134 			return (temp_bus_p);
3135 		}
3136 	}
3137 
3138 	return (NULL);
3139 }
3140 
3141 pcie_bus_t *
3142 pf_find_busp_by_addr(pf_impl_t *impl, uint64_t addr)
3143 {
3144 	pcie_bus_t *temp_bus_p;
3145 	pf_data_t *temp_pfd_p;
3146 
3147 	for (temp_pfd_p = impl->pf_dq_head_p;
3148 	    temp_pfd_p;
3149 	    temp_pfd_p = temp_pfd_p->pe_next) {
3150 		temp_bus_p = PCIE_PFD2BUS(temp_pfd_p);
3151 
3152 		if (pf_in_assigned_addr(temp_bus_p, addr)) {
3153 			return (temp_bus_p);
3154 		}
3155 	}
3156 
3157 	return (NULL);
3158 }
3159 
3160 pcie_bus_t *
3161 pf_find_busp_by_aer(pf_impl_t *impl, pf_data_t *pfd_p)
3162 {
3163 	pf_pcie_adv_err_regs_t *reg_p = PCIE_ADV_REG(pfd_p);
3164 	pcie_bus_t *temp_bus_p = NULL;
3165 	pcie_req_id_t bdf;
3166 	uint64_t addr;
3167 	pcie_tlp_hdr_t *tlp_hdr = (pcie_tlp_hdr_t *)reg_p->pcie_ue_hdr;
3168 	uint32_t trans_type = reg_p->pcie_ue_tgt_trans;
3169 
3170 	if ((tlp_hdr->type == PCIE_TLP_TYPE_CPL) ||
3171 	    (tlp_hdr->type == PCIE_TLP_TYPE_CPLLK)) {
3172 		pcie_cpl_t *cpl_tlp = (pcie_cpl_t *)&reg_p->pcie_ue_hdr[1];
3173 
3174 		bdf = (cpl_tlp->rid > cpl_tlp->cid) ? cpl_tlp->rid :
3175 		    cpl_tlp->cid;
3176 		temp_bus_p = pf_find_busp_by_bdf(impl, bdf);
3177 	} else if (trans_type == PF_ADDR_PIO) {
3178 		addr = reg_p->pcie_ue_tgt_addr;
3179 		temp_bus_p = pf_find_busp_by_addr(impl, addr);
3180 	} else {
3181 		/* PF_ADDR_DMA type */
3182 		bdf = reg_p->pcie_ue_tgt_bdf;
3183 		temp_bus_p = pf_find_busp_by_bdf(impl, bdf);
3184 	}
3185 
3186 	return (temp_bus_p);
3187 }
3188 
3189 pcie_bus_t *
3190 pf_find_busp_by_saer(pf_impl_t *impl, pf_data_t *pfd_p)
3191 {
3192 	pf_pcie_adv_bdg_err_regs_t *reg_p = PCIE_ADV_BDG_REG(pfd_p);
3193 	pcie_bus_t *temp_bus_p = NULL;
3194 	pcie_req_id_t bdf;
3195 	uint64_t addr;
3196 
3197 	addr = reg_p->pcie_sue_tgt_addr;
3198 	bdf = reg_p->pcie_sue_tgt_bdf;
3199 
3200 	if (addr != NULL) {
3201 		temp_bus_p = pf_find_busp_by_addr(impl, addr);
3202 	} else if (PCIE_CHECK_VALID_BDF(bdf)) {
3203 		temp_bus_p = pf_find_busp_by_bdf(impl, bdf);
3204 	}
3205 
3206 	return (temp_bus_p);
3207 }
3208