xref: /titanic_50/usr/src/uts/sun4v/io/px/px_err.c (revision 3edf445cce90224c4218c6987d6709e8481cae58)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * sun4v Fire Error Handling
30  */
31 
32 #include <sys/types.h>
33 #include <sys/ddi.h>
34 #include <sys/sunddi.h>
35 #include <sys/sunndi.h>
36 #include <sys/fm/protocol.h>
37 #include <sys/fm/util.h>
38 #include <sys/membar.h>
39 #include "px_obj.h"
40 #include "px_err.h"
41 
42 static void px_err_fill_pf_data(dev_info_t *dip, px_t *px_p, px_rc_err_t *epkt);
43 static uint_t px_err_intr(px_fault_t *fault_p, px_rc_err_t *epkt);
44 static int  px_err_epkt_severity(px_t *px_p, ddi_fm_error_t *derr,
45     px_rc_err_t *epkt, int caller);
46 
47 static void px_err_log_handle(dev_info_t *dip, px_rc_err_t *epkt,
48     boolean_t is_block_pci, char *msg);
49 static void px_err_send_epkt_erpt(dev_info_t *dip, px_rc_err_t *epkt,
50     boolean_t is_block_pci, int err, ddi_fm_error_t *derr,
51     boolean_t is_valid_epkt);
52 static int px_cb_epkt_severity(dev_info_t *dip, ddi_fm_error_t *derr,
53     px_rc_err_t *epkt);
54 static int px_mmu_epkt_severity(dev_info_t *dip, ddi_fm_error_t *derr,
55     px_rc_err_t *epkt);
56 static int px_intr_epkt_severity(dev_info_t *dip, ddi_fm_error_t *derr,
57     px_rc_err_t *epkt);
58 static int px_pcie_epkt_severity(dev_info_t *dip, ddi_fm_error_t *derr,
59     px_rc_err_t *epkt);
60 static int px_intr_handle_errors(dev_info_t *dip, ddi_fm_error_t *derr,
61     px_rc_err_t *epkt);
62 static void px_fix_legacy_epkt(dev_info_t *dip, ddi_fm_error_t *derr,
63     px_rc_err_t *epkt);
64 static int px_mmu_handle_lookup(dev_info_t *dip, ddi_fm_error_t *derr,
65     px_rc_err_t *epkt);
66 
67 /* Include the code generated sun4v epkt checking code */
68 #include "px_err_gen.c"
69 
70 /*
71  * This variable indicates if we have a hypervisor that could potentially send
72  * incorrect epkts. We always set this to TRUE for now until we find a way to
73  * tell if this HV bug has been fixed.
74  */
boolean_t px_legacy_epkt = B_TRUE;	/* clear once the HV epkt bug is fixed */
76 
77 /*
78  * px_err_cb_intr:
79  * Interrupt handler for the Host Bus Block.
80  */
81 uint_t
82 px_err_cb_intr(caddr_t arg)
83 {
84 	px_fault_t	*fault_p = (px_fault_t *)arg;
85 	px_rc_err_t	*epkt = (px_rc_err_t *)fault_p->px_intr_payload;
86 
87 	if (epkt != NULL) {
88 		return (px_err_intr(fault_p, epkt));
89 	}
90 
91 	return (DDI_INTR_UNCLAIMED);
92 }
93 
94 /*
95  * px_err_dmc_pec_intr:
96  * Interrupt handler for the DMC/PEC block.
97  */
98 uint_t
99 px_err_dmc_pec_intr(caddr_t arg)
100 {
101 	px_fault_t	*fault_p = (px_fault_t *)arg;
102 	px_rc_err_t	*epkt = (px_rc_err_t *)fault_p->px_intr_payload;
103 
104 	if (epkt != NULL) {
105 		return (px_err_intr(fault_p, epkt));
106 	}
107 
108 	return (DDI_INTR_UNCLAIMED);
109 }
110 
111 /*
112  * px_err_cmn_intr:
113  * Common function called by trap, mondo and fabric intr.
114  * This function is more meaningful in sun4u implementation.  Kept
115  * to mirror sun4u call stack.
116  * o check for safe access
117  * o create and queue RC info for later use in fabric scan.
118  *   o RUC/WUC, PTLP, MMU Errors(CA), UR
119  *
120  * @param px_p		leaf in which to check access
121  * @param derr		fm err data structure to be updated
122  * @param caller	PX_TRAP_CALL | PX_INTR_CALL
123  * @param chkjbc	whether to handle hostbus registers (ignored)
124  * @return err		PX_NO_PANIC | PX_PROTECTED |
125  *                      PX_PANIC | PX_HW_RESET | PX_EXPECTED
126  */
127 /* ARGSUSED */
int
px_err_cmn_intr(px_t *px_p, ddi_fm_error_t *derr, int caller, int block)
{
	/* Update derr if a safe (cautious/peek/poke) access was in flight */
	px_err_safeacc_check(px_p, derr);
	return (DDI_FM_OK);
}
134 
135 /*
136  * fills RC specific fault data
137  */
138 static void
139 px_err_fill_pfd(dev_info_t *dip, px_t *px_p, px_rc_err_t *epkt) {
140 	pf_pcie_adv_err_regs_t adv_reg;
141 	int		sts = DDI_SUCCESS;
142 	pcie_req_id_t	fault_bdf = 0;
143 	uint64_t	fault_addr = 0;
144 	uint16_t	s_status = 0;
145 
146 	/* Add an PCIE PF_DATA Entry */
147 	if (epkt->rc_descr.block == BLOCK_MMU) {
148 		/* Only PIO Fault Addresses are valid, this is DMA */
149 		s_status = PCI_STAT_S_TARG_AB;
150 		fault_addr = NULL;
151 
152 		if (epkt->rc_descr.H)
153 			fault_bdf = (pcie_req_id_t)(epkt->hdr[0] >> 16);
154 		else
155 			sts = DDI_FAILURE;
156 	} else {
157 		px_pec_err_t	*pec_p = (px_pec_err_t *)epkt;
158 		uint32_t	dir = pec_p->pec_descr.dir;
159 
160 		adv_reg.pcie_ue_hdr[0] = (uint32_t)(pec_p->hdr[0]);
161 		adv_reg.pcie_ue_hdr[1] = (uint32_t)(pec_p->hdr[0] >> 32);
162 		adv_reg.pcie_ue_hdr[2] = (uint32_t)(pec_p->hdr[1]);
163 		adv_reg.pcie_ue_hdr[3] = (uint32_t)(pec_p->hdr[1] >> 32);
164 
165 		/* translate RC UR/CA to legacy secondary errors */
166 		if ((dir == DIR_READ || dir == DIR_WRITE) &&
167 		    pec_p->pec_descr.U) {
168 			if (pec_p->ue_reg_status & PCIE_AER_UCE_UR)
169 				s_status |= PCI_STAT_R_MAST_AB;
170 			if (pec_p->ue_reg_status & PCIE_AER_UCE_CA)
171 				s_status |= PCI_STAT_R_TARG_AB;
172 		}
173 
174 		if (pec_p->ue_reg_status & PCIE_AER_UCE_PTLP)
175 			s_status |= PCI_STAT_PERROR;
176 
177 		if (pec_p->ue_reg_status & PCIE_AER_UCE_CA)
178 			s_status |= PCI_STAT_S_TARG_AB;
179 
180 		sts = pf_tlp_decode(PCIE_DIP2BUS(dip), &adv_reg);
181 		fault_bdf = adv_reg.pcie_ue_tgt_bdf;
182 		fault_addr = adv_reg.pcie_ue_tgt_bdf;
183 	}
184 
185 	if (sts == DDI_SUCCESS)
186 		px_rp_en_q(px_p, fault_bdf, fault_addr, s_status);
187 }
188 
189 /*
190  * px_err_intr:
191  * Interrupt handler for the JBC/DMC/PEC block.
192  * o lock
193  * o create derr
194  * o check safe access
195  * o px_err_check_severity(epkt)
196  * o pcie_scan_fabric
197  * o Idle intr state
198  * o unlock
199  * o handle error: fatal? fm_panic() : return INTR_CLAIMED)
200  */
static uint_t
px_err_intr(px_fault_t *fault_p, px_rc_err_t *epkt)
{
	px_t		*px_p = DIP_TO_STATE(fault_p->px_fh_dip);
	dev_info_t	*rpdip = px_p->px_dip;
	int		rc_err, fab_err, msg;
	ddi_fm_error_t	derr;

	/* If the FM lock cannot be entered, bail but still claim the intr. */
	if (px_fm_enter(px_p) != DDI_SUCCESS)
		goto done;

	/* Create the derr */
	bzero(&derr, sizeof (ddi_fm_error_t));
	derr.fme_version = DDI_FME_VERSION;
	derr.fme_ena = fm_ena_generate(epkt->stick, FM_ENA_FMT1);
	derr.fme_flag = DDI_FM_ERR_UNEXPECTED;

	/* Basically check for safe access */
	(void) px_err_cmn_intr(px_p, &derr, PX_INTR_CALL, PX_FM_BLOCK_ALL);

	/* Check the severity of this error */
	rc_err = px_err_epkt_severity(px_p, &derr, epkt, PX_INTR_CALL);

	/* Scan the fabric if the root port is not in drain state. */
	fab_err = px_scan_fabric(px_p, rpdip, &derr);

	/* Set the intr state to idle for the leaf that received the mondo */
	if (px_lib_intr_setstate(rpdip, fault_p->px_fh_sysino,
	    INTR_IDLE_STATE) != DDI_SUCCESS) {
		px_fm_exit(px_p);
		return (DDI_INTR_UNCLAIMED);
	}

	/* Map the epkt's originating block to a px_err_panic() message type */
	switch (epkt->rc_descr.block) {
	case BLOCK_MMU: /* FALLTHROUGH */
	case BLOCK_INTR:
		msg = PX_RC;
		break;
	case BLOCK_PCIE:
		msg = PX_RP;
		break;
	case BLOCK_HOSTBUS: /* FALLTHROUGH */
	default:
		msg = PX_HB;
		break;
	}

	/*
	 * NOTE(review): px_err_panic() is deliberately called twice -- once
	 * with B_TRUE while the FM lock is still held, and again with B_FALSE
	 * after px_fm_exit().  Presumably the final argument controls which
	 * invocation may actually panic, so the panic does not occur with the
	 * lock held; confirm against px_err_panic()'s definition.
	 */
	px_err_panic(rc_err, msg, fab_err, B_TRUE);
	px_fm_exit(px_p);
	px_err_panic(rc_err, msg, fab_err, B_FALSE);

done:
	return (DDI_INTR_CLAIMED);
}
255 
256 /*
257  * px_err_epkt_severity:
258  * Check the severity of the fire error based the epkt received
259  *
260  * @param px_p		leaf in which to take the snap shot.
261  * @param derr		fm err in which the ereport is to be based on
 * @param epkt		epkt received from HV
263  */
static int
px_err_epkt_severity(px_t *px_p, ddi_fm_error_t *derr, px_rc_err_t *epkt,
    int caller)
{
	px_pec_t 	*pec_p = px_p->px_pec_p;
	dev_info_t	*dip = px_p->px_dip;
	boolean_t	is_safeacc = B_FALSE;
	boolean_t	is_block_pci = B_FALSE;
	boolean_t	is_valid_epkt = B_FALSE;
	int		err = 0;

	/* Cautious access error handling  */
	switch (derr->fme_flag) {
	case DDI_FM_ERR_EXPECTED:
		if (caller == PX_TRAP_CALL) {
			/*
			 * for ddi_caut_get treat all events as nonfatal
			 * The trampoline will set err_ena = 0,
			 * err_status = NONFATAL.
			 */
			derr->fme_status = DDI_FM_NONFATAL;
			is_safeacc = B_TRUE;
		} else {
			/*
			 * For ddi_caut_put treat all events as nonfatal. Here
			 * we have the handle and can call ndi_fm_acc_err_set().
			 */
			derr->fme_status = DDI_FM_NONFATAL;
			ndi_fm_acc_err_set(pec_p->pec_acc_hdl, derr);
			is_safeacc = B_TRUE;
		}
		break;
	case DDI_FM_ERR_PEEK:
	case DDI_FM_ERR_POKE:
		/*
		 * For ddi_peek/poke treat all events as nonfatal.
		 */
		is_safeacc = B_TRUE;
		break;
	default:
		is_safeacc = B_FALSE;
	}

	/*
	 * Older hypervisors in some cases send epkts with incorrect fields.
	 * We have to handle these "special" epkts correctly.
	 */
	if (px_legacy_epkt)
		px_fix_legacy_epkt(dip, derr, epkt);

	/* Dispatch on the epkt's originating block to compute severity. */
	switch (epkt->rc_descr.block) {
	case BLOCK_HOSTBUS:
		err = px_cb_epkt_severity(dip, derr, epkt);
		break;
	case BLOCK_MMU:
		err = px_mmu_epkt_severity(dip, derr, epkt);
		px_err_fill_pfd(dip, px_p, epkt);
		break;
	case BLOCK_INTR:
		err = px_intr_epkt_severity(dip, derr, epkt);
		break;
	case BLOCK_PCIE:
		is_block_pci = B_TRUE;
		err = px_pcie_epkt_severity(dip, derr, epkt);
		px_err_fill_pfd(dip, px_p, epkt);
		break;
	default:
		err = 0;
	}

	/*
	 * Log the epkt if logging for its severity class is enabled in
	 * px_log; any recognized severity marks the epkt as valid.
	 */
	if ((err & PX_HW_RESET) || (err & PX_PANIC)) {
		if (px_log & PX_PANIC)
			px_err_log_handle(dip, epkt, is_block_pci, "PANIC");
		is_valid_epkt = B_TRUE;
	} else if (err & PX_PROTECTED) {
		if (px_log & PX_PROTECTED)
			px_err_log_handle(dip, epkt, is_block_pci, "PROTECTED");
		is_valid_epkt = B_TRUE;
	} else if (err & PX_NO_PANIC) {
		if (px_log & PX_NO_PANIC)
			px_err_log_handle(dip, epkt, is_block_pci, "NO PANIC");
		is_valid_epkt = B_TRUE;
	} else if (err & PX_NO_ERROR) {
		if (px_log & PX_NO_ERROR)
			px_err_log_handle(dip, epkt, is_block_pci, "NO ERROR");
		is_valid_epkt = B_TRUE;
	} else if (err == 0) {
		px_err_log_handle(dip, epkt, is_block_pci, "UNRECOGNIZED");
		is_valid_epkt = B_FALSE;

		/* Panic on an unrecognized epkt */
		err = PX_PANIC;
	}

	px_err_send_epkt_erpt(dip, epkt, is_block_pci, err, derr,
	    is_valid_epkt);

	/* Readjust the severity as a result of safe access */
	if (is_safeacc && !(err & PX_PANIC) && !(px_die & PX_PROTECTED))
		err = PX_NO_PANIC;

	return (err);
}
367 
/*
 * px_err_send_epkt_erpt:
 * Post a debug ereport (class PX_FM_RC_UNRECOG) carrying the raw epkt
 * contents rendered as a string, so the HV error packet is preserved even
 * when it is invalid or unrecognized.
 *
 * @param is_block_pci	epkt came from the PCIE block (px_pec_err_t layout)
 * @param err		severity computed for this epkt
 * @param is_valid_epkt	whether the epkt was recognized; invalid epkts are
 *			posted with zeroed sysino/ehdl/stick payload fields
 */
static void
px_err_send_epkt_erpt(dev_info_t *dip, px_rc_err_t *epkt,
    boolean_t is_block_pci, int err, ddi_fm_error_t *derr,
    boolean_t is_valid_epkt)
{
	char buf[FM_MAX_CLASS], descr_buf[1024];

	/* send ereport for debug purposes */
	(void) snprintf(buf, FM_MAX_CLASS, "%s", PX_FM_RC_UNRECOG);

	if (is_block_pci) {
		/* PEC-block epkts are described via the px_pec_err_t view */
		px_pec_err_t *pec = (px_pec_err_t *)epkt;
		(void) snprintf(descr_buf, sizeof (descr_buf),
		    "%s Epkt contents:\n"
		    "Block: 0x%x, Dir: 0x%x, Flags: Z=%d, S=%d, R=%d\n"
		    "I=%d, H=%d, C=%d, U=%d, E=%d, P=%d\n"
		    "PCI Err Status: 0x%x, PCIe Err Status: 0x%x\n"
		    "CE Status Reg: 0x%x, UE Status Reg: 0x%x\n"
		    "HDR1: 0x%lx, HDR2: 0x%lx\n"
		    "Err Src Reg: 0x%x, Root Err Status: 0x%x\n"
		    "Err Severity: 0x%x\n",
		    is_valid_epkt ? "Valid" : "Invalid",
		    pec->pec_descr.block, pec->pec_descr.dir,
		    pec->pec_descr.Z, pec->pec_descr.S,
		    pec->pec_descr.R, pec->pec_descr.I,
		    pec->pec_descr.H, pec->pec_descr.C,
		    pec->pec_descr.U, pec->pec_descr.E,
		    pec->pec_descr.P, pec->pci_err_status,
		    pec->pcie_err_status, pec->ce_reg_status,
		    pec->ue_reg_status, pec->hdr[0],
		    pec->hdr[1], pec->err_src_reg,
		    pec->root_err_status, err);

		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
		    EPKT_SYSINO, DATA_TYPE_UINT64,
		    is_valid_epkt ? pec->sysino : 0,
		    EPKT_EHDL, DATA_TYPE_UINT64,
		    is_valid_epkt ? pec->ehdl : 0,
		    EPKT_STICK, DATA_TYPE_UINT64,
		    is_valid_epkt ? pec->stick : 0,
		    EPKT_PEC_DESCR, DATA_TYPE_STRING, descr_buf);
	} else {
		/* All other blocks are described via the px_rc_err_t view */
		(void) snprintf(descr_buf, sizeof (descr_buf),
		    "%s Epkt contents:\n"
		    "Block: 0x%x, Op: 0x%x, Phase: 0x%x, Cond: 0x%x\n"
		    "Dir: 0x%x, Flags: STOP=%d, H=%d, R=%d, D=%d\n"
		    "M=%d, S=%d, Size: 0x%x, Addr: 0x%lx\n"
		    "Hdr1: 0x%lx, Hdr2: 0x%lx, Res: 0x%lx\n"
		    "Err Severity: 0x%x\n",
		    is_valid_epkt ? "Valid" : "Invalid",
		    epkt->rc_descr.block, epkt->rc_descr.op,
		    epkt->rc_descr.phase, epkt->rc_descr.cond,
		    epkt->rc_descr.dir, epkt->rc_descr.STOP,
		    epkt->rc_descr.H, epkt->rc_descr.R,
		    epkt->rc_descr.D, epkt->rc_descr.M,
		    epkt->rc_descr.S, epkt->size, epkt->addr,
		    epkt->hdr[0], epkt->hdr[1], epkt->reserved,
		    err);

		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
		    EPKT_SYSINO, DATA_TYPE_UINT64,
		    is_valid_epkt ? epkt->sysino : 0,
		    EPKT_EHDL, DATA_TYPE_UINT64,
		    is_valid_epkt ? epkt->ehdl : 0,
		    EPKT_STICK, DATA_TYPE_UINT64,
		    is_valid_epkt ? epkt->stick : 0,
		    EPKT_RC_DESCR, DATA_TYPE_STRING, descr_buf);
	}
}
439 
440 static void
441 px_err_log_handle(dev_info_t *dip, px_rc_err_t *epkt, boolean_t is_block_pci,
442     char *msg)
443 {
444 	if (is_block_pci) {
445 		px_pec_err_t *pec = (px_pec_err_t *)epkt;
446 		DBG(DBG_ERR_INTR, dip,
447 		    "A PCIe root port error has occured with a severity"
448 		    " \"%s\"\n"
449 		    "\tBlock: 0x%x, Dir: 0x%x, Flags: Z=%d, S=%d, R=%d, I=%d\n"
450 		    "\tH=%d, C=%d, U=%d, E=%d, P=%d\n"
451 		    "\tpci_err: 0x%x, pcie_err=0x%x, ce_reg: 0x%x\n"
452 		    "\tue_reg: 0x%x, Hdr1: 0x%p, Hdr2: 0x%p\n"
453 		    "\terr_src: 0x%x, root_err: 0x%x\n",
454 		    msg, pec->pec_descr.block, pec->pec_descr.dir,
455 		    pec->pec_descr.Z, pec->pec_descr.S, pec->pec_descr.R,
456 		    pec->pec_descr.I, pec->pec_descr.H, pec->pec_descr.C,
457 		    pec->pec_descr.U, pec->pec_descr.E, pec->pec_descr.P,
458 		    pec->pci_err_status, pec->pcie_err_status,
459 		    pec->ce_reg_status, pec->ue_reg_status, pec->hdr[0],
460 		    pec->hdr[1], pec->err_src_reg, pec->root_err_status);
461 	} else {
462 		DBG(DBG_ERR_INTR, dip,
463 		    "A PCIe root complex error has occured with a severity"
464 		    " \"%s\"\n"
465 		    "\tBlock: 0x%x, Op: 0x%x, Phase: 0x%x, Cond: 0x%x\n"
466 		    "\tDir: 0x%x, Flags: STOP=%d, H=%d, R=%d, D=%d, M=%d\n"
467 		    "\tS=%d, Size: 0x%x, Addr: 0x%p\n"
468 		    "\tHdr1: 0x%p, Hdr2: 0x%p, Res: 0x%p\n",
469 		    msg, epkt->rc_descr.block, epkt->rc_descr.op,
470 		    epkt->rc_descr.phase, epkt->rc_descr.cond,
471 		    epkt->rc_descr.dir, epkt->rc_descr.STOP, epkt->rc_descr.H,
472 		    epkt->rc_descr.R, epkt->rc_descr.D, epkt->rc_descr.M,
473 		    epkt->rc_descr.S, epkt->size, epkt->addr, epkt->hdr[0],
474 		    epkt->hdr[1], epkt->reserved);
475 	}
476 }
477 
478 /* ARGSUSED */
static void
px_fix_legacy_epkt(dev_info_t *dip, ddi_fm_error_t *derr, px_rc_err_t *epkt)
{
	/*
	 * Rewrite descriptor fields that older hypervisors are known to fill
	 * in incorrectly.  Each nested switch narrows one specific
	 * {block, op, phase, cond, dir} combination and patches it to the
	 * value a fixed hypervisor would have sent.
	 *
	 * We don't have a default case for any of the below switch statements
	 * since we are ok with the code falling through.
	 */
	switch (epkt->rc_descr.block) {
	case BLOCK_HOSTBUS:
		/* Hostbus DMA with everything unknown: assume a read */
		switch (epkt->rc_descr.op) {
		case OP_DMA:
			switch (epkt->rc_descr.phase) {
			case PH_UNKNOWN:
				switch (epkt->rc_descr.cond) {
				case CND_UNKNOWN:
					switch (epkt->rc_descr.dir) {
					case DIR_RESERVED:
						epkt->rc_descr.dir = DIR_READ;
						break;
					} /* DIR */
				} /* CND */
			} /* PH */
		} /* OP */
		break;
	case BLOCK_MMU:
		switch (epkt->rc_descr.op) {
		case OP_XLAT:
			switch (epkt->rc_descr.phase) {
			/* Protection fault with unknown dir: it was a write */
			case PH_DATA:
				switch (epkt->rc_descr.cond) {
				case CND_PROT:
					switch (epkt->rc_descr.dir) {
					case DIR_UNKNOWN:
						epkt->rc_descr.dir = DIR_WRITE;
						break;
					} /* DIR */
				} /* CND */
				break;
			/* IRR phase actually describes an address/IRR error */
			case PH_IRR:
				switch (epkt->rc_descr.cond) {
				case CND_RESERVED:
					switch (epkt->rc_descr.dir) {
					case DIR_IRR:
						epkt->rc_descr.phase = PH_ADDR;
						epkt->rc_descr.cond = CND_IRR;
					} /* DIR */
				} /* CND */
			} /* PH */
		} /* OP */
		break;
	case BLOCK_INTR:
		switch (epkt->rc_descr.op) {
		case OP_MSIQ:
			switch (epkt->rc_descr.phase) {
			case PH_UNKNOWN:
				switch (epkt->rc_descr.cond) {
				/* Illegal MSIQ condition: dir is IRR */
				case CND_ILL:
					switch (epkt->rc_descr.dir) {
					case DIR_RESERVED:
						epkt->rc_descr.dir = DIR_IRR;
						break;
					} /* DIR */
					break;
				/* IRR/IRR combination means queue overflow */
				case CND_IRR:
					switch (epkt->rc_descr.dir) {
					case DIR_IRR:
						epkt->rc_descr.cond = CND_OV;
						break;
					} /* DIR */
				} /* CND */
			} /* PH */
			break;
		/* Reserved INTR ops are really MSI32 operations */
		case OP_RESERVED:
			switch (epkt->rc_descr.phase) {
			case PH_UNKNOWN:
				switch (epkt->rc_descr.cond) {
				case CND_ILL:
					switch (epkt->rc_descr.dir) {
					case DIR_IRR:
						epkt->rc_descr.op = OP_MSI32;
						epkt->rc_descr.phase = PH_DATA;
						break;
					} /* DIR */
				} /* CND */
				break;
			case PH_DATA:
				switch (epkt->rc_descr.cond) {
				case CND_INT:
					switch (epkt->rc_descr.dir) {
					case DIR_UNKNOWN:
						epkt->rc_descr.op = OP_MSI32;
						break;
					} /* DIR */
				} /* CND */
			} /* PH */
		} /* OP */
	} /* BLOCK */
}
577 
578 /* ARGSUSED */
579 static int
580 px_intr_handle_errors(dev_info_t *dip, ddi_fm_error_t *derr, px_rc_err_t *epkt)
581 {
582 	return (px_err_check_eq(dip));
583 }
584 
585 /* ARGSUSED */
static int
px_pcie_epkt_severity(dev_info_t *dip, ddi_fm_error_t *derr, px_rc_err_t *epkt)
{
	px_pec_err_t	*pec_p = (px_pec_err_t *)epkt;
	px_err_pcie_t	*pcie = (px_err_pcie_t *)epkt;
	pf_pcie_adv_err_regs_t adv_reg;
	int		sts;
	uint32_t	temp;

	/*
	 * Check for failed PIO Read/Writes, which are errors that are not
	 * defined in the PCIe spec.
	 */
	temp = PCIE_AER_UCE_UR | PCIE_AER_UCE_CA;
	if (((pec_p->pec_descr.dir == DIR_READ) ||
	    (pec_p->pec_descr.dir == DIR_WRITE)) &&
	    pec_p->pec_descr.U && (pec_p->ue_reg_status & temp)) {
		/* Reassemble the logged 128-bit TLP header from the epkt */
		adv_reg.pcie_ue_hdr[0] = (uint32_t)(pec_p->hdr[0]);
		adv_reg.pcie_ue_hdr[1] = (uint32_t)(pec_p->hdr[0] >> 32);
		adv_reg.pcie_ue_hdr[2] = (uint32_t)(pec_p->hdr[1]);
		adv_reg.pcie_ue_hdr[3] = (uint32_t)(pec_p->hdr[1] >> 32);

		sts = pf_tlp_decode(PCIE_DIP2BUS(dip), &adv_reg);

		/*
		 * If a driver access handle covers the faulting transaction
		 * the error is contained; otherwise it warrants a panic.
		 */
		if (sts == DDI_SUCCESS &&
		    pf_hdl_lookup(dip, derr->fme_ena,
		    adv_reg.pcie_ue_tgt_trans,
		    adv_reg.pcie_ue_tgt_addr,
		    adv_reg.pcie_ue_tgt_bdf) == PF_HDL_FOUND)
			return (PX_NO_PANIC);
		else
			return (PX_PANIC);
	}

	/* Zero out any register whose valid bit is not set in the epkt */
	if (!pec_p->pec_descr.C)
		pec_p->ce_reg_status = 0;
	if (!pec_p->pec_descr.U)
		pec_p->ue_reg_status = 0;
	if (!pec_p->pec_descr.H)
		pec_p->hdr[0] = 0;
	if (!pec_p->pec_descr.I)
		pec_p->hdr[1] = 0;

	/*
	 * According to the PCIe spec, there is a first error pointer.  If there
	 * are header logs recorded and there are more than one error, the log
	 * will belong to the error that the first error pointer points to.
	 *
	 * The regs.primary_ue expects a bit number, go through the ue register
	 * and find the first error that occurred.  Because the sun4v epkt spec
	 * does not define this value, the algorithm below gives the lower bit
	 * priority.
	 */
	temp = pcie->ue_reg;
	if (temp) {
		/* Find the lowest set bit; that becomes the primary UE. */
		int x;
		for (x = 0; !(temp & 0x1); x++) {
			temp = temp >> 1;
		}
		pcie->primary_ue = 1 << x;
	} else {
		pcie->primary_ue = 0;
	}

	/* Sun4v doesn't log the TX hdr except for CTOs */
	if (pcie->primary_ue == PCIE_AER_UCE_TO) {
		pcie->tx_hdr1 = pcie->rx_hdr1;
		pcie->tx_hdr2 = pcie->rx_hdr2;
		pcie->tx_hdr3 = pcie->rx_hdr3;
		pcie->tx_hdr4 = pcie->rx_hdr4;
		pcie->rx_hdr1 = 0;
		pcie->rx_hdr2 = 0;
		pcie->rx_hdr3 = 0;
		pcie->rx_hdr4 = 0;
	} else {
		pcie->tx_hdr1 = 0;
		pcie->tx_hdr2 = 0;
		pcie->tx_hdr3 = 0;
		pcie->tx_hdr4 = 0;
	}

	return (px_err_check_pcie(dip, derr, pcie));
}
669 
670 static int
671 px_mmu_handle_lookup(dev_info_t *dip, ddi_fm_error_t *derr, px_rc_err_t *epkt)
672 {
673 	uint64_t addr = (uint64_t)epkt->addr;
674 	pcie_req_id_t bdf = NULL;
675 
676 	if (epkt->rc_descr.H) {
677 		bdf = (uint32_t)((epkt->hdr[0] >> 16) && 0xFFFF);
678 	}
679 
680 	return (pf_hdl_lookup(dip, derr->fme_ena, PF_ADDR_DMA, addr,
681 	    bdf));
682 }
683