xref: /titanic_50/usr/src/uts/sun4v/io/px/px_err.c (revision 2df1fe9ca32bb227b9158c67f5c00b54c20b10fd)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * sun4v Fire Error Handling
30  */
31 
32 #include <sys/types.h>
33 #include <sys/ddi.h>
34 #include <sys/sunddi.h>
35 #include <sys/fm/protocol.h>
36 #include <sys/fm/util.h>
37 #include <sys/membar.h>
38 #include "px_obj.h"
39 #include "px_err.h"
40 
41 static void px_err_fill_pf_data(dev_info_t *dip, px_t *px_p, px_rc_err_t *epkt);
42 static uint_t px_err_intr(px_fault_t *fault_p, px_rc_err_t *epkt);
43 static int  px_err_epkt_severity(px_t *px_p, ddi_fm_error_t *derr,
44     px_rc_err_t *epkt, int caller);
45 
46 static void px_err_log_handle(dev_info_t *dip, px_rc_err_t *epkt,
47     boolean_t is_block_pci, char *msg);
48 static int px_cb_epkt_severity(dev_info_t *dip, ddi_fm_error_t *derr,
49     px_rc_err_t *epkt);
50 static int px_mmu_epkt_severity(dev_info_t *dip, ddi_fm_error_t *derr,
51     px_rc_err_t *epkt);
52 static int px_intr_epkt_severity(dev_info_t *dip, ddi_fm_error_t *derr,
53     px_rc_err_t *epkt);
54 static int px_pcie_epkt_severity(dev_info_t *dip, ddi_fm_error_t *derr,
55     px_rc_err_t *epkt);
56 static int px_intr_handle_errors(dev_info_t *dip, ddi_fm_error_t *derr,
57     px_rc_err_t *epkt);
58 static void px_fix_legacy_epkt(dev_info_t *dip, ddi_fm_error_t *derr,
59     px_rc_err_t *epkt);
60 static int px_mmu_handle_lookup(dev_info_t *dip, ddi_fm_error_t *derr,
61     px_rc_err_t *epkt);
62 
63 /* Include the code generated sun4v epkt checking code */
64 #include "px_err_gen.c"
65 
/*
 * This variable indicates if we have a hypervisor that could potentially send
 * incorrect epkts. We always set this to TRUE for now until we find a way to
 * tell if this HV bug has been fixed.  While TRUE, px_err_epkt_severity()
 * passes every epkt through px_fix_legacy_epkt() before classification.
 */
boolean_t px_legacy_epkt = B_TRUE;
72 
73 /*
74  * px_err_cb_intr:
75  * Interrupt handler for the Host Bus Block.
76  */
77 uint_t
78 px_err_cb_intr(caddr_t arg)
79 {
80 	px_fault_t	*fault_p = (px_fault_t *)arg;
81 	px_rc_err_t	*epkt = (px_rc_err_t *)fault_p->px_intr_payload;
82 
83 	if (epkt != NULL) {
84 		return (px_err_intr(fault_p, epkt));
85 	}
86 
87 	return (DDI_INTR_UNCLAIMED);
88 }
89 
90 /*
91  * px_err_dmc_pec_intr:
92  * Interrupt handler for the DMC/PEC block.
93  */
94 uint_t
95 px_err_dmc_pec_intr(caddr_t arg)
96 {
97 	px_fault_t	*fault_p = (px_fault_t *)arg;
98 	px_rc_err_t	*epkt = (px_rc_err_t *)fault_p->px_intr_payload;
99 
100 	if (epkt != NULL) {
101 		return (px_err_intr(fault_p, epkt));
102 	}
103 
104 	return (DDI_INTR_UNCLAIMED);
105 }
106 
107 /*
108  * px_err_cmn_intr:
109  * Common function called by trap, mondo and fabric intr.
110  * This function is more meaningful in sun4u implementation.  Kept
111  * to mirror sun4u call stack.
112  * o check for safe access
113  * o create and queue RC info for later use in fabric scan.
114  *   o RUC/WUC, PTLP, MMU Errors(CA), UR
115  *
116  * @param px_p		leaf in which to check access
117  * @param derr		fm err data structure to be updated
118  * @param caller	PX_TRAP_CALL | PX_INTR_CALL
119  * @param chkjbc	whether to handle hostbus registers (ignored)
120  * @return err		PX_NO_PANIC | PX_PROTECTED |
121  *                      PX_PANIC | PX_HW_RESET | PX_EXPECTED
122  */
123 /* ARGSUSED */
124 int
125 px_err_cmn_intr(px_t *px_p, ddi_fm_error_t *derr, int caller, int block)
126 {
127 	px_err_safeacc_check(px_p, derr);
128 	return (DDI_FM_OK);
129 }
130 
131 /*
132  * fills RC specific fault data
133  */
134 static void
135 px_err_fill_pfd(dev_info_t *dip, px_t *px_p, px_rc_err_t *epkt) {
136 	pf_data_t	pf_data = {0};
137 	int		sts = DDI_SUCCESS;
138 	pcie_req_id_t	fault_bdf = 0;
139 	uint32_t	fault_addr = 0;
140 	uint16_t	s_status = 0;
141 
142 	/* Add an PCIE PF_DATA Entry */
143 	if (epkt->rc_descr.block == BLOCK_MMU) {
144 		/* Only PIO Fault Addresses are valid, this is DMA */
145 		s_status = PCI_STAT_S_TARG_AB;
146 		fault_addr = NULL;
147 
148 		if (epkt->rc_descr.H)
149 			fault_bdf = (pcie_req_id_t)(epkt->hdr[0] >> 16);
150 		else
151 			sts = DDI_FAILURE;
152 	} else {
153 		px_pec_err_t	*pec_p = (px_pec_err_t *)epkt;
154 		uint32_t	trans_type;
155 		uint32_t	dir = pec_p->pec_descr.dir;
156 
157 		pf_data.rp_bdf = px_p->px_bdf;
158 		pf_data.aer_h0 = (uint32_t)(pec_p->hdr[0]);
159 		pf_data.aer_h1 = (uint32_t)(pec_p->hdr[0] >> 32);
160 		pf_data.aer_h2 = (uint32_t)(pec_p->hdr[1]);
161 		pf_data.aer_h3 = (uint32_t)(pec_p->hdr[1] >> 32);
162 
163 		/* translate RC UR/CA to legacy secondary errors */
164 		if ((dir == DIR_READ || dir == DIR_WRITE) &&
165 		    pec_p->pec_descr.U) {
166 			if (pec_p->ue_reg_status & PCIE_AER_UCE_UR)
167 				s_status |= PCI_STAT_R_MAST_AB;
168 			if (pec_p->ue_reg_status & PCIE_AER_UCE_CA)
169 				s_status |= PCI_STAT_R_TARG_AB;
170 		}
171 
172 		if (pec_p->ue_reg_status & PCIE_AER_UCE_PTLP)
173 			s_status |= PCI_STAT_PERROR;
174 
175 		if (pec_p->ue_reg_status & PCIE_AER_UCE_CA)
176 			s_status |= PCI_STAT_S_TARG_AB;
177 
178 		sts = pf_tlp_decode(dip, &pf_data, &fault_bdf, &fault_addr,
179 		    &trans_type);
180 	}
181 
182 	if (sts == DDI_SUCCESS)
183 		px_rp_en_q(px_p, fault_bdf, fault_addr, s_status);
184 }
185 
/*
 * px_err_intr:
 * Interrupt handler for the JBC/DMC/PEC block.
 * o lock
 * o create derr
 * o check safe access
 * o px_err_check_severity(epkt)
 * o pcie_scan_fabric
 * o Idle intr state
 * o unlock
 * o handle error: fatal? fm_panic() : return INTR_CLAIMED)
 */
static uint_t
px_err_intr(px_fault_t *fault_p, px_rc_err_t *epkt)
{
	px_t		*px_p = DIP_TO_STATE(fault_p->px_fh_dip);
	dev_info_t	*rpdip = px_p->px_dip;
	int		rc_err, fab_err = PF_NO_PANIC, msg;
	ddi_fm_error_t	derr;

	/* Record ownership so other FM code can detect re-entry. */
	mutex_enter(&px_p->px_fm_mutex);
	px_p->px_fm_mutex_owner = curthread;

	/* Create the derr; the ENA is derived from the epkt timestamp. */
	bzero(&derr, sizeof (ddi_fm_error_t));
	derr.fme_version = DDI_FME_VERSION;
	derr.fme_ena = fm_ena_generate(epkt->stick, FM_ENA_FMT1);
	derr.fme_flag = DDI_FM_ERR_UNEXPECTED;

	/* Basically check for safe access */
	(void) px_err_cmn_intr(px_p, &derr, PX_INTR_CALL, PX_FM_BLOCK_ALL);

	/* Check the severity of this error */
	rc_err = px_err_epkt_severity(px_p, &derr, epkt, PX_INTR_CALL);

	/* Scan the fabric if the root port is not in drain state. */
	if (!px_lib_is_in_drain_state(px_p))
		fab_err = pf_scan_fabric(rpdip, &derr, px_p->px_dq_p,
		    &px_p->px_dq_tail);

	/* Set the intr state to idle for the leaf that received the mondo */
	if (px_lib_intr_setstate(rpdip, fault_p->px_fh_sysino,
	    INTR_IDLE_STATE) != DDI_SUCCESS) {
		/* Drop ownership before releasing the lock. */
		px_p->px_fm_mutex_owner = NULL;
		mutex_exit(&px_p->px_fm_mutex);
		return (DDI_INTR_UNCLAIMED);
	}

	px_p->px_fm_mutex_owner = NULL;
	mutex_exit(&px_p->px_fm_mutex);

	/* Map the reporting block to the message type used for the panic. */
	switch (epkt->rc_descr.block) {
	case BLOCK_MMU: /* FALLTHROUGH */
	case BLOCK_INTR:
		msg = PX_RC;
		break;
	case BLOCK_PCIE:
		msg = PX_RP;
		break;
	case BLOCK_HOSTBUS: /* FALLTHROUGH */
	default:
		msg = PX_HB;
		break;
	}

	/* px_err_panic() decides, from severities, whether to panic. */
	px_err_panic(rc_err, msg, fab_err);

	return (DDI_INTR_CLAIMED);
}
255 
/*
 * px_err_epkt_severity:
 * Check the severity of the fire error based on the epkt received
 *
 * @param px_p		leaf in which to take the snap shot.
 * @param derr		fm err in which the ereport is to be based on
 * @param epkt		epkt received from HV
 * @param caller	PX_TRAP_CALL | PX_INTR_CALL
 * @return		PX_* severity bits (PX_PANIC on an unrecognized epkt)
 */
static int
px_err_epkt_severity(px_t *px_p, ddi_fm_error_t *derr, px_rc_err_t *epkt,
    int caller)
{
	px_pec_t 	*pec_p = px_p->px_pec_p;
	dev_info_t	*dip = px_p->px_dip;
	boolean_t	is_safeacc = B_FALSE;
	boolean_t	is_block_pci = B_FALSE;
	char		buf[FM_MAX_CLASS], descr_buf[1024];
	int		err = 0;

	/* Cautious access error handling  */
	switch (derr->fme_flag) {
	case DDI_FM_ERR_EXPECTED:
		if (caller == PX_TRAP_CALL) {
			/*
			 * for ddi_caut_get treat all events as nonfatal
			 * The trampoline will set err_ena = 0,
			 * err_status = NONFATAL.
			 */
			derr->fme_status = DDI_FM_NONFATAL;
			is_safeacc = B_TRUE;
		} else {
			/*
			 * For ddi_caut_put treat all events as nonfatal. Here
			 * we have the handle and can call ndi_fm_acc_err_set().
			 */
			derr->fme_status = DDI_FM_NONFATAL;
			ndi_fm_acc_err_set(pec_p->pec_acc_hdl, derr);
			is_safeacc = B_TRUE;
		}
		break;
	case DDI_FM_ERR_PEEK:
	case DDI_FM_ERR_POKE:
		/*
		 * For ddi_peek/poke treat all events as nonfatal.
		 */
		is_safeacc = B_TRUE;
		break;
	default:
		is_safeacc = B_FALSE;
	}

	/*
	 * Older hypervisors in some cases send epkts with incorrect fields.
	 * We have to handle these "special" epkts correctly.
	 */
	if (px_legacy_epkt)
		px_fix_legacy_epkt(dip, derr, epkt);

	/*
	 * Dispatch on the block that reported the error.  MMU and PCIE
	 * errors also record RC fault data for the later fabric scan.
	 */
	switch (epkt->rc_descr.block) {
	case BLOCK_HOSTBUS:
		err = px_cb_epkt_severity(dip, derr, epkt);
		break;
	case BLOCK_MMU:
		err = px_mmu_epkt_severity(dip, derr, epkt);
		px_err_fill_pfd(dip, px_p, epkt);
		break;
	case BLOCK_INTR:
		err = px_intr_epkt_severity(dip, derr, epkt);
		break;
	case BLOCK_PCIE:
		is_block_pci = B_TRUE;
		err = px_pcie_epkt_severity(dip, derr, epkt);
		px_err_fill_pfd(dip, px_p, epkt);
		break;
	default:
		err = 0;
	}

	/* Log the outcome at the highest severity present, gated by px_log. */
	if ((err & PX_HW_RESET) || (err & PX_PANIC)) {
		if (px_log & PX_PANIC)
			px_err_log_handle(dip, epkt, is_block_pci, "PANIC");
	} else if (err & PX_PROTECTED) {
		if (px_log & PX_PROTECTED)
			px_err_log_handle(dip, epkt, is_block_pci, "PROTECTED");
	} else if (err & PX_NO_PANIC) {
		if (px_log & PX_NO_PANIC)
			px_err_log_handle(dip, epkt, is_block_pci, "NO PANIC");
	} else if (err & PX_NO_ERROR) {
		if (px_log & PX_NO_ERROR)
			px_err_log_handle(dip, epkt, is_block_pci, "NO ERROR");
	} else if (err == 0) {
		px_err_log_handle(dip, epkt, is_block_pci, "UNRECOGNIZED");

		/* Unrecognized epkt. send ereport */
		(void) snprintf(buf, FM_MAX_CLASS, "%s", PX_FM_RC_UNRECOG);

		if (is_block_pci) {
			px_pec_err_t	*pec = (px_pec_err_t *)epkt;

			(void) snprintf(descr_buf, sizeof (descr_buf),
			    "Epkt contents:\n"
			    "Block: 0x%x, Dir: 0x%x, Flags: Z=%d, S=%d, R=%d\n"
			    "I=%d, H=%d, C=%d, U=%d, E=%d, P=%d\n"
			    "PCI Err Status: 0x%x, PCIe Err Status: 0x%x\n"
			    "CE Status Reg: 0x%x, UE Status Reg: 0x%x\n"
			    "HDR1: 0x%lx, HDR2: 0x%lx\n"
			    "Err Src Reg: 0x%x, Root Err Status: 0x%x\n",
			    pec->pec_descr.block, pec->pec_descr.dir,
			    pec->pec_descr.Z, pec->pec_descr.S,
			    pec->pec_descr.R, pec->pec_descr.I,
			    pec->pec_descr.H, pec->pec_descr.C,
			    pec->pec_descr.U, pec->pec_descr.E,
			    pec->pec_descr.P, pec->pci_err_status,
			    pec->pcie_err_status, pec->ce_reg_status,
			    pec->ue_reg_status, pec->hdr[0],
			    pec->hdr[1], pec->err_src_reg,
			    pec->root_err_status);

			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
			    EPKT_SYSINO, DATA_TYPE_UINT64, pec->sysino,
			    EPKT_EHDL, DATA_TYPE_UINT64, pec->ehdl,
			    EPKT_STICK, DATA_TYPE_UINT64, pec->stick,
			    EPKT_PEC_DESCR, DATA_TYPE_STRING, descr_buf);
		} else {
			(void) snprintf(descr_buf, sizeof (descr_buf),
			    "Epkt contents:\n"
			    "Block: 0x%x, Op: 0x%x, Phase: 0x%x, Cond: 0x%x\n"
			    "Dir: 0x%x, Flags: STOP=%d, H=%d, R=%d, D=%d\n"
			    "M=%d, S=%d, Size: 0x%x, Addr: 0x%lx\n"
			    "Hdr1: 0x%lx, Hdr2: 0x%lx, Res: 0x%lx\n",
			    epkt->rc_descr.block, epkt->rc_descr.op,
			    epkt->rc_descr.phase, epkt->rc_descr.cond,
			    epkt->rc_descr.dir, epkt->rc_descr.STOP,
			    epkt->rc_descr.H, epkt->rc_descr.R,
			    epkt->rc_descr.D, epkt->rc_descr.M,
			    epkt->rc_descr.S, epkt->size, epkt->addr,
			    epkt->hdr[0], epkt->hdr[1], epkt->reserved);

			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
			    EPKT_SYSINO, DATA_TYPE_UINT64, epkt->sysino,
			    EPKT_EHDL, DATA_TYPE_UINT64, epkt->ehdl,
			    EPKT_STICK, DATA_TYPE_UINT64, epkt->stick,
			    EPKT_RC_DESCR, DATA_TYPE_STRING, descr_buf);
		}

		/* Unrecognized errors are treated as fatal. */
		err = PX_PANIC;
	}

	/* Readjust the severity as a result of safe access */
	if (is_safeacc && !(err & PX_PANIC) && !(px_die & PX_PROTECTED))
		err = PX_NO_PANIC;

	return (err);
}
412 
/*
 * px_err_log_handle:
 * Emit a DBG trace of the epkt contents along with the severity string
 * chosen by px_err_epkt_severity().  The epkt is decoded either as a
 * PEC (root port) packet or as a generic RC packet, depending on
 * is_block_pci.
 */
static void
px_err_log_handle(dev_info_t *dip, px_rc_err_t *epkt, boolean_t is_block_pci,
    char *msg)
{
	if (is_block_pci) {
		px_pec_err_t *pec = (px_pec_err_t *)epkt;
		DBG(DBG_ERR_INTR, dip,
		    "A PCIe root port error has occured with a severity"
		    " \"%s\"\n"
		    "\tBlock: 0x%x, Dir: 0x%x, Flags: Z=%d, S=%d, R=%d, I=%d\n"
		    "\tH=%d, C=%d, U=%d, E=%d, P=%d\n"
		    "\tpci_err: 0x%x, pcie_err=0x%x, ce_reg: 0x%x\n"
		    "\tue_reg: 0x%x, Hdr1: 0x%p, Hdr2: 0x%p\n"
		    "\terr_src: 0x%x, root_err: 0x%x\n",
		    msg, pec->pec_descr.block, pec->pec_descr.dir,
		    pec->pec_descr.Z, pec->pec_descr.S, pec->pec_descr.R,
		    pec->pec_descr.I, pec->pec_descr.H, pec->pec_descr.C,
		    pec->pec_descr.U, pec->pec_descr.E, pec->pec_descr.P,
		    pec->pci_err_status, pec->pcie_err_status,
		    pec->ce_reg_status, pec->ue_reg_status, pec->hdr[0],
		    pec->hdr[1], pec->err_src_reg, pec->root_err_status);
	} else {
		DBG(DBG_ERR_INTR, dip,
		    "A PCIe root complex error has occured with a severity"
		    " \"%s\"\n"
		    "\tBlock: 0x%x, Op: 0x%x, Phase: 0x%x, Cond: 0x%x\n"
		    "\tDir: 0x%x, Flags: STOP=%d, H=%d, R=%d, D=%d, M=%d\n"
		    "\tS=%d, Size: 0x%x, Addr: 0x%p\n"
		    "\tHdr1: 0x%p, Hdr2: 0x%p, Res: 0x%p\n",
		    msg, epkt->rc_descr.block, epkt->rc_descr.op,
		    epkt->rc_descr.phase, epkt->rc_descr.cond,
		    epkt->rc_descr.dir, epkt->rc_descr.STOP, epkt->rc_descr.H,
		    epkt->rc_descr.R, epkt->rc_descr.D, epkt->rc_descr.M,
		    epkt->rc_descr.S, epkt->size, epkt->addr, epkt->hdr[0],
		    epkt->hdr[1], epkt->reserved);
	}
}
450 
/*
 * px_fix_legacy_epkt:
 * Rewrite epkt fields that older (buggy) hypervisors are known to
 * report incorrectly.  Each nested switch walks
 * block -> op -> phase -> cond -> dir and patches only the specific
 * combinations known to be wrong; all other epkts pass through
 * unmodified.
 */
/* ARGSUSED */
static void
px_fix_legacy_epkt(dev_info_t *dip, ddi_fm_error_t *derr, px_rc_err_t *epkt)
{
	/*
	 * We don't have a default case for any of the below switch statements
	 * since we are ok with the code falling through.
	 */
	switch (epkt->rc_descr.block) {
	case BLOCK_HOSTBUS:
		switch (epkt->rc_descr.op) {
		case OP_DMA:
			switch (epkt->rc_descr.phase) {
			case PH_UNKNOWN:
				switch (epkt->rc_descr.cond) {
				case CND_UNKNOWN:
					switch (epkt->rc_descr.dir) {
					case DIR_RESERVED:
						epkt->rc_descr.dir = DIR_READ;
						break;
					} /* DIR */
				} /* CND */
			} /* PH */
		} /* OP */
		break;
	case BLOCK_MMU:
		switch (epkt->rc_descr.op) {
		case OP_XLAT:
			switch (epkt->rc_descr.phase) {
			case PH_DATA:
				switch (epkt->rc_descr.cond) {
				case CND_PROT:
					switch (epkt->rc_descr.dir) {
					case DIR_UNKNOWN:
						epkt->rc_descr.dir = DIR_WRITE;
						break;
					} /* DIR */
				} /* CND */
				break;
			case PH_IRR:
				switch (epkt->rc_descr.cond) {
				case CND_RESERVED:
					switch (epkt->rc_descr.dir) {
					case DIR_IRR:
						epkt->rc_descr.phase = PH_ADDR;
						epkt->rc_descr.cond = CND_IRR;
					} /* DIR */
				} /* CND */
			} /* PH */
		} /* OP */
		break;
	case BLOCK_INTR:
		switch (epkt->rc_descr.op) {
		case OP_MSIQ:
			switch (epkt->rc_descr.phase) {
			case PH_UNKNOWN:
				switch (epkt->rc_descr.cond) {
				case CND_ILL:
					switch (epkt->rc_descr.dir) {
					case DIR_RESERVED:
						epkt->rc_descr.dir = DIR_IRR;
						break;
					} /* DIR */
					break;
				case CND_IRR:
					switch (epkt->rc_descr.dir) {
					case DIR_IRR:
						epkt->rc_descr.cond = CND_OV;
						break;
					} /* DIR */
				} /* CND */
			} /* PH */
			break;
		case OP_RESERVED:
			switch (epkt->rc_descr.phase) {
			case PH_UNKNOWN:
				switch (epkt->rc_descr.cond) {
				case CND_ILL:
					switch (epkt->rc_descr.dir) {
					case DIR_IRR:
						epkt->rc_descr.op = OP_MSI32;
						epkt->rc_descr.phase = PH_DATA;
						break;
					} /* DIR */
				} /* CND */
				break;
			case PH_DATA:
				switch (epkt->rc_descr.cond) {
				case CND_INT:
					switch (epkt->rc_descr.dir) {
					case DIR_UNKNOWN:
						epkt->rc_descr.op = OP_MSI32;
						break;
					} /* DIR */
				} /* CND */
			} /* PH */
		} /* OP */
	} /* BLOCK */
}
550 
551 /* ARGSUSED */
552 static int
553 px_intr_handle_errors(dev_info_t *dip, ddi_fm_error_t *derr, px_rc_err_t *epkt)
554 {
555 	return (px_err_check_eq(dip));
556 }
557 
/*
 * px_pcie_epkt_severity:
 * Determine the severity of a PCIE block epkt.  Handles failed PIO
 * accesses specially, normalizes the epkt register snapshot against
 * its valid bits, then defers to the generated px_err_check_pcie().
 */
/* ARGSUSED */
static int
px_pcie_epkt_severity(dev_info_t *dip, ddi_fm_error_t *derr, px_rc_err_t *epkt)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_pec_err_t	*pec = (px_pec_err_t *)epkt;
	px_err_pcie_t	*pcie = (px_err_pcie_t *)epkt;
	pf_data_t	pf_data;
	int		x;
	uint32_t	temp;

	/*
	 * Check for failed PIO Read/Writes, which are errors that are not
	 * defined in the PCIe spec.
	 */
	pf_data.rp_bdf = px_p->px_bdf;
	temp = PCIE_AER_UCE_UR | PCIE_AER_UCE_CA;
	if (((pec->pec_descr.dir == DIR_READ) || (pec->pec_descr.dir ==
	    DIR_WRITE)) && pec->pec_descr.U && (pec->ue_reg_status & temp)) {
		pf_data.aer_h0 = (uint32_t)(pec->hdr[0]);
		pf_data.aer_h1 = (uint32_t)(pec->hdr[0] >> 32);
		pf_data.aer_h2 = (uint32_t)(pec->hdr[1]);
		pf_data.aer_h3 = (uint32_t)(pec->hdr[1] >> 32);

		/* A matching access handle means the error was contained. */
		if (pf_tlp_hdl_lookup(dip, derr, &pf_data) == PF_HDL_FOUND)
			return (PX_NO_PANIC);
		else
			return (PX_PANIC);
	}

	/*
	 * Zero out any register snapshot whose epkt valid flag is not set.
	 * NOTE(review): hdr[1] validity is keyed off the I bit here while
	 * hdr[0] uses H — confirm against the sun4v epkt specification.
	 */
	if (!pec->pec_descr.C)
		pec->ce_reg_status = 0;
	if (!pec->pec_descr.U)
		pec->ue_reg_status = 0;
	if (!pec->pec_descr.H)
		pec->hdr[0] = 0;
	if (!pec->pec_descr.I)
		pec->hdr[1] = 0;

	/*
	 * According to the PCIe spec, there is a first error pointer.  If there
	 * are header logs recorded and there are more than one error, the log
	 * will belong to the error that the first error pointer points to.
	 *
	 * The regs.primary_ue expects a bit number, go through the ue register
	 * and find the first error that occured.  Because the sun4v epkt spec
	 * does not define this value, the algorithm below gives the lower bit
	 * priority.
	 */
	temp = pcie->ue_reg;
	if (temp) {
		/* Find the lowest set bit and isolate it. */
		for (x = 0; !(temp & 0x1); x++) {
			temp = temp >> 1;
		}
		pcie->primary_ue = 1 << x;
	} else {
		pcie->primary_ue = 0;
	}

	/* Sun4v doesn't log the TX hdr except for CTOs */
	if (pcie->primary_ue == PCIE_AER_UCE_TO) {
		/* A completion timeout's logged header is the TX TLP. */
		pcie->tx_hdr1 = pcie->rx_hdr1;
		pcie->tx_hdr2 = pcie->rx_hdr2;
		pcie->tx_hdr3 = pcie->rx_hdr3;
		pcie->tx_hdr4 = pcie->rx_hdr4;
		pcie->rx_hdr1 = 0;
		pcie->rx_hdr2 = 0;
		pcie->rx_hdr3 = 0;
		pcie->rx_hdr4 = 0;
	} else {
		pcie->tx_hdr1 = 0;
		pcie->tx_hdr2 = 0;
		pcie->tx_hdr3 = 0;
		pcie->tx_hdr4 = 0;
	}

	return (px_err_check_pcie(dip, derr, pcie));
}
636 
637 static int
638 px_mmu_handle_lookup(dev_info_t *dip, ddi_fm_error_t *derr, px_rc_err_t *epkt)
639 {
640 	uint32_t addr = (uint32_t)epkt->addr;
641 	pcie_req_id_t bdf = NULL;
642 
643 	if (epkt->rc_descr.H) {
644 		bdf = (uint32_t)((epkt->hdr[0] >> 16) && 0xFFFF);
645 	}
646 
647 	return (pf_hdl_lookup(dip, derr->fme_ena, PF_DMA_ADDR, addr,
648 	    bdf));
649 }
650