/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * sun4v Fire Error Handling
 */

#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/membar.h>
#include "px_obj.h"
#include "px_err.h"

static void px_err_fill_pfd(dev_info_t *dip, px_t *px_p, px_rc_err_t *epkt);
static uint_t px_err_intr(px_fault_t *fault_p, px_rc_err_t *epkt);
static int px_err_epkt_severity(px_t *px_p, ddi_fm_error_t *derr,
    px_rc_err_t *epkt, int caller);

static void px_err_log_handle(dev_info_t *dip, px_rc_err_t *epkt,
    boolean_t is_block_pci, char *msg);
static void px_err_send_epkt_erpt(dev_info_t *dip, px_rc_err_t *epkt,
    boolean_t is_block_pci, int err, ddi_fm_error_t *derr,
    boolean_t is_valid_epkt);
static int px_cb_epkt_severity(dev_info_t *dip, ddi_fm_error_t *derr,
    px_rc_err_t *epkt);
static int px_mmu_epkt_severity(dev_info_t *dip, ddi_fm_error_t *derr,
    px_rc_err_t *epkt);
static int px_intr_epkt_severity(dev_info_t *dip, ddi_fm_error_t *derr,
    px_rc_err_t *epkt);
static int px_pcie_epkt_severity(dev_info_t *dip, ddi_fm_error_t *derr,
    px_rc_err_t *epkt);
static int px_intr_handle_errors(dev_info_t *dip, ddi_fm_error_t *derr,
    px_rc_err_t *epkt);
static void px_fix_legacy_epkt(dev_info_t *dip, ddi_fm_error_t *derr,
    px_rc_err_t *epkt);
static int px_mmu_handle_lookup(dev_info_t *dip, ddi_fm_error_t *derr,
    px_rc_err_t *epkt);

/* Include the code-generated sun4v epkt checking code */
#include "px_err_gen.c"

/*
 * This variable indicates if we have a hypervisor that could potentially send
 * incorrect epkts.  We always set this to TRUE for now, until we find a way to
 * tell if this HV bug has been fixed.
 */
boolean_t px_legacy_epkt = B_TRUE;

/*
 * px_err_cb_intr:
 *	Interrupt handler for the Host Bus Block.
 */
uint_t
px_err_cb_intr(caddr_t arg)
{
	px_fault_t	*fault_p = (px_fault_t *)arg;
	px_rc_err_t	*epkt = (px_rc_err_t *)fault_p->px_intr_payload;

	if (epkt != NULL) {
		return (px_err_intr(fault_p, epkt));
	}

	return (DDI_INTR_UNCLAIMED);
}

/*
 * px_err_dmc_pec_intr:
 *	Interrupt handler for the DMC/PEC block.
 */
uint_t
px_err_dmc_pec_intr(caddr_t arg)
{
	px_fault_t	*fault_p = (px_fault_t *)arg;
	px_rc_err_t	*epkt = (px_rc_err_t *)fault_p->px_intr_payload;

	if (epkt != NULL) {
		return (px_err_intr(fault_p, epkt));
	}

	return (DDI_INTR_UNCLAIMED);
}

/*
 * px_err_cmn_intr:
 *	Common function called by trap, mondo and fabric intr.
 *	This function is more meaningful in the sun4u implementation.  Kept
 *	to mirror the sun4u call stack.
 *	o check for safe access
 *	o create and queue RC info for later use in fabric scan.
 *	o RUC/WUC, PTLP, MMU Errors(CA), UR
 *
 *	@param px_p		leaf in which to check access
 *	@param derr		fm err data structure to be updated
 *	@param caller		PX_TRAP_CALL | PX_INTR_CALL
 *	@param chkjbc		whether to handle hostbus registers (ignored)
 *	@return err		PX_NO_PANIC | PX_PROTECTED |
 *				PX_PANIC | PX_HW_RESET | PX_EXPECTED
 */
/* ARGSUSED */
int
px_err_cmn_intr(px_t *px_p, ddi_fm_error_t *derr, int caller, int block)
{
	px_err_safeacc_check(px_p, derr);
	return (DDI_FM_OK);
}

/*
 * Fills RC specific fault data.
 */
static void
px_err_fill_pfd(dev_info_t *dip, px_t *px_p, px_rc_err_t *epkt)
{
	pf_pcie_adv_err_regs_t adv_reg;
	int		sts = DDI_SUCCESS;
	pcie_req_id_t	fault_bdf = PCIE_INVALID_BDF;
	uint64_t	fault_addr = 0;
	uint16_t	s_status = 0;

	/* Add a PCIe PF_DATA entry */
	if (epkt->rc_descr.block == BLOCK_MMU) {
		/* Only PIO Fault Addresses are valid, this is DMA */
		s_status = PCI_STAT_S_TARG_AB;
		fault_addr = 0;

		if (epkt->rc_descr.H)
			fault_bdf = (pcie_req_id_t)(epkt->hdr[0] >> 16);
		else
			sts = DDI_FAILURE;
	} else {
		px_pec_err_t	*pec_p = (px_pec_err_t *)epkt;
		uint32_t	dir = pec_p->pec_descr.dir;

		adv_reg.pcie_ue_hdr[0] = (uint32_t)(pec_p->hdr[0]);
		adv_reg.pcie_ue_hdr[1] = (uint32_t)(pec_p->hdr[0] >> 32);
		adv_reg.pcie_ue_hdr[2] = (uint32_t)(pec_p->hdr[1]);
		adv_reg.pcie_ue_hdr[3] = (uint32_t)(pec_p->hdr[1] >> 32);

		/* translate RC UR/CA to legacy secondary errors */
		if ((dir == DIR_READ || dir == DIR_WRITE) &&
		    pec_p->pec_descr.U) {
			if (pec_p->ue_reg_status & PCIE_AER_UCE_UR)
				s_status |= PCI_STAT_R_MAST_AB;
			if (pec_p->ue_reg_status & PCIE_AER_UCE_CA)
				s_status |= PCI_STAT_R_TARG_AB;
		}

		if (pec_p->ue_reg_status & PCIE_AER_UCE_PTLP)
			s_status |= PCI_STAT_PERROR;

		if (pec_p->ue_reg_status & PCIE_AER_UCE_CA)
			s_status |= PCI_STAT_S_TARG_AB;

		sts = pf_tlp_decode(PCIE_DIP2BUS(dip), &adv_reg);
		fault_bdf = adv_reg.pcie_ue_tgt_bdf;
		fault_addr = adv_reg.pcie_ue_tgt_addr;
	}

	if (sts == DDI_SUCCESS)
		px_rp_en_q(px_p, fault_bdf, fault_addr, s_status);
}

/*
 * px_err_intr:
 *	Interrupt handler for the JBC/DMC/PEC block.
 *	o lock
 *	o create derr
 *	o check safe access
 *	o px_err_check_severity(epkt)
 *	o pcie_scan_fabric
 *	o Idle intr state
 *	o unlock
 *	o handle error: fatal? fm_panic() : return INTR_CLAIMED
 */
static uint_t
px_err_intr(px_fault_t *fault_p, px_rc_err_t *epkt)
{
	px_t		*px_p = DIP_TO_STATE(fault_p->px_fh_dip);
	dev_info_t	*rpdip = px_p->px_dip;
	int		rc_err, fab_err, msg;
	ddi_fm_error_t	derr;

	if (px_fm_enter(px_p) != DDI_SUCCESS)
		goto done;

	/* Create the derr */
	bzero(&derr, sizeof (ddi_fm_error_t));
	derr.fme_version = DDI_FME_VERSION;
	derr.fme_ena = fm_ena_generate(epkt->stick, FM_ENA_FMT1);
	derr.fme_flag = DDI_FM_ERR_UNEXPECTED;

	/* Basically check for safe access */
	(void) px_err_cmn_intr(px_p, &derr, PX_INTR_CALL, PX_FM_BLOCK_ALL);

	/* Check the severity of this error */
	rc_err = px_err_epkt_severity(px_p, &derr, epkt, PX_INTR_CALL);

	/* Scan the fabric if the root port is not in drain state. */
	fab_err = px_scan_fabric(px_p, rpdip, &derr);

	/* Set the intr state to idle for the leaf that received the mondo */
	if (px_lib_intr_setstate(rpdip, fault_p->px_fh_sysino,
	    INTR_IDLE_STATE) != DDI_SUCCESS) {
		px_fm_exit(px_p);
		return (DDI_INTR_UNCLAIMED);
	}

	switch (epkt->rc_descr.block) {
	case BLOCK_MMU: /* FALLTHROUGH */
	case BLOCK_INTR:
		msg = PX_RC;
		break;
	case BLOCK_PCIE:
		msg = PX_RP;
		break;
	case BLOCK_HOSTBUS: /* FALLTHROUGH */
	default:
		msg = PX_HB;
		break;
	}

	px_err_panic(rc_err, msg, fab_err, B_TRUE);
	px_fm_exit(px_p);
	px_err_panic(rc_err, msg, fab_err, B_FALSE);

done:
	return (DDI_INTR_CLAIMED);
}

/*
 * px_err_epkt_severity:
 *	Check the severity of the fire error based on the epkt received
 *
 *	@param px_p		leaf in which to take the snapshot.
 *	@param derr		fm err on which the ereport is to be based
 *	@param epkt		epkt received from HV
 */
static int
px_err_epkt_severity(px_t *px_p, ddi_fm_error_t *derr, px_rc_err_t *epkt,
    int caller)
{
	px_pec_t	*pec_p = px_p->px_pec_p;
	dev_info_t	*dip = px_p->px_dip;
	boolean_t	is_safeacc = B_FALSE;
	boolean_t	is_block_pci = B_FALSE;
	boolean_t	is_valid_epkt = B_FALSE;
	int		err = 0;

	/* Cautious access error handling */
	switch (derr->fme_flag) {
	case DDI_FM_ERR_EXPECTED:
		if (caller == PX_TRAP_CALL) {
			/*
			 * For ddi_caut_get treat all events as nonfatal.
			 * The trampoline will set err_ena = 0,
			 * err_status = NONFATAL.
			 */
			derr->fme_status = DDI_FM_NONFATAL;
			is_safeacc = B_TRUE;
		} else {
			/*
			 * For ddi_caut_put treat all events as nonfatal.  Here
			 * we have the handle and can call ndi_fm_acc_err_set().
			 */
			derr->fme_status = DDI_FM_NONFATAL;
			ndi_fm_acc_err_set(pec_p->pec_acc_hdl, derr);
			is_safeacc = B_TRUE;
		}
		break;
	case DDI_FM_ERR_PEEK:
	case DDI_FM_ERR_POKE:
		/*
		 * For ddi_peek/poke treat all events as nonfatal.
		 */
		is_safeacc = B_TRUE;
		break;
	default:
		is_safeacc = B_FALSE;
	}

	/*
	 * Older hypervisors in some cases send epkts with incorrect fields.
	 * We have to handle these "special" epkts correctly.
	 */
	if (px_legacy_epkt)
		px_fix_legacy_epkt(dip, derr, epkt);

	switch (epkt->rc_descr.block) {
	case BLOCK_HOSTBUS:
		err = px_cb_epkt_severity(dip, derr, epkt);
		break;
	case BLOCK_MMU:
		err = px_mmu_epkt_severity(dip, derr, epkt);
		px_err_fill_pfd(dip, px_p, epkt);
		break;
	case BLOCK_INTR:
		err = px_intr_epkt_severity(dip, derr, epkt);
		break;
	case BLOCK_PCIE:
		is_block_pci = B_TRUE;
		err = px_pcie_epkt_severity(dip, derr, epkt);
		px_err_fill_pfd(dip, px_p, epkt);
		break;
	default:
		err = 0;
	}

	if ((err & PX_HW_RESET) || (err & PX_PANIC)) {
		if (px_log & PX_PANIC)
			px_err_log_handle(dip, epkt, is_block_pci, "PANIC");
		is_valid_epkt = B_TRUE;
	} else if (err & PX_PROTECTED) {
		if (px_log & PX_PROTECTED)
			px_err_log_handle(dip, epkt, is_block_pci, "PROTECTED");
		is_valid_epkt = B_TRUE;
	} else if (err & PX_NO_PANIC) {
		if (px_log & PX_NO_PANIC)
			px_err_log_handle(dip, epkt, is_block_pci, "NO PANIC");
		is_valid_epkt = B_TRUE;
	} else if (err & PX_NO_ERROR) {
		if (px_log & PX_NO_ERROR)
			px_err_log_handle(dip, epkt, is_block_pci, "NO ERROR");
		is_valid_epkt = B_TRUE;
	} else if (err == 0) {
		px_err_log_handle(dip, epkt, is_block_pci, "UNRECOGNIZED");
		is_valid_epkt = B_FALSE;

		/* Panic on an unrecognized epkt */
		err = PX_PANIC;
	}

	px_err_send_epkt_erpt(dip, epkt, is_block_pci, err, derr,
	    is_valid_epkt);

	/* Readjust the severity as a result of safe access */
	if (is_safeacc && !(err & PX_PANIC) && !(px_die & PX_PROTECTED))
		err = PX_NO_PANIC;

	return (err);
}

static void
px_err_send_epkt_erpt(dev_info_t *dip, px_rc_err_t *epkt,
    boolean_t is_block_pci, int err, ddi_fm_error_t *derr,
    boolean_t is_valid_epkt)
{
	char buf[FM_MAX_CLASS], descr_buf[1024];

	/* send ereport for debug purposes */
	(void) snprintf(buf, FM_MAX_CLASS, "%s", PX_FM_RC_UNRECOG);

	if (is_block_pci) {
		px_pec_err_t *pec = (px_pec_err_t *)epkt;
		(void) snprintf(descr_buf, sizeof (descr_buf),
		    "%s Epkt contents:\n"
		    "Block: 0x%x, Dir: 0x%x, Flags: Z=%d, S=%d, R=%d\n"
		    "I=%d, H=%d, C=%d, U=%d, E=%d, P=%d\n"
		    "PCI Err Status: 0x%x, PCIe Err Status: 0x%x\n"
		    "CE Status Reg: 0x%x, UE Status Reg: 0x%x\n"
		    "HDR1: 0x%lx, HDR2: 0x%lx\n"
		    "Err Src Reg: 0x%x, Root Err Status: 0x%x\n"
		    "Err Severity: 0x%x\n",
		    is_valid_epkt ? "Valid" : "Invalid",
		    pec->pec_descr.block, pec->pec_descr.dir,
		    pec->pec_descr.Z, pec->pec_descr.S,
		    pec->pec_descr.R, pec->pec_descr.I,
		    pec->pec_descr.H, pec->pec_descr.C,
		    pec->pec_descr.U, pec->pec_descr.E,
		    pec->pec_descr.P, pec->pci_err_status,
		    pec->pcie_err_status, pec->ce_reg_status,
		    pec->ue_reg_status, pec->hdr[0],
		    pec->hdr[1], pec->err_src_reg,
		    pec->root_err_status, err);

		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
		    EPKT_SYSINO, DATA_TYPE_UINT64,
		    is_valid_epkt ? pec->sysino : 0,
		    EPKT_EHDL, DATA_TYPE_UINT64,
		    is_valid_epkt ? pec->ehdl : 0,
		    EPKT_STICK, DATA_TYPE_UINT64,
		    is_valid_epkt ? pec->stick : 0,
		    EPKT_PEC_DESCR, DATA_TYPE_STRING, descr_buf);
	} else {
		(void) snprintf(descr_buf, sizeof (descr_buf),
		    "%s Epkt contents:\n"
		    "Block: 0x%x, Op: 0x%x, Phase: 0x%x, Cond: 0x%x\n"
		    "Dir: 0x%x, Flags: STOP=%d, H=%d, R=%d, D=%d\n"
		    "M=%d, S=%d, Size: 0x%x, Addr: 0x%lx\n"
		    "Hdr1: 0x%lx, Hdr2: 0x%lx, Res: 0x%lx\n"
		    "Err Severity: 0x%x\n",
		    is_valid_epkt ? "Valid" : "Invalid",
		    epkt->rc_descr.block, epkt->rc_descr.op,
		    epkt->rc_descr.phase, epkt->rc_descr.cond,
		    epkt->rc_descr.dir, epkt->rc_descr.STOP,
		    epkt->rc_descr.H, epkt->rc_descr.R,
		    epkt->rc_descr.D, epkt->rc_descr.M,
		    epkt->rc_descr.S, epkt->size, epkt->addr,
		    epkt->hdr[0], epkt->hdr[1], epkt->reserved,
		    err);

		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
		    EPKT_SYSINO, DATA_TYPE_UINT64,
		    is_valid_epkt ? epkt->sysino : 0,
		    EPKT_EHDL, DATA_TYPE_UINT64,
		    is_valid_epkt ? epkt->ehdl : 0,
		    EPKT_STICK, DATA_TYPE_UINT64,
		    is_valid_epkt ? epkt->stick : 0,
		    EPKT_RC_DESCR, DATA_TYPE_STRING, descr_buf);
	}
}

static void
px_err_log_handle(dev_info_t *dip, px_rc_err_t *epkt, boolean_t is_block_pci,
    char *msg)
{
	if (is_block_pci) {
		px_pec_err_t *pec = (px_pec_err_t *)epkt;
		DBG(DBG_ERR_INTR, dip,
		    "A PCIe root port error has occurred with a severity"
		    " \"%s\"\n"
		    "\tBlock: 0x%x, Dir: 0x%x, Flags: Z=%d, S=%d, R=%d, I=%d\n"
		    "\tH=%d, C=%d, U=%d, E=%d, P=%d\n"
		    "\tpci_err: 0x%x, pcie_err=0x%x, ce_reg: 0x%x\n"
		    "\tue_reg: 0x%x, Hdr1: 0x%p, Hdr2: 0x%p\n"
		    "\terr_src: 0x%x, root_err: 0x%x\n",
		    msg, pec->pec_descr.block, pec->pec_descr.dir,
		    pec->pec_descr.Z, pec->pec_descr.S, pec->pec_descr.R,
		    pec->pec_descr.I, pec->pec_descr.H, pec->pec_descr.C,
		    pec->pec_descr.U, pec->pec_descr.E, pec->pec_descr.P,
		    pec->pci_err_status, pec->pcie_err_status,
		    pec->ce_reg_status, pec->ue_reg_status, pec->hdr[0],
		    pec->hdr[1], pec->err_src_reg, pec->root_err_status);
	} else {
		DBG(DBG_ERR_INTR, dip,
		    "A PCIe root complex error has occurred with a severity"
		    " \"%s\"\n"
		    "\tBlock: 0x%x, Op: 0x%x, Phase: 0x%x, Cond: 0x%x\n"
		    "\tDir: 0x%x, Flags: STOP=%d, H=%d, R=%d, D=%d, M=%d\n"
		    "\tS=%d, Size: 0x%x, Addr: 0x%p\n"
		    "\tHdr1: 0x%p, Hdr2: 0x%p, Res: 0x%p\n",
		    msg, epkt->rc_descr.block, epkt->rc_descr.op,
		    epkt->rc_descr.phase, epkt->rc_descr.cond,
		    epkt->rc_descr.dir, epkt->rc_descr.STOP, epkt->rc_descr.H,
		    epkt->rc_descr.R, epkt->rc_descr.D, epkt->rc_descr.M,
		    epkt->rc_descr.S, epkt->size, epkt->addr, epkt->hdr[0],
		    epkt->hdr[1], epkt->reserved);
	}
}

/* ARGSUSED */
static void
px_fix_legacy_epkt(dev_info_t *dip, ddi_fm_error_t *derr, px_rc_err_t *epkt)
{
	/*
	 * We don't have a default case for any of the below switch statements
	 * since we are OK with the code falling through.
	 */
	switch (epkt->rc_descr.block) {
	case BLOCK_HOSTBUS:
		switch (epkt->rc_descr.op) {
		case OP_DMA:
			switch (epkt->rc_descr.phase) {
			case PH_UNKNOWN:
				switch (epkt->rc_descr.cond) {
				case CND_UNKNOWN:
					switch (epkt->rc_descr.dir) {
					case DIR_RESERVED:
						epkt->rc_descr.dir = DIR_READ;
						break;
					} /* DIR */
				} /* CND */
			} /* PH */
		} /* OP */
		break;
	case BLOCK_MMU:
		switch (epkt->rc_descr.op) {
		case OP_XLAT:
			switch (epkt->rc_descr.phase) {
			case PH_DATA:
				switch (epkt->rc_descr.cond) {
				case CND_PROT:
					switch (epkt->rc_descr.dir) {
					case DIR_UNKNOWN:
						epkt->rc_descr.dir = DIR_WRITE;
						break;
					} /* DIR */
				} /* CND */
				break;
			case PH_IRR:
				switch (epkt->rc_descr.cond) {
				case CND_RESERVED:
					switch (epkt->rc_descr.dir) {
					case DIR_IRR:
						epkt->rc_descr.phase = PH_ADDR;
						epkt->rc_descr.cond = CND_IRR;
					} /* DIR */
				} /* CND */
			} /* PH */
		} /* OP */
		break;
	case BLOCK_INTR:
		switch (epkt->rc_descr.op) {
		case OP_MSIQ:
			switch (epkt->rc_descr.phase) {
			case PH_UNKNOWN:
				switch (epkt->rc_descr.cond) {
				case CND_ILL:
					switch (epkt->rc_descr.dir) {
					case DIR_RESERVED:
						epkt->rc_descr.dir = DIR_IRR;
						break;
					} /* DIR */
					break;
				case CND_IRR:
					switch (epkt->rc_descr.dir) {
					case DIR_IRR:
						epkt->rc_descr.cond = CND_OV;
						break;
					} /* DIR */
				} /* CND */
			} /* PH */
			break;
		case OP_RESERVED:
			switch (epkt->rc_descr.phase) {
			case PH_UNKNOWN:
				switch (epkt->rc_descr.cond) {
				case CND_ILL:
					switch (epkt->rc_descr.dir) {
					case DIR_IRR:
						epkt->rc_descr.op = OP_MSI32;
						epkt->rc_descr.phase = PH_DATA;
						break;
					} /* DIR */
				} /* CND */
				break;
			case PH_DATA:
				switch (epkt->rc_descr.cond) {
				case CND_INT:
					switch (epkt->rc_descr.dir) {
					case DIR_UNKNOWN:
						epkt->rc_descr.op = OP_MSI32;
						break;
					} /* DIR */
				} /* CND */
			} /* PH */
		} /* OP */
	} /* BLOCK */
}

/* ARGSUSED */
static int
px_intr_handle_errors(dev_info_t *dip, ddi_fm_error_t *derr, px_rc_err_t *epkt)
{
	return (px_err_check_eq(dip));
}

/* ARGSUSED */
static int
px_pcie_epkt_severity(dev_info_t *dip, ddi_fm_error_t *derr, px_rc_err_t *epkt)
{
	px_pec_err_t	*pec_p = (px_pec_err_t *)epkt;
	px_err_pcie_t	*pcie = (px_err_pcie_t *)epkt;
	pf_pcie_adv_err_regs_t adv_reg;
	int		sts;
	uint32_t	temp;

	/*
	 * Check for failed PIO Read/Writes, which are errors that are not
	 * defined in the PCIe spec.
	 */
	temp = PCIE_AER_UCE_UR | PCIE_AER_UCE_CA;
	if (((pec_p->pec_descr.dir == DIR_READ) ||
	    (pec_p->pec_descr.dir == DIR_WRITE)) &&
	    pec_p->pec_descr.U && (pec_p->ue_reg_status & temp)) {
		adv_reg.pcie_ue_hdr[0] = (uint32_t)(pec_p->hdr[0]);
		adv_reg.pcie_ue_hdr[1] = (uint32_t)(pec_p->hdr[0] >> 32);
		adv_reg.pcie_ue_hdr[2] = (uint32_t)(pec_p->hdr[1]);
		adv_reg.pcie_ue_hdr[3] = (uint32_t)(pec_p->hdr[1] >> 32);

		sts = pf_tlp_decode(PCIE_DIP2BUS(dip), &adv_reg);

		if (sts == DDI_SUCCESS &&
		    pf_hdl_lookup(dip, derr->fme_ena,
		    adv_reg.pcie_ue_tgt_trans,
		    adv_reg.pcie_ue_tgt_addr,
		    adv_reg.pcie_ue_tgt_bdf) == PF_HDL_FOUND)
			return (PX_NO_PANIC);
		else
			return (PX_PANIC);
	}

	if (!pec_p->pec_descr.C)
		pec_p->ce_reg_status = 0;
	if (!pec_p->pec_descr.U)
		pec_p->ue_reg_status = 0;
	if (!pec_p->pec_descr.H)
		pec_p->hdr[0] = 0;
	if (!pec_p->pec_descr.I)
		pec_p->hdr[1] = 0;

	/*
	 * According to the PCIe spec, there is a first error pointer.  If
	 * there are header logs recorded and there is more than one error,
	 * the log will belong to the error that the first error pointer
	 * points to.
	 *
	 * The regs.primary_ue field expects a single-bit mask, so go through
	 * the ue register and find the first error that occurred.  Because
	 * the sun4v epkt spec does not define this value, the algorithm below
	 * gives the lowest bit priority.
	 */
	temp = pcie->ue_reg;
	if (temp) {
		int x;
		for (x = 0; !(temp & 0x1); x++) {
			temp = temp >> 1;
		}
		pcie->primary_ue = 1 << x;
	} else {
		pcie->primary_ue = 0;
	}

	/* Sun4v doesn't log the TX hdr except for CTOs */
	if (pcie->primary_ue == PCIE_AER_UCE_TO) {
		pcie->tx_hdr1 = pcie->rx_hdr1;
		pcie->tx_hdr2 = pcie->rx_hdr2;
		pcie->tx_hdr3 = pcie->rx_hdr3;
		pcie->tx_hdr4 = pcie->rx_hdr4;
		pcie->rx_hdr1 = 0;
		pcie->rx_hdr2 = 0;
		pcie->rx_hdr3 = 0;
		pcie->rx_hdr4 = 0;
	} else {
		pcie->tx_hdr1 = 0;
		pcie->tx_hdr2 = 0;
		pcie->tx_hdr3 = 0;
		pcie->tx_hdr4 = 0;
	}

	return (px_err_check_pcie(dip, derr, pcie));
}

static int
px_mmu_handle_lookup(dev_info_t *dip, ddi_fm_error_t *derr, px_rc_err_t *epkt)
{
	uint64_t	addr = (uint64_t)epkt->addr;
	pcie_req_id_t	bdf = PCIE_INVALID_BDF;

	if (epkt->rc_descr.H) {
		/* The requester BDF is in bits [31:16] of the logged header */
		bdf = (uint32_t)((epkt->hdr[0] >> 16) & 0xFFFF);
	}

	return (pf_hdl_lookup(dip, derr->fme_ena, PF_ADDR_DMA, addr,
	    bdf));
}