/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/types.h>
#include <sys/sunndi.h>
#include <sys/sysmacros.h>
#include <sys/ddifm_impl.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/fm/io/pci.h>
#include <sys/fm/io/ddi.h>
#include <sys/pci.h>
#include <sys/pcie.h>
#include <sys/pci_impl.h>
#include <sys/epm.h>
#include <sys/pcifm.h>

#define	PCIX_ECC_VER_CHECK(x)	(((x) == PCI_PCIX_VER_1) || \
				((x) == PCI_PCIX_VER_2))

/*
 * Expected PCI Express error mask values
 *
 * !!NOTE!! All PCI Express functionality, including PCIe initialization and
 * PCIe error handling, has been moved to the common pcie misc module. All
 * functions and variables dealing with PCIe in this file have been deprecated
 * and will eventually be removed. All legacy PCI and PCI-X related code should
 * remain as is.
54 */ 55 uint32_t pcie_aer_uce_log_bits = PCIE_AER_UCE_LOG_BITS; 56 uint32_t pcie_aer_suce_log_bits = PCIE_AER_SUCE_LOG_BITS; 57 58 errorq_t *pci_target_queue = NULL; 59 60 pci_fm_err_t pci_err_tbl[] = { 61 PCI_DET_PERR, PCI_STAT_PERROR, NULL, DDI_FM_UNKNOWN, 62 PCI_MDPE, PCI_STAT_S_PERROR, PCI_TARG_MDPE, DDI_FM_UNKNOWN, 63 PCI_SIG_SERR, PCI_STAT_S_SYSERR, NULL, DDI_FM_FATAL, 64 PCI_MA, PCI_STAT_R_MAST_AB, PCI_TARG_MA, DDI_FM_UNKNOWN, 65 PCI_REC_TA, PCI_STAT_R_TARG_AB, PCI_TARG_REC_TA, DDI_FM_UNKNOWN, 66 PCI_SIG_TA, PCI_STAT_S_TARG_AB, NULL, DDI_FM_UNKNOWN, 67 NULL, NULL, NULL, NULL, 68 }; 69 70 pci_fm_err_t pci_bdg_err_tbl[] = { 71 PCI_DET_PERR, PCI_STAT_PERROR, NULL, DDI_FM_UNKNOWN, 72 PCI_MDPE, PCI_STAT_S_PERROR, PCI_TARG_MDPE, DDI_FM_UNKNOWN, 73 PCI_REC_SERR, PCI_STAT_S_SYSERR, NULL, DDI_FM_UNKNOWN, 74 #if defined(__sparc) 75 PCI_MA, PCI_STAT_R_MAST_AB, PCI_TARG_MA, DDI_FM_UNKNOWN, 76 #endif 77 PCI_REC_TA, PCI_STAT_R_TARG_AB, PCI_TARG_REC_TA, DDI_FM_UNKNOWN, 78 PCI_SIG_TA, PCI_STAT_S_TARG_AB, NULL, DDI_FM_UNKNOWN, 79 NULL, NULL, NULL, NULL, 80 }; 81 82 static pci_fm_err_t pciex_ce_err_tbl[] = { 83 PCIEX_RE, PCIE_AER_CE_RECEIVER_ERR, NULL, DDI_FM_OK, 84 PCIEX_RNR, PCIE_AER_CE_REPLAY_ROLLOVER, NULL, DDI_FM_OK, 85 PCIEX_RTO, PCIE_AER_CE_REPLAY_TO, NULL, DDI_FM_OK, 86 PCIEX_BDP, PCIE_AER_CE_BAD_DLLP, NULL, DDI_FM_OK, 87 PCIEX_BTP, PCIE_AER_CE_BAD_TLP, NULL, DDI_FM_OK, 88 PCIEX_ANFE, PCIE_AER_CE_AD_NFE, NULL, DDI_FM_OK, 89 NULL, NULL, NULL, NULL, 90 }; 91 92 static pci_fm_err_t pciex_ue_err_tbl[] = { 93 PCIEX_TE, PCIE_AER_UCE_TRAINING, NULL, DDI_FM_FATAL, 94 PCIEX_DLP, PCIE_AER_UCE_DLP, NULL, DDI_FM_FATAL, 95 PCIEX_SD, PCIE_AER_UCE_SD, NULL, DDI_FM_FATAL, 96 PCIEX_ROF, PCIE_AER_UCE_RO, NULL, DDI_FM_FATAL, 97 PCIEX_FCP, PCIE_AER_UCE_FCP, NULL, DDI_FM_FATAL, 98 PCIEX_MFP, PCIE_AER_UCE_MTLP, NULL, DDI_FM_FATAL, 99 PCIEX_CTO, PCIE_AER_UCE_TO, NULL, DDI_FM_UNKNOWN, 100 PCIEX_UC, PCIE_AER_UCE_UC, NULL, DDI_FM_OK, 101 PCIEX_ECRC, PCIE_AER_UCE_ECRC, NULL, DDI_FM_UNKNOWN, 102 PCIEX_CA, PCIE_AER_UCE_CA, NULL, DDI_FM_UNKNOWN, 103 PCIEX_UR, PCIE_AER_UCE_UR, NULL, DDI_FM_UNKNOWN, 104 PCIEX_POIS, PCIE_AER_UCE_PTLP, NULL, DDI_FM_UNKNOWN, 105 NULL, NULL, NULL, NULL, 106 }; 107 108 static pci_fm_err_t pcie_sue_err_tbl[] = { 109 PCIEX_S_TA_SC, PCIE_AER_SUCE_TA_ON_SC, NULL, DDI_FM_UNKNOWN, 110 PCIEX_S_MA_SC, PCIE_AER_SUCE_MA_ON_SC, NULL, DDI_FM_UNKNOWN, 111 PCIEX_S_RTA, PCIE_AER_SUCE_RCVD_TA, NULL, DDI_FM_UNKNOWN, 112 #if defined(__sparc) 113 PCIEX_S_RMA, PCIE_AER_SUCE_RCVD_MA, NULL, DDI_FM_UNKNOWN, 114 #endif 115 PCIEX_S_USC, PCIE_AER_SUCE_USC_ERR, NULL, DDI_FM_UNKNOWN, 116 PCIEX_S_USCMD, PCIE_AER_SUCE_USC_MSG_DATA_ERR, NULL, DDI_FM_FATAL, 117 PCIEX_S_UDE, PCIE_AER_SUCE_UC_DATA_ERR, NULL, DDI_FM_UNKNOWN, 118 PCIEX_S_UAT, PCIE_AER_SUCE_UC_ATTR_ERR, NULL, DDI_FM_FATAL, 119 PCIEX_S_UADR, PCIE_AER_SUCE_UC_ADDR_ERR, NULL, DDI_FM_FATAL, 120 PCIEX_S_TEX, PCIE_AER_SUCE_TIMER_EXPIRED, NULL, DDI_FM_FATAL, 121 PCIEX_S_PERR, PCIE_AER_SUCE_PERR_ASSERT, NULL, DDI_FM_UNKNOWN, 122 PCIEX_S_SERR, PCIE_AER_SUCE_SERR_ASSERT, NULL, DDI_FM_FATAL, 123 PCIEX_INTERR, PCIE_AER_SUCE_INTERNAL_ERR, NULL, DDI_FM_FATAL, 124 NULL, NULL, NULL, NULL, 125 }; 126 127 static pci_fm_err_t pcix_err_tbl[] = { 128 PCIX_SPL_DIS, PCI_PCIX_SPL_DSCD, NULL, DDI_FM_UNKNOWN, 129 PCIX_UNEX_SPL, PCI_PCIX_UNEX_SPL, NULL, DDI_FM_UNKNOWN, 130 PCIX_RX_SPL_MSG, PCI_PCIX_RX_SPL_MSG, NULL, DDI_FM_UNKNOWN, 131 NULL, NULL, NULL, NULL, 132 }; 133 134 static pci_fm_err_t pcix_sec_err_tbl[] = { 135 PCIX_SPL_DIS, PCI_PCIX_BSS_SPL_DSCD, NULL, DDI_FM_UNKNOWN, 136 
PCIX_UNEX_SPL, PCI_PCIX_BSS_UNEX_SPL, NULL, DDI_FM_UNKNOWN, 137 PCIX_BSS_SPL_OR, PCI_PCIX_BSS_SPL_OR, NULL, DDI_FM_OK, 138 PCIX_BSS_SPL_DLY, PCI_PCIX_BSS_SPL_DLY, NULL, DDI_FM_OK, 139 NULL, NULL, NULL, NULL, 140 }; 141 142 static pci_fm_err_t pciex_nadv_err_tbl[] = { 143 PCIEX_UR, PCIE_DEVSTS_UR_DETECTED, NULL, DDI_FM_UNKNOWN, 144 PCIEX_FAT, PCIE_DEVSTS_FE_DETECTED, NULL, DDI_FM_FATAL, 145 PCIEX_NONFAT, PCIE_DEVSTS_NFE_DETECTED, NULL, DDI_FM_UNKNOWN, 146 PCIEX_CORR, PCIE_DEVSTS_CE_DETECTED, NULL, DDI_FM_OK, 147 NULL, NULL, NULL, NULL, 148 }; 149 150 static int 151 pci_config_check(ddi_acc_handle_t handle, int fme_flag) 152 { 153 ddi_acc_hdl_t *hp = impl_acc_hdl_get(handle); 154 ddi_fm_error_t de; 155 156 if (!(DDI_FM_ACC_ERR_CAP(ddi_fm_capable(hp->ah_dip)))) 157 return (DDI_FM_OK); 158 159 de.fme_version = DDI_FME_VERSION; 160 161 ddi_fm_acc_err_get(handle, &de, de.fme_version); 162 if (de.fme_status != DDI_FM_OK) { 163 if (fme_flag == DDI_FM_ERR_UNEXPECTED) { 164 char buf[FM_MAX_CLASS]; 165 166 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", 167 PCI_ERROR_SUBCLASS, PCI_NR); 168 ddi_fm_ereport_post(hp->ah_dip, buf, de.fme_ena, 169 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, NULL); 170 } 171 ddi_fm_acc_err_clear(handle, de.fme_version); 172 } 173 return (de.fme_status); 174 } 175 176 static void 177 pcix_ecc_regs_gather(pci_erpt_t *erpt_p, pcix_ecc_regs_t *pcix_ecc_regs, 178 uint8_t pcix_cap_ptr, int fme_flag) 179 { 180 int bdg = erpt_p->pe_dflags & PCI_BRIDGE_DEV; 181 182 pcix_ecc_regs->pcix_ecc_ctlstat = pci_config_get32(erpt_p->pe_hdl, 183 (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_STATUS : 184 PCI_PCIX_ECC_STATUS))); 185 if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK) 186 pcix_ecc_regs->pcix_ecc_vflags |= PCIX_ERR_ECC_STS_VALID; 187 else 188 return; 189 pcix_ecc_regs->pcix_ecc_fstaddr = pci_config_get32(erpt_p->pe_hdl, 190 (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_FST_AD : 191 PCI_PCIX_ECC_FST_AD))); 192 pcix_ecc_regs->pcix_ecc_secaddr = pci_config_get32(erpt_p->pe_hdl, 193 (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_SEC_AD : 194 PCI_PCIX_ECC_SEC_AD))); 195 pcix_ecc_regs->pcix_ecc_attr = pci_config_get32(( 196 ddi_acc_handle_t)erpt_p->pe_hdl, 197 (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_ATTR : PCI_PCIX_ECC_ATTR))); 198 } 199 200 static void 201 pcix_regs_gather(pci_erpt_t *erpt_p, void *pe_regs, int fme_flag) 202 { 203 if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) { 204 pcix_bdg_error_regs_t *pcix_bdg_regs = 205 (pcix_bdg_error_regs_t *)pe_regs; 206 uint8_t pcix_bdg_cap_ptr; 207 int i; 208 209 pcix_bdg_cap_ptr = pcix_bdg_regs->pcix_bdg_cap_ptr; 210 pcix_bdg_regs->pcix_bdg_sec_stat = pci_config_get16( 211 erpt_p->pe_hdl, (pcix_bdg_cap_ptr + PCI_PCIX_SEC_STATUS)); 212 if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK) 213 pcix_bdg_regs->pcix_bdg_vflags |= 214 PCIX_BDG_SEC_STATUS_VALID; 215 else 216 return; 217 pcix_bdg_regs->pcix_bdg_stat = pci_config_get32(erpt_p->pe_hdl, 218 (pcix_bdg_cap_ptr + PCI_PCIX_BDG_STATUS)); 219 if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK) 220 pcix_bdg_regs->pcix_bdg_vflags |= PCIX_BDG_STATUS_VALID; 221 else 222 return; 223 if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) { 224 pcix_ecc_regs_t *pcix_bdg_ecc_regs; 225 /* 226 * PCI Express to PCI-X bridges only implement the 227 * secondary side of the PCI-X ECC registers, bit one is 228 * read-only so we make sure we do not write to it. 
229 */ 230 if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) { 231 pcix_bdg_ecc_regs = 232 pcix_bdg_regs->pcix_bdg_ecc_regs[1]; 233 pcix_ecc_regs_gather(erpt_p, pcix_bdg_ecc_regs, 234 pcix_bdg_cap_ptr, fme_flag); 235 } else { 236 for (i = 0; i < 2; i++) { 237 pcix_bdg_ecc_regs = 238 pcix_bdg_regs->pcix_bdg_ecc_regs[i]; 239 pci_config_put32(erpt_p->pe_hdl, 240 (pcix_bdg_cap_ptr + 241 PCI_PCIX_BDG_ECC_STATUS), i); 242 pcix_ecc_regs_gather(erpt_p, 243 pcix_bdg_ecc_regs, 244 pcix_bdg_cap_ptr, fme_flag); 245 } 246 } 247 } 248 } else { 249 pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)pe_regs; 250 uint8_t pcix_cap_ptr; 251 252 pcix_cap_ptr = pcix_regs->pcix_cap_ptr; 253 254 pcix_regs->pcix_command = pci_config_get16(erpt_p->pe_hdl, 255 (pcix_cap_ptr + PCI_PCIX_COMMAND)); 256 pcix_regs->pcix_status = pci_config_get32(erpt_p->pe_hdl, 257 (pcix_cap_ptr + PCI_PCIX_STATUS)); 258 if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK) 259 pcix_regs->pcix_vflags |= PCIX_ERR_STATUS_VALID; 260 else 261 return; 262 if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) { 263 pcix_ecc_regs_t *pcix_ecc_regs = 264 pcix_regs->pcix_ecc_regs; 265 266 pcix_ecc_regs_gather(erpt_p, pcix_ecc_regs, 267 pcix_cap_ptr, fme_flag); 268 } 269 } 270 } 271 272 static void 273 pcie_regs_gather(pci_erpt_t *erpt_p, int fme_flag) 274 { 275 pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs; 276 uint8_t pcie_cap_ptr; 277 pcie_adv_error_regs_t *pcie_adv_regs; 278 uint16_t pcie_ecap_ptr; 279 280 pcie_cap_ptr = pcie_regs->pcie_cap_ptr; 281 282 pcie_regs->pcie_err_status = pci_config_get16(erpt_p->pe_hdl, 283 pcie_cap_ptr + PCIE_DEVSTS); 284 if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK) 285 pcie_regs->pcie_vflags |= PCIE_ERR_STATUS_VALID; 286 else 287 return; 288 289 pcie_regs->pcie_err_ctl = pci_config_get16(erpt_p->pe_hdl, 290 (pcie_cap_ptr + PCIE_DEVCTL)); 291 pcie_regs->pcie_dev_cap = pci_config_get16(erpt_p->pe_hdl, 292 (pcie_cap_ptr + PCIE_DEVCAP)); 293 294 if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) && (erpt_p->pe_dflags & 295 PCIX_DEV)) 296 pcix_regs_gather(erpt_p, pcie_regs->pcix_bdg_regs, fme_flag); 297 298 if (erpt_p->pe_dflags & PCIEX_RC_DEV) { 299 pcie_rc_error_regs_t *pcie_rc_regs = pcie_regs->pcie_rc_regs; 300 301 pcie_rc_regs->pcie_rc_status = pci_config_get32(erpt_p->pe_hdl, 302 (pcie_cap_ptr + PCIE_ROOTSTS)); 303 pcie_rc_regs->pcie_rc_ctl = pci_config_get16(erpt_p->pe_hdl, 304 (pcie_cap_ptr + PCIE_ROOTCTL)); 305 } 306 307 if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV)) 308 return; 309 310 pcie_adv_regs = pcie_regs->pcie_adv_regs; 311 312 pcie_ecap_ptr = pcie_adv_regs->pcie_adv_cap_ptr; 313 314 pcie_adv_regs->pcie_ue_status = pci_config_get32(erpt_p->pe_hdl, 315 pcie_ecap_ptr + PCIE_AER_UCE_STS); 316 if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK) 317 pcie_adv_regs->pcie_adv_vflags |= PCIE_UE_STATUS_VALID; 318 319 pcie_adv_regs->pcie_ue_mask = pci_config_get32(erpt_p->pe_hdl, 320 pcie_ecap_ptr + PCIE_AER_UCE_MASK); 321 pcie_adv_regs->pcie_ue_sev = pci_config_get32(erpt_p->pe_hdl, 322 pcie_ecap_ptr + PCIE_AER_UCE_SERV); 323 pcie_adv_regs->pcie_adv_ctl = pci_config_get32(erpt_p->pe_hdl, 324 pcie_ecap_ptr + PCIE_AER_CTL); 325 pcie_adv_regs->pcie_ue_hdr0 = pci_config_get32(erpt_p->pe_hdl, 326 pcie_ecap_ptr + PCIE_AER_HDR_LOG); 327 if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK) { 328 int i; 329 pcie_adv_regs->pcie_adv_vflags |= PCIE_UE_HDR_VALID; 330 331 for (i = 0; i < 3; i++) { 332 pcie_adv_regs->pcie_ue_hdr[i] = pci_config_get32( 333 erpt_p->pe_hdl, pcie_ecap_ptr + 
PCIE_AER_HDR_LOG + 334 (4 * (i + 1))); 335 } 336 } 337 338 pcie_adv_regs->pcie_ce_status = pci_config_get32(erpt_p->pe_hdl, 339 pcie_ecap_ptr + PCIE_AER_CE_STS); 340 if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK) 341 pcie_adv_regs->pcie_adv_vflags |= PCIE_CE_STATUS_VALID; 342 343 pcie_adv_regs->pcie_ce_mask = pci_config_get32(erpt_p->pe_hdl, 344 pcie_ecap_ptr + PCIE_AER_CE_MASK); 345 346 /* 347 * If pci express to pci bridge then grab the bridge 348 * error registers. 349 */ 350 if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) { 351 pcie_adv_bdg_error_regs_t *pcie_bdg_regs = 352 pcie_adv_regs->pcie_adv_bdg_regs; 353 354 pcie_bdg_regs->pcie_sue_status = 355 pci_config_get32(erpt_p->pe_hdl, 356 pcie_ecap_ptr + PCIE_AER_SUCE_STS); 357 pcie_bdg_regs->pcie_sue_mask = 358 pci_config_get32(erpt_p->pe_hdl, 359 pcie_ecap_ptr + PCIE_AER_SUCE_MASK); 360 if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK) 361 pcie_adv_regs->pcie_adv_vflags |= PCIE_SUE_STATUS_VALID; 362 pcie_bdg_regs->pcie_sue_hdr0 = pci_config_get32(erpt_p->pe_hdl, 363 (pcie_ecap_ptr + PCIE_AER_SHDR_LOG)); 364 365 if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK) { 366 int i; 367 368 pcie_adv_regs->pcie_adv_vflags |= PCIE_SUE_HDR_VALID; 369 370 for (i = 0; i < 3; i++) { 371 pcie_bdg_regs->pcie_sue_hdr[i] = 372 pci_config_get32(erpt_p->pe_hdl, 373 pcie_ecap_ptr + PCIE_AER_SHDR_LOG + 374 (4 * (i + 1))); 375 } 376 } 377 } 378 /* 379 * If PCI Express root complex then grab the root complex 380 * error registers. 381 */ 382 if (erpt_p->pe_dflags & PCIEX_RC_DEV) { 383 pcie_adv_rc_error_regs_t *pcie_rc_regs = 384 pcie_adv_regs->pcie_adv_rc_regs; 385 386 pcie_rc_regs->pcie_rc_err_cmd = pci_config_get32(erpt_p->pe_hdl, 387 (pcie_ecap_ptr + PCIE_AER_RE_CMD)); 388 pcie_rc_regs->pcie_rc_err_status = 389 pci_config_get32(erpt_p->pe_hdl, 390 (pcie_ecap_ptr + PCIE_AER_RE_STS)); 391 if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK) 392 pcie_adv_regs->pcie_adv_vflags |= 393 PCIE_RC_ERR_STATUS_VALID; 394 pcie_rc_regs->pcie_rc_ce_src_id = 395 pci_config_get16(erpt_p->pe_hdl, 396 (pcie_ecap_ptr + PCIE_AER_CE_SRC_ID)); 397 pcie_rc_regs->pcie_rc_ue_src_id = 398 pci_config_get16(erpt_p->pe_hdl, 399 (pcie_ecap_ptr + PCIE_AER_ERR_SRC_ID)); 400 if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK) 401 pcie_adv_regs->pcie_adv_vflags |= PCIE_SRC_ID_VALID; 402 } 403 } 404 405 /*ARGSUSED*/ 406 static void 407 pci_regs_gather(dev_info_t *dip, pci_erpt_t *erpt_p, int fme_flag) 408 { 409 pci_error_regs_t *pci_regs = erpt_p->pe_pci_regs; 410 411 /* 412 * Start by reading all the error registers that are available for 413 * pci and pci express and for leaf devices and bridges/switches 414 */ 415 pci_regs->pci_err_status = pci_config_get16(erpt_p->pe_hdl, 416 PCI_CONF_STAT); 417 if (pci_config_check(erpt_p->pe_hdl, fme_flag) != DDI_FM_OK) 418 return; 419 pci_regs->pci_vflags |= PCI_ERR_STATUS_VALID; 420 pci_regs->pci_cfg_comm = pci_config_get16(erpt_p->pe_hdl, 421 PCI_CONF_COMM); 422 if (pci_config_check(erpt_p->pe_hdl, fme_flag) != DDI_FM_OK) 423 return; 424 425 /* 426 * If pci-pci bridge grab PCI bridge specific error registers. 
427 */ 428 if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) { 429 pci_regs->pci_bdg_regs->pci_bdg_sec_stat = 430 pci_config_get16(erpt_p->pe_hdl, PCI_BCNF_SEC_STATUS); 431 if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK) 432 pci_regs->pci_bdg_regs->pci_bdg_vflags |= 433 PCI_BDG_SEC_STAT_VALID; 434 pci_regs->pci_bdg_regs->pci_bdg_ctrl = 435 pci_config_get16(erpt_p->pe_hdl, PCI_BCNF_BCNTRL); 436 if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK) 437 pci_regs->pci_bdg_regs->pci_bdg_vflags |= 438 PCI_BDG_CTRL_VALID; 439 } 440 441 /* 442 * If pci express device grab pci express error registers and 443 * check for advanced error reporting features and grab them if 444 * available. 445 */ 446 if (erpt_p->pe_dflags & PCIEX_DEV) 447 pcie_regs_gather(erpt_p, fme_flag); 448 else if (erpt_p->pe_dflags & PCIX_DEV) 449 pcix_regs_gather(erpt_p, erpt_p->pe_regs, fme_flag); 450 451 } 452 453 static void 454 pcix_regs_clear(pci_erpt_t *erpt_p, void *pe_regs) 455 { 456 if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) { 457 pcix_bdg_error_regs_t *pcix_bdg_regs = 458 (pcix_bdg_error_regs_t *)pe_regs; 459 uint8_t pcix_bdg_cap_ptr; 460 int i; 461 462 pcix_bdg_cap_ptr = pcix_bdg_regs->pcix_bdg_cap_ptr; 463 464 if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_SEC_STATUS_VALID) 465 pci_config_put16(erpt_p->pe_hdl, 466 (pcix_bdg_cap_ptr + PCI_PCIX_SEC_STATUS), 467 pcix_bdg_regs->pcix_bdg_sec_stat); 468 469 if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_STATUS_VALID) 470 pci_config_put32(erpt_p->pe_hdl, 471 (pcix_bdg_cap_ptr + PCI_PCIX_BDG_STATUS), 472 pcix_bdg_regs->pcix_bdg_stat); 473 474 pcix_bdg_regs->pcix_bdg_vflags = 0x0; 475 476 if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) { 477 pcix_ecc_regs_t *pcix_bdg_ecc_regs; 478 /* 479 * PCI Express to PCI-X bridges only implement the 480 * secondary side of the PCI-X ECC registers, bit one is 481 * read-only so we make sure we do not write to it. 
482 */ 483 if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) { 484 pcix_bdg_ecc_regs = 485 pcix_bdg_regs->pcix_bdg_ecc_regs[1]; 486 487 if (pcix_bdg_ecc_regs->pcix_ecc_vflags & 488 PCIX_ERR_ECC_STS_VALID) { 489 490 pci_config_put32(erpt_p->pe_hdl, 491 (pcix_bdg_cap_ptr + 492 PCI_PCIX_BDG_ECC_STATUS), 493 pcix_bdg_ecc_regs-> 494 pcix_ecc_ctlstat); 495 } 496 pcix_bdg_ecc_regs->pcix_ecc_vflags = 0x0; 497 } else { 498 for (i = 0; i < 2; i++) { 499 pcix_bdg_ecc_regs = 500 pcix_bdg_regs->pcix_bdg_ecc_regs[i]; 501 502 503 if (pcix_bdg_ecc_regs->pcix_ecc_vflags & 504 PCIX_ERR_ECC_STS_VALID) { 505 pci_config_put32(erpt_p->pe_hdl, 506 (pcix_bdg_cap_ptr + 507 PCI_PCIX_BDG_ECC_STATUS), 508 i); 509 510 pci_config_put32(erpt_p->pe_hdl, 511 (pcix_bdg_cap_ptr + 512 PCI_PCIX_BDG_ECC_STATUS), 513 pcix_bdg_ecc_regs-> 514 pcix_ecc_ctlstat); 515 } 516 pcix_bdg_ecc_regs->pcix_ecc_vflags = 517 0x0; 518 } 519 } 520 } 521 } else { 522 pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)pe_regs; 523 uint8_t pcix_cap_ptr; 524 525 pcix_cap_ptr = pcix_regs->pcix_cap_ptr; 526 527 if (pcix_regs->pcix_vflags & PCIX_ERR_STATUS_VALID) 528 pci_config_put32(erpt_p->pe_hdl, 529 (pcix_cap_ptr + PCI_PCIX_STATUS), 530 pcix_regs->pcix_status); 531 532 pcix_regs->pcix_vflags = 0x0; 533 534 if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) { 535 pcix_ecc_regs_t *pcix_ecc_regs = 536 pcix_regs->pcix_ecc_regs; 537 538 if (pcix_ecc_regs->pcix_ecc_vflags & 539 PCIX_ERR_ECC_STS_VALID) 540 pci_config_put32(erpt_p->pe_hdl, 541 (pcix_cap_ptr + PCI_PCIX_ECC_STATUS), 542 pcix_ecc_regs->pcix_ecc_ctlstat); 543 544 pcix_ecc_regs->pcix_ecc_vflags = 0x0; 545 } 546 } 547 } 548 549 static void 550 pcie_regs_clear(pci_erpt_t *erpt_p) 551 { 552 pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs; 553 uint8_t pcie_cap_ptr; 554 pcie_adv_error_regs_t *pcie_adv_regs; 555 uint16_t pcie_ecap_ptr; 556 557 pcie_cap_ptr = pcie_regs->pcie_cap_ptr; 558 559 if (pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID) 560 pci_config_put16(erpt_p->pe_hdl, pcie_cap_ptr + PCIE_DEVSTS, 561 pcie_regs->pcie_err_status); 562 563 pcie_regs->pcie_vflags = 0x0; 564 565 if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) && 566 (erpt_p->pe_dflags & PCIX_DEV)) 567 pcix_regs_clear(erpt_p, pcie_regs->pcix_bdg_regs); 568 569 if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV)) 570 return; 571 572 pcie_adv_regs = pcie_regs->pcie_adv_regs; 573 574 pcie_ecap_ptr = pcie_adv_regs->pcie_adv_cap_ptr; 575 576 if (pcie_adv_regs->pcie_adv_vflags & PCIE_UE_STATUS_VALID) 577 pci_config_put32(erpt_p->pe_hdl, 578 pcie_ecap_ptr + PCIE_AER_UCE_STS, 579 pcie_adv_regs->pcie_ue_status); 580 581 if (pcie_adv_regs->pcie_adv_vflags & PCIE_CE_STATUS_VALID) 582 pci_config_put32(erpt_p->pe_hdl, 583 pcie_ecap_ptr + PCIE_AER_CE_STS, 584 pcie_adv_regs->pcie_ce_status); 585 586 587 if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) { 588 pcie_adv_bdg_error_regs_t *pcie_bdg_regs = 589 pcie_adv_regs->pcie_adv_bdg_regs; 590 591 592 if (pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_STATUS_VALID) 593 pci_config_put32(erpt_p->pe_hdl, 594 pcie_ecap_ptr + PCIE_AER_SUCE_STS, 595 pcie_bdg_regs->pcie_sue_status); 596 } 597 /* 598 * If PCI Express root complex then clear the root complex 599 * error registers. 
600 */ 601 if (erpt_p->pe_dflags & PCIEX_RC_DEV) { 602 pcie_adv_rc_error_regs_t *pcie_rc_regs = 603 pcie_adv_regs->pcie_adv_rc_regs; 604 605 606 if (pcie_adv_regs->pcie_adv_vflags & PCIE_RC_ERR_STATUS_VALID) 607 pci_config_put32(erpt_p->pe_hdl, 608 (pcie_ecap_ptr + PCIE_AER_RE_STS), 609 pcie_rc_regs->pcie_rc_err_status); 610 } 611 pcie_adv_regs->pcie_adv_vflags = 0x0; 612 } 613 614 static void 615 pci_regs_clear(pci_erpt_t *erpt_p) 616 { 617 /* 618 * Finally clear the error bits 619 */ 620 if (erpt_p->pe_dflags & PCIEX_DEV) 621 pcie_regs_clear(erpt_p); 622 else if (erpt_p->pe_dflags & PCIX_DEV) 623 pcix_regs_clear(erpt_p, erpt_p->pe_regs); 624 625 if (erpt_p->pe_pci_regs->pci_vflags & PCI_ERR_STATUS_VALID) 626 pci_config_put16(erpt_p->pe_hdl, PCI_CONF_STAT, 627 erpt_p->pe_pci_regs->pci_err_status); 628 629 erpt_p->pe_pci_regs->pci_vflags = 0x0; 630 631 if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) { 632 if (erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags & 633 PCI_BDG_SEC_STAT_VALID) 634 pci_config_put16(erpt_p->pe_hdl, PCI_BCNF_SEC_STATUS, 635 erpt_p->pe_pci_regs->pci_bdg_regs-> 636 pci_bdg_sec_stat); 637 if (erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags & 638 PCI_BDG_CTRL_VALID) 639 pci_config_put16(erpt_p->pe_hdl, PCI_BCNF_BCNTRL, 640 erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_ctrl); 641 642 erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags = 0x0; 643 } 644 } 645 646 /* 647 * pcix_ereport_setup: Allocate structures for PCI-X error handling and ereport 648 * generation. 649 */ 650 /* ARGSUSED */ 651 static void 652 pcix_ereport_setup(dev_info_t *dip, pci_erpt_t *erpt_p) 653 { 654 uint8_t pcix_cap_ptr; 655 int i; 656 657 pcix_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 658 "pcix-capid-pointer", PCI_CAP_NEXT_PTR_NULL); 659 660 if (pcix_cap_ptr != PCI_CAP_NEXT_PTR_NULL) 661 erpt_p->pe_dflags |= PCIX_DEV; 662 else 663 return; 664 665 if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) { 666 pcix_bdg_error_regs_t *pcix_bdg_regs; 667 668 erpt_p->pe_regs = kmem_zalloc(sizeof (pcix_bdg_error_regs_t), 669 KM_SLEEP); 670 pcix_bdg_regs = (pcix_bdg_error_regs_t *)erpt_p->pe_regs; 671 pcix_bdg_regs->pcix_bdg_cap_ptr = pcix_cap_ptr; 672 pcix_bdg_regs->pcix_bdg_ver = pci_config_get16(erpt_p->pe_hdl, 673 pcix_cap_ptr + PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK; 674 if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) { 675 for (i = 0; i < 2; i++) { 676 pcix_bdg_regs->pcix_bdg_ecc_regs[i] = 677 kmem_zalloc(sizeof (pcix_ecc_regs_t), 678 KM_SLEEP); 679 } 680 } 681 } else { 682 pcix_error_regs_t *pcix_regs; 683 684 erpt_p->pe_regs = kmem_zalloc(sizeof (pcix_error_regs_t), 685 KM_SLEEP); 686 pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs; 687 pcix_regs->pcix_cap_ptr = pcix_cap_ptr; 688 pcix_regs->pcix_ver = pci_config_get16(erpt_p->pe_hdl, 689 pcix_cap_ptr + PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK; 690 if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) { 691 pcix_regs->pcix_ecc_regs = kmem_zalloc( 692 sizeof (pcix_ecc_regs_t), KM_SLEEP); 693 } 694 } 695 } 696 697 static void 698 pcie_ereport_setup(dev_info_t *dip, pci_erpt_t *erpt_p) 699 { 700 pcie_error_regs_t *pcie_regs; 701 pcie_adv_error_regs_t *pcie_adv_regs; 702 uint8_t pcix_cap_ptr; 703 uint8_t pcie_cap_ptr; 704 uint16_t pcie_ecap_ptr; 705 uint16_t dev_type = 0; 706 707 /* 708 * The following sparc specific code should be removed once the pci_cap 709 * interfaces create the necessary properties for us. 
710 */ 711 #if defined(__sparc) 712 ushort_t status; 713 uint32_t slot_cap; 714 uint8_t cap_ptr = 0; 715 uint8_t cap_id = 0; 716 uint32_t hdr, hdr_next_ptr, hdr_cap_id; 717 uint16_t offset = P2ALIGN(PCIE_EXT_CAP, 4); 718 uint16_t aer_ptr = 0; 719 720 cap_ptr = pci_config_get8(erpt_p->pe_hdl, PCI_CONF_CAP_PTR); 721 if (pci_config_check(erpt_p->pe_hdl, DDI_FM_ERR_UNEXPECTED) == 722 DDI_FM_OK) { 723 while ((cap_id = pci_config_get8(erpt_p->pe_hdl, cap_ptr)) != 724 0xff) { 725 if (cap_id == PCI_CAP_ID_PCIX) { 726 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 727 "pcix-capid-pointer", cap_ptr); 728 } 729 if (cap_id == PCI_CAP_ID_PCI_E) { 730 status = pci_config_get16(erpt_p->pe_hdl, cap_ptr + 2); 731 if (status & PCIE_PCIECAP_SLOT_IMPL) { 732 /* offset 14h is Slot Cap Register */ 733 slot_cap = pci_config_get32(erpt_p->pe_hdl, 734 cap_ptr + PCIE_SLOTCAP); 735 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 736 "pcie-slotcap-reg", slot_cap); 737 } 738 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 739 "pcie-capid-reg", pci_config_get16(erpt_p->pe_hdl, 740 cap_ptr + PCIE_PCIECAP)); 741 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 742 "pcie-capid-pointer", cap_ptr); 743 744 } 745 if ((cap_ptr = pci_config_get8(erpt_p->pe_hdl, 746 cap_ptr + 1)) == 0xff || cap_ptr == 0 || 747 (pci_config_check(erpt_p->pe_hdl, 748 DDI_FM_ERR_UNEXPECTED) != DDI_FM_OK)) 749 break; 750 } 751 } 752 753 #endif 754 755 pcix_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 756 "pcix-capid-pointer", PCI_CAP_NEXT_PTR_NULL); 757 758 if (pcix_cap_ptr != PCI_CAP_NEXT_PTR_NULL) 759 erpt_p->pe_dflags |= PCIX_DEV; 760 761 pcie_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 762 DDI_PROP_DONTPASS, "pcie-capid-pointer", PCI_CAP_NEXT_PTR_NULL); 763 764 if (pcie_cap_ptr != PCI_CAP_NEXT_PTR_NULL) { 765 erpt_p->pe_dflags |= PCIEX_DEV; 766 erpt_p->pe_regs = kmem_zalloc(sizeof (pcie_error_regs_t), 767 KM_SLEEP); 768 pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs; 769 pcie_regs->pcie_cap_ptr = pcie_cap_ptr; 770 } 771 772 if (!(erpt_p->pe_dflags & PCIEX_DEV)) 773 return; 774 775 /* 776 * Don't currently need to check for version here because we are 777 * compliant with PCIE 1.0a which is version 0 and is guaranteed 778 * software compatibility with future versions. We will need to 779 * add errors for new detectors/features which are added in newer 780 * revisions [sec 7.8.2]. 781 */ 782 pcie_regs->pcie_cap = pci_config_get16(erpt_p->pe_hdl, 783 pcie_regs->pcie_cap_ptr + PCIE_PCIECAP); 784 785 dev_type = pcie_regs->pcie_cap & PCIE_PCIECAP_DEV_TYPE_MASK; 786 787 if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) && 788 (erpt_p->pe_dflags & PCIX_DEV)) { 789 int i; 790 791 pcie_regs->pcix_bdg_regs = 792 kmem_zalloc(sizeof (pcix_bdg_error_regs_t), KM_SLEEP); 793 794 pcie_regs->pcix_bdg_regs->pcix_bdg_cap_ptr = pcix_cap_ptr; 795 pcie_regs->pcix_bdg_regs->pcix_bdg_ver = 796 pci_config_get16(erpt_p->pe_hdl, 797 pcix_cap_ptr + PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK; 798 799 if (PCIX_ECC_VER_CHECK(pcie_regs->pcix_bdg_regs->pcix_bdg_ver)) 800 for (i = 0; i < 2; i++) 801 pcie_regs->pcix_bdg_regs->pcix_bdg_ecc_regs[i] = 802 kmem_zalloc(sizeof (pcix_ecc_regs_t), 803 KM_SLEEP); 804 } 805 806 if (dev_type == PCIE_PCIECAP_DEV_TYPE_ROOT) { 807 erpt_p->pe_dflags |= PCIEX_RC_DEV; 808 pcie_regs->pcie_rc_regs = kmem_zalloc( 809 sizeof (pcie_rc_error_regs_t), KM_SLEEP); 810 } 811 /* 812 * The following sparc specific code should be removed once the pci_cap 813 * interfaces create the necessary properties for us. 
	 */
#if defined(__sparc)

	hdr = pci_config_get32(erpt_p->pe_hdl, offset);
	hdr_next_ptr = (hdr >> PCIE_EXT_CAP_NEXT_PTR_SHIFT) &
	    PCIE_EXT_CAP_NEXT_PTR_MASK;
	hdr_cap_id = (hdr >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK;

	while ((hdr_next_ptr != PCIE_EXT_CAP_NEXT_PTR_NULL) &&
	    (hdr_cap_id != PCIE_EXT_CAP_ID_AER)) {
		offset = P2ALIGN(hdr_next_ptr, 4);
		hdr = pci_config_get32(erpt_p->pe_hdl, offset);
		hdr_next_ptr = (hdr >> PCIE_EXT_CAP_NEXT_PTR_SHIFT) &
		    PCIE_EXT_CAP_NEXT_PTR_MASK;
		hdr_cap_id = (hdr >> PCIE_EXT_CAP_ID_SHIFT) &
		    PCIE_EXT_CAP_ID_MASK;
	}

	if (hdr_cap_id == PCIE_EXT_CAP_ID_AER)
		aer_ptr = P2ALIGN(offset, 4);
	if (aer_ptr != PCI_CAP_NEXT_PTR_NULL)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
		    "pcie-aer-pointer", aer_ptr);
#endif

	/*
	 * Find and store whether this device is capable of PCI Express
	 * advanced errors; if not, report an error against the device.
	 */
	pcie_ecap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "pcie-aer-pointer", PCI_CAP_NEXT_PTR_NULL);
	if (pcie_ecap_ptr != PCI_CAP_NEXT_PTR_NULL) {
		erpt_p->pe_dflags |= PCIEX_ADV_DEV;
		pcie_regs->pcie_adv_regs = kmem_zalloc(
		    sizeof (pcie_adv_error_regs_t), KM_SLEEP);
		pcie_regs->pcie_adv_regs->pcie_adv_cap_ptr = pcie_ecap_ptr;
	}

	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV)) {
		return;
	}

	pcie_adv_regs = pcie_regs->pcie_adv_regs;

	if (pcie_adv_regs == NULL)
		return;
	/*
	 * Initialize structures for advanced PCI Express devices.
	 */

	/*
	 * Advanced error registers exist for PCI Express to PCI(X) bridges and
	 * may also exist for PCI(X) to PCI Express bridges; the latter is not
	 * well explained in the PCI Express to PCI/PCI-X Bridge Specification
	 * 1.0 and will be left out of the current gathering of these registers.
	 */
	if (dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE2PCI) {
		erpt_p->pe_dflags |= PCIEX_2PCI_DEV;
		pcie_adv_regs->pcie_adv_bdg_regs = kmem_zalloc(
		    sizeof (pcie_adv_bdg_error_regs_t), KM_SLEEP);
	}

	if (erpt_p->pe_dflags & PCIEX_RC_DEV)
		pcie_adv_regs->pcie_adv_rc_regs = kmem_zalloc(
		    sizeof (pcie_adv_rc_error_regs_t), KM_SLEEP);
}

/*
 * pci_ereport_setup: Detect PCI device type and initialize structures to be
 * used to generate ereports based on detected generic device errors.
 */
void
pci_ereport_setup(dev_info_t *dip)
{
	struct dev_info *devi = DEVI(dip);
	struct i_ddi_fmhdl *fmhdl = devi->devi_fmhdl;
	pci_erpt_t *erpt_p;
	uint8_t pci_hdr_type;
	uint16_t pci_status;
	pci_regspec_t *pci_rp;
	int32_t len;
	uint32_t phys_hi;

	/*
	 * If the device is not ereport capable then report an error against
	 * the driver for using this interface.
	 */
	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_SLEEP);
		return;
	}

	/*
	 * ASSERT fmhdl exists and fh_bus_specific is NULL.
	 */
	ASSERT(fmhdl && (fmhdl->fh_bus_specific == NULL));

	erpt_p = kmem_zalloc(sizeof (pci_erpt_t), KM_SLEEP);

	if (pci_config_setup(dip, &erpt_p->pe_hdl) != DDI_SUCCESS)
		goto error;

	erpt_p->pe_pci_regs = kmem_zalloc(sizeof (pci_error_regs_t), KM_SLEEP);

	pci_status = pci_config_get16(erpt_p->pe_hdl, PCI_CONF_STAT);
	if (pci_config_check(erpt_p->pe_hdl, DDI_FM_ERR_UNEXPECTED) !=
	    DDI_FM_OK)
		goto error;

	/*
	 * Get header type and record if device is a bridge.
	 */
	pci_hdr_type = pci_config_get8(erpt_p->pe_hdl, PCI_CONF_HEADER);
	if (pci_config_check(erpt_p->pe_hdl, DDI_FM_ERR_UNEXPECTED) !=
	    DDI_FM_OK)
		goto error;

	/*
	 * Check to see if the PCI device is a bridge; if so, allocate a PCI
	 * bridge error register structure.
	 */
	if ((pci_hdr_type & PCI_HEADER_TYPE_M) == PCI_HEADER_PPB) {
		erpt_p->pe_dflags |= PCI_BRIDGE_DEV;
		erpt_p->pe_pci_regs->pci_bdg_regs = kmem_zalloc(
		    sizeof (pci_bdg_error_regs_t), KM_SLEEP);
	}

	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
	    (caddr_t)&pci_rp, &len) == DDI_SUCCESS) {
		phys_hi = pci_rp->pci_phys_hi;
		kmem_free(pci_rp, len);

		erpt_p->pe_bdf = (uint16_t)(PCI_REG_BDFR_G(phys_hi) >>
		    PCI_REG_FUNC_SHIFT);
	}

	if (!(pci_status & PCI_STAT_CAP)) {
		goto done;
	}

	/*
	 * Initialize structures for PCI Express and PCI-X devices.
	 * Order matters below and pcie_ereport_setup should precede
	 * pcix_ereport_setup.
	 */
	pcie_ereport_setup(dip, erpt_p);

	if (!(erpt_p->pe_dflags & PCIEX_DEV)) {
		pcix_ereport_setup(dip, erpt_p);
	}

done:
	pci_regs_gather(dip, erpt_p, DDI_FM_ERR_UNEXPECTED);
	pci_regs_clear(erpt_p);

	/*
	 * Before returning, set fh_bus_specific to the completed pci_erpt_t
	 * structure.
	 */
	fmhdl->fh_bus_specific = (void *)erpt_p;

	return;
error:
	if (erpt_p->pe_pci_regs)
		kmem_free(erpt_p->pe_pci_regs, sizeof (pci_error_regs_t));
	kmem_free(erpt_p, sizeof (pci_erpt_t));
	erpt_p = NULL;
}

/*
 * pcix_ereport_teardown: Free the PCI-X error register structures allocated
 * by pcix_ereport_setup.
 */
static void
pcix_ereport_teardown(pci_erpt_t *erpt_p)
{
	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		pcix_bdg_error_regs_t *pcix_bdg_regs;
		uint16_t pcix_ver;

		pcix_bdg_regs = (pcix_bdg_error_regs_t *)erpt_p->pe_regs;
		pcix_ver = pcix_bdg_regs->pcix_bdg_ver;
		if (PCIX_ECC_VER_CHECK(pcix_ver)) {
			int i;
			for (i = 0; i < 2; i++)
				kmem_free(pcix_bdg_regs->pcix_bdg_ecc_regs[i],
				    sizeof (pcix_ecc_regs_t));
		}
		kmem_free(erpt_p->pe_regs, sizeof (pcix_bdg_error_regs_t));
	} else {
		pcix_error_regs_t *pcix_regs;
		uint16_t pcix_ver;

		pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
		pcix_ver = pcix_regs->pcix_ver;
		if (PCIX_ECC_VER_CHECK(pcix_ver)) {
			kmem_free(pcix_regs->pcix_ecc_regs,
			    sizeof (pcix_ecc_regs_t));
		}
		kmem_free(erpt_p->pe_regs, sizeof (pcix_error_regs_t));
	}
}

/*
 * pcie_ereport_teardown: Free the PCI Express error register structures
 * allocated by pcie_ereport_setup.
 */
static void
pcie_ereport_teardown(pci_erpt_t *erpt_p)
{
	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;

	if (erpt_p->pe_dflags & PCIEX_ADV_DEV) {
		pcie_adv_error_regs_t *pcie_adv = pcie_regs->pcie_adv_regs;

		if (erpt_p->pe_dflags & PCIEX_2PCI_DEV)
			kmem_free(pcie_adv->pcie_adv_bdg_regs,
			    sizeof (pcie_adv_bdg_error_regs_t));
		if (erpt_p->pe_dflags & PCIEX_RC_DEV)
			kmem_free(pcie_adv->pcie_adv_rc_regs,
			    sizeof (pcie_adv_rc_error_regs_t));
		kmem_free(pcie_adv, sizeof
(pcie_adv_error_regs_t)); 1029 } 1030 1031 if (erpt_p->pe_dflags & PCIEX_RC_DEV) 1032 kmem_free(pcie_regs->pcie_rc_regs, 1033 sizeof (pcie_rc_error_regs_t)); 1034 1035 if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) { 1036 if (erpt_p->pe_dflags & PCIX_DEV) { 1037 uint16_t pcix_ver = pcie_regs->pcix_bdg_regs-> 1038 pcix_bdg_ver; 1039 1040 if (PCIX_ECC_VER_CHECK(pcix_ver)) { 1041 int i; 1042 for (i = 0; i < 2; i++) 1043 kmem_free(pcie_regs->pcix_bdg_regs-> 1044 pcix_bdg_ecc_regs[i], 1045 sizeof (pcix_ecc_regs_t)); 1046 } 1047 kmem_free(pcie_regs->pcix_bdg_regs, 1048 sizeof (pcix_bdg_error_regs_t)); 1049 } 1050 } 1051 kmem_free(erpt_p->pe_regs, sizeof (pcie_error_regs_t)); 1052 } 1053 1054 void 1055 pci_ereport_teardown(dev_info_t *dip) 1056 { 1057 struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl; 1058 pci_erpt_t *erpt_p; 1059 1060 if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) && 1061 !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) { 1062 i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_SLEEP); 1063 } 1064 1065 ASSERT(fmhdl); 1066 1067 erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific; 1068 if (erpt_p == NULL) 1069 return; 1070 1071 if (erpt_p->pe_dflags & PCIEX_DEV) 1072 pcie_ereport_teardown(erpt_p); 1073 else if (erpt_p->pe_dflags & PCIX_DEV) 1074 pcix_ereport_teardown(erpt_p); 1075 pci_config_teardown((ddi_acc_handle_t *)&erpt_p->pe_hdl); 1076 if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) 1077 kmem_free(erpt_p->pe_pci_regs->pci_bdg_regs, 1078 sizeof (pci_bdg_error_regs_t)); 1079 kmem_free(erpt_p->pe_pci_regs, sizeof (pci_error_regs_t)); 1080 kmem_free(erpt_p, sizeof (pci_erpt_t)); 1081 fmhdl->fh_bus_specific = NULL; 1082 1083 /* 1084 * The following sparc specific code should be removed once the pci_cap 1085 * interfaces create the necessary properties for us. 1086 */ 1087 #if defined(__sparc) 1088 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcix-capid-pointer"); 1089 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-slotcap-reg"); 1090 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-capid-reg"); 1091 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-capid-pointer"); 1092 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-aer-pointer"); 1093 #endif 1094 } 1095 1096 static void 1097 pcie_ereport_post(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p, 1098 char *buf, int errtype) 1099 { 1100 pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs; 1101 pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs; 1102 pcie_adv_rc_error_regs_t *pcie_adv_rc_regs; 1103 1104 switch (errtype) { 1105 case PCIEX_TYPE_CE: 1106 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 1107 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 1108 PCIEX_DEVSTS_REG, DATA_TYPE_UINT16, 1109 pcie_regs->pcie_err_status, 1110 PCIEX_CE_STATUS_REG, DATA_TYPE_UINT32, 1111 pcie_adv_regs->pcie_ce_status, NULL); 1112 break; 1113 case PCIEX_TYPE_UE: 1114 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 1115 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 1116 PCIEX_DEVSTS_REG, DATA_TYPE_UINT16, 1117 pcie_regs->pcie_err_status, 1118 PCIEX_UE_STATUS_REG, DATA_TYPE_UINT32, 1119 pcie_adv_regs->pcie_ue_status, PCIEX_UE_SEV_REG, 1120 DATA_TYPE_UINT32, pcie_adv_regs->pcie_ue_sev, 1121 PCIEX_ADV_CTL, DATA_TYPE_UINT32, 1122 pcie_adv_regs->pcie_adv_ctl, 1123 PCIEX_SRC_ID, DATA_TYPE_UINT16, 1124 pcie_adv_regs->pcie_adv_bdf, 1125 PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE, 1126 (pcie_adv_regs->pcie_adv_bdf != NULL) ? 
1127 1 : NULL, 1128 #ifdef DEBUG 1129 PCIEX_UE_HDR0, DATA_TYPE_UINT32, 1130 pcie_adv_regs->pcie_ue_hdr0, 1131 PCIEX_UE_HDR1, DATA_TYPE_UINT32, 1132 pcie_adv_regs->pcie_ue_hdr[0], 1133 PCIEX_UE_HDR2, DATA_TYPE_UINT32, 1134 pcie_adv_regs->pcie_ue_hdr[1], 1135 PCIEX_UE_HDR3, DATA_TYPE_UINT32, 1136 pcie_adv_regs->pcie_ue_hdr[2], 1137 #endif 1138 NULL); 1139 break; 1140 case PCIEX_TYPE_GEN: 1141 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 1142 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 1143 0, PCIEX_DEVSTS_REG, DATA_TYPE_UINT16, 1144 pcie_regs->pcie_err_status, NULL); 1145 break; 1146 case PCIEX_TYPE_RC_UE_MSG: 1147 case PCIEX_TYPE_RC_CE_MSG: 1148 pcie_adv_rc_regs = pcie_adv_regs->pcie_adv_rc_regs; 1149 1150 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 1151 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 1152 PCIEX_ROOT_ERRSTS_REG, DATA_TYPE_UINT32, 1153 pcie_adv_rc_regs->pcie_rc_err_status, 1154 PCIEX_SRC_ID, DATA_TYPE_UINT16, 1155 (errtype == PCIEX_TYPE_RC_UE_MSG) ? 1156 pcie_adv_rc_regs->pcie_rc_ue_src_id : 1157 pcie_adv_rc_regs->pcie_rc_ce_src_id, 1158 PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE, 1159 (errtype == PCIEX_TYPE_RC_UE_MSG) ? 1160 (pcie_adv_regs->pcie_adv_vflags & PCIE_SRC_ID_VALID && 1161 pcie_adv_rc_regs->pcie_rc_ue_src_id != 0) : 1162 (pcie_adv_regs->pcie_adv_vflags & PCIE_SRC_ID_VALID && 1163 pcie_adv_rc_regs->pcie_rc_ce_src_id != 0), NULL); 1164 break; 1165 case PCIEX_TYPE_RC_MULT_MSG: 1166 pcie_adv_rc_regs = pcie_adv_regs->pcie_adv_rc_regs; 1167 1168 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 1169 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 1170 PCIEX_ROOT_ERRSTS_REG, DATA_TYPE_UINT32, 1171 pcie_adv_rc_regs->pcie_rc_err_status, NULL); 1172 break; 1173 default: 1174 break; 1175 } 1176 } 1177 1178 /*ARGSUSED*/ 1179 static void 1180 pcie_check_addr(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p) 1181 { 1182 pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs; 1183 pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs; 1184 pcie_tlp_hdr_t *ue_hdr0; 1185 uint32_t *ue_hdr; 1186 uint64_t addr = NULL; 1187 int upstream = 0; 1188 pci_fme_bus_specific_t *pci_fme_bsp = 1189 (pci_fme_bus_specific_t *)derr->fme_bus_specific; 1190 1191 if (!(pcie_adv_regs->pcie_adv_vflags & PCIE_UE_HDR_VALID)) 1192 return; 1193 1194 ue_hdr0 = (pcie_tlp_hdr_t *)&pcie_adv_regs->pcie_ue_hdr0; 1195 ue_hdr = pcie_adv_regs->pcie_ue_hdr; 1196 1197 if ((pcie_regs->pcie_cap & PCIE_PCIECAP_DEV_TYPE_MASK) == 1198 PCIE_PCIECAP_DEV_TYPE_ROOT || 1199 (pcie_regs->pcie_cap & PCIE_PCIECAP_DEV_TYPE_MASK) == 1200 PCIE_PCIECAP_DEV_TYPE_DOWN) 1201 upstream = 1; 1202 1203 switch (ue_hdr0->type) { 1204 case PCIE_TLP_TYPE_MEM: 1205 case PCIE_TLP_TYPE_MEMLK: 1206 if ((ue_hdr0->fmt & 0x1) == 0x1) { 1207 pcie_mem64_t *mem64_tlp = (pcie_mem64_t *)ue_hdr; 1208 1209 addr = (uint64_t)mem64_tlp->addr1 << 32 | 1210 (uint32_t)mem64_tlp->addr0 << 2; 1211 pcie_adv_regs->pcie_adv_bdf = mem64_tlp->rid; 1212 } else { 1213 pcie_memio32_t *memio32_tlp = (pcie_memio32_t *)ue_hdr; 1214 1215 addr = (uint32_t)memio32_tlp->addr0 << 2; 1216 pcie_adv_regs->pcie_adv_bdf = memio32_tlp->rid; 1217 } 1218 if (upstream) { 1219 pci_fme_bsp->pci_bs_bdf = pcie_adv_regs->pcie_adv_bdf; 1220 pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID; 1221 } else if ((pcie_regs->pcie_cap & PCIE_PCIECAP_DEV_TYPE_MASK) == 1222 PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) { 1223 pci_fme_bsp->pci_bs_bdf = erpt_p->pe_bdf; 1224 pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID; 1225 } 1226 pci_fme_bsp->pci_bs_addr = addr; 1227 pci_fme_bsp->pci_bs_flags |= 
PCI_BS_ADDR_VALID; 1228 pci_fme_bsp->pci_bs_type = upstream ? DMA_HANDLE : ACC_HANDLE; 1229 break; 1230 1231 case PCIE_TLP_TYPE_IO: 1232 { 1233 pcie_memio32_t *memio32_tlp = (pcie_memio32_t *)ue_hdr; 1234 1235 addr = (uint32_t)memio32_tlp->addr0 << 2; 1236 pcie_adv_regs->pcie_adv_bdf = memio32_tlp->rid; 1237 if ((pcie_regs->pcie_cap & 1238 PCIE_PCIECAP_DEV_TYPE_MASK) == 1239 PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) { 1240 pci_fme_bsp->pci_bs_bdf = erpt_p->pe_bdf; 1241 pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID; 1242 } 1243 pci_fme_bsp->pci_bs_addr = addr; 1244 pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID; 1245 pci_fme_bsp->pci_bs_type = ACC_HANDLE; 1246 break; 1247 } 1248 case PCIE_TLP_TYPE_CFG0: 1249 case PCIE_TLP_TYPE_CFG1: 1250 { 1251 pcie_cfg_t *cfg_tlp = (pcie_cfg_t *)ue_hdr; 1252 1253 pcie_adv_regs->pcie_adv_bdf = cfg_tlp->rid; 1254 pci_fme_bsp->pci_bs_bdf = (uint16_t)cfg_tlp->bus << 8 | 1255 (uint16_t)cfg_tlp->dev << 3 | cfg_tlp->func; 1256 pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID; 1257 pci_fme_bsp->pci_bs_type = ACC_HANDLE; 1258 break; 1259 } 1260 case PCIE_TLP_TYPE_MSG: 1261 { 1262 pcie_msg_t *msg_tlp = (pcie_msg_t *)ue_hdr; 1263 1264 pcie_adv_regs->pcie_adv_bdf = msg_tlp->rid; 1265 break; 1266 } 1267 case PCIE_TLP_TYPE_CPL: 1268 case PCIE_TLP_TYPE_CPLLK: 1269 { 1270 pcie_cpl_t *cpl_tlp = (pcie_cpl_t *)ue_hdr; 1271 1272 pcie_adv_regs->pcie_adv_bdf = cpl_tlp->cid; 1273 pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID; 1274 if (upstream) { 1275 pci_fme_bsp->pci_bs_bdf = cpl_tlp->cid; 1276 pci_fme_bsp->pci_bs_type = ACC_HANDLE; 1277 } else { 1278 pci_fme_bsp->pci_bs_bdf = cpl_tlp->rid; 1279 pci_fme_bsp->pci_bs_type = DMA_HANDLE; 1280 } 1281 break; 1282 } 1283 case PCIE_TLP_TYPE_MSI: 1284 default: 1285 break; 1286 } 1287 } 1288 1289 /*ARGSUSED*/ 1290 static void 1291 pcie_pci_check_addr(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p, 1292 int type) 1293 { 1294 pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs; 1295 pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs; 1296 pcie_adv_bdg_error_regs_t *pcie_bdg_regs = 1297 pcie_adv_regs->pcie_adv_bdg_regs; 1298 uint64_t addr = NULL; 1299 pcix_attr_t *pcie_pci_sue_attr; 1300 int cmd; 1301 int dual_addr = 0; 1302 pci_fme_bus_specific_t *pci_fme_bsp = 1303 (pci_fme_bus_specific_t *)derr->fme_bus_specific; 1304 1305 if (!(pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_HDR_VALID)) 1306 return; 1307 1308 pcie_pci_sue_attr = (pcix_attr_t *)&pcie_bdg_regs->pcie_sue_hdr0; 1309 cmd = (pcie_bdg_regs->pcie_sue_hdr[0] >> 1310 PCIE_AER_SUCE_HDR_CMD_LWR_SHIFT) & PCIE_AER_SUCE_HDR_CMD_LWR_MASK; 1311 1312 cmd_switch: 1313 addr = pcie_bdg_regs->pcie_sue_hdr[2]; 1314 addr = (addr << PCIE_AER_SUCE_HDR_ADDR_SHIFT) | 1315 pcie_bdg_regs->pcie_sue_hdr[1]; 1316 switch (cmd) { 1317 case PCI_PCIX_CMD_IORD: 1318 case PCI_PCIX_CMD_IOWR: 1319 pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid; 1320 if (addr) { 1321 pci_fme_bsp->pci_bs_addr = addr; 1322 pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID; 1323 pci_fme_bsp->pci_bs_type = ACC_HANDLE; 1324 } 1325 break; 1326 case PCI_PCIX_CMD_MEMRD_DW: 1327 case PCI_PCIX_CMD_MEMWR: 1328 case PCI_PCIX_CMD_MEMRD_BL: 1329 case PCI_PCIX_CMD_MEMWR_BL: 1330 case PCI_PCIX_CMD_MEMRDBL: 1331 case PCI_PCIX_CMD_MEMWRBL: 1332 pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid; 1333 if (addr) { 1334 pci_fme_bsp->pci_bs_addr = addr; 1335 pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID; 1336 pci_fme_bsp->pci_bs_type = type; 1337 } 1338 break; 1339 case PCI_PCIX_CMD_CFRD: 1340 case PCI_PCIX_CMD_CFWR: 1341 
pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid; 1342 /* 1343 * for type 1 config transaction we can find bdf from address 1344 */ 1345 if ((addr & 3) == 1) { 1346 pci_fme_bsp->pci_bs_bdf = (addr >> 8) & 0xffffffff; 1347 pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID; 1348 pci_fme_bsp->pci_bs_type = ACC_HANDLE; 1349 } 1350 break; 1351 case PCI_PCIX_CMD_SPL: 1352 pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid; 1353 if (type == ACC_HANDLE) { 1354 pci_fme_bsp->pci_bs_bdf = pcie_adv_regs->pcie_adv_bdf; 1355 pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID; 1356 pci_fme_bsp->pci_bs_type = type; 1357 } 1358 break; 1359 case PCI_PCIX_CMD_DADR: 1360 cmd = (pcie_bdg_regs->pcie_sue_hdr[0] >> 1361 PCIE_AER_SUCE_HDR_CMD_UP_SHIFT) & 1362 PCIE_AER_SUCE_HDR_CMD_UP_MASK; 1363 if (dual_addr) 1364 break; 1365 ++dual_addr; 1366 goto cmd_switch; 1367 default: 1368 break; 1369 } 1370 } 1371 1372 /*ARGSUSED*/ 1373 static int 1374 pcix_check_addr(dev_info_t *dip, ddi_fm_error_t *derr, 1375 pcix_ecc_regs_t *pcix_ecc_regs, int type) 1376 { 1377 int cmd = (pcix_ecc_regs->pcix_ecc_ctlstat >> 16) & 0xf; 1378 uint64_t addr; 1379 pci_fme_bus_specific_t *pci_fme_bsp = 1380 (pci_fme_bus_specific_t *)derr->fme_bus_specific; 1381 1382 addr = pcix_ecc_regs->pcix_ecc_secaddr; 1383 addr = addr << 32; 1384 addr |= pcix_ecc_regs->pcix_ecc_fstaddr; 1385 1386 switch (cmd) { 1387 case PCI_PCIX_CMD_INTR: 1388 case PCI_PCIX_CMD_SPEC: 1389 return (DDI_FM_FATAL); 1390 case PCI_PCIX_CMD_IORD: 1391 case PCI_PCIX_CMD_IOWR: 1392 pci_fme_bsp->pci_bs_addr = addr; 1393 pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID; 1394 pci_fme_bsp->pci_bs_type = type; 1395 return (DDI_FM_UNKNOWN); 1396 case PCI_PCIX_CMD_DEVID: 1397 return (DDI_FM_FATAL); 1398 case PCI_PCIX_CMD_MEMRD_DW: 1399 case PCI_PCIX_CMD_MEMWR: 1400 case PCI_PCIX_CMD_MEMRD_BL: 1401 case PCI_PCIX_CMD_MEMWR_BL: 1402 pci_fme_bsp->pci_bs_addr = addr; 1403 pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID; 1404 pci_fme_bsp->pci_bs_type = type; 1405 return (DDI_FM_UNKNOWN); 1406 case PCI_PCIX_CMD_CFRD: 1407 case PCI_PCIX_CMD_CFWR: 1408 /* 1409 * for type 1 config transaction we can find bdf from address 1410 */ 1411 if ((addr & 3) == 1) { 1412 pci_fme_bsp->pci_bs_bdf = (addr >> 8) & 0xffffffff; 1413 pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID; 1414 pci_fme_bsp->pci_bs_type = type; 1415 } 1416 return (DDI_FM_UNKNOWN); 1417 case PCI_PCIX_CMD_SPL: 1418 case PCI_PCIX_CMD_DADR: 1419 return (DDI_FM_UNKNOWN); 1420 case PCI_PCIX_CMD_MEMRDBL: 1421 case PCI_PCIX_CMD_MEMWRBL: 1422 pci_fme_bsp->pci_bs_addr = addr; 1423 pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID; 1424 pci_fme_bsp->pci_bs_type = type; 1425 return (DDI_FM_UNKNOWN); 1426 default: 1427 return (DDI_FM_FATAL); 1428 } 1429 } 1430 1431 /*ARGSUSED*/ 1432 static int 1433 pci_bdg_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p) 1434 { 1435 pci_bdg_error_regs_t *pci_bdg_regs = erpt_p->pe_pci_regs->pci_bdg_regs; 1436 int fatal = 0; 1437 int nonfatal = 0; 1438 int unknown = 0; 1439 int ok = 0; 1440 int ret = DDI_FM_OK; 1441 char buf[FM_MAX_CLASS]; 1442 int i; 1443 pci_fme_bus_specific_t *pci_fme_bsp = 1444 (pci_fme_bus_specific_t *)derr->fme_bus_specific; 1445 1446 if (derr->fme_flag != DDI_FM_ERR_UNEXPECTED) 1447 goto done; 1448 1449 if ((pci_bdg_regs->pci_bdg_vflags & PCI_BDG_CTRL_VALID) && 1450 (pci_bdg_regs->pci_bdg_ctrl & PCI_BCNF_BCNTRL_DTO_STAT)) { 1451 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", 1452 PCI_ERROR_SUBCLASS, PCI_DTO); 1453 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 1454 DDI_NOSLEEP, FM_VERSION, 
DATA_TYPE_UINT8, 0, 1455 PCI_SEC_CONFIG_STATUS, DATA_TYPE_UINT16, 1456 pci_bdg_regs->pci_bdg_sec_stat, PCI_BCNTRL, 1457 DATA_TYPE_UINT16, pci_bdg_regs->pci_bdg_ctrl, NULL); 1458 unknown++; 1459 } 1460 1461 if (pci_bdg_regs->pci_bdg_vflags & PCI_BDG_SEC_STAT_VALID) { 1462 for (i = 0; pci_bdg_err_tbl[i].err_class != NULL; i++) { 1463 if (pci_bdg_regs->pci_bdg_sec_stat & 1464 pci_bdg_err_tbl[i].reg_bit) { 1465 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s-%s", 1466 PCI_ERROR_SUBCLASS, PCI_SEC_ERROR_SUBCLASS, 1467 pci_bdg_err_tbl[i].err_class); 1468 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 1469 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 1470 PCI_SEC_CONFIG_STATUS, DATA_TYPE_UINT16, 1471 pci_bdg_regs->pci_bdg_sec_stat, PCI_BCNTRL, 1472 DATA_TYPE_UINT16, 1473 pci_bdg_regs->pci_bdg_ctrl, NULL); 1474 PCI_FM_SEV_INC(pci_bdg_err_tbl[i].flags); 1475 if (pci_fme_bsp && (pci_fme_bsp->pci_bs_flags & 1476 PCI_BS_ADDR_VALID) && 1477 pci_fme_bsp->pci_bs_type == ACC_HANDLE && 1478 pci_bdg_err_tbl[i].terr_class) 1479 pci_target_enqueue(derr->fme_ena, 1480 pci_bdg_err_tbl[i].terr_class, 1481 PCI_ERROR_SUBCLASS, 1482 pci_fme_bsp->pci_bs_addr); 1483 } 1484 } 1485 #if !defined(__sparc) 1486 /* 1487 * For x86, many drivers and even user-level code currently get 1488 * away with accessing bad addresses, getting a UR and getting 1489 * -1 returned. Unfortunately, we have no control over this, so 1490 * we will have to treat all URs as nonfatal. Moreover, if the 1491 * leaf driver is non-hardened, then we don't actually see the 1492 * UR directly. All we see is a secondary bus master abort at 1493 * the root complex - so it's this condition that we actually 1494 * need to treat as nonfatal (providing no other unrelated nfe 1495 * conditions have also been seen by the root complex). 1496 */ 1497 if ((erpt_p->pe_dflags & PCIEX_RC_DEV) && 1498 (pci_bdg_regs->pci_bdg_sec_stat & PCI_STAT_R_MAST_AB) && 1499 !(pci_bdg_regs->pci_bdg_sec_stat & PCI_STAT_S_PERROR)) { 1500 pcie_error_regs_t *pcie_regs = 1501 (pcie_error_regs_t *)erpt_p->pe_regs; 1502 if ((pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID) && 1503 !(pcie_regs->pcie_err_status & 1504 PCIE_DEVSTS_NFE_DETECTED)) 1505 nonfatal++; 1506 if (erpt_p->pe_dflags & PCIEX_ADV_DEV) { 1507 pcie_adv_error_regs_t *pcie_adv_regs = 1508 pcie_regs->pcie_adv_regs; 1509 pcie_adv_rc_error_regs_t *pcie_rc_regs = 1510 pcie_adv_regs->pcie_adv_rc_regs; 1511 if ((pcie_adv_regs->pcie_adv_vflags & 1512 PCIE_RC_ERR_STATUS_VALID) && 1513 (pcie_rc_regs->pcie_rc_err_status & 1514 PCIE_AER_RE_STS_NFE_MSGS_RCVD)) { 1515 (void) snprintf(buf, FM_MAX_CLASS, 1516 "%s.%s-%s", PCI_ERROR_SUBCLASS, 1517 PCI_SEC_ERROR_SUBCLASS, PCI_MA); 1518 ddi_fm_ereport_post(dip, buf, 1519 derr->fme_ena, DDI_NOSLEEP, 1520 FM_VERSION, DATA_TYPE_UINT8, 0, 1521 PCI_SEC_CONFIG_STATUS, 1522 DATA_TYPE_UINT16, 1523 pci_bdg_regs->pci_bdg_sec_stat, 1524 PCI_BCNTRL, DATA_TYPE_UINT16, 1525 pci_bdg_regs->pci_bdg_ctrl, NULL); 1526 } 1527 } 1528 } 1529 #endif 1530 } 1531 1532 done: 1533 /* 1534 * Need to check for poke and cautious put. We already know peek 1535 * and cautious get errors occurred (as we got a trap) and we know 1536 * they are nonfatal. 1537 */ 1538 if (derr->fme_flag == DDI_FM_ERR_EXPECTED) { 1539 /* 1540 * for cautious puts we treat all errors as nonfatal. 
		 * Actually we set nonfatal for cautious gets as well;
		 * it does no harm.
		 */
		if (pci_bdg_regs->pci_bdg_sec_stat & (PCI_STAT_R_TARG_AB |
		    PCI_STAT_R_MAST_AB | PCI_STAT_S_PERROR |
		    PCI_STAT_S_SYSERR))
			nonfatal++;
	}
	if (derr->fme_flag == DDI_FM_ERR_POKE) {
		/*
		 * Special case for pokes - we only consider master abort
		 * and target abort as nonfatal. SERR with no master abort is
		 * fatal, but master/target abort can come in on a separate
		 * instance, so return unknown and the parent will determine
		 * whether it is nonfatal (if another child returned nonfatal,
		 * i.e. master or target abort) or fatal otherwise.
		 */
		if (pci_bdg_regs->pci_bdg_sec_stat & (PCI_STAT_R_TARG_AB |
		    PCI_STAT_R_MAST_AB))
			nonfatal++;
		if (erpt_p->pe_pci_regs->pci_err_status & PCI_STAT_S_SYSERR)
			unknown++;
	}

	/*
	 * Now check children below the bridge.
	 */
	ret = ndi_fm_handler_dispatch(dip, NULL, derr);
	PCI_FM_SEV_INC(ret);
	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}

static int
pcix_ecc_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
    void *pe_regs)
{
	pcix_error_regs_t *pcix_regs;
	pcix_bdg_error_regs_t *pcix_bdg_regs;
	pcix_ecc_regs_t *pcix_ecc_regs;
	int bridge;
	int i;
	int ecc_phase;
	int ecc_corr;
	int sec_ue;
	int sec_ce;
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;
	char buf[FM_MAX_CLASS];

	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		pcix_bdg_regs = (pcix_bdg_error_regs_t *)pe_regs;
		bridge = 1;
	} else {
		pcix_regs = (pcix_error_regs_t *)pe_regs;
		bridge = 0;
	}

	for (i = 0; i < (bridge ? 2 : 1); i++) {
		int ret = DDI_FM_OK;
		pcix_ecc_regs = bridge ? pcix_bdg_regs->pcix_bdg_ecc_regs[i] :
		    pcix_regs->pcix_ecc_regs;
		if (pcix_ecc_regs->pcix_ecc_vflags & PCIX_ERR_ECC_STS_VALID) {
			ecc_phase = (pcix_ecc_regs->pcix_ecc_ctlstat &
			    PCI_PCIX_ECC_PHASE) >> 0x4;
			ecc_corr = (pcix_ecc_regs->pcix_ecc_ctlstat &
			    PCI_PCIX_ECC_CORR);
			sec_ue = (pcix_ecc_regs->pcix_ecc_ctlstat &
			    PCI_PCIX_ECC_S_UE);
			sec_ce = (pcix_ecc_regs->pcix_ecc_ctlstat &
			    PCI_PCIX_ECC_S_CE);

			switch (ecc_phase) {
			case PCI_PCIX_ECC_PHASE_NOERR:
				break;
			case PCI_PCIX_ECC_PHASE_FADDR:
			case PCI_PCIX_ECC_PHASE_SADDR:
				PCI_FM_SEV_INC(ecc_corr ? DDI_FM_OK :
				    DDI_FM_FATAL);
				(void) snprintf(buf, FM_MAX_CLASS,
				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
				    ecc_corr ? PCIX_ECC_CE_ADDR :
				    PCIX_ECC_UE_ADDR);
				break;
			case PCI_PCIX_ECC_PHASE_ATTR:
				PCI_FM_SEV_INC(ecc_corr ?
				    DDI_FM_OK : DDI_FM_FATAL);
				(void) snprintf(buf, FM_MAX_CLASS,
				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
				    ecc_corr ?
PCIX_ECC_CE_ATTR : 1634 PCIX_ECC_UE_ATTR); 1635 break; 1636 case PCI_PCIX_ECC_PHASE_DATA32: 1637 case PCI_PCIX_ECC_PHASE_DATA64: 1638 if (ecc_corr) 1639 ret = DDI_FM_OK; 1640 else { 1641 int type; 1642 pci_error_regs_t *pci_regs = 1643 erpt_p->pe_pci_regs; 1644 1645 if (i) { 1646 if (pci_regs->pci_bdg_regs-> 1647 pci_bdg_sec_stat & 1648 PCI_STAT_S_PERROR) 1649 type = ACC_HANDLE; 1650 else 1651 type = DMA_HANDLE; 1652 } else { 1653 if (pci_regs->pci_err_status & 1654 PCI_STAT_S_PERROR) 1655 type = DMA_HANDLE; 1656 else 1657 type = ACC_HANDLE; 1658 } 1659 ret = pcix_check_addr(dip, derr, 1660 pcix_ecc_regs, type); 1661 } 1662 PCI_FM_SEV_INC(ret); 1663 1664 (void) snprintf(buf, FM_MAX_CLASS, 1665 "%s.%s%s", PCIX_ERROR_SUBCLASS, 1666 i ? PCIX_SEC_ERROR_SUBCLASS : "", 1667 ecc_corr ? PCIX_ECC_CE_DATA : 1668 PCIX_ECC_UE_DATA); 1669 break; 1670 } 1671 if (ecc_phase) 1672 if (bridge) 1673 ddi_fm_ereport_post(dip, buf, 1674 derr->fme_ena, 1675 DDI_NOSLEEP, FM_VERSION, 1676 DATA_TYPE_UINT8, 0, 1677 PCIX_SEC_STATUS, DATA_TYPE_UINT16, 1678 pcix_bdg_regs->pcix_bdg_sec_stat, 1679 PCIX_BDG_STAT, DATA_TYPE_UINT32, 1680 pcix_bdg_regs->pcix_bdg_stat, 1681 PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32, 1682 pcix_ecc_regs->pcix_ecc_ctlstat, 1683 PCIX_ECC_ATTR, DATA_TYPE_UINT32, 1684 pcix_ecc_regs->pcix_ecc_attr, NULL); 1685 else 1686 ddi_fm_ereport_post(dip, buf, 1687 derr->fme_ena, 1688 DDI_NOSLEEP, FM_VERSION, 1689 DATA_TYPE_UINT8, 0, 1690 PCIX_COMMAND, DATA_TYPE_UINT16, 1691 pcix_regs->pcix_command, 1692 PCIX_STATUS, DATA_TYPE_UINT32, 1693 pcix_regs->pcix_status, 1694 PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32, 1695 pcix_ecc_regs->pcix_ecc_ctlstat, 1696 PCIX_ECC_ATTR, DATA_TYPE_UINT32, 1697 pcix_ecc_regs->pcix_ecc_attr, NULL); 1698 if (sec_ce || sec_ue) { 1699 (void) snprintf(buf, FM_MAX_CLASS, 1700 "%s.%s%s", PCIX_ERROR_SUBCLASS, 1701 i ? PCIX_SEC_ERROR_SUBCLASS : "", 1702 sec_ce ? PCIX_ECC_S_CE : PCIX_ECC_S_UE); 1703 if (bridge) 1704 ddi_fm_ereport_post(dip, buf, 1705 derr->fme_ena, 1706 DDI_NOSLEEP, FM_VERSION, 1707 DATA_TYPE_UINT8, 0, 1708 PCIX_SEC_STATUS, DATA_TYPE_UINT16, 1709 pcix_bdg_regs->pcix_bdg_sec_stat, 1710 PCIX_BDG_STAT, DATA_TYPE_UINT32, 1711 pcix_bdg_regs->pcix_bdg_stat, 1712 PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32, 1713 pcix_ecc_regs->pcix_ecc_ctlstat, 1714 PCIX_ECC_ATTR, DATA_TYPE_UINT32, 1715 pcix_ecc_regs->pcix_ecc_attr, NULL); 1716 else 1717 ddi_fm_ereport_post(dip, buf, 1718 derr->fme_ena, 1719 DDI_NOSLEEP, FM_VERSION, 1720 DATA_TYPE_UINT8, 0, 1721 PCIX_COMMAND, DATA_TYPE_UINT16, 1722 pcix_regs->pcix_command, 1723 PCIX_STATUS, DATA_TYPE_UINT32, 1724 pcix_regs->pcix_status, 1725 PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32, 1726 pcix_ecc_regs->pcix_ecc_ctlstat, 1727 PCIX_ECC_ATTR, DATA_TYPE_UINT32, 1728 pcix_ecc_regs->pcix_ecc_attr, NULL); 1729 PCI_FM_SEV_INC(sec_ue ? DDI_FM_FATAL : 1730 DDI_FM_OK); 1731 } 1732 } 1733 } 1734 return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL : 1735 (unknown ? 
DDI_FM_UNKNOWN : DDI_FM_OK))); 1736 } 1737 1738 static int 1739 pcix_bdg_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p, 1740 void *pe_regs) 1741 { 1742 pcix_bdg_error_regs_t *pcix_bdg_regs = (pcix_bdg_error_regs_t *)pe_regs; 1743 int fatal = 0; 1744 int nonfatal = 0; 1745 int unknown = 0; 1746 int ok = 0; 1747 char buf[FM_MAX_CLASS]; 1748 int i; 1749 1750 if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_STATUS_VALID) { 1751 for (i = 0; pcix_err_tbl[i].err_class != NULL; i++) { 1752 if ((pcix_bdg_regs->pcix_bdg_stat & 1753 pcix_err_tbl[i].reg_bit)) { 1754 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", 1755 PCIX_ERROR_SUBCLASS, 1756 pcix_err_tbl[i].err_class); 1757 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 1758 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 1759 PCIX_SEC_STATUS, DATA_TYPE_UINT16, 1760 pcix_bdg_regs->pcix_bdg_sec_stat, 1761 PCIX_BDG_STAT, DATA_TYPE_UINT32, 1762 pcix_bdg_regs->pcix_bdg_stat, NULL); 1763 PCI_FM_SEV_INC(pcix_err_tbl[i].flags); 1764 } 1765 } 1766 } 1767 1768 if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_SEC_STATUS_VALID) { 1769 for (i = 0; pcix_sec_err_tbl[i].err_class != NULL; i++) { 1770 if ((pcix_bdg_regs->pcix_bdg_sec_stat & 1771 pcix_sec_err_tbl[i].reg_bit)) { 1772 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s%s", 1773 PCIX_ERROR_SUBCLASS, 1774 PCIX_SEC_ERROR_SUBCLASS, 1775 pcix_sec_err_tbl[i].err_class); 1776 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 1777 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 1778 PCIX_SEC_STATUS, DATA_TYPE_UINT16, 1779 pcix_bdg_regs->pcix_bdg_sec_stat, 1780 PCIX_BDG_STAT, DATA_TYPE_UINT32, 1781 pcix_bdg_regs->pcix_bdg_stat, NULL); 1782 PCI_FM_SEV_INC(pcix_sec_err_tbl[i].flags); 1783 } 1784 } 1785 } 1786 1787 /* Log/Handle ECC errors */ 1788 if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) { 1789 int ret; 1790 1791 ret = pcix_ecc_error_report(dip, derr, erpt_p, 1792 (void *)pcix_bdg_regs); 1793 PCI_FM_SEV_INC(ret); 1794 } 1795 return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL : 1796 (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK))); 1797 } 1798 1799 static int 1800 pcix_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p) 1801 { 1802 pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs; 1803 int fatal = 0; 1804 int nonfatal = 0; 1805 int unknown = 0; 1806 int ok = 0; 1807 char buf[FM_MAX_CLASS]; 1808 int i; 1809 1810 if (pcix_regs->pcix_vflags & PCIX_ERR_STATUS_VALID) { 1811 for (i = 0; pcix_err_tbl[i].err_class != NULL; i++) { 1812 if (!(pcix_regs->pcix_status & pcix_err_tbl[i].reg_bit)) 1813 continue; 1814 1815 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", 1816 PCIX_ERROR_SUBCLASS, pcix_err_tbl[i].err_class); 1817 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 1818 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 1819 PCIX_COMMAND, DATA_TYPE_UINT16, 1820 pcix_regs->pcix_command, PCIX_STATUS, 1821 DATA_TYPE_UINT32, pcix_regs->pcix_status, 1822 NULL); 1823 PCI_FM_SEV_INC(pcix_err_tbl[i].flags); 1824 } 1825 } 1826 /* Log/Handle ECC errors */ 1827 if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) { 1828 int ret = pcix_ecc_error_report(dip, derr, erpt_p, 1829 (void *)pcix_regs); 1830 PCI_FM_SEV_INC(ret); 1831 } 1832 1833 return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL : 1834 (unknown ? 
DDI_FM_UNKNOWN : DDI_FM_OK))); 1835 } 1836 1837 static int 1838 pcie_rc_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p, 1839 void *pe_regs) 1840 { 1841 pcie_adv_error_regs_t *pcie_adv_regs = (pcie_adv_error_regs_t *)pe_regs; 1842 int fatal = 0; 1843 int nonfatal = 0; 1844 int unknown = 0; 1845 char buf[FM_MAX_CLASS]; 1846 1847 if (pcie_adv_regs->pcie_adv_vflags & PCIE_RC_ERR_STATUS_VALID) { 1848 pcie_adv_rc_error_regs_t *pcie_rc_regs = 1849 pcie_adv_regs->pcie_adv_rc_regs; 1850 int ce, ue, mult_ce, mult_ue, first_ue_fatal, nfe, fe; 1851 1852 ce = pcie_rc_regs->pcie_rc_err_status & 1853 PCIE_AER_RE_STS_CE_RCVD; 1854 ue = pcie_rc_regs->pcie_rc_err_status & 1855 PCIE_AER_RE_STS_FE_NFE_RCVD; 1856 mult_ce = pcie_rc_regs->pcie_rc_err_status & 1857 PCIE_AER_RE_STS_MUL_CE_RCVD; 1858 mult_ue = pcie_rc_regs->pcie_rc_err_status & 1859 PCIE_AER_RE_STS_MUL_FE_NFE_RCVD; 1860 first_ue_fatal = pcie_rc_regs->pcie_rc_err_status & 1861 PCIE_AER_RE_STS_FIRST_UC_FATAL; 1862 nfe = pcie_rc_regs->pcie_rc_err_status & 1863 PCIE_AER_RE_STS_NFE_MSGS_RCVD; 1864 fe = pcie_rc_regs->pcie_rc_err_status & 1865 PCIE_AER_RE_STS_FE_MSGS_RCVD; 1866 /* 1867 * log fatal/nonfatal/corrected messages 1868 * received by root complex 1869 */ 1870 if (ue && fe) 1871 fatal++; 1872 1873 if (fe && first_ue_fatal) { 1874 (void) snprintf(buf, FM_MAX_CLASS, 1875 "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_FE_MSG); 1876 pcie_ereport_post(dip, derr, erpt_p, buf, 1877 PCIEX_TYPE_RC_UE_MSG); 1878 } 1879 if (nfe && !first_ue_fatal) { 1880 (void) snprintf(buf, FM_MAX_CLASS, 1881 "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_NFE_MSG); 1882 pcie_ereport_post(dip, derr, erpt_p, buf, 1883 PCIEX_TYPE_RC_UE_MSG); 1884 } 1885 if (ce) { 1886 (void) snprintf(buf, FM_MAX_CLASS, 1887 "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_CE_MSG); 1888 pcie_ereport_post(dip, derr, erpt_p, buf, 1889 PCIEX_TYPE_RC_CE_MSG); 1890 } 1891 if (mult_ce) { 1892 (void) snprintf(buf, FM_MAX_CLASS, 1893 "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_MCE_MSG); 1894 pcie_ereport_post(dip, derr, erpt_p, buf, 1895 PCIEX_TYPE_RC_MULT_MSG); 1896 } 1897 if (mult_ue) { 1898 (void) snprintf(buf, FM_MAX_CLASS, 1899 "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_MUE_MSG); 1900 pcie_ereport_post(dip, derr, erpt_p, buf, 1901 PCIEX_TYPE_RC_MULT_MSG); 1902 } 1903 } 1904 return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL : 1905 (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK))); 1906 } 1907 1908 static int 1909 pcie_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p) 1910 { 1911 int fatal = 0; 1912 int nonfatal = 0; 1913 int unknown = 0; 1914 int ok = 0; 1915 int type; 1916 char buf[FM_MAX_CLASS]; 1917 int i; 1918 pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs; 1919 pcie_adv_error_regs_t *pcie_adv_regs; 1920 pcie_adv_bdg_error_regs_t *pcie_bdg_regs; 1921 1922 if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) && 1923 (erpt_p->pe_dflags & PCIX_DEV)) { 1924 int ret = pcix_bdg_error_report(dip, derr, erpt_p, 1925 (void *)pcie_regs->pcix_bdg_regs); 1926 PCI_FM_SEV_INC(ret); 1927 } 1928 1929 if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV)) { 1930 if (!(pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID)) 1931 goto done; 1932 #if !defined(__sparc) 1933 /* 1934 * On x86 ignore UR on non-RBER leaf devices, pciex-pci 1935 * bridges and switches.
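* A UR completion is routinely generated by harmless probes of functions or
* capabilities that are not implemented. A device without Role-Based Error
* Reporting cannot downgrade such a UR to an advisory report, so unless a
* fatal error is also indicated these URs are skipped rather than logged.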
1936 */ 1937 if ((pcie_regs->pcie_err_status & PCIE_DEVSTS_UR_DETECTED) && 1938 !(pcie_regs->pcie_err_status & PCIE_DEVSTS_FE_DETECTED) && 1939 ((erpt_p->pe_dflags & (PCIEX_2PCI_DEV|PCIEX_SWITCH_DEV)) || 1940 !(erpt_p->pe_dflags & PCI_BRIDGE_DEV)) && 1941 !(pcie_regs->pcie_dev_cap & PCIE_DEVCAP_ROLE_BASED_ERR_REP)) 1942 goto done; 1943 #endif 1944 for (i = 0; pciex_nadv_err_tbl[i].err_class != NULL; i++) { 1945 if (!(pcie_regs->pcie_err_status & 1946 pciex_nadv_err_tbl[i].reg_bit)) 1947 continue; 1948 1949 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", 1950 PCIEX_ERROR_SUBCLASS, 1951 pciex_nadv_err_tbl[i].err_class); 1952 pcie_ereport_post(dip, derr, erpt_p, buf, 1953 PCIEX_TYPE_GEN); 1954 PCI_FM_SEV_INC(pciex_nadv_err_tbl[i].flags); 1955 } 1956 goto done; 1957 } 1958 1959 pcie_adv_regs = pcie_regs->pcie_adv_regs; 1960 1961 /* 1962 * Log PCI Express uncorrectable errors 1963 */ 1964 if (pcie_adv_regs->pcie_adv_vflags & PCIE_UE_STATUS_VALID) { 1965 for (i = 0; pciex_ue_err_tbl[i].err_class != NULL; i++) { 1966 if (!(pcie_adv_regs->pcie_ue_status & 1967 pciex_ue_err_tbl[i].reg_bit)) 1968 continue; 1969 1970 (void) snprintf(buf, FM_MAX_CLASS, 1971 "%s.%s", PCIEX_ERROR_SUBCLASS, 1972 pciex_ue_err_tbl[i].err_class); 1973 1974 /* 1975 * First check for advisory nonfatal conditions 1976 * - hardware endpoint successfully retrying a cto 1977 * - hardware endpoint receiving poisoned tlp and 1978 * dealing with it itself (but not if root complex) 1979 * If the device has declared these as correctable 1980 * errors then treat them as such. 1981 */ 1982 if ((pciex_ue_err_tbl[i].reg_bit == PCIE_AER_UCE_TO || 1983 (pciex_ue_err_tbl[i].reg_bit == PCIE_AER_UCE_PTLP && 1984 !(erpt_p->pe_dflags & PCIEX_RC_DEV))) && 1985 (pcie_regs->pcie_err_status & 1986 PCIE_DEVSTS_CE_DETECTED) && 1987 !(pcie_regs->pcie_err_status & 1988 PCIE_DEVSTS_NFE_DETECTED)) { 1989 pcie_ereport_post(dip, derr, erpt_p, buf, 1990 PCIEX_TYPE_UE); 1991 continue; 1992 } 1993 1994 #if !defined(__sparc) 1995 /* 1996 * On x86 for leaf devices and pciex-pci bridges, 1997 * ignore UR on non-RBER devices or on RBER devices when 1998 * advisory nonfatal.
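* Under role-based error reporting an advisory UR is signalled only through
* the correctable-error bit in the device status register, with
* NFE_DETECTED left clear - which is the test applied below.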
1999 */ 2000 if (pciex_ue_err_tbl[i].reg_bit == PCIE_AER_UCE_UR && 2001 ((erpt_p->pe_dflags & 2002 (PCIEX_2PCI_DEV|PCIEX_SWITCH_DEV)) || 2003 !(erpt_p->pe_dflags & PCI_BRIDGE_DEV))) { 2004 if (!(pcie_regs->pcie_dev_cap & 2005 PCIE_DEVCAP_ROLE_BASED_ERR_REP)) 2006 continue; 2007 if (!(pcie_regs->pcie_err_status & 2008 PCIE_DEVSTS_NFE_DETECTED)) 2009 continue; 2010 } 2011 #endif 2012 pcie_adv_regs->pcie_adv_bdf = 0; 2013 /* 2014 * Now try and look up handle if 2015 * - error bit is among PCIE_AER_UCE_LOG_BITS, and 2016 * - no other PCIE_AER_UCE_LOG_BITS are set, and 2017 * - error bit is not masked, and 2018 * - flag is DDI_FM_UNKNOWN 2019 */ 2020 if ((pcie_adv_regs->pcie_ue_status & 2021 pcie_aer_uce_log_bits) == 2022 pciex_ue_err_tbl[i].reg_bit && 2023 !(pciex_ue_err_tbl[i].reg_bit & 2024 pcie_adv_regs->pcie_ue_mask) && 2025 pciex_ue_err_tbl[i].flags == DDI_FM_UNKNOWN) 2026 pcie_check_addr(dip, derr, erpt_p); 2027 2028 PCI_FM_SEV_INC(pciex_ue_err_tbl[i].flags); 2029 pcie_ereport_post(dip, derr, erpt_p, buf, 2030 PCIEX_TYPE_UE); 2031 } 2032 } 2033 2034 /* 2035 * Log PCI Express correctable errors 2036 */ 2037 if (pcie_adv_regs->pcie_adv_vflags & PCIE_CE_STATUS_VALID) { 2038 for (i = 0; pciex_ce_err_tbl[i].err_class != NULL; i++) { 2039 if (!(pcie_adv_regs->pcie_ce_status & 2040 pciex_ce_err_tbl[i].reg_bit)) 2041 continue; 2042 2043 (void) snprintf(buf, FM_MAX_CLASS, 2044 "%s.%s", PCIEX_ERROR_SUBCLASS, 2045 pciex_ce_err_tbl[i].err_class); 2046 pcie_ereport_post(dip, derr, erpt_p, buf, 2047 PCIEX_TYPE_CE); 2048 } 2049 } 2050 2051 if (!(erpt_p->pe_dflags & PCI_BRIDGE_DEV)) 2052 goto done; 2053 2054 if (erpt_p->pe_dflags & PCIEX_RC_DEV) { 2055 int ret = pcie_rc_error_report(dip, derr, erpt_p, 2056 (void *)pcie_adv_regs); 2057 PCI_FM_SEV_INC(ret); 2058 } 2059 2060 if (!((erpt_p->pe_dflags & PCIEX_2PCI_DEV) && 2061 (pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_STATUS_VALID))) 2062 goto done; 2063 2064 pcie_bdg_regs = pcie_adv_regs->pcie_adv_bdg_regs; 2065 2066 for (i = 0; pcie_sue_err_tbl[i].err_class != NULL; i++) { 2067 if ((pcie_bdg_regs->pcie_sue_status & 2068 pcie_sue_err_tbl[i].reg_bit)) { 2069 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", 2070 PCIEX_ERROR_SUBCLASS, 2071 pcie_sue_err_tbl[i].err_class); 2072 2073 if ((pcie_bdg_regs->pcie_sue_status & 2074 pcie_aer_suce_log_bits) != 2075 pcie_sue_err_tbl[i].reg_bit || 2076 pcie_sue_err_tbl[i].flags != DDI_FM_UNKNOWN) { 2077 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 2078 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 2079 PCIEX_SEC_UE_STATUS, DATA_TYPE_UINT32, 2080 pcie_bdg_regs->pcie_sue_status, 2081 #ifdef DEBUG 2082 PCIEX_SUE_HDR0, DATA_TYPE_UINT32, 2083 pcie_bdg_regs->pcie_sue_hdr0, 2084 PCIEX_SUE_HDR1, DATA_TYPE_UINT32, 2085 pcie_bdg_regs->pcie_sue_hdr[0], 2086 PCIEX_SUE_HDR2, DATA_TYPE_UINT32, 2087 pcie_bdg_regs->pcie_sue_hdr[1], 2088 PCIEX_SUE_HDR3, DATA_TYPE_UINT32, 2089 pcie_bdg_regs->pcie_sue_hdr[2], 2090 #endif 2091 NULL); 2092 } else { 2093 pcie_adv_regs->pcie_adv_bdf = 0; 2094 switch (pcie_sue_err_tbl[i].reg_bit) { 2095 case PCIE_AER_SUCE_RCVD_TA: 2096 case PCIE_AER_SUCE_RCVD_MA: 2097 case PCIE_AER_SUCE_USC_ERR: 2098 type = ACC_HANDLE; 2099 break; 2100 case PCIE_AER_SUCE_TA_ON_SC: 2101 case PCIE_AER_SUCE_MA_ON_SC: 2102 type = DMA_HANDLE; 2103 break; 2104 case PCIE_AER_SUCE_UC_DATA_ERR: 2105 case PCIE_AER_SUCE_PERR_ASSERT: 2106 if (erpt_p->pe_pci_regs->pci_bdg_regs-> 2107 pci_bdg_sec_stat & 2108 PCI_STAT_S_PERROR) 2109 type = ACC_HANDLE; 2110 else 2111 type = DMA_HANDLE; 2112 break; 2113 } 2114 pcie_pci_check_addr(dip, derr, erpt_p, 
type); 2115 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 2116 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 2117 PCIEX_SEC_UE_STATUS, DATA_TYPE_UINT32, 2118 pcie_bdg_regs->pcie_sue_status, 2119 PCIEX_SRC_ID, DATA_TYPE_UINT16, 2120 pcie_adv_regs->pcie_adv_bdf, 2121 PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE, 2122 (pcie_adv_regs->pcie_adv_bdf != NULL) ? 2123 1 : NULL, 2124 #ifdef DEBUG 2125 PCIEX_SUE_HDR0, DATA_TYPE_UINT32, 2126 pcie_bdg_regs->pcie_sue_hdr0, 2127 PCIEX_SUE_HDR1, DATA_TYPE_UINT32, 2128 pcie_bdg_regs->pcie_sue_hdr[0], 2129 PCIEX_SUE_HDR2, DATA_TYPE_UINT32, 2130 pcie_bdg_regs->pcie_sue_hdr[1], 2131 PCIEX_SUE_HDR3, DATA_TYPE_UINT32, 2132 pcie_bdg_regs->pcie_sue_hdr[2], 2133 #endif 2134 NULL); 2135 } 2136 PCI_FM_SEV_INC(pcie_sue_err_tbl[i].flags); 2137 } 2138 } 2139 done: 2140 return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL : 2141 (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK))); 2142 } 2143 2144 static void 2145 pci_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p) 2146 { 2147 int fatal = 0; 2148 int nonfatal = 0; 2149 int unknown = 0; 2150 int ok = 0; 2151 char buf[FM_MAX_CLASS]; 2152 int i; 2153 2154 if (derr->fme_flag == DDI_FM_ERR_UNEXPECTED) { 2155 /* 2156 * Log generic PCI errors. 2157 */ 2158 for (i = 0; pci_err_tbl[i].err_class != NULL; i++) { 2159 if (!(erpt_p->pe_pci_regs->pci_err_status & 2160 pci_err_tbl[i].reg_bit) || 2161 !(erpt_p->pe_pci_regs->pci_vflags & 2162 PCI_ERR_STATUS_VALID)) 2163 continue; 2164 /* 2165 * Generate an ereport for this error bit. 2166 */ 2167 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", 2168 PCI_ERROR_SUBCLASS, pci_err_tbl[i].err_class); 2169 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 2170 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 2171 PCI_CONFIG_STATUS, DATA_TYPE_UINT16, 2172 erpt_p->pe_pci_regs->pci_err_status, 2173 PCI_CONFIG_COMMAND, DATA_TYPE_UINT16, 2174 erpt_p->pe_pci_regs->pci_cfg_comm, NULL); 2175 2176 /* 2177 * The meaning of SERR is different for PCIEX (just 2178 * implies a message has been sent) so we don't want to 2179 * treat that one as fatal. 2180 */ 2181 if ((erpt_p->pe_dflags & PCIEX_DEV) && 2182 pci_err_tbl[i].reg_bit == PCI_STAT_S_SYSERR) { 2183 unknown++; 2184 } else { 2185 PCI_FM_SEV_INC(pci_err_tbl[i].flags); 2186 } 2187 } 2188 if (erpt_p->pe_dflags & PCIEX_DEV) { 2189 int ret = pcie_error_report(dip, derr, erpt_p); 2190 PCI_FM_SEV_INC(ret); 2191 } else if (erpt_p->pe_dflags & PCIX_DEV) { 2192 if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) { 2193 int ret = pcix_bdg_error_report(dip, derr, 2194 erpt_p, erpt_p->pe_regs); 2195 PCI_FM_SEV_INC(ret); 2196 } else { 2197 int ret = pcix_error_report(dip, derr, erpt_p); 2198 PCI_FM_SEV_INC(ret); 2199 } 2200 } 2201 } 2202 2203 if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV)) { 2204 int ret = pci_bdg_error_report(dip, derr, erpt_p); 2205 PCI_FM_SEV_INC(ret); 2206 } 2207 2208 if (derr->fme_flag == DDI_FM_ERR_UNEXPECTED) { 2209 pci_fme_bus_specific_t *pci_fme_bsp; 2210 int ret = DDI_FM_UNKNOWN; 2211 2212 pci_fme_bsp = (pci_fme_bus_specific_t *)derr->fme_bus_specific; 2213 if (pci_fme_bsp->pci_bs_flags & PCI_BS_ADDR_VALID) { 2214 ret = ndi_fmc_entry_error(dip, 2215 pci_fme_bsp->pci_bs_type, derr, 2216 (void *)&pci_fme_bsp->pci_bs_addr); 2217 PCI_FM_SEV_INC(ret); 2218 } 2219 /* 2220 * If we didn't find the handle using an addr, try using bdf. 2221 * Note we don't do this where the bdf is for a 2222 * device behind a pciex/pci bridge as the bridge may have 2223 * fabricated the bdf. 
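* (A pciex-pci bridge takes ownership of requests it forwards upstream from
* the conventional PCI bus, so the requester id it supplies need not belong
* to the child device that actually failed.)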
2224 */ 2225 if (ret == DDI_FM_UNKNOWN && 2226 (pci_fme_bsp->pci_bs_flags & PCI_BS_BDF_VALID) && 2227 pci_fme_bsp->pci_bs_bdf == erpt_p->pe_bdf && 2228 (erpt_p->pe_dflags & PCIEX_DEV) && 2229 !(erpt_p->pe_dflags & PCIEX_2PCI_DEV)) { 2230 ret = ndi_fmc_entry_error_all(dip, 2231 pci_fme_bsp->pci_bs_type, derr); 2232 PCI_FM_SEV_INC(ret); 2233 } 2234 } 2235 2236 derr->fme_status = (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL : 2237 (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK))); 2238 } 2239 2240 void 2241 pci_ereport_post(dev_info_t *dip, ddi_fm_error_t *derr, uint16_t *xx_status) 2242 { 2243 struct i_ddi_fmhdl *fmhdl; 2244 pci_erpt_t *erpt_p; 2245 ddi_fm_error_t de; 2246 pci_fme_bus_specific_t pci_fme_bs; 2247 2248 /* 2249 * On PCI Express systems, all error handling and ereport are done via 2250 * the PCIe misc module. This function is a no-op for PCIe Systems. In 2251 * order to tell if a system is a PCI or PCIe system, check that the 2252 * bus_private_data exists. If it exists, this is a PCIe system. 2253 */ 2254 if (ndi_get_bus_private(dip, B_TRUE)) { 2255 derr->fme_status = DDI_FM_OK; 2256 if (xx_status != NULL) 2257 *xx_status = 0x0; 2258 2259 return; 2260 } 2261 2262 fmhdl = DEVI(dip)->devi_fmhdl; 2263 if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) && 2264 !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) { 2265 i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_NOSLEEP); 2266 return; 2267 } 2268 2269 /* 2270 * copy in the ddi_fm_error_t structure in case it's VER0 2271 */ 2272 de.fme_version = derr->fme_version; 2273 de.fme_status = derr->fme_status; 2274 de.fme_flag = derr->fme_flag; 2275 de.fme_ena = derr->fme_ena; 2276 de.fme_acc_handle = derr->fme_acc_handle; 2277 de.fme_dma_handle = derr->fme_dma_handle; 2278 de.fme_bus_specific = derr->fme_bus_specific; 2279 if (derr->fme_version >= DDI_FME_VER1) 2280 de.fme_bus_type = derr->fme_bus_type; 2281 else 2282 de.fme_bus_type = DDI_FME_BUS_TYPE_DFLT; 2283 if (de.fme_bus_type == DDI_FME_BUS_TYPE_DFLT) { 2284 /* 2285 * if this is the first pci device we've found convert 2286 * fme_bus_specific to DDI_FME_BUS_TYPE_PCI 2287 */ 2288 bzero(&pci_fme_bs, sizeof (pci_fme_bs)); 2289 if (de.fme_bus_specific) { 2290 /* 2291 * the cpu passed us an addr - this can be used to look 2292 * up an access handle 2293 */ 2294 pci_fme_bs.pci_bs_addr = (uintptr_t)de.fme_bus_specific; 2295 pci_fme_bs.pci_bs_type = ACC_HANDLE; 2296 pci_fme_bs.pci_bs_flags |= PCI_BS_ADDR_VALID; 2297 } 2298 de.fme_bus_specific = (void *)&pci_fme_bs; 2299 de.fme_bus_type = DDI_FME_BUS_TYPE_PCI; 2300 } 2301 2302 ASSERT(fmhdl); 2303 2304 if (de.fme_ena == NULL) 2305 de.fme_ena = fm_ena_generate(0, FM_ENA_FMT1); 2306 2307 erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific; 2308 if (erpt_p == NULL) 2309 return; 2310 2311 pci_regs_gather(dip, erpt_p, de.fme_flag); 2312 pci_error_report(dip, &de, erpt_p); 2313 pci_regs_clear(erpt_p); 2314 2315 derr->fme_status = de.fme_status; 2316 derr->fme_ena = de.fme_ena; 2317 derr->fme_acc_handle = de.fme_acc_handle; 2318 derr->fme_dma_handle = de.fme_dma_handle; 2319 if (xx_status != NULL) 2320 *xx_status = erpt_p->pe_pci_regs->pci_err_status; 2321 } 2322 2323 /* 2324 * private version of walk_devs() that can be used during panic. No 2325 * sleeping or locking required. 
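* ddi_walk_devs() requires ndi_devi_enter() of the parent node and so may
* block; at panic time the device tree is effectively frozen, so a plain
* recursive walk of the child and sibling pointers is safe.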
2326 */ 2327 static int 2328 pci_fm_walk_devs(dev_info_t *dip, int (*f)(dev_info_t *, void *), void *arg) 2329 { 2330 while (dip) { 2331 switch ((*f)(dip, arg)) { 2332 case DDI_WALK_TERMINATE: 2333 return (DDI_WALK_TERMINATE); 2334 case DDI_WALK_CONTINUE: 2335 if (pci_fm_walk_devs(ddi_get_child(dip), f, 2336 arg) == DDI_WALK_TERMINATE) 2337 return (DDI_WALK_TERMINATE); 2338 break; 2339 case DDI_WALK_PRUNECHILD: 2340 break; 2341 } 2342 dip = ddi_get_next_sibling(dip); 2343 } 2344 return (DDI_WALK_CONTINUE); 2345 } 2346 2347 /* 2348 * need special version of ddi_fm_ereport_post() as the leaf driver may 2349 * not be hardened. 2350 */ 2351 static void 2352 pci_fm_ereport_post(dev_info_t *dip, const char *error_class, uint64_t ena, 2353 uint8_t version, ...) 2354 { 2355 char *name; 2356 char device_path[MAXPATHLEN]; 2357 char ddi_error_class[FM_MAX_CLASS]; 2358 nvlist_t *ereport, *detector; 2359 nv_alloc_t *nva; 2360 errorq_elem_t *eqep; 2361 va_list ap; 2362 2363 if (panicstr) { 2364 eqep = errorq_reserve(ereport_errorq); 2365 if (eqep == NULL) 2366 return; 2367 ereport = errorq_elem_nvl(ereport_errorq, eqep); 2368 nva = errorq_elem_nva(ereport_errorq, eqep); 2369 detector = fm_nvlist_create(nva); 2370 } else { 2371 ereport = fm_nvlist_create(NULL); 2372 detector = fm_nvlist_create(NULL); 2373 } 2374 2375 (void) ddi_pathname(dip, device_path); 2376 fm_fmri_dev_set(detector, FM_DEV_SCHEME_VERSION, NULL, 2377 device_path, NULL); 2378 (void) snprintf(ddi_error_class, FM_MAX_CLASS, "%s.%s", 2379 DDI_IO_CLASS, error_class); 2380 fm_ereport_set(ereport, version, ddi_error_class, ena, detector, NULL); 2381 2382 va_start(ap, version); 2383 name = va_arg(ap, char *); 2384 (void) i_fm_payload_set(ereport, name, ap); 2385 va_end(ap); 2386 2387 if (panicstr) { 2388 errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC); 2389 } else { 2390 (void) fm_ereport_post(ereport, EVCH_TRYHARD); 2391 fm_nvlist_destroy(ereport, FM_NVA_FREE); 2392 fm_nvlist_destroy(detector, FM_NVA_FREE); 2393 } 2394 } 2395 2396 static int 2397 pci_check_regs(dev_info_t *dip, void *arg) 2398 { 2399 int reglen; 2400 int rn; 2401 int totreg; 2402 pci_regspec_t *drv_regp; 2403 pci_target_err_t *tgt_err = (pci_target_err_t *)arg; 2404 2405 if (tgt_err->tgt_pci_space == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) { 2406 /* 2407 * for config space, we need to check if the given address 2408 * is a valid config space address for this device - based 2409 * on pci_phys_hi of the config space entry in reg property. 2410 */ 2411 if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS, 2412 "reg", (caddr_t)&drv_regp, ®len) != DDI_SUCCESS) 2413 return (DDI_WALK_CONTINUE); 2414 2415 totreg = reglen / sizeof (pci_regspec_t); 2416 for (rn = 0; rn < totreg; rn++) { 2417 if (tgt_err->tgt_pci_space == 2418 PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi) && 2419 (tgt_err->tgt_pci_addr & (PCI_REG_BUS_M | 2420 PCI_REG_DEV_M | PCI_REG_FUNC_M)) == 2421 (drv_regp[rn].pci_phys_hi & (PCI_REG_BUS_M | 2422 PCI_REG_DEV_M | PCI_REG_FUNC_M))) { 2423 tgt_err->tgt_dip = dip; 2424 kmem_free(drv_regp, reglen); 2425 return (DDI_WALK_TERMINATE); 2426 } 2427 } 2428 kmem_free(drv_regp, reglen); 2429 } else { 2430 /* 2431 * for non config space, need to check reg to look 2432 * for any non-relocable mapping, otherwise check 2433 * assigned-addresses. 
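* In the OpenFirmware PCI binding phys.hi is laid out as
* npt000ss bbbbbbbb dddddfff rrrrrrrr: the "n" bit marks a fixed,
* non-relocatable mapping whose address is usable straight from "reg",
* while relocatable BARs only have a meaningful address in
* "assigned-addresses" once they have been programmed.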
2434 */ 2435 if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS, 2436 "reg", (caddr_t)&drv_regp, ®len) != DDI_SUCCESS) 2437 return (DDI_WALK_CONTINUE); 2438 2439 totreg = reglen / sizeof (pci_regspec_t); 2440 for (rn = 0; rn < totreg; rn++) { 2441 if ((drv_regp[rn].pci_phys_hi & PCI_RELOCAT_B) && 2442 (tgt_err->tgt_pci_space == TGT_PCI_SPACE_UNKNOWN || 2443 tgt_err->tgt_pci_space == 2444 PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi)) && 2445 (tgt_err->tgt_pci_addr >= 2446 (uint64_t)drv_regp[rn].pci_phys_low + 2447 ((uint64_t)drv_regp[rn].pci_phys_mid << 32)) && 2448 (tgt_err->tgt_pci_addr < 2449 (uint64_t)drv_regp[rn].pci_phys_low + 2450 ((uint64_t)drv_regp[rn].pci_phys_mid << 32) + 2451 (uint64_t)drv_regp[rn].pci_size_low + 2452 ((uint64_t)drv_regp[rn].pci_size_hi << 32))) { 2453 tgt_err->tgt_dip = dip; 2454 kmem_free(drv_regp, reglen); 2455 return (DDI_WALK_TERMINATE); 2456 } 2457 } 2458 kmem_free(drv_regp, reglen); 2459 2460 if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS, 2461 "assigned-addresses", (caddr_t)&drv_regp, ®len) != 2462 DDI_SUCCESS) 2463 return (DDI_WALK_CONTINUE); 2464 2465 totreg = reglen / sizeof (pci_regspec_t); 2466 for (rn = 0; rn < totreg; rn++) { 2467 if ((tgt_err->tgt_pci_space == TGT_PCI_SPACE_UNKNOWN || 2468 tgt_err->tgt_pci_space == 2469 PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi)) && 2470 (tgt_err->tgt_pci_addr >= 2471 (uint64_t)drv_regp[rn].pci_phys_low + 2472 ((uint64_t)drv_regp[rn].pci_phys_mid << 32)) && 2473 (tgt_err->tgt_pci_addr < 2474 (uint64_t)drv_regp[rn].pci_phys_low + 2475 ((uint64_t)drv_regp[rn].pci_phys_mid << 32) + 2476 (uint64_t)drv_regp[rn].pci_size_low + 2477 ((uint64_t)drv_regp[rn].pci_size_hi << 32))) { 2478 tgt_err->tgt_dip = dip; 2479 kmem_free(drv_regp, reglen); 2480 return (DDI_WALK_TERMINATE); 2481 } 2482 } 2483 kmem_free(drv_regp, reglen); 2484 } 2485 return (DDI_WALK_CONTINUE); 2486 } 2487 2488 /* 2489 * impl_fix_ranges - fixes the config space entry of the "ranges" 2490 * property on psycho+ platforms. (if changing this function please make sure 2491 * to change the pci_fix_ranges function in pcipsy.c) 2492 */ 2493 /*ARGSUSED*/ 2494 static void 2495 pci_fix_ranges(dev_info_t *dip, pci_ranges_t *pci_ranges, int nrange) 2496 { 2497 #if defined(__sparc) 2498 char *name = ddi_binding_name(dip); 2499 2500 if ((strcmp(name, "pci108e,8000") == 0) || 2501 (strcmp(name, "pci108e,a000") == 0) || 2502 (strcmp(name, "pci108e,a001") == 0)) { 2503 int i; 2504 for (i = 0; i < nrange; i++, pci_ranges++) 2505 if ((pci_ranges->child_high & PCI_REG_ADDR_M) == 2506 PCI_ADDR_CONFIG) 2507 pci_ranges->parent_low |= 2508 pci_ranges->child_high; 2509 } 2510 #endif 2511 } 2512 2513 static int 2514 pci_check_ranges(dev_info_t *dip, void *arg) 2515 { 2516 uint64_t range_parent_begin; 2517 uint64_t range_parent_size; 2518 uint64_t range_parent_end; 2519 uint32_t space_type; 2520 uint32_t bus_num; 2521 uint32_t range_offset; 2522 pci_ranges_t *pci_ranges, *rangep; 2523 pci_bus_range_t *pci_bus_rangep; 2524 int pci_ranges_length; 2525 int nrange; 2526 pci_target_err_t *tgt_err = (pci_target_err_t *)arg; 2527 int i, size; 2528 if (strcmp(ddi_node_name(dip), "pci") != 0 && 2529 strcmp(ddi_node_name(dip), "pciex") != 0) 2530 return (DDI_WALK_CONTINUE); 2531 2532 /* 2533 * Get the ranges property. Note we only look at the top level pci 2534 * node (hostbridge) which has a ranges property of type pci_ranges_t 2535 * not at pci-pci bridges. 
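* Bridges below the host bridge are assumed to be transparent, so the host
* bridge ranges alone are enough to translate the captured parent (system)
* address into a PCI address and space type.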
2536 */ 2537 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "ranges", 2538 (caddr_t)&pci_ranges, &pci_ranges_length) != DDI_SUCCESS) { 2539 /* 2540 * no ranges property - no translation needed 2541 */ 2542 tgt_err->tgt_pci_addr = tgt_err->tgt_err_addr; 2543 tgt_err->tgt_pci_space = TGT_PCI_SPACE_UNKNOWN; 2544 if (panicstr) 2545 (void) pci_fm_walk_devs(ddi_get_child(dip), 2546 pci_check_regs, (void *)tgt_err); 2547 else { 2548 int circ = 0; 2549 ndi_devi_enter(dip, &circ); 2550 ddi_walk_devs(ddi_get_child(dip), pci_check_regs, 2551 (void *)tgt_err); 2552 ndi_devi_exit(dip, circ); 2553 } 2554 if (tgt_err->tgt_dip != NULL) 2555 return (DDI_WALK_TERMINATE); 2556 return (DDI_WALK_PRUNECHILD); 2557 } 2558 nrange = pci_ranges_length / sizeof (pci_ranges_t); 2559 rangep = pci_ranges; 2560 2561 /* Need to fix the pci ranges property for psycho based systems */ 2562 pci_fix_ranges(dip, pci_ranges, nrange); 2563 2564 for (i = 0; i < nrange; i++, rangep++) { 2565 range_parent_begin = ((uint64_t)rangep->parent_high << 32) + 2566 rangep->parent_low; 2567 range_parent_size = ((uint64_t)rangep->size_high << 32) + 2568 rangep->size_low; 2569 range_parent_end = range_parent_begin + range_parent_size - 1; 2570 2571 if ((tgt_err->tgt_err_addr < range_parent_begin) || 2572 (tgt_err->tgt_err_addr > range_parent_end)) { 2573 /* Not in range */ 2574 continue; 2575 } 2576 space_type = PCI_REG_ADDR_G(rangep->child_high); 2577 if (space_type == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) { 2578 /* Config space address - check bus range */ 2579 range_offset = tgt_err->tgt_err_addr - 2580 range_parent_begin; 2581 bus_num = PCI_REG_BUS_G(range_offset); 2582 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, 2583 DDI_PROP_DONTPASS, "bus-range", 2584 (caddr_t)&pci_bus_rangep, &size) != DDI_SUCCESS) { 2585 continue; 2586 } 2587 if ((bus_num < pci_bus_rangep->lo) || 2588 (bus_num > pci_bus_rangep->hi)) { 2589 /* 2590 * Bus number not appropriate for this 2591 * pci nexus. 2592 */ 2593 kmem_free(pci_bus_rangep, size); 2594 continue; 2595 } 2596 kmem_free(pci_bus_rangep, size); 2597 } 2598 2599 /* We have a match if we get here - compute pci address */ 2600 tgt_err->tgt_pci_addr = tgt_err->tgt_err_addr - 2601 range_parent_begin; 2602 tgt_err->tgt_pci_addr += (((uint64_t)rangep->child_mid << 32) + 2603 rangep->child_low); 2604 tgt_err->tgt_pci_space = space_type; 2605 if (panicstr) 2606 (void) pci_fm_walk_devs(ddi_get_child(dip), 2607 pci_check_regs, (void *)tgt_err); 2608 else { 2609 int circ = 0; 2610 ndi_devi_enter(dip, &circ); 2611 ddi_walk_devs(ddi_get_child(dip), pci_check_regs, 2612 (void *)tgt_err); 2613 ndi_devi_exit(dip, circ); 2614 } 2615 if (tgt_err->tgt_dip != NULL) { 2616 kmem_free(pci_ranges, pci_ranges_length); 2617 return (DDI_WALK_TERMINATE); 2618 } 2619 } 2620 kmem_free(pci_ranges, pci_ranges_length); 2621 return (DDI_WALK_PRUNECHILD); 2622 } 2623 2624 /* 2625 * Function used to drain pci_target_queue, either during panic or after a softint 2626 * is generated, to generate target device ereports based on captured physical 2627 * addresses 2628 */ 2629 /*ARGSUSED*/ 2630 static void 2631 pci_target_drain(void *private_p, pci_target_err_t *tgt_err) 2632 { 2633 char buf[FM_MAX_CLASS]; 2634 2635 /* 2636 * The following assumes that all pci_pci bridge devices 2637 * are configured as transparent. Find the top-level pci 2638 * nexus which has tgt_err_addr in one of its ranges, converting this 2639 * to a pci address in the process.
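* The translation applies the matching entry of that nexus's "ranges"
* property, as implemented by pci_check_ranges() above.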
Then starting at this node do 2640 * another tree walk to find a device with the pci address we've 2641 * found within range of one of its assigned-addresses properties. 2642 */ 2643 tgt_err->tgt_dip = NULL; 2644 if (panicstr) 2645 (void) pci_fm_walk_devs(ddi_root_node(), pci_check_ranges, 2646 (void *)tgt_err); 2647 else 2648 ddi_walk_devs(ddi_root_node(), pci_check_ranges, 2649 (void *)tgt_err); 2650 if (tgt_err->tgt_dip == NULL) 2651 return; 2652 2653 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", tgt_err->tgt_bridge_type, 2654 tgt_err->tgt_err_class); 2655 pci_fm_ereport_post(tgt_err->tgt_dip, buf, tgt_err->tgt_err_ena, 0, 2656 PCI_PA, DATA_TYPE_UINT64, tgt_err->tgt_err_addr, NULL); 2657 } 2658 2659 void 2660 pci_target_enqueue(uint64_t ena, char *class, char *bridge_type, uint64_t addr) 2661 { 2662 pci_target_err_t tgt_err; 2663 2664 tgt_err.tgt_err_ena = ena; 2665 tgt_err.tgt_err_class = class; 2666 tgt_err.tgt_bridge_type = bridge_type; 2667 tgt_err.tgt_err_addr = addr; 2668 errorq_dispatch(pci_target_queue, (void *)&tgt_err, 2669 sizeof (pci_target_err_t), ERRORQ_ASYNC); 2670 } 2671 2672 void 2673 pci_targetq_init(void) 2674 { 2675 /* 2676 * PCI target errorq, to schedule async handling of generation of 2677 * target device ereports based on captured physical address. 2678 * The errorq is created here but destroyed when _fini is called 2679 * for the pci module. 2680 */ 2681 if (pci_target_queue == NULL) { 2682 pci_target_queue = errorq_create("pci_target_queue", 2683 (errorq_func_t)pci_target_drain, (void *)NULL, 2684 TARGET_MAX_ERRS, sizeof (pci_target_err_t), FM_ERR_PIL, 2685 ERRORQ_VITAL); 2686 if (pci_target_queue == NULL) 2687 panic("failed to create required system error queue"); 2688 } 2689 } 2690
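/*
 * Usage sketch (illustrative only - the xbridge_* functions and the
 * argument values passed to pci_target_enqueue() below are hypothetical;
 * only the two entry points and their signatures come from this file).
 * A host bridge nexus driver that captures the physical address of a
 * faulted transaction can defer target ereport generation as follows:
 *
 *	static int
 *	xbridge_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
 *	{
 *		...
 *		pci_targetq_init();	(creates pci_target_queue once)
 *		return (DDI_SUCCESS);
 *	}
 *
 *	static uint_t
 *	xbridge_err_intr(caddr_t arg)
 *	{
 *		uint64_t ena = fm_ena_generate(0, FM_ENA_FMT1);
 *		uint64_t pa = ...;	(captured target physical address)
 *
 *		pci_target_enqueue(ena, PCI_TARG_MA, PCI_ERROR_SUBCLASS, pa);
 *		return (DDI_INTR_CLAIMED);
 *	}
 *
 * pci_target_drain() later walks the device tree to resolve pa to the dip
 * whose "reg"/"assigned-addresses" properties cover it and posts a
 * "<bridge_type>.<class>" ereport carrying the PCI_PA payload.
 */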