1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 #include <sys/types.h> 30 #include <sys/sunndi.h> 31 #include <sys/sysmacros.h> 32 #include <sys/ddifm_impl.h> 33 #include <sys/fm/util.h> 34 #include <sys/fm/protocol.h> 35 #include <sys/fm/io/pci.h> 36 #include <sys/fm/io/ddi.h> 37 #include <sys/pci.h> 38 #include <sys/pcie.h> 39 #include <sys/pci_impl.h> 40 #include <sys/epm.h> 41 #include <sys/pcifm.h> 42 43 #define PCIX_ECC_VER_CHECK(x) (((x) == PCI_PCIX_VER_1) ||\ 44 ((x) == PCI_PCIX_VER_2)) 45 46 /* 47 * Expected PCI Express error mask values 48 */ 49 uint32_t pcie_expected_ce_mask = PCIE_AER_CE_AD_NFE; 50 uint32_t pcie_expected_ue_mask = 0x0; 51 uint32_t pcie_expected_sue_mask = 0x0; 52 53 errorq_t *pci_target_queue = NULL; 54 55 pci_fm_err_t pci_err_tbl[] = { 56 PCI_DET_PERR, PCI_STAT_PERROR, NULL, DDI_FM_UNKNOWN, 57 PCI_MDPE, PCI_STAT_S_PERROR, PCI_TARG_MDPE, DDI_FM_UNKNOWN, 58 PCI_SIG_SERR, PCI_STAT_S_SYSERR, NULL, DDI_FM_FATAL, 59 PCI_MA, PCI_STAT_R_MAST_AB, PCI_TARG_MA, DDI_FM_UNKNOWN, 60 PCI_REC_TA, 
PCI_STAT_R_TARG_AB, PCI_TARG_REC_TA, DDI_FM_UNKNOWN, 61 PCI_SIG_TA, PCI_STAT_S_TARG_AB, NULL, DDI_FM_UNKNOWN, 62 NULL, NULL, NULL, NULL, 63 }; 64 65 pci_fm_err_t pci_bdg_err_tbl[] = { 66 PCI_DET_PERR, PCI_STAT_PERROR, NULL, DDI_FM_UNKNOWN, 67 PCI_MDPE, PCI_STAT_S_PERROR, PCI_TARG_MDPE, DDI_FM_UNKNOWN, 68 PCI_REC_SERR, PCI_STAT_S_SYSERR, NULL, DDI_FM_UNKNOWN, 69 PCI_MA, PCI_STAT_R_MAST_AB, PCI_TARG_MA, DDI_FM_UNKNOWN, 70 PCI_REC_TA, PCI_STAT_R_TARG_AB, PCI_TARG_REC_TA, DDI_FM_UNKNOWN, 71 PCI_SIG_TA, PCI_STAT_S_TARG_AB, NULL, DDI_FM_UNKNOWN, 72 NULL, NULL, NULL, NULL, 73 }; 74 75 static pci_fm_err_t pciex_ce_err_tbl[] = { 76 PCIEX_RE, PCIE_AER_CE_RECEIVER_ERR, NULL, DDI_FM_NONFATAL, 77 PCIEX_RNR, PCIE_AER_CE_REPLAY_ROLLOVER, NULL, DDI_FM_NONFATAL, 78 PCIEX_RTO, PCIE_AER_CE_REPLAY_TO, NULL, DDI_FM_NONFATAL, 79 PCIEX_BDP, PCIE_AER_CE_BAD_DLLP, NULL, DDI_FM_NONFATAL, 80 PCIEX_BTP, PCIE_AER_CE_BAD_TLP, NULL, DDI_FM_NONFATAL, 81 PCIEX_ANFE, PCIE_AER_CE_AD_NFE, NULL, DDI_FM_NONFATAL, 82 NULL, NULL, NULL, NULL, 83 }; 84 85 static pci_fm_err_t pciex_ue_err_tbl[] = { 86 PCIEX_TE, PCIE_AER_UCE_TRAINING, NULL, DDI_FM_FATAL, 87 PCIEX_DLP, PCIE_AER_UCE_DLP, NULL, DDI_FM_FATAL, 88 PCIEX_SD, PCIE_AER_UCE_SD, NULL, DDI_FM_FATAL, 89 PCIEX_ROF, PCIE_AER_UCE_RO, NULL, DDI_FM_FATAL, 90 PCIEX_FCP, PCIE_AER_UCE_FCP, NULL, DDI_FM_FATAL, 91 PCIEX_MFP, PCIE_AER_UCE_MTLP, NULL, DDI_FM_FATAL, 92 PCIEX_CTO, PCIE_AER_UCE_TO, NULL, DDI_FM_NONFATAL, 93 PCIEX_UC, PCIE_AER_UCE_UC, NULL, DDI_FM_NONFATAL, 94 PCIEX_ECRC, PCIE_AER_UCE_ECRC, NULL, DDI_FM_UNKNOWN, 95 PCIEX_CA, PCIE_AER_UCE_CA, NULL, DDI_FM_UNKNOWN, 96 PCIEX_UR, PCIE_AER_UCE_UR, NULL, DDI_FM_NONFATAL, 97 PCIEX_POIS, PCIE_AER_UCE_PTLP, NULL, DDI_FM_UNKNOWN, 98 NULL, NULL, NULL, NULL, 99 }; 100 101 static pci_fm_err_t pcie_sue_err_tbl[] = { 102 PCIEX_S_TA_SC, PCIE_AER_SUCE_TA_ON_SC, NULL, DDI_FM_UNKNOWN, 103 PCIEX_S_MA_SC, PCIE_AER_SUCE_MA_ON_SC, NULL, DDI_FM_UNKNOWN, 104 PCIEX_S_RTA, PCIE_AER_SUCE_RCVD_TA, NULL, DDI_FM_UNKNOWN, 105 
PCIEX_S_RMA, PCIE_AER_SUCE_RCVD_MA, NULL, DDI_FM_UNKNOWN, 106 PCIEX_S_USC, PCIE_AER_SUCE_USC_ERR, NULL, DDI_FM_UNKNOWN, 107 PCIEX_S_USCMD, PCIE_AER_SUCE_USC_MSG_DATA_ERR, NULL, DDI_FM_FATAL, 108 PCIEX_S_UDE, PCIE_AER_SUCE_UC_DATA_ERR, NULL, DDI_FM_UNKNOWN, 109 PCIEX_S_UAT, PCIE_AER_SUCE_UC_ATTR_ERR, NULL, DDI_FM_FATAL, 110 PCIEX_S_UADR, PCIE_AER_SUCE_UC_ADDR_ERR, NULL, DDI_FM_FATAL, 111 PCIEX_S_TEX, PCIE_AER_SUCE_TIMER_EXPIRED, NULL, DDI_FM_FATAL, 112 PCIEX_S_PERR, PCIE_AER_SUCE_PERR_ASSERT, NULL, DDI_FM_UNKNOWN, 113 PCIEX_S_SERR, PCIE_AER_SUCE_SERR_ASSERT, NULL, DDI_FM_FATAL, 114 PCIEX_INTERR, PCIE_AER_SUCE_INTERNAL_ERR, NULL, DDI_FM_FATAL, 115 NULL, NULL, NULL, NULL, 116 }; 117 118 static pci_fm_err_t pcix_err_tbl[] = { 119 PCIX_SPL_DIS, PCI_PCIX_SPL_DSCD, NULL, DDI_FM_UNKNOWN, 120 PCIX_UNEX_SPL, PCI_PCIX_UNEX_SPL, NULL, DDI_FM_UNKNOWN, 121 PCIX_RX_SPL_MSG, PCI_PCIX_RX_SPL_MSG, NULL, DDI_FM_UNKNOWN, 122 NULL, NULL, NULL, NULL, 123 }; 124 125 static pci_fm_err_t pcix_sec_err_tbl[] = { 126 PCIX_SPL_DIS, PCI_PCIX_BSS_SPL_DSCD, NULL, DDI_FM_UNKNOWN, 127 PCIX_UNEX_SPL, PCI_PCIX_BSS_UNEX_SPL, NULL, DDI_FM_UNKNOWN, 128 PCIX_BSS_SPL_OR, PCI_PCIX_BSS_SPL_OR, NULL, DDI_FM_NONFATAL, 129 PCIX_BSS_SPL_DLY, PCI_PCIX_BSS_SPL_DLY, NULL, DDI_FM_NONFATAL, 130 NULL, NULL, NULL, NULL, 131 }; 132 133 static pci_fm_err_t pciex_nadv_err_tbl[] = { 134 PCIEX_UR, PCIE_DEVSTS_UR_DETECTED, NULL, DDI_FM_UNKNOWN, 135 PCIEX_FAT, PCIE_DEVSTS_FE_DETECTED, NULL, DDI_FM_FATAL, 136 PCIEX_NONFAT, PCIE_DEVSTS_NFE_DETECTED, NULL, DDI_FM_UNKNOWN, 137 PCIEX_CORR, PCIE_DEVSTS_CE_DETECTED, NULL, DDI_FM_NONFATAL, 138 NULL, NULL, NULL, NULL, 139 }; 140 141 static int 142 pci_config_check(ddi_acc_handle_t handle) 143 { 144 ddi_acc_hdl_t *hp = impl_acc_hdl_get(handle); 145 ddi_fm_error_t de; 146 147 if (!(DDI_FM_ACC_ERR_CAP(ddi_fm_capable(hp->ah_dip)))) 148 return (DDI_FM_OK); 149 150 de.fme_version = DDI_FME_VERSION; 151 152 ddi_fm_acc_err_get(handle, &de, de.fme_version); 153 if (de.fme_status != DDI_FM_OK) 
{ 154 char buf[FM_MAX_CLASS]; 155 156 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", PCI_ERROR_SUBCLASS, 157 PCI_NR); 158 ddi_fm_ereport_post(hp->ah_dip, buf, de.fme_ena, DDI_NOSLEEP, 159 FM_VERSION, DATA_TYPE_UINT8, 0, NULL); 160 ddi_fm_acc_err_clear(handle, de.fme_version); 161 } 162 return (de.fme_status); 163 } 164 165 static void 166 pcix_ecc_regs_gather(pci_erpt_t *erpt_p, pcix_ecc_regs_t *pcix_ecc_regs, 167 uint8_t pcix_cap_ptr) 168 { 169 int bdg = erpt_p->pe_dflags & PCI_BRIDGE_DEV; 170 171 pcix_ecc_regs->pcix_ecc_ctlstat = pci_config_get32(erpt_p->pe_hdl, 172 (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_STATUS : 173 PCI_PCIX_ECC_STATUS))); 174 if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) 175 pcix_ecc_regs->pcix_ecc_vflags |= PCIX_ERR_ECC_STS_VALID; 176 else 177 return; 178 pcix_ecc_regs->pcix_ecc_fstaddr = pci_config_get32(erpt_p->pe_hdl, 179 (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_FST_AD : 180 PCI_PCIX_ECC_FST_AD))); 181 pcix_ecc_regs->pcix_ecc_secaddr = pci_config_get32(erpt_p->pe_hdl, 182 (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_SEC_AD : 183 PCI_PCIX_ECC_SEC_AD))); 184 pcix_ecc_regs->pcix_ecc_attr = pci_config_get32(( 185 ddi_acc_handle_t)erpt_p->pe_hdl, 186 (pcix_cap_ptr + (bdg ? 
PCI_PCIX_BDG_ECC_ATTR : PCI_PCIX_ECC_ATTR))); 187 } 188 189 static void 190 pcix_regs_gather(pci_erpt_t *erpt_p, void *pe_regs) 191 { 192 if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) { 193 pcix_bdg_error_regs_t *pcix_bdg_regs = 194 (pcix_bdg_error_regs_t *)pe_regs; 195 uint8_t pcix_bdg_cap_ptr; 196 int i; 197 198 pcix_bdg_cap_ptr = pcix_bdg_regs->pcix_bdg_cap_ptr; 199 pcix_bdg_regs->pcix_bdg_sec_stat = pci_config_get16( 200 erpt_p->pe_hdl, (pcix_bdg_cap_ptr + PCI_PCIX_SEC_STATUS)); 201 if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) 202 pcix_bdg_regs->pcix_bdg_vflags |= 203 PCIX_BDG_SEC_STATUS_VALID; 204 else 205 return; 206 pcix_bdg_regs->pcix_bdg_stat = pci_config_get32(erpt_p->pe_hdl, 207 (pcix_bdg_cap_ptr + PCI_PCIX_BDG_STATUS)); 208 if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) 209 pcix_bdg_regs->pcix_bdg_vflags |= PCIX_BDG_STATUS_VALID; 210 else 211 return; 212 if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) { 213 pcix_ecc_regs_t *pcix_bdg_ecc_regs; 214 /* 215 * PCI Express to PCI-X bridges only implement the 216 * secondary side of the PCI-X ECC registers, bit one is 217 * read-only so we make sure we do not write to it. 
218 */ 219 if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) { 220 pcix_bdg_ecc_regs = 221 pcix_bdg_regs->pcix_bdg_ecc_regs[1]; 222 pcix_ecc_regs_gather(erpt_p, pcix_bdg_ecc_regs, 223 pcix_bdg_cap_ptr); 224 } else { 225 for (i = 0; i < 2; i++) { 226 pcix_bdg_ecc_regs = 227 pcix_bdg_regs->pcix_bdg_ecc_regs[i]; 228 pci_config_put32(erpt_p->pe_hdl, 229 (pcix_bdg_cap_ptr + 230 PCI_PCIX_BDG_ECC_STATUS), i); 231 pcix_ecc_regs_gather(erpt_p, 232 pcix_bdg_ecc_regs, 233 pcix_bdg_cap_ptr); 234 } 235 } 236 } 237 } else { 238 pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)pe_regs; 239 uint8_t pcix_cap_ptr; 240 241 pcix_cap_ptr = pcix_regs->pcix_cap_ptr; 242 243 pcix_regs->pcix_command = pci_config_get16(erpt_p->pe_hdl, 244 (pcix_cap_ptr + PCI_PCIX_COMMAND)); 245 pcix_regs->pcix_status = pci_config_get32(erpt_p->pe_hdl, 246 (pcix_cap_ptr + PCI_PCIX_STATUS)); 247 if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) 248 pcix_regs->pcix_vflags |= PCIX_ERR_STATUS_VALID; 249 else 250 return; 251 if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) { 252 pcix_ecc_regs_t *pcix_ecc_regs = 253 pcix_regs->pcix_ecc_regs; 254 255 pcix_ecc_regs_gather(erpt_p, pcix_ecc_regs, 256 pcix_cap_ptr); 257 } 258 } 259 } 260 261 static void 262 pcie_regs_gather(pci_erpt_t *erpt_p) 263 { 264 pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs; 265 uint8_t pcie_cap_ptr; 266 pcie_adv_error_regs_t *pcie_adv_regs; 267 uint16_t pcie_ecap_ptr; 268 269 pcie_cap_ptr = pcie_regs->pcie_cap_ptr; 270 271 pcie_regs->pcie_err_status = pci_config_get16(erpt_p->pe_hdl, 272 pcie_cap_ptr + PCIE_DEVSTS); 273 if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) 274 pcie_regs->pcie_vflags |= PCIE_ERR_STATUS_VALID; 275 else 276 return; 277 278 pcie_regs->pcie_err_ctl = pci_config_get16(erpt_p->pe_hdl, 279 (pcie_cap_ptr + PCIE_DEVCTL)); 280 281 if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) && (erpt_p->pe_dflags & 282 PCIX_DEV)) 283 pcix_regs_gather(erpt_p, pcie_regs->pcix_bdg_regs); 284 285 if (erpt_p->pe_dflags & PCIEX_RC_DEV) { 
286 pcie_rc_error_regs_t *pcie_rc_regs = pcie_regs->pcie_rc_regs; 287 288 pcie_rc_regs->pcie_rc_status = pci_config_get32(erpt_p->pe_hdl, 289 (pcie_cap_ptr + PCIE_ROOTSTS)); 290 pcie_rc_regs->pcie_rc_ctl = pci_config_get16(erpt_p->pe_hdl, 291 (pcie_cap_ptr + PCIE_ROOTCTL)); 292 } 293 294 if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV)) 295 return; 296 297 pcie_adv_regs = pcie_regs->pcie_adv_regs; 298 299 pcie_ecap_ptr = pcie_adv_regs->pcie_adv_cap_ptr; 300 301 pcie_adv_regs->pcie_ue_status = pci_config_get32(erpt_p->pe_hdl, 302 pcie_ecap_ptr + PCIE_AER_UCE_STS); 303 if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) 304 pcie_adv_regs->pcie_adv_vflags |= PCIE_UE_STATUS_VALID; 305 306 pcie_adv_regs->pcie_ue_mask = pci_config_get32(erpt_p->pe_hdl, 307 pcie_ecap_ptr + PCIE_AER_UCE_MASK); 308 pcie_adv_regs->pcie_ue_sev = pci_config_get32(erpt_p->pe_hdl, 309 pcie_ecap_ptr + PCIE_AER_UCE_SERV); 310 pcie_adv_regs->pcie_adv_ctl = pci_config_get32(erpt_p->pe_hdl, 311 pcie_ecap_ptr + PCIE_AER_CTL); 312 pcie_adv_regs->pcie_ue_hdr0 = pci_config_get32(erpt_p->pe_hdl, 313 pcie_ecap_ptr + PCIE_AER_HDR_LOG); 314 if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) { 315 int i; 316 pcie_adv_regs->pcie_adv_vflags |= PCIE_UE_HDR_VALID; 317 318 for (i = 0; i < 3; i++) { 319 pcie_adv_regs->pcie_ue_hdr[i] = pci_config_get32( 320 erpt_p->pe_hdl, pcie_ecap_ptr + PCIE_AER_HDR_LOG + 321 (4 * (i + 1))); 322 } 323 } 324 325 pcie_adv_regs->pcie_ce_status = pci_config_get32(erpt_p->pe_hdl, 326 pcie_ecap_ptr + PCIE_AER_CE_STS); 327 if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) 328 pcie_adv_regs->pcie_adv_vflags |= PCIE_CE_STATUS_VALID; 329 330 pcie_adv_regs->pcie_ce_mask = pci_config_get32(erpt_p->pe_hdl, 331 pcie_ecap_ptr + PCIE_AER_CE_MASK); 332 333 /* 334 * If pci express to pci bridge then grab the bridge 335 * error registers. 
336 */ 337 if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) { 338 pcie_adv_bdg_error_regs_t *pcie_bdg_regs = 339 pcie_adv_regs->pcie_adv_bdg_regs; 340 341 pcie_bdg_regs->pcie_sue_status = 342 pci_config_get32(erpt_p->pe_hdl, 343 pcie_ecap_ptr + PCIE_AER_SUCE_STS); 344 if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) 345 pcie_adv_regs->pcie_adv_vflags |= PCIE_SUE_STATUS_VALID; 346 pcie_bdg_regs->pcie_sue_hdr0 = pci_config_get32(erpt_p->pe_hdl, 347 (pcie_ecap_ptr + PCIE_AER_SHDR_LOG)); 348 349 if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) { 350 int i; 351 352 pcie_adv_regs->pcie_adv_vflags |= PCIE_SUE_HDR_VALID; 353 354 for (i = 0; i < 3; i++) { 355 pcie_bdg_regs->pcie_sue_hdr[i] = 356 pci_config_get32(erpt_p->pe_hdl, 357 pcie_ecap_ptr + PCIE_AER_SHDR_LOG + 358 (4 * (i + 1))); 359 } 360 } 361 } 362 /* 363 * If PCI Express root complex then grab the root complex 364 * error registers. 365 */ 366 if (erpt_p->pe_dflags & PCIEX_RC_DEV) { 367 pcie_adv_rc_error_regs_t *pcie_rc_regs = 368 pcie_adv_regs->pcie_adv_rc_regs; 369 370 pcie_rc_regs->pcie_rc_err_cmd = pci_config_get32(erpt_p->pe_hdl, 371 (pcie_ecap_ptr + PCIE_AER_RE_CMD)); 372 pcie_rc_regs->pcie_rc_err_status = 373 pci_config_get32(erpt_p->pe_hdl, 374 (pcie_ecap_ptr + PCIE_AER_RE_STS)); 375 if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) 376 pcie_adv_regs->pcie_adv_vflags |= 377 PCIE_RC_ERR_STATUS_VALID; 378 pcie_rc_regs->pcie_rc_ce_src_id = 379 pci_config_get16(erpt_p->pe_hdl, 380 (pcie_ecap_ptr + PCIE_AER_CE_SRC_ID)); 381 pcie_rc_regs->pcie_rc_ue_src_id = 382 pci_config_get16(erpt_p->pe_hdl, 383 (pcie_ecap_ptr + PCIE_AER_ERR_SRC_ID)); 384 if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) 385 pcie_adv_regs->pcie_adv_vflags |= PCIE_SRC_ID_VALID; 386 } 387 } 388 389 /*ARGSUSED*/ 390 static void 391 pci_regs_gather(dev_info_t *dip, pci_erpt_t *erpt_p) 392 { 393 pci_error_regs_t *pci_regs = erpt_p->pe_pci_regs; 394 395 /* 396 * Start by reading all the error registers that are available for 397 * pci and pci express 
and for leaf devices and bridges/switches 398 */ 399 pci_regs->pci_err_status = pci_config_get16(erpt_p->pe_hdl, 400 PCI_CONF_STAT); 401 if (pci_config_check(erpt_p->pe_hdl) != DDI_FM_OK) 402 return; 403 pci_regs->pci_vflags |= PCI_ERR_STATUS_VALID; 404 pci_regs->pci_cfg_comm = pci_config_get16(erpt_p->pe_hdl, 405 PCI_CONF_COMM); 406 if (pci_config_check(erpt_p->pe_hdl) != DDI_FM_OK) 407 return; 408 409 /* 410 * If pci-pci bridge grab PCI bridge specific error registers. 411 */ 412 if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) { 413 pci_regs->pci_bdg_regs->pci_bdg_sec_stat = 414 pci_config_get16(erpt_p->pe_hdl, PCI_BCNF_SEC_STATUS); 415 if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) 416 pci_regs->pci_bdg_regs->pci_bdg_vflags |= 417 PCI_BDG_SEC_STAT_VALID; 418 pci_regs->pci_bdg_regs->pci_bdg_ctrl = 419 pci_config_get16(erpt_p->pe_hdl, PCI_BCNF_BCNTRL); 420 if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) 421 pci_regs->pci_bdg_regs->pci_bdg_vflags |= 422 PCI_BDG_CTRL_VALID; 423 } 424 425 /* 426 * If pci express device grab pci express error registers and 427 * check for advanced error reporting features and grab them if 428 * available. 
429 */ 430 if (erpt_p->pe_dflags & PCIEX_DEV) 431 pcie_regs_gather(erpt_p); 432 else if (erpt_p->pe_dflags & PCIX_DEV) 433 pcix_regs_gather(erpt_p, erpt_p->pe_regs); 434 435 } 436 437 static void 438 pcix_regs_clear(pci_erpt_t *erpt_p, void *pe_regs) 439 { 440 if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) { 441 pcix_bdg_error_regs_t *pcix_bdg_regs = 442 (pcix_bdg_error_regs_t *)pe_regs; 443 uint8_t pcix_bdg_cap_ptr; 444 int i; 445 446 pcix_bdg_cap_ptr = pcix_bdg_regs->pcix_bdg_cap_ptr; 447 448 if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_SEC_STATUS_VALID) 449 pci_config_put16(erpt_p->pe_hdl, 450 (pcix_bdg_cap_ptr + PCI_PCIX_SEC_STATUS), 451 pcix_bdg_regs->pcix_bdg_sec_stat); 452 453 if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_STATUS_VALID) 454 pci_config_put32(erpt_p->pe_hdl, 455 (pcix_bdg_cap_ptr + PCI_PCIX_BDG_STATUS), 456 pcix_bdg_regs->pcix_bdg_stat); 457 458 pcix_bdg_regs->pcix_bdg_vflags = 0x0; 459 460 if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) { 461 pcix_ecc_regs_t *pcix_bdg_ecc_regs; 462 /* 463 * PCI Express to PCI-X bridges only implement the 464 * secondary side of the PCI-X ECC registers, bit one is 465 * read-only so we make sure we do not write to it. 
466 */ 467 if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) { 468 pcix_bdg_ecc_regs = 469 pcix_bdg_regs->pcix_bdg_ecc_regs[1]; 470 471 if (pcix_bdg_ecc_regs->pcix_ecc_vflags & 472 PCIX_ERR_ECC_STS_VALID) { 473 474 pci_config_put32(erpt_p->pe_hdl, 475 (pcix_bdg_cap_ptr + 476 PCI_PCIX_BDG_ECC_STATUS), 477 pcix_bdg_ecc_regs-> 478 pcix_ecc_ctlstat); 479 } 480 pcix_bdg_ecc_regs->pcix_ecc_vflags = 0x0; 481 } else { 482 for (i = 0; i < 2; i++) { 483 pcix_bdg_ecc_regs = 484 pcix_bdg_regs->pcix_bdg_ecc_regs[i]; 485 486 487 if (pcix_bdg_ecc_regs->pcix_ecc_vflags & 488 PCIX_ERR_ECC_STS_VALID) { 489 pci_config_put32(erpt_p->pe_hdl, 490 (pcix_bdg_cap_ptr + 491 PCI_PCIX_BDG_ECC_STATUS), 492 i); 493 494 pci_config_put32(erpt_p->pe_hdl, 495 (pcix_bdg_cap_ptr + 496 PCI_PCIX_BDG_ECC_STATUS), 497 pcix_bdg_ecc_regs-> 498 pcix_ecc_ctlstat); 499 } 500 pcix_bdg_ecc_regs->pcix_ecc_vflags = 501 0x0; 502 } 503 } 504 } 505 } else { 506 pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)pe_regs; 507 uint8_t pcix_cap_ptr; 508 509 pcix_cap_ptr = pcix_regs->pcix_cap_ptr; 510 511 if (pcix_regs->pcix_vflags & PCIX_ERR_STATUS_VALID) 512 pci_config_put32(erpt_p->pe_hdl, 513 (pcix_cap_ptr + PCI_PCIX_STATUS), 514 pcix_regs->pcix_status); 515 516 pcix_regs->pcix_vflags = 0x0; 517 518 if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) { 519 pcix_ecc_regs_t *pcix_ecc_regs = 520 pcix_regs->pcix_ecc_regs; 521 522 if (pcix_ecc_regs->pcix_ecc_vflags & 523 PCIX_ERR_ECC_STS_VALID) 524 pci_config_put32(erpt_p->pe_hdl, 525 (pcix_cap_ptr + PCI_PCIX_ECC_STATUS), 526 pcix_ecc_regs->pcix_ecc_ctlstat); 527 528 pcix_ecc_regs->pcix_ecc_vflags = 0x0; 529 } 530 } 531 } 532 533 static void 534 pcie_regs_clear(pci_erpt_t *erpt_p) 535 { 536 pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs; 537 uint8_t pcie_cap_ptr; 538 pcie_adv_error_regs_t *pcie_adv_regs; 539 uint16_t pcie_ecap_ptr; 540 541 pcie_cap_ptr = pcie_regs->pcie_cap_ptr; 542 543 if (pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID) 544 
pci_config_put16(erpt_p->pe_hdl, pcie_cap_ptr + PCIE_DEVSTS, 545 pcie_regs->pcie_err_status); 546 547 pcie_regs->pcie_vflags = 0x0; 548 549 if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) && 550 (erpt_p->pe_dflags & PCIX_DEV)) 551 pcix_regs_clear(erpt_p, pcie_regs->pcix_bdg_regs); 552 553 if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV)) 554 return; 555 556 pcie_adv_regs = pcie_regs->pcie_adv_regs; 557 558 pcie_ecap_ptr = pcie_adv_regs->pcie_adv_cap_ptr; 559 560 if (pcie_adv_regs->pcie_adv_vflags & PCIE_UE_STATUS_VALID) 561 pci_config_put32(erpt_p->pe_hdl, 562 pcie_ecap_ptr + PCIE_AER_UCE_STS, 563 pcie_adv_regs->pcie_ue_status); 564 565 if (pcie_adv_regs->pcie_adv_vflags & PCIE_CE_STATUS_VALID) 566 pci_config_put32(erpt_p->pe_hdl, 567 pcie_ecap_ptr + PCIE_AER_CE_STS, 568 pcie_adv_regs->pcie_ce_status); 569 570 571 if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) { 572 pcie_adv_bdg_error_regs_t *pcie_bdg_regs = 573 pcie_adv_regs->pcie_adv_bdg_regs; 574 575 576 if (pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_STATUS_VALID) 577 pci_config_put32(erpt_p->pe_hdl, 578 pcie_ecap_ptr + PCIE_AER_SUCE_STS, 579 pcie_bdg_regs->pcie_sue_status); 580 } 581 /* 582 * If PCI Express root complex then clear the root complex 583 * error registers. 
584 */ 585 if (erpt_p->pe_dflags & PCIEX_RC_DEV) { 586 pcie_adv_rc_error_regs_t *pcie_rc_regs = 587 pcie_adv_regs->pcie_adv_rc_regs; 588 589 590 if (pcie_adv_regs->pcie_adv_vflags & PCIE_RC_ERR_STATUS_VALID) 591 pci_config_put32(erpt_p->pe_hdl, 592 (pcie_ecap_ptr + PCIE_AER_RE_STS), 593 pcie_rc_regs->pcie_rc_err_status); 594 } 595 pcie_adv_regs->pcie_adv_vflags = 0x0; 596 } 597 598 static void 599 pci_regs_clear(pci_erpt_t *erpt_p) 600 { 601 /* 602 * Finally clear the error bits 603 */ 604 if (erpt_p->pe_dflags & PCIEX_DEV) 605 pcie_regs_clear(erpt_p); 606 else if (erpt_p->pe_dflags & PCIX_DEV) 607 pcix_regs_clear(erpt_p, erpt_p->pe_regs); 608 609 if (erpt_p->pe_pci_regs->pci_vflags & PCI_ERR_STATUS_VALID) 610 pci_config_put16(erpt_p->pe_hdl, PCI_CONF_STAT, 611 erpt_p->pe_pci_regs->pci_err_status); 612 613 erpt_p->pe_pci_regs->pci_vflags = 0x0; 614 615 if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) { 616 if (erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags & 617 PCI_BDG_SEC_STAT_VALID) 618 pci_config_put16(erpt_p->pe_hdl, PCI_BCNF_SEC_STATUS, 619 erpt_p->pe_pci_regs->pci_bdg_regs-> 620 pci_bdg_sec_stat); 621 if (erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags & 622 PCI_BDG_CTRL_VALID) 623 pci_config_put16(erpt_p->pe_hdl, PCI_BCNF_BCNTRL, 624 erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_ctrl); 625 626 erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags = 0x0; 627 } 628 } 629 630 /* 631 * pcix_ereport_setup: Allocate structures for PCI-X error handling and ereport 632 * generation. 
633 */ 634 /* ARGSUSED */ 635 static void 636 pcix_ereport_setup(dev_info_t *dip, pci_erpt_t *erpt_p) 637 { 638 uint8_t pcix_cap_ptr; 639 int i; 640 641 pcix_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 642 "pcix-capid-pointer", PCI_CAP_NEXT_PTR_NULL); 643 644 if (pcix_cap_ptr != PCI_CAP_NEXT_PTR_NULL) 645 erpt_p->pe_dflags |= PCIX_DEV; 646 else 647 return; 648 649 if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) { 650 pcix_bdg_error_regs_t *pcix_bdg_regs; 651 652 erpt_p->pe_regs = kmem_zalloc(sizeof (pcix_bdg_error_regs_t), 653 KM_SLEEP); 654 pcix_bdg_regs = (pcix_bdg_error_regs_t *)erpt_p->pe_regs; 655 pcix_bdg_regs->pcix_bdg_cap_ptr = pcix_cap_ptr; 656 pcix_bdg_regs->pcix_bdg_ver = pci_config_get16(erpt_p->pe_hdl, 657 pcix_cap_ptr + PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK; 658 if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) { 659 for (i = 0; i < 2; i++) { 660 pcix_bdg_regs->pcix_bdg_ecc_regs[i] = 661 kmem_zalloc(sizeof (pcix_ecc_regs_t), 662 KM_SLEEP); 663 } 664 } 665 } else { 666 pcix_error_regs_t *pcix_regs; 667 668 erpt_p->pe_regs = kmem_zalloc(sizeof (pcix_error_regs_t), 669 KM_SLEEP); 670 pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs; 671 pcix_regs->pcix_cap_ptr = pcix_cap_ptr; 672 pcix_regs->pcix_ver = pci_config_get16(erpt_p->pe_hdl, 673 pcix_cap_ptr + PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK; 674 if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) { 675 pcix_regs->pcix_ecc_regs = kmem_zalloc( 676 sizeof (pcix_ecc_regs_t), KM_SLEEP); 677 } 678 } 679 } 680 681 static void 682 pcie_ereport_setup(dev_info_t *dip, pci_erpt_t *erpt_p) 683 { 684 pcie_error_regs_t *pcie_regs; 685 pcie_adv_error_regs_t *pcie_adv_regs; 686 char buf[FM_MAX_CLASS]; 687 uint8_t pcix_cap_ptr; 688 uint8_t pcie_cap_ptr; 689 uint16_t pcie_ecap_ptr; 690 uint16_t dev_type = 0; 691 uint32_t mask = pcie_expected_ue_mask; 692 693 /* 694 * The following sparc specific code should be removed once the pci_cap 695 * interfaces create the necessary properties for us. 
696 */ 697 #if defined(__sparc) 698 ushort_t status; 699 uint32_t slot_cap; 700 uint8_t cap_ptr = 0; 701 uint8_t cap_id = 0; 702 uint32_t hdr, hdr_next_ptr, hdr_cap_id; 703 uint16_t offset = P2ALIGN(PCIE_EXT_CAP, 4); 704 uint16_t aer_ptr = 0; 705 706 cap_ptr = pci_config_get8(erpt_p->pe_hdl, PCI_CONF_CAP_PTR); 707 if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) { 708 while ((cap_id = pci_config_get8(erpt_p->pe_hdl, cap_ptr)) != 709 0xff) { 710 if (cap_id == PCI_CAP_ID_PCIX) { 711 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 712 "pcix-capid-pointer", cap_ptr); 713 } 714 if (cap_id == PCI_CAP_ID_PCI_E) { 715 status = pci_config_get16(erpt_p->pe_hdl, cap_ptr + 2); 716 if (status & PCIE_PCIECAP_SLOT_IMPL) { 717 /* offset 14h is Slot Cap Register */ 718 slot_cap = pci_config_get32(erpt_p->pe_hdl, 719 cap_ptr + PCIE_SLOTCAP); 720 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 721 "pcie-slotcap-reg", slot_cap); 722 } 723 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 724 "pcie-capid-reg", pci_config_get16(erpt_p->pe_hdl, 725 cap_ptr + PCIE_PCIECAP)); 726 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 727 "pcie-capid-pointer", cap_ptr); 728 729 } 730 if ((cap_ptr = pci_config_get8(erpt_p->pe_hdl, 731 cap_ptr + 1)) == 0xff || cap_ptr == 0 || 732 (pci_config_check(erpt_p->pe_hdl) != DDI_FM_OK)) 733 break; 734 } 735 } 736 737 #endif 738 739 pcix_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 740 "pcix-capid-pointer", PCI_CAP_NEXT_PTR_NULL); 741 742 if (pcix_cap_ptr != PCI_CAP_NEXT_PTR_NULL) 743 erpt_p->pe_dflags |= PCIX_DEV; 744 745 pcie_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 746 DDI_PROP_DONTPASS, "pcie-capid-pointer", PCI_CAP_NEXT_PTR_NULL); 747 748 if (pcie_cap_ptr != PCI_CAP_NEXT_PTR_NULL) { 749 erpt_p->pe_dflags |= PCIEX_DEV; 750 erpt_p->pe_regs = kmem_zalloc(sizeof (pcie_error_regs_t), 751 KM_SLEEP); 752 pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs; 753 pcie_regs->pcie_cap_ptr = pcie_cap_ptr; 754 } 755 756 if (!(erpt_p->pe_dflags 
& PCIEX_DEV)) 757 return; 758 759 /* 760 * Don't currently need to check for version here because we are 761 * compliant with PCIE 1.0a which is version 0 and is guaranteed 762 * software compatibility with future versions. We will need to 763 * add errors for new detectors/features which are added in newer 764 * revisions [sec 7.8.2]. 765 */ 766 pcie_regs->pcie_cap = pci_config_get16(erpt_p->pe_hdl, 767 pcie_regs->pcie_cap_ptr + PCIE_PCIECAP); 768 769 dev_type = pcie_regs->pcie_cap & PCIE_PCIECAP_DEV_TYPE_MASK; 770 771 if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) && 772 (erpt_p->pe_dflags & PCIX_DEV)) { 773 int i; 774 775 pcie_regs->pcix_bdg_regs = 776 kmem_zalloc(sizeof (pcix_bdg_error_regs_t), KM_SLEEP); 777 778 pcie_regs->pcix_bdg_regs->pcix_bdg_cap_ptr = pcix_cap_ptr; 779 pcie_regs->pcix_bdg_regs->pcix_bdg_ver = 780 pci_config_get16(erpt_p->pe_hdl, 781 pcix_cap_ptr + PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK; 782 783 if (PCIX_ECC_VER_CHECK(pcie_regs->pcix_bdg_regs->pcix_bdg_ver)) 784 for (i = 0; i < 2; i++) 785 pcie_regs->pcix_bdg_regs->pcix_bdg_ecc_regs[i] = 786 kmem_zalloc(sizeof (pcix_ecc_regs_t), 787 KM_SLEEP); 788 } 789 790 if (dev_type == PCIE_PCIECAP_DEV_TYPE_ROOT) { 791 erpt_p->pe_dflags |= PCIEX_RC_DEV; 792 pcie_regs->pcie_rc_regs = kmem_zalloc( 793 sizeof (pcie_rc_error_regs_t), KM_SLEEP); 794 } 795 /* 796 * The following sparc specific code should be removed once the pci_cap 797 * interfaces create the necessary properties for us. 
798 */ 799 #if defined(__sparc) 800 801 hdr = pci_config_get32(erpt_p->pe_hdl, offset); 802 hdr_next_ptr = (hdr >> PCIE_EXT_CAP_NEXT_PTR_SHIFT) & 803 PCIE_EXT_CAP_NEXT_PTR_MASK; 804 hdr_cap_id = (hdr >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK; 805 806 while ((hdr_next_ptr != PCIE_EXT_CAP_NEXT_PTR_NULL) && 807 (hdr_cap_id != PCIE_EXT_CAP_ID_AER)) { 808 offset = P2ALIGN(hdr_next_ptr, 4); 809 hdr = pci_config_get32(erpt_p->pe_hdl, offset); 810 hdr_next_ptr = (hdr >> PCIE_EXT_CAP_NEXT_PTR_SHIFT) & 811 PCIE_EXT_CAP_NEXT_PTR_MASK; 812 hdr_cap_id = (hdr >> PCIE_EXT_CAP_ID_SHIFT) & 813 PCIE_EXT_CAP_ID_MASK; 814 } 815 816 if (hdr_cap_id == PCIE_EXT_CAP_ID_AER) 817 aer_ptr = P2ALIGN(offset, 4); 818 if (aer_ptr != PCI_CAP_NEXT_PTR_NULL) 819 (void) ndi_prop_update_int(DDI_DEV_T_NONE, dip, 820 "pcie-aer-pointer", aer_ptr); 821 #endif 822 823 /* 824 * Find and store if this device is capable of pci express 825 * advanced errors, if not report an error against the device. 826 */ 827 pcie_ecap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 828 "pcie-aer-pointer", PCI_CAP_NEXT_PTR_NULL); 829 if (pcie_ecap_ptr != PCI_CAP_NEXT_PTR_NULL) { 830 erpt_p->pe_dflags |= PCIEX_ADV_DEV; 831 pcie_regs->pcie_adv_regs = kmem_zalloc( 832 sizeof (pcie_adv_error_regs_t), KM_SLEEP); 833 pcie_regs->pcie_adv_regs->pcie_adv_cap_ptr = pcie_ecap_ptr; 834 } 835 836 if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV)) { 837 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", 838 PCIEX_ERROR_SUBCLASS, PCIEX_NADV); 839 ddi_fm_ereport_post(dip, buf, NULL, DDI_NOSLEEP, 840 FM_VERSION, DATA_TYPE_UINT8, 0, NULL); 841 return; 842 } 843 844 pcie_adv_regs = pcie_regs->pcie_adv_regs; 845 846 if (pcie_adv_regs == NULL) 847 return; 848 /* 849 * Initialize structures for advanced PCI Express devices. 
850 */ 851 852 /* 853 * Advanced error registers exist for PCI Express to PCI(X) Bridges and 854 * may also exist for PCI(X) to PCI Express Bridges, the latter is not 855 * well explained in the PCI Express to PCI/PCI-X Bridge Specification 856 * 1.0 and will be left out of the current gathering of these registers. 857 */ 858 if (dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE2PCI) { 859 erpt_p->pe_dflags |= PCIEX_2PCI_DEV; 860 pcie_adv_regs->pcie_adv_bdg_regs = kmem_zalloc( 861 sizeof (pcie_adv_bdg_error_regs_t), KM_SLEEP); 862 } 863 864 if (erpt_p->pe_dflags & PCIEX_RC_DEV) 865 pcie_adv_regs->pcie_adv_rc_regs = kmem_zalloc( 866 sizeof (pcie_adv_rc_error_regs_t), KM_SLEEP); 867 868 /* 869 * Check that mask values are as expected, if not 870 * change them to what we desire. 871 */ 872 pci_regs_gather(dip, erpt_p); 873 pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs; 874 if (pcie_regs->pcie_adv_regs->pcie_ce_mask != pcie_expected_ce_mask) { 875 pci_config_put32(erpt_p->pe_hdl, 876 pcie_ecap_ptr + PCIE_AER_CE_MASK, pcie_expected_ce_mask); 877 } 878 879 /* Disable PTLP/ECRC (or mask these two) for Switches */ 880 if (dev_type == PCIE_PCIECAP_DEV_TYPE_UP || 881 dev_type == PCIE_PCIECAP_DEV_TYPE_DOWN) 882 mask |= PCIE_AER_UCE_PTLP | PCIE_AER_UCE_ECRC; 883 884 if (pcie_regs->pcie_adv_regs->pcie_ue_mask != mask) { 885 pci_config_put32(erpt_p->pe_hdl, 886 pcie_ecap_ptr + PCIE_AER_UCE_MASK, mask); 887 } 888 if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) { 889 if (pcie_regs->pcie_adv_regs->pcie_adv_bdg_regs->pcie_sue_mask 890 != pcie_expected_sue_mask) { 891 pci_config_put32(erpt_p->pe_hdl, 892 pcie_ecap_ptr + PCIE_AER_SUCE_MASK, 893 pcie_expected_sue_mask); 894 } 895 } 896 } 897 898 /* 899 * pci_ereport_setup: Detect PCI device type and initialize structures to be 900 * used to generate ereports based on detected generic device errors. 
901 */ 902 void 903 pci_ereport_setup(dev_info_t *dip) 904 { 905 struct dev_info *devi = DEVI(dip); 906 struct i_ddi_fmhdl *fmhdl = devi->devi_fmhdl; 907 pci_erpt_t *erpt_p; 908 uint8_t pci_hdr_type; 909 uint16_t pci_status; 910 pci_regspec_t *pci_rp; 911 int32_t len; 912 uint32_t phys_hi; 913 914 /* 915 * If device is not ereport capbable then report an error against the 916 * driver for using this interface, 917 */ 918 if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) && 919 !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) { 920 i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_SLEEP); 921 return; 922 } 923 924 /* 925 * ASSERT fmhdl exists and fh_bus_specific is NULL. 926 */ 927 ASSERT(fmhdl && (fmhdl->fh_bus_specific == NULL)); 928 929 erpt_p = kmem_zalloc(sizeof (pci_erpt_t), KM_SLEEP); 930 931 if (pci_config_setup(dip, &erpt_p->pe_hdl) != DDI_SUCCESS) 932 goto error; 933 934 erpt_p->pe_pci_regs = kmem_zalloc(sizeof (pci_error_regs_t), KM_SLEEP); 935 936 pci_status = pci_config_get16(erpt_p->pe_hdl, PCI_CONF_STAT); 937 if (pci_config_check(erpt_p->pe_hdl) != DDI_FM_OK) 938 goto error; 939 940 /* 941 * Get header type and record if device is a bridge. 942 */ 943 pci_hdr_type = pci_config_get8(erpt_p->pe_hdl, PCI_CONF_HEADER); 944 if (pci_config_check(erpt_p->pe_hdl) != DDI_FM_OK) 945 goto error; 946 947 /* 948 * Check to see if PCI device is a bridge, if so allocate pci bridge 949 * error register structure. 
950 */ 951 if ((pci_hdr_type & PCI_HEADER_TYPE_M) == PCI_HEADER_PPB) { 952 erpt_p->pe_dflags |= PCI_BRIDGE_DEV; 953 erpt_p->pe_pci_regs->pci_bdg_regs = kmem_zalloc( 954 sizeof (pci_bdg_error_regs_t), KM_SLEEP); 955 } 956 957 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg", 958 (caddr_t)&pci_rp, &len) == DDI_SUCCESS) { 959 phys_hi = pci_rp->pci_phys_hi; 960 kmem_free(pci_rp, len); 961 962 erpt_p->pe_bdf = (uint16_t)(PCI_REG_BDFR_G(phys_hi) >> 963 PCI_REG_FUNC_SHIFT); 964 } 965 966 967 if (!(pci_status & PCI_STAT_CAP)) { 968 goto done; 969 } 970 971 /* 972 * Initialize structures for PCI Express and PCI-X devices. 973 * Order matters below and pcie_ereport_setup should preceed 974 * pcix_ereport_setup. 975 */ 976 pcie_ereport_setup(dip, erpt_p); 977 978 if (!(erpt_p->pe_dflags & PCIEX_DEV)) { 979 pcix_ereport_setup(dip, erpt_p); 980 } 981 982 done: 983 pci_regs_gather(dip, erpt_p); 984 pci_regs_clear(erpt_p); 985 986 /* 987 * Before returning set fh_bus_specific to completed pci_erpt_t 988 * structure 989 */ 990 fmhdl->fh_bus_specific = (void *)erpt_p; 991 992 return; 993 error: 994 if (erpt_p->pe_pci_regs) 995 kmem_free(erpt_p->pe_pci_regs, sizeof (pci_error_regs_t)); 996 kmem_free(erpt_p, sizeof (pci_erpt_t)); 997 erpt_p = NULL; 998 } 999 1000 static void 1001 pcix_ereport_teardown(pci_erpt_t *erpt_p) 1002 { 1003 if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) { 1004 pcix_bdg_error_regs_t *pcix_bdg_regs; 1005 uint16_t pcix_ver; 1006 1007 pcix_bdg_regs = (pcix_bdg_error_regs_t *)erpt_p->pe_regs; 1008 pcix_ver = pcix_bdg_regs->pcix_bdg_ver; 1009 if (PCIX_ECC_VER_CHECK(pcix_ver)) { 1010 int i; 1011 for (i = 0; i < 2; i++) 1012 kmem_free(pcix_bdg_regs->pcix_bdg_ecc_regs[i], 1013 sizeof (pcix_ecc_regs_t)); 1014 } 1015 kmem_free(erpt_p->pe_regs, sizeof (pcix_bdg_error_regs_t)); 1016 } else { 1017 pcix_error_regs_t *pcix_regs; 1018 uint16_t pcix_ver; 1019 1020 pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs; 1021 pcix_ver = pcix_regs->pcix_ver; 1022 if 
(PCIX_ECC_VER_CHECK(pcix_ver)) { 1023 kmem_free(pcix_regs->pcix_ecc_regs, 1024 sizeof (pcix_ecc_regs_t)); 1025 } 1026 kmem_free(erpt_p->pe_regs, sizeof (pcix_error_regs_t)); 1027 } 1028 } 1029 1030 static void 1031 pcie_ereport_teardown(pci_erpt_t *erpt_p) 1032 { 1033 pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs; 1034 1035 if (erpt_p->pe_dflags & PCIEX_ADV_DEV) { 1036 pcie_adv_error_regs_t *pcie_adv = pcie_regs->pcie_adv_regs; 1037 1038 if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) 1039 kmem_free(pcie_adv->pcie_adv_bdg_regs, 1040 sizeof (pcie_adv_bdg_error_regs_t)); 1041 if (erpt_p->pe_dflags & PCIEX_RC_DEV) 1042 kmem_free(pcie_adv->pcie_adv_rc_regs, 1043 sizeof (pcie_adv_rc_error_regs_t)); 1044 kmem_free(pcie_adv, sizeof (pcie_adv_error_regs_t)); 1045 } 1046 1047 if (erpt_p->pe_dflags & PCIEX_RC_DEV) 1048 kmem_free(pcie_regs->pcie_rc_regs, 1049 sizeof (pcie_rc_error_regs_t)); 1050 1051 if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) { 1052 if (erpt_p->pe_dflags & PCIX_DEV) { 1053 uint16_t pcix_ver = pcie_regs->pcix_bdg_regs-> 1054 pcix_bdg_ver; 1055 1056 if (PCIX_ECC_VER_CHECK(pcix_ver)) { 1057 int i; 1058 for (i = 0; i < 2; i++) 1059 kmem_free(pcie_regs->pcix_bdg_regs-> 1060 pcix_bdg_ecc_regs[i], 1061 sizeof (pcix_ecc_regs_t)); 1062 } 1063 kmem_free(pcie_regs->pcix_bdg_regs, 1064 sizeof (pcix_bdg_error_regs_t)); 1065 } 1066 } 1067 kmem_free(erpt_p->pe_regs, sizeof (pcie_error_regs_t)); 1068 } 1069 1070 void 1071 pci_ereport_teardown(dev_info_t *dip) 1072 { 1073 struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl; 1074 pci_erpt_t *erpt_p; 1075 1076 if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) && 1077 !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) { 1078 i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_SLEEP); 1079 } 1080 1081 ASSERT(fmhdl); 1082 1083 erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific; 1084 if (erpt_p == NULL) 1085 return; 1086 1087 if (erpt_p->pe_dflags & PCIEX_DEV) 1088 pcie_ereport_teardown(erpt_p); 1089 else if (erpt_p->pe_dflags & PCIX_DEV) 1090 
pcix_ereport_teardown(erpt_p); 1091 pci_config_teardown((ddi_acc_handle_t *)&erpt_p->pe_hdl); 1092 if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) 1093 kmem_free(erpt_p->pe_pci_regs->pci_bdg_regs, 1094 sizeof (pci_bdg_error_regs_t)); 1095 kmem_free(erpt_p->pe_pci_regs, sizeof (pci_error_regs_t)); 1096 kmem_free(erpt_p, sizeof (pci_erpt_t)); 1097 fmhdl->fh_bus_specific = NULL; 1098 /* 1099 * The following sparc specific code should be removed once the pci_cap 1100 * interfaces create the necessary properties for us. 1101 */ 1102 #if defined(__sparc) 1103 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcix-capid-pointer"); 1104 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-slotcap-reg"); 1105 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-capid-reg"); 1106 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-capid-pointer"); 1107 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-aer-pointer"); 1108 #endif 1109 } 1110 1111 /* 1112 * Function used by PCI device and nexus error handlers to check if a 1113 * captured address resides in their DMA or ACC handle caches or the caches of 1114 * their children devices, respectively. 
1115 */ 1116 static int 1117 pci_dev_hdl_lookup(dev_info_t *dip, int type, ddi_fm_error_t *derr, 1118 void *addr) 1119 { 1120 struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl; 1121 pci_erpt_t *erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific; 1122 1123 if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) 1124 return (ndi_fmc_error(dip, NULL, type, derr->fme_ena, addr)); 1125 else 1126 return (ndi_fmc_entry_error(dip, type, derr, addr)); 1127 } 1128 1129 static void 1130 pcie_ereport_post(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p, 1131 char *buf, int errtype) 1132 { 1133 pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs; 1134 pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs; 1135 pcie_adv_rc_error_regs_t *pcie_adv_rc_regs; 1136 1137 switch (errtype) { 1138 case PCIEX_TYPE_CE: 1139 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 1140 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 1141 PCIEX_DEVSTS_REG, DATA_TYPE_UINT16, 1142 pcie_regs->pcie_err_status, 1143 PCIEX_CE_STATUS_REG, DATA_TYPE_UINT32, 1144 pcie_adv_regs->pcie_ce_status, NULL); 1145 break; 1146 case PCIEX_TYPE_UE: 1147 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 1148 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 1149 PCIEX_DEVSTS_REG, DATA_TYPE_UINT16, 1150 pcie_regs->pcie_err_status, 1151 PCIEX_UE_STATUS_REG, DATA_TYPE_UINT32, 1152 pcie_adv_regs->pcie_ue_status, PCIEX_UE_SEV_REG, 1153 DATA_TYPE_UINT32, pcie_adv_regs->pcie_ue_sev, 1154 PCIEX_ADV_CTL, DATA_TYPE_UINT32, 1155 pcie_adv_regs->pcie_adv_ctl, 1156 PCIEX_SRC_ID, DATA_TYPE_UINT16, 1157 pcie_adv_regs->pcie_adv_bdf, 1158 PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE, 1159 (pcie_adv_regs->pcie_adv_bdf != NULL) ? 
1160 1 : NULL, 1161 #ifdef DEBUG 1162 PCIEX_UE_HDR0, DATA_TYPE_UINT32, 1163 pcie_adv_regs->pcie_ue_hdr0, 1164 PCIEX_UE_HDR1, DATA_TYPE_UINT32, 1165 pcie_adv_regs->pcie_ue_hdr[0], 1166 PCIEX_UE_HDR2, DATA_TYPE_UINT32, 1167 pcie_adv_regs->pcie_ue_hdr[1], 1168 PCIEX_UE_HDR3, DATA_TYPE_UINT32, 1169 pcie_adv_regs->pcie_ue_hdr[2], 1170 #endif 1171 NULL); 1172 break; 1173 case PCIEX_TYPE_GEN: 1174 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 1175 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 1176 0, PCIEX_DEVSTS_REG, DATA_TYPE_UINT16, 1177 pcie_regs->pcie_err_status, NULL); 1178 break; 1179 case PCIEX_TYPE_RC_UE_MSG: 1180 case PCIEX_TYPE_RC_CE_MSG: 1181 pcie_adv_rc_regs = pcie_adv_regs->pcie_adv_rc_regs; 1182 1183 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 1184 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 1185 PCIEX_ROOT_ERRSTS_REG, DATA_TYPE_UINT32, 1186 pcie_adv_rc_regs->pcie_rc_err_status, 1187 PCIEX_SRC_ID, DATA_TYPE_UINT16, 1188 (errtype == PCIEX_TYPE_RC_UE_MSG) ? 1189 pcie_adv_rc_regs->pcie_rc_ue_src_id : 1190 pcie_adv_rc_regs->pcie_rc_ce_src_id, 1191 PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE, 1192 (errtype == PCIEX_TYPE_RC_UE_MSG) ? 
1193 (pcie_adv_regs->pcie_adv_vflags & PCIE_SRC_ID_VALID && 1194 pcie_adv_rc_regs->pcie_rc_ue_src_id != 0) : 1195 (pcie_adv_regs->pcie_adv_vflags & PCIE_SRC_ID_VALID && 1196 pcie_adv_rc_regs->pcie_rc_ce_src_id != 0), NULL); 1197 break; 1198 case PCIEX_TYPE_RC_MULT_MSG: 1199 pcie_adv_rc_regs = pcie_adv_regs->pcie_adv_rc_regs; 1200 1201 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 1202 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 1203 PCIEX_ROOT_ERRSTS_REG, DATA_TYPE_UINT32, 1204 pcie_adv_rc_regs->pcie_rc_err_status, NULL); 1205 break; 1206 default: 1207 break; 1208 } 1209 } 1210 1211 static void 1212 pcie_check_addr(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *eprt_p) 1213 { 1214 pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)eprt_p->pe_regs; 1215 pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs; 1216 pcie_tlp_hdr_t *ue_hdr0; 1217 uint32_t *ue_hdr; 1218 uint64_t addr = NULL; 1219 1220 if (!(pcie_adv_regs->pcie_adv_vflags & PCIE_UE_HDR_VALID)) { 1221 derr->fme_status = DDI_FM_UNKNOWN; 1222 return; 1223 } 1224 ue_hdr0 = (pcie_tlp_hdr_t *)&pcie_adv_regs->pcie_ue_hdr0; 1225 ue_hdr = pcie_adv_regs->pcie_ue_hdr; 1226 1227 switch (ue_hdr0->type) { 1228 case PCIE_TLP_TYPE_MEM: 1229 case PCIE_TLP_TYPE_MEMLK: 1230 if ((ue_hdr0->fmt & 0x1) == 0x1) { 1231 pcie_mem64_t *mem64_tlp = (pcie_mem64_t *)ue_hdr; 1232 1233 addr = (uint64_t)mem64_tlp->addr1 << 32 | 1234 (uint32_t)mem64_tlp->addr0 << 2; 1235 pcie_adv_regs->pcie_adv_bdf = mem64_tlp->rid; 1236 } else { 1237 pcie_memio32_t *memio32_tlp = (pcie_memio32_t *)ue_hdr; 1238 1239 addr = (uint32_t)memio32_tlp->addr0 << 2; 1240 pcie_adv_regs->pcie_adv_bdf = memio32_tlp->rid; 1241 } 1242 1243 derr->fme_status = pci_dev_hdl_lookup(dip, DMA_HANDLE, derr, 1244 (void *) &addr); 1245 /* 1246 * If DMA handle is not found error could have been a memory 1247 * mapped IO address so check in the access cache 1248 */ 1249 if (derr->fme_status == DDI_FM_UNKNOWN) 1250 derr->fme_status = pci_dev_hdl_lookup(dip, ACC_HANDLE, 
1251 derr, (void *) &addr); 1252 break; 1253 1254 case PCIE_TLP_TYPE_IO: 1255 { 1256 pcie_memio32_t *memio32_tlp = (pcie_memio32_t *)ue_hdr; 1257 1258 addr = (uint32_t)memio32_tlp->addr0 << 2; 1259 pcie_adv_regs->pcie_adv_bdf = memio32_tlp->rid; 1260 derr->fme_status = pci_dev_hdl_lookup(dip, ACC_HANDLE, 1261 derr, (void *) &addr); 1262 break; 1263 } 1264 case PCIE_TLP_TYPE_CFG0: 1265 case PCIE_TLP_TYPE_CFG1: 1266 { 1267 pcie_cfg_t *cfg_tlp = (pcie_cfg_t *)ue_hdr; 1268 1269 pcie_adv_regs->pcie_adv_bdf = 1270 (uint16_t)cfg_tlp->bus << 8 | 1271 (uint16_t)cfg_tlp->dev << 3 | cfg_tlp->func; 1272 1273 derr->fme_status = DDI_FM_UNKNOWN; 1274 break; 1275 } 1276 case PCIE_TLP_TYPE_MSG: 1277 { 1278 pcie_msg_t *msg_tlp = (pcie_msg_t *)ue_hdr; 1279 1280 pcie_adv_regs->pcie_adv_bdf = msg_tlp->rid; 1281 derr->fme_status = DDI_FM_UNKNOWN; 1282 break; 1283 } 1284 case PCIE_TLP_TYPE_CPL: 1285 case PCIE_TLP_TYPE_CPLLK: 1286 { 1287 pcie_cpl_t *cpl_tlp = (pcie_cpl_t *)ue_hdr; 1288 1289 pcie_adv_regs->pcie_adv_bdf = cpl_tlp->cid; 1290 derr->fme_status = DDI_FM_UNKNOWN; 1291 break; 1292 } 1293 case PCIE_TLP_TYPE_MSI: 1294 default: 1295 derr->fme_status = DDI_FM_UNKNOWN; 1296 } 1297 1298 /* 1299 * If no handle was found in the children caches and their is no 1300 * address infomation already stored and we have a captured address 1301 * then we need to store it away so that intermediate bridges can 1302 * check if the address exists in their handle caches. 
1303 */ 1304 if (derr->fme_status == DDI_FM_UNKNOWN && 1305 derr->fme_bus_specific == NULL && 1306 addr != NULL) 1307 derr->fme_bus_specific = (void *)(uintptr_t)addr; 1308 } 1309 1310 static void 1311 pcie_pci_check_addr(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *eprt_p) 1312 { 1313 pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)eprt_p->pe_regs; 1314 pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs; 1315 pcie_adv_bdg_error_regs_t *pcie_bdg_regs = 1316 pcie_adv_regs->pcie_adv_bdg_regs; 1317 uint64_t addr = NULL; 1318 pcix_attr_t *pcie_pci_sue_attr; 1319 int cmd; 1320 int dual_addr = 0; 1321 1322 if (!(pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_HDR_VALID)) { 1323 derr->fme_status = DDI_FM_UNKNOWN; 1324 return; 1325 } 1326 1327 pcie_pci_sue_attr = (pcix_attr_t *)&pcie_bdg_regs->pcie_sue_hdr0; 1328 cmd = (pcie_bdg_regs->pcie_sue_hdr[0] >> 1329 PCIE_AER_SUCE_HDR_CMD_LWR_SHIFT) & PCIE_AER_SUCE_HDR_CMD_LWR_MASK; 1330 cmd_switch: 1331 switch (cmd) { 1332 case PCI_PCIX_CMD_IORD: 1333 case PCI_PCIX_CMD_IOWR: 1334 pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid; 1335 1336 addr = pcie_bdg_regs->pcie_sue_hdr[2]; 1337 addr = (addr << PCIE_AER_SUCE_HDR_ADDR_SHIFT) | 1338 pcie_bdg_regs->pcie_sue_hdr[1]; 1339 1340 derr->fme_status = pci_dev_hdl_lookup(dip, ACC_HANDLE, 1341 derr, (void *) &addr); 1342 break; 1343 case PCI_PCIX_CMD_MEMRD_DW: 1344 case PCI_PCIX_CMD_MEMWR: 1345 case PCI_PCIX_CMD_MEMRD_BL: 1346 case PCI_PCIX_CMD_MEMWR_BL: 1347 case PCI_PCIX_CMD_MEMRDBL: 1348 case PCI_PCIX_CMD_MEMWRBL: 1349 pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid; 1350 1351 addr = pcie_bdg_regs->pcie_sue_hdr[2]; 1352 addr = (addr << PCIE_AER_SUCE_HDR_ADDR_SHIFT) | 1353 pcie_bdg_regs->pcie_sue_hdr[1]; 1354 1355 derr->fme_status = pci_dev_hdl_lookup(dip, DMA_HANDLE, 1356 derr, (void *) &addr); 1357 if (derr->fme_status == DDI_FM_UNKNOWN) 1358 derr->fme_status = pci_dev_hdl_lookup(dip, ACC_HANDLE, 1359 derr, (void *) &addr); 1360 break; 1361 case 
PCI_PCIX_CMD_CFRD: 1362 case PCI_PCIX_CMD_CFWR: 1363 /* 1364 * If we want to store the bdf of the device being addressed we 1365 * will need to do some surgery 1366 */ 1367 derr->fme_status = DDI_FM_UNKNOWN; 1368 break; 1369 case PCI_PCIX_CMD_DADR: 1370 cmd = (pcie_bdg_regs->pcie_sue_hdr[0] >> 1371 PCIE_AER_SUCE_HDR_CMD_UP_SHIFT) & 1372 PCIE_AER_SUCE_HDR_CMD_UP_MASK; 1373 if (dual_addr) 1374 break; 1375 ++dual_addr; 1376 goto cmd_switch; 1377 default: 1378 derr->fme_status = DDI_FM_UNKNOWN; 1379 } 1380 1381 /* 1382 * If no handle was found in the children caches and their is no 1383 * address infomation already stored and we have a captured address 1384 * then we need to store it away so that intermediate bridges can 1385 * check if the address exists in their handle caches. 1386 */ 1387 if (derr->fme_status == DDI_FM_UNKNOWN && 1388 derr->fme_bus_specific == NULL && 1389 addr != NULL) 1390 derr->fme_bus_specific = (void *)(uintptr_t)addr; 1391 } 1392 1393 static int 1394 pcix_check_addr(dev_info_t *dip, ddi_fm_error_t *derr, 1395 pcix_ecc_regs_t *pcix_ecc_regs) 1396 { 1397 int cmd = (pcix_ecc_regs->pcix_ecc_ctlstat >> 16) & 0xf; 1398 uint64_t addr; 1399 1400 addr = pcix_ecc_regs->pcix_ecc_secaddr; 1401 addr = addr << 32; 1402 addr |= pcix_ecc_regs->pcix_ecc_fstaddr; 1403 1404 switch (cmd) { 1405 case PCI_PCIX_CMD_INTR: 1406 case PCI_PCIX_CMD_SPEC: 1407 return (DDI_FM_FATAL); 1408 case PCI_PCIX_CMD_IORD: 1409 case PCI_PCIX_CMD_IOWR: 1410 return (pci_dev_hdl_lookup(dip, ACC_HANDLE, derr, 1411 (void *) &addr)); 1412 case PCI_PCIX_CMD_DEVID: 1413 return (DDI_FM_FATAL); 1414 case PCI_PCIX_CMD_MEMRD_DW: 1415 case PCI_PCIX_CMD_MEMWR: 1416 case PCI_PCIX_CMD_MEMRD_BL: 1417 case PCI_PCIX_CMD_MEMWR_BL: 1418 return (pci_dev_hdl_lookup(dip, DMA_HANDLE, derr, 1419 (void *) &addr)); 1420 case PCI_PCIX_CMD_CFRD: 1421 case PCI_PCIX_CMD_CFWR: 1422 return (pci_dev_hdl_lookup(dip, ACC_HANDLE, derr, 1423 (void *) &addr)); 1424 case PCI_PCIX_CMD_SPL: 1425 case PCI_PCIX_CMD_DADR: 1426 
return (DDI_FM_FATAL); 1427 case PCI_PCIX_CMD_MEMRDBL: 1428 case PCI_PCIX_CMD_MEMWRBL: 1429 return (pci_dev_hdl_lookup(dip, DMA_HANDLE, derr, 1430 (void *) &addr)); 1431 default: 1432 return (DDI_FM_FATAL); 1433 } 1434 } 1435 1436 /*ARGSUSED*/ 1437 static int 1438 pci_bdg_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p) 1439 { 1440 pci_bdg_error_regs_t *pci_bdg_regs = erpt_p->pe_pci_regs->pci_bdg_regs; 1441 int fatal = 0; 1442 int nonfatal = 0; 1443 int unknown = 0; 1444 int ok = 0; 1445 int ret = DDI_FM_OK; 1446 char buf[FM_MAX_CLASS]; 1447 int i; 1448 1449 if (derr->fme_flag != DDI_FM_ERR_UNEXPECTED) 1450 goto done; 1451 1452 if ((pci_bdg_regs->pci_bdg_vflags & PCI_BDG_CTRL_VALID) && 1453 (pci_bdg_regs->pci_bdg_ctrl & PCI_BCNF_BCNTRL_DTO_STAT)) { 1454 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", 1455 PCI_ERROR_SUBCLASS, PCI_DTO); 1456 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 1457 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 1458 PCI_SEC_CONFIG_STATUS, DATA_TYPE_UINT16, 1459 pci_bdg_regs->pci_bdg_sec_stat, PCI_BCNTRL, 1460 DATA_TYPE_UINT16, pci_bdg_regs->pci_bdg_ctrl, NULL); 1461 unknown++; 1462 } 1463 1464 if (pci_bdg_regs->pci_bdg_vflags & PCI_BDG_SEC_STAT_VALID) { 1465 for (i = 0; pci_bdg_err_tbl[i].err_class != NULL; i++) { 1466 if (pci_bdg_regs->pci_bdg_sec_stat & 1467 pci_bdg_err_tbl[i].reg_bit) { 1468 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s-%s", 1469 PCI_ERROR_SUBCLASS, PCI_SEC_ERROR_SUBCLASS, 1470 pci_bdg_err_tbl[i].err_class); 1471 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 1472 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 1473 PCI_SEC_CONFIG_STATUS, DATA_TYPE_UINT16, 1474 pci_bdg_regs->pci_bdg_sec_stat, PCI_BCNTRL, 1475 DATA_TYPE_UINT16, 1476 pci_bdg_regs->pci_bdg_ctrl, NULL); 1477 /* 1478 * Increment severity based on flag if bridge 1479 * is PCI or PCI-X, if PCI Express and this is a 1480 * master abort then treat as nonfatal. 1481 * XXFM May need to check if all other errors 1482 * are related to MA? 
1483 */ 1484 if (!(erpt_p->pe_dflags & PCIEX_DEV)) { 1485 PCI_FM_SEV_INC( 1486 pci_bdg_err_tbl[i].flags); 1487 } else if (pci_bdg_err_tbl[i].reg_bit == 1488 PCI_STAT_R_MAST_AB) { 1489 nonfatal++; 1490 } 1491 1492 if (derr->fme_bus_specific && 1493 pci_bdg_err_tbl[i].terr_class) 1494 pci_target_enqueue(derr->fme_ena, 1495 pci_bdg_err_tbl[i].terr_class, 1496 PCI_ERROR_SUBCLASS, 1497 (uintptr_t)derr->fme_bus_specific); 1498 } 1499 } 1500 } 1501 1502 done: 1503 1504 /* 1505 * Need to check for poke and cautious put. We already know peek 1506 * and cautious get errors occurred (as we got a trap) and we know 1507 * they are nonfatal. 1508 */ 1509 if (derr->fme_flag == DDI_FM_ERR_EXPECTED) { 1510 /* 1511 * for cautious puts we treat all errors as nonfatal. Actually 1512 * we set nonfatal for cautious gets as well - doesn't do any 1513 * harm 1514 */ 1515 if (pci_bdg_regs->pci_bdg_sec_stat & (PCI_STAT_R_TARG_AB | 1516 PCI_STAT_R_MAST_AB | PCI_STAT_S_PERROR | PCI_STAT_S_SYSERR)) 1517 nonfatal++; 1518 1519 /* 1520 * for cautious accesses we already have the acc_handle. Just 1521 * need to call children to clear their error bits 1522 */ 1523 ret = ndi_fm_handler_dispatch(dip, NULL, derr); 1524 PCI_FM_SEV_INC(ret); 1525 return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL : 1526 (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK))); 1527 } 1528 if (derr->fme_flag == DDI_FM_ERR_POKE) { 1529 /* 1530 * special case for pokes - we only consider master abort 1531 * and target abort as nonfatal. 
Sserr with no master abort is 1532 * fatal, but master/target abort can come in on separate 1533 * instance, so return unknown and parent will determine if 1534 * nonfatal (if another child returned nonfatal - ie master 1535 * or target abort) or fatal otherwise 1536 */ 1537 if (pci_bdg_regs->pci_bdg_sec_stat & (PCI_STAT_R_TARG_AB | 1538 PCI_STAT_R_MAST_AB)) 1539 nonfatal++; 1540 if (erpt_p->pe_pci_regs->pci_err_status & PCI_STAT_S_SYSERR) 1541 unknown++; 1542 } 1543 1544 /* 1545 * If errant address is passed in then attempt to find 1546 * ACC/DMA handle in caches. 1547 */ 1548 if (derr->fme_bus_specific) { 1549 int i; 1550 1551 for (i = 0; i < 2; i++) { 1552 ret = ndi_fmc_error(dip, NULL, i ? ACC_HANDLE : 1553 DMA_HANDLE, derr->fme_ena, 1554 (void *)&derr->fme_bus_specific); 1555 PCI_FM_SEV_INC(ret); 1556 } 1557 } 1558 1559 /* 1560 * now check children below the bridge, only if errant handle was not 1561 * found 1562 */ 1563 if (!derr->fme_acc_handle && !derr->fme_dma_handle) { 1564 ret = ndi_fm_handler_dispatch(dip, NULL, derr); 1565 PCI_FM_SEV_INC(ret); 1566 } 1567 1568 return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL : 1569 (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK))); 1570 } 1571 1572 static int 1573 pcix_ecc_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p, 1574 void *pe_regs) 1575 { 1576 pcix_error_regs_t *pcix_regs; 1577 pcix_bdg_error_regs_t *pcix_bdg_regs; 1578 pcix_ecc_regs_t *pcix_ecc_regs; 1579 int bridge; 1580 int i; 1581 int ecc_phase; 1582 int ecc_corr; 1583 int sec_ue; 1584 int sec_ce; 1585 int fatal = 0; 1586 int nonfatal = 0; 1587 int unknown = 0; 1588 int ok = 0; 1589 char buf[FM_MAX_CLASS]; 1590 1591 if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) { 1592 pcix_bdg_regs = (pcix_bdg_error_regs_t *)pe_regs; 1593 bridge = 1; 1594 } else { 1595 pcix_regs = (pcix_error_regs_t *)pe_regs; 1596 bridge = 0; 1597 } 1598 1599 for (i = 0; i < (bridge ? 2 : 1); i++) { 1600 int ret = DDI_FM_OK; 1601 pcix_ecc_regs = bridge ? 
pcix_bdg_regs->pcix_bdg_ecc_regs[i] : 1602 pcix_regs->pcix_ecc_regs; 1603 if (pcix_ecc_regs->pcix_ecc_vflags & PCIX_ERR_ECC_STS_VALID) { 1604 ecc_phase = (pcix_ecc_regs->pcix_ecc_ctlstat & 1605 PCI_PCIX_ECC_PHASE) >> 0x4; 1606 ecc_corr = (pcix_ecc_regs->pcix_ecc_ctlstat & 1607 PCI_PCIX_ECC_CORR); 1608 sec_ue = (pcix_ecc_regs->pcix_ecc_ctlstat & 1609 PCI_PCIX_ECC_S_UE); 1610 sec_ce = (pcix_ecc_regs->pcix_ecc_ctlstat & 1611 PCI_PCIX_ECC_S_CE); 1612 1613 switch (ecc_phase) { 1614 case PCI_PCIX_ECC_PHASE_NOERR: 1615 break; 1616 case PCI_PCIX_ECC_PHASE_FADDR: 1617 case PCI_PCIX_ECC_PHASE_SADDR: 1618 PCI_FM_SEV_INC(ecc_corr ? DDI_FM_NONFATAL : 1619 DDI_FM_FATAL); 1620 (void) snprintf(buf, FM_MAX_CLASS, 1621 "%s.%s%s", PCIX_ERROR_SUBCLASS, 1622 i ? PCIX_SEC_ERROR_SUBCLASS : "", 1623 ecc_corr ? PCIX_ECC_CE_ADDR : 1624 PCIX_ECC_UE_ADDR); 1625 break; 1626 case PCI_PCIX_ECC_PHASE_ATTR: 1627 PCI_FM_SEV_INC(ecc_corr ? 1628 DDI_FM_NONFATAL : DDI_FM_FATAL); 1629 (void) snprintf(buf, FM_MAX_CLASS, 1630 "%s.%s%s", PCIX_ERROR_SUBCLASS, 1631 i ? PCIX_SEC_ERROR_SUBCLASS : "", 1632 ecc_corr ? PCIX_ECC_CE_ATTR : 1633 PCIX_ECC_UE_ATTR); 1634 break; 1635 case PCI_PCIX_ECC_PHASE_DATA32: 1636 case PCI_PCIX_ECC_PHASE_DATA64: 1637 if (ecc_corr) 1638 ret = DDI_FM_NONFATAL; 1639 else 1640 ret = pcix_check_addr(dip, derr, 1641 pcix_ecc_regs); 1642 PCI_FM_SEV_INC(ret); 1643 1644 (void) snprintf(buf, FM_MAX_CLASS, 1645 "%s.%s%s", PCIX_ERROR_SUBCLASS, 1646 i ? PCIX_SEC_ERROR_SUBCLASS : "", 1647 ecc_corr ? 
PCIX_ECC_CE_DATA : 1648 PCIX_ECC_UE_DATA); 1649 break; 1650 } 1651 if (ecc_phase) 1652 if (bridge) 1653 ddi_fm_ereport_post(dip, buf, 1654 derr->fme_ena, 1655 DDI_NOSLEEP, FM_VERSION, 1656 DATA_TYPE_UINT8, 0, 1657 PCIX_SEC_STATUS, DATA_TYPE_UINT16, 1658 pcix_bdg_regs->pcix_bdg_sec_stat, 1659 PCIX_BDG_STAT, DATA_TYPE_UINT32, 1660 pcix_bdg_regs->pcix_bdg_stat, 1661 PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32, 1662 pcix_ecc_regs->pcix_ecc_ctlstat, 1663 PCIX_ECC_ATTR, DATA_TYPE_UINT32, 1664 pcix_ecc_regs->pcix_ecc_attr, NULL); 1665 else 1666 ddi_fm_ereport_post(dip, buf, 1667 derr->fme_ena, 1668 DDI_NOSLEEP, FM_VERSION, 1669 DATA_TYPE_UINT8, 0, 1670 PCIX_COMMAND, DATA_TYPE_UINT16, 1671 pcix_regs->pcix_command, 1672 PCIX_STATUS, DATA_TYPE_UINT32, 1673 pcix_regs->pcix_status, 1674 PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32, 1675 pcix_ecc_regs->pcix_ecc_ctlstat, 1676 PCIX_ECC_ATTR, DATA_TYPE_UINT32, 1677 pcix_ecc_regs->pcix_ecc_attr, NULL); 1678 if (sec_ce || sec_ue) { 1679 (void) snprintf(buf, FM_MAX_CLASS, 1680 "%s.%s%s", PCIX_ERROR_SUBCLASS, 1681 i ? PCIX_SEC_ERROR_SUBCLASS : "", 1682 sec_ce ? PCIX_ECC_S_CE : PCIX_ECC_S_UE); 1683 if (bridge) 1684 ddi_fm_ereport_post(dip, buf, 1685 derr->fme_ena, 1686 DDI_NOSLEEP, FM_VERSION, 1687 DATA_TYPE_UINT8, 0, 1688 PCIX_SEC_STATUS, DATA_TYPE_UINT16, 1689 pcix_bdg_regs->pcix_bdg_sec_stat, 1690 PCIX_BDG_STAT, DATA_TYPE_UINT32, 1691 pcix_bdg_regs->pcix_bdg_stat, 1692 PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32, 1693 pcix_ecc_regs->pcix_ecc_ctlstat, 1694 PCIX_ECC_ATTR, DATA_TYPE_UINT32, 1695 pcix_ecc_regs->pcix_ecc_attr, NULL); 1696 else 1697 ddi_fm_ereport_post(dip, buf, 1698 derr->fme_ena, 1699 DDI_NOSLEEP, FM_VERSION, 1700 DATA_TYPE_UINT8, 0, 1701 PCIX_COMMAND, DATA_TYPE_UINT16, 1702 pcix_regs->pcix_command, 1703 PCIX_STATUS, DATA_TYPE_UINT32, 1704 pcix_regs->pcix_status, 1705 PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32, 1706 pcix_ecc_regs->pcix_ecc_ctlstat, 1707 PCIX_ECC_ATTR, DATA_TYPE_UINT32, 1708 pcix_ecc_regs->pcix_ecc_attr, NULL); 1709 PCI_FM_SEV_INC(sec_ue ? 
DDI_FM_FATAL : 1710 DDI_FM_NONFATAL); 1711 } 1712 } 1713 } 1714 return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL : 1715 (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK))); 1716 } 1717 1718 static int 1719 pcix_bdg_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p, 1720 void *pe_regs) 1721 { 1722 pcix_bdg_error_regs_t *pcix_bdg_regs = (pcix_bdg_error_regs_t *)pe_regs; 1723 int fatal = 0; 1724 int nonfatal = 0; 1725 int unknown = 0; 1726 int ok = 0; 1727 char buf[FM_MAX_CLASS]; 1728 int i; 1729 1730 if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_STATUS_VALID) { 1731 for (i = 0; pcix_err_tbl[i].err_class != NULL; i++) { 1732 if ((pcix_bdg_regs->pcix_bdg_stat & 1733 pcix_err_tbl[i].reg_bit)) { 1734 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", 1735 PCIX_ERROR_SUBCLASS, 1736 pcix_err_tbl[i].err_class); 1737 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 1738 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 1739 PCIX_SEC_STATUS, DATA_TYPE_UINT16, 1740 pcix_bdg_regs->pcix_bdg_sec_stat, 1741 PCIX_BDG_STAT, DATA_TYPE_UINT32, 1742 pcix_bdg_regs->pcix_bdg_stat, NULL); 1743 PCI_FM_SEV_INC(pcix_err_tbl[i].flags); 1744 } 1745 } 1746 } 1747 1748 if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_SEC_STATUS_VALID) { 1749 for (i = 0; pcix_sec_err_tbl[i].err_class != NULL; i++) { 1750 if ((pcix_bdg_regs->pcix_bdg_sec_stat & 1751 pcix_sec_err_tbl[i].reg_bit)) { 1752 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s%s", 1753 PCIX_ERROR_SUBCLASS, 1754 PCIX_SEC_ERROR_SUBCLASS, 1755 pcix_sec_err_tbl[i].err_class); 1756 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 1757 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 1758 PCIX_SEC_STATUS, DATA_TYPE_UINT16, 1759 pcix_bdg_regs->pcix_bdg_sec_stat, 1760 PCIX_BDG_STAT, DATA_TYPE_UINT32, 1761 pcix_bdg_regs->pcix_bdg_stat, NULL); 1762 PCI_FM_SEV_INC(pcix_sec_err_tbl[i].flags); 1763 } 1764 } 1765 } 1766 1767 /* Log/Handle ECC errors */ 1768 if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) { 1769 int ret; 1770 1771 ret = pcix_ecc_error_report(dip, 
derr, erpt_p, 1772 (void *)pcix_bdg_regs); 1773 PCI_FM_SEV_INC(ret); 1774 } 1775 return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL : 1776 (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK))); 1777 } 1778 1779 static int 1780 pcix_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p) 1781 { 1782 pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs; 1783 int fatal = 0; 1784 int nonfatal = 0; 1785 int unknown = 0; 1786 int ok = 0; 1787 char buf[FM_MAX_CLASS]; 1788 int i; 1789 1790 if (pcix_regs->pcix_vflags & PCIX_ERR_STATUS_VALID) { 1791 for (i = 0; pcix_err_tbl[i].err_class != NULL; i++) { 1792 if (!(pcix_regs->pcix_status & pcix_err_tbl[i].reg_bit)) 1793 continue; 1794 1795 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", 1796 PCIX_ERROR_SUBCLASS, pcix_err_tbl[i].err_class); 1797 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 1798 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 1799 PCIX_COMMAND, DATA_TYPE_UINT16, 1800 pcix_regs->pcix_command, PCIX_STATUS, 1801 DATA_TYPE_UINT32, pcix_regs->pcix_status, 1802 NULL); 1803 PCI_FM_SEV_INC(pcix_err_tbl[i].flags); 1804 } 1805 } 1806 /* Log/Handle ECC errors */ 1807 if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) { 1808 int ret = pcix_ecc_error_report(dip, derr, erpt_p, 1809 (void *)pcix_regs); 1810 PCI_FM_SEV_INC(ret); 1811 } 1812 1813 return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL : 1814 (unknown ? 
DDI_FM_UNKNOWN : DDI_FM_OK))); 1815 } 1816 1817 static int 1818 pcie_rc_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p, 1819 void *pe_regs) 1820 { 1821 pcie_adv_error_regs_t *pcie_adv_regs = (pcie_adv_error_regs_t *)pe_regs; 1822 int fatal = 0; 1823 int nonfatal = 0; 1824 int unknown = 0; 1825 char buf[FM_MAX_CLASS]; 1826 1827 if (pcie_adv_regs->pcie_adv_vflags & PCIE_RC_ERR_STATUS_VALID) { 1828 pcie_adv_rc_error_regs_t *pcie_rc_regs = 1829 pcie_adv_regs->pcie_adv_rc_regs; 1830 int ce, ue, mult_ce, mult_ue, first_ue_fatal, nfe, fe; 1831 1832 ce = pcie_rc_regs->pcie_rc_err_status & 1833 PCIE_AER_RE_STS_CE_RCVD; 1834 ue = pcie_rc_regs->pcie_rc_err_status & 1835 PCIE_AER_RE_STS_FE_NFE_RCVD; 1836 mult_ce = pcie_rc_regs->pcie_rc_err_status & 1837 PCIE_AER_RE_STS_MUL_CE_RCVD; 1838 mult_ue = pcie_rc_regs->pcie_rc_err_status & 1839 PCIE_AER_RE_STS_MUL_FE_NFE_RCVD; 1840 first_ue_fatal = pcie_rc_regs->pcie_rc_err_status & 1841 PCIE_AER_RE_STS_FIRST_UC_FATAL; 1842 nfe = pcie_rc_regs->pcie_rc_err_status & 1843 PCIE_AER_RE_STS_NFE_MSGS_RCVD; 1844 fe = pcie_rc_regs->pcie_rc_err_status & 1845 PCIE_AER_RE_STS_FE_MSGS_RCVD; 1846 /* 1847 * log fatal/nonfatal/corrected messages 1848 * recieved by root complex 1849 */ 1850 if (ue && fe) 1851 fatal++; 1852 else if (ce && !ue) 1853 nonfatal++; 1854 1855 if (fe && first_ue_fatal) { 1856 (void) snprintf(buf, FM_MAX_CLASS, 1857 "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_FE_MSG); 1858 pcie_ereport_post(dip, derr, erpt_p, buf, 1859 PCIEX_TYPE_RC_UE_MSG); 1860 } 1861 if (nfe && !first_ue_fatal) { 1862 (void) snprintf(buf, FM_MAX_CLASS, 1863 "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_NFE_MSG); 1864 pcie_ereport_post(dip, derr, erpt_p, buf, 1865 PCIEX_TYPE_RC_UE_MSG); 1866 } 1867 if (ce) { 1868 (void) snprintf(buf, FM_MAX_CLASS, 1869 "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_CE_MSG); 1870 pcie_ereport_post(dip, derr, erpt_p, buf, 1871 PCIEX_TYPE_RC_CE_MSG); 1872 } 1873 if (mult_ce) { 1874 (void) snprintf(buf, FM_MAX_CLASS, 1875 
"%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_MCE_MSG); 1876 pcie_ereport_post(dip, derr, erpt_p, buf, 1877 PCIEX_TYPE_RC_MULT_MSG); 1878 } 1879 if (mult_ue) { 1880 (void) snprintf(buf, FM_MAX_CLASS, 1881 "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_MUE_MSG); 1882 pcie_ereport_post(dip, derr, erpt_p, buf, 1883 PCIEX_TYPE_RC_MULT_MSG); 1884 } 1885 } 1886 return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL : 1887 (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK))); 1888 } 1889 1890 static int 1891 pcie_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p) 1892 { 1893 int fatal = 0; 1894 int nonfatal = 0; 1895 int unknown = 0; 1896 int ok = 0; 1897 char buf[FM_MAX_CLASS]; 1898 int i; 1899 pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs; 1900 pcie_adv_error_regs_t *pcie_adv_regs; 1901 pcie_adv_bdg_error_regs_t *pcie_bdg_regs; 1902 1903 if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) && 1904 (erpt_p->pe_dflags & PCIX_DEV)) { 1905 int ret = pcix_bdg_error_report(dip, derr, erpt_p, 1906 (void *)pcie_regs->pcix_bdg_regs); 1907 PCI_FM_SEV_INC(ret); 1908 } 1909 1910 if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV)) { 1911 if (!(pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID)) 1912 goto done; 1913 for (i = 0; pciex_nadv_err_tbl[i].err_class != NULL; i++) { 1914 if (!(pcie_regs->pcie_err_status & 1915 pciex_nadv_err_tbl[i].reg_bit)) 1916 continue; 1917 1918 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", 1919 PCIEX_ERROR_SUBCLASS, 1920 pciex_nadv_err_tbl[i].err_class); 1921 pcie_ereport_post(dip, derr, erpt_p, buf, 1922 PCIEX_TYPE_GEN); 1923 PCI_FM_SEV_INC(pciex_nadv_err_tbl[i].flags); 1924 } 1925 goto done; 1926 } 1927 1928 pcie_adv_regs = pcie_regs->pcie_adv_regs; 1929 1930 /* 1931 * Log PCI Express uncorrectable errors 1932 */ 1933 if (pcie_adv_regs->pcie_adv_vflags & PCIE_UE_STATUS_VALID) { 1934 for (i = 0; pciex_ue_err_tbl[i].err_class != NULL; i++) { 1935 if (!(pcie_adv_regs->pcie_ue_status & 1936 pciex_ue_err_tbl[i].reg_bit)) 1937 continue; 1938 1939 (void) 
snprintf(buf, FM_MAX_CLASS, 1940 "%s.%s", PCIEX_ERROR_SUBCLASS, 1941 pciex_ue_err_tbl[i].err_class); 1942 1943 pcie_adv_regs->pcie_adv_bdf = 0; 1944 if ((pcie_adv_regs->pcie_ue_status & 1945 PCIE_AER_UCE_LOG_BITS) != 1946 pciex_ue_err_tbl[i].reg_bit) { 1947 PCI_FM_SEV_INC(pciex_ue_err_tbl[i].flags); 1948 pcie_ereport_post(dip, derr, erpt_p, buf, 1949 PCIEX_TYPE_UE); 1950 } else { 1951 pcie_check_addr(dip, derr, erpt_p); 1952 /* 1953 * fatal/nonfatal errors are fatal/nonfatal 1954 * regardless of if we find a handle 1955 */ 1956 if (pciex_ue_err_tbl[i].flags == DDI_FM_FATAL) 1957 derr->fme_status = DDI_FM_FATAL; 1958 else if (pciex_ue_err_tbl[i].flags == 1959 DDI_FM_NONFATAL) 1960 derr->fme_status = DDI_FM_NONFATAL; 1961 pcie_ereport_post(dip, derr, erpt_p, buf, 1962 PCIEX_TYPE_UE); 1963 PCI_FM_SEV_INC(derr->fme_status); 1964 } 1965 } 1966 } 1967 1968 /* 1969 * Log PCI Express correctable errors 1970 */ 1971 if (pcie_adv_regs->pcie_adv_vflags & PCIE_CE_STATUS_VALID) { 1972 for (i = 0; pciex_ce_err_tbl[i].err_class != NULL; i++) { 1973 if (!(pcie_adv_regs->pcie_ce_status & 1974 pciex_ce_err_tbl[i].reg_bit)) 1975 continue; 1976 1977 (void) snprintf(buf, FM_MAX_CLASS, 1978 "%s.%s", PCIEX_ERROR_SUBCLASS, 1979 pciex_ce_err_tbl[i].err_class); 1980 pcie_ereport_post(dip, derr, erpt_p, buf, 1981 PCIEX_TYPE_CE); 1982 if (!fatal && !unknown) 1983 PCI_FM_SEV_INC(pciex_ce_err_tbl[i].flags); 1984 } 1985 } 1986 1987 if (!(erpt_p->pe_dflags & PCI_BRIDGE_DEV)) 1988 goto done; 1989 1990 if (erpt_p->pe_dflags & PCIEX_RC_DEV) { 1991 int ret = pcie_rc_error_report(dip, derr, erpt_p, 1992 (void *)pcie_adv_regs); 1993 PCI_FM_SEV_INC(ret); 1994 } 1995 1996 if (!((erpt_p->pe_dflags & PCIEX_2PCI_DEV) && 1997 (pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_STATUS_VALID))) 1998 goto done; 1999 2000 pcie_bdg_regs = pcie_adv_regs->pcie_adv_bdg_regs; 2001 2002 for (i = 0; pcie_sue_err_tbl[i].err_class != NULL; i++) { 2003 if ((pcie_bdg_regs->pcie_sue_status & 2004 pcie_sue_err_tbl[i].reg_bit)) { 2005 
(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", 2006 PCIEX_ERROR_SUBCLASS, 2007 pcie_sue_err_tbl[i].err_class); 2008 2009 if ((pcie_bdg_regs->pcie_sue_status & 2010 PCIE_AER_SUCE_LOG_BITS) != 2011 pcie_sue_err_tbl[i].reg_bit) { 2012 PCI_FM_SEV_INC(pcie_sue_err_tbl[i].flags); 2013 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 2014 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 2015 PCIEX_SEC_UE_STATUS, DATA_TYPE_UINT32, 2016 pcie_bdg_regs->pcie_sue_status, 2017 #ifdef DEBUG 2018 PCIEX_SUE_HDR0, DATA_TYPE_UINT32, 2019 pcie_bdg_regs->pcie_sue_hdr0, 2020 PCIEX_SUE_HDR1, DATA_TYPE_UINT32, 2021 pcie_bdg_regs->pcie_sue_hdr[0], 2022 PCIEX_SUE_HDR2, DATA_TYPE_UINT32, 2023 pcie_bdg_regs->pcie_sue_hdr[1], 2024 PCIEX_SUE_HDR3, DATA_TYPE_UINT32, 2025 pcie_bdg_regs->pcie_sue_hdr[2], 2026 #endif 2027 NULL); 2028 } else { 2029 pcie_adv_regs->pcie_adv_bdf = 0; 2030 pcie_pci_check_addr(dip, derr, erpt_p); 2031 /* 2032 * fatal/nonfatal errors are fatal/nonfatal 2033 * regardless of if we find a handle 2034 */ 2035 if (pcie_sue_err_tbl[i].flags == DDI_FM_FATAL) 2036 derr->fme_status = DDI_FM_FATAL; 2037 else if (pcie_sue_err_tbl[i].flags == 2038 DDI_FM_NONFATAL) 2039 derr->fme_status = DDI_FM_NONFATAL; 2040 2041 ddi_fm_ereport_post(dip, buf, derr->fme_ena, 2042 DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, 2043 PCIEX_SEC_UE_STATUS, DATA_TYPE_UINT32, 2044 pcie_bdg_regs->pcie_sue_status, 2045 PCIEX_SRC_ID, DATA_TYPE_UINT16, 2046 pcie_adv_regs->pcie_adv_bdf, 2047 PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE, 2048 (pcie_adv_regs->pcie_adv_bdf != NULL) ? 2049 1 : NULL, 2050 #ifdef DEBUG 2051 PCIEX_SUE_HDR0, DATA_TYPE_UINT32, 2052 pcie_bdg_regs->pcie_sue_hdr0, 2053 PCIEX_SUE_HDR1, DATA_TYPE_UINT32, 2054 pcie_bdg_regs->pcie_sue_hdr[0], 2055 PCIEX_SUE_HDR2, DATA_TYPE_UINT32, 2056 pcie_bdg_regs->pcie_sue_hdr[1], 2057 PCIEX_SUE_HDR3, DATA_TYPE_UINT32, 2058 pcie_bdg_regs->pcie_sue_hdr[2], 2059 #endif 2060 NULL); 2061 PCI_FM_SEV_INC(derr->fme_status); 2062 } 2063 } 2064 } 2065 done: 2066 return (fatal ? 
DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}

/*
 * Post ereports for every error bit recorded for a generic PCI device,
 * then hand off to the PCI Express or PCI-X reporting code as appropriate
 * for the device type.  The aggregate severity of everything reported is
 * folded into derr->fme_status for the caller.
 */
static void
pci_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
{
	int fatal = 0;		/* severity counters for PCI_FM_SEV_INC() */
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;
	char buf[FM_MAX_CLASS];	/* ereport class string under construction */
	int i;

	if (derr->fme_flag == DDI_FM_ERR_UNEXPECTED) {
		/*
		 * Log generic PCI errors.
		 */
		for (i = 0; pci_err_tbl[i].err_class != NULL; i++) {
			if (!(erpt_p->pe_pci_regs->pci_err_status &
			    pci_err_tbl[i].reg_bit) ||
			    !(erpt_p->pe_pci_regs->pci_vflags &
			    PCI_ERR_STATUS_VALID))
				continue;
			/*
			 * Generate an ereport for this error bit.
			 */
			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
			    PCI_ERROR_SUBCLASS, pci_err_tbl[i].err_class);
			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
			    PCI_CONFIG_STATUS, DATA_TYPE_UINT16,
			    erpt_p->pe_pci_regs->pci_err_status,
			    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16,
			    erpt_p->pe_pci_regs->pci_cfg_comm, NULL);

			/*
			 * For PCIe devices severity is determined by the
			 * PCIe-specific report below, not the legacy bits.
			 */
			if (!(erpt_p->pe_dflags & PCIEX_DEV))
				PCI_FM_SEV_INC(pci_err_tbl[i].flags);
		}
		if (erpt_p->pe_dflags & PCIEX_DEV) {
			int ret = pcie_error_report(dip, derr, erpt_p);
			PCI_FM_SEV_INC(ret);
		} else if (erpt_p->pe_dflags & PCIX_DEV) {
			if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
				int ret = pcix_bdg_error_report(dip, derr,
				    erpt_p, erpt_p->pe_regs);
				PCI_FM_SEV_INC(ret);
			} else {
				int ret = pcix_error_report(dip, derr, erpt_p);
				PCI_FM_SEV_INC(ret);
			}
		}
	}

	/* Bridges additionally report their secondary-side errors */
	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV)) {
		int ret = pci_bdg_error_report(dip, derr, erpt_p);
		PCI_FM_SEV_INC(ret);
	}

	/* Fold the severity counters into a single status for the caller */
	derr->fme_status = (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ?
DDI_FM_UNKNOWN : DDI_FM_OK))); 2127 } 2128 2129 void 2130 pci_ereport_post(dev_info_t *dip, ddi_fm_error_t *derr, uint16_t *xx_status) 2131 { 2132 struct i_ddi_fmhdl *fmhdl; 2133 pci_erpt_t *erpt_p; 2134 2135 fmhdl = DEVI(dip)->devi_fmhdl; 2136 if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) && 2137 !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) { 2138 i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_NOSLEEP); 2139 return; 2140 } 2141 2142 ASSERT(fmhdl); 2143 2144 if (derr->fme_ena == NULL) 2145 derr->fme_ena = fm_ena_generate(0, FM_ENA_FMT1); 2146 2147 erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific; 2148 if (erpt_p == NULL) { 2149 i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_NOSLEEP); 2150 return; 2151 } 2152 2153 pci_regs_gather(dip, erpt_p); 2154 pci_error_report(dip, derr, erpt_p); 2155 pci_regs_clear(erpt_p); 2156 2157 if (xx_status != NULL) 2158 *xx_status = erpt_p->pe_pci_regs->pci_err_status; 2159 } 2160 2161 /* 2162 * private version of walk_devs() that can be used during panic. No 2163 * sleeping or locking required. 2164 */ 2165 static int 2166 pci_fm_walk_devs(dev_info_t *dip, int (*f)(dev_info_t *, void *), void *arg) 2167 { 2168 while (dip) { 2169 switch ((*f)(dip, arg)) { 2170 case DDI_WALK_TERMINATE: 2171 return (DDI_WALK_TERMINATE); 2172 case DDI_WALK_CONTINUE: 2173 if (pci_fm_walk_devs(ddi_get_child(dip), f, 2174 arg) == DDI_WALK_TERMINATE) 2175 return (DDI_WALK_TERMINATE); 2176 break; 2177 case DDI_WALK_PRUNECHILD: 2178 break; 2179 } 2180 dip = ddi_get_next_sibling(dip); 2181 } 2182 return (DDI_WALK_CONTINUE); 2183 } 2184 2185 /* 2186 * need special version of ddi_fm_ereport_post() as the leaf driver may 2187 * not be hardened. 2188 */ 2189 static void 2190 pci_fm_ereport_post(dev_info_t *dip, const char *error_class, uint64_t ena, 2191 uint8_t version, ...) 
{
	char *name;
	char device_path[MAXPATHLEN];
	char ddi_error_class[FM_MAX_CLASS];
	nvlist_t *ereport, *detector;
	nv_alloc_t *nva;
	errorq_elem_t *eqep;
	va_list ap;

	if (panicstr) {
		/*
		 * In panic context we cannot allocate; reserve a
		 * pre-allocated element from the system ereport queue
		 * and build the nvlists in its buffer.
		 */
		eqep = errorq_reserve(ereport_errorq);
		if (eqep == NULL)
			return;
		ereport = errorq_elem_nvl(ereport_errorq, eqep);
		nva = errorq_elem_nva(ereport_errorq, eqep);
		detector = fm_nvlist_create(nva);
	} else {
		ereport = fm_nvlist_create(NULL);
		detector = fm_nvlist_create(NULL);
	}

	/* Identify the detector by its device path */
	(void) ddi_pathname(dip, device_path);
	fm_fmri_dev_set(detector, FM_DEV_SCHEME_VERSION, NULL,
	    device_path, NULL);
	(void) snprintf(ddi_error_class, FM_MAX_CLASS, "%s.%s",
	    DDI_IO_CLASS, error_class);
	fm_ereport_set(ereport, version, ddi_error_class, ena, detector, NULL);

	/* Append the caller-supplied name/value payload pairs */
	va_start(ap, version);
	name = va_arg(ap, char *);
	(void) i_fm_payload_set(ereport, name, ap);
	va_end(ap);

	if (panicstr) {
		errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
	} else {
		(void) fm_ereport_post(ereport, EVCH_TRYHARD);
		fm_nvlist_destroy(ereport, FM_NVA_FREE);
		fm_nvlist_destroy(detector, FM_NVA_FREE);
	}
}

/*
 * Tree-walk callback: decide whether the pci address recorded in
 * the pci_target_err_t argument falls within one of this device's
 * register sets; if so, record the device as the target.
 */
static int
pci_check_regs(dev_info_t *dip, void *arg)
{
	int reglen;
	int rn;
	int totreg;
	pci_regspec_t *drv_regp;
	pci_target_err_t *tgt_err = (pci_target_err_t *)arg;

	if (tgt_err->tgt_pci_space == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) {
		/*
		 * for config space, we need to check if the given address
		 * is a valid config space address for this device - based
		 * on pci_phys_hi of the config space entry in reg property.
		 */
		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&drv_regp, &reglen) != DDI_SUCCESS)
			return (DDI_WALK_CONTINUE);

		totreg = reglen / sizeof (pci_regspec_t);
		for (rn = 0; rn < totreg; rn++) {
			/* match on bus/device/function of the config entry */
			if (tgt_err->tgt_pci_space ==
			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi) &&
			    (tgt_err->tgt_pci_addr & (PCI_REG_BUS_M |
			    PCI_REG_DEV_M | PCI_REG_FUNC_M)) ==
			    (drv_regp[rn].pci_phys_hi & (PCI_REG_BUS_M |
			    PCI_REG_DEV_M | PCI_REG_FUNC_M))) {
				tgt_err->tgt_dip = dip;
				kmem_free(drv_regp, reglen);
				return (DDI_WALK_TERMINATE);
			}
		}
		kmem_free(drv_regp, reglen);
	} else {
		/*
		 * for non config space, need to check reg to look
		 * for any non-relocable mapping, otherwise check
		 * assigned-addresses.
		 */
		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&drv_regp, &reglen) != DDI_SUCCESS)
			return (DDI_WALK_CONTINUE);

		totreg = reglen / sizeof (pci_regspec_t);
		for (rn = 0; rn < totreg; rn++) {
			/*
			 * Non-relocatable entry whose 64-bit address range
			 * (phys_mid:phys_low .. + size_hi:size_low) covers
			 * the target address.
			 */
			if ((drv_regp[rn].pci_phys_hi & PCI_RELOCAT_B) &&
			    (tgt_err->tgt_pci_space == TGT_PCI_SPACE_UNKNOWN ||
			    tgt_err->tgt_pci_space ==
			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi)) &&
			    (tgt_err->tgt_pci_addr >=
			    (uint64_t)drv_regp[rn].pci_phys_low +
			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32)) &&
			    (tgt_err->tgt_pci_addr <
			    (uint64_t)drv_regp[rn].pci_phys_low +
			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32) +
			    (uint64_t)drv_regp[rn].pci_size_low +
			    ((uint64_t)drv_regp[rn].pci_size_hi << 32))) {
				tgt_err->tgt_dip = dip;
				kmem_free(drv_regp, reglen);
				return (DDI_WALK_TERMINATE);
			}
		}
		kmem_free(drv_regp, reglen);

		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
		    "assigned-addresses", (caddr_t)&drv_regp, &reglen) !=
		    DDI_SUCCESS)
			return (DDI_WALK_CONTINUE);

		totreg = reglen / sizeof (pci_regspec_t);
		for (rn = 0;
rn < totreg; rn++) {
			/* same 64-bit range test against assigned-addresses */
			if ((tgt_err->tgt_pci_space == TGT_PCI_SPACE_UNKNOWN ||
			    tgt_err->tgt_pci_space ==
			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi)) &&
			    (tgt_err->tgt_pci_addr >=
			    (uint64_t)drv_regp[rn].pci_phys_low +
			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32)) &&
			    (tgt_err->tgt_pci_addr <
			    (uint64_t)drv_regp[rn].pci_phys_low +
			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32) +
			    (uint64_t)drv_regp[rn].pci_size_low +
			    ((uint64_t)drv_regp[rn].pci_size_hi << 32))) {
				tgt_err->tgt_dip = dip;
				kmem_free(drv_regp, reglen);
				return (DDI_WALK_TERMINATE);
			}
		}
		kmem_free(drv_regp, reglen);
	}
	return (DDI_WALK_CONTINUE);
}

/*
 * impl_fix_ranges - fixes the config space entry of the "ranges"
 * property on psycho+ platforms. (if changing this function please make sure
 * to change the pci_fix_ranges function in pcipsy.c)
 */
/*ARGSUSED*/
static void
pci_fix_ranges(dev_info_t *dip, pci_ranges_t *pci_ranges, int nrange)
{
#if defined(__sparc)
	char *name = ddi_binding_name(dip);

	/* only the host bridges listed below need the fixup */
	if ((strcmp(name, "pci108e,8000") == 0) ||
	    (strcmp(name, "pci108e,a000") == 0) ||
	    (strcmp(name, "pci108e,a001") == 0)) {
		int i;
		for (i = 0; i < nrange; i++, pci_ranges++)
			if ((pci_ranges->child_high & PCI_REG_ADDR_M) ==
			    PCI_ADDR_CONFIG)
				pci_ranges->parent_low |=
				    pci_ranges->child_high;
	}
#endif
}

/*
 * Tree-walk callback: if this node is a pci/pciex nexus whose "ranges"
 * covers tgt_err_addr, translate the address to a pci address and walk
 * the children looking for the target device (via pci_check_regs()).
 */
static int
pci_check_ranges(dev_info_t *dip, void *arg)
{
	uint64_t range_parent_begin;
	uint64_t range_parent_size;
	uint64_t range_parent_end;
	uint32_t space_type;
	uint32_t bus_num;
	uint32_t range_offset;
	pci_ranges_t *pci_ranges, *rangep;
	pci_bus_range_t *pci_bus_rangep;
	int pci_ranges_length;
	int nrange;
	pci_target_err_t *tgt_err = (pci_target_err_t *)arg;
	int i, size;

	if (strcmp(ddi_node_name(dip), "pci") != 0 &&
	    strcmp(ddi_node_name(dip), "pciex") != 0)
		return (DDI_WALK_CONTINUE);

	/*
	 * Get the ranges property. Note we only look at the top level pci
	 * node (hostbridge) which has a ranges property of type pci_ranges_t
	 * not at pci-pci bridges.
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "ranges",
	    (caddr_t)&pci_ranges, &pci_ranges_length) != DDI_SUCCESS) {
		/*
		 * no ranges property - no translation needed
		 */
		tgt_err->tgt_pci_addr = tgt_err->tgt_err_addr;
		tgt_err->tgt_pci_space = TGT_PCI_SPACE_UNKNOWN;
		if (panicstr)
			/* lock-free walk; cannot use ndi_devi_enter() */
			(void) pci_fm_walk_devs(ddi_get_child(dip),
			    pci_check_regs, (void *)tgt_err);
		else {
			int circ = 0;
			ndi_devi_enter(dip, &circ);
			ddi_walk_devs(ddi_get_child(dip), pci_check_regs,
			    (void *)tgt_err);
			ndi_devi_exit(dip, circ);
		}
		if (tgt_err->tgt_dip != NULL)
			return (DDI_WALK_TERMINATE);
		return (DDI_WALK_PRUNECHILD);
	}
	nrange = pci_ranges_length / sizeof (pci_ranges_t);
	rangep = pci_ranges;

	/* Need to fix the pci ranges property for psycho based systems */
	pci_fix_ranges(dip, pci_ranges, nrange);

	for (i = 0; i < nrange; i++, rangep++) {
		range_parent_begin = ((uint64_t)rangep->parent_high << 32) +
		    rangep->parent_low;
		range_parent_size = ((uint64_t)rangep->size_high << 32) +
		    rangep->size_low;
		range_parent_end = range_parent_begin + range_parent_size - 1;

		if ((tgt_err->tgt_err_addr < range_parent_begin) ||
		    (tgt_err->tgt_err_addr > range_parent_end)) {
			/* Not in range */
			continue;
		}
		space_type = PCI_REG_ADDR_G(rangep->child_high);
		if (space_type == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) {
			/* Config space address - check bus range */
			range_offset = tgt_err->tgt_err_addr -
			    range_parent_begin;
			bus_num = PCI_REG_BUS_G(range_offset);
			if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "bus-range",
			    (caddr_t)&pci_bus_rangep, &size) != DDI_SUCCESS) {
				continue;
			}
			if ((bus_num < pci_bus_rangep->lo) ||
			    (bus_num > pci_bus_rangep->hi)) {
				/*
				 * Bus number not appropriate for this
				 * pci nexus.
				 */
				kmem_free(pci_bus_rangep, size);
				continue;
			}
			kmem_free(pci_bus_rangep, size);
		}

		/* We have a match if we get here - compute pci address */
		tgt_err->tgt_pci_addr = tgt_err->tgt_err_addr -
		    range_parent_begin;
		tgt_err->tgt_pci_addr += (((uint64_t)rangep->child_mid << 32) +
		    rangep->child_low);
		tgt_err->tgt_pci_space = space_type;
		if (panicstr)
			/* lock-free walk; cannot use ndi_devi_enter() */
			(void) pci_fm_walk_devs(ddi_get_child(dip),
			    pci_check_regs, (void *)tgt_err);
		else {
			int circ = 0;
			ndi_devi_enter(dip, &circ);
			ddi_walk_devs(ddi_get_child(dip), pci_check_regs,
			    (void *)tgt_err);
			ndi_devi_exit(dip, circ);
		}
		if (tgt_err->tgt_dip != NULL) {
			kmem_free(pci_ranges, pci_ranges_length);
			return (DDI_WALK_TERMINATE);
		}
	}
	kmem_free(pci_ranges, pci_ranges_length);
	return (DDI_WALK_PRUNECHILD);
}

/*
 * Function used to drain pci_target_queue, either during panic or after softint
 * is generated, to generate target device ereports based on captured physical
 * addresses
 */
/*ARGSUSED*/
static void
pci_target_drain(void *private_p, pci_target_err_t *tgt_err)
{
	char buf[FM_MAX_CLASS];

	/*
	 * The following assumes that all pci_pci bridge devices
	 * are configured as transparant. Find the top-level pci
	 * nexus which has tgt_err_addr in one of its ranges, converting this
	 * to a pci address in the process. Then starting at this node do
	 * another tree walk to find a device with the pci address we've
	 * found within range of one of it's assigned-addresses properties.
	 */
	tgt_err->tgt_dip = NULL;
	if (panicstr)
		/* panic context: use the lock-free walker */
		(void) pci_fm_walk_devs(ddi_root_node(), pci_check_ranges,
		    (void *)tgt_err);
	else
		ddi_walk_devs(ddi_root_node(), pci_check_ranges,
		    (void *)tgt_err);
	if (tgt_err->tgt_dip == NULL)
		return;

	/* Post the target ereport against the device we located */
	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", tgt_err->tgt_bridge_type,
	    tgt_err->tgt_err_class);
	pci_fm_ereport_post(tgt_err->tgt_dip, buf, tgt_err->tgt_err_ena, 0,
	    PCI_PA, DATA_TYPE_UINT64, tgt_err->tgt_err_addr, NULL);
}

/*
 * Queue a captured target-device error for asynchronous processing by
 * pci_target_drain().  The error data is passed to the errorq by value
 * (sizeof (pci_target_err_t) bytes), so the local struct is sufficient.
 */
void
pci_target_enqueue(uint64_t ena, char *class, char *bridge_type, uint64_t addr)
{
	pci_target_err_t tgt_err;

	tgt_err.tgt_err_ena = ena;
	tgt_err.tgt_err_class = class;
	tgt_err.tgt_bridge_type = bridge_type;
	tgt_err.tgt_err_addr = addr;
	errorq_dispatch(pci_target_queue, (void *)&tgt_err,
	    sizeof (pci_target_err_t), ERRORQ_ASYNC);
}

void
pci_targetq_init(void)
{
	/*
	 * PCI target errorq, to schedule async handling of generation of
	 * target device ereports based on captured physical address.
	 * The errorq is created here but destroyed when _fini is called
	 * for the pci module.
	 */
	if (pci_target_queue == NULL) {
		pci_target_queue = errorq_create("pci_target_queue",
		    (errorq_func_t)pci_target_drain, (void *)NULL,
		    TARGET_MAX_ERRS, sizeof (pci_target_err_t), FM_ERR_PIL,
		    ERRORQ_VITAL);
		if (pci_target_queue == NULL)
			panic("failed to create required system error queue");
	}
}