1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include <sys/sysmacros.h> 27 #include <sys/types.h> 28 #include <sys/kmem.h> 29 #include <sys/modctl.h> 30 #include <sys/ddi.h> 31 #include <sys/sunddi.h> 32 #include <sys/sunndi.h> 33 #include <sys/fm/protocol.h> 34 #include <sys/fm/util.h> 35 #include <sys/promif.h> 36 #include <sys/disp.h> 37 #include <sys/pcie.h> 38 #include <sys/pci_cap.h> 39 #include <sys/pcie_impl.h> 40 41 static void pcie_init_pfd(dev_info_t *); 42 static void pcie_fini_pfd(dev_info_t *); 43 44 #if defined(__i386) || defined(__amd64) 45 static void pcie_check_io_mem_range(ddi_acc_handle_t, boolean_t *, boolean_t *); 46 #endif /* defined(__i386) || defined(__amd64) */ 47 48 #ifdef DEBUG 49 uint_t pcie_debug_flags = 0; 50 static void pcie_print_bus(pcie_bus_t *bus_p); 51 #endif /* DEBUG */ 52 53 /* Variable to control default PCI-Express config settings */ 54 ushort_t pcie_command_default = 55 PCI_COMM_SERR_ENABLE | 56 PCI_COMM_WAIT_CYC_ENAB | 57 PCI_COMM_PARITY_DETECT | 58 PCI_COMM_ME | 59 PCI_COMM_MAE | 60 PCI_COMM_IO; 61 62 /* xxx_fw are bits that are controlled by FW and should not be modified */ 63 ushort_t pcie_command_default_fw = 64 PCI_COMM_SPEC_CYC | 65 PCI_COMM_MEMWR_INVAL | 66 PCI_COMM_PALETTE_SNOOP | 67 PCI_COMM_WAIT_CYC_ENAB | 68 0xF800; /* Reserved Bits */ 69 70 ushort_t pcie_bdg_command_default_fw = 71 PCI_BCNF_BCNTRL_ISA_ENABLE | 72 PCI_BCNF_BCNTRL_VGA_ENABLE | 73 0xF000; /* Reserved Bits */ 74 75 /* PCI-Express Base error defaults */ 76 ushort_t pcie_base_err_default = 77 PCIE_DEVCTL_CE_REPORTING_EN | 78 PCIE_DEVCTL_NFE_REPORTING_EN | 79 PCIE_DEVCTL_FE_REPORTING_EN | 80 PCIE_DEVCTL_UR_REPORTING_EN; 81 82 /* PCI-Express Device Control Register */ 83 uint16_t pcie_devctl_default = PCIE_DEVCTL_RO_EN | 84 PCIE_DEVCTL_MAX_READ_REQ_512; 85 86 /* PCI-Express AER Root Control Register */ 87 #define PCIE_ROOT_SYS_ERR (PCIE_ROOTCTL_SYS_ERR_ON_CE_EN | \ 88 PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN | \ 89 PCIE_ROOTCTL_SYS_ERR_ON_FE_EN) 90 91 #if defined(__xpv) 92 ushort_t pcie_root_ctrl_default = 93 PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN | 94 PCIE_ROOTCTL_SYS_ERR_ON_FE_EN; 95 #else 96 ushort_t pcie_root_ctrl_default = 97 PCIE_ROOTCTL_SYS_ERR_ON_CE_EN | 98 PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN | 99 PCIE_ROOTCTL_SYS_ERR_ON_FE_EN; 100 #endif /* __xpv */ 101 102 /* PCI-Express Root Error Command Register */ 103 ushort_t pcie_root_error_cmd_default = 104 PCIE_AER_RE_CMD_CE_REP_EN | 105 PCIE_AER_RE_CMD_NFE_REP_EN | 106 PCIE_AER_RE_CMD_FE_REP_EN; 107 108 /* ECRC settings in the PCIe AER Control Register */ 109 uint32_t pcie_ecrc_value = 110 PCIE_AER_CTL_ECRC_GEN_ENA | 111 PCIE_AER_CTL_ECRC_CHECK_ENA; 112 113 /* 114 * If a particular platform wants to disable certain errors such as UR/MA, 115 * instead of using #defines have the platform's PCIe Root Complex driver set 116 * these masks using the pcie_get_XXX_mask and pcie_set_XXX_mask functions. For 117 * x86 the closest thing to a PCIe root complex driver is NPE. For SPARC the 118 * closest PCIe root complex driver is PX. 119 * 120 * pcie_serr_disable_flag : disable SERR only (in RCR and command reg) x86 121 * systems may want to disable SERR in general. For root ports, enabling SERR 122 * causes NMIs which are not handled and results in a watchdog timeout error. 123 */ 124 uint32_t pcie_aer_uce_mask = 0; /* AER UE Mask */ 125 uint32_t pcie_aer_ce_mask = 0; /* AER CE Mask */ 126 uint32_t pcie_aer_suce_mask = 0; /* AER Secondary UE Mask */ 127 uint32_t pcie_serr_disable_flag = 0; /* Disable SERR */ 128 129 /* Default severities needed for eversholt. Error handling doesn't care */ 130 uint32_t pcie_aer_uce_severity = PCIE_AER_UCE_MTLP | PCIE_AER_UCE_RO | \ 131 PCIE_AER_UCE_FCP | PCIE_AER_UCE_SD | PCIE_AER_UCE_DLP | \ 132 PCIE_AER_UCE_TRAINING; 133 uint32_t pcie_aer_suce_severity = PCIE_AER_SUCE_SERR_ASSERT | \ 134 PCIE_AER_SUCE_UC_ADDR_ERR | PCIE_AER_SUCE_UC_ATTR_ERR | \ 135 PCIE_AER_SUCE_USC_MSG_DATA_ERR; 136 137 int pcie_max_mps = PCIE_DEVCTL_MAX_PAYLOAD_4096 >> 5; 138 139 static void pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip, 140 int *max_supported); 141 static int pcie_get_max_supported(dev_info_t *dip, void *arg); 142 static int pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec, 143 caddr_t *addrp, ddi_acc_handle_t *handlep); 144 static void pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph); 145 146 /* 147 * modload support 148 */ 149 150 static struct modlmisc modlmisc = { 151 &mod_miscops, /* Type of module */ 152 "PCIE: PCI framework" 153 }; 154 155 static struct modlinkage modlinkage = { 156 MODREV_1, 157 (void *)&modlmisc, 158 NULL 159 }; 160 161 /* 162 * Global Variables needed for a non-atomic version of ddi_fm_ereport_post. 163 * Currently used to send the pci.fabric ereports whose payload depends on the 164 * type of PCI device it is being sent for. 165 */ 166 char *pcie_nv_buf; 167 nv_alloc_t *pcie_nvap; 168 nvlist_t *pcie_nvl; 169 170 int 171 _init(void) 172 { 173 int rval; 174 175 pcie_nv_buf = kmem_alloc(ERPT_DATA_SZ, KM_SLEEP); 176 pcie_nvap = fm_nva_xcreate(pcie_nv_buf, ERPT_DATA_SZ); 177 pcie_nvl = fm_nvlist_create(pcie_nvap); 178 179 rval = mod_install(&modlinkage); 180 return (rval); 181 } 182 183 int 184 _fini() 185 { 186 int rval; 187 188 fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN); 189 fm_nva_xdestroy(pcie_nvap); 190 kmem_free(pcie_nv_buf, ERPT_DATA_SZ); 191 192 rval = mod_remove(&modlinkage); 193 return (rval); 194 } 195 196 int 197 _info(struct modinfo *modinfop) 198 { 199 return (mod_info(&modlinkage, modinfop)); 200 } 201 202 /* 203 * PCI-Express child device initialization. 204 * This function enables generic pci-express interrupts and error 205 * handling. 206 * 207 * @param pdip root dip (root nexus's dip) 208 * @param cdip child's dip (device's dip) 209 * @return DDI_SUCCESS or DDI_FAILURE 210 */ 211 /* ARGSUSED */ 212 int 213 pcie_initchild(dev_info_t *cdip) 214 { 215 uint16_t tmp16, reg16; 216 pcie_bus_t *bus_p; 217 218 bus_p = PCIE_DIP2BUS(cdip); 219 if (bus_p == NULL) { 220 PCIE_DBG("%s: BUS not found.\n", 221 ddi_driver_name(cdip)); 222 223 return (DDI_FAILURE); 224 } 225 226 /* Clear the device's status register */ 227 reg16 = PCIE_GET(16, bus_p, PCI_CONF_STAT); 228 PCIE_PUT(16, bus_p, PCI_CONF_STAT, reg16); 229 230 /* Setup the device's command register */ 231 reg16 = PCIE_GET(16, bus_p, PCI_CONF_COMM); 232 tmp16 = (reg16 & pcie_command_default_fw) | pcie_command_default; 233 234 #if defined(__i386) || defined(__amd64) 235 boolean_t empty_io_range = B_FALSE; 236 boolean_t empty_mem_range = B_FALSE; 237 /* 238 * Check for empty IO and Mem ranges on bridges. If so disable IO/Mem 239 * access as it can cause a hang if enabled. 240 */ 241 pcie_check_io_mem_range(bus_p->bus_cfg_hdl, &empty_io_range, 242 &empty_mem_range); 243 if ((empty_io_range == B_TRUE) && 244 (pcie_command_default & PCI_COMM_IO)) { 245 tmp16 &= ~PCI_COMM_IO; 246 PCIE_DBG("No I/O range found for %s, bdf 0x%x\n", 247 ddi_driver_name(cdip), bus_p->bus_bdf); 248 } 249 if ((empty_mem_range == B_TRUE) && 250 (pcie_command_default & PCI_COMM_MAE)) { 251 tmp16 &= ~PCI_COMM_MAE; 252 PCIE_DBG("No Mem range found for %s, bdf 0x%x\n", 253 ddi_driver_name(cdip), bus_p->bus_bdf); 254 } 255 #endif /* defined(__i386) || defined(__amd64) */ 256 257 if (pcie_serr_disable_flag && PCIE_IS_PCIE(bus_p)) 258 tmp16 &= ~PCI_COMM_SERR_ENABLE; 259 260 PCIE_PUT(16, bus_p, PCI_CONF_COMM, tmp16); 261 PCIE_DBG_CFG(cdip, bus_p, "COMMAND", 16, PCI_CONF_COMM, reg16); 262 263 /* 264 * If the device has a bus control register then program it 265 * based on the settings in the command register. 266 */ 267 if (PCIE_IS_BDG(bus_p)) { 268 /* Clear the device's secondary status register */ 269 reg16 = PCIE_GET(16, bus_p, PCI_BCNF_SEC_STATUS); 270 PCIE_PUT(16, bus_p, PCI_BCNF_SEC_STATUS, reg16); 271 272 /* Setup the device's secondary command register */ 273 reg16 = PCIE_GET(16, bus_p, PCI_BCNF_BCNTRL); 274 tmp16 = (reg16 & pcie_bdg_command_default_fw); 275 276 tmp16 |= PCI_BCNF_BCNTRL_SERR_ENABLE; 277 /* 278 * Workaround for this Nvidia bridge. Don't enable the SERR 279 * enable bit in the bridge control register as it could lead to 280 * bogus NMIs. 281 */ 282 if (bus_p->bus_dev_ven_id == 0x037010DE) 283 tmp16 &= ~PCI_BCNF_BCNTRL_SERR_ENABLE; 284 285 if (pcie_command_default & PCI_COMM_PARITY_DETECT) 286 tmp16 |= PCI_BCNF_BCNTRL_PARITY_ENABLE; 287 288 /* 289 * Enable Master Abort Mode only if URs have not been masked. 290 * For PCI and PCIe-PCI bridges, enabling this bit causes a 291 * Master Aborts/UR to be forwarded as a UR/TA or SERR. If this 292 * bit is masked, posted requests are dropped and non-posted 293 * requests are returned with -1. 294 */ 295 if (pcie_aer_uce_mask & PCIE_AER_UCE_UR) 296 tmp16 &= ~PCI_BCNF_BCNTRL_MAST_AB_MODE; 297 else 298 tmp16 |= PCI_BCNF_BCNTRL_MAST_AB_MODE; 299 PCIE_PUT(16, bus_p, PCI_BCNF_BCNTRL, tmp16); 300 PCIE_DBG_CFG(cdip, bus_p, "SEC CMD", 16, PCI_BCNF_BCNTRL, 301 reg16); 302 } 303 304 if (PCIE_IS_PCIE(bus_p)) { 305 /* Setup PCIe device control register */ 306 reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL); 307 tmp16 = pcie_devctl_default; 308 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16); 309 PCIE_DBG_CAP(cdip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16); 310 311 /* Enable PCIe errors */ 312 pcie_enable_errors(cdip); 313 } 314 315 if (pcie_initchild_mps(cdip) == DDI_FAILURE) 316 return (DDI_FAILURE); 317 318 return (DDI_SUCCESS); 319 } 320 321 #define PCIE_ZALLOC(data) kmem_zalloc(sizeof (data), KM_SLEEP) 322 static void 323 pcie_init_pfd(dev_info_t *dip) 324 { 325 pf_data_t *pfd_p = PCIE_ZALLOC(pf_data_t); 326 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 327 328 PCIE_DIP2PFD(dip) = pfd_p; 329 330 pfd_p->pe_bus_p = bus_p; 331 pfd_p->pe_severity_flags = 0; 332 pfd_p->pe_lock = B_FALSE; 333 pfd_p->pe_valid = B_FALSE; 334 335 /* Allocate the root fault struct for both RC and RP */ 336 if (PCIE_IS_ROOT(bus_p)) { 337 PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t); 338 PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF; 339 } 340 341 PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t); 342 343 if (PCIE_IS_BDG(bus_p)) 344 PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t); 345 346 if (PCIE_IS_PCIE(bus_p)) { 347 PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t); 348 349 if (PCIE_IS_RP(bus_p)) 350 PCIE_RP_REG(pfd_p) = 351 PCIE_ZALLOC(pf_pcie_rp_err_regs_t); 352 353 PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t); 354 PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF; 355 356 if (PCIE_IS_RP(bus_p)) { 357 PCIE_ADV_RP_REG(pfd_p) = 358 PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t); 359 PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id = 360 PCIE_INVALID_BDF; 361 PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id = 362 PCIE_INVALID_BDF; 363 } else if (PCIE_IS_PCIE_BDG(bus_p)) { 364 PCIE_ADV_BDG_REG(pfd_p) = 365 PCIE_ZALLOC(pf_pcie_adv_bdg_err_regs_t); 366 PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf = 367 PCIE_INVALID_BDF; 368 } 369 370 if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) { 371 PCIX_BDG_ERR_REG(pfd_p) = 372 PCIE_ZALLOC(pf_pcix_bdg_err_regs_t); 373 374 if (PCIX_ECC_VERSION_CHECK(bus_p)) { 375 PCIX_BDG_ECC_REG(pfd_p, 0) = 376 PCIE_ZALLOC(pf_pcix_ecc_regs_t); 377 PCIX_BDG_ECC_REG(pfd_p, 1) = 378 PCIE_ZALLOC(pf_pcix_ecc_regs_t); 379 } 380 } 381 } else if (PCIE_IS_PCIX(bus_p)) { 382 if (PCIE_IS_BDG(bus_p)) { 383 PCIX_BDG_ERR_REG(pfd_p) = 384 PCIE_ZALLOC(pf_pcix_bdg_err_regs_t); 385 386 if (PCIX_ECC_VERSION_CHECK(bus_p)) { 387 PCIX_BDG_ECC_REG(pfd_p, 0) = 388 PCIE_ZALLOC(pf_pcix_ecc_regs_t); 389 PCIX_BDG_ECC_REG(pfd_p, 1) = 390 PCIE_ZALLOC(pf_pcix_ecc_regs_t); 391 } 392 } else { 393 PCIX_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcix_err_regs_t); 394 395 if (PCIX_ECC_VERSION_CHECK(bus_p)) 396 PCIX_ECC_REG(pfd_p) = 397 PCIE_ZALLOC(pf_pcix_ecc_regs_t); 398 } 399 } 400 } 401 402 static void 403 pcie_fini_pfd(dev_info_t *dip) 404 { 405 pf_data_t *pfd_p = PCIE_DIP2PFD(dip); 406 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 407 408 if (PCIE_IS_PCIE(bus_p)) { 409 if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) { 410 if (PCIX_ECC_VERSION_CHECK(bus_p)) { 411 kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0), 412 sizeof (pf_pcix_ecc_regs_t)); 413 kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1), 414 sizeof (pf_pcix_ecc_regs_t)); 415 } 416 417 kmem_free(PCIX_BDG_ERR_REG(pfd_p), 418 sizeof (pf_pcix_bdg_err_regs_t)); 419 } 420 421 if (PCIE_IS_RP(bus_p)) 422 kmem_free(PCIE_ADV_RP_REG(pfd_p), 423 sizeof (pf_pcie_adv_rp_err_regs_t)); 424 else if (PCIE_IS_PCIE_BDG(bus_p)) 425 kmem_free(PCIE_ADV_BDG_REG(pfd_p), 426 sizeof (pf_pcie_adv_bdg_err_regs_t)); 427 428 kmem_free(PCIE_ADV_REG(pfd_p), 429 sizeof (pf_pcie_adv_err_regs_t)); 430 431 if (PCIE_IS_RP(bus_p)) 432 kmem_free(PCIE_RP_REG(pfd_p), 433 sizeof (pf_pcie_rp_err_regs_t)); 434 435 kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t)); 436 } else if (PCIE_IS_PCIX(bus_p)) { 437 if (PCIE_IS_BDG(bus_p)) { 438 if (PCIX_ECC_VERSION_CHECK(bus_p)) { 439 kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0), 440 sizeof (pf_pcix_ecc_regs_t)); 441 kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1), 442 sizeof (pf_pcix_ecc_regs_t)); 443 } 444 445 kmem_free(PCIX_BDG_ERR_REG(pfd_p), 446 sizeof (pf_pcix_bdg_err_regs_t)); 447 } else { 448 if (PCIX_ECC_VERSION_CHECK(bus_p)) 449 kmem_free(PCIX_ECC_REG(pfd_p), 450 sizeof (pf_pcix_ecc_regs_t)); 451 452 kmem_free(PCIX_ERR_REG(pfd_p), 453 sizeof (pf_pcix_err_regs_t)); 454 } 455 } 456 457 if (PCIE_IS_BDG(bus_p)) 458 kmem_free(PCI_BDG_ERR_REG(pfd_p), 459 sizeof (pf_pci_bdg_err_regs_t)); 460 461 kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t)); 462 463 if (PCIE_IS_ROOT(bus_p)) 464 kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t)); 465 466 kmem_free(PCIE_DIP2PFD(dip), sizeof (pf_data_t)); 467 468 PCIE_DIP2PFD(dip) = NULL; 469 } 470 471 472 /* 473 * Special functions to allocate pf_data_t's for PCIe root complexes. 474 * Note: Root Complex not Root Port 475 */ 476 void 477 pcie_rc_init_pfd(dev_info_t *dip, pf_data_t *pfd_p) 478 { 479 pfd_p->pe_bus_p = PCIE_DIP2DOWNBUS(dip); 480 pfd_p->pe_severity_flags = 0; 481 pfd_p->pe_lock = B_FALSE; 482 pfd_p->pe_valid = B_FALSE; 483 484 PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t); 485 PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF; 486 PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t); 487 PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t); 488 PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t); 489 PCIE_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_rp_err_regs_t); 490 PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t); 491 PCIE_ADV_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t); 492 493 PCIE_ADV_REG(pfd_p)->pcie_ue_sev = pcie_aer_uce_severity; 494 } 495 496 void 497 pcie_rc_fini_pfd(pf_data_t *pfd_p) 498 { 499 kmem_free(PCIE_ADV_RP_REG(pfd_p), sizeof (pf_pcie_adv_rp_err_regs_t)); 500 kmem_free(PCIE_ADV_REG(pfd_p), sizeof (pf_pcie_adv_err_regs_t)); 501 kmem_free(PCIE_RP_REG(pfd_p), sizeof (pf_pcie_rp_err_regs_t)); 502 kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t)); 503 kmem_free(PCI_BDG_ERR_REG(pfd_p), sizeof (pf_pci_bdg_err_regs_t)); 504 kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t)); 505 kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t)); 506 } 507 508 void 509 pcie_rc_init_bus(dev_info_t *dip) 510 { 511 pcie_bus_t *bus_p; 512 513 bus_p = (pcie_bus_t *)kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP); 514 bus_p->bus_dip = dip; 515 bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_RC_PSEUDO; 516 bus_p->bus_hdr_type = PCI_HEADER_ONE; 517 518 /* Fake that there are AER logs */ 519 bus_p->bus_aer_off = (uint16_t)-1; 520 521 /* Needed only for handle lookup */ 522 bus_p->bus_fm_flags |= PF_FM_READY; 523 524 ndi_set_bus_private(dip, B_FALSE, DEVI_PORT_TYPE_PCI, bus_p); 525 } 526 527 void 528 pcie_rc_fini_bus(dev_info_t *dip) 529 { 530 pcie_bus_t *bus_p = (pcie_bus_t *)ndi_get_bus_private(dip, B_FALSE); 531 ndi_set_bus_private(dip, B_FALSE, NULL, NULL); 532 kmem_free(bus_p, sizeof (pcie_bus_t)); 533 } 534 535 /* 536 * Initialize PCIe Bus Private Data 537 * 538 * PCIe Bus Private Data contains commonly used PCI/PCIe information and offsets 539 * to key registers. 540 */ 541 pcie_bus_t * 542 pcie_init_bus(dev_info_t *cdip) 543 { 544 pcie_bus_t *bus_p = 0; 545 ddi_acc_handle_t eh = NULL; 546 int range_size; 547 dev_info_t *pdip; 548 const char *errstr = NULL; 549 550 ASSERT(PCIE_DIP2UPBUS(cdip) == NULL); 551 552 /* allocate memory for pcie bus data */ 553 bus_p = kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP); 554 555 556 /* Set back pointer to dip */ 557 bus_p->bus_dip = cdip; 558 559 /* Create an config access special to error handling */ 560 if (pci_config_setup(cdip, &eh) != DDI_SUCCESS) { 561 errstr = "Cannot setup config access"; 562 goto fail; 563 } 564 bus_p->bus_cfg_hdl = eh; 565 bus_p->bus_fm_flags = 0; 566 567 /* get device's bus/dev/function number */ 568 if (pcie_get_bdf_from_dip(cdip, &bus_p->bus_bdf) != DDI_SUCCESS) { 569 errstr = "Cannot get device BDF"; 570 goto fail; 571 } 572 573 /* Save the Vendor Id Device Id */ 574 bus_p->bus_dev_ven_id = PCIE_GET(32, bus_p, PCI_CONF_VENID); 575 bus_p->bus_rev_id = PCIE_GET(8, bus_p, PCI_CONF_REVID); 576 577 /* Save the Header Type */ 578 bus_p->bus_hdr_type = PCIE_GET(8, bus_p, PCI_CONF_HEADER); 579 bus_p->bus_hdr_type &= PCI_HEADER_TYPE_M; 580 581 /* Figure out the device type and all the relavant capability offsets */ 582 if ((PCI_CAP_LOCATE(eh, PCI_CAP_ID_PCI_E, &bus_p->bus_pcie_off)) 583 != DDI_FAILURE) { 584 bus_p->bus_dev_type = PCI_CAP_GET16(eh, NULL, 585 bus_p->bus_pcie_off, PCIE_PCIECAP) & 586 PCIE_PCIECAP_DEV_TYPE_MASK; 587 588 if (PCI_CAP_LOCATE(eh, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_AER), 589 &bus_p->bus_aer_off) != DDI_SUCCESS) 590 bus_p->bus_aer_off = NULL; 591 } else { 592 bus_p->bus_pcie_off = NULL; 593 bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO; 594 } 595 596 if ((PCI_CAP_LOCATE(eh, PCI_CAP_ID_PCIX, &bus_p->bus_pcix_off)) 597 != DDI_FAILURE) { 598 if (PCIE_IS_BDG(bus_p)) 599 bus_p->bus_ecc_ver = PCIX_CAP_GET(16, bus_p, 600 PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK; 601 else 602 bus_p->bus_ecc_ver = PCIX_CAP_GET(16, bus_p, 603 PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK; 604 } else { 605 bus_p->bus_pcix_off = NULL; 606 bus_p->bus_ecc_ver = NULL; 607 } 608 609 /* Save the Range information if device is a switch/bridge */ 610 if (PCIE_IS_BDG(bus_p)) { 611 /* get "bus_range" property */ 612 range_size = sizeof (pci_bus_range_t); 613 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS, 614 "bus-range", (caddr_t)&bus_p->bus_bus_range, &range_size) 615 != DDI_PROP_SUCCESS) { 616 errstr = "Cannot find \"bus-range\" property"; 617 goto fail; 618 } 619 620 /* get secondary bus number */ 621 bus_p->bus_bdg_secbus = PCIE_GET(8, bus_p, PCI_BCNF_SECBUS); 622 623 /* Get "ranges" property */ 624 if (ddi_getlongprop(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS, 625 "ranges", (caddr_t)&bus_p->bus_addr_ranges, 626 &bus_p->bus_addr_entries) != DDI_PROP_SUCCESS) 627 bus_p->bus_addr_entries = 0; 628 bus_p->bus_addr_entries /= sizeof (ppb_ranges_t); 629 } 630 631 /* save "assigned-addresses" property array, ignore failues */ 632 if (ddi_getlongprop(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS, 633 "assigned-addresses", (caddr_t)&bus_p->bus_assigned_addr, 634 &bus_p->bus_assigned_entries) == DDI_PROP_SUCCESS) 635 bus_p->bus_assigned_entries /= sizeof (pci_regspec_t); 636 else 637 bus_p->bus_assigned_entries = 0; 638 639 /* save RP dip and RP bdf */ 640 if (PCIE_IS_RP(bus_p)) { 641 bus_p->bus_rp_dip = cdip; 642 bus_p->bus_rp_bdf = bus_p->bus_bdf; 643 } else { 644 for (pdip = ddi_get_parent(cdip); pdip; 645 pdip = ddi_get_parent(pdip)) { 646 pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip); 647 648 /* 649 * When debugging be aware that some NVIDIA x86 650 * architectures have 2 nodes for each RP, One at Bus 651 * 0x0 and one at Bus 0x80. The requester is from Bus 652 * 0x80 653 */ 654 if (PCIE_IS_ROOT(parent_bus_p)) { 655 bus_p->bus_rp_dip = pdip; 656 bus_p->bus_rp_bdf = parent_bus_p->bus_bdf; 657 break; 658 } 659 } 660 } 661 662 ndi_set_bus_private(cdip, B_TRUE, DEVI_PORT_TYPE_PCI, (void *)bus_p); 663 664 pcie_init_pfd(cdip); 665 666 bus_p->bus_mps = 0; 667 668 pcie_init_plat(cdip); 669 670 PCIE_DBG("Add %s(dip 0x%p, bdf 0x%x, secbus 0x%x)\n", 671 ddi_driver_name(cdip), (void *)cdip, bus_p->bus_bdf, 672 bus_p->bus_bdg_secbus); 673 #ifdef DEBUG 674 pcie_print_bus(bus_p); 675 #endif 676 677 return (bus_p); 678 fail: 679 cmn_err(CE_WARN, "PCIE init err info failed BDF 0x%x:%s\n", 680 bus_p->bus_bdf, errstr); 681 if (eh) 682 pci_config_teardown(&eh); 683 kmem_free(bus_p, sizeof (pcie_bus_t)); 684 return (NULL); 685 } 686 687 int 688 pcie_postattach_child(dev_info_t *cdip) 689 { 690 pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip); 691 692 if (!bus_p) 693 return (DDI_FAILURE); 694 695 return (pcie_enable_ce(cdip)); 696 } 697 698 /* 699 * PCI-Express child device de-initialization. 700 * This function disables generic pci-express interrupts and error 701 * handling. 702 */ 703 void 704 pcie_uninitchild(dev_info_t *cdip) 705 { 706 pcie_disable_errors(cdip); 707 pcie_fini_bus(cdip); 708 } 709 710 void 711 pcie_fini_bus(dev_info_t *cdip) 712 { 713 pcie_bus_t *bus_p; 714 715 pcie_fini_plat(cdip); 716 pcie_fini_pfd(cdip); 717 718 bus_p = PCIE_DIP2UPBUS(cdip); 719 ASSERT(bus_p); 720 pci_config_teardown(&bus_p->bus_cfg_hdl); 721 ndi_set_bus_private(cdip, B_TRUE, NULL, NULL); 722 kmem_free(bus_p->bus_assigned_addr, 723 (sizeof (pci_regspec_t) * bus_p->bus_assigned_entries)); 724 kmem_free(bus_p->bus_addr_ranges, 725 (sizeof (ppb_ranges_t) * bus_p->bus_addr_entries)); 726 727 kmem_free(bus_p, sizeof (pcie_bus_t)); 728 } 729 730 void 731 pcie_enable_errors(dev_info_t *dip) 732 { 733 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 734 uint16_t reg16, tmp16; 735 uint32_t reg32, tmp32; 736 737 ASSERT(bus_p); 738 739 /* 740 * Clear any pending errors 741 */ 742 pcie_clear_errors(dip); 743 744 if (!PCIE_IS_PCIE(bus_p)) 745 return; 746 747 /* 748 * Enable Baseline Error Handling but leave CE reporting off (poweron 749 * default). 750 */ 751 if ((reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL)) != 752 PCI_CAP_EINVAL16) { 753 tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK | 754 PCIE_DEVCTL_MAX_PAYLOAD_MASK)) | 755 (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK | 756 PCIE_DEVCTL_MAX_PAYLOAD_MASK)) | 757 (pcie_base_err_default & (~PCIE_DEVCTL_CE_REPORTING_EN)); 758 759 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16); 760 PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16); 761 } 762 763 /* Enable Root Port Baseline Error Receiving */ 764 if (PCIE_IS_ROOT(bus_p) && 765 (reg16 = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL)) != 766 PCI_CAP_EINVAL16) { 767 768 #if defined(__xpv) 769 /* 770 * When we're booted under the hypervisor we won't receive 771 * MSI's, so to ensure that uncorrectable errors aren't ignored 772 * we set the SERR_FAT and SERR_NONFAT bits in the Root Control 773 * Register. 774 */ 775 tmp16 = pcie_root_ctrl_default; 776 #else 777 tmp16 = pcie_serr_disable_flag ? 778 (pcie_root_ctrl_default & ~PCIE_ROOT_SYS_ERR) : 779 pcie_root_ctrl_default; 780 #endif /* __xpv */ 781 PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, tmp16); 782 PCIE_DBG_CAP(dip, bus_p, "ROOT DEVCTL", 16, PCIE_ROOTCTL, 783 reg16); 784 } 785 786 /* 787 * Enable PCI-Express Advanced Error Handling if Exists 788 */ 789 if (!PCIE_HAS_AER(bus_p)) 790 return; 791 792 /* Set Uncorrectable Severity */ 793 if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_SERV)) != 794 PCI_CAP_EINVAL32) { 795 tmp32 = pcie_aer_uce_severity; 796 797 PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_SERV, tmp32); 798 PCIE_DBG_AER(dip, bus_p, "AER UCE SEV", 32, PCIE_AER_UCE_SERV, 799 reg32); 800 } 801 802 /* Enable Uncorrectable errors */ 803 if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_MASK)) != 804 PCI_CAP_EINVAL32) { 805 tmp32 = pcie_aer_uce_mask; 806 807 PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, tmp32); 808 PCIE_DBG_AER(dip, bus_p, "AER UCE MASK", 32, PCIE_AER_UCE_MASK, 809 reg32); 810 } 811 812 /* Enable ECRC generation and checking */ 813 if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) != 814 PCI_CAP_EINVAL32) { 815 tmp32 = reg32 | pcie_ecrc_value; 816 PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, tmp32); 817 PCIE_DBG_AER(dip, bus_p, "AER CTL", 32, PCIE_AER_CTL, reg32); 818 } 819 820 /* Enable Secondary Uncorrectable errors if this is a bridge */ 821 if (!PCIE_IS_PCIE_BDG(bus_p)) 822 goto root; 823 824 /* Set Uncorrectable Severity */ 825 if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_SERV)) != 826 PCI_CAP_EINVAL32) { 827 tmp32 = pcie_aer_suce_severity; 828 829 PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_SERV, tmp32); 830 PCIE_DBG_AER(dip, bus_p, "AER SUCE SEV", 32, PCIE_AER_SUCE_SERV, 831 reg32); 832 } 833 834 if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_MASK)) != 835 PCI_CAP_EINVAL32) { 836 PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, pcie_aer_suce_mask); 837 PCIE_DBG_AER(dip, bus_p, "AER SUCE MASK", 32, 838 PCIE_AER_SUCE_MASK, reg32); 839 } 840 841 root: 842 /* 843 * Enable Root Control this is a Root device 844 */ 845 if (!PCIE_IS_ROOT(bus_p)) 846 return; 847 848 #if !defined(__xpv) 849 if ((reg16 = PCIE_AER_GET(16, bus_p, PCIE_AER_RE_CMD)) != 850 PCI_CAP_EINVAL16) { 851 PCIE_AER_PUT(16, bus_p, PCIE_AER_RE_CMD, 852 pcie_root_error_cmd_default); 853 PCIE_DBG_AER(dip, bus_p, "AER Root Err Cmd", 16, 854 PCIE_AER_RE_CMD, reg16); 855 } 856 #endif /* __xpv */ 857 } 858 859 /* 860 * This function is used for enabling CE reporting and setting the AER CE mask. 861 * When called from outside the pcie module it should always be preceded by 862 * a call to pcie_enable_errors. 863 */ 864 int 865 pcie_enable_ce(dev_info_t *dip) 866 { 867 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 868 uint16_t device_sts, device_ctl; 869 uint32_t tmp_pcie_aer_ce_mask; 870 871 if (!PCIE_IS_PCIE(bus_p)) 872 return (DDI_SUCCESS); 873 874 /* 875 * The "pcie_ce_mask" property is used to control both the CE reporting 876 * enable field in the device control register and the AER CE mask. We 877 * leave CE reporting disabled if pcie_ce_mask is set to -1. 878 */ 879 880 tmp_pcie_aer_ce_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip, 881 DDI_PROP_DONTPASS, "pcie_ce_mask", pcie_aer_ce_mask); 882 883 if (tmp_pcie_aer_ce_mask == (uint32_t)-1) { 884 /* 885 * Nothing to do since CE reporting has already been disabled. 886 */ 887 return (DDI_SUCCESS); 888 } 889 890 if (PCIE_HAS_AER(bus_p)) { 891 /* Enable AER CE */ 892 PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, tmp_pcie_aer_ce_mask); 893 PCIE_DBG_AER(dip, bus_p, "AER CE MASK", 32, PCIE_AER_CE_MASK, 894 0); 895 896 /* Clear any pending AER CE errors */ 897 PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_STS, -1); 898 } 899 900 /* clear any pending CE errors */ 901 if ((device_sts = PCIE_CAP_GET(16, bus_p, PCIE_DEVSTS)) != 902 PCI_CAP_EINVAL16) 903 PCIE_CAP_PUT(16, bus_p, PCIE_DEVSTS, 904 device_sts & (~PCIE_DEVSTS_CE_DETECTED)); 905 906 /* Enable CE reporting */ 907 device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL); 908 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, 909 (device_ctl & (~PCIE_DEVCTL_ERR_MASK)) | pcie_base_err_default); 910 PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, device_ctl); 911 912 return (DDI_SUCCESS); 913 } 914 915 /* ARGSUSED */ 916 void 917 pcie_disable_errors(dev_info_t *dip) 918 { 919 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 920 uint16_t device_ctl; 921 uint32_t aer_reg; 922 923 if (!PCIE_IS_PCIE(bus_p)) 924 return; 925 926 /* 927 * Disable PCI-Express Baseline Error Handling 928 */ 929 device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL); 930 device_ctl &= ~PCIE_DEVCTL_ERR_MASK; 931 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, device_ctl); 932 933 /* 934 * Disable PCI-Express Advanced Error Handling if Exists 935 */ 936 if (!PCIE_HAS_AER(bus_p)) 937 goto root; 938 939 /* Disable Uncorrectable errors */ 940 PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, PCIE_AER_UCE_BITS); 941 942 /* Disable Correctable errors */ 943 PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, PCIE_AER_CE_BITS); 944 945 /* Disable ECRC generation and checking */ 946 if ((aer_reg = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) != 947 PCI_CAP_EINVAL32) { 948 aer_reg &= ~(PCIE_AER_CTL_ECRC_GEN_ENA | 949 PCIE_AER_CTL_ECRC_CHECK_ENA); 950 951 PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, aer_reg); 952 } 953 /* 954 * Disable Secondary Uncorrectable errors if this is a bridge 955 */ 956 if (!PCIE_IS_PCIE_BDG(bus_p)) 957 goto root; 958 959 PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, PCIE_AER_SUCE_BITS); 960 961 root: 962 /* 963 * disable Root Control this is a Root device 964 */ 965 if (!PCIE_IS_ROOT(bus_p)) 966 return; 967 968 if (!pcie_serr_disable_flag) { 969 device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL); 970 device_ctl &= ~PCIE_ROOT_SYS_ERR; 971 PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, device_ctl); 972 } 973 974 if (!PCIE_HAS_AER(bus_p)) 975 return; 976 977 if ((device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_AER_RE_CMD)) != 978 PCI_CAP_EINVAL16) { 979 device_ctl &= ~pcie_root_error_cmd_default; 980 PCIE_CAP_PUT(16, bus_p, PCIE_AER_RE_CMD, device_ctl); 981 } 982 } 983 984 /* 985 * Extract bdf from "reg" property. 986 */ 987 int 988 pcie_get_bdf_from_dip(dev_info_t *dip, pcie_req_id_t *bdf) 989 { 990 pci_regspec_t *regspec; 991 int reglen; 992 993 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 994 "reg", (int **)®spec, (uint_t *)®len) != DDI_SUCCESS) 995 return (DDI_FAILURE); 996 997 if (reglen < (sizeof (pci_regspec_t) / sizeof (int))) { 998 ddi_prop_free(regspec); 999 return (DDI_FAILURE); 1000 } 1001 1002 /* Get phys_hi from first element. All have same bdf. */ 1003 *bdf = (regspec->pci_phys_hi & (PCI_REG_BDFR_M ^ PCI_REG_REG_M)) >> 8; 1004 1005 ddi_prop_free(regspec); 1006 return (DDI_SUCCESS); 1007 } 1008 1009 dev_info_t * 1010 pcie_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip) 1011 { 1012 dev_info_t *cdip = rdip; 1013 1014 for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip)) 1015 ; 1016 1017 return (cdip); 1018 } 1019 1020 uint32_t 1021 pcie_get_bdf_for_dma_xfer(dev_info_t *dip, dev_info_t *rdip) 1022 { 1023 dev_info_t *cdip; 1024 1025 /* 1026 * As part of the probing, the PCI fcode interpreter may setup a DMA 1027 * request if a given card has a fcode on it using dip and rdip of the 1028 * AP (attachment point) i.e, dip and rdip of px/pcieb driver. In this 1029 * case, return a invalid value for the bdf since we cannot get to the 1030 * bdf value of the actual device which will be initiating this DMA. 1031 */ 1032 if (rdip == dip) 1033 return (PCIE_INVALID_BDF); 1034 1035 cdip = pcie_get_my_childs_dip(dip, rdip); 1036 1037 /* 1038 * For a given rdip, return the bdf value of dip's (px or pcieb) 1039 * immediate child or secondary bus-id if dip is a PCIe2PCI bridge. 1040 * 1041 * XXX - For now, return a invalid bdf value for all PCI and PCI-X 1042 * devices since this needs more work. 1043 */ 1044 return (PCI_GET_PCIE2PCI_SECBUS(cdip) ? 1045 PCIE_INVALID_BDF : PCI_GET_BDF(cdip)); 1046 } 1047 1048 uint32_t 1049 pcie_get_aer_uce_mask() { 1050 return (pcie_aer_uce_mask); 1051 } 1052 uint32_t 1053 pcie_get_aer_ce_mask() { 1054 return (pcie_aer_ce_mask); 1055 } 1056 uint32_t 1057 pcie_get_aer_suce_mask() { 1058 return (pcie_aer_suce_mask); 1059 } 1060 uint32_t 1061 pcie_get_serr_mask() { 1062 return (pcie_serr_disable_flag); 1063 } 1064 1065 void 1066 pcie_set_aer_uce_mask(uint32_t mask) { 1067 pcie_aer_uce_mask = mask; 1068 if (mask & PCIE_AER_UCE_UR) 1069 pcie_base_err_default &= ~PCIE_DEVCTL_UR_REPORTING_EN; 1070 else 1071 pcie_base_err_default |= PCIE_DEVCTL_UR_REPORTING_EN; 1072 1073 if (mask & PCIE_AER_UCE_ECRC) 1074 pcie_ecrc_value = 0; 1075 } 1076 1077 void 1078 pcie_set_aer_ce_mask(uint32_t mask) { 1079 pcie_aer_ce_mask = mask; 1080 } 1081 void 1082 pcie_set_aer_suce_mask(uint32_t mask) { 1083 pcie_aer_suce_mask = mask; 1084 } 1085 void 1086 pcie_set_serr_mask(uint32_t mask) { 1087 pcie_serr_disable_flag = mask; 1088 } 1089 1090 /* 1091 * Is the rdip a child of dip. Used for checking certain CTLOPS from bubbling 1092 * up erronously. Ex. ISA ctlops to a PCI-PCI Bridge. 1093 */ 1094 boolean_t 1095 pcie_is_child(dev_info_t *dip, dev_info_t *rdip) 1096 { 1097 dev_info_t *cdip = ddi_get_child(dip); 1098 for (; cdip; cdip = ddi_get_next_sibling(cdip)) 1099 if (cdip == rdip) 1100 break; 1101 return (cdip != NULL); 1102 } 1103 1104 boolean_t 1105 pcie_is_link_disabled(dev_info_t *dip) 1106 { 1107 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 1108 1109 if (PCIE_IS_PCIE(bus_p)) { 1110 if (PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL) & 1111 PCIE_LINKCTL_LINK_DISABLE) 1112 return (B_TRUE); 1113 } 1114 return (B_FALSE); 1115 } 1116 1117 /* 1118 * Initialize the MPS for a root port. 1119 * 1120 * dip - dip of root port device. 1121 */ 1122 void 1123 pcie_init_root_port_mps(dev_info_t *dip) 1124 { 1125 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 1126 int rp_cap, max_supported = pcie_max_mps; 1127 1128 (void) pcie_get_fabric_mps(ddi_get_parent(dip), 1129 ddi_get_child(dip), &max_supported); 1130 1131 rp_cap = PCI_CAP_GET16(bus_p->bus_cfg_hdl, NULL, 1132 bus_p->bus_pcie_off, PCIE_DEVCAP) & 1133 PCIE_DEVCAP_MAX_PAYLOAD_MASK; 1134 1135 if (rp_cap < max_supported) 1136 max_supported = rp_cap; 1137 1138 bus_p->bus_mps = max_supported; 1139 (void) pcie_initchild_mps(dip); 1140 } 1141 1142 /* 1143 * Initialize the Maximum Payload Size of a device. 1144 * 1145 * cdip - dip of device. 1146 * 1147 * returns - DDI_SUCCESS or DDI_FAILURE 1148 */ 1149 int 1150 pcie_initchild_mps(dev_info_t *cdip) 1151 { 1152 int max_payload_size; 1153 pcie_bus_t *bus_p; 1154 dev_info_t *pdip = ddi_get_parent(cdip); 1155 1156 bus_p = PCIE_DIP2BUS(cdip); 1157 if (bus_p == NULL) { 1158 PCIE_DBG("%s: BUS not found.\n", 1159 ddi_driver_name(cdip)); 1160 return (DDI_FAILURE); 1161 } 1162 1163 if (PCIE_IS_RP(bus_p)) { 1164 /* 1165 * If this device is a root port, then the mps scan 1166 * saved the mps in the root ports bus_p. 1167 */ 1168 max_payload_size = bus_p->bus_mps; 1169 } else { 1170 /* 1171 * If the device is not a root port, then the mps of 1172 * its parent should be used. 1173 */ 1174 pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip); 1175 max_payload_size = parent_bus_p->bus_mps; 1176 } 1177 1178 if (PCIE_IS_PCIE(bus_p) && (max_payload_size >= 0)) { 1179 pcie_bus_t *rootp_bus_p = PCIE_DIP2BUS(bus_p->bus_rp_dip); 1180 uint16_t mask, dev_ctrl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL), 1181 mps = PCIE_CAP_GET(16, bus_p, PCIE_DEVCAP) & 1182 PCIE_DEVCAP_MAX_PAYLOAD_MASK; 1183 1184 mps = MIN(mps, (uint16_t)max_payload_size); 1185 1186 /* 1187 * If the MPS to be set is less than the root ports 1188 * MPS, then MRRS will have to be set the same as MPS. 1189 */ 1190 mask = ((mps < rootp_bus_p->bus_mps) ? 1191 PCIE_DEVCTL_MAX_READ_REQ_MASK : 0) | 1192 PCIE_DEVCTL_MAX_PAYLOAD_MASK; 1193 1194 dev_ctrl &= ~mask; 1195 mask = ((mps < rootp_bus_p->bus_mps) 1196 ? mps << PCIE_DEVCTL_MAX_READ_REQ_SHIFT : 0) 1197 | (mps << PCIE_DEVCTL_MAX_PAYLOAD_SHIFT); 1198 1199 dev_ctrl |= mask; 1200 1201 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl); 1202 1203 bus_p->bus_mps = mps; 1204 } 1205 return (DDI_SUCCESS); 1206 } 1207 1208 /* 1209 * Scans a device tree/branch for a maximum payload size capabilities. 1210 * 1211 * rc_dip - dip of Root Complex. 1212 * dip - dip of device where scan will begin. 1213 * max_supported (IN) - maximum allowable MPS. 1214 * max_supported (OUT) - maximum payload size capability of fabric. 1215 */ 1216 void 1217 pcie_get_fabric_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported) 1218 { 1219 if (dip == NULL) 1220 return; 1221 1222 /* 1223 * Perform a fabric scan to obtain Maximum Payload Capabilities 1224 */ 1225 (void) pcie_scan_mps(rc_dip, dip, max_supported); 1226 1227 PCIE_DBG("MPS: Highest Common MPS= %x\n", max_supported); 1228 } 1229 1230 /* 1231 * Scans fabric and determines Maximum Payload Size based on 1232 * highest common denominator alogorithm 1233 */ 1234 static void 1235 pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported) 1236 { 1237 int circular_count; 1238 pcie_max_supported_t max_pay_load_supported; 1239 1240 max_pay_load_supported.dip = rc_dip; 1241 max_pay_load_supported.highest_common_mps = *max_supported; 1242 1243 ndi_devi_enter(ddi_get_parent(dip), &circular_count); 1244 ddi_walk_devs(dip, pcie_get_max_supported, 1245 (void *)&max_pay_load_supported); 1246 ndi_devi_exit(ddi_get_parent(dip), circular_count); 1247 *max_supported = max_pay_load_supported.highest_common_mps; 1248 } 1249 1250 /* 1251 * Called as part of the Maximum Payload Size scan. 1252 */ 1253 static int 1254 pcie_get_max_supported(dev_info_t *dip, void *arg) 1255 { 1256 uint32_t max_supported; 1257 uint16_t cap_ptr; 1258 pcie_max_supported_t *current = (pcie_max_supported_t *)arg; 1259 pci_regspec_t *reg; 1260 int rlen; 1261 caddr_t virt; 1262 ddi_acc_handle_t config_handle; 1263 1264 if (ddi_get_child(current->dip) == NULL) { 1265 goto fail1; 1266 } 1267 1268 if (pcie_dev(dip) == DDI_FAILURE) { 1269 PCIE_DBG("MPS: pcie_get_max_supported: %s: " 1270 "Not a PCIe dev\n", ddi_driver_name(dip)); 1271 goto fail1; 1272 } 1273 1274 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg", 1275 (caddr_t)®, &rlen) != DDI_PROP_SUCCESS) { 1276 PCIE_DBG("MPS: pcie_get_max_supported: %s: " 1277 "Can not read reg\n", ddi_driver_name(dip)); 1278 goto fail1; 1279 } 1280 1281 if (pcie_map_phys(ddi_get_child(current->dip), reg, &virt, 1282 &config_handle) != DDI_SUCCESS) { 1283 PCIE_DBG("MPS: pcie_get_max_supported: %s: pcie_map_phys " 1284 "failed\n", ddi_driver_name(dip)); 1285 goto fail2; 1286 } 1287 1288 if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, &cap_ptr)) == 1289 DDI_FAILURE) { 1290 goto fail3; 1291 } 1292 1293 max_supported = PCI_CAP_GET16(config_handle, NULL, cap_ptr, 1294 PCIE_DEVCAP) & PCIE_DEVCAP_MAX_PAYLOAD_MASK; 1295 1296 PCIE_DBG("PCIE MPS: %s: MPS Capabilities %x\n", ddi_driver_name(dip), 1297 max_supported); 1298 1299 if (max_supported < current->highest_common_mps) 1300 current->highest_common_mps = max_supported; 1301 1302 fail3: 1303 pcie_unmap_phys(&config_handle, reg); 1304 fail2: 1305 kmem_free(reg, rlen); 1306 fail1: 1307 return (DDI_WALK_CONTINUE); 1308 } 1309 1310 /* 1311 * Determines if there are any root ports attached to a root complex. 1312 * 1313 * dip - dip of root complex 1314 * 1315 * Returns - DDI_SUCCESS if there is at least one root port otherwise 1316 * DDI_FAILURE. 1317 */ 1318 int 1319 pcie_root_port(dev_info_t *dip) 1320 { 1321 int port_type; 1322 uint16_t cap_ptr; 1323 ddi_acc_handle_t config_handle; 1324 dev_info_t *cdip = ddi_get_child(dip); 1325 1326 /* 1327 * Determine if any of the children of the passed in dip 1328 * are root ports. 1329 */ 1330 for (; cdip; cdip = ddi_get_next_sibling(cdip)) { 1331 1332 if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS) 1333 continue; 1334 1335 if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, 1336 &cap_ptr)) == DDI_FAILURE) { 1337 pci_config_teardown(&config_handle); 1338 continue; 1339 } 1340 1341 port_type = PCI_CAP_GET16(config_handle, NULL, cap_ptr, 1342 PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK; 1343 1344 pci_config_teardown(&config_handle); 1345 1346 if (port_type == PCIE_PCIECAP_DEV_TYPE_ROOT) 1347 return (DDI_SUCCESS); 1348 } 1349 1350 /* No root ports were found */ 1351 1352 return (DDI_FAILURE); 1353 } 1354 1355 /* 1356 * Function that determines if a device a PCIe device. 1357 * 1358 * dip - dip of device. 1359 * 1360 * returns - DDI_SUCCESS if device is a PCIe device, otherwise DDI_FAILURE. 1361 */ 1362 int 1363 pcie_dev(dev_info_t *dip) 1364 { 1365 /* get parent device's device_type property */ 1366 char *device_type; 1367 int rc = DDI_FAILURE; 1368 dev_info_t *pdip = ddi_get_parent(dip); 1369 1370 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, 1371 DDI_PROP_DONTPASS, "device_type", &device_type) 1372 != DDI_PROP_SUCCESS) { 1373 return (DDI_FAILURE); 1374 } 1375 1376 if (strcmp(device_type, "pciex") == 0) 1377 rc = DDI_SUCCESS; 1378 else 1379 rc = DDI_FAILURE; 1380 1381 ddi_prop_free(device_type); 1382 return (rc); 1383 } 1384 1385 /* 1386 * Function to map in a device's memory space. 1387 */ 1388 static int 1389 pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec, 1390 caddr_t *addrp, ddi_acc_handle_t *handlep) 1391 { 1392 ddi_map_req_t mr; 1393 ddi_acc_hdl_t *hp; 1394 int result; 1395 ddi_device_acc_attr_t attr; 1396 1397 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 1398 attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 1399 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 1400 attr.devacc_attr_access = DDI_CAUTIOUS_ACC; 1401 1402 *handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL); 1403 hp = impl_acc_hdl_get(*handlep); 1404 hp->ah_vers = VERS_ACCHDL; 1405 hp->ah_dip = dip; 1406 hp->ah_rnumber = 0; 1407 hp->ah_offset = 0; 1408 hp->ah_len = 0; 1409 hp->ah_acc = attr; 1410 1411 mr.map_op = DDI_MO_MAP_LOCKED; 1412 mr.map_type = DDI_MT_REGSPEC; 1413 mr.map_obj.rp = (struct regspec *)phys_spec; 1414 mr.map_prot = PROT_READ | PROT_WRITE; 1415 mr.map_flags = DDI_MF_KERNEL_MAPPING; 1416 mr.map_handlep = hp; 1417 mr.map_vers = DDI_MAP_VERSION; 1418 1419 result = ddi_map(dip, &mr, 0, 0, addrp); 1420 1421 if (result != DDI_SUCCESS) { 1422 impl_acc_hdl_free(*handlep); 1423 *handlep = (ddi_acc_handle_t)NULL; 1424 } else { 1425 hp->ah_addr = *addrp; 1426 } 1427 1428 return (result); 1429 } 1430 1431 /* 1432 * Map out memory that was mapped in with pcie_map_phys(); 1433 */ 1434 static void 1435 pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph) 1436 { 1437 ddi_map_req_t mr; 1438 ddi_acc_hdl_t *hp; 1439 1440 hp = impl_acc_hdl_get(*handlep); 1441 ASSERT(hp); 1442 1443 mr.map_op = DDI_MO_UNMAP; 1444 mr.map_type = DDI_MT_REGSPEC; 1445 mr.map_obj.rp = (struct regspec *)ph; 1446 mr.map_prot = PROT_READ | PROT_WRITE; 1447 mr.map_flags = DDI_MF_KERNEL_MAPPING; 1448 mr.map_handlep = hp; 1449 mr.map_vers = DDI_MAP_VERSION; 1450 1451 (void) ddi_map(hp->ah_dip, &mr, hp->ah_offset, 1452 hp->ah_len, &hp->ah_addr); 1453 1454 impl_acc_hdl_free(*handlep); 1455 *handlep = (ddi_acc_handle_t)NULL; 1456 } 1457 1458 void 1459 pcie_set_rber_fatal(dev_info_t *dip, boolean_t val) 1460 { 1461 pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip); 1462 bus_p->bus_pfd->pe_rber_fatal = val; 1463 } 1464 1465 /* 1466 * Return parent Root Port's pe_rber_fatal value. 1467 */ 1468 boolean_t 1469 pcie_get_rber_fatal(dev_info_t *dip) 1470 { 1471 pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip); 1472 pcie_bus_t *rp_bus_p = PCIE_DIP2UPBUS(bus_p->bus_rp_dip); 1473 return (rp_bus_p->bus_pfd->pe_rber_fatal); 1474 } 1475 1476 #ifdef DEBUG 1477 1478 static void 1479 pcie_print_bus(pcie_bus_t *bus_p) 1480 { 1481 pcie_dbg("\tbus_dip = 0x%p\n", bus_p->bus_dip); 1482 pcie_dbg("\tbus_fm_flags = 0x%x\n", bus_p->bus_fm_flags); 1483 1484 pcie_dbg("\tbus_bdf = 0x%x\n", bus_p->bus_bdf); 1485 pcie_dbg("\tbus_dev_ven_id = 0x%x\n", bus_p->bus_dev_ven_id); 1486 pcie_dbg("\tbus_rev_id = 0x%x\n", bus_p->bus_rev_id); 1487 pcie_dbg("\tbus_hdr_type = 0x%x\n", bus_p->bus_hdr_type); 1488 pcie_dbg("\tbus_dev_type = 0x%x\n", bus_p->bus_dev_type); 1489 pcie_dbg("\tbus_bdg_secbus = 0x%x\n", bus_p->bus_bdg_secbus); 1490 pcie_dbg("\tbus_pcie_off = 0x%x\n", bus_p->bus_pcie_off); 1491 pcie_dbg("\tbus_aer_off = 0x%x\n", bus_p->bus_aer_off); 1492 pcie_dbg("\tbus_pcix_off = 0x%x\n", bus_p->bus_pcix_off); 1493 pcie_dbg("\tbus_ecc_ver = 0x%x\n", bus_p->bus_ecc_ver); 1494 } 1495 1496 /* 1497 * For debugging purposes set pcie_dbg_print != 0 to see printf messages 1498 * during interrupt. 1499 * 1500 * When a proper solution is in place this code will disappear. 1501 * Potential solutions are: 1502 * o circular buffers 1503 * o taskq to print at lower pil 1504 */ 1505 int pcie_dbg_print = 0; 1506 void 1507 pcie_dbg(char *fmt, ...) 1508 { 1509 va_list ap; 1510 1511 if (!pcie_debug_flags) { 1512 return; 1513 } 1514 va_start(ap, fmt); 1515 if (servicing_interrupt()) { 1516 if (pcie_dbg_print) { 1517 prom_vprintf(fmt, ap); 1518 } 1519 } else { 1520 prom_vprintf(fmt, ap); 1521 } 1522 va_end(ap); 1523 } 1524 #endif /* DEBUG */ 1525 1526 #if defined(__i386) || defined(__amd64) 1527 static void 1528 pcie_check_io_mem_range(ddi_acc_handle_t cfg_hdl, boolean_t *empty_io_range, 1529 boolean_t *empty_mem_range) 1530 { 1531 uint8_t class, subclass; 1532 uint_t val; 1533 1534 class = pci_config_get8(cfg_hdl, PCI_CONF_BASCLASS); 1535 subclass = pci_config_get8(cfg_hdl, PCI_CONF_SUBCLASS); 1536 1537 if ((class == PCI_CLASS_BRIDGE) && (subclass == PCI_BRIDGE_PCI)) { 1538 val = (((uint_t)pci_config_get8(cfg_hdl, PCI_BCNF_IO_BASE_LOW) & 1539 PCI_BCNF_IO_MASK) << 8); 1540 /* 1541 * Assuming that a zero based io_range[0] implies an 1542 * invalid I/O range. Likewise for mem_range[0]. 1543 */ 1544 if (val == 0) 1545 *empty_io_range = B_TRUE; 1546 val = (((uint_t)pci_config_get16(cfg_hdl, PCI_BCNF_MEM_BASE) & 1547 PCI_BCNF_MEM_MASK) << 16); 1548 if (val == 0) 1549 *empty_mem_range = B_TRUE; 1550 } 1551 } 1552 #endif /* defined(__i386) || defined(__amd64) */ 1553