/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/promif.h>
#include <sys/disp.h>
#include <sys/stat.h>
#include <sys/file.h>
#include <sys/pci_cap.h>
#include <sys/pci_impl.h>
#include <sys/pcie_impl.h>
#include <sys/hotplug/pci/pcie_hp.h>
#include <sys/hotplug/pci/pcicfg.h>

/* Local function prototypes */
static void pcie_init_pfd(dev_info_t *);
static void pcie_fini_pfd(dev_info_t *);

#if defined(__i386) || defined(__amd64)
static void pcie_check_io_mem_range(ddi_acc_handle_t, boolean_t *,
    boolean_t *);
#endif /* defined(__i386) || defined(__amd64) */

#ifdef DEBUG
uint_t pcie_debug_flags = 0;
static void pcie_print_bus(pcie_bus_t *bus_p);
void pcie_dbg(char *fmt, ...);
#endif /* DEBUG */

/* Variable to control default PCI-Express config settings */
ushort_t pcie_command_default =
    PCI_COMM_SERR_ENABLE |
    PCI_COMM_WAIT_CYC_ENAB |
    PCI_COMM_PARITY_DETECT |
    PCI_COMM_ME |
    PCI_COMM_MAE |
    PCI_COMM_IO;

/* xxx_fw are bits that are controlled by FW and should not be modified */
ushort_t pcie_command_default_fw =
    PCI_COMM_SPEC_CYC |
    PCI_COMM_MEMWR_INVAL |
    PCI_COMM_PALETTE_SNOOP |
    PCI_COMM_WAIT_CYC_ENAB |
    0xF800;		/* Reserved Bits */

ushort_t pcie_bdg_command_default_fw =
    PCI_BCNF_BCNTRL_ISA_ENABLE |
    PCI_BCNF_BCNTRL_VGA_ENABLE |
    0xF000;		/* Reserved Bits */

/* PCI-Express Base error defaults */
ushort_t pcie_base_err_default =
    PCIE_DEVCTL_CE_REPORTING_EN |
    PCIE_DEVCTL_NFE_REPORTING_EN |
    PCIE_DEVCTL_FE_REPORTING_EN |
    PCIE_DEVCTL_UR_REPORTING_EN;

/* PCI-Express Device Control Register */
uint16_t pcie_devctl_default = PCIE_DEVCTL_RO_EN |
    PCIE_DEVCTL_MAX_READ_REQ_512;

/* PCI-Express AER Root Control Register */
#define	PCIE_ROOT_SYS_ERR	(PCIE_ROOTCTL_SYS_ERR_ON_CE_EN | \
				PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN | \
				PCIE_ROOTCTL_SYS_ERR_ON_FE_EN)

ushort_t pcie_root_ctrl_default =
    PCIE_ROOTCTL_SYS_ERR_ON_CE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN;

/* PCI-Express Root Error Command Register */
ushort_t pcie_root_error_cmd_default =
    PCIE_AER_RE_CMD_CE_REP_EN |
    PCIE_AER_RE_CMD_NFE_REP_EN |
    PCIE_AER_RE_CMD_FE_REP_EN;

/* ECRC settings in the PCIe AER Control Register */
uint32_t pcie_ecrc_value =
    PCIE_AER_CTL_ECRC_GEN_ENA |
    PCIE_AER_CTL_ECRC_CHECK_ENA;

/*
 * If a particular platform wants to disable certain errors, such as UR/MA,
 * instead of using #defines have the platform's PCIe Root Complex driver set
 * these masks using the pcie_get_XXX_mask and pcie_set_XXX_mask functions.
 * For x86 the closest thing to a PCIe root complex driver is NPE.  For SPARC
 * the closest PCIe root complex driver is PX.
 *
 * pcie_serr_disable_flag : disable SERR only (in RCR and command reg).  x86
 * systems may want to disable SERR in general.  For root ports, enabling
 * SERR causes NMIs which are not handled and result in a watchdog timeout
 * error.
 */
uint32_t pcie_aer_uce_mask = 0;		/* AER UE Mask */
uint32_t pcie_aer_ce_mask = 0;		/* AER CE Mask */
uint32_t pcie_aer_suce_mask = 0;	/* AER Secondary UE Mask */
uint32_t pcie_serr_disable_flag = 0;	/* Disable SERR */

/* Default severities needed for eversholt.  Error handling doesn't care. */
uint32_t pcie_aer_uce_severity = PCIE_AER_UCE_MTLP | PCIE_AER_UCE_RO | \
    PCIE_AER_UCE_FCP | PCIE_AER_UCE_SD | PCIE_AER_UCE_DLP | \
    PCIE_AER_UCE_TRAINING;
uint32_t pcie_aer_suce_severity = PCIE_AER_SUCE_SERR_ASSERT | \
    PCIE_AER_SUCE_UC_ADDR_ERR | PCIE_AER_SUCE_UC_ATTR_ERR | \
    PCIE_AER_SUCE_USC_MSG_DATA_ERR;

int pcie_max_mps = PCIE_DEVCTL_MAX_PAYLOAD_4096 >> 5;
int pcie_disable_ari = 0;
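/*
 * Illustrative sketch (not part of the original code): a platform root
 * complex driver could consume the hooks above from its attach(9E) path
 * to mask UR errors before any children are initialized, e.g.:
 *
 *	uint32_t uce_mask = pcie_get_aer_uce_mask();
 *	pcie_set_aer_uce_mask(uce_mask | PCIE_AER_UCE_UR);
 *
 * pcie_set_aer_uce_mask() also clears UR reporting from
 * pcie_base_err_default, so the mask must be set before pcie_initchild()
 * runs for the affected devices.
 */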
static void pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip,
    int *max_supported);
static int pcie_get_max_supported(dev_info_t *dip, void *arg);
static int pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
    caddr_t *addrp, ddi_acc_handle_t *handlep);
static void pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph);

/*
 * modload support
 */

static struct modlmisc modlmisc = {
	&mod_miscops,	/* Type of module */
	"PCI Express Framework Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlmisc,
	NULL
};

/*
 * Global Variables needed for a non-atomic version of ddi_fm_ereport_post.
 * Currently used to send the pci.fabric ereports whose payload depends on the
 * type of PCI device it is being sent for.
 */
char		*pcie_nv_buf;
nv_alloc_t	*pcie_nvap;
nvlist_t	*pcie_nvl;

int
_init(void)
{
	int rval;

	pcie_nv_buf = kmem_alloc(ERPT_DATA_SZ, KM_SLEEP);
	pcie_nvap = fm_nva_xcreate(pcie_nv_buf, ERPT_DATA_SZ);
	pcie_nvl = fm_nvlist_create(pcie_nvap);

	rval = mod_install(&modlinkage);
	return (rval);
}

int
_fini(void)
{
	int rval;

	fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
	fm_nva_xdestroy(pcie_nvap);
	kmem_free(pcie_nv_buf, ERPT_DATA_SZ);

	rval = mod_remove(&modlinkage);
	return (rval);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/* ARGSUSED */
int
pcie_init(dev_info_t *dip, caddr_t arg)
{
	int ret = DDI_SUCCESS;

	/*
	 * Create a "devctl" minor node to support DEVCTL_DEVICE_*
	 * and DEVCTL_BUS_* ioctls to this bus.
	 */
	if ((ret = ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    PCI_MINOR_NUM(ddi_get_instance(dip), PCI_DEVCTL_MINOR),
	    DDI_NT_NEXUS, 0)) != DDI_SUCCESS) {
		PCIE_DBG("Failed to create devctl minor node for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

		return (ret);
	}

	if ((ret = pcie_hp_init(dip, arg)) != DDI_SUCCESS) {
		/*
		 * On a few x86 platforms, hotplug initialization has been
		 * seen to fail unexpectedly in recent years.  Until all of
		 * those issues are addressed, print a warning and continue;
		 * we don't want a hotplug initialization failure to stop
		 * PCI driver attach and system boot.
		 */
		cmn_err(CE_WARN, "%s%d: Failed setting hotplug framework\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

#if defined(__sparc)
		ddi_remove_minor_node(dip, "devctl");

		return (ret);
#endif /* defined(__sparc) */
	}

	if ((pcie_ari_supported(dip) == PCIE_ARI_FORW_SUPPORTED) &&
	    (pcie_ari_is_enabled(dip) == PCIE_ARI_FORW_DISABLED))
		(void) pcicfg_configure(dip, 0, PCICFG_ALL_FUNC,
		    PCICFG_FLAG_ENABLE_ARI);

	return (DDI_SUCCESS);
}
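/*
 * Illustrative sketch (an assumption about typical usage, not code from
 * this file): a PCIe nexus driver would normally pair these entry points
 * in its attach(9E)/detach(9E) paths, e.g.:
 *
 *	if (pcie_init(dip, NULL) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	...
 *	(void) pcie_uninit(dip);
 */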
/* ARGSUSED */
int
pcie_uninit(dev_info_t *dip)
{
	int ret = DDI_SUCCESS;

	if (pcie_ari_is_enabled(dip) == PCIE_ARI_FORW_ENABLED)
		(void) pcie_ari_disable(dip);

	if ((ret = pcie_hp_uninit(dip)) != DDI_SUCCESS) {
		PCIE_DBG("Failed to uninitialize hotplug for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

		return (ret);
	}

	ddi_remove_minor_node(dip, "devctl");

	return (ret);
}

/* ARGSUSED */
int
pcie_intr(dev_info_t *dip)
{
	return (pcie_hp_intr(dip));
}

/* ARGSUSED */
int
pcie_open(dev_info_t *dip, dev_t *devp, int flags, int otyp, cred_t *credp)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	/*
	 * Make sure the open is for the right file type.
	 */
	if (otyp != OTYP_CHR)
		return (EINVAL);

	/*
	 * Handle the open by tracking the device state.
	 */
	if ((bus_p->bus_soft_state == PCI_SOFT_STATE_OPEN_EXCL) ||
	    ((flags & FEXCL) &&
	    (bus_p->bus_soft_state != PCI_SOFT_STATE_CLOSED))) {
		return (EBUSY);
	}

	if (flags & FEXCL)
		bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN_EXCL;
	else
		bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN;

	return (0);
}

/* ARGSUSED */
int
pcie_close(dev_info_t *dip, dev_t dev, int flags, int otyp, cred_t *credp)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	if (otyp != OTYP_CHR)
		return (EINVAL);

	bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;

	return (0);
}

/* ARGSUSED */
int
pcie_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp)
{
	struct devctl_iocdata	*dcp;
	uint_t			bus_state;
	int			rv = DDI_SUCCESS;

	/*
	 * We can use the generic implementation for these devctl ioctls
	 */
	switch (cmd) {
	case DEVCTL_DEVICE_GETSTATE:
	case DEVCTL_DEVICE_ONLINE:
	case DEVCTL_DEVICE_OFFLINE:
	case DEVCTL_BUS_GETSTATE:
		return (ndi_devctl_ioctl(dip, cmd, arg, mode, 0));
	default:
		break;
	}

	/*
	 * read devctl ioctl data
	 */
	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
		return (EFAULT);

	switch (cmd) {
	case DEVCTL_BUS_QUIESCE:
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
			if (bus_state == BUS_QUIESCED)
				break;
		(void) ndi_set_bus_state(dip, BUS_QUIESCED);
		break;
	case DEVCTL_BUS_UNQUIESCE:
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
			if (bus_state == BUS_ACTIVE)
				break;
		(void) ndi_set_bus_state(dip, BUS_ACTIVE);
		break;
	case DEVCTL_BUS_RESET:
	case DEVCTL_BUS_RESETALL:
	case DEVCTL_DEVICE_RESET:
		rv = ENOTSUP;
		break;
	default:
		rv = ENOTTY;
	}

	ndi_dc_freehdl(dcp);
	return (rv);
}

/* ARGSUSED */
int
pcie_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int flags, char *name, caddr_t valuep, int *lengthp)
{
	if (dev == DDI_DEV_T_ANY)
		goto skip;

	if (PCIE_IS_HOTPLUG_CAPABLE(dip) &&
	    strcmp(name, "pci-occupant") == 0) {
		int	pci_dev = PCI_MINOR_NUM_TO_PCI_DEVNUM(getminor(dev));

		pcie_hp_create_occupant_props(dip, dev, pci_dev);
	}

skip:
	return (ddi_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp));
}

/*
 * PCI-Express child device initialization.
 * This function enables generic pci-express interrupts and error
 * handling.
 *
 * @param cdip		child's dip (device's dip)
 * @return		DDI_SUCCESS or DDI_FAILURE
 */
/* ARGSUSED */
int
pcie_initchild(dev_info_t *cdip)
{
	uint16_t	tmp16, reg16;
	pcie_bus_t	*bus_p;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL) {
		PCIE_DBG("%s: BUS not found.\n",
		    ddi_driver_name(cdip));

		return (DDI_FAILURE);
	}

	/* Clear the device's status register */
	reg16 = PCIE_GET(16, bus_p, PCI_CONF_STAT);
	PCIE_PUT(16, bus_p, PCI_CONF_STAT, reg16);

	/* Setup the device's command register */
	reg16 = PCIE_GET(16, bus_p, PCI_CONF_COMM);
	tmp16 = (reg16 & pcie_command_default_fw) | pcie_command_default;

#if defined(__i386) || defined(__amd64)
	boolean_t empty_io_range = B_FALSE;
	boolean_t empty_mem_range = B_FALSE;
	/*
	 * Check for empty IO and Mem ranges on bridges.  If so, disable
	 * IO/Mem access, as enabling it can cause a hang.
	 */
	pcie_check_io_mem_range(bus_p->bus_cfg_hdl, &empty_io_range,
	    &empty_mem_range);
	if ((empty_io_range == B_TRUE) &&
	    (pcie_command_default & PCI_COMM_IO)) {
		tmp16 &= ~PCI_COMM_IO;
		PCIE_DBG("No I/O range found for %s, bdf 0x%x\n",
		    ddi_driver_name(cdip), bus_p->bus_bdf);
	}
	if ((empty_mem_range == B_TRUE) &&
	    (pcie_command_default & PCI_COMM_MAE)) {
		tmp16 &= ~PCI_COMM_MAE;
		PCIE_DBG("No Mem range found for %s, bdf 0x%x\n",
		    ddi_driver_name(cdip), bus_p->bus_bdf);
	}
#endif /* defined(__i386) || defined(__amd64) */

	if (pcie_serr_disable_flag && PCIE_IS_PCIE(bus_p))
		tmp16 &= ~PCI_COMM_SERR_ENABLE;

	PCIE_PUT(16, bus_p, PCI_CONF_COMM, tmp16);
	PCIE_DBG_CFG(cdip, bus_p, "COMMAND", 16, PCI_CONF_COMM, reg16);

	/*
	 * If the device has a bus control register then program it
	 * based on the settings in the command register.
	 */
	if (PCIE_IS_BDG(bus_p)) {
		/* Clear the device's secondary status register */
		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_SEC_STATUS);
		PCIE_PUT(16, bus_p, PCI_BCNF_SEC_STATUS, reg16);

		/* Setup the device's secondary command register */
		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_BCNTRL);
		tmp16 = (reg16 & pcie_bdg_command_default_fw);

		tmp16 |= PCI_BCNF_BCNTRL_SERR_ENABLE;
		/*
		 * Workaround for an Nvidia bridge (device/vendor id
		 * 0x037010DE): don't set the SERR enable bit in the bridge
		 * control register, as doing so could lead to bogus NMIs.
		 */
		if (bus_p->bus_dev_ven_id == 0x037010DE)
			tmp16 &= ~PCI_BCNF_BCNTRL_SERR_ENABLE;

		if (pcie_command_default & PCI_COMM_PARITY_DETECT)
			tmp16 |= PCI_BCNF_BCNTRL_PARITY_ENABLE;

		/*
		 * Enable Master Abort Mode only if URs have not been masked.
		 * For PCI and PCIe-PCI bridges, enabling this bit causes
		 * Master Aborts/URs to be forwarded as a UR/TA or SERR.  If
		 * this bit is masked, posted requests are dropped and
		 * non-posted requests are returned with -1.
		 */
		if (pcie_aer_uce_mask & PCIE_AER_UCE_UR)
			tmp16 &= ~PCI_BCNF_BCNTRL_MAST_AB_MODE;
		else
			tmp16 |= PCI_BCNF_BCNTRL_MAST_AB_MODE;
		PCIE_PUT(16, bus_p, PCI_BCNF_BCNTRL, tmp16);
		PCIE_DBG_CFG(cdip, bus_p, "SEC CMD", 16, PCI_BCNF_BCNTRL,
		    reg16);
	}

	if (PCIE_IS_PCIE(bus_p)) {
		/* Setup PCIe device control register */
		reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
		tmp16 = pcie_devctl_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(cdip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);

		/* Enable PCIe errors */
		pcie_enable_errors(cdip);
	}

	bus_p->bus_ari = B_FALSE;
	if ((pcie_ari_is_enabled(ddi_get_parent(cdip))
	    == PCIE_ARI_FORW_ENABLED) && (pcie_ari_device(cdip)
	    == PCIE_ARI_DEVICE)) {
		bus_p->bus_ari = B_TRUE;
	}

	if (pcie_initchild_mps(cdip) == DDI_FAILURE)
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}
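/*
 * Note (an assumption about usage, not from this file): pcie_initchild()
 * and pcie_uninitchild() are intended to be driven from a PCIe nexus
 * driver's bus_ctl(9E) child init/uninit paths, roughly:
 *
 *	case DDI_CTLOPS_INITCHILD:
 *		return (pcie_initchild((dev_info_t *)arg));
 *	case DDI_CTLOPS_UNINITCHILD:
 *		pcie_uninitchild((dev_info_t *)arg);
 *		return (DDI_SUCCESS);
 */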
#define	PCIE_ZALLOC(data) kmem_zalloc(sizeof (data), KM_SLEEP)
static void
pcie_init_pfd(dev_info_t *dip)
{
	pf_data_t	*pfd_p = PCIE_ZALLOC(pf_data_t);
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	PCIE_DIP2PFD(dip) = pfd_p;

	pfd_p->pe_bus_p = bus_p;
	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_lock = B_FALSE;
	pfd_p->pe_valid = B_FALSE;

	/* Allocate the root fault struct for both RC and RP */
	if (PCIE_IS_ROOT(bus_p)) {
		PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
	}

	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);

	if (PCIE_IS_BDG(bus_p))
		PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);

	if (PCIE_IS_PCIE(bus_p)) {
		PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);

		if (PCIE_IS_RP(bus_p))
			PCIE_RP_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_rp_err_regs_t);

		PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;

		if (PCIE_IS_RP(bus_p)) {
			PCIE_ADV_RP_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id =
			    PCIE_INVALID_BDF;
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id =
			    PCIE_INVALID_BDF;
		} else if (PCIE_IS_PCIE_BDG(bus_p)) {
			PCIE_ADV_BDG_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_adv_bdg_err_regs_t);
			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
			    PCIE_INVALID_BDF;
		}

		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			PCIX_BDG_ERR_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				PCIX_BDG_ECC_REG(pfd_p, 0) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
				PCIX_BDG_ECC_REG(pfd_p, 1) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
			}
		}
	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			PCIX_BDG_ERR_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				PCIX_BDG_ECC_REG(pfd_p, 0) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
				PCIX_BDG_ECC_REG(pfd_p, 1) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
			}
		} else {
			PCIX_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcix_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p))
				PCIX_ECC_REG(pfd_p) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
		}
	}
}

static void
pcie_fini_pfd(dev_info_t *dip)
{
	pf_data_t	*pfd_p = PCIE_DIP2PFD(dip);
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	if (PCIE_IS_PCIE(bus_p)) {
		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}

			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
			    sizeof (pf_pcix_bdg_err_regs_t));
		}

		if (PCIE_IS_RP(bus_p))
			kmem_free(PCIE_ADV_RP_REG(pfd_p),
			    sizeof (pf_pcie_adv_rp_err_regs_t));
		else if (PCIE_IS_PCIE_BDG(bus_p))
			kmem_free(PCIE_ADV_BDG_REG(pfd_p),
			    sizeof (pf_pcie_adv_bdg_err_regs_t));

		kmem_free(PCIE_ADV_REG(pfd_p),
		    sizeof (pf_pcie_adv_err_regs_t));

		if (PCIE_IS_RP(bus_p))
			kmem_free(PCIE_RP_REG(pfd_p),
			    sizeof (pf_pcie_rp_err_regs_t));

		kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}

			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
			    sizeof (pf_pcix_bdg_err_regs_t));
		} else {
			if (PCIX_ECC_VERSION_CHECK(bus_p))
				kmem_free(PCIX_ECC_REG(pfd_p),
				    sizeof (pf_pcix_ecc_regs_t));

			kmem_free(PCIX_ERR_REG(pfd_p),
			    sizeof (pf_pcix_err_regs_t));
		}
	}

	if (PCIE_IS_BDG(bus_p))
		kmem_free(PCI_BDG_ERR_REG(pfd_p),
		    sizeof (pf_pci_bdg_err_regs_t));

	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));

	if (PCIE_IS_ROOT(bus_p))
		kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));

	kmem_free(PCIE_DIP2PFD(dip), sizeof (pf_data_t));

	PCIE_DIP2PFD(dip) = NULL;
}


/*
 * Special functions to allocate pf_data_t's for PCIe root complexes.
 * Note: Root Complex not Root Port
 */
void
pcie_rc_init_pfd(dev_info_t *dip, pf_data_t *pfd_p)
{
	pfd_p->pe_bus_p = PCIE_DIP2DOWNBUS(dip);
	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_lock = B_FALSE;
	pfd_p->pe_valid = B_FALSE;

	PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
	PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
	PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);
	PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);
	PCIE_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_rp_err_regs_t);
	PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
	PCIE_ADV_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);

	PCIE_ADV_REG(pfd_p)->pcie_ue_sev = pcie_aer_uce_severity;
}

void
pcie_rc_fini_pfd(pf_data_t *pfd_p)
{
	kmem_free(PCIE_ADV_RP_REG(pfd_p), sizeof (pf_pcie_adv_rp_err_regs_t));
	kmem_free(PCIE_ADV_REG(pfd_p), sizeof (pf_pcie_adv_err_regs_t));
	kmem_free(PCIE_RP_REG(pfd_p), sizeof (pf_pcie_rp_err_regs_t));
	kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
	kmem_free(PCI_BDG_ERR_REG(pfd_p), sizeof (pf_pci_bdg_err_regs_t));
	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));
	kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
}
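/*
 * Illustrative sketch (an assumption, not code from this file): a root
 * complex driver would typically allocate its own pf_data_t and pair
 * these calls with pcie_rc_init_bus()/pcie_rc_fini_bus() at attach and
 * detach time, e.g.:
 *
 *	pcie_rc_init_bus(dip);
 *	pcie_rc_init_pfd(dip, pfd_p);
 *	...
 *	pcie_rc_fini_pfd(pfd_p);
 *	pcie_rc_fini_bus(dip);
 *
 * pcie_rc_init_bus() must come first, since pcie_rc_init_pfd() looks up
 * the bus private data via PCIE_DIP2DOWNBUS().
 */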
void
pcie_rc_init_bus(dev_info_t *dip)
{
	pcie_bus_t *bus_p;

	bus_p = (pcie_bus_t *)kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);
	bus_p->bus_dip = dip;
	bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_RC_PSEUDO;
	bus_p->bus_hdr_type = PCI_HEADER_ONE;

	/* Fake that there are AER logs */
	bus_p->bus_aer_off = (uint16_t)-1;

	/* Needed only for handle lookup */
	bus_p->bus_fm_flags |= PF_FM_READY;

	ndi_set_bus_private(dip, B_FALSE, DEVI_PORT_TYPE_PCI, bus_p);
}

void
pcie_rc_fini_bus(dev_info_t *dip)
{
	pcie_bus_t *bus_p = (pcie_bus_t *)ndi_get_bus_private(dip, B_FALSE);

	ndi_set_bus_private(dip, B_FALSE, NULL, NULL);
	kmem_free(bus_p, sizeof (pcie_bus_t));
}

/*
 * Initialize PCIe Bus Private Data
 *
 * PCIe Bus Private Data contains commonly used PCI/PCIe information and
 * offsets to key registers.
 */
pcie_bus_t *
pcie_init_bus(dev_info_t *cdip)
{
	pcie_bus_t		*bus_p = 0;
	ddi_acc_handle_t	eh = NULL;
	int			range_size;
	dev_info_t		*pdip;
	const char		*errstr = NULL;

	ASSERT(PCIE_DIP2UPBUS(cdip) == NULL);

	/* allocate memory for pcie bus data */
	bus_p = kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);

	/* Set back pointer to dip */
	bus_p->bus_dip = cdip;

	/* Create a config access handle dedicated to error handling */
	if (pci_config_setup(cdip, &eh) != DDI_SUCCESS) {
		errstr = "Cannot setup config access";
		goto fail;
	}

	bus_p->bus_cfg_hdl = eh;
	bus_p->bus_fm_flags = 0;
	bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;

	/* get device's bus/dev/function number */
	if (pcie_get_bdf_from_dip(cdip, &bus_p->bus_bdf) != DDI_SUCCESS) {
		errstr = "Cannot get device BDF";
		goto fail;
	}

	/* Save the Vendor Id and Device Id */
	bus_p->bus_dev_ven_id = PCIE_GET(32, bus_p, PCI_CONF_VENID);
	bus_p->bus_rev_id = PCIE_GET(8, bus_p, PCI_CONF_REVID);

	/* Save the Header Type */
	bus_p->bus_hdr_type = PCIE_GET(8, bus_p, PCI_CONF_HEADER);
	bus_p->bus_hdr_type &= PCI_HEADER_TYPE_M;

	/* Figure out the device type and all relevant capability offsets */
	if ((PCI_CAP_LOCATE(eh, PCI_CAP_ID_PCI_E, &bus_p->bus_pcie_off))
	    != DDI_FAILURE) {
		bus_p->bus_dev_type = PCI_CAP_GET16(eh, NULL,
		    bus_p->bus_pcie_off, PCIE_PCIECAP) &
		    PCIE_PCIECAP_DEV_TYPE_MASK;

		if (PCI_CAP_LOCATE(eh, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_AER),
		    &bus_p->bus_aer_off) != DDI_SUCCESS)
			bus_p->bus_aer_off = NULL;

		/* Check and save PCIe hotplug capability information */
		if ((PCIE_IS_RP(bus_p) || PCIE_IS_SWD(bus_p)) &&
		    (PCI_CAP_GET16(eh, NULL, bus_p->bus_pcie_off, PCIE_PCIECAP)
		    & PCIE_PCIECAP_SLOT_IMPL) &&
		    (PCI_CAP_GET32(eh, NULL, bus_p->bus_pcie_off, PCIE_SLOTCAP)
		    & PCIE_SLOTCAP_HP_CAPABLE))
			bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE;
	} else {
		bus_p->bus_pcie_off = NULL;
		bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;
	}

	if ((PCI_CAP_LOCATE(eh, PCI_CAP_ID_PCIX, &bus_p->bus_pcix_off))
	    != DDI_FAILURE) {
		if (PCIE_IS_BDG(bus_p))
			bus_p->bus_ecc_ver = PCIX_CAP_GET(16, bus_p,
			    PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
		else
			bus_p->bus_ecc_ver = PCIX_CAP_GET(16, bus_p,
			    PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
	} else {
		bus_p->bus_pcix_off = NULL;
		bus_p->bus_ecc_ver = NULL;
	}

	/* Save the Range information if device is a switch/bridge */
	if (PCIE_IS_BDG(bus_p)) {
		/* Check and save PCI hotplug (SHPC) capability information */
		if ((PCI_CAP_LOCATE(eh, PCI_CAP_ID_PCI_HOTPLUG,
		    &bus_p->bus_pci_hp_off)) == DDI_SUCCESS)
			bus_p->bus_hp_sup_modes |= PCIE_PCI_HP_MODE;

		/* get "bus_range" property */
		range_size = sizeof (pci_bus_range_t);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
		    "bus-range", (caddr_t)&bus_p->bus_bus_range, &range_size)
		    != DDI_PROP_SUCCESS) {
			errstr = "Cannot find \"bus-range\" property";
			goto fail;
		}

		/* get secondary bus number */
		bus_p->bus_bdg_secbus = PCIE_GET(8, bus_p, PCI_BCNF_SECBUS);

		/* Get "ranges" property */
		if (ddi_getlongprop(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
		    "ranges", (caddr_t)&bus_p->bus_addr_ranges,
		    &bus_p->bus_addr_entries) != DDI_PROP_SUCCESS)
			bus_p->bus_addr_entries = 0;
		bus_p->bus_addr_entries /= sizeof (ppb_ranges_t);
	}

	/* save "assigned-addresses" property array, ignore failures */
	if (ddi_getlongprop(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
	    "assigned-addresses", (caddr_t)&bus_p->bus_assigned_addr,
	    &bus_p->bus_assigned_entries) == DDI_PROP_SUCCESS)
		bus_p->bus_assigned_entries /= sizeof (pci_regspec_t);
	else
		bus_p->bus_assigned_entries = 0;

	/* save RP dip and RP bdf */
	if (PCIE_IS_RP(bus_p)) {
		bus_p->bus_rp_dip = cdip;
		bus_p->bus_rp_bdf = bus_p->bus_bdf;
	} else {
		for (pdip = ddi_get_parent(cdip); pdip;
		    pdip = ddi_get_parent(pdip)) {
			pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip);

			/*
			 * When debugging be aware that some NVIDIA x86
			 * architectures have 2 nodes for each RP, one at
			 * Bus 0x0 and one at Bus 0x80.  The requester is
			 * from Bus 0x80.
			 */
			if (PCIE_IS_ROOT(parent_bus_p)) {
				bus_p->bus_rp_dip = pdip;
				bus_p->bus_rp_bdf = parent_bus_p->bus_bdf;
				break;
			}
		}
	}

	ndi_set_bus_private(cdip, B_TRUE, DEVI_PORT_TYPE_PCI, (void *)bus_p);

	if (PCIE_IS_HOTPLUG_CAPABLE(cdip))
		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE, cdip,
		    "hotplug-capable");

	pcie_init_pfd(cdip);

	bus_p->bus_mps = 0;

	pcie_init_plat(cdip);

	PCIE_DBG("Add %s(dip 0x%p, bdf 0x%x, secbus 0x%x)\n",
	    ddi_driver_name(cdip), (void *)cdip, bus_p->bus_bdf,
	    bus_p->bus_bdg_secbus);
#ifdef DEBUG
	pcie_print_bus(bus_p);
#endif

	return (bus_p);
fail:
	cmn_err(CE_WARN, "PCIE init err info failed BDF 0x%x:%s\n",
	    bus_p->bus_bdf, errstr);
	if (eh)
		pci_config_teardown(&eh);
	kmem_free(bus_p, sizeof (pcie_bus_t));
	return (NULL);
}

int
pcie_postattach_child(dev_info_t *cdip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip);

	if (!bus_p)
		return (DDI_FAILURE);

	return (pcie_enable_ce(cdip));
}
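/*
 * Note (an assumption about typical usage, not from this file):
 * pcie_init_bus() is expected to run from the nexus driver's child
 * initialization path before pcie_initchild(), since pcie_initchild()
 * relies on the bus_p set up here; pcie_fini_bus() undoes it on the
 * uninit path.  pcie_postattach_child() is then called once the child
 * driver has attached, so that correctable error reporting is turned
 * on last.
 */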
/*
 * PCI-Express child device de-initialization.
 * This function disables generic pci-express interrupts and error
 * handling.
 */
void
pcie_uninitchild(dev_info_t *cdip)
{
	pcie_disable_errors(cdip);
	pcie_fini_bus(cdip);
}

void
pcie_fini_bus(dev_info_t *cdip)
{
	pcie_bus_t	*bus_p;

	pcie_fini_plat(cdip);
	pcie_fini_pfd(cdip);

	bus_p = PCIE_DIP2UPBUS(cdip);
	ASSERT(bus_p);

	if (PCIE_IS_HOTPLUG_CAPABLE(cdip))
		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
		    "hotplug-capable");

	pci_config_teardown(&bus_p->bus_cfg_hdl);
	ndi_set_bus_private(cdip, B_TRUE, NULL, NULL);
	kmem_free(bus_p->bus_assigned_addr,
	    (sizeof (pci_regspec_t) * bus_p->bus_assigned_entries));
	kmem_free(bus_p->bus_addr_ranges,
	    (sizeof (ppb_ranges_t) * bus_p->bus_addr_entries));

	kmem_free(bus_p, sizeof (pcie_bus_t));
}

void
pcie_enable_errors(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	reg16, tmp16;
	uint32_t	reg32, tmp32;

	ASSERT(bus_p);

	/*
	 * Clear any pending errors
	 */
	pcie_clear_errors(dip);

	if (!PCIE_IS_PCIE(bus_p))
		return;

	/*
	 * Enable Baseline Error Handling but leave CE reporting off (poweron
	 * default).
	 */
	if ((reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL)) !=
	    PCI_CAP_EINVAL16) {
		tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_base_err_default & (~PCIE_DEVCTL_CE_REPORTING_EN));

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);
	}

	/* Enable Root Port Baseline Error Receiving */
	if (PCIE_IS_ROOT(bus_p) &&
	    (reg16 = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL)) !=
	    PCI_CAP_EINVAL16) {

		tmp16 = pcie_serr_disable_flag ?
		    (pcie_root_ctrl_default & ~PCIE_ROOT_SYS_ERR) :
		    pcie_root_ctrl_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "ROOT DEVCTL", 16, PCIE_ROOTCTL,
		    reg16);
	}

	/*
	 * Enable PCI-Express Advanced Error Handling if it exists
	 */
	if (!PCIE_HAS_AER(bus_p))
		return;

	/* Set Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE SEV", 32, PCIE_AER_UCE_SERV,
		    reg32);
	}

	/* Enable Uncorrectable errors */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_mask;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE MASK", 32, PCIE_AER_UCE_MASK,
		    reg32);
	}

	/* Enable ECRC generation and checking */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = reg32 | pcie_ecrc_value;
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER CTL", 32, PCIE_AER_CTL, reg32);
	}

	/* Enable Secondary Uncorrectable errors if this is a bridge */
	if (!PCIE_IS_PCIE_BDG(bus_p))
		goto root;

	/* Set Secondary Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_suce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE SEV", 32,
		    PCIE_AER_SUCE_SERV, reg32);
	}

	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK,
		    pcie_aer_suce_mask);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE MASK", 32,
		    PCIE_AER_SUCE_MASK, reg32);
	}

root:
	/*
	 * Enable Root Control if this is a Root device
	 */
	if (!PCIE_IS_ROOT(bus_p))
		return;

	if ((reg16 = PCIE_AER_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
	    PCI_CAP_EINVAL16) {
		PCIE_AER_PUT(16, bus_p, PCIE_AER_RE_CMD,
		    pcie_root_error_cmd_default);
		PCIE_DBG_AER(dip, bus_p, "AER Root Err Cmd", 16,
		    PCIE_AER_RE_CMD, reg16);
	}
}

/*
 * This function is used for enabling CE reporting and setting the AER CE
 * mask.  When called from outside the pcie module it should always be
 * preceded by a call to pcie_enable_errors.
 */
int
pcie_enable_ce(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	device_sts, device_ctl;
	uint32_t	tmp_pcie_aer_ce_mask;

	if (!PCIE_IS_PCIE(bus_p))
		return (DDI_SUCCESS);

	/*
	 * The "pcie_ce_mask" property is used to control both the CE
	 * reporting enable field in the device control register and the AER
	 * CE mask.  We leave CE reporting disabled if pcie_ce_mask is set
	 * to -1.
	 */

	tmp_pcie_aer_ce_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "pcie_ce_mask", pcie_aer_ce_mask);

	if (tmp_pcie_aer_ce_mask == (uint32_t)-1) {
		/*
		 * Nothing to do since CE reporting has already been
		 * disabled.
		 */
		return (DDI_SUCCESS);
	}

	if (PCIE_HAS_AER(bus_p)) {
		/* Enable AER CE */
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK,
		    tmp_pcie_aer_ce_mask);
		PCIE_DBG_AER(dip, bus_p, "AER CE MASK", 32, PCIE_AER_CE_MASK,
		    0);

		/* Clear any pending AER CE errors */
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_STS, -1);
	}

	/* clear any pending CE errors */
	if ((device_sts = PCIE_CAP_GET(16, bus_p, PCIE_DEVSTS)) !=
	    PCI_CAP_EINVAL16)
		PCIE_CAP_PUT(16, bus_p, PCIE_DEVSTS,
		    device_sts & (~PCIE_DEVSTS_CE_DETECTED));

	/* Enable CE reporting */
	device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL,
	    (device_ctl & (~PCIE_DEVCTL_ERR_MASK)) | pcie_base_err_default);
	PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, device_ctl);

	return (DDI_SUCCESS);
}
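/*
 * Illustrative call sequence (an assumption about usage, not code from
 * this file): callers outside this module are expected to enable errors
 * in two steps and disable them on teardown:
 *
 *	pcie_enable_errors(dip);	-- baseline + AER, CE left off
 *	(void) pcie_enable_ce(dip);	-- then CE reporting and CE mask
 *	...
 *	pcie_disable_errors(dip);
 */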
/* ARGSUSED */
void
pcie_disable_errors(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	device_ctl;
	uint32_t	aer_reg;

	if (!PCIE_IS_PCIE(bus_p))
		return;

	/*
	 * Disable PCI-Express Baseline Error Handling
	 */
	device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
	device_ctl &= ~PCIE_DEVCTL_ERR_MASK;
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, device_ctl);

	/*
	 * Disable PCI-Express Advanced Error Handling if it exists
	 */
	if (!PCIE_HAS_AER(bus_p))
		goto root;

	/* Disable Uncorrectable errors */
	PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, PCIE_AER_UCE_BITS);

	/* Disable Correctable errors */
	PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, PCIE_AER_CE_BITS);

	/* Disable ECRC generation and checking */
	if ((aer_reg = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
	    PCI_CAP_EINVAL32) {
		aer_reg &= ~(PCIE_AER_CTL_ECRC_GEN_ENA |
		    PCIE_AER_CTL_ECRC_CHECK_ENA);

		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, aer_reg);
	}
	/*
	 * Disable Secondary Uncorrectable errors if this is a bridge
	 */
	if (!PCIE_IS_PCIE_BDG(bus_p))
		goto root;

	PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, PCIE_AER_SUCE_BITS);

root:
	/*
	 * Disable Root Control if this is a Root device
	 */
	if (!PCIE_IS_ROOT(bus_p))
		return;

	if (!pcie_serr_disable_flag) {
		device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL);
		device_ctl &= ~PCIE_ROOT_SYS_ERR;
		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, device_ctl);
	}

	if (!PCIE_HAS_AER(bus_p))
		return;

	if ((device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
	    PCI_CAP_EINVAL16) {
		device_ctl &= ~pcie_root_error_cmd_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_AER_RE_CMD, device_ctl);
	}
}

/*
 * Extract bdf from "reg" property.
 */
int
pcie_get_bdf_from_dip(dev_info_t *dip, pcie_req_id_t *bdf)
{
	pci_regspec_t	*regspec;
	int		reglen;

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", (int **)&regspec, (uint_t *)&reglen) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (reglen < (sizeof (pci_regspec_t) / sizeof (int))) {
		ddi_prop_free(regspec);
		return (DDI_FAILURE);
	}

	/* Get phys_hi from the first element.  All have the same bdf. */
	*bdf = (regspec->pci_phys_hi & (PCI_REG_BDFR_M ^ PCI_REG_REG_M)) >> 8;

	ddi_prop_free(regspec);
	return (DDI_SUCCESS);
}
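/*
 * Worked example (illustrative only): the extracted pcie_req_id_t uses
 * the standard PCI BDF layout, so for a bdf of 0x8a2 the fields decode
 * as:
 *
 *	uint8_t bus  = (bdf >> 8) & 0xff;	-- 0x08
 *	uint8_t dev  = (bdf >> 3) & 0x1f;	-- 0x14
 *	uint8_t func = bdf & 0x7;		-- 0x2
 */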
dev_info_t *
pcie_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t *cdip = rdip;

	for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
		;

	return (cdip);
}

uint32_t
pcie_get_bdf_for_dma_xfer(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t *cdip;

	/*
	 * As part of the probing, the PCI fcode interpreter may setup a DMA
	 * request if a given card has fcode on it, using the dip and rdip
	 * of the hotplug connector, i.e., the dip and rdip of the px/pcieb
	 * driver.  In this case, return an invalid value for the bdf since
	 * we cannot get to the bdf value of the actual device which will
	 * be initiating this DMA.
	 */
	if (rdip == dip)
		return (PCIE_INVALID_BDF);

	cdip = pcie_get_my_childs_dip(dip, rdip);

	/*
	 * For a given rdip, return the bdf value of dip's (px or pcieb)
	 * immediate child or secondary bus-id if dip is a PCIe2PCI bridge.
	 *
	 * XXX - For now, return an invalid bdf value for all PCI and PCI-X
	 * devices since this needs more work.
	 */
	return (PCI_GET_PCIE2PCI_SECBUS(cdip) ?
	    PCIE_INVALID_BDF : PCI_GET_BDF(cdip));
}

uint32_t
pcie_get_aer_uce_mask()
{
	return (pcie_aer_uce_mask);
}

uint32_t
pcie_get_aer_ce_mask()
{
	return (pcie_aer_ce_mask);
}

uint32_t
pcie_get_aer_suce_mask()
{
	return (pcie_aer_suce_mask);
}

uint32_t
pcie_get_serr_mask()
{
	return (pcie_serr_disable_flag);
}

void
pcie_set_aer_uce_mask(uint32_t mask)
{
	pcie_aer_uce_mask = mask;
	if (mask & PCIE_AER_UCE_UR)
		pcie_base_err_default &= ~PCIE_DEVCTL_UR_REPORTING_EN;
	else
		pcie_base_err_default |= PCIE_DEVCTL_UR_REPORTING_EN;

	if (mask & PCIE_AER_UCE_ECRC)
		pcie_ecrc_value = 0;
}

void
pcie_set_aer_ce_mask(uint32_t mask)
{
	pcie_aer_ce_mask = mask;
}

void
pcie_set_aer_suce_mask(uint32_t mask)
{
	pcie_aer_suce_mask = mask;
}

void
pcie_set_serr_mask(uint32_t mask)
{
	pcie_serr_disable_flag = mask;
}

/*
 * Is the rdip a child of dip?  Used to keep certain CTLOPS from bubbling
 * up erroneously, e.g., ISA ctlops to a PCI-PCI Bridge.
 */
boolean_t
pcie_is_child(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t	*cdip = ddi_get_child(dip);

	for (; cdip; cdip = ddi_get_next_sibling(cdip))
		if (cdip == rdip)
			break;
	return (cdip != NULL);
}

boolean_t
pcie_is_link_disabled(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	if (PCIE_IS_PCIE(bus_p)) {
		if (PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL) &
		    PCIE_LINKCTL_LINK_DISABLE)
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Initialize the MPS for a root port.
 *
 * dip - dip of root port device.
 */
void
pcie_init_root_port_mps(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	int		rp_cap, max_supported = pcie_max_mps;

	(void) pcie_get_fabric_mps(ddi_get_parent(dip),
	    ddi_get_child(dip), &max_supported);

	rp_cap = PCI_CAP_GET16(bus_p->bus_cfg_hdl, NULL,
	    bus_p->bus_pcie_off, PCIE_DEVCAP) &
	    PCIE_DEVCAP_MAX_PAYLOAD_MASK;

	if (rp_cap < max_supported)
		max_supported = rp_cap;

	bus_p->bus_mps = max_supported;
	(void) pcie_initchild_mps(dip);
}

/*
 * Initialize the Maximum Payload Size of a device.
 *
 * cdip - dip of device.
 *
 * returns - DDI_SUCCESS or DDI_FAILURE
 */
int
pcie_initchild_mps(dev_info_t *cdip)
{
	int		max_payload_size;
	pcie_bus_t	*bus_p;
	dev_info_t	*pdip = ddi_get_parent(cdip);
	uint8_t		dev_type;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL) {
		PCIE_DBG("%s: BUS not found.\n",
		    ddi_driver_name(cdip));
		return (DDI_FAILURE);
	}

	dev_type = bus_p->bus_dev_type;

	/*
	 * For ARI Devices, only function zero's MPS needs to be set.
	 */
	if ((dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) &&
	    (pcie_ari_is_enabled(pdip) == PCIE_ARI_FORW_ENABLED)) {
		pcie_req_id_t child_bdf;

		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
			return (DDI_FAILURE);
		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) != 0)
			return (DDI_SUCCESS);
	}

	if (PCIE_IS_RP(bus_p)) {
		/*
		 * If this device is a root port, then the mps scan
		 * saved the mps in the root port's bus_p.
		 */
		max_payload_size = bus_p->bus_mps;
	} else {
		/*
		 * If the device is not a root port, then the mps of
		 * its parent should be used.
		 */
		pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip);
		max_payload_size = parent_bus_p->bus_mps;
	}

	if (PCIE_IS_PCIE(bus_p) && (max_payload_size >= 0)) {
		pcie_bus_t *rootp_bus_p = PCIE_DIP2BUS(bus_p->bus_rp_dip);
		uint16_t mask, dev_ctrl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL),
		    mps = PCIE_CAP_GET(16, bus_p, PCIE_DEVCAP) &
		    PCIE_DEVCAP_MAX_PAYLOAD_MASK;

		mps = MIN(mps, (uint16_t)max_payload_size);

		/*
		 * If the MPS to be set is less than the root port's
		 * MPS, then MRRS will have to be set the same as MPS.
		 */
		mask = ((mps < rootp_bus_p->bus_mps) ?
		    PCIE_DEVCTL_MAX_READ_REQ_MASK : 0) |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK;

		dev_ctrl &= ~mask;
		mask = ((mps < rootp_bus_p->bus_mps)
		    ? mps << PCIE_DEVCTL_MAX_READ_REQ_SHIFT : 0)
		    | (mps << PCIE_DEVCTL_MAX_PAYLOAD_SHIFT);

		dev_ctrl |= mask;

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);

		bus_p->bus_mps = mps;
	}

	return (DDI_SUCCESS);
}
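/*
 * Worked example (informational): the MPS values handled above are the
 * 3-bit encodings from the PCIe spec, where payload bytes = 128 <<
 * encoding.  So an encoding of 0 means 128 bytes, 1 means 256 bytes,
 * and 5 (PCIE_DEVCTL_MAX_PAYLOAD_4096 >> 5, the pcie_max_mps default)
 * means 4096 bytes.  A device whose DEVCAP advertises 2 (512B) under a
 * root port whose bus_mps is 1 (256B) ends up programmed with MPS = 1.
 */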
/*
 * Scans a device tree/branch for maximum payload size capabilities.
 *
 * rc_dip - dip of Root Complex.
 * dip - dip of device where scan will begin.
 * max_supported (IN) - maximum allowable MPS.
 * max_supported (OUT) - maximum payload size capability of fabric.
 */
void
pcie_get_fabric_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
{
	if (dip == NULL)
		return;

	/*
	 * Perform a fabric scan to obtain Maximum Payload Capabilities
	 */
	(void) pcie_scan_mps(rc_dip, dip, max_supported);

	PCIE_DBG("MPS: Highest Common MPS = %x\n", *max_supported);
}

/*
 * Scans the fabric and determines the Maximum Payload Size based on a
 * highest common denominator algorithm.
 */
static void
pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
{
	int circular_count;
	pcie_max_supported_t max_pay_load_supported;

	max_pay_load_supported.dip = rc_dip;
	max_pay_load_supported.highest_common_mps = *max_supported;

	ndi_devi_enter(ddi_get_parent(dip), &circular_count);
	ddi_walk_devs(dip, pcie_get_max_supported,
	    (void *)&max_pay_load_supported);
	ndi_devi_exit(ddi_get_parent(dip), circular_count);

	*max_supported = max_pay_load_supported.highest_common_mps;
}

/*
 * Called as part of the Maximum Payload Size scan.
 */
static int
pcie_get_max_supported(dev_info_t *dip, void *arg)
{
	uint32_t max_supported;
	uint16_t cap_ptr;
	pcie_max_supported_t *current = (pcie_max_supported_t *)arg;
	pci_regspec_t *reg;
	int rlen;
	caddr_t virt;
	ddi_acc_handle_t config_handle;

	if (ddi_get_child(current->dip) == NULL) {
		goto fail1;
	}

	if (pcie_dev(dip) == DDI_FAILURE) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s: "
		    "Not a PCIe dev\n", ddi_driver_name(dip));
		goto fail1;
	}

	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
	    (caddr_t)&reg, &rlen) != DDI_PROP_SUCCESS) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s: "
		    "Can not read reg\n", ddi_driver_name(dip));
		goto fail1;
	}

	if (pcie_map_phys(ddi_get_child(current->dip), reg, &virt,
	    &config_handle) != DDI_SUCCESS) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s: pcie_map_phys "
		    "failed\n", ddi_driver_name(dip));
		goto fail2;
	}

	if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, &cap_ptr)) ==
	    DDI_FAILURE) {
		goto fail3;
	}

	max_supported = PCI_CAP_GET16(config_handle, NULL, cap_ptr,
	    PCIE_DEVCAP) & PCIE_DEVCAP_MAX_PAYLOAD_MASK;

	PCIE_DBG("PCIE MPS: %s: MPS Capabilities %x\n", ddi_driver_name(dip),
	    max_supported);

	if (max_supported < current->highest_common_mps)
		current->highest_common_mps = max_supported;

fail3:
	pcie_unmap_phys(&config_handle, reg);
fail2:
	kmem_free(reg, rlen);
fail1:
	return (DDI_WALK_CONTINUE);
}
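/*
 * Worked example (informational): with the highest-common-denominator
 * scan above, a fabric whose devices advertise MPS capability encodings
 * of 2 (512B), 1 (256B) and 3 (1024B) converges on highest_common_mps
 * = 1, i.e. 256 bytes, and that value is what pcie_init_root_port_mps()
 * programs into the fabric (further clamped by the root port's own
 * DEVCAP).
 */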
/*
 * Determines if there are any root ports attached to a root complex.
 *
 * dip - dip of root complex
 *
 * Returns - DDI_SUCCESS if there is at least one root port, otherwise
 * DDI_FAILURE.
 */
int
pcie_root_port(dev_info_t *dip)
{
	int port_type;
	uint16_t cap_ptr;
	ddi_acc_handle_t config_handle;
	dev_info_t *cdip = ddi_get_child(dip);

	/*
	 * Determine if any of the children of the passed in dip
	 * are root ports.
	 */
	for (; cdip; cdip = ddi_get_next_sibling(cdip)) {

		if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS)
			continue;

		if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E,
		    &cap_ptr)) == DDI_FAILURE) {
			pci_config_teardown(&config_handle);
			continue;
		}

		port_type = PCI_CAP_GET16(config_handle, NULL, cap_ptr,
		    PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;

		pci_config_teardown(&config_handle);

		if (port_type == PCIE_PCIECAP_DEV_TYPE_ROOT)
			return (DDI_SUCCESS);
	}

	/* No root ports were found */

	return (DDI_FAILURE);
}

/*
 * Function that determines if a device is a PCIe device.
 *
 * dip - dip of device.
 *
 * returns - DDI_SUCCESS if device is a PCIe device, otherwise DDI_FAILURE.
 */
int
pcie_dev(dev_info_t *dip)
{
	/* get parent device's device_type property */
	char *device_type;
	int rc = DDI_FAILURE;
	dev_info_t *pdip = ddi_get_parent(dip);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
	    DDI_PROP_DONTPASS, "device_type", &device_type)
	    != DDI_PROP_SUCCESS) {
		return (DDI_FAILURE);
	}

	if (strcmp(device_type, "pciex") == 0)
		rc = DDI_SUCCESS;
	else
		rc = DDI_FAILURE;

	ddi_prop_free(device_type);
	return (rc);
}

/*
 * Function to map in a device's memory space.
 */
static int
pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
    caddr_t *addrp, ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	ddi_device_acc_attr_t attr;

	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	attr.devacc_attr_access = DDI_CAUTIOUS_ACC;

	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	hp = impl_acc_hdl_get(*handlep);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = 0;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = attr;

	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_REGSPEC;
	mr.map_obj.rp = (struct regspec *)phys_spec;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	result = ddi_map(dip, &mr, 0, 0, addrp);

	if (result != DDI_SUCCESS) {
		impl_acc_hdl_free(*handlep);
		*handlep = (ddi_acc_handle_t)NULL;
	} else {
		hp->ah_addr = *addrp;
	}

	return (result);
}

/*
 * Unmap memory that was mapped in with pcie_map_phys().
 */
static void
pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);

	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_REGSPEC;
	mr.map_obj.rp = (struct regspec *)ph;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
	    hp->ah_len, &hp->ah_addr);

	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;
}

void
pcie_set_rber_fatal(dev_info_t *dip, boolean_t val)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);

	bus_p->bus_pfd->pe_rber_fatal = val;
}

/*
 * Return the parent Root Port's pe_rber_fatal value.
 */
boolean_t
pcie_get_rber_fatal(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	pcie_bus_t *rp_bus_p = PCIE_DIP2UPBUS(bus_p->bus_rp_dip);

	return (rp_bus_p->bus_pfd->pe_rber_fatal);
}

int
pcie_ari_supported(dev_info_t *dip)
{
	uint32_t devcap2;
	uint16_t pciecap;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	uint8_t dev_type;

	PCIE_DBG("pcie_ari_supported: dip=%p\n", dip);

	if (bus_p == NULL)
		return (PCIE_ARI_FORW_NOT_SUPPORTED);

	dev_type = bus_p->bus_dev_type;

	if ((dev_type != PCIE_PCIECAP_DEV_TYPE_DOWN) &&
	    (dev_type != PCIE_PCIECAP_DEV_TYPE_ROOT))
		return (PCIE_ARI_FORW_NOT_SUPPORTED);

	if (pcie_disable_ari) {
		PCIE_DBG("pcie_ari_supported: dip=%p: ARI Disabled\n", dip);
		return (PCIE_ARI_FORW_NOT_SUPPORTED);
	}

	pciecap = PCIE_CAP_GET(16, bus_p, PCIE_PCIECAP);

	if ((pciecap & PCIE_PCIECAP_VER_MASK) < PCIE_PCIECAP_VER_2_0) {
		PCIE_DBG("pcie_ari_supported: dip=%p: Not 2.0\n", dip);
		return (PCIE_ARI_FORW_NOT_SUPPORTED);
	}

	devcap2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCAP2);

	PCIE_DBG("pcie_ari_supported: dip=%p: DevCap2=0x%x\n",
	    dip, devcap2);

	if (devcap2 & PCIE_DEVCAP2_ARI_FORWARD) {
		PCIE_DBG("pcie_ari_supported: "
		    "dip=%p: ARI Forwarding is supported\n", dip);
		return (PCIE_ARI_FORW_SUPPORTED);
	}
	return (PCIE_ARI_FORW_NOT_SUPPORTED);
}

int
pcie_ari_enable(dev_info_t *dip)
{
	uint16_t devctl2;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	PCIE_DBG("pcie_ari_enable: dip=%p\n", dip);

	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
		return (DDI_FAILURE);

	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
	devctl2 |= PCIE_DEVCTL2_ARI_FORWARD_EN;
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);

	PCIE_DBG("pcie_ari_enable: dip=%p: writing 0x%x to DevCtl2\n",
	    dip, devctl2);

	return (DDI_SUCCESS);
}

int
pcie_ari_disable(dev_info_t *dip)
{
	uint16_t devctl2;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	PCIE_DBG("pcie_ari_disable: dip=%p\n", dip);

	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
		return (DDI_FAILURE);

	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
	devctl2 &= ~PCIE_DEVCTL2_ARI_FORWARD_EN;
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);

	PCIE_DBG("pcie_ari_disable: dip=%p: writing 0x%x to DevCtl2\n",
	    dip, devctl2);

	return (DDI_SUCCESS);
}

int
pcie_ari_is_enabled(dev_info_t *dip)
{
	uint16_t devctl2;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	PCIE_DBG("pcie_ari_is_enabled: dip=%p\n", dip);

	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
		return (PCIE_ARI_FORW_DISABLED);

	/* DevCtl2 is a 16-bit register; read it as such */
	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);

	PCIE_DBG("pcie_ari_is_enabled: dip=%p: DevCtl2=0x%x\n",
	    dip, devctl2);

	if (devctl2 & PCIE_DEVCTL2_ARI_FORWARD_EN) {
		PCIE_DBG("pcie_ari_is_enabled: "
		    "dip=%p: ARI Forwarding is enabled\n", dip);
		return (PCIE_ARI_FORW_ENABLED);
	}

	return (PCIE_ARI_FORW_DISABLED);
}

int
pcie_ari_device(dev_info_t *dip)
{
	ddi_acc_handle_t handle;
	uint16_t cap_ptr;

	PCIE_DBG("pcie_ari_device: dip=%p\n", dip);
	/*
	 * XXX - This function may be called before the bus_p structure
	 * has been populated.  This code can be changed to remove
	 * pci_config_setup()/pci_config_teardown() when the RFE
	 * to populate the bus_p structures early in boot is putback.
	 */

	/* First make sure it is a PCIe device */

	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
		return (PCIE_NOT_ARI_DEVICE);

	if ((PCI_CAP_LOCATE(handle, PCI_CAP_ID_PCI_E, &cap_ptr))
	    != DDI_SUCCESS) {
		pci_config_teardown(&handle);
		return (PCIE_NOT_ARI_DEVICE);
	}

	/* Locate the ARI Capability */

	if ((PCI_CAP_LOCATE(handle, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI),
	    &cap_ptr)) == DDI_FAILURE) {
		pci_config_teardown(&handle);
		return (PCIE_NOT_ARI_DEVICE);
	}

	/* ARI Capability was found so it must be an ARI device */
	PCIE_DBG("pcie_ari_device: ARI Device dip=%p\n", dip);

	pci_config_teardown(&handle);
	return (PCIE_ARI_DEVICE);
}

int
pcie_ari_get_next_function(dev_info_t *dip, int *func)
{
	uint32_t val;
	uint16_t cap_ptr, next_function;
	ddi_acc_handle_t handle;

	/*
	 * XXX - This function may be called before the bus_p structure
	 * has been populated.  This code can be changed to remove
	 * pci_config_setup()/pci_config_teardown() when the RFE
	 * to populate the bus_p structures early in boot is putback.
	 */

	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if ((PCI_CAP_LOCATE(handle,
	    PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI), &cap_ptr)) == DDI_FAILURE) {
		pci_config_teardown(&handle);
		return (DDI_FAILURE);
	}

	val = PCI_CAP_GET32(handle, NULL, cap_ptr, PCIE_ARI_CAP);

	next_function = (val >> PCIE_ARI_CAP_NEXT_FUNC_SHIFT) &
	    PCIE_ARI_CAP_NEXT_FUNC_MASK;

	pci_config_teardown(&handle);

	*func = next_function;

	return (DDI_SUCCESS);
}

dev_info_t *
pcie_func_to_dip(dev_info_t *dip, pcie_req_id_t function)
{
	pcie_req_id_t child_bdf;
	dev_info_t *cdip;

	for (cdip = ddi_get_child(dip); cdip;
	    cdip = ddi_get_next_sibling(cdip)) {

		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
			return (NULL);

		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) == function)
			return (cdip);
	}
	return (NULL);
}
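/*
 * Illustrative sketch (an assumption, not code from this file): the two
 * functions above can be combined to walk an ARI device's function
 * chain, which is terminated by a next-function number of 0:
 *
 *	int func = 0;
 *	dev_info_t *fdip;
 *
 *	while ((fdip = pcie_func_to_dip(dip, func)) != NULL) {
 *		-- operate on fdip
 *		if (pcie_ari_get_next_function(fdip, &func) != DDI_SUCCESS ||
 *		    func == 0)
 *			break;
 *	}
 */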
#ifdef	DEBUG

static void
pcie_print_bus(pcie_bus_t *bus_p)
{
	pcie_dbg("\tbus_dip = 0x%p\n", bus_p->bus_dip);
	pcie_dbg("\tbus_fm_flags = 0x%x\n", bus_p->bus_fm_flags);

	pcie_dbg("\tbus_bdf = 0x%x\n", bus_p->bus_bdf);
	pcie_dbg("\tbus_dev_ven_id = 0x%x\n", bus_p->bus_dev_ven_id);
	pcie_dbg("\tbus_rev_id = 0x%x\n", bus_p->bus_rev_id);
	pcie_dbg("\tbus_hdr_type = 0x%x\n", bus_p->bus_hdr_type);
	pcie_dbg("\tbus_dev_type = 0x%x\n", bus_p->bus_dev_type);
	pcie_dbg("\tbus_bdg_secbus = 0x%x\n", bus_p->bus_bdg_secbus);
	pcie_dbg("\tbus_pcie_off = 0x%x\n", bus_p->bus_pcie_off);
	pcie_dbg("\tbus_aer_off = 0x%x\n", bus_p->bus_aer_off);
	pcie_dbg("\tbus_pcix_off = 0x%x\n", bus_p->bus_pcix_off);
	pcie_dbg("\tbus_ecc_ver = 0x%x\n", bus_p->bus_ecc_ver);
}

/*
 * For debugging purposes set pcie_dbg_print != 0 to see printf messages
 * during interrupt.
 *
 * When a proper solution is in place this code will disappear.
 * Potential solutions are:
 * o circular buffers
 * o taskq to print at lower pil
 */
int pcie_dbg_print = 0;
void
pcie_dbg(char *fmt, ...)
{
	va_list ap;

	if (!pcie_debug_flags) {
		return;
	}
	va_start(ap, fmt);
	if (servicing_interrupt()) {
		if (pcie_dbg_print) {
			prom_vprintf(fmt, ap);
		}
	} else {
		prom_vprintf(fmt, ap);
	}
	va_end(ap);
}
#endif	/* DEBUG */

#if defined(__i386) || defined(__amd64)
static void
pcie_check_io_mem_range(ddi_acc_handle_t cfg_hdl, boolean_t *empty_io_range,
    boolean_t *empty_mem_range)
{
	uint8_t	class, subclass;
	uint_t	val;

	class = pci_config_get8(cfg_hdl, PCI_CONF_BASCLASS);
	subclass = pci_config_get8(cfg_hdl, PCI_CONF_SUBCLASS);

	if ((class == PCI_CLASS_BRIDGE) && (subclass == PCI_BRIDGE_PCI)) {
		val = (((uint_t)pci_config_get8(cfg_hdl, PCI_BCNF_IO_BASE_LOW)
		    & PCI_BCNF_IO_MASK) << 8);
		/*
		 * Assume that an I/O base of zero implies an invalid
		 * (empty) I/O range; likewise for the memory range below.
		 */
		if (val == 0)
			*empty_io_range = B_TRUE;
		val = (((uint_t)pci_config_get16(cfg_hdl, PCI_BCNF_MEM_BASE) &
		    PCI_BCNF_MEM_MASK) << 16);
		if (val == 0)
			*empty_mem_range = B_TRUE;
	}
}
#endif /* defined(__i386) || defined(__amd64) */