1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include <sys/sysmacros.h> 27 #include <sys/types.h> 28 #include <sys/kmem.h> 29 #include <sys/modctl.h> 30 #include <sys/ddi.h> 31 #include <sys/sunddi.h> 32 #include <sys/sunndi.h> 33 #include <sys/fm/protocol.h> 34 #include <sys/fm/util.h> 35 #include <sys/promif.h> 36 #include <sys/disp.h> 37 #include <sys/stat.h> 38 #include <sys/file.h> 39 #include <sys/pci_cap.h> 40 #include <sys/pci_impl.h> 41 #include <sys/pcie_impl.h> 42 #include <sys/hotplug/pci/pcie_hp.h> 43 #include <sys/hotplug/pci/pcicfg.h> 44 #include <sys/pci_cfgacc.h> 45 46 /* Local functions prototypes */ 47 static void pcie_init_pfd(dev_info_t *); 48 static void pcie_fini_pfd(dev_info_t *); 49 50 #if defined(__i386) || defined(__amd64) 51 static void pcie_check_io_mem_range(ddi_acc_handle_t, boolean_t *, boolean_t *); 52 #endif /* defined(__i386) || defined(__amd64) */ 53 54 #ifdef DEBUG 55 uint_t pcie_debug_flags = 0; 56 static void pcie_print_bus(pcie_bus_t *bus_p); 57 void pcie_dbg(char *fmt, ...); 58 #endif /* DEBUG */ 59 60 /* Variable 
to control default PCI-Express config settings */
ushort_t pcie_command_default =
    PCI_COMM_SERR_ENABLE |
    PCI_COMM_WAIT_CYC_ENAB |
    PCI_COMM_PARITY_DETECT |
    PCI_COMM_ME |
    PCI_COMM_MAE |
    PCI_COMM_IO;

/*
 * xxx_fw are bits that are controlled by FW and should not be modified.
 * These bits are preserved from the current register value when the
 * command register is (re)programmed during child initialization.
 */
ushort_t pcie_command_default_fw =
    PCI_COMM_SPEC_CYC |
    PCI_COMM_MEMWR_INVAL |
    PCI_COMM_PALETTE_SNOOP |
    PCI_COMM_WAIT_CYC_ENAB |
    0xF800; /* Reserved Bits */

/* Firmware-owned bits of the bridge control register, same idea as above */
ushort_t pcie_bdg_command_default_fw =
    PCI_BCNF_BCNTRL_ISA_ENABLE |
    PCI_BCNF_BCNTRL_VGA_ENABLE |
    0xF000; /* Reserved Bits */

/* PCI-Express Base error defaults */
ushort_t pcie_base_err_default =
    PCIE_DEVCTL_CE_REPORTING_EN |
    PCIE_DEVCTL_NFE_REPORTING_EN |
    PCIE_DEVCTL_FE_REPORTING_EN |
    PCIE_DEVCTL_UR_REPORTING_EN;

/* PCI-Express Device Control Register */
uint16_t pcie_devctl_default = PCIE_DEVCTL_RO_EN |
    PCIE_DEVCTL_MAX_READ_REQ_512;

/* PCI-Express AER Root Control Register */
#define	PCIE_ROOT_SYS_ERR	(PCIE_ROOTCTL_SYS_ERR_ON_CE_EN | \
    PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN | \
    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN)

ushort_t pcie_root_ctrl_default =
    PCIE_ROOTCTL_SYS_ERR_ON_CE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN;

/* PCI-Express Root Error Command Register */
ushort_t pcie_root_error_cmd_default =
    PCIE_AER_RE_CMD_CE_REP_EN |
    PCIE_AER_RE_CMD_NFE_REP_EN |
    PCIE_AER_RE_CMD_FE_REP_EN;

/* ECRC settings in the PCIe AER Control Register */
uint32_t pcie_ecrc_value =
    PCIE_AER_CTL_ECRC_GEN_ENA |
    PCIE_AER_CTL_ECRC_CHECK_ENA;

/*
 * If a particular platform wants to disable certain errors such as UR/MA,
 * instead of using #defines have the platform's PCIe Root Complex driver set
 * these masks using the pcie_get_XXX_mask and pcie_set_XXX_mask functions. For
 * x86 the closest thing to a PCIe root complex driver is NPE. For SPARC the
 * closest PCIe root complex driver is PX.
 *
 * pcie_serr_disable_flag : disable SERR only (in RCR and command reg); x86
 * systems may want to disable SERR in general. For root ports, enabling SERR
 * causes NMIs which are not handled and results in a watchdog timeout error.
 */
uint32_t pcie_aer_uce_mask = 0;		/* AER UE Mask */
uint32_t pcie_aer_ce_mask = 0;		/* AER CE Mask */
uint32_t pcie_aer_suce_mask = 0;	/* AER Secondary UE Mask */
uint32_t pcie_serr_disable_flag = 0;	/* Disable SERR */

/* Default severities needed for eversholt. Error handling doesn't care */
uint32_t pcie_aer_uce_severity = PCIE_AER_UCE_MTLP | PCIE_AER_UCE_RO | \
    PCIE_AER_UCE_FCP | PCIE_AER_UCE_SD | PCIE_AER_UCE_DLP | \
    PCIE_AER_UCE_TRAINING;
uint32_t pcie_aer_suce_severity = PCIE_AER_SUCE_SERR_ASSERT | \
    PCIE_AER_SUCE_UC_ADDR_ERR | PCIE_AER_SUCE_UC_ATTR_ERR | \
    PCIE_AER_SUCE_USC_MSG_DATA_ERR;

/* Maximum payload size default; encoded DEVCTL value shifted to an index */
int pcie_max_mps = PCIE_DEVCTL_MAX_PAYLOAD_4096 >> 5;
/* Set nonzero to disable ARI (Alternative Routing-ID) support */
int pcie_disable_ari = 0;

static void pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip,
	int *max_supported);
static int pcie_get_max_supported(dev_info_t *dip, void *arg);
static int pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
    caddr_t *addrp, ddi_acc_handle_t *handlep);
static void pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph);

dev_info_t *pcie_get_rc_dip(dev_info_t *dip);

/*
 * modload support
 */

static struct modlmisc modlmisc	= {
	&mod_miscops,	/* Type of module */
	"PCI Express Framework Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlmisc,
	NULL
};

/*
 * Global Variables needed for a non-atomic version of ddi_fm_ereport_post.
 * Currently used to send the pci.fabric ereports whose payload depends on the
 * type of PCI device it is being sent for.
169 */ 170 char *pcie_nv_buf; 171 nv_alloc_t *pcie_nvap; 172 nvlist_t *pcie_nvl; 173 174 int 175 _init(void) 176 { 177 int rval; 178 179 pcie_nv_buf = kmem_alloc(ERPT_DATA_SZ, KM_SLEEP); 180 pcie_nvap = fm_nva_xcreate(pcie_nv_buf, ERPT_DATA_SZ); 181 pcie_nvl = fm_nvlist_create(pcie_nvap); 182 183 rval = mod_install(&modlinkage); 184 return (rval); 185 } 186 187 int 188 _fini() 189 { 190 int rval; 191 192 fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN); 193 fm_nva_xdestroy(pcie_nvap); 194 kmem_free(pcie_nv_buf, ERPT_DATA_SZ); 195 196 rval = mod_remove(&modlinkage); 197 return (rval); 198 } 199 200 int 201 _info(struct modinfo *modinfop) 202 { 203 return (mod_info(&modlinkage, modinfop)); 204 } 205 206 /* ARGSUSED */ 207 int 208 pcie_init(dev_info_t *dip, caddr_t arg) 209 { 210 int ret = DDI_SUCCESS; 211 212 /* 213 * Create a "devctl" minor node to support DEVCTL_DEVICE_* 214 * and DEVCTL_BUS_* ioctls to this bus. 215 */ 216 if ((ret = ddi_create_minor_node(dip, "devctl", S_IFCHR, 217 PCI_MINOR_NUM(ddi_get_instance(dip), PCI_DEVCTL_MINOR), 218 DDI_NT_NEXUS, 0)) != DDI_SUCCESS) { 219 PCIE_DBG("Failed to create devctl minor node for %s%d\n", 220 ddi_driver_name(dip), ddi_get_instance(dip)); 221 222 return (ret); 223 } 224 225 if ((ret = pcie_hp_init(dip, arg)) != DDI_SUCCESS) { 226 /* 227 * On a few x86 platforms, we observed unexpected hotplug 228 * initialization failures in recent years. Continue with 229 * a message printed because we don't want to stop PCI 230 * driver attach and system boot because of this hotplug 231 * initialization failure before we address all those issues. 
232 */ 233 cmn_err(CE_WARN, "%s%d: Failed setting hotplug framework\n", 234 ddi_driver_name(dip), ddi_get_instance(dip)); 235 236 #if defined(__sparc) 237 ddi_remove_minor_node(dip, "devctl"); 238 239 return (ret); 240 #endif /* defined(__sparc) */ 241 } 242 243 return (DDI_SUCCESS); 244 } 245 246 /* ARGSUSED */ 247 int 248 pcie_uninit(dev_info_t *dip) 249 { 250 int ret = DDI_SUCCESS; 251 252 if (pcie_ari_is_enabled(dip) == PCIE_ARI_FORW_ENABLED) 253 (void) pcie_ari_disable(dip); 254 255 if ((ret = pcie_hp_uninit(dip)) != DDI_SUCCESS) { 256 PCIE_DBG("Failed to uninitialize hotplug for %s%d\n", 257 ddi_driver_name(dip), ddi_get_instance(dip)); 258 259 return (ret); 260 } 261 262 ddi_remove_minor_node(dip, "devctl"); 263 264 return (ret); 265 } 266 267 /* ARGSUSED */ 268 int 269 pcie_intr(dev_info_t *dip) 270 { 271 return (pcie_hp_intr(dip)); 272 } 273 274 /* ARGSUSED */ 275 int 276 pcie_open(dev_info_t *dip, dev_t *devp, int flags, int otyp, cred_t *credp) 277 { 278 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 279 280 /* 281 * Make sure the open is for the right file type. 282 */ 283 if (otyp != OTYP_CHR) 284 return (EINVAL); 285 286 /* 287 * Handle the open by tracking the device state. 
288 */ 289 if ((bus_p->bus_soft_state == PCI_SOFT_STATE_OPEN_EXCL) || 290 ((flags & FEXCL) && 291 (bus_p->bus_soft_state != PCI_SOFT_STATE_CLOSED))) { 292 return (EBUSY); 293 } 294 295 if (flags & FEXCL) 296 bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN_EXCL; 297 else 298 bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN; 299 300 return (0); 301 } 302 303 /* ARGSUSED */ 304 int 305 pcie_close(dev_info_t *dip, dev_t dev, int flags, int otyp, cred_t *credp) 306 { 307 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 308 309 if (otyp != OTYP_CHR) 310 return (EINVAL); 311 312 bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED; 313 314 return (0); 315 } 316 317 /* ARGSUSED */ 318 int 319 pcie_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg, int mode, 320 cred_t *credp, int *rvalp) 321 { 322 struct devctl_iocdata *dcp; 323 uint_t bus_state; 324 int rv = DDI_SUCCESS; 325 326 /* 327 * We can use the generic implementation for devctl ioctl 328 */ 329 switch (cmd) { 330 case DEVCTL_DEVICE_GETSTATE: 331 case DEVCTL_DEVICE_ONLINE: 332 case DEVCTL_DEVICE_OFFLINE: 333 case DEVCTL_BUS_GETSTATE: 334 return (ndi_devctl_ioctl(dip, cmd, arg, mode, 0)); 335 default: 336 break; 337 } 338 339 /* 340 * read devctl ioctl data 341 */ 342 if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS) 343 return (EFAULT); 344 345 switch (cmd) { 346 case DEVCTL_BUS_QUIESCE: 347 if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS) 348 if (bus_state == BUS_QUIESCED) 349 break; 350 (void) ndi_set_bus_state(dip, BUS_QUIESCED); 351 break; 352 case DEVCTL_BUS_UNQUIESCE: 353 if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS) 354 if (bus_state == BUS_ACTIVE) 355 break; 356 (void) ndi_set_bus_state(dip, BUS_ACTIVE); 357 break; 358 case DEVCTL_BUS_RESET: 359 case DEVCTL_BUS_RESETALL: 360 case DEVCTL_DEVICE_RESET: 361 rv = ENOTSUP; 362 break; 363 default: 364 rv = ENOTTY; 365 } 366 367 ndi_dc_freehdl(dcp); 368 return (rv); 369 } 370 371 /* ARGSUSED */ 372 int 373 pcie_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t 
prop_op, 374 int flags, char *name, caddr_t valuep, int *lengthp) 375 { 376 if (dev == DDI_DEV_T_ANY) 377 goto skip; 378 379 if (PCIE_IS_HOTPLUG_CAPABLE(dip) && 380 strcmp(name, "pci-occupant") == 0) { 381 int pci_dev = PCI_MINOR_NUM_TO_PCI_DEVNUM(getminor(dev)); 382 383 pcie_hp_create_occupant_props(dip, dev, pci_dev); 384 } 385 386 skip: 387 return (ddi_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp)); 388 } 389 390 int 391 pcie_init_cfghdl(dev_info_t *cdip) 392 { 393 pcie_bus_t *bus_p; 394 ddi_acc_handle_t eh = NULL; 395 396 bus_p = PCIE_DIP2BUS(cdip); 397 if (bus_p == NULL) 398 return (DDI_FAILURE); 399 400 /* Create an config access special to error handling */ 401 if (pci_config_setup(cdip, &eh) != DDI_SUCCESS) { 402 cmn_err(CE_WARN, "Cannot setup config access" 403 " for BDF 0x%x\n", bus_p->bus_bdf); 404 return (DDI_FAILURE); 405 } 406 407 bus_p->bus_cfg_hdl = eh; 408 return (DDI_SUCCESS); 409 } 410 411 void 412 pcie_fini_cfghdl(dev_info_t *cdip) 413 { 414 pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip); 415 416 pci_config_teardown(&bus_p->bus_cfg_hdl); 417 } 418 419 /* 420 * PCI-Express child device initialization. 421 * This function enables generic pci-express interrupts and error 422 * handling. 
423 * 424 * @param pdip root dip (root nexus's dip) 425 * @param cdip child's dip (device's dip) 426 * @return DDI_SUCCESS or DDI_FAILURE 427 */ 428 /* ARGSUSED */ 429 int 430 pcie_initchild(dev_info_t *cdip) 431 { 432 uint16_t tmp16, reg16; 433 pcie_bus_t *bus_p; 434 435 bus_p = PCIE_DIP2BUS(cdip); 436 if (bus_p == NULL) { 437 PCIE_DBG("%s: BUS not found.\n", 438 ddi_driver_name(cdip)); 439 440 return (DDI_FAILURE); 441 } 442 443 if (pcie_init_cfghdl(cdip) != DDI_SUCCESS) 444 return (DDI_FAILURE); 445 446 /* Clear the device's status register */ 447 reg16 = PCIE_GET(16, bus_p, PCI_CONF_STAT); 448 PCIE_PUT(16, bus_p, PCI_CONF_STAT, reg16); 449 450 /* Setup the device's command register */ 451 reg16 = PCIE_GET(16, bus_p, PCI_CONF_COMM); 452 tmp16 = (reg16 & pcie_command_default_fw) | pcie_command_default; 453 454 #if defined(__i386) || defined(__amd64) 455 boolean_t empty_io_range = B_FALSE; 456 boolean_t empty_mem_range = B_FALSE; 457 /* 458 * Check for empty IO and Mem ranges on bridges. If so disable IO/Mem 459 * access as it can cause a hang if enabled. 460 */ 461 pcie_check_io_mem_range(bus_p->bus_cfg_hdl, &empty_io_range, 462 &empty_mem_range); 463 if ((empty_io_range == B_TRUE) && 464 (pcie_command_default & PCI_COMM_IO)) { 465 tmp16 &= ~PCI_COMM_IO; 466 PCIE_DBG("No I/O range found for %s, bdf 0x%x\n", 467 ddi_driver_name(cdip), bus_p->bus_bdf); 468 } 469 if ((empty_mem_range == B_TRUE) && 470 (pcie_command_default & PCI_COMM_MAE)) { 471 tmp16 &= ~PCI_COMM_MAE; 472 PCIE_DBG("No Mem range found for %s, bdf 0x%x\n", 473 ddi_driver_name(cdip), bus_p->bus_bdf); 474 } 475 #endif /* defined(__i386) || defined(__amd64) */ 476 477 if (pcie_serr_disable_flag && PCIE_IS_PCIE(bus_p)) 478 tmp16 &= ~PCI_COMM_SERR_ENABLE; 479 480 PCIE_PUT(16, bus_p, PCI_CONF_COMM, tmp16); 481 PCIE_DBG_CFG(cdip, bus_p, "COMMAND", 16, PCI_CONF_COMM, reg16); 482 483 /* 484 * If the device has a bus control register then program it 485 * based on the settings in the command register. 
486 */ 487 if (PCIE_IS_BDG(bus_p)) { 488 /* Clear the device's secondary status register */ 489 reg16 = PCIE_GET(16, bus_p, PCI_BCNF_SEC_STATUS); 490 PCIE_PUT(16, bus_p, PCI_BCNF_SEC_STATUS, reg16); 491 492 /* Setup the device's secondary command register */ 493 reg16 = PCIE_GET(16, bus_p, PCI_BCNF_BCNTRL); 494 tmp16 = (reg16 & pcie_bdg_command_default_fw); 495 496 tmp16 |= PCI_BCNF_BCNTRL_SERR_ENABLE; 497 /* 498 * Workaround for this Nvidia bridge. Don't enable the SERR 499 * enable bit in the bridge control register as it could lead to 500 * bogus NMIs. 501 */ 502 if (bus_p->bus_dev_ven_id == 0x037010DE) 503 tmp16 &= ~PCI_BCNF_BCNTRL_SERR_ENABLE; 504 505 if (pcie_command_default & PCI_COMM_PARITY_DETECT) 506 tmp16 |= PCI_BCNF_BCNTRL_PARITY_ENABLE; 507 508 /* 509 * Enable Master Abort Mode only if URs have not been masked. 510 * For PCI and PCIe-PCI bridges, enabling this bit causes a 511 * Master Aborts/UR to be forwarded as a UR/TA or SERR. If this 512 * bit is masked, posted requests are dropped and non-posted 513 * requests are returned with -1. 
514 */ 515 if (pcie_aer_uce_mask & PCIE_AER_UCE_UR) 516 tmp16 &= ~PCI_BCNF_BCNTRL_MAST_AB_MODE; 517 else 518 tmp16 |= PCI_BCNF_BCNTRL_MAST_AB_MODE; 519 PCIE_PUT(16, bus_p, PCI_BCNF_BCNTRL, tmp16); 520 PCIE_DBG_CFG(cdip, bus_p, "SEC CMD", 16, PCI_BCNF_BCNTRL, 521 reg16); 522 } 523 524 if (PCIE_IS_PCIE(bus_p)) { 525 /* Setup PCIe device control register */ 526 reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL); 527 tmp16 = pcie_devctl_default; 528 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16); 529 PCIE_DBG_CAP(cdip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16); 530 531 /* Enable PCIe errors */ 532 pcie_enable_errors(cdip); 533 } 534 535 bus_p->bus_ari = B_FALSE; 536 if ((pcie_ari_is_enabled(ddi_get_parent(cdip)) 537 == PCIE_ARI_FORW_ENABLED) && (pcie_ari_device(cdip) 538 == PCIE_ARI_DEVICE)) { 539 bus_p->bus_ari = B_TRUE; 540 } 541 542 if (pcie_initchild_mps(cdip) == DDI_FAILURE) { 543 pcie_fini_cfghdl(cdip); 544 return (DDI_FAILURE); 545 } 546 547 return (DDI_SUCCESS); 548 } 549 550 #define PCIE_ZALLOC(data) kmem_zalloc(sizeof (data), KM_SLEEP) 551 static void 552 pcie_init_pfd(dev_info_t *dip) 553 { 554 pf_data_t *pfd_p = PCIE_ZALLOC(pf_data_t); 555 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 556 557 PCIE_DIP2PFD(dip) = pfd_p; 558 559 pfd_p->pe_bus_p = bus_p; 560 pfd_p->pe_severity_flags = 0; 561 pfd_p->pe_lock = B_FALSE; 562 pfd_p->pe_valid = B_FALSE; 563 564 /* Allocate the root fault struct for both RC and RP */ 565 if (PCIE_IS_ROOT(bus_p)) { 566 PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t); 567 PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF; 568 } 569 570 PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t); 571 572 if (PCIE_IS_BDG(bus_p)) 573 PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t); 574 575 if (PCIE_IS_PCIE(bus_p)) { 576 PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t); 577 578 if (PCIE_IS_RP(bus_p)) 579 PCIE_RP_REG(pfd_p) = 580 PCIE_ZALLOC(pf_pcie_rp_err_regs_t); 581 582 PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t); 583 
PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF; 584 585 if (PCIE_IS_RP(bus_p)) { 586 PCIE_ADV_RP_REG(pfd_p) = 587 PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t); 588 PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id = 589 PCIE_INVALID_BDF; 590 PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id = 591 PCIE_INVALID_BDF; 592 } else if (PCIE_IS_PCIE_BDG(bus_p)) { 593 PCIE_ADV_BDG_REG(pfd_p) = 594 PCIE_ZALLOC(pf_pcie_adv_bdg_err_regs_t); 595 PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf = 596 PCIE_INVALID_BDF; 597 } 598 599 if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) { 600 PCIX_BDG_ERR_REG(pfd_p) = 601 PCIE_ZALLOC(pf_pcix_bdg_err_regs_t); 602 603 if (PCIX_ECC_VERSION_CHECK(bus_p)) { 604 PCIX_BDG_ECC_REG(pfd_p, 0) = 605 PCIE_ZALLOC(pf_pcix_ecc_regs_t); 606 PCIX_BDG_ECC_REG(pfd_p, 1) = 607 PCIE_ZALLOC(pf_pcix_ecc_regs_t); 608 } 609 } 610 } else if (PCIE_IS_PCIX(bus_p)) { 611 if (PCIE_IS_BDG(bus_p)) { 612 PCIX_BDG_ERR_REG(pfd_p) = 613 PCIE_ZALLOC(pf_pcix_bdg_err_regs_t); 614 615 if (PCIX_ECC_VERSION_CHECK(bus_p)) { 616 PCIX_BDG_ECC_REG(pfd_p, 0) = 617 PCIE_ZALLOC(pf_pcix_ecc_regs_t); 618 PCIX_BDG_ECC_REG(pfd_p, 1) = 619 PCIE_ZALLOC(pf_pcix_ecc_regs_t); 620 } 621 } else { 622 PCIX_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcix_err_regs_t); 623 624 if (PCIX_ECC_VERSION_CHECK(bus_p)) 625 PCIX_ECC_REG(pfd_p) = 626 PCIE_ZALLOC(pf_pcix_ecc_regs_t); 627 } 628 } 629 } 630 631 static void 632 pcie_fini_pfd(dev_info_t *dip) 633 { 634 pf_data_t *pfd_p = PCIE_DIP2PFD(dip); 635 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 636 637 if (PCIE_IS_PCIE(bus_p)) { 638 if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) { 639 if (PCIX_ECC_VERSION_CHECK(bus_p)) { 640 kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0), 641 sizeof (pf_pcix_ecc_regs_t)); 642 kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1), 643 sizeof (pf_pcix_ecc_regs_t)); 644 } 645 646 kmem_free(PCIX_BDG_ERR_REG(pfd_p), 647 sizeof (pf_pcix_bdg_err_regs_t)); 648 } 649 650 if (PCIE_IS_RP(bus_p)) 651 kmem_free(PCIE_ADV_RP_REG(pfd_p), 652 sizeof (pf_pcie_adv_rp_err_regs_t)); 653 else if 
(PCIE_IS_PCIE_BDG(bus_p)) 654 kmem_free(PCIE_ADV_BDG_REG(pfd_p), 655 sizeof (pf_pcie_adv_bdg_err_regs_t)); 656 657 kmem_free(PCIE_ADV_REG(pfd_p), 658 sizeof (pf_pcie_adv_err_regs_t)); 659 660 if (PCIE_IS_RP(bus_p)) 661 kmem_free(PCIE_RP_REG(pfd_p), 662 sizeof (pf_pcie_rp_err_regs_t)); 663 664 kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t)); 665 } else if (PCIE_IS_PCIX(bus_p)) { 666 if (PCIE_IS_BDG(bus_p)) { 667 if (PCIX_ECC_VERSION_CHECK(bus_p)) { 668 kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0), 669 sizeof (pf_pcix_ecc_regs_t)); 670 kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1), 671 sizeof (pf_pcix_ecc_regs_t)); 672 } 673 674 kmem_free(PCIX_BDG_ERR_REG(pfd_p), 675 sizeof (pf_pcix_bdg_err_regs_t)); 676 } else { 677 if (PCIX_ECC_VERSION_CHECK(bus_p)) 678 kmem_free(PCIX_ECC_REG(pfd_p), 679 sizeof (pf_pcix_ecc_regs_t)); 680 681 kmem_free(PCIX_ERR_REG(pfd_p), 682 sizeof (pf_pcix_err_regs_t)); 683 } 684 } 685 686 if (PCIE_IS_BDG(bus_p)) 687 kmem_free(PCI_BDG_ERR_REG(pfd_p), 688 sizeof (pf_pci_bdg_err_regs_t)); 689 690 kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t)); 691 692 if (PCIE_IS_ROOT(bus_p)) 693 kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t)); 694 695 kmem_free(PCIE_DIP2PFD(dip), sizeof (pf_data_t)); 696 697 PCIE_DIP2PFD(dip) = NULL; 698 } 699 700 701 /* 702 * Special functions to allocate pf_data_t's for PCIe root complexes. 
 * Note: Root Complex not Root Port
 */
void
pcie_rc_init_pfd(dev_info_t *dip, pf_data_t *pfd_p)
{
	/* The RC's bus_t hangs off the downstream side of the dip */
	pfd_p->pe_bus_p = PCIE_DIP2DOWNBUS(dip);
	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_lock = B_FALSE;
	pfd_p->pe_valid = B_FALSE;

	/* An RC always gets the full root-port-style register set */
	PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
	PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
	PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);
	PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);
	PCIE_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_rp_err_regs_t);
	PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
	PCIE_ADV_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);

	PCIE_ADV_REG(pfd_p)->pcie_ue_sev = pcie_aer_uce_severity;
}

/* Free everything pcie_rc_init_pfd() allocated; keep the lists in sync. */
void
pcie_rc_fini_pfd(pf_data_t *pfd_p)
{
	kmem_free(PCIE_ADV_RP_REG(pfd_p), sizeof (pf_pcie_adv_rp_err_regs_t));
	kmem_free(PCIE_ADV_REG(pfd_p), sizeof (pf_pcie_adv_err_regs_t));
	kmem_free(PCIE_RP_REG(pfd_p), sizeof (pf_pcie_rp_err_regs_t));
	kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
	kmem_free(PCI_BDG_ERR_REG(pfd_p), sizeof (pf_pci_bdg_err_regs_t));
	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));
	kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
}

/*
 * init pcie_bus_t for root complex
 *
 * Only a few of the fields in bus_t is valid for root complex.
 * The fields that are bracketed are initialized in this routine:
 *
 * dev_info_t *		<bus_dip>
 * dev_info_t *		bus_rp_dip
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		<bus_fm_flags>
 * pcie_req_id_t	bus_bdf
 * pcie_req_id_t	bus_rp_bdf
 * uint32_t		bus_dev_ven_id
 * uint8_t		bus_rev_id
 * uint8_t		<bus_hdr_type>
 * uint16_t		<bus_dev_type>
 * uint8_t		bus_bdg_secbus
 * uint16_t		bus_pcie_off
 * uint16_t		<bus_aer_off>
 * uint16_t		bus_pcix_off
 * uint16_t		bus_ecc_ver
 * pci_bus_range_t	bus_bus_range
 * ppb_ranges_t	*	bus_addr_ranges
 * int			bus_addr_entries
 * pci_regspec_t *	bus_assigned_addr
 * int			bus_assigned_entries
 * pf_data_t *		bus_pfd
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void	*		bus_plat_private
 */
void
pcie_rc_init_bus(dev_info_t *dip)
{
	pcie_bus_t *bus_p;

	bus_p = (pcie_bus_t *)kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);
	bus_p->bus_dip = dip;
	bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_RC_PSEUDO;
	bus_p->bus_hdr_type = PCI_HEADER_ONE;

	/* Fake that there are AER logs */
	bus_p->bus_aer_off = (uint16_t)-1;

	/* Needed only for handle lookup */
	bus_p->bus_fm_flags |= PF_FM_READY;

	/* Attach as downstream-side bus private data */
	ndi_set_bus_private(dip, B_FALSE, DEVI_PORT_TYPE_PCI, bus_p);
}

/* Undo pcie_rc_init_bus(): detach and free the RC's pcie_bus_t. */
void
pcie_rc_fini_bus(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2DOWNBUS(dip);
	ndi_set_bus_private(dip, B_FALSE, NULL, NULL);
	kmem_free(bus_p, sizeof (pcie_bus_t));
}

/*
 * partially init pcie_bus_t for device (dip,bdf) for accessing pci
 * config space
 *
 * This routine is invoked during boot, either after creating a devinfo node
 * (x86 case) or during px driver attach (sparc case); it is also invoked
 * in hotplug context after a devinfo node is created.
 *
 * The fields that are bracketed are initialized if flag PCIE_BUS_INITIAL
 * is set:
 *
 * dev_info_t *		<bus_dip>
 * dev_info_t *		<bus_rp_dip>
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		bus_fm_flags
 * pcie_req_id_t	<bus_bdf>
 * pcie_req_id_t	<bus_rp_bdf>
 * uint32_t		<bus_dev_ven_id>
 * uint8_t		<bus_rev_id>
 * uint8_t		<bus_hdr_type>
 * uint16_t		<bus_dev_type>
 * uint8_t		<bus_bdg_secbus
 * uint16_t		<bus_pcie_off>
 * uint16_t		<bus_aer_off>
 * uint16_t		<bus_pcix_off>
 * uint16_t		<bus_ecc_ver>
 * pci_bus_range_t	bus_bus_range
 * ppb_ranges_t	*	bus_addr_ranges
 * int			bus_addr_entries
 * pci_regspec_t *	bus_assigned_addr
 * int			bus_assigned_entries
 * pf_data_t *		bus_pfd
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void	*		bus_plat_private
 *
 * The fields that are bracketed are initialized if flag PCIE_BUS_FINAL
 * is set:
 *
 * dev_info_t *		bus_dip
 * dev_info_t *		bus_rp_dip
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		bus_fm_flags
 * pcie_req_id_t	bus_bdf
 * pcie_req_id_t	bus_rp_bdf
 * uint32_t		bus_dev_ven_id
 * uint8_t		bus_rev_id
 * uint8_t		bus_hdr_type
 * uint16_t		bus_dev_type
 * uint8_t		<bus_bdg_secbus>
 * uint16_t		bus_pcie_off
 * uint16_t		bus_aer_off
 * uint16_t		bus_pcix_off
 * uint16_t		bus_ecc_ver
 * pci_bus_range_t	<bus_bus_range>
 * ppb_ranges_t	*	<bus_addr_ranges>
 * int			<bus_addr_entries>
 * pci_regspec_t *	<bus_assigned_addr>
 * int			<bus_assigned_entries>
 * pf_data_t *		<bus_pfd>
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void	*		<bus_plat_private>
 */

pcie_bus_t *
pcie_init_bus(dev_info_t *dip, pcie_req_id_t bdf, uint8_t flags)
{
	uint16_t	status, base, baseptr, num_cap;
	uint32_t	capid;
	int		range_size;
	pcie_bus_t	*bus_p;
	dev_info_t	*rcdip;
	dev_info_t	*pdip;
	const char	*errstr = NULL;

	if (!(flags & PCIE_BUS_INITIAL))
		goto initial_done;

	bus_p = kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);

	bus_p->bus_dip = dip;
	bus_p->bus_bdf = bdf;

	rcdip = pcie_get_rc_dip(dip);
	ASSERT(rcdip != NULL);

	/* Save the Vendor ID, Device ID and revision ID */
	bus_p->bus_dev_ven_id = pci_cfgacc_get32(rcdip, bdf, PCI_CONF_VENID);
	bus_p->bus_rev_id = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_REVID);
	/* Save the Header Type (multi-function bit stripped) */
	bus_p->bus_hdr_type = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_HEADER);
	bus_p->bus_hdr_type &= PCI_HEADER_TYPE_M;

	/*
	 * Figure out the device type and all the relevant capability offsets
	 */
	/* set default value */
	bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;

	status = pci_cfgacc_get16(rcdip, bdf, PCI_CONF_STAT);
	if (status == PCI_CAP_EINVAL16 || !(status & PCI_STAT_CAP))
		goto caps_done; /* capability not supported */

	/* Relevant conventional capabilities first */

	/* Conventional caps: PCI_CAP_ID_PCI_E, PCI_CAP_ID_PCIX */
	num_cap = 2;

	/* The capability pointer's location depends on the header type */
	switch (bus_p->bus_hdr_type) {
	case PCI_HEADER_ZERO:
		baseptr = PCI_CONF_CAP_PTR;
		break;
	case PCI_HEADER_PPB:
		baseptr = PCI_BCNF_CAP_PTR;
		break;
	case PCI_HEADER_CARDBUS:
		baseptr = PCI_CBUS_CAP_PTR;
		break;
	default:
		cmn_err(CE_WARN, "%s: unexpected pci header type:%x",
		    __func__, bus_p->bus_hdr_type);
		goto caps_done;
	}

	/* Walk the capability list until both caps are found or it ends */
	base = baseptr;
	for (base = pci_cfgacc_get8(rcdip, bdf, base); base && num_cap;
	    base = pci_cfgacc_get8(rcdip, bdf, base + PCI_CAP_NEXT_PTR)) {
		capid = pci_cfgacc_get8(rcdip, bdf, base);
		switch (capid) {
		case PCI_CAP_ID_PCI_E:
			bus_p->bus_pcie_off = base;
			bus_p->bus_dev_type = pci_cfgacc_get16(rcdip, bdf,
			    base + PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;

			/* Check and save PCIe hotplug capability information */
			if ((PCIE_IS_RP(bus_p) || PCIE_IS_SWD(bus_p)) &&
			    (pci_cfgacc_get16(rcdip, bdf, base + PCIE_PCIECAP)
			    & PCIE_PCIECAP_SLOT_IMPL) &&
			    (pci_cfgacc_get32(rcdip, bdf, base + PCIE_SLOTCAP)
			    & PCIE_SLOTCAP_HP_CAPABLE))
				bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE;

			num_cap--;
			break;
		case PCI_CAP_ID_PCIX:
			bus_p->bus_pcix_off = base;
			/* ECC version lives in a different reg for bridges */
			if (PCIE_IS_BDG(bus_p))
				bus_p->bus_ecc_ver =
				    pci_cfgacc_get16(rcdip, bdf, base +
				    PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
			else
				bus_p->bus_ecc_ver =
				    pci_cfgacc_get16(rcdip, bdf, base +
				    PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
			num_cap--;
			break;
		default:
			break;
		}
	}

	/* Check and save PCI hotplug (SHPC) capability information */
	if (PCIE_IS_BDG(bus_p)) {
		base = baseptr;
		for (base = pci_cfgacc_get8(rcdip, bdf, base);
		    base; base = pci_cfgacc_get8(rcdip, bdf,
		    base + PCI_CAP_NEXT_PTR)) {
			capid = pci_cfgacc_get8(rcdip, bdf, base);
			if (capid == PCI_CAP_ID_PCI_HOTPLUG) {
				bus_p->bus_pci_hp_off = base;
				bus_p->bus_hp_sup_modes |= PCIE_PCI_HP_MODE;
				break;
			}
		}
	}

	/* Then, relevant extended capabilities */

	if (!PCIE_IS_PCIE(bus_p))
		goto caps_done;

	/* Extended caps: PCIE_EXT_CAP_ID_AER */
	for (base = PCIE_EXT_CAP; base; base = (capid >>
	    PCIE_EXT_CAP_NEXT_PTR_SHIFT) & PCIE_EXT_CAP_NEXT_PTR_MASK) {
		capid = pci_cfgacc_get32(rcdip, bdf, base);
		if (capid == PCI_CAP_EINVAL32)
			break;
		if (((capid >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK)
		    == PCIE_EXT_CAP_ID_AER) {
			bus_p->bus_aer_off = base;
			break;
		}
	}

caps_done:
	/* save RP dip and RP bdf */
	if (PCIE_IS_RP(bus_p)) {
		bus_p->bus_rp_dip = dip;
		bus_p->bus_rp_bdf = bus_p->bus_bdf;
	} else {
		for (pdip = ddi_get_parent(dip); pdip;
		    pdip = ddi_get_parent(pdip)) {
			pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip);

			/*
			 * If RP dip and RP bdf in parent's bus_t have
			 * been initialized, simply use these instead of
			 * continuing up to the RC.
			 */
			if (parent_bus_p->bus_rp_dip != NULL) {
				bus_p->bus_rp_dip = parent_bus_p->bus_rp_dip;
				bus_p->bus_rp_bdf = parent_bus_p->bus_rp_bdf;
				break;
			}

			/*
			 * When debugging be aware that some NVIDIA x86
			 * architectures have 2 nodes for each RP, One at Bus
			 * 0x0 and one at Bus 0x80. The requester is from Bus
			 * 0x80
			 */
			if (PCIE_IS_ROOT(parent_bus_p)) {
				bus_p->bus_rp_dip = pdip;
				bus_p->bus_rp_bdf = parent_bus_p->bus_bdf;
				break;
			}
		}
	}

	bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;
	bus_p->bus_fm_flags = 0;
	bus_p->bus_mps = 0;

	/* Attach as upstream-side bus private data */
	ndi_set_bus_private(dip, B_TRUE, DEVI_PORT_TYPE_PCI, (void *)bus_p);

	if (PCIE_IS_HOTPLUG_CAPABLE(dip))
		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE, dip,
		    "hotplug-capable");

initial_done:
	if (!(flags & PCIE_BUS_FINAL))
		goto final_done;

	/* already initialized? */
	bus_p = PCIE_DIP2BUS(dip);

	/* Save the Range information if device is a switch/bridge */
	if (PCIE_IS_BDG(bus_p)) {
		/* get "bus_range" property */
		range_size = sizeof (pci_bus_range_t);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "bus-range", (caddr_t)&bus_p->bus_bus_range, &range_size)
		    != DDI_PROP_SUCCESS) {
			errstr = "Cannot find \"bus-range\" property";
			cmn_err(CE_WARN,
			    "PCIE init err info failed BDF 0x%x:%s\n",
			    bus_p->bus_bdf, errstr);
		}

		/* get secondary bus number */
		rcdip = pcie_get_rc_dip(dip);
		ASSERT(rcdip != NULL);

		bus_p->bus_bdg_secbus = pci_cfgacc_get8(rcdip,
		    bus_p->bus_bdf, PCI_BCNF_SECBUS);

		/* Get "ranges" property */
		if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "ranges", (caddr_t)&bus_p->bus_addr_ranges,
		    &bus_p->bus_addr_entries) != DDI_PROP_SUCCESS)
			bus_p->bus_addr_entries = 0;
		/* convert byte count to entry count */
		bus_p->bus_addr_entries /= sizeof (ppb_ranges_t);
	}

	/* save "assigned-addresses" property array, ignore failures */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "assigned-addresses", (caddr_t)&bus_p->bus_assigned_addr,
	    &bus_p->bus_assigned_entries) == DDI_PROP_SUCCESS)
		bus_p->bus_assigned_entries /= sizeof (pci_regspec_t);
	else
		bus_p->bus_assigned_entries = 0;

	pcie_init_pfd(dip);

	pcie_init_plat(dip);

final_done:

	PCIE_DBG("Add %s(dip 0x%p, bdf 0x%x, secbus 0x%x)\n",
	    ddi_driver_name(dip), (void *)dip, bus_p->bus_bdf,
	    bus_p->bus_bdg_secbus);
#ifdef DEBUG
	pcie_print_bus(bus_p);
#endif

	return (bus_p);
}

/*
 * Invoked before destroying devinfo node, mostly during hotplug
 * operation to free pcie_bus_t data structure
 */
/* ARGSUSED */
void
pcie_fini_bus(dev_info_t *dip, uint8_t flags)
{
	pcie_bus_t	*bus_p = PCIE_DIP2UPBUS(dip);
	ASSERT(bus_p);

	if (flags & PCIE_BUS_INITIAL) {
		pcie_fini_plat(dip);
		pcie_fini_pfd(dip);

		kmem_free(bus_p->bus_assigned_addr,
		    (sizeof (pci_regspec_t) * bus_p->bus_assigned_entries));
		kmem_free(bus_p->bus_addr_ranges,
		    (sizeof (ppb_ranges_t) * bus_p->bus_addr_entries));
		/* zero out the fields that have been destroyed */
		bus_p->bus_assigned_addr = NULL;
		bus_p->bus_addr_ranges = NULL;
		bus_p->bus_assigned_entries = 0;
		bus_p->bus_addr_entries = 0;
	}

	if (flags & PCIE_BUS_FINAL) {
		if (PCIE_IS_HOTPLUG_CAPABLE(dip)) {
			(void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
			    "hotplug-capable");
		}

		ndi_set_bus_private(dip, B_TRUE, NULL, NULL);
		kmem_free(bus_p, sizeof (pcie_bus_t));
	}
}

/*
 * Post-attach hook: enable correctable-error reporting once the child has
 * successfully attached (CE reporting is left off in pcie_enable_errors()).
 */
int
pcie_postattach_child(dev_info_t *cdip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip);

	if (!bus_p)
		return (DDI_FAILURE);

	return (pcie_enable_ce(cdip));
}

/*
 * PCI-Express
 * child device de-initialization.
 * This function disables generic pci-express interrupts and error
 * handling.
 */
void
pcie_uninitchild(dev_info_t *cdip)
{
	pcie_disable_errors(cdip);
	pcie_fini_cfghdl(cdip);
}

/*
 * find the root complex dip
 */
dev_info_t *
pcie_get_rc_dip(dev_info_t *dip)
{
	dev_info_t *rcdip;
	pcie_bus_t *rc_bus_p;

	/* Walk up the tree until a node with an RC bus structure is found. */
	for (rcdip = ddi_get_parent(dip); rcdip;
	    rcdip = ddi_get_parent(rcdip)) {
		rc_bus_p = PCIE_DIP2BUS(rcdip);
		if (rc_bus_p && PCIE_IS_RC(rc_bus_p))
			break;
	}

	return (rcdip);
}

/*
 * Returns B_TRUE if dip's parent reports itself as a PCI or PCI-Express
 * nexus via its "device_type" property.
 */
static boolean_t
pcie_is_pci_device(dev_info_t *dip)
{
	dev_info_t	*pdip;
	char		*device_type;

	pdip = ddi_get_parent(dip);
	ASSERT(pdip);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS,
	    "device_type", &device_type) != DDI_PROP_SUCCESS)
		return (B_FALSE);

	if (strcmp(device_type, "pciex") != 0 &&
	    strcmp(device_type, "pci") != 0) {
		ddi_prop_free(device_type);
		return (B_FALSE);
	}

	ddi_prop_free(device_type);
	return (B_TRUE);
}

/* Walk argument for pcie_fab_do_init_fini(). */
typedef struct {
	boolean_t	init;
	uint8_t		flags;
} pcie_bus_arg_t;

/*
 * ddi_walk_devs() callback: initialize or tear down the pcie_bus_t of a
 * fabric node, pruning subtrees that are not PCI/PCIe devices.
 */
/*ARGSUSED*/
static int
pcie_fab_do_init_fini(dev_info_t *dip, void *arg)
{
	pcie_req_id_t	bdf;
	pcie_bus_arg_t	*bus_arg = (pcie_bus_arg_t *)arg;

	if (!pcie_is_pci_device(dip))
		goto out;

	if (bus_arg->init) {
		if (pcie_get_bdf_from_dip(dip, &bdf) != DDI_SUCCESS)
			goto out;

		(void) pcie_init_bus(dip, bdf, bus_arg->flags);
	} else {
		(void) pcie_fini_bus(dip, bus_arg->flags);
	}

	return (DDI_WALK_CONTINUE);

out:
	return (DDI_WALK_PRUNECHILD);
}

/* Initialize pcie_bus_t structures for the entire fabric below rcdip. */
void
pcie_fab_init_bus(dev_info_t *rcdip, uint8_t flags)
{
	int		circular_count;
	dev_info_t	*dip = ddi_get_child(rcdip);
	pcie_bus_arg_t	arg;

	arg.init = B_TRUE;
	arg.flags = flags;

	ndi_devi_enter(rcdip, &circular_count);
	ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
	ndi_devi_exit(rcdip, circular_count);
}

/* Tear down pcie_bus_t structures for the entire fabric below rcdip. */
void
pcie_fab_fini_bus(dev_info_t *rcdip, uint8_t flags)
{
	int		circular_count;
	dev_info_t	*dip = ddi_get_child(rcdip);
	pcie_bus_arg_t	arg;

	arg.init = B_FALSE;
	arg.flags = flags;

	ndi_devi_enter(rcdip, &circular_count);
	ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
	ndi_devi_exit(rcdip, circular_count);
}

/*
 * Enable baseline and (if present) Advanced Error Reporting for a device.
 * CE reporting is deliberately left at its poweron default here; it is
 * turned on later by pcie_enable_ce().
 */
void
pcie_enable_errors(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	reg16, tmp16;
	uint32_t	reg32, tmp32;

	ASSERT(bus_p);

	/*
	 * Clear any pending errors
	 */
	pcie_clear_errors(dip);

	if (!PCIE_IS_PCIE(bus_p))
		return;

	/*
	 * Enable Baseline Error Handling but leave CE reporting off (poweron
	 * default).  MPS/MRRS fields already programmed are preserved.
	 */
	if ((reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL)) !=
	    PCI_CAP_EINVAL16) {
		tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_base_err_default & (~PCIE_DEVCTL_CE_REPORTING_EN));

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);
	}

	/* Enable Root Port Baseline Error Receiving */
	if (PCIE_IS_ROOT(bus_p) &&
	    (reg16 = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL)) !=
	    PCI_CAP_EINVAL16) {

		tmp16 = pcie_serr_disable_flag ?
		    (pcie_root_ctrl_default & ~PCIE_ROOT_SYS_ERR) :
		    pcie_root_ctrl_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "ROOT DEVCTL", 16, PCIE_ROOTCTL,
		    reg16);
	}

	/*
	 * Enable PCI-Express Advanced Error Handling if Exists
	 */
	if (!PCIE_HAS_AER(bus_p))
		return;

	/* Set Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE SEV", 32, PCIE_AER_UCE_SERV,
		    reg32);
	}

	/* Enable Uncorrectable errors */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_mask;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE MASK", 32, PCIE_AER_UCE_MASK,
		    reg32);
	}

	/* Enable ECRC generation and checking */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = reg32 | pcie_ecrc_value;
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER CTL", 32, PCIE_AER_CTL, reg32);
	}

	/* Enable Secondary Uncorrectable errors if this is a bridge */
	if (!PCIE_IS_PCIE_BDG(bus_p))
		goto root;

	/* Set Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_suce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE SEV", 32, PCIE_AER_SUCE_SERV,
		    reg32);
	}

	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, pcie_aer_suce_mask);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE MASK", 32,
		    PCIE_AER_SUCE_MASK, reg32);
	}

root:
	/*
	 * Enable Root Control this is a Root device
	 */
	if (!PCIE_IS_ROOT(bus_p))
		return;

	if ((reg16 = PCIE_AER_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
	    PCI_CAP_EINVAL16) {
		PCIE_AER_PUT(16, bus_p, PCIE_AER_RE_CMD,
		    pcie_root_error_cmd_default);
		PCIE_DBG_AER(dip, bus_p, "AER Root Err Cmd", 16,
		    PCIE_AER_RE_CMD, reg16);
	}
}

/*
 * This function is used for enabling CE reporting and setting the AER CE mask.
 * When called from outside the pcie module it should always be preceded by
 * a call to pcie_enable_errors.
 */
int
pcie_enable_ce(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	device_sts, device_ctl;
	uint32_t	tmp_pcie_aer_ce_mask;

	if (!PCIE_IS_PCIE(bus_p))
		return (DDI_SUCCESS);

	/*
	 * The "pcie_ce_mask" property is used to control both the CE reporting
	 * enable field in the device control register and the AER CE mask. We
	 * leave CE reporting disabled if pcie_ce_mask is set to -1.
	 */

	tmp_pcie_aer_ce_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "pcie_ce_mask", pcie_aer_ce_mask);

	if (tmp_pcie_aer_ce_mask == (uint32_t)-1) {
		/*
		 * Nothing to do since CE reporting has already been disabled.
		 */
		return (DDI_SUCCESS);
	}

	if (PCIE_HAS_AER(bus_p)) {
		/* Enable AER CE */
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, tmp_pcie_aer_ce_mask);
		PCIE_DBG_AER(dip, bus_p, "AER CE MASK", 32, PCIE_AER_CE_MASK,
		    0);

		/* Clear any pending AER CE errors */
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_STS, -1);
	}

	/* clear any pending CE errors (RW1C status bit) */
	if ((device_sts = PCIE_CAP_GET(16, bus_p, PCIE_DEVSTS)) !=
	    PCI_CAP_EINVAL16)
		PCIE_CAP_PUT(16, bus_p, PCIE_DEVSTS,
		    device_sts & (~PCIE_DEVSTS_CE_DETECTED));

	/* Enable CE reporting */
	device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL,
	    (device_ctl & (~PCIE_DEVCTL_ERR_MASK)) | pcie_base_err_default);
	PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, device_ctl);

	return (DDI_SUCCESS);
}

/*
 * Disable baseline and Advanced Error Reporting for a device; inverse of
 * pcie_enable_errors()/pcie_enable_ce().
 */
/* ARGSUSED */
void
pcie_disable_errors(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	device_ctl;
	uint32_t	aer_reg;

	if (!PCIE_IS_PCIE(bus_p))
		return;

	/*
	 * Disable PCI-Express Baseline Error Handling
	 */
	device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
	device_ctl &= ~PCIE_DEVCTL_ERR_MASK;
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, device_ctl);

	/*
	 * Disable PCI-Express Advanced Error Handling if Exists
	 */
	if (!PCIE_HAS_AER(bus_p))
		goto root;

	/* Disable Uncorrectable errors */
	PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, PCIE_AER_UCE_BITS);

	/* Disable Correctable errors */
	PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, PCIE_AER_CE_BITS);

	/* Disable ECRC generation and checking */
	if ((aer_reg = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
	    PCI_CAP_EINVAL32) {
		aer_reg &= ~(PCIE_AER_CTL_ECRC_GEN_ENA |
		    PCIE_AER_CTL_ECRC_CHECK_ENA);

		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, aer_reg);
	}
	/*
	 * Disable Secondary Uncorrectable errors if this is a bridge
	 */
	if (!PCIE_IS_PCIE_BDG(bus_p))
		goto root;

	PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, PCIE_AER_SUCE_BITS);

root:
	/*
	 * disable Root Control this is a Root device
	 */
	if (!PCIE_IS_ROOT(bus_p))
		return;

	if (!pcie_serr_disable_flag) {
		device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL);
		device_ctl &= ~PCIE_ROOT_SYS_ERR;
		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, device_ctl);
	}

	if (!PCIE_HAS_AER(bus_p))
		return;

	/*
	 * NOTE(review): PCIE_AER_RE_CMD is an AER-block offset, yet it is
	 * accessed here via the PCIE_CAP_GET/PUT macros, while
	 * pcie_enable_errors() uses PCIE_AER_GET/PUT for the same register —
	 * confirm whether this is intentional or a latent bug.
	 */
	if ((device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
	    PCI_CAP_EINVAL16) {
		device_ctl &= ~pcie_root_error_cmd_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_AER_RE_CMD, device_ctl);
	}
}

/*
 * Extract bdf from "reg" property.
 */
int
pcie_get_bdf_from_dip(dev_info_t *dip, pcie_req_id_t *bdf)
{
	pci_regspec_t	*regspec;
	int		reglen;

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", (int **)&regspec, (uint_t *)&reglen) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (reglen < (sizeof (pci_regspec_t) / sizeof (int))) {
		ddi_prop_free(regspec);
		return (DDI_FAILURE);
	}

	/* Get phys_hi from first element.  All have same bdf. */
	*bdf = (regspec->pci_phys_hi & (PCI_REG_BDFR_M ^ PCI_REG_REG_M)) >> 8;

	ddi_prop_free(regspec);
	return (DDI_SUCCESS);
}

/*
 * Walk up from rdip until the node whose parent is dip is found, i.e.
 * dip's immediate child on the path down to rdip.
 */
dev_info_t *
pcie_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t *cdip = rdip;

	for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
		;

	return (cdip);
}

uint32_t
pcie_get_bdf_for_dma_xfer(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t *cdip;

	/*
	 * As part of the probing, the PCI fcode interpreter may setup a DMA
	 * request if a given card has a fcode on it using dip and rdip of the
	 * hotplug connector i.e, dip and rdip of px/pcieb driver. In this
	 * case, return a invalid value for the bdf since we cannot get to the
	 * bdf value of the actual device which will be initiating this DMA.
	 */
	if (rdip == dip)
		return (PCIE_INVALID_BDF);

	cdip = pcie_get_my_childs_dip(dip, rdip);

	/*
	 * For a given rdip, return the bdf value of dip's (px or pcieb)
	 * immediate child or secondary bus-id if dip is a PCIe2PCI bridge.
	 *
	 * XXX - For now, return a invalid bdf value for all PCI and PCI-X
	 * devices since this needs more work.
	 */
	return (PCI_GET_PCIE2PCI_SECBUS(cdip) ?
	    PCIE_INVALID_BDF : PCI_GET_BDF(cdip));
}

/*
 * Accessors for the platform-tunable error masks; see the block comment
 * above pcie_get_XXX_mask/pcie_set_XXX_mask usage earlier in this file.
 */
uint32_t
pcie_get_aer_uce_mask() {
	return (pcie_aer_uce_mask);
}
uint32_t
pcie_get_aer_ce_mask() {
	return (pcie_aer_ce_mask);
}
uint32_t
pcie_get_aer_suce_mask() {
	return (pcie_aer_suce_mask);
}
uint32_t
pcie_get_serr_mask() {
	return (pcie_serr_disable_flag);
}

void
pcie_set_aer_uce_mask(uint32_t mask) {
	pcie_aer_uce_mask = mask;
	/* Masking UR implies UR reporting should be off as well. */
	if (mask & PCIE_AER_UCE_UR)
		pcie_base_err_default &= ~PCIE_DEVCTL_UR_REPORTING_EN;
	else
		pcie_base_err_default |= PCIE_DEVCTL_UR_REPORTING_EN;

	/* Masking ECRC errors disables ECRC generation/checking entirely. */
	if (mask & PCIE_AER_UCE_ECRC)
		pcie_ecrc_value = 0;
}

void
pcie_set_aer_ce_mask(uint32_t mask) {
	pcie_aer_ce_mask = mask;
}
void
pcie_set_aer_suce_mask(uint32_t mask) {
	pcie_aer_suce_mask = mask;
}
void
pcie_set_serr_mask(uint32_t mask) {
	pcie_serr_disable_flag = mask;
}

/*
 * Is the rdip a child of dip.  Used for checking certain CTLOPS from bubbling
 * up erroneously.  Ex.  ISA ctlops to a PCI-PCI Bridge.
 */
boolean_t
pcie_is_child(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t	*cdip = ddi_get_child(dip);
	for (; cdip; cdip = ddi_get_next_sibling(cdip))
		if (cdip == rdip)
			break;
	return (cdip != NULL);
}

/* Returns B_TRUE if the device's Link Control has the link disabled. */
boolean_t
pcie_is_link_disabled(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	if (PCIE_IS_PCIE(bus_p)) {
		if (PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL) &
		    PCIE_LINKCTL_LINK_DISABLE)
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Initialize the MPS for a root port.
 *
 * dip - dip of root port device.
 */
void
pcie_init_root_port_mps(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	int rp_cap, max_supported = pcie_max_mps;

	/* Scan the fabric below this root port for the common MPS. */
	(void) pcie_get_fabric_mps(ddi_get_parent(dip),
	    ddi_get_child(dip), &max_supported);

	/* Clamp to what the root port itself advertises in DEVCAP. */
	rp_cap = PCI_CAP_GET16(bus_p->bus_cfg_hdl, NULL,
	    bus_p->bus_pcie_off, PCIE_DEVCAP) &
	    PCIE_DEVCAP_MAX_PAYLOAD_MASK;

	if (rp_cap < max_supported)
		max_supported = rp_cap;

	bus_p->bus_mps = max_supported;
	(void) pcie_initchild_mps(dip);
}

/*
 * Initialize the Maximum Payload Size of a device.
 *
 * cdip - dip of device.
 *
 * returns - DDI_SUCCESS or DDI_FAILURE
 */
int
pcie_initchild_mps(dev_info_t *cdip)
{
	int		max_payload_size;
	pcie_bus_t	*bus_p;
	dev_info_t	*pdip = ddi_get_parent(cdip);
	uint8_t		dev_type;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL) {
		PCIE_DBG("%s: BUS not found.\n",
		    ddi_driver_name(cdip));
		return (DDI_FAILURE);
	}

	dev_type = bus_p->bus_dev_type;

	/*
	 * For ARI Devices, only function zero's MPS needs to be set.
	 */
	if ((dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) &&
	    (pcie_ari_is_enabled(pdip) == PCIE_ARI_FORW_ENABLED)) {
		pcie_req_id_t child_bdf;

		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
			return (DDI_FAILURE);
		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) != 0)
			return (DDI_SUCCESS);
	}

	if (PCIE_IS_RP(bus_p)) {
		/*
		 * If this device is a root port, then the mps scan
		 * saved the mps in the root ports bus_p.
		 */
		max_payload_size = bus_p->bus_mps;
	} else {
		/*
		 * If the device is not a root port, then the mps of
		 * its parent should be used.
		 */
		pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip);
		max_payload_size = parent_bus_p->bus_mps;
	}

	if (PCIE_IS_PCIE(bus_p) && (max_payload_size >= 0)) {
		pcie_bus_t *rootp_bus_p = PCIE_DIP2BUS(bus_p->bus_rp_dip);
		uint16_t mask, dev_ctrl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL),
		    mps = PCIE_CAP_GET(16, bus_p, PCIE_DEVCAP) &
		    PCIE_DEVCAP_MAX_PAYLOAD_MASK;

		/* Never program more than the device itself supports. */
		mps = MIN(mps, (uint16_t)max_payload_size);

		/*
		 * If the MPS to be set is less than the root ports
		 * MPS, then MRRS will have to be set the same as MPS.
		 */
		mask = ((mps < rootp_bus_p->bus_mps) ?
		    PCIE_DEVCTL_MAX_READ_REQ_MASK : 0) |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK;

		dev_ctrl &= ~mask;
		mask = ((mps < rootp_bus_p->bus_mps)
		    ? mps << PCIE_DEVCTL_MAX_READ_REQ_SHIFT : 0)
		    | (mps << PCIE_DEVCTL_MAX_PAYLOAD_SHIFT);

		dev_ctrl |= mask;

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);

		bus_p->bus_mps = mps;
	}

	return (DDI_SUCCESS);
}

/*
 * Scans a device tree/branch for a maximum payload size capabilities.
 *
 * rc_dip - dip of Root Complex.
 * dip - dip of device where scan will begin.
 * max_supported (IN) - maximum allowable MPS.
 * max_supported (OUT) - maximum payload size capability of fabric.
1746 */ 1747 void 1748 pcie_get_fabric_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported) 1749 { 1750 if (dip == NULL) 1751 return; 1752 1753 /* 1754 * Perform a fabric scan to obtain Maximum Payload Capabilities 1755 */ 1756 (void) pcie_scan_mps(rc_dip, dip, max_supported); 1757 1758 PCIE_DBG("MPS: Highest Common MPS= %x\n", max_supported); 1759 } 1760 1761 /* 1762 * Scans fabric and determines Maximum Payload Size based on 1763 * highest common denominator alogorithm 1764 */ 1765 static void 1766 pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported) 1767 { 1768 int circular_count; 1769 pcie_max_supported_t max_pay_load_supported; 1770 1771 max_pay_load_supported.dip = rc_dip; 1772 max_pay_load_supported.highest_common_mps = *max_supported; 1773 1774 ndi_devi_enter(ddi_get_parent(dip), &circular_count); 1775 ddi_walk_devs(dip, pcie_get_max_supported, 1776 (void *)&max_pay_load_supported); 1777 ndi_devi_exit(ddi_get_parent(dip), circular_count); 1778 1779 *max_supported = max_pay_load_supported.highest_common_mps; 1780 } 1781 1782 /* 1783 * Called as part of the Maximum Payload Size scan. 
1784 */ 1785 static int 1786 pcie_get_max_supported(dev_info_t *dip, void *arg) 1787 { 1788 uint32_t max_supported; 1789 uint16_t cap_ptr; 1790 pcie_max_supported_t *current = (pcie_max_supported_t *)arg; 1791 pci_regspec_t *reg; 1792 int rlen; 1793 caddr_t virt; 1794 ddi_acc_handle_t config_handle; 1795 1796 if (ddi_get_child(current->dip) == NULL) { 1797 goto fail1; 1798 } 1799 1800 if (pcie_dev(dip) == DDI_FAILURE) { 1801 PCIE_DBG("MPS: pcie_get_max_supported: %s: " 1802 "Not a PCIe dev\n", ddi_driver_name(dip)); 1803 goto fail1; 1804 } 1805 1806 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg", 1807 (caddr_t)®, &rlen) != DDI_PROP_SUCCESS) { 1808 PCIE_DBG("MPS: pcie_get_max_supported: %s: " 1809 "Can not read reg\n", ddi_driver_name(dip)); 1810 goto fail1; 1811 } 1812 1813 if (pcie_map_phys(ddi_get_child(current->dip), reg, &virt, 1814 &config_handle) != DDI_SUCCESS) { 1815 PCIE_DBG("MPS: pcie_get_max_supported: %s: pcie_map_phys " 1816 "failed\n", ddi_driver_name(dip)); 1817 goto fail2; 1818 } 1819 1820 if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, &cap_ptr)) == 1821 DDI_FAILURE) { 1822 goto fail3; 1823 } 1824 1825 max_supported = PCI_CAP_GET16(config_handle, NULL, cap_ptr, 1826 PCIE_DEVCAP) & PCIE_DEVCAP_MAX_PAYLOAD_MASK; 1827 1828 PCIE_DBG("PCIE MPS: %s: MPS Capabilities %x\n", ddi_driver_name(dip), 1829 max_supported); 1830 1831 if (max_supported < current->highest_common_mps) 1832 current->highest_common_mps = max_supported; 1833 1834 fail3: 1835 pcie_unmap_phys(&config_handle, reg); 1836 fail2: 1837 kmem_free(reg, rlen); 1838 fail1: 1839 return (DDI_WALK_CONTINUE); 1840 } 1841 1842 /* 1843 * Determines if there are any root ports attached to a root complex. 1844 * 1845 * dip - dip of root complex 1846 * 1847 * Returns - DDI_SUCCESS if there is at least one root port otherwise 1848 * DDI_FAILURE. 
1849 */ 1850 int 1851 pcie_root_port(dev_info_t *dip) 1852 { 1853 int port_type; 1854 uint16_t cap_ptr; 1855 ddi_acc_handle_t config_handle; 1856 dev_info_t *cdip = ddi_get_child(dip); 1857 1858 /* 1859 * Determine if any of the children of the passed in dip 1860 * are root ports. 1861 */ 1862 for (; cdip; cdip = ddi_get_next_sibling(cdip)) { 1863 1864 if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS) 1865 continue; 1866 1867 if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, 1868 &cap_ptr)) == DDI_FAILURE) { 1869 pci_config_teardown(&config_handle); 1870 continue; 1871 } 1872 1873 port_type = PCI_CAP_GET16(config_handle, NULL, cap_ptr, 1874 PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK; 1875 1876 pci_config_teardown(&config_handle); 1877 1878 if (port_type == PCIE_PCIECAP_DEV_TYPE_ROOT) 1879 return (DDI_SUCCESS); 1880 } 1881 1882 /* No root ports were found */ 1883 1884 return (DDI_FAILURE); 1885 } 1886 1887 /* 1888 * Function that determines if a device a PCIe device. 1889 * 1890 * dip - dip of device. 1891 * 1892 * returns - DDI_SUCCESS if device is a PCIe device, otherwise DDI_FAILURE. 1893 */ 1894 int 1895 pcie_dev(dev_info_t *dip) 1896 { 1897 /* get parent device's device_type property */ 1898 char *device_type; 1899 int rc = DDI_FAILURE; 1900 dev_info_t *pdip = ddi_get_parent(dip); 1901 1902 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, 1903 DDI_PROP_DONTPASS, "device_type", &device_type) 1904 != DDI_PROP_SUCCESS) { 1905 return (DDI_FAILURE); 1906 } 1907 1908 if (strcmp(device_type, "pciex") == 0) 1909 rc = DDI_SUCCESS; 1910 else 1911 rc = DDI_FAILURE; 1912 1913 ddi_prop_free(device_type); 1914 return (rc); 1915 } 1916 1917 /* 1918 * Function to map in a device's memory space. 
1919 */ 1920 static int 1921 pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec, 1922 caddr_t *addrp, ddi_acc_handle_t *handlep) 1923 { 1924 ddi_map_req_t mr; 1925 ddi_acc_hdl_t *hp; 1926 int result; 1927 ddi_device_acc_attr_t attr; 1928 1929 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 1930 attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 1931 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 1932 attr.devacc_attr_access = DDI_CAUTIOUS_ACC; 1933 1934 *handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL); 1935 hp = impl_acc_hdl_get(*handlep); 1936 hp->ah_vers = VERS_ACCHDL; 1937 hp->ah_dip = dip; 1938 hp->ah_rnumber = 0; 1939 hp->ah_offset = 0; 1940 hp->ah_len = 0; 1941 hp->ah_acc = attr; 1942 1943 mr.map_op = DDI_MO_MAP_LOCKED; 1944 mr.map_type = DDI_MT_REGSPEC; 1945 mr.map_obj.rp = (struct regspec *)phys_spec; 1946 mr.map_prot = PROT_READ | PROT_WRITE; 1947 mr.map_flags = DDI_MF_KERNEL_MAPPING; 1948 mr.map_handlep = hp; 1949 mr.map_vers = DDI_MAP_VERSION; 1950 1951 result = ddi_map(dip, &mr, 0, 0, addrp); 1952 1953 if (result != DDI_SUCCESS) { 1954 impl_acc_hdl_free(*handlep); 1955 *handlep = (ddi_acc_handle_t)NULL; 1956 } else { 1957 hp->ah_addr = *addrp; 1958 } 1959 1960 return (result); 1961 } 1962 1963 /* 1964 * Map out memory that was mapped in with pcie_map_phys(); 1965 */ 1966 static void 1967 pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph) 1968 { 1969 ddi_map_req_t mr; 1970 ddi_acc_hdl_t *hp; 1971 1972 hp = impl_acc_hdl_get(*handlep); 1973 ASSERT(hp); 1974 1975 mr.map_op = DDI_MO_UNMAP; 1976 mr.map_type = DDI_MT_REGSPEC; 1977 mr.map_obj.rp = (struct regspec *)ph; 1978 mr.map_prot = PROT_READ | PROT_WRITE; 1979 mr.map_flags = DDI_MF_KERNEL_MAPPING; 1980 mr.map_handlep = hp; 1981 mr.map_vers = DDI_MAP_VERSION; 1982 1983 (void) ddi_map(hp->ah_dip, &mr, hp->ah_offset, 1984 hp->ah_len, &hp->ah_addr); 1985 1986 impl_acc_hdl_free(*handlep); 1987 *handlep = (ddi_acc_handle_t)NULL; 1988 } 1989 1990 void 1991 pcie_set_rber_fatal(dev_info_t *dip, 
boolean_t val) 1992 { 1993 pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip); 1994 bus_p->bus_pfd->pe_rber_fatal = val; 1995 } 1996 1997 /* 1998 * Return parent Root Port's pe_rber_fatal value. 1999 */ 2000 boolean_t 2001 pcie_get_rber_fatal(dev_info_t *dip) 2002 { 2003 pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip); 2004 pcie_bus_t *rp_bus_p = PCIE_DIP2UPBUS(bus_p->bus_rp_dip); 2005 return (rp_bus_p->bus_pfd->pe_rber_fatal); 2006 } 2007 2008 int 2009 pcie_ari_supported(dev_info_t *dip) 2010 { 2011 uint32_t devcap2; 2012 uint16_t pciecap; 2013 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 2014 uint8_t dev_type; 2015 2016 PCIE_DBG("pcie_ari_supported: dip=%p\n", dip); 2017 2018 if (bus_p == NULL) 2019 return (PCIE_ARI_FORW_NOT_SUPPORTED); 2020 2021 dev_type = bus_p->bus_dev_type; 2022 2023 if ((dev_type != PCIE_PCIECAP_DEV_TYPE_DOWN) && 2024 (dev_type != PCIE_PCIECAP_DEV_TYPE_ROOT)) 2025 return (PCIE_ARI_FORW_NOT_SUPPORTED); 2026 2027 if (pcie_disable_ari) { 2028 PCIE_DBG("pcie_ari_supported: dip=%p: ARI Disabled\n", dip); 2029 return (PCIE_ARI_FORW_NOT_SUPPORTED); 2030 } 2031 2032 pciecap = PCIE_CAP_GET(16, bus_p, PCIE_PCIECAP); 2033 2034 if ((pciecap & PCIE_PCIECAP_VER_MASK) < PCIE_PCIECAP_VER_2_0) { 2035 PCIE_DBG("pcie_ari_supported: dip=%p: Not 2.0\n", dip); 2036 return (PCIE_ARI_FORW_NOT_SUPPORTED); 2037 } 2038 2039 devcap2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCAP2); 2040 2041 PCIE_DBG("pcie_ari_supported: dip=%p: DevCap2=0x%x\n", 2042 dip, devcap2); 2043 2044 if (devcap2 & PCIE_DEVCAP2_ARI_FORWARD) { 2045 PCIE_DBG("pcie_ari_supported: " 2046 "dip=%p: ARI Forwarding is supported\n", dip); 2047 return (PCIE_ARI_FORW_SUPPORTED); 2048 } 2049 return (PCIE_ARI_FORW_NOT_SUPPORTED); 2050 } 2051 2052 int 2053 pcie_ari_enable(dev_info_t *dip) 2054 { 2055 uint16_t devctl2; 2056 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 2057 2058 PCIE_DBG("pcie_ari_enable: dip=%p\n", dip); 2059 2060 if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED) 2061 return (DDI_FAILURE); 2062 2063 devctl2 = 
PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2); 2064 devctl2 |= PCIE_DEVCTL2_ARI_FORWARD_EN; 2065 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2); 2066 2067 PCIE_DBG("pcie_ari_enable: dip=%p: writing 0x%x to DevCtl2\n", 2068 dip, devctl2); 2069 2070 return (DDI_SUCCESS); 2071 } 2072 2073 int 2074 pcie_ari_disable(dev_info_t *dip) 2075 { 2076 uint16_t devctl2; 2077 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 2078 2079 PCIE_DBG("pcie_ari_disable: dip=%p\n", dip); 2080 2081 if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED) 2082 return (DDI_FAILURE); 2083 2084 devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2); 2085 devctl2 &= ~PCIE_DEVCTL2_ARI_FORWARD_EN; 2086 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2); 2087 2088 PCIE_DBG("pcie_ari_disable: dip=%p: writing 0x%x to DevCtl2\n", 2089 dip, devctl2); 2090 2091 return (DDI_SUCCESS); 2092 } 2093 2094 int 2095 pcie_ari_is_enabled(dev_info_t *dip) 2096 { 2097 uint16_t devctl2; 2098 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 2099 2100 PCIE_DBG("pcie_ari_is_enabled: dip=%p\n", dip); 2101 2102 if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED) 2103 return (PCIE_ARI_FORW_DISABLED); 2104 2105 devctl2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCTL2); 2106 2107 PCIE_DBG("pcie_ari_is_enabled: dip=%p: DevCtl2=0x%x\n", 2108 dip, devctl2); 2109 2110 if (devctl2 & PCIE_DEVCTL2_ARI_FORWARD_EN) { 2111 PCIE_DBG("pcie_ari_is_enabled: " 2112 "dip=%p: ARI Forwarding is enabled\n", dip); 2113 return (PCIE_ARI_FORW_ENABLED); 2114 } 2115 2116 return (PCIE_ARI_FORW_DISABLED); 2117 } 2118 2119 int 2120 pcie_ari_device(dev_info_t *dip) 2121 { 2122 ddi_acc_handle_t handle; 2123 uint16_t cap_ptr; 2124 2125 PCIE_DBG("pcie_ari_device: dip=%p\n", dip); 2126 2127 /* 2128 * XXX - This function may be called before the bus_p structure 2129 * has been populated. This code can be changed to remove 2130 * pci_config_setup()/pci_config_teardown() when the RFE 2131 * to populate the bus_p structures early in boot is putback. 
2132 */ 2133 2134 /* First make sure it is a PCIe device */ 2135 2136 if (pci_config_setup(dip, &handle) != DDI_SUCCESS) 2137 return (PCIE_NOT_ARI_DEVICE); 2138 2139 if ((PCI_CAP_LOCATE(handle, PCI_CAP_ID_PCI_E, &cap_ptr)) 2140 != DDI_SUCCESS) { 2141 pci_config_teardown(&handle); 2142 return (PCIE_NOT_ARI_DEVICE); 2143 } 2144 2145 /* Locate the ARI Capability */ 2146 2147 if ((PCI_CAP_LOCATE(handle, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI), 2148 &cap_ptr)) == DDI_FAILURE) { 2149 pci_config_teardown(&handle); 2150 return (PCIE_NOT_ARI_DEVICE); 2151 } 2152 2153 /* ARI Capability was found so it must be a ARI device */ 2154 PCIE_DBG("pcie_ari_device: ARI Device dip=%p\n", dip); 2155 2156 pci_config_teardown(&handle); 2157 return (PCIE_ARI_DEVICE); 2158 } 2159 2160 int 2161 pcie_ari_get_next_function(dev_info_t *dip, int *func) 2162 { 2163 uint32_t val; 2164 uint16_t cap_ptr, next_function; 2165 ddi_acc_handle_t handle; 2166 2167 /* 2168 * XXX - This function may be called before the bus_p structure 2169 * has been populated. This code can be changed to remove 2170 * pci_config_setup()/pci_config_teardown() when the RFE 2171 * to populate the bus_p structures early in boot is putback. 
2172 */ 2173 2174 if (pci_config_setup(dip, &handle) != DDI_SUCCESS) 2175 return (DDI_FAILURE); 2176 2177 if ((PCI_CAP_LOCATE(handle, 2178 PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI), &cap_ptr)) == DDI_FAILURE) { 2179 pci_config_teardown(&handle); 2180 return (DDI_FAILURE); 2181 } 2182 2183 val = PCI_CAP_GET32(handle, NULL, cap_ptr, PCIE_ARI_CAP); 2184 2185 next_function = (val >> PCIE_ARI_CAP_NEXT_FUNC_SHIFT) & 2186 PCIE_ARI_CAP_NEXT_FUNC_MASK; 2187 2188 pci_config_teardown(&handle); 2189 2190 *func = next_function; 2191 2192 return (DDI_SUCCESS); 2193 } 2194 2195 dev_info_t * 2196 pcie_func_to_dip(dev_info_t *dip, pcie_req_id_t function) 2197 { 2198 pcie_req_id_t child_bdf; 2199 dev_info_t *cdip; 2200 2201 for (cdip = ddi_get_child(dip); cdip; 2202 cdip = ddi_get_next_sibling(cdip)) { 2203 2204 if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE) 2205 return (NULL); 2206 2207 if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) == function) 2208 return (cdip); 2209 } 2210 return (NULL); 2211 } 2212 2213 #ifdef DEBUG 2214 2215 static void 2216 pcie_print_bus(pcie_bus_t *bus_p) 2217 { 2218 pcie_dbg("\tbus_dip = 0x%p\n", bus_p->bus_dip); 2219 pcie_dbg("\tbus_fm_flags = 0x%x\n", bus_p->bus_fm_flags); 2220 2221 pcie_dbg("\tbus_bdf = 0x%x\n", bus_p->bus_bdf); 2222 pcie_dbg("\tbus_dev_ven_id = 0x%x\n", bus_p->bus_dev_ven_id); 2223 pcie_dbg("\tbus_rev_id = 0x%x\n", bus_p->bus_rev_id); 2224 pcie_dbg("\tbus_hdr_type = 0x%x\n", bus_p->bus_hdr_type); 2225 pcie_dbg("\tbus_dev_type = 0x%x\n", bus_p->bus_dev_type); 2226 pcie_dbg("\tbus_bdg_secbus = 0x%x\n", bus_p->bus_bdg_secbus); 2227 pcie_dbg("\tbus_pcie_off = 0x%x\n", bus_p->bus_pcie_off); 2228 pcie_dbg("\tbus_aer_off = 0x%x\n", bus_p->bus_aer_off); 2229 pcie_dbg("\tbus_pcix_off = 0x%x\n", bus_p->bus_pcix_off); 2230 pcie_dbg("\tbus_ecc_ver = 0x%x\n", bus_p->bus_ecc_ver); 2231 } 2232 2233 /* 2234 * For debugging purposes set pcie_dbg_print != 0 to see printf messages 2235 * during interrupt. 
2236 * 2237 * When a proper solution is in place this code will disappear. 2238 * Potential solutions are: 2239 * o circular buffers 2240 * o taskq to print at lower pil 2241 */ 2242 int pcie_dbg_print = 0; 2243 void 2244 pcie_dbg(char *fmt, ...) 2245 { 2246 va_list ap; 2247 2248 if (!pcie_debug_flags) { 2249 return; 2250 } 2251 va_start(ap, fmt); 2252 if (servicing_interrupt()) { 2253 if (pcie_dbg_print) { 2254 prom_vprintf(fmt, ap); 2255 } 2256 } else { 2257 prom_vprintf(fmt, ap); 2258 } 2259 va_end(ap); 2260 } 2261 #endif /* DEBUG */ 2262 2263 #if defined(__i386) || defined(__amd64) 2264 static void 2265 pcie_check_io_mem_range(ddi_acc_handle_t cfg_hdl, boolean_t *empty_io_range, 2266 boolean_t *empty_mem_range) 2267 { 2268 uint8_t class, subclass; 2269 uint_t val; 2270 2271 class = pci_config_get8(cfg_hdl, PCI_CONF_BASCLASS); 2272 subclass = pci_config_get8(cfg_hdl, PCI_CONF_SUBCLASS); 2273 2274 if ((class == PCI_CLASS_BRIDGE) && (subclass == PCI_BRIDGE_PCI)) { 2275 val = (((uint_t)pci_config_get8(cfg_hdl, PCI_BCNF_IO_BASE_LOW) & 2276 PCI_BCNF_IO_MASK) << 8); 2277 /* 2278 * Assuming that a zero based io_range[0] implies an 2279 * invalid I/O range. Likewise for mem_range[0]. 2280 */ 2281 if (val == 0) 2282 *empty_io_range = B_TRUE; 2283 val = (((uint_t)pci_config_get16(cfg_hdl, PCI_BCNF_MEM_BASE) & 2284 PCI_BCNF_MEM_MASK) << 16); 2285 if (val == 0) 2286 *empty_mem_range = B_TRUE; 2287 } 2288 } 2289 2290 #endif /* defined(__i386) || defined(__amd64) */ 2291