1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include <sys/sysmacros.h> 27 #include <sys/types.h> 28 #include <sys/kmem.h> 29 #include <sys/modctl.h> 30 #include <sys/ddi.h> 31 #include <sys/sunddi.h> 32 #include <sys/sunndi.h> 33 #include <sys/fm/protocol.h> 34 #include <sys/fm/util.h> 35 #include <sys/promif.h> 36 #include <sys/disp.h> 37 #include <sys/stat.h> 38 #include <sys/file.h> 39 #include <sys/pci_cap.h> 40 #include <sys/pci_impl.h> 41 #include <sys/pcie_impl.h> 42 #include <sys/hotplug/pci/pcie_hp.h> 43 #include <sys/hotplug/pci/pcicfg.h> 44 #include <sys/pci_cfgacc.h> 45 46 /* Local functions prototypes */ 47 static void pcie_init_pfd(dev_info_t *); 48 static void pcie_fini_pfd(dev_info_t *); 49 50 #if defined(__i386) || defined(__amd64) 51 static void pcie_check_io_mem_range(ddi_acc_handle_t, boolean_t *, boolean_t *); 52 #endif /* defined(__i386) || defined(__amd64) */ 53 54 #ifdef DEBUG 55 uint_t pcie_debug_flags = 0; 56 static void pcie_print_bus(pcie_bus_t *bus_p); 57 void pcie_dbg(char *fmt, ...); 58 #endif /* DEBUG */ 59 60 /* Variable 
to control default PCI-Express config settings. */
ushort_t pcie_command_default =
    PCI_COMM_SERR_ENABLE |
    PCI_COMM_WAIT_CYC_ENAB |
    PCI_COMM_PARITY_DETECT |
    PCI_COMM_ME |
    PCI_COMM_MAE |
    PCI_COMM_IO;

/*
 * xxx_fw are bits that are controlled by FW and should not be modified.
 * They are preserved verbatim when the command registers are programmed
 * in pcie_initchild().
 */
ushort_t pcie_command_default_fw =
    PCI_COMM_SPEC_CYC |
    PCI_COMM_MEMWR_INVAL |
    PCI_COMM_PALETTE_SNOOP |
    PCI_COMM_WAIT_CYC_ENAB |
    0xF800;		/* Reserved Bits */

ushort_t pcie_bdg_command_default_fw =
    PCI_BCNF_BCNTRL_ISA_ENABLE |
    PCI_BCNF_BCNTRL_VGA_ENABLE |
    0xF000;		/* Reserved Bits */

/* PCI-Express Base error defaults */
ushort_t pcie_base_err_default =
    PCIE_DEVCTL_CE_REPORTING_EN |
    PCIE_DEVCTL_NFE_REPORTING_EN |
    PCIE_DEVCTL_FE_REPORTING_EN |
    PCIE_DEVCTL_UR_REPORTING_EN;

/* PCI-Express Device Control Register default */
uint16_t pcie_devctl_default = PCIE_DEVCTL_RO_EN |
    PCIE_DEVCTL_MAX_READ_REQ_512;

/* PCI-Express AER Root Control Register */
#define	PCIE_ROOT_SYS_ERR	(PCIE_ROOTCTL_SYS_ERR_ON_CE_EN | \
				PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN | \
				PCIE_ROOTCTL_SYS_ERR_ON_FE_EN)

ushort_t pcie_root_ctrl_default =
    PCIE_ROOTCTL_SYS_ERR_ON_CE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN;

/* PCI-Express Root Error Command Register defaults */
ushort_t pcie_root_error_cmd_default =
    PCIE_AER_RE_CMD_CE_REP_EN |
    PCIE_AER_RE_CMD_NFE_REP_EN |
    PCIE_AER_RE_CMD_FE_REP_EN;

/* ECRC settings in the PCIe AER Control Register */
uint32_t pcie_ecrc_value =
    PCIE_AER_CTL_ECRC_GEN_ENA |
    PCIE_AER_CTL_ECRC_CHECK_ENA;

/*
 * If a particular platform wants to disable certain errors such as UR/MA,
 * instead of using #defines have the platform's PCIe Root Complex driver set
 * these masks using the pcie_get_XXX_mask and pcie_set_XXX_mask functions.
 * For x86 the closest thing to a PCIe root complex driver is NPE.  For SPARC
 * the closest PCIe root complex driver is PX.
 *
 * pcie_serr_disable_flag : disable SERR only (in RCR and command reg).  x86
 * systems may want to disable SERR in general.  For root ports, enabling SERR
 * causes NMIs which are not handled and results in a watchdog timeout error.
 */
uint32_t pcie_aer_uce_mask = 0;		/* AER UE Mask */
uint32_t pcie_aer_ce_mask = 0;		/* AER CE Mask */
uint32_t pcie_aer_suce_mask = 0;	/* AER Secondary UE Mask */
uint32_t pcie_serr_disable_flag = 0;	/* Disable SERR */

/* Default severities needed for eversholt.  Error handling doesn't care. */
uint32_t pcie_aer_uce_severity = PCIE_AER_UCE_MTLP | PCIE_AER_UCE_RO | \
    PCIE_AER_UCE_FCP | PCIE_AER_UCE_SD | PCIE_AER_UCE_DLP | \
    PCIE_AER_UCE_TRAINING;
uint32_t pcie_aer_suce_severity = PCIE_AER_SUCE_SERR_ASSERT | \
    PCIE_AER_SUCE_UC_ADDR_ERR | PCIE_AER_SUCE_UC_ATTR_ERR | \
    PCIE_AER_SUCE_USC_MSG_DATA_ERR;

/* Default max payload size (encoded devctl value shifted into place) */
int pcie_max_mps = PCIE_DEVCTL_MAX_PAYLOAD_4096 >> 5;
/* Set to non-zero to globally disable ARI support */
int pcie_disable_ari = 0;

/* More local function prototypes (MPS scan and register mapping helpers) */
static void pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip,
	int *max_supported);
static int pcie_get_max_supported(dev_info_t *dip, void *arg);
static int pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
    caddr_t *addrp, ddi_acc_handle_t *handlep);
static void pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph);

dev_info_t *pcie_get_rc_dip(dev_info_t *dip);

/*
 * modload support
 */
static struct modlmisc modlmisc = {
	&mod_miscops,	/* Type of module */
	"PCI Express Framework Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlmisc,
	NULL
};

/*
 * Global Variables needed for a non-atomic version of ddi_fm_ereport_post.
 * Currently used to send the pci.fabric ereports whose payload depends on the
 * type of PCI device it is being sent for.
169 */ 170 char *pcie_nv_buf; 171 nv_alloc_t *pcie_nvap; 172 nvlist_t *pcie_nvl; 173 174 int 175 _init(void) 176 { 177 int rval; 178 179 pcie_nv_buf = kmem_alloc(ERPT_DATA_SZ, KM_SLEEP); 180 pcie_nvap = fm_nva_xcreate(pcie_nv_buf, ERPT_DATA_SZ); 181 pcie_nvl = fm_nvlist_create(pcie_nvap); 182 183 rval = mod_install(&modlinkage); 184 return (rval); 185 } 186 187 int 188 _fini() 189 { 190 int rval; 191 192 fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN); 193 fm_nva_xdestroy(pcie_nvap); 194 kmem_free(pcie_nv_buf, ERPT_DATA_SZ); 195 196 rval = mod_remove(&modlinkage); 197 return (rval); 198 } 199 200 int 201 _info(struct modinfo *modinfop) 202 { 203 return (mod_info(&modlinkage, modinfop)); 204 } 205 206 /* ARGSUSED */ 207 int 208 pcie_init(dev_info_t *dip, caddr_t arg) 209 { 210 int ret = DDI_SUCCESS; 211 212 /* 213 * Create a "devctl" minor node to support DEVCTL_DEVICE_* 214 * and DEVCTL_BUS_* ioctls to this bus. 215 */ 216 if ((ret = ddi_create_minor_node(dip, "devctl", S_IFCHR, 217 PCI_MINOR_NUM(ddi_get_instance(dip), PCI_DEVCTL_MINOR), 218 DDI_NT_NEXUS, 0)) != DDI_SUCCESS) { 219 PCIE_DBG("Failed to create devctl minor node for %s%d\n", 220 ddi_driver_name(dip), ddi_get_instance(dip)); 221 222 return (ret); 223 } 224 225 if ((ret = pcie_hp_init(dip, arg)) != DDI_SUCCESS) { 226 /* 227 * On some x86 platforms, we observed unexpected hotplug 228 * initialization failures in recent years. The known cause 229 * is a hardware issue: while the problem PCI bridges have 230 * the Hotplug Capable registers set, the machine actually 231 * does not implement the expected ACPI object. 232 * 233 * We don't want to stop PCI driver attach and system boot 234 * just because of this hotplug initialization failure. 235 * Continue with a debug message printed. 
236 */ 237 PCIE_DBG("%s%d: Failed setting hotplug framework\n", 238 ddi_driver_name(dip), ddi_get_instance(dip)); 239 240 #if defined(__sparc) 241 ddi_remove_minor_node(dip, "devctl"); 242 243 return (ret); 244 #endif /* defined(__sparc) */ 245 } 246 247 return (DDI_SUCCESS); 248 } 249 250 /* ARGSUSED */ 251 int 252 pcie_uninit(dev_info_t *dip) 253 { 254 int ret = DDI_SUCCESS; 255 256 if (pcie_ari_is_enabled(dip) == PCIE_ARI_FORW_ENABLED) 257 (void) pcie_ari_disable(dip); 258 259 if ((ret = pcie_hp_uninit(dip)) != DDI_SUCCESS) { 260 PCIE_DBG("Failed to uninitialize hotplug for %s%d\n", 261 ddi_driver_name(dip), ddi_get_instance(dip)); 262 263 return (ret); 264 } 265 266 ddi_remove_minor_node(dip, "devctl"); 267 268 return (ret); 269 } 270 271 /* ARGSUSED */ 272 int 273 pcie_intr(dev_info_t *dip) 274 { 275 return (pcie_hp_intr(dip)); 276 } 277 278 /* ARGSUSED */ 279 int 280 pcie_open(dev_info_t *dip, dev_t *devp, int flags, int otyp, cred_t *credp) 281 { 282 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 283 284 /* 285 * Make sure the open is for the right file type. 286 */ 287 if (otyp != OTYP_CHR) 288 return (EINVAL); 289 290 /* 291 * Handle the open by tracking the device state. 
292 */ 293 if ((bus_p->bus_soft_state == PCI_SOFT_STATE_OPEN_EXCL) || 294 ((flags & FEXCL) && 295 (bus_p->bus_soft_state != PCI_SOFT_STATE_CLOSED))) { 296 return (EBUSY); 297 } 298 299 if (flags & FEXCL) 300 bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN_EXCL; 301 else 302 bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN; 303 304 return (0); 305 } 306 307 /* ARGSUSED */ 308 int 309 pcie_close(dev_info_t *dip, dev_t dev, int flags, int otyp, cred_t *credp) 310 { 311 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 312 313 if (otyp != OTYP_CHR) 314 return (EINVAL); 315 316 bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED; 317 318 return (0); 319 } 320 321 /* ARGSUSED */ 322 int 323 pcie_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg, int mode, 324 cred_t *credp, int *rvalp) 325 { 326 struct devctl_iocdata *dcp; 327 uint_t bus_state; 328 int rv = DDI_SUCCESS; 329 330 /* 331 * We can use the generic implementation for devctl ioctl 332 */ 333 switch (cmd) { 334 case DEVCTL_DEVICE_GETSTATE: 335 case DEVCTL_DEVICE_ONLINE: 336 case DEVCTL_DEVICE_OFFLINE: 337 case DEVCTL_BUS_GETSTATE: 338 return (ndi_devctl_ioctl(dip, cmd, arg, mode, 0)); 339 default: 340 break; 341 } 342 343 /* 344 * read devctl ioctl data 345 */ 346 if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS) 347 return (EFAULT); 348 349 switch (cmd) { 350 case DEVCTL_BUS_QUIESCE: 351 if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS) 352 if (bus_state == BUS_QUIESCED) 353 break; 354 (void) ndi_set_bus_state(dip, BUS_QUIESCED); 355 break; 356 case DEVCTL_BUS_UNQUIESCE: 357 if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS) 358 if (bus_state == BUS_ACTIVE) 359 break; 360 (void) ndi_set_bus_state(dip, BUS_ACTIVE); 361 break; 362 case DEVCTL_BUS_RESET: 363 case DEVCTL_BUS_RESETALL: 364 case DEVCTL_DEVICE_RESET: 365 rv = ENOTSUP; 366 break; 367 default: 368 rv = ENOTTY; 369 } 370 371 ndi_dc_freehdl(dcp); 372 return (rv); 373 } 374 375 /* ARGSUSED */ 376 int 377 pcie_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t 
prop_op, 378 int flags, char *name, caddr_t valuep, int *lengthp) 379 { 380 if (dev == DDI_DEV_T_ANY) 381 goto skip; 382 383 if (PCIE_IS_HOTPLUG_CAPABLE(dip) && 384 strcmp(name, "pci-occupant") == 0) { 385 int pci_dev = PCI_MINOR_NUM_TO_PCI_DEVNUM(getminor(dev)); 386 387 pcie_hp_create_occupant_props(dip, dev, pci_dev); 388 } 389 390 skip: 391 return (ddi_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp)); 392 } 393 394 int 395 pcie_init_cfghdl(dev_info_t *cdip) 396 { 397 pcie_bus_t *bus_p; 398 ddi_acc_handle_t eh = NULL; 399 400 bus_p = PCIE_DIP2BUS(cdip); 401 if (bus_p == NULL) 402 return (DDI_FAILURE); 403 404 /* Create an config access special to error handling */ 405 if (pci_config_setup(cdip, &eh) != DDI_SUCCESS) { 406 cmn_err(CE_WARN, "Cannot setup config access" 407 " for BDF 0x%x\n", bus_p->bus_bdf); 408 return (DDI_FAILURE); 409 } 410 411 bus_p->bus_cfg_hdl = eh; 412 return (DDI_SUCCESS); 413 } 414 415 void 416 pcie_fini_cfghdl(dev_info_t *cdip) 417 { 418 pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip); 419 420 pci_config_teardown(&bus_p->bus_cfg_hdl); 421 } 422 423 /* 424 * PCI-Express child device initialization. 425 * This function enables generic pci-express interrupts and error 426 * handling. 
 *
 * @param pdip root dip (root nexus's dip)
 * @param cdip child's dip (device's dip)
 * @return DDI_SUCCESS or DDI_FAILURE
 */
/* ARGSUSED */
int
pcie_initchild(dev_info_t *cdip)
{
	uint16_t tmp16, reg16;
	pcie_bus_t *bus_p;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL) {
		PCIE_DBG("%s: BUS not found.\n",
		    ddi_driver_name(cdip));

		return (DDI_FAILURE);
	}

	if (pcie_init_cfghdl(cdip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Clear the device's status register: writing the value just read
	 * back clears the write-one-to-clear error bits.
	 */
	reg16 = PCIE_GET(16, bus_p, PCI_CONF_STAT);
	PCIE_PUT(16, bus_p, PCI_CONF_STAT, reg16);

	/*
	 * Setup the device's command register: keep the firmware-owned
	 * bits as they are and OR in our defaults.
	 */
	reg16 = PCIE_GET(16, bus_p, PCI_CONF_COMM);
	tmp16 = (reg16 & pcie_command_default_fw) | pcie_command_default;

#if defined(__i386) || defined(__amd64)
	boolean_t empty_io_range = B_FALSE;
	boolean_t empty_mem_range = B_FALSE;
	/*
	 * Check for empty IO and Mem ranges on bridges. If so disable IO/Mem
	 * access as it can cause a hang if enabled.
	 */
	pcie_check_io_mem_range(bus_p->bus_cfg_hdl, &empty_io_range,
	    &empty_mem_range);
	if ((empty_io_range == B_TRUE) &&
	    (pcie_command_default & PCI_COMM_IO)) {
		tmp16 &= ~PCI_COMM_IO;
		PCIE_DBG("No I/O range found for %s, bdf 0x%x\n",
		    ddi_driver_name(cdip), bus_p->bus_bdf);
	}
	if ((empty_mem_range == B_TRUE) &&
	    (pcie_command_default & PCI_COMM_MAE)) {
		tmp16 &= ~PCI_COMM_MAE;
		PCIE_DBG("No Mem range found for %s, bdf 0x%x\n",
		    ddi_driver_name(cdip), bus_p->bus_bdf);
	}
#endif /* defined(__i386) || defined(__amd64) */

	/* Optionally suppress SERR on PCIe devices (see comment at flag) */
	if (pcie_serr_disable_flag && PCIE_IS_PCIE(bus_p))
		tmp16 &= ~PCI_COMM_SERR_ENABLE;

	PCIE_PUT(16, bus_p, PCI_CONF_COMM, tmp16);
	PCIE_DBG_CFG(cdip, bus_p, "COMMAND", 16, PCI_CONF_COMM, reg16);

	/*
	 * If the device has a bus control register then program it
	 * based on the settings in the command register.
	 */
	if (PCIE_IS_BDG(bus_p)) {
		/* Clear the device's secondary status register */
		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_SEC_STATUS);
		PCIE_PUT(16, bus_p, PCI_BCNF_SEC_STATUS, reg16);

		/* Setup the device's secondary command register */
		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_BCNTRL);
		tmp16 = (reg16 & pcie_bdg_command_default_fw);

		tmp16 |= PCI_BCNF_BCNTRL_SERR_ENABLE;
		/*
		 * Workaround for this Nvidia bridge. Don't enable the SERR
		 * enable bit in the bridge control register as it could lead
		 * to bogus NMIs.
		 */
		if (bus_p->bus_dev_ven_id == 0x037010DE)
			tmp16 &= ~PCI_BCNF_BCNTRL_SERR_ENABLE;

		if (pcie_command_default & PCI_COMM_PARITY_DETECT)
			tmp16 |= PCI_BCNF_BCNTRL_PARITY_ENABLE;

		/*
		 * Enable Master Abort Mode only if URs have not been masked.
		 * For PCI and PCIe-PCI bridges, enabling this bit causes a
		 * Master Aborts/UR to be forwarded as a UR/TA or SERR. If this
		 * bit is masked, posted requests are dropped and non-posted
		 * requests are returned with -1.
		 */
		if (pcie_aer_uce_mask & PCIE_AER_UCE_UR)
			tmp16 &= ~PCI_BCNF_BCNTRL_MAST_AB_MODE;
		else
			tmp16 |= PCI_BCNF_BCNTRL_MAST_AB_MODE;
		PCIE_PUT(16, bus_p, PCI_BCNF_BCNTRL, tmp16);
		PCIE_DBG_CFG(cdip, bus_p, "SEC CMD", 16, PCI_BCNF_BCNTRL,
		    reg16);
	}

	if (PCIE_IS_PCIE(bus_p)) {
		/* Setup PCIe device control register */
		reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
		tmp16 = pcie_devctl_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(cdip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);

		/* Enable PCIe errors */
		pcie_enable_errors(cdip);
	}

	/*
	 * ARI is usable only when the parent forwards ARI and the child
	 * itself is an ARI device.
	 */
	bus_p->bus_ari = B_FALSE;
	if ((pcie_ari_is_enabled(ddi_get_parent(cdip))
	    == PCIE_ARI_FORW_ENABLED) && (pcie_ari_device(cdip)
	    == PCIE_ARI_DEVICE)) {
		bus_p->bus_ari = B_TRUE;
	}

	/* MPS negotiation failure is fatal; release the config handle. */
	if (pcie_initchild_mps(cdip) == DDI_FAILURE) {
		pcie_fini_cfghdl(cdip);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

#define	PCIE_ZALLOC(data) kmem_zalloc(sizeof (data), KM_SLEEP)

/*
 * Allocate the per-device fault data (pf_data_t) and every error-register
 * save area this device type can need, based on its bus_t flags.
 * pcie_fini_pfd() must free exactly the mirrored set.
 */
static void
pcie_init_pfd(dev_info_t *dip)
{
	pf_data_t	*pfd_p = PCIE_ZALLOC(pf_data_t);
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	PCIE_DIP2PFD(dip) = pfd_p;

	pfd_p->pe_bus_p = bus_p;
	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_lock = B_FALSE;
	pfd_p->pe_valid = B_FALSE;

	/* Allocate the root fault struct for both RC and RP */
	if (PCIE_IS_ROOT(bus_p)) {
		PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
	}

	/* Every device gets the basic PCI error register area */
	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);

	if (PCIE_IS_BDG(bus_p))
		PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);

	if (PCIE_IS_PCIE(bus_p)) {
		PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);

		if (PCIE_IS_RP(bus_p))
			PCIE_RP_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_rp_err_regs_t);

		PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;

		if (PCIE_IS_RP(bus_p)) {
			PCIE_ADV_RP_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id =
			    PCIE_INVALID_BDF;
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id =
			    PCIE_INVALID_BDF;
		} else if (PCIE_IS_PCIE_BDG(bus_p)) {
			PCIE_ADV_BDG_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_adv_bdg_err_regs_t);
			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
			    PCIE_INVALID_BDF;
		}

		/* PCIe-to-PCI-X bridge: PCI-X registers on both sides */
		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			PCIX_BDG_ERR_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				PCIX_BDG_ECC_REG(pfd_p, 0) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
				PCIX_BDG_ECC_REG(pfd_p, 1) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
			}
		}
	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			PCIX_BDG_ERR_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				PCIX_BDG_ECC_REG(pfd_p, 0) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
				PCIX_BDG_ECC_REG(pfd_p, 1) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
			}
		} else {
			PCIX_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcix_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p))
				PCIX_ECC_REG(pfd_p) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
		}
	}
}

/*
 * Free everything pcie_init_pfd() allocated for this device.  The
 * conditional structure mirrors the allocation side exactly; keep the
 * two functions in sync when adding register areas.
 */
static void
pcie_fini_pfd(dev_info_t *dip)
{
	pf_data_t	*pfd_p = PCIE_DIP2PFD(dip);
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	if (PCIE_IS_PCIE(bus_p)) {
		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}

			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
			    sizeof (pf_pcix_bdg_err_regs_t));
		}

		if (PCIE_IS_RP(bus_p))
			kmem_free(PCIE_ADV_RP_REG(pfd_p),
			    sizeof (pf_pcie_adv_rp_err_regs_t));
		else if (PCIE_IS_PCIE_BDG(bus_p))
			kmem_free(PCIE_ADV_BDG_REG(pfd_p),
			    sizeof (pf_pcie_adv_bdg_err_regs_t));

		kmem_free(PCIE_ADV_REG(pfd_p),
		    sizeof (pf_pcie_adv_err_regs_t));

		if (PCIE_IS_RP(bus_p))
			kmem_free(PCIE_RP_REG(pfd_p),
			    sizeof (pf_pcie_rp_err_regs_t));

		kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}

			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
			    sizeof (pf_pcix_bdg_err_regs_t));
		} else {
			if (PCIX_ECC_VERSION_CHECK(bus_p))
				kmem_free(PCIX_ECC_REG(pfd_p),
				    sizeof (pf_pcix_ecc_regs_t));

			kmem_free(PCIX_ERR_REG(pfd_p),
			    sizeof (pf_pcix_err_regs_t));
		}
	}

	if (PCIE_IS_BDG(bus_p))
		kmem_free(PCI_BDG_ERR_REG(pfd_p),
		    sizeof (pf_pci_bdg_err_regs_t));

	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));

	if (PCIE_IS_ROOT(bus_p))
		kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));

	kmem_free(PCIE_DIP2PFD(dip), sizeof (pf_data_t));

	PCIE_DIP2PFD(dip) = NULL;
}


/*
 * Special functions to allocate pf_data_t's for PCIe root complexes.
707 * Note: Root Complex not Root Port 708 */ 709 void 710 pcie_rc_init_pfd(dev_info_t *dip, pf_data_t *pfd_p) 711 { 712 pfd_p->pe_bus_p = PCIE_DIP2DOWNBUS(dip); 713 pfd_p->pe_severity_flags = 0; 714 pfd_p->pe_lock = B_FALSE; 715 pfd_p->pe_valid = B_FALSE; 716 717 PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t); 718 PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF; 719 PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t); 720 PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t); 721 PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t); 722 PCIE_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_rp_err_regs_t); 723 PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t); 724 PCIE_ADV_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t); 725 726 PCIE_ADV_REG(pfd_p)->pcie_ue_sev = pcie_aer_uce_severity; 727 } 728 729 void 730 pcie_rc_fini_pfd(pf_data_t *pfd_p) 731 { 732 kmem_free(PCIE_ADV_RP_REG(pfd_p), sizeof (pf_pcie_adv_rp_err_regs_t)); 733 kmem_free(PCIE_ADV_REG(pfd_p), sizeof (pf_pcie_adv_err_regs_t)); 734 kmem_free(PCIE_RP_REG(pfd_p), sizeof (pf_pcie_rp_err_regs_t)); 735 kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t)); 736 kmem_free(PCI_BDG_ERR_REG(pfd_p), sizeof (pf_pci_bdg_err_regs_t)); 737 kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t)); 738 kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t)); 739 } 740 741 /* 742 * init pcie_bus_t for root complex 743 * 744 * Only a few of the fields in bus_t is valid for root complex. 
745 * The fields that are bracketed are initialized in this routine: 746 * 747 * dev_info_t * <bus_dip> 748 * dev_info_t * bus_rp_dip 749 * ddi_acc_handle_t bus_cfg_hdl 750 * uint_t <bus_fm_flags> 751 * pcie_req_id_t bus_bdf 752 * pcie_req_id_t bus_rp_bdf 753 * uint32_t bus_dev_ven_id 754 * uint8_t bus_rev_id 755 * uint8_t <bus_hdr_type> 756 * uint16_t <bus_dev_type> 757 * uint8_t bus_bdg_secbus 758 * uint16_t bus_pcie_off 759 * uint16_t <bus_aer_off> 760 * uint16_t bus_pcix_off 761 * uint16_t bus_ecc_ver 762 * pci_bus_range_t bus_bus_range 763 * ppb_ranges_t * bus_addr_ranges 764 * int bus_addr_entries 765 * pci_regspec_t * bus_assigned_addr 766 * int bus_assigned_entries 767 * pf_data_t * bus_pfd 768 * int bus_mps 769 * uint64_t bus_cfgacc_base 770 * void * bus_plat_private 771 */ 772 void 773 pcie_rc_init_bus(dev_info_t *dip) 774 { 775 pcie_bus_t *bus_p; 776 777 bus_p = (pcie_bus_t *)kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP); 778 bus_p->bus_dip = dip; 779 bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_RC_PSEUDO; 780 bus_p->bus_hdr_type = PCI_HEADER_ONE; 781 782 /* Fake that there are AER logs */ 783 bus_p->bus_aer_off = (uint16_t)-1; 784 785 /* Needed only for handle lookup */ 786 bus_p->bus_fm_flags |= PF_FM_READY; 787 788 ndi_set_bus_private(dip, B_FALSE, DEVI_PORT_TYPE_PCI, bus_p); 789 } 790 791 void 792 pcie_rc_fini_bus(dev_info_t *dip) 793 { 794 pcie_bus_t *bus_p = PCIE_DIP2DOWNBUS(dip); 795 ndi_set_bus_private(dip, B_FALSE, NULL, NULL); 796 kmem_free(bus_p, sizeof (pcie_bus_t)); 797 } 798 799 /* 800 * partially init pcie_bus_t for device (dip,bdf) for accessing pci 801 * config space 802 * 803 * This routine is invoked during boot, either after creating a devinfo node 804 * (x86 case) or during px driver attach (sparc case); it is also invoked 805 * in hotplug context after a devinfo node is created. 
 *
 * The fields initialized when flag PCIE_BUS_INITIAL is set:
 *	bus_dip, bus_rp_dip, bus_bdf, bus_rp_bdf, bus_dev_ven_id,
 *	bus_rev_id, bus_hdr_type, bus_dev_type, bus_pcie_off, bus_aer_off,
 *	bus_pcix_off, bus_ecc_ver and bus_soft_state.
 *
 * The fields initialized when flag PCIE_BUS_FINAL is set:
 *	bus_bdg_secbus, bus_bus_range, bus_addr_ranges, bus_addr_entries,
 *	bus_assigned_addr, bus_assigned_entries, bus_pfd (via
 *	pcie_init_pfd()) and bus_plat_private (via pcie_init_plat()).
 */
pcie_bus_t *
pcie_init_bus(dev_info_t *dip, pcie_req_id_t bdf, uint8_t flags)
{
	uint16_t status, base, baseptr, num_cap;
	uint32_t capid;
	int range_size;
	pcie_bus_t *bus_p;
	dev_info_t *rcdip;
	dev_info_t *pdip;
	const char *errstr = NULL;

	if (!(flags & PCIE_BUS_INITIAL))
		goto initial_done;

	bus_p = kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);

	bus_p->bus_dip = dip;
	bus_p->bus_bdf = bdf;

	/* Config accesses below go through the root complex */
	rcdip = pcie_get_rc_dip(dip);
	ASSERT(rcdip != NULL);

	/* Save the Vendor ID, Device ID and revision ID */
	bus_p->bus_dev_ven_id = pci_cfgacc_get32(rcdip, bdf, PCI_CONF_VENID);
	bus_p->bus_rev_id = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_REVID);
	/* Save the Header Type */
	bus_p->bus_hdr_type = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_HEADER);
	bus_p->bus_hdr_type &= PCI_HEADER_TYPE_M;

	/*
	 * Figure out the device type and all the relevant capability offsets
	 */
	/* set default value */
	bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;

	status = pci_cfgacc_get16(rcdip, bdf, PCI_CONF_STAT);
	if (status == PCI_CAP_EINVAL16 || !(status & PCI_STAT_CAP))
		goto caps_done; /* capability not supported */

	/* Relevant conventional capabilities first */

	/* Conventional caps: PCI_CAP_ID_PCI_E, PCI_CAP_ID_PCIX */
	num_cap = 2;

	/* The capability pointer location depends on the header type */
	switch (bus_p->bus_hdr_type) {
	case PCI_HEADER_ZERO:
		baseptr = PCI_CONF_CAP_PTR;
		break;
	case PCI_HEADER_PPB:
		baseptr = PCI_BCNF_CAP_PTR;
		break;
	case PCI_HEADER_CARDBUS:
		baseptr = PCI_CBUS_CAP_PTR;
		break;
	default:
		cmn_err(CE_WARN, "%s: unexpected pci header type:%x",
		    __func__, bus_p->bus_hdr_type);
		goto caps_done;
	}

	/* Walk the capability list; stop when both caps have been found */
	base = baseptr;
	for (base = pci_cfgacc_get8(rcdip, bdf, base); base && num_cap;
	    base = pci_cfgacc_get8(rcdip, bdf, base + PCI_CAP_NEXT_PTR)) {
		capid = pci_cfgacc_get8(rcdip, bdf, base);
		switch (capid) {
		case PCI_CAP_ID_PCI_E:
			bus_p->bus_pcie_off = base;
			bus_p->bus_dev_type = pci_cfgacc_get16(rcdip, bdf,
			    base + PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;

			/* Check and save PCIe hotplug capability information */
			if ((PCIE_IS_RP(bus_p) || PCIE_IS_SWD(bus_p)) &&
			    (pci_cfgacc_get16(rcdip, bdf, base + PCIE_PCIECAP)
			    & PCIE_PCIECAP_SLOT_IMPL) &&
			    (pci_cfgacc_get32(rcdip, bdf, base + PCIE_SLOTCAP)
			    & PCIE_SLOTCAP_HP_CAPABLE))
				bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE;

			num_cap--;
			break;
		case PCI_CAP_ID_PCIX:
			bus_p->bus_pcix_off = base;
			/* The ECC version register differs by device class */
			if (PCIE_IS_BDG(bus_p))
				bus_p->bus_ecc_ver =
				    pci_cfgacc_get16(rcdip, bdf, base +
				    PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
			else
				bus_p->bus_ecc_ver =
				    pci_cfgacc_get16(rcdip, bdf, base +
				    PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
			num_cap--;
			break;
		default:
			break;
		}
	}

	/* Check and save PCI hotplug (SHPC) capability information */
	if (PCIE_IS_BDG(bus_p)) {
		base = baseptr;
		for (base = pci_cfgacc_get8(rcdip, bdf, base);
		    base; base = pci_cfgacc_get8(rcdip, bdf,
		    base + PCI_CAP_NEXT_PTR)) {
			capid = pci_cfgacc_get8(rcdip, bdf, base);
			if (capid == PCI_CAP_ID_PCI_HOTPLUG) {
				bus_p->bus_pci_hp_off = base;
				bus_p->bus_hp_sup_modes |= PCIE_PCI_HP_MODE;
				break;
			}
		}
	}

	/* Then, relevant extended capabilities */

	if (!PCIE_IS_PCIE(bus_p))
		goto caps_done;

	/* Extended caps: PCIE_EXT_CAP_ID_AER */
	for (base = PCIE_EXT_CAP; base; base = (capid >>
	    PCIE_EXT_CAP_NEXT_PTR_SHIFT) & PCIE_EXT_CAP_NEXT_PTR_MASK) {
		capid = pci_cfgacc_get32(rcdip, bdf, base);
		if (capid == PCI_CAP_EINVAL32)
			break;
		if (((capid >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK)
		    == PCIE_EXT_CAP_ID_AER) {
			bus_p->bus_aer_off = base;
			break;
		}
	}

caps_done:
	/* save RP dip and RP bdf */
	if (PCIE_IS_RP(bus_p)) {
		bus_p->bus_rp_dip = dip;
		bus_p->bus_rp_bdf = bus_p->bus_bdf;
	} else {
		for (pdip = ddi_get_parent(dip); pdip;
		    pdip = ddi_get_parent(pdip)) {
			pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip);

			/*
			 * If RP dip and RP bdf in parent's bus_t have
			 * been initialized, simply use these instead of
			 * continuing up to the RC.
			 */
			if (parent_bus_p->bus_rp_dip != NULL) {
				bus_p->bus_rp_dip = parent_bus_p->bus_rp_dip;
				bus_p->bus_rp_bdf = parent_bus_p->bus_rp_bdf;
				break;
			}

			/*
			 * When debugging be aware that some NVIDIA x86
			 * architectures have 2 nodes for each RP, One at Bus
			 * 0x0 and one at Bus 0x80.  The requester is from Bus
			 * 0x80
			 */
			if (PCIE_IS_ROOT(parent_bus_p)) {
				bus_p->bus_rp_dip = pdip;
				bus_p->bus_rp_bdf = parent_bus_p->bus_bdf;
				break;
			}
		}
	}

	bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;
	bus_p->bus_fm_flags = 0;
	bus_p->bus_mps = 0;

	ndi_set_bus_private(dip, B_TRUE, DEVI_PORT_TYPE_PCI, (void *)bus_p);

	if (PCIE_IS_HOTPLUG_CAPABLE(dip))
		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE, dip,
		    "hotplug-capable");

initial_done:
	if (!(flags & PCIE_BUS_FINAL))
		goto final_done;

	/* already initialized? */
	bus_p = PCIE_DIP2BUS(dip);

	/* Save the Range information if device is a switch/bridge */
	if (PCIE_IS_BDG(bus_p)) {
		/* get "bus_range" property */
		range_size = sizeof (pci_bus_range_t);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "bus-range", (caddr_t)&bus_p->bus_bus_range, &range_size)
		    != DDI_PROP_SUCCESS) {
			errstr = "Cannot find \"bus-range\" property";
			cmn_err(CE_WARN,
			    "PCIE init err info failed BDF 0x%x:%s\n",
			    bus_p->bus_bdf, errstr);
		}

		/* get secondary bus number */
		rcdip = pcie_get_rc_dip(dip);
		ASSERT(rcdip != NULL);

		bus_p->bus_bdg_secbus = pci_cfgacc_get8(rcdip,
		    bus_p->bus_bdf, PCI_BCNF_SECBUS);

		/* Get "ranges" property */
		if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "ranges", (caddr_t)&bus_p->bus_addr_ranges,
		    &bus_p->bus_addr_entries) != DDI_PROP_SUCCESS)
			bus_p->bus_addr_entries = 0;
		/* Convert the property byte length into an element count */
		bus_p->bus_addr_entries /= sizeof (ppb_ranges_t);
	}

	/* save "assigned-addresses" property array, ignore failues */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "assigned-addresses", (caddr_t)&bus_p->bus_assigned_addr,
	    &bus_p->bus_assigned_entries) == DDI_PROP_SUCCESS)
		bus_p->bus_assigned_entries /= sizeof (pci_regspec_t);
	else
		bus_p->bus_assigned_entries = 0;

	pcie_init_pfd(dip);

	pcie_init_plat(dip);

final_done:

	PCIE_DBG("Add %s(dip 0x%p, bdf 0x%x, secbus 0x%x)\n",
	    ddi_driver_name(dip), (void *)dip, bus_p->bus_bdf,
	    bus_p->bus_bdg_secbus);
#ifdef DEBUG
	pcie_print_bus(bus_p);
#endif

	return (bus_p);
}

/*
 * Invoked before destroying devinfo node, mostly during hotplug
 * operation to free pcie_bus_t data structure
 */
/* ARGSUSED */
void
pcie_fini_bus(dev_info_t *dip, uint8_t flags)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	ASSERT(bus_p);

	if (flags & PCIE_BUS_INITIAL) {
		pcie_fini_plat(dip);
		pcie_fini_pfd(dip);

		kmem_free(bus_p->bus_assigned_addr,
		    (sizeof (pci_regspec_t) * bus_p->bus_assigned_entries));
		kmem_free(bus_p->bus_addr_ranges,
		    (sizeof (ppb_ranges_t) * bus_p->bus_addr_entries));
		/* zero out the fields that have been destroyed */
		bus_p->bus_assigned_addr = NULL;
		bus_p->bus_addr_ranges = NULL;
		bus_p->bus_assigned_entries = 0;
		bus_p->bus_addr_entries = 0;
	}

	if (flags & PCIE_BUS_FINAL) {
		if (PCIE_IS_HOTPLUG_CAPABLE(dip)) {
			(void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
			    "hotplug-capable");
		}

		ndi_set_bus_private(dip, B_TRUE, NULL, NULL);
		kmem_free(bus_p, sizeof (pcie_bus_t));
	}
}

/*
 * Post-attach hook for a child: enable correctable-error reporting once
 * the child driver has successfully attached.
 */
int
pcie_postattach_child(dev_info_t *cdip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip);

	if (!bus_p)
		return (DDI_FAILURE);

	return (pcie_enable_ce(cdip));
}

/*
 * PCI-Express
 * child device de-initialization.
 * This function disables generic pci-express interrupts and error
 * handling.
 */
void
pcie_uninitchild(dev_info_t *cdip)
{
	pcie_disable_errors(cdip);
	pcie_fini_cfghdl(cdip);
}

/*
 * find the root complex dip
 */
dev_info_t *
pcie_get_rc_dip(dev_info_t *dip)
{
	dev_info_t *rcdip;
	pcie_bus_t *rc_bus_p;

	/* Walk up the devinfo tree until a node marked as an RC is found. */
	for (rcdip = ddi_get_parent(dip); rcdip;
	    rcdip = ddi_get_parent(rcdip)) {
		rc_bus_p = PCIE_DIP2BUS(rcdip);
		if (rc_bus_p && PCIE_IS_RC(rc_bus_p))
			break;
	}

	/* NULL if the walk reached the tree root without finding an RC */
	return (rcdip);
}

/*
 * Returns B_TRUE if dip's parent identifies itself as a PCI or PCIe
 * nexus via its "device_type" property, B_FALSE otherwise.
 */
static boolean_t
pcie_is_pci_device(dev_info_t *dip)
{
	dev_info_t	*pdip;
	char		*device_type;

	pdip = ddi_get_parent(dip);
	ASSERT(pdip);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS,
	    "device_type", &device_type) != DDI_PROP_SUCCESS)
		return (B_FALSE);

	if (strcmp(device_type, "pciex") != 0 &&
	    strcmp(device_type, "pci") != 0) {
		ddi_prop_free(device_type);
		return (B_FALSE);
	}

	ddi_prop_free(device_type);
	return (B_TRUE);
}

/* Walk argument for pcie_fab_do_init_fini() */
typedef struct {
	boolean_t	init;	/* B_TRUE = init bus_p, B_FALSE = fini */
	uint8_t		flags;	/* PCIE_BUS_* flags passed through */
} pcie_bus_arg_t;

/*ARGSUSED*/
static int
pcie_fab_do_init_fini(dev_info_t *dip, void *arg)
{
	pcie_req_id_t	bdf;
	pcie_bus_arg_t	*bus_arg = (pcie_bus_arg_t *)arg;

	/* Prune subtrees that are not under a PCI/PCIe nexus. */
	if (!pcie_is_pci_device(dip))
		goto out;

	if (bus_arg->init) {
		if (pcie_get_bdf_from_dip(dip, &bdf) != DDI_SUCCESS)
			goto out;

		(void) pcie_init_bus(dip, bdf, bus_arg->flags);
	} else {
		(void) pcie_fini_bus(dip, bus_arg->flags);
	}

	return (DDI_WALK_CONTINUE);

out:
	return (DDI_WALK_PRUNECHILD);
}

/*
 * Walk the fabric below root complex rcdip and set up a pcie_bus_t for
 * every PCI/PCIe device found.
 */
void
pcie_fab_init_bus(dev_info_t *rcdip, uint8_t flags)
{
	int		circular_count;
	dev_info_t	*dip = ddi_get_child(rcdip);
	pcie_bus_arg_t	arg;

	arg.init = B_TRUE;
	arg.flags = flags;

	/* Hold the tree stable while walking the fabric. */
	ndi_devi_enter(rcdip, &circular_count);
	ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
	ndi_devi_exit(rcdip, circular_count);
}

/*
 * Walk the fabric below root complex rcdip and tear down the pcie_bus_t
 * of every PCI/PCIe device found.
 */
void
pcie_fab_fini_bus(dev_info_t *rcdip, uint8_t flags)
{
	int		circular_count;
	dev_info_t	*dip = ddi_get_child(rcdip);
	pcie_bus_arg_t	arg;

	arg.init = B_FALSE;
	arg.flags = flags;

	ndi_devi_enter(rcdip, &circular_count);
	ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
	ndi_devi_exit(rcdip, circular_count);
}

/*
 * Enable baseline and (if present) Advanced Error Reporting on a device,
 * using the pcie_*_default tunables defined at the top of this file.
 * Correctable-error reporting is deliberately left off; see
 * pcie_enable_ce().
 */
void
pcie_enable_errors(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	reg16, tmp16;
	uint32_t	reg32, tmp32;

	ASSERT(bus_p);

	/*
	 * Clear any pending errors
	 */
	pcie_clear_errors(dip);

	/* The remainder applies only to PCIe (not conventional PCI) nodes. */
	if (!PCIE_IS_PCIE(bus_p))
		return;

	/*
	 * Enable Baseline Error Handling but leave CE reporting off (poweron
	 * default).
	 */
	if ((reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL)) !=
	    PCI_CAP_EINVAL16) {
		/* Preserve MPS/MRRS fields; apply defaults minus CE enable. */
		tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_base_err_default & (~PCIE_DEVCTL_CE_REPORTING_EN));

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);
	}

	/* Enable Root Port Baseline Error Receiving */
	if (PCIE_IS_ROOT(bus_p) &&
	    (reg16 = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL)) !=
	    PCI_CAP_EINVAL16) {

		/* Honor a platform request to keep SERR generation off. */
		tmp16 = pcie_serr_disable_flag ?
		    (pcie_root_ctrl_default & ~PCIE_ROOT_SYS_ERR) :
		    pcie_root_ctrl_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "ROOT DEVCTL", 16, PCIE_ROOTCTL,
		    reg16);
	}

	/*
	 * Enable PCI-Express Advanced Error Handling if Exists
	 */
	if (!PCIE_HAS_AER(bus_p))
		return;

	/* Set Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE SEV", 32, PCIE_AER_UCE_SERV,
		    reg32);
	}

	/* Enable Uncorrectable errors */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_mask;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE MASK", 32, PCIE_AER_UCE_MASK,
		    reg32);
	}

	/* Enable ECRC generation and checking */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = reg32 | pcie_ecrc_value;
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER CTL", 32, PCIE_AER_CTL, reg32);
	}

	/* Enable Secondary Uncorrectable errors if this is a bridge */
	if (!PCIE_IS_PCIE_BDG(bus_p))
		goto root;

	/* Set Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_suce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE SEV", 32, PCIE_AER_SUCE_SERV,
		    reg32);
	}

	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, pcie_aer_suce_mask);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE MASK", 32,
		    PCIE_AER_SUCE_MASK, reg32);
	}

root:
1362 /* 1363 * Enable Root Control this is a Root device 1364 */ 1365 if (!PCIE_IS_ROOT(bus_p)) 1366 return; 1367 1368 if ((reg16 = PCIE_AER_GET(16, bus_p, PCIE_AER_RE_CMD)) != 1369 PCI_CAP_EINVAL16) { 1370 PCIE_AER_PUT(16, bus_p, PCIE_AER_RE_CMD, 1371 pcie_root_error_cmd_default); 1372 PCIE_DBG_AER(dip, bus_p, "AER Root Err Cmd", 16, 1373 PCIE_AER_RE_CMD, reg16); 1374 } 1375 } 1376 1377 /* 1378 * This function is used for enabling CE reporting and setting the AER CE mask. 1379 * When called from outside the pcie module it should always be preceded by 1380 * a call to pcie_enable_errors. 1381 */ 1382 int 1383 pcie_enable_ce(dev_info_t *dip) 1384 { 1385 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 1386 uint16_t device_sts, device_ctl; 1387 uint32_t tmp_pcie_aer_ce_mask; 1388 1389 if (!PCIE_IS_PCIE(bus_p)) 1390 return (DDI_SUCCESS); 1391 1392 /* 1393 * The "pcie_ce_mask" property is used to control both the CE reporting 1394 * enable field in the device control register and the AER CE mask. We 1395 * leave CE reporting disabled if pcie_ce_mask is set to -1. 1396 */ 1397 1398 tmp_pcie_aer_ce_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip, 1399 DDI_PROP_DONTPASS, "pcie_ce_mask", pcie_aer_ce_mask); 1400 1401 if (tmp_pcie_aer_ce_mask == (uint32_t)-1) { 1402 /* 1403 * Nothing to do since CE reporting has already been disabled. 
1404 */ 1405 return (DDI_SUCCESS); 1406 } 1407 1408 if (PCIE_HAS_AER(bus_p)) { 1409 /* Enable AER CE */ 1410 PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, tmp_pcie_aer_ce_mask); 1411 PCIE_DBG_AER(dip, bus_p, "AER CE MASK", 32, PCIE_AER_CE_MASK, 1412 0); 1413 1414 /* Clear any pending AER CE errors */ 1415 PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_STS, -1); 1416 } 1417 1418 /* clear any pending CE errors */ 1419 if ((device_sts = PCIE_CAP_GET(16, bus_p, PCIE_DEVSTS)) != 1420 PCI_CAP_EINVAL16) 1421 PCIE_CAP_PUT(16, bus_p, PCIE_DEVSTS, 1422 device_sts & (~PCIE_DEVSTS_CE_DETECTED)); 1423 1424 /* Enable CE reporting */ 1425 device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL); 1426 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, 1427 (device_ctl & (~PCIE_DEVCTL_ERR_MASK)) | pcie_base_err_default); 1428 PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, device_ctl); 1429 1430 return (DDI_SUCCESS); 1431 } 1432 1433 /* ARGSUSED */ 1434 void 1435 pcie_disable_errors(dev_info_t *dip) 1436 { 1437 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 1438 uint16_t device_ctl; 1439 uint32_t aer_reg; 1440 1441 if (!PCIE_IS_PCIE(bus_p)) 1442 return; 1443 1444 /* 1445 * Disable PCI-Express Baseline Error Handling 1446 */ 1447 device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL); 1448 device_ctl &= ~PCIE_DEVCTL_ERR_MASK; 1449 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, device_ctl); 1450 1451 /* 1452 * Disable PCI-Express Advanced Error Handling if Exists 1453 */ 1454 if (!PCIE_HAS_AER(bus_p)) 1455 goto root; 1456 1457 /* Disable Uncorrectable errors */ 1458 PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, PCIE_AER_UCE_BITS); 1459 1460 /* Disable Correctable errors */ 1461 PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, PCIE_AER_CE_BITS); 1462 1463 /* Disable ECRC generation and checking */ 1464 if ((aer_reg = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) != 1465 PCI_CAP_EINVAL32) { 1466 aer_reg &= ~(PCIE_AER_CTL_ECRC_GEN_ENA | 1467 PCIE_AER_CTL_ECRC_CHECK_ENA); 1468 1469 PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, aer_reg); 1470 } 1471 /* 1472 * Disable 
Secondary Uncorrectable errors if this is a bridge 1473 */ 1474 if (!PCIE_IS_PCIE_BDG(bus_p)) 1475 goto root; 1476 1477 PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, PCIE_AER_SUCE_BITS); 1478 1479 root: 1480 /* 1481 * disable Root Control this is a Root device 1482 */ 1483 if (!PCIE_IS_ROOT(bus_p)) 1484 return; 1485 1486 if (!pcie_serr_disable_flag) { 1487 device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL); 1488 device_ctl &= ~PCIE_ROOT_SYS_ERR; 1489 PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, device_ctl); 1490 } 1491 1492 if (!PCIE_HAS_AER(bus_p)) 1493 return; 1494 1495 if ((device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_AER_RE_CMD)) != 1496 PCI_CAP_EINVAL16) { 1497 device_ctl &= ~pcie_root_error_cmd_default; 1498 PCIE_CAP_PUT(16, bus_p, PCIE_AER_RE_CMD, device_ctl); 1499 } 1500 } 1501 1502 /* 1503 * Extract bdf from "reg" property. 1504 */ 1505 int 1506 pcie_get_bdf_from_dip(dev_info_t *dip, pcie_req_id_t *bdf) 1507 { 1508 pci_regspec_t *regspec; 1509 int reglen; 1510 1511 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1512 "reg", (int **)®spec, (uint_t *)®len) != DDI_SUCCESS) 1513 return (DDI_FAILURE); 1514 1515 if (reglen < (sizeof (pci_regspec_t) / sizeof (int))) { 1516 ddi_prop_free(regspec); 1517 return (DDI_FAILURE); 1518 } 1519 1520 /* Get phys_hi from first element. All have same bdf. 
*/ 1521 *bdf = (regspec->pci_phys_hi & (PCI_REG_BDFR_M ^ PCI_REG_REG_M)) >> 8; 1522 1523 ddi_prop_free(regspec); 1524 return (DDI_SUCCESS); 1525 } 1526 1527 dev_info_t * 1528 pcie_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip) 1529 { 1530 dev_info_t *cdip = rdip; 1531 1532 for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip)) 1533 ; 1534 1535 return (cdip); 1536 } 1537 1538 uint32_t 1539 pcie_get_bdf_for_dma_xfer(dev_info_t *dip, dev_info_t *rdip) 1540 { 1541 dev_info_t *cdip; 1542 1543 /* 1544 * As part of the probing, the PCI fcode interpreter may setup a DMA 1545 * request if a given card has a fcode on it using dip and rdip of the 1546 * hotplug connector i.e, dip and rdip of px/pcieb driver. In this 1547 * case, return a invalid value for the bdf since we cannot get to the 1548 * bdf value of the actual device which will be initiating this DMA. 1549 */ 1550 if (rdip == dip) 1551 return (PCIE_INVALID_BDF); 1552 1553 cdip = pcie_get_my_childs_dip(dip, rdip); 1554 1555 /* 1556 * For a given rdip, return the bdf value of dip's (px or pcieb) 1557 * immediate child or secondary bus-id if dip is a PCIe2PCI bridge. 1558 * 1559 * XXX - For now, return a invalid bdf value for all PCI and PCI-X 1560 * devices since this needs more work. 1561 */ 1562 return (PCI_GET_PCIE2PCI_SECBUS(cdip) ? 
1563 PCIE_INVALID_BDF : PCI_GET_BDF(cdip)); 1564 } 1565 1566 uint32_t 1567 pcie_get_aer_uce_mask() { 1568 return (pcie_aer_uce_mask); 1569 } 1570 uint32_t 1571 pcie_get_aer_ce_mask() { 1572 return (pcie_aer_ce_mask); 1573 } 1574 uint32_t 1575 pcie_get_aer_suce_mask() { 1576 return (pcie_aer_suce_mask); 1577 } 1578 uint32_t 1579 pcie_get_serr_mask() { 1580 return (pcie_serr_disable_flag); 1581 } 1582 1583 void 1584 pcie_set_aer_uce_mask(uint32_t mask) { 1585 pcie_aer_uce_mask = mask; 1586 if (mask & PCIE_AER_UCE_UR) 1587 pcie_base_err_default &= ~PCIE_DEVCTL_UR_REPORTING_EN; 1588 else 1589 pcie_base_err_default |= PCIE_DEVCTL_UR_REPORTING_EN; 1590 1591 if (mask & PCIE_AER_UCE_ECRC) 1592 pcie_ecrc_value = 0; 1593 } 1594 1595 void 1596 pcie_set_aer_ce_mask(uint32_t mask) { 1597 pcie_aer_ce_mask = mask; 1598 } 1599 void 1600 pcie_set_aer_suce_mask(uint32_t mask) { 1601 pcie_aer_suce_mask = mask; 1602 } 1603 void 1604 pcie_set_serr_mask(uint32_t mask) { 1605 pcie_serr_disable_flag = mask; 1606 } 1607 1608 /* 1609 * Is the rdip a child of dip. Used for checking certain CTLOPS from bubbling 1610 * up erronously. Ex. ISA ctlops to a PCI-PCI Bridge. 1611 */ 1612 boolean_t 1613 pcie_is_child(dev_info_t *dip, dev_info_t *rdip) 1614 { 1615 dev_info_t *cdip = ddi_get_child(dip); 1616 for (; cdip; cdip = ddi_get_next_sibling(cdip)) 1617 if (cdip == rdip) 1618 break; 1619 return (cdip != NULL); 1620 } 1621 1622 boolean_t 1623 pcie_is_link_disabled(dev_info_t *dip) 1624 { 1625 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 1626 1627 if (PCIE_IS_PCIE(bus_p)) { 1628 if (PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL) & 1629 PCIE_LINKCTL_LINK_DISABLE) 1630 return (B_TRUE); 1631 } 1632 return (B_FALSE); 1633 } 1634 1635 /* 1636 * Initialize the MPS for a root port. 1637 * 1638 * dip - dip of root port device. 
 */
void
pcie_init_root_port_mps(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	int rp_cap, max_supported = pcie_max_mps;

	/* Find the smallest MPS capability in the fabric below this RP. */
	(void) pcie_get_fabric_mps(ddi_get_parent(dip),
	    ddi_get_child(dip), &max_supported);

	/* Clamp to what the root port itself advertises in DevCap. */
	rp_cap = PCI_CAP_GET16(bus_p->bus_cfg_hdl, NULL,
	    bus_p->bus_pcie_off, PCIE_DEVCAP) &
	    PCIE_DEVCAP_MAX_PAYLOAD_MASK;

	if (rp_cap < max_supported)
		max_supported = rp_cap;

	bus_p->bus_mps = max_supported;
	(void) pcie_initchild_mps(dip);
}

/*
 * Initialize the Maximum Payload Size of a device.
 *
 * cdip - dip of device.
 *
 * returns - DDI_SUCCESS or DDI_FAILURE
 */
int
pcie_initchild_mps(dev_info_t *cdip)
{
	int		max_payload_size;
	pcie_bus_t	*bus_p;
	dev_info_t	*pdip = ddi_get_parent(cdip);
	uint8_t		dev_type;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL) {
		PCIE_DBG("%s: BUS not found.\n",
		    ddi_driver_name(cdip));
		return (DDI_FAILURE);
	}

	dev_type = bus_p->bus_dev_type;

	/*
	 * For ARI Devices, only function zero's MPS needs to be set.
	 */
	if ((dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) &&
	    (pcie_ari_is_enabled(pdip) == PCIE_ARI_FORW_ENABLED)) {
		pcie_req_id_t child_bdf;

		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
			return (DDI_FAILURE);
		/* With ARI the function number is the low bits of the bdf. */
		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) != 0)
			return (DDI_SUCCESS);
	}

	if (PCIE_IS_RP(bus_p)) {
		/*
		 * If this device is a root port, then the mps scan
		 * saved the mps in the root ports bus_p.
		 */
		max_payload_size = bus_p->bus_mps;
	} else {
		/*
		 * If the device is not a root port, then the mps of
		 * its parent should be used.
		 */
		pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip);
		max_payload_size = parent_bus_p->bus_mps;
	}

	/* A negative bus_mps means the fabric MPS is unknown; skip setup. */
	if (PCIE_IS_PCIE(bus_p) && (max_payload_size >= 0)) {
		pcie_bus_t *rootp_bus_p = PCIE_DIP2BUS(bus_p->bus_rp_dip);
		uint16_t mask, dev_ctrl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL),
		    mps = PCIE_CAP_GET(16, bus_p, PCIE_DEVCAP) &
		    PCIE_DEVCAP_MAX_PAYLOAD_MASK;

		mps = MIN(mps, (uint16_t)max_payload_size);

		/*
		 * If the MPS to be set is less than the root ports
		 * MPS, then MRRS will have to be set the same as MPS.
		 */
		mask = ((mps < rootp_bus_p->bus_mps) ?
		    PCIE_DEVCTL_MAX_READ_REQ_MASK : 0) |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK;

		dev_ctrl &= ~mask;
		mask = ((mps < rootp_bus_p->bus_mps)
		    ? mps << PCIE_DEVCTL_MAX_READ_REQ_SHIFT : 0)
		    | (mps << PCIE_DEVCTL_MAX_PAYLOAD_SHIFT);

		dev_ctrl |= mask;

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);

		bus_p->bus_mps = mps;
	}

	return (DDI_SUCCESS);
}

/*
 * Scans a device tree/branch for a maximum payload size capabilities.
 *
 * rc_dip - dip of Root Complex.
 * dip - dip of device where scan will begin.
 * max_supported (IN) - maximum allowable MPS.
 * max_supported (OUT) - maximum payload size capability of fabric.
1750 */ 1751 void 1752 pcie_get_fabric_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported) 1753 { 1754 if (dip == NULL) 1755 return; 1756 1757 /* 1758 * Perform a fabric scan to obtain Maximum Payload Capabilities 1759 */ 1760 (void) pcie_scan_mps(rc_dip, dip, max_supported); 1761 1762 PCIE_DBG("MPS: Highest Common MPS= %x\n", max_supported); 1763 } 1764 1765 /* 1766 * Scans fabric and determines Maximum Payload Size based on 1767 * highest common denominator alogorithm 1768 */ 1769 static void 1770 pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported) 1771 { 1772 int circular_count; 1773 pcie_max_supported_t max_pay_load_supported; 1774 1775 max_pay_load_supported.dip = rc_dip; 1776 max_pay_load_supported.highest_common_mps = *max_supported; 1777 1778 ndi_devi_enter(ddi_get_parent(dip), &circular_count); 1779 ddi_walk_devs(dip, pcie_get_max_supported, 1780 (void *)&max_pay_load_supported); 1781 ndi_devi_exit(ddi_get_parent(dip), circular_count); 1782 1783 *max_supported = max_pay_load_supported.highest_common_mps; 1784 } 1785 1786 /* 1787 * Called as part of the Maximum Payload Size scan. 
 */
static int
pcie_get_max_supported(dev_info_t *dip, void *arg)
{
	uint32_t max_supported;
	uint16_t cap_ptr;
	pcie_max_supported_t *current = (pcie_max_supported_t *)arg;
	pci_regspec_t *reg;
	int rlen;
	caddr_t virt;
	ddi_acc_handle_t config_handle;

	/* Walk callback; errors are ignored and the walk simply continues. */
	if (ddi_get_child(current->dip) == NULL) {
		goto fail1;
	}

	if (pcie_dev(dip) == DDI_FAILURE) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s: "
		    "Not a PCIe dev\n", ddi_driver_name(dip));
		goto fail1;
	}

	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
	    (caddr_t)&reg, &rlen) != DDI_PROP_SUCCESS) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s: "
		    "Can not read reg\n", ddi_driver_name(dip));
		goto fail1;
	}

	/* Map the device's config space so its DevCap can be read. */
	if (pcie_map_phys(ddi_get_child(current->dip), reg, &virt,
	    &config_handle) != DDI_SUCCESS) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s: pcie_map_phys "
		    "failed\n", ddi_driver_name(dip));
		goto fail2;
	}

	if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, &cap_ptr)) ==
	    DDI_FAILURE) {
		goto fail3;
	}

	max_supported = PCI_CAP_GET16(config_handle, NULL, cap_ptr,
	    PCIE_DEVCAP) & PCIE_DEVCAP_MAX_PAYLOAD_MASK;

	PCIE_DBG("PCIE MPS: %s: MPS Capabilities %x\n", ddi_driver_name(dip),
	    max_supported);

	/* Track the smallest capability seen so far (highest common MPS). */
	if (max_supported < current->highest_common_mps)
		current->highest_common_mps = max_supported;

	/* Cleanup ladder: unmap, then free the "reg" property buffer. */
fail3:
	pcie_unmap_phys(&config_handle, reg);
fail2:
	kmem_free(reg, rlen);
fail1:
	return (DDI_WALK_CONTINUE);
}

/*
 * Determines if there are any root ports attached to a root complex.
 *
 * dip - dip of root complex
 *
 * Returns - DDI_SUCCESS if there is at least one root port otherwise
 * DDI_FAILURE.
1853 */ 1854 int 1855 pcie_root_port(dev_info_t *dip) 1856 { 1857 int port_type; 1858 uint16_t cap_ptr; 1859 ddi_acc_handle_t config_handle; 1860 dev_info_t *cdip = ddi_get_child(dip); 1861 1862 /* 1863 * Determine if any of the children of the passed in dip 1864 * are root ports. 1865 */ 1866 for (; cdip; cdip = ddi_get_next_sibling(cdip)) { 1867 1868 if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS) 1869 continue; 1870 1871 if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, 1872 &cap_ptr)) == DDI_FAILURE) { 1873 pci_config_teardown(&config_handle); 1874 continue; 1875 } 1876 1877 port_type = PCI_CAP_GET16(config_handle, NULL, cap_ptr, 1878 PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK; 1879 1880 pci_config_teardown(&config_handle); 1881 1882 if (port_type == PCIE_PCIECAP_DEV_TYPE_ROOT) 1883 return (DDI_SUCCESS); 1884 } 1885 1886 /* No root ports were found */ 1887 1888 return (DDI_FAILURE); 1889 } 1890 1891 /* 1892 * Function that determines if a device a PCIe device. 1893 * 1894 * dip - dip of device. 1895 * 1896 * returns - DDI_SUCCESS if device is a PCIe device, otherwise DDI_FAILURE. 1897 */ 1898 int 1899 pcie_dev(dev_info_t *dip) 1900 { 1901 /* get parent device's device_type property */ 1902 char *device_type; 1903 int rc = DDI_FAILURE; 1904 dev_info_t *pdip = ddi_get_parent(dip); 1905 1906 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, 1907 DDI_PROP_DONTPASS, "device_type", &device_type) 1908 != DDI_PROP_SUCCESS) { 1909 return (DDI_FAILURE); 1910 } 1911 1912 if (strcmp(device_type, "pciex") == 0) 1913 rc = DDI_SUCCESS; 1914 else 1915 rc = DDI_FAILURE; 1916 1917 ddi_prop_free(device_type); 1918 return (rc); 1919 } 1920 1921 /* 1922 * Function to map in a device's memory space. 
 */
static int
pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
    caddr_t *addrp, ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	ddi_device_acc_attr_t attr;

	/* Little-endian, strictly-ordered, cautious (fault-safe) access. */
	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	attr.devacc_attr_access = DDI_CAUTIOUS_ACC;

	/* Allocate and initialize an access handle for the mapping. */
	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	hp = impl_acc_hdl_get(*handlep);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = 0;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = attr;

	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_REGSPEC;
	mr.map_obj.rp = (struct regspec *)phys_spec;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	result = ddi_map(dip, &mr, 0, 0, addrp);

	if (result != DDI_SUCCESS) {
		/* A failed mapping must not leak the access handle. */
		impl_acc_hdl_free(*handlep);
		*handlep = (ddi_acc_handle_t)NULL;
	} else {
		hp->ah_addr = *addrp;
	}

	return (result);
}

/*
 * Map out memory that was mapped in with pcie_map_phys();
 */
static void
pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);

	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_REGSPEC;
	mr.map_obj.rp = (struct regspec *)ph;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
	    hp->ah_len, &hp->ah_addr);

	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;
}

/*
 * Record whether this device treats RBER as fatal in its fault data.
 */
void
pcie_set_rber_fatal(dev_info_t *dip,
    boolean_t val)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	bus_p->bus_pfd->pe_rber_fatal = val;
}

/*
 * Return parent Root Port's pe_rber_fatal value.
 */
boolean_t
pcie_get_rber_fatal(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	pcie_bus_t *rp_bus_p = PCIE_DIP2UPBUS(bus_p->bus_rp_dip);
	return (rp_bus_p->bus_pfd->pe_rber_fatal);
}

/*
 * Determine whether ARI forwarding can be enabled on this port: the
 * device must be a downstream switch port or root port, its PCIe
 * capability must be version 2.0 or later, and Device Capabilities 2
 * must advertise ARI Forwarding.
 */
int
pcie_ari_supported(dev_info_t *dip)
{
	uint32_t devcap2;
	uint16_t pciecap;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	uint8_t dev_type;

	PCIE_DBG("pcie_ari_supported: dip=%p\n", dip);

	if (bus_p == NULL)
		return (PCIE_ARI_FORW_NOT_SUPPORTED);

	dev_type = bus_p->bus_dev_type;

	/* Only downstream switch ports and root ports can forward ARI. */
	if ((dev_type != PCIE_PCIECAP_DEV_TYPE_DOWN) &&
	    (dev_type != PCIE_PCIECAP_DEV_TYPE_ROOT))
		return (PCIE_ARI_FORW_NOT_SUPPORTED);

	/* Global tunable to force ARI off. */
	if (pcie_disable_ari) {
		PCIE_DBG("pcie_ari_supported: dip=%p: ARI Disabled\n", dip);
		return (PCIE_ARI_FORW_NOT_SUPPORTED);
	}

	pciecap = PCIE_CAP_GET(16, bus_p, PCIE_PCIECAP);

	/* DevCap2 only exists for PCIe capability version 2.0 and later. */
	if ((pciecap & PCIE_PCIECAP_VER_MASK) < PCIE_PCIECAP_VER_2_0) {
		PCIE_DBG("pcie_ari_supported: dip=%p: Not 2.0\n", dip);
		return (PCIE_ARI_FORW_NOT_SUPPORTED);
	}

	devcap2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCAP2);

	PCIE_DBG("pcie_ari_supported: dip=%p: DevCap2=0x%x\n",
	    dip, devcap2);

	if (devcap2 & PCIE_DEVCAP2_ARI_FORWARD) {
		PCIE_DBG("pcie_ari_supported: "
		    "dip=%p: ARI Forwarding is supported\n", dip);
		return (PCIE_ARI_FORW_SUPPORTED);
	}
	return (PCIE_ARI_FORW_NOT_SUPPORTED);
}

/*
 * Turn on ARI forwarding in this port's Device Control 2 register.
 */
int
pcie_ari_enable(dev_info_t *dip)
{
	uint16_t devctl2;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	PCIE_DBG("pcie_ari_enable: dip=%p\n", dip);

	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
		return (DDI_FAILURE);

	devctl2 =
PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2); 2068 devctl2 |= PCIE_DEVCTL2_ARI_FORWARD_EN; 2069 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2); 2070 2071 PCIE_DBG("pcie_ari_enable: dip=%p: writing 0x%x to DevCtl2\n", 2072 dip, devctl2); 2073 2074 return (DDI_SUCCESS); 2075 } 2076 2077 int 2078 pcie_ari_disable(dev_info_t *dip) 2079 { 2080 uint16_t devctl2; 2081 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 2082 2083 PCIE_DBG("pcie_ari_disable: dip=%p\n", dip); 2084 2085 if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED) 2086 return (DDI_FAILURE); 2087 2088 devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2); 2089 devctl2 &= ~PCIE_DEVCTL2_ARI_FORWARD_EN; 2090 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2); 2091 2092 PCIE_DBG("pcie_ari_disable: dip=%p: writing 0x%x to DevCtl2\n", 2093 dip, devctl2); 2094 2095 return (DDI_SUCCESS); 2096 } 2097 2098 int 2099 pcie_ari_is_enabled(dev_info_t *dip) 2100 { 2101 uint16_t devctl2; 2102 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 2103 2104 PCIE_DBG("pcie_ari_is_enabled: dip=%p\n", dip); 2105 2106 if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED) 2107 return (PCIE_ARI_FORW_DISABLED); 2108 2109 devctl2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCTL2); 2110 2111 PCIE_DBG("pcie_ari_is_enabled: dip=%p: DevCtl2=0x%x\n", 2112 dip, devctl2); 2113 2114 if (devctl2 & PCIE_DEVCTL2_ARI_FORWARD_EN) { 2115 PCIE_DBG("pcie_ari_is_enabled: " 2116 "dip=%p: ARI Forwarding is enabled\n", dip); 2117 return (PCIE_ARI_FORW_ENABLED); 2118 } 2119 2120 return (PCIE_ARI_FORW_DISABLED); 2121 } 2122 2123 int 2124 pcie_ari_device(dev_info_t *dip) 2125 { 2126 ddi_acc_handle_t handle; 2127 uint16_t cap_ptr; 2128 2129 PCIE_DBG("pcie_ari_device: dip=%p\n", dip); 2130 2131 /* 2132 * XXX - This function may be called before the bus_p structure 2133 * has been populated. This code can be changed to remove 2134 * pci_config_setup()/pci_config_teardown() when the RFE 2135 * to populate the bus_p structures early in boot is putback. 
	 */

	/* First make sure it is a PCIe device */

	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
		return (PCIE_NOT_ARI_DEVICE);

	if ((PCI_CAP_LOCATE(handle, PCI_CAP_ID_PCI_E, &cap_ptr))
	    != DDI_SUCCESS) {
		pci_config_teardown(&handle);
		return (PCIE_NOT_ARI_DEVICE);
	}

	/* Locate the ARI Capability */

	if ((PCI_CAP_LOCATE(handle, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI),
	    &cap_ptr)) == DDI_FAILURE) {
		pci_config_teardown(&handle);
		return (PCIE_NOT_ARI_DEVICE);
	}

	/* ARI Capability was found so it must be a ARI device */
	PCIE_DBG("pcie_ari_device: ARI Device dip=%p\n", dip);

	pci_config_teardown(&handle);
	return (PCIE_ARI_DEVICE);
}

/*
 * Read the Next Function Number field from the device's ARI capability
 * into *func.  Returns DDI_SUCCESS, or DDI_FAILURE if config space or
 * the ARI capability cannot be accessed.
 */
int
pcie_ari_get_next_function(dev_info_t *dip, int *func)
{
	uint32_t val;
	uint16_t cap_ptr, next_function;
	ddi_acc_handle_t handle;

	/*
	 * XXX - This function may be called before the bus_p structure
	 * has been populated.  This code can be changed to remove
	 * pci_config_setup()/pci_config_teardown() when the RFE
	 * to populate the bus_p structures early in boot is putback.
	 */

	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if ((PCI_CAP_LOCATE(handle,
	    PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI), &cap_ptr)) == DDI_FAILURE) {
		pci_config_teardown(&handle);
		return (DDI_FAILURE);
	}

	val = PCI_CAP_GET32(handle, NULL, cap_ptr, PCIE_ARI_CAP);

	/* Extract the Next Function Number field from the ARI capability. */
	next_function = (val >> PCIE_ARI_CAP_NEXT_FUNC_SHIFT) &
	    PCIE_ARI_CAP_NEXT_FUNC_MASK;

	pci_config_teardown(&handle);

	*func = next_function;

	return (DDI_SUCCESS);
}

/*
 * Find the child of dip whose ARI function number matches 'function'.
 * Returns NULL if no match is found or a child's bdf cannot be read.
 */
dev_info_t *
pcie_func_to_dip(dev_info_t *dip, pcie_req_id_t function)
{
	pcie_req_id_t child_bdf;
	dev_info_t *cdip;

	for (cdip = ddi_get_child(dip); cdip;
	    cdip = ddi_get_next_sibling(cdip)) {

		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
			return (NULL);

		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) == function)
			return (cdip);
	}
	return (NULL);
}

#ifdef DEBUG

/*
 * Dump the interesting fields of a pcie_bus_t via pcie_dbg().
 */
static void
pcie_print_bus(pcie_bus_t *bus_p)
{
	pcie_dbg("\tbus_dip = 0x%p\n", bus_p->bus_dip);
	pcie_dbg("\tbus_fm_flags = 0x%x\n", bus_p->bus_fm_flags);

	pcie_dbg("\tbus_bdf = 0x%x\n", bus_p->bus_bdf);
	pcie_dbg("\tbus_dev_ven_id = 0x%x\n", bus_p->bus_dev_ven_id);
	pcie_dbg("\tbus_rev_id = 0x%x\n", bus_p->bus_rev_id);
	pcie_dbg("\tbus_hdr_type = 0x%x\n", bus_p->bus_hdr_type);
	pcie_dbg("\tbus_dev_type = 0x%x\n", bus_p->bus_dev_type);
	pcie_dbg("\tbus_bdg_secbus = 0x%x\n", bus_p->bus_bdg_secbus);
	pcie_dbg("\tbus_pcie_off = 0x%x\n", bus_p->bus_pcie_off);
	pcie_dbg("\tbus_aer_off = 0x%x\n", bus_p->bus_aer_off);
	pcie_dbg("\tbus_pcix_off = 0x%x\n", bus_p->bus_pcix_off);
	pcie_dbg("\tbus_ecc_ver = 0x%x\n", bus_p->bus_ecc_ver);
}

/*
 * For debugging purposes set pcie_dbg_print != 0 to see printf messages
 * during interrupt.
 *
 * When a proper solution is in place this code will disappear.
 * Potential solutions are:
 * o circular buffers
 * o taskq to print at lower pil
 */
int pcie_dbg_print = 0;

/* PRINTFLIKE1 */
void
pcie_dbg(char *fmt, ...)
{
	va_list ap;

	if (!pcie_debug_flags) {
		return;
	}
	va_start(ap, fmt);
	if (servicing_interrupt()) {
		/* Only print from interrupt context when explicitly asked. */
		if (pcie_dbg_print) {
			prom_vprintf(fmt, ap);
		}
	} else {
		prom_vprintf(fmt, ap);
	}
	va_end(ap);
}
#endif /* DEBUG */

#if defined(__i386) || defined(__amd64)
/*
 * Inspect a PCI-PCI bridge's I/O and memory base registers and flag
 * empty (zero-based) windows.
 *
 * NOTE(review): *empty_io_range / *empty_mem_range are only ever set to
 * B_TRUE here; callers are expected to initialize them to B_FALSE.
 */
static void
pcie_check_io_mem_range(ddi_acc_handle_t cfg_hdl, boolean_t *empty_io_range,
    boolean_t *empty_mem_range)
{
	uint8_t	class, subclass;
	uint_t	val;

	class = pci_config_get8(cfg_hdl, PCI_CONF_BASCLASS);
	subclass = pci_config_get8(cfg_hdl, PCI_CONF_SUBCLASS);

	/* Only PCI-PCI bridges have I/O and memory window registers. */
	if ((class == PCI_CLASS_BRIDGE) && (subclass == PCI_BRIDGE_PCI)) {
		val = (((uint_t)pci_config_get8(cfg_hdl, PCI_BCNF_IO_BASE_LOW) &
		    PCI_BCNF_IO_MASK) << 8);
		/*
		 * Assuming that a zero based io_range[0] implies an
		 * invalid I/O range.  Likewise for mem_range[0].
		 */
		if (val == 0)
			*empty_io_range = B_TRUE;
		val = (((uint_t)pci_config_get16(cfg_hdl, PCI_BCNF_MEM_BASE) &
		    PCI_BCNF_MEM_MASK) << 16);
		if (val == 0)
			*empty_mem_range = B_TRUE;
	}
}

#endif /* defined(__i386) || defined(__amd64) */