/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/promif.h>
#include <sys/disp.h>
#include <sys/stat.h>
#include <sys/file.h>
#include <sys/pci_cap.h>
#include <sys/pci_impl.h>
#include <sys/pcie_impl.h>
#include <sys/hotplug/pci/pcie_hp.h>
#include <sys/hotplug/pci/pciehpc.h>
#include <sys/hotplug/pci/pcishpc.h>
#include <sys/hotplug/pci/pcicfg.h>
#include <sys/pci_cfgacc.h>

/* Local function prototypes */
static void pcie_init_pfd(dev_info_t *);
static void pcie_fini_pfd(dev_info_t *);

#if defined(__i386) || defined(__amd64)
static void pcie_check_io_mem_range(ddi_acc_handle_t, boolean_t *, boolean_t *);
#endif /* defined(__i386) || defined(__amd64) */

#ifdef DEBUG
uint_t pcie_debug_flags = 0;
static void pcie_print_bus(pcie_bus_t *bus_p);
void pcie_dbg(char *fmt, ...);
#endif /* DEBUG */

/* Variable to control default PCI-Express config settings */
ushort_t pcie_command_default =
    PCI_COMM_SERR_ENABLE |
    PCI_COMM_WAIT_CYC_ENAB |
    PCI_COMM_PARITY_DETECT |
    PCI_COMM_ME |
    PCI_COMM_MAE |
    PCI_COMM_IO;

/* xxx_fw are bits that are controlled by FW and should not be modified */
ushort_t pcie_command_default_fw =
    PCI_COMM_SPEC_CYC |
    PCI_COMM_MEMWR_INVAL |
    PCI_COMM_PALETTE_SNOOP |
    PCI_COMM_WAIT_CYC_ENAB |
    0xF800; /* Reserved Bits */

ushort_t pcie_bdg_command_default_fw =
    PCI_BCNF_BCNTRL_ISA_ENABLE |
    PCI_BCNF_BCNTRL_VGA_ENABLE |
    0xF000; /* Reserved Bits */

/* PCI-Express Base error defaults */
ushort_t pcie_base_err_default =
    PCIE_DEVCTL_CE_REPORTING_EN |
    PCIE_DEVCTL_NFE_REPORTING_EN |
    PCIE_DEVCTL_FE_REPORTING_EN |
    PCIE_DEVCTL_UR_REPORTING_EN;

/* PCI-Express Device Control Register */
uint16_t pcie_devctl_default = PCIE_DEVCTL_RO_EN |
    PCIE_DEVCTL_MAX_READ_REQ_512;

/* PCI-Express AER Root Control Register */
#define PCIE_ROOT_SYS_ERR	(PCIE_ROOTCTL_SYS_ERR_ON_CE_EN | \
				PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN | \
				PCIE_ROOTCTL_SYS_ERR_ON_FE_EN)

ushort_t pcie_root_ctrl_default =
    PCIE_ROOTCTL_SYS_ERR_ON_CE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN;

/* PCI-Express Root Error Command Register */
ushort_t pcie_root_error_cmd_default =
    PCIE_AER_RE_CMD_CE_REP_EN |
    PCIE_AER_RE_CMD_NFE_REP_EN |
    PCIE_AER_RE_CMD_FE_REP_EN;
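/*
 * Illustrative sketch (not compiled into the module) of how the
 * firmware-owned bits above are meant to be used: bits set in
 * pcie_command_default_fw are preserved from the current register
 * contents, and everything else is forced to pcie_command_default.
 * This mirrors the read-modify-write that pcie_initchild() performs
 * below; bus_p is assumed to be a valid pcie_bus_t pointer.
 *
 *	reg16 = PCIE_GET(16, bus_p, PCI_CONF_COMM);
 *	tmp16 = (reg16 & pcie_command_default_fw) | pcie_command_default;
 *	PCIE_PUT(16, bus_p, PCI_CONF_COMM, tmp16);
 */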
/* ECRC settings in the PCIe AER Control Register */
uint32_t pcie_ecrc_value =
    PCIE_AER_CTL_ECRC_GEN_ENA |
    PCIE_AER_CTL_ECRC_CHECK_ENA;

/*
 * If a particular platform wants to disable certain errors such as UR/MA,
 * instead of using #defines have the platform's PCIe Root Complex driver set
 * these masks using the pcie_get_XXX_mask and pcie_set_XXX_mask functions. For
 * x86 the closest thing to a PCIe root complex driver is NPE. For SPARC the
 * closest PCIe root complex driver is PX.
 *
 * pcie_serr_disable_flag : disable SERR only (in RCR and command reg). x86
 * systems may want to disable SERR in general; for root ports, enabling SERR
 * causes NMIs which are not handled and result in a watchdog timeout error.
 */
uint32_t pcie_aer_uce_mask = 0;		/* AER UE Mask */
uint32_t pcie_aer_ce_mask = 0;		/* AER CE Mask */
uint32_t pcie_aer_suce_mask = 0;	/* AER Secondary UE Mask */
uint32_t pcie_serr_disable_flag = 0;	/* Disable SERR */

/* Default severities needed for eversholt.  Error handling doesn't care */
uint32_t pcie_aer_uce_severity = PCIE_AER_UCE_MTLP | PCIE_AER_UCE_RO | \
    PCIE_AER_UCE_FCP | PCIE_AER_UCE_SD | PCIE_AER_UCE_DLP | \
    PCIE_AER_UCE_TRAINING;
uint32_t pcie_aer_suce_severity = PCIE_AER_SUCE_SERR_ASSERT | \
    PCIE_AER_SUCE_UC_ADDR_ERR | PCIE_AER_SUCE_UC_ATTR_ERR | \
    PCIE_AER_SUCE_USC_MSG_DATA_ERR;

int pcie_max_mps = PCIE_DEVCTL_MAX_PAYLOAD_4096 >> 5;
int pcie_disable_ari = 0;

static void pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip,
    int *max_supported);
static int pcie_get_max_supported(dev_info_t *dip, void *arg);
static int pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
    caddr_t *addrp, ddi_acc_handle_t *handlep);
static void pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph);

dev_info_t *pcie_get_rc_dip(dev_info_t *dip);

/*
 * modload support
 */

static struct modlmisc modlmisc = {
    &mod_miscops,	/* Type of module */
    "PCI Express Framework Module"
};

static struct modlinkage modlinkage = {
    MODREV_1,
    (void *)&modlmisc,
    NULL
};

/*
 * Global Variables needed for a non-atomic version of ddi_fm_ereport_post.
 * Currently used to send the pci.fabric ereports whose payload depends on the
 * type of PCI device it is being sent for.
 */
char		*pcie_nv_buf;
nv_alloc_t	*pcie_nvap;
nvlist_t	*pcie_nvl;

int
_init(void)
{
    int rval;

    pcie_nv_buf = kmem_alloc(ERPT_DATA_SZ, KM_SLEEP);
    pcie_nvap = fm_nva_xcreate(pcie_nv_buf, ERPT_DATA_SZ);
    pcie_nvl = fm_nvlist_create(pcie_nvap);

    rval = mod_install(&modlinkage);
    return (rval);
}

int
_fini()
{
    int rval;

    fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
    fm_nva_xdestroy(pcie_nvap);
    kmem_free(pcie_nv_buf, ERPT_DATA_SZ);

    rval = mod_remove(&modlinkage);
    return (rval);
}

int
_info(struct modinfo *modinfop)
{
    return (mod_info(&modlinkage, modinfop));
}
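/*
 * A hedged sketch of how a platform's root-complex driver (npe on x86,
 * px on SPARC) could apply the error-mask tunables declared above at
 * attach time, instead of patching #defines; the attach context shown
 * here is illustrative only.
 *
 *	uint32_t uce_mask = pcie_get_aer_uce_mask();
 *
 *	pcie_set_aer_uce_mask(uce_mask | PCIE_AER_UCE_UR);  (mask URs)
 *	pcie_set_serr_mask(1);               (disable SERR generation)
 */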
/* ARGSUSED */
int
pcie_init(dev_info_t *dip, caddr_t arg)
{
    int ret = DDI_SUCCESS;

    /*
     * Create a "devctl" minor node to support DEVCTL_DEVICE_*
     * and DEVCTL_BUS_* ioctls to this bus.
     */
    if ((ret = ddi_create_minor_node(dip, "devctl", S_IFCHR,
        PCI_MINOR_NUM(ddi_get_instance(dip), PCI_DEVCTL_MINOR),
        DDI_NT_NEXUS, 0)) != DDI_SUCCESS) {
        PCIE_DBG("Failed to create devctl minor node for %s%d\n",
            ddi_driver_name(dip), ddi_get_instance(dip));

        return (ret);
    }

    if ((ret = pcie_hp_init(dip, arg)) != DDI_SUCCESS) {
        /*
         * On some x86 platforms, we observed unexpected hotplug
         * initialization failures in recent years. The known cause
         * is a hardware issue: while the problem PCI bridges have
         * the Hotplug Capable registers set, the machine actually
         * does not implement the expected ACPI object.
         *
         * We don't want to stop PCI driver attach and system boot
         * just because of this hotplug initialization failure.
         * Continue with a debug message printed.
         */
        PCIE_DBG("%s%d: Failed setting hotplug framework\n",
            ddi_driver_name(dip), ddi_get_instance(dip));

#if defined(__sparc)
        ddi_remove_minor_node(dip, "devctl");

        return (ret);
#endif /* defined(__sparc) */
    }

    return (DDI_SUCCESS);
}

/* ARGSUSED */
int
pcie_uninit(dev_info_t *dip)
{
    int ret = DDI_SUCCESS;

    if (pcie_ari_is_enabled(dip) == PCIE_ARI_FORW_ENABLED)
        (void) pcie_ari_disable(dip);

    if ((ret = pcie_hp_uninit(dip)) != DDI_SUCCESS) {
        PCIE_DBG("Failed to uninitialize hotplug for %s%d\n",
            ddi_driver_name(dip), ddi_get_instance(dip));

        return (ret);
    }

    ddi_remove_minor_node(dip, "devctl");

    return (ret);
}

/*
 * PCIe module interface for enabling hotplug interrupt.
 *
 * It should be called after pcie_init() is done and the bus driver's
 * interrupt handlers have been attached.
 */
int
pcie_hpintr_enable(dev_info_t *dip)
{
    pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
    pcie_hp_ctrl_t *ctrl_p = PCIE_GET_HP_CTRL(dip);

    if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
        (void) (ctrl_p->hc_ops.enable_hpc_intr)(ctrl_p);
    } else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
        (void) pcishpc_enable_irqs(ctrl_p);
    }
    return (DDI_SUCCESS);
}

/*
 * PCIe module interface for disabling hotplug interrupt.
 *
 * It should be called before pcie_uninit() is called and the bus driver's
 * interrupt handlers are detached.
 */
int
pcie_hpintr_disable(dev_info_t *dip)
{
    pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
    pcie_hp_ctrl_t *ctrl_p = PCIE_GET_HP_CTRL(dip);

    if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
        (void) (ctrl_p->hc_ops.disable_hpc_intr)(ctrl_p);
    } else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
        (void) pcishpc_disable_irqs(ctrl_p);
    }
    return (DDI_SUCCESS);
}
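/*
 * Illustrative only: the call ordering a bus driver is expected to
 * follow around the two routines above, per their comments. The dip/arg
 * values and error handling are elided.
 *
 *	(void) pcie_init(dip, arg);
 *	... add interrupt handlers ...
 *	(void) pcie_hpintr_enable(dip);
 *
 *	(void) pcie_hpintr_disable(dip);
 *	... remove interrupt handlers ...
 *	(void) pcie_uninit(dip);
 */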
/* ARGSUSED */
int
pcie_intr(dev_info_t *dip)
{
    return (pcie_hp_intr(dip));
}

/* ARGSUSED */
int
pcie_open(dev_info_t *dip, dev_t *devp, int flags, int otyp, cred_t *credp)
{
    pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

    /*
     * Make sure the open is for the right file type.
     */
    if (otyp != OTYP_CHR)
        return (EINVAL);

    /*
     * Handle the open by tracking the device state.
     */
    if ((bus_p->bus_soft_state == PCI_SOFT_STATE_OPEN_EXCL) ||
        ((flags & FEXCL) &&
        (bus_p->bus_soft_state != PCI_SOFT_STATE_CLOSED))) {
        return (EBUSY);
    }

    if (flags & FEXCL)
        bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN_EXCL;
    else
        bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN;

    return (0);
}

/* ARGSUSED */
int
pcie_close(dev_info_t *dip, dev_t dev, int flags, int otyp, cred_t *credp)
{
    pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

    if (otyp != OTYP_CHR)
        return (EINVAL);

    bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;

    return (0);
}

/* ARGSUSED */
int
pcie_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp)
{
    struct devctl_iocdata *dcp;
    uint_t bus_state;
    int rv = DDI_SUCCESS;

    /*
     * We can use the generic implementation for devctl ioctl
     */
    switch (cmd) {
    case DEVCTL_DEVICE_GETSTATE:
    case DEVCTL_DEVICE_ONLINE:
    case DEVCTL_DEVICE_OFFLINE:
    case DEVCTL_BUS_GETSTATE:
        return (ndi_devctl_ioctl(dip, cmd, arg, mode, 0));
    default:
        break;
    }

    /*
     * read devctl ioctl data
     */
    if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
        return (EFAULT);

    switch (cmd) {
    case DEVCTL_BUS_QUIESCE:
        if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
            if (bus_state == BUS_QUIESCED)
                break;
        (void) ndi_set_bus_state(dip, BUS_QUIESCED);
        break;
    case DEVCTL_BUS_UNQUIESCE:
        if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
            if (bus_state == BUS_ACTIVE)
                break;
        (void) ndi_set_bus_state(dip, BUS_ACTIVE);
        break;
    case DEVCTL_BUS_RESET:
    case DEVCTL_BUS_RESETALL:
    case DEVCTL_DEVICE_RESET:
        rv = ENOTSUP;
        break;
    default:
        rv = ENOTTY;
    }

    ndi_dc_freehdl(dcp);
    return (rv);
}

/* ARGSUSED */
int
pcie_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int flags, char *name, caddr_t valuep, int *lengthp)
{
    if (dev == DDI_DEV_T_ANY)
        goto skip;

    if (PCIE_IS_HOTPLUG_CAPABLE(dip) &&
        strcmp(name, "pci-occupant") == 0) {
        int pci_dev = PCI_MINOR_NUM_TO_PCI_DEVNUM(getminor(dev));

        pcie_hp_create_occupant_props(dip, dev, pci_dev);
    }

skip:
    return (ddi_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp));
}

int
pcie_init_cfghdl(dev_info_t *cdip)
{
    pcie_bus_t *bus_p;
    ddi_acc_handle_t eh = NULL;

    bus_p = PCIE_DIP2BUS(cdip);
    if (bus_p == NULL)
        return (DDI_FAILURE);

    /* Create a config access handle dedicated to error handling */
    if (pci_config_setup(cdip, &eh) != DDI_SUCCESS) {
        cmn_err(CE_WARN, "Cannot setup config access"
            " for BDF 0x%x\n", bus_p->bus_bdf);
        return (DDI_FAILURE);
    }

    bus_p->bus_cfg_hdl = eh;
    return (DDI_SUCCESS);
}

void
pcie_fini_cfghdl(dev_info_t *cdip)
{
    pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip);

    pci_config_teardown(&bus_p->bus_cfg_hdl);
}
469 * 470 * @param pdip root dip (root nexus's dip) 471 * @param cdip child's dip (device's dip) 472 * @return DDI_SUCCESS or DDI_FAILURE 473 */ 474 /* ARGSUSED */ 475 int 476 pcie_initchild(dev_info_t *cdip) 477 { 478 uint16_t tmp16, reg16; 479 pcie_bus_t *bus_p; 480 uint32_t devid, venid; 481 482 bus_p = PCIE_DIP2BUS(cdip); 483 if (bus_p == NULL) { 484 PCIE_DBG("%s: BUS not found.\n", 485 ddi_driver_name(cdip)); 486 487 return (DDI_FAILURE); 488 } 489 490 if (pcie_init_cfghdl(cdip) != DDI_SUCCESS) 491 return (DDI_FAILURE); 492 493 /* 494 * Update pcie_bus_t with real Vendor Id Device Id. 495 * 496 * For assigned devices in IOV environment, the OBP will return 497 * faked device id/vendor id on configration read and for both 498 * properties in root domain. translate_devid() function will 499 * update the properties with real device-id/vendor-id on such 500 * platforms, so that we can utilize the properties here to get 501 * real device-id/vendor-id and overwrite the faked ids. 502 * 503 * For unassigned devices or devices in non-IOV environment, the 504 * operation below won't make a difference. 505 * 506 * The IOV implementation only supports assignment of PCIE 507 * endpoint devices. Devices under pci-pci bridges don't need 508 * operation like this. 509 */ 510 devid = ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS, 511 "device-id", -1); 512 venid = ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS, 513 "vendor-id", -1); 514 bus_p->bus_dev_ven_id = (devid << 16) | (venid & 0xffff); 515 516 /* Clear the device's status register */ 517 reg16 = PCIE_GET(16, bus_p, PCI_CONF_STAT); 518 PCIE_PUT(16, bus_p, PCI_CONF_STAT, reg16); 519 520 /* Setup the device's command register */ 521 reg16 = PCIE_GET(16, bus_p, PCI_CONF_COMM); 522 tmp16 = (reg16 & pcie_command_default_fw) | pcie_command_default; 523 524 #if defined(__i386) || defined(__amd64) 525 boolean_t empty_io_range = B_FALSE; 526 boolean_t empty_mem_range = B_FALSE; 527 /* 528 * Check for empty IO and Mem ranges on bridges. If so disable IO/Mem 529 * access as it can cause a hang if enabled. 530 */ 531 pcie_check_io_mem_range(bus_p->bus_cfg_hdl, &empty_io_range, 532 &empty_mem_range); 533 if ((empty_io_range == B_TRUE) && 534 (pcie_command_default & PCI_COMM_IO)) { 535 tmp16 &= ~PCI_COMM_IO; 536 PCIE_DBG("No I/O range found for %s, bdf 0x%x\n", 537 ddi_driver_name(cdip), bus_p->bus_bdf); 538 } 539 if ((empty_mem_range == B_TRUE) && 540 (pcie_command_default & PCI_COMM_MAE)) { 541 tmp16 &= ~PCI_COMM_MAE; 542 PCIE_DBG("No Mem range found for %s, bdf 0x%x\n", 543 ddi_driver_name(cdip), bus_p->bus_bdf); 544 } 545 #endif /* defined(__i386) || defined(__amd64) */ 546 547 if (pcie_serr_disable_flag && PCIE_IS_PCIE(bus_p)) 548 tmp16 &= ~PCI_COMM_SERR_ENABLE; 549 550 PCIE_PUT(16, bus_p, PCI_CONF_COMM, tmp16); 551 PCIE_DBG_CFG(cdip, bus_p, "COMMAND", 16, PCI_CONF_COMM, reg16); 552 553 /* 554 * If the device has a bus control register then program it 555 * based on the settings in the command register. 556 */ 557 if (PCIE_IS_BDG(bus_p)) { 558 /* Clear the device's secondary status register */ 559 reg16 = PCIE_GET(16, bus_p, PCI_BCNF_SEC_STATUS); 560 PCIE_PUT(16, bus_p, PCI_BCNF_SEC_STATUS, reg16); 561 562 /* Setup the device's secondary command register */ 563 reg16 = PCIE_GET(16, bus_p, PCI_BCNF_BCNTRL); 564 tmp16 = (reg16 & pcie_bdg_command_default_fw); 565 566 tmp16 |= PCI_BCNF_BCNTRL_SERR_ENABLE; 567 /* 568 * Workaround for this Nvidia bridge. 
        /*
         * Workaround for this Nvidia bridge. Don't enable the SERR
         * enable bit in the bridge control register as it could lead to
         * bogus NMIs.
         */
        if (bus_p->bus_dev_ven_id == 0x037010DE)
            tmp16 &= ~PCI_BCNF_BCNTRL_SERR_ENABLE;

        if (pcie_command_default & PCI_COMM_PARITY_DETECT)
            tmp16 |= PCI_BCNF_BCNTRL_PARITY_ENABLE;

        /*
         * Enable Master Abort Mode only if URs have not been masked.
         * For PCI and PCIe-PCI bridges, enabling this bit causes
         * Master Aborts/URs to be forwarded as a UR/TA or SERR. If this
         * bit is masked, posted requests are dropped and non-posted
         * requests are returned with -1.
         */
        if (pcie_aer_uce_mask & PCIE_AER_UCE_UR)
            tmp16 &= ~PCI_BCNF_BCNTRL_MAST_AB_MODE;
        else
            tmp16 |= PCI_BCNF_BCNTRL_MAST_AB_MODE;
        PCIE_PUT(16, bus_p, PCI_BCNF_BCNTRL, tmp16);
        PCIE_DBG_CFG(cdip, bus_p, "SEC CMD", 16, PCI_BCNF_BCNTRL,
            reg16);
    }

    if (PCIE_IS_PCIE(bus_p)) {
        /* Setup PCIe device control register */
        reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
        /* note: MPS/MRRS are initialized in pcie_initchild_mps() */
        tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
            PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
            (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
            PCIE_DEVCTL_MAX_PAYLOAD_MASK));
        PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
        PCIE_DBG_CAP(cdip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);

        /* Enable PCIe errors */
        pcie_enable_errors(cdip);
    }

    bus_p->bus_ari = B_FALSE;
    if ((pcie_ari_is_enabled(ddi_get_parent(cdip))
        == PCIE_ARI_FORW_ENABLED) && (pcie_ari_device(cdip)
        == PCIE_ARI_DEVICE)) {
        bus_p->bus_ari = B_TRUE;
    }

    if (pcie_initchild_mps(cdip) == DDI_FAILURE) {
        pcie_fini_cfghdl(cdip);
        return (DDI_FAILURE);
    }

    return (DDI_SUCCESS);
}

static void
pcie_init_pfd(dev_info_t *dip)
{
    pf_data_t *pfd_p = PCIE_ZALLOC(pf_data_t);
    pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

    PCIE_DIP2PFD(dip) = pfd_p;

    pfd_p->pe_bus_p = bus_p;
    pfd_p->pe_severity_flags = 0;
    pfd_p->pe_orig_severity_flags = 0;
    pfd_p->pe_lock = B_FALSE;
    pfd_p->pe_valid = B_FALSE;

    /* Allocate the root fault struct for both RC and RP */
    if (PCIE_IS_ROOT(bus_p)) {
        PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
        PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
        PCIE_ROOT_EH_SRC(pfd_p) = PCIE_ZALLOC(pf_root_eh_src_t);
    }

    PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
    PFD_AFFECTED_DEV(pfd_p) = PCIE_ZALLOC(pf_affected_dev_t);
    PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;

    if (PCIE_IS_BDG(bus_p))
        PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);

    if (PCIE_IS_PCIE(bus_p)) {
        PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);

        if (PCIE_IS_RP(bus_p))
            PCIE_RP_REG(pfd_p) =
                PCIE_ZALLOC(pf_pcie_rp_err_regs_t);

        PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
        PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;

        if (PCIE_IS_RP(bus_p)) {
            PCIE_ADV_RP_REG(pfd_p) =
                PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
            PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id =
                PCIE_INVALID_BDF;
            PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id =
                PCIE_INVALID_BDF;
        } else if (PCIE_IS_PCIE_BDG(bus_p)) {
            PCIE_ADV_BDG_REG(pfd_p) =
                PCIE_ZALLOC(pf_pcie_adv_bdg_err_regs_t);
            PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
                PCIE_INVALID_BDF;
        }

        if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
            PCIX_BDG_ERR_REG(pfd_p) =
                PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

            if (PCIX_ECC_VERSION_CHECK(bus_p)) {
                PCIX_BDG_ECC_REG(pfd_p, 0) =
                    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
                PCIX_BDG_ECC_REG(pfd_p, 1) =
                    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
            }
        }
    } else if (PCIE_IS_PCIX(bus_p)) {
        if (PCIE_IS_BDG(bus_p)) {
            PCIX_BDG_ERR_REG(pfd_p) =
                PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

            if (PCIX_ECC_VERSION_CHECK(bus_p)) {
                PCIX_BDG_ECC_REG(pfd_p, 0) =
                    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
                PCIX_BDG_ECC_REG(pfd_p, 1) =
                    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
            }
        } else {
            PCIX_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcix_err_regs_t);

            if (PCIX_ECC_VERSION_CHECK(bus_p))
                PCIX_ECC_REG(pfd_p) =
                    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
        }
    }
}

static void
pcie_fini_pfd(dev_info_t *dip)
{
    pf_data_t *pfd_p = PCIE_DIP2PFD(dip);
    pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

    if (PCIE_IS_PCIE(bus_p)) {
        if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
            if (PCIX_ECC_VERSION_CHECK(bus_p)) {
                kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
                    sizeof (pf_pcix_ecc_regs_t));
                kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
                    sizeof (pf_pcix_ecc_regs_t));
            }

            kmem_free(PCIX_BDG_ERR_REG(pfd_p),
                sizeof (pf_pcix_bdg_err_regs_t));
        }

        if (PCIE_IS_RP(bus_p))
            kmem_free(PCIE_ADV_RP_REG(pfd_p),
                sizeof (pf_pcie_adv_rp_err_regs_t));
        else if (PCIE_IS_PCIE_BDG(bus_p))
            kmem_free(PCIE_ADV_BDG_REG(pfd_p),
                sizeof (pf_pcie_adv_bdg_err_regs_t));

        kmem_free(PCIE_ADV_REG(pfd_p),
            sizeof (pf_pcie_adv_err_regs_t));

        if (PCIE_IS_RP(bus_p))
            kmem_free(PCIE_RP_REG(pfd_p),
                sizeof (pf_pcie_rp_err_regs_t));

        kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
    } else if (PCIE_IS_PCIX(bus_p)) {
        if (PCIE_IS_BDG(bus_p)) {
            if (PCIX_ECC_VERSION_CHECK(bus_p)) {
                kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
                    sizeof (pf_pcix_ecc_regs_t));
                kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
                    sizeof (pf_pcix_ecc_regs_t));
            }

            kmem_free(PCIX_BDG_ERR_REG(pfd_p),
                sizeof (pf_pcix_bdg_err_regs_t));
        } else {
            if (PCIX_ECC_VERSION_CHECK(bus_p))
                kmem_free(PCIX_ECC_REG(pfd_p),
                    sizeof (pf_pcix_ecc_regs_t));

            kmem_free(PCIX_ERR_REG(pfd_p),
                sizeof (pf_pcix_err_regs_t));
        }
    }

    if (PCIE_IS_BDG(bus_p))
        kmem_free(PCI_BDG_ERR_REG(pfd_p),
            sizeof (pf_pci_bdg_err_regs_t));

    kmem_free(PFD_AFFECTED_DEV(pfd_p), sizeof (pf_affected_dev_t));
    kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));

    if (PCIE_IS_ROOT(bus_p)) {
        kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
        kmem_free(PCIE_ROOT_EH_SRC(pfd_p), sizeof (pf_root_eh_src_t));
    }

    kmem_free(PCIE_DIP2PFD(dip), sizeof (pf_data_t));

    PCIE_DIP2PFD(dip) = NULL;
}
/*
 * Special functions to allocate pf_data_t's for PCIe root complexes.
 * Note: Root Complex not Root Port
 */
void
pcie_rc_init_pfd(dev_info_t *dip, pf_data_t *pfd_p)
{
    pfd_p->pe_bus_p = PCIE_DIP2DOWNBUS(dip);
    pfd_p->pe_severity_flags = 0;
    pfd_p->pe_orig_severity_flags = 0;
    pfd_p->pe_lock = B_FALSE;
    pfd_p->pe_valid = B_FALSE;

    PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
    PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
    PCIE_ROOT_EH_SRC(pfd_p) = PCIE_ZALLOC(pf_root_eh_src_t);
    PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
    PFD_AFFECTED_DEV(pfd_p) = PCIE_ZALLOC(pf_affected_dev_t);
    PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;
    PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);
    PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);
    PCIE_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_rp_err_regs_t);
    PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
    PCIE_ADV_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
    PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id = PCIE_INVALID_BDF;
    PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id = PCIE_INVALID_BDF;

    PCIE_ADV_REG(pfd_p)->pcie_ue_sev = pcie_aer_uce_severity;
}

void
pcie_rc_fini_pfd(pf_data_t *pfd_p)
{
    kmem_free(PCIE_ADV_RP_REG(pfd_p), sizeof (pf_pcie_adv_rp_err_regs_t));
    kmem_free(PCIE_ADV_REG(pfd_p), sizeof (pf_pcie_adv_err_regs_t));
    kmem_free(PCIE_RP_REG(pfd_p), sizeof (pf_pcie_rp_err_regs_t));
    kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
    kmem_free(PCI_BDG_ERR_REG(pfd_p), sizeof (pf_pci_bdg_err_regs_t));
    kmem_free(PFD_AFFECTED_DEV(pfd_p), sizeof (pf_affected_dev_t));
    kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));
    kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
    kmem_free(PCIE_ROOT_EH_SRC(pfd_p), sizeof (pf_root_eh_src_t));
}
/*
 * init pcie_bus_t for root complex
 *
 * Only a few of the fields in bus_t are valid for a root complex.
 * The fields that are bracketed are initialized in this routine:
 *
 * dev_info_t *		<bus_dip>
 * dev_info_t *		bus_rp_dip
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		<bus_fm_flags>
 * pcie_req_id_t	bus_bdf
 * pcie_req_id_t	bus_rp_bdf
 * uint32_t		bus_dev_ven_id
 * uint8_t		bus_rev_id
 * uint8_t		<bus_hdr_type>
 * uint16_t		<bus_dev_type>
 * uint8_t		bus_bdg_secbus
 * uint16_t		bus_pcie_off
 * uint16_t		<bus_aer_off>
 * uint16_t		bus_pcix_off
 * uint16_t		bus_ecc_ver
 * pci_bus_range_t	bus_bus_range
 * ppb_ranges_t *	bus_addr_ranges
 * int			bus_addr_entries
 * pci_regspec_t *	bus_assigned_addr
 * int			bus_assigned_entries
 * pf_data_t *		bus_pfd
 * pcie_domain_t *	<bus_dom>
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void *		bus_plat_private
 */
void
pcie_rc_init_bus(dev_info_t *dip)
{
    pcie_bus_t *bus_p;

    bus_p = (pcie_bus_t *)kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);
    bus_p->bus_dip = dip;
    bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_RC_PSEUDO;
    bus_p->bus_hdr_type = PCI_HEADER_ONE;

    /* Fake that there are AER logs */
    bus_p->bus_aer_off = (uint16_t)-1;

    /* Needed only for handle lookup */
    bus_p->bus_fm_flags |= PF_FM_READY;

    ndi_set_bus_private(dip, B_FALSE, DEVI_PORT_TYPE_PCI, bus_p);

    PCIE_BUS2DOM(bus_p) = PCIE_ZALLOC(pcie_domain_t);
}

void
pcie_rc_fini_bus(dev_info_t *dip)
{
    pcie_bus_t *bus_p = PCIE_DIP2DOWNBUS(dip);

    ndi_set_bus_private(dip, B_FALSE, NULL, NULL);
    kmem_free(PCIE_BUS2DOM(bus_p), sizeof (pcie_domain_t));
    kmem_free(bus_p, sizeof (pcie_bus_t));
}
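/*
 * A rough sketch, for illustration only, of how a root-complex nexus
 * driver might pair the RC init/fini routines above during attach and
 * detach; pfd_p is assumed to be pf_data_t storage owned by the RC
 * driver (e.g. in its soft state), and the exact ordering is up to the
 * platform driver.
 *
 *	pcie_rc_init_bus(dip);
 *	pcie_rc_init_pfd(dip, pfd_p);
 *	...
 *	pcie_rc_fini_pfd(pfd_p);
 *	pcie_rc_fini_bus(dip);
 */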
/*
 * partially init pcie_bus_t for device (dip,bdf) for accessing pci
 * config space
 *
 * This routine is invoked during boot, either after creating a devinfo node
 * (x86 case) or during px driver attach (sparc case); it is also invoked
 * in hotplug context after a devinfo node is created.
 *
 * The fields that are bracketed are initialized if flag PCIE_BUS_INITIAL
 * is set:
 *
 * dev_info_t *		<bus_dip>
 * dev_info_t *		<bus_rp_dip>
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		bus_fm_flags
 * pcie_req_id_t	<bus_bdf>
 * pcie_req_id_t	<bus_rp_bdf>
 * uint32_t		<bus_dev_ven_id>
 * uint8_t		<bus_rev_id>
 * uint8_t		<bus_hdr_type>
 * uint16_t		<bus_dev_type>
 * uint8_t		<bus_bdg_secbus>
 * uint16_t		<bus_pcie_off>
 * uint16_t		<bus_aer_off>
 * uint16_t		<bus_pcix_off>
 * uint16_t		<bus_ecc_ver>
 * pci_bus_range_t	bus_bus_range
 * ppb_ranges_t *	bus_addr_ranges
 * int			bus_addr_entries
 * pci_regspec_t *	bus_assigned_addr
 * int			bus_assigned_entries
 * pf_data_t *		bus_pfd
 * pcie_domain_t *	bus_dom
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void *		bus_plat_private
 *
 * The fields that are bracketed are initialized if flag PCIE_BUS_FINAL
 * is set:
 *
 * dev_info_t *		bus_dip
 * dev_info_t *		bus_rp_dip
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		bus_fm_flags
 * pcie_req_id_t	bus_bdf
 * pcie_req_id_t	bus_rp_bdf
 * uint32_t		bus_dev_ven_id
 * uint8_t		bus_rev_id
 * uint8_t		bus_hdr_type
 * uint16_t		bus_dev_type
 * uint8_t		<bus_bdg_secbus>
 * uint16_t		bus_pcie_off
 * uint16_t		bus_aer_off
 * uint16_t		bus_pcix_off
 * uint16_t		bus_ecc_ver
 * pci_bus_range_t	<bus_bus_range>
 * ppb_ranges_t *	<bus_addr_ranges>
 * int			<bus_addr_entries>
 * pci_regspec_t *	<bus_assigned_addr>
 * int			<bus_assigned_entries>
 * pf_data_t *		<bus_pfd>
 * pcie_domain_t *	bus_dom
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void *		<bus_plat_private>
 */

pcie_bus_t *
pcie_init_bus(dev_info_t *dip, pcie_req_id_t bdf, uint8_t flags)
{
    uint16_t status, base, baseptr, num_cap;
    uint32_t capid;
    int range_size;
    pcie_bus_t *bus_p;
    dev_info_t *rcdip;
    dev_info_t *pdip;
    const char *errstr = NULL;

    if (!(flags & PCIE_BUS_INITIAL))
        goto initial_done;

    bus_p = kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);

    bus_p->bus_dip = dip;
    bus_p->bus_bdf = bdf;

    rcdip = pcie_get_rc_dip(dip);
    ASSERT(rcdip != NULL);

    /* Save the Vendor ID, Device ID and revision ID */
    bus_p->bus_dev_ven_id = pci_cfgacc_get32(rcdip, bdf, PCI_CONF_VENID);
    bus_p->bus_rev_id = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_REVID);
    /* Save the Header Type */
    bus_p->bus_hdr_type = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_HEADER);
    bus_p->bus_hdr_type &= PCI_HEADER_TYPE_M;

    /*
     * Figure out the device type and all the relevant capability offsets
     */
    /* set default value */
    bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;

    status = pci_cfgacc_get16(rcdip, bdf, PCI_CONF_STAT);
    if (status == PCI_CAP_EINVAL16 || !(status & PCI_STAT_CAP))
        goto caps_done; /* capability not supported */

    /* Relevant conventional capabilities first */

    /* Conventional caps: PCI_CAP_ID_PCI_E, PCI_CAP_ID_PCIX */
    num_cap = 2;

    switch (bus_p->bus_hdr_type) {
    case PCI_HEADER_ZERO:
        baseptr = PCI_CONF_CAP_PTR;
        break;
    case PCI_HEADER_PPB:
        baseptr = PCI_BCNF_CAP_PTR;
        break;
    case PCI_HEADER_CARDBUS:
        baseptr = PCI_CBUS_CAP_PTR;
        break;
    default:
        cmn_err(CE_WARN, "%s: unexpected pci header type:%x",
            __func__, bus_p->bus_hdr_type);
        goto caps_done;
    }

    base = baseptr;
    for (base = pci_cfgacc_get8(rcdip, bdf, base); base && num_cap;
        base = pci_cfgacc_get8(rcdip, bdf, base + PCI_CAP_NEXT_PTR)) {
        capid = pci_cfgacc_get8(rcdip, bdf, base);
        switch (capid) {
        case PCI_CAP_ID_PCI_E:
            bus_p->bus_pcie_off = base;
            bus_p->bus_dev_type = pci_cfgacc_get16(rcdip, bdf,
                base + PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;

            /* Check and save PCIe hotplug capability information */
            if ((PCIE_IS_RP(bus_p) || PCIE_IS_SWD(bus_p)) &&
                (pci_cfgacc_get16(rcdip, bdf, base + PCIE_PCIECAP)
                & PCIE_PCIECAP_SLOT_IMPL) &&
                (pci_cfgacc_get32(rcdip, bdf, base + PCIE_SLOTCAP)
                & PCIE_SLOTCAP_HP_CAPABLE))
                bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE;

            num_cap--;
            break;
        case PCI_CAP_ID_PCIX:
            bus_p->bus_pcix_off = base;
            if (PCIE_IS_BDG(bus_p))
                bus_p->bus_ecc_ver =
                    pci_cfgacc_get16(rcdip, bdf, base +
                    PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
            else
                bus_p->bus_ecc_ver =
                    pci_cfgacc_get16(rcdip, bdf, base +
                    PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
            num_cap--;
            break;
        default:
            break;
        }
    }

    /* Check and save PCI hotplug (SHPC) capability information */
    if (PCIE_IS_BDG(bus_p)) {
        base = baseptr;
        for (base = pci_cfgacc_get8(rcdip, bdf, base);
            base; base = pci_cfgacc_get8(rcdip, bdf,
            base + PCI_CAP_NEXT_PTR)) {
            capid = pci_cfgacc_get8(rcdip, bdf, base);
            if (capid == PCI_CAP_ID_PCI_HOTPLUG) {
                bus_p->bus_pci_hp_off = base;
                bus_p->bus_hp_sup_modes |= PCIE_PCI_HP_MODE;
                break;
            }
        }
    }

    /* Then, relevant extended capabilities */

    if (!PCIE_IS_PCIE(bus_p))
        goto caps_done;

    /* Extended caps: PCIE_EXT_CAP_ID_AER */
    for (base = PCIE_EXT_CAP; base; base = (capid >>
        PCIE_EXT_CAP_NEXT_PTR_SHIFT) & PCIE_EXT_CAP_NEXT_PTR_MASK) {
        capid = pci_cfgacc_get32(rcdip, bdf, base);
        if (capid == PCI_CAP_EINVAL32)
            break;
        if (((capid >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK)
            == PCIE_EXT_CAP_ID_AER) {
            bus_p->bus_aer_off = base;
            break;
        }
    }

caps_done:
    /* save RP dip and RP bdf */
    if (PCIE_IS_RP(bus_p)) {
        bus_p->bus_rp_dip = dip;
        bus_p->bus_rp_bdf = bus_p->bus_bdf;
    } else {
        for (pdip = ddi_get_parent(dip); pdip;
            pdip = ddi_get_parent(pdip)) {
            pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip);

            /*
             * If RP dip and RP bdf in the parent's bus_t have
             * been initialized, simply use these instead of
             * continuing up to the RC.
             */
            if (parent_bus_p->bus_rp_dip != NULL) {
                bus_p->bus_rp_dip = parent_bus_p->bus_rp_dip;
                bus_p->bus_rp_bdf = parent_bus_p->bus_rp_bdf;
                break;
            }

            /*
             * When debugging, be aware that some NVIDIA x86
             * architectures have 2 nodes for each RP, one at Bus
             * 0x0 and one at Bus 0x80; the requester is from Bus
             * 0x80.
             */
            if (PCIE_IS_ROOT(parent_bus_p)) {
                bus_p->bus_rp_dip = pdip;
                bus_p->bus_rp_bdf = parent_bus_p->bus_bdf;
                break;
            }
        }
    }

    bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;
    bus_p->bus_fm_flags = 0;
    bus_p->bus_mps = 0;

    ndi_set_bus_private(dip, B_TRUE, DEVI_PORT_TYPE_PCI, (void *)bus_p);

    if (PCIE_IS_HOTPLUG_CAPABLE(dip))
        (void) ndi_prop_create_boolean(DDI_DEV_T_NONE, dip,
            "hotplug-capable");

initial_done:
    if (!(flags & PCIE_BUS_FINAL))
        goto final_done;
    /* already initialized? */
    bus_p = PCIE_DIP2BUS(dip);

    /* Save the Range information if device is a switch/bridge */
    if (PCIE_IS_BDG(bus_p)) {
        /* get "bus_range" property */
        range_size = sizeof (pci_bus_range_t);
        if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
            "bus-range", (caddr_t)&bus_p->bus_bus_range, &range_size)
            != DDI_PROP_SUCCESS) {
            errstr = "Cannot find \"bus-range\" property";
            cmn_err(CE_WARN,
                "PCIE init err info failed BDF 0x%x:%s\n",
                bus_p->bus_bdf, errstr);
        }

        /* get secondary bus number */
        rcdip = pcie_get_rc_dip(dip);
        ASSERT(rcdip != NULL);

        bus_p->bus_bdg_secbus = pci_cfgacc_get8(rcdip,
            bus_p->bus_bdf, PCI_BCNF_SECBUS);

        /* Get "ranges" property */
        if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
            "ranges", (caddr_t)&bus_p->bus_addr_ranges,
            &bus_p->bus_addr_entries) != DDI_PROP_SUCCESS)
            bus_p->bus_addr_entries = 0;
        bus_p->bus_addr_entries /= sizeof (ppb_ranges_t);
    }

    /* save "assigned-addresses" property array, ignore failures */
    if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
        "assigned-addresses", (caddr_t)&bus_p->bus_assigned_addr,
        &bus_p->bus_assigned_entries) == DDI_PROP_SUCCESS)
        bus_p->bus_assigned_entries /= sizeof (pci_regspec_t);
    else
        bus_p->bus_assigned_entries = 0;

    pcie_init_pfd(dip);

    pcie_init_plat(dip);

final_done:

    PCIE_DBG("Add %s(dip 0x%p, bdf 0x%x, secbus 0x%x)\n",
        ddi_driver_name(dip), (void *)dip, bus_p->bus_bdf,
        bus_p->bus_bdg_secbus);
#ifdef DEBUG
    pcie_print_bus(bus_p);
#endif

    return (bus_p);
}

/*
 * Invoked before destroying a devinfo node, mostly during hotplug
 * operation, to free the pcie_bus_t data structure
 */
/* ARGSUSED */
void
pcie_fini_bus(dev_info_t *dip, uint8_t flags)
{
    pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
    ASSERT(bus_p);

    if (flags & PCIE_BUS_INITIAL) {
        pcie_fini_plat(dip);
        pcie_fini_pfd(dip);

        kmem_free(bus_p->bus_assigned_addr,
            (sizeof (pci_regspec_t) * bus_p->bus_assigned_entries));
        kmem_free(bus_p->bus_addr_ranges,
            (sizeof (ppb_ranges_t) * bus_p->bus_addr_entries));
        /* zero out the fields that have been destroyed */
        bus_p->bus_assigned_addr = NULL;
        bus_p->bus_addr_ranges = NULL;
        bus_p->bus_assigned_entries = 0;
        bus_p->bus_addr_entries = 0;
    }

    if (flags & PCIE_BUS_FINAL) {
        if (PCIE_IS_HOTPLUG_CAPABLE(dip)) {
            (void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
                "hotplug-capable");
        }

        ndi_set_bus_private(dip, B_TRUE, NULL, NULL);
        kmem_free(bus_p, sizeof (pcie_bus_t));
    }
}

int
pcie_postattach_child(dev_info_t *cdip)
{
    pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip);

    if (!bus_p)
        return (DDI_FAILURE);

    return (pcie_enable_ce(cdip));
}
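/*
 * Sketch of the two-phase pcie_bus_t setup described above. The phases
 * may be split (as the hotplug code does) or requested together; bdf
 * here is assumed to come from pcie_get_bdf_from_dip().
 *
 *	pcie_req_id_t bdf;
 *
 *	if (pcie_get_bdf_from_dip(dip, &bdf) == DDI_SUCCESS)
 *		(void) pcie_init_bus(dip, bdf,
 *		    PCIE_BUS_INITIAL | PCIE_BUS_FINAL);
 *	...
 *	pcie_fini_bus(dip, PCIE_BUS_INITIAL | PCIE_BUS_FINAL);
 */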
/*
 * PCI-Express child device de-initialization.
 * This function disables generic pci-express interrupts and error
 * handling.
 */
void
pcie_uninitchild(dev_info_t *cdip)
{
    pcie_disable_errors(cdip);
    pcie_fini_cfghdl(cdip);
    pcie_fini_dom(cdip);
}

/*
 * find the root complex dip
 */
dev_info_t *
pcie_get_rc_dip(dev_info_t *dip)
{
    dev_info_t *rcdip;
    pcie_bus_t *rc_bus_p;

    for (rcdip = ddi_get_parent(dip); rcdip;
        rcdip = ddi_get_parent(rcdip)) {
        rc_bus_p = PCIE_DIP2BUS(rcdip);
        if (rc_bus_p && PCIE_IS_RC(rc_bus_p))
            break;
    }

    return (rcdip);
}

static boolean_t
pcie_is_pci_device(dev_info_t *dip)
{
    dev_info_t *pdip;
    char *device_type;

    pdip = ddi_get_parent(dip);
    ASSERT(pdip);

    if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS,
        "device_type", &device_type) != DDI_PROP_SUCCESS)
        return (B_FALSE);

    if (strcmp(device_type, "pciex") != 0 &&
        strcmp(device_type, "pci") != 0) {
        ddi_prop_free(device_type);
        return (B_FALSE);
    }

    ddi_prop_free(device_type);
    return (B_TRUE);
}

typedef struct {
    boolean_t init;
    uint8_t flags;
} pcie_bus_arg_t;

/*ARGSUSED*/
static int
pcie_fab_do_init_fini(dev_info_t *dip, void *arg)
{
    pcie_req_id_t bdf;
    pcie_bus_arg_t *bus_arg = (pcie_bus_arg_t *)arg;

    if (!pcie_is_pci_device(dip))
        goto out;

    if (bus_arg->init) {
        if (pcie_get_bdf_from_dip(dip, &bdf) != DDI_SUCCESS)
            goto out;

        (void) pcie_init_bus(dip, bdf, bus_arg->flags);
    } else {
        (void) pcie_fini_bus(dip, bus_arg->flags);
    }

    return (DDI_WALK_CONTINUE);

out:
    return (DDI_WALK_PRUNECHILD);
}

void
pcie_fab_init_bus(dev_info_t *rcdip, uint8_t flags)
{
    int circular_count;
    dev_info_t *dip = ddi_get_child(rcdip);
    pcie_bus_arg_t arg;

    arg.init = B_TRUE;
    arg.flags = flags;

    ndi_devi_enter(rcdip, &circular_count);
    ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
    ndi_devi_exit(rcdip, circular_count);
}

void
pcie_fab_fini_bus(dev_info_t *rcdip, uint8_t flags)
{
    int circular_count;
    dev_info_t *dip = ddi_get_child(rcdip);
    pcie_bus_arg_t arg;

    arg.init = B_FALSE;
    arg.flags = flags;

    ndi_devi_enter(rcdip, &circular_count);
    ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
    ndi_devi_exit(rcdip, circular_count);
}

void
pcie_enable_errors(dev_info_t *dip)
{
    pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
    uint16_t reg16, tmp16;
    uint32_t reg32, tmp32;

    ASSERT(bus_p);

    /*
     * Clear any pending errors
     */
    pcie_clear_errors(dip);

    if (!PCIE_IS_PCIE(bus_p))
        return;

    /*
     * Enable Baseline Error Handling but leave CE reporting off (poweron
     * default).
     */
    if ((reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL)) !=
        PCI_CAP_EINVAL16) {
        tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
            PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
            (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
            PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
            (pcie_base_err_default & (~PCIE_DEVCTL_CE_REPORTING_EN));

        PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
        PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);
    }

    /* Enable Root Port Baseline Error Receiving */
    if (PCIE_IS_ROOT(bus_p) &&
        (reg16 = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL)) !=
        PCI_CAP_EINVAL16) {

        tmp16 = pcie_serr_disable_flag ?
            (pcie_root_ctrl_default & ~PCIE_ROOT_SYS_ERR) :
            pcie_root_ctrl_default;
        PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, tmp16);
        PCIE_DBG_CAP(dip, bus_p, "ROOT DEVCTL", 16, PCIE_ROOTCTL,
            reg16);
    }

    /*
     * Enable PCI-Express Advanced Error Handling if it exists
     */
    if (!PCIE_HAS_AER(bus_p))
        return;

    /* Set Uncorrectable Severity */
    if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_SERV)) !=
        PCI_CAP_EINVAL32) {
        tmp32 = pcie_aer_uce_severity;

        PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_SERV, tmp32);
        PCIE_DBG_AER(dip, bus_p, "AER UCE SEV", 32, PCIE_AER_UCE_SERV,
            reg32);
    }

    /* Enable Uncorrectable errors */
    if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_MASK)) !=
        PCI_CAP_EINVAL32) {
        tmp32 = pcie_aer_uce_mask;

        PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, tmp32);
        PCIE_DBG_AER(dip, bus_p, "AER UCE MASK", 32, PCIE_AER_UCE_MASK,
            reg32);
    }

    /* Enable ECRC generation and checking */
    if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
        PCI_CAP_EINVAL32) {
        tmp32 = reg32 | pcie_ecrc_value;
        PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, tmp32);
        PCIE_DBG_AER(dip, bus_p, "AER CTL", 32, PCIE_AER_CTL, reg32);
    }

    /* Enable Secondary Uncorrectable errors if this is a bridge */
    if (!PCIE_IS_PCIE_BDG(bus_p))
        goto root;

    /* Set Secondary Uncorrectable Severity */
    if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_SERV)) !=
        PCI_CAP_EINVAL32) {
        tmp32 = pcie_aer_suce_severity;

        PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_SERV, tmp32);
        PCIE_DBG_AER(dip, bus_p, "AER SUCE SEV", 32, PCIE_AER_SUCE_SERV,
            reg32);
    }

    if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_MASK)) !=
        PCI_CAP_EINVAL32) {
        PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, pcie_aer_suce_mask);
        PCIE_DBG_AER(dip, bus_p, "AER SUCE MASK", 32,
            PCIE_AER_SUCE_MASK, reg32);
    }

root:
    /*
     * Enable Root Control if this is a Root device
     */
    if (!PCIE_IS_ROOT(bus_p))
        return;

    if ((reg16 = PCIE_AER_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
        PCI_CAP_EINVAL16) {
        PCIE_AER_PUT(16, bus_p, PCIE_AER_RE_CMD,
            pcie_root_error_cmd_default);
        PCIE_DBG_AER(dip, bus_p, "AER Root Err Cmd", 16,
            PCIE_AER_RE_CMD, reg16);
    }
}
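/*
 * Illustrative ordering for callers outside this module, per the
 * comment below: enable baseline/AER error handling first, then CE
 * reporting. This mirrors pcie_initchild() (which calls
 * pcie_enable_errors()) followed by pcie_postattach_child() (which
 * calls pcie_enable_ce()).
 *
 *	pcie_enable_errors(cdip);
 *	(void) pcie_enable_ce(cdip);
 */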
/*
 * This function is used for enabling CE reporting and setting the AER CE mask.
 * When called from outside the pcie module it should always be preceded by
 * a call to pcie_enable_errors.
 */
int
pcie_enable_ce(dev_info_t *dip)
{
    pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
    uint16_t device_sts, device_ctl;
    uint32_t tmp_pcie_aer_ce_mask;

    if (!PCIE_IS_PCIE(bus_p))
        return (DDI_SUCCESS);

    /*
     * The "pcie_ce_mask" property is used to control both the CE reporting
     * enable field in the device control register and the AER CE mask. We
     * leave CE reporting disabled if pcie_ce_mask is set to -1.
     */

    tmp_pcie_aer_ce_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
        DDI_PROP_DONTPASS, "pcie_ce_mask", pcie_aer_ce_mask);

    if (tmp_pcie_aer_ce_mask == (uint32_t)-1) {
        /*
         * Nothing to do since CE reporting has already been disabled.
         */
        return (DDI_SUCCESS);
    }

    if (PCIE_HAS_AER(bus_p)) {
        /* Enable AER CE */
        PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, tmp_pcie_aer_ce_mask);
        PCIE_DBG_AER(dip, bus_p, "AER CE MASK", 32, PCIE_AER_CE_MASK,
            0);

        /* Clear any pending AER CE errors */
        PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_STS, -1);
    }

    /* clear any pending CE errors */
    if ((device_sts = PCIE_CAP_GET(16, bus_p, PCIE_DEVSTS)) !=
        PCI_CAP_EINVAL16)
        PCIE_CAP_PUT(16, bus_p, PCIE_DEVSTS,
            device_sts & (~PCIE_DEVSTS_CE_DETECTED));

    /* Enable CE reporting */
    device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
    PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL,
        (device_ctl & (~PCIE_DEVCTL_ERR_MASK)) | pcie_base_err_default);
    PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, device_ctl);

    return (DDI_SUCCESS);
}

/* ARGSUSED */
void
pcie_disable_errors(dev_info_t *dip)
{
    pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
    uint16_t device_ctl;
    uint32_t aer_reg;

    if (!PCIE_IS_PCIE(bus_p))
        return;

    /*
     * Disable PCI-Express Baseline Error Handling
     */
    device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
    device_ctl &= ~PCIE_DEVCTL_ERR_MASK;
    PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, device_ctl);

    /*
     * Disable PCI-Express Advanced Error Handling if it exists
     */
    if (!PCIE_HAS_AER(bus_p))
        goto root;

    /* Disable Uncorrectable errors */
    PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, PCIE_AER_UCE_BITS);

    /* Disable Correctable errors */
    PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, PCIE_AER_CE_BITS);

    /* Disable ECRC generation and checking */
    if ((aer_reg = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
        PCI_CAP_EINVAL32) {
        aer_reg &= ~(PCIE_AER_CTL_ECRC_GEN_ENA |
            PCIE_AER_CTL_ECRC_CHECK_ENA);

        PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, aer_reg);
    }
    /*
     * Disable Secondary Uncorrectable errors if this is a bridge
     */
    if (!PCIE_IS_PCIE_BDG(bus_p))
        goto root;

    PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, PCIE_AER_SUCE_BITS);

root:
    /*
     * Disable Root Control if this is a Root device
     */
    if (!PCIE_IS_ROOT(bus_p))
        return;

    if (!pcie_serr_disable_flag) {
        device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL);
        device_ctl &= ~PCIE_ROOT_SYS_ERR;
        PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, device_ctl);
    }

    if (!PCIE_HAS_AER(bus_p))
        return;

    if ((device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
        PCI_CAP_EINVAL16) {
        device_ctl &= ~pcie_root_error_cmd_default;
        PCIE_CAP_PUT(16, bus_p, PCIE_AER_RE_CMD, device_ctl);
    }
}
/*
 * Extract bdf from "reg" property.
 */
int
pcie_get_bdf_from_dip(dev_info_t *dip, pcie_req_id_t *bdf)
{
    pci_regspec_t *regspec;
    int reglen;

    if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
        "reg", (int **)&regspec, (uint_t *)&reglen) != DDI_SUCCESS)
        return (DDI_FAILURE);

    if (reglen < (sizeof (pci_regspec_t) / sizeof (int))) {
        ddi_prop_free(regspec);
        return (DDI_FAILURE);
    }

    /* Get phys_hi from first element.  All have same bdf. */
    *bdf = (regspec->pci_phys_hi & (PCI_REG_BDFR_M ^ PCI_REG_REG_M)) >> 8;

    ddi_prop_free(regspec);
    return (DDI_SUCCESS);
}

dev_info_t *
pcie_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
{
    dev_info_t *cdip = rdip;

    for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
        ;

    return (cdip);
}

uint32_t
pcie_get_bdf_for_dma_xfer(dev_info_t *dip, dev_info_t *rdip)
{
    dev_info_t *cdip;

    /*
     * As part of the probing, the PCI fcode interpreter may setup a DMA
     * request if a given card has fcode on it, using the dip and rdip of
     * the hotplug connector, i.e., the dip and rdip of the px/pcieb
     * driver. In this case, return an invalid value for the bdf since we
     * cannot get to the bdf value of the actual device which will be
     * initiating this DMA.
     */
    if (rdip == dip)
        return (PCIE_INVALID_BDF);

    cdip = pcie_get_my_childs_dip(dip, rdip);

    /*
     * For a given rdip, return the bdf value of dip's (px or pcieb)
     * immediate child or secondary bus-id if dip is a PCIe2PCI bridge.
     *
     * XXX - For now, return an invalid bdf value for all PCI and PCI-X
     * devices since this needs more work.
     */
    return (PCI_GET_PCIE2PCI_SECBUS(cdip) ?
        PCIE_INVALID_BDF : PCI_GET_BDF(cdip));
}

uint32_t
pcie_get_aer_uce_mask()
{
    return (pcie_aer_uce_mask);
}

uint32_t
pcie_get_aer_ce_mask()
{
    return (pcie_aer_ce_mask);
}

uint32_t
pcie_get_aer_suce_mask()
{
    return (pcie_aer_suce_mask);
}

uint32_t
pcie_get_serr_mask()
{
    return (pcie_serr_disable_flag);
}

void
pcie_set_aer_uce_mask(uint32_t mask)
{
    pcie_aer_uce_mask = mask;
    if (mask & PCIE_AER_UCE_UR)
        pcie_base_err_default &= ~PCIE_DEVCTL_UR_REPORTING_EN;
    else
        pcie_base_err_default |= PCIE_DEVCTL_UR_REPORTING_EN;

    if (mask & PCIE_AER_UCE_ECRC)
        pcie_ecrc_value = 0;
}

void
pcie_set_aer_ce_mask(uint32_t mask)
{
    pcie_aer_ce_mask = mask;
}

void
pcie_set_aer_suce_mask(uint32_t mask)
{
    pcie_aer_suce_mask = mask;
}

void
pcie_set_serr_mask(uint32_t mask)
{
    pcie_serr_disable_flag = mask;
}
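/*
 * Worked example (assumed values) of the "reg" phys_hi decoding done in
 * pcie_get_bdf_from_dip() above. phys_hi packs bus[23:16], dev[15:11]
 * and func[10:8], so masking with (PCI_REG_BDFR_M ^ PCI_REG_REG_M),
 * i.e. 0x00ffff00, and shifting right by 8 yields the 16-bit BDF:
 *
 *	phys_hi = 0x00810100             (bus 0x81, dev 0x0, func 0x1)
 *	bdf = (0x00810100 & 0x00ffff00) >> 8 = 0x8101
 */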
/*
 * Is rdip a child of dip?  Used to check that certain CTLOPS don't bubble
 * up erroneously, e.g. ISA ctlops to a PCI-PCI bridge.
 */
boolean_t
pcie_is_child(dev_info_t *dip, dev_info_t *rdip)
{
    dev_info_t *cdip = ddi_get_child(dip);

    for (; cdip; cdip = ddi_get_next_sibling(cdip))
        if (cdip == rdip)
            break;
    return (cdip != NULL);
}

boolean_t
pcie_is_link_disabled(dev_info_t *dip)
{
    pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

    if (PCIE_IS_PCIE(bus_p)) {
        if (PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL) &
            PCIE_LINKCTL_LINK_DISABLE)
            return (B_TRUE);
    }
    return (B_FALSE);
}

/*
 * Initialize the MPS for a root port.
 *
 * dip - dip of root port device.
 */
void
pcie_init_root_port_mps(dev_info_t *dip)
{
    pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
    int rp_cap, max_supported = pcie_max_mps;

    (void) pcie_get_fabric_mps(ddi_get_parent(dip),
        ddi_get_child(dip), &max_supported);

    rp_cap = PCI_CAP_GET16(bus_p->bus_cfg_hdl, NULL,
        bus_p->bus_pcie_off, PCIE_DEVCAP) &
        PCIE_DEVCAP_MAX_PAYLOAD_MASK;

    if (rp_cap < max_supported)
        max_supported = rp_cap;

    bus_p->bus_mps = max_supported;
    (void) pcie_initchild_mps(dip);
}

/*
 * Initialize the Maximum Payload Size of a device.
 *
 * cdip - dip of device.
 *
 * returns - DDI_SUCCESS or DDI_FAILURE
 */
int
pcie_initchild_mps(dev_info_t *cdip)
{
    pcie_bus_t *bus_p;
    dev_info_t *pdip = ddi_get_parent(cdip);
    uint8_t dev_type;

    bus_p = PCIE_DIP2BUS(cdip);
    if (bus_p == NULL) {
        PCIE_DBG("%s: BUS not found.\n",
            ddi_driver_name(cdip));
        return (DDI_FAILURE);
    }

    dev_type = bus_p->bus_dev_type;

    /*
     * For ARI Devices, only function zero's MPS needs to be set.
     */
    if ((dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) &&
        (pcie_ari_is_enabled(pdip) == PCIE_ARI_FORW_ENABLED)) {
        pcie_req_id_t child_bdf;

        if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
            return (DDI_FAILURE);
        if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) != 0)
            return (DDI_SUCCESS);
    }

    if (PCIE_IS_PCIE(bus_p)) {
        int suggested_mrrs, fabric_mps;
        uint16_t device_mps, device_mps_cap, device_mrrs, dev_ctrl;

        dev_ctrl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
        if ((fabric_mps = (PCIE_IS_RP(bus_p) ? bus_p :
            PCIE_DIP2BUS(pdip))->bus_mps) < 0) {
            dev_ctrl = (dev_ctrl & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
                PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
                (pcie_devctl_default &
                (PCIE_DEVCTL_MAX_READ_REQ_MASK |
                PCIE_DEVCTL_MAX_PAYLOAD_MASK));

            PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);
            return (DDI_SUCCESS);
        }

        device_mps_cap = PCIE_CAP_GET(16, bus_p, PCIE_DEVCAP) &
            PCIE_DEVCAP_MAX_PAYLOAD_MASK;

        device_mrrs = (dev_ctrl & PCIE_DEVCTL_MAX_READ_REQ_MASK) >>
            PCIE_DEVCTL_MAX_READ_REQ_SHIFT;

        if (device_mps_cap < fabric_mps)
            device_mrrs = device_mps = device_mps_cap;
        else
            device_mps = (uint16_t)fabric_mps;

        suggested_mrrs = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
            cdip, DDI_PROP_DONTPASS, "suggested-mrrs", device_mrrs);

        if ((device_mps == fabric_mps) ||
            (suggested_mrrs < device_mrrs))
            device_mrrs = (uint16_t)suggested_mrrs;

        /*
         * Replace MPS and MRRS settings.
         */
        dev_ctrl &= ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
            PCIE_DEVCTL_MAX_PAYLOAD_MASK);

        dev_ctrl |= ((device_mrrs << PCIE_DEVCTL_MAX_READ_REQ_SHIFT) |
            device_mps << PCIE_DEVCTL_MAX_PAYLOAD_SHIFT);

        PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);

        bus_p->bus_mps = device_mps;
    }

    return (DDI_SUCCESS);
}

/*
 * Scans a device tree/branch for maximum payload size capabilities.
 *
 * rc_dip - dip of Root Complex.
 * dip - dip of device where scan will begin.
 * max_supported (IN) - maximum allowable MPS.
 * max_supported (OUT) - maximum payload size capability of fabric.
 */
void
pcie_get_fabric_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
{
    if (dip == NULL)
        return;

    /*
     * Perform a fabric scan to obtain Maximum Payload Capabilities
     */
    (void) pcie_scan_mps(rc_dip, dip, max_supported);

    PCIE_DBG("MPS: Highest Common MPS= %x\n", *max_supported);
}

/*
 * Scans fabric and determines Maximum Payload Size based on
 * a highest common denominator algorithm
 */
static void
pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
{
    int circular_count;
    pcie_max_supported_t max_pay_load_supported;

    max_pay_load_supported.dip = rc_dip;
    max_pay_load_supported.highest_common_mps = *max_supported;

    ndi_devi_enter(ddi_get_parent(dip), &circular_count);
    ddi_walk_devs(dip, pcie_get_max_supported,
        (void *)&max_pay_load_supported);
    ndi_devi_exit(ddi_get_parent(dip), circular_count);

    *max_supported = max_pay_load_supported.highest_common_mps;
}
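/*
 * Note on the encodings compared in the scan above: the MPS values are
 * the 3-bit encodings from the PCIe Device Capabilities/Control
 * registers, where encoding n means (128 << n) bytes (0 = 128B up to
 * 5 = 4096B). For example, the default ceiling declared earlier:
 *
 *	pcie_max_mps = PCIE_DEVCTL_MAX_PAYLOAD_4096 >> 5 = 5    (4096B)
 *
 * so a device whose DEVCAP reports encoding 1 would clamp
 * highest_common_mps to 1 (256 bytes).
 */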
/*
 * Called as part of the Maximum Payload Size scan.
 */
static int
pcie_get_max_supported(dev_info_t *dip, void *arg)
{
    uint32_t max_supported;
    uint16_t cap_ptr;
    pcie_max_supported_t *current = (pcie_max_supported_t *)arg;
    pci_regspec_t *reg;
    int rlen;
    caddr_t virt;
    ddi_acc_handle_t config_handle;

    if (ddi_get_child(current->dip) == NULL) {
        goto fail1;
    }

    if (pcie_dev(dip) == DDI_FAILURE) {
        PCIE_DBG("MPS: pcie_get_max_supported: %s: "
            "Not a PCIe dev\n", ddi_driver_name(dip));
        goto fail1;
    }

    /*
     * If the suggested-mrrs property exists, then don't include this
     * device in the MPS capabilities scan.
     */
    if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
        "suggested-mrrs") != 0)
        goto fail1;

    if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
        (caddr_t)&reg, &rlen) != DDI_PROP_SUCCESS) {
        PCIE_DBG("MPS: pcie_get_max_supported: %s: "
            "Cannot read reg\n", ddi_driver_name(dip));
        goto fail1;
    }

    if (pcie_map_phys(ddi_get_child(current->dip), reg, &virt,
        &config_handle) != DDI_SUCCESS) {
        PCIE_DBG("MPS: pcie_get_max_supported: %s: pcie_map_phys "
            "failed\n", ddi_driver_name(dip));
        goto fail2;
    }

    if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, &cap_ptr)) ==
        DDI_FAILURE) {
        goto fail3;
    }

    max_supported = PCI_CAP_GET16(config_handle, NULL, cap_ptr,
        PCIE_DEVCAP) & PCIE_DEVCAP_MAX_PAYLOAD_MASK;

    PCIE_DBG("PCIE MPS: %s: MPS Capabilities %x\n", ddi_driver_name(dip),
        max_supported);

    if (max_supported < current->highest_common_mps)
        current->highest_common_mps = max_supported;

fail3:
    pcie_unmap_phys(&config_handle, reg);
fail2:
    kmem_free(reg, rlen);
fail1:
    return (DDI_WALK_CONTINUE);
}

/*
 * Determines if there are any root ports attached to a root complex.
 *
 * dip - dip of root complex
 *
 * Returns - DDI_SUCCESS if there is at least one root port, otherwise
 *	     DDI_FAILURE.
 */
int
pcie_root_port(dev_info_t *dip)
{
    int port_type;
    uint16_t cap_ptr;
    ddi_acc_handle_t config_handle;
    dev_info_t *cdip = ddi_get_child(dip);

    /*
     * Determine if any of the children of the passed in dip
     * are root ports.
     */
    for (; cdip; cdip = ddi_get_next_sibling(cdip)) {

        if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS)
            continue;

        if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E,
            &cap_ptr)) == DDI_FAILURE) {
            pci_config_teardown(&config_handle);
            continue;
        }

        port_type = PCI_CAP_GET16(config_handle, NULL, cap_ptr,
            PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;

        pci_config_teardown(&config_handle);

        if (port_type == PCIE_PCIECAP_DEV_TYPE_ROOT)
            return (DDI_SUCCESS);
    }

    /* No root ports were found */

    return (DDI_FAILURE);
}

/*
 * Determines if a device is a PCIe device.
 *
 * dip - dip of device.
 *
 * returns - DDI_SUCCESS if device is a PCIe device, otherwise DDI_FAILURE.
 */
int
pcie_dev(dev_info_t *dip)
{
    /* get parent device's device_type property */
    char *device_type;
    int rc = DDI_FAILURE;
    dev_info_t *pdip = ddi_get_parent(dip);

    if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
        DDI_PROP_DONTPASS, "device_type", &device_type)
        != DDI_PROP_SUCCESS) {
        return (DDI_FAILURE);
    }

    if (strcmp(device_type, "pciex") == 0)
        rc = DDI_SUCCESS;
    else
        rc = DDI_FAILURE;

    ddi_prop_free(device_type);
    return (rc);
}
/*
 * Function to map in a device's memory space.
 */
static int
pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
    caddr_t *addrp, ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	ddi_device_acc_attr_t attr;

	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	attr.devacc_attr_access = DDI_CAUTIOUS_ACC;

	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	hp = impl_acc_hdl_get(*handlep);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = 0;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = attr;

	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_REGSPEC;
	mr.map_obj.rp = (struct regspec *)phys_spec;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	result = ddi_map(dip, &mr, 0, 0, addrp);

	if (result != DDI_SUCCESS) {
		impl_acc_hdl_free(*handlep);
		*handlep = (ddi_acc_handle_t)NULL;
	} else {
		hp->ah_addr = *addrp;
	}

	return (result);
}

/*
 * Unmap memory that was mapped in with pcie_map_phys().
 */
static void
pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);

	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_REGSPEC;
	mr.map_obj.rp = (struct regspec *)ph;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
	    hp->ah_len, &hp->ah_addr);

	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;
}
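/*
 * Usage sketch (hypothetical): pcie_map_phys() and pcie_unmap_phys()
 * are used as a pair around config space accesses, with the "reg"
 * property supplying the register specification:
 *
 *	caddr_t virt;
 *	ddi_acc_handle_t h;
 *
 *	if (pcie_map_phys(cdip, reg, &virt, &h) == DDI_SUCCESS) {
 *		(access config space through h)
 *		pcie_unmap_phys(&h, reg);
 *	}
 *
 * On failure pcie_map_phys() frees the handle and NULLs *handlep, so
 * the caller must not call pcie_unmap_phys() in that case.
 */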
void
pcie_set_rber_fatal(dev_info_t *dip, boolean_t val)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);

	bus_p->bus_pfd->pe_rber_fatal = val;
}

/*
 * Return the parent Root Port's pe_rber_fatal value.
 */
boolean_t
pcie_get_rber_fatal(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	pcie_bus_t *rp_bus_p = PCIE_DIP2UPBUS(bus_p->bus_rp_dip);

	return (rp_bus_p->bus_pfd->pe_rber_fatal);
}

/*
 * Determine whether ARI Forwarding is supported and allowed on this
 * downstream or root port.
 */
int
pcie_ari_supported(dev_info_t *dip)
{
	uint32_t devcap2;
	uint16_t pciecap;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	uint8_t dev_type;

	PCIE_DBG("pcie_ari_supported: dip=%p\n", dip);

	if (bus_p == NULL)
		return (PCIE_ARI_FORW_NOT_SUPPORTED);

	dev_type = bus_p->bus_dev_type;

	if ((dev_type != PCIE_PCIECAP_DEV_TYPE_DOWN) &&
	    (dev_type != PCIE_PCIECAP_DEV_TYPE_ROOT))
		return (PCIE_ARI_FORW_NOT_SUPPORTED);

	if (pcie_disable_ari) {
		PCIE_DBG("pcie_ari_supported: dip=%p: ARI Disabled\n", dip);
		return (PCIE_ARI_FORW_NOT_SUPPORTED);
	}

	pciecap = PCIE_CAP_GET(16, bus_p, PCIE_PCIECAP);

	if ((pciecap & PCIE_PCIECAP_VER_MASK) < PCIE_PCIECAP_VER_2_0) {
		PCIE_DBG("pcie_ari_supported: dip=%p: Not 2.0\n", dip);
		return (PCIE_ARI_FORW_NOT_SUPPORTED);
	}

	devcap2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCAP2);

	PCIE_DBG("pcie_ari_supported: dip=%p: DevCap2=0x%x\n",
	    dip, devcap2);

	if (devcap2 & PCIE_DEVCAP2_ARI_FORWARD) {
		PCIE_DBG("pcie_ari_supported: "
		    "dip=%p: ARI Forwarding is supported\n", dip);
		return (PCIE_ARI_FORW_SUPPORTED);
	}

	return (PCIE_ARI_FORW_NOT_SUPPORTED);
}

/*
 * Enable ARI Forwarding in the port's Device Control 2 register.
 */
int
pcie_ari_enable(dev_info_t *dip)
{
	uint16_t devctl2;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	PCIE_DBG("pcie_ari_enable: dip=%p\n", dip);

	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
		return (DDI_FAILURE);

	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
	devctl2 |= PCIE_DEVCTL2_ARI_FORWARD_EN;
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);

	PCIE_DBG("pcie_ari_enable: dip=%p: writing 0x%x to DevCtl2\n",
	    dip, devctl2);

	return (DDI_SUCCESS);
}

/*
 * Disable ARI Forwarding in the port's Device Control 2 register.
 */
int
pcie_ari_disable(dev_info_t *dip)
{
	uint16_t devctl2;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	PCIE_DBG("pcie_ari_disable: dip=%p\n", dip);

	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
		return (DDI_FAILURE);

	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
	devctl2 &= ~PCIE_DEVCTL2_ARI_FORWARD_EN;
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);

	PCIE_DBG("pcie_ari_disable: dip=%p: writing 0x%x to DevCtl2\n",
	    dip, devctl2);

	return (DDI_SUCCESS);
}

/*
 * Report whether ARI Forwarding is currently enabled on this port.
 */
int
pcie_ari_is_enabled(dev_info_t *dip)
{
	uint16_t devctl2;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	PCIE_DBG("pcie_ari_is_enabled: dip=%p\n", dip);

	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
		return (PCIE_ARI_FORW_DISABLED);

	/* Device Control 2 is a 16-bit register; read it as such. */
	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);

	PCIE_DBG("pcie_ari_is_enabled: dip=%p: DevCtl2=0x%x\n",
	    dip, devctl2);

	if (devctl2 & PCIE_DEVCTL2_ARI_FORWARD_EN) {
		PCIE_DBG("pcie_ari_is_enabled: "
		    "dip=%p: ARI Forwarding is enabled\n", dip);
		return (PCIE_ARI_FORW_ENABLED);
	}

	return (PCIE_ARI_FORW_DISABLED);
}
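/*
 * Usage sketch (hypothetical): enabling ARI forwarding on a port is
 * only useful once the device below it is known to be an ARI device,
 * so the checks are typically chained:
 *
 *	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_SUPPORTED &&
 *	    pcie_ari_device(cdip) == PCIE_ARI_DEVICE)
 *		(void) pcie_ari_enable(dip);
 *
 * where "dip" is the downstream/root port and "cdip" the child device.
 * pcie_ari_enable() repeats the supported check internally, so the
 * chain above is belt-and-suspenders rather than required.
 */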
int
pcie_ari_device(dev_info_t *dip)
{
	ddi_acc_handle_t handle;
	uint16_t cap_ptr;

	PCIE_DBG("pcie_ari_device: dip=%p\n", dip);

	/*
	 * XXX - This function may be called before the bus_p structure
	 * has been populated. This code can be changed to remove
	 * pci_config_setup()/pci_config_teardown() when the RFE
	 * to populate the bus_p structures early in boot is putback.
	 */

	/* First make sure it is a PCIe device */

	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
		return (PCIE_NOT_ARI_DEVICE);

	if ((PCI_CAP_LOCATE(handle, PCI_CAP_ID_PCI_E, &cap_ptr))
	    != DDI_SUCCESS) {
		pci_config_teardown(&handle);
		return (PCIE_NOT_ARI_DEVICE);
	}

	/* Locate the ARI Capability */

	if ((PCI_CAP_LOCATE(handle, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI),
	    &cap_ptr)) == DDI_FAILURE) {
		pci_config_teardown(&handle);
		return (PCIE_NOT_ARI_DEVICE);
	}

	/* The ARI Capability was found, so it must be an ARI device */
	PCIE_DBG("pcie_ari_device: ARI Device dip=%p\n", dip);

	pci_config_teardown(&handle);

	return (PCIE_ARI_DEVICE);
}

/*
 * Return the Next Function Number from a device's ARI capability.
 */
int
pcie_ari_get_next_function(dev_info_t *dip, int *func)
{
	uint32_t val;
	uint16_t cap_ptr, next_function;
	ddi_acc_handle_t handle;

	/*
	 * XXX - This function may be called before the bus_p structure
	 * has been populated. This code can be changed to remove
	 * pci_config_setup()/pci_config_teardown() when the RFE
	 * to populate the bus_p structures early in boot is putback.
	 */

	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if ((PCI_CAP_LOCATE(handle,
	    PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI), &cap_ptr)) == DDI_FAILURE) {
		pci_config_teardown(&handle);
		return (DDI_FAILURE);
	}

	val = PCI_CAP_GET32(handle, NULL, cap_ptr, PCIE_ARI_CAP);

	next_function = (val >> PCIE_ARI_CAP_NEXT_FUNC_SHIFT) &
	    PCIE_ARI_CAP_NEXT_FUNC_MASK;

	pci_config_teardown(&handle);

	*func = next_function;

	return (DDI_SUCCESS);
}

/*
 * Return the child dip whose ARI function number matches 'function'.
 */
dev_info_t *
pcie_func_to_dip(dev_info_t *dip, pcie_req_id_t function)
{
	pcie_req_id_t child_bdf;
	dev_info_t *cdip;

	for (cdip = ddi_get_child(dip); cdip;
	    cdip = ddi_get_next_sibling(cdip)) {

		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
			return (NULL);

		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) == function)
			return (cdip);
	}

	return (NULL);
}
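/*
 * Usage sketch (hypothetical): the two routines above can be combined
 * to walk the linked list of functions on an ARI device, starting at
 * function 0 and following each Next Function Number until it wraps
 * back to 0:
 *
 *	int func = 0;
 *	dev_info_t *cdip;
 *
 *	do {
 *		if ((cdip = pcie_func_to_dip(dip, (pcie_req_id_t)func))
 *		    == NULL)
 *			break;
 *		(operate on cdip)
 *		if (pcie_ari_get_next_function(cdip, &func) != DDI_SUCCESS)
 *			break;
 *	} while (func != 0);
 */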
#ifdef DEBUG

static void
pcie_print_bus(pcie_bus_t *bus_p)
{
	pcie_dbg("\tbus_dip = 0x%p\n", bus_p->bus_dip);
	pcie_dbg("\tbus_fm_flags = 0x%x\n", bus_p->bus_fm_flags);

	pcie_dbg("\tbus_bdf = 0x%x\n", bus_p->bus_bdf);
	pcie_dbg("\tbus_dev_ven_id = 0x%x\n", bus_p->bus_dev_ven_id);
	pcie_dbg("\tbus_rev_id = 0x%x\n", bus_p->bus_rev_id);
	pcie_dbg("\tbus_hdr_type = 0x%x\n", bus_p->bus_hdr_type);
	pcie_dbg("\tbus_dev_type = 0x%x\n", bus_p->bus_dev_type);
	pcie_dbg("\tbus_bdg_secbus = 0x%x\n", bus_p->bus_bdg_secbus);
	pcie_dbg("\tbus_pcie_off = 0x%x\n", bus_p->bus_pcie_off);
	pcie_dbg("\tbus_aer_off = 0x%x\n", bus_p->bus_aer_off);
	pcie_dbg("\tbus_pcix_off = 0x%x\n", bus_p->bus_pcix_off);
	pcie_dbg("\tbus_ecc_ver = 0x%x\n", bus_p->bus_ecc_ver);
}

/*
 * For debugging purposes, set pcie_dbg_print to a non-zero value to see
 * printf messages while servicing an interrupt.
 *
 * When a proper solution is in place this code will disappear.
 * Potential solutions are:
 * o circular buffers
 * o taskq to print at lower pil
 */
int pcie_dbg_print = 0;
void
pcie_dbg(char *fmt, ...)
{
	va_list ap;

	if (!pcie_debug_flags) {
		return;
	}
	va_start(ap, fmt);
	if (servicing_interrupt()) {
		if (pcie_dbg_print) {
			prom_vprintf(fmt, ap);
		}
	} else {
		prom_vprintf(fmt, ap);
	}
	va_end(ap);
}
#endif	/* DEBUG */

#if defined(__i386) || defined(__amd64)
static void
pcie_check_io_mem_range(ddi_acc_handle_t cfg_hdl, boolean_t *empty_io_range,
    boolean_t *empty_mem_range)
{
	uint8_t class, subclass;
	uint_t val;

	class = pci_config_get8(cfg_hdl, PCI_CONF_BASCLASS);
	subclass = pci_config_get8(cfg_hdl, PCI_CONF_SUBCLASS);

	if ((class == PCI_CLASS_BRIDGE) && (subclass == PCI_BRIDGE_PCI)) {
		val = (((uint_t)pci_config_get8(cfg_hdl, PCI_BCNF_IO_BASE_LOW) &
		    PCI_BCNF_IO_MASK) << 8);
		/*
		 * Assume that an I/O base of zero implies an empty
		 * (invalid) I/O range; likewise for the memory range
		 * below.
		 */
		if (val == 0)
			*empty_io_range = B_TRUE;
		val = (((uint_t)pci_config_get16(cfg_hdl, PCI_BCNF_MEM_BASE) &
		    PCI_BCNF_MEM_MASK) << 16);
		if (val == 0)
			*empty_mem_range = B_TRUE;
	}
}

#endif	/* defined(__i386) || defined(__amd64) */