/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, Joyent, Inc.
 */

#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/promif.h>
#include <sys/disp.h>
#include <sys/stat.h>
#include <sys/file.h>
#include <sys/pci_cap.h>
#include <sys/pci_impl.h>
#include <sys/pcie_impl.h>
#include <sys/hotplug/pci/pcie_hp.h>
#include <sys/hotplug/pci/pciehpc.h>
#include <sys/hotplug/pci/pcishpc.h>
#include <sys/hotplug/pci/pcicfg.h>
#include <sys/pci_cfgacc.h>

/* Local function prototypes */
static void pcie_init_pfd(dev_info_t *);
static void pcie_fini_pfd(dev_info_t *);

#if defined(__i386) || defined(__amd64)
static void pcie_check_io_mem_range(ddi_acc_handle_t, boolean_t *, boolean_t *);
#endif /* defined(__i386) || defined(__amd64) */

#ifdef DEBUG
uint_t pcie_debug_flags = 0;
static void pcie_print_bus(pcie_bus_t *bus_p);
void pcie_dbg(char *fmt, ...);
#endif /* DEBUG */

/* Variable to control default PCI-Express config settings */
ushort_t pcie_command_default =
    PCI_COMM_SERR_ENABLE |
    PCI_COMM_WAIT_CYC_ENAB |
    PCI_COMM_PARITY_DETECT |
    PCI_COMM_ME |
    PCI_COMM_MAE |
    PCI_COMM_IO;

/* xxx_fw are bits that are controlled by FW and should not be modified */
ushort_t pcie_command_default_fw =
    PCI_COMM_SPEC_CYC |
    PCI_COMM_MEMWR_INVAL |
    PCI_COMM_PALETTE_SNOOP |
    PCI_COMM_WAIT_CYC_ENAB |
    0xF800; /* Reserved Bits */

ushort_t pcie_bdg_command_default_fw =
    PCI_BCNF_BCNTRL_ISA_ENABLE |
    PCI_BCNF_BCNTRL_VGA_ENABLE |
    0xF000; /* Reserved Bits */

/* PCI-Express Base error defaults */
ushort_t pcie_base_err_default =
    PCIE_DEVCTL_CE_REPORTING_EN |
    PCIE_DEVCTL_NFE_REPORTING_EN |
    PCIE_DEVCTL_FE_REPORTING_EN |
    PCIE_DEVCTL_UR_REPORTING_EN;

/* PCI-Express Device Control Register */
uint16_t pcie_devctl_default = PCIE_DEVCTL_RO_EN |
    PCIE_DEVCTL_MAX_READ_REQ_512;

/* PCI-Express AER Root Control Register */
#define	PCIE_ROOT_SYS_ERR	(PCIE_ROOTCTL_SYS_ERR_ON_CE_EN | \
				PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN | \
				PCIE_ROOTCTL_SYS_ERR_ON_FE_EN)

ushort_t pcie_root_ctrl_default =
    PCIE_ROOTCTL_SYS_ERR_ON_CE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN;

/* PCI-Express Root Error Command Register */
ushort_t pcie_root_error_cmd_default =
    PCIE_AER_RE_CMD_CE_REP_EN |
    PCIE_AER_RE_CMD_NFE_REP_EN |
    PCIE_AER_RE_CMD_FE_REP_EN;

/* ECRC settings in the PCIe AER Control Register */
uint32_t pcie_ecrc_value =
    PCIE_AER_CTL_ECRC_GEN_ENA |
    PCIE_AER_CTL_ECRC_CHECK_ENA;

/*
 * If a particular platform wants to disable certain errors such as UR/MA,
 * instead of using #defines have the platform's PCIe Root Complex driver set
 * these masks using the pcie_get_XXX_mask and pcie_set_XXX_mask functions.
 * For x86 the closest thing to a PCIe root complex driver is NPE.  For SPARC
 * the closest PCIe root complex driver is PX.
 *
 * pcie_serr_disable_flag: disable SERR only (in RCR and command reg).  x86
 * systems may want to disable SERR in general.  For root ports, enabling SERR
 * causes NMIs which are not handled and which result in a watchdog timeout
 * error.
 */
uint32_t pcie_aer_uce_mask = 0;		/* AER UE Mask */
uint32_t pcie_aer_ce_mask = 0;		/* AER CE Mask */
uint32_t pcie_aer_suce_mask = 0;	/* AER Secondary UE Mask */
uint32_t pcie_serr_disable_flag = 0;	/* Disable SERR */
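
/*
 * Illustrative sketch (not part of this module): a platform root complex
 * driver such as npe or px would typically override these masks early in
 * its attach path, before child devices are initialized, e.g.:
 *
 *	uint32_t mask = pcie_get_aer_uce_mask();
 *	pcie_set_aer_uce_mask(mask | PCIE_AER_UCE_UR);
 *
 * Masking PCIE_AER_UCE_UR this way also clears UR reporting from
 * pcie_base_err_default; see pcie_set_aer_uce_mask() below.
 */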

/* Default severities needed for eversholt.  Error handling doesn't care. */
uint32_t pcie_aer_uce_severity = PCIE_AER_UCE_MTLP | PCIE_AER_UCE_RO | \
    PCIE_AER_UCE_FCP | PCIE_AER_UCE_SD | PCIE_AER_UCE_DLP | \
    PCIE_AER_UCE_TRAINING;
uint32_t pcie_aer_suce_severity = PCIE_AER_SUCE_SERR_ASSERT | \
    PCIE_AER_SUCE_UC_ADDR_ERR | PCIE_AER_SUCE_UC_ATTR_ERR | \
    PCIE_AER_SUCE_USC_MSG_DATA_ERR;

int pcie_max_mps = PCIE_DEVCTL_MAX_PAYLOAD_4096 >> 5;
int pcie_disable_ari = 0;

static void pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip,
    int *max_supported);
static int pcie_get_max_supported(dev_info_t *dip, void *arg);
static int pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
    caddr_t *addrp, ddi_acc_handle_t *handlep);
static void pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph);

dev_info_t *pcie_get_rc_dip(dev_info_t *dip);

/*
 * modload support
 */

static struct modlmisc modlmisc = {
	&mod_miscops,	/* Type of module */
	"PCI Express Framework Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlmisc,
	NULL
};

/*
 * Global variables needed for a non-atomic version of ddi_fm_ereport_post.
 * Currently used to send the pci.fabric ereports whose payload depends on
 * the type of PCI device they are being sent for.
 */
char		*pcie_nv_buf;
nv_alloc_t	*pcie_nvap;
nvlist_t	*pcie_nvl;

int
_init(void)
{
	int rval;

	pcie_nv_buf = kmem_alloc(ERPT_DATA_SZ, KM_SLEEP);
	pcie_nvap = fm_nva_xcreate(pcie_nv_buf, ERPT_DATA_SZ);
	pcie_nvl = fm_nvlist_create(pcie_nvap);

	if ((rval = mod_install(&modlinkage)) != 0) {
		fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
		fm_nva_xdestroy(pcie_nvap);
		kmem_free(pcie_nv_buf, ERPT_DATA_SZ);
	}
	return (rval);
}

int
_fini()
{
	int rval;

	if ((rval = mod_remove(&modlinkage)) == 0) {
		fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
		fm_nva_xdestroy(pcie_nvap);
		kmem_free(pcie_nv_buf, ERPT_DATA_SZ);
	}
	return (rval);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/* ARGSUSED */
int
pcie_init(dev_info_t *dip, caddr_t arg)
{
	int	ret = DDI_SUCCESS;

	/*
	 * Create a "devctl" minor node to support DEVCTL_DEVICE_*
	 * and DEVCTL_BUS_* ioctls to this bus.
	 */
	if ((ret = ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    PCI_MINOR_NUM(ddi_get_instance(dip), PCI_DEVCTL_MINOR),
	    DDI_NT_NEXUS, 0)) != DDI_SUCCESS) {
		PCIE_DBG("Failed to create devctl minor node for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

		return (ret);
	}

	if ((ret = pcie_hp_init(dip, arg)) != DDI_SUCCESS) {
		/*
		 * On some x86 platforms, we observed unexpected hotplug
		 * initialization failures in recent years.  The known cause
		 * is a hardware issue: while the problem PCI bridges have
		 * the Hotplug Capable registers set, the machine actually
		 * does not implement the expected ACPI object.
		 *
		 * We don't want to stop PCI driver attach and system boot
		 * just because of this hotplug initialization failure.
		 * Continue with a debug message printed.
		 */
		PCIE_DBG("%s%d: Failed setting hotplug framework\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

#if defined(__sparc)
		ddi_remove_minor_node(dip, "devctl");

		return (ret);
#endif /* defined(__sparc) */
	}

	return (DDI_SUCCESS);
}

/* ARGSUSED */
int
pcie_uninit(dev_info_t *dip)
{
	int	ret = DDI_SUCCESS;

	if (pcie_ari_is_enabled(dip) == PCIE_ARI_FORW_ENABLED)
		(void) pcie_ari_disable(dip);

	if ((ret = pcie_hp_uninit(dip)) != DDI_SUCCESS) {
		PCIE_DBG("Failed to uninitialize hotplug for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

		return (ret);
	}

	ddi_remove_minor_node(dip, "devctl");

	return (ret);
}

/*
 * PCIe module interface for enabling the hotplug interrupt.
 *
 * It should be called after pcie_init() is done and the bus driver's
 * interrupt handlers have been attached.
 */
int
pcie_hpintr_enable(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	pcie_hp_ctrl_t	*ctrl_p = PCIE_GET_HP_CTRL(dip);

	if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
		(void) (ctrl_p->hc_ops.enable_hpc_intr)(ctrl_p);
	} else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
		(void) pcishpc_enable_irqs(ctrl_p);
	}
	return (DDI_SUCCESS);
}

/*
 * PCIe module interface for disabling the hotplug interrupt.
 *
 * It should be called before pcie_uninit() is called and the bus driver's
 * interrupt handlers are detached.
 */
int
pcie_hpintr_disable(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	pcie_hp_ctrl_t	*ctrl_p = PCIE_GET_HP_CTRL(dip);

	if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
		(void) (ctrl_p->hc_ops.disable_hpc_intr)(ctrl_p);
	} else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
		(void) pcishpc_disable_irqs(ctrl_p);
	}
	return (DDI_SUCCESS);
}

/* ARGSUSED */
int
pcie_intr(dev_info_t *dip)
{
	return (pcie_hp_intr(dip));
}

/* ARGSUSED */
int
pcie_open(dev_info_t *dip, dev_t *devp, int flags, int otyp, cred_t *credp)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	/*
	 * Make sure the open is for the right file type.
	 */
	if (otyp != OTYP_CHR)
		return (EINVAL);

	/*
	 * Handle the open by tracking the device state.
	 */
	if ((bus_p->bus_soft_state == PCI_SOFT_STATE_OPEN_EXCL) ||
	    ((flags & FEXCL) &&
	    (bus_p->bus_soft_state != PCI_SOFT_STATE_CLOSED))) {
		return (EBUSY);
	}

	if (flags & FEXCL)
		bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN_EXCL;
	else
		bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN;

	return (0);
}

/* ARGSUSED */
int
pcie_close(dev_info_t *dip, dev_t dev, int flags, int otyp, cred_t *credp)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	if (otyp != OTYP_CHR)
		return (EINVAL);

	bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;

	return (0);
}

/* ARGSUSED */
int
pcie_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp)
{
	struct devctl_iocdata	*dcp;
	uint_t			bus_state;
	int			rv = DDI_SUCCESS;

	/*
	 * We can use the generic implementation for the devctl ioctls.
	 */
	switch (cmd) {
	case DEVCTL_DEVICE_GETSTATE:
	case DEVCTL_DEVICE_ONLINE:
	case DEVCTL_DEVICE_OFFLINE:
	case DEVCTL_BUS_GETSTATE:
		return (ndi_devctl_ioctl(dip, cmd, arg, mode, 0));
	default:
		break;
	}

	/*
	 * Read devctl ioctl data.
	 */
	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
		return (EFAULT);

	switch (cmd) {
	case DEVCTL_BUS_QUIESCE:
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
			if (bus_state == BUS_QUIESCED)
				break;
		(void) ndi_set_bus_state(dip, BUS_QUIESCED);
		break;
	case DEVCTL_BUS_UNQUIESCE:
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
			if (bus_state == BUS_ACTIVE)
				break;
		(void) ndi_set_bus_state(dip, BUS_ACTIVE);
		break;
	case DEVCTL_BUS_RESET:
	case DEVCTL_BUS_RESETALL:
	case DEVCTL_DEVICE_RESET:
		rv = ENOTSUP;
		break;
	default:
		rv = ENOTTY;
	}

	ndi_dc_freehdl(dcp);
	return (rv);
}

/* ARGSUSED */
int
pcie_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int flags, char *name, caddr_t valuep, int *lengthp)
{
	if (dev == DDI_DEV_T_ANY)
		goto skip;

	if (PCIE_IS_HOTPLUG_CAPABLE(dip) &&
	    strcmp(name, "pci-occupant") == 0) {
		int	pci_dev = PCI_MINOR_NUM_TO_PCI_DEVNUM(getminor(dev));

		pcie_hp_create_occupant_props(dip, dev, pci_dev);
	}

skip:
	return (ddi_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp));
}

int
pcie_init_cfghdl(dev_info_t *cdip)
{
	pcie_bus_t		*bus_p;
	ddi_acc_handle_t	eh = NULL;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL)
		return (DDI_FAILURE);

	/* Create a config access handle dedicated to error handling */
	if (pci_config_setup(cdip, &eh) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Cannot setup config access"
		    " for BDF 0x%x\n", bus_p->bus_bdf);
		return (DDI_FAILURE);
	}

	bus_p->bus_cfg_hdl = eh;
	return (DDI_SUCCESS);
}

void
pcie_fini_cfghdl(dev_info_t *cdip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(cdip);

	pci_config_teardown(&bus_p->bus_cfg_hdl);
}

void
pcie_determine_serial(dev_info_t *dip)
{
	pcie_bus_t		*bus_p = PCIE_DIP2BUS(dip);
	ddi_acc_handle_t	h;
	uint16_t		cap;
	uchar_t			serial[8];
	uint32_t		low, high;

	if (!PCIE_IS_PCIE(bus_p))
		return;

	h = bus_p->bus_cfg_hdl;

	if ((PCI_CAP_LOCATE(h, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_SER), &cap)) ==
	    DDI_FAILURE)
		return;

	high = PCI_XCAP_GET32(h, 0, cap, PCIE_SER_SID_UPPER_DW);
	low = PCI_XCAP_GET32(h, 0, cap, PCIE_SER_SID_LOWER_DW);

	/*
	 * Here, we're trying to figure out if we had an invalid PCIe read.
	 * From the value alone it can be hard to tell a register that
	 * legitimately reads as all 1s from a failed read, so we only treat
	 * the serial number as invalid if both register reads are invalid.
	 * We also use only 32-bit reads, as we're not sure all devices will
	 * support these registers as 64-bit reads, while we know they'll
	 * support them as 32-bit reads.
	 */
	if (high == PCI_EINVAL32 && low == PCI_EINVAL32)
		return;

	serial[0] = low & 0xff;
	serial[1] = (low >> 8) & 0xff;
	serial[2] = (low >> 16) & 0xff;
	serial[3] = (low >> 24) & 0xff;
	serial[4] = high & 0xff;
	serial[5] = (high >> 8) & 0xff;
	serial[6] = (high >> 16) & 0xff;
	serial[7] = (high >> 24) & 0xff;

	(void) ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, "pcie-serial",
	    serial, sizeof (serial));
}
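
/*
 * Byte-order sketch (values are assumed, for illustration only): for a
 * device whose Serial Number capability reads high = 0x00144FFF and
 * low = 0xFF123456, the assignments above yield a "pcie-serial" property
 * of { 0x56, 0x34, 0x12, 0xFF, 0xFF, 0x4F, 0x14, 0x00 }, i.e. the EUI-64
 * is stored least significant byte first.
 */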

/*
 * PCI-Express child device initialization.  This function enables generic
 * pci-express interrupts and error handling.
 *
 * @param cdip child's dip (device's dip)
 * @return DDI_SUCCESS or DDI_FAILURE
 */
/* ARGSUSED */
int
pcie_initchild(dev_info_t *cdip)
{
	uint16_t	tmp16, reg16;
	pcie_bus_t	*bus_p;
	uint32_t	devid, venid;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL) {
		PCIE_DBG("%s: BUS not found.\n",
		    ddi_driver_name(cdip));

		return (DDI_FAILURE);
	}

	if (pcie_init_cfghdl(cdip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Update pcie_bus_t with the real Vendor ID and Device ID.
	 *
	 * For assigned devices in an IOV environment, the OBP will return
	 * faked device-id/vendor-id for both config-space reads and the
	 * properties in the root domain.  The translate_devid() function
	 * updates the properties with the real device-id/vendor-id on such
	 * platforms, so we can use the properties here to get the real ids
	 * and overwrite the faked ones.
	 *
	 * For unassigned devices or devices in a non-IOV environment, the
	 * operation below won't make a difference.
	 *
	 * The IOV implementation only supports assignment of PCIE endpoint
	 * devices.  Devices under pci-pci bridges don't need an operation
	 * like this.
	 */
	devid = ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
	    "device-id", -1);
	venid = ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
	    "vendor-id", -1);
	bus_p->bus_dev_ven_id = (devid << 16) | (venid & 0xffff);

	/* Clear the device's status register */
	reg16 = PCIE_GET(16, bus_p, PCI_CONF_STAT);
	PCIE_PUT(16, bus_p, PCI_CONF_STAT, reg16);

	/* Setup the device's command register */
	reg16 = PCIE_GET(16, bus_p, PCI_CONF_COMM);
	tmp16 = (reg16 & pcie_command_default_fw) | pcie_command_default;

#if defined(__i386) || defined(__amd64)
	boolean_t	empty_io_range = B_FALSE;
	boolean_t	empty_mem_range = B_FALSE;
	/*
	 * Check for empty IO and Mem ranges on bridges.  If so, disable
	 * IO/Mem access, as leaving it enabled can cause a hang.
	 */
	pcie_check_io_mem_range(bus_p->bus_cfg_hdl, &empty_io_range,
	    &empty_mem_range);
	if ((empty_io_range == B_TRUE) &&
	    (pcie_command_default & PCI_COMM_IO)) {
		tmp16 &= ~PCI_COMM_IO;
		PCIE_DBG("No I/O range found for %s, bdf 0x%x\n",
		    ddi_driver_name(cdip), bus_p->bus_bdf);
	}
	if ((empty_mem_range == B_TRUE) &&
	    (pcie_command_default & PCI_COMM_MAE)) {
		tmp16 &= ~PCI_COMM_MAE;
		PCIE_DBG("No Mem range found for %s, bdf 0x%x\n",
		    ddi_driver_name(cdip), bus_p->bus_bdf);
	}
#endif /* defined(__i386) || defined(__amd64) */

	if (pcie_serr_disable_flag && PCIE_IS_PCIE(bus_p))
		tmp16 &= ~PCI_COMM_SERR_ENABLE;

	PCIE_PUT(16, bus_p, PCI_CONF_COMM, tmp16);
	PCIE_DBG_CFG(cdip, bus_p, "COMMAND", 16, PCI_CONF_COMM, reg16);

	/*
	 * If the device has a bus control register then program it
	 * based on the settings in the command register.
	 */
	if (PCIE_IS_BDG(bus_p)) {
		/* Clear the device's secondary status register */
		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_SEC_STATUS);
		PCIE_PUT(16, bus_p, PCI_BCNF_SEC_STATUS, reg16);

		/* Setup the device's secondary command register */
		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_BCNTRL);
		tmp16 = (reg16 & pcie_bdg_command_default_fw);

		tmp16 |= PCI_BCNF_BCNTRL_SERR_ENABLE;
		/*
		 * Workaround for this Nvidia bridge.  Don't enable the SERR
		 * enable bit in the bridge control register, as it could
		 * lead to bogus NMIs.
		 */
		if (bus_p->bus_dev_ven_id == 0x037010DE)
			tmp16 &= ~PCI_BCNF_BCNTRL_SERR_ENABLE;

		if (pcie_command_default & PCI_COMM_PARITY_DETECT)
			tmp16 |= PCI_BCNF_BCNTRL_PARITY_ENABLE;

		/*
		 * Enable Master Abort Mode only if URs have not been masked.
		 * For PCI and PCIe-PCI bridges, enabling this bit causes
		 * Master Aborts/URs to be forwarded as a UR/TA or SERR.  If
		 * this bit is masked, posted requests are dropped and
		 * non-posted requests are returned with -1.
		 */
		if (pcie_aer_uce_mask & PCIE_AER_UCE_UR)
			tmp16 &= ~PCI_BCNF_BCNTRL_MAST_AB_MODE;
		else
			tmp16 |= PCI_BCNF_BCNTRL_MAST_AB_MODE;
		PCIE_PUT(16, bus_p, PCI_BCNF_BCNTRL, tmp16);
		PCIE_DBG_CFG(cdip, bus_p, "SEC CMD", 16, PCI_BCNF_BCNTRL,
		    reg16);
	}

	if (PCIE_IS_PCIE(bus_p)) {
		/* Setup PCIe device control register */
		reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
		/* note: MPS/MRRS are initialized in pcie_initchild_mps() */
		tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK));
		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(cdip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);

		/* Enable PCIe errors */
		pcie_enable_errors(cdip);

		pcie_determine_serial(cdip);
	}

	bus_p->bus_ari = B_FALSE;
	if ((pcie_ari_is_enabled(ddi_get_parent(cdip))
	    == PCIE_ARI_FORW_ENABLED) && (pcie_ari_device(cdip)
	    == PCIE_ARI_DEVICE)) {
		bus_p->bus_ari = B_TRUE;
	}

	if (pcie_initchild_mps(cdip) == DDI_FAILURE) {
		pcie_fini_cfghdl(cdip);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static void
pcie_init_pfd(dev_info_t *dip)
{
	pf_data_t	*pfd_p = PCIE_ZALLOC(pf_data_t);
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	PCIE_DIP2PFD(dip) = pfd_p;

	pfd_p->pe_bus_p = bus_p;
	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_orig_severity_flags = 0;
	pfd_p->pe_lock = B_FALSE;
	pfd_p->pe_valid = B_FALSE;

	/* Allocate the root fault struct for both RC and RP */
	if (PCIE_IS_ROOT(bus_p)) {
		PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
		PCIE_ROOT_EH_SRC(pfd_p) = PCIE_ZALLOC(pf_root_eh_src_t);
	}

	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
	PFD_AFFECTED_DEV(pfd_p) = PCIE_ZALLOC(pf_affected_dev_t);
	PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;

	if (PCIE_IS_BDG(bus_p))
		PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);

	if (PCIE_IS_PCIE(bus_p)) {
		PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);

		if (PCIE_IS_RP(bus_p))
			PCIE_RP_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_rp_err_regs_t);

		PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;

		if (PCIE_IS_RP(bus_p)) {
			PCIE_ADV_RP_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id =
			    PCIE_INVALID_BDF;
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id =
			    PCIE_INVALID_BDF;
		} else if (PCIE_IS_PCIE_BDG(bus_p)) {
			PCIE_ADV_BDG_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_adv_bdg_err_regs_t);
			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
			    PCIE_INVALID_BDF;
		}

		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			PCIX_BDG_ERR_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				PCIX_BDG_ECC_REG(pfd_p, 0) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
				PCIX_BDG_ECC_REG(pfd_p, 1) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
			}
		}
	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			PCIX_BDG_ERR_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				PCIX_BDG_ECC_REG(pfd_p, 0) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
				PCIX_BDG_ECC_REG(pfd_p, 1) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
			}
		} else {
			PCIX_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcix_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p))
				PCIX_ECC_REG(pfd_p) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
		}
	}
}

static void
pcie_fini_pfd(dev_info_t *dip)
{
	pf_data_t	*pfd_p = PCIE_DIP2PFD(dip);
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	if (PCIE_IS_PCIE(bus_p)) {
		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}

			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
			    sizeof (pf_pcix_bdg_err_regs_t));
		}

		if (PCIE_IS_RP(bus_p))
			kmem_free(PCIE_ADV_RP_REG(pfd_p),
			    sizeof (pf_pcie_adv_rp_err_regs_t));
		else if (PCIE_IS_PCIE_BDG(bus_p))
			kmem_free(PCIE_ADV_BDG_REG(pfd_p),
			    sizeof (pf_pcie_adv_bdg_err_regs_t));

		kmem_free(PCIE_ADV_REG(pfd_p),
		    sizeof (pf_pcie_adv_err_regs_t));

		if (PCIE_IS_RP(bus_p))
			kmem_free(PCIE_RP_REG(pfd_p),
			    sizeof (pf_pcie_rp_err_regs_t));

		kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}

			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
			    sizeof (pf_pcix_bdg_err_regs_t));
		} else {
			if (PCIX_ECC_VERSION_CHECK(bus_p))
				kmem_free(PCIX_ECC_REG(pfd_p),
				    sizeof (pf_pcix_ecc_regs_t));

			kmem_free(PCIX_ERR_REG(pfd_p),
			    sizeof (pf_pcix_err_regs_t));
		}
	}

	if (PCIE_IS_BDG(bus_p))
		kmem_free(PCI_BDG_ERR_REG(pfd_p),
		    sizeof (pf_pci_bdg_err_regs_t));

	kmem_free(PFD_AFFECTED_DEV(pfd_p), sizeof (pf_affected_dev_t));
	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));

	if (PCIE_IS_ROOT(bus_p)) {
		kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
		kmem_free(PCIE_ROOT_EH_SRC(pfd_p), sizeof (pf_root_eh_src_t));
	}

	kmem_free(PCIE_DIP2PFD(dip), sizeof (pf_data_t));

	PCIE_DIP2PFD(dip) = NULL;
}


/*
 * Special functions to allocate pf_data_t's for PCIe root complexes.
 * Note: Root Complex, not Root Port.
 */
void
pcie_rc_init_pfd(dev_info_t *dip, pf_data_t *pfd_p)
{
	pfd_p->pe_bus_p = PCIE_DIP2DOWNBUS(dip);
	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_orig_severity_flags = 0;
	pfd_p->pe_lock = B_FALSE;
	pfd_p->pe_valid = B_FALSE;

	PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
	PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
	PCIE_ROOT_EH_SRC(pfd_p) = PCIE_ZALLOC(pf_root_eh_src_t);
	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
	PFD_AFFECTED_DEV(pfd_p) = PCIE_ZALLOC(pf_affected_dev_t);
	PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;
	PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);
	PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);
	PCIE_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_rp_err_regs_t);
	PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
	PCIE_ADV_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
	PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id = PCIE_INVALID_BDF;
	PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id = PCIE_INVALID_BDF;

	PCIE_ADV_REG(pfd_p)->pcie_ue_sev = pcie_aer_uce_severity;
}

void
pcie_rc_fini_pfd(pf_data_t *pfd_p)
{
	kmem_free(PCIE_ADV_RP_REG(pfd_p), sizeof (pf_pcie_adv_rp_err_regs_t));
	kmem_free(PCIE_ADV_REG(pfd_p), sizeof (pf_pcie_adv_err_regs_t));
	kmem_free(PCIE_RP_REG(pfd_p), sizeof (pf_pcie_rp_err_regs_t));
	kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
	kmem_free(PCI_BDG_ERR_REG(pfd_p), sizeof (pf_pci_bdg_err_regs_t));
	kmem_free(PFD_AFFECTED_DEV(pfd_p), sizeof (pf_affected_dev_t));
	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));
	kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
	kmem_free(PCIE_ROOT_EH_SRC(pfd_p), sizeof (pf_root_eh_src_t));
}
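
/*
 * Usage sketch (hypothetical root complex driver code, shown only to
 * illustrate the expected pairing): the RC driver owns the pf_data_t
 * storage and brackets its lifetime with the two calls above, e.g.:
 *
 *	pf_data_t *pfd_p = PCIE_ZALLOC(pf_data_t);
 *	pcie_rc_init_pfd(dip, pfd_p);
 *	...
 *	pcie_rc_fini_pfd(pfd_p);
 *	kmem_free(pfd_p, sizeof (pf_data_t));
 */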

/*
 * init pcie_bus_t for root complex
 *
 * Only a few of the fields in bus_t are valid for a root complex.
 * The fields that are bracketed are initialized in this routine:
 *
 * dev_info_t *		<bus_dip>
 * dev_info_t *		bus_rp_dip
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		<bus_fm_flags>
 * pcie_req_id_t	bus_bdf
 * pcie_req_id_t	bus_rp_bdf
 * uint32_t		bus_dev_ven_id
 * uint8_t		bus_rev_id
 * uint8_t		<bus_hdr_type>
 * uint16_t		<bus_dev_type>
 * uint8_t		bus_bdg_secbus
 * uint16_t		bus_pcie_off
 * uint16_t		<bus_aer_off>
 * uint16_t		bus_pcix_off
 * uint16_t		bus_ecc_ver
 * pci_bus_range_t	bus_bus_range
 * ppb_ranges_t *	bus_addr_ranges
 * int			bus_addr_entries
 * pci_regspec_t *	bus_assigned_addr
 * int			bus_assigned_entries
 * pf_data_t *		bus_pfd
 * pcie_domain_t *	<bus_dom>
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void *		bus_plat_private
 */
void
pcie_rc_init_bus(dev_info_t *dip)
{
	pcie_bus_t *bus_p;

	bus_p = (pcie_bus_t *)kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);
	bus_p->bus_dip = dip;
	bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_RC_PSEUDO;
	bus_p->bus_hdr_type = PCI_HEADER_ONE;

	/* Fake that there are AER logs */
	bus_p->bus_aer_off = (uint16_t)-1;

	/* Needed only for handle lookup */
	bus_p->bus_fm_flags |= PF_FM_READY;

	ndi_set_bus_private(dip, B_FALSE, DEVI_PORT_TYPE_PCI, bus_p);

	PCIE_BUS2DOM(bus_p) = PCIE_ZALLOC(pcie_domain_t);
}

void
pcie_rc_fini_bus(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2DOWNBUS(dip);
	ndi_set_bus_private(dip, B_FALSE, 0, NULL);
	kmem_free(PCIE_BUS2DOM(bus_p), sizeof (pcie_domain_t));
	kmem_free(bus_p, sizeof (pcie_bus_t));
}

/*
 * partially init pcie_bus_t for device (dip,bdf) for accessing pci
 * config space
 *
 * This routine is invoked during boot, either after creating a devinfo node
 * (x86 case) or during px driver attach (sparc case); it is also invoked
 * in hotplug context after a devinfo node is created.
 *
 * The fields that are bracketed are initialized if flag PCIE_BUS_INITIAL
 * is set:
 *
 * dev_info_t *		<bus_dip>
 * dev_info_t *		<bus_rp_dip>
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		bus_fm_flags
 * pcie_req_id_t	<bus_bdf>
 * pcie_req_id_t	<bus_rp_bdf>
 * uint32_t		<bus_dev_ven_id>
 * uint8_t		<bus_rev_id>
 * uint8_t		<bus_hdr_type>
 * uint16_t		<bus_dev_type>
 * uint8_t		<bus_bdg_secbus>
 * uint16_t		<bus_pcie_off>
 * uint16_t		<bus_aer_off>
 * uint16_t		<bus_pcix_off>
 * uint16_t		<bus_ecc_ver>
 * pci_bus_range_t	bus_bus_range
 * ppb_ranges_t *	bus_addr_ranges
 * int			bus_addr_entries
 * pci_regspec_t *	bus_assigned_addr
 * int			bus_assigned_entries
 * pf_data_t *		bus_pfd
 * pcie_domain_t *	bus_dom
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void *		bus_plat_private
 *
 * The fields that are bracketed are initialized if flag PCIE_BUS_FINAL
 * is set:
 *
 * dev_info_t *		bus_dip
 * dev_info_t *		bus_rp_dip
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		bus_fm_flags
 * pcie_req_id_t	bus_bdf
 * pcie_req_id_t	bus_rp_bdf
 * uint32_t		bus_dev_ven_id
 * uint8_t		bus_rev_id
 * uint8_t		bus_hdr_type
 * uint16_t		bus_dev_type
 * uint8_t		<bus_bdg_secbus>
 * uint16_t		bus_pcie_off
 * uint16_t		bus_aer_off
 * uint16_t		bus_pcix_off
 * uint16_t		bus_ecc_ver
 * pci_bus_range_t	<bus_bus_range>
 * ppb_ranges_t *	<bus_addr_ranges>
 * int			<bus_addr_entries>
 * pci_regspec_t *	<bus_assigned_addr>
 * int			<bus_assigned_entries>
 * pf_data_t *		<bus_pfd>
 * pcie_domain_t *	bus_dom
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void *		<bus_plat_private>
 */

pcie_bus_t *
pcie_init_bus(dev_info_t *dip, pcie_req_id_t bdf, uint8_t flags)
{
	uint16_t	status, base, baseptr, num_cap;
	uint32_t	capid;
	int		range_size;
	pcie_bus_t	*bus_p;
	dev_info_t	*rcdip;
	dev_info_t	*pdip;
	const char	*errstr = NULL;

	if (!(flags & PCIE_BUS_INITIAL))
		goto initial_done;

	bus_p = kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);

	bus_p->bus_dip = dip;
	bus_p->bus_bdf = bdf;

	rcdip = pcie_get_rc_dip(dip);
	ASSERT(rcdip != NULL);

	/* Save the Vendor ID, Device ID and revision ID */
	bus_p->bus_dev_ven_id = pci_cfgacc_get32(rcdip, bdf, PCI_CONF_VENID);
	bus_p->bus_rev_id = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_REVID);
	/* Save the Header Type */
	bus_p->bus_hdr_type = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_HEADER);
	bus_p->bus_hdr_type &= PCI_HEADER_TYPE_M;

	/*
	 * Figure out the device type and all the relevant capability offsets
	 */
	/* set default value */
	bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;

	status = pci_cfgacc_get16(rcdip, bdf, PCI_CONF_STAT);
	if (status == PCI_CAP_EINVAL16 || !(status & PCI_STAT_CAP))
		goto caps_done; /* capability not supported */

	/* Relevant conventional capabilities first */

	/* Conventional caps: PCI_CAP_ID_PCI_E, PCI_CAP_ID_PCIX */
	num_cap = 2;

	switch (bus_p->bus_hdr_type) {
	case PCI_HEADER_ZERO:
		baseptr = PCI_CONF_CAP_PTR;
		break;
	case PCI_HEADER_PPB:
		baseptr = PCI_BCNF_CAP_PTR;
		break;
	case PCI_HEADER_CARDBUS:
		baseptr = PCI_CBUS_CAP_PTR;
		break;
	default:
		cmn_err(CE_WARN, "%s: unexpected pci header type:%x",
		    __func__, bus_p->bus_hdr_type);
		goto caps_done;
	}

	base = baseptr;
	for (base = pci_cfgacc_get8(rcdip, bdf, base); base && num_cap;
	    base = pci_cfgacc_get8(rcdip, bdf, base + PCI_CAP_NEXT_PTR)) {
		capid = pci_cfgacc_get8(rcdip, bdf, base);
		switch (capid) {
		case PCI_CAP_ID_PCI_E:
			bus_p->bus_pcie_off = base;
			bus_p->bus_dev_type = pci_cfgacc_get16(rcdip, bdf,
			    base + PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;

			/* Check and save PCIe hotplug capability information */
			if ((PCIE_IS_RP(bus_p) || PCIE_IS_SWD(bus_p)) &&
			    (pci_cfgacc_get16(rcdip, bdf, base + PCIE_PCIECAP)
			    & PCIE_PCIECAP_SLOT_IMPL) &&
			    (pci_cfgacc_get32(rcdip, bdf, base + PCIE_SLOTCAP)
			    & PCIE_SLOTCAP_HP_CAPABLE))
				bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE;

			num_cap--;
			break;
		case PCI_CAP_ID_PCIX:
			bus_p->bus_pcix_off = base;
			if (PCIE_IS_BDG(bus_p))
				bus_p->bus_ecc_ver =
				    pci_cfgacc_get16(rcdip, bdf, base +
				    PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
			else
				bus_p->bus_ecc_ver =
				    pci_cfgacc_get16(rcdip, bdf, base +
				    PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
			num_cap--;
			break;
		default:
			break;
		}
	}

	/* Check and save PCI hotplug (SHPC) capability information */
	if (PCIE_IS_BDG(bus_p)) {
		base = baseptr;
		for (base = pci_cfgacc_get8(rcdip, bdf, base);
		    base; base = pci_cfgacc_get8(rcdip, bdf,
		    base + PCI_CAP_NEXT_PTR)) {
			capid = pci_cfgacc_get8(rcdip, bdf, base);
			if (capid == PCI_CAP_ID_PCI_HOTPLUG) {
				bus_p->bus_pci_hp_off = base;
				bus_p->bus_hp_sup_modes |= PCIE_PCI_HP_MODE;
				break;
			}
		}
	}

	/* Then, relevant extended capabilities */

	if (!PCIE_IS_PCIE(bus_p))
		goto caps_done;

	/* Extended caps: PCIE_EXT_CAP_ID_AER */
	for (base = PCIE_EXT_CAP; base; base = (capid >>
	    PCIE_EXT_CAP_NEXT_PTR_SHIFT) & PCIE_EXT_CAP_NEXT_PTR_MASK) {
		capid = pci_cfgacc_get32(rcdip, bdf, base);
		if (capid == PCI_CAP_EINVAL32)
			break;
		if (((capid >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK)
		    == PCIE_EXT_CAP_ID_AER) {
			bus_p->bus_aer_off = base;
			break;
		}
	}

caps_done:
	/* save RP dip and RP bdf */
	if (PCIE_IS_RP(bus_p)) {
		bus_p->bus_rp_dip = dip;
		bus_p->bus_rp_bdf = bus_p->bus_bdf;
	} else {
		for (pdip = ddi_get_parent(dip); pdip;
		    pdip = ddi_get_parent(pdip)) {
			pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip);

			/*
			 * If RP dip and RP bdf in the parent's bus_t have
			 * been initialized, simply use these instead of
			 * continuing up to the RC.
			 */
			if (parent_bus_p->bus_rp_dip != NULL) {
				bus_p->bus_rp_dip = parent_bus_p->bus_rp_dip;
				bus_p->bus_rp_bdf = parent_bus_p->bus_rp_bdf;
				break;
			}

			/*
			 * When debugging, be aware that some NVIDIA x86
			 * architectures have two nodes for each RP, one at
			 * Bus 0x0 and one at Bus 0x80.  The requester is
			 * from Bus 0x80.
			 */
			if (PCIE_IS_ROOT(parent_bus_p)) {
				bus_p->bus_rp_dip = pdip;
				bus_p->bus_rp_bdf = parent_bus_p->bus_bdf;
				break;
			}
		}
	}

	bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;
	bus_p->bus_fm_flags = 0;
	bus_p->bus_mps = 0;

	ndi_set_bus_private(dip, B_TRUE, DEVI_PORT_TYPE_PCI, (void *)bus_p);

	if (PCIE_IS_HOTPLUG_CAPABLE(dip))
		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE, dip,
		    "hotplug-capable");

initial_done:
	if (!(flags & PCIE_BUS_FINAL))
		goto final_done;

	/* already initialized? */
	bus_p = PCIE_DIP2BUS(dip);

	/* Save the Range information if device is a switch/bridge */
	if (PCIE_IS_BDG(bus_p)) {
		/* get "bus_range" property */
		range_size = sizeof (pci_bus_range_t);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "bus-range", (caddr_t)&bus_p->bus_bus_range, &range_size)
		    != DDI_PROP_SUCCESS) {
			errstr = "Cannot find \"bus-range\" property";
			cmn_err(CE_WARN,
			    "PCIE init err info failed BDF 0x%x:%s\n",
			    bus_p->bus_bdf, errstr);
		}

		/* get secondary bus number */
		rcdip = pcie_get_rc_dip(dip);
		ASSERT(rcdip != NULL);

		bus_p->bus_bdg_secbus = pci_cfgacc_get8(rcdip,
		    bus_p->bus_bdf, PCI_BCNF_SECBUS);

		/* Get "ranges" property */
		if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "ranges", (caddr_t)&bus_p->bus_addr_ranges,
		    &bus_p->bus_addr_entries) != DDI_PROP_SUCCESS)
			bus_p->bus_addr_entries = 0;
		bus_p->bus_addr_entries /= sizeof (ppb_ranges_t);
	}

	/* save "assigned-addresses" property array, ignore failures */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "assigned-addresses", (caddr_t)&bus_p->bus_assigned_addr,
	    &bus_p->bus_assigned_entries) == DDI_PROP_SUCCESS)
		bus_p->bus_assigned_entries /= sizeof (pci_regspec_t);
	else
		bus_p->bus_assigned_entries = 0;

	pcie_init_pfd(dip);

	pcie_init_plat(dip);

final_done:

	PCIE_DBG("Add %s(dip 0x%p, bdf 0x%x, secbus 0x%x)\n",
	    ddi_driver_name(dip), (void *)dip, bus_p->bus_bdf,
	    bus_p->bus_bdg_secbus);
#ifdef DEBUG
	pcie_print_bus(bus_p);
#endif

	return (bus_p);
}
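
/*
 * Call-sequence sketch (illustrative): a caller that needs config access
 * before the property-derived fields can be filled in may split the two
 * phases of pcie_init_bus():
 *
 *	bus_p = pcie_init_bus(dip, bdf, PCIE_BUS_INITIAL);
 *	... set up "ranges"/"assigned-addresses" properties ...
 *	bus_p = pcie_init_bus(dip, bdf, PCIE_BUS_FINAL);
 *
 * while callers with everything at hand pass both flags at once
 * (PCIE_BUS_ALL, assumed here to be PCIE_BUS_INITIAL | PCIE_BUS_FINAL).
 */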

/*
 * Invoked before destroying a devinfo node, mostly during hotplug
 * operation, to free the pcie_bus_t data structure.
 */
/* ARGSUSED */
void
pcie_fini_bus(dev_info_t *dip, uint8_t flags)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	ASSERT(bus_p);

	if (flags & PCIE_BUS_INITIAL) {
		pcie_fini_plat(dip);
		pcie_fini_pfd(dip);

		kmem_free(bus_p->bus_assigned_addr,
		    (sizeof (pci_regspec_t) * bus_p->bus_assigned_entries));
		kmem_free(bus_p->bus_addr_ranges,
		    (sizeof (ppb_ranges_t) * bus_p->bus_addr_entries));
		/* zero out the fields that have been destroyed */
		bus_p->bus_assigned_addr = NULL;
		bus_p->bus_addr_ranges = NULL;
		bus_p->bus_assigned_entries = 0;
		bus_p->bus_addr_entries = 0;
	}

	if (flags & PCIE_BUS_FINAL) {
		if (PCIE_IS_HOTPLUG_CAPABLE(dip)) {
			(void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
			    "hotplug-capable");
		}

		ndi_set_bus_private(dip, B_TRUE, 0, NULL);
		kmem_free(bus_p, sizeof (pcie_bus_t));
	}
}

int
pcie_postattach_child(dev_info_t *cdip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip);

	if (!bus_p)
		return (DDI_FAILURE);

	return (pcie_enable_ce(cdip));
}

/*
 * PCI-Express child device de-initialization.
 * This function disables generic pci-express interrupts and error
 * handling.
 */
void
pcie_uninitchild(dev_info_t *cdip)
{
	pcie_disable_errors(cdip);
	pcie_fini_cfghdl(cdip);
	pcie_fini_dom(cdip);
}

/*
 * Find the root complex dip.
 */
dev_info_t *
pcie_get_rc_dip(dev_info_t *dip)
{
	dev_info_t *rcdip;
	pcie_bus_t *rc_bus_p;

	for (rcdip = ddi_get_parent(dip); rcdip;
	    rcdip = ddi_get_parent(rcdip)) {
		rc_bus_p = PCIE_DIP2BUS(rcdip);
		if (rc_bus_p && PCIE_IS_RC(rc_bus_p))
			break;
	}

	return (rcdip);
}

static boolean_t
pcie_is_pci_device(dev_info_t *dip)
{
	dev_info_t	*pdip;
	char		*device_type;

	pdip = ddi_get_parent(dip);
	ASSERT(pdip);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS,
	    "device_type", &device_type) != DDI_PROP_SUCCESS)
		return (B_FALSE);

	if (strcmp(device_type, "pciex") != 0 &&
	    strcmp(device_type, "pci") != 0) {
		ddi_prop_free(device_type);
		return (B_FALSE);
	}

	ddi_prop_free(device_type);
	return (B_TRUE);
}

typedef struct {
	boolean_t	init;
	uint8_t		flags;
} pcie_bus_arg_t;

/*ARGSUSED*/
static int
pcie_fab_do_init_fini(dev_info_t *dip, void *arg)
{
	pcie_req_id_t	bdf;
	pcie_bus_arg_t	*bus_arg = (pcie_bus_arg_t *)arg;

	if (!pcie_is_pci_device(dip))
		goto out;

	if (bus_arg->init) {
		if (pcie_get_bdf_from_dip(dip, &bdf) != DDI_SUCCESS)
			goto out;

		(void) pcie_init_bus(dip, bdf, bus_arg->flags);
	} else {
		(void) pcie_fini_bus(dip, bus_arg->flags);
	}

	return (DDI_WALK_CONTINUE);

out:
	return (DDI_WALK_PRUNECHILD);
}

void
pcie_fab_init_bus(dev_info_t *rcdip, uint8_t flags)
{
	int		circular_count;
	dev_info_t	*dip = ddi_get_child(rcdip);
	pcie_bus_arg_t	arg;

	arg.init = B_TRUE;
	arg.flags = flags;

	ndi_devi_enter(rcdip, &circular_count);
	ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
	ndi_devi_exit(rcdip, circular_count);
}

void
pcie_fab_fini_bus(dev_info_t *rcdip, uint8_t flags)
{
	int		circular_count;
	dev_info_t	*dip = ddi_get_child(rcdip);
	pcie_bus_arg_t	arg;

	arg.init = B_FALSE;
	arg.flags = flags;

	ndi_devi_enter(rcdip, &circular_count);
	ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
	ndi_devi_exit(rcdip, circular_count);
}

void
pcie_enable_errors(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	reg16, tmp16;
	uint32_t	reg32, tmp32;

	ASSERT(bus_p);

	/*
	 * Clear any pending errors
	 */
	pcie_clear_errors(dip);

	if (!PCIE_IS_PCIE(bus_p))
		return;

	/*
	 * Enable Baseline Error Handling but leave CE reporting off (poweron
	 * default).
	 */
	if ((reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL)) !=
	    PCI_CAP_EINVAL16) {
		tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_base_err_default & (~PCIE_DEVCTL_CE_REPORTING_EN));

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);
	}

	/* Enable Root Port Baseline Error Receiving */
	if (PCIE_IS_ROOT(bus_p) &&
	    (reg16 = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL)) !=
	    PCI_CAP_EINVAL16) {

		tmp16 = pcie_serr_disable_flag ?
		    (pcie_root_ctrl_default & ~PCIE_ROOT_SYS_ERR) :
		    pcie_root_ctrl_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "ROOT DEVCTL", 16, PCIE_ROOTCTL,
		    reg16);
	}

	/*
	 * Enable PCI-Express Advanced Error Handling if it exists
	 */
	if (!PCIE_HAS_AER(bus_p))
		return;

	/* Set Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE SEV", 32, PCIE_AER_UCE_SERV,
		    reg32);
	}

	/* Enable Uncorrectable errors */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_mask;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE MASK", 32, PCIE_AER_UCE_MASK,
		    reg32);
	}

	/* Enable ECRC generation and checking */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = reg32 | pcie_ecrc_value;
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER CTL", 32, PCIE_AER_CTL, reg32);
	}

	/* Enable Secondary Uncorrectable errors if this is a bridge */
	if (!PCIE_IS_PCIE_BDG(bus_p))
		goto root;

	/* Set Secondary Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_suce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE SEV", 32, PCIE_AER_SUCE_SERV,
		    reg32);
	}

	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, pcie_aer_suce_mask);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE MASK", 32,
		    PCIE_AER_SUCE_MASK, reg32);
	}

root:
	/*
	 * Enable Root Control if this is a Root device
	 */
	if (!PCIE_IS_ROOT(bus_p))
		return;

	if ((reg16 = PCIE_AER_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
	    PCI_CAP_EINVAL16) {
		PCIE_AER_PUT(16, bus_p, PCIE_AER_RE_CMD,
		    pcie_root_error_cmd_default);
		PCIE_DBG_AER(dip, bus_p, "AER Root Err Cmd", 16,
		    PCIE_AER_RE_CMD, reg16);
	}
}
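
/*
 * Ordering sketch (illustrative): a nexus driver bringing up error
 * handling for a child typically calls
 *
 *	pcie_enable_errors(cdip);	(baseline + AER, CE left off)
 *	(void) pcie_enable_ce(cdip);	(then CE reporting and mask)
 *
 * in that order, which is what pcie_initchild() and
 * pcie_postattach_child() do between them.
 */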

/*
 * This function is used for enabling CE reporting and setting the AER CE
 * mask.  When called from outside the pcie module it should always be
 * preceded by a call to pcie_enable_errors.
 */
int
pcie_enable_ce(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	device_sts, device_ctl;
	uint32_t	tmp_pcie_aer_ce_mask;

	if (!PCIE_IS_PCIE(bus_p))
		return (DDI_SUCCESS);

	/*
	 * The "pcie_ce_mask" property is used to control both the CE
	 * reporting enable field in the device control register and the AER
	 * CE mask.  We leave CE reporting disabled if pcie_ce_mask is set
	 * to -1.
	 */

	tmp_pcie_aer_ce_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "pcie_ce_mask", pcie_aer_ce_mask);

	if (tmp_pcie_aer_ce_mask == (uint32_t)-1) {
		/*
		 * Nothing to do since CE reporting has already been disabled.
		 */
		return (DDI_SUCCESS);
	}

	if (PCIE_HAS_AER(bus_p)) {
		/* Enable AER CE */
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, tmp_pcie_aer_ce_mask);
		PCIE_DBG_AER(dip, bus_p, "AER CE MASK", 32, PCIE_AER_CE_MASK,
		    0);

		/* Clear any pending AER CE errors */
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_STS, -1);
	}

	/* clear any pending CE errors */
	if ((device_sts = PCIE_CAP_GET(16, bus_p, PCIE_DEVSTS)) !=
	    PCI_CAP_EINVAL16)
		PCIE_CAP_PUT(16, bus_p, PCIE_DEVSTS,
		    device_sts & (~PCIE_DEVSTS_CE_DETECTED));

	/* Enable CE reporting */
	device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL,
	    (device_ctl & (~PCIE_DEVCTL_ERR_MASK)) | pcie_base_err_default);
	PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, device_ctl);

	return (DDI_SUCCESS);
}

/* ARGSUSED */
void
pcie_disable_errors(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	device_ctl;
	uint32_t	aer_reg;

	if (!PCIE_IS_PCIE(bus_p))
		return;

	/*
	 * Disable PCI-Express Baseline Error Handling
	 */
	device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
	device_ctl &= ~PCIE_DEVCTL_ERR_MASK;
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, device_ctl);

	/*
	 * Disable PCI-Express Advanced Error Handling if it exists
	 */
	if (!PCIE_HAS_AER(bus_p))
		goto root;

	/* Disable Uncorrectable errors */
	PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, PCIE_AER_UCE_BITS);

	/* Disable Correctable errors */
	PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, PCIE_AER_CE_BITS);

	/* Disable ECRC generation and checking */
	if ((aer_reg = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
	    PCI_CAP_EINVAL32) {
		aer_reg &= ~(PCIE_AER_CTL_ECRC_GEN_ENA |
		    PCIE_AER_CTL_ECRC_CHECK_ENA);

		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, aer_reg);
	}
	/*
	 * Disable Secondary Uncorrectable errors if this is a bridge
	 */
	if (!PCIE_IS_PCIE_BDG(bus_p))
		goto root;

	PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, PCIE_AER_SUCE_BITS);

root:
	/*
	 * Disable Root Control if this is a Root device
	 */
	if (!PCIE_IS_ROOT(bus_p))
		return;

	if (!pcie_serr_disable_flag) {
		device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL);
		device_ctl &= ~PCIE_ROOT_SYS_ERR;
		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, device_ctl);
	}

	if (!PCIE_HAS_AER(bus_p))
		return;

	if ((device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
	    PCI_CAP_EINVAL16) {
		device_ctl &= ~pcie_root_error_cmd_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_AER_RE_CMD, device_ctl);
	}
}

/*
 * Extract bdf from the "reg" property.
 */
int
pcie_get_bdf_from_dip(dev_info_t *dip, pcie_req_id_t *bdf)
{
	pci_regspec_t	*regspec;
	int		reglen;

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", (int **)&regspec, (uint_t *)&reglen) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (reglen < (sizeof (pci_regspec_t) / sizeof (int))) {
		ddi_prop_free(regspec);
		return (DDI_FAILURE);
	}

	/* Get phys_hi from the first element.  All have the same bdf. */
	*bdf = (regspec->pci_phys_hi & (PCI_REG_BDFR_M ^ PCI_REG_REG_M)) >> 8;

	ddi_prop_free(regspec);
	return (DDI_SUCCESS);
}

dev_info_t *
pcie_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t *cdip = rdip;

	for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
		;

	return (cdip);
}

uint32_t
pcie_get_bdf_for_dma_xfer(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t *cdip;

	/*
	 * As part of the probing, the PCI fcode interpreter may setup a DMA
	 * request if a given card has fcode on it, using the dip and rdip
	 * of the hotplug connector, i.e. the dip and rdip of the px/pcieb
	 * driver.  In this case, return an invalid value for the bdf since
	 * we cannot get to the bdf value of the actual device which will be
	 * initiating this DMA.
	 */
	if (rdip == dip)
		return (PCIE_INVALID_BDF);

	cdip = pcie_get_my_childs_dip(dip, rdip);

	/*
	 * For a given rdip, return the bdf value of dip's (px or pcieb)
	 * immediate child or secondary bus-id if dip is a PCIe2PCI bridge.
	 *
	 * XXX - For now, return an invalid bdf value for all PCI and PCI-X
	 * devices since this needs more work.
	 */
	return (PCI_GET_PCIE2PCI_SECBUS(cdip) ?
	    PCIE_INVALID_BDF : PCI_GET_BDF(cdip));
}

uint32_t
pcie_get_aer_uce_mask()
{
	return (pcie_aer_uce_mask);
}
uint32_t
pcie_get_aer_ce_mask()
{
	return (pcie_aer_ce_mask);
}
uint32_t
pcie_get_aer_suce_mask()
{
	return (pcie_aer_suce_mask);
}
uint32_t
pcie_get_serr_mask()
{
	return (pcie_serr_disable_flag);
}

void
pcie_set_aer_uce_mask(uint32_t mask)
{
	pcie_aer_uce_mask = mask;
	if (mask & PCIE_AER_UCE_UR)
		pcie_base_err_default &= ~PCIE_DEVCTL_UR_REPORTING_EN;
	else
		pcie_base_err_default |= PCIE_DEVCTL_UR_REPORTING_EN;

	if (mask & PCIE_AER_UCE_ECRC)
		pcie_ecrc_value = 0;
}

void
pcie_set_aer_ce_mask(uint32_t mask)
{
	pcie_aer_ce_mask = mask;
}
void
pcie_set_aer_suce_mask(uint32_t mask)
{
	pcie_aer_suce_mask = mask;
}
void
pcie_set_serr_mask(uint32_t mask)
{
	pcie_serr_disable_flag = mask;
}

/*
 * Is rdip a child of dip?  Used to keep certain CTLOPS from bubbling up
 * erroneously, e.g. ISA ctlops to a PCI-PCI bridge.
 */
boolean_t
pcie_is_child(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t *cdip = ddi_get_child(dip);
	for (; cdip; cdip = ddi_get_next_sibling(cdip))
		if (cdip == rdip)
			break;
	return (cdip != NULL);
}

boolean_t
pcie_is_link_disabled(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	if (PCIE_IS_PCIE(bus_p)) {
		if (PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL) &
		    PCIE_LINKCTL_LINK_DISABLE)
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Initialize the MPS for a root port.
 *
 * dip - dip of root port device.
 */
void
pcie_init_root_port_mps(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	int		rp_cap, max_supported = pcie_max_mps;

	(void) pcie_get_fabric_mps(ddi_get_parent(dip),
	    ddi_get_child(dip), &max_supported);

	rp_cap = PCI_CAP_GET16(bus_p->bus_cfg_hdl, 0,
	    bus_p->bus_pcie_off, PCIE_DEVCAP) &
	    PCIE_DEVCAP_MAX_PAYLOAD_MASK;

	if (rp_cap < max_supported)
		max_supported = rp_cap;

	bus_p->bus_mps = max_supported;
	(void) pcie_initchild_mps(dip);
}

/*
 * Initialize the Maximum Payload Size of a device.
 *
 * cdip - dip of device.
 *
 * returns - DDI_SUCCESS or DDI_FAILURE
 */
int
pcie_initchild_mps(dev_info_t *cdip)
{
	pcie_bus_t	*bus_p;
	dev_info_t	*pdip = ddi_get_parent(cdip);
	uint8_t		dev_type;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL) {
		PCIE_DBG("%s: BUS not found.\n",
		    ddi_driver_name(cdip));
		return (DDI_FAILURE);
	}

	dev_type = bus_p->bus_dev_type;

	/*
	 * For ARI Devices, only function zero's MPS needs to be set.
	 */
	if ((dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) &&
	    (pcie_ari_is_enabled(pdip) == PCIE_ARI_FORW_ENABLED)) {
		pcie_req_id_t child_bdf;

		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
			return (DDI_FAILURE);
		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) != 0)
			return (DDI_SUCCESS);
	}

	if (PCIE_IS_PCIE(bus_p)) {
		int suggested_mrrs, fabric_mps;
		uint16_t device_mps, device_mps_cap, device_mrrs, dev_ctrl;

		dev_ctrl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
		if ((fabric_mps = (PCIE_IS_RP(bus_p) ? bus_p :
		    PCIE_DIP2BUS(pdip))->bus_mps) < 0) {
			dev_ctrl = (dev_ctrl & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
			    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
			    (pcie_devctl_default &
			    (PCIE_DEVCTL_MAX_READ_REQ_MASK |
			    PCIE_DEVCTL_MAX_PAYLOAD_MASK));

			PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);
			return (DDI_SUCCESS);
		}

		device_mps_cap = PCIE_CAP_GET(16, bus_p, PCIE_DEVCAP) &
		    PCIE_DEVCAP_MAX_PAYLOAD_MASK;

		device_mrrs = (dev_ctrl & PCIE_DEVCTL_MAX_READ_REQ_MASK) >>
		    PCIE_DEVCTL_MAX_READ_REQ_SHIFT;

		if (device_mps_cap < fabric_mps)
			device_mrrs = device_mps = device_mps_cap;
		else
			device_mps = (uint16_t)fabric_mps;

		suggested_mrrs = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
		    cdip, DDI_PROP_DONTPASS, "suggested-mrrs", device_mrrs);

		if ((device_mps == fabric_mps) ||
		    (suggested_mrrs < device_mrrs))
			device_mrrs = (uint16_t)suggested_mrrs;

		/*
		 * Replace the MPS and MRRS settings.
		 */
		dev_ctrl &= ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK);

		dev_ctrl |= ((device_mrrs << PCIE_DEVCTL_MAX_READ_REQ_SHIFT) |
		    device_mps << PCIE_DEVCTL_MAX_PAYLOAD_SHIFT);

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);

		bus_p->bus_mps = device_mps;
	}

	return (DDI_SUCCESS);
}
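
/*
 * Worked example (illustrative): MPS values here are the PCIe encodings
 * (0 = 128B, 1 = 256B, 2 = 512B, ...).  If the fabric settles on
 * bus_mps = 1 (256B) and a child advertises device_mps_cap = 2 (512B),
 * the child is programmed down to the fabric value, device_mps = 1.  A
 * child advertising device_mps_cap = 0 (128B) is instead programmed with
 * its own limit, and its MRRS is clamped to 128B as well.
 */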
1943 /*
1944  * Called as part of the Maximum Payload Size scan.
1945  */
1946 static int
1947 pcie_get_max_supported(dev_info_t *dip, void *arg)
1948 {
1949 	uint32_t max_supported;
1950 	uint16_t cap_ptr;
1951 	pcie_max_supported_t *current = (pcie_max_supported_t *)arg;
1952 	pci_regspec_t *reg;
1953 	int rlen;
1954 	caddr_t virt;
1955 	ddi_acc_handle_t config_handle;
1956 
1957 	if (ddi_get_child(current->dip) == NULL) {
1958 		goto fail1;
1959 	}
1960 
1961 	if (pcie_dev(dip) == DDI_FAILURE) {
1962 		PCIE_DBG("MPS: pcie_get_max_supported: %s: "
1963 		    "Not a PCIe dev\n", ddi_driver_name(dip));
1964 		goto fail1;
1965 	}
1966 
1967 	/*
1968 	 * If the suggested-mrrs property exists, then don't include this
1969 	 * device in the MPS capabilities scan.
1970 	 */
1971 	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1972 	    "suggested-mrrs") != 0)
1973 		goto fail1;
1974 
1975 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
1976 	    (caddr_t)&reg, &rlen) != DDI_PROP_SUCCESS) {
1977 		PCIE_DBG("MPS: pcie_get_max_supported: %s: "
1978 		    "Cannot read reg\n", ddi_driver_name(dip));
1979 		goto fail1;
1980 	}
1981 
1982 	if (pcie_map_phys(ddi_get_child(current->dip), reg, &virt,
1983 	    &config_handle) != DDI_SUCCESS) {
1984 		PCIE_DBG("MPS: pcie_get_max_supported: %s: pcie_map_phys "
1985 		    "failed\n", ddi_driver_name(dip));
1986 		goto fail2;
1987 	}
1988 
1989 	if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, &cap_ptr)) ==
1990 	    DDI_FAILURE) {
1991 		goto fail3;
1992 	}
1993 
1994 	max_supported = PCI_CAP_GET16(config_handle, 0, cap_ptr,
1995 	    PCIE_DEVCAP) & PCIE_DEVCAP_MAX_PAYLOAD_MASK;
1996 
1997 	PCIE_DBG("PCIE MPS: %s: MPS Capabilities %x\n", ddi_driver_name(dip),
1998 	    max_supported);
1999 
2000 	if (max_supported < current->highest_common_mps)
2001 		current->highest_common_mps = max_supported;
2002 
2003 fail3:
2004 	pcie_unmap_phys(&config_handle, reg);
2005 fail2:
2006 	kmem_free(reg, rlen);
2007 fail1:
2008 	return (DDI_WALK_CONTINUE);
2009 }
2010 
2011 /*
2012  * Determines if there are any root ports attached to a root complex.
2013  *
2014  * dip - dip of root complex
2015  *
2016  * Returns - DDI_SUCCESS if there is at least one root port, otherwise
2017  *	     DDI_FAILURE.
2018  */
2019 int
2020 pcie_root_port(dev_info_t *dip)
2021 {
2022 	int port_type;
2023 	uint16_t cap_ptr;
2024 	ddi_acc_handle_t config_handle;
2025 	dev_info_t *cdip = ddi_get_child(dip);
2026 
2027 	/*
2028 	 * Determine if any of the children of the passed in dip
2029 	 * are root ports.
2030 	 */
2031 	for (; cdip; cdip = ddi_get_next_sibling(cdip)) {
2032 
2033 		if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS)
2034 			continue;
2035 
2036 		if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E,
2037 		    &cap_ptr)) == DDI_FAILURE) {
2038 			pci_config_teardown(&config_handle);
2039 			continue;
2040 		}
2041 
2042 		port_type = PCI_CAP_GET16(config_handle, 0, cap_ptr,
2043 		    PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;
2044 
2045 		pci_config_teardown(&config_handle);
2046 
2047 		if (port_type == PCIE_PCIECAP_DEV_TYPE_ROOT)
2048 			return (DDI_SUCCESS);
2049 	}
2050 
2051 	/* No root ports were found */
2052 
2053 	return (DDI_FAILURE);
2054 }
2055 
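/*
 * [Editorial aside] pcie_root_port() and pcie_get_max_supported() both
 * rely on PCI_CAP_LOCATE() to find the PCI Express capability.  The
 * disabled sketch below shows the underlying list walk over a fake
 * 256-byte config space: the Capabilities Pointer at offset 0x34 heads a
 * chain of <ID, next> byte pairs, terminated by a next pointer of 0.
 * The helper names are hypothetical.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define	CONF_CAP_PTR	0x34	/* capabilities pointer register */
#define	CAP_ID_PCI_E	0x10	/* PCI Express capability ID */

/* Walk the classic capability list in a fake config space image. */
static int
cap_locate(const uint8_t *cfg, uint8_t cap_id, uint16_t *cap_ptr)
{
	uint8_t off = cfg[CONF_CAP_PTR];

	while (off != 0) {
		if (cfg[off] == cap_id) {
			*cap_ptr = off;
			return (0);
		}
		off = cfg[off + 1];	/* next capability */
	}
	return (-1);
}

int
main(void)
{
	uint8_t cfg[256] = { 0 };
	uint16_t ptr;

	cfg[CONF_CAP_PTR] = 0x40;	/* list head */
	cfg[0x40] = 0x01;		/* PM capability ... */
	cfg[0x41] = 0x60;		/* ... chains to 0x60 */
	cfg[0x60] = CAP_ID_PCI_E;	/* PCIe capability, end of list */

	if (cap_locate(cfg, CAP_ID_PCI_E, &ptr) == 0)
		(void) printf("PCIe capability at 0x%x\n", ptr);
	return (0);
}
#endif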
2056 /*
2057  * Function that determines whether a device is a PCIe device.
2058  *
2059  * dip - dip of device.
2060  *
2061  * returns - DDI_SUCCESS if device is a PCIe device, otherwise DDI_FAILURE.
2062  */
2063 int
2064 pcie_dev(dev_info_t *dip)
2065 {
2066 	/* get parent device's device_type property */
2067 	char *device_type;
2068 	int rc = DDI_FAILURE;
2069 	dev_info_t *pdip = ddi_get_parent(dip);
2070 
2071 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
2072 	    DDI_PROP_DONTPASS, "device_type", &device_type)
2073 	    != DDI_PROP_SUCCESS) {
2074 		return (DDI_FAILURE);
2075 	}
2076 
2077 	if (strcmp(device_type, "pciex") == 0)
2078 		rc = DDI_SUCCESS;
2079 	else
2080 		rc = DDI_FAILURE;
2081 
2082 	ddi_prop_free(device_type);
2083 	return (rc);
2084 }
2085 
2086 /*
2087  * Function to map in a device's memory space.
2088  */
2089 static int
2090 pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
2091     caddr_t *addrp, ddi_acc_handle_t *handlep)
2092 {
2093 	ddi_map_req_t mr;
2094 	ddi_acc_hdl_t *hp;
2095 	int result;
2096 	ddi_device_acc_attr_t attr;
2097 
2098 	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2099 	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
2100 	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2101 	attr.devacc_attr_access = DDI_CAUTIOUS_ACC;
2102 
2103 	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
2104 	hp = impl_acc_hdl_get(*handlep);
2105 	hp->ah_vers = VERS_ACCHDL;
2106 	hp->ah_dip = dip;
2107 	hp->ah_rnumber = 0;
2108 	hp->ah_offset = 0;
2109 	hp->ah_len = 0;
2110 	hp->ah_acc = attr;
2111 
2112 	mr.map_op = DDI_MO_MAP_LOCKED;
2113 	mr.map_type = DDI_MT_REGSPEC;
2114 	mr.map_obj.rp = (struct regspec *)phys_spec;
2115 	mr.map_prot = PROT_READ | PROT_WRITE;
2116 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
2117 	mr.map_handlep = hp;
2118 	mr.map_vers = DDI_MAP_VERSION;
2119 
2120 	result = ddi_map(dip, &mr, 0, 0, addrp);
2121 
2122 	if (result != DDI_SUCCESS) {
2123 		impl_acc_hdl_free(*handlep);
2124 		*handlep = (ddi_acc_handle_t)NULL;
2125 	} else {
2126 		hp->ah_addr = *addrp;
2127 	}
2128 
2129 	return (result);
2130 }
2131 
2132 /*
2133  * Unmap memory that was mapped in with pcie_map_phys().
2134  */
2135 static void
2136 pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph)
2137 {
2138 	ddi_map_req_t mr;
2139 	ddi_acc_hdl_t *hp;
2140 
2141 	hp = impl_acc_hdl_get(*handlep);
2142 	ASSERT(hp);
2143 
2144 	mr.map_op = DDI_MO_UNMAP;
2145 	mr.map_type = DDI_MT_REGSPEC;
2146 	mr.map_obj.rp = (struct regspec *)ph;
2147 	mr.map_prot = PROT_READ | PROT_WRITE;
2148 	mr.map_flags = DDI_MF_KERNEL_MAPPING;
2149 	mr.map_handlep = hp;
2150 	mr.map_vers = DDI_MAP_VERSION;
2151 
2152 	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
2153 	    hp->ah_len, &hp->ah_addr);
2154 
2155 	impl_acc_hdl_free(*handlep);
2156 	*handlep = (ddi_acc_handle_t)NULL;
2157 }
2158 
2159 void
2160 pcie_set_rber_fatal(dev_info_t *dip, boolean_t val)
2161 {
2162 	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
2163 	bus_p->bus_pfd->pe_rber_fatal = val;
2164 }
2165 
2166 /*
2167  * Return the parent Root Port's pe_rber_fatal value.
2168  */
2169 boolean_t
2170 pcie_get_rber_fatal(dev_info_t *dip)
2171 {
2172 	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
2173 	pcie_bus_t *rp_bus_p = PCIE_DIP2UPBUS(bus_p->bus_rp_dip);
2174 	return (rp_bus_p->bus_pfd->pe_rber_fatal);
2175 }
2176 
2177 int
2178 pcie_ari_supported(dev_info_t *dip)
2179 {
2180 	uint32_t devcap2;
2181 	uint16_t pciecap;
2182 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2183 	uint8_t dev_type;
2184 
2185 	PCIE_DBG("pcie_ari_supported: dip=%p\n", dip);
2186 
2187 	if (bus_p == NULL)
2188 		return (PCIE_ARI_FORW_NOT_SUPPORTED);
2189 
2190 	dev_type = bus_p->bus_dev_type;
2191 
2192 	if ((dev_type != PCIE_PCIECAP_DEV_TYPE_DOWN) &&
2193 	    (dev_type != PCIE_PCIECAP_DEV_TYPE_ROOT))
2194 		return (PCIE_ARI_FORW_NOT_SUPPORTED);
2195 
2196 	if (pcie_disable_ari) {
2197 		PCIE_DBG("pcie_ari_supported: dip=%p: ARI Disabled\n", dip);
2198 		return (PCIE_ARI_FORW_NOT_SUPPORTED);
2199 	}
2200 
2201 	pciecap = PCIE_CAP_GET(16, bus_p, PCIE_PCIECAP);
2202 
2203 	if ((pciecap & PCIE_PCIECAP_VER_MASK) < PCIE_PCIECAP_VER_2_0) {
2204 		PCIE_DBG("pcie_ari_supported: dip=%p: Not 2.0\n", dip);
2205 		return (PCIE_ARI_FORW_NOT_SUPPORTED);
2206 	}
2207 
2208 	devcap2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCAP2);
2209 
2210 	PCIE_DBG("pcie_ari_supported: dip=%p: DevCap2=0x%x\n",
2211 	    dip, devcap2);
2212 
2213 	if (devcap2 & PCIE_DEVCAP2_ARI_FORWARD) {
2214 		PCIE_DBG("pcie_ari_supported: "
2215 		    "dip=%p: ARI Forwarding is supported\n", dip);
2216 		return (PCIE_ARI_FORW_SUPPORTED);
2217 	}
2218 	return (PCIE_ARI_FORW_NOT_SUPPORTED);
2219 }
2220 
2221 int
2222 pcie_ari_enable(dev_info_t *dip)
2223 {
2224 	uint16_t devctl2;
2225 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2226 
2227 	PCIE_DBG("pcie_ari_enable: dip=%p\n", dip);
2228 
2229 	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2230 		return (DDI_FAILURE);
2231 
2232 	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
2233 	devctl2 |= PCIE_DEVCTL2_ARI_FORWARD_EN;
2234 	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);
2235 
2236 	PCIE_DBG("pcie_ari_enable: dip=%p: writing 0x%x to DevCtl2\n",
2237 	    dip, devctl2);
2238 
2239 	return (DDI_SUCCESS);
2240 }
2241 
2242 int
2243 pcie_ari_disable(dev_info_t *dip)
2244 {
2245 	uint16_t devctl2;
2246 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2247 
2248 	PCIE_DBG("pcie_ari_disable: dip=%p\n", dip);
2249 
2250 	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2251 		return (DDI_FAILURE);
2252 
2253 	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
2254 	devctl2 &= ~PCIE_DEVCTL2_ARI_FORWARD_EN;
2255 	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);
2256 
2257 	PCIE_DBG("pcie_ari_disable: dip=%p: writing 0x%x to DevCtl2\n",
2258 	    dip, devctl2);
2259 
2260 	return (DDI_SUCCESS);
2261 }
2262 
2263 int
2264 pcie_ari_is_enabled(dev_info_t *dip)
2265 {
2266 	uint16_t devctl2;
2267 	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2268 
2269 	PCIE_DBG("pcie_ari_is_enabled: dip=%p\n", dip);
2270 
2271 	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2272 		return (PCIE_ARI_FORW_DISABLED);
2273 
2274 	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
2275 
2276 	PCIE_DBG("pcie_ari_is_enabled: dip=%p: DevCtl2=0x%x\n",
2277 	    dip, devctl2);
2278 
2279 	if (devctl2 & PCIE_DEVCTL2_ARI_FORWARD_EN) {
2280 		PCIE_DBG("pcie_ari_is_enabled: "
2281 		    "dip=%p: ARI Forwarding is enabled\n", dip);
2282 		return (PCIE_ARI_FORW_ENABLED);
2283 	}
2284 
2285 	return (PCIE_ARI_FORW_DISABLED);
2286 }
2287 
2288 int
2289 pcie_ari_device(dev_info_t *dip)
2290 {
2291 	ddi_acc_handle_t handle;
2292 	uint16_t cap_ptr;
2293 
2294 	PCIE_DBG("pcie_ari_device: dip=%p\n", dip);
2295 
2296 	/*
2297 	 * XXX - This function may be called before the bus_p structure
2298 	 * has been populated.  This code can be changed to remove
2299 	 * pci_config_setup()/pci_config_teardown() when the RFE
2300 	 * to populate the bus_p structures early in boot is putback.
2301 	 */
2302 
2303 	/* First make sure it is a PCIe device */
2304 
2305 	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
2306 		return (PCIE_NOT_ARI_DEVICE);
2307 
2308 	if ((PCI_CAP_LOCATE(handle, PCI_CAP_ID_PCI_E, &cap_ptr))
2309 	    != DDI_SUCCESS) {
2310 		pci_config_teardown(&handle);
2311 		return (PCIE_NOT_ARI_DEVICE);
2312 	}
2313 
2314 	/* Locate the ARI Capability */
2315 
2316 	if ((PCI_CAP_LOCATE(handle, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI),
2317 	    &cap_ptr)) == DDI_FAILURE) {
2318 		pci_config_teardown(&handle);
2319 		return (PCIE_NOT_ARI_DEVICE);
2320 	}
2321 
2322 	/* The ARI Capability was found, so it must be an ARI device */
2323 	PCIE_DBG("pcie_ari_device: ARI Device dip=%p\n", dip);
2324 
2325 	pci_config_teardown(&handle);
2326 	return (PCIE_ARI_DEVICE);
2327 }
2328 
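/*
 * [Editorial aside] The ARI capability lives in extended config space, so
 * the PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI) lookups above walk a different
 * chain than the classic one: 32-bit headers starting at offset 0x100,
 * with the ID in bits 15:0 and the next offset in bits 31:20.  The
 * disabled sketch below demonstrates that walk over a fake 4 KB config
 * space; the helper names and register contents are hypothetical.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define	XCAP_BASE	0x100	/* extended capabilities start here */
#define	XCAP_ID_ARI	0x000e

/* Hypothetical 32-bit accessor over a fake config space image. */
static uint32_t
read32(const uint32_t *cfg, uint16_t off)
{
	return (cfg[off / 4]);
}

/* Walk the extended capability chain looking for cap_id. */
static int
xcap_locate(const uint32_t *cfg, uint16_t cap_id, uint16_t *cap_ptr)
{
	uint16_t off = XCAP_BASE;

	while (off != 0) {
		uint32_t hdr = read32(cfg, off);

		if ((hdr & 0xffff) == cap_id) {
			*cap_ptr = off;
			return (0);
		}
		off = (hdr >> 20) & 0xfff;	/* next capability offset */
	}
	return (-1);
}

int
main(void)
{
	uint32_t cfg[1024] = { 0 };
	uint16_t ptr;

	/* AER (ID 0x0001) at 0x100 chaining to ARI at 0x140. */
	cfg[0x100 / 4] = (0x140 << 20) | (1 << 16) | 0x0001;
	cfg[0x140 / 4] = (0 << 20) | (1 << 16) | XCAP_ID_ARI;

	if (xcap_locate(cfg, XCAP_ID_ARI, &ptr) == 0)
		(void) printf("ARI capability at 0x%x\n", ptr);
	return (0);
}
#endif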
2329 int
2330 pcie_ari_get_next_function(dev_info_t *dip, int *func)
2331 {
2332 	uint32_t val;
2333 	uint16_t cap_ptr, next_function;
2334 	ddi_acc_handle_t handle;
2335 
2336 	/*
2337 	 * XXX - This function may be called before the bus_p structure
2338 	 * has been populated.  This code can be changed to remove
2339 	 * pci_config_setup()/pci_config_teardown() when the RFE
2340 	 * to populate the bus_p structures early in boot is putback.
2341 	 */
2342 
2343 	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
2344 		return (DDI_FAILURE);
2345 
2346 	if ((PCI_CAP_LOCATE(handle,
2347 	    PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI), &cap_ptr)) == DDI_FAILURE) {
2348 		pci_config_teardown(&handle);
2349 		return (DDI_FAILURE);
2350 	}
2351 
2352 	val = PCI_CAP_GET32(handle, 0, cap_ptr, PCIE_ARI_CAP);
2353 
2354 	next_function = (val >> PCIE_ARI_CAP_NEXT_FUNC_SHIFT) &
2355 	    PCIE_ARI_CAP_NEXT_FUNC_MASK;
2356 
2357 	pci_config_teardown(&handle);
2358 
2359 	*func = next_function;
2360 
2361 	return (DDI_SUCCESS);
2362 }
2363 
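/*
 * [Editorial aside] pcie_ari_get_next_function() reads one link of the
 * chain that an ARI device exposes: each function's ARI Capability names
 * the next implemented function number, and a value of 0 ends the chain.
 * The disabled sketch below enumerates such a chain starting at function
 * 0; next_func[] is a made-up stand-in for the per-function capability
 * reads.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

/* Hypothetical "next function" field for each of the 256 ARI functions. */
static const uint8_t next_func[256] = {
	[0] = 8,	/* function 0 -> 8 */
	[8] = 65,	/* function 8 -> 65 */
	[65] = 0,	/* end of chain */
};

int
main(void)
{
	uint8_t func = 0;

	/* Probe function 0 first, then follow the chain until it ends. */
	do {
		(void) printf("probe function %u\n", func);
		func = next_func[func];
	} while (func != 0);
	return (0);
}
#endif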
2364 dev_info_t *
2365 pcie_func_to_dip(dev_info_t *dip, pcie_req_id_t function)
2366 {
2367 	pcie_req_id_t child_bdf;
2368 	dev_info_t *cdip;
2369 
2370 	for (cdip = ddi_get_child(dip); cdip;
2371 	    cdip = ddi_get_next_sibling(cdip)) {
2372 
2373 		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
2374 			return (NULL);
2375 
2376 		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) == function)
2377 			return (cdip);
2378 	}
2379 	return (NULL);
2380 }
2381 
2382 #ifdef DEBUG
2383 
2384 static void
2385 pcie_print_bus(pcie_bus_t *bus_p)
2386 {
2387 	pcie_dbg("\tbus_dip = 0x%p\n", bus_p->bus_dip);
2388 	pcie_dbg("\tbus_fm_flags = 0x%x\n", bus_p->bus_fm_flags);
2389 
2390 	pcie_dbg("\tbus_bdf = 0x%x\n", bus_p->bus_bdf);
2391 	pcie_dbg("\tbus_dev_ven_id = 0x%x\n", bus_p->bus_dev_ven_id);
2392 	pcie_dbg("\tbus_rev_id = 0x%x\n", bus_p->bus_rev_id);
2393 	pcie_dbg("\tbus_hdr_type = 0x%x\n", bus_p->bus_hdr_type);
2394 	pcie_dbg("\tbus_dev_type = 0x%x\n", bus_p->bus_dev_type);
2395 	pcie_dbg("\tbus_bdg_secbus = 0x%x\n", bus_p->bus_bdg_secbus);
2396 	pcie_dbg("\tbus_pcie_off = 0x%x\n", bus_p->bus_pcie_off);
2397 	pcie_dbg("\tbus_aer_off = 0x%x\n", bus_p->bus_aer_off);
2398 	pcie_dbg("\tbus_pcix_off = 0x%x\n", bus_p->bus_pcix_off);
2399 	pcie_dbg("\tbus_ecc_ver = 0x%x\n", bus_p->bus_ecc_ver);
2400 }
2401 
2402 /*
2403  * For debugging purposes, set pcie_dbg_print != 0 to see printf messages
2404  * during interrupt.
2405  *
2406  * When a proper solution is in place this code will disappear.
2407  * Potential solutions are:
2408  * o circular buffers
2409  * o taskq to print at lower pil
2410  */
2411 int pcie_dbg_print = 0;
2412 void
2413 pcie_dbg(char *fmt, ...)
2414 {
2415 	va_list ap;
2416 
2417 	if (!pcie_debug_flags) {
2418 		return;
2419 	}
2420 	va_start(ap, fmt);
2421 	if (servicing_interrupt()) {
2422 		if (pcie_dbg_print) {
2423 			prom_vprintf(fmt, ap);
2424 		}
2425 	} else {
2426 		prom_vprintf(fmt, ap);
2427 	}
2428 	va_end(ap);
2429 }
2430 #endif	/* DEBUG */
2431 
2432 #if defined(__i386) || defined(__amd64)
2433 static void
2434 pcie_check_io_mem_range(ddi_acc_handle_t cfg_hdl, boolean_t *empty_io_range,
2435     boolean_t *empty_mem_range)
2436 {
2437 	uint8_t class, subclass;
2438 	uint_t val;
2439 
2440 	class = pci_config_get8(cfg_hdl, PCI_CONF_BASCLASS);
2441 	subclass = pci_config_get8(cfg_hdl, PCI_CONF_SUBCLASS);
2442 
2443 	if ((class == PCI_CLASS_BRIDGE) && (subclass == PCI_BRIDGE_PCI)) {
2444 		val = (((uint_t)pci_config_get8(cfg_hdl, PCI_BCNF_IO_BASE_LOW) &
2445 		    PCI_BCNF_IO_MASK) << 8);
2446 		/*
2447 		 * Assuming that a zero-based io_range[0] implies an
2448 		 * invalid I/O range.  Likewise for mem_range[0].
2449 		 */
2450 		if (val == 0)
2451 			*empty_io_range = B_TRUE;
2452 		val = (((uint_t)pci_config_get16(cfg_hdl, PCI_BCNF_MEM_BASE) &
2453 		    PCI_BCNF_MEM_MASK) << 16);
2454 		if (val == 0)
2455 			*empty_mem_range = B_TRUE;
2456 	}
2457 }
2458 
2459 #endif	/* defined(__i386) || defined(__amd64) */
2460 
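/*
 * [Editorial aside] The disabled sketch below reproduces the address
 * arithmetic in pcie_check_io_mem_range() with hypothetical register
 * values: a bridge's I/O Base keeps address bits 15:12 in its upper
 * nibble, and Memory Base keeps address bits 31:20 in bits 15:4, which
 * is why the masked values are shifted left by 8 and 16 respectively.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* Hypothetical register contents from a bridge's type 1 header. */
	uint8_t io_base_low = 0x21;	/* low nibble = capability bits */
	uint16_t mem_base = 0xe011;	/* low nibble reserved */

	/* Rebuild the window start addresses from the masked fields. */
	uint32_t io_addr = ((uint32_t)io_base_low & 0xf0) << 8;
	uint32_t mem_addr = ((uint32_t)mem_base & 0xfff0) << 16;

	(void) printf("I/O window starts at 0x%x\n", io_addr);  /* 0x2000 */
	(void) printf("memory window starts at 0x%x\n", mem_addr);
	/* prints 0xe0100000 */
	return (0);
}
#endif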