/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/promif.h>
#include <sys/disp.h>
#include <sys/stat.h>
#include <sys/file.h>
#include <sys/pci_cap.h>
#include <sys/pci_impl.h>
#include <sys/pcie_impl.h>
#include <sys/hotplug/pci/pcie_hp.h>
#include <sys/hotplug/pci/pciehpc.h>
#include <sys/hotplug/pci/pcishpc.h>
#include <sys/hotplug/pci/pcicfg.h>
#include <sys/pci_cfgacc.h>

/* Local function prototypes */
static void pcie_init_pfd(dev_info_t *);
static void pcie_fini_pfd(dev_info_t *);

#if defined(__i386) || defined(__amd64)
static void pcie_check_io_mem_range(ddi_acc_handle_t, boolean_t *,
    boolean_t *);
#endif /* defined(__i386) || defined(__amd64) */

#ifdef DEBUG
uint_t pcie_debug_flags = 0;
static void pcie_print_bus(pcie_bus_t *bus_p);
void pcie_dbg(char *fmt, ...);
#endif /* DEBUG */

/* Variable to control default PCI-Express config settings */
ushort_t pcie_command_default =
    PCI_COMM_SERR_ENABLE |
    PCI_COMM_WAIT_CYC_ENAB |
    PCI_COMM_PARITY_DETECT |
    PCI_COMM_ME |
    PCI_COMM_MAE |
    PCI_COMM_IO;

/* xxx_fw are bits that are controlled by FW and should not be modified */
ushort_t pcie_command_default_fw =
    PCI_COMM_SPEC_CYC |
    PCI_COMM_MEMWR_INVAL |
    PCI_COMM_PALETTE_SNOOP |
    PCI_COMM_WAIT_CYC_ENAB |
    0xF800; /* Reserved Bits */

ushort_t pcie_bdg_command_default_fw =
    PCI_BCNF_BCNTRL_ISA_ENABLE |
    PCI_BCNF_BCNTRL_VGA_ENABLE |
    0xF000; /* Reserved Bits */

/* PCI-Express Base error defaults */
ushort_t pcie_base_err_default =
    PCIE_DEVCTL_CE_REPORTING_EN |
    PCIE_DEVCTL_NFE_REPORTING_EN |
    PCIE_DEVCTL_FE_REPORTING_EN |
    PCIE_DEVCTL_UR_REPORTING_EN;

/* PCI-Express Device Control Register */
uint16_t pcie_devctl_default = PCIE_DEVCTL_RO_EN |
    PCIE_DEVCTL_MAX_READ_REQ_512;

/* PCI-Express AER Root Control Register */
#define PCIE_ROOT_SYS_ERR (PCIE_ROOTCTL_SYS_ERR_ON_CE_EN | \
    PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN | \
    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN)

ushort_t pcie_root_ctrl_default =
    PCIE_ROOTCTL_SYS_ERR_ON_CE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN;

/* PCI-Express Root Error Command Register */
ushort_t pcie_root_error_cmd_default =
    PCIE_AER_RE_CMD_CE_REP_EN |
    PCIE_AER_RE_CMD_NFE_REP_EN |
    PCIE_AER_RE_CMD_FE_REP_EN;

/* ECRC settings in the PCIe AER Control Register */
uint32_t pcie_ecrc_value =
    PCIE_AER_CTL_ECRC_GEN_ENA |
    PCIE_AER_CTL_ECRC_CHECK_ENA;

/*
 * If a particular platform wants to disable certain errors, such as UR/MA,
 * instead of using #defines have the platform's PCIe Root Complex driver set
 * these masks using the pcie_get_XXX_mask and pcie_set_XXX_mask functions.
 * For x86 the closest thing to a PCIe root complex driver is NPE.  For SPARC
 * the closest PCIe root complex driver is PX.
 *
 * pcie_serr_disable_flag: disable SERR only (in RCR and command reg).  x86
 * systems may want to disable SERR in general.  For root ports, enabling
 * SERR causes NMIs which are not handled and result in a watchdog timeout
 * error.
 */
uint32_t pcie_aer_uce_mask = 0;		/* AER UE Mask */
uint32_t pcie_aer_ce_mask = 0;		/* AER CE Mask */
uint32_t pcie_aer_suce_mask = 0;	/* AER Secondary UE Mask */
uint32_t pcie_serr_disable_flag = 0;	/* Disable SERR */

/* Default severities needed for eversholt.  Error handling doesn't care. */
uint32_t pcie_aer_uce_severity = PCIE_AER_UCE_MTLP | PCIE_AER_UCE_RO | \
    PCIE_AER_UCE_FCP | PCIE_AER_UCE_SD | PCIE_AER_UCE_DLP | \
    PCIE_AER_UCE_TRAINING;
uint32_t pcie_aer_suce_severity = PCIE_AER_SUCE_SERR_ASSERT | \
    PCIE_AER_SUCE_UC_ADDR_ERR | PCIE_AER_SUCE_UC_ATTR_ERR | \
    PCIE_AER_SUCE_USC_MSG_DATA_ERR;

int pcie_max_mps = PCIE_DEVCTL_MAX_PAYLOAD_4096 >> 5;
int pcie_disable_ari = 0;

static void pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip,
    int *max_supported);
static int pcie_get_max_supported(dev_info_t *dip, void *arg);
static int pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
    caddr_t *addrp, ddi_acc_handle_t *handlep);
static void pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph);

dev_info_t *pcie_get_rc_dip(dev_info_t *dip);

/*
 * modload support
 */

static struct modlmisc modlmisc = {
    &mod_miscops, /* Type of module */
    "PCI Express Framework Module"
};

static struct modlinkage modlinkage = {
    MODREV_1,
    (void *)&modlmisc,
    NULL
};

/*
 * Global Variables needed for a non-atomic version of ddi_fm_ereport_post.
 * Currently used to send the pci.fabric ereports whose payload depends on
 * the type of PCI device it is being sent for.
 */
char *pcie_nv_buf;
nv_alloc_t *pcie_nvap;
nvlist_t *pcie_nvl;

int
_init(void)
{
    int rval;

    pcie_nv_buf = kmem_alloc(ERPT_DATA_SZ, KM_SLEEP);
    pcie_nvap = fm_nva_xcreate(pcie_nv_buf, ERPT_DATA_SZ);
    pcie_nvl = fm_nvlist_create(pcie_nvap);

    rval = mod_install(&modlinkage);
    return (rval);
}

int
_fini(void)
{
    int rval;

    fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
    fm_nva_xdestroy(pcie_nvap);
    kmem_free(pcie_nv_buf, ERPT_DATA_SZ);

    rval = mod_remove(&modlinkage);
    return (rval);
}

int
_info(struct modinfo *modinfop)
{
    return (mod_info(&modlinkage, modinfop));
}

/* ARGSUSED */
int
pcie_init(dev_info_t *dip, caddr_t arg)
{
    int ret = DDI_SUCCESS;

    /*
     * Create a "devctl" minor node to support DEVCTL_DEVICE_*
     * and DEVCTL_BUS_* ioctls to this bus.
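     *
     * The minor number is built with PCI_MINOR_NUM() from the nexus
     * instance and the PCI_DEVCTL_MINOR id, so user-level tools such
     * as cfgadm(1M) can reach this specific bus through /devices.
     * (A sketch of the intent; the exact bit layout lives with the
     * PCI_MINOR_NUM() definition.)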
     */
    if ((ret = ddi_create_minor_node(dip, "devctl", S_IFCHR,
        PCI_MINOR_NUM(ddi_get_instance(dip), PCI_DEVCTL_MINOR),
        DDI_NT_NEXUS, 0)) != DDI_SUCCESS) {
        PCIE_DBG("Failed to create devctl minor node for %s%d\n",
            ddi_driver_name(dip), ddi_get_instance(dip));

        return (ret);
    }

    if ((ret = pcie_hp_init(dip, arg)) != DDI_SUCCESS) {
        /*
         * On some x86 platforms, we observed unexpected hotplug
         * initialization failures in recent years.  The known cause
         * is a hardware issue: while the problem PCI bridges have
         * the Hotplug Capable registers set, the machine actually
         * does not implement the expected ACPI object.
         *
         * We don't want to stop PCI driver attach and system boot
         * just because of this hotplug initialization failure.
         * Continue with a debug message printed.
         */
        PCIE_DBG("%s%d: Failed setting hotplug framework\n",
            ddi_driver_name(dip), ddi_get_instance(dip));

#if defined(__sparc)
        ddi_remove_minor_node(dip, "devctl");

        return (ret);
#endif /* defined(__sparc) */
    }

    return (DDI_SUCCESS);
}

/* ARGSUSED */
int
pcie_uninit(dev_info_t *dip)
{
    int ret = DDI_SUCCESS;

    if (pcie_ari_is_enabled(dip) == PCIE_ARI_FORW_ENABLED)
        (void) pcie_ari_disable(dip);

    if ((ret = pcie_hp_uninit(dip)) != DDI_SUCCESS) {
        PCIE_DBG("Failed to uninitialize hotplug for %s%d\n",
            ddi_driver_name(dip), ddi_get_instance(dip));

        return (ret);
    }

    ddi_remove_minor_node(dip, "devctl");

    return (ret);
}

/*
 * PCIe module interface for enabling hotplug interrupt.
 *
 * It should be called after pcie_init() is done and the bus driver's
 * interrupt handlers have been attached.
 */
int
pcie_hpintr_enable(dev_info_t *dip)
{
    pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
    pcie_hp_ctrl_t *ctrl_p = PCIE_GET_HP_CTRL(dip);

    if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
        (void) (ctrl_p->hc_ops.enable_hpc_intr)(ctrl_p);
    } else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
        (void) pcishpc_enable_irqs(ctrl_p);
    }
    return (DDI_SUCCESS);
}

/*
 * PCIe module interface for disabling hotplug interrupt.
 *
 * It should be called before pcie_uninit() is called and the bus driver's
 * interrupt handlers are detached.
 */
int
pcie_hpintr_disable(dev_info_t *dip)
{
    pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
    pcie_hp_ctrl_t *ctrl_p = PCIE_GET_HP_CTRL(dip);

    if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
        (void) (ctrl_p->hc_ops.disable_hpc_intr)(ctrl_p);
    } else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
        (void) pcishpc_disable_irqs(ctrl_p);
    }
    return (DDI_SUCCESS);
}

/* ARGSUSED */
int
pcie_intr(dev_info_t *dip)
{
    return (pcie_hp_intr(dip));
}

/* ARGSUSED */
int
pcie_open(dev_info_t *dip, dev_t *devp, int flags, int otyp, cred_t *credp)
{
    pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

    /*
     * Make sure the open is for the right file type.
     */
    if (otyp != OTYP_CHR)
        return (EINVAL);

    /*
     * Handle the open by tracking the device state.
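     *
     * An exclusive (FEXCL) open succeeds only when the node is fully
     * closed, and any open fails with EBUSY while an exclusive open is
     * outstanding; this is exactly what the checks below enforce.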
334 */ 335 if ((bus_p->bus_soft_state == PCI_SOFT_STATE_OPEN_EXCL) || 336 ((flags & FEXCL) && 337 (bus_p->bus_soft_state != PCI_SOFT_STATE_CLOSED))) { 338 return (EBUSY); 339 } 340 341 if (flags & FEXCL) 342 bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN_EXCL; 343 else 344 bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN; 345 346 return (0); 347 } 348 349 /* ARGSUSED */ 350 int 351 pcie_close(dev_info_t *dip, dev_t dev, int flags, int otyp, cred_t *credp) 352 { 353 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 354 355 if (otyp != OTYP_CHR) 356 return (EINVAL); 357 358 bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED; 359 360 return (0); 361 } 362 363 /* ARGSUSED */ 364 int 365 pcie_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg, int mode, 366 cred_t *credp, int *rvalp) 367 { 368 struct devctl_iocdata *dcp; 369 uint_t bus_state; 370 int rv = DDI_SUCCESS; 371 372 /* 373 * We can use the generic implementation for devctl ioctl 374 */ 375 switch (cmd) { 376 case DEVCTL_DEVICE_GETSTATE: 377 case DEVCTL_DEVICE_ONLINE: 378 case DEVCTL_DEVICE_OFFLINE: 379 case DEVCTL_BUS_GETSTATE: 380 return (ndi_devctl_ioctl(dip, cmd, arg, mode, 0)); 381 default: 382 break; 383 } 384 385 /* 386 * read devctl ioctl data 387 */ 388 if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS) 389 return (EFAULT); 390 391 switch (cmd) { 392 case DEVCTL_BUS_QUIESCE: 393 if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS) 394 if (bus_state == BUS_QUIESCED) 395 break; 396 (void) ndi_set_bus_state(dip, BUS_QUIESCED); 397 break; 398 case DEVCTL_BUS_UNQUIESCE: 399 if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS) 400 if (bus_state == BUS_ACTIVE) 401 break; 402 (void) ndi_set_bus_state(dip, BUS_ACTIVE); 403 break; 404 case DEVCTL_BUS_RESET: 405 case DEVCTL_BUS_RESETALL: 406 case DEVCTL_DEVICE_RESET: 407 rv = ENOTSUP; 408 break; 409 default: 410 rv = ENOTTY; 411 } 412 413 ndi_dc_freehdl(dcp); 414 return (rv); 415 } 416 417 /* ARGSUSED */ 418 int 419 pcie_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, 420 int flags, char *name, caddr_t valuep, int *lengthp) 421 { 422 if (dev == DDI_DEV_T_ANY) 423 goto skip; 424 425 if (PCIE_IS_HOTPLUG_CAPABLE(dip) && 426 strcmp(name, "pci-occupant") == 0) { 427 int pci_dev = PCI_MINOR_NUM_TO_PCI_DEVNUM(getminor(dev)); 428 429 pcie_hp_create_occupant_props(dip, dev, pci_dev); 430 } 431 432 skip: 433 return (ddi_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp)); 434 } 435 436 int 437 pcie_init_cfghdl(dev_info_t *cdip) 438 { 439 pcie_bus_t *bus_p; 440 ddi_acc_handle_t eh = NULL; 441 442 bus_p = PCIE_DIP2BUS(cdip); 443 if (bus_p == NULL) 444 return (DDI_FAILURE); 445 446 /* Create an config access special to error handling */ 447 if (pci_config_setup(cdip, &eh) != DDI_SUCCESS) { 448 cmn_err(CE_WARN, "Cannot setup config access" 449 " for BDF 0x%x\n", bus_p->bus_bdf); 450 return (DDI_FAILURE); 451 } 452 453 bus_p->bus_cfg_hdl = eh; 454 return (DDI_SUCCESS); 455 } 456 457 void 458 pcie_fini_cfghdl(dev_info_t *cdip) 459 { 460 pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip); 461 462 pci_config_teardown(&bus_p->bus_cfg_hdl); 463 } 464 465 /* 466 * PCI-Express child device initialization. 467 * This function enables generic pci-express interrupts and error 468 * handling. 
469 * 470 * @param pdip root dip (root nexus's dip) 471 * @param cdip child's dip (device's dip) 472 * @return DDI_SUCCESS or DDI_FAILURE 473 */ 474 /* ARGSUSED */ 475 int 476 pcie_initchild(dev_info_t *cdip) 477 { 478 uint16_t tmp16, reg16; 479 pcie_bus_t *bus_p; 480 481 bus_p = PCIE_DIP2BUS(cdip); 482 if (bus_p == NULL) { 483 PCIE_DBG("%s: BUS not found.\n", 484 ddi_driver_name(cdip)); 485 486 return (DDI_FAILURE); 487 } 488 489 if (pcie_init_cfghdl(cdip) != DDI_SUCCESS) 490 return (DDI_FAILURE); 491 492 /* Clear the device's status register */ 493 reg16 = PCIE_GET(16, bus_p, PCI_CONF_STAT); 494 PCIE_PUT(16, bus_p, PCI_CONF_STAT, reg16); 495 496 /* Setup the device's command register */ 497 reg16 = PCIE_GET(16, bus_p, PCI_CONF_COMM); 498 tmp16 = (reg16 & pcie_command_default_fw) | pcie_command_default; 499 500 #if defined(__i386) || defined(__amd64) 501 boolean_t empty_io_range = B_FALSE; 502 boolean_t empty_mem_range = B_FALSE; 503 /* 504 * Check for empty IO and Mem ranges on bridges. If so disable IO/Mem 505 * access as it can cause a hang if enabled. 506 */ 507 pcie_check_io_mem_range(bus_p->bus_cfg_hdl, &empty_io_range, 508 &empty_mem_range); 509 if ((empty_io_range == B_TRUE) && 510 (pcie_command_default & PCI_COMM_IO)) { 511 tmp16 &= ~PCI_COMM_IO; 512 PCIE_DBG("No I/O range found for %s, bdf 0x%x\n", 513 ddi_driver_name(cdip), bus_p->bus_bdf); 514 } 515 if ((empty_mem_range == B_TRUE) && 516 (pcie_command_default & PCI_COMM_MAE)) { 517 tmp16 &= ~PCI_COMM_MAE; 518 PCIE_DBG("No Mem range found for %s, bdf 0x%x\n", 519 ddi_driver_name(cdip), bus_p->bus_bdf); 520 } 521 #endif /* defined(__i386) || defined(__amd64) */ 522 523 if (pcie_serr_disable_flag && PCIE_IS_PCIE(bus_p)) 524 tmp16 &= ~PCI_COMM_SERR_ENABLE; 525 526 PCIE_PUT(16, bus_p, PCI_CONF_COMM, tmp16); 527 PCIE_DBG_CFG(cdip, bus_p, "COMMAND", 16, PCI_CONF_COMM, reg16); 528 529 /* 530 * If the device has a bus control register then program it 531 * based on the settings in the command register. 532 */ 533 if (PCIE_IS_BDG(bus_p)) { 534 /* Clear the device's secondary status register */ 535 reg16 = PCIE_GET(16, bus_p, PCI_BCNF_SEC_STATUS); 536 PCIE_PUT(16, bus_p, PCI_BCNF_SEC_STATUS, reg16); 537 538 /* Setup the device's secondary command register */ 539 reg16 = PCIE_GET(16, bus_p, PCI_BCNF_BCNTRL); 540 tmp16 = (reg16 & pcie_bdg_command_default_fw); 541 542 tmp16 |= PCI_BCNF_BCNTRL_SERR_ENABLE; 543 /* 544 * Workaround for this Nvidia bridge. Don't enable the SERR 545 * enable bit in the bridge control register as it could lead to 546 * bogus NMIs. 547 */ 548 if (bus_p->bus_dev_ven_id == 0x037010DE) 549 tmp16 &= ~PCI_BCNF_BCNTRL_SERR_ENABLE; 550 551 if (pcie_command_default & PCI_COMM_PARITY_DETECT) 552 tmp16 |= PCI_BCNF_BCNTRL_PARITY_ENABLE; 553 554 /* 555 * Enable Master Abort Mode only if URs have not been masked. 556 * For PCI and PCIe-PCI bridges, enabling this bit causes a 557 * Master Aborts/UR to be forwarded as a UR/TA or SERR. If this 558 * bit is masked, posted requests are dropped and non-posted 559 * requests are returned with -1. 
560 */ 561 if (pcie_aer_uce_mask & PCIE_AER_UCE_UR) 562 tmp16 &= ~PCI_BCNF_BCNTRL_MAST_AB_MODE; 563 else 564 tmp16 |= PCI_BCNF_BCNTRL_MAST_AB_MODE; 565 PCIE_PUT(16, bus_p, PCI_BCNF_BCNTRL, tmp16); 566 PCIE_DBG_CFG(cdip, bus_p, "SEC CMD", 16, PCI_BCNF_BCNTRL, 567 reg16); 568 } 569 570 if (PCIE_IS_PCIE(bus_p)) { 571 /* Setup PCIe device control register */ 572 reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL); 573 tmp16 = pcie_devctl_default; 574 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16); 575 PCIE_DBG_CAP(cdip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16); 576 577 /* Enable PCIe errors */ 578 pcie_enable_errors(cdip); 579 } 580 581 bus_p->bus_ari = B_FALSE; 582 if ((pcie_ari_is_enabled(ddi_get_parent(cdip)) 583 == PCIE_ARI_FORW_ENABLED) && (pcie_ari_device(cdip) 584 == PCIE_ARI_DEVICE)) { 585 bus_p->bus_ari = B_TRUE; 586 } 587 588 if (pcie_initchild_mps(cdip) == DDI_FAILURE) { 589 pcie_fini_cfghdl(cdip); 590 return (DDI_FAILURE); 591 } 592 593 return (DDI_SUCCESS); 594 } 595 596 #define PCIE_ZALLOC(data) kmem_zalloc(sizeof (data), KM_SLEEP) 597 static void 598 pcie_init_pfd(dev_info_t *dip) 599 { 600 pf_data_t *pfd_p = PCIE_ZALLOC(pf_data_t); 601 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 602 603 PCIE_DIP2PFD(dip) = pfd_p; 604 605 pfd_p->pe_bus_p = bus_p; 606 pfd_p->pe_severity_flags = 0; 607 pfd_p->pe_lock = B_FALSE; 608 pfd_p->pe_valid = B_FALSE; 609 610 /* Allocate the root fault struct for both RC and RP */ 611 if (PCIE_IS_ROOT(bus_p)) { 612 PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t); 613 PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF; 614 } 615 616 PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t); 617 618 if (PCIE_IS_BDG(bus_p)) 619 PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t); 620 621 if (PCIE_IS_PCIE(bus_p)) { 622 PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t); 623 624 if (PCIE_IS_RP(bus_p)) 625 PCIE_RP_REG(pfd_p) = 626 PCIE_ZALLOC(pf_pcie_rp_err_regs_t); 627 628 PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t); 629 PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF; 630 631 if (PCIE_IS_RP(bus_p)) { 632 PCIE_ADV_RP_REG(pfd_p) = 633 PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t); 634 PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id = 635 PCIE_INVALID_BDF; 636 PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id = 637 PCIE_INVALID_BDF; 638 } else if (PCIE_IS_PCIE_BDG(bus_p)) { 639 PCIE_ADV_BDG_REG(pfd_p) = 640 PCIE_ZALLOC(pf_pcie_adv_bdg_err_regs_t); 641 PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf = 642 PCIE_INVALID_BDF; 643 } 644 645 if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) { 646 PCIX_BDG_ERR_REG(pfd_p) = 647 PCIE_ZALLOC(pf_pcix_bdg_err_regs_t); 648 649 if (PCIX_ECC_VERSION_CHECK(bus_p)) { 650 PCIX_BDG_ECC_REG(pfd_p, 0) = 651 PCIE_ZALLOC(pf_pcix_ecc_regs_t); 652 PCIX_BDG_ECC_REG(pfd_p, 1) = 653 PCIE_ZALLOC(pf_pcix_ecc_regs_t); 654 } 655 } 656 } else if (PCIE_IS_PCIX(bus_p)) { 657 if (PCIE_IS_BDG(bus_p)) { 658 PCIX_BDG_ERR_REG(pfd_p) = 659 PCIE_ZALLOC(pf_pcix_bdg_err_regs_t); 660 661 if (PCIX_ECC_VERSION_CHECK(bus_p)) { 662 PCIX_BDG_ECC_REG(pfd_p, 0) = 663 PCIE_ZALLOC(pf_pcix_ecc_regs_t); 664 PCIX_BDG_ECC_REG(pfd_p, 1) = 665 PCIE_ZALLOC(pf_pcix_ecc_regs_t); 666 } 667 } else { 668 PCIX_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcix_err_regs_t); 669 670 if (PCIX_ECC_VERSION_CHECK(bus_p)) 671 PCIX_ECC_REG(pfd_p) = 672 PCIE_ZALLOC(pf_pcix_ecc_regs_t); 673 } 674 } 675 } 676 677 static void 678 pcie_fini_pfd(dev_info_t *dip) 679 { 680 pf_data_t *pfd_p = PCIE_DIP2PFD(dip); 681 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 682 683 if (PCIE_IS_PCIE(bus_p)) { 684 if 
            if (PCIX_ECC_VERSION_CHECK(bus_p)) {
                kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
                    sizeof (pf_pcix_ecc_regs_t));
                kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
                    sizeof (pf_pcix_ecc_regs_t));
            }

            kmem_free(PCIX_BDG_ERR_REG(pfd_p),
                sizeof (pf_pcix_bdg_err_regs_t));
        }

        if (PCIE_IS_RP(bus_p))
            kmem_free(PCIE_ADV_RP_REG(pfd_p),
                sizeof (pf_pcie_adv_rp_err_regs_t));
        else if (PCIE_IS_PCIE_BDG(bus_p))
            kmem_free(PCIE_ADV_BDG_REG(pfd_p),
                sizeof (pf_pcie_adv_bdg_err_regs_t));

        kmem_free(PCIE_ADV_REG(pfd_p),
            sizeof (pf_pcie_adv_err_regs_t));

        if (PCIE_IS_RP(bus_p))
            kmem_free(PCIE_RP_REG(pfd_p),
                sizeof (pf_pcie_rp_err_regs_t));

        kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
    } else if (PCIE_IS_PCIX(bus_p)) {
        if (PCIE_IS_BDG(bus_p)) {
            if (PCIX_ECC_VERSION_CHECK(bus_p)) {
                kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
                    sizeof (pf_pcix_ecc_regs_t));
                kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
                    sizeof (pf_pcix_ecc_regs_t));
            }

            kmem_free(PCIX_BDG_ERR_REG(pfd_p),
                sizeof (pf_pcix_bdg_err_regs_t));
        } else {
            if (PCIX_ECC_VERSION_CHECK(bus_p))
                kmem_free(PCIX_ECC_REG(pfd_p),
                    sizeof (pf_pcix_ecc_regs_t));

            kmem_free(PCIX_ERR_REG(pfd_p),
                sizeof (pf_pcix_err_regs_t));
        }
    }

    if (PCIE_IS_BDG(bus_p))
        kmem_free(PCI_BDG_ERR_REG(pfd_p),
            sizeof (pf_pci_bdg_err_regs_t));

    kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));

    if (PCIE_IS_ROOT(bus_p))
        kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));

    kmem_free(PCIE_DIP2PFD(dip), sizeof (pf_data_t));

    PCIE_DIP2PFD(dip) = NULL;
}


/*
 * Special functions to allocate pf_data_t's for PCIe root complexes.
 * Note: Root Complex not Root Port
 */
void
pcie_rc_init_pfd(dev_info_t *dip, pf_data_t *pfd_p)
{
    pfd_p->pe_bus_p = PCIE_DIP2DOWNBUS(dip);
    pfd_p->pe_severity_flags = 0;
    pfd_p->pe_lock = B_FALSE;
    pfd_p->pe_valid = B_FALSE;

    PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
    PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
    PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
    PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);
    PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);
    PCIE_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_rp_err_regs_t);
    PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
    PCIE_ADV_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);

    PCIE_ADV_REG(pfd_p)->pcie_ue_sev = pcie_aer_uce_severity;
}

void
pcie_rc_fini_pfd(pf_data_t *pfd_p)
{
    kmem_free(PCIE_ADV_RP_REG(pfd_p), sizeof (pf_pcie_adv_rp_err_regs_t));
    kmem_free(PCIE_ADV_REG(pfd_p), sizeof (pf_pcie_adv_err_regs_t));
    kmem_free(PCIE_RP_REG(pfd_p), sizeof (pf_pcie_rp_err_regs_t));
    kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
    kmem_free(PCI_BDG_ERR_REG(pfd_p), sizeof (pf_pci_bdg_err_regs_t));
    kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));
    kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
}

/*
 * init pcie_bus_t for root complex
 *
 * Only a few of the fields in bus_t are valid for the root complex.
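 * The rest of the structure is left zeroed by kmem_zalloc() and is not
 * expected to be consulted for the RC pseudo node.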
 * The fields that are bracketed are initialized in this routine:
 *
 * dev_info_t *		<bus_dip>
 * dev_info_t *		bus_rp_dip
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		<bus_fm_flags>
 * pcie_req_id_t	bus_bdf
 * pcie_req_id_t	bus_rp_bdf
 * uint32_t		bus_dev_ven_id
 * uint8_t		bus_rev_id
 * uint8_t		<bus_hdr_type>
 * uint16_t		<bus_dev_type>
 * uint8_t		bus_bdg_secbus
 * uint16_t		bus_pcie_off
 * uint16_t		<bus_aer_off>
 * uint16_t		bus_pcix_off
 * uint16_t		bus_ecc_ver
 * pci_bus_range_t	bus_bus_range
 * ppb_ranges_t *	bus_addr_ranges
 * int			bus_addr_entries
 * pci_regspec_t *	bus_assigned_addr
 * int			bus_assigned_entries
 * pf_data_t *		bus_pfd
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void *		bus_plat_private
 */
void
pcie_rc_init_bus(dev_info_t *dip)
{
    pcie_bus_t *bus_p;

    bus_p = (pcie_bus_t *)kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);
    bus_p->bus_dip = dip;
    bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_RC_PSEUDO;
    bus_p->bus_hdr_type = PCI_HEADER_ONE;

    /* Fake that there are AER logs */
    bus_p->bus_aer_off = (uint16_t)-1;

    /* Needed only for handle lookup */
    bus_p->bus_fm_flags |= PF_FM_READY;

    ndi_set_bus_private(dip, B_FALSE, DEVI_PORT_TYPE_PCI, bus_p);
}

void
pcie_rc_fini_bus(dev_info_t *dip)
{
    pcie_bus_t *bus_p = PCIE_DIP2DOWNBUS(dip);
    ndi_set_bus_private(dip, B_FALSE, NULL, NULL);
    kmem_free(bus_p, sizeof (pcie_bus_t));
}

/*
 * partially init pcie_bus_t for device (dip,bdf) for accessing pci
 * config space
 *
 * This routine is invoked during boot, either after creating a devinfo node
 * (x86 case) or during px driver attach (sparc case); it is also invoked
 * in hotplug context after a devinfo node is created.
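 *
 * Initialization is two-phase: the PCIE_BUS_INITIAL pass reads the
 * device's identity and capability offsets via pci_cfgacc, and the
 * PCIE_BUS_FINAL pass snapshots the bus-range/ranges/assigned-addresses
 * properties and sets up the pfd and platform data.  Both flags may be
 * passed in a single call.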
 *
 * The fields that are bracketed are initialized if flag PCIE_BUS_INITIAL
 * is set:
 *
 * dev_info_t *		<bus_dip>
 * dev_info_t *		<bus_rp_dip>
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		bus_fm_flags
 * pcie_req_id_t	<bus_bdf>
 * pcie_req_id_t	<bus_rp_bdf>
 * uint32_t		<bus_dev_ven_id>
 * uint8_t		<bus_rev_id>
 * uint8_t		<bus_hdr_type>
 * uint16_t		<bus_dev_type>
 * uint8_t		bus_bdg_secbus
 * uint16_t		<bus_pcie_off>
 * uint16_t		<bus_aer_off>
 * uint16_t		<bus_pcix_off>
 * uint16_t		<bus_ecc_ver>
 * pci_bus_range_t	bus_bus_range
 * ppb_ranges_t *	bus_addr_ranges
 * int			bus_addr_entries
 * pci_regspec_t *	bus_assigned_addr
 * int			bus_assigned_entries
 * pf_data_t *		bus_pfd
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void *		bus_plat_private
 *
 * The fields that are bracketed are initialized if flag PCIE_BUS_FINAL
 * is set:
 *
 * dev_info_t *		bus_dip
 * dev_info_t *		bus_rp_dip
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		bus_fm_flags
 * pcie_req_id_t	bus_bdf
 * pcie_req_id_t	bus_rp_bdf
 * uint32_t		bus_dev_ven_id
 * uint8_t		bus_rev_id
 * uint8_t		bus_hdr_type
 * uint16_t		bus_dev_type
 * uint8_t		<bus_bdg_secbus>
 * uint16_t		bus_pcie_off
 * uint16_t		bus_aer_off
 * uint16_t		bus_pcix_off
 * uint16_t		bus_ecc_ver
 * pci_bus_range_t	<bus_bus_range>
 * ppb_ranges_t *	<bus_addr_ranges>
 * int			<bus_addr_entries>
 * pci_regspec_t *	<bus_assigned_addr>
 * int			<bus_assigned_entries>
 * pf_data_t *		<bus_pfd>
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void *		<bus_plat_private>
 */

pcie_bus_t *
pcie_init_bus(dev_info_t *dip, pcie_req_id_t bdf, uint8_t flags)
{
    uint16_t status, base, baseptr, num_cap;
    uint32_t capid;
    int range_size;
    pcie_bus_t *bus_p;
    dev_info_t *rcdip;
    dev_info_t *pdip;
    const char *errstr = NULL;

    if (!(flags & PCIE_BUS_INITIAL))
        goto initial_done;

    bus_p = kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);

    bus_p->bus_dip = dip;
    bus_p->bus_bdf = bdf;

    rcdip = pcie_get_rc_dip(dip);
    ASSERT(rcdip != NULL);

    /* Save the Vendor ID, Device ID and revision ID */
    bus_p->bus_dev_ven_id = pci_cfgacc_get32(rcdip, bdf, PCI_CONF_VENID);
    bus_p->bus_rev_id = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_REVID);
    /* Save the Header Type */
    bus_p->bus_hdr_type = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_HEADER);
    bus_p->bus_hdr_type &= PCI_HEADER_TYPE_M;

    /*
     * Figure out the device type and all the relevant capability offsets
     */
    /* set default value */
    bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;

    status = pci_cfgacc_get16(rcdip, bdf, PCI_CONF_STAT);
    if (status == PCI_CAP_EINVAL16 || !(status & PCI_STAT_CAP))
        goto caps_done; /* capability not supported */

    /* Relevant conventional capabilities first */

    /* Conventional caps: PCI_CAP_ID_PCI_E, PCI_CAP_ID_PCIX */
    num_cap = 2;

    switch (bus_p->bus_hdr_type) {
    case PCI_HEADER_ZERO:
        baseptr = PCI_CONF_CAP_PTR;
        break;
    case PCI_HEADER_PPB:
        baseptr = PCI_BCNF_CAP_PTR;
        break;
    case PCI_HEADER_CARDBUS:
        baseptr = PCI_CBUS_CAP_PTR;
        break;
    default:
        cmn_err(CE_WARN, "%s: unexpected pci header type:%x",
            __func__, bus_p->bus_hdr_type);
        goto caps_done;
    }

    base = baseptr;
    for (base = pci_cfgacc_get8(rcdip, bdf, base); base && num_cap;
        base = pci_cfgacc_get8(rcdip, bdf, base + PCI_CAP_NEXT_PTR)) {
        capid = pci_cfgacc_get8(rcdip, bdf, base);
        switch (capid) {
        case PCI_CAP_ID_PCI_E:
            bus_p->bus_pcie_off = base;
            bus_p->bus_dev_type = pci_cfgacc_get16(rcdip, bdf,
                base + PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;

            /* Check and save PCIe hotplug capability information */
            if ((PCIE_IS_RP(bus_p) || PCIE_IS_SWD(bus_p)) &&
                (pci_cfgacc_get16(rcdip, bdf, base + PCIE_PCIECAP)
                & PCIE_PCIECAP_SLOT_IMPL) &&
                (pci_cfgacc_get32(rcdip, bdf, base + PCIE_SLOTCAP)
                & PCIE_SLOTCAP_HP_CAPABLE))
                bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE;

            num_cap--;
            break;
        case PCI_CAP_ID_PCIX:
            bus_p->bus_pcix_off = base;
            if (PCIE_IS_BDG(bus_p))
                bus_p->bus_ecc_ver =
                    pci_cfgacc_get16(rcdip, bdf, base +
                    PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
            else
                bus_p->bus_ecc_ver =
                    pci_cfgacc_get16(rcdip, bdf, base +
                    PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
            num_cap--;
            break;
        default:
            break;
        }
    }

    /* Check and save PCI hotplug (SHPC) capability information */
    if (PCIE_IS_BDG(bus_p)) {
        base = baseptr;
        for (base = pci_cfgacc_get8(rcdip, bdf, base);
            base; base = pci_cfgacc_get8(rcdip, bdf,
            base + PCI_CAP_NEXT_PTR)) {
            capid = pci_cfgacc_get8(rcdip, bdf, base);
            if (capid == PCI_CAP_ID_PCI_HOTPLUG) {
                bus_p->bus_pci_hp_off = base;
                bus_p->bus_hp_sup_modes |= PCIE_PCI_HP_MODE;
                break;
            }
        }
    }

    /* Then, relevant extended capabilities */

    if (!PCIE_IS_PCIE(bus_p))
        goto caps_done;

    /* Extended caps: PCIE_EXT_CAP_ID_AER */
    for (base = PCIE_EXT_CAP; base; base = (capid >>
        PCIE_EXT_CAP_NEXT_PTR_SHIFT) & PCIE_EXT_CAP_NEXT_PTR_MASK) {
        capid = pci_cfgacc_get32(rcdip, bdf, base);
        if (capid == PCI_CAP_EINVAL32)
            break;
        if (((capid >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK)
            == PCIE_EXT_CAP_ID_AER) {
            bus_p->bus_aer_off = base;
            break;
        }
    }

caps_done:
    /* save RP dip and RP bdf */
    if (PCIE_IS_RP(bus_p)) {
        bus_p->bus_rp_dip = dip;
        bus_p->bus_rp_bdf = bus_p->bus_bdf;
    } else {
        for (pdip = ddi_get_parent(dip); pdip;
            pdip = ddi_get_parent(pdip)) {
            pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip);

            /*
             * If RP dip and RP bdf in parent's bus_t have
             * been initialized, simply use these instead of
             * continuing up to the RC.
             */
            if (parent_bus_p->bus_rp_dip != NULL) {
                bus_p->bus_rp_dip = parent_bus_p->bus_rp_dip;
                bus_p->bus_rp_bdf = parent_bus_p->bus_rp_bdf;
                break;
            }

            /*
             * When debugging, be aware that some NVIDIA x86
             * architectures have 2 nodes for each RP, one at
             * bus 0x0 and one at bus 0x80.  The requester is
             * from bus 0x80.
             */
            if (PCIE_IS_ROOT(parent_bus_p)) {
                bus_p->bus_rp_dip = pdip;
                bus_p->bus_rp_bdf = parent_bus_p->bus_bdf;
                break;
            }
        }
    }

    bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;
    bus_p->bus_fm_flags = 0;
    bus_p->bus_mps = 0;

    ndi_set_bus_private(dip, B_TRUE, DEVI_PORT_TYPE_PCI, (void *)bus_p);

    if (PCIE_IS_HOTPLUG_CAPABLE(dip))
        (void) ndi_prop_create_boolean(DDI_DEV_T_NONE, dip,
            "hotplug-capable");

initial_done:
    if (!(flags & PCIE_BUS_FINAL))
        goto final_done;

    /*
     * already initialized?
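     * If the PCIE_BUS_INITIAL pass already ran (possibly in an earlier
     * call), the bus_t hangs off the devinfo node, so simply look it
     * up again rather than allocating a new one.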
     */
    bus_p = PCIE_DIP2BUS(dip);

    /* Save the Range information if device is a switch/bridge */
    if (PCIE_IS_BDG(bus_p)) {
        /* get "bus_range" property */
        range_size = sizeof (pci_bus_range_t);
        if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
            "bus-range", (caddr_t)&bus_p->bus_bus_range, &range_size)
            != DDI_PROP_SUCCESS) {
            errstr = "Cannot find \"bus-range\" property";
            cmn_err(CE_WARN,
                "PCIE init err info failed BDF 0x%x:%s\n",
                bus_p->bus_bdf, errstr);
        }

        /* get secondary bus number */
        rcdip = pcie_get_rc_dip(dip);
        ASSERT(rcdip != NULL);

        bus_p->bus_bdg_secbus = pci_cfgacc_get8(rcdip,
            bus_p->bus_bdf, PCI_BCNF_SECBUS);

        /* Get "ranges" property */
        if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
            "ranges", (caddr_t)&bus_p->bus_addr_ranges,
            &bus_p->bus_addr_entries) != DDI_PROP_SUCCESS)
            bus_p->bus_addr_entries = 0;
        bus_p->bus_addr_entries /= sizeof (ppb_ranges_t);
    }

    /* save "assigned-addresses" property array, ignore failures */
    if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
        "assigned-addresses", (caddr_t)&bus_p->bus_assigned_addr,
        &bus_p->bus_assigned_entries) == DDI_PROP_SUCCESS)
        bus_p->bus_assigned_entries /= sizeof (pci_regspec_t);
    else
        bus_p->bus_assigned_entries = 0;

    pcie_init_pfd(dip);

    pcie_init_plat(dip);

final_done:

    PCIE_DBG("Add %s(dip 0x%p, bdf 0x%x, secbus 0x%x)\n",
        ddi_driver_name(dip), (void *)dip, bus_p->bus_bdf,
        bus_p->bus_bdg_secbus);
#ifdef DEBUG
    pcie_print_bus(bus_p);
#endif

    return (bus_p);
}

/*
 * Invoked before destroying devinfo node, mostly during hotplug
 * operation to free pcie_bus_t data structure
 */
/* ARGSUSED */
void
pcie_fini_bus(dev_info_t *dip, uint8_t flags)
{
    pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
    ASSERT(bus_p);

    if (flags & PCIE_BUS_INITIAL) {
        pcie_fini_plat(dip);
        pcie_fini_pfd(dip);

        kmem_free(bus_p->bus_assigned_addr,
            (sizeof (pci_regspec_t) * bus_p->bus_assigned_entries));
        kmem_free(bus_p->bus_addr_ranges,
            (sizeof (ppb_ranges_t) * bus_p->bus_addr_entries));
        /* zero out the fields that have been destroyed */
        bus_p->bus_assigned_addr = NULL;
        bus_p->bus_addr_ranges = NULL;
        bus_p->bus_assigned_entries = 0;
        bus_p->bus_addr_entries = 0;
    }

    if (flags & PCIE_BUS_FINAL) {
        if (PCIE_IS_HOTPLUG_CAPABLE(dip)) {
            (void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
                "hotplug-capable");
        }

        ndi_set_bus_private(dip, B_TRUE, NULL, NULL);
        kmem_free(bus_p, sizeof (pcie_bus_t));
    }
}

int
pcie_postattach_child(dev_info_t *cdip)
{
    pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip);

    if (!bus_p)
        return (DDI_FAILURE);

    return (pcie_enable_ce(cdip));
}

/*
 * PCI-Express child device de-initialization.
 * This function disables generic pci-express interrupts and error
 * handling.
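 *
 * It undoes the work of pcie_initchild(): error reporting is switched
 * back off and the config access handle dedicated to error handling
 * is torn down.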
1192 */ 1193 void 1194 pcie_uninitchild(dev_info_t *cdip) 1195 { 1196 pcie_disable_errors(cdip); 1197 pcie_fini_cfghdl(cdip); 1198 } 1199 1200 /* 1201 * find the root complex dip 1202 */ 1203 dev_info_t * 1204 pcie_get_rc_dip(dev_info_t *dip) 1205 { 1206 dev_info_t *rcdip; 1207 pcie_bus_t *rc_bus_p; 1208 1209 for (rcdip = ddi_get_parent(dip); rcdip; 1210 rcdip = ddi_get_parent(rcdip)) { 1211 rc_bus_p = PCIE_DIP2BUS(rcdip); 1212 if (rc_bus_p && PCIE_IS_RC(rc_bus_p)) 1213 break; 1214 } 1215 1216 return (rcdip); 1217 } 1218 1219 static boolean_t 1220 pcie_is_pci_device(dev_info_t *dip) 1221 { 1222 dev_info_t *pdip; 1223 char *device_type; 1224 1225 pdip = ddi_get_parent(dip); 1226 ASSERT(pdip); 1227 1228 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS, 1229 "device_type", &device_type) != DDI_PROP_SUCCESS) 1230 return (B_FALSE); 1231 1232 if (strcmp(device_type, "pciex") != 0 && 1233 strcmp(device_type, "pci") != 0) { 1234 ddi_prop_free(device_type); 1235 return (B_FALSE); 1236 } 1237 1238 ddi_prop_free(device_type); 1239 return (B_TRUE); 1240 } 1241 1242 typedef struct { 1243 boolean_t init; 1244 uint8_t flags; 1245 } pcie_bus_arg_t; 1246 1247 /*ARGSUSED*/ 1248 static int 1249 pcie_fab_do_init_fini(dev_info_t *dip, void *arg) 1250 { 1251 pcie_req_id_t bdf; 1252 pcie_bus_arg_t *bus_arg = (pcie_bus_arg_t *)arg; 1253 1254 if (!pcie_is_pci_device(dip)) 1255 goto out; 1256 1257 if (bus_arg->init) { 1258 if (pcie_get_bdf_from_dip(dip, &bdf) != DDI_SUCCESS) 1259 goto out; 1260 1261 (void) pcie_init_bus(dip, bdf, bus_arg->flags); 1262 } else { 1263 (void) pcie_fini_bus(dip, bus_arg->flags); 1264 } 1265 1266 return (DDI_WALK_CONTINUE); 1267 1268 out: 1269 return (DDI_WALK_PRUNECHILD); 1270 } 1271 1272 void 1273 pcie_fab_init_bus(dev_info_t *rcdip, uint8_t flags) 1274 { 1275 int circular_count; 1276 dev_info_t *dip = ddi_get_child(rcdip); 1277 pcie_bus_arg_t arg; 1278 1279 arg.init = B_TRUE; 1280 arg.flags = flags; 1281 1282 ndi_devi_enter(rcdip, &circular_count); 1283 ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg); 1284 ndi_devi_exit(rcdip, circular_count); 1285 } 1286 1287 void 1288 pcie_fab_fini_bus(dev_info_t *rcdip, uint8_t flags) 1289 { 1290 int circular_count; 1291 dev_info_t *dip = ddi_get_child(rcdip); 1292 pcie_bus_arg_t arg; 1293 1294 arg.init = B_FALSE; 1295 arg.flags = flags; 1296 1297 ndi_devi_enter(rcdip, &circular_count); 1298 ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg); 1299 ndi_devi_exit(rcdip, circular_count); 1300 } 1301 1302 void 1303 pcie_enable_errors(dev_info_t *dip) 1304 { 1305 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 1306 uint16_t reg16, tmp16; 1307 uint32_t reg32, tmp32; 1308 1309 ASSERT(bus_p); 1310 1311 /* 1312 * Clear any pending errors 1313 */ 1314 pcie_clear_errors(dip); 1315 1316 if (!PCIE_IS_PCIE(bus_p)) 1317 return; 1318 1319 /* 1320 * Enable Baseline Error Handling but leave CE reporting off (poweron 1321 * default). 
1322 */ 1323 if ((reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL)) != 1324 PCI_CAP_EINVAL16) { 1325 tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK | 1326 PCIE_DEVCTL_MAX_PAYLOAD_MASK)) | 1327 (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK | 1328 PCIE_DEVCTL_MAX_PAYLOAD_MASK)) | 1329 (pcie_base_err_default & (~PCIE_DEVCTL_CE_REPORTING_EN)); 1330 1331 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16); 1332 PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16); 1333 } 1334 1335 /* Enable Root Port Baseline Error Receiving */ 1336 if (PCIE_IS_ROOT(bus_p) && 1337 (reg16 = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL)) != 1338 PCI_CAP_EINVAL16) { 1339 1340 tmp16 = pcie_serr_disable_flag ? 1341 (pcie_root_ctrl_default & ~PCIE_ROOT_SYS_ERR) : 1342 pcie_root_ctrl_default; 1343 PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, tmp16); 1344 PCIE_DBG_CAP(dip, bus_p, "ROOT DEVCTL", 16, PCIE_ROOTCTL, 1345 reg16); 1346 } 1347 1348 /* 1349 * Enable PCI-Express Advanced Error Handling if Exists 1350 */ 1351 if (!PCIE_HAS_AER(bus_p)) 1352 return; 1353 1354 /* Set Uncorrectable Severity */ 1355 if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_SERV)) != 1356 PCI_CAP_EINVAL32) { 1357 tmp32 = pcie_aer_uce_severity; 1358 1359 PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_SERV, tmp32); 1360 PCIE_DBG_AER(dip, bus_p, "AER UCE SEV", 32, PCIE_AER_UCE_SERV, 1361 reg32); 1362 } 1363 1364 /* Enable Uncorrectable errors */ 1365 if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_MASK)) != 1366 PCI_CAP_EINVAL32) { 1367 tmp32 = pcie_aer_uce_mask; 1368 1369 PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, tmp32); 1370 PCIE_DBG_AER(dip, bus_p, "AER UCE MASK", 32, PCIE_AER_UCE_MASK, 1371 reg32); 1372 } 1373 1374 /* Enable ECRC generation and checking */ 1375 if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) != 1376 PCI_CAP_EINVAL32) { 1377 tmp32 = reg32 | pcie_ecrc_value; 1378 PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, tmp32); 1379 PCIE_DBG_AER(dip, bus_p, "AER CTL", 32, PCIE_AER_CTL, reg32); 1380 } 1381 1382 /* Enable Secondary Uncorrectable errors if this is a bridge */ 1383 if (!PCIE_IS_PCIE_BDG(bus_p)) 1384 goto root; 1385 1386 /* Set Uncorrectable Severity */ 1387 if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_SERV)) != 1388 PCI_CAP_EINVAL32) { 1389 tmp32 = pcie_aer_suce_severity; 1390 1391 PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_SERV, tmp32); 1392 PCIE_DBG_AER(dip, bus_p, "AER SUCE SEV", 32, PCIE_AER_SUCE_SERV, 1393 reg32); 1394 } 1395 1396 if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_MASK)) != 1397 PCI_CAP_EINVAL32) { 1398 PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, pcie_aer_suce_mask); 1399 PCIE_DBG_AER(dip, bus_p, "AER SUCE MASK", 32, 1400 PCIE_AER_SUCE_MASK, reg32); 1401 } 1402 1403 root: 1404 /* 1405 * Enable Root Control this is a Root device 1406 */ 1407 if (!PCIE_IS_ROOT(bus_p)) 1408 return; 1409 1410 if ((reg16 = PCIE_AER_GET(16, bus_p, PCIE_AER_RE_CMD)) != 1411 PCI_CAP_EINVAL16) { 1412 PCIE_AER_PUT(16, bus_p, PCIE_AER_RE_CMD, 1413 pcie_root_error_cmd_default); 1414 PCIE_DBG_AER(dip, bus_p, "AER Root Err Cmd", 16, 1415 PCIE_AER_RE_CMD, reg16); 1416 } 1417 } 1418 1419 /* 1420 * This function is used for enabling CE reporting and setting the AER CE mask. 1421 * When called from outside the pcie module it should always be preceded by 1422 * a call to pcie_enable_errors. 
1423 */ 1424 int 1425 pcie_enable_ce(dev_info_t *dip) 1426 { 1427 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 1428 uint16_t device_sts, device_ctl; 1429 uint32_t tmp_pcie_aer_ce_mask; 1430 1431 if (!PCIE_IS_PCIE(bus_p)) 1432 return (DDI_SUCCESS); 1433 1434 /* 1435 * The "pcie_ce_mask" property is used to control both the CE reporting 1436 * enable field in the device control register and the AER CE mask. We 1437 * leave CE reporting disabled if pcie_ce_mask is set to -1. 1438 */ 1439 1440 tmp_pcie_aer_ce_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip, 1441 DDI_PROP_DONTPASS, "pcie_ce_mask", pcie_aer_ce_mask); 1442 1443 if (tmp_pcie_aer_ce_mask == (uint32_t)-1) { 1444 /* 1445 * Nothing to do since CE reporting has already been disabled. 1446 */ 1447 return (DDI_SUCCESS); 1448 } 1449 1450 if (PCIE_HAS_AER(bus_p)) { 1451 /* Enable AER CE */ 1452 PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, tmp_pcie_aer_ce_mask); 1453 PCIE_DBG_AER(dip, bus_p, "AER CE MASK", 32, PCIE_AER_CE_MASK, 1454 0); 1455 1456 /* Clear any pending AER CE errors */ 1457 PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_STS, -1); 1458 } 1459 1460 /* clear any pending CE errors */ 1461 if ((device_sts = PCIE_CAP_GET(16, bus_p, PCIE_DEVSTS)) != 1462 PCI_CAP_EINVAL16) 1463 PCIE_CAP_PUT(16, bus_p, PCIE_DEVSTS, 1464 device_sts & (~PCIE_DEVSTS_CE_DETECTED)); 1465 1466 /* Enable CE reporting */ 1467 device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL); 1468 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, 1469 (device_ctl & (~PCIE_DEVCTL_ERR_MASK)) | pcie_base_err_default); 1470 PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, device_ctl); 1471 1472 return (DDI_SUCCESS); 1473 } 1474 1475 /* ARGSUSED */ 1476 void 1477 pcie_disable_errors(dev_info_t *dip) 1478 { 1479 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 1480 uint16_t device_ctl; 1481 uint32_t aer_reg; 1482 1483 if (!PCIE_IS_PCIE(bus_p)) 1484 return; 1485 1486 /* 1487 * Disable PCI-Express Baseline Error Handling 1488 */ 1489 device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL); 1490 device_ctl &= ~PCIE_DEVCTL_ERR_MASK; 1491 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, device_ctl); 1492 1493 /* 1494 * Disable PCI-Express Advanced Error Handling if Exists 1495 */ 1496 if (!PCIE_HAS_AER(bus_p)) 1497 goto root; 1498 1499 /* Disable Uncorrectable errors */ 1500 PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, PCIE_AER_UCE_BITS); 1501 1502 /* Disable Correctable errors */ 1503 PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, PCIE_AER_CE_BITS); 1504 1505 /* Disable ECRC generation and checking */ 1506 if ((aer_reg = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) != 1507 PCI_CAP_EINVAL32) { 1508 aer_reg &= ~(PCIE_AER_CTL_ECRC_GEN_ENA | 1509 PCIE_AER_CTL_ECRC_CHECK_ENA); 1510 1511 PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, aer_reg); 1512 } 1513 /* 1514 * Disable Secondary Uncorrectable errors if this is a bridge 1515 */ 1516 if (!PCIE_IS_PCIE_BDG(bus_p)) 1517 goto root; 1518 1519 PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, PCIE_AER_SUCE_BITS); 1520 1521 root: 1522 /* 1523 * disable Root Control this is a Root device 1524 */ 1525 if (!PCIE_IS_ROOT(bus_p)) 1526 return; 1527 1528 if (!pcie_serr_disable_flag) { 1529 device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL); 1530 device_ctl &= ~PCIE_ROOT_SYS_ERR; 1531 PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, device_ctl); 1532 } 1533 1534 if (!PCIE_HAS_AER(bus_p)) 1535 return; 1536 1537 if ((device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_AER_RE_CMD)) != 1538 PCI_CAP_EINVAL16) { 1539 device_ctl &= ~pcie_root_error_cmd_default; 1540 PCIE_CAP_PUT(16, bus_p, PCIE_AER_RE_CMD, device_ctl); 1541 } 1542 } 1543 1544 /* 1545 * Extract 
 */
int
pcie_get_bdf_from_dip(dev_info_t *dip, pcie_req_id_t *bdf)
{
    pci_regspec_t *regspec;
    int reglen;

    if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
        "reg", (int **)&regspec, (uint_t *)&reglen) != DDI_SUCCESS)
        return (DDI_FAILURE);

    if (reglen < (sizeof (pci_regspec_t) / sizeof (int))) {
        ddi_prop_free(regspec);
        return (DDI_FAILURE);
    }

    /* Get phys_hi from first element.  All have same bdf. */
    *bdf = (regspec->pci_phys_hi & (PCI_REG_BDFR_M ^ PCI_REG_REG_M)) >> 8;

    ddi_prop_free(regspec);
    return (DDI_SUCCESS);
}

dev_info_t *
pcie_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
{
    dev_info_t *cdip = rdip;

    for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
        ;

    return (cdip);
}

uint32_t
pcie_get_bdf_for_dma_xfer(dev_info_t *dip, dev_info_t *rdip)
{
    dev_info_t *cdip;

    /*
     * As part of the probing, the PCI fcode interpreter may setup a DMA
     * request if a given card has fcode on it, using the dip and rdip
     * of the hotplug connector, i.e. the dip and rdip of the px/pcieb
     * driver.  In this case, return an invalid value for the bdf since
     * we cannot get to the bdf value of the actual device which will
     * be initiating this DMA.
     */
    if (rdip == dip)
        return (PCIE_INVALID_BDF);

    cdip = pcie_get_my_childs_dip(dip, rdip);

    /*
     * For a given rdip, return the bdf value of dip's (px or pcieb)
     * immediate child or secondary bus-id if dip is a PCIe2PCI bridge.
     *
     * XXX - For now, return an invalid bdf value for all PCI and PCI-X
     * devices since this needs more work.
     */
    return (PCI_GET_PCIE2PCI_SECBUS(cdip) ?
        PCIE_INVALID_BDF : PCI_GET_BDF(cdip));
}

uint32_t
pcie_get_aer_uce_mask(void)
{
    return (pcie_aer_uce_mask);
}

uint32_t
pcie_get_aer_ce_mask(void)
{
    return (pcie_aer_ce_mask);
}

uint32_t
pcie_get_aer_suce_mask(void)
{
    return (pcie_aer_suce_mask);
}

uint32_t
pcie_get_serr_mask(void)
{
    return (pcie_serr_disable_flag);
}

void
pcie_set_aer_uce_mask(uint32_t mask)
{
    pcie_aer_uce_mask = mask;
    if (mask & PCIE_AER_UCE_UR)
        pcie_base_err_default &= ~PCIE_DEVCTL_UR_REPORTING_EN;
    else
        pcie_base_err_default |= PCIE_DEVCTL_UR_REPORTING_EN;

    if (mask & PCIE_AER_UCE_ECRC)
        pcie_ecrc_value = 0;
}

void
pcie_set_aer_ce_mask(uint32_t mask)
{
    pcie_aer_ce_mask = mask;
}

void
pcie_set_aer_suce_mask(uint32_t mask)
{
    pcie_aer_suce_mask = mask;
}

void
pcie_set_serr_mask(uint32_t mask)
{
    pcie_serr_disable_flag = mask;
}

/*
 * Is the rdip a child of dip?  Used for checking certain CTLOPS from
 * bubbling up erroneously, e.g. ISA ctlops to a PCI-PCI Bridge.
1653 */ 1654 boolean_t 1655 pcie_is_child(dev_info_t *dip, dev_info_t *rdip) 1656 { 1657 dev_info_t *cdip = ddi_get_child(dip); 1658 for (; cdip; cdip = ddi_get_next_sibling(cdip)) 1659 if (cdip == rdip) 1660 break; 1661 return (cdip != NULL); 1662 } 1663 1664 boolean_t 1665 pcie_is_link_disabled(dev_info_t *dip) 1666 { 1667 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 1668 1669 if (PCIE_IS_PCIE(bus_p)) { 1670 if (PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL) & 1671 PCIE_LINKCTL_LINK_DISABLE) 1672 return (B_TRUE); 1673 } 1674 return (B_FALSE); 1675 } 1676 1677 /* 1678 * Initialize the MPS for a root port. 1679 * 1680 * dip - dip of root port device. 1681 */ 1682 void 1683 pcie_init_root_port_mps(dev_info_t *dip) 1684 { 1685 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 1686 int rp_cap, max_supported = pcie_max_mps; 1687 1688 (void) pcie_get_fabric_mps(ddi_get_parent(dip), 1689 ddi_get_child(dip), &max_supported); 1690 1691 rp_cap = PCI_CAP_GET16(bus_p->bus_cfg_hdl, NULL, 1692 bus_p->bus_pcie_off, PCIE_DEVCAP) & 1693 PCIE_DEVCAP_MAX_PAYLOAD_MASK; 1694 1695 if (rp_cap < max_supported) 1696 max_supported = rp_cap; 1697 1698 bus_p->bus_mps = max_supported; 1699 (void) pcie_initchild_mps(dip); 1700 } 1701 1702 /* 1703 * Initialize the Maximum Payload Size of a device. 1704 * 1705 * cdip - dip of device. 1706 * 1707 * returns - DDI_SUCCESS or DDI_FAILURE 1708 */ 1709 int 1710 pcie_initchild_mps(dev_info_t *cdip) 1711 { 1712 int max_payload_size; 1713 pcie_bus_t *bus_p; 1714 dev_info_t *pdip = ddi_get_parent(cdip); 1715 uint8_t dev_type; 1716 1717 bus_p = PCIE_DIP2BUS(cdip); 1718 if (bus_p == NULL) { 1719 PCIE_DBG("%s: BUS not found.\n", 1720 ddi_driver_name(cdip)); 1721 return (DDI_FAILURE); 1722 } 1723 1724 dev_type = bus_p->bus_dev_type; 1725 1726 /* 1727 * For ARI Devices, only function zero's MPS needs to be set. 1728 */ 1729 if ((dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) && 1730 (pcie_ari_is_enabled(pdip) == PCIE_ARI_FORW_ENABLED)) { 1731 pcie_req_id_t child_bdf; 1732 1733 if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE) 1734 return (DDI_FAILURE); 1735 if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) != 0) 1736 return (DDI_SUCCESS); 1737 } 1738 1739 if (PCIE_IS_RP(bus_p)) { 1740 /* 1741 * If this device is a root port, then the mps scan 1742 * saved the mps in the root ports bus_p. 1743 */ 1744 max_payload_size = bus_p->bus_mps; 1745 } else { 1746 /* 1747 * If the device is not a root port, then the mps of 1748 * its parent should be used. 1749 */ 1750 pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip); 1751 max_payload_size = parent_bus_p->bus_mps; 1752 } 1753 1754 if (PCIE_IS_PCIE(bus_p) && (max_payload_size >= 0)) { 1755 pcie_bus_t *rootp_bus_p = PCIE_DIP2BUS(bus_p->bus_rp_dip); 1756 uint16_t mask, dev_ctrl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL), 1757 mps = PCIE_CAP_GET(16, bus_p, PCIE_DEVCAP) & 1758 PCIE_DEVCAP_MAX_PAYLOAD_MASK; 1759 1760 mps = MIN(mps, (uint16_t)max_payload_size); 1761 1762 /* 1763 * If the MPS to be set is less than the root ports 1764 * MPS, then MRRS will have to be set the same as MPS. 1765 */ 1766 mask = ((mps < rootp_bus_p->bus_mps) ? 1767 PCIE_DEVCTL_MAX_READ_REQ_MASK : 0) | 1768 PCIE_DEVCTL_MAX_PAYLOAD_MASK; 1769 1770 dev_ctrl &= ~mask; 1771 mask = ((mps < rootp_bus_p->bus_mps) 1772 ? 
            (mps << PCIE_DEVCTL_MAX_READ_REQ_SHIFT) : 0) |
            (mps << PCIE_DEVCTL_MAX_PAYLOAD_SHIFT);

        dev_ctrl |= mask;

        PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);

        bus_p->bus_mps = mps;
    }

    return (DDI_SUCCESS);
}

/*
 * Scans a device tree/branch for maximum payload size capabilities.
 *
 * rc_dip - dip of Root Complex.
 * dip - dip of device where scan will begin.
 * max_supported (IN) - maximum allowable MPS.
 * max_supported (OUT) - maximum payload size capability of fabric.
 */
void
pcie_get_fabric_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
{
    if (dip == NULL)
        return;

    /*
     * Perform a fabric scan to obtain Maximum Payload Capabilities
     */
    (void) pcie_scan_mps(rc_dip, dip, max_supported);

    PCIE_DBG("MPS: Highest Common MPS= %x\n", *max_supported);
}

/*
 * Scans fabric and determines Maximum Payload Size based on
 * highest common denominator algorithm
 */
static void
pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
{
    int circular_count;
    pcie_max_supported_t max_pay_load_supported;

    max_pay_load_supported.dip = rc_dip;
    max_pay_load_supported.highest_common_mps = *max_supported;

    ndi_devi_enter(ddi_get_parent(dip), &circular_count);
    ddi_walk_devs(dip, pcie_get_max_supported,
        (void *)&max_pay_load_supported);
    ndi_devi_exit(ddi_get_parent(dip), circular_count);

    *max_supported = max_pay_load_supported.highest_common_mps;
}

/*
 * Called as part of the Maximum Payload Size scan.
 */
static int
pcie_get_max_supported(dev_info_t *dip, void *arg)
{
    uint32_t max_supported;
    uint16_t cap_ptr;
    pcie_max_supported_t *current = (pcie_max_supported_t *)arg;
    pci_regspec_t *reg;
    int rlen;
    caddr_t virt;
    ddi_acc_handle_t config_handle;

    if (ddi_get_child(current->dip) == NULL) {
        goto fail1;
    }

    if (pcie_dev(dip) == DDI_FAILURE) {
        PCIE_DBG("MPS: pcie_get_max_supported: %s: "
            "Not a PCIe dev\n", ddi_driver_name(dip));
        goto fail1;
    }

    if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
        (caddr_t)&reg, &rlen) != DDI_PROP_SUCCESS) {
        PCIE_DBG("MPS: pcie_get_max_supported: %s: "
            "Cannot read reg\n", ddi_driver_name(dip));
        goto fail1;
    }

    if (pcie_map_phys(ddi_get_child(current->dip), reg, &virt,
        &config_handle) != DDI_SUCCESS) {
        PCIE_DBG("MPS: pcie_get_max_supported: %s: pcie_map_phys "
            "failed\n", ddi_driver_name(dip));
        goto fail2;
    }

    if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, &cap_ptr)) ==
        DDI_FAILURE) {
        goto fail3;
    }

    max_supported = PCI_CAP_GET16(config_handle, NULL, cap_ptr,
        PCIE_DEVCAP) & PCIE_DEVCAP_MAX_PAYLOAD_MASK;

    PCIE_DBG("PCIE MPS: %s: MPS Capabilities %x\n", ddi_driver_name(dip),
        max_supported);

    if (max_supported < current->highest_common_mps)
        current->highest_common_mps = max_supported;

fail3:
    pcie_unmap_phys(&config_handle, reg);
fail2:
    kmem_free(reg, rlen);
fail1:
    return (DDI_WALK_CONTINUE);
}

/*
 * Determines if there are any root ports attached to a root complex.
 *
 * dip - dip of root complex
 *
 * Returns - DDI_SUCCESS if there is at least one root port, otherwise
 * DDI_FAILURE.
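 *
 * Each child is checked by reading the port-type field of its PCIe
 * capability through a temporary config handle; the walk stops at the
 * first child reporting PCIE_PCIECAP_DEV_TYPE_ROOT.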
1895 */ 1896 int 1897 pcie_root_port(dev_info_t *dip) 1898 { 1899 int port_type; 1900 uint16_t cap_ptr; 1901 ddi_acc_handle_t config_handle; 1902 dev_info_t *cdip = ddi_get_child(dip); 1903 1904 /* 1905 * Determine if any of the children of the passed in dip 1906 * are root ports. 1907 */ 1908 for (; cdip; cdip = ddi_get_next_sibling(cdip)) { 1909 1910 if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS) 1911 continue; 1912 1913 if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, 1914 &cap_ptr)) == DDI_FAILURE) { 1915 pci_config_teardown(&config_handle); 1916 continue; 1917 } 1918 1919 port_type = PCI_CAP_GET16(config_handle, NULL, cap_ptr, 1920 PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK; 1921 1922 pci_config_teardown(&config_handle); 1923 1924 if (port_type == PCIE_PCIECAP_DEV_TYPE_ROOT) 1925 return (DDI_SUCCESS); 1926 } 1927 1928 /* No root ports were found */ 1929 1930 return (DDI_FAILURE); 1931 } 1932 1933 /* 1934 * Function that determines if a device a PCIe device. 1935 * 1936 * dip - dip of device. 1937 * 1938 * returns - DDI_SUCCESS if device is a PCIe device, otherwise DDI_FAILURE. 1939 */ 1940 int 1941 pcie_dev(dev_info_t *dip) 1942 { 1943 /* get parent device's device_type property */ 1944 char *device_type; 1945 int rc = DDI_FAILURE; 1946 dev_info_t *pdip = ddi_get_parent(dip); 1947 1948 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, 1949 DDI_PROP_DONTPASS, "device_type", &device_type) 1950 != DDI_PROP_SUCCESS) { 1951 return (DDI_FAILURE); 1952 } 1953 1954 if (strcmp(device_type, "pciex") == 0) 1955 rc = DDI_SUCCESS; 1956 else 1957 rc = DDI_FAILURE; 1958 1959 ddi_prop_free(device_type); 1960 return (rc); 1961 } 1962 1963 /* 1964 * Function to map in a device's memory space. 1965 */ 1966 static int 1967 pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec, 1968 caddr_t *addrp, ddi_acc_handle_t *handlep) 1969 { 1970 ddi_map_req_t mr; 1971 ddi_acc_hdl_t *hp; 1972 int result; 1973 ddi_device_acc_attr_t attr; 1974 1975 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 1976 attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC; 1977 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 1978 attr.devacc_attr_access = DDI_CAUTIOUS_ACC; 1979 1980 *handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL); 1981 hp = impl_acc_hdl_get(*handlep); 1982 hp->ah_vers = VERS_ACCHDL; 1983 hp->ah_dip = dip; 1984 hp->ah_rnumber = 0; 1985 hp->ah_offset = 0; 1986 hp->ah_len = 0; 1987 hp->ah_acc = attr; 1988 1989 mr.map_op = DDI_MO_MAP_LOCKED; 1990 mr.map_type = DDI_MT_REGSPEC; 1991 mr.map_obj.rp = (struct regspec *)phys_spec; 1992 mr.map_prot = PROT_READ | PROT_WRITE; 1993 mr.map_flags = DDI_MF_KERNEL_MAPPING; 1994 mr.map_handlep = hp; 1995 mr.map_vers = DDI_MAP_VERSION; 1996 1997 result = ddi_map(dip, &mr, 0, 0, addrp); 1998 1999 if (result != DDI_SUCCESS) { 2000 impl_acc_hdl_free(*handlep); 2001 *handlep = (ddi_acc_handle_t)NULL; 2002 } else { 2003 hp->ah_addr = *addrp; 2004 } 2005 2006 return (result); 2007 } 2008 2009 /* 2010 * Map out memory that was mapped in with pcie_map_phys(); 2011 */ 2012 static void 2013 pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph) 2014 { 2015 ddi_map_req_t mr; 2016 ddi_acc_hdl_t *hp; 2017 2018 hp = impl_acc_hdl_get(*handlep); 2019 ASSERT(hp); 2020 2021 mr.map_op = DDI_MO_UNMAP; 2022 mr.map_type = DDI_MT_REGSPEC; 2023 mr.map_obj.rp = (struct regspec *)ph; 2024 mr.map_prot = PROT_READ | PROT_WRITE; 2025 mr.map_flags = DDI_MF_KERNEL_MAPPING; 2026 mr.map_handlep = hp; 2027 mr.map_vers = DDI_MAP_VERSION; 2028 2029 (void) ddi_map(hp->ah_dip, &mr, hp->ah_offset, 2030 
void
pcie_set_rber_fatal(dev_info_t *dip, boolean_t val)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	bus_p->bus_pfd->pe_rber_fatal = val;
}

/*
 * Return parent Root Port's pe_rber_fatal value.
 */
boolean_t
pcie_get_rber_fatal(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	pcie_bus_t *rp_bus_p = PCIE_DIP2UPBUS(bus_p->bus_rp_dip);
	return (rp_bus_p->bus_pfd->pe_rber_fatal);
}

int
pcie_ari_supported(dev_info_t *dip)
{
	uint32_t devcap2;
	uint16_t pciecap;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	uint8_t dev_type;

	PCIE_DBG("pcie_ari_supported: dip=%p\n", dip);

	if (bus_p == NULL)
		return (PCIE_ARI_FORW_NOT_SUPPORTED);

	dev_type = bus_p->bus_dev_type;

	if ((dev_type != PCIE_PCIECAP_DEV_TYPE_DOWN) &&
	    (dev_type != PCIE_PCIECAP_DEV_TYPE_ROOT))
		return (PCIE_ARI_FORW_NOT_SUPPORTED);

	if (pcie_disable_ari) {
		PCIE_DBG("pcie_ari_supported: dip=%p: ARI Disabled\n", dip);
		return (PCIE_ARI_FORW_NOT_SUPPORTED);
	}

	pciecap = PCIE_CAP_GET(16, bus_p, PCIE_PCIECAP);

	if ((pciecap & PCIE_PCIECAP_VER_MASK) < PCIE_PCIECAP_VER_2_0) {
		PCIE_DBG("pcie_ari_supported: dip=%p: Not 2.0\n", dip);
		return (PCIE_ARI_FORW_NOT_SUPPORTED);
	}

	devcap2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCAP2);

	PCIE_DBG("pcie_ari_supported: dip=%p: DevCap2=0x%x\n",
	    dip, devcap2);

	if (devcap2 & PCIE_DEVCAP2_ARI_FORWARD) {
		PCIE_DBG("pcie_ari_supported: "
		    "dip=%p: ARI Forwarding is supported\n", dip);
		return (PCIE_ARI_FORW_SUPPORTED);
	}

	return (PCIE_ARI_FORW_NOT_SUPPORTED);
}

int
pcie_ari_enable(dev_info_t *dip)
{
	uint16_t devctl2;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	PCIE_DBG("pcie_ari_enable: dip=%p\n", dip);

	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
		return (DDI_FAILURE);

	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
	devctl2 |= PCIE_DEVCTL2_ARI_FORWARD_EN;
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);

	PCIE_DBG("pcie_ari_enable: dip=%p: writing 0x%x to DevCtl2\n",
	    dip, devctl2);

	return (DDI_SUCCESS);
}

int
pcie_ari_disable(dev_info_t *dip)
{
	uint16_t devctl2;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	PCIE_DBG("pcie_ari_disable: dip=%p\n", dip);

	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
		return (DDI_FAILURE);

	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
	devctl2 &= ~PCIE_DEVCTL2_ARI_FORWARD_EN;
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);

	PCIE_DBG("pcie_ari_disable: dip=%p: writing 0x%x to DevCtl2\n",
	    dip, devctl2);

	return (DDI_SUCCESS);
}
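/*
 * Illustrative sketch (editor's example, not part of the original
 * module): the usual flow is to check whether the downstream or root
 * port can forward ARI (which requires version 2.0 of the PCIe
 * capability, per pcie_ari_supported() above) and only then flip the
 * forwarding bit.  The PCIE_EXAMPLE guard and the function below are
 * assumptions for illustration only.
 */
#ifdef PCIE_EXAMPLE
static void
pcie_example_setup_ari(dev_info_t *port_dip)
{
	if (pcie_ari_supported(port_dip) != PCIE_ARI_FORW_SUPPORTED)
		return;

	if (pcie_ari_enable(port_dip) == DDI_SUCCESS)
		PCIE_DBG("ARI forwarding enabled on dip=%p\n", port_dip);
}
#endif	/* PCIE_EXAMPLE */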
int
pcie_ari_is_enabled(dev_info_t *dip)
{
	uint16_t devctl2;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	PCIE_DBG("pcie_ari_is_enabled: dip=%p\n", dip);

	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
		return (PCIE_ARI_FORW_DISABLED);

	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);

	PCIE_DBG("pcie_ari_is_enabled: dip=%p: DevCtl2=0x%x\n",
	    dip, devctl2);

	if (devctl2 & PCIE_DEVCTL2_ARI_FORWARD_EN) {
		PCIE_DBG("pcie_ari_is_enabled: "
		    "dip=%p: ARI Forwarding is enabled\n", dip);
		return (PCIE_ARI_FORW_ENABLED);
	}

	return (PCIE_ARI_FORW_DISABLED);
}

int
pcie_ari_device(dev_info_t *dip)
{
	ddi_acc_handle_t handle;
	uint16_t cap_ptr;

	PCIE_DBG("pcie_ari_device: dip=%p\n", dip);

	/*
	 * XXX - This function may be called before the bus_p structure
	 * has been populated.  This code can be changed to remove
	 * pci_config_setup()/pci_config_teardown() when the RFE
	 * to populate the bus_p structures early in boot is putback.
	 */

	/* First make sure it is a PCIe device */

	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
		return (PCIE_NOT_ARI_DEVICE);

	if ((PCI_CAP_LOCATE(handle, PCI_CAP_ID_PCI_E, &cap_ptr))
	    != DDI_SUCCESS) {
		pci_config_teardown(&handle);
		return (PCIE_NOT_ARI_DEVICE);
	}

	/* Locate the ARI Capability */

	if ((PCI_CAP_LOCATE(handle, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI),
	    &cap_ptr)) == DDI_FAILURE) {
		pci_config_teardown(&handle);
		return (PCIE_NOT_ARI_DEVICE);
	}

	/* The ARI Capability was found, so it must be an ARI device */
	PCIE_DBG("pcie_ari_device: ARI Device dip=%p\n", dip);

	pci_config_teardown(&handle);
	return (PCIE_ARI_DEVICE);
}

int
pcie_ari_get_next_function(dev_info_t *dip, int *func)
{
	uint32_t val;
	uint16_t cap_ptr, next_function;
	ddi_acc_handle_t handle;

	/*
	 * XXX - This function may be called before the bus_p structure
	 * has been populated.  This code can be changed to remove
	 * pci_config_setup()/pci_config_teardown() when the RFE
	 * to populate the bus_p structures early in boot is putback.
	 */

	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if ((PCI_CAP_LOCATE(handle,
	    PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI), &cap_ptr)) == DDI_FAILURE) {
		pci_config_teardown(&handle);
		return (DDI_FAILURE);
	}

	val = PCI_CAP_GET32(handle, NULL, cap_ptr, PCIE_ARI_CAP);

	next_function = (val >> PCIE_ARI_CAP_NEXT_FUNC_SHIFT) &
	    PCIE_ARI_CAP_NEXT_FUNC_MASK;

	pci_config_teardown(&handle);

	*func = next_function;

	return (DDI_SUCCESS);
}

dev_info_t *
pcie_func_to_dip(dev_info_t *dip, pcie_req_id_t function)
{
	pcie_req_id_t child_bdf;
	dev_info_t *cdip;

	for (cdip = ddi_get_child(dip); cdip;
	    cdip = ddi_get_next_sibling(cdip)) {

		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
			return (NULL);

		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) == function)
			return (cdip);
	}

	return (NULL);
}
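/*
 * Illustrative sketch (editor's example, not part of the original
 * module): walking an ARI device's function chain with the helpers
 * above.  The chain starts at function 0, and a Next Function Number
 * of 0 terminates it.  The PCIE_EXAMPLE guard and the function below
 * are assumptions for illustration only.
 */
#ifdef PCIE_EXAMPLE
static void
pcie_example_walk_ari_chain(dev_info_t *bridge_dip)
{
	int func = 0;
	dev_info_t *cdip = pcie_func_to_dip(bridge_dip, 0);

	/* Only meaningful if function 0 is an ARI device */
	if (cdip == NULL || pcie_ari_device(cdip) != PCIE_ARI_DEVICE)
		return;

	while (cdip != NULL) {
		PCIE_DBG("ARI function %d: dip=%p\n", func, cdip);

		if (pcie_ari_get_next_function(cdip, &func) != DDI_SUCCESS ||
		    func == 0)
			break;

		cdip = pcie_func_to_dip(bridge_dip, (pcie_req_id_t)func);
	}
}
#endif	/* PCIE_EXAMPLE */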
#ifdef DEBUG

static void
pcie_print_bus(pcie_bus_t *bus_p)
{
	pcie_dbg("\tbus_dip = 0x%p\n", bus_p->bus_dip);
	pcie_dbg("\tbus_fm_flags = 0x%x\n", bus_p->bus_fm_flags);

	pcie_dbg("\tbus_bdf = 0x%x\n", bus_p->bus_bdf);
	pcie_dbg("\tbus_dev_ven_id = 0x%x\n", bus_p->bus_dev_ven_id);
	pcie_dbg("\tbus_rev_id = 0x%x\n", bus_p->bus_rev_id);
	pcie_dbg("\tbus_hdr_type = 0x%x\n", bus_p->bus_hdr_type);
	pcie_dbg("\tbus_dev_type = 0x%x\n", bus_p->bus_dev_type);
	pcie_dbg("\tbus_bdg_secbus = 0x%x\n", bus_p->bus_bdg_secbus);
	pcie_dbg("\tbus_pcie_off = 0x%x\n", bus_p->bus_pcie_off);
	pcie_dbg("\tbus_aer_off = 0x%x\n", bus_p->bus_aer_off);
	pcie_dbg("\tbus_pcix_off = 0x%x\n", bus_p->bus_pcix_off);
	pcie_dbg("\tbus_ecc_ver = 0x%x\n", bus_p->bus_ecc_ver);
}

/*
 * For debugging purposes, set pcie_dbg_print != 0 to see printf messages
 * during interrupt.
 *
 * When a proper solution is in place this code will disappear.
 * Potential solutions are:
 * o circular buffers
 * o taskq to print at lower pil
 */
int pcie_dbg_print = 0;

void
pcie_dbg(char *fmt, ...)
{
	va_list ap;

	if (!pcie_debug_flags) {
		return;
	}
	va_start(ap, fmt);
	if (servicing_interrupt()) {
		if (pcie_dbg_print) {
			prom_vprintf(fmt, ap);
		}
	} else {
		prom_vprintf(fmt, ap);
	}
	va_end(ap);
}
#endif	/* DEBUG */

#if defined(__i386) || defined(__amd64)
static void
pcie_check_io_mem_range(ddi_acc_handle_t cfg_hdl, boolean_t *empty_io_range,
    boolean_t *empty_mem_range)
{
	uint8_t class, subclass;
	uint_t val;

	class = pci_config_get8(cfg_hdl, PCI_CONF_BASCLASS);
	subclass = pci_config_get8(cfg_hdl, PCI_CONF_SUBCLASS);

	if ((class == PCI_CLASS_BRIDGE) && (subclass == PCI_BRIDGE_PCI)) {
		val = (((uint_t)pci_config_get8(cfg_hdl, PCI_BCNF_IO_BASE_LOW) &
		    PCI_BCNF_IO_MASK) << 8);
		/*
		 * Assume that a decoded I/O base of zero means the I/O
		 * window was never programmed, i.e. the range is empty.
		 * Likewise for the memory window below.
		 */
		if (val == 0)
			*empty_io_range = B_TRUE;
		val = (((uint_t)pci_config_get16(cfg_hdl, PCI_BCNF_MEM_BASE) &
		    PCI_BCNF_MEM_MASK) << 16);
		if (val == 0)
			*empty_mem_range = B_TRUE;
	}
}

#endif /* defined(__i386) || defined(__amd64) */
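/*
 * Worked example of the window decode in pcie_check_io_mem_range()
 * (editor's illustration; the register values are assumptions): a
 * bridge whose I/O Base register reads 0x21 and whose Memory Base
 * register reads 0xe010 decodes as
 *
 *	(0x21 & PCI_BCNF_IO_MASK) << 8		== 0x2000
 *	(0xe010 & PCI_BCNF_MEM_MASK) << 16	== 0xe0100000
 *
 * so both windows are programmed; a decoded base of zero is what marks
 * the corresponding range as empty.
 */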