/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/promif.h>
#include <sys/disp.h>
#include <sys/stat.h>
#include <sys/file.h>
#include <sys/pci_cap.h>
#include <sys/pci_impl.h>
#include <sys/pcie_impl.h>
#include <sys/hotplug/pci/pcie_hp.h>
#include <sys/hotplug/pci/pcicfg.h>

/* Local function prototypes */
static void pcie_init_pfd(dev_info_t *);
static void pcie_fini_pfd(dev_info_t *);

#if defined(__i386) || defined(__amd64)
static void pcie_check_io_mem_range(ddi_acc_handle_t, boolean_t *, boolean_t *);
#endif /* defined(__i386) || defined(__amd64) */

#ifdef DEBUG
uint_t pcie_debug_flags = 0;
static void pcie_print_bus(pcie_bus_t *bus_p);
void pcie_dbg(char *fmt, ...);
#endif /* DEBUG */

/* Variable to control default PCI-Express config settings */
ushort_t pcie_command_default =
    PCI_COMM_SERR_ENABLE |
    PCI_COMM_WAIT_CYC_ENAB |
    PCI_COMM_PARITY_DETECT |
    PCI_COMM_ME |
    PCI_COMM_MAE |
    PCI_COMM_IO;

/* xxx_fw are bits that are controlled by FW and should not be modified */
ushort_t pcie_command_default_fw =
    PCI_COMM_SPEC_CYC |
    PCI_COMM_MEMWR_INVAL |
    PCI_COMM_PALETTE_SNOOP |
    PCI_COMM_WAIT_CYC_ENAB |
    0xF800; /* Reserved Bits */

ushort_t pcie_bdg_command_default_fw =
    PCI_BCNF_BCNTRL_ISA_ENABLE |
    PCI_BCNF_BCNTRL_VGA_ENABLE |
    0xF000; /* Reserved Bits */

/* PCI-Express Base error defaults */
ushort_t pcie_base_err_default =
    PCIE_DEVCTL_CE_REPORTING_EN |
    PCIE_DEVCTL_NFE_REPORTING_EN |
    PCIE_DEVCTL_FE_REPORTING_EN |
    PCIE_DEVCTL_UR_REPORTING_EN;

/* PCI-Express Device Control Register */
uint16_t pcie_devctl_default = PCIE_DEVCTL_RO_EN |
    PCIE_DEVCTL_MAX_READ_REQ_512;

/* PCI-Express AER Root Control Register */
#define	PCIE_ROOT_SYS_ERR	(PCIE_ROOTCTL_SYS_ERR_ON_CE_EN | \
				PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN | \
				PCIE_ROOTCTL_SYS_ERR_ON_FE_EN)

#if defined(__xpv)
ushort_t pcie_root_ctrl_default =
    PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN;
#else
ushort_t pcie_root_ctrl_default =
    PCIE_ROOTCTL_SYS_ERR_ON_CE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN;
#endif /* __xpv */

/* PCI-Express Root Error Command Register */
ushort_t pcie_root_error_cmd_default =
    PCIE_AER_RE_CMD_CE_REP_EN |
    PCIE_AER_RE_CMD_NFE_REP_EN |
    PCIE_AER_RE_CMD_FE_REP_EN;

/* ECRC settings in the PCIe AER Control Register */
uint32_t pcie_ecrc_value =
    PCIE_AER_CTL_ECRC_GEN_ENA |
    PCIE_AER_CTL_ECRC_CHECK_ENA;

/*
 * If a particular platform wants to disable certain errors, such as UR/MA,
 * instead of using #defines have the platform's PCIe Root Complex driver set
 * these masks using the pcie_get_XXX_mask and pcie_set_XXX_mask functions.
 * For x86 the closest thing to a PCIe Root Complex driver is NPE; for SPARC
 * it is PX.
 *
 * pcie_serr_disable_flag : disable SERR only (in RCR and command reg).  x86
 * systems may want to disable SERR in general.  For Root Ports, enabling
 * SERR causes NMIs which are not handled and result in a watchdog timeout
 * error.
 */
uint32_t pcie_aer_uce_mask = 0;		/* AER UE Mask */
uint32_t pcie_aer_ce_mask = 0;		/* AER CE Mask */
uint32_t pcie_aer_suce_mask = 0;	/* AER Secondary UE Mask */
uint32_t pcie_serr_disable_flag = 0;	/* Disable SERR */

/* Default severities needed for eversholt.  Error handling doesn't care. */
uint32_t pcie_aer_uce_severity = PCIE_AER_UCE_MTLP | PCIE_AER_UCE_RO | \
    PCIE_AER_UCE_FCP | PCIE_AER_UCE_SD | PCIE_AER_UCE_DLP | \
    PCIE_AER_UCE_TRAINING;
uint32_t pcie_aer_suce_severity = PCIE_AER_SUCE_SERR_ASSERT | \
    PCIE_AER_SUCE_UC_ADDR_ERR | PCIE_AER_SUCE_UC_ATTR_ERR | \
    PCIE_AER_SUCE_USC_MSG_DATA_ERR;

int pcie_max_mps = PCIE_DEVCTL_MAX_PAYLOAD_4096 >> 5;
int pcie_disable_ari = 0;

static void pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip,
	int *max_supported);
static int pcie_get_max_supported(dev_info_t *dip, void *arg);
static int pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
    caddr_t *addrp, ddi_acc_handle_t *handlep);
static void pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph);

/*
 * modload support
 */

static struct modlmisc modlmisc = {
	&mod_miscops,	/* Type of module */
	"PCI Express Framework Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlmisc,
	NULL
};

/*
 * Global Variables needed for a non-atomic version of ddi_fm_ereport_post.
 * Currently used to send the pci.fabric ereports whose payload depends on the
 * type of PCI device it is being sent for.
 */
char		*pcie_nv_buf;
nv_alloc_t	*pcie_nvap;
nvlist_t	*pcie_nvl;

int
_init(void)
{
	int rval;

	pcie_nv_buf = kmem_alloc(ERPT_DATA_SZ, KM_SLEEP);
	pcie_nvap = fm_nva_xcreate(pcie_nv_buf, ERPT_DATA_SZ);
	pcie_nvl = fm_nvlist_create(pcie_nvap);

	rval = mod_install(&modlinkage);
	return (rval);
}

int
_fini()
{
	int		rval;

	fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
	fm_nva_xdestroy(pcie_nvap);
	kmem_free(pcie_nv_buf, ERPT_DATA_SZ);

	rval = mod_remove(&modlinkage);
	return (rval);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
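
/*
 * A rough sketch of how a PCIe nexus driver (e.g. pcieb, npe, px)
 * typically consumes this framework; the exact call sites live in those
 * drivers, not in this module:
 *
 *	attach(9E)				-> pcie_init(dip, arg)
 *	DDI_CTLOPS_INITCHILD on a child		-> pcie_init_bus(cdip)
 *						   pcie_initchild(cdip)
 *	child driver attached			-> pcie_postattach_child(cdip)
 *	DDI_CTLOPS_UNINITCHILD on a child	-> pcie_uninitchild(cdip)
 *	detach(9E)				-> pcie_uninit(dip)
 */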

/* ARGSUSED */
int
pcie_init(dev_info_t *dip, caddr_t arg)
{
	int	ret = DDI_SUCCESS;

	/*
	 * Create a "devctl" minor node to support DEVCTL_DEVICE_*
	 * and DEVCTL_BUS_* ioctls to this bus.
	 */
	if ((ret = ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    PCI_MINOR_NUM(ddi_get_instance(dip), PCI_DEVCTL_MINOR),
	    DDI_NT_NEXUS, 0)) != DDI_SUCCESS) {
		PCIE_DBG("Failed to create devctl minor node for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

		return (ret);
	}

	if ((ret = pcie_hp_init(dip, arg)) != DDI_SUCCESS) {
		/*
		 * On a few x86 platforms we have observed unexpected
		 * hotplug initialization failures in recent years.  Print
		 * a warning and continue, because we don't want a hotplug
		 * initialization failure to stop PCI driver attach and
		 * system boot before all those issues are addressed.
		 */
		cmn_err(CE_WARN, "%s%d: Failed setting hotplug framework\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

#if defined(__sparc)
		ddi_remove_minor_node(dip, "devctl");

		return (ret);
#endif /* defined(__sparc) */
	}

	if ((pcie_ari_supported(dip) == PCIE_ARI_FORW_SUPPORTED) &&
	    (pcie_ari_is_enabled(dip) == PCIE_ARI_FORW_DISABLED))
		(void) pcicfg_configure(dip, 0, PCICFG_ALL_FUNC,
		    PCICFG_FLAG_ENABLE_ARI);

	return (DDI_SUCCESS);
}

/* ARGSUSED */
int
pcie_uninit(dev_info_t *dip)
{
	int	ret = DDI_SUCCESS;

	if (pcie_ari_is_enabled(dip) == PCIE_ARI_FORW_ENABLED)
		(void) pcie_ari_disable(dip);

	if ((ret = pcie_hp_uninit(dip)) != DDI_SUCCESS) {
		PCIE_DBG("Failed to uninitialize hotplug for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

		return (ret);
	}

	ddi_remove_minor_node(dip, "devctl");

	return (ret);
}

/* ARGSUSED */
int
pcie_intr(dev_info_t *dip)
{
	return (pcie_hp_intr(dip));
}
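
/*
 * pcie_open()/pcie_close() below implement the usual devctl
 * exclusive-open protocol on bus_soft_state: an FEXCL open succeeds
 * only from the CLOSED state, and any open fails while an exclusive
 * open is outstanding.
 */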

/* ARGSUSED */
int
pcie_open(dev_info_t *dip, dev_t *devp, int flags, int otyp, cred_t *credp)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	/*
	 * Make sure the open is for the right file type.
	 */
	if (otyp != OTYP_CHR)
		return (EINVAL);

	/*
	 * Handle the open by tracking the device state.
	 */
	if ((bus_p->bus_soft_state == PCI_SOFT_STATE_OPEN_EXCL) ||
	    ((flags & FEXCL) &&
	    (bus_p->bus_soft_state != PCI_SOFT_STATE_CLOSED))) {
		return (EBUSY);
	}

	if (flags & FEXCL)
		bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN_EXCL;
	else
		bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN;

	return (0);
}

/* ARGSUSED */
int
pcie_close(dev_info_t *dip, dev_t dev, int flags, int otyp, cred_t *credp)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	if (otyp != OTYP_CHR)
		return (EINVAL);

	bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;

	return (0);
}

/* ARGSUSED */
int
pcie_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp)
{
	struct devctl_iocdata	*dcp;
	uint_t			bus_state;
	int			rv = DDI_SUCCESS;

	/*
	 * We can use the generic implementation for these devctl ioctls
	 */
	switch (cmd) {
	case DEVCTL_DEVICE_GETSTATE:
	case DEVCTL_DEVICE_ONLINE:
	case DEVCTL_DEVICE_OFFLINE:
	case DEVCTL_BUS_GETSTATE:
		return (ndi_devctl_ioctl(dip, cmd, arg, mode, 0));
	default:
		break;
	}

	/*
	 * read devctl ioctl data
	 */
	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
		return (EFAULT);

	switch (cmd) {
	case DEVCTL_BUS_QUIESCE:
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
			if (bus_state == BUS_QUIESCED)
				break;
		(void) ndi_set_bus_state(dip, BUS_QUIESCED);
		break;
	case DEVCTL_BUS_UNQUIESCE:
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
			if (bus_state == BUS_ACTIVE)
				break;
		(void) ndi_set_bus_state(dip, BUS_ACTIVE);
		break;
	case DEVCTL_BUS_RESET:
	case DEVCTL_BUS_RESETALL:
	case DEVCTL_DEVICE_RESET:
		rv = ENOTSUP;
		break;
	default:
		rv = ENOTTY;
	}

	ndi_dc_freehdl(dcp);
	return (rv);
}

/* ARGSUSED */
int
pcie_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int flags, char *name, caddr_t valuep, int *lengthp)
{
	if (dev == DDI_DEV_T_ANY)
		goto skip;

	if (PCIE_IS_HOTPLUG_CAPABLE(dip) &&
	    strcmp(name, "pci-occupant") == 0) {
		int	pci_dev = PCI_MINOR_NUM_TO_PCI_DEVNUM(getminor(dev));

		pcie_hp_create_occupant_props(dip, dev, pci_dev);
	}

skip:
	return (ddi_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp));
}

/*
 * PCI-Express child device initialization.
 * This function enables generic pci-express interrupts and error
 * handling.
 *
 * @param cdip		child's dip (the device's dip)
 * @return		DDI_SUCCESS or DDI_FAILURE
 */
/* ARGSUSED */
int
pcie_initchild(dev_info_t *cdip)
{
	uint16_t	tmp16, reg16;
	pcie_bus_t	*bus_p;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL) {
		PCIE_DBG("%s: BUS not found.\n",
		    ddi_driver_name(cdip));

		return (DDI_FAILURE);
	}

	/* Clear the device's status register */
	reg16 = PCIE_GET(16, bus_p, PCI_CONF_STAT);
	PCIE_PUT(16, bus_p, PCI_CONF_STAT, reg16);

	/* Setup the device's command register */
	reg16 = PCIE_GET(16, bus_p, PCI_CONF_COMM);
	tmp16 = (reg16 & pcie_command_default_fw) | pcie_command_default;
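
	/*
	 * A worked example of the merge above (values illustrative only,
	 * assuming the standard PCI command register bit layout):
	 * pcie_command_default_fw is 0xf8b8 and pcie_command_default is
	 * 0x01c7, so a firmware-left value of reg16 = 0x0017 (IO, MEM,
	 * ME, MEMWR_INVAL) yields (0x0017 & 0xf8b8) | 0x01c7 = 0x01d7:
	 * the firmware-owned Memory Write and Invalidate bit survives,
	 * while the framework-owned enables are forced to their defaults.
	 */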

#if defined(__i386) || defined(__amd64)
	boolean_t empty_io_range = B_FALSE;
	boolean_t empty_mem_range = B_FALSE;
	/*
	 * Check for empty IO and Mem ranges on bridges.  If so, disable
	 * IO/Mem access as it can cause a hang if enabled.
	 */
	pcie_check_io_mem_range(bus_p->bus_cfg_hdl, &empty_io_range,
	    &empty_mem_range);
	if ((empty_io_range == B_TRUE) &&
	    (pcie_command_default & PCI_COMM_IO)) {
		tmp16 &= ~PCI_COMM_IO;
		PCIE_DBG("No I/O range found for %s, bdf 0x%x\n",
		    ddi_driver_name(cdip), bus_p->bus_bdf);
	}
	if ((empty_mem_range == B_TRUE) &&
	    (pcie_command_default & PCI_COMM_MAE)) {
		tmp16 &= ~PCI_COMM_MAE;
		PCIE_DBG("No Mem range found for %s, bdf 0x%x\n",
		    ddi_driver_name(cdip), bus_p->bus_bdf);
	}
#endif /* defined(__i386) || defined(__amd64) */

	if (pcie_serr_disable_flag && PCIE_IS_PCIE(bus_p))
		tmp16 &= ~PCI_COMM_SERR_ENABLE;

	PCIE_PUT(16, bus_p, PCI_CONF_COMM, tmp16);
	PCIE_DBG_CFG(cdip, bus_p, "COMMAND", 16, PCI_CONF_COMM, reg16);

	/*
	 * If the device has a bus control register then program it
	 * based on the settings in the command register.
	 */
	if (PCIE_IS_BDG(bus_p)) {
		/* Clear the device's secondary status register */
		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_SEC_STATUS);
		PCIE_PUT(16, bus_p, PCI_BCNF_SEC_STATUS, reg16);

		/* Setup the device's secondary command register */
		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_BCNTRL);
		tmp16 = (reg16 & pcie_bdg_command_default_fw);

		tmp16 |= PCI_BCNF_BCNTRL_SERR_ENABLE;
		/*
		 * Workaround for an Nvidia bridge (device/vendor id
		 * 0x037010DE): don't set the SERR enable bit in the
		 * bridge control register, as it could lead to bogus NMIs.
		 */
		if (bus_p->bus_dev_ven_id == 0x037010DE)
			tmp16 &= ~PCI_BCNF_BCNTRL_SERR_ENABLE;

		if (pcie_command_default & PCI_COMM_PARITY_DETECT)
			tmp16 |= PCI_BCNF_BCNTRL_PARITY_ENABLE;

		/*
		 * Enable Master Abort Mode only if URs have not been masked.
		 * For PCI and PCIe-PCI bridges, enabling this bit causes
		 * Master Aborts/URs to be forwarded as a UR/TA or SERR.
		 * If this bit is masked, posted requests are dropped and
		 * non-posted requests are returned with -1.
		 */
		if (pcie_aer_uce_mask & PCIE_AER_UCE_UR)
			tmp16 &= ~PCI_BCNF_BCNTRL_MAST_AB_MODE;
		else
			tmp16 |= PCI_BCNF_BCNTRL_MAST_AB_MODE;
		PCIE_PUT(16, bus_p, PCI_BCNF_BCNTRL, tmp16);
		PCIE_DBG_CFG(cdip, bus_p, "SEC CMD", 16, PCI_BCNF_BCNTRL,
		    reg16);
	}

	if (PCIE_IS_PCIE(bus_p)) {
		/* Setup PCIe device control register */
		reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
		tmp16 = pcie_devctl_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(cdip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);

		/* Enable PCIe errors */
		pcie_enable_errors(cdip);
	}

	bus_p->bus_ari = B_FALSE;
	if ((pcie_ari_is_enabled(ddi_get_parent(cdip))
	    == PCIE_ARI_FORW_ENABLED) && (pcie_ari_device(cdip)
	    == PCIE_ARI_DEVICE)) {
		bus_p->bus_ari = B_TRUE;
	}

	if (pcie_initchild_mps(cdip) == DDI_FAILURE)
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}

#define	PCIE_ZALLOC(data) kmem_zalloc(sizeof (data), KM_SLEEP)
static void
pcie_init_pfd(dev_info_t *dip)
{
	pf_data_t	*pfd_p = PCIE_ZALLOC(pf_data_t);
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	PCIE_DIP2PFD(dip) = pfd_p;

	pfd_p->pe_bus_p = bus_p;
	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_lock = B_FALSE;
	pfd_p->pe_valid = B_FALSE;

	/* Allocate the root fault struct for both RC and RP */
	if (PCIE_IS_ROOT(bus_p)) {
		PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
	}

	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);

	if (PCIE_IS_BDG(bus_p))
		PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);

	if (PCIE_IS_PCIE(bus_p)) {
		PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);

		if (PCIE_IS_RP(bus_p))
			PCIE_RP_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_rp_err_regs_t);

		PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;

		if (PCIE_IS_RP(bus_p)) {
			PCIE_ADV_RP_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id =
			    PCIE_INVALID_BDF;
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id =
			    PCIE_INVALID_BDF;
		} else if (PCIE_IS_PCIE_BDG(bus_p)) {
			PCIE_ADV_BDG_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_adv_bdg_err_regs_t);
			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
			    PCIE_INVALID_BDF;
		}

		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			PCIX_BDG_ERR_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				PCIX_BDG_ECC_REG(pfd_p, 0) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
				PCIX_BDG_ECC_REG(pfd_p, 1) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
			}
		}
	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			PCIX_BDG_ERR_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				PCIX_BDG_ECC_REG(pfd_p, 0) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
				PCIX_BDG_ECC_REG(pfd_p, 1) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
			}
		} else {
			PCIX_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcix_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p))
				PCIX_ECC_REG(pfd_p) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
		}
	}
}
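
/*
 * Summary of the register-save structures pcie_init_pfd() above hangs
 * off a pf_data_t (pcie_fini_pfd() below frees the same set):
 *
 *	every device:		pf_pci_err_regs_t
 *	root (RC or RP):	pf_root_fault_t
 *	any bridge:		pf_pci_bdg_err_regs_t
 *	PCIe device:		pf_pcie_err_regs_t + pf_pcie_adv_err_regs_t
 *	PCIe root port:		pf_pcie_rp_err_regs_t +
 *				pf_pcie_adv_rp_err_regs_t
 *	PCIe-PCI bridge:	pf_pcie_adv_bdg_err_regs_t
 *	PCI-X device/bridge:	pf_pcix_{,bdg_}err_regs_t, plus a pair of
 *				pf_pcix_ecc_regs_t when the ECC version
 *				check passes
 */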

static void
pcie_fini_pfd(dev_info_t *dip)
{
	pf_data_t	*pfd_p = PCIE_DIP2PFD(dip);
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	if (PCIE_IS_PCIE(bus_p)) {
		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}

			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
			    sizeof (pf_pcix_bdg_err_regs_t));
		}

		if (PCIE_IS_RP(bus_p))
			kmem_free(PCIE_ADV_RP_REG(pfd_p),
			    sizeof (pf_pcie_adv_rp_err_regs_t));
		else if (PCIE_IS_PCIE_BDG(bus_p))
			kmem_free(PCIE_ADV_BDG_REG(pfd_p),
			    sizeof (pf_pcie_adv_bdg_err_regs_t));

		kmem_free(PCIE_ADV_REG(pfd_p),
		    sizeof (pf_pcie_adv_err_regs_t));

		if (PCIE_IS_RP(bus_p))
			kmem_free(PCIE_RP_REG(pfd_p),
			    sizeof (pf_pcie_rp_err_regs_t));

		kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}

			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
			    sizeof (pf_pcix_bdg_err_regs_t));
		} else {
			if (PCIX_ECC_VERSION_CHECK(bus_p))
				kmem_free(PCIX_ECC_REG(pfd_p),
				    sizeof (pf_pcix_ecc_regs_t));

			kmem_free(PCIX_ERR_REG(pfd_p),
			    sizeof (pf_pcix_err_regs_t));
		}
	}

	if (PCIE_IS_BDG(bus_p))
		kmem_free(PCI_BDG_ERR_REG(pfd_p),
		    sizeof (pf_pci_bdg_err_regs_t));

	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));

	if (PCIE_IS_ROOT(bus_p))
		kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));

	kmem_free(PCIE_DIP2PFD(dip), sizeof (pf_data_t));

	PCIE_DIP2PFD(dip) = NULL;
}


/*
 * Special functions to allocate pf_data_t's for PCIe root complexes.
 * Note: Root Complex not Root Port
 */
void
pcie_rc_init_pfd(dev_info_t *dip, pf_data_t *pfd_p)
{
	pfd_p->pe_bus_p = PCIE_DIP2DOWNBUS(dip);
	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_lock = B_FALSE;
	pfd_p->pe_valid = B_FALSE;

	PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
	PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
	PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);
	PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);
	PCIE_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_rp_err_regs_t);
	PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
	PCIE_ADV_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);

	PCIE_ADV_REG(pfd_p)->pcie_ue_sev = pcie_aer_uce_severity;
}

void
pcie_rc_fini_pfd(pf_data_t *pfd_p)
{
	kmem_free(PCIE_ADV_RP_REG(pfd_p), sizeof (pf_pcie_adv_rp_err_regs_t));
	kmem_free(PCIE_ADV_REG(pfd_p), sizeof (pf_pcie_adv_err_regs_t));
	kmem_free(PCIE_RP_REG(pfd_p), sizeof (pf_pcie_rp_err_regs_t));
	kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
	kmem_free(PCI_BDG_ERR_REG(pfd_p), sizeof (pf_pci_bdg_err_regs_t));
	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));
	kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
}
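
/*
 * Sketch of the expected calling sequence for the pcie_rc_* routines
 * above and below (the RC drivers - npe on x86, px on SPARC - own both
 * the dip and the pf_data_t storage; exact usage lives in those
 * drivers):
 *
 *	pcie_rc_init_bus(rcdip);
 *	pcie_rc_init_pfd(rcdip, &rc_pfd);
 *	... error handling uses rc_pfd ...
 *	pcie_rc_fini_pfd(&rc_pfd);
 *	pcie_rc_fini_bus(rcdip);
 */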

void
pcie_rc_init_bus(dev_info_t *dip)
{
	pcie_bus_t *bus_p;

	bus_p = (pcie_bus_t *)kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);
	bus_p->bus_dip = dip;
	bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_RC_PSEUDO;
	bus_p->bus_hdr_type = PCI_HEADER_ONE;

	/* Fake that there are AER logs */
	bus_p->bus_aer_off = (uint16_t)-1;

	/* Needed only for handle lookup */
	bus_p->bus_fm_flags |= PF_FM_READY;

	ndi_set_bus_private(dip, B_FALSE, DEVI_PORT_TYPE_PCI, bus_p);
}

void
pcie_rc_fini_bus(dev_info_t *dip)
{
	pcie_bus_t *bus_p = (pcie_bus_t *)ndi_get_bus_private(dip, B_FALSE);

	ndi_set_bus_private(dip, B_FALSE, NULL, NULL);
	kmem_free(bus_p, sizeof (pcie_bus_t));
}

/*
 * Initialize PCIe Bus Private Data
 *
 * PCIe Bus Private Data contains commonly used PCI/PCIe information and
 * offsets to key registers.
 */
pcie_bus_t *
pcie_init_bus(dev_info_t *cdip)
{
	pcie_bus_t		*bus_p = 0;
	ddi_acc_handle_t	eh = NULL;
	int			range_size;
	dev_info_t		*pdip;
	const char		*errstr = NULL;

	ASSERT(PCIE_DIP2UPBUS(cdip) == NULL);

	/* allocate memory for pcie bus data */
	bus_p = kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);

	/* Set back pointer to dip */
	bus_p->bus_dip = cdip;

	/* Create a config access handle, used especially for error handling */
	if (pci_config_setup(cdip, &eh) != DDI_SUCCESS) {
		errstr = "Cannot setup config access";
		goto fail;
	}

	bus_p->bus_cfg_hdl = eh;
	bus_p->bus_fm_flags = 0;
	bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;

	/* get device's bus/dev/function number */
	if (pcie_get_bdf_from_dip(cdip, &bus_p->bus_bdf) != DDI_SUCCESS) {
		errstr = "Cannot get device BDF";
		goto fail;
	}

	/* Save the Vendor Id and Device Id */
	bus_p->bus_dev_ven_id = PCIE_GET(32, bus_p, PCI_CONF_VENID);
	bus_p->bus_rev_id = PCIE_GET(8, bus_p, PCI_CONF_REVID);

	/* Save the Header Type */
	bus_p->bus_hdr_type = PCIE_GET(8, bus_p, PCI_CONF_HEADER);
	bus_p->bus_hdr_type &= PCI_HEADER_TYPE_M;

	/* Figure out the device type and all relevant capability offsets */
	if ((PCI_CAP_LOCATE(eh, PCI_CAP_ID_PCI_E, &bus_p->bus_pcie_off))
	    != DDI_FAILURE) {
		bus_p->bus_dev_type = PCI_CAP_GET16(eh, NULL,
		    bus_p->bus_pcie_off, PCIE_PCIECAP) &
		    PCIE_PCIECAP_DEV_TYPE_MASK;

		if (PCI_CAP_LOCATE(eh, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_AER),
		    &bus_p->bus_aer_off) != DDI_SUCCESS)
			bus_p->bus_aer_off = NULL;

		/* Check and save PCIe hotplug capability information */
		if ((PCIE_IS_RP(bus_p) || PCIE_IS_SWD(bus_p)) &&
		    (PCI_CAP_GET16(eh, NULL, bus_p->bus_pcie_off, PCIE_PCIECAP)
		    & PCIE_PCIECAP_SLOT_IMPL) &&
		    (PCI_CAP_GET32(eh, NULL, bus_p->bus_pcie_off, PCIE_SLOTCAP)
		    & PCIE_SLOTCAP_HP_CAPABLE))
			bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE;
	} else {
		bus_p->bus_pcie_off = NULL;
		bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;
	}

	if ((PCI_CAP_LOCATE(eh, PCI_CAP_ID_PCIX, &bus_p->bus_pcix_off))
	    != DDI_FAILURE) {
		if (PCIE_IS_BDG(bus_p))
			bus_p->bus_ecc_ver = PCIX_CAP_GET(16, bus_p,
			    PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
		else
			bus_p->bus_ecc_ver = PCIX_CAP_GET(16, bus_p,
			    PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
	} else {
		bus_p->bus_pcix_off = NULL;
		bus_p->bus_ecc_ver = NULL;
	}
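
	/*
	 * Note on the capability discovery above: functions lacking a
	 * PCIe capability are tagged PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO,
	 * so the PCIE_IS_PCIE/PCIE_IS_RP/PCIE_IS_PCIX style predicates
	 * used throughout this file can be answered from bus_p alone,
	 * without touching config space again.
	 */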

	/* Save the Range information if device is a switch/bridge */
	if (PCIE_IS_BDG(bus_p)) {
		/* Check and save PCI hotplug (SHPC) capability information */
		if ((PCI_CAP_LOCATE(eh, PCI_CAP_ID_PCI_HOTPLUG,
		    &bus_p->bus_pci_hp_off)) == DDI_SUCCESS)
			bus_p->bus_hp_sup_modes |= PCIE_PCI_HP_MODE;

		/* get "bus_range" property */
		range_size = sizeof (pci_bus_range_t);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
		    "bus-range", (caddr_t)&bus_p->bus_bus_range, &range_size)
		    != DDI_PROP_SUCCESS) {
			errstr = "Cannot find \"bus-range\" property";
			goto fail;
		}

		/* get secondary bus number */
		bus_p->bus_bdg_secbus = PCIE_GET(8, bus_p, PCI_BCNF_SECBUS);

		/* Get "ranges" property */
		if (ddi_getlongprop(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
		    "ranges", (caddr_t)&bus_p->bus_addr_ranges,
		    &bus_p->bus_addr_entries) != DDI_PROP_SUCCESS)
			bus_p->bus_addr_entries = 0;
		bus_p->bus_addr_entries /= sizeof (ppb_ranges_t);
	}

	/* save "assigned-addresses" property array, ignore failures */
	if (ddi_getlongprop(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
	    "assigned-addresses", (caddr_t)&bus_p->bus_assigned_addr,
	    &bus_p->bus_assigned_entries) == DDI_PROP_SUCCESS)
		bus_p->bus_assigned_entries /= sizeof (pci_regspec_t);
	else
		bus_p->bus_assigned_entries = 0;

	/* save RP dip and RP bdf */
	if (PCIE_IS_RP(bus_p)) {
		bus_p->bus_rp_dip = cdip;
		bus_p->bus_rp_bdf = bus_p->bus_bdf;
	} else {
		for (pdip = ddi_get_parent(cdip); pdip;
		    pdip = ddi_get_parent(pdip)) {
			pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip);

			/*
			 * When debugging, be aware that some NVIDIA x86
			 * architectures have 2 nodes for each RP, one at
			 * bus 0x0 and one at bus 0x80.  The requester is
			 * from bus 0x80.
			 */
			if (PCIE_IS_ROOT(parent_bus_p)) {
				bus_p->bus_rp_dip = pdip;
				bus_p->bus_rp_bdf = parent_bus_p->bus_bdf;
				break;
			}
		}
	}

	ndi_set_bus_private(cdip, B_TRUE, DEVI_PORT_TYPE_PCI, (void *)bus_p);

	if (PCIE_IS_HOTPLUG_CAPABLE(cdip))
		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE, cdip,
		    "hotplug-capable");

	pcie_init_pfd(cdip);

	bus_p->bus_mps = 0;

	pcie_init_plat(cdip);

	PCIE_DBG("Add %s(dip 0x%p, bdf 0x%x, secbus 0x%x)\n",
	    ddi_driver_name(cdip), (void *)cdip, bus_p->bus_bdf,
	    bus_p->bus_bdg_secbus);
#ifdef DEBUG
	pcie_print_bus(bus_p);
#endif

	return (bus_p);
fail:
	cmn_err(CE_WARN, "PCIE init err info failed BDF 0x%x:%s\n",
	    bus_p->bus_bdf, errstr);
	if (eh)
		pci_config_teardown(&eh);
	kmem_free(bus_p, sizeof (pcie_bus_t));
	return (NULL);
}

int
pcie_postattach_child(dev_info_t *cdip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip);

	if (!bus_p)
		return (DDI_FAILURE);

	return (pcie_enable_ce(cdip));
}

/*
 * PCI-Express child device de-initialization.
 * This function disables generic pci-express interrupts and error
 * handling.
 */
void
pcie_uninitchild(dev_info_t *cdip)
{
	pcie_disable_errors(cdip);
	pcie_fini_bus(cdip);
}

void
pcie_fini_bus(dev_info_t *cdip)
{
	pcie_bus_t	*bus_p;

	pcie_fini_plat(cdip);
	pcie_fini_pfd(cdip);

	bus_p = PCIE_DIP2UPBUS(cdip);
	ASSERT(bus_p);

	if (PCIE_IS_HOTPLUG_CAPABLE(cdip))
		(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip, "hotplug-capable");

	pci_config_teardown(&bus_p->bus_cfg_hdl);
	ndi_set_bus_private(cdip, B_TRUE, NULL, NULL);
	kmem_free(bus_p->bus_assigned_addr,
	    (sizeof (pci_regspec_t) * bus_p->bus_assigned_entries));
	kmem_free(bus_p->bus_addr_ranges,
	    (sizeof (ppb_ranges_t) * bus_p->bus_addr_entries));

	kmem_free(bus_p, sizeof (pcie_bus_t));
}

void
pcie_enable_errors(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	reg16, tmp16;
	uint32_t	reg32, tmp32;

	ASSERT(bus_p);

	/*
	 * Clear any pending errors
	 */
	pcie_clear_errors(dip);

	if (!PCIE_IS_PCIE(bus_p))
		return;

	/*
	 * Enable Baseline Error Handling but leave CE reporting off (poweron
	 * default).
	 */
	if ((reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL)) !=
	    PCI_CAP_EINVAL16) {
		tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_base_err_default & (~PCIE_DEVCTL_CE_REPORTING_EN));

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);
	}
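
	/*
	 * A worked example of the DEVCTL merge above (values illustrative,
	 * assuming the standard Device Control layout with MPS in bits 7:5
	 * and MRRS in bits 14:12): reg16 = 0x2020 (MRRS 512, MPS 256)
	 * keeps its two size fields, picks up Relaxed Ordering (0x0010)
	 * from pcie_devctl_default, and gains FE/NFE/UR reporting (0x000e)
	 * but not CE, giving tmp16 = 0x203e.
	 */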

	/* Enable Root Port Baseline Error Receiving */
	if (PCIE_IS_ROOT(bus_p) &&
	    (reg16 = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL)) !=
	    PCI_CAP_EINVAL16) {

#if defined(__xpv)
		/*
		 * When we're booted under the hypervisor we won't receive
		 * MSIs, so to ensure that uncorrectable errors aren't ignored
		 * we set the SERR_FAT and SERR_NONFAT bits in the Root Control
		 * Register.
		 */
		tmp16 = pcie_root_ctrl_default;
#else
		tmp16 = pcie_serr_disable_flag ?
		    (pcie_root_ctrl_default & ~PCIE_ROOT_SYS_ERR) :
		    pcie_root_ctrl_default;
#endif /* __xpv */
		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "ROOT DEVCTL", 16, PCIE_ROOTCTL,
		    reg16);
	}

	/*
	 * Enable PCI-Express Advanced Error Handling if it exists
	 */
	if (!PCIE_HAS_AER(bus_p))
		return;

	/* Set Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE SEV", 32, PCIE_AER_UCE_SERV,
		    reg32);
	}

	/* Enable Uncorrectable errors */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_mask;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE MASK", 32, PCIE_AER_UCE_MASK,
		    reg32);
	}

	/* Enable ECRC generation and checking */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = reg32 | pcie_ecrc_value;
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER CTL", 32, PCIE_AER_CTL, reg32);
	}

	/* Enable Secondary Uncorrectable errors if this is a bridge */
	if (!PCIE_IS_PCIE_BDG(bus_p))
		goto root;

	/* Set Secondary Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_suce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE SEV", 32, PCIE_AER_SUCE_SERV,
		    reg32);
	}

	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, pcie_aer_suce_mask);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE MASK", 32,
		    PCIE_AER_SUCE_MASK, reg32);
	}

root:
	/*
	 * Enable Root Control if this is a Root device
	 */
	if (!PCIE_IS_ROOT(bus_p))
		return;

#if !defined(__xpv)
	if ((reg16 = PCIE_AER_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
	    PCI_CAP_EINVAL16) {
		PCIE_AER_PUT(16, bus_p, PCIE_AER_RE_CMD,
		    pcie_root_error_cmd_default);
		PCIE_DBG_AER(dip, bus_p, "AER Root Err Cmd", 16,
		    PCIE_AER_RE_CMD, reg16);
	}
#endif /* __xpv */
}
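
/*
 * Sketch of the intended error-enable ordering for a child device, as
 * wired up elsewhere in this file: pcie_initchild() calls
 * pcie_enable_errors() (CE reporting deliberately left off), and only
 * after the child driver has attached does pcie_postattach_child() call
 * pcie_enable_ce() below to turn CE reporting on.
 */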

/*
 * This function is used for enabling CE reporting and setting the AER CE mask.
 * When called from outside the pcie module it should always be preceded by
 * a call to pcie_enable_errors.
 */
int
pcie_enable_ce(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	device_sts, device_ctl;
	uint32_t	tmp_pcie_aer_ce_mask;

	if (!PCIE_IS_PCIE(bus_p))
		return (DDI_SUCCESS);

	/*
	 * The "pcie_ce_mask" property is used to control both the CE reporting
	 * enable field in the device control register and the AER CE mask. We
	 * leave CE reporting disabled if pcie_ce_mask is set to -1.
	 */

	tmp_pcie_aer_ce_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "pcie_ce_mask", pcie_aer_ce_mask);

	if (tmp_pcie_aer_ce_mask == (uint32_t)-1) {
		/*
		 * Nothing to do since CE reporting has already been disabled.
		 */
		return (DDI_SUCCESS);
	}

	if (PCIE_HAS_AER(bus_p)) {
		/* Enable AER CE */
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, tmp_pcie_aer_ce_mask);
		PCIE_DBG_AER(dip, bus_p, "AER CE MASK", 32, PCIE_AER_CE_MASK,
		    0);

		/* Clear any pending AER CE errors */
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_STS, -1);
	}

	/* clear any pending CE errors */
	if ((device_sts = PCIE_CAP_GET(16, bus_p, PCIE_DEVSTS)) !=
	    PCI_CAP_EINVAL16)
		PCIE_CAP_PUT(16, bus_p, PCIE_DEVSTS,
		    device_sts & (~PCIE_DEVSTS_CE_DETECTED));

	/* Enable CE reporting */
	device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL,
	    (device_ctl & (~PCIE_DEVCTL_ERR_MASK)) | pcie_base_err_default);
	PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, device_ctl);

	return (DDI_SUCCESS);
}

/* ARGSUSED */
void
pcie_disable_errors(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	device_ctl;
	uint32_t	aer_reg;

	if (!PCIE_IS_PCIE(bus_p))
		return;

	/*
	 * Disable PCI-Express Baseline Error Handling
	 */
	device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
	device_ctl &= ~PCIE_DEVCTL_ERR_MASK;
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, device_ctl);

	/*
	 * Disable PCI-Express Advanced Error Handling if it exists
	 */
	if (!PCIE_HAS_AER(bus_p))
		goto root;

	/* Disable Uncorrectable errors */
	PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, PCIE_AER_UCE_BITS);

	/* Disable Correctable errors */
	PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, PCIE_AER_CE_BITS);

	/* Disable ECRC generation and checking */
	if ((aer_reg = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
	    PCI_CAP_EINVAL32) {
		aer_reg &= ~(PCIE_AER_CTL_ECRC_GEN_ENA |
		    PCIE_AER_CTL_ECRC_CHECK_ENA);

		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, aer_reg);
	}
	/*
	 * Disable Secondary Uncorrectable errors if this is a bridge
	 */
	if (!PCIE_IS_PCIE_BDG(bus_p))
		goto root;

	PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, PCIE_AER_SUCE_BITS);

root:
	/*
	 * Disable Root Control if this is a Root device
	 */
	if (!PCIE_IS_ROOT(bus_p))
		return;

	if (!pcie_serr_disable_flag) {
		device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL);
		device_ctl &= ~PCIE_ROOT_SYS_ERR;
		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, device_ctl);
	}

	if (!PCIE_HAS_AER(bus_p))
		return;

	if ((device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
	    PCI_CAP_EINVAL16) {
		device_ctl &= ~pcie_root_error_cmd_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_AER_RE_CMD, device_ctl);
	}
}

/*
 * Extract bdf from "reg" property.
 */
int
pcie_get_bdf_from_dip(dev_info_t *dip, pcie_req_id_t *bdf)
{
	pci_regspec_t	*regspec;
	int		reglen;

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", (int **)&regspec, (uint_t *)&reglen) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (reglen < (sizeof (pci_regspec_t) / sizeof (int))) {
		ddi_prop_free(regspec);
		return (DDI_FAILURE);
	}

	/* Get phys_hi from first element.  All have same bdf. */
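	/*
	 * Layout of phys_hi in a PCI "reg" entry (per the PCI bus binding):
	 *
	 *	npt000ss bbbbbbbb dddddfff rrrrrrrr
	 *
	 * PCI_REG_BDFR_M covers the bus/dev/func/register bits and
	 * PCI_REG_REG_M the register bits alone, so their XOR isolates
	 * bus/dev/func; shifting right by 8 then yields the 16-bit BDF.
	 * E.g. a phys_hi of 0x00824100 (bus 0x82, dev 8, func 1)
	 * produces a bdf of 0x8241.
	 */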
	*bdf = (regspec->pci_phys_hi & (PCI_REG_BDFR_M ^ PCI_REG_REG_M)) >> 8;

	ddi_prop_free(regspec);
	return (DDI_SUCCESS);
}

dev_info_t *
pcie_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t *cdip = rdip;

	for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
		;

	return (cdip);
}

uint32_t
pcie_get_bdf_for_dma_xfer(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t *cdip;

	/*
	 * As part of the probing, the PCI fcode interpreter may setup a DMA
	 * request if a given card has fcode on it, using the dip and rdip of
	 * the hotplug connector, i.e. the dip and rdip of the px/pcieb
	 * driver.  In this case, return an invalid bdf value since we cannot
	 * get to the bdf value of the actual device which will be initiating
	 * this DMA.
	 */
	if (rdip == dip)
		return (PCIE_INVALID_BDF);

	cdip = pcie_get_my_childs_dip(dip, rdip);

	/*
	 * For a given rdip, return the bdf value of dip's (px or pcieb)
	 * immediate child or secondary bus-id if dip is a PCIe2PCI bridge.
	 *
	 * XXX - For now, return an invalid bdf value for all PCI and PCI-X
	 * devices since this needs more work.
	 */
	return (PCI_GET_PCIE2PCI_SECBUS(cdip) ?
	    PCIE_INVALID_BDF : PCI_GET_BDF(cdip));
}

uint32_t
pcie_get_aer_uce_mask()
{
	return (pcie_aer_uce_mask);
}

uint32_t
pcie_get_aer_ce_mask()
{
	return (pcie_aer_ce_mask);
}

uint32_t
pcie_get_aer_suce_mask()
{
	return (pcie_aer_suce_mask);
}

uint32_t
pcie_get_serr_mask()
{
	return (pcie_serr_disable_flag);
}

void
pcie_set_aer_uce_mask(uint32_t mask)
{
	pcie_aer_uce_mask = mask;
	if (mask & PCIE_AER_UCE_UR)
		pcie_base_err_default &= ~PCIE_DEVCTL_UR_REPORTING_EN;
	else
		pcie_base_err_default |= PCIE_DEVCTL_UR_REPORTING_EN;

	if (mask & PCIE_AER_UCE_ECRC)
		pcie_ecrc_value = 0;
}

void
pcie_set_aer_ce_mask(uint32_t mask)
{
	pcie_aer_ce_mask = mask;
}

void
pcie_set_aer_suce_mask(uint32_t mask)
{
	pcie_aer_suce_mask = mask;
}

void
pcie_set_serr_mask(uint32_t mask)
{
	pcie_serr_disable_flag = mask;
}

/*
 * Is the rdip a child of dip.  Used for checking certain CTLOPS from bubbling
 * up erroneously, e.g. ISA ctlops to a PCI-PCI Bridge.
 */
boolean_t
pcie_is_child(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t	*cdip = ddi_get_child(dip);

	for (; cdip; cdip = ddi_get_next_sibling(cdip))
		if (cdip == rdip)
			break;
	return (cdip != NULL);
}

boolean_t
pcie_is_link_disabled(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	if (PCIE_IS_PCIE(bus_p)) {
		if (PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL) &
		    PCIE_LINKCTL_LINK_DISABLE)
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Initialize the MPS for a root port.
 *
 * dip - dip of root port device.
 */
void
pcie_init_root_port_mps(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	int rp_cap, max_supported = pcie_max_mps;

	(void) pcie_get_fabric_mps(ddi_get_parent(dip),
	    ddi_get_child(dip), &max_supported);

	rp_cap = PCI_CAP_GET16(bus_p->bus_cfg_hdl, NULL,
	    bus_p->bus_pcie_off, PCIE_DEVCAP) &
	    PCIE_DEVCAP_MAX_PAYLOAD_MASK;

	if (rp_cap < max_supported)
		max_supported = rp_cap;

	bus_p->bus_mps = max_supported;
	(void) pcie_initchild_mps(dip);
}

/*
 * Initialize the Maximum Payload Size of a device.
 *
 * cdip - dip of device.
 *
 * returns - DDI_SUCCESS or DDI_FAILURE
 */
int
pcie_initchild_mps(dev_info_t *cdip)
{
	int		max_payload_size;
	pcie_bus_t	*bus_p;
	dev_info_t	*pdip = ddi_get_parent(cdip);
	uint8_t		dev_type;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL) {
		PCIE_DBG("%s: BUS not found.\n",
		    ddi_driver_name(cdip));
		return (DDI_FAILURE);
	}

	dev_type = bus_p->bus_dev_type;

	/*
	 * For ARI Devices, only function zero's MPS needs to be set.
	 */
	if ((dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) &&
	    (pcie_ari_is_enabled(pdip) == PCIE_ARI_FORW_ENABLED)) {
		pcie_req_id_t child_bdf;

		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
			return (DDI_FAILURE);
		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) != 0)
			return (DDI_SUCCESS);
	}

	if (PCIE_IS_RP(bus_p)) {
		/*
		 * If this device is a root port, then the mps scan
		 * saved the mps in the root port's bus_p.
		 */
		max_payload_size = bus_p->bus_mps;
	} else {
		/*
		 * If the device is not a root port, then the mps of
		 * its parent should be used.
		 */
		pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip);
		max_payload_size = parent_bus_p->bus_mps;
	}

	if (PCIE_IS_PCIE(bus_p) && (max_payload_size >= 0)) {
		pcie_bus_t *rootp_bus_p = PCIE_DIP2BUS(bus_p->bus_rp_dip);
		uint16_t mask, dev_ctrl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL),
		    mps = PCIE_CAP_GET(16, bus_p, PCIE_DEVCAP) &
		    PCIE_DEVCAP_MAX_PAYLOAD_MASK;

		mps = MIN(mps, (uint16_t)max_payload_size);

		/*
		 * If the MPS to be set is less than the root port's
		 * MPS, then MRRS will have to be set the same as MPS.
		 */
		mask = ((mps < rootp_bus_p->bus_mps) ?
		    PCIE_DEVCTL_MAX_READ_REQ_MASK : 0) |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK;

		dev_ctrl &= ~mask;
		mask = ((mps < rootp_bus_p->bus_mps)
		    ? mps << PCIE_DEVCTL_MAX_READ_REQ_SHIFT : 0)
		    | (mps << PCIE_DEVCTL_MAX_PAYLOAD_SHIFT);

		dev_ctrl |= mask;

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);

		bus_p->bus_mps = mps;
	}

	return (DDI_SUCCESS);
}
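
/*
 * Worked example for pcie_initchild_mps() above (encodings per the PCIe
 * spec: a DevCap/DevCtl payload code of n means 128 << n bytes): a
 * device with DevCap code 2 (512B) under a fabric limited to code 1
 * (256B) gets mps = MIN(2, 1) = 1; since 1 is below a root port code
 * of, say, 2, both the Max_Payload_Size and Max_Read_Request_Size
 * fields of its Device Control register are set to code 1 (256 bytes).
 */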

/*
 * Scans a device tree/branch for maximum payload size capabilities.
 *
 * rc_dip - dip of Root Complex.
 * dip - dip of device where scan will begin.
 * max_supported (IN) - maximum allowable MPS.
 * max_supported (OUT) - maximum payload size capability of fabric.
 */
void
pcie_get_fabric_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
{
	if (dip == NULL)
		return;

	/*
	 * Perform a fabric scan to obtain Maximum Payload Capabilities
	 */
	(void) pcie_scan_mps(rc_dip, dip, max_supported);

	PCIE_DBG("MPS: Highest Common MPS= %x\n", *max_supported);
}

/*
 * Scans fabric and determines Maximum Payload Size based on
 * highest common denominator algorithm
 */
static void
pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
{
	int circular_count;
	pcie_max_supported_t max_pay_load_supported;

	max_pay_load_supported.dip = rc_dip;
	max_pay_load_supported.highest_common_mps = *max_supported;

	ndi_devi_enter(ddi_get_parent(dip), &circular_count);
	ddi_walk_devs(dip, pcie_get_max_supported,
	    (void *)&max_pay_load_supported);
	ndi_devi_exit(ddi_get_parent(dip), circular_count);

	*max_supported = max_pay_load_supported.highest_common_mps;
}

/*
 * Called as part of the Maximum Payload Size scan.
 */
static int
pcie_get_max_supported(dev_info_t *dip, void *arg)
{
	uint32_t max_supported;
	uint16_t cap_ptr;
	pcie_max_supported_t *current = (pcie_max_supported_t *)arg;
	pci_regspec_t *reg;
	int rlen;
	caddr_t virt;
	ddi_acc_handle_t config_handle;

	if (ddi_get_child(current->dip) == NULL) {
		goto fail1;
	}

	if (pcie_dev(dip) == DDI_FAILURE) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s: "
		    "Not a PCIe dev\n", ddi_driver_name(dip));
		goto fail1;
	}

	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
	    (caddr_t)&reg, &rlen) != DDI_PROP_SUCCESS) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s: "
		    "Can not read reg\n", ddi_driver_name(dip));
		goto fail1;
	}

	if (pcie_map_phys(ddi_get_child(current->dip), reg, &virt,
	    &config_handle) != DDI_SUCCESS) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s: pcie_map_phys "
		    "failed\n", ddi_driver_name(dip));
		goto fail2;
	}

	if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, &cap_ptr)) ==
	    DDI_FAILURE) {
		goto fail3;
	}

	max_supported = PCI_CAP_GET16(config_handle, NULL, cap_ptr,
	    PCIE_DEVCAP) & PCIE_DEVCAP_MAX_PAYLOAD_MASK;

	PCIE_DBG("PCIE MPS: %s: MPS Capabilities %x\n", ddi_driver_name(dip),
	    max_supported);

	if (max_supported < current->highest_common_mps)
		current->highest_common_mps = max_supported;

fail3:
	pcie_unmap_phys(&config_handle, reg);
fail2:
	kmem_free(reg, rlen);
fail1:
	return (DDI_WALK_CONTINUE);
}
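
/*
 * Worked example of the min-reduction performed by the walk above: with
 * the default cap pcie_max_mps = 5 (4096 bytes) and a fabric whose
 * devices advertise DevCap payload codes 2 (512B) and 1 (256B), the
 * walk leaves highest_common_mps = 1, i.e. the whole fabric is limited
 * to 256-byte payloads.
 */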

/*
 * Determines if there are any root ports attached to a root complex.
 *
 * dip - dip of root complex
 *
 * Returns - DDI_SUCCESS if there is at least one root port, otherwise
 *	     DDI_FAILURE.
 */
int
pcie_root_port(dev_info_t *dip)
{
	int port_type;
	uint16_t cap_ptr;
	ddi_acc_handle_t config_handle;
	dev_info_t *cdip = ddi_get_child(dip);

	/*
	 * Determine if any of the children of the passed in dip
	 * are root ports.
	 */
	for (; cdip; cdip = ddi_get_next_sibling(cdip)) {

		if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS)
			continue;

		if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E,
		    &cap_ptr)) == DDI_FAILURE) {
			pci_config_teardown(&config_handle);
			continue;
		}

		port_type = PCI_CAP_GET16(config_handle, NULL, cap_ptr,
		    PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;

		pci_config_teardown(&config_handle);

		if (port_type == PCIE_PCIECAP_DEV_TYPE_ROOT)
			return (DDI_SUCCESS);
	}

	/* No root ports were found */

	return (DDI_FAILURE);
}

/*
 * Function that determines if a device is a PCIe device.
 *
 * dip - dip of device.
 *
 * returns - DDI_SUCCESS if device is a PCIe device, otherwise DDI_FAILURE.
 */
int
pcie_dev(dev_info_t *dip)
{
	/* get parent device's device_type property */
	char *device_type;
	int rc = DDI_FAILURE;
	dev_info_t *pdip = ddi_get_parent(dip);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
	    DDI_PROP_DONTPASS, "device_type", &device_type)
	    != DDI_PROP_SUCCESS) {
		return (DDI_FAILURE);
	}

	if (strcmp(device_type, "pciex") == 0)
		rc = DDI_SUCCESS;
	else
		rc = DDI_FAILURE;

	ddi_prop_free(device_type);
	return (rc);
}

/*
 * Function to map in a device's memory space.
 */
static int
pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
    caddr_t *addrp, ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	ddi_device_acc_attr_t attr;

	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	attr.devacc_attr_access = DDI_CAUTIOUS_ACC;

	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	hp = impl_acc_hdl_get(*handlep);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = 0;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = attr;

	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_REGSPEC;
	mr.map_obj.rp = (struct regspec *)phys_spec;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	result = ddi_map(dip, &mr, 0, 0, addrp);

	if (result != DDI_SUCCESS) {
		impl_acc_hdl_free(*handlep);
		*handlep = (ddi_acc_handle_t)NULL;
	} else {
		hp->ah_addr = *addrp;
	}

	return (result);
}

/*
 * Map out memory that was mapped in with pcie_map_phys();
 */
static void
pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);

	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_REGSPEC;
	mr.map_obj.rp = (struct regspec *)ph;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
	    hp->ah_len, &hp->ah_addr);

	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;
}
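
/*
 * Design note (an observation, not new behavior): pcie_map_phys() above
 * sets devacc_attr_access to DDI_CAUTIOUS_ACC, so the config-space reads
 * the MPS scan performs through the resulting handle are done as
 * protected, peek/poke-style accesses; a device that fails to respond
 * then produces a recoverable fault rather than a panic.
 */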

void
pcie_set_rber_fatal(dev_info_t *dip, boolean_t val)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);

	bus_p->bus_pfd->pe_rber_fatal = val;
}

/*
 * Return parent Root Port's pe_rber_fatal value.
 */
boolean_t
pcie_get_rber_fatal(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	pcie_bus_t *rp_bus_p = PCIE_DIP2UPBUS(bus_p->bus_rp_dip);

	return (rp_bus_p->bus_pfd->pe_rber_fatal);
}

int
pcie_ari_supported(dev_info_t *dip)
{
	uint32_t devcap2;
	uint16_t pciecap;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	uint8_t dev_type;

	PCIE_DBG("pcie_ari_supported: dip=%p\n", dip);

	if (bus_p == NULL)
		return (PCIE_ARI_FORW_NOT_SUPPORTED);

	dev_type = bus_p->bus_dev_type;

	if ((dev_type != PCIE_PCIECAP_DEV_TYPE_DOWN) &&
	    (dev_type != PCIE_PCIECAP_DEV_TYPE_ROOT))
		return (PCIE_ARI_FORW_NOT_SUPPORTED);

	if (pcie_disable_ari) {
		PCIE_DBG("pcie_ari_supported: dip=%p: ARI Disabled\n", dip);
		return (PCIE_ARI_FORW_NOT_SUPPORTED);
	}

	pciecap = PCIE_CAP_GET(16, bus_p, PCIE_PCIECAP);

	if ((pciecap & PCIE_PCIECAP_VER_MASK) < PCIE_PCIECAP_VER_2_0) {
		PCIE_DBG("pcie_ari_supported: dip=%p: Not 2.0\n", dip);
		return (PCIE_ARI_FORW_NOT_SUPPORTED);
	}

	devcap2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCAP2);

	PCIE_DBG("pcie_ari_supported: dip=%p: DevCap2=0x%x\n",
	    dip, devcap2);

	if (devcap2 & PCIE_DEVCAP2_ARI_FORWARD) {
		PCIE_DBG("pcie_ari_supported: "
		    "dip=%p: ARI Forwarding is supported\n", dip);
		return (PCIE_ARI_FORW_SUPPORTED);
	}
	return (PCIE_ARI_FORW_NOT_SUPPORTED);
}

int
pcie_ari_enable(dev_info_t *dip)
{
	uint16_t devctl2;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	PCIE_DBG("pcie_ari_enable: dip=%p\n", dip);

	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
		return (DDI_FAILURE);

	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
	devctl2 |= PCIE_DEVCTL2_ARI_FORWARD_EN;
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);

	PCIE_DBG("pcie_ari_enable: dip=%p: writing 0x%x to DevCtl2\n",
	    dip, devctl2);

	return (DDI_SUCCESS);
}

int
pcie_ari_disable(dev_info_t *dip)
{
	uint16_t devctl2;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	PCIE_DBG("pcie_ari_disable: dip=%p\n", dip);

	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
		return (DDI_FAILURE);

	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
	devctl2 &= ~PCIE_DEVCTL2_ARI_FORWARD_EN;
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);

	PCIE_DBG("pcie_ari_disable: dip=%p: writing 0x%x to DevCtl2\n",
	    dip, devctl2);

	return (DDI_SUCCESS);
}

int
pcie_ari_is_enabled(dev_info_t *dip)
{
	uint16_t devctl2;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	PCIE_DBG("pcie_ari_is_enabled: dip=%p\n", dip);

	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
		return (PCIE_ARI_FORW_DISABLED);

	/* Device Control 2 is a 16-bit register; read it as such. */
	devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);

	PCIE_DBG("pcie_ari_is_enabled: dip=%p: DevCtl2=0x%x\n",
	    dip, devctl2);

	if (devctl2 & PCIE_DEVCTL2_ARI_FORWARD_EN) {
		PCIE_DBG("pcie_ari_is_enabled: "
		    "dip=%p: ARI Forwarding is enabled\n", dip);
		return (PCIE_ARI_FORW_ENABLED);
	}

	return (PCIE_ARI_FORW_DISABLED);
}
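
/*
 * Ordering note for the routines above: pcie_init() only turns ARI
 * forwarding on (via pcicfg_configure(..., PCICFG_FLAG_ENABLE_ARI))
 * after pcie_ari_supported() reports PCIE_ARI_FORW_SUPPORTED and
 * forwarding is still disabled.  Per the ARI ECN, enabling forwarding
 * on a downstream port whose child is not an ARI device yields
 * undefined results, since the device number bits of each config
 * request are reinterpreted as function number bits.
 */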

int
pcie_ari_device(dev_info_t *dip)
{
	ddi_acc_handle_t handle;
	uint16_t cap_ptr;

	PCIE_DBG("pcie_ari_device: dip=%p\n", dip);

	/*
	 * XXX - This function may be called before the bus_p structure
	 * has been populated.  This code can be changed to remove
	 * pci_config_setup()/pci_config_teardown() when the RFE
	 * to populate the bus_p structures early in boot is putback.
	 */

	/* First make sure it is a PCIe device */

	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
		return (PCIE_NOT_ARI_DEVICE);

	if ((PCI_CAP_LOCATE(handle, PCI_CAP_ID_PCI_E, &cap_ptr))
	    != DDI_SUCCESS) {
		pci_config_teardown(&handle);
		return (PCIE_NOT_ARI_DEVICE);
	}

	/* Locate the ARI Capability */

	if ((PCI_CAP_LOCATE(handle, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI),
	    &cap_ptr)) == DDI_FAILURE) {
		pci_config_teardown(&handle);
		return (PCIE_NOT_ARI_DEVICE);
	}

	/* ARI Capability was found, so it must be an ARI device */
	PCIE_DBG("pcie_ari_device: ARI Device dip=%p\n", dip);

	pci_config_teardown(&handle);
	return (PCIE_ARI_DEVICE);
}

int
pcie_ari_get_next_function(dev_info_t *dip, int *func)
{
	uint32_t val;
	uint16_t cap_ptr, next_function;
	ddi_acc_handle_t handle;

	/*
	 * XXX - This function may be called before the bus_p structure
	 * has been populated.  This code can be changed to remove
	 * pci_config_setup()/pci_config_teardown() when the RFE
	 * to populate the bus_p structures early in boot is putback.
	 */

	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if ((PCI_CAP_LOCATE(handle,
	    PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI), &cap_ptr)) == DDI_FAILURE) {
		pci_config_teardown(&handle);
		return (DDI_FAILURE);
	}

	val = PCI_CAP_GET32(handle, NULL, cap_ptr, PCIE_ARI_CAP);

	next_function = (val >> PCIE_ARI_CAP_NEXT_FUNC_SHIFT) &
	    PCIE_ARI_CAP_NEXT_FUNC_MASK;

	pci_config_teardown(&handle);

	*func = next_function;

	return (DDI_SUCCESS);
}

dev_info_t *
pcie_func_to_dip(dev_info_t *dip, pcie_req_id_t function)
{
	pcie_req_id_t child_bdf;
	dev_info_t *cdip;

	for (cdip = ddi_get_child(dip); cdip;
	    cdip = ddi_get_next_sibling(cdip)) {

		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
			return (NULL);

		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) == function)
			return (cdip);
	}
	return (NULL);
}
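
/*
 * Sketch of how a caller (e.g. a hotplug/enumeration path) might walk an
 * ARI device's linked function list with the two routines above; the
 * loop is illustrative only and does not exist in this file:
 *
 *	int func = 0;
 *	dev_info_t *cdip;
 *	do {
 *		cdip = pcie_func_to_dip(dip, (pcie_req_id_t)func);
 *		if (cdip == NULL ||
 *		    pcie_ari_get_next_function(cdip, &func) != DDI_SUCCESS)
 *			break;
 *	} while (func != 0);	- a Next Function Number of 0 ends the chain
 */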

#ifdef	DEBUG

static void
pcie_print_bus(pcie_bus_t *bus_p)
{
	pcie_dbg("\tbus_dip = 0x%p\n", bus_p->bus_dip);
	pcie_dbg("\tbus_fm_flags = 0x%x\n", bus_p->bus_fm_flags);

	pcie_dbg("\tbus_bdf = 0x%x\n", bus_p->bus_bdf);
	pcie_dbg("\tbus_dev_ven_id = 0x%x\n", bus_p->bus_dev_ven_id);
	pcie_dbg("\tbus_rev_id = 0x%x\n", bus_p->bus_rev_id);
	pcie_dbg("\tbus_hdr_type = 0x%x\n", bus_p->bus_hdr_type);
	pcie_dbg("\tbus_dev_type = 0x%x\n", bus_p->bus_dev_type);
	pcie_dbg("\tbus_bdg_secbus = 0x%x\n", bus_p->bus_bdg_secbus);
	pcie_dbg("\tbus_pcie_off = 0x%x\n", bus_p->bus_pcie_off);
	pcie_dbg("\tbus_aer_off = 0x%x\n", bus_p->bus_aer_off);
	pcie_dbg("\tbus_pcix_off = 0x%x\n", bus_p->bus_pcix_off);
	pcie_dbg("\tbus_ecc_ver = 0x%x\n", bus_p->bus_ecc_ver);
}

/*
 * For debugging purposes, set pcie_dbg_print != 0 to see printf messages
 * during interrupt.
 *
 * When a proper solution is in place this code will disappear.
 * Potential solutions are:
 * o circular buffers
 * o taskq to print at lower pil
 */
int pcie_dbg_print = 0;
void
pcie_dbg(char *fmt, ...)
{
	va_list ap;

	if (!pcie_debug_flags) {
		return;
	}
	va_start(ap, fmt);
	if (servicing_interrupt()) {
		if (pcie_dbg_print) {
			prom_vprintf(fmt, ap);
		}
	} else {
		prom_vprintf(fmt, ap);
	}
	va_end(ap);
}
#endif	/* DEBUG */

#if defined(__i386) || defined(__amd64)
static void
pcie_check_io_mem_range(ddi_acc_handle_t cfg_hdl, boolean_t *empty_io_range,
    boolean_t *empty_mem_range)
{
	uint8_t	class, subclass;
	uint_t	val;

	class = pci_config_get8(cfg_hdl, PCI_CONF_BASCLASS);
	subclass = pci_config_get8(cfg_hdl, PCI_CONF_SUBCLASS);

	if ((class == PCI_CLASS_BRIDGE) && (subclass == PCI_BRIDGE_PCI)) {
		val = (((uint_t)pci_config_get8(cfg_hdl, PCI_BCNF_IO_BASE_LOW) &
		    PCI_BCNF_IO_MASK) << 8);
		/*
		 * Assume that a zero-based I/O range is invalid.  Likewise
		 * for a zero-based memory range.
		 */
		if (val == 0)
			*empty_io_range = B_TRUE;
		val = (((uint_t)pci_config_get16(cfg_hdl, PCI_BCNF_MEM_BASE) &
		    PCI_BCNF_MEM_MASK) << 16);
		if (val == 0)
			*empty_mem_range = B_TRUE;
	}
}
#endif /* defined(__i386) || defined(__amd64) */