/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/promif.h>
#include <sys/disp.h>
#include <sys/stat.h>
#include <sys/file.h>
#include <sys/pci_cap.h>
#include <sys/pci_impl.h>
#include <sys/pcie_impl.h>
#include <sys/hotplug/pci/pcie_hp.h>
#include <sys/hotplug/pci/pcicfg.h>
#include <sys/pci_cfgacc.h>

/* Local functions prototypes */
static void pcie_init_pfd(dev_info_t *);
static void pcie_fini_pfd(dev_info_t *);

#if defined(__i386) || defined(__amd64)
static void pcie_check_io_mem_range(ddi_acc_handle_t, boolean_t *, boolean_t *);
#endif /* defined(__i386) || defined(__amd64) */

#ifdef DEBUG
uint_t pcie_debug_flags = 0;
static void pcie_print_bus(pcie_bus_t *bus_p);
void pcie_dbg(char *fmt, ...);
#endif /* DEBUG */

/* Variable to control default PCI-Express config settings */
ushort_t pcie_command_default =
    PCI_COMM_SERR_ENABLE |
    PCI_COMM_WAIT_CYC_ENAB |
    PCI_COMM_PARITY_DETECT |
    PCI_COMM_ME |
    PCI_COMM_MAE |
    PCI_COMM_IO;

/* xxx_fw are bits that are controlled by FW and should not be modified */
ushort_t pcie_command_default_fw =
    PCI_COMM_SPEC_CYC |
    PCI_COMM_MEMWR_INVAL |
    PCI_COMM_PALETTE_SNOOP |
    PCI_COMM_WAIT_CYC_ENAB |
    0xF800;	/* Reserved Bits */

ushort_t pcie_bdg_command_default_fw =
    PCI_BCNF_BCNTRL_ISA_ENABLE |
    PCI_BCNF_BCNTRL_VGA_ENABLE |
    0xF000;	/* Reserved Bits */

/* PCI-Express Base error defaults */
ushort_t pcie_base_err_default =
    PCIE_DEVCTL_CE_REPORTING_EN |
    PCIE_DEVCTL_NFE_REPORTING_EN |
    PCIE_DEVCTL_FE_REPORTING_EN |
    PCIE_DEVCTL_UR_REPORTING_EN;

/* PCI-Express Device Control Register */
uint16_t pcie_devctl_default = PCIE_DEVCTL_RO_EN |
    PCIE_DEVCTL_MAX_READ_REQ_512;

/* PCI-Express AER Root Control Register */
#define	PCIE_ROOT_SYS_ERR	(PCIE_ROOTCTL_SYS_ERR_ON_CE_EN | \
				PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN | \
				PCIE_ROOTCTL_SYS_ERR_ON_FE_EN)

ushort_t pcie_root_ctrl_default =
    PCIE_ROOTCTL_SYS_ERR_ON_CE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN;

/* PCI-Express Root Error Command Register */
ushort_t pcie_root_error_cmd_default =
    PCIE_AER_RE_CMD_CE_REP_EN |
    PCIE_AER_RE_CMD_NFE_REP_EN |
    PCIE_AER_RE_CMD_FE_REP_EN;

/* ECRC settings in the PCIe AER Control Register */
uint32_t pcie_ecrc_value =
    PCIE_AER_CTL_ECRC_GEN_ENA |
    PCIE_AER_CTL_ECRC_CHECK_ENA;

/*
 * If a particular platform wants to disable certain errors such as UR/MA,
 * instead of using #defines have the platform's PCIe Root Complex driver set
 * these masks using the pcie_get_XXX_mask and pcie_set_XXX_mask functions. For
 * x86 the closest thing to a PCIe root complex driver is NPE. For SPARC the
 * closest PCIe root complex driver is PX.
 *
 * pcie_serr_disable_flag : disable SERR only (in RCR and command reg) x86
 * systems may want to disable SERR in general. For root ports, enabling SERR
 * causes NMIs which are not handled and results in a watchdog timeout error.
 */
uint32_t pcie_aer_uce_mask = 0;		/* AER UE Mask */
uint32_t pcie_aer_ce_mask = 0;		/* AER CE Mask */
uint32_t pcie_aer_suce_mask = 0;	/* AER Secondary UE Mask */
uint32_t pcie_serr_disable_flag = 0;	/* Disable SERR */

/* Default severities needed for eversholt. Error handling doesn't care */
uint32_t pcie_aer_uce_severity = PCIE_AER_UCE_MTLP | PCIE_AER_UCE_RO | \
    PCIE_AER_UCE_FCP | PCIE_AER_UCE_SD | PCIE_AER_UCE_DLP | \
    PCIE_AER_UCE_TRAINING;
uint32_t pcie_aer_suce_severity = PCIE_AER_SUCE_SERR_ASSERT | \
    PCIE_AER_SUCE_UC_ADDR_ERR | PCIE_AER_SUCE_UC_ATTR_ERR | \
    PCIE_AER_SUCE_USC_MSG_DATA_ERR;

/*
 * Default max payload size, encoded as in the PCIe Device Control register
 * (the >> 5 shifts the MAX_PAYLOAD field down to its 0-5 encoding).
 */
int pcie_max_mps = PCIE_DEVCTL_MAX_PAYLOAD_4096 >> 5;
int pcie_disable_ari = 0;

static void pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip,
	int *max_supported);
static int pcie_get_max_supported(dev_info_t *dip, void *arg);
static int pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
    caddr_t *addrp, ddi_acc_handle_t *handlep);
static void pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph);

dev_info_t *pcie_get_rc_dip(dev_info_t *dip);

/*
 * modload support
 */

static struct modlmisc modlmisc = {
	&mod_miscops,	/* Type of module */
	"PCI Express Framework Module"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlmisc,
	NULL
};

/*
 * Global Variables needed for a non-atomic version of ddi_fm_ereport_post.
 * Currently used to send the pci.fabric ereports whose payload depends on the
 * type of PCI device it is being sent for.
 */
char *pcie_nv_buf;
nv_alloc_t *pcie_nvap;
nvlist_t *pcie_nvl;

/*
 * _init: module load entry point.  Pre-allocates the nvlist buffer used
 * for fabric ereports before registering with modctl.
 */
int
_init(void)
{
	int rval;

	pcie_nv_buf = kmem_alloc(ERPT_DATA_SZ, KM_SLEEP);
	pcie_nvap = fm_nva_xcreate(pcie_nv_buf, ERPT_DATA_SZ);
	pcie_nvl = fm_nvlist_create(pcie_nvap);

	rval = mod_install(&modlinkage);
	return (rval);
}

/*
 * _fini: module unload entry point.  Tears down the ereport nvlist
 * resources allocated in _init.
 */
int
_fini()
{
	int rval;

	fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
	fm_nva_xdestroy(pcie_nvap);
	kmem_free(pcie_nv_buf, ERPT_DATA_SZ);

	rval = mod_remove(&modlinkage);
	return (rval);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * pcie_init: per-nexus initialization.  Creates the "devctl" minor node,
 * sets up the PCIe hotplug framework, and enables ARI forwarding when the
 * port supports it but it is not yet enabled.
 */
/* ARGSUSED */
int
pcie_init(dev_info_t *dip, caddr_t arg)
{
	int ret = DDI_SUCCESS;

	/*
	 * Create a "devctl" minor node to support DEVCTL_DEVICE_*
	 * and DEVCTL_BUS_* ioctls to this bus.
	 */
	if ((ret = ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    PCI_MINOR_NUM(ddi_get_instance(dip), PCI_DEVCTL_MINOR),
	    DDI_NT_NEXUS, 0)) != DDI_SUCCESS) {
		PCIE_DBG("Failed to create devctl minor node for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

		return (ret);
	}

	if ((ret = pcie_hp_init(dip, arg)) != DDI_SUCCESS) {
		/*
		 * On a few x86 platforms, we observed unexpected hotplug
		 * initialization failures in recent years. Continue with
		 * a message printed because we don't want to stop PCI
		 * driver attach and system boot because of this hotplug
		 * initialization failure before we address all those issues.
		 */
		cmn_err(CE_WARN, "%s%d: Failed setting hotplug framework\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

#if defined(__sparc)
		/* SPARC treats the hotplug failure as fatal to pcie_init */
		ddi_remove_minor_node(dip, "devctl");

		return (ret);
#endif /* defined(__sparc) */
	}

	if ((pcie_ari_supported(dip) == PCIE_ARI_FORW_SUPPORTED) &&
	    (pcie_ari_is_enabled(dip) == PCIE_ARI_FORW_DISABLED))
		(void) pcicfg_configure(dip, 0, PCICFG_ALL_FUNC,
		    PCICFG_FLAG_ENABLE_ARI);

	return (DDI_SUCCESS);
}

/*
 * pcie_uninit: undo pcie_init.  Disables ARI forwarding if enabled,
 * tears down the hotplug framework and removes the "devctl" minor node.
 */
/* ARGSUSED */
int
pcie_uninit(dev_info_t *dip)
{
	int ret = DDI_SUCCESS;

	if (pcie_ari_is_enabled(dip) == PCIE_ARI_FORW_ENABLED)
		(void) pcie_ari_disable(dip);

	if ((ret = pcie_hp_uninit(dip)) != DDI_SUCCESS) {
		PCIE_DBG("Failed to uninitialize hotplug for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

		return (ret);
	}

	ddi_remove_minor_node(dip, "devctl");

	return (ret);
}

/* Forward hotplug interrupts to the PCIe hotplug framework */
/* ARGSUSED */
int
pcie_intr(dev_info_t *dip)
{
	return (pcie_hp_intr(dip));
}

/*
 * pcie_open: open(9E) for the devctl minor node.  Tracks open state in
 * bus_soft_state and enforces exclusive-open (FEXCL) semantics.
 */
/* ARGSUSED */
int
pcie_open(dev_info_t *dip, dev_t *devp, int flags, int otyp, cred_t *credp)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	/*
	 * Make sure the open is for the right file type.
	 */
	if (otyp != OTYP_CHR)
		return (EINVAL);

	/*
	 * Handle the open by tracking the device state.
	 */
	if ((bus_p->bus_soft_state == PCI_SOFT_STATE_OPEN_EXCL) ||
	    ((flags & FEXCL) &&
	    (bus_p->bus_soft_state != PCI_SOFT_STATE_CLOSED))) {
		return (EBUSY);
	}

	if (flags & FEXCL)
		bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN_EXCL;
	else
		bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN;

	return (0);
}

/* pcie_close: close(9E) for the devctl minor node; resets open state. */
/* ARGSUSED */
int
pcie_close(dev_info_t *dip, dev_t dev, int flags, int otyp, cred_t *credp)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	if (otyp != OTYP_CHR)
		return (EINVAL);

	bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;

	return (0);
}

/*
 * pcie_ioctl: ioctl(9E) for the devctl minor node.  Defers the generic
 * DEVCTL_* operations to ndi_devctl_ioctl() and handles bus quiesce and
 * unquiesce here; reset operations are not supported.
 */
/* ARGSUSED */
int
pcie_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp)
{
	struct devctl_iocdata *dcp;
	uint_t bus_state;
	int rv = DDI_SUCCESS;

	/*
	 * We can use the generic implementation for devctl ioctl
	 */
	switch (cmd) {
	case DEVCTL_DEVICE_GETSTATE:
	case DEVCTL_DEVICE_ONLINE:
	case DEVCTL_DEVICE_OFFLINE:
	case DEVCTL_BUS_GETSTATE:
		return (ndi_devctl_ioctl(dip, cmd, arg, mode, 0));
	default:
		break;
	}

	/*
	 * read devctl ioctl data
	 */
	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
		return (EFAULT);

	switch (cmd) {
	case DEVCTL_BUS_QUIESCE:
		/* Already quiesced?  Nothing to do. */
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
			if (bus_state == BUS_QUIESCED)
				break;
		(void) ndi_set_bus_state(dip, BUS_QUIESCED);
		break;
	case DEVCTL_BUS_UNQUIESCE:
		/* Already active?  Nothing to do. */
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
			if (bus_state == BUS_ACTIVE)
				break;
		(void) ndi_set_bus_state(dip, BUS_ACTIVE);
		break;
	case DEVCTL_BUS_RESET:
	case DEVCTL_BUS_RESETALL:
	case DEVCTL_DEVICE_RESET:
		rv = ENOTSUP;
		break;
	default:
		rv = ENOTTY;
	}

	ndi_dc_freehdl(dcp);
	return (rv);
}

/*
 * pcie_prop_op: prop_op(9E).  On a hotplug-capable bus, materializes the
 * "pci-occupant" property for the addressed slot before falling through
 * to the default ddi_prop_op().
 */
/* ARGSUSED */
int
pcie_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int flags, char *name, caddr_t valuep, int *lengthp)
{
	if (dev == DDI_DEV_T_ANY)
		goto skip;

	if (PCIE_IS_HOTPLUG_CAPABLE(dip) &&
	    strcmp(name, "pci-occupant") == 0) {
		int pci_dev = PCI_MINOR_NUM_TO_PCI_DEVNUM(getminor(dev));

		pcie_hp_create_occupant_props(dip, dev, pci_dev);
	}

skip:
	return (ddi_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp));
}

/*
 * pcie_init_cfghdl: set up the config-space access handle cached in
 * bus_cfg_hdl; used by the error handling code.  Returns DDI_FAILURE if
 * the dip has no pcie_bus_t or config setup fails.
 */
int
pcie_init_cfghdl(dev_info_t *cdip)
{
	pcie_bus_t *bus_p;
	ddi_acc_handle_t eh = NULL;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL)
		return (DDI_FAILURE);

	/* Create a config access handle dedicated to error handling */
	if (pci_config_setup(cdip, &eh) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Cannot setup config access"
		    " for BDF 0x%x\n", bus_p->bus_bdf);
		return (DDI_FAILURE);
	}

	bus_p->bus_cfg_hdl = eh;
	return (DDI_SUCCESS);
}

/* pcie_fini_cfghdl: tear down the handle created by pcie_init_cfghdl. */
void
pcie_fini_cfghdl(dev_info_t *cdip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip);

	pci_config_teardown(&bus_p->bus_cfg_hdl);
}

/*
 * PCI-Express child device initialization.
 * This function enables generic pci-express interrupts and error
 * handling.
 *
 * @param pdip root dip (root nexus's dip)
 * @param cdip child's dip (device's dip)
 * @return DDI_SUCCESS or DDI_FAILURE
 */
/* ARGSUSED */
int
pcie_initchild(dev_info_t *cdip)
{
	uint16_t tmp16, reg16;
	pcie_bus_t *bus_p;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL) {
		PCIE_DBG("%s: BUS not found.\n",
		    ddi_driver_name(cdip));

		return (DDI_FAILURE);
	}

	if (pcie_init_cfghdl(cdip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Clear the device's status register (write-1-to-clear) */
	reg16 = PCIE_GET(16, bus_p, PCI_CONF_STAT);
	PCIE_PUT(16, bus_p, PCI_CONF_STAT, reg16);

	/*
	 * Setup the device's command register: preserve the FW-owned bits
	 * and OR in the framework defaults.
	 */
	reg16 = PCIE_GET(16, bus_p, PCI_CONF_COMM);
	tmp16 = (reg16 & pcie_command_default_fw) | pcie_command_default;

#if defined(__i386) || defined(__amd64)
	boolean_t empty_io_range = B_FALSE;
	boolean_t empty_mem_range = B_FALSE;
	/*
	 * Check for empty IO and Mem ranges on bridges. If so disable IO/Mem
	 * access as it can cause a hang if enabled.
	 */
	pcie_check_io_mem_range(bus_p->bus_cfg_hdl, &empty_io_range,
	    &empty_mem_range);
	if ((empty_io_range == B_TRUE) &&
	    (pcie_command_default & PCI_COMM_IO)) {
		tmp16 &= ~PCI_COMM_IO;
		PCIE_DBG("No I/O range found for %s, bdf 0x%x\n",
		    ddi_driver_name(cdip), bus_p->bus_bdf);
	}
	if ((empty_mem_range == B_TRUE) &&
	    (pcie_command_default & PCI_COMM_MAE)) {
		tmp16 &= ~PCI_COMM_MAE;
		PCIE_DBG("No Mem range found for %s, bdf 0x%x\n",
		    ddi_driver_name(cdip), bus_p->bus_bdf);
	}
#endif /* defined(__i386) || defined(__amd64) */

	if (pcie_serr_disable_flag && PCIE_IS_PCIE(bus_p))
		tmp16 &= ~PCI_COMM_SERR_ENABLE;

	PCIE_PUT(16, bus_p, PCI_CONF_COMM, tmp16);
	PCIE_DBG_CFG(cdip, bus_p, "COMMAND", 16, PCI_CONF_COMM, reg16);

	/*
	 * If the device has a bus control register then program it
	 * based on the settings in the command register.
	 */
	if (PCIE_IS_BDG(bus_p)) {
		/* Clear the device's secondary status register */
		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_SEC_STATUS);
		PCIE_PUT(16, bus_p, PCI_BCNF_SEC_STATUS, reg16);

		/* Setup the device's secondary command register */
		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_BCNTRL);
		tmp16 = (reg16 & pcie_bdg_command_default_fw);

		tmp16 |= PCI_BCNF_BCNTRL_SERR_ENABLE;
		/*
		 * Workaround for this Nvidia bridge. Don't enable the SERR
		 * enable bit in the bridge control register as it could lead to
		 * bogus NMIs.
		 */
		if (bus_p->bus_dev_ven_id == 0x037010DE)
			tmp16 &= ~PCI_BCNF_BCNTRL_SERR_ENABLE;

		if (pcie_command_default & PCI_COMM_PARITY_DETECT)
			tmp16 |= PCI_BCNF_BCNTRL_PARITY_ENABLE;

		/*
		 * Enable Master Abort Mode only if URs have not been masked.
		 * For PCI and PCIe-PCI bridges, enabling this bit causes a
		 * Master Aborts/UR to be forwarded as a UR/TA or SERR. If this
		 * bit is masked, posted requests are dropped and non-posted
		 * requests are returned with -1.
		 */
		if (pcie_aer_uce_mask & PCIE_AER_UCE_UR)
			tmp16 &= ~PCI_BCNF_BCNTRL_MAST_AB_MODE;
		else
			tmp16 |= PCI_BCNF_BCNTRL_MAST_AB_MODE;
		PCIE_PUT(16, bus_p, PCI_BCNF_BCNTRL, tmp16);
		PCIE_DBG_CFG(cdip, bus_p, "SEC CMD", 16, PCI_BCNF_BCNTRL,
		    reg16);
	}

	if (PCIE_IS_PCIE(bus_p)) {
		/* Setup PCIe device control register */
		reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
		tmp16 = pcie_devctl_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(cdip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);

		/* Enable PCIe errors */
		pcie_enable_errors(cdip);
	}

	/* Record whether this device operates in ARI mode */
	bus_p->bus_ari = B_FALSE;
	if ((pcie_ari_is_enabled(ddi_get_parent(cdip))
	    == PCIE_ARI_FORW_ENABLED) && (pcie_ari_device(cdip)
	    == PCIE_ARI_DEVICE)) {
		bus_p->bus_ari = B_TRUE;
	}

	if (pcie_initchild_mps(cdip) == DDI_FAILURE) {
		pcie_fini_cfghdl(cdip);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

#define	PCIE_ZALLOC(data) kmem_zalloc(sizeof (data), KM_SLEEP)

/*
 * pcie_init_pfd: allocate the pf_data_t error-register snapshot area for
 * a device, allocating only the sub-structures appropriate to the device
 * type (root, bridge, PCIe, PCI-X, ...).  Mirrored by pcie_fini_pfd.
 */
static void
pcie_init_pfd(dev_info_t *dip)
{
	pf_data_t *pfd_p = PCIE_ZALLOC(pf_data_t);
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	PCIE_DIP2PFD(dip) = pfd_p;

	pfd_p->pe_bus_p = bus_p;
	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_lock = B_FALSE;
	pfd_p->pe_valid = B_FALSE;

	/* Allocate the root fault struct for both RC and RP */
	if (PCIE_IS_ROOT(bus_p)) {
		PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
	}

	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);

	if (PCIE_IS_BDG(bus_p))
		PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);

	if (PCIE_IS_PCIE(bus_p)) {
		PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);

		if (PCIE_IS_RP(bus_p))
			PCIE_RP_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_rp_err_regs_t);

		PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;

		if (PCIE_IS_RP(bus_p)) {
			PCIE_ADV_RP_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id =
			    PCIE_INVALID_BDF;
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id =
			    PCIE_INVALID_BDF;
		} else if (PCIE_IS_PCIE_BDG(bus_p)) {
			PCIE_ADV_BDG_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_adv_bdg_err_regs_t);
			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
			    PCIE_INVALID_BDF;
		}

		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			PCIX_BDG_ERR_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				PCIX_BDG_ECC_REG(pfd_p, 0) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
				PCIX_BDG_ECC_REG(pfd_p, 1) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
			}
		}
	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			PCIX_BDG_ERR_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				PCIX_BDG_ECC_REG(pfd_p, 0) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
				PCIX_BDG_ECC_REG(pfd_p, 1) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
			}
		} else {
			PCIX_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcix_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p))
				PCIX_ECC_REG(pfd_p) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
		}
	}
}

/*
 * pcie_fini_pfd: free everything allocated by pcie_init_pfd.  The
 * conditional structure must mirror pcie_init_pfd exactly so only the
 * sub-structures that were allocated are freed.
 */
static void
pcie_fini_pfd(dev_info_t *dip)
{
	pf_data_t *pfd_p = PCIE_DIP2PFD(dip);
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	if (PCIE_IS_PCIE(bus_p)) {
		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}

			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
			    sizeof (pf_pcix_bdg_err_regs_t));
		}

		if (PCIE_IS_RP(bus_p))
			kmem_free(PCIE_ADV_RP_REG(pfd_p),
			    sizeof (pf_pcie_adv_rp_err_regs_t));
		else if (PCIE_IS_PCIE_BDG(bus_p))
			kmem_free(PCIE_ADV_BDG_REG(pfd_p),
			    sizeof (pf_pcie_adv_bdg_err_regs_t));

		kmem_free(PCIE_ADV_REG(pfd_p),
		    sizeof (pf_pcie_adv_err_regs_t));

		if (PCIE_IS_RP(bus_p))
			kmem_free(PCIE_RP_REG(pfd_p),
			    sizeof (pf_pcie_rp_err_regs_t));

		kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}

			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
			    sizeof (pf_pcix_bdg_err_regs_t));
		} else {
			if (PCIX_ECC_VERSION_CHECK(bus_p))
				kmem_free(PCIX_ECC_REG(pfd_p),
				    sizeof (pf_pcix_ecc_regs_t));

			kmem_free(PCIX_ERR_REG(pfd_p),
			    sizeof (pf_pcix_err_regs_t));
		}
	}

	if (PCIE_IS_BDG(bus_p))
		kmem_free(PCI_BDG_ERR_REG(pfd_p),
		    sizeof (pf_pci_bdg_err_regs_t));

	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));

	if (PCIE_IS_ROOT(bus_p))
		kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));

	kmem_free(PCIE_DIP2PFD(dip), sizeof (pf_data_t));

	PCIE_DIP2PFD(dip) = NULL;
}


/*
 * Special functions to allocate pf_data_t's for PCIe root complexes.
 * Note: Root Complex not Root Port
 */
void
pcie_rc_init_pfd(dev_info_t *dip, pf_data_t *pfd_p)
{
	pfd_p->pe_bus_p = PCIE_DIP2DOWNBUS(dip);
	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_lock = B_FALSE;
	pfd_p->pe_valid = B_FALSE;

	/* An RC gets the full set of root/bridge/PCIe register buffers */
	PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
	PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
	PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);
	PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);
	PCIE_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_rp_err_regs_t);
	PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
	PCIE_ADV_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);

	PCIE_ADV_REG(pfd_p)->pcie_ue_sev = pcie_aer_uce_severity;
}

/* Free the buffers allocated by pcie_rc_init_pfd (reverse order). */
void
pcie_rc_fini_pfd(pf_data_t *pfd_p)
{
	kmem_free(PCIE_ADV_RP_REG(pfd_p), sizeof (pf_pcie_adv_rp_err_regs_t));
	kmem_free(PCIE_ADV_REG(pfd_p), sizeof (pf_pcie_adv_err_regs_t));
	kmem_free(PCIE_RP_REG(pfd_p), sizeof (pf_pcie_rp_err_regs_t));
	kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
	kmem_free(PCI_BDG_ERR_REG(pfd_p), sizeof (pf_pci_bdg_err_regs_t));
	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));
	kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
}

/*
 * init pcie_bus_t for root complex
 *
 * Only a few of the fields in bus_t is valid for root complex.
 * The fields that are bracketed are initialized in this routine:
 *
 * dev_info_t *		<bus_dip>
 * dev_info_t *		bus_rp_dip
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		<bus_fm_flags>
 * pcie_req_id_t	bus_bdf
 * pcie_req_id_t	bus_rp_bdf
 * uint32_t		bus_dev_ven_id
 * uint8_t		bus_rev_id
 * uint8_t		<bus_hdr_type>
 * uint16_t		<bus_dev_type>
 * uint8_t		bus_bdg_secbus
 * uint16_t		bus_pcie_off
 * uint16_t		<bus_aer_off>
 * uint16_t		bus_pcix_off
 * uint16_t		bus_ecc_ver
 * pci_bus_range_t	bus_bus_range
 * ppb_ranges_t	*	bus_addr_ranges
 * int			bus_addr_entries
 * pci_regspec_t *	bus_assigned_addr
 * int			bus_assigned_entries
 * pf_data_t *		bus_pfd
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void	*		bus_plat_private
 */
void
pcie_rc_init_bus(dev_info_t *dip)
{
	pcie_bus_t *bus_p;

	bus_p = (pcie_bus_t *)kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);
	bus_p->bus_dip = dip;
	bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_RC_PSEUDO;
	bus_p->bus_hdr_type = PCI_HEADER_ONE;

	/* Fake that there are AER logs */
	bus_p->bus_aer_off = (uint16_t)-1;

	/* Needed only for handle lookup */
	bus_p->bus_fm_flags |= PF_FM_READY;

	ndi_set_bus_private(dip, B_FALSE, DEVI_PORT_TYPE_PCI, bus_p);
}

/* Undo pcie_rc_init_bus: detach and free the RC's pcie_bus_t. */
void
pcie_rc_fini_bus(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2DOWNBUS(dip);
	ndi_set_bus_private(dip, B_FALSE, NULL, NULL);
	kmem_free(bus_p, sizeof (pcie_bus_t));
}

/*
 * partially init pcie_bus_t for device (dip,bdf) for accessing pci
 * config space
 *
 * This routine is invoked during boot, either after creating a devinfo node
 * (x86 case) or during px driver attach (sparc case); it is also invoked
 * in hotplug context after a devinfo node is created.
 *
 * The fields that are bracketed are initialized if flag PCIE_BUS_INITIAL
 * is set:
 *
 * dev_info_t *		<bus_dip>
 * dev_info_t *		<bus_rp_dip>
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		bus_fm_flags
 * pcie_req_id_t	<bus_bdf>
 * pcie_req_id_t	<bus_rp_bdf>
 * uint32_t		<bus_dev_ven_id>
 * uint8_t		<bus_rev_id>
 * uint8_t		<bus_hdr_type>
 * uint16_t		<bus_dev_type>
 * uint8_t		<bus_bdg_secbus>
 * uint16_t		<bus_pcie_off>
 * uint16_t		<bus_aer_off>
 * uint16_t		<bus_pcix_off>
 * uint16_t		<bus_ecc_ver>
 * pci_bus_range_t	bus_bus_range
 * ppb_ranges_t	*	bus_addr_ranges
 * int			bus_addr_entries
 * pci_regspec_t *	bus_assigned_addr
 * int			bus_assigned_entries
 * pf_data_t *		bus_pfd
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void	*		bus_plat_private
 *
 * The fields that are bracketed are initialized if flag PCIE_BUS_FINAL
 * is set:
 *
 * dev_info_t *		bus_dip
 * dev_info_t *		bus_rp_dip
 * ddi_acc_handle_t	bus_cfg_hdl
 * uint_t		bus_fm_flags
 * pcie_req_id_t	bus_bdf
 * pcie_req_id_t	bus_rp_bdf
 * uint32_t		bus_dev_ven_id
 * uint8_t		bus_rev_id
 * uint8_t		bus_hdr_type
 * uint16_t		bus_dev_type
 * uint8_t		<bus_bdg_secbus>
 * uint16_t		bus_pcie_off
 * uint16_t		bus_aer_off
 * uint16_t		bus_pcix_off
 * uint16_t		bus_ecc_ver
 * pci_bus_range_t	<bus_bus_range>
 * ppb_ranges_t	*	<bus_addr_ranges>
 * int			<bus_addr_entries>
 * pci_regspec_t *	<bus_assigned_addr>
 * int			<bus_assigned_entries>
 * pf_data_t *		<bus_pfd>
 * int			bus_mps
 * uint64_t		bus_cfgacc_base
 * void	*		<bus_plat_private>
 */

pcie_bus_t *
pcie_init_bus(dev_info_t *dip, pcie_req_id_t bdf, uint8_t flags)
{
	uint16_t status, base, baseptr, num_cap;
	uint32_t capid;
	int range_size;
	pcie_bus_t *bus_p;
	dev_info_t *rcdip;
	dev_info_t *pdip;
	const char *errstr = NULL;

	if (!(flags & PCIE_BUS_INITIAL))
		goto initial_done;
	bus_p = kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);

	bus_p->bus_dip = dip;
	bus_p->bus_bdf = bdf;

	rcdip = pcie_get_rc_dip(dip);
	ASSERT(rcdip != NULL);

	/* Save the Vendor ID, Device ID and revision ID */
	bus_p->bus_dev_ven_id = pci_cfgacc_get32(rcdip, bdf, PCI_CONF_VENID);
	bus_p->bus_rev_id = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_REVID);
	/* Save the Header Type */
	bus_p->bus_hdr_type = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_HEADER);
	bus_p->bus_hdr_type &= PCI_HEADER_TYPE_M;

	/*
	 * Figure out the device type and all the relevant capability offsets
	 */
	/* set default value */
	bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;

	status = pci_cfgacc_get16(rcdip, bdf, PCI_CONF_STAT);
	if (status == PCI_CAP_EINVAL16 || !(status & PCI_STAT_CAP))
		goto caps_done; /* capability not supported */

	/* Relevant conventional capabilities first */

	/* Conventional caps: PCI_CAP_ID_PCI_E, PCI_CAP_ID_PCIX */
	num_cap = 2;

	switch (bus_p->bus_hdr_type) {
	case PCI_HEADER_ZERO:
		baseptr = PCI_CONF_CAP_PTR;
		break;
	case PCI_HEADER_PPB:
		baseptr = PCI_BCNF_CAP_PTR;
		break;
	case PCI_HEADER_CARDBUS:
		baseptr = PCI_CBUS_CAP_PTR;
		break;
	default:
		cmn_err(CE_WARN, "%s: unexpected pci header type:%x",
		    __func__, bus_p->bus_hdr_type);
		goto caps_done;
	}

	/* Walk the conventional capability list until both caps are found */
	base = baseptr;
	for (base = pci_cfgacc_get8(rcdip, bdf, base); base && num_cap;
	    base = pci_cfgacc_get8(rcdip, bdf, base + PCI_CAP_NEXT_PTR)) {
		capid = pci_cfgacc_get8(rcdip, bdf, base);
		switch (capid) {
		case PCI_CAP_ID_PCI_E:
			bus_p->bus_pcie_off = base;
			bus_p->bus_dev_type = pci_cfgacc_get16(rcdip, bdf,
			    base + PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;

			/* Check and save PCIe hotplug capability information */
			if ((PCIE_IS_RP(bus_p) || PCIE_IS_SWD(bus_p)) &&
			    (pci_cfgacc_get16(rcdip, bdf, base + PCIE_PCIECAP)
			    & PCIE_PCIECAP_SLOT_IMPL) &&
			    (pci_cfgacc_get32(rcdip, bdf, base + PCIE_SLOTCAP)
			    & PCIE_SLOTCAP_HP_CAPABLE))
				bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE;

			num_cap--;
			break;
		case PCI_CAP_ID_PCIX:
			bus_p->bus_pcix_off = base;
			if (PCIE_IS_BDG(bus_p))
				bus_p->bus_ecc_ver =
				    pci_cfgacc_get16(rcdip, bdf, base +
				    PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
			else
				bus_p->bus_ecc_ver =
				    pci_cfgacc_get16(rcdip, bdf, base +
				    PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
			num_cap--;
			break;
		default:
			break;
		}
	}

	/* Check and save PCI hotplug (SHPC) capability information */
	if (PCIE_IS_BDG(bus_p)) {
		base = baseptr;
		for (base = pci_cfgacc_get8(rcdip, bdf, base);
		    base; base = pci_cfgacc_get8(rcdip, bdf,
		    base + PCI_CAP_NEXT_PTR)) {
			capid = pci_cfgacc_get8(rcdip, bdf, base);
			if (capid == PCI_CAP_ID_PCI_HOTPLUG) {
				bus_p->bus_pci_hp_off = base;
				bus_p->bus_hp_sup_modes |= PCIE_PCI_HP_MODE;
				break;
			}
		}
	}

	/* Then, relevant extended capabilities */

	if (!PCIE_IS_PCIE(bus_p))
		goto caps_done;

	/* Extended caps: PCIE_EXT_CAP_ID_AER */
	for (base = PCIE_EXT_CAP; base; base = (capid >>
	    PCIE_EXT_CAP_NEXT_PTR_SHIFT) & PCIE_EXT_CAP_NEXT_PTR_MASK) {
		capid = pci_cfgacc_get32(rcdip, bdf, base);
		if (capid == PCI_CAP_EINVAL32)
			break;
		if (((capid >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK)
		    == PCIE_EXT_CAP_ID_AER) {
			bus_p->bus_aer_off = base;
			break;
		}
	}

caps_done:
	/* save RP dip and RP bdf */
	if (PCIE_IS_RP(bus_p)) {
		bus_p->bus_rp_dip = dip;
		bus_p->bus_rp_bdf = bus_p->bus_bdf;
	} else {
		for (pdip = ddi_get_parent(dip); pdip;
		    pdip = ddi_get_parent(pdip)) {
			pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip);

			/*
			 * If RP dip and RP bdf in parent's bus_t have
			 * been initialized, simply use these instead of
			 * continuing up to the RC.
			 */
			if (parent_bus_p->bus_rp_dip != NULL) {
				bus_p->bus_rp_dip = parent_bus_p->bus_rp_dip;
				bus_p->bus_rp_bdf = parent_bus_p->bus_rp_bdf;
				break;
			}

			/*
			 * When debugging be aware that some NVIDIA x86
			 * architectures have 2 nodes for each RP, One at Bus
			 * 0x0 and one at Bus 0x80.  The requester is from Bus
			 * 0x80
			 */
			if (PCIE_IS_ROOT(parent_bus_p)) {
				bus_p->bus_rp_dip = pdip;
				bus_p->bus_rp_bdf = parent_bus_p->bus_bdf;
				break;
			}
		}
	}

	bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;
	bus_p->bus_fm_flags = 0;
	bus_p->bus_mps = 0;

	ndi_set_bus_private(dip, B_TRUE, DEVI_PORT_TYPE_PCI, (void *)bus_p);

	if (PCIE_IS_HOTPLUG_CAPABLE(dip))
		(void) ndi_prop_create_boolean(DDI_DEV_T_NONE, dip,
		    "hotplug-capable");

initial_done:
	if (!(flags & PCIE_BUS_FINAL))
		goto final_done;

	/* already initialized? */
	bus_p = PCIE_DIP2BUS(dip);

	/* Save the Range information if device is a switch/bridge */
	if (PCIE_IS_BDG(bus_p)) {
		/* get "bus_range" property */
		range_size = sizeof (pci_bus_range_t);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "bus-range", (caddr_t)&bus_p->bus_bus_range, &range_size)
		    != DDI_PROP_SUCCESS) {
			errstr = "Cannot find \"bus-range\" property";
			cmn_err(CE_WARN,
			    "PCIE init err info failed BDF 0x%x:%s\n",
			    bus_p->bus_bdf, errstr);
		}

		/* get secondary bus number */
		rcdip = pcie_get_rc_dip(dip);
		ASSERT(rcdip != NULL);

		bus_p->bus_bdg_secbus = pci_cfgacc_get8(rcdip,
		    bus_p->bus_bdf, PCI_BCNF_SECBUS);

		/* Get "ranges" property */
		if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "ranges", (caddr_t)&bus_p->bus_addr_ranges,
		    &bus_p->bus_addr_entries) != DDI_PROP_SUCCESS)
			bus_p->bus_addr_entries = 0;
		bus_p->bus_addr_entries /= sizeof (ppb_ranges_t);
	}

	/* save "assigned-addresses" property array, ignore failures */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "assigned-addresses", (caddr_t)&bus_p->bus_assigned_addr,
	    &bus_p->bus_assigned_entries) == DDI_PROP_SUCCESS)
		bus_p->bus_assigned_entries /= sizeof (pci_regspec_t);
	else
		bus_p->bus_assigned_entries = 0;

	pcie_init_pfd(dip);

	pcie_init_plat(dip);

final_done:

	PCIE_DBG("Add %s(dip 0x%p, bdf 0x%x, secbus 0x%x)\n",
	    ddi_driver_name(dip), (void *)dip, bus_p->bus_bdf,
	    bus_p->bus_bdg_secbus);
#ifdef DEBUG
	pcie_print_bus(bus_p);
#endif

	return (bus_p);
}

/*
 * Invoked before destroying devinfo node, mostly during hotplug
 * operation to free pcie_bus_t data structure
 */
/* ARGSUSED */
void
pcie_fini_bus(dev_info_t *dip, uint8_t flags)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	ASSERT(bus_p);

	if (flags & PCIE_BUS_INITIAL) {
		pcie_fini_plat(dip);
		pcie_fini_pfd(dip);

		kmem_free(bus_p->bus_assigned_addr,
		    (sizeof (pci_regspec_t) * bus_p->bus_assigned_entries));
		kmem_free(bus_p->bus_addr_ranges,
		    (sizeof (ppb_ranges_t) * bus_p->bus_addr_entries));
		/* zero out the fields that have been destroyed */
		bus_p->bus_assigned_addr = NULL;
		bus_p->bus_addr_ranges = NULL;
		bus_p->bus_assigned_entries = 0;
		bus_p->bus_addr_entries = 0;
	}

	if (flags & PCIE_BUS_FINAL) {
		if (PCIE_IS_HOTPLUG_CAPABLE(dip)) {
			(void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
			    "hotplug-capable");
		}

		ndi_set_bus_private(dip, B_TRUE, NULL, NULL);
		kmem_free(bus_p, sizeof (pcie_bus_t));
	}
}

/*
 * pcie_postattach_child: enable correctable-error reporting on a child
 * after it has attached.  Fails if the child has no pcie_bus_t.
 */
int
pcie_postattach_child(dev_info_t *cdip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip);

	if (!bus_p)
		return (DDI_FAILURE);

	return (pcie_enable_ce(cdip));
}

/*
 * PCI-Express
 * child device de-initialization.
 * This function disables generic pci-express interrupts and error
 * handling.
 */
void
pcie_uninitchild(dev_info_t *cdip)
{
	pcie_disable_errors(cdip);
	pcie_fini_cfghdl(cdip);
}

/*
 * find the root complex dip
 *
 * Walks up the devinfo tree from dip's parent until a node whose
 * pcie_bus_t is marked as a root complex is found; returns NULL if the
 * walk reaches the top without finding one.
 */
dev_info_t *
pcie_get_rc_dip(dev_info_t *dip)
{
	dev_info_t *rcdip;
	pcie_bus_t *rc_bus_p;

	for (rcdip = ddi_get_parent(dip); rcdip;
	    rcdip = ddi_get_parent(rcdip)) {
		rc_bus_p = PCIE_DIP2BUS(rcdip);
		if (rc_bus_p && PCIE_IS_RC(rc_bus_p))
			break;
	}

	return (rcdip);
}

/*
 * Return B_TRUE if dip sits on a PCI or PCIe bus, judged by the
 * parent nexus's "device_type" property ("pci" or "pciex").
 */
static boolean_t
pcie_is_pci_device(dev_info_t *dip)
{
	dev_info_t	*pdip;
	char		*device_type;

	pdip = ddi_get_parent(dip);
	ASSERT(pdip);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS,
	    "device_type", &device_type) != DDI_PROP_SUCCESS)
		return (B_FALSE);

	if (strcmp(device_type, "pciex") != 0 &&
	    strcmp(device_type, "pci") != 0) {
		ddi_prop_free(device_type);
		return (B_FALSE);
	}

	ddi_prop_free(device_type);
	return (B_TRUE);
}

/* Walk argument for pcie_fab_do_init_fini */
typedef struct {
	boolean_t	init;	/* B_TRUE: init pass, B_FALSE: fini pass */
	uint8_t		flags;	/* PCIE_BUS_* flags passed through */
} pcie_bus_arg_t;

/*
 * ddi_walk_devs() callback: initialize or tear down the pcie_bus_t of
 * each PCI/PCIe node in the fabric.  Subtrees rooted at non-PCI nodes
 * (or nodes whose bdf cannot be determined) are pruned.
 */
/*ARGSUSED*/
static int
pcie_fab_do_init_fini(dev_info_t *dip, void *arg)
{
	pcie_req_id_t	bdf;
	pcie_bus_arg_t	*bus_arg = (pcie_bus_arg_t *)arg;

	if (!pcie_is_pci_device(dip))
		goto out;

	if (bus_arg->init) {
		if (pcie_get_bdf_from_dip(dip, &bdf) != DDI_SUCCESS)
			goto out;

		(void) pcie_init_bus(dip, bdf, bus_arg->flags);
	} else {
		(void) pcie_fini_bus(dip, bus_arg->flags);
	}

	return (DDI_WALK_CONTINUE);

out:
	return (DDI_WALK_PRUNECHILD);
}

/*
 * Initialize pcie_bus_t structures for every device in the fabric
 * below the given root complex.
 */
void
pcie_fab_init_bus(dev_info_t *rcdip, uint8_t flags)
{
	int		circular_count;
	dev_info_t	*dip = ddi_get_child(rcdip);
	pcie_bus_arg_t	arg;

	arg.init = B_TRUE;
	arg.flags = flags;

	/* hold the tree stable while walking the fabric */
	ndi_devi_enter(rcdip, &circular_count);
	ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
	ndi_devi_exit(rcdip, circular_count);
}

/*
 * Tear down pcie_bus_t structures for every device in the fabric
 * below the given root complex.
 */
void
pcie_fab_fini_bus(dev_info_t *rcdip, uint8_t flags)
{
	int		circular_count;
	dev_info_t	*dip = ddi_get_child(rcdip);
	pcie_bus_arg_t	arg;

	arg.init = B_FALSE;
	arg.flags = flags;

	ndi_devi_enter(rcdip, &circular_count);
	ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
	ndi_devi_exit(rcdip, circular_count);
}

/*
 * Enable PCIe baseline and (if present) Advanced Error Reporting for a
 * device.  Correctable-error reporting is deliberately left off here;
 * pcie_enable_ce() turns it on separately.
 */
void
pcie_enable_errors(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	reg16, tmp16;
	uint32_t	reg32, tmp32;

	ASSERT(bus_p);

	/*
	 * Clear any pending errors
	 */
	pcie_clear_errors(dip);

	if (!PCIE_IS_PCIE(bus_p))
		return;

	/*
	 * Enable Baseline Error Handling but leave CE reporting off (poweron
	 * default).
	 */
	if ((reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL)) !=
	    PCI_CAP_EINVAL16) {
		/*
		 * Preserve the firmware-programmed MPS/MRRS fields, take
		 * everything else from the defaults, and strip the CE
		 * reporting enable bit.
		 */
		tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_base_err_default & (~PCIE_DEVCTL_CE_REPORTING_EN));

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);
	}

	/* Enable Root Port Baseline Error Receiving */
	if (PCIE_IS_ROOT(bus_p) &&
	    (reg16 = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL)) !=
	    PCI_CAP_EINVAL16) {

		/* optionally suppress system-error generation */
		tmp16 = pcie_serr_disable_flag ?
		    (pcie_root_ctrl_default & ~PCIE_ROOT_SYS_ERR) :
		    pcie_root_ctrl_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "ROOT DEVCTL", 16, PCIE_ROOTCTL,
		    reg16);
	}

	/*
	 * Enable PCI-Express Advanced Error Handling if Exists
	 */
	if (!PCIE_HAS_AER(bus_p))
		return;

	/* Set Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE SEV", 32, PCIE_AER_UCE_SERV,
		    reg32);
	}

	/* Enable Uncorrectable errors */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_mask;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE MASK", 32, PCIE_AER_UCE_MASK,
		    reg32);
	}

	/* Enable ECRC generation and checking */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = reg32 | pcie_ecrc_value;
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER CTL", 32, PCIE_AER_CTL, reg32);
	}

	/* Enable Secondary Uncorrectable errors if this is a bridge */
	if (!PCIE_IS_PCIE_BDG(bus_p))
		goto root;

	/* Set Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_suce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE SEV", 32, PCIE_AER_SUCE_SERV,
		    reg32);
	}

	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, pcie_aer_suce_mask);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE MASK", 32,
		    PCIE_AER_SUCE_MASK, reg32);
	}

root:
	/*
	 * Enable Root Control if this is a Root device
	 */
	if (!PCIE_IS_ROOT(bus_p))
		return;

	if ((reg16 = PCIE_AER_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
	    PCI_CAP_EINVAL16) {
		PCIE_AER_PUT(16, bus_p, PCIE_AER_RE_CMD,
		    pcie_root_error_cmd_default);
		PCIE_DBG_AER(dip, bus_p, "AER Root Err Cmd", 16,
		    PCIE_AER_RE_CMD, reg16);
	}
}

/*
 * This function is used for enabling CE reporting and setting the AER CE mask.
 * When called from outside the pcie module it should always be preceded by
 * a call to pcie_enable_errors.
 */
int
pcie_enable_ce(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	uint16_t	device_sts, device_ctl;
	uint32_t	tmp_pcie_aer_ce_mask;

	if (!PCIE_IS_PCIE(bus_p))
		return (DDI_SUCCESS);

	/*
	 * The "pcie_ce_mask" property is used to control both the CE reporting
	 * enable field in the device control register and the AER CE mask. We
	 * leave CE reporting disabled if pcie_ce_mask is set to -1.
	 */

	tmp_pcie_aer_ce_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "pcie_ce_mask", pcie_aer_ce_mask);

	if (tmp_pcie_aer_ce_mask == (uint32_t)-1) {
		/*
		 * Nothing to do since CE reporting has already been disabled.
1405 */ 1406 return (DDI_SUCCESS); 1407 } 1408 1409 if (PCIE_HAS_AER(bus_p)) { 1410 /* Enable AER CE */ 1411 PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, tmp_pcie_aer_ce_mask); 1412 PCIE_DBG_AER(dip, bus_p, "AER CE MASK", 32, PCIE_AER_CE_MASK, 1413 0); 1414 1415 /* Clear any pending AER CE errors */ 1416 PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_STS, -1); 1417 } 1418 1419 /* clear any pending CE errors */ 1420 if ((device_sts = PCIE_CAP_GET(16, bus_p, PCIE_DEVSTS)) != 1421 PCI_CAP_EINVAL16) 1422 PCIE_CAP_PUT(16, bus_p, PCIE_DEVSTS, 1423 device_sts & (~PCIE_DEVSTS_CE_DETECTED)); 1424 1425 /* Enable CE reporting */ 1426 device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL); 1427 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, 1428 (device_ctl & (~PCIE_DEVCTL_ERR_MASK)) | pcie_base_err_default); 1429 PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, device_ctl); 1430 1431 return (DDI_SUCCESS); 1432 } 1433 1434 /* ARGSUSED */ 1435 void 1436 pcie_disable_errors(dev_info_t *dip) 1437 { 1438 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 1439 uint16_t device_ctl; 1440 uint32_t aer_reg; 1441 1442 if (!PCIE_IS_PCIE(bus_p)) 1443 return; 1444 1445 /* 1446 * Disable PCI-Express Baseline Error Handling 1447 */ 1448 device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL); 1449 device_ctl &= ~PCIE_DEVCTL_ERR_MASK; 1450 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, device_ctl); 1451 1452 /* 1453 * Disable PCI-Express Advanced Error Handling if Exists 1454 */ 1455 if (!PCIE_HAS_AER(bus_p)) 1456 goto root; 1457 1458 /* Disable Uncorrectable errors */ 1459 PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, PCIE_AER_UCE_BITS); 1460 1461 /* Disable Correctable errors */ 1462 PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, PCIE_AER_CE_BITS); 1463 1464 /* Disable ECRC generation and checking */ 1465 if ((aer_reg = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) != 1466 PCI_CAP_EINVAL32) { 1467 aer_reg &= ~(PCIE_AER_CTL_ECRC_GEN_ENA | 1468 PCIE_AER_CTL_ECRC_CHECK_ENA); 1469 1470 PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, aer_reg); 1471 } 1472 /* 1473 * Disable 
Secondary Uncorrectable errors if this is a bridge 1474 */ 1475 if (!PCIE_IS_PCIE_BDG(bus_p)) 1476 goto root; 1477 1478 PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, PCIE_AER_SUCE_BITS); 1479 1480 root: 1481 /* 1482 * disable Root Control this is a Root device 1483 */ 1484 if (!PCIE_IS_ROOT(bus_p)) 1485 return; 1486 1487 if (!pcie_serr_disable_flag) { 1488 device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL); 1489 device_ctl &= ~PCIE_ROOT_SYS_ERR; 1490 PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, device_ctl); 1491 } 1492 1493 if (!PCIE_HAS_AER(bus_p)) 1494 return; 1495 1496 if ((device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_AER_RE_CMD)) != 1497 PCI_CAP_EINVAL16) { 1498 device_ctl &= ~pcie_root_error_cmd_default; 1499 PCIE_CAP_PUT(16, bus_p, PCIE_AER_RE_CMD, device_ctl); 1500 } 1501 } 1502 1503 /* 1504 * Extract bdf from "reg" property. 1505 */ 1506 int 1507 pcie_get_bdf_from_dip(dev_info_t *dip, pcie_req_id_t *bdf) 1508 { 1509 pci_regspec_t *regspec; 1510 int reglen; 1511 1512 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1513 "reg", (int **)®spec, (uint_t *)®len) != DDI_SUCCESS) 1514 return (DDI_FAILURE); 1515 1516 if (reglen < (sizeof (pci_regspec_t) / sizeof (int))) { 1517 ddi_prop_free(regspec); 1518 return (DDI_FAILURE); 1519 } 1520 1521 /* Get phys_hi from first element. All have same bdf. 
	 */
	*bdf = (regspec->pci_phys_hi & (PCI_REG_BDFR_M ^ PCI_REG_REG_M)) >> 8;

	ddi_prop_free(regspec);
	return (DDI_SUCCESS);
}

/*
 * Return the child of dip that rdip descends from.  Assumes rdip really
 * is below dip; the walk does not guard against reaching the tree root.
 */
dev_info_t *
pcie_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t *cdip = rdip;

	for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
		;

	return (cdip);
}

/*
 * Determine the bdf to attribute to a DMA transfer initiated below dip
 * on behalf of rdip.  May return PCIE_INVALID_BDF (see comments below).
 */
uint32_t
pcie_get_bdf_for_dma_xfer(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t *cdip;

	/*
	 * As part of the probing, the PCI fcode interpreter may setup a DMA
	 * request if a given card has a fcode on it using dip and rdip of the
	 * hotplug connector i.e, dip and rdip of px/pcieb driver. In this
	 * case, return a invalid value for the bdf since we cannot get to the
	 * bdf value of the actual device which will be initiating this DMA.
	 */
	if (rdip == dip)
		return (PCIE_INVALID_BDF);

	cdip = pcie_get_my_childs_dip(dip, rdip);

	/*
	 * For a given rdip, return the bdf value of dip's (px or pcieb)
	 * immediate child or secondary bus-id if dip is a PCIe2PCI bridge.
	 *
	 * XXX - For now, return a invalid bdf value for all PCI and PCI-X
	 * devices since this needs more work.
	 */
	return (PCI_GET_PCIE2PCI_SECBUS(cdip) ?
	    PCIE_INVALID_BDF : PCI_GET_BDF(cdip));
}

/*
 * Accessors for the platform-tunable error masks declared above; a
 * platform's root complex driver uses the setters to suppress specific
 * error classes instead of compiling in #defines.
 */
uint32_t
pcie_get_aer_uce_mask() {
	return (pcie_aer_uce_mask);
}
uint32_t
pcie_get_aer_ce_mask() {
	return (pcie_aer_ce_mask);
}
uint32_t
pcie_get_aer_suce_mask() {
	return (pcie_aer_suce_mask);
}
uint32_t
pcie_get_serr_mask() {
	return (pcie_serr_disable_flag);
}

void
pcie_set_aer_uce_mask(uint32_t mask) {
	pcie_aer_uce_mask = mask;
	/* masking UR also turns off baseline UR reporting */
	if (mask & PCIE_AER_UCE_UR)
		pcie_base_err_default &= ~PCIE_DEVCTL_UR_REPORTING_EN;
	else
		pcie_base_err_default |= PCIE_DEVCTL_UR_REPORTING_EN;

	/* masking ECRC errors disables ECRC generation/checking entirely */
	if (mask & PCIE_AER_UCE_ECRC)
		pcie_ecrc_value = 0;
}

void
pcie_set_aer_ce_mask(uint32_t mask) {
	pcie_aer_ce_mask = mask;
}
void
pcie_set_aer_suce_mask(uint32_t mask) {
	pcie_aer_suce_mask = mask;
}
void
pcie_set_serr_mask(uint32_t mask) {
	pcie_serr_disable_flag = mask;
}

/*
 * Is the rdip a child of dip.  Used for checking certain CTLOPS from bubbling
 * up erroneously.  Ex. ISA ctlops to a PCI-PCI Bridge.
 */
boolean_t
pcie_is_child(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t	*cdip = ddi_get_child(dip);
	for (; cdip; cdip = ddi_get_next_sibling(cdip))
		if (cdip == rdip)
			break;
	return (cdip != NULL);
}

/*
 * Report whether the device's link has been administratively disabled
 * via the Link Control register's Link Disable bit.
 */
boolean_t
pcie_is_link_disabled(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	if (PCIE_IS_PCIE(bus_p)) {
		if (PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL) &
		    PCIE_LINKCTL_LINK_DISABLE)
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Initialize the MPS for a root port.
 *
 * dip - dip of root port device.
 */
void
pcie_init_root_port_mps(dev_info_t *dip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);
	int rp_cap, max_supported = pcie_max_mps;

	/* scan the fabric below for the highest common MPS capability */
	(void) pcie_get_fabric_mps(ddi_get_parent(dip),
	    ddi_get_child(dip), &max_supported);

	/* clamp to the root port's own Device Capabilities MPS field */
	rp_cap = PCI_CAP_GET16(bus_p->bus_cfg_hdl, NULL,
	    bus_p->bus_pcie_off, PCIE_DEVCAP) &
	    PCIE_DEVCAP_MAX_PAYLOAD_MASK;

	if (rp_cap < max_supported)
		max_supported = rp_cap;

	bus_p->bus_mps = max_supported;
	(void) pcie_initchild_mps(dip);
}

/*
 * Initialize the Maximum Payload Size of a device.
 *
 * cdip - dip of device.
 *
 * returns - DDI_SUCCESS or DDI_FAILURE
 */
int
pcie_initchild_mps(dev_info_t *cdip)
{
	int		max_payload_size;
	pcie_bus_t	*bus_p;
	dev_info_t	*pdip = ddi_get_parent(cdip);
	uint8_t		dev_type;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL) {
		PCIE_DBG("%s: BUS not found.\n",
		    ddi_driver_name(cdip));
		return (DDI_FAILURE);
	}

	dev_type = bus_p->bus_dev_type;

	/*
	 * For ARI Devices, only function zero's MPS needs to be set.
	 */
	if ((dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) &&
	    (pcie_ari_is_enabled(pdip) == PCIE_ARI_FORW_ENABLED)) {
		pcie_req_id_t child_bdf;

		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
			return (DDI_FAILURE);
		/* non-zero ARI function: nothing to do */
		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) != 0)
			return (DDI_SUCCESS);
	}

	if (PCIE_IS_RP(bus_p)) {
		/*
		 * If this device is a root port, then the mps scan
		 * saved the mps in the root ports bus_p.
		 */
		max_payload_size = bus_p->bus_mps;
	} else {
		/*
		 * If the device is not a root port, then the mps of
		 * its parent should be used.
		 */
		pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip);
		max_payload_size = parent_bus_p->bus_mps;
	}

	if (PCIE_IS_PCIE(bus_p) && (max_payload_size >= 0)) {
		pcie_bus_t *rootp_bus_p = PCIE_DIP2BUS(bus_p->bus_rp_dip);
		uint16_t mask, dev_ctrl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL),
		    mps = PCIE_CAP_GET(16, bus_p, PCIE_DEVCAP) &
		    PCIE_DEVCAP_MAX_PAYLOAD_MASK;

		/* device's own capability may be lower than the fabric's */
		mps = MIN(mps, (uint16_t)max_payload_size);

		/*
		 * If the MPS to be set is less than the root ports
		 * MPS, then MRRS will have to be set the same as MPS.
		 */
		mask = ((mps < rootp_bus_p->bus_mps) ?
		    PCIE_DEVCTL_MAX_READ_REQ_MASK : 0) |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK;

		dev_ctrl &= ~mask;
		mask = ((mps < rootp_bus_p->bus_mps)
		    ? mps << PCIE_DEVCTL_MAX_READ_REQ_SHIFT : 0)
		    | (mps << PCIE_DEVCTL_MAX_PAYLOAD_SHIFT);

		dev_ctrl |= mask;

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);

		bus_p->bus_mps = mps;
	}

	return (DDI_SUCCESS);
}

/*
 * Scans a device tree/branch for a maximum payload size capabilities.
 *
 * rc_dip - dip of Root Complex.
 * dip - dip of device where scan will begin.
 * max_supported (IN) - maximum allowable MPS.
 * max_supported (OUT) - maximum payload size capability of fabric.
1751 */ 1752 void 1753 pcie_get_fabric_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported) 1754 { 1755 if (dip == NULL) 1756 return; 1757 1758 /* 1759 * Perform a fabric scan to obtain Maximum Payload Capabilities 1760 */ 1761 (void) pcie_scan_mps(rc_dip, dip, max_supported); 1762 1763 PCIE_DBG("MPS: Highest Common MPS= %x\n", max_supported); 1764 } 1765 1766 /* 1767 * Scans fabric and determines Maximum Payload Size based on 1768 * highest common denominator alogorithm 1769 */ 1770 static void 1771 pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported) 1772 { 1773 int circular_count; 1774 pcie_max_supported_t max_pay_load_supported; 1775 1776 max_pay_load_supported.dip = rc_dip; 1777 max_pay_load_supported.highest_common_mps = *max_supported; 1778 1779 ndi_devi_enter(ddi_get_parent(dip), &circular_count); 1780 ddi_walk_devs(dip, pcie_get_max_supported, 1781 (void *)&max_pay_load_supported); 1782 ndi_devi_exit(ddi_get_parent(dip), circular_count); 1783 1784 *max_supported = max_pay_load_supported.highest_common_mps; 1785 } 1786 1787 /* 1788 * Called as part of the Maximum Payload Size scan. 
 */
static int
pcie_get_max_supported(dev_info_t *dip, void *arg)
{
	uint32_t max_supported;
	uint16_t cap_ptr;
	pcie_max_supported_t *current = (pcie_max_supported_t *)arg;
	pci_regspec_t *reg;
	int rlen;
	caddr_t virt;
	ddi_acc_handle_t config_handle;

	/*
	 * NOTE(review): the mapping below is performed through the first
	 * child of the root complex dip (current->dip), so bail out when
	 * there is none — confirm this is the intended mapping anchor.
	 */
	if (ddi_get_child(current->dip) == NULL) {
		goto fail1;
	}

	if (pcie_dev(dip) == DDI_FAILURE) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s: "
		    "Not a PCIe dev\n", ddi_driver_name(dip));
		goto fail1;
	}

	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
	    (caddr_t)&reg, &rlen) != DDI_PROP_SUCCESS) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s: "
		    "Can not read reg\n", ddi_driver_name(dip));
		goto fail1;
	}

	if (pcie_map_phys(ddi_get_child(current->dip), reg, &virt,
	    &config_handle) != DDI_SUCCESS) {
		PCIE_DBG("MPS: pcie_get_max_supported: %s: pcie_map_phys "
		    "failed\n", ddi_driver_name(dip));
		goto fail2;
	}

	if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, &cap_ptr)) ==
	    DDI_FAILURE) {
		goto fail3;
	}

	max_supported = PCI_CAP_GET16(config_handle, NULL, cap_ptr,
	    PCIE_DEVCAP) & PCIE_DEVCAP_MAX_PAYLOAD_MASK;

	PCIE_DBG("PCIE MPS: %s: MPS Capabilities %x\n", ddi_driver_name(dip),
	    max_supported);

	/* running minimum: the fabric MPS is the lowest capability seen */
	if (max_supported < current->highest_common_mps)
		current->highest_common_mps = max_supported;

	/* unwind resources in reverse order of acquisition */
fail3:
	pcie_unmap_phys(&config_handle, reg);
fail2:
	kmem_free(reg, rlen);
fail1:
	return (DDI_WALK_CONTINUE);
}

/*
 * Determines if there are any root ports attached to a root complex.
 *
 * dip - dip of root complex
 *
 * Returns - DDI_SUCCESS if there is at least one root port otherwise
 * DDI_FAILURE.
1854 */ 1855 int 1856 pcie_root_port(dev_info_t *dip) 1857 { 1858 int port_type; 1859 uint16_t cap_ptr; 1860 ddi_acc_handle_t config_handle; 1861 dev_info_t *cdip = ddi_get_child(dip); 1862 1863 /* 1864 * Determine if any of the children of the passed in dip 1865 * are root ports. 1866 */ 1867 for (; cdip; cdip = ddi_get_next_sibling(cdip)) { 1868 1869 if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS) 1870 continue; 1871 1872 if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, 1873 &cap_ptr)) == DDI_FAILURE) { 1874 pci_config_teardown(&config_handle); 1875 continue; 1876 } 1877 1878 port_type = PCI_CAP_GET16(config_handle, NULL, cap_ptr, 1879 PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK; 1880 1881 pci_config_teardown(&config_handle); 1882 1883 if (port_type == PCIE_PCIECAP_DEV_TYPE_ROOT) 1884 return (DDI_SUCCESS); 1885 } 1886 1887 /* No root ports were found */ 1888 1889 return (DDI_FAILURE); 1890 } 1891 1892 /* 1893 * Function that determines if a device a PCIe device. 1894 * 1895 * dip - dip of device. 1896 * 1897 * returns - DDI_SUCCESS if device is a PCIe device, otherwise DDI_FAILURE. 1898 */ 1899 int 1900 pcie_dev(dev_info_t *dip) 1901 { 1902 /* get parent device's device_type property */ 1903 char *device_type; 1904 int rc = DDI_FAILURE; 1905 dev_info_t *pdip = ddi_get_parent(dip); 1906 1907 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, 1908 DDI_PROP_DONTPASS, "device_type", &device_type) 1909 != DDI_PROP_SUCCESS) { 1910 return (DDI_FAILURE); 1911 } 1912 1913 if (strcmp(device_type, "pciex") == 0) 1914 rc = DDI_SUCCESS; 1915 else 1916 rc = DDI_FAILURE; 1917 1918 ddi_prop_free(device_type); 1919 return (rc); 1920 } 1921 1922 /* 1923 * Function to map in a device's memory space. 
 */
static int
pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
    caddr_t *addrp, ddi_acc_handle_t *handlep)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;
	int result;
	ddi_device_acc_attr_t attr;

	/* cautious access: reads of broken devices must not panic */
	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	attr.devacc_attr_access = DDI_CAUTIOUS_ACC;

	/* build an access handle by hand and hand it to ddi_map() */
	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
	hp = impl_acc_hdl_get(*handlep);
	hp->ah_vers = VERS_ACCHDL;
	hp->ah_dip = dip;
	hp->ah_rnumber = 0;
	hp->ah_offset = 0;
	hp->ah_len = 0;
	hp->ah_acc = attr;

	mr.map_op = DDI_MO_MAP_LOCKED;
	mr.map_type = DDI_MT_REGSPEC;
	mr.map_obj.rp = (struct regspec *)phys_spec;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	result = ddi_map(dip, &mr, 0, 0, addrp);

	if (result != DDI_SUCCESS) {
		/* free the handle on failure; caller sees a NULL handle */
		impl_acc_hdl_free(*handlep);
		*handlep = (ddi_acc_handle_t)NULL;
	} else {
		hp->ah_addr = *addrp;
	}

	return (result);
}

/*
 * Map out memory that was mapped in with pcie_map_phys();
 */
static void
pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph)
{
	ddi_map_req_t mr;
	ddi_acc_hdl_t *hp;

	hp = impl_acc_hdl_get(*handlep);
	ASSERT(hp);

	mr.map_op = DDI_MO_UNMAP;
	mr.map_type = DDI_MT_REGSPEC;
	mr.map_obj.rp = (struct regspec *)ph;
	mr.map_prot = PROT_READ | PROT_WRITE;
	mr.map_flags = DDI_MF_KERNEL_MAPPING;
	mr.map_handlep = hp;
	mr.map_vers = DDI_MAP_VERSION;

	(void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
	    hp->ah_len, &hp->ah_addr);

	impl_acc_hdl_free(*handlep);
	*handlep = (ddi_acc_handle_t)NULL;
}

/*
 * Record whether RBER (role-based error reporting) errors should be
 * treated as fatal for this device's fault data.
 */
void
pcie_set_rber_fatal(dev_info_t *dip,
    boolean_t val)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	bus_p->bus_pfd->pe_rber_fatal = val;
}

/*
 * Return parent Root Port's pe_rber_fatal value.
 */
boolean_t
pcie_get_rber_fatal(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
	pcie_bus_t *rp_bus_p = PCIE_DIP2UPBUS(bus_p->bus_rp_dip);
	return (rp_bus_p->bus_pfd->pe_rber_fatal);
}

/*
 * Determine whether this (downstream or root) port supports ARI
 * forwarding.  Requires a PCIe capability of at least version 2.0 and
 * the ARI Forwarding bit in Device Capabilities 2; can be globally
 * overridden with the pcie_disable_ari tunable.
 */
int
pcie_ari_supported(dev_info_t *dip)
{
	uint32_t devcap2;
	uint16_t pciecap;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	uint8_t dev_type;

	PCIE_DBG("pcie_ari_supported: dip=%p\n", dip);

	if (bus_p == NULL)
		return (PCIE_ARI_FORW_NOT_SUPPORTED);

	dev_type = bus_p->bus_dev_type;

	/* only downstream switch ports and root ports can forward ARI */
	if ((dev_type != PCIE_PCIECAP_DEV_TYPE_DOWN) &&
	    (dev_type != PCIE_PCIECAP_DEV_TYPE_ROOT))
		return (PCIE_ARI_FORW_NOT_SUPPORTED);

	if (pcie_disable_ari) {
		PCIE_DBG("pcie_ari_supported: dip=%p: ARI Disabled\n", dip);
		return (PCIE_ARI_FORW_NOT_SUPPORTED);
	}

	pciecap = PCIE_CAP_GET(16, bus_p, PCIE_PCIECAP);

	if ((pciecap & PCIE_PCIECAP_VER_MASK) < PCIE_PCIECAP_VER_2_0) {
		PCIE_DBG("pcie_ari_supported: dip=%p: Not 2.0\n", dip);
		return (PCIE_ARI_FORW_NOT_SUPPORTED);
	}

	devcap2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCAP2);

	PCIE_DBG("pcie_ari_supported: dip=%p: DevCap2=0x%x\n",
	    dip, devcap2);

	if (devcap2 & PCIE_DEVCAP2_ARI_FORWARD) {
		PCIE_DBG("pcie_ari_supported: "
		    "dip=%p: ARI Forwarding is supported\n", dip);
		return (PCIE_ARI_FORW_SUPPORTED);
	}
	return (PCIE_ARI_FORW_NOT_SUPPORTED);
}

/*
 * Turn on ARI forwarding in the port's Device Control 2 register.
 * Fails if the port does not support ARI forwarding.
 */
int
pcie_ari_enable(dev_info_t *dip)
{
	uint16_t devctl2;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);

	PCIE_DBG("pcie_ari_enable: dip=%p\n", dip);

	if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
		return (DDI_FAILURE);

	devctl2 =
PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2); 2069 devctl2 |= PCIE_DEVCTL2_ARI_FORWARD_EN; 2070 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2); 2071 2072 PCIE_DBG("pcie_ari_enable: dip=%p: writing 0x%x to DevCtl2\n", 2073 dip, devctl2); 2074 2075 return (DDI_SUCCESS); 2076 } 2077 2078 int 2079 pcie_ari_disable(dev_info_t *dip) 2080 { 2081 uint16_t devctl2; 2082 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 2083 2084 PCIE_DBG("pcie_ari_disable: dip=%p\n", dip); 2085 2086 if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED) 2087 return (DDI_FAILURE); 2088 2089 devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2); 2090 devctl2 &= ~PCIE_DEVCTL2_ARI_FORWARD_EN; 2091 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2); 2092 2093 PCIE_DBG("pcie_ari_disable: dip=%p: writing 0x%x to DevCtl2\n", 2094 dip, devctl2); 2095 2096 return (DDI_SUCCESS); 2097 } 2098 2099 int 2100 pcie_ari_is_enabled(dev_info_t *dip) 2101 { 2102 uint16_t devctl2; 2103 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 2104 2105 PCIE_DBG("pcie_ari_is_enabled: dip=%p\n", dip); 2106 2107 if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED) 2108 return (PCIE_ARI_FORW_DISABLED); 2109 2110 devctl2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCTL2); 2111 2112 PCIE_DBG("pcie_ari_is_enabled: dip=%p: DevCtl2=0x%x\n", 2113 dip, devctl2); 2114 2115 if (devctl2 & PCIE_DEVCTL2_ARI_FORWARD_EN) { 2116 PCIE_DBG("pcie_ari_is_enabled: " 2117 "dip=%p: ARI Forwarding is enabled\n", dip); 2118 return (PCIE_ARI_FORW_ENABLED); 2119 } 2120 2121 return (PCIE_ARI_FORW_DISABLED); 2122 } 2123 2124 int 2125 pcie_ari_device(dev_info_t *dip) 2126 { 2127 ddi_acc_handle_t handle; 2128 uint16_t cap_ptr; 2129 2130 PCIE_DBG("pcie_ari_device: dip=%p\n", dip); 2131 2132 /* 2133 * XXX - This function may be called before the bus_p structure 2134 * has been populated. This code can be changed to remove 2135 * pci_config_setup()/pci_config_teardown() when the RFE 2136 * to populate the bus_p structures early in boot is putback. 
	 */

	/* First make sure it is a PCIe device */

	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
		return (PCIE_NOT_ARI_DEVICE);

	if ((PCI_CAP_LOCATE(handle, PCI_CAP_ID_PCI_E, &cap_ptr))
	    != DDI_SUCCESS) {
		pci_config_teardown(&handle);
		return (PCIE_NOT_ARI_DEVICE);
	}

	/* Locate the ARI Capability */

	if ((PCI_CAP_LOCATE(handle, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI),
	    &cap_ptr)) == DDI_FAILURE) {
		pci_config_teardown(&handle);
		return (PCIE_NOT_ARI_DEVICE);
	}

	/* ARI Capability was found so it must be a ARI device */
	PCIE_DBG("pcie_ari_device: ARI Device dip=%p\n", dip);

	pci_config_teardown(&handle);
	return (PCIE_ARI_DEVICE);
}

/*
 * Read the Next Function Number field from a device's ARI capability
 * into *func.  Returns DDI_FAILURE if config space cannot be mapped or
 * the ARI capability is absent.
 */
int
pcie_ari_get_next_function(dev_info_t *dip, int *func)
{
	uint32_t val;
	uint16_t cap_ptr, next_function;
	ddi_acc_handle_t handle;

	/*
	 * XXX - This function may be called before the bus_p structure
	 * has been populated. This code can be changed to remove
	 * pci_config_setup()/pci_config_teardown() when the RFE
	 * to populate the bus_p structures early in boot is putback.
	 */

	if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if ((PCI_CAP_LOCATE(handle,
	    PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI), &cap_ptr)) == DDI_FAILURE) {
		pci_config_teardown(&handle);
		return (DDI_FAILURE);
	}

	val = PCI_CAP_GET32(handle, NULL, cap_ptr, PCIE_ARI_CAP);

	/* extract the Next Function Number field from the ARI capability */
	next_function = (val >> PCIE_ARI_CAP_NEXT_FUNC_SHIFT) &
	    PCIE_ARI_CAP_NEXT_FUNC_MASK;

	pci_config_teardown(&handle);

	*func = next_function;

	return (DDI_SUCCESS);
}

/*
 * Find the child of dip whose ARI function number matches "function".
 * Returns NULL when no child matches, or when a child's bdf cannot be
 * determined.
 */
dev_info_t *
pcie_func_to_dip(dev_info_t *dip, pcie_req_id_t function)
{
	pcie_req_id_t child_bdf;
	dev_info_t *cdip;

	for (cdip = ddi_get_child(dip); cdip;
	    cdip = ddi_get_next_sibling(cdip)) {

		if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
			return (NULL);

		if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) == function)
			return (cdip);
	}
	return (NULL);
}

#ifdef DEBUG

/* Dump the interesting fields of a pcie_bus_t via pcie_dbg(). */
static void
pcie_print_bus(pcie_bus_t *bus_p)
{
	pcie_dbg("\tbus_dip = 0x%p\n", bus_p->bus_dip);
	pcie_dbg("\tbus_fm_flags = 0x%x\n", bus_p->bus_fm_flags);

	pcie_dbg("\tbus_bdf = 0x%x\n", bus_p->bus_bdf);
	pcie_dbg("\tbus_dev_ven_id = 0x%x\n", bus_p->bus_dev_ven_id);
	pcie_dbg("\tbus_rev_id = 0x%x\n", bus_p->bus_rev_id);
	pcie_dbg("\tbus_hdr_type = 0x%x\n", bus_p->bus_hdr_type);
	pcie_dbg("\tbus_dev_type = 0x%x\n", bus_p->bus_dev_type);
	pcie_dbg("\tbus_bdg_secbus = 0x%x\n", bus_p->bus_bdg_secbus);
	pcie_dbg("\tbus_pcie_off = 0x%x\n", bus_p->bus_pcie_off);
	pcie_dbg("\tbus_aer_off = 0x%x\n", bus_p->bus_aer_off);
	pcie_dbg("\tbus_pcix_off = 0x%x\n", bus_p->bus_pcix_off);
	pcie_dbg("\tbus_ecc_ver = 0x%x\n", bus_p->bus_ecc_ver);
}

/*
 * For debugging purposes set pcie_dbg_print != 0 to see printf messages
 * during interrupt.
2241 * 2242 * When a proper solution is in place this code will disappear. 2243 * Potential solutions are: 2244 * o circular buffers 2245 * o taskq to print at lower pil 2246 */ 2247 int pcie_dbg_print = 0; 2248 void 2249 pcie_dbg(char *fmt, ...) 2250 { 2251 va_list ap; 2252 2253 if (!pcie_debug_flags) { 2254 return; 2255 } 2256 va_start(ap, fmt); 2257 if (servicing_interrupt()) { 2258 if (pcie_dbg_print) { 2259 prom_vprintf(fmt, ap); 2260 } 2261 } else { 2262 prom_vprintf(fmt, ap); 2263 } 2264 va_end(ap); 2265 } 2266 #endif /* DEBUG */ 2267 2268 #if defined(__i386) || defined(__amd64) 2269 static void 2270 pcie_check_io_mem_range(ddi_acc_handle_t cfg_hdl, boolean_t *empty_io_range, 2271 boolean_t *empty_mem_range) 2272 { 2273 uint8_t class, subclass; 2274 uint_t val; 2275 2276 class = pci_config_get8(cfg_hdl, PCI_CONF_BASCLASS); 2277 subclass = pci_config_get8(cfg_hdl, PCI_CONF_SUBCLASS); 2278 2279 if ((class == PCI_CLASS_BRIDGE) && (subclass == PCI_BRIDGE_PCI)) { 2280 val = (((uint_t)pci_config_get8(cfg_hdl, PCI_BCNF_IO_BASE_LOW) & 2281 PCI_BCNF_IO_MASK) << 8); 2282 /* 2283 * Assuming that a zero based io_range[0] implies an 2284 * invalid I/O range. Likewise for mem_range[0]. 2285 */ 2286 if (val == 0) 2287 *empty_io_range = B_TRUE; 2288 val = (((uint_t)pci_config_get16(cfg_hdl, PCI_BCNF_MEM_BASE) & 2289 PCI_BCNF_MEM_MASK) << 16); 2290 if (val == 0) 2291 *empty_mem_range = B_TRUE; 2292 } 2293 } 2294 2295 #endif /* defined(__i386) || defined(__amd64) */ 2296