/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Host to PCI-Express local bus driver
 */

#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/pci_impl.h>
#include <sys/pcie_impl.h>
#include <sys/sysmacros.h>
#include <sys/ddi_intr.h>
#include <sys/sunndi.h>
#include <sys/sunddi.h>
#include <sys/ddifm.h>
#include <sys/ndifm.h>
#include <sys/fm/util.h>
#include <sys/hotplug/pci/pcihp.h>
#include <io/pci/pci_tools_ext.h>
#include <io/pci/pci_common.h>
#include <io/pciex/pcie_nvidia.h>

/*
 * Bus Operation functions
 */
static int	npe_bus_map(dev_info_t *, dev_info_t *, ddi_map_req_t *,
		    off_t, off_t, caddr_t *);
static int	npe_ctlops(dev_info_t *, dev_info_t *, ddi_ctl_enum_t,
		    void *, void *);
static int	npe_intr_ops(dev_info_t *, dev_info_t *, ddi_intr_op_t,
		    ddi_intr_handle_impl_t *, void *);
static int	npe_fm_init(dev_info_t *, dev_info_t *, int,
		    ddi_iblock_cookie_t *);

static int	npe_fm_callback(dev_info_t *, ddi_fm_error_t *, const void *);

/*
 * Disable URs and Received MA for all PCIe devices.  Until x86 SW is changed
 * so that random drivers do not do PIO accesses on devices that they do not
 * own, these error bits must be disabled.  SERR must also be disabled if URs
 * have been masked.
 */
uint32_t	npe_aer_uce_mask = PCIE_AER_UCE_UR;
uint32_t	npe_aer_ce_mask = 0;
uint32_t	npe_aer_suce_mask = PCIE_AER_SUCE_RCVD_MA;

struct bus_ops npe_bus_ops = {
	BUSO_REV,
	npe_bus_map,
	NULL,
	NULL,
	NULL,
	i_ddi_map_fault,
	ddi_dma_map,
	ddi_dma_allochdl,
	ddi_dma_freehdl,
	ddi_dma_bindhdl,
	ddi_dma_unbindhdl,
	ddi_dma_flush,
	ddi_dma_win,
	ddi_dma_mctl,
	npe_ctlops,
	ddi_bus_prop_op,
	0,		/* (*bus_get_eventcookie)(); */
	0,		/* (*bus_add_eventcall)(); */
	0,		/* (*bus_remove_eventcall)(); */
	0,		/* (*bus_post_event)(); */
	0,		/* (*bus_intr_ctl)(); */
	0,		/* (*bus_config)(); */
	0,		/* (*bus_unconfig)(); */
	npe_fm_init,	/* (*bus_fm_init)(); */
	NULL,		/* (*bus_fm_fini)(); */
	NULL,		/* (*bus_fm_access_enter)(); */
	NULL,		/* (*bus_fm_access_exit)(); */
	NULL,		/* (*bus_power)(); */
	npe_intr_ops	/* (*bus_intr_op)(); */
};

/*
 * One goal here is to leverage off of the pcihp.c source without making
 * changes to it.  Call into its cb_ops directly if needed, piggybacking
 * anything else needed by the pci_tools.c module.  Only pci_tools and pcihp
 * will be using the PCI devctl node.
 */
static int	npe_open(dev_t *, int, int, cred_t *);
static int	npe_close(dev_t, int, int, cred_t *);
static int	npe_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int	npe_prop_op(dev_t, dev_info_t *, ddi_prop_op_t, int, char *,
		    caddr_t, int *);
static int	npe_info(dev_info_t *, ddi_info_cmd_t, void *, void **);

struct cb_ops npe_cb_ops = {
	npe_open,			/* open */
	npe_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	npe_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	npe_prop_op,			/* cb_prop_op */
	NULL,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* Driver compatibility flag */
	CB_REV,				/* rev */
	nodev,				/* int (*cb_aread)() */
	nodev				/* int (*cb_awrite)() */
};


/*
 * Device Node Operation functions
 */
static int	npe_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int	npe_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);

struct dev_ops npe_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	npe_info,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	npe_attach,		/* attach */
	npe_detach,		/* detach */
	nulldev,		/* reset */
	&npe_cb_ops,		/* driver operations */
	&npe_bus_ops		/* bus operations */
};

/*
 * Internal routines in support of particular npe_ctlops.
 */
static int npe_removechild(dev_info_t *child);
static int npe_initchild(dev_info_t *child);

/*
 * External support routines
 */
extern void	npe_query_acpi_mcfg(dev_info_t *dip);
extern void	npe_ck804_fix_aer_ptr(ddi_acc_handle_t cfg_hdl);
extern int	npe_disable_empty_bridges_workaround(dev_info_t *child);
extern void	npe_nvidia_error_mask(ddi_acc_handle_t cfg_hdl);
extern void	npe_intel_error_mask(ddi_acc_handle_t cfg_hdl);

/*
 * Module linkage information for the kernel.
 */
static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module */
	"Host to PCIe nexus driver",
	&npe_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

/* Save minimal state. */
void *npe_statep;

int
_init(void)
{
	int e;

	/*
	 * Initialize per-pci bus soft state pointer.
	 */
	e = ddi_soft_state_init(&npe_statep, sizeof (pci_state_t), 1);
	if (e != 0)
		return (e);

	if ((e = mod_install(&modlinkage)) != 0)
		ddi_soft_state_fini(&npe_statep);

	return (e);
}


int
_fini(void)
{
	int rc;

	rc = mod_remove(&modlinkage);
	if (rc != 0)
		return (rc);

	ddi_soft_state_fini(&npe_statep);
	return (rc);
}


int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*ARGSUSED*/
static int
npe_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	/*
	 * Use the minor number as constructed by pcihp, as the index value to
	 * ddi_soft_state_zalloc.
	 */
	int		instance = ddi_get_instance(devi);
	pci_state_t	*pcip = NULL;

	if (cmd == DDI_RESUME)
		return (DDI_SUCCESS);

	if (ddi_prop_update_string(DDI_DEV_T_NONE, devi, "device_type",
	    "pciex") != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "npe: 'device_type' prop create failed");
	}

	if (ddi_soft_state_zalloc(npe_statep, instance) == DDI_SUCCESS)
		pcip = ddi_get_soft_state(npe_statep, instance);

	if (pcip == NULL)
		return (DDI_FAILURE);

	pcip->pci_dip = devi;

	pcie_rc_init_bus(devi);

	/*
	 * Initialize hotplug support on this bus.  At minimum
	 * (for non hotplug bus) this would create ":devctl" minor
	 * node to support DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls
	 * to this bus.
	 */
	if (pcihp_init(devi) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "npe: Failed to setup hotplug framework");
		ddi_soft_state_free(npe_statep, instance);
		return (DDI_FAILURE);
	}

	/* Second arg: initialize for pci_express root nexus */
	if (pcitool_init(devi, B_TRUE) != DDI_SUCCESS) {
		(void) pcihp_uninit(devi);
		ddi_soft_state_free(npe_statep, instance);
		return (DDI_FAILURE);
	}

	pcip->pci_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE |
	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;
	ddi_fm_init(devi, &pcip->pci_fmcap, &pcip->pci_fm_ibc);

	if (pcip->pci_fmcap & DDI_FM_ERRCB_CAPABLE) {
		ddi_fm_handler_register(devi, npe_fm_callback, NULL);
	}

	PCIE_DIP2PFD(devi) = kmem_zalloc(sizeof (pf_data_t), KM_SLEEP);
	pcie_rc_init_pfd(devi, PCIE_DIP2PFD(devi));

	npe_query_acpi_mcfg(devi);
	ddi_report_dev(devi);
	return (DDI_SUCCESS);

}

/*ARGSUSED*/
static int
npe_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	int instance = ddi_get_instance(devi);
	pci_state_t *pcip;

	pcip = ddi_get_soft_state(npe_statep, ddi_get_instance(devi));

	switch (cmd) {
	case DDI_DETACH:

		/* Uninitialize pcitool support. */
		pcitool_uninit(devi);

		/*
		 * Uninitialize hotplug support on this bus.
		 */
		(void) pcihp_uninit(devi);

		if (pcip->pci_fmcap & DDI_FM_ERRCB_CAPABLE)
			ddi_fm_handler_unregister(devi);

		pcie_rc_fini_bus(devi);
		pcie_rc_fini_pfd(PCIE_DIP2PFD(devi));
		kmem_free(PCIE_DIP2PFD(devi), sizeof (pf_data_t));

		ddi_fm_fini(devi);
		ddi_soft_state_free(npe_statep, instance);
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}
}


static int
npe_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp)
{
	int		rnumber;
	int		length;
	int		space;
	ddi_acc_impl_t	*ap;
	ddi_acc_hdl_t	*hp;
	ddi_map_req_t	mr;
	pci_regspec_t	pci_reg;
	pci_regspec_t	*pci_rp;
	struct regspec	reg;
	pci_acc_cfblk_t	*cfp;
	int		retval;

	mr = *mp;	/* Get private copy of request */
	mp = &mr;

	/*
	 * check for register number
	 */
	switch (mp->map_type) {
	case DDI_MT_REGSPEC:
		pci_reg = *(pci_regspec_t *)(mp->map_obj.rp);
		pci_rp = &pci_reg;
		if (pci_common_get_reg_prop(rdip, pci_rp) != DDI_SUCCESS)
			return (DDI_FAILURE);
		break;
	case DDI_MT_RNUMBER:
		rnumber = mp->map_obj.rnumber;
		/*
		 * get ALL "reg" properties for dip, select the one of
		 * interest.
		 * In x86, "assigned-addresses" property
		 * is identical to the "reg" property, so there is no
		 * need to cross check the two to determine the physical
		 * address of the registers.
		 * This routine still performs some validity checks to
		 * make sure that everything is okay.
		 */
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, rdip,
		    DDI_PROP_DONTPASS, "reg", (int **)&pci_rp,
		    (uint_t *)&length) != DDI_PROP_SUCCESS)
			return (DDI_FAILURE);

		/*
		 * validate the register number.
		 */
		length /= (sizeof (pci_regspec_t) / sizeof (int));
		if (rnumber >= length) {
			ddi_prop_free(pci_rp);
			return (DDI_FAILURE);
		}

		/*
		 * copy the required entry.
		 */
		pci_reg = pci_rp[rnumber];

		/*
		 * free the memory allocated by ddi_prop_lookup_int_array
		 */
		ddi_prop_free(pci_rp);

		pci_rp = &pci_reg;
		if (pci_common_get_reg_prop(rdip, pci_rp) != DDI_SUCCESS)
			return (DDI_FAILURE);
		mp->map_type = DDI_MT_REGSPEC;
		break;
	default:
		return (DDI_ME_INVAL);
	}

	space = pci_rp->pci_phys_hi & PCI_REG_ADDR_M;

	/*
	 * check for unmap and unlock of address space
	 */
	if ((mp->map_op == DDI_MO_UNMAP) || (mp->map_op == DDI_MO_UNLOCK)) {
		switch (space) {
		case PCI_ADDR_IO:
			reg.regspec_bustype = 1;
			break;

		case PCI_ADDR_CONFIG:
			/*
			 * Check for AMD's northbridges
			 * AND
			 * for any PCI device.
			 *
			 * This is a workaround fix for
			 * AMD-8132's inability to handle MMCFG
			 * accesses on Galaxy's PE servers
			 * AND
			 * to disable MMCFG for any PCI device.
			 *
			 * If a device is *not* found to have PCIe
			 * capability, then assume it is a PCI device.
			 */

			if (is_amd_northbridge(rdip) == 0 ||
			    (ddi_prop_get_int(DDI_DEV_T_ANY, rdip,
			    DDI_PROP_DONTPASS, "pcie-capid-pointer",
			    PCI_CAP_NEXT_PTR_NULL) == PCI_CAP_NEXT_PTR_NULL)) {
				if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
				    mp->map_handlep->ah_acc.devacc_attr_access
				    != DDI_DEFAULT_ACC) {
					ndi_fmc_remove(rdip, ACC_HANDLE,
					    (void *)mp->map_handlep);
				}
				return (DDI_SUCCESS);
			}


			/* FALLTHROUGH */
		case PCI_ADDR_MEM64:
			/*
			 * MEM64 requires special treatment on map, to check
			 * that the device is below 4G.  On unmap, however,
			 * we can assume that everything is OK... the map
			 * must have succeeded.
			 */
			/* FALLTHROUGH */
		case PCI_ADDR_MEM32:
			reg.regspec_bustype = 0;
			break;

		default:
			return (DDI_FAILURE);
		}

		/*
		 * Adjust offset and length
		 * A non-zero length means override the one in the regspec.
		 */
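		/*
		 * For the unmap/unlock path the adjusted regspec is simply
		 * handed back to the parent root nexus via ddi_map(); if the
		 * access handle was registered with the FMA access cache at
		 * map time, it is removed again here since it can no longer
		 * be the target of a protected access.
		 */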
		pci_rp->pci_phys_low += (uint_t)offset;
		if (len != 0)
			pci_rp->pci_size_low = len;

		reg.regspec_addr = pci_rp->pci_phys_low;
		reg.regspec_size = pci_rp->pci_size_low;

		mp->map_obj.rp = &reg;
		retval = ddi_map(dip, mp, (off_t)0, (off_t)0, vaddrp);
		if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
		    mp->map_handlep->ah_acc.devacc_attr_access !=
		    DDI_DEFAULT_ACC) {
			ndi_fmc_remove(rdip, ACC_HANDLE,
			    (void *)mp->map_handlep);
		}
		return (retval);

	}

	/* check for user mapping request - not legal for Config */
	if (mp->map_op == DDI_MO_MAP_HANDLE && space == PCI_ADDR_CONFIG) {
		cmn_err(CE_NOTE, "npe: Config mapping request from user\n");
		return (DDI_FAILURE);
	}


	/*
	 * Note that pci_fm_acc_setup() is called to serve two purposes
	 * i) enable legacy PCI I/O style config space access
	 * ii) register with FMA
	 */
	if (space == PCI_ADDR_CONFIG) {
		/* Can't map config space without a handle */
		hp = (ddi_acc_hdl_t *)mp->map_handlep;
		if (hp == NULL)
			return (DDI_FAILURE);

		/* record the device address for future reference */
		cfp = (pci_acc_cfblk_t *)&hp->ah_bus_private;
		cfp->c_busnum = PCI_REG_BUS_G(pci_rp->pci_phys_hi);
		cfp->c_devnum = PCI_REG_DEV_G(pci_rp->pci_phys_hi);
		cfp->c_funcnum = PCI_REG_FUNC_G(pci_rp->pci_phys_hi);

		*vaddrp = (caddr_t)offset;

		/*
		 * Check for AMD's northbridges, pci devices and
		 * devices underneath a pci bridge.  This is to setup
		 * I/O based config space access.
		 */
		if (is_amd_northbridge(rdip) == 0 ||
		    (ddi_prop_get_int(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
		    "pcie-capid-pointer", PCI_CAP_NEXT_PTR_NULL) ==
		    PCI_CAP_NEXT_PTR_NULL)) {
			int ret;

			if ((ret = pci_fm_acc_setup(hp, offset, len)) ==
			    DDI_SUCCESS) {
				if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
				    mp->map_handlep->ah_acc.devacc_attr_access
				    != DDI_DEFAULT_ACC) {
					ndi_fmc_insert(rdip, ACC_HANDLE,
					    (void *)mp->map_handlep, NULL);
				}
			}
			return (ret);
		}

		/*
		 * MMCFG (ECAM) access: each function's config space lives
		 * at ecfga-base + (bus << 20) + (device << 15) +
		 * (function << 12), i.e. 4KB of config space per function.
		 */
		pci_rp->pci_phys_low = ddi_prop_get_int64(DDI_DEV_T_ANY,
		    rdip, 0, "ecfga-base-address", 0);

		pci_rp->pci_phys_low += ((cfp->c_busnum << 20) |
		    (cfp->c_devnum) << 15 | (cfp->c_funcnum << 12));

		pci_rp->pci_size_low = PCIE_CONF_HDR_SIZE;
	}

	length = pci_rp->pci_size_low;

	/*
	 * range check
	 */
	if ((offset >= length) || (len > length) || (offset + len > length))
		return (DDI_FAILURE);

	/*
	 * Adjust offset and length
	 * A non-zero length means override the one in the regspec.
	 */
	pci_rp->pci_phys_low += (uint_t)offset;
	if (len != 0)
		pci_rp->pci_size_low = len;

	/*
	 * convert the pci regspec into the generic regspec used by the
	 * parent root nexus driver.
	 */
	switch (space) {
	case PCI_ADDR_IO:
		reg.regspec_bustype = 1;
		break;
	case PCI_ADDR_CONFIG:
	case PCI_ADDR_MEM64:
		/*
		 * We can't handle 64-bit devices that are mapped above
		 * 4G or that are larger than 4G.
		 */
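		/*
		 * pci_phys_mid carries the upper 32 bits of a 64-bit
		 * assigned address and pci_size_hi the upper 32 bits of the
		 * size; either being non-zero means the region cannot be
		 * expressed in the 32-bit regspec handed to the root nexus
		 * below.
		 */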
		if (pci_rp->pci_phys_mid != 0 || pci_rp->pci_size_hi != 0)
			return (DDI_FAILURE);
		/*
		 * Other than that, we can treat them as 32-bit mappings
		 */
		/* FALLTHROUGH */
	case PCI_ADDR_MEM32:
		reg.regspec_bustype = 0;
		break;
	default:
		return (DDI_FAILURE);
	}

	reg.regspec_addr = pci_rp->pci_phys_low;
	reg.regspec_size = pci_rp->pci_size_low;

	mp->map_obj.rp = &reg;
	retval = ddi_map(dip, mp, (off_t)0, (off_t)0, vaddrp);
	if (retval == DDI_SUCCESS) {
		/*
		 * For config space gets force use of cautious access routines.
		 * These will handle default and protected mode accesses too.
		 */
		if (space == PCI_ADDR_CONFIG) {
			ap = (ddi_acc_impl_t *)mp->map_handlep;
			ap->ahi_acc_attr &= ~DDI_ACCATTR_DIRECT;
			ap->ahi_acc_attr |= DDI_ACCATTR_CONFIG_SPACE;
			ap->ahi_get8 = i_ddi_caut_get8;
			ap->ahi_get16 = i_ddi_caut_get16;
			ap->ahi_get32 = i_ddi_caut_get32;
			ap->ahi_get64 = i_ddi_caut_get64;
			ap->ahi_rep_get8 = i_ddi_caut_rep_get8;
			ap->ahi_rep_get16 = i_ddi_caut_rep_get16;
			ap->ahi_rep_get32 = i_ddi_caut_rep_get32;
			ap->ahi_rep_get64 = i_ddi_caut_rep_get64;
		}
		if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
		    mp->map_handlep->ah_acc.devacc_attr_access !=
		    DDI_DEFAULT_ACC) {
			ndi_fmc_insert(rdip, ACC_HANDLE,
			    (void *)mp->map_handlep, NULL);
		}
	}
	return (retval);
}



/*ARGSUSED*/
static int
npe_ctlops(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t ctlop, void *arg, void *result)
{
	int		rn;
	int		totreg;
	uint_t		reglen;
	pci_regspec_t	*drv_regp;
	struct attachspec *asp;
	struct detachspec *dsp;
	pci_state_t	*pci_p = ddi_get_soft_state(npe_statep,
	    ddi_get_instance(dip));

	switch (ctlop) {
	case DDI_CTLOPS_REPORTDEV:
		if (rdip == (dev_info_t *)0)
			return (DDI_FAILURE);
		cmn_err(CE_CONT, "?PCI Express-device: %s@%s, %s%d\n",
		    ddi_node_name(rdip), ddi_get_name_addr(rdip),
		    ddi_driver_name(rdip), ddi_get_instance(rdip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
		return (npe_initchild((dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		return (npe_removechild((dev_info_t *)arg));

	case DDI_CTLOPS_SIDDEV:
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
		if (rdip == (dev_info_t *)0)
			return (DDI_FAILURE);

		*(int *)result = 0;
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, rdip,
		    DDI_PROP_DONTPASS, "reg", (int **)&drv_regp,
		    &reglen) != DDI_PROP_SUCCESS) {
			return (DDI_FAILURE);
		}

		totreg = (reglen * sizeof (int)) / sizeof (pci_regspec_t);
		if (ctlop == DDI_CTLOPS_NREGS)
			*(int *)result = totreg;
		else if (ctlop == DDI_CTLOPS_REGSIZE) {
			rn = *(int *)arg;
			if (rn >= totreg) {
				ddi_prop_free(drv_regp);
				return (DDI_FAILURE);
			}
			*(off_t *)result = drv_regp[rn].pci_size_low;
		}
		ddi_prop_free(drv_regp);

		return (DDI_SUCCESS);

	case DDI_CTLOPS_POWER:
	{
		power_req_t	*reqp = (power_req_t *)arg;
		/*
		 * We currently understand reporting of PCI_PM_IDLESPEED
		 * capability.  Everything else is passed up.
		 */
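		/*
		 * Acknowledging the PCI_PM_IDLESPEED report is all that is
		 * required here; any other power request breaks out of the
		 * switch and is handled by the generic ddi_ctlops() call at
		 * the bottom of this routine.
		 */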
		if ((reqp->request_type == PMR_REPORT_PMCAP) &&
		    (reqp->req.report_pmcap_req.cap == PCI_PM_IDLESPEED))
			return (DDI_SUCCESS);

		break;
	}

	case DDI_CTLOPS_PEEK:
	case DDI_CTLOPS_POKE:
		return (pci_common_peekpoke(dip, rdip, ctlop, arg, result));

	/* X86 systems support PME wakeup from suspended state */
	case DDI_CTLOPS_ATTACH:
		if (!pcie_is_child(dip, rdip))
			return (DDI_SUCCESS);

		asp = (struct attachspec *)arg;
		if ((asp->when == DDI_POST) && (asp->result == DDI_SUCCESS)) {
			pf_init(rdip, (void *)pci_p->pci_fm_ibc, asp->cmd);
			(void) pcie_postattach_child(rdip);
		}

		/* only do this for immediate children */
		if (asp->cmd == DDI_RESUME && asp->when == DDI_PRE &&
		    ddi_get_parent(rdip) == dip)
			if (pci_pre_resume(rdip) != DDI_SUCCESS) {
				/* Not good, better stop now. */
				cmn_err(CE_PANIC,
				    "Couldn't pre-resume device %p",
				    (void *) dip);
				/* NOTREACHED */
			}

		return (DDI_SUCCESS);

	case DDI_CTLOPS_DETACH:
		if (!pcie_is_child(dip, rdip))
			return (DDI_SUCCESS);

		dsp = (struct detachspec *)arg;

		if (dsp->when == DDI_PRE)
			pf_fini(rdip, dsp->cmd);

		/* only do this for immediate children */
		if (dsp->cmd == DDI_SUSPEND && dsp->when == DDI_POST &&
		    ddi_get_parent(rdip) == dip)
			if (pci_post_suspend(rdip) != DDI_SUCCESS)
				return (DDI_FAILURE);

		return (DDI_SUCCESS);

	default:
		break;
	}

	return (ddi_ctlops(dip, rdip, ctlop, arg, result));

}


/*
 * npe_intr_ops
 */
static int
npe_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	return (pci_common_intr_ops(pdip, rdip, intr_op, hdlp, result));
}


static int
npe_initchild(dev_info_t *child)
{
	char		name[80];
	pcie_bus_t	*bus_p;
	uint32_t	regs;
	ddi_acc_handle_t	cfg_hdl;

	/*
	 * Do not bind drivers to empty bridges.
	 * Fail above, if the bridge is found to be hotplug capable
	 */
	if (npe_disable_empty_bridges_workaround(child) == 1)
		return (DDI_FAILURE);

	if (pci_common_name_child(child, name, 80) != DDI_SUCCESS)
		return (DDI_FAILURE);

	ddi_set_name_addr(child, name);

	/*
	 * Pseudo nodes indicate a prototype node with per-instance
	 * properties to be merged into the real h/w device node.
	 * The interpretation of the unit-address is DD[,F]
	 * where DD is the device id and F is the function.
	 */
	if (ndi_dev_is_persistent_node(child) == 0) {
		extern int pci_allow_pseudo_children;

		ddi_set_parent_data(child, NULL);

		/*
		 * Try to merge the properties from this prototype
		 * node into real h/w nodes.
		 */
		if (ndi_merge_node(child, pci_common_name_child) ==
		    DDI_SUCCESS) {
			/*
			 * Merged ok - return failure to remove the node.
			 */
			ddi_set_name_addr(child, NULL);
			return (DDI_FAILURE);
		}

		/* workaround for DDIVS to run under PCI Express */
		if (pci_allow_pseudo_children) {
			/*
			 * If the "interrupts" property doesn't exist,
			 * this must be the ddivs no-intr case, and it returns
			 * DDI_SUCCESS instead of DDI_FAILURE.
			 */
			if (ddi_prop_get_int(DDI_DEV_T_ANY, child,
			    DDI_PROP_DONTPASS, "interrupts", -1) == -1)
				return (DDI_SUCCESS);
			/*
			 * Create the ddi_parent_private_data for a pseudo
			 * child.
			 */
			pci_common_set_parent_private_data(child);
			return (DDI_SUCCESS);
		}

		/*
		 * The child was not merged into a h/w node,
		 * but there's not much we can do with it other
		 * than return failure to cause the node to be removed.
		 */
		cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
		    ddi_get_name(child), ddi_get_name_addr(child),
		    ddi_get_name(child));
		ddi_set_name_addr(child, NULL);
		return (DDI_NOT_WELL_FORMED);
	}

	if (ddi_prop_get_int(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "interrupts", -1) != -1)
		pci_common_set_parent_private_data(child);
	else
		ddi_set_parent_data(child, NULL);

	/* Disable certain errors on PCIe drivers for x86 platforms */
	regs = pcie_get_aer_uce_mask() | npe_aer_uce_mask;
	pcie_set_aer_uce_mask(regs);
	regs = pcie_get_aer_ce_mask() | npe_aer_ce_mask;
	pcie_set_aer_ce_mask(regs);
	regs = pcie_get_aer_suce_mask() | npe_aer_suce_mask;
	pcie_set_aer_suce_mask(regs);

	/*
	 * If URs are disabled, mask SERRs as well, otherwise the system will
	 * still be notified of URs
	 */
	if (npe_aer_uce_mask & PCIE_AER_UCE_UR)
		pcie_set_serr_mask(1);

	if (pci_config_setup(child, &cfg_hdl) == DDI_SUCCESS) {
		npe_ck804_fix_aer_ptr(cfg_hdl);
		npe_nvidia_error_mask(cfg_hdl);
		npe_intel_error_mask(cfg_hdl);
		pci_config_teardown(&cfg_hdl);
	}

	bus_p = pcie_init_bus(child);
	if (bus_p) {
		uint16_t device_id = (uint16_t)(bus_p->bus_dev_ven_id >> 16);
		uint16_t vendor_id = (uint16_t)(bus_p->bus_dev_ven_id & 0xFFFF);
		uint16_t rev_id = bus_p->bus_rev_id;

		/* Disable AER for certain NVIDIA Chipsets */
		if ((vendor_id == NVIDIA_VENDOR_ID) &&
		    (device_id == NVIDIA_CK804_DEVICE_ID) &&
		    (rev_id < NVIDIA_CK804_AER_VALID_REVID))
			bus_p->bus_aer_off = 0;

		(void) pcie_initchild(child);
	}

	return (DDI_SUCCESS);
}


static int
npe_removechild(dev_info_t *dip)
{
	pcie_uninitchild(dip);

	ddi_set_name_addr(dip, NULL);

	/*
	 * Strip the node to properly convert it back to prototype form
	 */
	ddi_remove_minor_node(dip, NULL);

	ddi_prop_remove_all(dip);

	return (DDI_SUCCESS);
}


/*
 * When retrofitting this module for pci_tools, functions such as open, close,
 * and ioctl are now pulled into this module.  Before this, the functions in
 * the pcihp module were referenced directly.  Now they are called or
 * referenced through the pcihp cb_ops structure from functions in this module.
 */
static int
npe_open(dev_t *devp, int flags, int otyp, cred_t *credp)
{
	return ((pcihp_get_cb_ops())->cb_open(devp, flags, otyp, credp));
}

static int
npe_close(dev_t dev, int flags, int otyp, cred_t *credp)
{
	return ((pcihp_get_cb_ops())->cb_close(dev, flags, otyp, credp));
}

static int
npe_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
{
	minor_t		minor = getminor(dev);
	int		instance = PCIHP_AP_MINOR_NUM_TO_INSTANCE(minor);
	pci_state_t	*pci_p = ddi_get_soft_state(npe_statep, instance);
	dev_info_t	*dip;

	if (pci_p == NULL)
		return (ENXIO);

	dip = pci_p->pci_dip;

	return (pci_common_ioctl(dip, dev, cmd, arg, mode, credp, rvalp));
}

static int
npe_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int flags, char *name, caddr_t valuep, int *lengthp)
{
	return ((pcihp_get_cb_ops())->cb_prop_op(dev, dip, prop_op, flags,
	    name, valuep, lengthp));
}

static int
npe_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	return (pcihp_info(dip, cmd, arg, result));
}

/*ARGSUSED*/
static int
npe_fm_init(dev_info_t *dip, dev_info_t *tdip, int cap,
    ddi_iblock_cookie_t *ibc)
{
	pci_state_t *pcip = ddi_get_soft_state(npe_statep,
	    ddi_get_instance(dip));

	ASSERT(ibc != NULL);
	*ibc = pcip->pci_fm_ibc;

	return (pcip->pci_fmcap);
}

/*ARGSUSED*/
static int
npe_fm_callback(dev_info_t *dip, ddi_fm_error_t *derr, const void *no_used)
{
	/*
	 * On current x86 systems, npe's callback does not get called for
	 * failed loads.  If in the future this feature is used, the fault PA
	 * should be logged in the derr->fme_bus_specific field.  The
	 * appropriate PCIe error handling code should be called and needs to
	 * be coordinated with safe access handling.
	 */

	return (DDI_FM_OK);
}