/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

/*
 * Host to PCI-Express local bus driver
 */

#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/pci_impl.h>
#include <sys/pcie_impl.h>
#include <sys/sysmacros.h>
#include <sys/ddi_intr.h>
#include <sys/sunndi.h>
#include <sys/sunddi.h>
#include <sys/ddifm.h>
#include <sys/ndifm.h>
#include <sys/fm/util.h>
#include <sys/hotplug/pci/pcihp.h>
#include <io/pci/pci_tools_ext.h>
#include <io/pci/pci_common.h>
#include <io/pciex/pcie_nvidia.h>

/*
 * Bus Operation functions
 */
static int npe_bus_map(dev_info_t *, dev_info_t *, ddi_map_req_t *,
    off_t, off_t, caddr_t *);
static int npe_ctlops(dev_info_t *, dev_info_t *, ddi_ctl_enum_t,
    void *, void *);
static int npe_intr_ops(dev_info_t *, dev_info_t *, ddi_intr_op_t,
    ddi_intr_handle_impl_t *, void *);
static int npe_fm_init(dev_info_t *, dev_info_t *, int,
    ddi_iblock_cookie_t *);

static int npe_fm_callback(dev_info_t *, ddi_fm_error_t *, const void *);

/*
 * Disable URs and Received MA for all PCIe devices.  Until x86 SW is changed
 * so that random drivers do not do PIO accesses on devices that they do not
 * own, these error bits must be disabled.  SERR must also be disabled if URs
 * have been masked.
 */
uint32_t npe_aer_uce_mask = PCIE_AER_UCE_UR;
uint32_t npe_aer_ce_mask = 0;
uint32_t npe_aer_suce_mask = PCIE_AER_SUCE_RCVD_MA;

struct bus_ops npe_bus_ops = {
    BUSO_REV,
    npe_bus_map,
    NULL,
    NULL,
    NULL,
    i_ddi_map_fault,
    ddi_dma_map,
    ddi_dma_allochdl,
    ddi_dma_freehdl,
    ddi_dma_bindhdl,
    ddi_dma_unbindhdl,
    ddi_dma_flush,
    ddi_dma_win,
    ddi_dma_mctl,
    npe_ctlops,
    ddi_bus_prop_op,
    0,              /* (*bus_get_eventcookie)(); */
    0,              /* (*bus_add_eventcall)(); */
    0,              /* (*bus_remove_eventcall)(); */
    0,              /* (*bus_post_event)(); */
    0,              /* (*bus_intr_ctl)(); */
    0,              /* (*bus_config)(); */
    0,              /* (*bus_unconfig)(); */
    npe_fm_init,    /* (*bus_fm_init)(); */
    NULL,           /* (*bus_fm_fini)(); */
    NULL,           /* (*bus_fm_access_enter)(); */
    NULL,           /* (*bus_fm_access_exit)(); */
    NULL,           /* (*bus_power)(); */
    npe_intr_ops    /* (*bus_intr_op)(); */
};

/*
 * One goal here is to leverage off of the pcihp.c source without making
 * changes to it.  Call into its cb_ops directly if needed, piggybacking
 * anything else needed by the pci_tools.c module.  Only pci_tools and pcihp
 * will be using the PCI devctl node.
 */
static int npe_open(dev_t *, int, int, cred_t *);
static int npe_close(dev_t, int, int, cred_t *);
static int npe_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int npe_prop_op(dev_t, dev_info_t *, ddi_prop_op_t, int, char *,
    caddr_t, int *);
static int npe_info(dev_info_t *, ddi_info_cmd_t, void *, void **);

struct cb_ops npe_cb_ops = {
    npe_open,                   /* open */
    npe_close,                  /* close */
    nodev,                      /* strategy */
    nodev,                      /* print */
    nodev,                      /* dump */
    nodev,                      /* read */
    nodev,                      /* write */
    npe_ioctl,                  /* ioctl */
    nodev,                      /* devmap */
    nodev,                      /* mmap */
    nodev,                      /* segmap */
    nochpoll,                   /* poll */
    npe_prop_op,                /* cb_prop_op */
    NULL,                       /* streamtab */
    D_NEW | D_MP | D_HOTPLUG,   /* Driver compatibility flag */
    CB_REV,                     /* rev */
    nodev,                      /* int (*cb_aread)() */
    nodev                       /* int (*cb_awrite)() */
};


/*
 * Device Node Operation functions
 */
static int npe_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int npe_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);

struct dev_ops npe_ops = {
    DEVO_REV,       /* devo_rev */
    0,              /* refcnt */
    npe_info,       /* info */
    nulldev,        /* identify */
    nulldev,        /* probe */
    npe_attach,     /* attach */
    npe_detach,     /* detach */
    nulldev,        /* reset */
    &npe_cb_ops,    /* driver operations */
    &npe_bus_ops    /* bus operations */
};

/*
 * Internal routines in support of particular npe_ctlops.
 */
static int npe_removechild(dev_info_t *child);
static int npe_initchild(dev_info_t *child);

/*
 * External support routines
 */
extern void npe_query_acpi_mcfg(dev_info_t *dip);
extern void npe_ck804_fix_aer_ptr(ddi_acc_handle_t cfg_hdl);
extern int npe_disable_empty_bridges_workaround(dev_info_t *child);
extern void npe_nvidia_error_mask(ddi_acc_handle_t cfg_hdl);

/*
 * Module linkage information for the kernel.
 */
static struct modldrv modldrv = {
    &mod_driverops,                     /* Type of module */
    "Host to PCIe nexus driver %I%",
    &npe_ops,                           /* driver ops */
};

static struct modlinkage modlinkage = {
    MODREV_1,
    (void *)&modldrv,
    NULL
};

/* Save minimal state. */
void *npe_statep;

int
_init(void)
{
    int e;

    /*
     * Initialize per-pci bus soft state pointer.
     */
    e = ddi_soft_state_init(&npe_statep, sizeof (pci_state_t), 1);
    if (e != 0)
        return (e);

    if ((e = mod_install(&modlinkage)) != 0)
        ddi_soft_state_fini(&npe_statep);

    return (e);
}


int
_fini(void)
{
    int rc;

    rc = mod_remove(&modlinkage);
    if (rc != 0)
        return (rc);

    ddi_soft_state_fini(&npe_statep);
    return (rc);
}


int
_info(struct modinfo *modinfop)
{
    return (mod_info(&modlinkage, modinfop));
}

/*ARGSUSED*/
static int
npe_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
    /*
     * Use the minor number as constructed by pcihp, as the index value to
     * ddi_soft_state_zalloc.
     */
    int instance = ddi_get_instance(devi);
    pci_state_t *pcip = NULL;

    if (cmd == DDI_RESUME)
        return (DDI_SUCCESS);

    if (ddi_prop_update_string(DDI_DEV_T_NONE, devi, "device_type",
        "pciex") != DDI_PROP_SUCCESS) {
        cmn_err(CE_WARN, "npe: 'device_type' prop create failed");
    }

    if (ddi_soft_state_zalloc(npe_statep, instance) == DDI_SUCCESS)
        pcip = ddi_get_soft_state(npe_statep, instance);

    if (pcip == NULL)
        return (DDI_FAILURE);

    pcip->pci_dip = devi;

    pcie_rc_init_bus(devi);

    /*
     * Initialize hotplug support on this bus.  At minimum (for a
     * non-hotplug bus) this creates the ":devctl" minor node to support
     * DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls to this bus.
     */
    if (pcihp_init(devi) != DDI_SUCCESS) {
        cmn_err(CE_WARN, "npe: Failed to setup hotplug framework");
        ddi_soft_state_free(npe_statep, instance);
        return (DDI_FAILURE);
    }

    /* Second arg: initialize for pci_express root nexus */
    if (pcitool_init(devi, B_TRUE) != DDI_SUCCESS) {
        (void) pcihp_uninit(devi);
        ddi_soft_state_free(npe_statep, instance);
        return (DDI_FAILURE);
    }

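    /*
     * Advertise full FMA capability for this nexus: ereport generation,
     * an error callback, and access/DMA handle checking.  The pf_data_t
     * allocated below holds the root complex fault data used by the PCIe
     * fabric error-handling (pf_*) code.
     */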
    pcip->pci_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE |
        DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;
    ddi_fm_init(devi, &pcip->pci_fmcap, &pcip->pci_fm_ibc);

    if (pcip->pci_fmcap & DDI_FM_ERRCB_CAPABLE) {
        ddi_fm_handler_register(devi, npe_fm_callback, NULL);
    }

    PCIE_DIP2PFD(devi) = kmem_zalloc(sizeof (pf_data_t), KM_SLEEP);
    pcie_rc_init_pfd(devi, PCIE_DIP2PFD(devi));

    npe_query_acpi_mcfg(devi);
    ddi_report_dev(devi);
    return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
npe_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
    int instance = ddi_get_instance(devi);
    pci_state_t *pcip;

    pcip = ddi_get_soft_state(npe_statep, ddi_get_instance(devi));

    switch (cmd) {
    case DDI_DETACH:

        /* Uninitialize pcitool support. */
        pcitool_uninit(devi);

        /*
         * Uninitialize hotplug support on this bus.
         */
        (void) pcihp_uninit(devi);

        if (pcip->pci_fmcap & DDI_FM_ERRCB_CAPABLE)
            ddi_fm_handler_unregister(devi);

        pcie_rc_fini_bus(devi);
        pcie_rc_fini_pfd(PCIE_DIP2PFD(devi));
        kmem_free(PCIE_DIP2PFD(devi), sizeof (pf_data_t));

        ddi_fm_fini(devi);
        ddi_soft_state_free(npe_statep, instance);
        return (DDI_SUCCESS);

    case DDI_SUSPEND:
        return (DDI_SUCCESS);
    default:
        return (DDI_FAILURE);
    }
}


static int
npe_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp)
{
    int rnumber;
    int length;
    int space;
    ddi_acc_impl_t *ap;
    ddi_acc_hdl_t *hp;
    ddi_map_req_t mr;
    pci_regspec_t pci_reg;
    pci_regspec_t *pci_rp;
    struct regspec reg;
    pci_acc_cfblk_t *cfp;
    int retval;

    mr = *mp;       /* Get private copy of request */
    mp = &mr;

    /*
     * check for register number
     */
    switch (mp->map_type) {
    case DDI_MT_REGSPEC:
        pci_reg = *(pci_regspec_t *)(mp->map_obj.rp);
        pci_rp = &pci_reg;
        if (pci_common_get_reg_prop(rdip, pci_rp) != DDI_SUCCESS)
            return (DDI_FAILURE);
        break;
    case DDI_MT_RNUMBER:
        rnumber = mp->map_obj.rnumber;
        /*
         * get ALL "reg" properties for dip, select the one of
         * interest.  In x86, the "assigned-addresses" property
         * is identical to the "reg" property, so there is no
         * need to cross check the two to determine the physical
         * address of the registers.
         * This routine still performs some validity checks to
         * make sure that everything is okay.
         */
        if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, rdip,
            DDI_PROP_DONTPASS, "reg", (int **)&pci_rp,
            (uint_t *)&length) != DDI_PROP_SUCCESS)
            return (DDI_FAILURE);

        /*
         * validate the register number.
         */
        length /= (sizeof (pci_regspec_t) / sizeof (int));
        if (rnumber >= length) {
            ddi_prop_free(pci_rp);
            return (DDI_FAILURE);
        }

        /*
         * copy the required entry.
         */
        pci_reg = pci_rp[rnumber];

        /*
         * free the memory allocated by ddi_prop_lookup_int_array
         */
        ddi_prop_free(pci_rp);

        pci_rp = &pci_reg;
        if (pci_common_get_reg_prop(rdip, pci_rp) != DDI_SUCCESS)
            return (DDI_FAILURE);
        mp->map_type = DDI_MT_REGSPEC;
        break;
    default:
        return (DDI_ME_INVAL);
    }

    space = pci_rp->pci_phys_hi & PCI_REG_ADDR_M;

    /*
     * check for unmap and unlock of address space
     */
    if ((mp->map_op == DDI_MO_UNMAP) || (mp->map_op == DDI_MO_UNLOCK)) {
        switch (space) {
        case PCI_ADDR_IO:
            reg.regspec_bustype = 1;
            break;

        case PCI_ADDR_CONFIG:
            /*
             * Check for AMD's northbridges
             * AND
             * for any PCI device.
             *
             * This is a workaround fix for
             * AMD-8132's inability to handle MMCFG
             * accesses on Galaxy's PE servers
             * AND
             * to disable MMCFG for any PCI device.
             *
             * If a device is *not* found to have PCIe
             * capability, then assume it is a PCI device.
             */

            if (is_amd_northbridge(rdip) == 0 ||
                (ddi_prop_get_int(DDI_DEV_T_ANY, rdip,
                DDI_PROP_DONTPASS, "pcie-capid-pointer",
                PCI_CAP_NEXT_PTR_NULL) == PCI_CAP_NEXT_PTR_NULL)) {
                if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
                    mp->map_handlep->ah_acc.devacc_attr_access
                    != DDI_DEFAULT_ACC) {
                    ndi_fmc_remove(rdip, ACC_HANDLE,
                        (void *)mp->map_handlep);
                }
                return (DDI_SUCCESS);
            }

            /* FALLTHROUGH */
        case PCI_ADDR_MEM64:
            /*
             * MEM64 requires special treatment on map, to check
             * that the device is below 4G.  On unmap, however,
             * we can assume that everything is OK... the map
             * must have succeeded.
             */
            /* FALLTHROUGH */
        case PCI_ADDR_MEM32:
            reg.regspec_bustype = 0;
            break;

        default:
            return (DDI_FAILURE);
        }

        /*
         * Adjust offset and length.
         * A non-zero length means override the one in the regspec.
         */
        pci_rp->pci_phys_low += (uint_t)offset;
        if (len != 0)
            pci_rp->pci_size_low = len;

        reg.regspec_addr = pci_rp->pci_phys_low;
        reg.regspec_size = pci_rp->pci_size_low;

        mp->map_obj.rp = &reg;
        retval = ddi_map(dip, mp, (off_t)0, (off_t)0, vaddrp);
        if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
            mp->map_handlep->ah_acc.devacc_attr_access !=
            DDI_DEFAULT_ACC) {
            ndi_fmc_remove(rdip, ACC_HANDLE,
                (void *)mp->map_handlep);
        }
        return (retval);
    }

    /* check for user mapping request - not legal for Config */
    if (mp->map_op == DDI_MO_MAP_HANDLE && space == PCI_ADDR_CONFIG) {
        cmn_err(CE_NOTE, "npe: Config mapping request from user\n");
        return (DDI_FAILURE);
    }

    /*
     * Note that pci_fm_acc_setup() is called to serve two purposes:
     * i) enable legacy PCI I/O style config space access
     * ii) register with FMA
     */
    if (space == PCI_ADDR_CONFIG) {
        /* Can't map config space without a handle */
        hp = (ddi_acc_hdl_t *)mp->map_handlep;
        if (hp == NULL)
            return (DDI_FAILURE);

        /* record the device address for future reference */
        cfp = (pci_acc_cfblk_t *)&hp->ah_bus_private;
        cfp->c_busnum = PCI_REG_BUS_G(pci_rp->pci_phys_hi);
        cfp->c_devnum = PCI_REG_DEV_G(pci_rp->pci_phys_hi);
        cfp->c_funcnum = PCI_REG_FUNC_G(pci_rp->pci_phys_hi);

        *vaddrp = (caddr_t)offset;

        /*
         * Check for AMD's northbridges, pci devices and
         * devices underneath a pci bridge.  This is to setup
         * I/O based config space access.
         */
        if (is_amd_northbridge(rdip) == 0 ||
            (ddi_prop_get_int(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
            "pcie-capid-pointer", PCI_CAP_NEXT_PTR_NULL) ==
            PCI_CAP_NEXT_PTR_NULL)) {
            int ret;

            if ((ret = pci_fm_acc_setup(hp, offset, len)) ==
                DDI_SUCCESS) {
                if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
                    mp->map_handlep->ah_acc.devacc_attr_access
                    != DDI_DEFAULT_ACC) {
                    ndi_fmc_insert(rdip, ACC_HANDLE,
                        (void *)mp->map_handlep, NULL);
                }
            }
            return (ret);
        }

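        /*
         * PCIe device that is not an AMD northbridge: use memory-mapped
         * (ECAM) config access.  Each function's 4KB of config space
         * sits at ecfga-base-address +
         * (bus << 20) + (dev << 15) + (func << 12).
         */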
        pci_rp->pci_phys_low = ddi_prop_get_int64(DDI_DEV_T_ANY,
            rdip, 0, "ecfga-base-address", 0);

        pci_rp->pci_phys_low += ((cfp->c_busnum << 20) |
            (cfp->c_devnum) << 15 | (cfp->c_funcnum << 12));

        pci_rp->pci_size_low = PCIE_CONF_HDR_SIZE;
    }

    length = pci_rp->pci_size_low;

    /*
     * range check
     */
    if ((offset >= length) || (len > length) || (offset + len > length))
        return (DDI_FAILURE);

    /*
     * Adjust offset and length.
     * A non-zero length means override the one in the regspec.
     */
    pci_rp->pci_phys_low += (uint_t)offset;
    if (len != 0)
        pci_rp->pci_size_low = len;

    /*
     * convert the pci regspec into the generic regspec used by the
     * parent root nexus driver.
     */
    switch (space) {
    case PCI_ADDR_IO:
        reg.regspec_bustype = 1;
        break;
    case PCI_ADDR_CONFIG:
    case PCI_ADDR_MEM64:
        /*
         * We can't handle 64-bit devices that are mapped above
         * 4G or that are larger than 4G.
         */
        if (pci_rp->pci_phys_mid != 0 || pci_rp->pci_size_hi != 0)
            return (DDI_FAILURE);
        /*
         * Other than that, we can treat them as 32-bit mappings
         */
        /* FALLTHROUGH */
    case PCI_ADDR_MEM32:
        reg.regspec_bustype = 0;
        break;
    default:
        return (DDI_FAILURE);
    }

    reg.regspec_addr = pci_rp->pci_phys_low;
    reg.regspec_size = pci_rp->pci_size_low;

    mp->map_obj.rp = &reg;
    retval = ddi_map(dip, mp, (off_t)0, (off_t)0, vaddrp);
    if (retval == DDI_SUCCESS) {
        /*
         * For config space gets force use of cautious access routines.
         * These will handle default and protected mode accesses too.
         */
        if (space == PCI_ADDR_CONFIG) {
            ap = (ddi_acc_impl_t *)mp->map_handlep;
            ap->ahi_acc_attr &= ~DDI_ACCATTR_DIRECT;
            ap->ahi_acc_attr |= DDI_ACCATTR_CONFIG_SPACE;
            ap->ahi_get8 = i_ddi_caut_get8;
            ap->ahi_get16 = i_ddi_caut_get16;
            ap->ahi_get32 = i_ddi_caut_get32;
            ap->ahi_get64 = i_ddi_caut_get64;
            ap->ahi_rep_get8 = i_ddi_caut_rep_get8;
            ap->ahi_rep_get16 = i_ddi_caut_rep_get16;
            ap->ahi_rep_get32 = i_ddi_caut_rep_get32;
            ap->ahi_rep_get64 = i_ddi_caut_rep_get64;
        }
        if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
            mp->map_handlep->ah_acc.devacc_attr_access !=
            DDI_DEFAULT_ACC) {
            ndi_fmc_insert(rdip, ACC_HANDLE,
                (void *)mp->map_handlep, NULL);
        }
    }
    return (retval);
}


/*ARGSUSED*/
static int
npe_ctlops(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t ctlop, void *arg, void *result)
{
    int rn;
    int totreg;
    uint_t reglen;
    pci_regspec_t *drv_regp;
    struct attachspec *asp;
    struct detachspec *dsp;
    pci_state_t *pci_p = ddi_get_soft_state(npe_statep,
        ddi_get_instance(dip));

    switch (ctlop) {
    case DDI_CTLOPS_REPORTDEV:
        if (rdip == (dev_info_t *)0)
            return (DDI_FAILURE);
        cmn_err(CE_CONT, "?PCI Express-device: %s@%s, %s%d\n",
            ddi_node_name(rdip), ddi_get_name_addr(rdip),
            ddi_driver_name(rdip), ddi_get_instance(rdip));
        return (DDI_SUCCESS);

    case DDI_CTLOPS_INITCHILD:
        return (npe_initchild((dev_info_t *)arg));

    case DDI_CTLOPS_UNINITCHILD:
        return (npe_removechild((dev_info_t *)arg));

    case DDI_CTLOPS_SIDDEV:
        return (DDI_SUCCESS);

    case DDI_CTLOPS_REGSIZE:
    case DDI_CTLOPS_NREGS:
        if (rdip == (dev_info_t *)0)
            return (DDI_FAILURE);

        *(int *)result = 0;
        if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, rdip,
            DDI_PROP_DONTPASS, "reg", (int **)&drv_regp,
            &reglen) != DDI_PROP_SUCCESS) {
            return (DDI_FAILURE);
        }

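        /*
         * ddi_prop_lookup_int_array() returns the "reg" length as a
         * count of ints; each pci_regspec_t entry spans five ints
         * (phys hi/mid/low and size hi/low).
         */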
        totreg = (reglen * sizeof (int)) / sizeof (pci_regspec_t);
        if (ctlop == DDI_CTLOPS_NREGS)
            *(int *)result = totreg;
        else if (ctlop == DDI_CTLOPS_REGSIZE) {
            rn = *(int *)arg;
            if (rn >= totreg) {
                ddi_prop_free(drv_regp);
                return (DDI_FAILURE);
            }
            *(off_t *)result = drv_regp[rn].pci_size_low;
        }
        ddi_prop_free(drv_regp);

        return (DDI_SUCCESS);

    case DDI_CTLOPS_POWER:
    {
        power_req_t *reqp = (power_req_t *)arg;
        /*
         * We currently understand reporting of PCI_PM_IDLESPEED
         * capability.  Everything else is passed up.
         */
        if ((reqp->request_type == PMR_REPORT_PMCAP) &&
            (reqp->req.report_pmcap_req.cap == PCI_PM_IDLESPEED))
            return (DDI_SUCCESS);

        break;
    }

    case DDI_CTLOPS_PEEK:
    case DDI_CTLOPS_POKE:
        return (pci_common_peekpoke(dip, rdip, ctlop, arg, result));

    /* X86 systems support PME wakeup from suspended state */
    case DDI_CTLOPS_ATTACH:
        if (!pcie_is_child(dip, rdip))
            return (DDI_SUCCESS);

        asp = (struct attachspec *)arg;
        if ((asp->when == DDI_POST) && (asp->result == DDI_SUCCESS)) {
            pf_init(rdip, (void *)pci_p->pci_fm_ibc, asp->cmd);
            (void) pcie_postattach_child(rdip);
        }

        /* only do this for immediate children */
        if (asp->cmd == DDI_RESUME && asp->when == DDI_PRE &&
            ddi_get_parent(rdip) == dip)
            if (pci_pre_resume(rdip) != DDI_SUCCESS) {
                /* Not good, better stop now. */
                cmn_err(CE_PANIC,
                    "Couldn't pre-resume device %p",
                    (void *) dip);
                /* NOTREACHED */
            }

        return (DDI_SUCCESS);

    case DDI_CTLOPS_DETACH:
        if (!pcie_is_child(dip, rdip))
            return (DDI_SUCCESS);

        dsp = (struct detachspec *)arg;

        if (dsp->when == DDI_PRE)
            pf_fini(rdip, dsp->cmd);

        /* only do this for immediate children */
        if (dsp->cmd == DDI_SUSPEND && dsp->when == DDI_POST &&
            ddi_get_parent(rdip) == dip)
            if (pci_post_suspend(rdip) != DDI_SUCCESS)
                return (DDI_FAILURE);

        return (DDI_SUCCESS);

    default:
        break;
    }

    return (ddi_ctlops(dip, rdip, ctlop, arg, result));
}


/*
 * npe_intr_ops
 */
static int
npe_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
    return (pci_common_intr_ops(pdip, rdip, intr_op, hdlp, result));
}


static int
npe_initchild(dev_info_t *child)
{
    char name[80];
    pcie_bus_t *bus_p;
    uint32_t regs;
    ddi_acc_handle_t cfg_hdl;

    /*
     * Do not bind drivers to empty bridges.
     * Fail above, if the bridge is found to be hotplug capable
     */
    if (npe_disable_empty_bridges_workaround(child) == 1)
        return (DDI_FAILURE);

    if (pci_common_name_child(child, name, 80) != DDI_SUCCESS)
        return (DDI_FAILURE);

    ddi_set_name_addr(child, name);

    /*
     * Pseudo nodes indicate a prototype node with per-instance
     * properties to be merged into the real h/w device node.
     * The interpretation of the unit-address is DD[,F]
     * where DD is the device id and F is the function.
     */
    if (ndi_dev_is_persistent_node(child) == 0) {
        extern int pci_allow_pseudo_children;

        ddi_set_parent_data(child, NULL);

        /*
         * Try to merge the properties from this prototype
         * node into real h/w nodes.
         */
        if (ndi_merge_node(child, pci_common_name_child) ==
            DDI_SUCCESS) {
            /*
             * Merged ok - return failure to remove the node.
             */
            ddi_set_name_addr(child, NULL);
            return (DDI_FAILURE);
        }

        /* workaround for DDIVS to run under PCI Express */
        if (pci_allow_pseudo_children) {
            /*
             * If the "interrupts" property doesn't exist,
             * this must be the ddivs no-intr case, and it returns
             * DDI_SUCCESS instead of DDI_FAILURE.
             */
            if (ddi_prop_get_int(DDI_DEV_T_ANY, child,
                DDI_PROP_DONTPASS, "interrupts", -1) == -1)
                return (DDI_SUCCESS);
            /*
             * Create the ddi_parent_private_data for a pseudo
             * child.
             */
            pci_common_set_parent_private_data(child);
            return (DDI_SUCCESS);
        }

        /*
         * The child was not merged into a h/w node,
         * but there's not much we can do with it other
         * than return failure to cause the node to be removed.
         */
        cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
            ddi_get_name(child), ddi_get_name_addr(child),
            ddi_get_name(child));
        ddi_set_name_addr(child, NULL);
        return (DDI_NOT_WELL_FORMED);
    }

    if (ddi_prop_get_int(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
        "interrupts", -1) != -1)
        pci_common_set_parent_private_data(child);
    else
        ddi_set_parent_data(child, NULL);

    /* Disable certain errors on PCIe drivers for x86 platforms */
    regs = pcie_get_aer_uce_mask() | npe_aer_uce_mask;
    pcie_set_aer_uce_mask(regs);
    regs = pcie_get_aer_ce_mask() | npe_aer_ce_mask;
    pcie_set_aer_ce_mask(regs);
    regs = pcie_get_aer_suce_mask() | npe_aer_suce_mask;
    pcie_set_aer_suce_mask(regs);

    /*
     * If URs are disabled, mask SERRs as well, otherwise the system will
     * still be notified of URs
     */
    if (npe_aer_uce_mask & PCIE_AER_UCE_UR)
        pcie_set_serr_mask(1);

    if (pci_config_setup(child, &cfg_hdl) == DDI_SUCCESS) {
        npe_ck804_fix_aer_ptr(cfg_hdl);
        npe_nvidia_error_mask(cfg_hdl);
        pci_config_teardown(&cfg_hdl);
    }

    bus_p = pcie_init_bus(child);
    if (bus_p) {
        uint16_t device_id = (uint16_t)(bus_p->bus_dev_ven_id >> 16);
        uint16_t vendor_id = (uint16_t)(bus_p->bus_dev_ven_id & 0xFFFF);
        uint16_t rev_id = bus_p->bus_rev_id;

        /* Disable AER for certain NVIDIA Chipsets */
        if ((vendor_id == NVIDIA_VENDOR_ID) &&
            (device_id == NVIDIA_CK804_DEVICE_ID) &&
            (rev_id < NVIDIA_CK804_AER_VALID_REVID))
            bus_p->bus_aer_off = 0;

        (void) pcie_initchild(child);

        /* If device is an NVIDIA RC do device specific error setup */
        if ((vendor_id == NVIDIA_VENDOR_ID) &&
            NVIDIA_PCIE_RC_DEV_ID(device_id)) {
            ddi_acc_handle_t cfg_hdl = bus_p->bus_cfg_hdl;
            uint16_t rc_ctl;

            rc_ctl = pci_config_get16(cfg_hdl,
                NVIDIA_INTR_BCR_OFF + 0x2);
            pci_config_put16(cfg_hdl, NVIDIA_INTR_BCR_OFF + 0x2,
                rc_ctl | NVIDIA_INTR_BCR_SERR_FORWARD_BIT);
        }
    }

    return (DDI_SUCCESS);
}


static int
npe_removechild(dev_info_t *dip)
{
    pcie_uninitchild(dip);

    ddi_set_name_addr(dip, NULL);

    /*
     * Strip the node to properly convert it back to prototype form
     */
    ddi_remove_minor_node(dip, NULL);

    ddi_prop_remove_all(dip);

    return (DDI_SUCCESS);
}


/*
 * When retrofitting this module for pci_tools, functions such as open, close,
 * and ioctl are now pulled into this module.  Before this, the functions in
 * the pcihp module were referenced directly.  Now they are called or
 * referenced through the pcihp cb_ops structure from functions in this module.
 */
static int
npe_open(dev_t *devp, int flags, int otyp, cred_t *credp)
{
    return ((pcihp_get_cb_ops())->cb_open(devp, flags, otyp, credp));
}

static int
npe_close(dev_t dev, int flags, int otyp, cred_t *credp)
{
    return ((pcihp_get_cb_ops())->cb_close(dev, flags, otyp, credp));
}

static int
npe_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
{
    minor_t minor = getminor(dev);
    int instance = PCIHP_AP_MINOR_NUM_TO_INSTANCE(minor);
    pci_state_t *pci_p = ddi_get_soft_state(npe_statep, instance);
    dev_info_t *dip;

    if (pci_p == NULL)
        return (ENXIO);

    dip = pci_p->pci_dip;

    return (pci_common_ioctl(dip, dev, cmd, arg, mode, credp, rvalp));
}

static int
npe_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int flags, char *name, caddr_t valuep, int *lengthp)
{
    return ((pcihp_get_cb_ops())->cb_prop_op(dev, dip, prop_op, flags,
        name, valuep, lengthp));
}

static int
npe_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
    return (pcihp_info(dip, cmd, arg, result));
}

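/*
 * bus_fm_init entry point: hand back this nexus's iblock cookie and report
 * its FM capability level so that child drivers can set up their own FMA
 * support against it.
 */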
/*ARGSUSED*/
static int
npe_fm_init(dev_info_t *dip, dev_info_t *tdip, int cap,
    ddi_iblock_cookie_t *ibc)
{
    pci_state_t *pcip = ddi_get_soft_state(npe_statep,
        ddi_get_instance(dip));

    ASSERT(ibc != NULL);
    *ibc = pcip->pci_fm_ibc;

    return (pcip->pci_fmcap);
}

/*ARGSUSED*/
static int
npe_fm_callback(dev_info_t *dip, ddi_fm_error_t *derr, const void *no_used)
{
    /*
     * On current x86 systems, npe's callback does not get called for
     * failed loads.  If in the future this feature is used, the fault PA
     * should be logged in the derr->fme_bus_specific field.  The
     * appropriate PCIe error handling code should be called and needs to
     * be coordinated with safe access handling.
     */

    return (DDI_FM_OK);
}