/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Host to PCI-Express local bus driver
 */

#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/pci_impl.h>
#include <sys/pcie_impl.h>
#include <sys/sysmacros.h>
#include <sys/ddi_intr.h>
#include <sys/sunndi.h>
#include <sys/sunddi.h>
#include <sys/ddifm.h>
#include <sys/ndifm.h>
#include <sys/fm/util.h>
#include <sys/hotplug/pci/pcihp.h>
#include <io/pci/pci_tools_ext.h>
#include <io/pci/pci_common.h>
#include <io/pciex/pcie_nvidia.h>

/*
 * Helper Macros
 */
#define NPE_IS_HANDLE_FOR_STDCFG_ACC(hp) \
        ((hp) != NULL && \
        ((ddi_acc_hdl_t *)(hp))->ah_platform_private != NULL && \
        (((ddi_acc_impl_t *)((ddi_acc_hdl_t *)(hp))-> \
        ah_platform_private)-> \
            ahi_acc_attr & (DDI_ACCATTR_CPU_VADDR|DDI_ACCATTR_CONFIG_SPACE)) \
                == DDI_ACCATTR_CONFIG_SPACE)

/*
 * Bus Operation functions
 */
static int      npe_bus_map(dev_info_t *, dev_info_t *, ddi_map_req_t *,
                    off_t, off_t, caddr_t *);
static int      npe_ctlops(dev_info_t *, dev_info_t *, ddi_ctl_enum_t,
                    void *, void *);
static int      npe_intr_ops(dev_info_t *, dev_info_t *, ddi_intr_op_t,
                    ddi_intr_handle_impl_t *, void *);
static int      npe_fm_init(dev_info_t *, dev_info_t *, int,
                    ddi_iblock_cookie_t *);

static int      npe_fm_callback(dev_info_t *, ddi_fm_error_t *, const void *);

/*
 * Disable URs and Received MA for all PCIe devices.  Until x86 SW is changed
 * so that random drivers do not do PIO accesses on devices they do not own,
 * these error bits must be disabled.  SERR must also be disabled if URs have
 * been masked.
 */
uint32_t npe_aer_uce_mask = PCIE_AER_UCE_UR;
uint32_t npe_aer_ce_mask = 0;
uint32_t npe_aer_suce_mask = PCIE_AER_SUCE_RCVD_MA;

struct bus_ops npe_bus_ops = {
        BUSO_REV,
        npe_bus_map,
        NULL,
        NULL,
        NULL,
        i_ddi_map_fault,
        ddi_dma_map,
        ddi_dma_allochdl,
        ddi_dma_freehdl,
        ddi_dma_bindhdl,
        ddi_dma_unbindhdl,
        ddi_dma_flush,
        ddi_dma_win,
        ddi_dma_mctl,
        npe_ctlops,
        ddi_bus_prop_op,
        0,              /* (*bus_get_eventcookie)(); */
        0,              /* (*bus_add_eventcall)(); */
        0,              /* (*bus_remove_eventcall)(); */
        0,              /* (*bus_post_event)(); */
        0,              /* (*bus_intr_ctl)(); */
        0,              /* (*bus_config)(); */
        0,              /* (*bus_unconfig)(); */
        npe_fm_init,    /* (*bus_fm_init)(); */
        NULL,           /* (*bus_fm_fini)(); */
        NULL,           /* (*bus_fm_access_enter)(); */
        NULL,           /* (*bus_fm_access_exit)(); */
        NULL,           /* (*bus_power)(); */
        npe_intr_ops    /* (*bus_intr_op)(); */
};

/*
 * One goal here is to leverage the pcihp.c source without making changes
 * to it.  Call into its cb_ops directly if needed, piggybacking anything
 * else needed by the pci_tools.c module.  Only pci_tools and pcihp will be
 * using the PCI devctl node.
 */
static int      npe_open(dev_t *, int, int, cred_t *);
static int      npe_close(dev_t, int, int, cred_t *);
static int      npe_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int      npe_prop_op(dev_t, dev_info_t *, ddi_prop_op_t, int, char *,
                    caddr_t, int *);
static int      npe_info(dev_info_t *, ddi_info_cmd_t, void *, void **);

struct cb_ops npe_cb_ops = {
        npe_open,                       /* open */
        npe_close,                      /* close */
        nodev,                          /* strategy */
        nodev,                          /* print */
        nodev,                          /* dump */
        nodev,                          /* read */
        nodev,                          /* write */
        npe_ioctl,                      /* ioctl */
        nodev,                          /* devmap */
        nodev,                          /* mmap */
        nodev,                          /* segmap */
        nochpoll,                       /* poll */
        npe_prop_op,                    /* cb_prop_op */
        NULL,                           /* streamtab */
        D_NEW | D_MP | D_HOTPLUG,       /* Driver compatibility flag */
        CB_REV,                         /* rev */
        nodev,                          /* int (*cb_aread)() */
        nodev                           /* int (*cb_awrite)() */
};


/*
 * Device Node Operation functions
 */
static int      npe_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int      npe_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);

struct dev_ops npe_ops = {
        DEVO_REV,               /* devo_rev */
        0,                      /* refcnt */
        npe_info,               /* info */
        nulldev,                /* identify */
        nulldev,                /* probe */
        npe_attach,             /* attach */
        npe_detach,             /* detach */
        nulldev,                /* reset */
        &npe_cb_ops,            /* driver operations */
        &npe_bus_ops,           /* bus operations */
        NULL,                   /* power */
        ddi_quiesce_not_needed, /* quiesce */
};

/*
 * Internal routines in support of particular npe_ctlops.
 */
static int npe_removechild(dev_info_t *child);
static int npe_initchild(dev_info_t *child);

/*
 * External support routines
 */
extern void     npe_query_acpi_mcfg(dev_info_t *dip);
extern void     npe_ck804_fix_aer_ptr(ddi_acc_handle_t cfg_hdl);
extern int      npe_disable_empty_bridges_workaround(dev_info_t *child);
extern void     npe_nvidia_error_mask(ddi_acc_handle_t cfg_hdl);
extern void     npe_intel_error_mask(ddi_acc_handle_t cfg_hdl);
extern boolean_t npe_is_mmcfg_supported(dev_info_t *dip);
extern void     npe_enable_htmsi_children(dev_info_t *dip);
extern int      npe_save_htconfig_children(dev_info_t *dip);
extern int      npe_restore_htconfig_children(dev_info_t *dip);

/*
 * Module linkage information for the kernel.
 */
static struct modldrv modldrv = {
        &mod_driverops,                 /* Type of module */
        "Host to PCIe nexus driver",
        &npe_ops,                       /* driver ops */
};

static struct modlinkage modlinkage = {
        MODREV_1,
        (void *)&modldrv,
        NULL
};

/* Save minimal state. */
void *npe_statep;

int
_init(void)
{
        int e;

        /*
         * Initialize per-pci bus soft state pointer.
         */
        e = ddi_soft_state_init(&npe_statep, sizeof (pci_state_t), 1);
        if (e != 0)
                return (e);

        if ((e = mod_install(&modlinkage)) != 0)
                ddi_soft_state_fini(&npe_statep);

        return (e);
}


int
_fini(void)
{
        int rc;

        rc = mod_remove(&modlinkage);
        if (rc != 0)
                return (rc);

        ddi_soft_state_fini(&npe_statep);
        return (rc);
}


int
_info(struct modinfo *modinfop)
{
        return (mod_info(&modlinkage, modinfop));
}

/*ARGSUSED*/
static int
npe_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
        /*
         * Use the minor number as constructed by pcihp, as the index value to
         * ddi_soft_state_zalloc.
         */
        int instance = ddi_get_instance(devi);
        pci_state_t *pcip = NULL;

        if (cmd == DDI_RESUME) {
                /*
                 * The system might still be able to resume even if this fails.
                 */
                (void) npe_restore_htconfig_children(devi);
                return (DDI_SUCCESS);
        }

        /*
         * We must do this here in order to ensure that all top level devices
         * get their HyperTransport MSI mapping regs programmed first.
         * "Memory controller" and "hostbridge" class devices are leaf devices
         * that may affect MSI translation functionality for devices
         * connected to the same link/bus.
         *
         * This will also program HT MSI mapping registers on root bus
         * devices (basically sitting on an HT bus) that are not dependent
         * on the aforementioned HT devices for MSI translation.
         */
        npe_enable_htmsi_children(devi);

        if (ddi_prop_update_string(DDI_DEV_T_NONE, devi, "device_type",
            "pciex") != DDI_PROP_SUCCESS) {
                cmn_err(CE_WARN, "npe: 'device_type' prop create failed");
        }

        if (ddi_soft_state_zalloc(npe_statep, instance) == DDI_SUCCESS)
                pcip = ddi_get_soft_state(npe_statep, instance);

        if (pcip == NULL)
                return (DDI_FAILURE);

        pcip->pci_dip = devi;

        pcie_rc_init_bus(devi);

        /*
         * Initialize hotplug support on this bus.  At minimum (for a
         * non-hotplug bus) this would create a ":devctl" minor node to
         * support DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls to this bus.
         */
        if (pcihp_init(devi) != DDI_SUCCESS) {
                cmn_err(CE_WARN, "npe: Failed to setup hotplug framework");
                ddi_soft_state_free(npe_statep, instance);
                return (DDI_FAILURE);
        }

        /* Second arg: initialize for pci_express root nexus */
        if (pcitool_init(devi, B_TRUE) != DDI_SUCCESS) {
                (void) pcihp_uninit(devi);
                ddi_soft_state_free(npe_statep, instance);
                return (DDI_FAILURE);
        }

        pcip->pci_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE |
            DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;
        ddi_fm_init(devi, &pcip->pci_fmcap, &pcip->pci_fm_ibc);

        if (pcip->pci_fmcap & DDI_FM_ERRCB_CAPABLE) {
                ddi_fm_handler_register(devi, npe_fm_callback, NULL);
        }

        PCIE_DIP2PFD(devi) = kmem_zalloc(sizeof (pf_data_t), KM_SLEEP);
        pcie_rc_init_pfd(devi, PCIE_DIP2PFD(devi));

        npe_query_acpi_mcfg(devi);
        ddi_report_dev(devi);
        return (DDI_SUCCESS);
}

/*ARGSUSED*/
static int
npe_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
        int instance = ddi_get_instance(devi);
        pci_state_t *pcip;

        pcip = ddi_get_soft_state(npe_statep, instance);

        switch (cmd) {
        case DDI_DETACH:

                /* Uninitialize pcitool support. */
                pcitool_uninit(devi);

                /*
                 * Uninitialize hotplug support on this bus.
                 */
                (void) pcihp_uninit(devi);

                if (pcip->pci_fmcap & DDI_FM_ERRCB_CAPABLE)
                        ddi_fm_handler_unregister(devi);

                pcie_rc_fini_bus(devi);
                pcie_rc_fini_pfd(PCIE_DIP2PFD(devi));
                kmem_free(PCIE_DIP2PFD(devi), sizeof (pf_data_t));

                ddi_fm_fini(devi);
                ddi_soft_state_free(npe_statep, instance);
                return (DDI_SUCCESS);

        case DDI_SUSPEND:
                /*
                 * The system might still be able to suspend/resume even if
                 * this fails.
                 */
                (void) npe_save_htconfig_children(devi);
                return (DDI_SUCCESS);
        default:
                return (DDI_FAILURE);
        }
}

/*
 * Configure the access handle for standard configuration space
 * access (see pci_fm_acc_setup for code that initializes the
 * access-function pointers).
 */
static int
npe_setup_std_pcicfg_acc(dev_info_t *rdip, ddi_map_req_t *mp,
    ddi_acc_hdl_t *hp, off_t offset, off_t len)
{
        int ret;

        if ((ret = pci_fm_acc_setup(hp, offset, len)) ==
            DDI_SUCCESS) {
                if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
                    mp->map_handlep->ah_acc.devacc_attr_access
                    != DDI_DEFAULT_ACC) {
                        ndi_fmc_insert(rdip, ACC_HANDLE,
                            (void *)mp->map_handlep, NULL);
                }
        }
        return (ret);
}

static int
npe_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp)
{
        int             rnumber;
        int             length;
        int             space;
        ddi_acc_impl_t  *ap;
        ddi_acc_hdl_t   *hp;
        ddi_map_req_t   mr;
        pci_regspec_t   pci_reg;
        pci_regspec_t   *pci_rp;
        struct regspec  reg;
        pci_acc_cfblk_t *cfp;
        int             retval;
        int64_t         *ecfginfo;
        uint_t          nelem;

        mr = *mp; /* Get private copy of request */
        mp = &mr;

        /*
         * check for register number
         */
        switch (mp->map_type) {
        case DDI_MT_REGSPEC:
                pci_reg = *(pci_regspec_t *)(mp->map_obj.rp);
                pci_rp = &pci_reg;
                if (pci_common_get_reg_prop(rdip, pci_rp) != DDI_SUCCESS)
                        return (DDI_FAILURE);
                break;
        case DDI_MT_RNUMBER:
                rnumber = mp->map_obj.rnumber;
                /*
                 * get ALL "reg" properties for dip, select the one of
                 * interest.  On x86, the "assigned-addresses" property
                 * is identical to the "reg" property, so there is no
                 * need to cross check the two to determine the physical
                 * address of the registers.
                 * This routine still performs some validity checks to
                 * make sure that everything is okay.
                 */
                if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, rdip,
                    DDI_PROP_DONTPASS, "reg", (int **)&pci_rp,
                    (uint_t *)&length) != DDI_PROP_SUCCESS)
                        return (DDI_FAILURE);

                /*
                 * validate the register number.
                 */
                length /= (sizeof (pci_regspec_t) / sizeof (int));
                if (rnumber >= length) {
                        ddi_prop_free(pci_rp);
                        return (DDI_FAILURE);
                }

                /*
                 * copy the required entry.
                 */
                pci_reg = pci_rp[rnumber];

                /*
                 * free the memory allocated by ddi_prop_lookup_int_array
                 */
                ddi_prop_free(pci_rp);

                pci_rp = &pci_reg;
                if (pci_common_get_reg_prop(rdip, pci_rp) != DDI_SUCCESS)
                        return (DDI_FAILURE);
                mp->map_type = DDI_MT_REGSPEC;
                break;
        default:
                return (DDI_ME_INVAL);
        }

        space = pci_rp->pci_phys_hi & PCI_REG_ADDR_M;

        /*
         * check for unmap and unlock of address space
         */
        if ((mp->map_op == DDI_MO_UNMAP) || (mp->map_op == DDI_MO_UNLOCK)) {
                switch (space) {
                case PCI_ADDR_IO:
                        reg.regspec_bustype = 1;
                        break;

                case PCI_ADDR_CONFIG:
                        /*
                         * If this is an unmap/unlock of a standard config
                         * space mapping (memory-mapped config space mappings
                         * would have the DDI_ACCATTR_CPU_VADDR bit set in the
                         * acc_attr), undo that setup here.
                         */
                        if (NPE_IS_HANDLE_FOR_STDCFG_ACC(mp->map_handlep)) {

                                if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
                                    mp->map_handlep->ah_acc.devacc_attr_access
                                    != DDI_DEFAULT_ACC) {
                                        ndi_fmc_remove(rdip, ACC_HANDLE,
                                            (void *)mp->map_handlep);
                                }
                                return (DDI_SUCCESS);
                        }

                        pci_rp->pci_size_low = PCIE_CONF_HDR_SIZE;

                        /* FALLTHROUGH */
                case PCI_ADDR_MEM64:
                        /*
                         * MEM64 requires special treatment on map, to check
                         * that the device is below 4G.  On unmap, however,
                         * we can assume that everything is OK... the map
                         * must have succeeded.
                         */
                        /* FALLTHROUGH */
                case PCI_ADDR_MEM32:
                        reg.regspec_bustype = 0;
                        break;

                default:
                        return (DDI_FAILURE);
                }

                /*
                 * Adjust offset and length.
                 * A non-zero length means override the one in the regspec.
                 */
                pci_rp->pci_phys_low += (uint_t)offset;
                if (len != 0)
                        pci_rp->pci_size_low = len;

                reg.regspec_addr = pci_rp->pci_phys_low;
                reg.regspec_size = pci_rp->pci_size_low;

                mp->map_obj.rp = &reg;
                retval = ddi_map(dip, mp, (off_t)0, (off_t)0, vaddrp);
                if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
                    mp->map_handlep->ah_acc.devacc_attr_access !=
                    DDI_DEFAULT_ACC) {
                        ndi_fmc_remove(rdip, ACC_HANDLE,
                            (void *)mp->map_handlep);
                }
                return (retval);
        }

        /* check for user mapping request - not legal for Config */
        if (mp->map_op == DDI_MO_MAP_HANDLE && space == PCI_ADDR_CONFIG) {
                cmn_err(CE_NOTE, "npe: Config mapping request from user\n");
                return (DDI_FAILURE);
        }

        /*
         * Note that pci_fm_acc_setup() is called to serve two purposes
         * i) enable legacy PCI I/O style config space access
         * ii) register with FMA
         */
        if (space == PCI_ADDR_CONFIG) {

                /* Can't map config space without a handle */
                hp = (ddi_acc_hdl_t *)mp->map_handlep;
                if (hp == NULL)
                        return (DDI_FAILURE);

                /* record the device address for future reference */
                cfp = (pci_acc_cfblk_t *)&hp->ah_bus_private;
                cfp->c_busnum = PCI_REG_BUS_G(pci_rp->pci_phys_hi);
                cfp->c_devnum = PCI_REG_DEV_G(pci_rp->pci_phys_hi);
                cfp->c_funcnum = PCI_REG_FUNC_G(pci_rp->pci_phys_hi);

                *vaddrp = (caddr_t)offset;

                /* Check if MMCFG is supported */
                if (!npe_is_mmcfg_supported(rdip)) {
                        return (npe_setup_std_pcicfg_acc(rdip, mp, hp,
                            offset, len));
                }

                if (ddi_prop_lookup_int64_array(DDI_DEV_T_ANY, rdip, 0,
                    "ecfg", &ecfginfo, &nelem) == DDI_PROP_SUCCESS) {

                        if (nelem != 4 ||
                            cfp->c_busnum < ecfginfo[2] ||
                            cfp->c_busnum > ecfginfo[3]) {
                                /*
                                 * Invalid property, or it doesn't contain
                                 * the requested bus; fall back to standard
                                 * (I/O-based) config access.
                                 */
                                ddi_prop_free(ecfginfo);
                                return (npe_setup_std_pcicfg_acc(rdip, mp, hp,
                                    offset, len));
                        } else {
                                pci_rp->pci_phys_low = ecfginfo[0];

                                ddi_prop_free(ecfginfo);

                                /*
                                 * ECAM layout: each function's 4KB of config
                                 * space lives at base + (bus << 20 |
                                 * dev << 15 | func << 12).
                                 */
                                pci_rp->pci_phys_low += ((cfp->c_busnum << 20) |
                                    (cfp->c_devnum) << 15 |
                                    (cfp->c_funcnum << 12));

                                pci_rp->pci_size_low = PCIE_CONF_HDR_SIZE;
                        }
                } else {
                        /*
                         * Couldn't find the MMCFG property -- fall back to
                         * standard config access
                         */
                        return (npe_setup_std_pcicfg_acc(rdip, mp, hp,
                            offset, len));
                }
        }

        length = pci_rp->pci_size_low;

        /*
         * range check
         */
        if ((offset >= length) || (len > length) || (offset + len > length))
                return (DDI_FAILURE);

        /*
         * Adjust offset and length.
         * A non-zero length means override the one in the regspec.
         */
        pci_rp->pci_phys_low += (uint_t)offset;
        if (len != 0)
                pci_rp->pci_size_low = len;

        /*
         * convert the pci regspec into the generic regspec used by the
         * parent root nexus driver.
         */
        switch (space) {
        case PCI_ADDR_IO:
                reg.regspec_bustype = 1;
                break;
        case PCI_ADDR_CONFIG:
        case PCI_ADDR_MEM64:
                /*
                 * We can't handle 64-bit devices that are mapped above
                 * 4G or that are larger than 4G.
                 */
                if (pci_rp->pci_phys_mid != 0 || pci_rp->pci_size_hi != 0)
                        return (DDI_FAILURE);
                /*
                 * Other than that, we can treat them as 32-bit mappings
                 */
                /* FALLTHROUGH */
        case PCI_ADDR_MEM32:
                reg.regspec_bustype = 0;
                break;
        default:
                return (DDI_FAILURE);
        }

        reg.regspec_addr = pci_rp->pci_phys_low;
        reg.regspec_size = pci_rp->pci_size_low;

        mp->map_obj.rp = &reg;
        retval = ddi_map(dip, mp, (off_t)0, (off_t)0, vaddrp);
        if (retval == DDI_SUCCESS) {
                /*
                 * For config space, force gets to use the cautious access
                 * routines.  These will handle default and protected mode
                 * accesses too.
                 */
                if (space == PCI_ADDR_CONFIG) {
                        ap = (ddi_acc_impl_t *)mp->map_handlep;
                        ap->ahi_acc_attr &= ~DDI_ACCATTR_DIRECT;
                        ap->ahi_acc_attr |= DDI_ACCATTR_CONFIG_SPACE;
                        ap->ahi_get8 = i_ddi_caut_get8;
                        ap->ahi_get16 = i_ddi_caut_get16;
                        ap->ahi_get32 = i_ddi_caut_get32;
                        ap->ahi_get64 = i_ddi_caut_get64;
                        ap->ahi_rep_get8 = i_ddi_caut_rep_get8;
                        ap->ahi_rep_get16 = i_ddi_caut_rep_get16;
                        ap->ahi_rep_get32 = i_ddi_caut_rep_get32;
                        ap->ahi_rep_get64 = i_ddi_caut_rep_get64;
                }
                if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
                    mp->map_handlep->ah_acc.devacc_attr_access !=
                    DDI_DEFAULT_ACC) {
                        ndi_fmc_insert(rdip, ACC_HANDLE,
                            (void *)mp->map_handlep, NULL);
                }
        }
        return (retval);
}


/*ARGSUSED*/
static int
npe_ctlops(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t ctlop, void *arg, void *result)
{
        int             rn;
        int             totreg;
        uint_t          reglen;
        pci_regspec_t   *drv_regp;
        struct attachspec *asp;
        struct detachspec *dsp;
        pci_state_t     *pci_p = ddi_get_soft_state(npe_statep,
            ddi_get_instance(dip));

        switch (ctlop) {
        case DDI_CTLOPS_REPORTDEV:
                if (rdip == (dev_info_t *)0)
                        return (DDI_FAILURE);
                cmn_err(CE_CONT, "?PCI Express-device: %s@%s, %s%d\n",
                    ddi_node_name(rdip), ddi_get_name_addr(rdip),
                    ddi_driver_name(rdip), ddi_get_instance(rdip));
                return (DDI_SUCCESS);

        case DDI_CTLOPS_INITCHILD:
                return (npe_initchild((dev_info_t *)arg));

        case DDI_CTLOPS_UNINITCHILD:
                return (npe_removechild((dev_info_t *)arg));

        case DDI_CTLOPS_SIDDEV:
                return (DDI_SUCCESS);

        case DDI_CTLOPS_REGSIZE:
        case DDI_CTLOPS_NREGS:
                if (rdip == (dev_info_t *)0)
                        return (DDI_FAILURE);

                *(int *)result = 0;
                if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, rdip,
                    DDI_PROP_DONTPASS, "reg", (int **)&drv_regp,
                    &reglen) != DDI_PROP_SUCCESS) {
                        return (DDI_FAILURE);
                }

                totreg = (reglen * sizeof (int)) / sizeof (pci_regspec_t);
                if (ctlop == DDI_CTLOPS_NREGS)
                        *(int *)result = totreg;
                else if (ctlop == DDI_CTLOPS_REGSIZE) {
                        rn = *(int *)arg;
                        if (rn >= totreg) {
                                ddi_prop_free(drv_regp);
                                return (DDI_FAILURE);
                        }
                        *(off_t *)result = drv_regp[rn].pci_size_low;
                }
                ddi_prop_free(drv_regp);

                return (DDI_SUCCESS);

        case DDI_CTLOPS_POWER:
        {
                power_req_t *reqp = (power_req_t *)arg;
                /*
                 * We currently understand reporting of PCI_PM_IDLESPEED
                 * capability.  Everything else is passed up.
                 */
                if ((reqp->request_type == PMR_REPORT_PMCAP) &&
                    (reqp->req.report_pmcap_req.cap == PCI_PM_IDLESPEED))
                        return (DDI_SUCCESS);

                break;
        }

        case DDI_CTLOPS_PEEK:
        case DDI_CTLOPS_POKE:
                return (pci_common_peekpoke(dip, rdip, ctlop, arg, result));

        /* X86 systems support PME wakeup from suspended state */
        case DDI_CTLOPS_ATTACH:
                if (!pcie_is_child(dip, rdip))
                        return (DDI_SUCCESS);

                asp = (struct attachspec *)arg;
                if ((asp->when == DDI_POST) && (asp->result == DDI_SUCCESS)) {
                        pf_init(rdip, (void *)pci_p->pci_fm_ibc, asp->cmd);
                        (void) pcie_postattach_child(rdip);
                }

                /* only do this for immediate children */
                if (asp->cmd == DDI_RESUME && asp->when == DDI_PRE &&
                    ddi_get_parent(rdip) == dip)
                        if (pci_pre_resume(rdip) != DDI_SUCCESS) {
                                /* Not good, better stop now. */
                                cmn_err(CE_PANIC,
                                    "Couldn't pre-resume device %p",
                                    (void *) dip);
                                /* NOTREACHED */
                        }

                return (DDI_SUCCESS);

        case DDI_CTLOPS_DETACH:
                if (!pcie_is_child(dip, rdip))
                        return (DDI_SUCCESS);

                dsp = (struct detachspec *)arg;

                if (dsp->when == DDI_PRE)
                        pf_fini(rdip, dsp->cmd);

                /* only do this for immediate children */
                if (dsp->cmd == DDI_SUSPEND && dsp->when == DDI_POST &&
                    ddi_get_parent(rdip) == dip)
                        if (pci_post_suspend(rdip) != DDI_SUCCESS)
                                return (DDI_FAILURE);

                return (DDI_SUCCESS);

        default:
                break;
        }

        return (ddi_ctlops(dip, rdip, ctlop, arg, result));
}


/*
 * npe_intr_ops
 */
static int
npe_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
        return (pci_common_intr_ops(pdip, rdip, intr_op, hdlp, result));
}


static int
npe_initchild(dev_info_t *child)
{
        char            name[80];
        pcie_bus_t      *bus_p;
        uint32_t        regs;
        ddi_acc_handle_t cfg_hdl;

        /*
         * Do not bind drivers to empty bridges.
         * Fail above, if the bridge is found to be hotplug capable
         */
        if (npe_disable_empty_bridges_workaround(child) == 1)
                return (DDI_FAILURE);

        if (pci_common_name_child(child, name, 80) != DDI_SUCCESS)
                return (DDI_FAILURE);

        ddi_set_name_addr(child, name);

        /*
         * Pseudo nodes indicate a prototype node with per-instance
         * properties to be merged into the real h/w device node.
         * The interpretation of the unit-address is DD[,F]
         * where DD is the device id and F is the function.
         */
        if (ndi_dev_is_persistent_node(child) == 0) {
                extern int pci_allow_pseudo_children;

                ddi_set_parent_data(child, NULL);

                /*
                 * Try to merge the properties from this prototype
                 * node into real h/w nodes.
                 */
                if (ndi_merge_node(child, pci_common_name_child) ==
                    DDI_SUCCESS) {
                        /*
                         * Merged ok - return failure to remove the node.
                         */
                        ddi_set_name_addr(child, NULL);
                        return (DDI_FAILURE);
                }

                /* workaround for DDIVS to run under PCI Express */
                if (pci_allow_pseudo_children) {
                        /*
                         * If the "interrupts" property doesn't exist,
                         * this must be the ddivs no-intr case, and it returns
                         * DDI_SUCCESS instead of DDI_FAILURE.
                         */
                        if (ddi_prop_get_int(DDI_DEV_T_ANY, child,
                            DDI_PROP_DONTPASS, "interrupts", -1) == -1)
                                return (DDI_SUCCESS);
                        /*
                         * Create the ddi_parent_private_data for a pseudo
                         * child.
                         */
                        pci_common_set_parent_private_data(child);
                        return (DDI_SUCCESS);
                }

                /*
                 * The child was not merged into a h/w node,
                 * but there's not much we can do with it other
                 * than return failure to cause the node to be removed.
                 */
                cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
                    ddi_get_name(child), ddi_get_name_addr(child),
                    ddi_get_name(child));
                ddi_set_name_addr(child, NULL);
                return (DDI_NOT_WELL_FORMED);
        }

        if (ddi_prop_get_int(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
            "interrupts", -1) != -1)
                pci_common_set_parent_private_data(child);
        else
                ddi_set_parent_data(child, NULL);

        /* Disable certain errors on PCIe drivers for x86 platforms */
        regs = pcie_get_aer_uce_mask() | npe_aer_uce_mask;
        pcie_set_aer_uce_mask(regs);
        regs = pcie_get_aer_ce_mask() | npe_aer_ce_mask;
        pcie_set_aer_ce_mask(regs);
        regs = pcie_get_aer_suce_mask() | npe_aer_suce_mask;
        pcie_set_aer_suce_mask(regs);

        /*
         * If URs are disabled, mask SERRs as well; otherwise the system will
         * still be notified of URs.
         */
        if (npe_aer_uce_mask & PCIE_AER_UCE_UR)
                pcie_set_serr_mask(1);

        if (pci_config_setup(child, &cfg_hdl) == DDI_SUCCESS) {
                npe_ck804_fix_aer_ptr(cfg_hdl);
                npe_nvidia_error_mask(cfg_hdl);
                npe_intel_error_mask(cfg_hdl);
                pci_config_teardown(&cfg_hdl);
        }

        bus_p = pcie_init_bus(child);
        if (bus_p) {
                uint16_t device_id = (uint16_t)(bus_p->bus_dev_ven_id >> 16);
                uint16_t vendor_id = (uint16_t)(bus_p->bus_dev_ven_id & 0xFFFF);
                uint16_t rev_id = bus_p->bus_rev_id;

                /* Disable AER for certain NVIDIA Chipsets */
                if ((vendor_id == NVIDIA_VENDOR_ID) &&
                    (device_id == NVIDIA_CK804_DEVICE_ID) &&
                    (rev_id < NVIDIA_CK804_AER_VALID_REVID))
                        bus_p->bus_aer_off = 0;

                (void) pcie_initchild(child);
        }

        return (DDI_SUCCESS);
}


static int
npe_removechild(dev_info_t *dip)
{
        pcie_uninitchild(dip);

        ddi_set_name_addr(dip, NULL);

        /*
         * Strip the node to properly convert it back to prototype form
         */
        ddi_remove_minor_node(dip, NULL);

        ddi_prop_remove_all(dip);

        return (DDI_SUCCESS);
}


/*
 * When this module was retrofitted for pci_tools, functions such as open,
 * close, and ioctl were pulled into this module.  Before this, the functions
 * in the pcihp module were referenced directly.  Now they are called or
 * referenced through the pcihp cb_ops structure from functions in this module.
 */
static int
npe_open(dev_t *devp, int flags, int otyp, cred_t *credp)
{
        return ((pcihp_get_cb_ops())->cb_open(devp, flags, otyp, credp));
}

static int
npe_close(dev_t dev, int flags, int otyp, cred_t *credp)
{
        return ((pcihp_get_cb_ops())->cb_close(dev, flags, otyp, credp));
}

static int
npe_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
{
        minor_t         minor = getminor(dev);
        int             instance = PCIHP_AP_MINOR_NUM_TO_INSTANCE(minor);
        pci_state_t     *pci_p = ddi_get_soft_state(npe_statep, instance);
        dev_info_t      *dip;

        if (pci_p == NULL)
                return (ENXIO);

        dip = pci_p->pci_dip;

        return (pci_common_ioctl(dip, dev, cmd, arg, mode, credp, rvalp));
}

static int
npe_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
    int flags, char *name, caddr_t valuep, int *lengthp)
{
        return ((pcihp_get_cb_ops())->cb_prop_op(dev, dip, prop_op, flags,
            name, valuep, lengthp));
}

static int
npe_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
        return (pcihp_info(dip, cmd, arg, result));
}

/*ARGSUSED*/
static int
npe_fm_init(dev_info_t *dip, dev_info_t *tdip, int cap,
    ddi_iblock_cookie_t *ibc)
{
        pci_state_t *pcip = ddi_get_soft_state(npe_statep,
            ddi_get_instance(dip));

        ASSERT(ibc != NULL);
        *ibc = pcip->pci_fm_ibc;

        return (pcip->pci_fmcap);
}

/*ARGSUSED*/
static int
npe_fm_callback(dev_info_t *dip, ddi_fm_error_t *derr, const void *no_used)
{
        /*
         * On current x86 systems, npe's callback does not get called for
         * failed loads.  If in the future this feature is used, the fault
         * PA should be logged in the derr->fme_bus_specific field.  The
         * appropriate PCIe error handling code should be called and needs
         * to be coordinated with safe access handling.
         */

        return (DDI_FM_OK);
}