1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * PCI Express nexus driver interface 30 */ 31 32 #include <sys/types.h> 33 #include <sys/conf.h> /* nulldev */ 34 #include <sys/stat.h> /* devctl */ 35 #include <sys/kmem.h> 36 #include <sys/sunddi.h> 37 #include <sys/sunndi.h> 38 #include <sys/hotplug/pci/pcihp.h> 39 #include <sys/ddi_impldefs.h> 40 #include <sys/ddi_subrdefs.h> 41 #include <sys/spl.h> 42 #include <sys/epm.h> 43 #include <sys/iommutsb.h> 44 #include <sys/hotplug/pci/pcihp.h> 45 #include <sys/hotplug/pci/pciehpc.h> 46 #include "px_obj.h" 47 #include <sys/pci_tools.h> 48 #include "px_tools_ext.h" 49 #include "pcie_pwr.h" 50 51 /*LINTLIBRARY*/ 52 53 /* 54 * function prototypes for dev ops routines: 55 */ 56 static int px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 57 static int px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 58 static int px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, 59 void *arg, void **result); 60 static int px_pwr_setup(dev_info_t *dip); 61 static void 
px_pwr_teardown(dev_info_t *dip);

/*
 * function prototypes for hotplug routines:
 */
static uint_t px_init_hotplug(px_t *px_p);
static uint_t px_uninit_hotplug(dev_info_t *dip);

/*
 * bus ops and dev ops structures:
 *
 * px_bus_ops is the nexus bus_ops vector handed to the DDI framework;
 * children's map/DMA/ctl/intr requests are routed through the px_*
 * entry points below.
 */
static struct bus_ops px_bus_ops = {
	BUSO_REV,
	px_map,
	0,	/* NOTE(review): presumably the obsolete intrspec ops -- confirm */
	0,
	0,
	i_ddi_map_fault,
	px_dma_setup,
	px_dma_allochdl,
	px_dma_freehdl,
	px_dma_bindhdl,
	px_dma_unbindhdl,
	px_lib_dma_sync,
	px_dma_win,
	px_dma_ctlops,
	px_ctlops,
	ddi_bus_prop_op,
	ndi_busop_get_eventcookie,
	ndi_busop_add_eventcall,
	ndi_busop_remove_eventcall,
	ndi_post_event,
	NULL,
	NULL,			/* (*bus_config)(); */
	NULL,			/* (*bus_unconfig)(); */
	px_fm_init_child,	/* (*bus_fm_init)(); */
	NULL,			/* (*bus_fm_fini)(); */
	px_bus_enter,		/* (*bus_fm_access_enter)(); */
	px_bus_exit,		/* (*bus_fm_access_fini)(); */
	pcie_bus_power,		/* (*bus_power)(); */
	px_intr_ops		/* (*bus_intr_op)(); */
};

extern struct cb_ops px_cb_ops;

/* dev_ops vector: attach/detach/getinfo plus the cb_ops and bus_ops above */
static struct dev_ops px_ops = {
	DEVO_REV,
	0,
	px_info,
	nulldev,
	0,
	px_attach,
	px_detach,
	nodev,
	&px_cb_ops,
	&px_bus_ops,
	nulldev
};

/*
 * module definitions:
 */
#include <sys/modctl.h>
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,			/* Type of module - driver */
	"PCI Express nexus driver %I%",	/* Name of module. */
	&px_ops,			/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/* driver soft state: one px_t per nexus instance, managed by ddi_soft_state */
void *px_state_p;

/*
 * _init: loadable-module entry.  Sets up the per-instance soft state
 * anchor, then installs the module; the soft state is torn back down
 * if mod_install() fails so the module can be retried cleanly.
 */
int
_init(void)
{
	int e;

	/*
	 * Initialize per-px bus soft state pointer.
	 */
	e = ddi_soft_state_init(&px_state_p, sizeof (px_t), 1);
	if (e != DDI_SUCCESS)
		return (e);

	/*
	 * Install the module.
	 */
	e = mod_install(&modlinkage);
	if (e != DDI_SUCCESS)
		ddi_soft_state_fini(&px_state_p);	/* undo the init above */
	return (e);
}

/*
 * _fini: loadable-module exit.  Only frees the soft state anchor once
 * mod_remove() has succeeded (i.e. no instances remain attached).
 */
int
_fini(void)
{
	int e;

	/*
	 * Remove the module.
	 */
	e = mod_remove(&modlinkage);
	if (e != DDI_SUCCESS)
		return (e);

	/* Free px soft state */
	ddi_soft_state_fini(&px_state_p);

	return (e);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * px_info: getinfo(9E) entry point.  Maps a dev_t back to an instance
 * number or dip.  px_p may be NULL if the instance is not attached.
 */
/* ARGSUSED */
static int
px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int	instance = getminor((dev_t)arg);
	px_t	*px_p = INST_TO_STATE(instance);

	/*
	 * Allow hotplug to deal with ones it manages
	 * Hot Plug will be done later.
	 */
	if (px_p && (px_p->px_dev_caps & PX_HOTPLUG_CAPABLE))
		return (pcihp_info(dip, infocmd, arg, result));

	/* non-hotplug or not attached */
	switch (infocmd) {
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(intptr_t)instance;
		return (DDI_SUCCESS);

	case DDI_INFO_DEVT2DEVINFO:
		if (px_p == NULL)
			return (DDI_FAILURE);
		*result = (void *)px_p->px_dip;
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

/* device driver entry points */
/*
 * attach entry point:
 *
 * DDI_ATTACH builds the nexus bottom-up (props, FM, lib, ib, cb, mmu,
 * msiq, msi, pec, dma), then enables the fault interrupt, hotplug,
 * minor nodes, pcitool and (optional) power management.  The error
 * labels below unwind in exactly the reverse order of construction.
 * DDI_RESUME simply restores a previously suspended instance.
 */
/*ARGSUSED*/
static int
px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	px_t		*px_p;	/* per bus state pointer */
	int		instance = DIP_TO_INST(dip);
	int		ret = DDI_SUCCESS;
	devhandle_t	dev_hdl = NULL;

	switch (cmd) {
	case DDI_ATTACH:
		DBG(DBG_ATTACH, dip, "DDI_ATTACH\n");

		/*
		 * Allocate and get the per-px soft state structure.
		 */
		if (ddi_soft_state_zalloc(px_state_p, instance)
		    != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: can't allocate px state",
			    ddi_driver_name(dip), instance);
			goto err_bad_px_softstate;
		}
		px_p = INST_TO_STATE(instance);
		px_p->px_dip = dip;
		mutex_init(&px_p->px_mutex, NULL, MUTEX_DRIVER, NULL);
		px_p->px_soft_state = PX_SOFT_STATE_CLOSED;
		px_p->px_open_count = 0;

		(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
		    "device_type", "pciex");
		/*
		 * Get key properties of the pci bridge node and
		 * determine its type (psycho, schizo, etc ...).
		 */
		if (px_get_props(px_p, dip) == DDI_FAILURE)
			goto err_bad_px_prop;

		if ((px_fm_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_fm;

		if (px_lib_dev_init(dip, &dev_hdl) != DDI_SUCCESS)
			goto err_bad_dev_init;

		/* Initialize device handle */
		px_p->px_dev_hdl = dev_hdl;

		/*
		 * Initialize interrupt block.  Note that this
		 * initializes error handling for the PEC as well.
		 */
		if ((ret = px_ib_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_ib;

		if (px_cb_attach(px_p) != DDI_SUCCESS)
			goto err_bad_cb;

		/*
		 * Start creating the modules.
		 * Note that attach() routines should
		 * register and enable their own interrupts.
		 */

		if ((px_mmu_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_mmu;

		if ((px_msiq_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_msiq;

		if ((px_msi_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_msi;

		if ((px_pec_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_pec;

		if ((px_dma_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_pec; /* nothing to uninitialize on DMA */

		/*
		 * All of the error handlers have been registered
		 * by now so it's time to activate the interrupt.
		 */
		if ((ret = px_err_add_intr(&px_p->px_fault)) != DDI_SUCCESS)
			goto err_bad_pec_add_intr;

		/* best-effort: a bus without hotplug support still attaches */
		(void) px_init_hotplug(px_p);

		/*
		 * Create the "devctl" node for hotplug and pcitool support.
		 * For non-hotplug bus, we still need ":devctl" to
		 * support DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls.
		 */
		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
		    PCIHP_AP_MINOR_NUM(instance, PCIHP_DEVCTL_MINOR),
		    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
			goto err_bad_devctl_node;
		}

		if (pxtool_init(dip) != DDI_SUCCESS)
			goto err_bad_pcitool_node;

		/*
		 * power management setup. Even if it fails, attach will
		 * succeed as this is an optional feature. Since we are
		 * always at full power, this is not critical.
		 */
		if (pwr_common_setup(dip) != DDI_SUCCESS) {
			DBG(DBG_PWR, dip, "pwr_common_setup failed\n");
		} else if (px_pwr_setup(dip) != DDI_SUCCESS) {
			DBG(DBG_PWR, dip, "px_pwr_setup failed \n");
			pwr_common_teardown(dip);
		}

		/*
		 * add cpr callback
		 */
		px_cpr_add_callb(px_p);

		ddi_report_dev(dip);

		px_p->px_state = PX_ATTACHED;
		DBG(DBG_ATTACH, dip, "attach success\n");
		break;

		/* error unwind: strictly the reverse of the setup order above */
err_bad_pcitool_node:
		ddi_remove_minor_node(dip, "devctl");
err_bad_devctl_node:
		px_err_rem_intr(&px_p->px_fault);
err_bad_pec_add_intr:
		px_pec_detach(px_p);
err_bad_pec:
		px_msi_detach(px_p);
err_bad_msi:
		px_msiq_detach(px_p);
err_bad_msiq:
		px_mmu_detach(px_p);
err_bad_mmu:
		px_cb_detach(px_p);
err_bad_cb:
		px_ib_detach(px_p);
err_bad_ib:
		(void) px_lib_dev_fini(dip);
err_bad_dev_init:
		px_fm_detach(px_p);
err_bad_fm:
		px_free_props(px_p);
err_bad_px_prop:
		mutex_destroy(&px_p->px_mutex);
		ddi_soft_state_free(px_state_p, instance);
err_bad_px_softstate:
		ret = DDI_FAILURE;
		break;

	case DDI_RESUME:
		DBG(DBG_ATTACH, dip, "DDI_RESUME\n");

		px_p =
INST_TO_STATE(instance); 375 376 mutex_enter(&px_p->px_mutex); 377 378 /* suspend might have not succeeded */ 379 if (px_p->px_state != PX_SUSPENDED) { 380 DBG(DBG_ATTACH, px_p->px_dip, 381 "instance NOT suspended\n"); 382 ret = DDI_FAILURE; 383 break; 384 } 385 386 px_lib_resume(dip); 387 (void) pcie_pwr_resume(dip); 388 px_p->px_state = PX_ATTACHED; 389 390 mutex_exit(&px_p->px_mutex); 391 392 break; 393 default: 394 DBG(DBG_ATTACH, dip, "unsupported attach op\n"); 395 ret = DDI_FAILURE; 396 break; 397 } 398 399 return (ret); 400 } 401 402 /* 403 * detach entry point: 404 */ 405 /*ARGSUSED*/ 406 static int 407 px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 408 { 409 int instance = ddi_get_instance(dip); 410 px_t *px_p = INST_TO_STATE(instance); 411 int ret; 412 413 /* 414 * Make sure we are currently attached 415 */ 416 if (px_p->px_state != PX_ATTACHED) { 417 DBG(DBG_DETACH, dip, "failed - instance not attached\n"); 418 return (DDI_FAILURE); 419 } 420 421 mutex_enter(&px_p->px_mutex); 422 423 switch (cmd) { 424 case DDI_DETACH: 425 DBG(DBG_DETACH, dip, "DDI_DETACH\n"); 426 427 /* 428 * remove cpr callback 429 */ 430 px_cpr_rem_callb(px_p); 431 432 if (px_p->px_dev_caps & PX_HOTPLUG_CAPABLE) 433 if (px_uninit_hotplug(dip) != DDI_SUCCESS) { 434 mutex_exit(&px_p->px_mutex); 435 return (DDI_FAILURE); 436 } 437 438 /* 439 * things which used to be done in obj_destroy 440 * are now in-lined here. 441 */ 442 443 px_p->px_state = PX_DETACHED; 444 445 pxtool_uninit(dip); 446 447 ddi_remove_minor_node(dip, "devctl"); 448 px_err_rem_intr(&px_p->px_fault); 449 px_pec_detach(px_p); 450 px_pwr_teardown(dip); 451 pwr_common_teardown(dip); 452 px_msi_detach(px_p); 453 px_msiq_detach(px_p); 454 px_mmu_detach(px_p); 455 px_cb_detach(px_p); 456 px_ib_detach(px_p); 457 (void) px_lib_dev_fini(dip); 458 px_fm_detach(px_p); 459 460 /* 461 * Free the px soft state structure and the rest of the 462 * resources it's using. 
463 */ 464 px_free_props(px_p); 465 mutex_exit(&px_p->px_mutex); 466 mutex_destroy(&px_p->px_mutex); 467 468 /* Free the interrupt-priorities prop if we created it. */ { 469 int len; 470 471 if (ddi_getproplen(DDI_DEV_T_ANY, dip, 472 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 473 "interrupt-priorities", &len) == DDI_PROP_SUCCESS) 474 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, 475 "interrupt-priorities"); 476 } 477 478 px_p->px_dev_hdl = NULL; 479 ddi_soft_state_free(px_state_p, instance); 480 481 return (DDI_SUCCESS); 482 483 case DDI_SUSPEND: 484 if (pcie_pwr_suspend(dip) != DDI_SUCCESS) { 485 mutex_exit(&px_p->px_mutex); 486 return (DDI_FAILURE); 487 } 488 if ((ret = px_lib_suspend(dip)) == DDI_SUCCESS) 489 px_p->px_state = PX_SUSPENDED; 490 mutex_exit(&px_p->px_mutex); 491 492 return (ret); 493 494 default: 495 DBG(DBG_DETACH, dip, "unsupported detach op\n"); 496 mutex_exit(&px_p->px_mutex); 497 return (DDI_FAILURE); 498 } 499 } 500 501 /* 502 * power management related initialization specific to px 503 * called by px_attach() 504 */ 505 static int 506 px_pwr_setup(dev_info_t *dip) 507 { 508 pcie_pwr_t *pwr_p; 509 int instance = ddi_get_instance(dip); 510 px_t *px_p = INST_TO_STATE(instance); 511 ddi_intr_handle_impl_t hdl; 512 513 ASSERT(PCIE_PMINFO(dip)); 514 pwr_p = PCIE_NEXUS_PMINFO(dip); 515 ASSERT(pwr_p); 516 517 /* 518 * indicate support LDI (Layered Driver Interface) 519 * Create the property, if it is not already there 520 */ 521 if (!ddi_prop_exists(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS, 522 DDI_KERNEL_IOCTL)) { 523 if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 524 DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS) { 525 DBG(DBG_PWR, dip, "can't create kernel ioctl prop\n"); 526 return (DDI_FAILURE); 527 } 528 } 529 /* No support for device PM. 
We are always at full power */ 530 pwr_p->pwr_func_lvl = PM_LEVEL_D0; 531 532 mutex_init(&px_p->px_l23ready_lock, NULL, MUTEX_DRIVER, 533 DDI_INTR_PRI(px_pwr_pil)); 534 cv_init(&px_p->px_l23ready_cv, NULL, CV_DRIVER, NULL); 535 536 537 538 /* Initilize handle */ 539 hdl.ih_cb_arg1 = px_p; 540 hdl.ih_cb_arg2 = NULL; 541 hdl.ih_ver = DDI_INTR_VERSION; 542 hdl.ih_state = DDI_IHDL_STATE_ALLOC; 543 hdl.ih_dip = dip; 544 hdl.ih_inum = 0; 545 hdl.ih_pri = px_pwr_pil; 546 547 /* Add PME_TO_ACK message handler */ 548 hdl.ih_cb_func = (ddi_intr_handler_t *)px_pmeq_intr; 549 if (px_add_msiq_intr(dip, dip, &hdl, MSG_REC, 550 (msgcode_t)PCIE_PME_ACK_MSG, &px_p->px_pm_msiq_id) != DDI_SUCCESS) { 551 DBG(DBG_PWR, dip, "px_pwr_setup: couldn't add " 552 " PME_TO_ACK intr\n"); 553 goto pwr_setup_err1; 554 } 555 px_lib_msg_setmsiq(dip, PCIE_PME_ACK_MSG, px_p->px_pm_msiq_id); 556 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_VALID); 557 558 if (px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum, 559 px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), 560 PX_INTR_STATE_ENABLE, MSG_REC, PCIE_PME_ACK_MSG) != DDI_SUCCESS) { 561 DBG(DBG_PWR, dip, "px_pwr_setup: PME_TO_ACK update interrupt" 562 " state failed\n"); 563 goto px_pwrsetup_err_state; 564 } 565 566 return (DDI_SUCCESS); 567 568 px_pwrsetup_err_state: 569 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID); 570 (void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG, 571 px_p->px_pm_msiq_id); 572 pwr_setup_err1: 573 mutex_destroy(&px_p->px_l23ready_lock); 574 cv_destroy(&px_p->px_l23ready_cv); 575 576 return (DDI_FAILURE); 577 } 578 579 /* 580 * undo whatever is done in px_pwr_setup. 
called by px_detach() 581 */ 582 static void 583 px_pwr_teardown(dev_info_t *dip) 584 { 585 int instance = ddi_get_instance(dip); 586 px_t *px_p = INST_TO_STATE(instance); 587 ddi_intr_handle_impl_t hdl; 588 589 if (!PCIE_PMINFO(dip) || !PCIE_NEXUS_PMINFO(dip)) 590 return; 591 592 /* Initilize handle */ 593 hdl.ih_ver = DDI_INTR_VERSION; 594 hdl.ih_state = DDI_IHDL_STATE_ALLOC; 595 hdl.ih_dip = dip; 596 hdl.ih_inum = 0; 597 598 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID); 599 (void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG, 600 px_p->px_pm_msiq_id); 601 602 (void) px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum, 603 px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), 604 PX_INTR_STATE_DISABLE, MSG_REC, PCIE_PME_ACK_MSG); 605 606 px_p->px_pm_msiq_id = -1; 607 608 cv_destroy(&px_p->px_l23ready_cv); 609 mutex_destroy(&px_p->px_l23ready_lock); 610 } 611 612 /* bus driver entry points */ 613 614 /* 615 * bus map entry point: 616 * 617 * if map request is for an rnumber 618 * get the corresponding regspec from device node 619 * build a new regspec in our parent's format 620 * build a new map_req with the new regspec 621 * call up the tree to complete the mapping 622 */ 623 int 624 px_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, 625 off_t off, off_t len, caddr_t *addrp) 626 { 627 px_t *px_p = DIP_TO_STATE(dip); 628 struct regspec p_regspec; 629 ddi_map_req_t p_mapreq; 630 int reglen, rval, r_no; 631 pci_regspec_t reloc_reg, *rp = &reloc_reg; 632 633 DBG(DBG_MAP, dip, "rdip=%s%d:", 634 ddi_driver_name(rdip), ddi_get_instance(rdip)); 635 636 if (mp->map_flags & DDI_MF_USER_MAPPING) 637 return (DDI_ME_UNIMPLEMENTED); 638 639 switch (mp->map_type) { 640 case DDI_MT_REGSPEC: 641 reloc_reg = *(pci_regspec_t *)mp->map_obj.rp; /* dup whole */ 642 break; 643 644 case DDI_MT_RNUMBER: 645 r_no = mp->map_obj.rnumber; 646 DBG(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no); 647 648 if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS, 
649 "reg", (caddr_t)&rp, ®len) != DDI_SUCCESS) 650 return (DDI_ME_RNUMBER_RANGE); 651 652 if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) { 653 kmem_free(rp, reglen); 654 return (DDI_ME_RNUMBER_RANGE); 655 } 656 rp += r_no; 657 break; 658 659 default: 660 return (DDI_ME_INVAL); 661 } 662 DBG(DBG_MAP | DBG_CONT, dip, "\n"); 663 664 if ((rp->pci_phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) { 665 /* 666 * There may be a need to differentiate between PCI 667 * and PCI-Ex devices so the following range check is 668 * done correctly, depending on the implementation of 669 * px_pci bridge nexus driver. 670 */ 671 if ((off >= PCIE_CONF_HDR_SIZE) || 672 (len > PCIE_CONF_HDR_SIZE) || 673 (off + len > PCIE_CONF_HDR_SIZE)) 674 return (DDI_ME_INVAL); 675 /* 676 * the following function returning a DDI_FAILURE assumes 677 * that there are no virtual config space access services 678 * defined in this layer. Otherwise it is availed right 679 * here and we return. 680 */ 681 rval = px_lib_map_vconfig(dip, mp, off, rp, addrp); 682 if (rval == DDI_SUCCESS) 683 goto done; 684 } 685 686 /* 687 * No virtual config space services or we are mapping 688 * a region of memory mapped config/IO/memory space, so proceed 689 * to the parent. 690 */ 691 692 /* relocate within 64-bit pci space through "assigned-addresses" */ 693 if (rval = px_reloc_reg(dip, rdip, px_p, rp)) 694 goto done; 695 696 if (len) /* adjust regspec according to mapping request */ 697 rp->pci_size_low = len; /* MIN ? */ 698 rp->pci_phys_low += off; 699 700 /* translate relocated pci regspec into parent space through "ranges" */ 701 if (rval = px_xlate_reg(px_p, rp, &p_regspec)) 702 goto done; 703 704 p_mapreq = *mp; /* dup the whole structure */ 705 p_mapreq.map_type = DDI_MT_REGSPEC; 706 p_mapreq.map_obj.rp = &p_regspec; 707 px_lib_map_attr_check(&p_mapreq); 708 rval = ddi_map(dip, &p_mapreq, 0, 0, addrp); 709 710 if (rval == DDI_SUCCESS) { 711 /* 712 * Set-up access functions for FM access error capable drivers. 
713 */ 714 if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) && 715 mp->map_handlep->ah_acc.devacc_attr_access != 716 DDI_DEFAULT_ACC) 717 px_fm_acc_setup(mp, rdip); 718 } 719 720 done: 721 if (mp->map_type == DDI_MT_RNUMBER) 722 kmem_free(rp - r_no, reglen); 723 724 return (rval); 725 } 726 727 /* 728 * bus dma map entry point 729 * return value: 730 * DDI_DMA_PARTIAL_MAP 1 731 * DDI_DMA_MAPOK 0 732 * DDI_DMA_MAPPED 0 733 * DDI_DMA_NORESOURCES -1 734 * DDI_DMA_NOMAPPING -2 735 * DDI_DMA_TOOBIG -3 736 */ 737 int 738 px_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq, 739 ddi_dma_handle_t *handlep) 740 { 741 px_t *px_p = DIP_TO_STATE(dip); 742 px_mmu_t *mmu_p = px_p->px_mmu_p; 743 ddi_dma_impl_t *mp; 744 int ret; 745 746 DBG(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n", 747 ddi_driver_name(rdip), ddi_get_instance(rdip), 748 handlep ? "alloc" : "advisory"); 749 750 if (!(mp = px_dma_lmts2hdl(dip, rdip, mmu_p, dmareq))) 751 return (DDI_DMA_NORESOURCES); 752 if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING) 753 return (DDI_DMA_NOMAPPING); 754 if (ret = px_dma_type(px_p, dmareq, mp)) 755 goto freehandle; 756 if (ret = px_dma_pfn(px_p, dmareq, mp)) 757 goto freehandle; 758 759 switch (PX_DMA_TYPE(mp)) { 760 case PX_DMAI_FLAGS_DVMA: /* LINTED E_EQUALITY_NOT_ASSIGNMENT */ 761 if ((ret = px_dvma_win(px_p, dmareq, mp)) || !handlep) 762 goto freehandle; 763 if (!PX_DMA_CANCACHE(mp)) { /* try fast track */ 764 if (PX_DMA_CANFAST(mp)) { 765 if (!px_dvma_map_fast(mmu_p, mp)) 766 break; 767 /* LINTED E_NOP_ELSE_STMT */ 768 } else { 769 PX_DVMA_FASTTRAK_PROF(mp); 770 } 771 } 772 if (ret = px_dvma_map(mp, dmareq, mmu_p)) 773 goto freehandle; 774 break; 775 case PX_DMAI_FLAGS_PTP: /* LINTED E_EQUALITY_NOT_ASSIGNMENT */ 776 if ((ret = px_dma_physwin(px_p, dmareq, mp)) || !handlep) 777 goto freehandle; 778 break; 779 case PX_DMAI_FLAGS_BYPASS: 780 default: 781 cmn_err(CE_PANIC, "%s%d: px_dma_setup: bad dma type 0x%x", 782 ddi_driver_name(rdip), ddi_get_instance(rdip), 
783 PX_DMA_TYPE(mp)); 784 /*NOTREACHED*/ 785 } 786 *handlep = (ddi_dma_handle_t)mp; 787 mp->dmai_flags |= PX_DMAI_FLAGS_INUSE; 788 px_dump_dma_handle(DBG_DMA_MAP, dip, mp); 789 790 return ((mp->dmai_nwin == 1) ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP); 791 freehandle: 792 if (ret == DDI_DMA_NORESOURCES) 793 px_dma_freemp(mp); /* don't run_callback() */ 794 else 795 (void) px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp); 796 return (ret); 797 } 798 799 800 /* 801 * bus dma alloc handle entry point: 802 */ 803 int 804 px_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp, 805 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 806 { 807 px_t *px_p = DIP_TO_STATE(dip); 808 ddi_dma_impl_t *mp; 809 int rval; 810 811 DBG(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n", 812 ddi_driver_name(rdip), ddi_get_instance(rdip)); 813 814 if (attrp->dma_attr_version != DMA_ATTR_V0) 815 return (DDI_DMA_BADATTR); 816 817 if (!(mp = px_dma_allocmp(dip, rdip, waitfp, arg))) 818 return (DDI_DMA_NORESOURCES); 819 820 /* 821 * Save requestor's information 822 */ 823 mp->dmai_attr = *attrp; /* whole object - augmented later */ 824 *PX_DEV_ATTR(mp) = *attrp; /* whole object - device orig attr */ 825 DBG(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp); 826 827 /* check and convert dma attributes to handle parameters */ 828 if (rval = px_dma_attr2hdl(px_p, mp)) { 829 px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp); 830 *handlep = NULL; 831 return (rval); 832 } 833 *handlep = (ddi_dma_handle_t)mp; 834 return (DDI_SUCCESS); 835 } 836 837 838 /* 839 * bus dma free handle entry point: 840 */ 841 /*ARGSUSED*/ 842 int 843 px_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 844 { 845 DBG(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n", 846 ddi_driver_name(rdip), ddi_get_instance(rdip), handle); 847 px_dma_freemp((ddi_dma_impl_t *)handle); 848 849 if (px_kmem_clid) { 850 DBG(DBG_DMA_FREEH, dip, "run handle callback\n"); 851 ddi_run_callback(&px_kmem_clid); 852 } 853 return 
(DDI_SUCCESS); 854 } 855 856 857 /* 858 * bus dma bind handle entry point: 859 */ 860 int 861 px_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 862 ddi_dma_handle_t handle, ddi_dma_req_t *dmareq, 863 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 864 { 865 px_t *px_p = DIP_TO_STATE(dip); 866 px_mmu_t *mmu_p = px_p->px_mmu_p; 867 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 868 int ret; 869 870 DBG(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n", 871 ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq); 872 873 if (mp->dmai_flags & PX_DMAI_FLAGS_INUSE) 874 return (DDI_DMA_INUSE); 875 876 ASSERT((mp->dmai_flags & ~PX_DMAI_FLAGS_PRESERVE) == 0); 877 mp->dmai_flags |= PX_DMAI_FLAGS_INUSE; 878 879 if (ret = px_dma_type(px_p, dmareq, mp)) 880 goto err; 881 if (ret = px_dma_pfn(px_p, dmareq, mp)) 882 goto err; 883 884 switch (PX_DMA_TYPE(mp)) { 885 case PX_DMAI_FLAGS_DVMA: 886 if (ret = px_dvma_win(px_p, dmareq, mp)) 887 goto map_err; 888 if (!PX_DMA_CANCACHE(mp)) { /* try fast track */ 889 if (PX_DMA_CANFAST(mp)) { 890 if (!px_dvma_map_fast(mmu_p, mp)) 891 goto mapped; /*LINTED E_NOP_ELSE_STMT*/ 892 } else { 893 PX_DVMA_FASTTRAK_PROF(mp); 894 } 895 } 896 if (ret = px_dvma_map(mp, dmareq, mmu_p)) 897 goto map_err; 898 mapped: 899 *ccountp = 1; 900 MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size); 901 break; 902 case PX_DMAI_FLAGS_BYPASS: 903 case PX_DMAI_FLAGS_PTP: 904 if (ret = px_dma_physwin(px_p, dmareq, mp)) 905 goto map_err; 906 *ccountp = PX_WINLST(mp)->win_ncookies; 907 *cookiep = 908 *(ddi_dma_cookie_t *)(PX_WINLST(mp) + 1); /* wholeobj */ 909 break; 910 default: 911 cmn_err(CE_PANIC, "%s%d: px_dma_bindhdl(%p): bad dma type", 912 ddi_driver_name(rdip), ddi_get_instance(rdip), mp); 913 /*NOTREACHED*/ 914 } 915 DBG(DBG_DMA_BINDH, dip, "cookie %" PRIx64 "+%x\n", 916 cookiep->dmac_address, cookiep->dmac_size); 917 px_dump_dma_handle(DBG_DMA_MAP, dip, mp); 918 919 /* insert dma handle into FMA cache */ 920 if (mp->dmai_attr.dma_attr_flags & 
DDI_DMA_FLAGERR) 921 (void) ndi_fmc_insert(rdip, DMA_HANDLE, mp, NULL); 922 923 return (mp->dmai_nwin == 1 ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP); 924 map_err: 925 px_dma_freepfn(mp); 926 err: 927 mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE; 928 return (ret); 929 } 930 931 932 /* 933 * bus dma unbind handle entry point: 934 */ 935 /*ARGSUSED*/ 936 int 937 px_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 938 { 939 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 940 px_t *px_p = DIP_TO_STATE(dip); 941 px_mmu_t *mmu_p = px_p->px_mmu_p; 942 943 DBG(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n", 944 ddi_driver_name(rdip), ddi_get_instance(rdip), handle); 945 if ((mp->dmai_flags & PX_DMAI_FLAGS_INUSE) == 0) { 946 DBG(DBG_DMA_UNBINDH, dip, "handle not inuse\n"); 947 return (DDI_FAILURE); 948 } 949 950 /* remove dma handle from FMA cache */ 951 if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) { 952 if (DEVI(rdip)->devi_fmhdl != NULL && 953 DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap)) { 954 (void) ndi_fmc_remove(rdip, DMA_HANDLE, mp); 955 } 956 } 957 958 /* 959 * Here if the handle is using the iommu. Unload all the iommu 960 * translations. 
961 */ 962 switch (PX_DMA_TYPE(mp)) { 963 case PX_DMAI_FLAGS_DVMA: 964 px_mmu_unmap_window(mmu_p, mp); 965 px_dvma_unmap(mmu_p, mp); 966 px_dma_freepfn(mp); 967 break; 968 case PX_DMAI_FLAGS_BYPASS: 969 case PX_DMAI_FLAGS_PTP: 970 px_dma_freewin(mp); 971 break; 972 default: 973 cmn_err(CE_PANIC, "%s%d: px_dma_unbindhdl:bad dma type %p", 974 ddi_driver_name(rdip), ddi_get_instance(rdip), mp); 975 /*NOTREACHED*/ 976 } 977 if (mmu_p->mmu_dvma_clid != 0) { 978 DBG(DBG_DMA_UNBINDH, dip, "run dvma callback\n"); 979 ddi_run_callback(&mmu_p->mmu_dvma_clid); 980 } 981 if (px_kmem_clid) { 982 DBG(DBG_DMA_UNBINDH, dip, "run handle callback\n"); 983 ddi_run_callback(&px_kmem_clid); 984 } 985 mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE; 986 987 return (DDI_SUCCESS); 988 } 989 990 /* 991 * bus dma win entry point: 992 */ 993 int 994 px_dma_win(dev_info_t *dip, dev_info_t *rdip, 995 ddi_dma_handle_t handle, uint_t win, off_t *offp, 996 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 997 { 998 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 999 int ret; 1000 1001 DBG(DBG_DMA_WIN, dip, "rdip=%s%d\n", 1002 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1003 1004 px_dump_dma_handle(DBG_DMA_WIN, dip, mp); 1005 if (win >= mp->dmai_nwin) { 1006 DBG(DBG_DMA_WIN, dip, "%x out of range\n", win); 1007 return (DDI_FAILURE); 1008 } 1009 1010 switch (PX_DMA_TYPE(mp)) { 1011 case PX_DMAI_FLAGS_DVMA: 1012 if (win != PX_DMA_CURWIN(mp)) { 1013 px_t *px_p = DIP_TO_STATE(dip); 1014 px_mmu_t *mmu_p = px_p->px_mmu_p; 1015 px_mmu_unmap_window(mmu_p, mp); 1016 1017 /* map_window sets dmai_mapping/size/offset */ 1018 px_mmu_map_window(mmu_p, mp, win); 1019 if ((ret = px_mmu_map_window(mmu_p, 1020 mp, win)) != DDI_SUCCESS) 1021 return (ret); 1022 } 1023 if (cookiep) 1024 MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, 1025 mp->dmai_size); 1026 if (ccountp) 1027 *ccountp = 1; 1028 break; 1029 case PX_DMAI_FLAGS_PTP: 1030 case PX_DMAI_FLAGS_BYPASS: { 1031 int i; 1032 ddi_dma_cookie_t *ck_p; 1033 
		px_dma_win_t *win_p = mp->dmai_winlst;

		/* walk the window list to the requested window (empty body) */
		for (i = 0; i < win; win_p = win_p->win_next, i++);
		ck_p = (ddi_dma_cookie_t *)(win_p + 1);
		*cookiep = *ck_p;
		mp->dmai_offset = win_p->win_offset;
		mp->dmai_size = win_p->win_size;
		mp->dmai_mapping = ck_p->dmac_laddress;
		mp->dmai_cookie = ck_p + 1;	/* next cookie for get-cookie */
		win_p->win_curseg = 0;
		if (ccountp)
			*ccountp = win_p->win_ncookies;
	}
	break;
	default:
		cmn_err(CE_WARN, "%s%d: px_dma_win:bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PX_DMA_TYPE(mp));
		return (DDI_FAILURE);
	}
	if (cookiep)
		DBG(DBG_DMA_WIN, dip,
		    "cookie - dmac_address=%x dmac_size=%x\n",
		    cookiep->dmac_address, cookiep->dmac_size);
	if (offp)
		*offp = (off_t)mp->dmai_offset;
	if (lenp)
		*lenp = mp->dmai_size;
	return (DDI_SUCCESS);
}

#ifdef DEBUG
/* names indexed by enum ddi_dma_ctlops, used only by the DBG() below */
static char *px_dmactl_str[] = {
	"DDI_DMA_FREE",
	"DDI_DMA_SYNC",
	"DDI_DMA_HTOC",
	"DDI_DMA_KVADDR",
	"DDI_DMA_MOVWIN",
	"DDI_DMA_REPWIN",
	"DDI_DMA_GETERR",
	"DDI_DMA_COFF",
	"DDI_DMA_NEXTWIN",
	"DDI_DMA_NEXTSEG",
	"DDI_DMA_SEGTOC",
	"DDI_DMA_RESERVE",
	"DDI_DMA_RELEASE",
	"DDI_DMA_RESETH",
	"DDI_DMA_CKSYNC",
	"DDI_DMA_IOPB_ALLOC",
	"DDI_DMA_IOPB_FREE",
	"DDI_DMA_SMEM_ALLOC",
	"DDI_DMA_SMEM_FREE",
	"DDI_DMA_SET_SBUS64"
};
#endif	/* DEBUG */

/*
 * bus dma control entry point:
 *
 * FREE/RESERVE/RELEASE are handled here; everything else is dispatched
 * to the DVMA or physical-window ctl helper based on the handle's type.
 */
/*ARGSUSED*/
int
px_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
	enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp,
	uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;

#ifdef DEBUG
	DBG(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", px_dmactl_str[cmd],
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
#endif	/* DEBUG */

	switch (cmd) {
	case DDI_DMA_FREE:
		(void)
		    px_dma_unbindhdl(dip, rdip, handle);
		(void) px_dma_freehdl(dip, rdip, handle);
		return (DDI_SUCCESS);
	case DDI_DMA_RESERVE: {
		px_t *px_p = DIP_TO_STATE(dip);
		return (px_fdvma_reserve(dip, rdip, px_p,
		    (ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp));
	}
	case DDI_DMA_RELEASE: {
		px_t *px_p = DIP_TO_STATE(dip);
		return (px_fdvma_release(dip, px_p, mp));
	}
	default:
		break;
	}

	switch (PX_DMA_TYPE(mp)) {
	case PX_DMAI_FLAGS_DVMA:
		return (px_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	case PX_DMAI_FLAGS_PTP:
	case PX_DMAI_FLAGS_BYPASS:
		return (px_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_ctlops(%x):bad dma type %x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), cmd,
		    mp->dmai_flags);
		/*NOTREACHED*/
	}
	return (0);
}

/*
 * control ops entry point:
 *
 * Requests handled completely:
 *	DDI_CTLOPS_INITCHILD	see init_child() for details
 *	DDI_CTLOPS_UNINITCHILD
 *	DDI_CTLOPS_REPORTDEV	see report_dev() for details
 *	DDI_CTLOPS_IOMIN	cache line size if streaming otherwise 1
 *	DDI_CTLOPS_REGSIZE
 *	DDI_CTLOPS_NREGS
 *	DDI_CTLOPS_DVMAPAGESIZE
 *	DDI_CTLOPS_POKE
 *	DDI_CTLOPS_PEEK
 *
 * All others passed to parent.
 */
int
px_ctlops(dev_info_t *dip, dev_info_t *rdip,
	ddi_ctl_enum_t op, void *arg, void *result)
{
	px_t *px_p = DIP_TO_STATE(dip);
	struct detachspec *ds;
	struct attachspec *as;

	switch (op) {
	case DDI_CTLOPS_INITCHILD:
		return (px_init_child(px_p, (dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		return (px_uninit_child(px_p, (dev_info_t *)arg));

	case DDI_CTLOPS_ATTACH:
		as = (struct attachspec *)arg;
		switch (as->when) {
		case DDI_PRE:
			if (as->cmd == DDI_ATTACH) {
				DBG(DBG_PWR, dip, "PRE_ATTACH for %s@%d\n",
				    ddi_driver_name(rdip),
				    ddi_get_instance(rdip));
				/* hold the bus at full power while child attaches */
				return (pcie_pm_hold(dip));
			}
			if (as->cmd == DDI_RESUME) {
				ddi_acc_handle_t	config_handle;
				DBG(DBG_PWR, dip, "PRE_RESUME for %s@%d\n",
				    ddi_driver_name(rdip),
				    ddi_get_instance(rdip));

				/* best-effort: scrub stale errors before resume */
				if (pci_config_setup(rdip, &config_handle) ==
				    DDI_SUCCESS) {
					pcie_clear_errors(rdip, config_handle);
					pci_config_teardown(&config_handle);
				}
			}
			return (DDI_SUCCESS);

		case DDI_POST:
			DBG(DBG_PWR, dip, "POST_ATTACH for %s@%d\n",
			    ddi_driver_name(rdip), ddi_get_instance(rdip));
			/* release the PRE hold if the child's attach failed */
			if (as->cmd == DDI_ATTACH && as->result != DDI_SUCCESS)
				pcie_pm_release(dip);
			return (DDI_SUCCESS);
		default:
			break;
		}
		break;

	case DDI_CTLOPS_DETACH:
		ds = (struct detachspec *)arg;
		switch (ds->when) {
		case DDI_POST:
			if (ds->cmd == DDI_DETACH &&
			    ds->result == DDI_SUCCESS) {
				DBG(DBG_PWR, dip, "POST_DETACH for %s@%d\n",
				    ddi_driver_name(rdip),
				    ddi_get_instance(rdip));
				return (pcie_pm_remove_child(dip, rdip));
			}
			return (DDI_SUCCESS);
		default:
			break;
		}
		break;

	case DDI_CTLOPS_REPORTDEV:
		return (px_report_dev(rdip));

	case DDI_CTLOPS_IOMIN:
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
		*((off_t *)result) =
px_get_reg_set_size(rdip, *((int *)arg)); 1231 return (*((off_t *)result) == 0 ? DDI_FAILURE : DDI_SUCCESS); 1232 1233 case DDI_CTLOPS_NREGS: 1234 *((uint_t *)result) = px_get_nreg_set(rdip); 1235 return (DDI_SUCCESS); 1236 1237 case DDI_CTLOPS_DVMAPAGESIZE: 1238 *((ulong_t *)result) = MMU_PAGE_SIZE; 1239 return (DDI_SUCCESS); 1240 1241 case DDI_CTLOPS_POKE: /* platform dependent implementation. */ 1242 return (px_lib_ctlops_poke(dip, rdip, 1243 (peekpoke_ctlops_t *)arg)); 1244 1245 case DDI_CTLOPS_PEEK: /* platform dependent implementation. */ 1246 return (px_lib_ctlops_peek(dip, rdip, 1247 (peekpoke_ctlops_t *)arg, result)); 1248 1249 case DDI_CTLOPS_POWER: 1250 default: 1251 break; 1252 } 1253 1254 /* 1255 * Now pass the request up to our parent. 1256 */ 1257 DBG(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n", 1258 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1259 return (ddi_ctlops(dip, rdip, op, arg, result)); 1260 } 1261 1262 /* ARGSUSED */ 1263 int 1264 px_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op, 1265 ddi_intr_handle_impl_t *hdlp, void *result) 1266 { 1267 int intr_types, ret = DDI_SUCCESS; 1268 1269 DBG(DBG_INTROPS, dip, "px_intr_ops: rdip=%s%d\n", 1270 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1271 1272 /* Process DDI_INTROP_SUPPORTED_TYPES request here */ 1273 if (intr_op == DDI_INTROP_SUPPORTED_TYPES) { 1274 *(int *)result = i_ddi_get_nintrs(rdip) ? 1275 DDI_INTR_TYPE_FIXED : 0; 1276 1277 if ((pci_msi_get_supported_type(rdip, 1278 &intr_types)) == DDI_SUCCESS) { 1279 /* 1280 * Double check supported interrupt types vs. 1281 * what the host bridge supports. 1282 * 1283 * NOTE: 1284 * Currently MSI-X is disabled since px driver 1285 * don't fully support this feature. 1286 */ 1287 *(int *)result |= (intr_types & DDI_INTR_TYPE_MSI); 1288 } 1289 1290 return (ret); 1291 } 1292 1293 /* 1294 * PCI-E nexus driver supports fixed, MSI and MSI-X interrupts. 1295 * Return failure if interrupt type is not supported. 
1296 */ 1297 switch (hdlp->ih_type) { 1298 case DDI_INTR_TYPE_FIXED: 1299 ret = px_intx_ops(dip, rdip, intr_op, hdlp, result); 1300 break; 1301 case DDI_INTR_TYPE_MSI: 1302 case DDI_INTR_TYPE_MSIX: 1303 ret = px_msix_ops(dip, rdip, intr_op, hdlp, result); 1304 break; 1305 default: 1306 ret = DDI_ENOTSUP; 1307 break; 1308 } 1309 1310 return (ret); 1311 } 1312 1313 static uint_t 1314 px_init_hotplug(px_t *px_p) 1315 { 1316 px_bus_range_t bus_range; 1317 dev_info_t *dip; 1318 pciehpc_regops_t regops; 1319 1320 dip = px_p->px_dip; 1321 1322 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1323 "hotplug-capable") == 0) 1324 return (DDI_FAILURE); 1325 1326 /* 1327 * Before initializing hotplug - open up bus range. The busra 1328 * module will initialize its pool of bus numbers from this. 1329 * "busra" will be the agent that keeps track of them during 1330 * hotplug. Also, note, that busra will remove any bus numbers 1331 * already in use from boot time. 1332 */ 1333 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1334 "bus-range") == 0) { 1335 cmn_err(CE_WARN, "%s%d: bus-range not found\n", 1336 ddi_driver_name(dip), ddi_get_instance(dip)); 1337 #ifdef DEBUG 1338 bus_range.lo = 0x0; 1339 bus_range.hi = 0xff; 1340 1341 if (ndi_prop_update_int_array(DDI_DEV_T_NONE, 1342 dip, "bus-range", (int *)&bus_range, 2) 1343 != DDI_PROP_SUCCESS) { 1344 return (DDI_FAILURE); 1345 } 1346 #else 1347 return (DDI_FAILURE); 1348 #endif 1349 } 1350 1351 if (px_lib_hotplug_init(dip, (void *)®ops) != DDI_SUCCESS) 1352 return (DDI_FAILURE); 1353 1354 if (pciehpc_init(dip, ®ops) != DDI_SUCCESS) { 1355 px_lib_hotplug_uninit(dip); 1356 return (DDI_FAILURE); 1357 } 1358 1359 if (pcihp_init(dip) != DDI_SUCCESS) { 1360 (void) pciehpc_uninit(dip); 1361 px_lib_hotplug_uninit(dip); 1362 return (DDI_FAILURE); 1363 } 1364 1365 if (pcihp_get_cb_ops() != NULL) { 1366 DBG(DBG_ATTACH, dip, "%s%d hotplug enabled", 1367 ddi_driver_name(dip), ddi_get_instance(dip)); 1368 px_p->px_dev_caps 
|= PX_HOTPLUG_CAPABLE; 1369 } 1370 1371 return (DDI_SUCCESS); 1372 } 1373 1374 static uint_t 1375 px_uninit_hotplug(dev_info_t *dip) 1376 { 1377 if (pcihp_uninit(dip) != DDI_SUCCESS) 1378 return (DDI_FAILURE); 1379 1380 if (pciehpc_uninit(dip) != DDI_SUCCESS) 1381 return (DDI_FAILURE); 1382 1383 px_lib_hotplug_uninit(dip); 1384 1385 return (DDI_SUCCESS); 1386 } 1387