1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * PCI Express nexus driver interface 30 */ 31 32 #include <sys/types.h> 33 #include <sys/conf.h> /* nulldev */ 34 #include <sys/stat.h> /* devctl */ 35 #include <sys/kmem.h> 36 #include <sys/sunddi.h> 37 #include <sys/sunndi.h> 38 #include <sys/ddi_impldefs.h> 39 #include <sys/ddi_subrdefs.h> 40 #include <sys/spl.h> 41 #include <sys/epm.h> 42 #include <sys/iommutsb.h> 43 #include <sys/hotplug/pci/pcihp.h> 44 #include <sys/hotplug/pci/pciehpc.h> 45 #include "px_obj.h" 46 #include <sys/pci_tools.h> 47 #include "px_tools_ext.h" 48 #include "pcie_pwr.h" 49 50 /*LINTLIBRARY*/ 51 52 /* 53 * function prototypes for dev ops routines: 54 */ 55 static int px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 56 static int px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 57 static int px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, 58 void *arg, void **result); 59 static int px_cb_attach(px_t *); 60 static void px_cb_detach(px_t *); 61 static int px_pwr_setup(dev_info_t *dip); 62 static void px_pwr_teardown(dev_info_t *dip); 63 64 extern errorq_t *pci_target_queue; 65 66 /* 67 * function prototypes for hotplug routines: 68 */ 69 static int px_init_hotplug(px_t *px_p); 70 static int px_uninit_hotplug(dev_info_t *dip); 71 72 /* 73 * bus ops and dev ops structures: 74 */ 75 static struct bus_ops px_bus_ops = { 76 BUSO_REV, 77 px_map, 78 0, 79 0, 80 0, 81 i_ddi_map_fault, 82 px_dma_setup, 83 px_dma_allochdl, 84 px_dma_freehdl, 85 px_dma_bindhdl, 86 px_dma_unbindhdl, 87 px_lib_dma_sync, 88 px_dma_win, 89 px_dma_ctlops, 90 px_ctlops, 91 ddi_bus_prop_op, 92 ndi_busop_get_eventcookie, 93 ndi_busop_add_eventcall, 94 ndi_busop_remove_eventcall, 95 ndi_post_event, 96 NULL, 97 NULL, /* (*bus_config)(); */ 98 NULL, /* (*bus_unconfig)(); */ 99 px_fm_init_child, /* (*bus_fm_init)(); */ 100 NULL, /* (*bus_fm_fini)(); */ 101 px_bus_enter, /* (*bus_fm_access_enter)(); */ 102 px_bus_exit, /* (*bus_fm_access_fini)(); */ 103 pcie_bus_power, /* (*bus_power)(); */ 104 px_intr_ops /* (*bus_intr_op)(); */ 105 }; 106 107 extern struct cb_ops px_cb_ops; 108 109 static struct dev_ops px_ops = { 110 DEVO_REV, 111 0, 112 px_info, 113 nulldev, 114 0, 115 px_attach, 116 px_detach, 117 nodev, 118 &px_cb_ops, 119 &px_bus_ops, 120 nulldev 121 }; 122 123 /* 124 * module definitions: 125 */ 126 #include <sys/modctl.h> 127 extern struct mod_ops mod_driverops; 128 129 static struct modldrv modldrv = { 130 &mod_driverops, /* Type of module - driver */ 131 "PCI Express nexus driver %I%", /* Name of module. */ 132 &px_ops, /* driver ops */ 133 }; 134 135 static struct modlinkage modlinkage = { 136 MODREV_1, (void *)&modldrv, NULL 137 }; 138 139 /* driver soft state */ 140 void *px_state_p; 141 142 int 143 _init(void) 144 { 145 int e; 146 147 /* 148 * Initialize per-px bus soft state pointer. 149 */ 150 e = ddi_soft_state_init(&px_state_p, sizeof (px_t), 1); 151 if (e != DDI_SUCCESS) 152 return (e); 153 154 /* 155 * Install the module. 156 */ 157 e = mod_install(&modlinkage); 158 if (e != DDI_SUCCESS) 159 ddi_soft_state_fini(&px_state_p); 160 return (e); 161 } 162 163 int 164 _fini(void) 165 { 166 int e; 167 168 /* 169 * Remove the module. 170 */ 171 e = mod_remove(&modlinkage); 172 if (e != DDI_SUCCESS) 173 return (e); 174 /* 175 * Destroy pci_target_queue, and set it to NULL. 176 */ 177 if (pci_target_queue) 178 errorq_destroy(pci_target_queue); 179 180 pci_target_queue = NULL; 181 182 /* Free px soft state */ 183 ddi_soft_state_fini(&px_state_p); 184 185 return (e); 186 } 187 188 int 189 _info(struct modinfo *modinfop) 190 { 191 return (mod_info(&modlinkage, modinfop)); 192 } 193 194 /* ARGSUSED */ 195 static int 196 px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 197 { 198 int instance = getminor((dev_t)arg); 199 px_t *px_p = INST_TO_STATE(instance); 200 201 /* 202 * Allow hotplug to deal with ones it manages 203 * Hot Plug will be done later. 204 */ 205 if (px_p && (px_p->px_dev_caps & PX_HOTPLUG_CAPABLE)) 206 return (pcihp_info(dip, infocmd, arg, result)); 207 208 /* non-hotplug or not attached */ 209 switch (infocmd) { 210 case DDI_INFO_DEVT2INSTANCE: 211 *result = (void *)(intptr_t)instance; 212 return (DDI_SUCCESS); 213 214 case DDI_INFO_DEVT2DEVINFO: 215 if (px_p == NULL) 216 return (DDI_FAILURE); 217 *result = (void *)px_p->px_dip; 218 return (DDI_SUCCESS); 219 220 default: 221 return (DDI_FAILURE); 222 } 223 } 224 225 /* device driver entry points */ 226 /* 227 * attach entry point: 228 */ 229 /*ARGSUSED*/ 230 static int 231 px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 232 { 233 px_t *px_p; /* per bus state pointer */ 234 int instance = DIP_TO_INST(dip); 235 int ret = DDI_SUCCESS; 236 devhandle_t dev_hdl = NULL; 237 238 switch (cmd) { 239 case DDI_ATTACH: 240 DBG(DBG_ATTACH, dip, "DDI_ATTACH\n"); 241 242 /* 243 * Allocate and get the per-px soft state structure. 244 */ 245 if (ddi_soft_state_zalloc(px_state_p, instance) 246 != DDI_SUCCESS) { 247 cmn_err(CE_WARN, "%s%d: can't allocate px state", 248 ddi_driver_name(dip), instance); 249 goto err_bad_px_softstate; 250 } 251 px_p = INST_TO_STATE(instance); 252 px_p->px_dip = dip; 253 mutex_init(&px_p->px_mutex, NULL, MUTEX_DRIVER, NULL); 254 px_p->px_soft_state = PX_SOFT_STATE_CLOSED; 255 px_p->px_open_count = 0; 256 257 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, 258 "device_type", "pciex"); 259 260 /* Initialize px_dbg for high pil printing */ 261 px_dbg_attach(dip, &px_p->px_dbg_hdl); 262 263 /* 264 * Get key properties of the pci bridge node and 265 * determine it's type (psycho, schizo, etc ...). 266 */ 267 if (px_get_props(px_p, dip) == DDI_FAILURE) 268 goto err_bad_px_prop; 269 270 if (px_lib_dev_init(dip, &dev_hdl) != DDI_SUCCESS) 271 goto err_bad_dev_init; 272 273 /* Initialize device handle */ 274 px_p->px_dev_hdl = dev_hdl; 275 276 /* Cache the BDF of the root port nexus */ 277 px_p->px_bdf = px_lib_get_bdf(px_p); 278 279 /* 280 * Initialize interrupt block. Note that this 281 * initialize error handling for the PEC as well. 282 */ 283 if ((ret = px_ib_attach(px_p)) != DDI_SUCCESS) 284 goto err_bad_ib; 285 286 if (px_cb_attach(px_p) != DDI_SUCCESS) 287 goto err_bad_cb; 288 289 /* 290 * Start creating the modules. 291 * Note that attach() routines should 292 * register and enable their own interrupts. 293 */ 294 295 if ((px_mmu_attach(px_p)) != DDI_SUCCESS) 296 goto err_bad_mmu; 297 298 if ((px_msiq_attach(px_p)) != DDI_SUCCESS) 299 goto err_bad_msiq; 300 301 if ((px_msi_attach(px_p)) != DDI_SUCCESS) 302 goto err_bad_msi; 303 304 if ((px_pec_attach(px_p)) != DDI_SUCCESS) 305 goto err_bad_pec; 306 307 if ((px_dma_attach(px_p)) != DDI_SUCCESS) 308 goto err_bad_dma; /* nothing to uninitialize on DMA */ 309 310 if ((px_fm_attach(px_p)) != DDI_SUCCESS) 311 goto err_bad_dma; 312 313 /* 314 * All of the error handlers have been registered 315 * by now so it's time to activate the interrupt. 316 */ 317 if ((ret = px_err_add_intr(&px_p->px_fault)) != DDI_SUCCESS) 318 goto err_bad_intr; 319 320 (void) px_init_hotplug(px_p); 321 322 /* 323 * Create the "devctl" node for hotplug and pcitool support. 324 * For non-hotplug bus, we still need ":devctl" to 325 * support DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls. 326 */ 327 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 328 PCIHP_AP_MINOR_NUM(instance, PCIHP_DEVCTL_MINOR), 329 DDI_NT_NEXUS, 0) != DDI_SUCCESS) { 330 goto err_bad_devctl_node; 331 } 332 333 if (pxtool_init(dip) != DDI_SUCCESS) 334 goto err_bad_pcitool_node; 335 336 /* 337 * power management setup. Even if it fails, attach will 338 * succeed as this is a optional feature. Since we are 339 * always at full power, this is not critical. 340 */ 341 if (pwr_common_setup(dip) != DDI_SUCCESS) { 342 DBG(DBG_PWR, dip, "pwr_common_setup failed\n"); 343 } else if (px_pwr_setup(dip) != DDI_SUCCESS) { 344 DBG(DBG_PWR, dip, "px_pwr_setup failed \n"); 345 pwr_common_teardown(dip); 346 } 347 348 /* 349 * add cpr callback 350 */ 351 px_cpr_add_callb(px_p); 352 353 ddi_report_dev(dip); 354 355 px_p->px_state = PX_ATTACHED; 356 DBG(DBG_ATTACH, dip, "attach success\n"); 357 break; 358 359 err_bad_pcitool_node: 360 ddi_remove_minor_node(dip, "devctl"); 361 err_bad_devctl_node: 362 px_err_rem_intr(&px_p->px_fault); 363 err_bad_intr: 364 px_fm_detach(px_p); 365 err_bad_dma: 366 px_pec_detach(px_p); 367 err_bad_pec: 368 px_msi_detach(px_p); 369 err_bad_msi: 370 px_msiq_detach(px_p); 371 err_bad_msiq: 372 px_mmu_detach(px_p); 373 err_bad_mmu: 374 px_cb_detach(px_p); 375 err_bad_cb: 376 px_ib_detach(px_p); 377 err_bad_ib: 378 if (px_lib_dev_fini(dip) != DDI_SUCCESS) { 379 DBG(DBG_ATTACH, dip, "px_lib_dev_fini failed\n"); 380 } 381 err_bad_dev_init: 382 px_free_props(px_p); 383 err_bad_px_prop: 384 px_dbg_detach(dip, &px_p->px_dbg_hdl); 385 mutex_destroy(&px_p->px_mutex); 386 ddi_soft_state_free(px_state_p, instance); 387 err_bad_px_softstate: 388 ret = DDI_FAILURE; 389 break; 390 391 case DDI_RESUME: 392 DBG(DBG_ATTACH, dip, "DDI_RESUME\n"); 393 394 px_p = INST_TO_STATE(instance); 395 396 mutex_enter(&px_p->px_mutex); 397 398 /* suspend might have not succeeded */ 399 if (px_p->px_state != PX_SUSPENDED) { 400 DBG(DBG_ATTACH, px_p->px_dip, 401 "instance NOT suspended\n"); 402 ret = DDI_FAILURE; 403 break; 404 } 405 406 px_msiq_resume(px_p); 407 px_lib_resume(dip); 408 (void) pcie_pwr_resume(dip); 409 px_p->px_state = PX_ATTACHED; 410 411 mutex_exit(&px_p->px_mutex); 412 413 break; 414 default: 415 DBG(DBG_ATTACH, dip, "unsupported attach op\n"); 416 ret = DDI_FAILURE; 417 break; 418 } 419 420 return (ret); 421 } 422 423 /* 424 * detach entry point: 425 */ 426 /*ARGSUSED*/ 427 static int 428 px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 429 { 430 int instance = ddi_get_instance(dip); 431 px_t *px_p = INST_TO_STATE(instance); 432 int ret; 433 434 /* 435 * Make sure we are currently attached 436 */ 437 if (px_p->px_state != PX_ATTACHED) { 438 DBG(DBG_DETACH, dip, "Instance not attached\n"); 439 return (DDI_FAILURE); 440 } 441 442 mutex_enter(&px_p->px_mutex); 443 444 switch (cmd) { 445 case DDI_DETACH: 446 DBG(DBG_DETACH, dip, "DDI_DETACH\n"); 447 448 /* 449 * remove cpr callback 450 */ 451 px_cpr_rem_callb(px_p); 452 453 if (px_p->px_dev_caps & PX_HOTPLUG_CAPABLE) 454 if (px_uninit_hotplug(dip) != DDI_SUCCESS) { 455 mutex_exit(&px_p->px_mutex); 456 return (DDI_FAILURE); 457 } 458 459 /* 460 * things which used to be done in obj_destroy 461 * are now in-lined here. 462 */ 463 464 px_p->px_state = PX_DETACHED; 465 466 pxtool_uninit(dip); 467 468 ddi_remove_minor_node(dip, "devctl"); 469 px_err_rem_intr(&px_p->px_fault); 470 px_fm_detach(px_p); 471 px_pec_detach(px_p); 472 px_pwr_teardown(dip); 473 pwr_common_teardown(dip); 474 px_msi_detach(px_p); 475 px_msiq_detach(px_p); 476 px_mmu_detach(px_p); 477 px_cb_detach(px_p); 478 px_ib_detach(px_p); 479 if (px_lib_dev_fini(dip) != DDI_SUCCESS) { 480 DBG(DBG_DETACH, dip, "px_lib_dev_fini failed\n"); 481 } 482 483 /* 484 * Free the px soft state structure and the rest of the 485 * resources it's using. 486 */ 487 px_free_props(px_p); 488 px_dbg_detach(dip, &px_p->px_dbg_hdl); 489 mutex_exit(&px_p->px_mutex); 490 mutex_destroy(&px_p->px_mutex); 491 492 /* Free the interrupt-priorities prop if we created it. */ 493 { 494 int len; 495 496 if (ddi_getproplen(DDI_DEV_T_ANY, dip, 497 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 498 "interrupt-priorities", &len) == DDI_PROP_SUCCESS) 499 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, 500 "interrupt-priorities"); 501 } 502 503 px_p->px_dev_hdl = NULL; 504 ddi_soft_state_free(px_state_p, instance); 505 506 return (DDI_SUCCESS); 507 508 case DDI_SUSPEND: 509 if (pcie_pwr_suspend(dip) != DDI_SUCCESS) { 510 mutex_exit(&px_p->px_mutex); 511 return (DDI_FAILURE); 512 } 513 if ((ret = px_lib_suspend(dip)) == DDI_SUCCESS) 514 px_p->px_state = PX_SUSPENDED; 515 mutex_exit(&px_p->px_mutex); 516 517 return (ret); 518 519 default: 520 DBG(DBG_DETACH, dip, "unsupported detach op\n"); 521 mutex_exit(&px_p->px_mutex); 522 return (DDI_FAILURE); 523 } 524 } 525 526 int 527 px_cb_attach(px_t *px_p) 528 { 529 px_fault_t *fault_p = &px_p->px_cb_fault; 530 dev_info_t *dip = px_p->px_dip; 531 sysino_t sysino; 532 533 if (px_lib_intr_devino_to_sysino(dip, 534 px_p->px_inos[PX_INTR_XBC], &sysino) != DDI_SUCCESS) 535 return (DDI_FAILURE); 536 537 fault_p->px_fh_dip = dip; 538 fault_p->px_fh_sysino = sysino; 539 fault_p->px_err_func = px_err_cb_intr; 540 fault_p->px_intr_ino = px_p->px_inos[PX_INTR_XBC]; 541 542 return (px_cb_add_intr(fault_p)); 543 } 544 545 void 546 px_cb_detach(px_t *px_p) 547 { 548 px_cb_rem_intr(&px_p->px_cb_fault); 549 } 550 551 /* 552 * power management related initialization specific to px 553 * called by px_attach() 554 */ 555 static int 556 px_pwr_setup(dev_info_t *dip) 557 { 558 pcie_pwr_t *pwr_p; 559 int instance = ddi_get_instance(dip); 560 px_t *px_p = INST_TO_STATE(instance); 561 ddi_intr_handle_impl_t hdl; 562 563 ASSERT(PCIE_PMINFO(dip)); 564 pwr_p = PCIE_NEXUS_PMINFO(dip); 565 ASSERT(pwr_p); 566 567 /* 568 * indicate support LDI (Layered Driver Interface) 569 * Create the property, if it is not already there 570 */ 571 if (!ddi_prop_exists(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS, 572 DDI_KERNEL_IOCTL)) { 573 if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 574 DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS) { 575 DBG(DBG_PWR, dip, "can't create kernel ioctl prop\n"); 576 return (DDI_FAILURE); 577 } 578 } 579 /* No support for device PM. We are always at full power */ 580 pwr_p->pwr_func_lvl = PM_LEVEL_D0; 581 582 mutex_init(&px_p->px_l23ready_lock, NULL, MUTEX_DRIVER, 583 DDI_INTR_PRI(px_pwr_pil)); 584 cv_init(&px_p->px_l23ready_cv, NULL, CV_DRIVER, NULL); 585 586 /* Initialize handle */ 587 bzero(&hdl, sizeof (ddi_intr_handle_impl_t)); 588 hdl.ih_cb_arg1 = px_p; 589 hdl.ih_ver = DDI_INTR_VERSION; 590 hdl.ih_state = DDI_IHDL_STATE_ALLOC; 591 hdl.ih_dip = dip; 592 hdl.ih_pri = px_pwr_pil; 593 594 /* Add PME_TO_ACK message handler */ 595 hdl.ih_cb_func = (ddi_intr_handler_t *)px_pmeq_intr; 596 if (px_add_msiq_intr(dip, dip, &hdl, MSG_REC, 597 (msgcode_t)PCIE_PME_ACK_MSG, &px_p->px_pm_msiq_id) != DDI_SUCCESS) { 598 DBG(DBG_PWR, dip, "px_pwr_setup: couldn't add " 599 " PME_TO_ACK intr\n"); 600 goto pwr_setup_err1; 601 } 602 px_lib_msg_setmsiq(dip, PCIE_PME_ACK_MSG, px_p->px_pm_msiq_id); 603 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_VALID); 604 605 if (px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum, 606 px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil, 607 PX_INTR_STATE_ENABLE, MSG_REC, PCIE_PME_ACK_MSG) != DDI_SUCCESS) { 608 DBG(DBG_PWR, dip, "px_pwr_setup: PME_TO_ACK update interrupt" 609 " state failed\n"); 610 goto px_pwrsetup_err_state; 611 } 612 613 return (DDI_SUCCESS); 614 615 px_pwrsetup_err_state: 616 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID); 617 (void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG, 618 px_p->px_pm_msiq_id); 619 pwr_setup_err1: 620 mutex_destroy(&px_p->px_l23ready_lock); 621 cv_destroy(&px_p->px_l23ready_cv); 622 623 return (DDI_FAILURE); 624 } 625 626 /* 627 * undo whatever is done in px_pwr_setup. called by px_detach() 628 */ 629 static void 630 px_pwr_teardown(dev_info_t *dip) 631 { 632 int instance = ddi_get_instance(dip); 633 px_t *px_p = INST_TO_STATE(instance); 634 ddi_intr_handle_impl_t hdl; 635 636 if (!PCIE_PMINFO(dip) || !PCIE_NEXUS_PMINFO(dip)) 637 return; 638 639 /* Initialize handle */ 640 bzero(&hdl, sizeof (ddi_intr_handle_impl_t)); 641 hdl.ih_ver = DDI_INTR_VERSION; 642 hdl.ih_state = DDI_IHDL_STATE_ALLOC; 643 hdl.ih_dip = dip; 644 hdl.ih_pri = px_pwr_pil; 645 646 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID); 647 (void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG, 648 px_p->px_pm_msiq_id); 649 650 (void) px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum, 651 px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil, 652 PX_INTR_STATE_DISABLE, MSG_REC, PCIE_PME_ACK_MSG); 653 654 px_p->px_pm_msiq_id = (msiqid_t)-1; 655 656 cv_destroy(&px_p->px_l23ready_cv); 657 mutex_destroy(&px_p->px_l23ready_lock); 658 } 659 660 /* bus driver entry points */ 661 662 /* 663 * bus map entry point: 664 * 665 * if map request is for an rnumber 666 * get the corresponding regspec from device node 667 * build a new regspec in our parent's format 668 * build a new map_req with the new regspec 669 * call up the tree to complete the mapping 670 */ 671 int 672 px_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, 673 off_t off, off_t len, caddr_t *addrp) 674 { 675 px_t *px_p = DIP_TO_STATE(dip); 676 struct regspec p_regspec; 677 ddi_map_req_t p_mapreq; 678 int reglen, rval, r_no; 679 pci_regspec_t reloc_reg, *rp = &reloc_reg; 680 681 DBG(DBG_MAP, dip, "rdip=%s%d:", 682 ddi_driver_name(rdip), ddi_get_instance(rdip)); 683 684 if (mp->map_flags & DDI_MF_USER_MAPPING) 685 return (DDI_ME_UNIMPLEMENTED); 686 687 switch (mp->map_type) { 688 case DDI_MT_REGSPEC: 689 reloc_reg = *(pci_regspec_t *)mp->map_obj.rp; /* dup whole */ 690 break; 691 692 case DDI_MT_RNUMBER: 693 r_no = mp->map_obj.rnumber; 694 DBG(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no); 695 696 if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS, 697 "reg", (caddr_t)&rp, ®len) != DDI_SUCCESS) 698 return (DDI_ME_RNUMBER_RANGE); 699 700 if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) { 701 kmem_free(rp, reglen); 702 return (DDI_ME_RNUMBER_RANGE); 703 } 704 rp += r_no; 705 break; 706 707 default: 708 return (DDI_ME_INVAL); 709 } 710 DBG(DBG_MAP | DBG_CONT, dip, "\n"); 711 712 if ((rp->pci_phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) { 713 /* 714 * There may be a need to differentiate between PCI 715 * and PCI-Ex devices so the following range check is 716 * done correctly, depending on the implementation of 717 * px_pci bridge nexus driver. 718 */ 719 if ((off >= PCIE_CONF_HDR_SIZE) || 720 (len > PCIE_CONF_HDR_SIZE) || 721 (off + len > PCIE_CONF_HDR_SIZE)) 722 return (DDI_ME_INVAL); 723 /* 724 * the following function returning a DDI_FAILURE assumes 725 * that there are no virtual config space access services 726 * defined in this layer. Otherwise it is availed right 727 * here and we return. 728 */ 729 rval = px_lib_map_vconfig(dip, mp, off, rp, addrp); 730 if (rval == DDI_SUCCESS) 731 goto done; 732 } 733 734 /* 735 * No virtual config space services or we are mapping 736 * a region of memory mapped config/IO/memory space, so proceed 737 * to the parent. 738 */ 739 740 /* relocate within 64-bit pci space through "assigned-addresses" */ 741 if (rval = px_reloc_reg(dip, rdip, px_p, rp)) 742 goto done; 743 744 if (len) /* adjust regspec according to mapping request */ 745 rp->pci_size_low = len; /* MIN ? */ 746 rp->pci_phys_low += off; 747 748 /* translate relocated pci regspec into parent space through "ranges" */ 749 if (rval = px_xlate_reg(px_p, rp, &p_regspec)) 750 goto done; 751 752 p_mapreq = *mp; /* dup the whole structure */ 753 p_mapreq.map_type = DDI_MT_REGSPEC; 754 p_mapreq.map_obj.rp = &p_regspec; 755 px_lib_map_attr_check(&p_mapreq); 756 rval = ddi_map(dip, &p_mapreq, 0, 0, addrp); 757 758 if (rval == DDI_SUCCESS) { 759 /* 760 * Set-up access functions for FM access error capable drivers. 761 */ 762 if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip))) 763 px_fm_acc_setup(mp, rdip, rp); 764 } 765 766 done: 767 if (mp->map_type == DDI_MT_RNUMBER) 768 kmem_free(rp - r_no, reglen); 769 770 return (rval); 771 } 772 773 /* 774 * bus dma map entry point 775 * return value: 776 * DDI_DMA_PARTIAL_MAP 1 777 * DDI_DMA_MAPOK 0 778 * DDI_DMA_MAPPED 0 779 * DDI_DMA_NORESOURCES -1 780 * DDI_DMA_NOMAPPING -2 781 * DDI_DMA_TOOBIG -3 782 */ 783 int 784 px_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq, 785 ddi_dma_handle_t *handlep) 786 { 787 px_t *px_p = DIP_TO_STATE(dip); 788 px_mmu_t *mmu_p = px_p->px_mmu_p; 789 ddi_dma_impl_t *mp; 790 int ret; 791 792 DBG(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n", 793 ddi_driver_name(rdip), ddi_get_instance(rdip), 794 handlep ? "alloc" : "advisory"); 795 796 if (!(mp = px_dma_lmts2hdl(dip, rdip, mmu_p, dmareq))) 797 return (DDI_DMA_NORESOURCES); 798 if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING) 799 return (DDI_DMA_NOMAPPING); 800 if (ret = px_dma_type(px_p, dmareq, mp)) 801 goto freehandle; 802 if (ret = px_dma_pfn(px_p, dmareq, mp)) 803 goto freehandle; 804 805 switch (PX_DMA_TYPE(mp)) { 806 case PX_DMAI_FLAGS_DVMA: /* LINTED E_EQUALITY_NOT_ASSIGNMENT */ 807 if ((ret = px_dvma_win(px_p, dmareq, mp)) || !handlep) 808 goto freehandle; 809 if (!PX_DMA_CANCACHE(mp)) { /* try fast track */ 810 if (PX_DMA_CANFAST(mp)) { 811 if (!px_dvma_map_fast(mmu_p, mp)) 812 break; 813 /* LINTED E_NOP_ELSE_STMT */ 814 } else { 815 PX_DVMA_FASTTRAK_PROF(mp); 816 } 817 } 818 if (ret = px_dvma_map(mp, dmareq, mmu_p)) 819 goto freehandle; 820 break; 821 case PX_DMAI_FLAGS_PTP: /* LINTED E_EQUALITY_NOT_ASSIGNMENT */ 822 if ((ret = px_dma_physwin(px_p, dmareq, mp)) || !handlep) 823 goto freehandle; 824 break; 825 case PX_DMAI_FLAGS_BYPASS: 826 default: 827 cmn_err(CE_PANIC, "%s%d: px_dma_setup: bad dma type 0x%x", 828 ddi_driver_name(rdip), ddi_get_instance(rdip), 829 PX_DMA_TYPE(mp)); 830 /*NOTREACHED*/ 831 } 832 *handlep = (ddi_dma_handle_t)mp; 833 mp->dmai_flags |= PX_DMAI_FLAGS_INUSE; 834 px_dump_dma_handle(DBG_DMA_MAP, dip, mp); 835 836 return ((mp->dmai_nwin == 1) ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP); 837 freehandle: 838 if (ret == DDI_DMA_NORESOURCES) 839 px_dma_freemp(mp); /* don't run_callback() */ 840 else 841 (void) px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp); 842 return (ret); 843 } 844 845 846 /* 847 * bus dma alloc handle entry point: 848 */ 849 int 850 px_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp, 851 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 852 { 853 px_t *px_p = DIP_TO_STATE(dip); 854 ddi_dma_impl_t *mp; 855 int rval; 856 857 DBG(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n", 858 ddi_driver_name(rdip), ddi_get_instance(rdip)); 859 860 if (attrp->dma_attr_version != DMA_ATTR_V0) 861 return (DDI_DMA_BADATTR); 862 863 if (!(mp = px_dma_allocmp(dip, rdip, waitfp, arg))) 864 return (DDI_DMA_NORESOURCES); 865 866 /* 867 * Save requestor's information 868 */ 869 mp->dmai_attr = *attrp; /* whole object - augmented later */ 870 *PX_DEV_ATTR(mp) = *attrp; /* whole object - device orig attr */ 871 DBG(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp); 872 873 /* check and convert dma attributes to handle parameters */ 874 if (rval = px_dma_attr2hdl(px_p, mp)) { 875 px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp); 876 *handlep = NULL; 877 return (rval); 878 } 879 *handlep = (ddi_dma_handle_t)mp; 880 return (DDI_SUCCESS); 881 } 882 883 884 /* 885 * bus dma free handle entry point: 886 */ 887 /*ARGSUSED*/ 888 int 889 px_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 890 { 891 DBG(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n", 892 ddi_driver_name(rdip), ddi_get_instance(rdip), handle); 893 px_dma_freemp((ddi_dma_impl_t *)handle); 894 895 if (px_kmem_clid) { 896 DBG(DBG_DMA_FREEH, dip, "run handle callback\n"); 897 ddi_run_callback(&px_kmem_clid); 898 } 899 return (DDI_SUCCESS); 900 } 901 902 903 /* 904 * bus dma bind handle entry point: 905 */ 906 int 907 px_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 908 ddi_dma_handle_t handle, ddi_dma_req_t *dmareq, 909 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 910 { 911 px_t *px_p = DIP_TO_STATE(dip); 912 px_mmu_t *mmu_p = px_p->px_mmu_p; 913 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 914 int ret; 915 916 DBG(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n", 917 ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq); 918 919 if (mp->dmai_flags & PX_DMAI_FLAGS_INUSE) 920 return (DDI_DMA_INUSE); 921 922 ASSERT((mp->dmai_flags & ~PX_DMAI_FLAGS_PRESERVE) == 0); 923 mp->dmai_flags |= PX_DMAI_FLAGS_INUSE; 924 925 if (ret = px_dma_type(px_p, dmareq, mp)) 926 goto err; 927 if (ret = px_dma_pfn(px_p, dmareq, mp)) 928 goto err; 929 930 switch (PX_DMA_TYPE(mp)) { 931 case PX_DMAI_FLAGS_DVMA: 932 if (ret = px_dvma_win(px_p, dmareq, mp)) 933 goto map_err; 934 if (!PX_DMA_CANCACHE(mp)) { /* try fast track */ 935 if (PX_DMA_CANFAST(mp)) { 936 if (!px_dvma_map_fast(mmu_p, mp)) 937 goto mapped; /*LINTED E_NOP_ELSE_STMT*/ 938 } else { 939 PX_DVMA_FASTTRAK_PROF(mp); 940 } 941 } 942 if (ret = px_dvma_map(mp, dmareq, mmu_p)) 943 goto map_err; 944 mapped: 945 *ccountp = 1; 946 MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size); 947 break; 948 case PX_DMAI_FLAGS_BYPASS: 949 case PX_DMAI_FLAGS_PTP: 950 if (ret = px_dma_physwin(px_p, dmareq, mp)) 951 goto map_err; 952 *ccountp = PX_WINLST(mp)->win_ncookies; 953 *cookiep = 954 *(ddi_dma_cookie_t *)(PX_WINLST(mp) + 1); /* wholeobj */ 955 break; 956 default: 957 cmn_err(CE_PANIC, "%s%d: px_dma_bindhdl(%p): bad dma type", 958 ddi_driver_name(rdip), ddi_get_instance(rdip), mp); 959 /*NOTREACHED*/ 960 } 961 DBG(DBG_DMA_BINDH, dip, "cookie %" PRIx64 "+%x\n", 962 cookiep->dmac_address, cookiep->dmac_size); 963 px_dump_dma_handle(DBG_DMA_MAP, dip, mp); 964 965 /* insert dma handle into FMA cache */ 966 if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) { 967 (void) ndi_fmc_insert(rdip, DMA_HANDLE, mp, NULL); 968 mp->dmai_error.err_cf = px_err_dma_hdl_check; 969 } 970 971 return (mp->dmai_nwin == 1 ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP); 972 map_err: 973 px_dma_freepfn(mp); 974 err: 975 mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE; 976 return (ret); 977 } 978 979 980 /* 981 * bus dma unbind handle entry point: 982 */ 983 /*ARGSUSED*/ 984 int 985 px_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 986 { 987 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 988 px_t *px_p = DIP_TO_STATE(dip); 989 px_mmu_t *mmu_p = px_p->px_mmu_p; 990 991 DBG(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n", 992 ddi_driver_name(rdip), ddi_get_instance(rdip), handle); 993 if ((mp->dmai_flags & PX_DMAI_FLAGS_INUSE) == 0) { 994 DBG(DBG_DMA_UNBINDH, dip, "handle not inuse\n"); 995 return (DDI_FAILURE); 996 } 997 998 /* remove dma handle from FMA cache */ 999 if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) { 1000 if (DEVI(rdip)->devi_fmhdl != NULL && 1001 DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap)) { 1002 (void) ndi_fmc_remove(rdip, DMA_HANDLE, mp); 1003 } 1004 } 1005 1006 /* 1007 * Here if the handle is using the iommu. Unload all the iommu 1008 * translations. 1009 */ 1010 switch (PX_DMA_TYPE(mp)) { 1011 case PX_DMAI_FLAGS_DVMA: 1012 px_mmu_unmap_window(mmu_p, mp); 1013 px_dvma_unmap(mmu_p, mp); 1014 px_dma_freepfn(mp); 1015 break; 1016 case PX_DMAI_FLAGS_BYPASS: 1017 case PX_DMAI_FLAGS_PTP: 1018 px_dma_freewin(mp); 1019 break; 1020 default: 1021 cmn_err(CE_PANIC, "%s%d: px_dma_unbindhdl:bad dma type %p", 1022 ddi_driver_name(rdip), ddi_get_instance(rdip), mp); 1023 /*NOTREACHED*/ 1024 } 1025 if (mmu_p->mmu_dvma_clid != 0) { 1026 DBG(DBG_DMA_UNBINDH, dip, "run dvma callback\n"); 1027 ddi_run_callback(&mmu_p->mmu_dvma_clid); 1028 } 1029 if (px_kmem_clid) { 1030 DBG(DBG_DMA_UNBINDH, dip, "run handle callback\n"); 1031 ddi_run_callback(&px_kmem_clid); 1032 } 1033 mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE; 1034 1035 return (DDI_SUCCESS); 1036 } 1037 1038 /* 1039 * bus dma win entry point: 1040 */ 1041 int 1042 px_dma_win(dev_info_t *dip, dev_info_t *rdip, 1043 ddi_dma_handle_t handle, uint_t win, off_t *offp, 1044 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 1045 { 1046 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 1047 int ret; 1048 1049 DBG(DBG_DMA_WIN, dip, "rdip=%s%d\n", 1050 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1051 1052 px_dump_dma_handle(DBG_DMA_WIN, dip, mp); 1053 if (win >= mp->dmai_nwin) { 1054 DBG(DBG_DMA_WIN, dip, "%x out of range\n", win); 1055 return (DDI_FAILURE); 1056 } 1057 1058 switch (PX_DMA_TYPE(mp)) { 1059 case PX_DMAI_FLAGS_DVMA: 1060 if (win != PX_DMA_CURWIN(mp)) { 1061 px_t *px_p = DIP_TO_STATE(dip); 1062 px_mmu_t *mmu_p = px_p->px_mmu_p; 1063 px_mmu_unmap_window(mmu_p, mp); 1064 1065 /* map_window sets dmai_mapping/size/offset */ 1066 px_mmu_map_window(mmu_p, mp, win); 1067 if ((ret = px_mmu_map_window(mmu_p, 1068 mp, win)) != DDI_SUCCESS) 1069 return (ret); 1070 } 1071 if (cookiep) 1072 MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, 1073 mp->dmai_size); 1074 if (ccountp) 1075 *ccountp = 1; 1076 break; 1077 case PX_DMAI_FLAGS_PTP: 1078 case PX_DMAI_FLAGS_BYPASS: { 1079 int i; 1080 ddi_dma_cookie_t *ck_p; 1081 px_dma_win_t *win_p = mp->dmai_winlst; 1082 1083 for (i = 0; i < win; win_p = win_p->win_next, i++) {}; 1084 ck_p = (ddi_dma_cookie_t *)(win_p + 1); 1085 *cookiep = *ck_p; 1086 mp->dmai_offset = win_p->win_offset; 1087 mp->dmai_size = win_p->win_size; 1088 mp->dmai_mapping = ck_p->dmac_laddress; 1089 mp->dmai_cookie = ck_p + 1; 1090 win_p->win_curseg = 0; 1091 if (ccountp) 1092 *ccountp = win_p->win_ncookies; 1093 } 1094 break; 1095 default: 1096 cmn_err(CE_WARN, "%s%d: px_dma_win:bad dma type 0x%x", 1097 ddi_driver_name(rdip), ddi_get_instance(rdip), 1098 PX_DMA_TYPE(mp)); 1099 return (DDI_FAILURE); 1100 } 1101 if (cookiep) 1102 DBG(DBG_DMA_WIN, dip, 1103 "cookie - dmac_address=%x dmac_size=%x\n", 1104 cookiep->dmac_address, cookiep->dmac_size); 1105 if (offp) 1106 *offp = (off_t)mp->dmai_offset; 1107 if (lenp) 1108 *lenp = mp->dmai_size; 1109 return (DDI_SUCCESS); 1110 } 1111 1112 #ifdef DEBUG 1113 static char *px_dmactl_str[] = { 1114 "DDI_DMA_FREE", 1115 "DDI_DMA_SYNC", 1116 "DDI_DMA_HTOC", 1117 "DDI_DMA_KVADDR", 1118 "DDI_DMA_MOVWIN", 1119 "DDI_DMA_REPWIN", 1120 "DDI_DMA_GETERR", 1121 "DDI_DMA_COFF", 1122 "DDI_DMA_NEXTWIN", 1123 "DDI_DMA_NEXTSEG", 1124 "DDI_DMA_SEGTOC", 1125 "DDI_DMA_RESERVE", 1126 "DDI_DMA_RELEASE", 1127 "DDI_DMA_RESETH", 1128 "DDI_DMA_CKSYNC", 1129 "DDI_DMA_IOPB_ALLOC", 1130 "DDI_DMA_IOPB_FREE", 1131 "DDI_DMA_SMEM_ALLOC", 1132 "DDI_DMA_SMEM_FREE", 1133 "DDI_DMA_SET_SBUS64" 1134 }; 1135 #endif /* DEBUG */ 1136 1137 /* 1138 * bus dma control entry point: 1139 */ 1140 /*ARGSUSED*/ 1141 int 1142 px_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 1143 enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp, 1144 uint_t cache_flags) 1145 { 1146 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 1147 1148 #ifdef DEBUG 1149 DBG(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", px_dmactl_str[cmd], 1150 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1151 #endif /* DEBUG */ 1152 1153 switch (cmd) { 1154 case DDI_DMA_FREE: 1155 (void) px_dma_unbindhdl(dip, rdip, handle); 1156 (void) px_dma_freehdl(dip, rdip, handle); 1157 return (DDI_SUCCESS); 1158 case DDI_DMA_RESERVE: { 1159 px_t *px_p = DIP_TO_STATE(dip); 1160 return (px_fdvma_reserve(dip, rdip, px_p, 1161 (ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp)); 1162 } 1163 case DDI_DMA_RELEASE: { 1164 px_t *px_p = DIP_TO_STATE(dip); 1165 return (px_fdvma_release(dip, px_p, mp)); 1166 } 1167 default: 1168 break; 1169 } 1170 1171 switch (PX_DMA_TYPE(mp)) { 1172 case PX_DMAI_FLAGS_DVMA: 1173 return (px_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp, 1174 cache_flags)); 1175 case PX_DMAI_FLAGS_PTP: 1176 case PX_DMAI_FLAGS_BYPASS: 1177 return (px_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp, 1178 cache_flags)); 1179 default: 1180 cmn_err(CE_PANIC, "%s%d: px_dma_ctlops(%x):bad dma type %x", 1181 ddi_driver_name(rdip), ddi_get_instance(rdip), cmd, 1182 mp->dmai_flags); 1183 /*NOTREACHED*/ 1184 } 1185 return (0); 1186 } 1187 1188 /* 1189 * control ops entry point: 1190 * 1191 * Requests handled completely: 1192 * DDI_CTLOPS_INITCHILD see init_child() for details 1193 * DDI_CTLOPS_UNINITCHILD 1194 * DDI_CTLOPS_REPORTDEV see report_dev() for details 1195 * DDI_CTLOPS_IOMIN cache line size if streaming otherwise 1 1196 * DDI_CTLOPS_REGSIZE 1197 * DDI_CTLOPS_NREGS 1198 * DDI_CTLOPS_DVMAPAGESIZE 1199 * DDI_CTLOPS_POKE 1200 * DDI_CTLOPS_PEEK 1201 * 1202 * All others passed to parent. 1203 */ 1204 int 1205 px_ctlops(dev_info_t *dip, dev_info_t *rdip, 1206 ddi_ctl_enum_t op, void *arg, void *result) 1207 { 1208 px_t *px_p = DIP_TO_STATE(dip); 1209 struct detachspec *ds; 1210 struct attachspec *as; 1211 1212 switch (op) { 1213 case DDI_CTLOPS_INITCHILD: 1214 return (px_init_child(px_p, (dev_info_t *)arg)); 1215 1216 case DDI_CTLOPS_UNINITCHILD: 1217 return (px_uninit_child(px_p, (dev_info_t *)arg)); 1218 1219 case DDI_CTLOPS_ATTACH: 1220 if (!pcie_is_child(dip, rdip)) 1221 return (DDI_SUCCESS); 1222 1223 as = (struct attachspec *)arg; 1224 switch (as->when) { 1225 case DDI_PRE: 1226 if (as->cmd == DDI_ATTACH) { 1227 DBG(DBG_PWR, dip, "PRE_ATTACH for %s@%d\n", 1228 ddi_driver_name(rdip), 1229 ddi_get_instance(rdip)); 1230 return (pcie_pm_hold(dip)); 1231 } 1232 if (as->cmd == DDI_RESUME) { 1233 DBG(DBG_PWR, dip, "PRE_RESUME for %s@%d\n", 1234 ddi_driver_name(rdip), 1235 ddi_get_instance(rdip)); 1236 1237 pcie_clear_errors(rdip); 1238 } 1239 return (DDI_SUCCESS); 1240 1241 case DDI_POST: 1242 DBG(DBG_PWR, dip, "POST_ATTACH for %s@%d\n", 1243 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1244 if (as->cmd == DDI_ATTACH && as->result != DDI_SUCCESS) 1245 pcie_pm_release(dip); 1246 1247 if (as->result == DDI_SUCCESS) 1248 pf_init(rdip, (void *)px_p->px_fm_ibc, as->cmd); 1249 1250 (void) pcie_postattach_child(rdip); 1251 1252 return (DDI_SUCCESS); 1253 default: 1254 break; 1255 } 1256 break; 1257 1258 case DDI_CTLOPS_DETACH: 1259 if (!pcie_is_child(dip, rdip)) 1260 return (DDI_SUCCESS); 1261 1262 ds = (struct detachspec *)arg; 1263 switch (ds->when) { 1264 case DDI_POST: 1265 if (ds->cmd == DDI_DETACH && 1266 ds->result == DDI_SUCCESS) { 1267 DBG(DBG_PWR, dip, "POST_DETACH for %s@%d\n", 1268 ddi_driver_name(rdip), 1269 ddi_get_instance(rdip)); 1270 return (pcie_pm_remove_child(dip, rdip)); 1271 } 1272 return (DDI_SUCCESS); 1273 case DDI_PRE: 1274 pf_fini(rdip, ds->cmd); 1275 return (DDI_SUCCESS); 1276 default: 1277 break; 1278 } 1279 break; 1280 1281 case DDI_CTLOPS_REPORTDEV: 1282 return (px_report_dev(rdip)); 1283 1284 case DDI_CTLOPS_IOMIN: 1285 return (DDI_SUCCESS); 1286 1287 case DDI_CTLOPS_REGSIZE: 1288 *((off_t *)result) = px_get_reg_set_size(rdip, *((int *)arg)); 1289 return (*((off_t *)result) == 0 ? DDI_FAILURE : DDI_SUCCESS); 1290 1291 case DDI_CTLOPS_NREGS: 1292 *((uint_t *)result) = px_get_nreg_set(rdip); 1293 return (DDI_SUCCESS); 1294 1295 case DDI_CTLOPS_DVMAPAGESIZE: 1296 *((ulong_t *)result) = MMU_PAGE_SIZE; 1297 return (DDI_SUCCESS); 1298 1299 case DDI_CTLOPS_POKE: /* platform dependent implementation. */ 1300 return (px_lib_ctlops_poke(dip, rdip, 1301 (peekpoke_ctlops_t *)arg)); 1302 1303 case DDI_CTLOPS_PEEK: /* platform dependent implementation. */ 1304 return (px_lib_ctlops_peek(dip, rdip, 1305 (peekpoke_ctlops_t *)arg, result)); 1306 1307 case DDI_CTLOPS_POWER: 1308 default: 1309 break; 1310 } 1311 1312 /* 1313 * Now pass the request up to our parent. 1314 */ 1315 DBG(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n", 1316 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1317 return (ddi_ctlops(dip, rdip, op, arg, result)); 1318 } 1319 1320 /* ARGSUSED */ 1321 int 1322 px_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op, 1323 ddi_intr_handle_impl_t *hdlp, void *result) 1324 { 1325 int intr_types, ret = DDI_SUCCESS; 1326 1327 DBG(DBG_INTROPS, dip, "px_intr_ops: rdip=%s%d\n", 1328 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1329 1330 /* Process DDI_INTROP_SUPPORTED_TYPES request here */ 1331 if (intr_op == DDI_INTROP_SUPPORTED_TYPES) { 1332 *(int *)result = i_ddi_get_intx_nintrs(rdip) ? 1333 DDI_INTR_TYPE_FIXED : 0; 1334 1335 if ((pci_msi_get_supported_type(rdip, 1336 &intr_types)) == DDI_SUCCESS) { 1337 /* 1338 * Double check supported interrupt types vs. 1339 * what the host bridge supports. 1340 */ 1341 *(int *)result |= intr_types; 1342 } 1343 1344 return (ret); 1345 } 1346 1347 /* 1348 * PCI-E nexus driver supports fixed, MSI and MSI-X interrupts. 1349 * Return failure if interrupt type is not supported. 1350 */ 1351 switch (hdlp->ih_type) { 1352 case DDI_INTR_TYPE_FIXED: 1353 ret = px_intx_ops(dip, rdip, intr_op, hdlp, result); 1354 break; 1355 case DDI_INTR_TYPE_MSI: 1356 case DDI_INTR_TYPE_MSIX: 1357 ret = px_msix_ops(dip, rdip, intr_op, hdlp, result); 1358 break; 1359 default: 1360 ret = DDI_ENOTSUP; 1361 break; 1362 } 1363 1364 return (ret); 1365 } 1366 1367 static int 1368 px_init_hotplug(px_t *px_p) 1369 { 1370 px_bus_range_t bus_range; 1371 dev_info_t *dip; 1372 pciehpc_regops_t regops; 1373 1374 dip = px_p->px_dip; 1375 1376 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1377 "hotplug-capable") == 0) 1378 return (DDI_FAILURE); 1379 1380 /* 1381 * Before initializing hotplug - open up bus range. The busra 1382 * module will initialize its pool of bus numbers from this. 1383 * "busra" will be the agent that keeps track of them during 1384 * hotplug. Also, note, that busra will remove any bus numbers 1385 * already in use from boot time. 1386 */ 1387 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1388 "bus-range") == 0) { 1389 cmn_err(CE_WARN, "%s%d: bus-range not found\n", 1390 ddi_driver_name(dip), ddi_get_instance(dip)); 1391 #ifdef DEBUG 1392 bus_range.lo = 0x0; 1393 bus_range.hi = 0xff; 1394 1395 if (ndi_prop_update_int_array(DDI_DEV_T_NONE, 1396 dip, "bus-range", (int *)&bus_range, 2) 1397 != DDI_PROP_SUCCESS) { 1398 return (DDI_FAILURE); 1399 } 1400 #else 1401 return (DDI_FAILURE); 1402 #endif 1403 } 1404 1405 if (px_lib_hotplug_init(dip, (void *)®ops) != DDI_SUCCESS) 1406 return (DDI_FAILURE); 1407 1408 if (pciehpc_init(dip, ®ops) != DDI_SUCCESS) { 1409 px_lib_hotplug_uninit(dip); 1410 return (DDI_FAILURE); 1411 } 1412 1413 if (pcihp_init(dip) != DDI_SUCCESS) { 1414 (void) pciehpc_uninit(dip); 1415 px_lib_hotplug_uninit(dip); 1416 return (DDI_FAILURE); 1417 } 1418 1419 if (pcihp_get_cb_ops() != NULL) { 1420 DBG(DBG_ATTACH, dip, "%s%d hotplug enabled", 1421 ddi_driver_name(dip), ddi_get_instance(dip)); 1422 px_p->px_dev_caps |= PX_HOTPLUG_CAPABLE; 1423 } 1424 1425 return (DDI_SUCCESS); 1426 } 1427 1428 static int 1429 px_uninit_hotplug(dev_info_t *dip) 1430 { 1431 if (pcihp_uninit(dip) != DDI_SUCCESS) 1432 return (DDI_FAILURE); 1433 1434 if (pciehpc_uninit(dip) != DDI_SUCCESS) 1435 return (DDI_FAILURE); 1436 1437 px_lib_hotplug_uninit(dip); 1438 1439 return (DDI_SUCCESS); 1440 } 1441