1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * PCI Express nexus driver interface 30 */ 31 32 #include <sys/types.h> 33 #include <sys/conf.h> /* nulldev */ 34 #include <sys/stat.h> /* devctl */ 35 #include <sys/kmem.h> 36 #include <sys/sunddi.h> 37 #include <sys/sunndi.h> 38 #include <sys/ddi_impldefs.h> 39 #include <sys/ddi_subrdefs.h> 40 #include <sys/spl.h> 41 #include <sys/epm.h> 42 #include <sys/iommutsb.h> 43 #include <sys/hotplug/pci/pcihp.h> 44 #include <sys/hotplug/pci/pciehpc.h> 45 #include "px_obj.h" 46 #include <sys/pci_tools.h> 47 #include "px_tools_ext.h" 48 #include "pcie_pwr.h" 49 50 /*LINTLIBRARY*/ 51 52 /* 53 * function prototypes for dev ops routines: 54 */ 55 static int px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 56 static int px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 57 static int px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, 58 void *arg, void **result); 59 static int px_cb_attach(px_t *); 60 static void px_cb_detach(px_t *); 61 static int px_pwr_setup(dev_info_t *dip); 
62 static void px_pwr_teardown(dev_info_t *dip); 63 64 extern errorq_t *pci_target_queue; 65 66 /* 67 * function prototypes for hotplug routines: 68 */ 69 static int px_init_hotplug(px_t *px_p); 70 static int px_uninit_hotplug(dev_info_t *dip); 71 72 /* 73 * bus ops and dev ops structures: 74 */ 75 static struct bus_ops px_bus_ops = { 76 BUSO_REV, 77 px_map, 78 0, 79 0, 80 0, 81 i_ddi_map_fault, 82 px_dma_setup, 83 px_dma_allochdl, 84 px_dma_freehdl, 85 px_dma_bindhdl, 86 px_dma_unbindhdl, 87 px_lib_dma_sync, 88 px_dma_win, 89 px_dma_ctlops, 90 px_ctlops, 91 ddi_bus_prop_op, 92 ndi_busop_get_eventcookie, 93 ndi_busop_add_eventcall, 94 ndi_busop_remove_eventcall, 95 ndi_post_event, 96 NULL, 97 NULL, /* (*bus_config)(); */ 98 NULL, /* (*bus_unconfig)(); */ 99 px_fm_init_child, /* (*bus_fm_init)(); */ 100 NULL, /* (*bus_fm_fini)(); */ 101 px_bus_enter, /* (*bus_fm_access_enter)(); */ 102 px_bus_exit, /* (*bus_fm_access_fini)(); */ 103 pcie_bus_power, /* (*bus_power)(); */ 104 px_intr_ops /* (*bus_intr_op)(); */ 105 }; 106 107 extern struct cb_ops px_cb_ops; 108 109 static struct dev_ops px_ops = { 110 DEVO_REV, 111 0, 112 px_info, 113 nulldev, 114 0, 115 px_attach, 116 px_detach, 117 nodev, 118 &px_cb_ops, 119 &px_bus_ops, 120 nulldev 121 }; 122 123 /* 124 * module definitions: 125 */ 126 #include <sys/modctl.h> 127 extern struct mod_ops mod_driverops; 128 129 static struct modldrv modldrv = { 130 &mod_driverops, /* Type of module - driver */ 131 "PCI Express nexus driver %I%", /* Name of module. */ 132 &px_ops, /* driver ops */ 133 }; 134 135 static struct modlinkage modlinkage = { 136 MODREV_1, (void *)&modldrv, NULL 137 }; 138 139 /* driver soft state */ 140 void *px_state_p; 141 142 int 143 _init(void) 144 { 145 int e; 146 147 /* 148 * Initialize per-px bus soft state pointer. 149 */ 150 e = ddi_soft_state_init(&px_state_p, sizeof (px_t), 1); 151 if (e != DDI_SUCCESS) 152 return (e); 153 154 /* 155 * Install the module. 
156 */ 157 e = mod_install(&modlinkage); 158 if (e != DDI_SUCCESS) 159 ddi_soft_state_fini(&px_state_p); 160 return (e); 161 } 162 163 int 164 _fini(void) 165 { 166 int e; 167 168 /* 169 * Remove the module. 170 */ 171 e = mod_remove(&modlinkage); 172 if (e != DDI_SUCCESS) 173 return (e); 174 /* 175 * Destroy pci_target_queue, and set it to NULL. 176 */ 177 if (pci_target_queue) 178 errorq_destroy(pci_target_queue); 179 180 pci_target_queue = NULL; 181 182 /* Free px soft state */ 183 ddi_soft_state_fini(&px_state_p); 184 185 return (e); 186 } 187 188 int 189 _info(struct modinfo *modinfop) 190 { 191 return (mod_info(&modlinkage, modinfop)); 192 } 193 194 /* ARGSUSED */ 195 static int 196 px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 197 { 198 int instance = getminor((dev_t)arg); 199 px_t *px_p = INST_TO_STATE(instance); 200 201 /* 202 * Allow hotplug to deal with ones it manages 203 * Hot Plug will be done later. 204 */ 205 if (px_p && (px_p->px_dev_caps & PX_HOTPLUG_CAPABLE)) 206 return (pcihp_info(dip, infocmd, arg, result)); 207 208 /* non-hotplug or not attached */ 209 switch (infocmd) { 210 case DDI_INFO_DEVT2INSTANCE: 211 *result = (void *)(intptr_t)instance; 212 return (DDI_SUCCESS); 213 214 case DDI_INFO_DEVT2DEVINFO: 215 if (px_p == NULL) 216 return (DDI_FAILURE); 217 *result = (void *)px_p->px_dip; 218 return (DDI_SUCCESS); 219 220 default: 221 return (DDI_FAILURE); 222 } 223 } 224 225 /* device driver entry points */ 226 /* 227 * attach entry point: 228 */ 229 /*ARGSUSED*/ 230 static int 231 px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 232 { 233 px_t *px_p; /* per bus state pointer */ 234 int instance = DIP_TO_INST(dip); 235 int ret = DDI_SUCCESS; 236 devhandle_t dev_hdl = NULL; 237 238 switch (cmd) { 239 case DDI_ATTACH: 240 DBG(DBG_ATTACH, dip, "DDI_ATTACH\n"); 241 242 /* 243 * Allocate and get the per-px soft state structure. 
244 */ 245 if (ddi_soft_state_zalloc(px_state_p, instance) 246 != DDI_SUCCESS) { 247 cmn_err(CE_WARN, "%s%d: can't allocate px state", 248 ddi_driver_name(dip), instance); 249 goto err_bad_px_softstate; 250 } 251 px_p = INST_TO_STATE(instance); 252 px_p->px_dip = dip; 253 mutex_init(&px_p->px_mutex, NULL, MUTEX_DRIVER, NULL); 254 px_p->px_soft_state = PX_SOFT_STATE_CLOSED; 255 px_p->px_open_count = 0; 256 257 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, 258 "device_type", "pciex"); 259 260 /* Initialize px_dbg for high pil printing */ 261 px_dbg_attach(dip, &px_p->px_dbg_hdl); 262 263 /* 264 * Get key properties of the pci bridge node and 265 * determine it's type (psycho, schizo, etc ...). 266 */ 267 if (px_get_props(px_p, dip) == DDI_FAILURE) 268 goto err_bad_px_prop; 269 270 if (px_lib_dev_init(dip, &dev_hdl) != DDI_SUCCESS) 271 goto err_bad_dev_init; 272 273 /* Initialize device handle */ 274 px_p->px_dev_hdl = dev_hdl; 275 276 px_p->px_dq_p = (pf_data_t *) 277 kmem_zalloc(sizeof (pf_data_t) * pf_get_dq_size(), 278 KM_SLEEP); 279 280 px_p->px_dq_tail = -1; 281 282 /* 283 * Initialize interrupt block. Note that this 284 * initialize error handling for the PEC as well. 285 */ 286 if ((ret = px_ib_attach(px_p)) != DDI_SUCCESS) 287 goto err_bad_ib; 288 289 if (px_cb_attach(px_p) != DDI_SUCCESS) 290 goto err_bad_cb; 291 292 /* 293 * Start creating the modules. 294 * Note that attach() routines should 295 * register and enable their own interrupts. 
296 */ 297 298 if ((px_mmu_attach(px_p)) != DDI_SUCCESS) 299 goto err_bad_mmu; 300 301 if ((px_msiq_attach(px_p)) != DDI_SUCCESS) 302 goto err_bad_msiq; 303 304 if ((px_msi_attach(px_p)) != DDI_SUCCESS) 305 goto err_bad_msi; 306 307 if ((px_pec_attach(px_p)) != DDI_SUCCESS) 308 goto err_bad_pec; 309 310 if ((px_dma_attach(px_p)) != DDI_SUCCESS) 311 goto err_bad_dma; /* nothing to uninitialize on DMA */ 312 313 if ((px_fm_attach(px_p)) != DDI_SUCCESS) 314 goto err_bad_dma; 315 316 /* 317 * All of the error handlers have been registered 318 * by now so it's time to activate the interrupt. 319 */ 320 if ((ret = px_err_add_intr(&px_p->px_fault)) != DDI_SUCCESS) 321 goto err_bad_intr; 322 323 (void) px_init_hotplug(px_p); 324 325 /* 326 * Create the "devctl" node for hotplug and pcitool support. 327 * For non-hotplug bus, we still need ":devctl" to 328 * support DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls. 329 */ 330 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 331 PCIHP_AP_MINOR_NUM(instance, PCIHP_DEVCTL_MINOR), 332 DDI_NT_NEXUS, 0) != DDI_SUCCESS) { 333 goto err_bad_devctl_node; 334 } 335 336 if (pxtool_init(dip) != DDI_SUCCESS) 337 goto err_bad_pcitool_node; 338 339 /* 340 * power management setup. Even if it fails, attach will 341 * succeed as this is a optional feature. Since we are 342 * always at full power, this is not critical. 
343 */ 344 if (pwr_common_setup(dip) != DDI_SUCCESS) { 345 DBG(DBG_PWR, dip, "pwr_common_setup failed\n"); 346 } else if (px_pwr_setup(dip) != DDI_SUCCESS) { 347 DBG(DBG_PWR, dip, "px_pwr_setup failed \n"); 348 pwr_common_teardown(dip); 349 } 350 351 /* 352 * add cpr callback 353 */ 354 px_cpr_add_callb(px_p); 355 356 ddi_report_dev(dip); 357 358 px_p->px_state = PX_ATTACHED; 359 DBG(DBG_ATTACH, dip, "attach success\n"); 360 break; 361 362 err_bad_pcitool_node: 363 ddi_remove_minor_node(dip, "devctl"); 364 err_bad_devctl_node: 365 px_err_rem_intr(&px_p->px_fault); 366 err_bad_intr: 367 px_fm_detach(px_p); 368 err_bad_dma: 369 px_pec_detach(px_p); 370 err_bad_pec: 371 px_msi_detach(px_p); 372 err_bad_msi: 373 px_msiq_detach(px_p); 374 err_bad_msiq: 375 px_mmu_detach(px_p); 376 err_bad_mmu: 377 px_cb_detach(px_p); 378 err_bad_cb: 379 px_ib_detach(px_p); 380 err_bad_ib: 381 (void) px_lib_dev_fini(dip); 382 err_bad_dev_init: 383 px_free_props(px_p); 384 err_bad_px_prop: 385 px_dbg_detach(dip, &px_p->px_dbg_hdl); 386 mutex_destroy(&px_p->px_mutex); 387 ddi_soft_state_free(px_state_p, instance); 388 err_bad_px_softstate: 389 ret = DDI_FAILURE; 390 break; 391 392 case DDI_RESUME: 393 DBG(DBG_ATTACH, dip, "DDI_RESUME\n"); 394 395 px_p = INST_TO_STATE(instance); 396 397 mutex_enter(&px_p->px_mutex); 398 399 /* suspend might have not succeeded */ 400 if (px_p->px_state != PX_SUSPENDED) { 401 DBG(DBG_ATTACH, px_p->px_dip, 402 "instance NOT suspended\n"); 403 ret = DDI_FAILURE; 404 break; 405 } 406 407 px_msiq_resume(px_p); 408 px_lib_resume(dip); 409 (void) pcie_pwr_resume(dip); 410 px_p->px_state = PX_ATTACHED; 411 412 mutex_exit(&px_p->px_mutex); 413 414 break; 415 default: 416 DBG(DBG_ATTACH, dip, "unsupported attach op\n"); 417 ret = DDI_FAILURE; 418 break; 419 } 420 421 return (ret); 422 } 423 424 /* 425 * detach entry point: 426 */ 427 /*ARGSUSED*/ 428 static int 429 px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 430 { 431 int instance = ddi_get_instance(dip); 432 
	px_t *px_p = INST_TO_STATE(instance);
	int ret;

	/*
	 * Make sure we are currently attached
	 */
	if (px_p->px_state != PX_ATTACHED) {
		DBG(DBG_DETACH, dip, "Instance not attached\n");
		return (DDI_FAILURE);
	}

	mutex_enter(&px_p->px_mutex);

	switch (cmd) {
	case DDI_DETACH:
		DBG(DBG_DETACH, dip, "DDI_DETACH\n");

		/*
		 * remove cpr callback
		 */
		px_cpr_rem_callb(px_p);

		/* hotplug teardown must succeed before we dismantle anything */
		if (px_p->px_dev_caps & PX_HOTPLUG_CAPABLE)
			if (px_uninit_hotplug(dip) != DDI_SUCCESS) {
				mutex_exit(&px_p->px_mutex);
				return (DDI_FAILURE);
			}

		/*
		 * things which used to be done in obj_destroy
		 * are now in-lined here.
		 *
		 * The sub-modules are detached in the reverse of the
		 * order px_attach() created them.
		 */

		px_p->px_state = PX_DETACHED;

		pxtool_uninit(dip);

		ddi_remove_minor_node(dip, "devctl");
		px_err_rem_intr(&px_p->px_fault);
		px_fm_detach(px_p);
		px_pec_detach(px_p);
		px_pwr_teardown(dip);
		pwr_common_teardown(dip);
		px_msi_detach(px_p);
		px_msiq_detach(px_p);
		px_mmu_detach(px_p);
		px_cb_detach(px_p);
		px_ib_detach(px_p);
		(void) px_lib_dev_fini(dip);

		kmem_free(px_p->px_dq_p, sizeof (pf_data_t) *
		    pf_get_dq_size());

		/*
		 * Free the px soft state structure and the rest of the
		 * resources it's using.
		 */
		px_free_props(px_p);
		px_dbg_detach(dip, &px_p->px_dbg_hdl);
		mutex_exit(&px_p->px_mutex);
		mutex_destroy(&px_p->px_mutex);

		/* Free the interrupt-priorities prop if we created it. */
		{
			int len;

			if (ddi_getproplen(DDI_DEV_T_ANY, dip,
			    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
			    "interrupt-priorities", &len) == DDI_PROP_SUCCESS)
				(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
				    "interrupt-priorities");
		}

		px_p->px_dev_hdl = NULL;
		ddi_soft_state_free(px_state_p, instance);

		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		/* pcie_pwr_suspend() must pass before the lib suspend */
		if (pcie_pwr_suspend(dip) != DDI_SUCCESS) {
			mutex_exit(&px_p->px_mutex);
			return (DDI_FAILURE);
		}
		if ((ret = px_lib_suspend(dip)) == DDI_SUCCESS)
			px_p->px_state = PX_SUSPENDED;
		mutex_exit(&px_p->px_mutex);

		return (ret);

	default:
		DBG(DBG_DETACH, dip, "unsupported detach op\n");
		mutex_exit(&px_p->px_mutex);
		return (DDI_FAILURE);
	}
}

/*
 * Set up and register the common-block (XBC) error interrupt:
 * translate the XBC devino to a system interrupt number and hand the
 * px_err_cb_intr handler to px_cb_add_intr().
 */
int
px_cb_attach(px_t *px_p)
{
	px_fault_t	*fault_p = &px_p->px_cb_fault;
	dev_info_t	*dip = px_p->px_dip;
	sysino_t	sysino;

	if (px_lib_intr_devino_to_sysino(dip,
	    px_p->px_inos[PX_INTR_XBC], &sysino) != DDI_SUCCESS)
		return (DDI_FAILURE);

	fault_p->px_fh_dip = dip;
	fault_p->px_fh_sysino = sysino;
	fault_p->px_err_func = px_err_cb_intr;
	fault_p->px_intr_ino = px_p->px_inos[PX_INTR_XBC];

	return (px_cb_add_intr(fault_p));
}

/* Unregister the common-block error interrupt added by px_cb_attach(). */
void
px_cb_detach(px_t *px_p)
{
	px_cb_rem_intr(&px_p->px_cb_fault);
}

/*
 * power management related initialization specific to px
 * called by px_attach()
 */
static int
px_pwr_setup(dev_info_t *dip)
{
	pcie_pwr_t *pwr_p;
	int instance = ddi_get_instance(dip);
	px_t *px_p = INST_TO_STATE(instance);
	ddi_intr_handle_impl_t hdl;

	ASSERT(PCIE_PMINFO(dip));
	pwr_p = PCIE_NEXUS_PMINFO(dip);
	ASSERT(pwr_p);

	/*
	 * indicate support LDI (Layered Driver Interface)
	 * Create the property, if it is not already there
	 */
	if (!ddi_prop_exists(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
	    DDI_KERNEL_IOCTL)) {
		if
		    (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		    DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS) {
			DBG(DBG_PWR, dip, "can't create kernel ioctl prop\n");
			return (DDI_FAILURE);
		}
	}
	/* No support for device PM. We are always at full power */
	pwr_p->pwr_func_lvl = PM_LEVEL_D0;

	mutex_init(&px_p->px_l23ready_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(px_pwr_pil));
	cv_init(&px_p->px_l23ready_cv, NULL, CV_DRIVER, NULL);

	/* Initialize handle */
	bzero(&hdl, sizeof (ddi_intr_handle_impl_t));
	hdl.ih_cb_arg1 = px_p;
	hdl.ih_ver = DDI_INTR_VERSION;
	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
	hdl.ih_dip = dip;
	hdl.ih_pri = px_pwr_pil;

	/* Add PME_TO_ACK message handler */
	hdl.ih_cb_func = (ddi_intr_handler_t *)px_pmeq_intr;
	if (px_add_msiq_intr(dip, dip, &hdl, MSG_REC,
	    (msgcode_t)PCIE_PME_ACK_MSG, &px_p->px_pm_msiq_id) != DDI_SUCCESS) {
		DBG(DBG_PWR, dip, "px_pwr_setup: couldn't add "
		    " PME_TO_ACK intr\n");
		goto pwr_setup_err1;
	}
	px_lib_msg_setmsiq(dip, PCIE_PME_ACK_MSG, px_p->px_pm_msiq_id);
	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_VALID);

	if (px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum,
	    px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil,
	    PX_INTR_STATE_ENABLE, MSG_REC, PCIE_PME_ACK_MSG) != DDI_SUCCESS) {
		DBG(DBG_PWR, dip, "px_pwr_setup: PME_TO_ACK update interrupt"
		    " state failed\n");
		goto px_pwrsetup_err_state;
	}

	return (DDI_SUCCESS);

	/* undo in reverse order of the setup above */
px_pwrsetup_err_state:
	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID);
	(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG,
	    px_p->px_pm_msiq_id);
pwr_setup_err1:
	mutex_destroy(&px_p->px_l23ready_lock);
	cv_destroy(&px_p->px_l23ready_cv);

	return (DDI_FAILURE);
}

/*
 * undo whatever is done in px_pwr_setup.
 * called by px_detach()
 */
static void
px_pwr_teardown(dev_info_t *dip)
{
	int instance = ddi_get_instance(dip);
	px_t *px_p = INST_TO_STATE(instance);
	ddi_intr_handle_impl_t	hdl;

	/* nothing to do if PM was never set up for this nexus */
	if (!PCIE_PMINFO(dip) || !PCIE_NEXUS_PMINFO(dip))
		return;

	/* Initialize handle */
	bzero(&hdl, sizeof (ddi_intr_handle_impl_t));
	hdl.ih_ver = DDI_INTR_VERSION;
	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
	hdl.ih_dip = dip;
	hdl.ih_pri = px_pwr_pil;

	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID);
	(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG,
	    px_p->px_pm_msiq_id);

	(void) px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum,
	    px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil,
	    PX_INTR_STATE_DISABLE, MSG_REC, PCIE_PME_ACK_MSG);

	px_p->px_pm_msiq_id = (msiqid_t)-1;

	cv_destroy(&px_p->px_l23ready_cv);
	mutex_destroy(&px_p->px_l23ready_lock);
}

/* bus driver entry points */

/*
 * bus map entry point:
 *
 * 	if map request is for an rnumber
 *		get the corresponding regspec from device node
 * 	build a new regspec in our parent's format
 *	build a new map_req with the new regspec
 *	call up the tree to complete the mapping
 */
int
px_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t off, off_t len, caddr_t *addrp)
{
	px_t *px_p = DIP_TO_STATE(dip);
	struct regspec p_regspec;
	ddi_map_req_t p_mapreq;
	int reglen, rval, r_no;
	pci_regspec_t reloc_reg, *rp = &reloc_reg;

	DBG(DBG_MAP, dip, "rdip=%s%d:",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (mp->map_flags & DDI_MF_USER_MAPPING)
		return (DDI_ME_UNIMPLEMENTED);

	switch (mp->map_type) {
	case DDI_MT_REGSPEC:
		reloc_reg = *(pci_regspec_t *)mp->map_obj.rp;	/* dup whole */
		break;

	case DDI_MT_RNUMBER:
		r_no = mp->map_obj.rnumber;
		DBG(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no);

		if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&rp, &reglen) != DDI_SUCCESS)
			return (DDI_ME_RNUMBER_RANGE);

		if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) {
			kmem_free(rp, reglen);
			return (DDI_ME_RNUMBER_RANGE);
		}
		rp += r_no;
		break;

	default:
		return (DDI_ME_INVAL);
	}
	DBG(DBG_MAP | DBG_CONT, dip, "\n");

	if ((rp->pci_phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) {
		/*
		 * There may be a need to differentiate between PCI
		 * and PCI-Ex devices so the following range check is
		 * done correctly, depending on the implementation of
		 * px_pci bridge nexus driver.
		 */
		if ((off >= PCIE_CONF_HDR_SIZE) ||
		    (len > PCIE_CONF_HDR_SIZE) ||
		    (off + len > PCIE_CONF_HDR_SIZE))
			/*
			 * NOTE(review): in the DDI_MT_RNUMBER case this
			 * return appears to leak the "reg" property buffer
			 * (freed only via the done: label) — confirm.
			 */
			return (DDI_ME_INVAL);
		/*
		 * the following function returning a DDI_FAILURE assumes
		 * that there are no virtual config space access services
		 * defined in this layer.  Otherwise it is availed right
		 * here and we return.
		 */
		rval = px_lib_map_vconfig(dip, mp, off, rp, addrp);
		if (rval == DDI_SUCCESS)
			goto done;
	}

	/*
	 * No virtual config space services or we are mapping
	 * a region of memory mapped config/IO/memory space, so proceed
	 * to the parent.
	 */

	/* relocate within 64-bit pci space through "assigned-addresses" */
	if (rval = px_reloc_reg(dip, rdip, px_p, rp))
		goto done;

	if (len)	/* adjust regspec according to mapping request */
		rp->pci_size_low = len;	/* MIN ? */
	rp->pci_phys_low += off;

	/* translate relocated pci regspec into parent space through "ranges" */
	if (rval = px_xlate_reg(px_p, rp, &p_regspec))
		goto done;

	p_mapreq = *mp;		/* dup the whole structure */
	p_mapreq.map_type = DDI_MT_REGSPEC;
	p_mapreq.map_obj.rp = &p_regspec;
	px_lib_map_attr_check(&p_mapreq);
	rval = ddi_map(dip, &p_mapreq, 0, 0, addrp);

	if (rval == DDI_SUCCESS) {
		/*
		 * Set-up access functions for FM access error capable drivers.
		 */
		if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
		    mp->map_handlep->ah_acc.devacc_attr_access !=
		    DDI_DEFAULT_ACC)
			px_fm_acc_setup(mp, rdip);
	}

done:
	if (mp->map_type == DDI_MT_RNUMBER)
		kmem_free(rp - r_no, reglen);

	return (rval);
}

/*
 * bus dma map entry point
 * return value:
 *	DDI_DMA_PARTIAL_MAP	 1
 *	DDI_DMA_MAPOK		 0
 *	DDI_DMA_MAPPED		 0
 *	DDI_DMA_NORESOURCES	-1
 *	DDI_DMA_NOMAPPING	-2
 *	DDI_DMA_TOOBIG		-3
 */
int
px_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq,
	ddi_dma_handle_t *handlep)
{
	px_t *px_p = DIP_TO_STATE(dip);
	px_mmu_t *mmu_p = px_p->px_mmu_p;
	ddi_dma_impl_t *mp;
	int ret;

	DBG(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip),
	    handlep ? "alloc" : "advisory");

	if (!(mp = px_dma_lmts2hdl(dip, rdip, mmu_p, dmareq)))
		return (DDI_DMA_NORESOURCES);
	if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING)
		return (DDI_DMA_NOMAPPING);
	if (ret = px_dma_type(px_p, dmareq, mp))
		goto freehandle;
	if (ret = px_dma_pfn(px_p, dmareq, mp))
		goto freehandle;

	switch (PX_DMA_TYPE(mp)) {
	case PX_DMAI_FLAGS_DVMA:	/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
		if ((ret = px_dvma_win(px_p, dmareq, mp)) || !handlep)
			goto freehandle;
		if (!PX_DMA_CANCACHE(mp)) {	/* try fast track */
			if (PX_DMA_CANFAST(mp)) {
				if (!px_dvma_map_fast(mmu_p, mp))
					break;
			/* LINTED E_NOP_ELSE_STMT */
			} else {
				PX_DVMA_FASTTRAK_PROF(mp);
			}
		}
		if (ret = px_dvma_map(mp, dmareq, mmu_p))
			goto freehandle;
		break;
	case PX_DMAI_FLAGS_PTP:	/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
		if ((ret = px_dma_physwin(px_p, dmareq, mp)) || !handlep)
			goto freehandle;
		break;
	case PX_DMAI_FLAGS_BYPASS:
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_setup: bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PX_DMA_TYPE(mp));
		/*NOTREACHED*/
	}
	*handlep = (ddi_dma_handle_t)mp;
	mp->dmai_flags |= PX_DMAI_FLAGS_INUSE;
	px_dump_dma_handle(DBG_DMA_MAP, dip, mp);

	return ((mp->dmai_nwin == 1) ?
	    DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
freehandle:
	if (ret == DDI_DMA_NORESOURCES)
		px_dma_freemp(mp);	/* don't run_callback() */
	else
		(void) px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
	return (ret);
}


/*
 * bus dma alloc handle entry point:
 */
int
px_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	px_t *px_p = DIP_TO_STATE(dip);
	ddi_dma_impl_t *mp;
	int rval;

	DBG(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (attrp->dma_attr_version != DMA_ATTR_V0)
		return (DDI_DMA_BADATTR);

	if (!(mp = px_dma_allocmp(dip, rdip, waitfp, arg)))
		return (DDI_DMA_NORESOURCES);

	/*
	 * Save requestor's information
	 */
	mp->dmai_attr = *attrp; /* whole object - augmented later  */
	*PX_DEV_ATTR(mp) = *attrp; /* whole object - device orig attr */
	DBG(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp);

	/* check and convert dma attributes to handle parameters */
	if (rval = px_dma_attr2hdl(px_p, mp)) {
		px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
		*handlep = NULL;
		return (rval);
	}
	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}


/*
 * bus dma free handle entry point:
 */
/*ARGSUSED*/
int
px_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	DBG(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	px_dma_freemp((ddi_dma_impl_t *)handle);

	/* wake up anyone blocked waiting for a handle to be freed */
	if (px_kmem_clid) {
		DBG(DBG_DMA_FREEH, dip, "run handle callback\n");
		ddi_run_callback(&px_kmem_clid);
	}
	return (DDI_SUCCESS);
}


/*
 * bus dma bind handle entry point:
 */
int
px_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
	ddi_dma_handle_t handle, ddi_dma_req_t *dmareq,
	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	px_t *px_p = DIP_TO_STATE(dip);
	px_mmu_t *mmu_p = px_p->px_mmu_p;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	int ret;

	DBG(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq);

	if (mp->dmai_flags & PX_DMAI_FLAGS_INUSE)
		return (DDI_DMA_INUSE);

	ASSERT((mp->dmai_flags & ~PX_DMAI_FLAGS_PRESERVE) == 0);
	mp->dmai_flags |= PX_DMAI_FLAGS_INUSE;

	if (ret = px_dma_type(px_p, dmareq, mp))
		goto err;
	if (ret = px_dma_pfn(px_p, dmareq, mp))
		goto err;

	switch (PX_DMA_TYPE(mp)) {
	case PX_DMAI_FLAGS_DVMA:
		if (ret = px_dvma_win(px_p, dmareq, mp))
			goto map_err;
		if (!PX_DMA_CANCACHE(mp)) {	/* try fast track */
			if (PX_DMA_CANFAST(mp)) {
				if (!px_dvma_map_fast(mmu_p, mp))
					goto mapped; /*LINTED E_NOP_ELSE_STMT*/
			} else {
				PX_DVMA_FASTTRAK_PROF(mp);
			}
		}
		if (ret = px_dvma_map(mp, dmareq, mmu_p))
			goto map_err;
mapped:
		*ccountp = 1;
		MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size);
		break;
	case PX_DMAI_FLAGS_BYPASS:
	case PX_DMAI_FLAGS_PTP:
		if (ret = px_dma_physwin(px_p, dmareq, mp))
			goto map_err;
		*ccountp = PX_WINLST(mp)->win_ncookies;
		*cookiep =
		    *(ddi_dma_cookie_t *)(PX_WINLST(mp) + 1); /* wholeobj */
		break;
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_bindhdl(%p): bad dma type",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
		/*NOTREACHED*/
	}
	DBG(DBG_DMA_BINDH, dip, "cookie %" PRIx64 "+%x\n",
	    cookiep->dmac_address, cookiep->dmac_size);
	px_dump_dma_handle(DBG_DMA_MAP, dip, mp);

	/* insert dma handle into FMA cache */
	if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
		(void) ndi_fmc_insert(rdip, DMA_HANDLE, mp, NULL);
		mp->dmai_error.err_cf = impl_dma_check;
	}

	return (mp->dmai_nwin == 1 ?
	    DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
map_err:
	px_dma_freepfn(mp);
err:
	mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE;
	return (ret);
}


/*
 * bus dma unbind handle entry point:
 */
/*ARGSUSED*/
int
px_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	px_t *px_p = DIP_TO_STATE(dip);
	px_mmu_t *mmu_p = px_p->px_mmu_p;

	DBG(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	if ((mp->dmai_flags & PX_DMAI_FLAGS_INUSE) == 0) {
		DBG(DBG_DMA_UNBINDH, dip, "handle not inuse\n");
		return (DDI_FAILURE);
	}

	/* remove dma handle from FMA cache */
	if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
		if (DEVI(rdip)->devi_fmhdl != NULL &&
		    DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap)) {
			(void) ndi_fmc_remove(rdip, DMA_HANDLE, mp);
		}
	}

	/*
	 * Here if the handle is using the iommu.  Unload all the iommu
	 * translations.
1012 */ 1013 switch (PX_DMA_TYPE(mp)) { 1014 case PX_DMAI_FLAGS_DVMA: 1015 px_mmu_unmap_window(mmu_p, mp); 1016 px_dvma_unmap(mmu_p, mp); 1017 px_dma_freepfn(mp); 1018 break; 1019 case PX_DMAI_FLAGS_BYPASS: 1020 case PX_DMAI_FLAGS_PTP: 1021 px_dma_freewin(mp); 1022 break; 1023 default: 1024 cmn_err(CE_PANIC, "%s%d: px_dma_unbindhdl:bad dma type %p", 1025 ddi_driver_name(rdip), ddi_get_instance(rdip), mp); 1026 /*NOTREACHED*/ 1027 } 1028 if (mmu_p->mmu_dvma_clid != 0) { 1029 DBG(DBG_DMA_UNBINDH, dip, "run dvma callback\n"); 1030 ddi_run_callback(&mmu_p->mmu_dvma_clid); 1031 } 1032 if (px_kmem_clid) { 1033 DBG(DBG_DMA_UNBINDH, dip, "run handle callback\n"); 1034 ddi_run_callback(&px_kmem_clid); 1035 } 1036 mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE; 1037 1038 return (DDI_SUCCESS); 1039 } 1040 1041 /* 1042 * bus dma win entry point: 1043 */ 1044 int 1045 px_dma_win(dev_info_t *dip, dev_info_t *rdip, 1046 ddi_dma_handle_t handle, uint_t win, off_t *offp, 1047 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 1048 { 1049 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 1050 int ret; 1051 1052 DBG(DBG_DMA_WIN, dip, "rdip=%s%d\n", 1053 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1054 1055 px_dump_dma_handle(DBG_DMA_WIN, dip, mp); 1056 if (win >= mp->dmai_nwin) { 1057 DBG(DBG_DMA_WIN, dip, "%x out of range\n", win); 1058 return (DDI_FAILURE); 1059 } 1060 1061 switch (PX_DMA_TYPE(mp)) { 1062 case PX_DMAI_FLAGS_DVMA: 1063 if (win != PX_DMA_CURWIN(mp)) { 1064 px_t *px_p = DIP_TO_STATE(dip); 1065 px_mmu_t *mmu_p = px_p->px_mmu_p; 1066 px_mmu_unmap_window(mmu_p, mp); 1067 1068 /* map_window sets dmai_mapping/size/offset */ 1069 px_mmu_map_window(mmu_p, mp, win); 1070 if ((ret = px_mmu_map_window(mmu_p, 1071 mp, win)) != DDI_SUCCESS) 1072 return (ret); 1073 } 1074 if (cookiep) 1075 MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, 1076 mp->dmai_size); 1077 if (ccountp) 1078 *ccountp = 1; 1079 break; 1080 case PX_DMAI_FLAGS_PTP: 1081 case PX_DMAI_FLAGS_BYPASS: { 1082 int i; 1083 
		/*
		 * NOTE(review): tail of px_dma_win() — the function's head
		 * (signature, the switch on PX_DMA_TYPE(mp), and the
		 * declarations of i/win/cookiep etc.) lies above this chunk.
		 * This arm handles the window-list case: advance to the
		 * requested window and publish its first cookie.
		 */
		ddi_dma_cookie_t *ck_p;
		px_dma_win_t *win_p = mp->dmai_winlst;

		/* walk the singly linked window list to window number "win" */
		for (i = 0; i < win; win_p = win_p->win_next, i++);

		/* the cookie array is laid out immediately after the window */
		ck_p = (ddi_dma_cookie_t *)(win_p + 1);
		*cookiep = *ck_p;
		mp->dmai_offset = win_p->win_offset;
		mp->dmai_size = win_p->win_size;
		mp->dmai_mapping = ck_p->dmac_laddress;
		mp->dmai_cookie = ck_p + 1;	/* next cookie to hand out */
		win_p->win_curseg = 0;
		if (ccountp)
			*ccountp = win_p->win_ncookies;
		}
		break;
	default:
		cmn_err(CE_WARN, "%s%d: px_dma_win:bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PX_DMA_TYPE(mp));
		return (DDI_FAILURE);
	}
	if (cookiep)
		DBG(DBG_DMA_WIN, dip,
		    "cookie - dmac_address=%x dmac_size=%x\n",
		    cookiep->dmac_address, cookiep->dmac_size);
	if (offp)
		*offp = (off_t)mp->dmai_offset;
	if (lenp)
		*lenp = mp->dmai_size;
	return (DDI_SUCCESS);
}

/*
 * Printable names for the ddi_dma_ctlops commands, indexed by command
 * value, used only by the DBG trace in px_dma_ctlops() below.
 */
#ifdef	DEBUG
static char *px_dmactl_str[] = {
	"DDI_DMA_FREE",
	"DDI_DMA_SYNC",
	"DDI_DMA_HTOC",
	"DDI_DMA_KVADDR",
	"DDI_DMA_MOVWIN",
	"DDI_DMA_REPWIN",
	"DDI_DMA_GETERR",
	"DDI_DMA_COFF",
	"DDI_DMA_NEXTWIN",
	"DDI_DMA_NEXTSEG",
	"DDI_DMA_SEGTOC",
	"DDI_DMA_RESERVE",
	"DDI_DMA_RELEASE",
	"DDI_DMA_RESETH",
	"DDI_DMA_CKSYNC",
	"DDI_DMA_IOPB_ALLOC",
	"DDI_DMA_IOPB_FREE",
	"DDI_DMA_SMEM_ALLOC",
	"DDI_DMA_SMEM_FREE",
	"DDI_DMA_SET_SBUS64"
};
#endif	/* DEBUG */

/*
 * bus dma control entry point:
 *
 * DDI_DMA_FREE, DDI_DMA_RESERVE (fast DVMA reserve) and DDI_DMA_RELEASE
 * are handled here directly; every other command is dispatched to the
 * DVMA or PTP/bypass specific ctl routine based on the handle's DMA type.
 */
/*ARGSUSED*/
int
px_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
	enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp,
	uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;

#ifdef	DEBUG
	DBG(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", px_dmactl_str[cmd],
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
#endif	/* DEBUG */

	switch (cmd) {
	case DDI_DMA_FREE:
		/* unbind (if bound) then free the handle */
		(void) px_dma_unbindhdl(dip, rdip, handle);
		(void) px_dma_freehdl(dip, rdip, handle);
		return (DDI_SUCCESS);
	case DDI_DMA_RESERVE: {
		px_t *px_p = DIP_TO_STATE(dip);
		/* offp/objp are overloaded as request/handle for RESERVE */
		return (px_fdvma_reserve(dip, rdip, px_p,
		    (ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp));
	}
	case DDI_DMA_RELEASE: {
		px_t *px_p = DIP_TO_STATE(dip);
		return (px_fdvma_release(dip, px_p, mp));
	}
	default:
		break;
	}

	/* remaining commands depend on how the handle was bound */
	switch (PX_DMA_TYPE(mp)) {
	case PX_DMAI_FLAGS_DVMA:
		return (px_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	case PX_DMAI_FLAGS_PTP:
	case PX_DMAI_FLAGS_BYPASS:
		return (px_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_ctlops(%x):bad dma type %x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), cmd,
		    mp->dmai_flags);
		/*NOTREACHED*/
	}
	return (0);
}

/*
 * control ops entry point:
 *
 * Requests handled completely:
 *	DDI_CTLOPS_INITCHILD	see init_child() for details
 *	DDI_CTLOPS_UNINITCHILD
 *	DDI_CTLOPS_REPORTDEV	see report_dev() for details
 *	DDI_CTLOPS_IOMIN	cache line size if streaming otherwise 1
 *	DDI_CTLOPS_REGSIZE
 *	DDI_CTLOPS_NREGS
 *	DDI_CTLOPS_DVMAPAGESIZE
 *	DDI_CTLOPS_POKE
 *	DDI_CTLOPS_PEEK
 *
 * All others passed to parent.
 */
int
px_ctlops(dev_info_t *dip, dev_info_t *rdip,
	ddi_ctl_enum_t op, void *arg, void *result)
{
	px_t *px_p = DIP_TO_STATE(dip);
	struct detachspec *ds;
	struct attachspec *as;

	switch (op) {
	case DDI_CTLOPS_INITCHILD:
		return (px_init_child(px_p, (dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		return (px_uninit_child(px_p, (dev_info_t *)arg));

	case DDI_CTLOPS_ATTACH:
		/* PM/FMA bookkeeping applies only to our immediate children */
		if (!pcie_is_child(dip, rdip))
			return (DDI_SUCCESS);

		as = (struct attachspec *)arg;
		switch (as->when) {
		case DDI_PRE:
			if (as->cmd == DDI_ATTACH) {
				DBG(DBG_PWR, dip, "PRE_ATTACH for %s@%d\n",
				    ddi_driver_name(rdip),
				    ddi_get_instance(rdip));
				/* hold power until attach completes */
				return (pcie_pm_hold(dip));
			}
			if (as->cmd == DDI_RESUME) {
				ddi_acc_handle_t config_handle;
				DBG(DBG_PWR, dip, "PRE_RESUME for %s@%d\n",
				    ddi_driver_name(rdip),
				    ddi_get_instance(rdip));

				/* clear stale PCIe errors before resume */
				if (pci_config_setup(rdip, &config_handle) ==
				    DDI_SUCCESS) {
					pcie_clear_errors(rdip, config_handle);
					pci_config_teardown(&config_handle);
				}
			}
			return (DDI_SUCCESS);

		case DDI_POST:
			DBG(DBG_PWR, dip, "POST_ATTACH for %s@%d\n",
			    ddi_driver_name(rdip), ddi_get_instance(rdip));
			/* drop the PRE_ATTACH power hold on failed attach */
			if (as->cmd == DDI_ATTACH && as->result != DDI_SUCCESS)
				pcie_pm_release(dip);

			pf_init(rdip, (void *)px_p->px_fm_ibc);

			(void) pcie_postattach_child(rdip);

			return (DDI_SUCCESS);
		default:
			break;
		}
		break;

	case DDI_CTLOPS_DETACH:
		ds = (struct detachspec *)arg;
		switch (ds->when) {
		case DDI_POST:
			if (ds->cmd == DDI_DETACH &&
			    ds->result == DDI_SUCCESS) {
				DBG(DBG_PWR, dip, "POST_DETACH for %s@%d\n",
				    ddi_driver_name(rdip),
				    ddi_get_instance(rdip));
				return (pcie_pm_remove_child(dip, rdip));
			}
			return (DDI_SUCCESS);
		case DDI_PRE:
			/* tear down fault-management state before detach */
			pf_fini(rdip);
			return (DDI_SUCCESS);

		default:
			break;
		}
		break;

	case DDI_CTLOPS_REPORTDEV:
		return (px_report_dev(rdip));

	case DDI_CTLOPS_IOMIN:
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
		/* a size of 0 means the reg set number was invalid */
		*((off_t *)result) = px_get_reg_set_size(rdip, *((int *)arg));
		return (*((off_t *)result) == 0 ? DDI_FAILURE : DDI_SUCCESS);

	case DDI_CTLOPS_NREGS:
		*((uint_t *)result) = px_get_nreg_set(rdip);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_DVMAPAGESIZE:
		*((ulong_t *)result) = MMU_PAGE_SIZE;
		return (DDI_SUCCESS);

	case DDI_CTLOPS_POKE:	/* platform dependent implementation. */
		return (px_lib_ctlops_poke(dip, rdip,
		    (peekpoke_ctlops_t *)arg));

	case DDI_CTLOPS_PEEK:	/* platform dependent implementation. */
		return (px_lib_ctlops_peek(dip, rdip,
		    (peekpoke_ctlops_t *)arg, result));

	case DDI_CTLOPS_POWER:
	default:
		break;
	}

	/*
	 * Now pass the request up to our parent.
	 */
	DBG(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
	return (ddi_ctlops(dip, rdip, op, arg, result));
}

/*
 * bus interrupt op entry point:
 *
 * DDI_INTROP_SUPPORTED_TYPES is answered here directly (FIXED if the
 * child has INTx interrupts, OR'ed with whatever MSI/MSI-X types
 * pci_msi_get_supported_type() reports); all other ops are routed to
 * the INTx or MSI/MSI-X handler based on hdlp->ih_type.
 */
/* ARGSUSED */
int
px_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	int	intr_types, ret = DDI_SUCCESS;

	DBG(DBG_INTROPS, dip, "px_intr_ops: rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	/* Process DDI_INTROP_SUPPORTED_TYPES request here */
	if (intr_op == DDI_INTROP_SUPPORTED_TYPES) {
		*(int *)result = i_ddi_get_intx_nintrs(rdip) ?
		    DDI_INTR_TYPE_FIXED : 0;

		if ((pci_msi_get_supported_type(rdip,
		    &intr_types)) == DDI_SUCCESS) {
			/*
			 * Double check supported interrupt types vs.
			 * what the host bridge supports.
			 */
			*(int *)result |= intr_types;
		}

		return (ret);
	}

	/*
	 * PCI-E nexus driver supports fixed, MSI and MSI-X interrupts.
	 * Return failure if interrupt type is not supported.
	 */
	switch (hdlp->ih_type) {
	case DDI_INTR_TYPE_FIXED:
		ret = px_intx_ops(dip, rdip, intr_op, hdlp, result);
		break;
	case DDI_INTR_TYPE_MSI:
	case DDI_INTR_TYPE_MSIX:
		ret = px_msix_ops(dip, rdip, intr_op, hdlp, result);
		break;
	default:
		ret = DDI_ENOTSUP;
		break;
	}

	return (ret);
}

/*
 * px_init_hotplug:
 *
 * Initialize PCI Express hotplug support for this nexus, but only if
 * the "hotplug-capable" property is present on the node.  On success
 * the platform hotplug layer, pciehpc and pcihp are all initialized
 * (and unwound in reverse order on partial failure), and
 * PX_HOTPLUG_CAPABLE is set in px_dev_caps when pcihp provides cb_ops.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
px_init_hotplug(px_t *px_p)
{
	px_bus_range_t bus_range;
	dev_info_t *dip;
	pciehpc_regops_t regops;

	dip = px_p->px_dip;

	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "hotplug-capable") == 0)
		return (DDI_FAILURE);

	/*
	 * Before initializing hotplug - open up bus range.  The busra
	 * module will initialize its pool of bus numbers from this.
	 * "busra" will be the agent that keeps track of them during
	 * hotplug.  Also, note, that busra will remove any bus numbers
	 * already in use from boot time.
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "bus-range") == 0) {
		cmn_err(CE_WARN, "%s%d: bus-range not found\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
#ifdef	DEBUG
		/* DEBUG kernels fabricate a full 0-0xff range and proceed */
		bus_range.lo = 0x0;
		bus_range.hi = 0xff;

		if (ndi_prop_update_int_array(DDI_DEV_T_NONE,
		    dip, "bus-range", (int *)&bus_range, 2)
		    != DDI_PROP_SUCCESS) {
			return (DDI_FAILURE);
		}
#else
		return (DDI_FAILURE);
#endif
	}

	/* NOTE(review): "&regops" restored here; was mis-encoded in source */
	if (px_lib_hotplug_init(dip, (void *)&regops) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (pciehpc_init(dip, &regops) != DDI_SUCCESS) {
		px_lib_hotplug_uninit(dip);
		return (DDI_FAILURE);
	}

	if (pcihp_init(dip) != DDI_SUCCESS) {
		(void) pciehpc_uninit(dip);
		px_lib_hotplug_uninit(dip);
		return (DDI_FAILURE);
	}

	if (pcihp_get_cb_ops() != NULL) {
		DBG(DBG_ATTACH, dip, "%s%d hotplug enabled",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		px_p->px_dev_caps |= PX_HOTPLUG_CAPABLE;
	}

	return (DDI_SUCCESS);
}

/*
 * px_uninit_hotplug:
 *
 * Undo px_init_hotplug() in reverse order: pcihp, pciehpc, then the
 * platform hotplug layer.  Fails (without unwinding further) if either
 * pcihp_uninit() or pciehpc_uninit() fails.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
px_uninit_hotplug(dev_info_t *dip)
{
	if (pcihp_uninit(dip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (pciehpc_uninit(dip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	px_lib_hotplug_uninit(dip);

	return (DDI_SUCCESS);
}