/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * SPARC Host to PCI Express nexus driver
 */

#include <sys/types.h>
#include <sys/conf.h>		/* nulldev */
#include <sys/stat.h>		/* devctl */
#include <sys/kmem.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ddi_subrdefs.h>
#include <sys/spl.h>
#include <sys/epm.h>
#include <sys/iommutsb.h>
#include "px_obj.h"
#include <sys/hotplug/pci/pcie_hp.h>
#include <sys/pci_tools.h>
#include "px_tools_ext.h"
#include <sys/pcie_pwr.h>
#include <sys/pci_cfgacc.h>

/*LINTLIBRARY*/

/*
 * function prototypes for dev ops routines:
 */
static int px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int px_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
	void *arg, void **result);
static int px_cb_attach(px_t *);
static void px_cb_detach(px_t *);
static int px_pwr_setup(dev_info_t *dip);
static void px_pwr_teardown(dev_info_t *dip);
static void px_set_mps(px_t *px_p);

extern void pci_cfgacc_acc(pci_cfgacc_req_t *);
extern int pcie_max_mps;
extern void (*pci_cfgacc_acc_p)(pci_cfgacc_req_t *);

/*
 * bus ops and dev ops structures:
 */
static struct bus_ops px_bus_ops = {
	BUSO_REV,
	px_map,
	0,
	0,
	0,
	i_ddi_map_fault,
	px_dma_setup,
	px_dma_allochdl,
	px_dma_freehdl,
	px_dma_bindhdl,
	px_dma_unbindhdl,
	px_lib_dma_sync,
	px_dma_win,
	px_dma_ctlops,
	px_ctlops,
	ddi_bus_prop_op,
	ndi_busop_get_eventcookie,
	ndi_busop_add_eventcall,
	ndi_busop_remove_eventcall,
	ndi_post_event,
	NULL,
	NULL,			/* (*bus_config)(); */
	NULL,			/* (*bus_unconfig)(); */
	px_fm_init_child,	/* (*bus_fm_init)(); */
	NULL,			/* (*bus_fm_fini)(); */
	px_bus_enter,		/* (*bus_fm_access_enter)(); */
	px_bus_exit,		/* (*bus_fm_access_fini)(); */
	pcie_bus_power,		/* (*bus_power)(); */
	px_intr_ops,		/* (*bus_intr_op)(); */
	pcie_hp_common_ops	/* (*bus_hp_op)(); */
};

extern struct cb_ops px_cb_ops;

static struct dev_ops px_ops = {
	DEVO_REV,
	0,
	px_info,
	nulldev,
	0,
	px_attach,
	px_detach,
	nodev,
	&px_cb_ops,
	&px_bus_ops,
	nulldev,
	ddi_quiesce_not_needed,	/* quiesce */
};

/*
 * module definitions:
 */
#include <sys/modctl.h>
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,				/* Type of module - driver */
#if defined(sun4u)
	"Sun4u Host to PCIe nexus driver",	/* Name of module. */
#elif defined(sun4v)
	"Sun4v Host to PCIe nexus driver",	/* Name of module. */
#endif
	&px_ops,				/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/* driver soft state */
void *px_state_p;

int px_force_intx_support = 1;

int
_init(void)
{
	int e;

	/*
	 * Initialize per-px bus soft state pointer.
	 */
	e = ddi_soft_state_init(&px_state_p, sizeof (px_t), 1);
	if (e != DDI_SUCCESS)
		return (e);

	/*
	 * Install the module.
	 */
	e = mod_install(&modlinkage);
	if (e != DDI_SUCCESS)
		ddi_soft_state_fini(&px_state_p);
	return (e);
}

int
_fini(void)
{
	int e;

	/*
	 * Remove the module.
	 */
	e = mod_remove(&modlinkage);
	if (e != DDI_SUCCESS)
		return (e);

	/* Free px soft state */
	ddi_soft_state_fini(&px_state_p);

	return (e);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/* ARGSUSED */
static int
px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	minor_t	minor = getminor((dev_t)arg);
	int	instance = PCI_MINOR_NUM_TO_INSTANCE(minor);
	px_t	*px_p = INST_TO_STATE(instance);
	int	ret = DDI_SUCCESS;

	switch (infocmd) {
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(intptr_t)instance;
		break;
	case DDI_INFO_DEVT2DEVINFO:
		if (px_p == NULL) {
			ret = DDI_FAILURE;
			break;
		}

		*result = (void *)px_p->px_dip;
		break;
	default:
		ret = DDI_FAILURE;
		break;
	}

	return (ret);
}

/* device driver entry points */
/*
 * attach entry point:
 */
/*ARGSUSED*/
static int
px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	px_t		*px_p;	/* per bus state pointer */
	int		instance = DIP_TO_INST(dip);
	int		ret = DDI_SUCCESS;
	devhandle_t	dev_hdl = NULL;
	pcie_hp_regops_t regops;
	pcie_bus_t	*bus_p;

	switch (cmd) {
	case DDI_ATTACH:
		DBG(DBG_ATTACH, dip, "DDI_ATTACH\n");

		/* See pci_cfgacc.c */
		pci_cfgacc_acc_p = pci_cfgacc_acc;

		/*
		 * Allocate and get the per-px soft state structure.
		 */
		if (ddi_soft_state_zalloc(px_state_p, instance)
		    != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: can't allocate px state",
			    ddi_driver_name(dip), instance);
			goto err_bad_px_softstate;
		}
		px_p = INST_TO_STATE(instance);
		px_p->px_dip = dip;
		mutex_init(&px_p->px_mutex, NULL, MUTEX_DRIVER, NULL);
		px_p->px_soft_state = PCI_SOFT_STATE_CLOSED;

		(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
		    "device_type", "pciex");

		/* Initialize px_dbg for high pil printing */
		px_dbg_attach(dip, &px_p->px_dbg_hdl);
		pcie_rc_init_bus(dip);

		/*
		 * Get key properties of the pci bridge node and
		 * determine its type (psycho, schizo, etc.).
		 */
		if (px_get_props(px_p, dip) == DDI_FAILURE)
			goto err_bad_px_prop;

		if (px_lib_dev_init(dip, &dev_hdl) != DDI_SUCCESS)
			goto err_bad_dev_init;

		/* Initialize device handle */
		px_p->px_dev_hdl = dev_hdl;

		/* Cache the BDF of the root port nexus */
		px_p->px_bdf = px_lib_get_bdf(px_p);

		/*
		 * Initialize interrupt block.  Note that this
		 * initializes error handling for the PEC as well.
279 */ 280 if ((ret = px_ib_attach(px_p)) != DDI_SUCCESS) 281 goto err_bad_ib; 282 283 if (px_cb_attach(px_p) != DDI_SUCCESS) 284 goto err_bad_cb; 285 286 /* 287 * Start creating the modules. 288 * Note that attach() routines should 289 * register and enable their own interrupts. 290 */ 291 292 if ((px_mmu_attach(px_p)) != DDI_SUCCESS) 293 goto err_bad_mmu; 294 295 if ((px_msiq_attach(px_p)) != DDI_SUCCESS) 296 goto err_bad_msiq; 297 298 if ((px_msi_attach(px_p)) != DDI_SUCCESS) 299 goto err_bad_msi; 300 301 if ((px_pec_attach(px_p)) != DDI_SUCCESS) 302 goto err_bad_pec; 303 304 if ((px_dma_attach(px_p)) != DDI_SUCCESS) 305 goto err_bad_dma; /* nothing to uninitialize on DMA */ 306 307 if ((px_fm_attach(px_p)) != DDI_SUCCESS) 308 goto err_bad_dma; 309 310 /* 311 * All of the error handlers have been registered 312 * by now so it's time to activate the interrupt. 313 */ 314 if ((ret = px_err_add_intr(&px_p->px_fault)) != DDI_SUCCESS) 315 goto err_bad_intr; 316 317 if (px_lib_hotplug_init(dip, (void *)®ops) == DDI_SUCCESS) { 318 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 319 320 bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE; 321 } 322 323 (void) px_set_mps(px_p); 324 325 if (pcie_init(dip, (caddr_t)®ops) != DDI_SUCCESS) 326 goto err_bad_hotplug; 327 328 (void) pcie_hpintr_enable(dip); 329 330 if (pxtool_init(dip) != DDI_SUCCESS) 331 goto err_bad_pcitool_node; 332 333 /* 334 * power management setup. Even if it fails, attach will 335 * succeed as this is a optional feature. Since we are 336 * always at full power, this is not critical. 337 */ 338 if (pwr_common_setup(dip) != DDI_SUCCESS) { 339 DBG(DBG_PWR, dip, "pwr_common_setup failed\n"); 340 } else if (px_pwr_setup(dip) != DDI_SUCCESS) { 341 DBG(DBG_PWR, dip, "px_pwr_setup failed \n"); 342 pwr_common_teardown(dip); 343 } 344 345 /* 346 * add cpr callback 347 */ 348 px_cpr_add_callb(px_p); 349 350 /* 351 * do fabric sync in case we don't need to wait for 352 * any bridge driver to be ready 353 */ 354 (void) px_lib_fabric_sync(dip); 355 356 ddi_report_dev(dip); 357 358 px_p->px_state = PX_ATTACHED; 359 360 /* 361 * save base addr in bus_t for pci_cfgacc_xxx(), this 362 * depends of px structure being properly initialized. 363 */ 364 bus_p = PCIE_DIP2BUS(dip); 365 bus_p->bus_cfgacc_base = px_lib_get_cfgacc_base(dip); 366 367 /* 368 * Partially populate bus_t for all devices in this fabric 369 * for device type macros to work. 370 */ 371 /* 372 * Populate bus_t for all devices in this fabric, after FMA 373 * is initializated, so that config access errors could 374 * trigger panic. 
375 */ 376 pcie_fab_init_bus(dip, PCIE_BUS_ALL); 377 378 DBG(DBG_ATTACH, dip, "attach success\n"); 379 break; 380 381 err_bad_pcitool_node: 382 (void) pcie_hpintr_disable(dip); 383 (void) pcie_uninit(dip); 384 err_bad_hotplug: 385 (void) px_lib_hotplug_uninit(dip); 386 px_err_rem_intr(&px_p->px_fault); 387 err_bad_intr: 388 px_fm_detach(px_p); 389 err_bad_dma: 390 px_pec_detach(px_p); 391 err_bad_pec: 392 px_msi_detach(px_p); 393 err_bad_msi: 394 px_msiq_detach(px_p); 395 err_bad_msiq: 396 px_mmu_detach(px_p); 397 err_bad_mmu: 398 px_cb_detach(px_p); 399 err_bad_cb: 400 px_ib_detach(px_p); 401 err_bad_ib: 402 if (px_lib_dev_fini(dip) != DDI_SUCCESS) { 403 DBG(DBG_ATTACH, dip, "px_lib_dev_fini failed\n"); 404 } 405 err_bad_dev_init: 406 px_free_props(px_p); 407 err_bad_px_prop: 408 pcie_rc_fini_bus(dip); 409 px_dbg_detach(dip, &px_p->px_dbg_hdl); 410 mutex_destroy(&px_p->px_mutex); 411 ddi_soft_state_free(px_state_p, instance); 412 err_bad_px_softstate: 413 ret = DDI_FAILURE; 414 break; 415 416 case DDI_RESUME: 417 DBG(DBG_ATTACH, dip, "DDI_RESUME\n"); 418 419 px_p = INST_TO_STATE(instance); 420 421 mutex_enter(&px_p->px_mutex); 422 423 /* suspend might have not succeeded */ 424 if (px_p->px_state != PX_SUSPENDED) { 425 DBG(DBG_ATTACH, px_p->px_dip, 426 "instance NOT suspended\n"); 427 ret = DDI_FAILURE; 428 break; 429 } 430 431 px_msiq_resume(px_p); 432 px_lib_resume(dip); 433 (void) pcie_pwr_resume(dip); 434 px_p->px_state = PX_ATTACHED; 435 436 mutex_exit(&px_p->px_mutex); 437 438 break; 439 default: 440 DBG(DBG_ATTACH, dip, "unsupported attach op\n"); 441 ret = DDI_FAILURE; 442 break; 443 } 444 445 return (ret); 446 } 447 448 /* 449 * detach entry point: 450 */ 451 /*ARGSUSED*/ 452 static int 453 px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 454 { 455 int instance = ddi_get_instance(dip); 456 px_t *px_p = INST_TO_STATE(instance); 457 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 458 int ret; 459 460 /* 461 * Make sure we are currently attached 462 */ 463 if (px_p->px_state != PX_ATTACHED) { 464 DBG(DBG_DETACH, dip, "Instance not attached\n"); 465 return (DDI_FAILURE); 466 } 467 468 mutex_enter(&px_p->px_mutex); 469 470 switch (cmd) { 471 case DDI_DETACH: 472 DBG(DBG_DETACH, dip, "DDI_DETACH\n"); 473 474 /* 475 * remove cpr callback 476 */ 477 px_cpr_rem_callb(px_p); 478 479 (void) pcie_hpintr_disable(dip); 480 481 if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) 482 (void) px_lib_hotplug_uninit(dip); 483 484 if (pcie_uninit(dip) != DDI_SUCCESS) { 485 mutex_exit(&px_p->px_mutex); 486 return (DDI_FAILURE); 487 } 488 489 /* Destroy bus_t for the whole fabric */ 490 pcie_fab_fini_bus(dip, PCIE_BUS_ALL); 491 492 /* 493 * things which used to be done in obj_destroy 494 * are now in-lined here. 495 */ 496 497 px_p->px_state = PX_DETACHED; 498 499 pxtool_uninit(dip); 500 501 px_err_rem_intr(&px_p->px_fault); 502 px_fm_detach(px_p); 503 px_pec_detach(px_p); 504 px_pwr_teardown(dip); 505 pwr_common_teardown(dip); 506 px_msi_detach(px_p); 507 px_msiq_detach(px_p); 508 px_mmu_detach(px_p); 509 px_cb_detach(px_p); 510 px_ib_detach(px_p); 511 if (px_lib_dev_fini(dip) != DDI_SUCCESS) { 512 DBG(DBG_DETACH, dip, "px_lib_dev_fini failed\n"); 513 } 514 515 /* 516 * Free the px soft state structure and the rest of the 517 * resources it's using. 
518 */ 519 px_free_props(px_p); 520 pcie_rc_fini_bus(dip); 521 px_dbg_detach(dip, &px_p->px_dbg_hdl); 522 mutex_exit(&px_p->px_mutex); 523 mutex_destroy(&px_p->px_mutex); 524 525 px_p->px_dev_hdl = NULL; 526 ddi_soft_state_free(px_state_p, instance); 527 528 return (DDI_SUCCESS); 529 530 case DDI_SUSPEND: 531 if (pcie_pwr_suspend(dip) != DDI_SUCCESS) { 532 mutex_exit(&px_p->px_mutex); 533 return (DDI_FAILURE); 534 } 535 if ((ret = px_lib_suspend(dip)) == DDI_SUCCESS) 536 px_p->px_state = PX_SUSPENDED; 537 mutex_exit(&px_p->px_mutex); 538 539 return (ret); 540 541 default: 542 DBG(DBG_DETACH, dip, "unsupported detach op\n"); 543 mutex_exit(&px_p->px_mutex); 544 return (DDI_FAILURE); 545 } 546 } 547 548 int 549 px_cb_attach(px_t *px_p) 550 { 551 px_fault_t *fault_p = &px_p->px_cb_fault; 552 dev_info_t *dip = px_p->px_dip; 553 sysino_t sysino; 554 555 if (px_lib_intr_devino_to_sysino(dip, 556 px_p->px_inos[PX_INTR_XBC], &sysino) != DDI_SUCCESS) 557 return (DDI_FAILURE); 558 559 fault_p->px_fh_dip = dip; 560 fault_p->px_fh_sysino = sysino; 561 fault_p->px_err_func = px_err_cb_intr; 562 fault_p->px_intr_ino = px_p->px_inos[PX_INTR_XBC]; 563 564 return (px_cb_add_intr(fault_p)); 565 } 566 567 void 568 px_cb_detach(px_t *px_p) 569 { 570 px_cb_rem_intr(&px_p->px_cb_fault); 571 } 572 573 /* 574 * power management related initialization specific to px 575 * called by px_attach() 576 */ 577 static int 578 px_pwr_setup(dev_info_t *dip) 579 { 580 pcie_pwr_t *pwr_p; 581 int instance = ddi_get_instance(dip); 582 px_t *px_p = INST_TO_STATE(instance); 583 ddi_intr_handle_impl_t hdl; 584 585 ASSERT(PCIE_PMINFO(dip)); 586 pwr_p = PCIE_NEXUS_PMINFO(dip); 587 ASSERT(pwr_p); 588 589 /* 590 * indicate support LDI (Layered Driver Interface) 591 * Create the property, if it is not already there 592 */ 593 if (!ddi_prop_exists(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS, 594 DDI_KERNEL_IOCTL)) { 595 if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 596 DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS) { 597 DBG(DBG_PWR, dip, "can't create kernel ioctl prop\n"); 598 return (DDI_FAILURE); 599 } 600 } 601 /* No support for device PM. 
	pwr_p->pwr_func_lvl = PM_LEVEL_D0;

	mutex_init(&px_p->px_l23ready_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(px_pwr_pil));
	cv_init(&px_p->px_l23ready_cv, NULL, CV_DRIVER, NULL);

	/* Initialize handle */
	bzero(&hdl, sizeof (ddi_intr_handle_impl_t));
	hdl.ih_cb_arg1 = px_p;
	hdl.ih_ver = DDI_INTR_VERSION;
	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
	hdl.ih_dip = dip;
	hdl.ih_pri = px_pwr_pil;

	/* Add PME_TO_ACK message handler */
	hdl.ih_cb_func = (ddi_intr_handler_t *)px_pmeq_intr;
	if (px_add_msiq_intr(dip, dip, &hdl, MSG_REC,
	    (msgcode_t)PCIE_PME_ACK_MSG, -1,
	    &px_p->px_pm_msiq_id) != DDI_SUCCESS) {
		DBG(DBG_PWR, dip, "px_pwr_setup: couldn't add "
		    "PME_TO_ACK intr\n");
		goto pwr_setup_err1;
	}
	px_lib_msg_setmsiq(dip, PCIE_PME_ACK_MSG, px_p->px_pm_msiq_id);
	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_VALID);

	if (px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum,
	    px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil,
	    PX_INTR_STATE_ENABLE, MSG_REC, PCIE_PME_ACK_MSG) != DDI_SUCCESS) {
		DBG(DBG_PWR, dip, "px_pwr_setup: PME_TO_ACK update interrupt"
		    " state failed\n");
		goto px_pwrsetup_err_state;
	}

	return (DDI_SUCCESS);

px_pwrsetup_err_state:
	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID);
	(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG,
	    px_p->px_pm_msiq_id);
pwr_setup_err1:
	mutex_destroy(&px_p->px_l23ready_lock);
	cv_destroy(&px_p->px_l23ready_cv);

	return (DDI_FAILURE);
}

/*
 * Undo whatever is done in px_pwr_setup(); called by px_detach().
 */
static void
px_pwr_teardown(dev_info_t *dip)
{
	int instance = ddi_get_instance(dip);
	px_t *px_p = INST_TO_STATE(instance);
	ddi_intr_handle_impl_t	hdl;

	if (!PCIE_PMINFO(dip) || !PCIE_NEXUS_PMINFO(dip))
		return;

	/* Initialize handle */
	bzero(&hdl, sizeof (ddi_intr_handle_impl_t));
	hdl.ih_ver = DDI_INTR_VERSION;
	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
	hdl.ih_dip = dip;
	hdl.ih_pri = px_pwr_pil;

	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID);
	(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG,
	    px_p->px_pm_msiq_id);

	(void) px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum,
	    px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil,
	    PX_INTR_STATE_DISABLE, MSG_REC, PCIE_PME_ACK_MSG);

	px_p->px_pm_msiq_id = (msiqid_t)-1;

	cv_destroy(&px_p->px_l23ready_cv);
	mutex_destroy(&px_p->px_l23ready_lock);
}

/* bus driver entry points */

/*
 * bus map entry point:
 *
 *	if map request is for an rnumber
 *		get the corresponding regspec from device node
 *	build a new regspec in our parent's format
 *	build a new map_req with the new regspec
 *	call up the tree to complete the mapping
 */
int
px_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t off, off_t len, caddr_t *addrp)
{
	px_t *px_p = DIP_TO_STATE(dip);
	struct regspec p_regspec;
	ddi_map_req_t p_mapreq;
	int reglen, rval, r_no;
	pci_regspec_t reloc_reg, *rp = &reloc_reg;

	DBG(DBG_MAP, dip, "rdip=%s%d:",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (mp->map_flags & DDI_MF_USER_MAPPING)
		return (DDI_ME_UNIMPLEMENTED);

	switch (mp->map_type) {
	case DDI_MT_REGSPEC:
		reloc_reg = *(pci_regspec_t *)mp->map_obj.rp; /* dup whole */
		break;

	case DDI_MT_RNUMBER:
		r_no = mp->map_obj.rnumber;
		DBG(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no);

		if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&rp, &reglen) != DDI_SUCCESS)
			return (DDI_ME_RNUMBER_RANGE);

		if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) {
			kmem_free(rp, reglen);
			return (DDI_ME_RNUMBER_RANGE);
		}
		rp += r_no;
		break;

	default:
		return (DDI_ME_INVAL);
	}
	DBG(DBG_MAP | DBG_CONT, dip, "\n");

	if ((rp->pci_phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) {
		/*
		 * There may be a need to differentiate between PCI
		 * and PCI-Ex devices so the following range check is
		 * done correctly, depending on the implementation of
		 * the pcieb bridge nexus driver.
		 */
		if ((off >= PCIE_CONF_HDR_SIZE) ||
		    (len > PCIE_CONF_HDR_SIZE) ||
		    (off + len > PCIE_CONF_HDR_SIZE))
			return (DDI_ME_INVAL);
		/*
		 * If px_lib_map_vconfig() returns DDI_FAILURE, no virtual
		 * config space access services are defined in this layer;
		 * otherwise the mapping is satisfied right here and we
		 * return.
		 */
		rval = px_lib_map_vconfig(dip, mp, off, rp, addrp);
		if (rval == DDI_SUCCESS)
			goto done;
	}

	/*
	 * No virtual config space services or we are mapping
	 * a region of memory mapped config/IO/memory space, so proceed
	 * to the parent.
	 */

	/* relocate within 64-bit pci space through "assigned-addresses" */
	if (rval = px_reloc_reg(dip, rdip, px_p, rp))
		goto done;

	if (len)	/* adjust regspec according to mapping request */
		rp->pci_size_low = len;	/* MIN ? */
	rp->pci_phys_low += off;

	/* translate relocated pci regspec into parent space through "ranges" */
	if (rval = px_xlate_reg(px_p, rp, &p_regspec))
		goto done;

	p_mapreq = *mp;		/* dup the whole structure */
	p_mapreq.map_type = DDI_MT_REGSPEC;
	p_mapreq.map_obj.rp = &p_regspec;
	px_lib_map_attr_check(&p_mapreq);
	rval = ddi_map(dip, &p_mapreq, 0, 0, addrp);

	if (rval == DDI_SUCCESS) {
		/*
		 * Set-up access functions for FM access error capable drivers.
		 */
		if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)))
			px_fm_acc_setup(mp, rdip, rp);
	}

done:
	if (mp->map_type == DDI_MT_RNUMBER)
		kmem_free(rp - r_no, reglen);

	return (rval);
}

/*
 * bus dma map entry point
 * return value:
 *	DDI_DMA_PARTIAL_MAP	 1
 *	DDI_DMA_MAPOK		 0
 *	DDI_DMA_MAPPED		 0
 *	DDI_DMA_NORESOURCES	-1
 *	DDI_DMA_NOMAPPING	-2
 *	DDI_DMA_TOOBIG		-3
 */
int
px_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq,
	ddi_dma_handle_t *handlep)
{
	px_t *px_p = DIP_TO_STATE(dip);
	px_mmu_t *mmu_p = px_p->px_mmu_p;
	ddi_dma_impl_t *mp;
	int ret;

	DBG(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip),
	    handlep ? "alloc" : "advisory");
"alloc" : "advisory"); 818 819 if (!(mp = px_dma_lmts2hdl(dip, rdip, mmu_p, dmareq))) 820 return (DDI_DMA_NORESOURCES); 821 if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING) 822 return (DDI_DMA_NOMAPPING); 823 if (ret = px_dma_type(px_p, dmareq, mp)) 824 goto freehandle; 825 if (ret = px_dma_pfn(px_p, dmareq, mp)) 826 goto freehandle; 827 828 switch (PX_DMA_TYPE(mp)) { 829 case PX_DMAI_FLAGS_DVMA: /* LINTED E_EQUALITY_NOT_ASSIGNMENT */ 830 if ((ret = px_dvma_win(px_p, dmareq, mp)) || !handlep) 831 goto freehandle; 832 if (!PX_DMA_CANCACHE(mp)) { /* try fast track */ 833 if (PX_DMA_CANFAST(mp)) { 834 if (!px_dvma_map_fast(mmu_p, mp)) 835 break; 836 /* LINTED E_NOP_ELSE_STMT */ 837 } else { 838 PX_DVMA_FASTTRAK_PROF(mp); 839 } 840 } 841 if (ret = px_dvma_map(mp, dmareq, mmu_p)) 842 goto freehandle; 843 break; 844 case PX_DMAI_FLAGS_PTP: /* LINTED E_EQUALITY_NOT_ASSIGNMENT */ 845 if ((ret = px_dma_physwin(px_p, dmareq, mp)) || !handlep) 846 goto freehandle; 847 break; 848 case PX_DMAI_FLAGS_BYPASS: 849 default: 850 cmn_err(CE_PANIC, "%s%d: px_dma_setup: bad dma type 0x%x", 851 ddi_driver_name(rdip), ddi_get_instance(rdip), 852 PX_DMA_TYPE(mp)); 853 /*NOTREACHED*/ 854 } 855 *handlep = (ddi_dma_handle_t)mp; 856 mp->dmai_flags |= PX_DMAI_FLAGS_INUSE; 857 px_dump_dma_handle(DBG_DMA_MAP, dip, mp); 858 859 return ((mp->dmai_nwin == 1) ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP); 860 freehandle: 861 if (ret == DDI_DMA_NORESOURCES) 862 px_dma_freemp(mp); /* don't run_callback() */ 863 else 864 (void) px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp); 865 return (ret); 866 } 867 868 869 /* 870 * bus dma alloc handle entry point: 871 */ 872 int 873 px_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp, 874 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 875 { 876 px_t *px_p = DIP_TO_STATE(dip); 877 ddi_dma_impl_t *mp; 878 int rval; 879 880 DBG(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n", 881 ddi_driver_name(rdip), ddi_get_instance(rdip)); 882 883 if (attrp->dma_attr_version != DMA_ATTR_V0) 884 return (DDI_DMA_BADATTR); 885 886 if (!(mp = px_dma_allocmp(dip, rdip, waitfp, arg))) 887 return (DDI_DMA_NORESOURCES); 888 889 /* 890 * Save requestor's information 891 */ 892 mp->dmai_attr = *attrp; /* whole object - augmented later */ 893 *PX_DEV_ATTR(mp) = *attrp; /* whole object - device orig attr */ 894 DBG(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp); 895 896 /* check and convert dma attributes to handle parameters */ 897 if (rval = px_dma_attr2hdl(px_p, mp)) { 898 px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp); 899 *handlep = NULL; 900 return (rval); 901 } 902 *handlep = (ddi_dma_handle_t)mp; 903 return (DDI_SUCCESS); 904 } 905 906 907 /* 908 * bus dma free handle entry point: 909 */ 910 /*ARGSUSED*/ 911 int 912 px_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 913 { 914 DBG(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n", 915 ddi_driver_name(rdip), ddi_get_instance(rdip), handle); 916 px_dma_freemp((ddi_dma_impl_t *)handle); 917 918 if (px_kmem_clid) { 919 DBG(DBG_DMA_FREEH, dip, "run handle callback\n"); 920 ddi_run_callback(&px_kmem_clid); 921 } 922 return (DDI_SUCCESS); 923 } 924 925 926 /* 927 * bus dma bind handle entry point: 928 */ 929 int 930 px_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 931 ddi_dma_handle_t handle, ddi_dma_req_t *dmareq, 932 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 933 { 934 px_t *px_p = DIP_TO_STATE(dip); 935 px_mmu_t *mmu_p = px_p->px_mmu_p; 936 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 937 int ret; 938 939 DBG(DBG_DMA_BINDH, dip, 
"rdip=%s%d mp=%p dmareq=%p\n", 940 ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq); 941 942 if (mp->dmai_flags & PX_DMAI_FLAGS_INUSE) 943 return (DDI_DMA_INUSE); 944 945 ASSERT((mp->dmai_flags & ~PX_DMAI_FLAGS_PRESERVE) == 0); 946 mp->dmai_flags |= PX_DMAI_FLAGS_INUSE; 947 948 if (ret = px_dma_type(px_p, dmareq, mp)) 949 goto err; 950 if (ret = px_dma_pfn(px_p, dmareq, mp)) 951 goto err; 952 953 switch (PX_DMA_TYPE(mp)) { 954 case PX_DMAI_FLAGS_DVMA: 955 if (ret = px_dvma_win(px_p, dmareq, mp)) 956 goto map_err; 957 if (!PX_DMA_CANCACHE(mp)) { /* try fast track */ 958 if (PX_DMA_CANFAST(mp)) { 959 if (!px_dvma_map_fast(mmu_p, mp)) 960 goto mapped; /*LINTED E_NOP_ELSE_STMT*/ 961 } else { 962 PX_DVMA_FASTTRAK_PROF(mp); 963 } 964 } 965 if (ret = px_dvma_map(mp, dmareq, mmu_p)) 966 goto map_err; 967 mapped: 968 *ccountp = 1; 969 MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size); 970 break; 971 case PX_DMAI_FLAGS_BYPASS: 972 case PX_DMAI_FLAGS_PTP: 973 if (ret = px_dma_physwin(px_p, dmareq, mp)) 974 goto map_err; 975 *ccountp = PX_WINLST(mp)->win_ncookies; 976 *cookiep = 977 *(ddi_dma_cookie_t *)(PX_WINLST(mp) + 1); /* wholeobj */ 978 break; 979 default: 980 cmn_err(CE_PANIC, "%s%d: px_dma_bindhdl(%p): bad dma type", 981 ddi_driver_name(rdip), ddi_get_instance(rdip), mp); 982 /*NOTREACHED*/ 983 } 984 DBG(DBG_DMA_BINDH, dip, "cookie %" PRIx64 "+%x\n", 985 cookiep->dmac_address, cookiep->dmac_size); 986 px_dump_dma_handle(DBG_DMA_MAP, dip, mp); 987 988 /* insert dma handle into FMA cache */ 989 if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) 990 mp->dmai_error.err_cf = px_err_dma_hdl_check; 991 992 return (mp->dmai_nwin == 1 ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP); 993 map_err: 994 px_dma_freepfn(mp); 995 err: 996 mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE; 997 return (ret); 998 } 999 1000 1001 /* 1002 * bus dma unbind handle entry point: 1003 */ 1004 /*ARGSUSED*/ 1005 int 1006 px_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 1007 { 1008 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 1009 px_t *px_p = DIP_TO_STATE(dip); 1010 px_mmu_t *mmu_p = px_p->px_mmu_p; 1011 1012 DBG(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n", 1013 ddi_driver_name(rdip), ddi_get_instance(rdip), handle); 1014 if ((mp->dmai_flags & PX_DMAI_FLAGS_INUSE) == 0) { 1015 DBG(DBG_DMA_UNBINDH, dip, "handle not inuse\n"); 1016 return (DDI_FAILURE); 1017 } 1018 1019 mp->dmai_error.err_cf = NULL; 1020 1021 /* 1022 * Here if the handle is using the iommu. Unload all the iommu 1023 * translations. 
1024 */ 1025 switch (PX_DMA_TYPE(mp)) { 1026 case PX_DMAI_FLAGS_DVMA: 1027 px_mmu_unmap_window(mmu_p, mp); 1028 px_dvma_unmap(mmu_p, mp); 1029 px_dma_freepfn(mp); 1030 break; 1031 case PX_DMAI_FLAGS_BYPASS: 1032 case PX_DMAI_FLAGS_PTP: 1033 px_dma_freewin(mp); 1034 break; 1035 default: 1036 cmn_err(CE_PANIC, "%s%d: px_dma_unbindhdl:bad dma type %p", 1037 ddi_driver_name(rdip), ddi_get_instance(rdip), mp); 1038 /*NOTREACHED*/ 1039 } 1040 if (mmu_p->mmu_dvma_clid != 0) { 1041 DBG(DBG_DMA_UNBINDH, dip, "run dvma callback\n"); 1042 ddi_run_callback(&mmu_p->mmu_dvma_clid); 1043 } 1044 if (px_kmem_clid) { 1045 DBG(DBG_DMA_UNBINDH, dip, "run handle callback\n"); 1046 ddi_run_callback(&px_kmem_clid); 1047 } 1048 mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE; 1049 1050 return (DDI_SUCCESS); 1051 } 1052 1053 /* 1054 * bus dma win entry point: 1055 */ 1056 int 1057 px_dma_win(dev_info_t *dip, dev_info_t *rdip, 1058 ddi_dma_handle_t handle, uint_t win, off_t *offp, 1059 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 1060 { 1061 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 1062 int ret; 1063 1064 DBG(DBG_DMA_WIN, dip, "rdip=%s%d\n", 1065 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1066 1067 px_dump_dma_handle(DBG_DMA_WIN, dip, mp); 1068 if (win >= mp->dmai_nwin) { 1069 DBG(DBG_DMA_WIN, dip, "%x out of range\n", win); 1070 return (DDI_FAILURE); 1071 } 1072 1073 switch (PX_DMA_TYPE(mp)) { 1074 case PX_DMAI_FLAGS_DVMA: 1075 if (win != PX_DMA_CURWIN(mp)) { 1076 px_t *px_p = DIP_TO_STATE(dip); 1077 px_mmu_t *mmu_p = px_p->px_mmu_p; 1078 px_mmu_unmap_window(mmu_p, mp); 1079 1080 /* map_window sets dmai_mapping/size/offset */ 1081 px_mmu_map_window(mmu_p, mp, win); 1082 if ((ret = px_mmu_map_window(mmu_p, 1083 mp, win)) != DDI_SUCCESS) 1084 return (ret); 1085 } 1086 if (cookiep) 1087 MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, 1088 mp->dmai_size); 1089 if (ccountp) 1090 *ccountp = 1; 1091 break; 1092 case PX_DMAI_FLAGS_PTP: 1093 case PX_DMAI_FLAGS_BYPASS: { 1094 int i; 1095 ddi_dma_cookie_t *ck_p; 1096 px_dma_win_t *win_p = mp->dmai_winlst; 1097 1098 for (i = 0; i < win; win_p = win_p->win_next, i++) {}; 1099 ck_p = (ddi_dma_cookie_t *)(win_p + 1); 1100 *cookiep = *ck_p; 1101 mp->dmai_offset = win_p->win_offset; 1102 mp->dmai_size = win_p->win_size; 1103 mp->dmai_mapping = ck_p->dmac_laddress; 1104 mp->dmai_cookie = ck_p + 1; 1105 win_p->win_curseg = 0; 1106 if (ccountp) 1107 *ccountp = win_p->win_ncookies; 1108 } 1109 break; 1110 default: 1111 cmn_err(CE_WARN, "%s%d: px_dma_win:bad dma type 0x%x", 1112 ddi_driver_name(rdip), ddi_get_instance(rdip), 1113 PX_DMA_TYPE(mp)); 1114 return (DDI_FAILURE); 1115 } 1116 if (cookiep) 1117 DBG(DBG_DMA_WIN, dip, 1118 "cookie - dmac_address=%x dmac_size=%x\n", 1119 cookiep->dmac_address, cookiep->dmac_size); 1120 if (offp) 1121 *offp = (off_t)mp->dmai_offset; 1122 if (lenp) 1123 *lenp = mp->dmai_size; 1124 return (DDI_SUCCESS); 1125 } 1126 1127 #ifdef DEBUG 1128 static char *px_dmactl_str[] = { 1129 "DDI_DMA_FREE", 1130 "DDI_DMA_SYNC", 1131 "DDI_DMA_HTOC", 1132 "DDI_DMA_KVADDR", 1133 "DDI_DMA_MOVWIN", 1134 "DDI_DMA_REPWIN", 1135 "DDI_DMA_GETERR", 1136 "DDI_DMA_COFF", 1137 "DDI_DMA_NEXTWIN", 1138 "DDI_DMA_NEXTSEG", 1139 "DDI_DMA_SEGTOC", 1140 "DDI_DMA_RESERVE", 1141 "DDI_DMA_RELEASE", 1142 "DDI_DMA_RESETH", 1143 "DDI_DMA_CKSYNC", 1144 "DDI_DMA_IOPB_ALLOC", 1145 "DDI_DMA_IOPB_FREE", 1146 "DDI_DMA_SMEM_ALLOC", 1147 "DDI_DMA_SMEM_FREE", 1148 "DDI_DMA_SET_SBUS64" 1149 }; 1150 #endif /* DEBUG */ 1151 1152 /* 1153 * bus dma control entry point: 1154 */ 1155 
/*ARGSUSED*/
int
px_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
	enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp,
	uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;

#ifdef DEBUG
	DBG(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", px_dmactl_str[cmd],
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
#endif	/* DEBUG */

	switch (cmd) {
	case DDI_DMA_FREE:
		(void) px_dma_unbindhdl(dip, rdip, handle);
		(void) px_dma_freehdl(dip, rdip, handle);
		return (DDI_SUCCESS);
	case DDI_DMA_RESERVE: {
		px_t *px_p = DIP_TO_STATE(dip);
		return (px_fdvma_reserve(dip, rdip, px_p,
		    (ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp));
		}
	case DDI_DMA_RELEASE: {
		px_t *px_p = DIP_TO_STATE(dip);
		return (px_fdvma_release(dip, px_p, mp));
		}
	default:
		break;
	}

	switch (PX_DMA_TYPE(mp)) {
	case PX_DMAI_FLAGS_DVMA:
		return (px_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	case PX_DMAI_FLAGS_PTP:
	case PX_DMAI_FLAGS_BYPASS:
		return (px_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_ctlops(%x):bad dma type %x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), cmd,
		    mp->dmai_flags);
		/*NOTREACHED*/
	}
	return (0);
}

/*
 * control ops entry point:
 *
 * Requests handled completely:
 *	DDI_CTLOPS_INITCHILD	see init_child() for details
 *	DDI_CTLOPS_UNINITCHILD
 *	DDI_CTLOPS_REPORTDEV	see report_dev() for details
 *	DDI_CTLOPS_IOMIN	cache line size if streaming otherwise 1
 *	DDI_CTLOPS_REGSIZE
 *	DDI_CTLOPS_NREGS
 *	DDI_CTLOPS_DVMAPAGESIZE
 *	DDI_CTLOPS_POKE
 *	DDI_CTLOPS_PEEK
 *
 * All others passed to parent.
 */
int
px_ctlops(dev_info_t *dip, dev_info_t *rdip,
	ddi_ctl_enum_t op, void *arg, void *result)
{
	px_t *px_p = DIP_TO_STATE(dip);
	struct detachspec *ds;
	struct attachspec *as;

	switch (op) {
	case DDI_CTLOPS_INITCHILD:
		return (px_init_child(px_p, (dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		return (px_uninit_child(px_p, (dev_info_t *)arg));

	case DDI_CTLOPS_ATTACH:
		if (!pcie_is_child(dip, rdip))
			return (DDI_SUCCESS);

		as = (struct attachspec *)arg;
		switch (as->when) {
		case DDI_PRE:
			if (as->cmd == DDI_ATTACH) {
				DBG(DBG_PWR, dip, "PRE_ATTACH for %s@%d\n",
				    ddi_driver_name(rdip),
				    ddi_get_instance(rdip));
				return (pcie_pm_hold(dip));
			}
			if (as->cmd == DDI_RESUME) {
				DBG(DBG_PWR, dip, "PRE_RESUME for %s@%d\n",
				    ddi_driver_name(rdip),
				    ddi_get_instance(rdip));

				pcie_clear_errors(rdip);
			}
			return (DDI_SUCCESS);

		case DDI_POST:
			DBG(DBG_PWR, dip, "POST_ATTACH for %s@%d\n",
			    ddi_driver_name(rdip), ddi_get_instance(rdip));
			if (as->cmd == DDI_ATTACH &&
			    as->result != DDI_SUCCESS) {
				/*
				 * Attach failed for the child device. The child
				 * driver may have made PM calls before the
				 * attach failed. pcie_pm_remove_child() should
				 * cleanup PM state and holds (if any)
				 * associated with the child device.
1267 */ 1268 return (pcie_pm_remove_child(dip, rdip)); 1269 } 1270 1271 if (as->result == DDI_SUCCESS) 1272 pf_init(rdip, (void *)px_p->px_fm_ibc, as->cmd); 1273 1274 (void) pcie_postattach_child(rdip); 1275 1276 return (DDI_SUCCESS); 1277 default: 1278 break; 1279 } 1280 break; 1281 1282 case DDI_CTLOPS_DETACH: 1283 if (!pcie_is_child(dip, rdip)) 1284 return (DDI_SUCCESS); 1285 1286 ds = (struct detachspec *)arg; 1287 switch (ds->when) { 1288 case DDI_POST: 1289 if (ds->cmd == DDI_DETACH && 1290 ds->result == DDI_SUCCESS) { 1291 DBG(DBG_PWR, dip, "POST_DETACH for %s@%d\n", 1292 ddi_driver_name(rdip), 1293 ddi_get_instance(rdip)); 1294 return (pcie_pm_remove_child(dip, rdip)); 1295 } 1296 return (DDI_SUCCESS); 1297 case DDI_PRE: 1298 pf_fini(rdip, ds->cmd); 1299 return (DDI_SUCCESS); 1300 default: 1301 break; 1302 } 1303 break; 1304 1305 case DDI_CTLOPS_REPORTDEV: 1306 if (ddi_get_parent(rdip) == dip) 1307 return (px_report_dev(rdip)); 1308 1309 (void) px_lib_fabric_sync(rdip); 1310 return (DDI_SUCCESS); 1311 1312 case DDI_CTLOPS_IOMIN: 1313 return (DDI_SUCCESS); 1314 1315 case DDI_CTLOPS_REGSIZE: 1316 *((off_t *)result) = px_get_reg_set_size(rdip, *((int *)arg)); 1317 return (*((off_t *)result) == 0 ? DDI_FAILURE : DDI_SUCCESS); 1318 1319 case DDI_CTLOPS_NREGS: 1320 *((uint_t *)result) = px_get_nreg_set(rdip); 1321 return (DDI_SUCCESS); 1322 1323 case DDI_CTLOPS_DVMAPAGESIZE: 1324 *((ulong_t *)result) = MMU_PAGE_SIZE; 1325 return (DDI_SUCCESS); 1326 1327 case DDI_CTLOPS_POKE: /* platform dependent implementation. */ 1328 return (px_lib_ctlops_poke(dip, rdip, 1329 (peekpoke_ctlops_t *)arg)); 1330 1331 case DDI_CTLOPS_PEEK: /* platform dependent implementation. */ 1332 return (px_lib_ctlops_peek(dip, rdip, 1333 (peekpoke_ctlops_t *)arg, result)); 1334 1335 case DDI_CTLOPS_POWER: 1336 default: 1337 break; 1338 } 1339 1340 /* 1341 * Now pass the request up to our parent. 1342 */ 1343 DBG(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n", 1344 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1345 return (ddi_ctlops(dip, rdip, op, arg, result)); 1346 } 1347 1348 /* ARGSUSED */ 1349 int 1350 px_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op, 1351 ddi_intr_handle_impl_t *hdlp, void *result) 1352 { 1353 int intr_types, ret = DDI_SUCCESS; 1354 px_t *px_p = DIP_TO_STATE(dip); 1355 1356 DBG(DBG_INTROPS, dip, "px_intr_ops: rdip=%s%d\n", 1357 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1358 1359 /* Process DDI_INTROP_SUPPORTED_TYPES request here */ 1360 if (intr_op == DDI_INTROP_SUPPORTED_TYPES) { 1361 *(int *)result = i_ddi_get_intx_nintrs(rdip) ? 1362 DDI_INTR_TYPE_FIXED : 0; 1363 1364 if ((pci_msi_get_supported_type(rdip, 1365 &intr_types)) == DDI_SUCCESS) { 1366 /* 1367 * Double check supported interrupt types vs. 1368 * what the host bridge supports. 1369 */ 1370 *(int *)result |= intr_types; 1371 } 1372 1373 *(int *)result &= 1374 (px_force_intx_support ? 1375 (px_p->px_supp_intr_types | DDI_INTR_TYPE_FIXED) : 1376 px_p->px_supp_intr_types); 1377 return (*(int *)result ? DDI_SUCCESS : DDI_FAILURE); 1378 } 1379 1380 /* 1381 * PCI-E nexus driver supports fixed, MSI and MSI-X interrupts. 1382 * Return failure if interrupt type is not supported. 
1383 */ 1384 switch (hdlp->ih_type) { 1385 case DDI_INTR_TYPE_FIXED: 1386 ret = px_intx_ops(dip, rdip, intr_op, hdlp, result); 1387 break; 1388 case DDI_INTR_TYPE_MSI: 1389 case DDI_INTR_TYPE_MSIX: 1390 ret = px_msix_ops(dip, rdip, intr_op, hdlp, result); 1391 break; 1392 default: 1393 ret = DDI_ENOTSUP; 1394 break; 1395 } 1396 1397 return (ret); 1398 } 1399 1400 static void 1401 px_set_mps(px_t *px_p) 1402 { 1403 dev_info_t *dip; 1404 pcie_bus_t *bus_p; 1405 int max_supported; 1406 1407 dip = px_p->px_dip; 1408 bus_p = PCIE_DIP2BUS(dip); 1409 1410 bus_p->bus_mps = -1; 1411 1412 if (pcie_root_port(dip) == DDI_FAILURE) { 1413 if (px_lib_get_root_complex_mps(px_p, dip, 1414 &max_supported) < 0) { 1415 1416 DBG(DBG_MPS, dip, "MPS: Can not get RC MPS\n"); 1417 return; 1418 } 1419 1420 DBG(DBG_MPS, dip, "MPS: Root Complex MPS Cap of = %x\n", 1421 max_supported); 1422 1423 if (pcie_max_mps < max_supported) 1424 max_supported = pcie_max_mps; 1425 1426 (void) pcie_get_fabric_mps(dip, ddi_get_child(dip), 1427 &max_supported); 1428 1429 bus_p->bus_mps = max_supported; 1430 1431 (void) px_lib_set_root_complex_mps(px_p, dip, bus_p->bus_mps); 1432 1433 DBG(DBG_MPS, dip, "MPS: Root Complex MPS Set to = %x\n", 1434 bus_p->bus_mps); 1435 } 1436 } 1437