1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * SPARC Host to PCI Express nexus driver 28 */ 29 30 #include <sys/types.h> 31 #include <sys/conf.h> /* nulldev */ 32 #include <sys/stat.h> /* devctl */ 33 #include <sys/kmem.h> 34 #include <sys/sunddi.h> 35 #include <sys/sunndi.h> 36 #include <sys/ddi_subrdefs.h> 37 #include <sys/spl.h> 38 #include <sys/epm.h> 39 #include <sys/iommutsb.h> 40 #include "px_obj.h" 41 #include <sys/hotplug/pci/pcie_hp.h> 42 #include <sys/pci_tools.h> 43 #include "px_tools_ext.h" 44 #include <sys/pcie_pwr.h> 45 46 /*LINTLIBRARY*/ 47 48 /* 49 * function prototypes for dev ops routines: 50 */ 51 static int px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 52 static int px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 53 static int px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, 54 void *arg, void **result); 55 static int px_cb_attach(px_t *); 56 static void px_cb_detach(px_t *); 57 static int px_pwr_setup(dev_info_t *dip); 58 static void px_pwr_teardown(dev_info_t *dip); 59 60 static void px_set_mps(px_t *px_p); 61 62 extern int pcie_max_mps; 63 64 /* 65 * bus ops and dev ops structures: 66 */ 67 static struct bus_ops px_bus_ops = { 68 BUSO_REV, 69 px_map, 70 0, 71 0, 72 0, 73 i_ddi_map_fault, 74 px_dma_setup, 75 px_dma_allochdl, 76 px_dma_freehdl, 77 px_dma_bindhdl, 78 px_dma_unbindhdl, 79 px_lib_dma_sync, 80 px_dma_win, 81 px_dma_ctlops, 82 px_ctlops, 83 ddi_bus_prop_op, 84 ndi_busop_get_eventcookie, 85 ndi_busop_add_eventcall, 86 ndi_busop_remove_eventcall, 87 ndi_post_event, 88 NULL, 89 NULL, /* (*bus_config)(); */ 90 NULL, /* (*bus_unconfig)(); */ 91 px_fm_init_child, /* (*bus_fm_init)(); */ 92 NULL, /* (*bus_fm_fini)(); */ 93 px_bus_enter, /* (*bus_fm_access_enter)(); */ 94 px_bus_exit, /* (*bus_fm_access_fini)(); */ 95 pcie_bus_power, /* (*bus_power)(); */ 96 px_intr_ops, /* (*bus_intr_op)(); */ 97 pcie_hp_common_ops /* (*bus_hp_op)(); */ 98 }; 99 100 extern struct cb_ops px_cb_ops; 101 102 static struct dev_ops px_ops = { 103 DEVO_REV, 104 0, 105 px_info, 106 nulldev, 107 0, 108 px_attach, 109 px_detach, 110 nodev, 111 &px_cb_ops, 112 &px_bus_ops, 113 nulldev, 114 ddi_quiesce_not_needed, /* quiesce */ 115 }; 116 117 /* 118 * module definitions: 119 */ 120 #include <sys/modctl.h> 121 extern struct mod_ops mod_driverops; 122 123 static struct modldrv modldrv = { 124 &mod_driverops, /* Type of module - driver */ 125 #if defined(sun4u) 126 "Sun4u Host to PCIe nexus driver", /* Name of module. */ 127 #elif defined(sun4v) 128 "Sun4v Host to PCIe nexus driver", /* Name of module. */ 129 #endif 130 &px_ops, /* driver ops */ 131 }; 132 133 static struct modlinkage modlinkage = { 134 MODREV_1, (void *)&modldrv, NULL 135 }; 136 137 /* driver soft state */ 138 void *px_state_p; 139 140 int 141 _init(void) 142 { 143 int e; 144 145 /* 146 * Initialize per-px bus soft state pointer. 147 */ 148 e = ddi_soft_state_init(&px_state_p, sizeof (px_t), 1); 149 if (e != DDI_SUCCESS) 150 return (e); 151 152 /* 153 * Install the module. 154 */ 155 e = mod_install(&modlinkage); 156 if (e != DDI_SUCCESS) 157 ddi_soft_state_fini(&px_state_p); 158 return (e); 159 } 160 161 int 162 _fini(void) 163 { 164 int e; 165 166 /* 167 * Remove the module. 168 */ 169 e = mod_remove(&modlinkage); 170 if (e != DDI_SUCCESS) 171 return (e); 172 173 /* Free px soft state */ 174 ddi_soft_state_fini(&px_state_p); 175 176 return (e); 177 } 178 179 int 180 _info(struct modinfo *modinfop) 181 { 182 return (mod_info(&modlinkage, modinfop)); 183 } 184 185 /* ARGSUSED */ 186 static int 187 px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 188 { 189 minor_t minor = getminor((dev_t)arg); 190 int instance = PCI_MINOR_NUM_TO_INSTANCE(minor); 191 px_t *px_p = INST_TO_STATE(instance); 192 int ret = DDI_SUCCESS; 193 194 switch (infocmd) { 195 case DDI_INFO_DEVT2INSTANCE: 196 *result = (void *)(intptr_t)instance; 197 break; 198 case DDI_INFO_DEVT2DEVINFO: 199 if (px_p == NULL) { 200 ret = DDI_FAILURE; 201 break; 202 } 203 204 *result = (void *)px_p->px_dip; 205 break; 206 default: 207 ret = DDI_FAILURE; 208 break; 209 } 210 211 return (ret); 212 } 213 214 /* device driver entry points */ 215 /* 216 * attach entry point: 217 */ 218 /*ARGSUSED*/ 219 static int 220 px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 221 { 222 px_t *px_p; /* per bus state pointer */ 223 int instance = DIP_TO_INST(dip); 224 int ret = DDI_SUCCESS; 225 devhandle_t dev_hdl = NULL; 226 pcie_hp_regops_t regops; 227 228 switch (cmd) { 229 case DDI_ATTACH: 230 DBG(DBG_ATTACH, dip, "DDI_ATTACH\n"); 231 232 /* 233 * Allocate and get the per-px soft state structure. 234 */ 235 if (ddi_soft_state_zalloc(px_state_p, instance) 236 != DDI_SUCCESS) { 237 cmn_err(CE_WARN, "%s%d: can't allocate px state", 238 ddi_driver_name(dip), instance); 239 goto err_bad_px_softstate; 240 } 241 px_p = INST_TO_STATE(instance); 242 px_p->px_dip = dip; 243 mutex_init(&px_p->px_mutex, NULL, MUTEX_DRIVER, NULL); 244 px_p->px_soft_state = PCI_SOFT_STATE_CLOSED; 245 246 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, 247 "device_type", "pciex"); 248 249 /* Initialize px_dbg for high pil printing */ 250 px_dbg_attach(dip, &px_p->px_dbg_hdl); 251 pcie_rc_init_bus(dip); 252 253 /* 254 * Get key properties of the pci bridge node and 255 * determine it's type (psycho, schizo, etc ...). 256 */ 257 if (px_get_props(px_p, dip) == DDI_FAILURE) 258 goto err_bad_px_prop; 259 260 if (px_lib_dev_init(dip, &dev_hdl) != DDI_SUCCESS) 261 goto err_bad_dev_init; 262 263 /* Initialize device handle */ 264 px_p->px_dev_hdl = dev_hdl; 265 266 /* Cache the BDF of the root port nexus */ 267 px_p->px_bdf = px_lib_get_bdf(px_p); 268 269 /* 270 * Initialize interrupt block. Note that this 271 * initialize error handling for the PEC as well. 272 */ 273 if ((ret = px_ib_attach(px_p)) != DDI_SUCCESS) 274 goto err_bad_ib; 275 276 if (px_cb_attach(px_p) != DDI_SUCCESS) 277 goto err_bad_cb; 278 279 /* 280 * Start creating the modules. 281 * Note that attach() routines should 282 * register and enable their own interrupts. 283 */ 284 285 if ((px_mmu_attach(px_p)) != DDI_SUCCESS) 286 goto err_bad_mmu; 287 288 if ((px_msiq_attach(px_p)) != DDI_SUCCESS) 289 goto err_bad_msiq; 290 291 if ((px_msi_attach(px_p)) != DDI_SUCCESS) 292 goto err_bad_msi; 293 294 if ((px_pec_attach(px_p)) != DDI_SUCCESS) 295 goto err_bad_pec; 296 297 if ((px_dma_attach(px_p)) != DDI_SUCCESS) 298 goto err_bad_dma; /* nothing to uninitialize on DMA */ 299 300 if ((px_fm_attach(px_p)) != DDI_SUCCESS) 301 goto err_bad_dma; 302 303 /* 304 * All of the error handlers have been registered 305 * by now so it's time to activate the interrupt. 306 */ 307 if ((ret = px_err_add_intr(&px_p->px_fault)) != DDI_SUCCESS) 308 goto err_bad_intr; 309 310 if (px_lib_hotplug_init(dip, (void *)®ops) == DDI_SUCCESS) { 311 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 312 313 bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE; 314 } 315 316 (void) px_set_mps(px_p); 317 318 if (pcie_init(dip, (caddr_t)®ops) != DDI_SUCCESS) 319 goto err_bad_hotplug; 320 321 if (pxtool_init(dip) != DDI_SUCCESS) 322 goto err_bad_pcitool_node; 323 324 /* 325 * power management setup. Even if it fails, attach will 326 * succeed as this is a optional feature. Since we are 327 * always at full power, this is not critical. 328 */ 329 if (pwr_common_setup(dip) != DDI_SUCCESS) { 330 DBG(DBG_PWR, dip, "pwr_common_setup failed\n"); 331 } else if (px_pwr_setup(dip) != DDI_SUCCESS) { 332 DBG(DBG_PWR, dip, "px_pwr_setup failed \n"); 333 pwr_common_teardown(dip); 334 } 335 336 /* 337 * add cpr callback 338 */ 339 px_cpr_add_callb(px_p); 340 341 ddi_report_dev(dip); 342 343 px_p->px_state = PX_ATTACHED; 344 DBG(DBG_ATTACH, dip, "attach success\n"); 345 break; 346 347 err_bad_pcitool_node: 348 (void) pcie_uninit(dip); 349 err_bad_hotplug: 350 (void) px_lib_hotplug_uninit(dip); 351 px_err_rem_intr(&px_p->px_fault); 352 err_bad_intr: 353 px_fm_detach(px_p); 354 err_bad_dma: 355 px_pec_detach(px_p); 356 err_bad_pec: 357 px_msi_detach(px_p); 358 err_bad_msi: 359 px_msiq_detach(px_p); 360 err_bad_msiq: 361 px_mmu_detach(px_p); 362 err_bad_mmu: 363 px_cb_detach(px_p); 364 err_bad_cb: 365 px_ib_detach(px_p); 366 err_bad_ib: 367 if (px_lib_dev_fini(dip) != DDI_SUCCESS) { 368 DBG(DBG_ATTACH, dip, "px_lib_dev_fini failed\n"); 369 } 370 err_bad_dev_init: 371 px_free_props(px_p); 372 err_bad_px_prop: 373 pcie_rc_fini_bus(dip); 374 px_dbg_detach(dip, &px_p->px_dbg_hdl); 375 mutex_destroy(&px_p->px_mutex); 376 ddi_soft_state_free(px_state_p, instance); 377 err_bad_px_softstate: 378 ret = DDI_FAILURE; 379 break; 380 381 case DDI_RESUME: 382 DBG(DBG_ATTACH, dip, "DDI_RESUME\n"); 383 384 px_p = INST_TO_STATE(instance); 385 386 mutex_enter(&px_p->px_mutex); 387 388 /* suspend might have not succeeded */ 389 if (px_p->px_state != PX_SUSPENDED) { 390 DBG(DBG_ATTACH, px_p->px_dip, 391 "instance NOT suspended\n"); 392 ret = DDI_FAILURE; 393 break; 394 } 395 396 px_msiq_resume(px_p); 397 px_lib_resume(dip); 398 (void) pcie_pwr_resume(dip); 399 px_p->px_state = PX_ATTACHED; 400 401 mutex_exit(&px_p->px_mutex); 402 403 break; 404 default: 405 DBG(DBG_ATTACH, dip, "unsupported attach op\n"); 406 ret = DDI_FAILURE; 407 break; 408 } 409 410 return (ret); 411 } 412 413 /* 414 * detach entry point: 415 */ 416 /*ARGSUSED*/ 417 static int 418 px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 419 { 420 int instance = ddi_get_instance(dip); 421 px_t *px_p = INST_TO_STATE(instance); 422 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 423 int ret; 424 425 /* 426 * Make sure we are currently attached 427 */ 428 if (px_p->px_state != PX_ATTACHED) { 429 DBG(DBG_DETACH, dip, "Instance not attached\n"); 430 return (DDI_FAILURE); 431 } 432 433 mutex_enter(&px_p->px_mutex); 434 435 switch (cmd) { 436 case DDI_DETACH: 437 DBG(DBG_DETACH, dip, "DDI_DETACH\n"); 438 439 /* 440 * remove cpr callback 441 */ 442 px_cpr_rem_callb(px_p); 443 444 if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) 445 (void) px_lib_hotplug_uninit(dip); 446 447 if (pcie_uninit(dip) != DDI_SUCCESS) { 448 mutex_exit(&px_p->px_mutex); 449 return (DDI_FAILURE); 450 } 451 452 /* 453 * things which used to be done in obj_destroy 454 * are now in-lined here. 455 */ 456 457 px_p->px_state = PX_DETACHED; 458 459 pxtool_uninit(dip); 460 461 px_err_rem_intr(&px_p->px_fault); 462 px_fm_detach(px_p); 463 px_pec_detach(px_p); 464 px_pwr_teardown(dip); 465 pwr_common_teardown(dip); 466 px_msi_detach(px_p); 467 px_msiq_detach(px_p); 468 px_mmu_detach(px_p); 469 px_cb_detach(px_p); 470 px_ib_detach(px_p); 471 if (px_lib_dev_fini(dip) != DDI_SUCCESS) { 472 DBG(DBG_DETACH, dip, "px_lib_dev_fini failed\n"); 473 } 474 475 /* 476 * Free the px soft state structure and the rest of the 477 * resources it's using. 478 */ 479 px_free_props(px_p); 480 pcie_rc_fini_bus(dip); 481 px_dbg_detach(dip, &px_p->px_dbg_hdl); 482 mutex_exit(&px_p->px_mutex); 483 mutex_destroy(&px_p->px_mutex); 484 485 px_p->px_dev_hdl = NULL; 486 ddi_soft_state_free(px_state_p, instance); 487 488 return (DDI_SUCCESS); 489 490 case DDI_SUSPEND: 491 if (pcie_pwr_suspend(dip) != DDI_SUCCESS) { 492 mutex_exit(&px_p->px_mutex); 493 return (DDI_FAILURE); 494 } 495 if ((ret = px_lib_suspend(dip)) == DDI_SUCCESS) 496 px_p->px_state = PX_SUSPENDED; 497 mutex_exit(&px_p->px_mutex); 498 499 return (ret); 500 501 default: 502 DBG(DBG_DETACH, dip, "unsupported detach op\n"); 503 mutex_exit(&px_p->px_mutex); 504 return (DDI_FAILURE); 505 } 506 } 507 508 int 509 px_cb_attach(px_t *px_p) 510 { 511 px_fault_t *fault_p = &px_p->px_cb_fault; 512 dev_info_t *dip = px_p->px_dip; 513 sysino_t sysino; 514 515 if (px_lib_intr_devino_to_sysino(dip, 516 px_p->px_inos[PX_INTR_XBC], &sysino) != DDI_SUCCESS) 517 return (DDI_FAILURE); 518 519 fault_p->px_fh_dip = dip; 520 fault_p->px_fh_sysino = sysino; 521 fault_p->px_err_func = px_err_cb_intr; 522 fault_p->px_intr_ino = px_p->px_inos[PX_INTR_XBC]; 523 524 return (px_cb_add_intr(fault_p)); 525 } 526 527 void 528 px_cb_detach(px_t *px_p) 529 { 530 px_cb_rem_intr(&px_p->px_cb_fault); 531 } 532 533 /* 534 * power management related initialization specific to px 535 * called by px_attach() 536 */ 537 static int 538 px_pwr_setup(dev_info_t *dip) 539 { 540 pcie_pwr_t *pwr_p; 541 int instance = ddi_get_instance(dip); 542 px_t *px_p = INST_TO_STATE(instance); 543 ddi_intr_handle_impl_t hdl; 544 545 ASSERT(PCIE_PMINFO(dip)); 546 pwr_p = PCIE_NEXUS_PMINFO(dip); 547 ASSERT(pwr_p); 548 549 /* 550 * indicate support LDI (Layered Driver Interface) 551 * Create the property, if it is not already there 552 */ 553 if (!ddi_prop_exists(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS, 554 DDI_KERNEL_IOCTL)) { 555 if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 556 DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS) { 557 DBG(DBG_PWR, dip, "can't create kernel ioctl prop\n"); 558 return (DDI_FAILURE); 559 } 560 } 561 /* No support for device PM. We are always at full power */ 562 pwr_p->pwr_func_lvl = PM_LEVEL_D0; 563 564 mutex_init(&px_p->px_l23ready_lock, NULL, MUTEX_DRIVER, 565 DDI_INTR_PRI(px_pwr_pil)); 566 cv_init(&px_p->px_l23ready_cv, NULL, CV_DRIVER, NULL); 567 568 /* Initialize handle */ 569 bzero(&hdl, sizeof (ddi_intr_handle_impl_t)); 570 hdl.ih_cb_arg1 = px_p; 571 hdl.ih_ver = DDI_INTR_VERSION; 572 hdl.ih_state = DDI_IHDL_STATE_ALLOC; 573 hdl.ih_dip = dip; 574 hdl.ih_pri = px_pwr_pil; 575 576 /* Add PME_TO_ACK message handler */ 577 hdl.ih_cb_func = (ddi_intr_handler_t *)px_pmeq_intr; 578 if (px_add_msiq_intr(dip, dip, &hdl, MSG_REC, 579 (msgcode_t)PCIE_PME_ACK_MSG, -1, 580 &px_p->px_pm_msiq_id) != DDI_SUCCESS) { 581 DBG(DBG_PWR, dip, "px_pwr_setup: couldn't add " 582 " PME_TO_ACK intr\n"); 583 goto pwr_setup_err1; 584 } 585 px_lib_msg_setmsiq(dip, PCIE_PME_ACK_MSG, px_p->px_pm_msiq_id); 586 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_VALID); 587 588 if (px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum, 589 px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil, 590 PX_INTR_STATE_ENABLE, MSG_REC, PCIE_PME_ACK_MSG) != DDI_SUCCESS) { 591 DBG(DBG_PWR, dip, "px_pwr_setup: PME_TO_ACK update interrupt" 592 " state failed\n"); 593 goto px_pwrsetup_err_state; 594 } 595 596 return (DDI_SUCCESS); 597 598 px_pwrsetup_err_state: 599 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID); 600 (void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG, 601 px_p->px_pm_msiq_id); 602 pwr_setup_err1: 603 mutex_destroy(&px_p->px_l23ready_lock); 604 cv_destroy(&px_p->px_l23ready_cv); 605 606 return (DDI_FAILURE); 607 } 608 609 /* 610 * undo whatever is done in px_pwr_setup. called by px_detach() 611 */ 612 static void 613 px_pwr_teardown(dev_info_t *dip) 614 { 615 int instance = ddi_get_instance(dip); 616 px_t *px_p = INST_TO_STATE(instance); 617 ddi_intr_handle_impl_t hdl; 618 619 if (!PCIE_PMINFO(dip) || !PCIE_NEXUS_PMINFO(dip)) 620 return; 621 622 /* Initialize handle */ 623 bzero(&hdl, sizeof (ddi_intr_handle_impl_t)); 624 hdl.ih_ver = DDI_INTR_VERSION; 625 hdl.ih_state = DDI_IHDL_STATE_ALLOC; 626 hdl.ih_dip = dip; 627 hdl.ih_pri = px_pwr_pil; 628 629 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID); 630 (void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG, 631 px_p->px_pm_msiq_id); 632 633 (void) px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum, 634 px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil, 635 PX_INTR_STATE_DISABLE, MSG_REC, PCIE_PME_ACK_MSG); 636 637 px_p->px_pm_msiq_id = (msiqid_t)-1; 638 639 cv_destroy(&px_p->px_l23ready_cv); 640 mutex_destroy(&px_p->px_l23ready_lock); 641 } 642 643 /* bus driver entry points */ 644 645 /* 646 * bus map entry point: 647 * 648 * if map request is for an rnumber 649 * get the corresponding regspec from device node 650 * build a new regspec in our parent's format 651 * build a new map_req with the new regspec 652 * call up the tree to complete the mapping 653 */ 654 int 655 px_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, 656 off_t off, off_t len, caddr_t *addrp) 657 { 658 px_t *px_p = DIP_TO_STATE(dip); 659 struct regspec p_regspec; 660 ddi_map_req_t p_mapreq; 661 int reglen, rval, r_no; 662 pci_regspec_t reloc_reg, *rp = &reloc_reg; 663 664 DBG(DBG_MAP, dip, "rdip=%s%d:", 665 ddi_driver_name(rdip), ddi_get_instance(rdip)); 666 667 if (mp->map_flags & DDI_MF_USER_MAPPING) 668 return (DDI_ME_UNIMPLEMENTED); 669 670 switch (mp->map_type) { 671 case DDI_MT_REGSPEC: 672 reloc_reg = *(pci_regspec_t *)mp->map_obj.rp; /* dup whole */ 673 break; 674 675 case DDI_MT_RNUMBER: 676 r_no = mp->map_obj.rnumber; 677 DBG(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no); 678 679 if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS, 680 "reg", (caddr_t)&rp, ®len) != DDI_SUCCESS) 681 return (DDI_ME_RNUMBER_RANGE); 682 683 if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) { 684 kmem_free(rp, reglen); 685 return (DDI_ME_RNUMBER_RANGE); 686 } 687 rp += r_no; 688 break; 689 690 default: 691 return (DDI_ME_INVAL); 692 } 693 DBG(DBG_MAP | DBG_CONT, dip, "\n"); 694 695 if ((rp->pci_phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) { 696 /* 697 * There may be a need to differentiate between PCI 698 * and PCI-Ex devices so the following range check is 699 * done correctly, depending on the implementation of 700 * pcieb bridge nexus driver. 701 */ 702 if ((off >= PCIE_CONF_HDR_SIZE) || 703 (len > PCIE_CONF_HDR_SIZE) || 704 (off + len > PCIE_CONF_HDR_SIZE)) 705 return (DDI_ME_INVAL); 706 /* 707 * the following function returning a DDI_FAILURE assumes 708 * that there are no virtual config space access services 709 * defined in this layer. Otherwise it is availed right 710 * here and we return. 711 */ 712 rval = px_lib_map_vconfig(dip, mp, off, rp, addrp); 713 if (rval == DDI_SUCCESS) 714 goto done; 715 } 716 717 /* 718 * No virtual config space services or we are mapping 719 * a region of memory mapped config/IO/memory space, so proceed 720 * to the parent. 721 */ 722 723 /* relocate within 64-bit pci space through "assigned-addresses" */ 724 if (rval = px_reloc_reg(dip, rdip, px_p, rp)) 725 goto done; 726 727 if (len) /* adjust regspec according to mapping request */ 728 rp->pci_size_low = len; /* MIN ? */ 729 rp->pci_phys_low += off; 730 731 /* translate relocated pci regspec into parent space through "ranges" */ 732 if (rval = px_xlate_reg(px_p, rp, &p_regspec)) 733 goto done; 734 735 p_mapreq = *mp; /* dup the whole structure */ 736 p_mapreq.map_type = DDI_MT_REGSPEC; 737 p_mapreq.map_obj.rp = &p_regspec; 738 px_lib_map_attr_check(&p_mapreq); 739 rval = ddi_map(dip, &p_mapreq, 0, 0, addrp); 740 741 if (rval == DDI_SUCCESS) { 742 /* 743 * Set-up access functions for FM access error capable drivers. 744 */ 745 if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip))) 746 px_fm_acc_setup(mp, rdip, rp); 747 } 748 749 done: 750 if (mp->map_type == DDI_MT_RNUMBER) 751 kmem_free(rp - r_no, reglen); 752 753 return (rval); 754 } 755 756 /* 757 * bus dma map entry point 758 * return value: 759 * DDI_DMA_PARTIAL_MAP 1 760 * DDI_DMA_MAPOK 0 761 * DDI_DMA_MAPPED 0 762 * DDI_DMA_NORESOURCES -1 763 * DDI_DMA_NOMAPPING -2 764 * DDI_DMA_TOOBIG -3 765 */ 766 int 767 px_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq, 768 ddi_dma_handle_t *handlep) 769 { 770 px_t *px_p = DIP_TO_STATE(dip); 771 px_mmu_t *mmu_p = px_p->px_mmu_p; 772 ddi_dma_impl_t *mp; 773 int ret; 774 775 DBG(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n", 776 ddi_driver_name(rdip), ddi_get_instance(rdip), 777 handlep ? "alloc" : "advisory"); 778 779 if (!(mp = px_dma_lmts2hdl(dip, rdip, mmu_p, dmareq))) 780 return (DDI_DMA_NORESOURCES); 781 if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING) 782 return (DDI_DMA_NOMAPPING); 783 if (ret = px_dma_type(px_p, dmareq, mp)) 784 goto freehandle; 785 if (ret = px_dma_pfn(px_p, dmareq, mp)) 786 goto freehandle; 787 788 switch (PX_DMA_TYPE(mp)) { 789 case PX_DMAI_FLAGS_DVMA: /* LINTED E_EQUALITY_NOT_ASSIGNMENT */ 790 if ((ret = px_dvma_win(px_p, dmareq, mp)) || !handlep) 791 goto freehandle; 792 if (!PX_DMA_CANCACHE(mp)) { /* try fast track */ 793 if (PX_DMA_CANFAST(mp)) { 794 if (!px_dvma_map_fast(mmu_p, mp)) 795 break; 796 /* LINTED E_NOP_ELSE_STMT */ 797 } else { 798 PX_DVMA_FASTTRAK_PROF(mp); 799 } 800 } 801 if (ret = px_dvma_map(mp, dmareq, mmu_p)) 802 goto freehandle; 803 break; 804 case PX_DMAI_FLAGS_PTP: /* LINTED E_EQUALITY_NOT_ASSIGNMENT */ 805 if ((ret = px_dma_physwin(px_p, dmareq, mp)) || !handlep) 806 goto freehandle; 807 break; 808 case PX_DMAI_FLAGS_BYPASS: 809 default: 810 cmn_err(CE_PANIC, "%s%d: px_dma_setup: bad dma type 0x%x", 811 ddi_driver_name(rdip), ddi_get_instance(rdip), 812 PX_DMA_TYPE(mp)); 813 /*NOTREACHED*/ 814 } 815 *handlep = (ddi_dma_handle_t)mp; 816 mp->dmai_flags |= PX_DMAI_FLAGS_INUSE; 817 px_dump_dma_handle(DBG_DMA_MAP, dip, mp); 818 819 return ((mp->dmai_nwin == 1) ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP); 820 freehandle: 821 if (ret == DDI_DMA_NORESOURCES) 822 px_dma_freemp(mp); /* don't run_callback() */ 823 else 824 (void) px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp); 825 return (ret); 826 } 827 828 829 /* 830 * bus dma alloc handle entry point: 831 */ 832 int 833 px_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp, 834 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 835 { 836 px_t *px_p = DIP_TO_STATE(dip); 837 ddi_dma_impl_t *mp; 838 int rval; 839 840 DBG(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n", 841 ddi_driver_name(rdip), ddi_get_instance(rdip)); 842 843 if (attrp->dma_attr_version != DMA_ATTR_V0) 844 return (DDI_DMA_BADATTR); 845 846 if (!(mp = px_dma_allocmp(dip, rdip, waitfp, arg))) 847 return (DDI_DMA_NORESOURCES); 848 849 /* 850 * Save requestor's information 851 */ 852 mp->dmai_attr = *attrp; /* whole object - augmented later */ 853 *PX_DEV_ATTR(mp) = *attrp; /* whole object - device orig attr */ 854 DBG(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp); 855 856 /* check and convert dma attributes to handle parameters */ 857 if (rval = px_dma_attr2hdl(px_p, mp)) { 858 px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp); 859 *handlep = NULL; 860 return (rval); 861 } 862 *handlep = (ddi_dma_handle_t)mp; 863 return (DDI_SUCCESS); 864 } 865 866 867 /* 868 * bus dma free handle entry point: 869 */ 870 /*ARGSUSED*/ 871 int 872 px_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 873 { 874 DBG(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n", 875 ddi_driver_name(rdip), ddi_get_instance(rdip), handle); 876 px_dma_freemp((ddi_dma_impl_t *)handle); 877 878 if (px_kmem_clid) { 879 DBG(DBG_DMA_FREEH, dip, "run handle callback\n"); 880 ddi_run_callback(&px_kmem_clid); 881 } 882 return (DDI_SUCCESS); 883 } 884 885 886 /* 887 * bus dma bind handle entry point: 888 */ 889 int 890 px_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 891 ddi_dma_handle_t handle, ddi_dma_req_t *dmareq, 892 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 893 { 894 px_t *px_p = DIP_TO_STATE(dip); 895 px_mmu_t *mmu_p = px_p->px_mmu_p; 896 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 897 int ret; 898 899 DBG(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n", 900 ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq); 901 902 if (mp->dmai_flags & PX_DMAI_FLAGS_INUSE) 903 return (DDI_DMA_INUSE); 904 905 ASSERT((mp->dmai_flags & ~PX_DMAI_FLAGS_PRESERVE) == 0); 906 mp->dmai_flags |= PX_DMAI_FLAGS_INUSE; 907 908 if (ret = px_dma_type(px_p, dmareq, mp)) 909 goto err; 910 if (ret = px_dma_pfn(px_p, dmareq, mp)) 911 goto err; 912 913 switch (PX_DMA_TYPE(mp)) { 914 case PX_DMAI_FLAGS_DVMA: 915 if (ret = px_dvma_win(px_p, dmareq, mp)) 916 goto map_err; 917 if (!PX_DMA_CANCACHE(mp)) { /* try fast track */ 918 if (PX_DMA_CANFAST(mp)) { 919 if (!px_dvma_map_fast(mmu_p, mp)) 920 goto mapped; /*LINTED E_NOP_ELSE_STMT*/ 921 } else { 922 PX_DVMA_FASTTRAK_PROF(mp); 923 } 924 } 925 if (ret = px_dvma_map(mp, dmareq, mmu_p)) 926 goto map_err; 927 mapped: 928 *ccountp = 1; 929 MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size); 930 break; 931 case PX_DMAI_FLAGS_BYPASS: 932 case PX_DMAI_FLAGS_PTP: 933 if (ret = px_dma_physwin(px_p, dmareq, mp)) 934 goto map_err; 935 *ccountp = PX_WINLST(mp)->win_ncookies; 936 *cookiep = 937 *(ddi_dma_cookie_t *)(PX_WINLST(mp) + 1); /* wholeobj */ 938 break; 939 default: 940 cmn_err(CE_PANIC, "%s%d: px_dma_bindhdl(%p): bad dma type", 941 ddi_driver_name(rdip), ddi_get_instance(rdip), mp); 942 /*NOTREACHED*/ 943 } 944 DBG(DBG_DMA_BINDH, dip, "cookie %" PRIx64 "+%x\n", 945 cookiep->dmac_address, cookiep->dmac_size); 946 px_dump_dma_handle(DBG_DMA_MAP, dip, mp); 947 948 /* insert dma handle into FMA cache */ 949 if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) { 950 (void) ndi_fmc_insert(rdip, DMA_HANDLE, mp, NULL); 951 mp->dmai_error.err_cf = px_err_dma_hdl_check; 952 } 953 954 return (mp->dmai_nwin == 1 ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP); 955 map_err: 956 px_dma_freepfn(mp); 957 err: 958 mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE; 959 return (ret); 960 } 961 962 963 /* 964 * bus dma unbind handle entry point: 965 */ 966 /*ARGSUSED*/ 967 int 968 px_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 969 { 970 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 971 px_t *px_p = DIP_TO_STATE(dip); 972 px_mmu_t *mmu_p = px_p->px_mmu_p; 973 974 DBG(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n", 975 ddi_driver_name(rdip), ddi_get_instance(rdip), handle); 976 if ((mp->dmai_flags & PX_DMAI_FLAGS_INUSE) == 0) { 977 DBG(DBG_DMA_UNBINDH, dip, "handle not inuse\n"); 978 return (DDI_FAILURE); 979 } 980 981 /* remove dma handle from FMA cache */ 982 if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) { 983 if (DEVI(rdip)->devi_fmhdl != NULL && 984 DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap)) { 985 (void) ndi_fmc_remove(rdip, DMA_HANDLE, mp); 986 } 987 } 988 989 /* 990 * Here if the handle is using the iommu. Unload all the iommu 991 * translations. 992 */ 993 switch (PX_DMA_TYPE(mp)) { 994 case PX_DMAI_FLAGS_DVMA: 995 px_mmu_unmap_window(mmu_p, mp); 996 px_dvma_unmap(mmu_p, mp); 997 px_dma_freepfn(mp); 998 break; 999 case PX_DMAI_FLAGS_BYPASS: 1000 case PX_DMAI_FLAGS_PTP: 1001 px_dma_freewin(mp); 1002 break; 1003 default: 1004 cmn_err(CE_PANIC, "%s%d: px_dma_unbindhdl:bad dma type %p", 1005 ddi_driver_name(rdip), ddi_get_instance(rdip), mp); 1006 /*NOTREACHED*/ 1007 } 1008 if (mmu_p->mmu_dvma_clid != 0) { 1009 DBG(DBG_DMA_UNBINDH, dip, "run dvma callback\n"); 1010 ddi_run_callback(&mmu_p->mmu_dvma_clid); 1011 } 1012 if (px_kmem_clid) { 1013 DBG(DBG_DMA_UNBINDH, dip, "run handle callback\n"); 1014 ddi_run_callback(&px_kmem_clid); 1015 } 1016 mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE; 1017 1018 return (DDI_SUCCESS); 1019 } 1020 1021 /* 1022 * bus dma win entry point: 1023 */ 1024 int 1025 px_dma_win(dev_info_t *dip, dev_info_t *rdip, 1026 ddi_dma_handle_t handle, uint_t win, off_t *offp, 1027 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 1028 { 1029 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 1030 int ret; 1031 1032 DBG(DBG_DMA_WIN, dip, "rdip=%s%d\n", 1033 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1034 1035 px_dump_dma_handle(DBG_DMA_WIN, dip, mp); 1036 if (win >= mp->dmai_nwin) { 1037 DBG(DBG_DMA_WIN, dip, "%x out of range\n", win); 1038 return (DDI_FAILURE); 1039 } 1040 1041 switch (PX_DMA_TYPE(mp)) { 1042 case PX_DMAI_FLAGS_DVMA: 1043 if (win != PX_DMA_CURWIN(mp)) { 1044 px_t *px_p = DIP_TO_STATE(dip); 1045 px_mmu_t *mmu_p = px_p->px_mmu_p; 1046 px_mmu_unmap_window(mmu_p, mp); 1047 1048 /* map_window sets dmai_mapping/size/offset */ 1049 px_mmu_map_window(mmu_p, mp, win); 1050 if ((ret = px_mmu_map_window(mmu_p, 1051 mp, win)) != DDI_SUCCESS) 1052 return (ret); 1053 } 1054 if (cookiep) 1055 MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, 1056 mp->dmai_size); 1057 if (ccountp) 1058 *ccountp = 1; 1059 break; 1060 case PX_DMAI_FLAGS_PTP: 1061 case PX_DMAI_FLAGS_BYPASS: { 1062 int i; 1063 ddi_dma_cookie_t *ck_p; 1064 px_dma_win_t *win_p = mp->dmai_winlst; 1065 1066 for (i = 0; i < win; win_p = win_p->win_next, i++) {}; 1067 ck_p = (ddi_dma_cookie_t *)(win_p + 1); 1068 *cookiep = *ck_p; 1069 mp->dmai_offset = win_p->win_offset; 1070 mp->dmai_size = win_p->win_size; 1071 mp->dmai_mapping = ck_p->dmac_laddress; 1072 mp->dmai_cookie = ck_p + 1; 1073 win_p->win_curseg = 0; 1074 if (ccountp) 1075 *ccountp = win_p->win_ncookies; 1076 } 1077 break; 1078 default: 1079 cmn_err(CE_WARN, "%s%d: px_dma_win:bad dma type 0x%x", 1080 ddi_driver_name(rdip), ddi_get_instance(rdip), 1081 PX_DMA_TYPE(mp)); 1082 return (DDI_FAILURE); 1083 } 1084 if (cookiep) 1085 DBG(DBG_DMA_WIN, dip, 1086 "cookie - dmac_address=%x dmac_size=%x\n", 1087 cookiep->dmac_address, cookiep->dmac_size); 1088 if (offp) 1089 *offp = (off_t)mp->dmai_offset; 1090 if (lenp) 1091 *lenp = mp->dmai_size; 1092 return (DDI_SUCCESS); 1093 } 1094 1095 #ifdef DEBUG 1096 static char *px_dmactl_str[] = { 1097 "DDI_DMA_FREE", 1098 "DDI_DMA_SYNC", 1099 "DDI_DMA_HTOC", 1100 "DDI_DMA_KVADDR", 1101 "DDI_DMA_MOVWIN", 1102 "DDI_DMA_REPWIN", 1103 "DDI_DMA_GETERR", 1104 "DDI_DMA_COFF", 1105 "DDI_DMA_NEXTWIN", 1106 "DDI_DMA_NEXTSEG", 1107 "DDI_DMA_SEGTOC", 1108 "DDI_DMA_RESERVE", 1109 "DDI_DMA_RELEASE", 1110 "DDI_DMA_RESETH", 1111 "DDI_DMA_CKSYNC", 1112 "DDI_DMA_IOPB_ALLOC", 1113 "DDI_DMA_IOPB_FREE", 1114 "DDI_DMA_SMEM_ALLOC", 1115 "DDI_DMA_SMEM_FREE", 1116 "DDI_DMA_SET_SBUS64" 1117 }; 1118 #endif /* DEBUG */ 1119 1120 /* 1121 * bus dma control entry point: 1122 */ 1123 /*ARGSUSED*/ 1124 int 1125 px_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 1126 enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp, 1127 uint_t cache_flags) 1128 { 1129 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 1130 1131 #ifdef DEBUG 1132 DBG(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", px_dmactl_str[cmd], 1133 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1134 #endif /* DEBUG */ 1135 1136 switch (cmd) { 1137 case DDI_DMA_FREE: 1138 (void) px_dma_unbindhdl(dip, rdip, handle); 1139 (void) px_dma_freehdl(dip, rdip, handle); 1140 return (DDI_SUCCESS); 1141 case DDI_DMA_RESERVE: { 1142 px_t *px_p = DIP_TO_STATE(dip); 1143 return (px_fdvma_reserve(dip, rdip, px_p, 1144 (ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp)); 1145 } 1146 case DDI_DMA_RELEASE: { 1147 px_t *px_p = DIP_TO_STATE(dip); 1148 return (px_fdvma_release(dip, px_p, mp)); 1149 } 1150 default: 1151 break; 1152 } 1153 1154 switch (PX_DMA_TYPE(mp)) { 1155 case PX_DMAI_FLAGS_DVMA: 1156 return (px_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp, 1157 cache_flags)); 1158 case PX_DMAI_FLAGS_PTP: 1159 case PX_DMAI_FLAGS_BYPASS: 1160 return (px_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp, 1161 cache_flags)); 1162 default: 1163 cmn_err(CE_PANIC, "%s%d: px_dma_ctlops(%x):bad dma type %x", 1164 ddi_driver_name(rdip), ddi_get_instance(rdip), cmd, 1165 mp->dmai_flags); 1166 /*NOTREACHED*/ 1167 } 1168 return (0); 1169 } 1170 1171 /* 1172 * control ops entry point: 1173 * 1174 * Requests handled completely: 1175 * DDI_CTLOPS_INITCHILD see init_child() for details 1176 * DDI_CTLOPS_UNINITCHILD 1177 * DDI_CTLOPS_REPORTDEV see report_dev() for details 1178 * DDI_CTLOPS_IOMIN cache line size if streaming otherwise 1 1179 * DDI_CTLOPS_REGSIZE 1180 * DDI_CTLOPS_NREGS 1181 * DDI_CTLOPS_DVMAPAGESIZE 1182 * DDI_CTLOPS_POKE 1183 * DDI_CTLOPS_PEEK 1184 * 1185 * All others passed to parent. 1186 */ 1187 int 1188 px_ctlops(dev_info_t *dip, dev_info_t *rdip, 1189 ddi_ctl_enum_t op, void *arg, void *result) 1190 { 1191 px_t *px_p = DIP_TO_STATE(dip); 1192 struct detachspec *ds; 1193 struct attachspec *as; 1194 1195 switch (op) { 1196 case DDI_CTLOPS_INITCHILD: 1197 return (px_init_child(px_p, (dev_info_t *)arg)); 1198 1199 case DDI_CTLOPS_UNINITCHILD: 1200 return (px_uninit_child(px_p, (dev_info_t *)arg)); 1201 1202 case DDI_CTLOPS_ATTACH: 1203 if (!pcie_is_child(dip, rdip)) 1204 return (DDI_SUCCESS); 1205 1206 as = (struct attachspec *)arg; 1207 switch (as->when) { 1208 case DDI_PRE: 1209 if (as->cmd == DDI_ATTACH) { 1210 DBG(DBG_PWR, dip, "PRE_ATTACH for %s@%d\n", 1211 ddi_driver_name(rdip), 1212 ddi_get_instance(rdip)); 1213 return (pcie_pm_hold(dip)); 1214 } 1215 if (as->cmd == DDI_RESUME) { 1216 DBG(DBG_PWR, dip, "PRE_RESUME for %s@%d\n", 1217 ddi_driver_name(rdip), 1218 ddi_get_instance(rdip)); 1219 1220 pcie_clear_errors(rdip); 1221 } 1222 return (DDI_SUCCESS); 1223 1224 case DDI_POST: 1225 DBG(DBG_PWR, dip, "POST_ATTACH for %s@%d\n", 1226 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1227 if (as->cmd == DDI_ATTACH && 1228 as->result != DDI_SUCCESS) { 1229 /* 1230 * Attach failed for the child device. The child 1231 * driver may have made PM calls before the 1232 * attach failed. pcie_pm_remove_child() should 1233 * cleanup PM state and holds (if any) 1234 * associated with the child device. 1235 */ 1236 return (pcie_pm_remove_child(dip, rdip)); 1237 } 1238 1239 if (as->result == DDI_SUCCESS) 1240 pf_init(rdip, (void *)px_p->px_fm_ibc, as->cmd); 1241 1242 (void) pcie_postattach_child(rdip); 1243 1244 return (DDI_SUCCESS); 1245 default: 1246 break; 1247 } 1248 break; 1249 1250 case DDI_CTLOPS_DETACH: 1251 if (!pcie_is_child(dip, rdip)) 1252 return (DDI_SUCCESS); 1253 1254 ds = (struct detachspec *)arg; 1255 switch (ds->when) { 1256 case DDI_POST: 1257 if (ds->cmd == DDI_DETACH && 1258 ds->result == DDI_SUCCESS) { 1259 DBG(DBG_PWR, dip, "POST_DETACH for %s@%d\n", 1260 ddi_driver_name(rdip), 1261 ddi_get_instance(rdip)); 1262 return (pcie_pm_remove_child(dip, rdip)); 1263 } 1264 return (DDI_SUCCESS); 1265 case DDI_PRE: 1266 pf_fini(rdip, ds->cmd); 1267 return (DDI_SUCCESS); 1268 default: 1269 break; 1270 } 1271 break; 1272 1273 case DDI_CTLOPS_REPORTDEV: 1274 return (px_report_dev(rdip)); 1275 1276 case DDI_CTLOPS_IOMIN: 1277 return (DDI_SUCCESS); 1278 1279 case DDI_CTLOPS_REGSIZE: 1280 *((off_t *)result) = px_get_reg_set_size(rdip, *((int *)arg)); 1281 return (*((off_t *)result) == 0 ? DDI_FAILURE : DDI_SUCCESS); 1282 1283 case DDI_CTLOPS_NREGS: 1284 *((uint_t *)result) = px_get_nreg_set(rdip); 1285 return (DDI_SUCCESS); 1286 1287 case DDI_CTLOPS_DVMAPAGESIZE: 1288 *((ulong_t *)result) = MMU_PAGE_SIZE; 1289 return (DDI_SUCCESS); 1290 1291 case DDI_CTLOPS_POKE: /* platform dependent implementation. */ 1292 return (px_lib_ctlops_poke(dip, rdip, 1293 (peekpoke_ctlops_t *)arg)); 1294 1295 case DDI_CTLOPS_PEEK: /* platform dependent implementation. */ 1296 return (px_lib_ctlops_peek(dip, rdip, 1297 (peekpoke_ctlops_t *)arg, result)); 1298 1299 case DDI_CTLOPS_POWER: 1300 default: 1301 break; 1302 } 1303 1304 /* 1305 * Now pass the request up to our parent. 1306 */ 1307 DBG(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n", 1308 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1309 return (ddi_ctlops(dip, rdip, op, arg, result)); 1310 } 1311 1312 /* ARGSUSED */ 1313 int 1314 px_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op, 1315 ddi_intr_handle_impl_t *hdlp, void *result) 1316 { 1317 int intr_types, ret = DDI_SUCCESS; 1318 1319 DBG(DBG_INTROPS, dip, "px_intr_ops: rdip=%s%d\n", 1320 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1321 1322 /* Process DDI_INTROP_SUPPORTED_TYPES request here */ 1323 if (intr_op == DDI_INTROP_SUPPORTED_TYPES) { 1324 *(int *)result = i_ddi_get_intx_nintrs(rdip) ? 1325 DDI_INTR_TYPE_FIXED : 0; 1326 1327 if ((pci_msi_get_supported_type(rdip, 1328 &intr_types)) == DDI_SUCCESS) { 1329 /* 1330 * Double check supported interrupt types vs. 1331 * what the host bridge supports. 1332 */ 1333 *(int *)result |= intr_types; 1334 } 1335 1336 return (ret); 1337 } 1338 1339 /* 1340 * PCI-E nexus driver supports fixed, MSI and MSI-X interrupts. 1341 * Return failure if interrupt type is not supported. 1342 */ 1343 switch (hdlp->ih_type) { 1344 case DDI_INTR_TYPE_FIXED: 1345 ret = px_intx_ops(dip, rdip, intr_op, hdlp, result); 1346 break; 1347 case DDI_INTR_TYPE_MSI: 1348 case DDI_INTR_TYPE_MSIX: 1349 ret = px_msix_ops(dip, rdip, intr_op, hdlp, result); 1350 break; 1351 default: 1352 ret = DDI_ENOTSUP; 1353 break; 1354 } 1355 1356 return (ret); 1357 } 1358 1359 static void 1360 px_set_mps(px_t *px_p) 1361 { 1362 dev_info_t *dip; 1363 pcie_bus_t *bus_p; 1364 int max_supported; 1365 1366 dip = px_p->px_dip; 1367 bus_p = PCIE_DIP2BUS(dip); 1368 1369 bus_p->bus_mps = -1; 1370 1371 if (pcie_root_port(dip) == DDI_FAILURE) { 1372 if (px_lib_get_root_complex_mps(px_p, dip, 1373 &max_supported) < 0) { 1374 1375 DBG(DBG_MPS, dip, "MPS: Can not get RC MPS\n"); 1376 return; 1377 } 1378 1379 DBG(DBG_MPS, dip, "MPS: Root Complex MPS Cap of = %x\n", 1380 max_supported); 1381 1382 if (pcie_max_mps < max_supported) 1383 max_supported = pcie_max_mps; 1384 1385 (void) pcie_get_fabric_mps(dip, ddi_get_child(dip), 1386 &max_supported); 1387 1388 bus_p->bus_mps = max_supported; 1389 1390 (void) px_lib_set_root_complex_mps(px_p, dip, bus_p->bus_mps); 1391 1392 DBG(DBG_MPS, dip, "MPS: Root Complex MPS Set to = %x\n", 1393 bus_p->bus_mps); 1394 } 1395 } 1396