1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * PCI Express nexus driver interface 31 */ 32 33 #include <sys/types.h> 34 #include <sys/conf.h> /* nulldev */ 35 #include <sys/stat.h> /* devctl */ 36 #include <sys/kmem.h> 37 #include <sys/sunddi.h> 38 #include <sys/sunndi.h> 39 #include <sys/hotplug/pci/pcihp.h> 40 #include <sys/ddi_impldefs.h> 41 #include <sys/ddi_subrdefs.h> 42 #include <sys/spl.h> 43 #include <sys/epm.h> 44 #include <sys/iommutsb.h> 45 #include "px_obj.h" 46 #include <sys/pci_tools.h> 47 #include "px_tools_ext.h" 48 #include "pcie_pwr.h" 49 50 /*LINTLIBRARY*/ 51 52 /* 53 * function prototypes for dev ops routines: 54 */ 55 static int px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 56 static int px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 57 static int px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, 58 void *arg, void **result); 59 static int px_pwr_setup(dev_info_t *dip); 60 static void px_pwr_teardown(dev_info_t *dip); 61 62 /* 63 * bus ops and dev ops 
structures:
 */
static struct bus_ops px_bus_ops = {
	BUSO_REV,
	px_map,
	0,
	0,
	0,
	i_ddi_map_fault,
	px_dma_setup,
	px_dma_allochdl,
	px_dma_freehdl,
	px_dma_bindhdl,
	px_dma_unbindhdl,
	px_lib_dma_sync,
	px_dma_win,
	px_dma_ctlops,
	px_ctlops,
	ddi_bus_prop_op,
	ndi_busop_get_eventcookie,
	ndi_busop_add_eventcall,
	ndi_busop_remove_eventcall,
	ndi_post_event,
	NULL,
	NULL,			/* (*bus_config)(); */
	NULL,			/* (*bus_unconfig)(); */
	px_fm_init_child,	/* (*bus_fm_init)(); */
	NULL,			/* (*bus_fm_fini)(); */
	px_bus_enter,		/* (*bus_fm_access_enter)(); */
	px_bus_exit,		/* (*bus_fm_access_fini)(); */
	pcie_bus_power,		/* (*bus_power)(); */
	px_intr_ops		/* (*bus_intr_op)(); */
};

extern struct cb_ops px_cb_ops;

static struct dev_ops px_ops = {
	DEVO_REV,
	0,
	px_info,		/* getinfo(9E) */
	nulldev,
	0,
	px_attach,		/* attach(9E) */
	px_detach,		/* detach(9E) */
	nodev,
	&px_cb_ops,
	&px_bus_ops,
	nulldev
};

/*
 * module definitions:
 */
#include <sys/modctl.h>
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,			/* Type of module - driver */
	"PCI Express nexus driver %I%",	/* Name of module. */
	&px_ops,			/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/* driver soft state: one px_t per instance, managed by ddi_soft_state(9F) */
void *px_state_p;

/*
 * _init(9E): set up the per-instance soft state list, then register the
 * module.  Soft state is torn down again if mod_install() fails.
 */
int
_init(void)
{
	int e;

	/*
	 * Initialize per-px bus soft state pointer.
	 */
	e = ddi_soft_state_init(&px_state_p, sizeof (px_t), 1);
	if (e != DDI_SUCCESS)
		return (e);

	/*
	 * Install the module.
	 */
	e = mod_install(&modlinkage);
	if (e != DDI_SUCCESS)
		ddi_soft_state_fini(&px_state_p);
	return (e);
}

/*
 * _fini(9E): unregister the module; only release soft state once
 * mod_remove() has succeeded.
 */
int
_fini(void)
{
	int e;

	/*
	 * Remove the module.
	 */
	e = mod_remove(&modlinkage);
	if (e != DDI_SUCCESS)
		return (e);

	/* Free px soft state */
	ddi_soft_state_fini(&px_state_p);

	return (e);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * getinfo(9E): map a dev_t (minor number encodes the instance) to an
 * instance number or to the instance's dev_info pointer.
 */
/* ARGSUSED */
static int
px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int	instance = getminor((dev_t)arg);
	px_t	*px_p = INST_TO_STATE(instance);

#ifdef	HOTPLUG
	/*
	 * Allow hotplug to deal with ones it manages
	 * Hot Plug will be done later.
	 */
	if (px_p && (px_p->hotplug_capable == B_TRUE))
		return (pcihp_info(dip, infocmd, arg, result));
#endif	/* HOTPLUG */

	/* non-hotplug or not attached */
	switch (infocmd) {
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(intptr_t)instance;
		return (DDI_SUCCESS);

	case DDI_INFO_DEVT2DEVINFO:
		/* px_p may be NULL if the instance never attached */
		if (px_p == NULL)
			return (DDI_FAILURE);
		*result = (void *)px_p->px_dip;
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

/* device driver entry points */
/*
 * attach entry point:
 */
/*ARGSUSED*/
static int
px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	px_t		*px_p;	/* per bus state pointer */
	int		instance = DIP_TO_INST(dip);
	int		ret = DDI_SUCCESS;
	devhandle_t	dev_hdl = NULL;

	switch (cmd) {
	case DDI_ATTACH:
		DBG(DBG_ATTACH, dip, "DDI_ATTACH\n");

		/*
		 * Allocate and get the per-px soft state structure.
229 */ 230 if (ddi_soft_state_zalloc(px_state_p, instance) 231 != DDI_SUCCESS) { 232 cmn_err(CE_WARN, "%s%d: can't allocate px state", 233 ddi_driver_name(dip), instance); 234 goto err_bad_px_softstate; 235 } 236 px_p = INST_TO_STATE(instance); 237 px_p->px_dip = dip; 238 mutex_init(&px_p->px_mutex, NULL, MUTEX_DRIVER, NULL); 239 px_p->px_soft_state = PX_SOFT_STATE_CLOSED; 240 px_p->px_open_count = 0; 241 242 /* 243 * Get key properties of the pci bridge node and 244 * determine it's type (psycho, schizo, etc ...). 245 */ 246 if (px_get_props(px_p, dip) == DDI_FAILURE) 247 goto err_bad_px_prop; 248 249 if ((px_fm_attach(px_p)) != DDI_SUCCESS) 250 goto err_bad_fm; 251 252 if (px_lib_dev_init(dip, &dev_hdl) != DDI_SUCCESS) 253 goto err_bad_dev_init; 254 255 /* Initilize device handle */ 256 px_p->px_dev_hdl = dev_hdl; 257 258 /* 259 * Initialize interrupt block. Note that this 260 * initialize error handling for the PEC as well. 261 */ 262 if ((ret = px_ib_attach(px_p)) != DDI_SUCCESS) 263 goto err_bad_ib; 264 265 if (px_cb_attach(px_p) != DDI_SUCCESS) 266 goto err_bad_cb; 267 268 /* 269 * Start creating the modules. 270 * Note that attach() routines should 271 * register and enable their own interrupts. 272 */ 273 274 if ((px_mmu_attach(px_p)) != DDI_SUCCESS) 275 goto err_bad_mmu; 276 277 if ((px_msiq_attach(px_p)) != DDI_SUCCESS) 278 goto err_bad_msiq; 279 280 if ((px_msi_attach(px_p)) != DDI_SUCCESS) 281 goto err_bad_msi; 282 283 if ((px_pec_attach(px_p)) != DDI_SUCCESS) 284 goto err_bad_pec; 285 286 if ((px_dma_attach(px_p)) != DDI_SUCCESS) 287 goto err_bad_pec; /* nothing to uninitialize on DMA */ 288 289 /* 290 * All of the error handlers have been registered 291 * by now so it's time to activate the interrupt. 292 */ 293 if ((ret = px_err_add_intr(&px_p->px_fault)) != DDI_SUCCESS) 294 goto err_bad_pec_add_intr; 295 296 /* 297 * Create the "devctl" node for hotplug and pcitool support. 
298 * For non-hotplug bus, we still need ":devctl" to 299 * support DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls. 300 */ 301 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 302 PCIHP_AP_MINOR_NUM(instance, PCIHP_DEVCTL_MINOR), 303 DDI_NT_NEXUS, 0) != DDI_SUCCESS) { 304 goto err_bad_devctl_node; 305 } 306 307 if (pxtool_init(dip) != DDI_SUCCESS) 308 goto err_bad_pcitool_node; 309 310 /* 311 * power management setup. Even if it fails, attach will 312 * succeed as this is a optional feature. Since we are 313 * always at full power, this is not critical. 314 */ 315 if (pwr_common_setup(dip) != DDI_SUCCESS) { 316 DBG(DBG_PWR, dip, "pwr_common_setup failed\n"); 317 } else if (px_pwr_setup(dip) != DDI_SUCCESS) { 318 DBG(DBG_PWR, dip, "px_pwr_setup failed \n"); 319 pwr_common_teardown(dip); 320 } 321 322 /* 323 * add cpr callback 324 */ 325 px_cpr_add_callb(px_p); 326 327 ddi_report_dev(dip); 328 329 px_p->px_state = PX_ATTACHED; 330 DBG(DBG_ATTACH, dip, "attach success\n"); 331 break; 332 333 err_bad_pcitool_node: 334 ddi_remove_minor_node(dip, "devctl"); 335 err_bad_devctl_node: 336 px_err_rem_intr(&px_p->px_fault); 337 err_bad_pec_add_intr: 338 px_pec_detach(px_p); 339 err_bad_pec: 340 px_msi_detach(px_p); 341 err_bad_msi: 342 px_msiq_detach(px_p); 343 err_bad_msiq: 344 px_mmu_detach(px_p); 345 err_bad_mmu: 346 px_cb_detach(px_p); 347 err_bad_cb: 348 px_ib_detach(px_p); 349 err_bad_ib: 350 (void) px_lib_dev_fini(dip); 351 err_bad_dev_init: 352 px_fm_detach(px_p); 353 err_bad_fm: 354 px_free_props(px_p); 355 err_bad_px_prop: 356 mutex_destroy(&px_p->px_mutex); 357 ddi_soft_state_free(px_state_p, instance); 358 err_bad_px_softstate: 359 ret = DDI_FAILURE; 360 break; 361 362 case DDI_RESUME: 363 DBG(DBG_ATTACH, dip, "DDI_RESUME\n"); 364 365 px_p = INST_TO_STATE(instance); 366 367 mutex_enter(&px_p->px_mutex); 368 369 /* suspend might have not succeeded */ 370 if (px_p->px_state != PX_SUSPENDED) { 371 DBG(DBG_ATTACH, px_p->px_dip, 372 "instance NOT suspended\n"); 373 ret = 
DDI_FAILURE; 374 break; 375 } 376 377 px_lib_resume(dip); 378 (void) pcie_pwr_resume(dip); 379 px_p->px_state = PX_ATTACHED; 380 381 mutex_exit(&px_p->px_mutex); 382 383 break; 384 default: 385 DBG(DBG_ATTACH, dip, "unsupported attach op\n"); 386 ret = DDI_FAILURE; 387 break; 388 } 389 390 return (ret); 391 } 392 393 /* 394 * detach entry point: 395 */ 396 /*ARGSUSED*/ 397 static int 398 px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 399 { 400 int instance = ddi_get_instance(dip); 401 px_t *px_p = INST_TO_STATE(instance); 402 int ret; 403 404 /* 405 * Make sure we are currently attached 406 */ 407 if (px_p->px_state != PX_ATTACHED) { 408 DBG(DBG_DETACH, dip, "failed - instance not attached\n"); 409 return (DDI_FAILURE); 410 } 411 412 mutex_enter(&px_p->px_mutex); 413 414 switch (cmd) { 415 case DDI_DETACH: 416 DBG(DBG_DETACH, dip, "DDI_DETACH\n"); 417 418 /* 419 * remove cpr callback 420 */ 421 px_cpr_rem_callb(px_p); 422 423 #ifdef HOTPLUG 424 /* 425 * Hot plug will be done later. 426 */ 427 if (px_p->hotplug_capable == B_TRUE) { 428 if (pxhp_uninit(dip) == DDI_FAILURE) { 429 mutex_exit(&px_p->px_mutex); 430 return (DDI_FAILURE); 431 } 432 } 433 #endif /* HOTPLUG */ 434 435 /* 436 * things which used to be done in obj_destroy 437 * are now in-lined here. 438 */ 439 440 px_p->px_state = PX_DETACHED; 441 442 pxtool_uninit(dip); 443 444 ddi_remove_minor_node(dip, "devctl"); 445 px_err_rem_intr(&px_p->px_fault); 446 px_pec_detach(px_p); 447 px_pwr_teardown(dip); 448 pwr_common_teardown(dip); 449 px_msi_detach(px_p); 450 px_msiq_detach(px_p); 451 px_mmu_detach(px_p); 452 px_cb_detach(px_p); 453 px_ib_detach(px_p); 454 (void) px_lib_dev_fini(dip); 455 px_fm_detach(px_p); 456 457 /* 458 * Free the px soft state structure and the rest of the 459 * resources it's using. 460 */ 461 px_free_props(px_p); 462 mutex_exit(&px_p->px_mutex); 463 mutex_destroy(&px_p->px_mutex); 464 465 /* Free the interrupt-priorities prop if we created it. 
		 */ {
			int len;

			if (ddi_getproplen(DDI_DEV_T_ANY, dip,
			    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
			    "interrupt-priorities", &len) == DDI_PROP_SUCCESS)
				(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
				    "interrupt-priorities");
		}

		px_p->px_dev_hdl = NULL;
		ddi_soft_state_free(px_state_p, instance);

		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		/* framework PM suspend first; bail without changing state */
		if (pcie_pwr_suspend(dip) != DDI_SUCCESS) {
			mutex_exit(&px_p->px_mutex);
			return (DDI_FAILURE);
		}
		if ((ret = px_lib_suspend(dip)) == DDI_SUCCESS)
			px_p->px_state = PX_SUSPENDED;
		mutex_exit(&px_p->px_mutex);

		return (ret);

	default:
		DBG(DBG_DETACH, dip, "unsupported detach op\n");
		mutex_exit(&px_p->px_mutex);
		return (DDI_FAILURE);
	}
}

/*
 * power management related initialization specific to px
 * called by px_attach()
 *
 * Sets the nexus to full power (no device PM), initializes the L23-ready
 * and link-up synchronization primitives, registers a high-level soft
 * interrupt for link-up events, and hooks up the PME_TO_ACK message
 * interrupt.  On any failure, everything initialized so far is undone
 * via the label chain at the bottom and DDI_FAILURE is returned.
 */
static int
px_pwr_setup(dev_info_t *dip)
{
	pcie_pwr_t *pwr_p;
	int instance = ddi_get_instance(dip);
	px_t *px_p = INST_TO_STATE(instance);
	ddi_intr_handle_impl_t hdl;
	ddi_iblock_cookie_t iblk_cookie;

	ASSERT(PCIE_PMINFO(dip));
	pwr_p = PCIE_NEXUS_PMINFO(dip);
	ASSERT(pwr_p);

	/*
	 * indicate support LDI (Layered Driver Interface)
	 * Create the property, if it is not already there
	 */
	if (!ddi_prop_exists(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
	    DDI_KERNEL_IOCTL)) {
		if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		    DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS) {
			DBG(DBG_PWR, dip, "can't create kernel ioctl prop\n");
			return (DDI_FAILURE);
		}
	}
	/* No support for device PM. We are always at full power */
	pwr_p->pwr_func_lvl = PM_LEVEL_D0;

	mutex_init(&px_p->px_l23ready_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(px_pwr_pil));
	cv_init(&px_p->px_l23ready_cv, NULL, CV_DRIVER, NULL);

	mutex_init(&px_p->px_lup_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(PX_ERR_PIL));
	cv_init(&px_p->px_lup_cv, NULL, CV_DRIVER, NULL);

	if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_HIGH,
	    &iblk_cookie) != DDI_SUCCESS) {
		DBG(DBG_PWR, dip, "px_pwr_setup: couldn't get iblock cookie\n");
		goto pwr_setup_err1;
	}

	mutex_init(&px_p->px_lupsoft_lock, NULL, MUTEX_DRIVER,
	    (void *)iblk_cookie);

	if (ddi_add_softintr(dip, DDI_SOFTINT_HIGH, &px_p->px_lupsoft_id,
	    NULL, NULL, px_lup_softintr, (caddr_t)px_p) != DDI_SUCCESS) {
		DBG(DBG_PWR, dip, "px_pwr_setup: couldn't add soft intr \n");
		goto pwr_setup_err2;
	}

	/* Initialize handle */
	hdl.ih_cb_arg1 = px_p;
	hdl.ih_cb_arg2 = NULL;
	hdl.ih_ver = DDI_INTR_VERSION;
	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
	hdl.ih_dip = dip;
	hdl.ih_inum = 0;
	hdl.ih_pri = px_pwr_pil;

	/* Add PME_TO_ACK message handler */
	hdl.ih_cb_func = (ddi_intr_handler_t *)px_pmeq_intr;
	if (px_add_msiq_intr(dip, dip, &hdl, MSG_REC,
	    (msgcode_t)PCIE_PME_ACK_MSG, &px_p->px_pm_msiq_id) != DDI_SUCCESS) {
		DBG(DBG_PWR, dip, "px_pwr_setup: couldn't add "
		    " PME_TO_ACK intr\n");
		goto px_pwrsetup_err;
	}
	px_lib_msg_setmsiq(dip, PCIE_PME_ACK_MSG, px_p->px_pm_msiq_id);
	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_VALID);

	if (px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum,
	    px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id),
	    PX_INTR_STATE_ENABLE, MSG_REC, PCIE_PME_ACK_MSG) != DDI_SUCCESS) {
		DBG(DBG_PWR, dip, "px_pwr_setup: PME_TO_ACK update interrupt"
		    " state failed\n");
		goto px_pwrsetup_err_state;
	}

	return (DDI_SUCCESS);

	/* error unwind: undo initialization in reverse order */
px_pwrsetup_err_state:
	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID);
	(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG,
	    px_p->px_pm_msiq_id);
px_pwrsetup_err:
	ddi_remove_softintr(px_p->px_lupsoft_id);
pwr_setup_err2:
	mutex_destroy(&px_p->px_lupsoft_lock);
pwr_setup_err1:
	mutex_destroy(&px_p->px_lup_lock);
	cv_destroy(&px_p->px_lup_cv);
	mutex_destroy(&px_p->px_l23ready_lock);
	cv_destroy(&px_p->px_l23ready_cv);

	return (DDI_FAILURE);
}

/*
 * undo whatever is done in px_pwr_setup. called by px_detach()
 */
static void
px_pwr_teardown(dev_info_t *dip)
{
	int instance = ddi_get_instance(dip);
	px_t *px_p = INST_TO_STATE(instance);
	ddi_intr_handle_impl_t hdl;

	/* nothing to do if PM info was never set up */
	if (!PCIE_PMINFO(dip) || !PCIE_NEXUS_PMINFO(dip))
		return;

	/* Initialize handle */
	hdl.ih_ver = DDI_INTR_VERSION;
	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
	hdl.ih_dip = dip;
	hdl.ih_inum = 0;

	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID);
	(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG,
	    px_p->px_pm_msiq_id);

	(void) px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum,
	    px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id),
	    PX_INTR_STATE_DISABLE, MSG_REC, PCIE_PME_ACK_MSG);

	px_p->px_pm_msiq_id = -1;

	cv_destroy(&px_p->px_l23ready_cv);
	ddi_remove_softintr(px_p->px_lupsoft_id);
	mutex_destroy(&px_p->px_lupsoft_lock);
	mutex_destroy(&px_p->px_lup_lock);
	mutex_destroy(&px_p->px_l23ready_lock);
}

/* bus driver entry points */

/*
 * bus map entry point:
 *
 * 	if map request is for an rnumber
 *		get the corresponding regspec from device node
 * 	build a new regspec in our parent's format
 *	build a new map_req with the new regspec
 *	call up the tree to complete the mapping
 */
int
px_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t off, off_t
len, caddr_t *addrp)
{
	px_t *px_p = DIP_TO_STATE(dip);
	struct regspec p_regspec;
	ddi_map_req_t p_mapreq;
	int reglen, rval, r_no;
	pci_regspec_t reloc_reg, *rp = &reloc_reg;

	DBG(DBG_MAP, dip, "rdip=%s%d:",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (mp->map_flags & DDI_MF_USER_MAPPING)
		return (DDI_ME_UNIMPLEMENTED);

	switch (mp->map_type) {
	case DDI_MT_REGSPEC:
		reloc_reg = *(pci_regspec_t *)mp->map_obj.rp;	/* dup whole */
		break;

	case DDI_MT_RNUMBER:
		r_no = mp->map_obj.rnumber;
		DBG(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no);

		/*
		 * ddi_getlongprop() kmem_alloc's the "reg" buffer; every
		 * later exit path must free it (see the done: label).
		 */
		if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&rp, &reglen) != DDI_SUCCESS)
			return (DDI_ME_RNUMBER_RANGE);

		if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) {
			kmem_free(rp, reglen);
			return (DDI_ME_RNUMBER_RANGE);
		}
		rp += r_no;
		break;

	default:
		/* no property buffer allocated yet - safe to return */
		return (DDI_ME_INVAL);
	}
	DBG(DBG_MAP | DBG_CONT, dip, "\n");

	if ((rp->pci_phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) {
		/*
		 * There may be a need to differentiate between PCI
		 * and PCI-Ex devices so the following range check is
		 * done correctly, depending on the implementation of
		 * px_pci bridge nexus driver.
		 */
		if ((off >= PCIE_CONF_HDR_SIZE) ||
		    (len > PCIE_CONF_HDR_SIZE) ||
		    (off + len > PCIE_CONF_HDR_SIZE)) {
			/*
			 * BUG FIX: returning directly here leaked the
			 * "reg" property buffer in the DDI_MT_RNUMBER
			 * case; go through the common cleanup instead.
			 */
			rval = DDI_ME_INVAL;
			goto done;
		}
		/*
		 * the following function returning a DDI_FAILURE assumes
		 * that there are no virtual config space access services
		 * defined in this layer. Otherwise it is availed right
		 * here and we return.
		 */
		rval = px_lib_map_vconfig(dip, mp, off, rp, addrp);
		if (rval == DDI_SUCCESS)
			goto done;
	}

	/*
	 * No virtual config space services or we are mapping
	 * a region of memory mapped config/IO/memory space, so proceed
	 * to the parent.
	 */

	/* relocate within 64-bit pci space through "assigned-addresses" */
	if (rval = px_reloc_reg(dip, rdip, px_p, rp))
		goto done;

	if (len)	/* adjust regspec according to mapping request */
		rp->pci_size_low = len;	/* MIN ? */
	rp->pci_phys_low += off;

	/* translate relocated pci regspec into parent space through "ranges" */
	if (rval = px_xlate_reg(px_p, rp, &p_regspec))
		goto done;

	p_mapreq = *mp;		/* dup the whole structure */
	p_mapreq.map_type = DDI_MT_REGSPEC;
	p_mapreq.map_obj.rp = &p_regspec;
	px_lib_map_attr_check(&p_mapreq);
	rval = ddi_map(dip, &p_mapreq, 0, 0, addrp);

	if (rval == DDI_SUCCESS) {
		/*
		 * Set-up access functions for FM access error capable drivers.
		 */
		if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
		    mp->map_handlep->ah_acc.devacc_attr_access !=
		    DDI_DEFAULT_ACC)
			px_fm_acc_setup(mp, rdip);
	}

done:
	/* free the "reg" property buffer allocated in the RNUMBER case */
	if (mp->map_type == DDI_MT_RNUMBER)
		kmem_free(rp - r_no, reglen);

	return (rval);
}

/*
 * bus dma map entry point
 * return value:
 *	DDI_DMA_PARTIAL_MAP	 1
 *	DDI_DMA_MAPOK		 0
 *	DDI_DMA_MAPPED		 0
 *	DDI_DMA_NORESOURCES	-1
 *	DDI_DMA_NOMAPPING	-2
 *	DDI_DMA_TOOBIG		-3
 */
int
px_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq,
	ddi_dma_handle_t *handlep)
{
	px_t *px_p = DIP_TO_STATE(dip);
	px_mmu_t *mmu_p = px_p->px_mmu_p;
	ddi_dma_impl_t *mp;
	int ret;

	/* a NULL handlep means an advisory (limits-check only) call */
	DBG(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip),
	    handlep ? "alloc" : "advisory");

	if (!(mp = px_dma_lmts2hdl(dip, rdip, mmu_p, dmareq)))
		return (DDI_DMA_NORESOURCES);
	if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING)
		return (DDI_DMA_NOMAPPING);
	/* note: assignments-in-condition below are intentional (lint'ed) */
	if (ret = px_dma_type(px_p, dmareq, mp))
		goto freehandle;
	if (ret = px_dma_pfn(px_p, dmareq, mp))
		goto freehandle;

	switch (PX_DMA_TYPE(mp)) {
	case PX_DMAI_FLAGS_DVMA:	/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
		if ((ret = px_dvma_win(px_p, dmareq, mp)) || !handlep)
			goto freehandle;
		if (!PX_DMA_CANCACHE(mp)) {	/* try fast track */
			if (PX_DMA_CANFAST(mp)) {
				if (!px_dvma_map_fast(mmu_p, mp))
					break;
			/* LINTED E_NOP_ELSE_STMT */
			} else {
				PX_DVMA_FASTTRAK_PROF(mp);
			}
		}
		if (ret = px_dvma_map(mp, dmareq, mmu_p))
			goto freehandle;
		break;
	case PX_DMAI_FLAGS_PTP:		/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
		if ((ret = px_dma_physwin(px_p, dmareq, mp)) || !handlep)
			goto freehandle;
		break;
	case PX_DMAI_FLAGS_BYPASS:
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_setup: bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PX_DMA_TYPE(mp));
		/*NOTREACHED*/
	}
	*handlep = (ddi_dma_handle_t)mp;
	mp->dmai_flags |= PX_DMAI_FLAGS_INUSE;
	px_dump_dma_handle(DBG_DMA_MAP, dip, mp);

	return ((mp->dmai_nwin == 1) ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
freehandle:
	if (ret == DDI_DMA_NORESOURCES)
		px_dma_freemp(mp);	/* don't run_callback() */
	else
		(void) px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
	return (ret);
}


/*
 * bus dma alloc handle entry point:
 */
int
px_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	px_t *px_p = DIP_TO_STATE(dip);
	ddi_dma_impl_t *mp;
	int rval;

	DBG(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (attrp->dma_attr_version != DMA_ATTR_V0)
		return (DDI_DMA_BADATTR);

	if (!(mp = px_dma_allocmp(dip, rdip, waitfp, arg)))
		return (DDI_DMA_NORESOURCES);

	/*
	 * Save requestor's information
	 */
	mp->dmai_attr	= *attrp; /* whole object - augmented later  */
	*PX_DEV_ATTR(mp)	= *attrp; /* whole object - device orig attr */
	DBG(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp);

	/* check and convert dma attributes to handle parameters */
	if (rval = px_dma_attr2hdl(px_p, mp)) {
		px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
		*handlep = NULL;
		return (rval);
	}
	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}


/*
 * bus dma free handle entry point:
 */
/*ARGSUSED*/
int
px_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	DBG(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	px_dma_freemp((ddi_dma_impl_t *)handle);

	/* a handle was freed - wake up anyone waiting for one */
	if (px_kmem_clid) {
		DBG(DBG_DMA_FREEH, dip, "run handle callback\n");
		ddi_run_callback(&px_kmem_clid);
	}
	return (DDI_SUCCESS);
}


/*
 * bus dma bind handle entry point:
 */
int
px_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
	ddi_dma_handle_t handle, ddi_dma_req_t *dmareq,
	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	px_t *px_p = DIP_TO_STATE(dip);
	px_mmu_t *mmu_p = px_p->px_mmu_p;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	int ret;

	DBG(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq);

	if (mp->dmai_flags & PX_DMAI_FLAGS_INUSE)
		return (DDI_DMA_INUSE);

	ASSERT((mp->dmai_flags & ~PX_DMAI_FLAGS_PRESERVE) == 0);
	mp->dmai_flags |= PX_DMAI_FLAGS_INUSE;

	if (ret = px_dma_type(px_p, dmareq, mp))
		goto err;
	if (ret = px_dma_pfn(px_p, dmareq, mp))
		goto err;

	switch (PX_DMA_TYPE(mp)) {
	case PX_DMAI_FLAGS_DVMA:
		if (ret = px_dvma_win(px_p, dmareq, mp))
			goto map_err;
		if (!PX_DMA_CANCACHE(mp)) {	/* try fast track */
			if (PX_DMA_CANFAST(mp)) {
				if (!px_dvma_map_fast(mmu_p, mp))
					goto mapped; /*LINTED E_NOP_ELSE_STMT*/
			} else {
				PX_DVMA_FASTTRAK_PROF(mp);
			}
		}
		if (ret = px_dvma_map(mp, dmareq, mmu_p))
			goto map_err;
mapped:
		*ccountp = 1;
		MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size);
		break;
	case PX_DMAI_FLAGS_BYPASS:
	case PX_DMAI_FLAGS_PTP:
		if (ret = px_dma_physwin(px_p, dmareq, mp))
			goto map_err;
		*ccountp = PX_WINLST(mp)->win_ncookies;
		*cookiep =
		    *(ddi_dma_cookie_t *)(PX_WINLST(mp) + 1); /* wholeobj */
		break;
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_bindhdl(%p): bad dma type",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
		/*NOTREACHED*/
	}
	DBG(DBG_DMA_BINDH, dip, "cookie %" PRIx64 "+%x\n",
	    cookiep->dmac_address, cookiep->dmac_size);
	px_dump_dma_handle(DBG_DMA_MAP, dip, mp);

	/* insert dma handle into FMA cache */
	if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR)
		(void) ndi_fmc_insert(rdip, DMA_HANDLE, mp, NULL);

	return (mp->dmai_nwin == 1 ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
map_err:
	px_dma_freepfn(mp);
err:
	mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE;
	return (ret);
}


/*
 * bus dma unbind handle entry point:
 */
/*ARGSUSED*/
int
px_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	px_t *px_p = DIP_TO_STATE(dip);
	px_mmu_t *mmu_p = px_p->px_mmu_p;

	DBG(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	if ((mp->dmai_flags & PX_DMAI_FLAGS_INUSE) == 0) {
		DBG(DBG_DMA_UNBINDH, dip, "handle not inuse\n");
		return (DDI_FAILURE);
	}

	/* remove dma handle from FMA cache */
	if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
		if (DEVI(rdip)->devi_fmhdl != NULL &&
		    DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap)) {
			(void) ndi_fmc_remove(rdip, DMA_HANDLE, mp);
		}
	}

	/*
	 * Here if the handle is using the iommu.  Unload all the iommu
	 * translations.
985 */ 986 switch (PX_DMA_TYPE(mp)) { 987 case PX_DMAI_FLAGS_DVMA: 988 px_mmu_unmap_window(mmu_p, mp); 989 px_dvma_unmap(mmu_p, mp); 990 px_dma_freepfn(mp); 991 break; 992 case PX_DMAI_FLAGS_BYPASS: 993 case PX_DMAI_FLAGS_PTP: 994 px_dma_freewin(mp); 995 break; 996 default: 997 cmn_err(CE_PANIC, "%s%d: px_dma_unbindhdl:bad dma type %p", 998 ddi_driver_name(rdip), ddi_get_instance(rdip), mp); 999 /*NOTREACHED*/ 1000 } 1001 if (mmu_p->mmu_dvma_clid != 0) { 1002 DBG(DBG_DMA_UNBINDH, dip, "run dvma callback\n"); 1003 ddi_run_callback(&mmu_p->mmu_dvma_clid); 1004 } 1005 if (px_kmem_clid) { 1006 DBG(DBG_DMA_UNBINDH, dip, "run handle callback\n"); 1007 ddi_run_callback(&px_kmem_clid); 1008 } 1009 mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE; 1010 1011 return (DDI_SUCCESS); 1012 } 1013 1014 /* 1015 * bus dma win entry point: 1016 */ 1017 int 1018 px_dma_win(dev_info_t *dip, dev_info_t *rdip, 1019 ddi_dma_handle_t handle, uint_t win, off_t *offp, 1020 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 1021 { 1022 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 1023 int ret; 1024 1025 DBG(DBG_DMA_WIN, dip, "rdip=%s%d\n", 1026 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1027 1028 px_dump_dma_handle(DBG_DMA_WIN, dip, mp); 1029 if (win >= mp->dmai_nwin) { 1030 DBG(DBG_DMA_WIN, dip, "%x out of range\n", win); 1031 return (DDI_FAILURE); 1032 } 1033 1034 switch (PX_DMA_TYPE(mp)) { 1035 case PX_DMAI_FLAGS_DVMA: 1036 if (win != PX_DMA_CURWIN(mp)) { 1037 px_t *px_p = DIP_TO_STATE(dip); 1038 px_mmu_t *mmu_p = px_p->px_mmu_p; 1039 px_mmu_unmap_window(mmu_p, mp); 1040 1041 /* map_window sets dmai_mapping/size/offset */ 1042 px_mmu_map_window(mmu_p, mp, win); 1043 if ((ret = px_mmu_map_window(mmu_p, 1044 mp, win)) != DDI_SUCCESS) 1045 return (ret); 1046 } 1047 if (cookiep) 1048 MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, 1049 mp->dmai_size); 1050 if (ccountp) 1051 *ccountp = 1; 1052 break; 1053 case PX_DMAI_FLAGS_PTP: 1054 case PX_DMAI_FLAGS_BYPASS: { 1055 int i; 1056 
		ddi_dma_cookie_t *ck_p;
		px_dma_win_t *win_p = mp->dmai_winlst;

		/* walk the window list to the requested window */
		for (i = 0; i < win; win_p = win_p->win_next, i++);
		ck_p = (ddi_dma_cookie_t *)(win_p + 1);
		/*
		 * NOTE(review): unlike the DVMA case above, cookiep is
		 * dereferenced here without a NULL check - confirm callers
		 * always pass a cookie pointer for PTP/BYPASS handles.
		 */
		*cookiep = *ck_p;
		mp->dmai_offset = win_p->win_offset;
		mp->dmai_size   = win_p->win_size;
		mp->dmai_mapping = ck_p->dmac_laddress;
		mp->dmai_cookie = ck_p + 1;
		win_p->win_curseg = 0;
		if (ccountp)
			*ccountp = win_p->win_ncookies;
		}
		break;
	default:
		cmn_err(CE_WARN, "%s%d: px_dma_win:bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PX_DMA_TYPE(mp));
		return (DDI_FAILURE);
	}
	if (cookiep)
		DBG(DBG_DMA_WIN, dip,
		    "cookie - dmac_address=%x dmac_size=%x\n",
		    cookiep->dmac_address, cookiep->dmac_size);
	if (offp)
		*offp = (off_t)mp->dmai_offset;
	if (lenp)
		*lenp = mp->dmai_size;
	return (DDI_SUCCESS);
}

#ifdef	DEBUG
/* names for ddi_dma_ctlops commands, indexed by cmd, for DBG tracing */
static char *px_dmactl_str[] = {
	"DDI_DMA_FREE",
	"DDI_DMA_SYNC",
	"DDI_DMA_HTOC",
	"DDI_DMA_KVADDR",
	"DDI_DMA_MOVWIN",
	"DDI_DMA_REPWIN",
	"DDI_DMA_GETERR",
	"DDI_DMA_COFF",
	"DDI_DMA_NEXTWIN",
	"DDI_DMA_NEXTSEG",
	"DDI_DMA_SEGTOC",
	"DDI_DMA_RESERVE",
	"DDI_DMA_RELEASE",
	"DDI_DMA_RESETH",
	"DDI_DMA_CKSYNC",
	"DDI_DMA_IOPB_ALLOC",
	"DDI_DMA_IOPB_FREE",
	"DDI_DMA_SMEM_ALLOC",
	"DDI_DMA_SMEM_FREE",
	"DDI_DMA_SET_SBUS64"
};
#endif	/* DEBUG */

/*
 * bus dma control entry point:
 */
/*ARGSUSED*/
int
px_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
	enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp,
	uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;

#ifdef	DEBUG
	DBG(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", px_dmactl_str[cmd],
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
#endif	/* DEBUG */

	/* commands handled here regardless of the handle's dma type */
	switch (cmd) {
	case DDI_DMA_FREE:
		(void) px_dma_unbindhdl(dip, rdip, handle);
		(void) px_dma_freehdl(dip, rdip, handle);
		return (DDI_SUCCESS);
	case DDI_DMA_RESERVE: {
		px_t *px_p = DIP_TO_STATE(dip);
		return (px_fdvma_reserve(dip, rdip, px_p,
		    (ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp));
		}
	case DDI_DMA_RELEASE: {
		px_t *px_p = DIP_TO_STATE(dip);
		return (px_fdvma_release(dip, px_p, mp));
		}
	default:
		break;
	}

	/* everything else is dispatched on the handle's dma type */
	switch (PX_DMA_TYPE(mp)) {
	case PX_DMAI_FLAGS_DVMA:
		return (px_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	case PX_DMAI_FLAGS_PTP:
	case PX_DMAI_FLAGS_BYPASS:
		return (px_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_ctlops(%x):bad dma type %x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), cmd,
		    mp->dmai_flags);
		/*NOTREACHED*/
	}
	return (0);
}

/*
 * control ops entry point:
 *
 * Requests handled completely:
 *	DDI_CTLOPS_INITCHILD	see init_child() for details
 *	DDI_CTLOPS_UNINITCHILD
 *	DDI_CTLOPS_REPORTDEV	see report_dev() for details
 *	DDI_CTLOPS_IOMIN	cache line size if streaming otherwise 1
 *	DDI_CTLOPS_REGSIZE
 *	DDI_CTLOPS_NREGS
 *	DDI_CTLOPS_DVMAPAGESIZE
 *	DDI_CTLOPS_POKE
 *	DDI_CTLOPS_PEEK
 *
 * All others passed to parent.
 */
int
px_ctlops(dev_info_t *dip, dev_info_t *rdip,
	ddi_ctl_enum_t op, void *arg, void *result)
{
	px_t *px_p = DIP_TO_STATE(dip);
	struct detachspec *ds;
	struct attachspec *as;

	switch (op) {
	case DDI_CTLOPS_INITCHILD:
		return (px_init_child(px_p, (dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		return (px_uninit_child(px_p, (dev_info_t *)arg));

	case DDI_CTLOPS_ATTACH:
		as = (struct attachspec *)arg;
		switch (as->when) {
		case DDI_PRE:
			if (as->cmd == DDI_ATTACH) {
				DBG(DBG_PWR, dip, "PRE_ATTACH for %s@%d\n",
				    ddi_driver_name(rdip),
				    ddi_get_instance(rdip));
				return (pcie_pm_hold(dip));
			}
			if (as->cmd == DDI_RESUME) {
				ddi_acc_handle_t	config_handle;
				DBG(DBG_PWR, dip, "PRE_RESUME for %s@%d\n",
				    ddi_driver_name(rdip),
				    ddi_get_instance(rdip));

				if (pci_config_setup(rdip, &config_handle) ==
				    DDI_SUCCESS) {
					pcie_clear_errors(rdip, config_handle);
					pci_config_teardown(&config_handle);
				}
			}
			return (DDI_SUCCESS);

		case DDI_POST:
			DBG(DBG_PWR, dip, "POST_ATTACH for %s@%d\n",
			    ddi_driver_name(rdip), ddi_get_instance(rdip));
			/* child attach failed - drop the PM hold taken above */
			if (as->cmd == DDI_ATTACH && as->result != DDI_SUCCESS)
				pcie_pm_release(dip);
			return (DDI_SUCCESS);
		default:
			break;
		}
		break;

	case DDI_CTLOPS_DETACH:
		ds = (struct detachspec *)arg;
		switch (ds->when) {
		case DDI_POST:
			if (ds->cmd == DDI_DETACH &&
			    ds->result == DDI_SUCCESS) {
				DBG(DBG_PWR, dip, "POST_DETACH for %s@%d\n",
				    ddi_driver_name(rdip),
				    ddi_get_instance(rdip));
				return (pcie_pm_remove_child(dip, rdip));
			}
			return (DDI_SUCCESS);
		default:
			break;
		}
		break;

	case DDI_CTLOPS_REPORTDEV:
		return (px_report_dev(rdip));

	case DDI_CTLOPS_IOMIN:
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
		*((off_t *)result) = px_get_reg_set_size(rdip, *((int *)arg));
		return (*((off_t *)result) == 0 ? DDI_FAILURE : DDI_SUCCESS);

	case DDI_CTLOPS_NREGS:
		*((uint_t *)result) = px_get_nreg_set(rdip);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_DVMAPAGESIZE:
		*((ulong_t *)result) = MMU_PAGE_SIZE;
		return (DDI_SUCCESS);

	case DDI_CTLOPS_POKE:	/* platform dependent implementation. */
		return (px_lib_ctlops_poke(dip, rdip,
		    (peekpoke_ctlops_t *)arg));

	case DDI_CTLOPS_PEEK:	/* platform dependent implementation. */
		return (px_lib_ctlops_peek(dip, rdip,
		    (peekpoke_ctlops_t *)arg, result));

	case DDI_CTLOPS_POWER:
	default:
		break;
	}

	/*
	 * Now pass the request up to our parent.
	 */
	DBG(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
	return (ddi_ctlops(dip, rdip, op, arg, result));
}

/* ARGSUSED */
int
px_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
	ddi_intr_handle_impl_t *hdlp, void *result)
{
	int	intr_types, ret = DDI_SUCCESS;

	DBG(DBG_INTROPS, dip, "px_intr_ops: rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	/* Process DDI_INTROP_SUPPORTED_TYPES request here */
	if (intr_op == DDI_INTROP_SUPPORTED_TYPES) {
		*(int *)result = i_ddi_get_nintrs(rdip) ?
		    DDI_INTR_TYPE_FIXED : 0;

		if ((pci_msi_get_supported_type(rdip,
		    &intr_types)) == DDI_SUCCESS) {
			/*
			 * Double check supported interrupt types vs.
			 * what the host bridge supports.
			 *
			 * NOTE:
			 * Currently MSI-X is disabled since px driver
			 * don't fully support this feature.
			 */
			*(int *)result |= (intr_types & DDI_INTR_TYPE_MSI);
		}

		return (ret);
	}

	/*
	 * PCI-E nexus driver supports fixed, MSI and MSI-X interrupts.
	 * Return failure if interrupt type is not supported.
1320 */ 1321 switch (hdlp->ih_type) { 1322 case DDI_INTR_TYPE_FIXED: 1323 ret = px_intx_ops(dip, rdip, intr_op, hdlp, result); 1324 break; 1325 case DDI_INTR_TYPE_MSI: 1326 case DDI_INTR_TYPE_MSIX: 1327 ret = px_msix_ops(dip, rdip, intr_op, hdlp, result); 1328 break; 1329 default: 1330 ret = DDI_ENOTSUP; 1331 break; 1332 } 1333 1334 return (ret); 1335 } 1336