/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * PCI Express nexus driver interface
 */

#include <sys/types.h>
#include <sys/conf.h>		/* nulldev */
#include <sys/stat.h>		/* devctl */
#include <sys/kmem.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi_subrdefs.h>
#include <sys/spl.h>
#include <sys/epm.h>
#include <sys/iommutsb.h>
#include <sys/hotplug/pci/pcihp.h>
#include <sys/hotplug/pci/pciehpc.h>
#include "px_obj.h"
#include <sys/pci_tools.h>
#include "px_tools_ext.h"
#include <sys/pcie_pwr.h>

/*LINTLIBRARY*/

/*
 * function prototypes for dev ops routines:
 */
static int px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int px_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
	void *arg, void **result);
static int px_cb_attach(px_t *);
static void px_cb_detach(px_t *);
static int px_pwr_setup(dev_info_t *dip);
static void px_pwr_teardown(dev_info_t *dip);

static void px_set_mps(px_t *px_p);

extern int pcie_max_mps;

/*
 * function prototypes for hotplug routines:
 */
static int px_init_hotplug(px_t *px_p);
static int px_uninit_hotplug(dev_info_t *dip);

/*
 * bus ops and dev ops structures:
 */
static struct bus_ops px_bus_ops = {
	BUSO_REV,
	px_map,
	0,
	0,
	0,
	i_ddi_map_fault,
	px_dma_setup,
	px_dma_allochdl,
	px_dma_freehdl,
	px_dma_bindhdl,
	px_dma_unbindhdl,
	px_lib_dma_sync,
	px_dma_win,
	px_dma_ctlops,
	px_ctlops,
	ddi_bus_prop_op,
	ndi_busop_get_eventcookie,
	ndi_busop_add_eventcall,
	ndi_busop_remove_eventcall,
	ndi_post_event,
	NULL,
	NULL,			/* (*bus_config)(); */
	NULL,			/* (*bus_unconfig)(); */
	px_fm_init_child,	/* (*bus_fm_init)(); */
	NULL,			/* (*bus_fm_fini)(); */
	px_bus_enter,		/* (*bus_fm_access_enter)(); */
	px_bus_exit,		/* (*bus_fm_access_fini)(); */
	pcie_bus_power,		/* (*bus_power)(); */
	px_intr_ops		/* (*bus_intr_op)(); */
};

extern struct cb_ops px_cb_ops;

static struct dev_ops px_ops = {
	DEVO_REV,
	0,
	px_info,
	nulldev,
	0,
	px_attach,
	px_detach,
	nodev,
	&px_cb_ops,
	&px_bus_ops,
	nulldev,
	ddi_quiesce_not_needed,	/* quiesce */
};
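/*
 * px_bus_ops above wires up the nexus bus entry points (px_map, the
 * px_dma_* routines, px_ctlops and px_intr_ops) implemented later in
 * this file; slots initialized to 0 or NULL are either unused by this
 * nexus or left to the DDI framework defaults.
 */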
/*
 * module definitions:
 */
#include <sys/modctl.h>
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,			/* Type of module - driver */
	"PCI Express nexus driver",	/* Name of module. */
	&px_ops,			/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/* driver soft state */
void *px_state_p;

int
_init(void)
{
	int e;

	/*
	 * Initialize per-px bus soft state pointer.
	 */
	e = ddi_soft_state_init(&px_state_p, sizeof (px_t), 1);
	if (e != DDI_SUCCESS)
		return (e);

	/*
	 * Install the module.
	 */
	e = mod_install(&modlinkage);
	if (e != DDI_SUCCESS)
		ddi_soft_state_fini(&px_state_p);
	return (e);
}

int
_fini(void)
{
	int e;

	/*
	 * Remove the module.
	 */
	e = mod_remove(&modlinkage);
	if (e != DDI_SUCCESS)
		return (e);

	/* Free px soft state */
	ddi_soft_state_fini(&px_state_p);

	return (e);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/* ARGSUSED */
static int
px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int	instance = getminor((dev_t)arg);
	px_t	*px_p = INST_TO_STATE(instance);

	/*
	 * Allow hotplug to deal with the minor nodes it manages.
	 * Hot plug will be done later.
	 */
	if (px_p && (px_p->px_dev_caps & PX_HOTPLUG_CAPABLE))
		return (pcihp_info(dip, infocmd, arg, result));

	/* non-hotplug or not attached */
	switch (infocmd) {
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(intptr_t)instance;
		return (DDI_SUCCESS);

	case DDI_INFO_DEVT2DEVINFO:
		if (px_p == NULL)
			return (DDI_FAILURE);
		*result = (void *)px_p->px_dip;
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
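/*
 * px_attach() below brings the nexus up in a fixed order: properties,
 * px_lib device handle, interrupt block, common block, MMU, MSIQ, MSI,
 * PEC, DMA and FM support, and only then enables the fault interrupt.
 * A failure at any step unwinds the earlier ones in reverse through the
 * err_bad_* label ladder.
 */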
/* device driver entry points */

/*
 * attach entry point:
 */
/*ARGSUSED*/
static int
px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	px_t		*px_p;	/* per bus state pointer */
	int		instance = DIP_TO_INST(dip);
	int		ret = DDI_SUCCESS;
	devhandle_t	dev_hdl = NULL;

	switch (cmd) {
	case DDI_ATTACH:
		DBG(DBG_ATTACH, dip, "DDI_ATTACH\n");

		/*
		 * Allocate and get the per-px soft state structure.
		 */
		if (ddi_soft_state_zalloc(px_state_p, instance)
		    != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: can't allocate px state",
			    ddi_driver_name(dip), instance);
			goto err_bad_px_softstate;
		}
		px_p = INST_TO_STATE(instance);
		px_p->px_dip = dip;
		mutex_init(&px_p->px_mutex, NULL, MUTEX_DRIVER, NULL);
		px_p->px_soft_state = PX_SOFT_STATE_CLOSED;
		px_p->px_open_count = 0;

		(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip,
		    "device_type", "pciex");

		/* Initialize px_dbg for high pil printing */
		px_dbg_attach(dip, &px_p->px_dbg_hdl);

		/*
		 * Get key properties of the pci bridge node and
		 * determine its type (psycho, schizo, etc ...).
		 */
		if (px_get_props(px_p, dip) == DDI_FAILURE)
			goto err_bad_px_prop;

		if (px_lib_dev_init(dip, &dev_hdl) != DDI_SUCCESS)
			goto err_bad_dev_init;

		/* Initialize device handle */
		px_p->px_dev_hdl = dev_hdl;

		/* Cache the BDF of the root port nexus */
		px_p->px_bdf = px_lib_get_bdf(px_p);

		/*
		 * Initialize the interrupt block.  Note that this
		 * also initializes error handling for the PEC.
		 */
		if ((ret = px_ib_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_ib;

		if (px_cb_attach(px_p) != DDI_SUCCESS)
			goto err_bad_cb;

		/*
		 * Start creating the modules.
		 * Note that attach() routines should
		 * register and enable their own interrupts.
		 */

		if ((px_mmu_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_mmu;

		if ((px_msiq_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_msiq;

		if ((px_msi_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_msi;

		if ((px_pec_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_pec;

		if ((px_dma_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_dma; /* nothing to uninitialize on DMA */

		if ((px_fm_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_dma;

		/*
		 * All of the error handlers have been registered
		 * by now so it's time to activate the interrupt.
		 */
		if ((ret = px_err_add_intr(&px_p->px_fault)) != DDI_SUCCESS)
			goto err_bad_intr;

		(void) px_init_hotplug(px_p);

		px_set_mps(px_p);

		/*
		 * Create the "devctl" node for hotplug and pcitool support.
		 * For a non-hotplug bus, we still need ":devctl" to
		 * support DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls.
		 */
		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
		    PCIHP_AP_MINOR_NUM(instance, PCIHP_DEVCTL_MINOR),
		    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
			goto err_bad_devctl_node;
		}

		if (pxtool_init(dip) != DDI_SUCCESS)
			goto err_bad_pcitool_node;

		/*
		 * Power management setup.  Even if it fails, attach will
		 * succeed as this is an optional feature.  Since we are
		 * always at full power, this is not critical.
		 */
		if (pwr_common_setup(dip) != DDI_SUCCESS) {
			DBG(DBG_PWR, dip, "pwr_common_setup failed\n");
		} else if (px_pwr_setup(dip) != DDI_SUCCESS) {
			DBG(DBG_PWR, dip, "px_pwr_setup failed\n");
			pwr_common_teardown(dip);
		}

		/*
		 * add cpr callback
		 */
		px_cpr_add_callb(px_p);

		ddi_report_dev(dip);

		px_p->px_state = PX_ATTACHED;
		DBG(DBG_ATTACH, dip, "attach success\n");
		break;

err_bad_pcitool_node:
		ddi_remove_minor_node(dip, "devctl");
err_bad_devctl_node:
		px_err_rem_intr(&px_p->px_fault);
err_bad_intr:
		px_fm_detach(px_p);
err_bad_dma:
		px_pec_detach(px_p);
err_bad_pec:
		px_msi_detach(px_p);
err_bad_msi:
		px_msiq_detach(px_p);
err_bad_msiq:
		px_mmu_detach(px_p);
err_bad_mmu:
		px_cb_detach(px_p);
err_bad_cb:
		px_ib_detach(px_p);
err_bad_ib:
		if (px_lib_dev_fini(dip) != DDI_SUCCESS) {
			DBG(DBG_ATTACH, dip, "px_lib_dev_fini failed\n");
		}
err_bad_dev_init:
		px_free_props(px_p);
err_bad_px_prop:
		px_dbg_detach(dip, &px_p->px_dbg_hdl);
		mutex_destroy(&px_p->px_mutex);
		ddi_soft_state_free(px_state_p, instance);
err_bad_px_softstate:
		ret = DDI_FAILURE;
		break;

	case DDI_RESUME:
		DBG(DBG_ATTACH, dip, "DDI_RESUME\n");

		px_p = INST_TO_STATE(instance);

		mutex_enter(&px_p->px_mutex);

		/* suspend might not have succeeded */
		if (px_p->px_state != PX_SUSPENDED) {
			DBG(DBG_ATTACH, px_p->px_dip,
			    "instance NOT suspended\n");
			ret = DDI_FAILURE;
			mutex_exit(&px_p->px_mutex);
			break;
		}

		px_msiq_resume(px_p);
		px_lib_resume(dip);
		(void) pcie_pwr_resume(dip);
		px_p->px_state = PX_ATTACHED;

		mutex_exit(&px_p->px_mutex);

		break;
	default:
		DBG(DBG_ATTACH, dip, "unsupported attach op\n");
		ret = DDI_FAILURE;
		break;
	}

	return (ret);
}
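/*
 * px_detach() undoes px_attach() in reverse order for DDI_DETACH; for
 * DDI_SUSPEND it only quiesces the hardware via px_lib_suspend() and
 * marks the instance PX_SUSPENDED so a later DDI_RESUME can restore it.
 */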
/*
 * detach entry point:
 */
/*ARGSUSED*/
static int
px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		instance = ddi_get_instance(dip);
	px_t		*px_p = INST_TO_STATE(instance);
	int		ret;

	/*
	 * Make sure we are currently attached
	 */
	if (px_p->px_state != PX_ATTACHED) {
		DBG(DBG_DETACH, dip, "Instance not attached\n");
		return (DDI_FAILURE);
	}

	mutex_enter(&px_p->px_mutex);

	switch (cmd) {
	case DDI_DETACH:
		DBG(DBG_DETACH, dip, "DDI_DETACH\n");

		/*
		 * remove cpr callback
		 */
		px_cpr_rem_callb(px_p);

		if (px_p->px_dev_caps & PX_HOTPLUG_CAPABLE)
			if (px_uninit_hotplug(dip) != DDI_SUCCESS) {
				mutex_exit(&px_p->px_mutex);
				return (DDI_FAILURE);
			}

		/*
		 * things which used to be done in obj_destroy
		 * are now in-lined here.
		 */

		px_p->px_state = PX_DETACHED;

		pxtool_uninit(dip);

		ddi_remove_minor_node(dip, "devctl");
		px_err_rem_intr(&px_p->px_fault);
		px_fm_detach(px_p);
		px_pec_detach(px_p);
		px_pwr_teardown(dip);
		pwr_common_teardown(dip);
		px_msi_detach(px_p);
		px_msiq_detach(px_p);
		px_mmu_detach(px_p);
		px_cb_detach(px_p);
		px_ib_detach(px_p);
		if (px_lib_dev_fini(dip) != DDI_SUCCESS) {
			DBG(DBG_DETACH, dip, "px_lib_dev_fini failed\n");
		}

		/*
		 * Free the px soft state structure and the rest of the
		 * resources it's using.
		 */
		px_free_props(px_p);
		px_dbg_detach(dip, &px_p->px_dbg_hdl);
		mutex_exit(&px_p->px_mutex);
		mutex_destroy(&px_p->px_mutex);

		/* Free the interrupt-priorities prop if we created it. */
		{
			int len;

			if (ddi_getproplen(DDI_DEV_T_ANY, dip,
			    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
			    "interrupt-priorities", &len) == DDI_PROP_SUCCESS)
				(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
				    "interrupt-priorities");
		}

		px_p->px_dev_hdl = NULL;
		ddi_soft_state_free(px_state_p, instance);

		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		if (pcie_pwr_suspend(dip) != DDI_SUCCESS) {
			mutex_exit(&px_p->px_mutex);
			return (DDI_FAILURE);
		}
		if ((ret = px_lib_suspend(dip)) == DDI_SUCCESS)
			px_p->px_state = PX_SUSPENDED;
		mutex_exit(&px_p->px_mutex);

		return (ret);

	default:
		DBG(DBG_DETACH, dip, "unsupported detach op\n");
		mutex_exit(&px_p->px_mutex);
		return (DDI_FAILURE);
	}
}

int
px_cb_attach(px_t *px_p)
{
	px_fault_t	*fault_p = &px_p->px_cb_fault;
	dev_info_t	*dip = px_p->px_dip;
	sysino_t	sysino;

	if (px_lib_intr_devino_to_sysino(dip,
	    px_p->px_inos[PX_INTR_XBC], &sysino) != DDI_SUCCESS)
		return (DDI_FAILURE);

	fault_p->px_fh_dip = dip;
	fault_p->px_fh_sysino = sysino;
	fault_p->px_err_func = px_err_cb_intr;
	fault_p->px_intr_ino = px_p->px_inos[PX_INTR_XBC];

	return (px_cb_add_intr(fault_p));
}

void
px_cb_detach(px_t *px_p)
{
	px_cb_rem_intr(&px_p->px_cb_fault);
}
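/*
 * The power management setup below registers an MSIQ-based handler
 * (px_pmeq_intr) for the PCIe PME_TO_ACK message, the acknowledgement
 * a downstream component sends in response to a PME_Turn_Off broadcast.
 * Its arrival is what is expected to end the L2/L3 Ready wait guarded
 * by px_l23ready_lock/px_l23ready_cv.
 */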
/*
 * power management related initialization specific to px
 * called by px_attach()
 */
static int
px_pwr_setup(dev_info_t *dip)
{
	pcie_pwr_t	*pwr_p;
	int		instance = ddi_get_instance(dip);
	px_t		*px_p = INST_TO_STATE(instance);
	ddi_intr_handle_impl_t hdl;

	ASSERT(PCIE_PMINFO(dip));
	pwr_p = PCIE_NEXUS_PMINFO(dip);
	ASSERT(pwr_p);

	/*
	 * Indicate support for LDI (Layered Driver Interface).
	 * Create the property, if it is not already there.
	 */
	if (!ddi_prop_exists(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
	    DDI_KERNEL_IOCTL)) {
		if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		    DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS) {
			DBG(DBG_PWR, dip, "can't create kernel ioctl prop\n");
			return (DDI_FAILURE);
		}
	}
	/* No support for device PM. We are always at full power */
	pwr_p->pwr_func_lvl = PM_LEVEL_D0;

	mutex_init(&px_p->px_l23ready_lock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(px_pwr_pil));
	cv_init(&px_p->px_l23ready_cv, NULL, CV_DRIVER, NULL);

	/* Initialize handle */
	bzero(&hdl, sizeof (ddi_intr_handle_impl_t));
	hdl.ih_cb_arg1 = px_p;
	hdl.ih_ver = DDI_INTR_VERSION;
	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
	hdl.ih_dip = dip;
	hdl.ih_pri = px_pwr_pil;

	/* Add PME_TO_ACK message handler */
	hdl.ih_cb_func = (ddi_intr_handler_t *)px_pmeq_intr;
	if (px_add_msiq_intr(dip, dip, &hdl, MSG_REC,
	    (msgcode_t)PCIE_PME_ACK_MSG, -1,
	    &px_p->px_pm_msiq_id) != DDI_SUCCESS) {
		DBG(DBG_PWR, dip, "px_pwr_setup: couldn't add "
		    "PME_TO_ACK intr\n");
		goto pwr_setup_err1;
	}
	px_lib_msg_setmsiq(dip, PCIE_PME_ACK_MSG, px_p->px_pm_msiq_id);
	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_VALID);

	if (px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum,
	    px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil,
	    PX_INTR_STATE_ENABLE, MSG_REC, PCIE_PME_ACK_MSG) != DDI_SUCCESS) {
		DBG(DBG_PWR, dip, "px_pwr_setup: PME_TO_ACK update interrupt"
		    " state failed\n");
		goto px_pwrsetup_err_state;
	}

	return (DDI_SUCCESS);

px_pwrsetup_err_state:
	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID);
	(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG,
	    px_p->px_pm_msiq_id);
pwr_setup_err1:
	mutex_destroy(&px_p->px_l23ready_lock);
	cv_destroy(&px_p->px_l23ready_cv);

	return (DDI_FAILURE);
}
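/*
 * Note that the teardown below rebuilds the interrupt handle on the
 * stack: the handle used at setup time was itself a stack temporary,
 * and only px_pm_msiq_id is kept in the soft state between the two.
 */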
/*
 * undo whatever is done in px_pwr_setup.
 * called by px_detach()
 */
static void
px_pwr_teardown(dev_info_t *dip)
{
	int		instance = ddi_get_instance(dip);
	px_t		*px_p = INST_TO_STATE(instance);
	ddi_intr_handle_impl_t hdl;

	if (!PCIE_PMINFO(dip) || !PCIE_NEXUS_PMINFO(dip))
		return;

	/* Initialize handle */
	bzero(&hdl, sizeof (ddi_intr_handle_impl_t));
	hdl.ih_ver = DDI_INTR_VERSION;
	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
	hdl.ih_dip = dip;
	hdl.ih_pri = px_pwr_pil;

	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID);
	(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG,
	    px_p->px_pm_msiq_id);

	(void) px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum,
	    px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil,
	    PX_INTR_STATE_DISABLE, MSG_REC, PCIE_PME_ACK_MSG);

	px_p->px_pm_msiq_id = (msiqid_t)-1;

	cv_destroy(&px_p->px_l23ready_cv);
	mutex_destroy(&px_p->px_l23ready_lock);
}

/* bus driver entry points */

/*
 * bus map entry point:
 *
 *	if map request is for an rnumber
 *		get the corresponding regspec from device node
 *	build a new regspec in our parent's format
 *	build a new map_req with the new regspec
 *	call up the tree to complete the mapping
 */
int
px_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t off, off_t len, caddr_t *addrp)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	struct regspec	p_regspec;
	ddi_map_req_t	p_mapreq;
	int		reglen, rval, r_no;
	pci_regspec_t	reloc_reg, *rp = &reloc_reg;

	DBG(DBG_MAP, dip, "rdip=%s%d:",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (mp->map_flags & DDI_MF_USER_MAPPING)
		return (DDI_ME_UNIMPLEMENTED);

	switch (mp->map_type) {
	case DDI_MT_REGSPEC:
		reloc_reg = *(pci_regspec_t *)mp->map_obj.rp; /* dup whole */
		break;

	case DDI_MT_RNUMBER:
		r_no = mp->map_obj.rnumber;
		DBG(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no);

		if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&rp, &reglen) != DDI_SUCCESS)
			return (DDI_ME_RNUMBER_RANGE);

		if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) {
			kmem_free(rp, reglen);
			return (DDI_ME_RNUMBER_RANGE);
		}
		rp += r_no;
		break;

	default:
		return (DDI_ME_INVAL);
	}
	DBG(DBG_MAP | DBG_CONT, dip, "\n");

	if ((rp->pci_phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) {
		/*
		 * There may be a need to differentiate between PCI
		 * and PCI-Ex devices so the following range check is
		 * done correctly, depending on the implementation of
		 * the pcieb bridge nexus driver.
		 */
		if ((off >= PCIE_CONF_HDR_SIZE) ||
		    (len > PCIE_CONF_HDR_SIZE) ||
		    (off + len > PCIE_CONF_HDR_SIZE)) {
			rval = DDI_ME_INVAL;
			goto done;	/* free any "reg" prop copy */
		}
		/*
		 * If px_lib_map_vconfig() returns DDI_FAILURE, no virtual
		 * config space access service is defined in this layer and
		 * we fall through to the parent mapping below; otherwise
		 * the mapping has been satisfied right here.
		 */
		rval = px_lib_map_vconfig(dip, mp, off, rp, addrp);
		if (rval == DDI_SUCCESS)
			goto done;
	}

	/*
	 * No virtual config space services or we are mapping
	 * a region of memory mapped config/IO/memory space, so proceed
	 * to the parent.
	 */

	/* relocate within 64-bit pci space through "assigned-addresses" */
	if (rval = px_reloc_reg(dip, rdip, px_p, rp))
		goto done;

	if (len)	/* adjust regspec according to mapping request */
		rp->pci_size_low = len;	/* MIN ? */
	rp->pci_phys_low += off;

	/* translate relocated pci regspec into parent space through "ranges" */
	if (rval = px_xlate_reg(px_p, rp, &p_regspec))
		goto done;

	p_mapreq = *mp;		/* dup the whole structure */
	p_mapreq.map_type = DDI_MT_REGSPEC;
	p_mapreq.map_obj.rp = &p_regspec;
	px_lib_map_attr_check(&p_mapreq);
	rval = ddi_map(dip, &p_mapreq, 0, 0, addrp);

	if (rval == DDI_SUCCESS) {
		/*
		 * Set-up access functions for FM access error capable drivers.
		 */
		if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)))
			px_fm_acc_setup(mp, rdip, rp);
	}

done:
	if (mp->map_type == DDI_MT_RNUMBER)
		kmem_free(rp - r_no, reglen);

	return (rval);
}
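/*
 * px_dma_setup() below sits in the bus_dma_map slot of px_bus_ops and
 * services the old pre-handle DMA interface.  A NULL handlep marks the
 * request as purely advisory (limit checking only), which is why the
 * DVMA and PTP cases bail out through freehandle without one.
 */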
/*
 * bus dma map entry point
 * return value:
 *	DDI_DMA_PARTIAL_MAP	 1
 *	DDI_DMA_MAPOK		 0
 *	DDI_DMA_MAPPED		 0
 *	DDI_DMA_NORESOURCES	-1
 *	DDI_DMA_NOMAPPING	-2
 *	DDI_DMA_TOOBIG		-3
 */
int
px_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq,
	ddi_dma_handle_t *handlep)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_mmu_t	*mmu_p = px_p->px_mmu_p;
	ddi_dma_impl_t	*mp;
	int		ret;

	DBG(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip),
	    handlep ? "alloc" : "advisory");

	if (!(mp = px_dma_lmts2hdl(dip, rdip, mmu_p, dmareq)))
		return (DDI_DMA_NORESOURCES);
	if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING)
		return (DDI_DMA_NOMAPPING);
	if (ret = px_dma_type(px_p, dmareq, mp))
		goto freehandle;
	if (ret = px_dma_pfn(px_p, dmareq, mp))
		goto freehandle;

	switch (PX_DMA_TYPE(mp)) {
	case PX_DMAI_FLAGS_DVMA: /* LINTED E_EQUALITY_NOT_ASSIGNMENT */
		if ((ret = px_dvma_win(px_p, dmareq, mp)) || !handlep)
			goto freehandle;
		if (!PX_DMA_CANCACHE(mp)) {	/* try fast track */
			if (PX_DMA_CANFAST(mp)) {
				if (!px_dvma_map_fast(mmu_p, mp))
					break;
			/* LINTED E_NOP_ELSE_STMT */
			} else {
				PX_DVMA_FASTTRAK_PROF(mp);
			}
		}
		if (ret = px_dvma_map(mp, dmareq, mmu_p))
			goto freehandle;
		break;
	case PX_DMAI_FLAGS_PTP: /* LINTED E_EQUALITY_NOT_ASSIGNMENT */
		if ((ret = px_dma_physwin(px_p, dmareq, mp)) || !handlep)
			goto freehandle;
		break;
	case PX_DMAI_FLAGS_BYPASS:
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_setup: bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PX_DMA_TYPE(mp));
		/*NOTREACHED*/
	}
	*handlep = (ddi_dma_handle_t)mp;
	mp->dmai_flags |= PX_DMAI_FLAGS_INUSE;
	px_dump_dma_handle(DBG_DMA_MAP, dip, mp);

	return ((mp->dmai_nwin == 1) ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
freehandle:
	if (ret == DDI_DMA_NORESOURCES)
		px_dma_freemp(mp);	/* don't run_callback() */
	else
		(void) px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
	return (ret);
}
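/*
 * The handle-based DMA interface follows: a leaf driver's
 * ddi_dma_alloc_handle(9F) and ddi_dma_addr_bind_handle(9F) calls are
 * routed by the DDI framework to px_dma_allochdl() and px_dma_bindhdl()
 * below.
 */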
/*
 * bus dma alloc handle entry point:
 */
int
px_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	ddi_dma_impl_t	*mp;
	int		rval;

	DBG(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (attrp->dma_attr_version != DMA_ATTR_V0)
		return (DDI_DMA_BADATTR);

	if (!(mp = px_dma_allocmp(dip, rdip, waitfp, arg)))
		return (DDI_DMA_NORESOURCES);

	/*
	 * Save requestor's information
	 */
	mp->dmai_attr = *attrp;		/* whole object - augmented later */
	*PX_DEV_ATTR(mp) = *attrp;	/* whole object - device orig attr */
	DBG(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp);

	/* check and convert dma attributes to handle parameters */
	if (rval = px_dma_attr2hdl(px_p, mp)) {
		px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
		*handlep = NULL;
		return (rval);
	}
	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}


/*
 * bus dma free handle entry point:
 */
/*ARGSUSED*/
int
px_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	DBG(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	px_dma_freemp((ddi_dma_impl_t *)handle);

	if (px_kmem_clid) {
		DBG(DBG_DMA_FREEH, dip, "run handle callback\n");
		ddi_run_callback(&px_kmem_clid);
	}
	return (DDI_SUCCESS);
}


/*
 * bus dma bind handle entry point:
 */
int
px_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
	ddi_dma_handle_t handle, ddi_dma_req_t *dmareq,
	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_mmu_t	*mmu_p = px_p->px_mmu_p;
	ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)handle;
	int		ret;

	DBG(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq);

	if (mp->dmai_flags & PX_DMAI_FLAGS_INUSE)
		return (DDI_DMA_INUSE);

	ASSERT((mp->dmai_flags & ~PX_DMAI_FLAGS_PRESERVE) == 0);
	mp->dmai_flags |= PX_DMAI_FLAGS_INUSE;

	if (ret = px_dma_type(px_p, dmareq, mp))
		goto err;
	if (ret = px_dma_pfn(px_p, dmareq, mp))
		goto err;

	switch (PX_DMA_TYPE(mp)) {
	case PX_DMAI_FLAGS_DVMA:
		if (ret = px_dvma_win(px_p, dmareq, mp))
			goto map_err;
		if (!PX_DMA_CANCACHE(mp)) {	/* try fast track */
			if (PX_DMA_CANFAST(mp)) {
				if (!px_dvma_map_fast(mmu_p, mp))
					goto mapped; /*LINTED E_NOP_ELSE_STMT*/
			} else {
				PX_DVMA_FASTTRAK_PROF(mp);
			}
		}
		if (ret = px_dvma_map(mp, dmareq, mmu_p))
			goto map_err;
mapped:
		*ccountp = 1;
		MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size);
		break;
	case PX_DMAI_FLAGS_BYPASS:
	case PX_DMAI_FLAGS_PTP:
		if (ret = px_dma_physwin(px_p, dmareq, mp))
			goto map_err;
		*ccountp = PX_WINLST(mp)->win_ncookies;
		*cookiep =
		    *(ddi_dma_cookie_t *)(PX_WINLST(mp) + 1); /* wholeobj */
		break;
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_bindhdl(%p): bad dma type",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
		/*NOTREACHED*/
	}
	DBG(DBG_DMA_BINDH, dip, "cookie %" PRIx64 "+%x\n",
	    cookiep->dmac_address, cookiep->dmac_size);
	px_dump_dma_handle(DBG_DMA_MAP, dip, mp);

	/* insert dma handle into FMA cache */
	if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
		(void) ndi_fmc_insert(rdip, DMA_HANDLE, mp, NULL);
		mp->dmai_error.err_cf = px_err_dma_hdl_check;
	}

	return (mp->dmai_nwin == 1 ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
map_err:
	px_dma_freepfn(mp);
err:
	mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE;
	return (ret);
}
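/*
 * Unbind reverses the work done at bind time: the handle is dropped
 * from the FMA cache, DVMA translations are unloaded from the IOMMU,
 * and the resource callback lists are kicked so that waiters blocked
 * on DVMA space or handle memory can retry.
 */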
/*
 * bus dma unbind handle entry point:
 */
/*ARGSUSED*/
int
px_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)handle;
	px_t		*px_p = DIP_TO_STATE(dip);
	px_mmu_t	*mmu_p = px_p->px_mmu_p;

	DBG(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	if ((mp->dmai_flags & PX_DMAI_FLAGS_INUSE) == 0) {
		DBG(DBG_DMA_UNBINDH, dip, "handle not inuse\n");
		return (DDI_FAILURE);
	}

	/* remove dma handle from FMA cache */
	if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
		if (DEVI(rdip)->devi_fmhdl != NULL &&
		    DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap)) {
			(void) ndi_fmc_remove(rdip, DMA_HANDLE, mp);
		}
	}

	/*
	 * Here if the handle is using the iommu.  Unload all the iommu
	 * translations.
	 */
	switch (PX_DMA_TYPE(mp)) {
	case PX_DMAI_FLAGS_DVMA:
		px_mmu_unmap_window(mmu_p, mp);
		px_dvma_unmap(mmu_p, mp);
		px_dma_freepfn(mp);
		break;
	case PX_DMAI_FLAGS_BYPASS:
	case PX_DMAI_FLAGS_PTP:
		px_dma_freewin(mp);
		break;
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_unbindhdl:bad dma type %p",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
		/*NOTREACHED*/
	}
	if (mmu_p->mmu_dvma_clid != 0) {
		DBG(DBG_DMA_UNBINDH, dip, "run dvma callback\n");
		ddi_run_callback(&mmu_p->mmu_dvma_clid);
	}
	if (px_kmem_clid) {
		DBG(DBG_DMA_UNBINDH, dip, "run handle callback\n");
		ddi_run_callback(&px_kmem_clid);
	}
	mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE;

	return (DDI_SUCCESS);
}
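/*
 * px_dma_win() implements the ddi_dma_getwin(9F) side of partial
 * mappings: for DVMA handles the requested window is (re)loaded into
 * the IOMMU, while for peer-to-peer and bypass handles the precomputed
 * window list is simply walked to the requested index.
 */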
/*
 * bus dma win entry point:
 */
int
px_dma_win(dev_info_t *dip, dev_info_t *rdip,
	ddi_dma_handle_t handle, uint_t win, off_t *offp,
	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)handle;
	int		ret;

	DBG(DBG_DMA_WIN, dip, "rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	px_dump_dma_handle(DBG_DMA_WIN, dip, mp);
	if (win >= mp->dmai_nwin) {
		DBG(DBG_DMA_WIN, dip, "%x out of range\n", win);
		return (DDI_FAILURE);
	}

	switch (PX_DMA_TYPE(mp)) {
	case PX_DMAI_FLAGS_DVMA:
		if (win != PX_DMA_CURWIN(mp)) {
			px_t *px_p = DIP_TO_STATE(dip);
			px_mmu_t *mmu_p = px_p->px_mmu_p;
			px_mmu_unmap_window(mmu_p, mp);

			/* map_window sets dmai_mapping/size/offset */
			if ((ret = px_mmu_map_window(mmu_p,
			    mp, win)) != DDI_SUCCESS)
				return (ret);
		}
		if (cookiep)
			MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping,
			    mp->dmai_size);
		if (ccountp)
			*ccountp = 1;
		break;
	case PX_DMAI_FLAGS_PTP:
	case PX_DMAI_FLAGS_BYPASS: {
		int i;
		ddi_dma_cookie_t *ck_p;
		px_dma_win_t *win_p = mp->dmai_winlst;

		for (i = 0; i < win; win_p = win_p->win_next, i++) {};
		ck_p = (ddi_dma_cookie_t *)(win_p + 1);
		*cookiep = *ck_p;
		mp->dmai_offset = win_p->win_offset;
		mp->dmai_size = win_p->win_size;
		mp->dmai_mapping = ck_p->dmac_laddress;
		mp->dmai_cookie = ck_p + 1;
		win_p->win_curseg = 0;
		if (ccountp)
			*ccountp = win_p->win_ncookies;
		}
		break;
	default:
		cmn_err(CE_WARN, "%s%d: px_dma_win:bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PX_DMA_TYPE(mp));
		return (DDI_FAILURE);
	}
	if (cookiep)
		DBG(DBG_DMA_WIN, dip,
		    "cookie - dmac_address=%x dmac_size=%x\n",
		    cookiep->dmac_address, cookiep->dmac_size);
	if (offp)
		*offp = (off_t)mp->dmai_offset;
	if (lenp)
		*lenp = mp->dmai_size;
	return (DDI_SUCCESS);
}

#ifdef DEBUG
static char *px_dmactl_str[] = {
	"DDI_DMA_FREE",
	"DDI_DMA_SYNC",
	"DDI_DMA_HTOC",
	"DDI_DMA_KVADDR",
	"DDI_DMA_MOVWIN",
	"DDI_DMA_REPWIN",
	"DDI_DMA_GETERR",
	"DDI_DMA_COFF",
	"DDI_DMA_NEXTWIN",
	"DDI_DMA_NEXTSEG",
	"DDI_DMA_SEGTOC",
	"DDI_DMA_RESERVE",
	"DDI_DMA_RELEASE",
	"DDI_DMA_RESETH",
	"DDI_DMA_CKSYNC",
	"DDI_DMA_IOPB_ALLOC",
	"DDI_DMA_IOPB_FREE",
	"DDI_DMA_SMEM_ALLOC",
	"DDI_DMA_SMEM_FREE",
	"DDI_DMA_SET_SBUS64"
};
#endif	/* DEBUG */
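/*
 * px_dma_ctlops() handles DDI_DMA_FREE (an unbind plus free) and the
 * fast DVMA DDI_DMA_RESERVE/DDI_DMA_RELEASE pair directly; note that
 * DDI_DMA_RESERVE carries its ddi_dma_req_t through offp and returns
 * the new handle through objp, a quirk of the old ctlops-based fast
 * DVMA interface.  Everything else is dispatched by DMA type.
 */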
/*
 * bus dma control entry point:
 */
/*ARGSUSED*/
int
px_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
	enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp,
	uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;

#ifdef DEBUG
	DBG(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", px_dmactl_str[cmd],
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
#endif	/* DEBUG */

	switch (cmd) {
	case DDI_DMA_FREE:
		(void) px_dma_unbindhdl(dip, rdip, handle);
		(void) px_dma_freehdl(dip, rdip, handle);
		return (DDI_SUCCESS);
	case DDI_DMA_RESERVE: {
		px_t *px_p = DIP_TO_STATE(dip);
		return (px_fdvma_reserve(dip, rdip, px_p,
		    (ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp));
	}
	case DDI_DMA_RELEASE: {
		px_t *px_p = DIP_TO_STATE(dip);
		return (px_fdvma_release(dip, px_p, mp));
	}
	default:
		break;
	}

	switch (PX_DMA_TYPE(mp)) {
	case PX_DMAI_FLAGS_DVMA:
		return (px_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	case PX_DMAI_FLAGS_PTP:
	case PX_DMAI_FLAGS_BYPASS:
		return (px_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_ctlops(%x):bad dma type %x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), cmd,
		    mp->dmai_flags);
		/*NOTREACHED*/
	}
	return (0);
}

/*
 * control ops entry point:
 *
 * Requests handled completely:
 *	DDI_CTLOPS_INITCHILD	see init_child() for details
 *	DDI_CTLOPS_UNINITCHILD
 *	DDI_CTLOPS_REPORTDEV	see report_dev() for details
 *	DDI_CTLOPS_IOMIN	cache line size if streaming otherwise 1
 *	DDI_CTLOPS_REGSIZE
 *	DDI_CTLOPS_NREGS
 *	DDI_CTLOPS_DVMAPAGESIZE
 *	DDI_CTLOPS_POKE
 *	DDI_CTLOPS_PEEK
 *
 * All others passed to parent.
 */
int
px_ctlops(dev_info_t *dip, dev_info_t *rdip,
	ddi_ctl_enum_t op, void *arg, void *result)
{
	px_t *px_p = DIP_TO_STATE(dip);
	struct detachspec *ds;
	struct attachspec *as;

	switch (op) {
	case DDI_CTLOPS_INITCHILD:
		return (px_init_child(px_p, (dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		return (px_uninit_child(px_p, (dev_info_t *)arg));

	case DDI_CTLOPS_ATTACH:
		if (!pcie_is_child(dip, rdip))
			return (DDI_SUCCESS);

		as = (struct attachspec *)arg;
		switch (as->when) {
		case DDI_PRE:
			if (as->cmd == DDI_ATTACH) {
				DBG(DBG_PWR, dip, "PRE_ATTACH for %s@%d\n",
				    ddi_driver_name(rdip),
				    ddi_get_instance(rdip));
				return (pcie_pm_hold(dip));
			}
			if (as->cmd == DDI_RESUME) {
				DBG(DBG_PWR, dip, "PRE_RESUME for %s@%d\n",
				    ddi_driver_name(rdip),
				    ddi_get_instance(rdip));

				pcie_clear_errors(rdip);
			}
			return (DDI_SUCCESS);

		case DDI_POST:
			DBG(DBG_PWR, dip, "POST_ATTACH for %s@%d\n",
			    ddi_driver_name(rdip), ddi_get_instance(rdip));
			if (as->cmd == DDI_ATTACH && as->result != DDI_SUCCESS)
				pcie_pm_release(dip);

			if (as->result == DDI_SUCCESS)
				pf_init(rdip, (void *)px_p->px_fm_ibc, as->cmd);

			(void) pcie_postattach_child(rdip);

			return (DDI_SUCCESS);
		default:
			break;
		}
		break;

	case DDI_CTLOPS_DETACH:
		if (!pcie_is_child(dip, rdip))
			return (DDI_SUCCESS);

		ds = (struct detachspec *)arg;
		switch (ds->when) {
		case DDI_POST:
			if (ds->cmd == DDI_DETACH &&
			    ds->result == DDI_SUCCESS) {
				DBG(DBG_PWR, dip, "POST_DETACH for %s@%d\n",
				    ddi_driver_name(rdip),
				    ddi_get_instance(rdip));
				return (pcie_pm_remove_child(dip, rdip));
			}
			return (DDI_SUCCESS);
		case DDI_PRE:
			pf_fini(rdip, ds->cmd);
			return (DDI_SUCCESS);
		default:
			break;
		}
		break;

	case DDI_CTLOPS_REPORTDEV:
		return (px_report_dev(rdip));

	case DDI_CTLOPS_IOMIN:
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
		*((off_t *)result) = px_get_reg_set_size(rdip, *((int *)arg));
		return (*((off_t *)result) == 0 ? DDI_FAILURE : DDI_SUCCESS);

	case DDI_CTLOPS_NREGS:
		*((uint_t *)result) = px_get_nreg_set(rdip);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_DVMAPAGESIZE:
		*((ulong_t *)result) = MMU_PAGE_SIZE;
		return (DDI_SUCCESS);

	case DDI_CTLOPS_POKE:	/* platform dependent implementation. */
		return (px_lib_ctlops_poke(dip, rdip,
		    (peekpoke_ctlops_t *)arg));

	case DDI_CTLOPS_PEEK:	/* platform dependent implementation. */
		return (px_lib_ctlops_peek(dip, rdip,
		    (peekpoke_ctlops_t *)arg, result));

	case DDI_CTLOPS_POWER:
	default:
		break;
	}

	/*
	 * Now pass the request up to our parent.
	 */
	DBG(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
	return (ddi_ctlops(dip, rdip, op, arg, result));
}
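/*
 * px_intr_ops() is the bus_intr_op entry point.  SUPPORTED_TYPES is
 * answered from INTx availability plus whatever MSI/MSI-X support
 * pci_msi_get_supported_type() reports; all other operations are
 * demultiplexed by handle type to px_intx_ops() or px_msix_ops().
 */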
1311 */ 1312 DBG(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n", 1313 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1314 return (ddi_ctlops(dip, rdip, op, arg, result)); 1315 } 1316 1317 /* ARGSUSED */ 1318 int 1319 px_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op, 1320 ddi_intr_handle_impl_t *hdlp, void *result) 1321 { 1322 int intr_types, ret = DDI_SUCCESS; 1323 1324 DBG(DBG_INTROPS, dip, "px_intr_ops: rdip=%s%d\n", 1325 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1326 1327 /* Process DDI_INTROP_SUPPORTED_TYPES request here */ 1328 if (intr_op == DDI_INTROP_SUPPORTED_TYPES) { 1329 *(int *)result = i_ddi_get_intx_nintrs(rdip) ? 1330 DDI_INTR_TYPE_FIXED : 0; 1331 1332 if ((pci_msi_get_supported_type(rdip, 1333 &intr_types)) == DDI_SUCCESS) { 1334 /* 1335 * Double check supported interrupt types vs. 1336 * what the host bridge supports. 1337 */ 1338 *(int *)result |= intr_types; 1339 } 1340 1341 return (ret); 1342 } 1343 1344 /* 1345 * PCI-E nexus driver supports fixed, MSI and MSI-X interrupts. 1346 * Return failure if interrupt type is not supported. 1347 */ 1348 switch (hdlp->ih_type) { 1349 case DDI_INTR_TYPE_FIXED: 1350 ret = px_intx_ops(dip, rdip, intr_op, hdlp, result); 1351 break; 1352 case DDI_INTR_TYPE_MSI: 1353 case DDI_INTR_TYPE_MSIX: 1354 ret = px_msix_ops(dip, rdip, intr_op, hdlp, result); 1355 break; 1356 default: 1357 ret = DDI_ENOTSUP; 1358 break; 1359 } 1360 1361 return (ret); 1362 } 1363 1364 static int 1365 px_init_hotplug(px_t *px_p) 1366 { 1367 px_bus_range_t bus_range; 1368 dev_info_t *dip; 1369 pciehpc_regops_t regops; 1370 1371 dip = px_p->px_dip; 1372 1373 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1374 "hotplug-capable") == 0) 1375 return (DDI_FAILURE); 1376 1377 /* 1378 * Before initializing hotplug - open up bus range. The busra 1379 * module will initialize its pool of bus numbers from this. 1380 * "busra" will be the agent that keeps track of them during 1381 * hotplug. Also, note, that busra will remove any bus numbers 1382 * already in use from boot time. 
static void
px_set_mps(px_t *px_p)
{
	dev_info_t	*dip;
	pcie_bus_t	*bus_p;
	int		max_supported;

	dip = px_p->px_dip;
	bus_p = PCIE_DIP2BUS(dip);

	bus_p->bus_mps = -1;

	if (pcie_root_port(dip) == DDI_FAILURE) {
		if (px_lib_get_root_complex_mps(px_p, dip,
		    &max_supported) < 0) {

			DBG(DBG_MPS, dip, "MPS: Can not get RC MPS\n");
			return;
		}

		DBG(DBG_MPS, dip, "MPS: Root Complex MPS Cap of = %x\n",
		    max_supported);

		if (pcie_max_mps < max_supported)
			max_supported = pcie_max_mps;

		(void) pcie_get_fabric_mps(dip, ddi_get_child(dip),
		    &max_supported);

		bus_p->bus_mps = max_supported;

		(void) px_lib_set_root_complex_mps(px_p, dip, bus_p->bus_mps);

		DBG(DBG_MPS, dip, "MPS: Root Complex MPS Set to = %x\n",
		    bus_p->bus_mps);
	}
}