1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * SPARC Host to PCI Express nexus driver 28 */ 29 30 #include <sys/types.h> 31 #include <sys/conf.h> /* nulldev */ 32 #include <sys/stat.h> /* devctl */ 33 #include <sys/kmem.h> 34 #include <sys/sunddi.h> 35 #include <sys/sunndi.h> 36 #include <sys/ddi_subrdefs.h> 37 #include <sys/spl.h> 38 #include <sys/epm.h> 39 #include <sys/iommutsb.h> 40 #include "px_obj.h" 41 #include <sys/hotplug/pci/pcie_hp.h> 42 #include <sys/pci_tools.h> 43 #include "px_tools_ext.h" 44 #include <sys/pcie_pwr.h> 45 #include <sys/pci_cfgacc.h> 46 47 /*LINTLIBRARY*/ 48 49 /* 50 * function prototypes for dev ops routines: 51 */ 52 static int px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 53 static int px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 54 static int px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, 55 void *arg, void **result); 56 static int px_cb_attach(px_t *); 57 static void px_cb_detach(px_t *); 58 static int px_pwr_setup(dev_info_t *dip); 59 static void px_pwr_teardown(dev_info_t *dip); 60 static void px_set_mps(px_t *px_p); 61 62 extern void pci_cfgacc_acc(pci_cfgacc_req_t *); 63 extern int pcie_max_mps; 64 extern void (*pci_cfgacc_acc_p)(pci_cfgacc_req_t *); 65 /* 66 * bus ops and dev ops structures: 67 */ 68 static struct bus_ops px_bus_ops = { 69 BUSO_REV, 70 px_map, 71 0, 72 0, 73 0, 74 i_ddi_map_fault, 75 px_dma_setup, 76 px_dma_allochdl, 77 px_dma_freehdl, 78 px_dma_bindhdl, 79 px_dma_unbindhdl, 80 px_lib_dma_sync, 81 px_dma_win, 82 px_dma_ctlops, 83 px_ctlops, 84 ddi_bus_prop_op, 85 ndi_busop_get_eventcookie, 86 ndi_busop_add_eventcall, 87 ndi_busop_remove_eventcall, 88 ndi_post_event, 89 NULL, 90 NULL, /* (*bus_config)(); */ 91 NULL, /* (*bus_unconfig)(); */ 92 px_fm_init_child, /* (*bus_fm_init)(); */ 93 NULL, /* (*bus_fm_fini)(); */ 94 px_bus_enter, /* (*bus_fm_access_enter)(); */ 95 px_bus_exit, /* (*bus_fm_access_fini)(); */ 96 pcie_bus_power, /* (*bus_power)(); */ 97 px_intr_ops, /* (*bus_intr_op)(); */ 98 pcie_hp_common_ops /* (*bus_hp_op)(); */ 99 }; 100 101 extern struct cb_ops px_cb_ops; 102 103 static struct dev_ops px_ops = { 104 DEVO_REV, 105 0, 106 px_info, 107 nulldev, 108 0, 109 px_attach, 110 px_detach, 111 nodev, 112 &px_cb_ops, 113 &px_bus_ops, 114 nulldev, 115 ddi_quiesce_not_needed, /* quiesce */ 116 }; 117 118 /* 119 * module definitions: 120 */ 121 #include <sys/modctl.h> 122 extern struct mod_ops mod_driverops; 123 124 static struct modldrv modldrv = { 125 &mod_driverops, /* Type of module - driver */ 126 #if defined(sun4u) 127 "Sun4u Host to PCIe nexus driver", /* Name of module. */ 128 #elif defined(sun4v) 129 "Sun4v Host to PCIe nexus driver", /* Name of module. */ 130 #endif 131 &px_ops, /* driver ops */ 132 }; 133 134 static struct modlinkage modlinkage = { 135 MODREV_1, (void *)&modldrv, NULL 136 }; 137 138 /* driver soft state */ 139 void *px_state_p; 140 141 int 142 _init(void) 143 { 144 int e; 145 146 /* 147 * Initialize per-px bus soft state pointer. 148 */ 149 e = ddi_soft_state_init(&px_state_p, sizeof (px_t), 1); 150 if (e != DDI_SUCCESS) 151 return (e); 152 153 /* 154 * Install the module. 155 */ 156 e = mod_install(&modlinkage); 157 if (e != DDI_SUCCESS) 158 ddi_soft_state_fini(&px_state_p); 159 return (e); 160 } 161 162 int 163 _fini(void) 164 { 165 int e; 166 167 /* 168 * Remove the module. 169 */ 170 e = mod_remove(&modlinkage); 171 if (e != DDI_SUCCESS) 172 return (e); 173 174 /* Free px soft state */ 175 ddi_soft_state_fini(&px_state_p); 176 177 return (e); 178 } 179 180 int 181 _info(struct modinfo *modinfop) 182 { 183 return (mod_info(&modlinkage, modinfop)); 184 } 185 186 /* ARGSUSED */ 187 static int 188 px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 189 { 190 minor_t minor = getminor((dev_t)arg); 191 int instance = PCI_MINOR_NUM_TO_INSTANCE(minor); 192 px_t *px_p = INST_TO_STATE(instance); 193 int ret = DDI_SUCCESS; 194 195 switch (infocmd) { 196 case DDI_INFO_DEVT2INSTANCE: 197 *result = (void *)(intptr_t)instance; 198 break; 199 case DDI_INFO_DEVT2DEVINFO: 200 if (px_p == NULL) { 201 ret = DDI_FAILURE; 202 break; 203 } 204 205 *result = (void *)px_p->px_dip; 206 break; 207 default: 208 ret = DDI_FAILURE; 209 break; 210 } 211 212 return (ret); 213 } 214 215 /* device driver entry points */ 216 /* 217 * attach entry point: 218 */ 219 /*ARGSUSED*/ 220 static int 221 px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 222 { 223 px_t *px_p; /* per bus state pointer */ 224 int instance = DIP_TO_INST(dip); 225 int ret = DDI_SUCCESS; 226 devhandle_t dev_hdl = NULL; 227 pcie_hp_regops_t regops; 228 pcie_bus_t *bus_p; 229 230 switch (cmd) { 231 case DDI_ATTACH: 232 DBG(DBG_ATTACH, dip, "DDI_ATTACH\n"); 233 234 /* See pci_cfgacc.c */ 235 pci_cfgacc_acc_p = pci_cfgacc_acc; 236 237 /* 238 * Allocate and get the per-px soft state structure. 239 */ 240 if (ddi_soft_state_zalloc(px_state_p, instance) 241 != DDI_SUCCESS) { 242 cmn_err(CE_WARN, "%s%d: can't allocate px state", 243 ddi_driver_name(dip), instance); 244 goto err_bad_px_softstate; 245 } 246 px_p = INST_TO_STATE(instance); 247 px_p->px_dip = dip; 248 mutex_init(&px_p->px_mutex, NULL, MUTEX_DRIVER, NULL); 249 px_p->px_soft_state = PCI_SOFT_STATE_CLOSED; 250 251 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, 252 "device_type", "pciex"); 253 254 /* Initialize px_dbg for high pil printing */ 255 px_dbg_attach(dip, &px_p->px_dbg_hdl); 256 pcie_rc_init_bus(dip); 257 258 /* 259 * Get key properties of the pci bridge node and 260 * determine it's type (psycho, schizo, etc ...). 261 */ 262 if (px_get_props(px_p, dip) == DDI_FAILURE) 263 goto err_bad_px_prop; 264 265 if (px_lib_dev_init(dip, &dev_hdl) != DDI_SUCCESS) 266 goto err_bad_dev_init; 267 268 /* Initialize device handle */ 269 px_p->px_dev_hdl = dev_hdl; 270 271 /* Cache the BDF of the root port nexus */ 272 px_p->px_bdf = px_lib_get_bdf(px_p); 273 274 /* 275 * Initialize interrupt block. Note that this 276 * initialize error handling for the PEC as well. 277 */ 278 if ((ret = px_ib_attach(px_p)) != DDI_SUCCESS) 279 goto err_bad_ib; 280 281 if (px_cb_attach(px_p) != DDI_SUCCESS) 282 goto err_bad_cb; 283 284 /* 285 * Start creating the modules. 286 * Note that attach() routines should 287 * register and enable their own interrupts. 288 */ 289 290 if ((px_mmu_attach(px_p)) != DDI_SUCCESS) 291 goto err_bad_mmu; 292 293 if ((px_msiq_attach(px_p)) != DDI_SUCCESS) 294 goto err_bad_msiq; 295 296 if ((px_msi_attach(px_p)) != DDI_SUCCESS) 297 goto err_bad_msi; 298 299 if ((px_pec_attach(px_p)) != DDI_SUCCESS) 300 goto err_bad_pec; 301 302 if ((px_dma_attach(px_p)) != DDI_SUCCESS) 303 goto err_bad_dma; /* nothing to uninitialize on DMA */ 304 305 if ((px_fm_attach(px_p)) != DDI_SUCCESS) 306 goto err_bad_dma; 307 308 /* 309 * All of the error handlers have been registered 310 * by now so it's time to activate the interrupt. 311 */ 312 if ((ret = px_err_add_intr(&px_p->px_fault)) != DDI_SUCCESS) 313 goto err_bad_intr; 314 315 if (px_lib_hotplug_init(dip, (void *)®ops) == DDI_SUCCESS) { 316 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 317 318 bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE; 319 } 320 321 (void) px_set_mps(px_p); 322 323 if (pcie_init(dip, (caddr_t)®ops) != DDI_SUCCESS) 324 goto err_bad_hotplug; 325 326 (void) pcie_hpintr_enable(dip); 327 328 if (pxtool_init(dip) != DDI_SUCCESS) 329 goto err_bad_pcitool_node; 330 331 /* 332 * power management setup. Even if it fails, attach will 333 * succeed as this is a optional feature. Since we are 334 * always at full power, this is not critical. 335 */ 336 if (pwr_common_setup(dip) != DDI_SUCCESS) { 337 DBG(DBG_PWR, dip, "pwr_common_setup failed\n"); 338 } else if (px_pwr_setup(dip) != DDI_SUCCESS) { 339 DBG(DBG_PWR, dip, "px_pwr_setup failed \n"); 340 pwr_common_teardown(dip); 341 } 342 343 /* 344 * add cpr callback 345 */ 346 px_cpr_add_callb(px_p); 347 348 /* 349 * do fabric sync in case we don't need to wait for 350 * any bridge driver to be ready 351 */ 352 (void) px_lib_fabric_sync(dip); 353 354 ddi_report_dev(dip); 355 356 px_p->px_state = PX_ATTACHED; 357 358 /* 359 * save base addr in bus_t for pci_cfgacc_xxx(), this 360 * depends of px structure being properly initialized. 361 */ 362 bus_p = PCIE_DIP2BUS(dip); 363 bus_p->bus_cfgacc_base = px_lib_get_cfgacc_base(dip); 364 365 /* 366 * Partially populate bus_t for all devices in this fabric 367 * for device type macros to work. 368 */ 369 /* 370 * Populate bus_t for all devices in this fabric, after FMA 371 * is initializated, so that config access errors could 372 * trigger panic. 373 */ 374 pcie_fab_init_bus(dip, PCIE_BUS_ALL); 375 376 DBG(DBG_ATTACH, dip, "attach success\n"); 377 break; 378 379 err_bad_pcitool_node: 380 (void) pcie_hpintr_disable(dip); 381 (void) pcie_uninit(dip); 382 err_bad_hotplug: 383 (void) px_lib_hotplug_uninit(dip); 384 px_err_rem_intr(&px_p->px_fault); 385 err_bad_intr: 386 px_fm_detach(px_p); 387 err_bad_dma: 388 px_pec_detach(px_p); 389 err_bad_pec: 390 px_msi_detach(px_p); 391 err_bad_msi: 392 px_msiq_detach(px_p); 393 err_bad_msiq: 394 px_mmu_detach(px_p); 395 err_bad_mmu: 396 px_cb_detach(px_p); 397 err_bad_cb: 398 px_ib_detach(px_p); 399 err_bad_ib: 400 if (px_lib_dev_fini(dip) != DDI_SUCCESS) { 401 DBG(DBG_ATTACH, dip, "px_lib_dev_fini failed\n"); 402 } 403 err_bad_dev_init: 404 px_free_props(px_p); 405 err_bad_px_prop: 406 pcie_rc_fini_bus(dip); 407 px_dbg_detach(dip, &px_p->px_dbg_hdl); 408 mutex_destroy(&px_p->px_mutex); 409 ddi_soft_state_free(px_state_p, instance); 410 err_bad_px_softstate: 411 ret = DDI_FAILURE; 412 break; 413 414 case DDI_RESUME: 415 DBG(DBG_ATTACH, dip, "DDI_RESUME\n"); 416 417 px_p = INST_TO_STATE(instance); 418 419 mutex_enter(&px_p->px_mutex); 420 421 /* suspend might have not succeeded */ 422 if (px_p->px_state != PX_SUSPENDED) { 423 DBG(DBG_ATTACH, px_p->px_dip, 424 "instance NOT suspended\n"); 425 ret = DDI_FAILURE; 426 break; 427 } 428 429 px_msiq_resume(px_p); 430 px_lib_resume(dip); 431 (void) pcie_pwr_resume(dip); 432 px_p->px_state = PX_ATTACHED; 433 434 mutex_exit(&px_p->px_mutex); 435 436 break; 437 default: 438 DBG(DBG_ATTACH, dip, "unsupported attach op\n"); 439 ret = DDI_FAILURE; 440 break; 441 } 442 443 return (ret); 444 } 445 446 /* 447 * detach entry point: 448 */ 449 /*ARGSUSED*/ 450 static int 451 px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 452 { 453 int instance = ddi_get_instance(dip); 454 px_t *px_p = INST_TO_STATE(instance); 455 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 456 int ret; 457 458 /* 459 * Make sure we are currently attached 460 */ 461 if (px_p->px_state != PX_ATTACHED) { 462 DBG(DBG_DETACH, dip, "Instance not attached\n"); 463 return (DDI_FAILURE); 464 } 465 466 mutex_enter(&px_p->px_mutex); 467 468 switch (cmd) { 469 case DDI_DETACH: 470 DBG(DBG_DETACH, dip, "DDI_DETACH\n"); 471 472 /* 473 * remove cpr callback 474 */ 475 px_cpr_rem_callb(px_p); 476 477 (void) pcie_hpintr_disable(dip); 478 479 if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) 480 (void) px_lib_hotplug_uninit(dip); 481 482 if (pcie_uninit(dip) != DDI_SUCCESS) { 483 mutex_exit(&px_p->px_mutex); 484 return (DDI_FAILURE); 485 } 486 487 /* Destroy bus_t for the whole fabric */ 488 pcie_fab_fini_bus(dip, PCIE_BUS_ALL); 489 490 /* 491 * things which used to be done in obj_destroy 492 * are now in-lined here. 493 */ 494 495 px_p->px_state = PX_DETACHED; 496 497 pxtool_uninit(dip); 498 499 px_err_rem_intr(&px_p->px_fault); 500 px_fm_detach(px_p); 501 px_pec_detach(px_p); 502 px_pwr_teardown(dip); 503 pwr_common_teardown(dip); 504 px_msi_detach(px_p); 505 px_msiq_detach(px_p); 506 px_mmu_detach(px_p); 507 px_cb_detach(px_p); 508 px_ib_detach(px_p); 509 if (px_lib_dev_fini(dip) != DDI_SUCCESS) { 510 DBG(DBG_DETACH, dip, "px_lib_dev_fini failed\n"); 511 } 512 513 /* 514 * Free the px soft state structure and the rest of the 515 * resources it's using. 516 */ 517 px_free_props(px_p); 518 pcie_rc_fini_bus(dip); 519 px_dbg_detach(dip, &px_p->px_dbg_hdl); 520 mutex_exit(&px_p->px_mutex); 521 mutex_destroy(&px_p->px_mutex); 522 523 px_p->px_dev_hdl = NULL; 524 ddi_soft_state_free(px_state_p, instance); 525 526 return (DDI_SUCCESS); 527 528 case DDI_SUSPEND: 529 if (pcie_pwr_suspend(dip) != DDI_SUCCESS) { 530 mutex_exit(&px_p->px_mutex); 531 return (DDI_FAILURE); 532 } 533 if ((ret = px_lib_suspend(dip)) == DDI_SUCCESS) 534 px_p->px_state = PX_SUSPENDED; 535 mutex_exit(&px_p->px_mutex); 536 537 return (ret); 538 539 default: 540 DBG(DBG_DETACH, dip, "unsupported detach op\n"); 541 mutex_exit(&px_p->px_mutex); 542 return (DDI_FAILURE); 543 } 544 } 545 546 int 547 px_cb_attach(px_t *px_p) 548 { 549 px_fault_t *fault_p = &px_p->px_cb_fault; 550 dev_info_t *dip = px_p->px_dip; 551 sysino_t sysino; 552 553 if (px_lib_intr_devino_to_sysino(dip, 554 px_p->px_inos[PX_INTR_XBC], &sysino) != DDI_SUCCESS) 555 return (DDI_FAILURE); 556 557 fault_p->px_fh_dip = dip; 558 fault_p->px_fh_sysino = sysino; 559 fault_p->px_err_func = px_err_cb_intr; 560 fault_p->px_intr_ino = px_p->px_inos[PX_INTR_XBC]; 561 562 return (px_cb_add_intr(fault_p)); 563 } 564 565 void 566 px_cb_detach(px_t *px_p) 567 { 568 px_cb_rem_intr(&px_p->px_cb_fault); 569 } 570 571 /* 572 * power management related initialization specific to px 573 * called by px_attach() 574 */ 575 static int 576 px_pwr_setup(dev_info_t *dip) 577 { 578 pcie_pwr_t *pwr_p; 579 int instance = ddi_get_instance(dip); 580 px_t *px_p = INST_TO_STATE(instance); 581 ddi_intr_handle_impl_t hdl; 582 583 ASSERT(PCIE_PMINFO(dip)); 584 pwr_p = PCIE_NEXUS_PMINFO(dip); 585 ASSERT(pwr_p); 586 587 /* 588 * indicate support LDI (Layered Driver Interface) 589 * Create the property, if it is not already there 590 */ 591 if (!ddi_prop_exists(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS, 592 DDI_KERNEL_IOCTL)) { 593 if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 594 DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS) { 595 DBG(DBG_PWR, dip, "can't create kernel ioctl prop\n"); 596 return (DDI_FAILURE); 597 } 598 } 599 /* No support for device PM. We are always at full power */ 600 pwr_p->pwr_func_lvl = PM_LEVEL_D0; 601 602 mutex_init(&px_p->px_l23ready_lock, NULL, MUTEX_DRIVER, 603 DDI_INTR_PRI(px_pwr_pil)); 604 cv_init(&px_p->px_l23ready_cv, NULL, CV_DRIVER, NULL); 605 606 /* Initialize handle */ 607 bzero(&hdl, sizeof (ddi_intr_handle_impl_t)); 608 hdl.ih_cb_arg1 = px_p; 609 hdl.ih_ver = DDI_INTR_VERSION; 610 hdl.ih_state = DDI_IHDL_STATE_ALLOC; 611 hdl.ih_dip = dip; 612 hdl.ih_pri = px_pwr_pil; 613 614 /* Add PME_TO_ACK message handler */ 615 hdl.ih_cb_func = (ddi_intr_handler_t *)px_pmeq_intr; 616 if (px_add_msiq_intr(dip, dip, &hdl, MSG_REC, 617 (msgcode_t)PCIE_PME_ACK_MSG, -1, 618 &px_p->px_pm_msiq_id) != DDI_SUCCESS) { 619 DBG(DBG_PWR, dip, "px_pwr_setup: couldn't add " 620 " PME_TO_ACK intr\n"); 621 goto pwr_setup_err1; 622 } 623 px_lib_msg_setmsiq(dip, PCIE_PME_ACK_MSG, px_p->px_pm_msiq_id); 624 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_VALID); 625 626 if (px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum, 627 px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil, 628 PX_INTR_STATE_ENABLE, MSG_REC, PCIE_PME_ACK_MSG) != DDI_SUCCESS) { 629 DBG(DBG_PWR, dip, "px_pwr_setup: PME_TO_ACK update interrupt" 630 " state failed\n"); 631 goto px_pwrsetup_err_state; 632 } 633 634 return (DDI_SUCCESS); 635 636 px_pwrsetup_err_state: 637 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID); 638 (void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG, 639 px_p->px_pm_msiq_id); 640 pwr_setup_err1: 641 mutex_destroy(&px_p->px_l23ready_lock); 642 cv_destroy(&px_p->px_l23ready_cv); 643 644 return (DDI_FAILURE); 645 } 646 647 /* 648 * undo whatever is done in px_pwr_setup. called by px_detach() 649 */ 650 static void 651 px_pwr_teardown(dev_info_t *dip) 652 { 653 int instance = ddi_get_instance(dip); 654 px_t *px_p = INST_TO_STATE(instance); 655 ddi_intr_handle_impl_t hdl; 656 657 if (!PCIE_PMINFO(dip) || !PCIE_NEXUS_PMINFO(dip)) 658 return; 659 660 /* Initialize handle */ 661 bzero(&hdl, sizeof (ddi_intr_handle_impl_t)); 662 hdl.ih_ver = DDI_INTR_VERSION; 663 hdl.ih_state = DDI_IHDL_STATE_ALLOC; 664 hdl.ih_dip = dip; 665 hdl.ih_pri = px_pwr_pil; 666 667 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID); 668 (void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG, 669 px_p->px_pm_msiq_id); 670 671 (void) px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum, 672 px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil, 673 PX_INTR_STATE_DISABLE, MSG_REC, PCIE_PME_ACK_MSG); 674 675 px_p->px_pm_msiq_id = (msiqid_t)-1; 676 677 cv_destroy(&px_p->px_l23ready_cv); 678 mutex_destroy(&px_p->px_l23ready_lock); 679 } 680 681 /* bus driver entry points */ 682 683 /* 684 * bus map entry point: 685 * 686 * if map request is for an rnumber 687 * get the corresponding regspec from device node 688 * build a new regspec in our parent's format 689 * build a new map_req with the new regspec 690 * call up the tree to complete the mapping 691 */ 692 int 693 px_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, 694 off_t off, off_t len, caddr_t *addrp) 695 { 696 px_t *px_p = DIP_TO_STATE(dip); 697 struct regspec p_regspec; 698 ddi_map_req_t p_mapreq; 699 int reglen, rval, r_no; 700 pci_regspec_t reloc_reg, *rp = &reloc_reg; 701 702 DBG(DBG_MAP, dip, "rdip=%s%d:", 703 ddi_driver_name(rdip), ddi_get_instance(rdip)); 704 705 if (mp->map_flags & DDI_MF_USER_MAPPING) 706 return (DDI_ME_UNIMPLEMENTED); 707 708 switch (mp->map_type) { 709 case DDI_MT_REGSPEC: 710 reloc_reg = *(pci_regspec_t *)mp->map_obj.rp; /* dup whole */ 711 break; 712 713 case DDI_MT_RNUMBER: 714 r_no = mp->map_obj.rnumber; 715 DBG(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no); 716 717 if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS, 718 "reg", (caddr_t)&rp, ®len) != DDI_SUCCESS) 719 return (DDI_ME_RNUMBER_RANGE); 720 721 if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) { 722 kmem_free(rp, reglen); 723 return (DDI_ME_RNUMBER_RANGE); 724 } 725 rp += r_no; 726 break; 727 728 default: 729 return (DDI_ME_INVAL); 730 } 731 DBG(DBG_MAP | DBG_CONT, dip, "\n"); 732 733 if ((rp->pci_phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) { 734 /* 735 * There may be a need to differentiate between PCI 736 * and PCI-Ex devices so the following range check is 737 * done correctly, depending on the implementation of 738 * pcieb bridge nexus driver. 739 */ 740 if ((off >= PCIE_CONF_HDR_SIZE) || 741 (len > PCIE_CONF_HDR_SIZE) || 742 (off + len > PCIE_CONF_HDR_SIZE)) 743 return (DDI_ME_INVAL); 744 /* 745 * the following function returning a DDI_FAILURE assumes 746 * that there are no virtual config space access services 747 * defined in this layer. Otherwise it is availed right 748 * here and we return. 749 */ 750 rval = px_lib_map_vconfig(dip, mp, off, rp, addrp); 751 if (rval == DDI_SUCCESS) 752 goto done; 753 } 754 755 /* 756 * No virtual config space services or we are mapping 757 * a region of memory mapped config/IO/memory space, so proceed 758 * to the parent. 759 */ 760 761 /* relocate within 64-bit pci space through "assigned-addresses" */ 762 if (rval = px_reloc_reg(dip, rdip, px_p, rp)) 763 goto done; 764 765 if (len) /* adjust regspec according to mapping request */ 766 rp->pci_size_low = len; /* MIN ? */ 767 rp->pci_phys_low += off; 768 769 /* translate relocated pci regspec into parent space through "ranges" */ 770 if (rval = px_xlate_reg(px_p, rp, &p_regspec)) 771 goto done; 772 773 p_mapreq = *mp; /* dup the whole structure */ 774 p_mapreq.map_type = DDI_MT_REGSPEC; 775 p_mapreq.map_obj.rp = &p_regspec; 776 px_lib_map_attr_check(&p_mapreq); 777 rval = ddi_map(dip, &p_mapreq, 0, 0, addrp); 778 779 if (rval == DDI_SUCCESS) { 780 /* 781 * Set-up access functions for FM access error capable drivers. 782 */ 783 if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip))) 784 px_fm_acc_setup(mp, rdip, rp); 785 } 786 787 done: 788 if (mp->map_type == DDI_MT_RNUMBER) 789 kmem_free(rp - r_no, reglen); 790 791 return (rval); 792 } 793 794 /* 795 * bus dma map entry point 796 * return value: 797 * DDI_DMA_PARTIAL_MAP 1 798 * DDI_DMA_MAPOK 0 799 * DDI_DMA_MAPPED 0 800 * DDI_DMA_NORESOURCES -1 801 * DDI_DMA_NOMAPPING -2 802 * DDI_DMA_TOOBIG -3 803 */ 804 int 805 px_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq, 806 ddi_dma_handle_t *handlep) 807 { 808 px_t *px_p = DIP_TO_STATE(dip); 809 px_mmu_t *mmu_p = px_p->px_mmu_p; 810 ddi_dma_impl_t *mp; 811 int ret; 812 813 DBG(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n", 814 ddi_driver_name(rdip), ddi_get_instance(rdip), 815 handlep ? "alloc" : "advisory"); 816 817 if (!(mp = px_dma_lmts2hdl(dip, rdip, mmu_p, dmareq))) 818 return (DDI_DMA_NORESOURCES); 819 if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING) 820 return (DDI_DMA_NOMAPPING); 821 if (ret = px_dma_type(px_p, dmareq, mp)) 822 goto freehandle; 823 if (ret = px_dma_pfn(px_p, dmareq, mp)) 824 goto freehandle; 825 826 switch (PX_DMA_TYPE(mp)) { 827 case PX_DMAI_FLAGS_DVMA: /* LINTED E_EQUALITY_NOT_ASSIGNMENT */ 828 if ((ret = px_dvma_win(px_p, dmareq, mp)) || !handlep) 829 goto freehandle; 830 if (!PX_DMA_CANCACHE(mp)) { /* try fast track */ 831 if (PX_DMA_CANFAST(mp)) { 832 if (!px_dvma_map_fast(mmu_p, mp)) 833 break; 834 /* LINTED E_NOP_ELSE_STMT */ 835 } else { 836 PX_DVMA_FASTTRAK_PROF(mp); 837 } 838 } 839 if (ret = px_dvma_map(mp, dmareq, mmu_p)) 840 goto freehandle; 841 break; 842 case PX_DMAI_FLAGS_PTP: /* LINTED E_EQUALITY_NOT_ASSIGNMENT */ 843 if ((ret = px_dma_physwin(px_p, dmareq, mp)) || !handlep) 844 goto freehandle; 845 break; 846 case PX_DMAI_FLAGS_BYPASS: 847 default: 848 cmn_err(CE_PANIC, "%s%d: px_dma_setup: bad dma type 0x%x", 849 ddi_driver_name(rdip), ddi_get_instance(rdip), 850 PX_DMA_TYPE(mp)); 851 /*NOTREACHED*/ 852 } 853 *handlep = (ddi_dma_handle_t)mp; 854 mp->dmai_flags |= PX_DMAI_FLAGS_INUSE; 855 px_dump_dma_handle(DBG_DMA_MAP, dip, mp); 856 857 return ((mp->dmai_nwin == 1) ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP); 858 freehandle: 859 if (ret == DDI_DMA_NORESOURCES) 860 px_dma_freemp(mp); /* don't run_callback() */ 861 else 862 (void) px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp); 863 return (ret); 864 } 865 866 867 /* 868 * bus dma alloc handle entry point: 869 */ 870 int 871 px_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp, 872 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 873 { 874 px_t *px_p = DIP_TO_STATE(dip); 875 ddi_dma_impl_t *mp; 876 int rval; 877 878 DBG(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n", 879 ddi_driver_name(rdip), ddi_get_instance(rdip)); 880 881 if (attrp->dma_attr_version != DMA_ATTR_V0) 882 return (DDI_DMA_BADATTR); 883 884 if (!(mp = px_dma_allocmp(dip, rdip, waitfp, arg))) 885 return (DDI_DMA_NORESOURCES); 886 887 /* 888 * Save requestor's information 889 */ 890 mp->dmai_attr = *attrp; /* whole object - augmented later */ 891 *PX_DEV_ATTR(mp) = *attrp; /* whole object - device orig attr */ 892 DBG(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp); 893 894 /* check and convert dma attributes to handle parameters */ 895 if (rval = px_dma_attr2hdl(px_p, mp)) { 896 px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp); 897 *handlep = NULL; 898 return (rval); 899 } 900 *handlep = (ddi_dma_handle_t)mp; 901 return (DDI_SUCCESS); 902 } 903 904 905 /* 906 * bus dma free handle entry point: 907 */ 908 /*ARGSUSED*/ 909 int 910 px_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 911 { 912 DBG(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n", 913 ddi_driver_name(rdip), ddi_get_instance(rdip), handle); 914 px_dma_freemp((ddi_dma_impl_t *)handle); 915 916 if (px_kmem_clid) { 917 DBG(DBG_DMA_FREEH, dip, "run handle callback\n"); 918 ddi_run_callback(&px_kmem_clid); 919 } 920 return (DDI_SUCCESS); 921 } 922 923 924 /* 925 * bus dma bind handle entry point: 926 */ 927 int 928 px_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 929 ddi_dma_handle_t handle, ddi_dma_req_t *dmareq, 930 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 931 { 932 px_t *px_p = DIP_TO_STATE(dip); 933 px_mmu_t *mmu_p = px_p->px_mmu_p; 934 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 935 int ret; 936 937 DBG(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n", 938 ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq); 939 940 if (mp->dmai_flags & PX_DMAI_FLAGS_INUSE) 941 return (DDI_DMA_INUSE); 942 943 ASSERT((mp->dmai_flags & ~PX_DMAI_FLAGS_PRESERVE) == 0); 944 mp->dmai_flags |= PX_DMAI_FLAGS_INUSE; 945 946 if (ret = px_dma_type(px_p, dmareq, mp)) 947 goto err; 948 if (ret = px_dma_pfn(px_p, dmareq, mp)) 949 goto err; 950 951 switch (PX_DMA_TYPE(mp)) { 952 case PX_DMAI_FLAGS_DVMA: 953 if (ret = px_dvma_win(px_p, dmareq, mp)) 954 goto map_err; 955 if (!PX_DMA_CANCACHE(mp)) { /* try fast track */ 956 if (PX_DMA_CANFAST(mp)) { 957 if (!px_dvma_map_fast(mmu_p, mp)) 958 goto mapped; /*LINTED E_NOP_ELSE_STMT*/ 959 } else { 960 PX_DVMA_FASTTRAK_PROF(mp); 961 } 962 } 963 if (ret = px_dvma_map(mp, dmareq, mmu_p)) 964 goto map_err; 965 mapped: 966 *ccountp = 1; 967 MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size); 968 break; 969 case PX_DMAI_FLAGS_BYPASS: 970 case PX_DMAI_FLAGS_PTP: 971 if (ret = px_dma_physwin(px_p, dmareq, mp)) 972 goto map_err; 973 *ccountp = PX_WINLST(mp)->win_ncookies; 974 *cookiep = 975 *(ddi_dma_cookie_t *)(PX_WINLST(mp) + 1); /* wholeobj */ 976 break; 977 default: 978 cmn_err(CE_PANIC, "%s%d: px_dma_bindhdl(%p): bad dma type", 979 ddi_driver_name(rdip), ddi_get_instance(rdip), mp); 980 /*NOTREACHED*/ 981 } 982 DBG(DBG_DMA_BINDH, dip, "cookie %" PRIx64 "+%x\n", 983 cookiep->dmac_address, cookiep->dmac_size); 984 px_dump_dma_handle(DBG_DMA_MAP, dip, mp); 985 986 /* insert dma handle into FMA cache */ 987 if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) { 988 (void) ndi_fmc_insert(rdip, DMA_HANDLE, mp, NULL); 989 mp->dmai_error.err_cf = px_err_dma_hdl_check; 990 } 991 992 return (mp->dmai_nwin == 1 ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP); 993 map_err: 994 px_dma_freepfn(mp); 995 err: 996 mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE; 997 return (ret); 998 } 999 1000 1001 /* 1002 * bus dma unbind handle entry point: 1003 */ 1004 /*ARGSUSED*/ 1005 int 1006 px_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 1007 { 1008 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 1009 px_t *px_p = DIP_TO_STATE(dip); 1010 px_mmu_t *mmu_p = px_p->px_mmu_p; 1011 1012 DBG(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n", 1013 ddi_driver_name(rdip), ddi_get_instance(rdip), handle); 1014 if ((mp->dmai_flags & PX_DMAI_FLAGS_INUSE) == 0) { 1015 DBG(DBG_DMA_UNBINDH, dip, "handle not inuse\n"); 1016 return (DDI_FAILURE); 1017 } 1018 1019 /* remove dma handle from FMA cache */ 1020 if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) { 1021 if (DEVI(rdip)->devi_fmhdl != NULL && 1022 DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap)) { 1023 (void) ndi_fmc_remove(rdip, DMA_HANDLE, mp); 1024 } 1025 } 1026 1027 /* 1028 * Here if the handle is using the iommu. Unload all the iommu 1029 * translations. 1030 */ 1031 switch (PX_DMA_TYPE(mp)) { 1032 case PX_DMAI_FLAGS_DVMA: 1033 px_mmu_unmap_window(mmu_p, mp); 1034 px_dvma_unmap(mmu_p, mp); 1035 px_dma_freepfn(mp); 1036 break; 1037 case PX_DMAI_FLAGS_BYPASS: 1038 case PX_DMAI_FLAGS_PTP: 1039 px_dma_freewin(mp); 1040 break; 1041 default: 1042 cmn_err(CE_PANIC, "%s%d: px_dma_unbindhdl:bad dma type %p", 1043 ddi_driver_name(rdip), ddi_get_instance(rdip), mp); 1044 /*NOTREACHED*/ 1045 } 1046 if (mmu_p->mmu_dvma_clid != 0) { 1047 DBG(DBG_DMA_UNBINDH, dip, "run dvma callback\n"); 1048 ddi_run_callback(&mmu_p->mmu_dvma_clid); 1049 } 1050 if (px_kmem_clid) { 1051 DBG(DBG_DMA_UNBINDH, dip, "run handle callback\n"); 1052 ddi_run_callback(&px_kmem_clid); 1053 } 1054 mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE; 1055 1056 return (DDI_SUCCESS); 1057 } 1058 1059 /* 1060 * bus dma win entry point: 1061 */ 1062 int 1063 px_dma_win(dev_info_t *dip, dev_info_t *rdip, 1064 ddi_dma_handle_t handle, uint_t win, off_t *offp, 1065 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 1066 { 1067 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 1068 int ret; 1069 1070 DBG(DBG_DMA_WIN, dip, "rdip=%s%d\n", 1071 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1072 1073 px_dump_dma_handle(DBG_DMA_WIN, dip, mp); 1074 if (win >= mp->dmai_nwin) { 1075 DBG(DBG_DMA_WIN, dip, "%x out of range\n", win); 1076 return (DDI_FAILURE); 1077 } 1078 1079 switch (PX_DMA_TYPE(mp)) { 1080 case PX_DMAI_FLAGS_DVMA: 1081 if (win != PX_DMA_CURWIN(mp)) { 1082 px_t *px_p = DIP_TO_STATE(dip); 1083 px_mmu_t *mmu_p = px_p->px_mmu_p; 1084 px_mmu_unmap_window(mmu_p, mp); 1085 1086 /* map_window sets dmai_mapping/size/offset */ 1087 px_mmu_map_window(mmu_p, mp, win); 1088 if ((ret = px_mmu_map_window(mmu_p, 1089 mp, win)) != DDI_SUCCESS) 1090 return (ret); 1091 } 1092 if (cookiep) 1093 MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, 1094 mp->dmai_size); 1095 if (ccountp) 1096 *ccountp = 1; 1097 break; 1098 case PX_DMAI_FLAGS_PTP: 1099 case PX_DMAI_FLAGS_BYPASS: { 1100 int i; 1101 ddi_dma_cookie_t *ck_p; 1102 px_dma_win_t *win_p = mp->dmai_winlst; 1103 1104 for (i = 0; i < win; win_p = win_p->win_next, i++) {}; 1105 ck_p = (ddi_dma_cookie_t *)(win_p + 1); 1106 *cookiep = *ck_p; 1107 mp->dmai_offset = win_p->win_offset; 1108 mp->dmai_size = win_p->win_size; 1109 mp->dmai_mapping = ck_p->dmac_laddress; 1110 mp->dmai_cookie = ck_p + 1; 1111 win_p->win_curseg = 0; 1112 if (ccountp) 1113 *ccountp = win_p->win_ncookies; 1114 } 1115 break; 1116 default: 1117 cmn_err(CE_WARN, "%s%d: px_dma_win:bad dma type 0x%x", 1118 ddi_driver_name(rdip), ddi_get_instance(rdip), 1119 PX_DMA_TYPE(mp)); 1120 return (DDI_FAILURE); 1121 } 1122 if (cookiep) 1123 DBG(DBG_DMA_WIN, dip, 1124 "cookie - dmac_address=%x dmac_size=%x\n", 1125 cookiep->dmac_address, cookiep->dmac_size); 1126 if (offp) 1127 *offp = (off_t)mp->dmai_offset; 1128 if (lenp) 1129 *lenp = mp->dmai_size; 1130 return (DDI_SUCCESS); 1131 } 1132 1133 #ifdef DEBUG 1134 static char *px_dmactl_str[] = { 1135 "DDI_DMA_FREE", 1136 "DDI_DMA_SYNC", 1137 "DDI_DMA_HTOC", 1138 "DDI_DMA_KVADDR", 1139 "DDI_DMA_MOVWIN", 1140 "DDI_DMA_REPWIN", 1141 "DDI_DMA_GETERR", 1142 "DDI_DMA_COFF", 1143 "DDI_DMA_NEXTWIN", 1144 "DDI_DMA_NEXTSEG", 1145 "DDI_DMA_SEGTOC", 1146 "DDI_DMA_RESERVE", 1147 "DDI_DMA_RELEASE", 1148 "DDI_DMA_RESETH", 1149 "DDI_DMA_CKSYNC", 1150 "DDI_DMA_IOPB_ALLOC", 1151 "DDI_DMA_IOPB_FREE", 1152 "DDI_DMA_SMEM_ALLOC", 1153 "DDI_DMA_SMEM_FREE", 1154 "DDI_DMA_SET_SBUS64" 1155 }; 1156 #endif /* DEBUG */ 1157 1158 /* 1159 * bus dma control entry point: 1160 */ 1161 /*ARGSUSED*/ 1162 int 1163 px_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 1164 enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp, 1165 uint_t cache_flags) 1166 { 1167 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 1168 1169 #ifdef DEBUG 1170 DBG(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", px_dmactl_str[cmd], 1171 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1172 #endif /* DEBUG */ 1173 1174 switch (cmd) { 1175 case DDI_DMA_FREE: 1176 (void) px_dma_unbindhdl(dip, rdip, handle); 1177 (void) px_dma_freehdl(dip, rdip, handle); 1178 return (DDI_SUCCESS); 1179 case DDI_DMA_RESERVE: { 1180 px_t *px_p = DIP_TO_STATE(dip); 1181 return (px_fdvma_reserve(dip, rdip, px_p, 1182 (ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp)); 1183 } 1184 case DDI_DMA_RELEASE: { 1185 px_t *px_p = DIP_TO_STATE(dip); 1186 return (px_fdvma_release(dip, px_p, mp)); 1187 } 1188 default: 1189 break; 1190 } 1191 1192 switch (PX_DMA_TYPE(mp)) { 1193 case PX_DMAI_FLAGS_DVMA: 1194 return (px_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp, 1195 cache_flags)); 1196 case PX_DMAI_FLAGS_PTP: 1197 case PX_DMAI_FLAGS_BYPASS: 1198 return (px_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp, 1199 cache_flags)); 1200 default: 1201 cmn_err(CE_PANIC, "%s%d: px_dma_ctlops(%x):bad dma type %x", 1202 ddi_driver_name(rdip), ddi_get_instance(rdip), cmd, 1203 mp->dmai_flags); 1204 /*NOTREACHED*/ 1205 } 1206 return (0); 1207 } 1208 1209 /* 1210 * control ops entry point: 1211 * 1212 * Requests handled completely: 1213 * DDI_CTLOPS_INITCHILD see init_child() for details 1214 * DDI_CTLOPS_UNINITCHILD 1215 * DDI_CTLOPS_REPORTDEV see report_dev() for details 1216 * DDI_CTLOPS_IOMIN cache line size if streaming otherwise 1 1217 * DDI_CTLOPS_REGSIZE 1218 * DDI_CTLOPS_NREGS 1219 * DDI_CTLOPS_DVMAPAGESIZE 1220 * DDI_CTLOPS_POKE 1221 * DDI_CTLOPS_PEEK 1222 * 1223 * All others passed to parent. 1224 */ 1225 int 1226 px_ctlops(dev_info_t *dip, dev_info_t *rdip, 1227 ddi_ctl_enum_t op, void *arg, void *result) 1228 { 1229 px_t *px_p = DIP_TO_STATE(dip); 1230 struct detachspec *ds; 1231 struct attachspec *as; 1232 1233 switch (op) { 1234 case DDI_CTLOPS_INITCHILD: 1235 return (px_init_child(px_p, (dev_info_t *)arg)); 1236 1237 case DDI_CTLOPS_UNINITCHILD: 1238 return (px_uninit_child(px_p, (dev_info_t *)arg)); 1239 1240 case DDI_CTLOPS_ATTACH: 1241 if (!pcie_is_child(dip, rdip)) 1242 return (DDI_SUCCESS); 1243 1244 as = (struct attachspec *)arg; 1245 switch (as->when) { 1246 case DDI_PRE: 1247 if (as->cmd == DDI_ATTACH) { 1248 DBG(DBG_PWR, dip, "PRE_ATTACH for %s@%d\n", 1249 ddi_driver_name(rdip), 1250 ddi_get_instance(rdip)); 1251 return (pcie_pm_hold(dip)); 1252 } 1253 if (as->cmd == DDI_RESUME) { 1254 DBG(DBG_PWR, dip, "PRE_RESUME for %s@%d\n", 1255 ddi_driver_name(rdip), 1256 ddi_get_instance(rdip)); 1257 1258 pcie_clear_errors(rdip); 1259 } 1260 return (DDI_SUCCESS); 1261 1262 case DDI_POST: 1263 DBG(DBG_PWR, dip, "POST_ATTACH for %s@%d\n", 1264 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1265 if (as->cmd == DDI_ATTACH && 1266 as->result != DDI_SUCCESS) { 1267 /* 1268 * Attach failed for the child device. The child 1269 * driver may have made PM calls before the 1270 * attach failed. pcie_pm_remove_child() should 1271 * cleanup PM state and holds (if any) 1272 * associated with the child device. 1273 */ 1274 return (pcie_pm_remove_child(dip, rdip)); 1275 } 1276 1277 if (as->result == DDI_SUCCESS) 1278 pf_init(rdip, (void *)px_p->px_fm_ibc, as->cmd); 1279 1280 (void) pcie_postattach_child(rdip); 1281 1282 return (DDI_SUCCESS); 1283 default: 1284 break; 1285 } 1286 break; 1287 1288 case DDI_CTLOPS_DETACH: 1289 if (!pcie_is_child(dip, rdip)) 1290 return (DDI_SUCCESS); 1291 1292 ds = (struct detachspec *)arg; 1293 switch (ds->when) { 1294 case DDI_POST: 1295 if (ds->cmd == DDI_DETACH && 1296 ds->result == DDI_SUCCESS) { 1297 DBG(DBG_PWR, dip, "POST_DETACH for %s@%d\n", 1298 ddi_driver_name(rdip), 1299 ddi_get_instance(rdip)); 1300 return (pcie_pm_remove_child(dip, rdip)); 1301 } 1302 return (DDI_SUCCESS); 1303 case DDI_PRE: 1304 pf_fini(rdip, ds->cmd); 1305 return (DDI_SUCCESS); 1306 default: 1307 break; 1308 } 1309 break; 1310 1311 case DDI_CTLOPS_REPORTDEV: 1312 if (ddi_get_parent(rdip) == dip) 1313 return (px_report_dev(rdip)); 1314 1315 (void) px_lib_fabric_sync(rdip); 1316 return (DDI_SUCCESS); 1317 1318 case DDI_CTLOPS_IOMIN: 1319 return (DDI_SUCCESS); 1320 1321 case DDI_CTLOPS_REGSIZE: 1322 *((off_t *)result) = px_get_reg_set_size(rdip, *((int *)arg)); 1323 return (*((off_t *)result) == 0 ? DDI_FAILURE : DDI_SUCCESS); 1324 1325 case DDI_CTLOPS_NREGS: 1326 *((uint_t *)result) = px_get_nreg_set(rdip); 1327 return (DDI_SUCCESS); 1328 1329 case DDI_CTLOPS_DVMAPAGESIZE: 1330 *((ulong_t *)result) = MMU_PAGE_SIZE; 1331 return (DDI_SUCCESS); 1332 1333 case DDI_CTLOPS_POKE: /* platform dependent implementation. */ 1334 return (px_lib_ctlops_poke(dip, rdip, 1335 (peekpoke_ctlops_t *)arg)); 1336 1337 case DDI_CTLOPS_PEEK: /* platform dependent implementation. */ 1338 return (px_lib_ctlops_peek(dip, rdip, 1339 (peekpoke_ctlops_t *)arg, result)); 1340 1341 case DDI_CTLOPS_POWER: 1342 default: 1343 break; 1344 } 1345 1346 /* 1347 * Now pass the request up to our parent. 1348 */ 1349 DBG(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n", 1350 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1351 return (ddi_ctlops(dip, rdip, op, arg, result)); 1352 } 1353 1354 /* ARGSUSED */ 1355 int 1356 px_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op, 1357 ddi_intr_handle_impl_t *hdlp, void *result) 1358 { 1359 int intr_types, ret = DDI_SUCCESS; 1360 px_t *px_p = DIP_TO_STATE(dip); 1361 1362 DBG(DBG_INTROPS, dip, "px_intr_ops: rdip=%s%d\n", 1363 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1364 1365 /* Process DDI_INTROP_SUPPORTED_TYPES request here */ 1366 if (intr_op == DDI_INTROP_SUPPORTED_TYPES) { 1367 *(int *)result = i_ddi_get_intx_nintrs(rdip) ? 1368 DDI_INTR_TYPE_FIXED : 0; 1369 1370 if ((pci_msi_get_supported_type(rdip, 1371 &intr_types)) == DDI_SUCCESS) { 1372 /* 1373 * Double check supported interrupt types vs. 1374 * what the host bridge supports. 1375 */ 1376 *(int *)result |= intr_types; 1377 } 1378 1379 *(int *)result &= px_p->px_supp_intr_types; 1380 return (*(int *)result ? DDI_SUCCESS : DDI_FAILURE); 1381 } 1382 1383 /* 1384 * PCI-E nexus driver supports fixed, MSI and MSI-X interrupts. 1385 * Return failure if interrupt type is not supported. 1386 */ 1387 switch (hdlp->ih_type) { 1388 case DDI_INTR_TYPE_FIXED: 1389 ret = px_intx_ops(dip, rdip, intr_op, hdlp, result); 1390 break; 1391 case DDI_INTR_TYPE_MSI: 1392 case DDI_INTR_TYPE_MSIX: 1393 ret = px_msix_ops(dip, rdip, intr_op, hdlp, result); 1394 break; 1395 default: 1396 ret = DDI_ENOTSUP; 1397 break; 1398 } 1399 1400 return (ret); 1401 } 1402 1403 static void 1404 px_set_mps(px_t *px_p) 1405 { 1406 dev_info_t *dip; 1407 pcie_bus_t *bus_p; 1408 int max_supported; 1409 1410 dip = px_p->px_dip; 1411 bus_p = PCIE_DIP2BUS(dip); 1412 1413 bus_p->bus_mps = -1; 1414 1415 if (pcie_root_port(dip) == DDI_FAILURE) { 1416 if (px_lib_get_root_complex_mps(px_p, dip, 1417 &max_supported) < 0) { 1418 1419 DBG(DBG_MPS, dip, "MPS: Can not get RC MPS\n"); 1420 return; 1421 } 1422 1423 DBG(DBG_MPS, dip, "MPS: Root Complex MPS Cap of = %x\n", 1424 max_supported); 1425 1426 if (pcie_max_mps < max_supported) 1427 max_supported = pcie_max_mps; 1428 1429 (void) pcie_get_fabric_mps(dip, ddi_get_child(dip), 1430 &max_supported); 1431 1432 bus_p->bus_mps = max_supported; 1433 1434 (void) px_lib_set_root_complex_mps(px_p, dip, bus_p->bus_mps); 1435 1436 DBG(DBG_MPS, dip, "MPS: Root Complex MPS Set to = %x\n", 1437 bus_p->bus_mps); 1438 } 1439 } 1440