1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 */ 25 26 /* 27 * SPARC Host to PCI Express nexus driver 28 */ 29 30 #include <sys/types.h> 31 #include <sys/conf.h> /* nulldev */ 32 #include <sys/stat.h> /* devctl */ 33 #include <sys/kmem.h> 34 #include <sys/sunddi.h> 35 #include <sys/sunndi.h> 36 #include <sys/ddi_subrdefs.h> 37 #include <sys/spl.h> 38 #include <sys/epm.h> 39 #include <sys/iommutsb.h> 40 #include "px_obj.h" 41 #include <sys/hotplug/pci/pcie_hp.h> 42 #include <sys/pci_tools.h> 43 #include "px_tools_ext.h" 44 #include <sys/pcie_pwr.h> 45 #include <sys/pci_cfgacc.h> 46 47 /*LINTLIBRARY*/ 48 49 /* 50 * function prototypes for dev ops routines: 51 */ 52 static int px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 53 static int px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 54 static int px_enable_err_intr(px_t *px_p); 55 static void px_disable_err_intr(px_t *px_p); 56 static int px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, 57 void *arg, void **result); 58 static int px_cb_attach(px_t *); 59 static int px_pwr_setup(dev_info_t *dip); 60 static void px_pwr_teardown(dev_info_t *dip); 61 static void px_set_mps(px_t *px_p); 62 63 extern void pci_cfgacc_acc(pci_cfgacc_req_t *); 64 extern int pcie_max_mps; 65 extern void (*pci_cfgacc_acc_p)(pci_cfgacc_req_t *); 66 /* 67 * bus ops and dev ops structures: 68 */ 69 static struct bus_ops px_bus_ops = { 70 BUSO_REV, 71 px_map, 72 0, 73 0, 74 0, 75 i_ddi_map_fault, 76 px_dma_setup, 77 px_dma_allochdl, 78 px_dma_freehdl, 79 px_dma_bindhdl, 80 px_dma_unbindhdl, 81 px_lib_dma_sync, 82 px_dma_win, 83 px_dma_ctlops, 84 px_ctlops, 85 ddi_bus_prop_op, 86 ndi_busop_get_eventcookie, 87 ndi_busop_add_eventcall, 88 ndi_busop_remove_eventcall, 89 ndi_post_event, 90 NULL, 91 NULL, /* (*bus_config)(); */ 92 NULL, /* (*bus_unconfig)(); */ 93 px_fm_init_child, /* (*bus_fm_init)(); */ 94 NULL, /* (*bus_fm_fini)(); */ 95 px_bus_enter, /* (*bus_fm_access_enter)(); */ 96 px_bus_exit, /* (*bus_fm_access_fini)(); */ 97 pcie_bus_power, /* (*bus_power)(); */ 98 px_intr_ops, /* (*bus_intr_op)(); */ 99 pcie_hp_common_ops /* (*bus_hp_op)(); */ 100 }; 101 102 extern struct cb_ops px_cb_ops; 103 104 static struct dev_ops px_ops = { 105 DEVO_REV, 106 0, 107 px_info, 108 nulldev, 109 0, 110 px_attach, 111 px_detach, 112 nodev, 113 &px_cb_ops, 114 &px_bus_ops, 115 nulldev, 116 ddi_quiesce_not_needed, /* quiesce */ 117 }; 118 119 /* 120 * module definitions: 121 */ 122 #include <sys/modctl.h> 123 extern struct mod_ops mod_driverops; 124 125 static struct modldrv modldrv = { 126 &mod_driverops, /* Type of module - driver */ 127 #if defined(sun4u) 128 "Sun4u Host to PCIe nexus driver", /* Name of module. */ 129 #elif defined(sun4v) 130 "Sun4v Host to PCIe nexus driver", /* Name of module. */ 131 #endif 132 &px_ops, /* driver ops */ 133 }; 134 135 static struct modlinkage modlinkage = { 136 MODREV_1, (void *)&modldrv, NULL 137 }; 138 139 /* driver soft state */ 140 void *px_state_p; 141 142 int px_force_intx_support = 1; 143 144 int 145 _init(void) 146 { 147 int e; 148 149 /* 150 * Initialize per-px bus soft state pointer. 151 */ 152 e = ddi_soft_state_init(&px_state_p, sizeof (px_t), 1); 153 if (e != DDI_SUCCESS) 154 return (e); 155 156 /* 157 * Install the module. 158 */ 159 e = mod_install(&modlinkage); 160 if (e != DDI_SUCCESS) 161 ddi_soft_state_fini(&px_state_p); 162 return (e); 163 } 164 165 int 166 _fini(void) 167 { 168 int e; 169 170 /* 171 * Remove the module. 172 */ 173 e = mod_remove(&modlinkage); 174 if (e != DDI_SUCCESS) 175 return (e); 176 177 /* Free px soft state */ 178 ddi_soft_state_fini(&px_state_p); 179 180 return (e); 181 } 182 183 int 184 _info(struct modinfo *modinfop) 185 { 186 return (mod_info(&modlinkage, modinfop)); 187 } 188 189 /* ARGSUSED */ 190 static int 191 px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 192 { 193 minor_t minor = getminor((dev_t)arg); 194 int instance = PCI_MINOR_NUM_TO_INSTANCE(minor); 195 px_t *px_p = INST_TO_STATE(instance); 196 int ret = DDI_SUCCESS; 197 198 switch (infocmd) { 199 case DDI_INFO_DEVT2INSTANCE: 200 *result = (void *)(intptr_t)instance; 201 break; 202 case DDI_INFO_DEVT2DEVINFO: 203 if (px_p == NULL) { 204 ret = DDI_FAILURE; 205 break; 206 } 207 208 *result = (void *)px_p->px_dip; 209 break; 210 default: 211 ret = DDI_FAILURE; 212 break; 213 } 214 215 return (ret); 216 } 217 218 /* device driver entry points */ 219 /* 220 * attach entry point: 221 */ 222 /*ARGSUSED*/ 223 static int 224 px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 225 { 226 px_t *px_p; /* per bus state pointer */ 227 int instance = DIP_TO_INST(dip); 228 int ret = DDI_SUCCESS; 229 devhandle_t dev_hdl = NULL; 230 pcie_hp_regops_t regops; 231 pcie_bus_t *bus_p; 232 233 switch (cmd) { 234 case DDI_ATTACH: 235 DBG(DBG_ATTACH, dip, "DDI_ATTACH\n"); 236 237 /* See pci_cfgacc.c */ 238 pci_cfgacc_acc_p = pci_cfgacc_acc; 239 240 /* 241 * Allocate and get the per-px soft state structure. 242 */ 243 if (ddi_soft_state_zalloc(px_state_p, instance) 244 != DDI_SUCCESS) { 245 cmn_err(CE_WARN, "%s%d: can't allocate px state", 246 ddi_driver_name(dip), instance); 247 goto err_bad_px_softstate; 248 } 249 px_p = INST_TO_STATE(instance); 250 px_p->px_dip = dip; 251 mutex_init(&px_p->px_mutex, NULL, MUTEX_DRIVER, NULL); 252 px_p->px_soft_state = PCI_SOFT_STATE_CLOSED; 253 254 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, 255 "device_type", "pciex"); 256 257 /* Initialize px_dbg for high pil printing */ 258 px_dbg_attach(dip, &px_p->px_dbg_hdl); 259 pcie_rc_init_bus(dip); 260 261 /* 262 * Get key properties of the pci bridge node and 263 * determine it's type (psycho, schizo, etc ...). 264 */ 265 if (px_get_props(px_p, dip) == DDI_FAILURE) 266 goto err_bad_px_prop; 267 268 if (px_lib_dev_init(dip, &dev_hdl) != DDI_SUCCESS) 269 goto err_bad_dev_init; 270 271 /* Initialize device handle */ 272 px_p->px_dev_hdl = dev_hdl; 273 274 /* Cache the BDF of the root port nexus */ 275 px_p->px_bdf = px_lib_get_bdf(px_p); 276 277 /* 278 * Initialize interrupt block. Note that this 279 * initialize error handling for the PEC as well. 280 */ 281 if ((ret = px_ib_attach(px_p)) != DDI_SUCCESS) 282 goto err_bad_ib; 283 284 if (px_cb_attach(px_p) != DDI_SUCCESS) 285 goto err_bad_cb; 286 287 /* 288 * Start creating the modules. 289 * Note that attach() routines should 290 * register and enable their own interrupts. 291 */ 292 293 if ((px_mmu_attach(px_p)) != DDI_SUCCESS) 294 goto err_bad_mmu; 295 296 if ((px_msiq_attach(px_p)) != DDI_SUCCESS) 297 goto err_bad_msiq; 298 299 if ((px_msi_attach(px_p)) != DDI_SUCCESS) 300 goto err_bad_msi; 301 302 if ((px_pec_attach(px_p)) != DDI_SUCCESS) 303 goto err_bad_pec; 304 305 if ((px_dma_attach(px_p)) != DDI_SUCCESS) 306 goto err_bad_dma; /* nothing to uninitialize on DMA */ 307 308 if ((px_fm_attach(px_p)) != DDI_SUCCESS) 309 goto err_bad_dma; 310 311 /* 312 * All of the error handlers have been registered 313 * by now so it's time to activate all the interrupt. 314 */ 315 if ((px_enable_err_intr(px_p)) != DDI_SUCCESS) 316 goto err_bad_intr; 317 318 if (px_lib_hotplug_init(dip, (void *)®ops) == DDI_SUCCESS) { 319 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 320 321 bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE; 322 } 323 324 (void) px_set_mps(px_p); 325 326 if (pcie_init(dip, (caddr_t)®ops) != DDI_SUCCESS) 327 goto err_bad_hotplug; 328 329 (void) pcie_hpintr_enable(dip); 330 331 if (pxtool_init(dip) != DDI_SUCCESS) 332 goto err_bad_pcitool_node; 333 334 /* 335 * power management setup. Even if it fails, attach will 336 * succeed as this is a optional feature. Since we are 337 * always at full power, this is not critical. 338 */ 339 if (pwr_common_setup(dip) != DDI_SUCCESS) { 340 DBG(DBG_PWR, dip, "pwr_common_setup failed\n"); 341 } else if (px_pwr_setup(dip) != DDI_SUCCESS) { 342 DBG(DBG_PWR, dip, "px_pwr_setup failed \n"); 343 pwr_common_teardown(dip); 344 } 345 346 /* 347 * add cpr callback 348 */ 349 px_cpr_add_callb(px_p); 350 351 /* 352 * do fabric sync in case we don't need to wait for 353 * any bridge driver to be ready 354 */ 355 (void) px_lib_fabric_sync(dip); 356 357 ddi_report_dev(dip); 358 359 px_p->px_state = PX_ATTACHED; 360 361 /* 362 * save base addr in bus_t for pci_cfgacc_xxx(), this 363 * depends of px structure being properly initialized. 364 */ 365 bus_p = PCIE_DIP2BUS(dip); 366 bus_p->bus_cfgacc_base = px_lib_get_cfgacc_base(dip); 367 368 /* 369 * Partially populate bus_t for all devices in this fabric 370 * for device type macros to work. 371 */ 372 /* 373 * Populate bus_t for all devices in this fabric, after FMA 374 * is initializated, so that config access errors could 375 * trigger panic. 376 */ 377 pcie_fab_init_bus(dip, PCIE_BUS_ALL); 378 379 DBG(DBG_ATTACH, dip, "attach success\n"); 380 break; 381 382 err_bad_pcitool_node: 383 (void) pcie_hpintr_disable(dip); 384 (void) pcie_uninit(dip); 385 err_bad_hotplug: 386 (void) px_lib_hotplug_uninit(dip); 387 px_disable_err_intr(px_p); 388 err_bad_intr: 389 px_fm_detach(px_p); 390 err_bad_dma: 391 px_pec_detach(px_p); 392 err_bad_pec: 393 px_msi_detach(px_p); 394 err_bad_msi: 395 px_msiq_detach(px_p); 396 err_bad_msiq: 397 px_mmu_detach(px_p); 398 err_bad_mmu: 399 err_bad_cb: 400 px_ib_detach(px_p); 401 err_bad_ib: 402 if (px_lib_dev_fini(dip) != DDI_SUCCESS) { 403 DBG(DBG_ATTACH, dip, "px_lib_dev_fini failed\n"); 404 } 405 err_bad_dev_init: 406 px_free_props(px_p); 407 err_bad_px_prop: 408 pcie_rc_fini_bus(dip); 409 px_dbg_detach(dip, &px_p->px_dbg_hdl); 410 mutex_destroy(&px_p->px_mutex); 411 ddi_soft_state_free(px_state_p, instance); 412 err_bad_px_softstate: 413 ret = DDI_FAILURE; 414 break; 415 416 case DDI_RESUME: 417 DBG(DBG_ATTACH, dip, "DDI_RESUME\n"); 418 419 px_p = INST_TO_STATE(instance); 420 421 mutex_enter(&px_p->px_mutex); 422 423 /* suspend might have not succeeded */ 424 if (px_p->px_state != PX_SUSPENDED) { 425 DBG(DBG_ATTACH, px_p->px_dip, 426 "instance NOT suspended\n"); 427 ret = DDI_FAILURE; 428 break; 429 } 430 431 px_msiq_resume(px_p); 432 px_lib_resume(dip); 433 (void) pcie_pwr_resume(dip); 434 px_p->px_state = PX_ATTACHED; 435 436 mutex_exit(&px_p->px_mutex); 437 438 break; 439 default: 440 DBG(DBG_ATTACH, dip, "unsupported attach op\n"); 441 ret = DDI_FAILURE; 442 break; 443 } 444 445 return (ret); 446 } 447 448 /* 449 * detach entry point: 450 */ 451 /*ARGSUSED*/ 452 static int 453 px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 454 { 455 int instance = ddi_get_instance(dip); 456 px_t *px_p = INST_TO_STATE(instance); 457 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip); 458 int ret; 459 460 /* 461 * Make sure we are currently attached 462 */ 463 if (px_p->px_state != PX_ATTACHED) { 464 DBG(DBG_DETACH, dip, "Instance not attached\n"); 465 return (DDI_FAILURE); 466 } 467 468 mutex_enter(&px_p->px_mutex); 469 470 switch (cmd) { 471 case DDI_DETACH: 472 DBG(DBG_DETACH, dip, "DDI_DETACH\n"); 473 474 /* 475 * remove cpr callback 476 */ 477 px_cpr_rem_callb(px_p); 478 479 (void) pcie_hpintr_disable(dip); 480 481 if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) 482 (void) px_lib_hotplug_uninit(dip); 483 484 if (pcie_uninit(dip) != DDI_SUCCESS) { 485 mutex_exit(&px_p->px_mutex); 486 return (DDI_FAILURE); 487 } 488 489 /* Destroy bus_t for the whole fabric */ 490 pcie_fab_fini_bus(dip, PCIE_BUS_ALL); 491 492 /* 493 * things which used to be done in obj_destroy 494 * are now in-lined here. 495 */ 496 497 px_p->px_state = PX_DETACHED; 498 499 pxtool_uninit(dip); 500 501 px_disable_err_intr(px_p); 502 px_fm_detach(px_p); 503 px_pec_detach(px_p); 504 px_pwr_teardown(dip); 505 pwr_common_teardown(dip); 506 px_msi_detach(px_p); 507 px_msiq_detach(px_p); 508 px_mmu_detach(px_p); 509 px_ib_detach(px_p); 510 if (px_lib_dev_fini(dip) != DDI_SUCCESS) { 511 DBG(DBG_DETACH, dip, "px_lib_dev_fini failed\n"); 512 } 513 514 /* 515 * Free the px soft state structure and the rest of the 516 * resources it's using. 517 */ 518 px_free_props(px_p); 519 pcie_rc_fini_bus(dip); 520 px_dbg_detach(dip, &px_p->px_dbg_hdl); 521 mutex_exit(&px_p->px_mutex); 522 mutex_destroy(&px_p->px_mutex); 523 524 px_p->px_dev_hdl = NULL; 525 ddi_soft_state_free(px_state_p, instance); 526 527 return (DDI_SUCCESS); 528 529 case DDI_SUSPEND: 530 if (pcie_pwr_suspend(dip) != DDI_SUCCESS) { 531 mutex_exit(&px_p->px_mutex); 532 return (DDI_FAILURE); 533 } 534 if ((ret = px_lib_suspend(dip)) == DDI_SUCCESS) 535 px_p->px_state = PX_SUSPENDED; 536 mutex_exit(&px_p->px_mutex); 537 538 return (ret); 539 540 default: 541 DBG(DBG_DETACH, dip, "unsupported detach op\n"); 542 mutex_exit(&px_p->px_mutex); 543 return (DDI_FAILURE); 544 } 545 } 546 547 static int 548 px_enable_err_intr(px_t *px_p) 549 { 550 /* Add FMA Callback handler for failed PIO Loads */ 551 px_fm_cb_enable(px_p); 552 553 /* Add Common Block mondo handler */ 554 if (px_cb_add_intr(&px_p->px_cb_fault) != DDI_SUCCESS) 555 goto cb_bad; 556 557 /* Add PEU Block Mondo Handler */ 558 if (px_err_add_intr(&px_p->px_fault) != DDI_SUCCESS) 559 goto peu_bad; 560 561 /* Enable interrupt handler for PCIE Fabric Error Messages */ 562 if (px_pec_msg_add_intr(px_p) != DDI_SUCCESS) 563 goto msg_bad; 564 565 return (DDI_SUCCESS); 566 567 msg_bad: 568 px_err_rem_intr(&px_p->px_fault); 569 peu_bad: 570 px_cb_rem_intr(&px_p->px_cb_fault); 571 cb_bad: 572 px_fm_cb_disable(px_p); 573 574 return (DDI_FAILURE); 575 } 576 577 static void 578 px_disable_err_intr(px_t *px_p) 579 { 580 px_pec_msg_rem_intr(px_p); 581 px_err_rem_intr(&px_p->px_fault); 582 px_cb_rem_intr(&px_p->px_cb_fault); 583 px_fm_cb_disable(px_p); 584 } 585 586 int 587 px_cb_attach(px_t *px_p) 588 { 589 px_fault_t *fault_p = &px_p->px_cb_fault; 590 dev_info_t *dip = px_p->px_dip; 591 sysino_t sysino; 592 593 if (px_lib_intr_devino_to_sysino(dip, 594 px_p->px_inos[PX_INTR_XBC], &sysino) != DDI_SUCCESS) 595 return (DDI_FAILURE); 596 597 fault_p->px_fh_dip = dip; 598 fault_p->px_fh_sysino = sysino; 599 fault_p->px_err_func = px_err_cb_intr; 600 fault_p->px_intr_ino = px_p->px_inos[PX_INTR_XBC]; 601 602 return (DDI_SUCCESS); 603 } 604 605 /* 606 * power management related initialization specific to px 607 * called by px_attach() 608 */ 609 static int 610 px_pwr_setup(dev_info_t *dip) 611 { 612 pcie_pwr_t *pwr_p; 613 int instance = ddi_get_instance(dip); 614 px_t *px_p = INST_TO_STATE(instance); 615 ddi_intr_handle_impl_t hdl; 616 617 ASSERT(PCIE_PMINFO(dip)); 618 pwr_p = PCIE_NEXUS_PMINFO(dip); 619 ASSERT(pwr_p); 620 621 /* 622 * indicate support LDI (Layered Driver Interface) 623 * Create the property, if it is not already there 624 */ 625 if (!ddi_prop_exists(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS, 626 DDI_KERNEL_IOCTL)) { 627 if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 628 DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS) { 629 DBG(DBG_PWR, dip, "can't create kernel ioctl prop\n"); 630 return (DDI_FAILURE); 631 } 632 } 633 /* No support for device PM. We are always at full power */ 634 pwr_p->pwr_func_lvl = PM_LEVEL_D0; 635 636 mutex_init(&px_p->px_l23ready_lock, NULL, MUTEX_DRIVER, 637 DDI_INTR_PRI(px_pwr_pil)); 638 cv_init(&px_p->px_l23ready_cv, NULL, CV_DRIVER, NULL); 639 640 /* Initialize handle */ 641 bzero(&hdl, sizeof (ddi_intr_handle_impl_t)); 642 hdl.ih_cb_arg1 = px_p; 643 hdl.ih_ver = DDI_INTR_VERSION; 644 hdl.ih_state = DDI_IHDL_STATE_ALLOC; 645 hdl.ih_dip = dip; 646 hdl.ih_pri = px_pwr_pil; 647 648 /* Add PME_TO_ACK message handler */ 649 hdl.ih_cb_func = (ddi_intr_handler_t *)px_pmeq_intr; 650 if (px_add_msiq_intr(dip, dip, &hdl, MSG_REC, 651 (msgcode_t)PCIE_PME_ACK_MSG, -1, 652 &px_p->px_pm_msiq_id) != DDI_SUCCESS) { 653 DBG(DBG_PWR, dip, "px_pwr_setup: couldn't add " 654 " PME_TO_ACK intr\n"); 655 goto pwr_setup_err1; 656 } 657 px_lib_msg_setmsiq(dip, PCIE_PME_ACK_MSG, px_p->px_pm_msiq_id); 658 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_VALID); 659 660 if (px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum, 661 px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil, 662 PX_INTR_STATE_ENABLE, MSG_REC, PCIE_PME_ACK_MSG) != DDI_SUCCESS) { 663 DBG(DBG_PWR, dip, "px_pwr_setup: PME_TO_ACK update interrupt" 664 " state failed\n"); 665 goto px_pwrsetup_err_state; 666 } 667 668 return (DDI_SUCCESS); 669 670 px_pwrsetup_err_state: 671 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID); 672 (void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG, 673 px_p->px_pm_msiq_id); 674 pwr_setup_err1: 675 mutex_destroy(&px_p->px_l23ready_lock); 676 cv_destroy(&px_p->px_l23ready_cv); 677 678 return (DDI_FAILURE); 679 } 680 681 /* 682 * undo whatever is done in px_pwr_setup. called by px_detach() 683 */ 684 static void 685 px_pwr_teardown(dev_info_t *dip) 686 { 687 int instance = ddi_get_instance(dip); 688 px_t *px_p = INST_TO_STATE(instance); 689 ddi_intr_handle_impl_t hdl; 690 691 if (!PCIE_PMINFO(dip) || !PCIE_NEXUS_PMINFO(dip)) 692 return; 693 694 /* Initialize handle */ 695 bzero(&hdl, sizeof (ddi_intr_handle_impl_t)); 696 hdl.ih_ver = DDI_INTR_VERSION; 697 hdl.ih_state = DDI_IHDL_STATE_ALLOC; 698 hdl.ih_dip = dip; 699 hdl.ih_pri = px_pwr_pil; 700 701 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID); 702 (void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG, 703 px_p->px_pm_msiq_id); 704 705 (void) px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum, 706 px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil, 707 PX_INTR_STATE_DISABLE, MSG_REC, PCIE_PME_ACK_MSG); 708 709 px_p->px_pm_msiq_id = (msiqid_t)-1; 710 711 cv_destroy(&px_p->px_l23ready_cv); 712 mutex_destroy(&px_p->px_l23ready_lock); 713 } 714 715 /* bus driver entry points */ 716 717 /* 718 * bus map entry point: 719 * 720 * if map request is for an rnumber 721 * get the corresponding regspec from device node 722 * build a new regspec in our parent's format 723 * build a new map_req with the new regspec 724 * call up the tree to complete the mapping 725 */ 726 int 727 px_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, 728 off_t off, off_t len, caddr_t *addrp) 729 { 730 px_t *px_p = DIP_TO_STATE(dip); 731 struct regspec p_regspec; 732 ddi_map_req_t p_mapreq; 733 int reglen, rval, r_no; 734 pci_regspec_t reloc_reg, *rp = &reloc_reg; 735 736 DBG(DBG_MAP, dip, "rdip=%s%d:", 737 ddi_driver_name(rdip), ddi_get_instance(rdip)); 738 739 if (mp->map_flags & DDI_MF_USER_MAPPING) 740 return (DDI_ME_UNIMPLEMENTED); 741 742 switch (mp->map_type) { 743 case DDI_MT_REGSPEC: 744 reloc_reg = *(pci_regspec_t *)mp->map_obj.rp; /* dup whole */ 745 break; 746 747 case DDI_MT_RNUMBER: 748 r_no = mp->map_obj.rnumber; 749 DBG(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no); 750 751 if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS, 752 "reg", (caddr_t)&rp, ®len) != DDI_SUCCESS) 753 return (DDI_ME_RNUMBER_RANGE); 754 755 if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) { 756 kmem_free(rp, reglen); 757 return (DDI_ME_RNUMBER_RANGE); 758 } 759 rp += r_no; 760 break; 761 762 default: 763 return (DDI_ME_INVAL); 764 } 765 DBG(DBG_MAP | DBG_CONT, dip, "\n"); 766 767 if ((rp->pci_phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) { 768 /* 769 * There may be a need to differentiate between PCI 770 * and PCI-Ex devices so the following range check is 771 * done correctly, depending on the implementation of 772 * pcieb bridge nexus driver. 773 */ 774 if ((off >= PCIE_CONF_HDR_SIZE) || 775 (len > PCIE_CONF_HDR_SIZE) || 776 (off + len > PCIE_CONF_HDR_SIZE)) 777 return (DDI_ME_INVAL); 778 /* 779 * the following function returning a DDI_FAILURE assumes 780 * that there are no virtual config space access services 781 * defined in this layer. Otherwise it is availed right 782 * here and we return. 783 */ 784 rval = px_lib_map_vconfig(dip, mp, off, rp, addrp); 785 if (rval == DDI_SUCCESS) 786 goto done; 787 } 788 789 /* 790 * No virtual config space services or we are mapping 791 * a region of memory mapped config/IO/memory space, so proceed 792 * to the parent. 793 */ 794 795 /* relocate within 64-bit pci space through "assigned-addresses" */ 796 if (rval = px_reloc_reg(dip, rdip, px_p, rp)) 797 goto done; 798 799 if (len) /* adjust regspec according to mapping request */ 800 rp->pci_size_low = len; /* MIN ? */ 801 rp->pci_phys_low += off; 802 803 /* translate relocated pci regspec into parent space through "ranges" */ 804 if (rval = px_xlate_reg(px_p, rp, &p_regspec)) 805 goto done; 806 807 p_mapreq = *mp; /* dup the whole structure */ 808 p_mapreq.map_type = DDI_MT_REGSPEC; 809 p_mapreq.map_obj.rp = &p_regspec; 810 px_lib_map_attr_check(&p_mapreq); 811 rval = ddi_map(dip, &p_mapreq, 0, 0, addrp); 812 813 if (rval == DDI_SUCCESS) { 814 /* 815 * Set-up access functions for FM access error capable drivers. 816 */ 817 if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip))) 818 px_fm_acc_setup(mp, rdip, rp); 819 } 820 821 done: 822 if (mp->map_type == DDI_MT_RNUMBER) 823 kmem_free(rp - r_no, reglen); 824 825 return (rval); 826 } 827 828 /* 829 * bus dma map entry point 830 * return value: 831 * DDI_DMA_PARTIAL_MAP 1 832 * DDI_DMA_MAPOK 0 833 * DDI_DMA_MAPPED 0 834 * DDI_DMA_NORESOURCES -1 835 * DDI_DMA_NOMAPPING -2 836 * DDI_DMA_TOOBIG -3 837 */ 838 int 839 px_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq, 840 ddi_dma_handle_t *handlep) 841 { 842 px_t *px_p = DIP_TO_STATE(dip); 843 px_mmu_t *mmu_p = px_p->px_mmu_p; 844 ddi_dma_impl_t *mp; 845 int ret; 846 847 DBG(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n", 848 ddi_driver_name(rdip), ddi_get_instance(rdip), 849 handlep ? "alloc" : "advisory"); 850 851 if (!(mp = px_dma_lmts2hdl(dip, rdip, mmu_p, dmareq))) 852 return (DDI_DMA_NORESOURCES); 853 if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING) 854 return (DDI_DMA_NOMAPPING); 855 if (ret = px_dma_type(px_p, dmareq, mp)) 856 goto freehandle; 857 if (ret = px_dma_pfn(px_p, dmareq, mp)) 858 goto freehandle; 859 860 switch (PX_DMA_TYPE(mp)) { 861 case PX_DMAI_FLAGS_DVMA: /* LINTED E_EQUALITY_NOT_ASSIGNMENT */ 862 if ((ret = px_dvma_win(px_p, dmareq, mp)) || !handlep) 863 goto freehandle; 864 if (!PX_DMA_CANCACHE(mp)) { /* try fast track */ 865 if (PX_DMA_CANFAST(mp)) { 866 if (!px_dvma_map_fast(mmu_p, mp)) 867 break; 868 /* LINTED E_NOP_ELSE_STMT */ 869 } else { 870 PX_DVMA_FASTTRAK_PROF(mp); 871 } 872 } 873 if (ret = px_dvma_map(mp, dmareq, mmu_p)) 874 goto freehandle; 875 break; 876 case PX_DMAI_FLAGS_PTP: /* LINTED E_EQUALITY_NOT_ASSIGNMENT */ 877 if ((ret = px_dma_physwin(px_p, dmareq, mp)) || !handlep) 878 goto freehandle; 879 break; 880 case PX_DMAI_FLAGS_BYPASS: 881 default: 882 cmn_err(CE_PANIC, "%s%d: px_dma_setup: bad dma type 0x%x", 883 ddi_driver_name(rdip), ddi_get_instance(rdip), 884 PX_DMA_TYPE(mp)); 885 /*NOTREACHED*/ 886 } 887 *handlep = (ddi_dma_handle_t)mp; 888 mp->dmai_flags |= PX_DMAI_FLAGS_INUSE; 889 px_dump_dma_handle(DBG_DMA_MAP, dip, mp); 890 891 return ((mp->dmai_nwin == 1) ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP); 892 freehandle: 893 if (ret == DDI_DMA_NORESOURCES) 894 px_dma_freemp(mp); /* don't run_callback() */ 895 else 896 (void) px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp); 897 return (ret); 898 } 899 900 901 /* 902 * bus dma alloc handle entry point: 903 */ 904 int 905 px_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp, 906 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 907 { 908 px_t *px_p = DIP_TO_STATE(dip); 909 ddi_dma_impl_t *mp; 910 int rval; 911 912 DBG(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n", 913 ddi_driver_name(rdip), ddi_get_instance(rdip)); 914 915 if (attrp->dma_attr_version != DMA_ATTR_V0) 916 return (DDI_DMA_BADATTR); 917 918 if (!(mp = px_dma_allocmp(dip, rdip, waitfp, arg))) 919 return (DDI_DMA_NORESOURCES); 920 921 /* 922 * Save requestor's information 923 */ 924 mp->dmai_attr = *attrp; /* whole object - augmented later */ 925 *PX_DEV_ATTR(mp) = *attrp; /* whole object - device orig attr */ 926 DBG(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp); 927 928 /* check and convert dma attributes to handle parameters */ 929 if (rval = px_dma_attr2hdl(px_p, mp)) { 930 px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp); 931 *handlep = NULL; 932 return (rval); 933 } 934 *handlep = (ddi_dma_handle_t)mp; 935 return (DDI_SUCCESS); 936 } 937 938 939 /* 940 * bus dma free handle entry point: 941 */ 942 /*ARGSUSED*/ 943 int 944 px_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 945 { 946 DBG(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n", 947 ddi_driver_name(rdip), ddi_get_instance(rdip), handle); 948 px_dma_freemp((ddi_dma_impl_t *)handle); 949 950 if (px_kmem_clid) { 951 DBG(DBG_DMA_FREEH, dip, "run handle callback\n"); 952 ddi_run_callback(&px_kmem_clid); 953 } 954 return (DDI_SUCCESS); 955 } 956 957 958 /* 959 * bus dma bind handle entry point: 960 */ 961 int 962 px_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 963 ddi_dma_handle_t handle, ddi_dma_req_t *dmareq, 964 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 965 { 966 px_t *px_p = DIP_TO_STATE(dip); 967 px_mmu_t *mmu_p = px_p->px_mmu_p; 968 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 969 int ret; 970 971 DBG(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n", 972 ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq); 973 974 if (mp->dmai_flags & PX_DMAI_FLAGS_INUSE) 975 return (DDI_DMA_INUSE); 976 977 ASSERT((mp->dmai_flags & ~PX_DMAI_FLAGS_PRESERVE) == 0); 978 mp->dmai_flags |= PX_DMAI_FLAGS_INUSE; 979 980 if (ret = px_dma_type(px_p, dmareq, mp)) 981 goto err; 982 if (ret = px_dma_pfn(px_p, dmareq, mp)) 983 goto err; 984 985 switch (PX_DMA_TYPE(mp)) { 986 case PX_DMAI_FLAGS_DVMA: 987 if (ret = px_dvma_win(px_p, dmareq, mp)) 988 goto map_err; 989 if (!PX_DMA_CANCACHE(mp)) { /* try fast track */ 990 if (PX_DMA_CANFAST(mp)) { 991 if (!px_dvma_map_fast(mmu_p, mp)) 992 goto mapped; /*LINTED E_NOP_ELSE_STMT*/ 993 } else { 994 PX_DVMA_FASTTRAK_PROF(mp); 995 } 996 } 997 if (ret = px_dvma_map(mp, dmareq, mmu_p)) 998 goto map_err; 999 mapped: 1000 *ccountp = 1; 1001 MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size); 1002 mp->dmai_ncookies = 1; 1003 mp->dmai_curcookie = 1; 1004 break; 1005 case PX_DMAI_FLAGS_BYPASS: 1006 case PX_DMAI_FLAGS_PTP: 1007 if (ret = px_dma_physwin(px_p, dmareq, mp)) 1008 goto map_err; 1009 *ccountp = PX_WINLST(mp)->win_ncookies; 1010 *cookiep = 1011 *(ddi_dma_cookie_t *)(PX_WINLST(mp) + 1); /* wholeobj */ 1012 /* 1013 * mp->dmai_ncookies and mp->dmai_curcookie are set by 1014 * px_dma_physwin(). 1015 */ 1016 break; 1017 default: 1018 cmn_err(CE_PANIC, "%s%d: px_dma_bindhdl(%p): bad dma type", 1019 ddi_driver_name(rdip), ddi_get_instance(rdip), mp); 1020 /*NOTREACHED*/ 1021 } 1022 DBG(DBG_DMA_BINDH, dip, "cookie %" PRIx64 "+%x\n", 1023 cookiep->dmac_address, cookiep->dmac_size); 1024 px_dump_dma_handle(DBG_DMA_MAP, dip, mp); 1025 1026 /* insert dma handle into FMA cache */ 1027 if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) 1028 mp->dmai_error.err_cf = px_err_dma_hdl_check; 1029 1030 return (mp->dmai_nwin == 1 ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP); 1031 map_err: 1032 px_dma_freepfn(mp); 1033 err: 1034 mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE; 1035 return (ret); 1036 } 1037 1038 1039 /* 1040 * bus dma unbind handle entry point: 1041 */ 1042 /*ARGSUSED*/ 1043 int 1044 px_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 1045 { 1046 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 1047 px_t *px_p = DIP_TO_STATE(dip); 1048 px_mmu_t *mmu_p = px_p->px_mmu_p; 1049 1050 DBG(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n", 1051 ddi_driver_name(rdip), ddi_get_instance(rdip), handle); 1052 if ((mp->dmai_flags & PX_DMAI_FLAGS_INUSE) == 0) { 1053 DBG(DBG_DMA_UNBINDH, dip, "handle not inuse\n"); 1054 return (DDI_FAILURE); 1055 } 1056 1057 mp->dmai_error.err_cf = NULL; 1058 1059 /* 1060 * Here if the handle is using the iommu. Unload all the iommu 1061 * translations. 1062 */ 1063 switch (PX_DMA_TYPE(mp)) { 1064 case PX_DMAI_FLAGS_DVMA: 1065 px_mmu_unmap_window(mmu_p, mp); 1066 px_dvma_unmap(mmu_p, mp); 1067 px_dma_freepfn(mp); 1068 break; 1069 case PX_DMAI_FLAGS_BYPASS: 1070 case PX_DMAI_FLAGS_PTP: 1071 px_dma_freewin(mp); 1072 break; 1073 default: 1074 cmn_err(CE_PANIC, "%s%d: px_dma_unbindhdl:bad dma type %p", 1075 ddi_driver_name(rdip), ddi_get_instance(rdip), mp); 1076 /*NOTREACHED*/ 1077 } 1078 if (mmu_p->mmu_dvma_clid != 0) { 1079 DBG(DBG_DMA_UNBINDH, dip, "run dvma callback\n"); 1080 ddi_run_callback(&mmu_p->mmu_dvma_clid); 1081 } 1082 if (px_kmem_clid) { 1083 DBG(DBG_DMA_UNBINDH, dip, "run handle callback\n"); 1084 ddi_run_callback(&px_kmem_clid); 1085 } 1086 mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE; 1087 mp->dmai_ncookies = 0; 1088 mp->dmai_curcookie = 0; 1089 1090 return (DDI_SUCCESS); 1091 } 1092 1093 /* 1094 * bus dma win entry point: 1095 */ 1096 int 1097 px_dma_win(dev_info_t *dip, dev_info_t *rdip, 1098 ddi_dma_handle_t handle, uint_t win, off_t *offp, 1099 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 1100 { 1101 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 1102 int ret; 1103 1104 DBG(DBG_DMA_WIN, dip, "rdip=%s%d\n", 1105 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1106 1107 px_dump_dma_handle(DBG_DMA_WIN, dip, mp); 1108 if (win >= mp->dmai_nwin) { 1109 DBG(DBG_DMA_WIN, dip, "%x out of range\n", win); 1110 return (DDI_FAILURE); 1111 } 1112 1113 switch (PX_DMA_TYPE(mp)) { 1114 case PX_DMAI_FLAGS_DVMA: 1115 if (win != PX_DMA_CURWIN(mp)) { 1116 px_t *px_p = DIP_TO_STATE(dip); 1117 px_mmu_t *mmu_p = px_p->px_mmu_p; 1118 px_mmu_unmap_window(mmu_p, mp); 1119 1120 /* map_window sets dmai_mapping/size/offset */ 1121 px_mmu_map_window(mmu_p, mp, win); 1122 if ((ret = px_mmu_map_window(mmu_p, 1123 mp, win)) != DDI_SUCCESS) 1124 return (ret); 1125 } 1126 if (cookiep) 1127 MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, 1128 mp->dmai_size); 1129 if (ccountp) 1130 *ccountp = 1; 1131 mp->dmai_ncookies = 1; 1132 mp->dmai_curcookie = 1; 1133 break; 1134 case PX_DMAI_FLAGS_PTP: 1135 case PX_DMAI_FLAGS_BYPASS: { 1136 int i; 1137 ddi_dma_cookie_t *ck_p; 1138 px_dma_win_t *win_p = mp->dmai_winlst; 1139 1140 for (i = 0; i < win; win_p = win_p->win_next, i++) {}; 1141 ck_p = (ddi_dma_cookie_t *)(win_p + 1); 1142 *cookiep = *ck_p; 1143 mp->dmai_offset = win_p->win_offset; 1144 mp->dmai_size = win_p->win_size; 1145 mp->dmai_mapping = ck_p->dmac_laddress; 1146 mp->dmai_cookie = ck_p + 1; 1147 win_p->win_curseg = 0; 1148 if (ccountp) 1149 *ccountp = win_p->win_ncookies; 1150 mp->dmai_ncookies = win_p->win_ncookies; 1151 mp->dmai_curcookie = 1; 1152 } 1153 break; 1154 default: 1155 cmn_err(CE_WARN, "%s%d: px_dma_win:bad dma type 0x%x", 1156 ddi_driver_name(rdip), ddi_get_instance(rdip), 1157 PX_DMA_TYPE(mp)); 1158 return (DDI_FAILURE); 1159 } 1160 if (cookiep) 1161 DBG(DBG_DMA_WIN, dip, 1162 "cookie - dmac_address=%x dmac_size=%x\n", 1163 cookiep->dmac_address, cookiep->dmac_size); 1164 if (offp) 1165 *offp = (off_t)mp->dmai_offset; 1166 if (lenp) 1167 *lenp = mp->dmai_size; 1168 return (DDI_SUCCESS); 1169 } 1170 1171 #ifdef DEBUG 1172 static char *px_dmactl_str[] = { 1173 "DDI_DMA_FREE", 1174 "DDI_DMA_SYNC", 1175 "DDI_DMA_HTOC", 1176 "DDI_DMA_KVADDR", 1177 "DDI_DMA_MOVWIN", 1178 "DDI_DMA_REPWIN", 1179 "DDI_DMA_GETERR", 1180 "DDI_DMA_COFF", 1181 "DDI_DMA_NEXTWIN", 1182 "DDI_DMA_NEXTSEG", 1183 "DDI_DMA_SEGTOC", 1184 "DDI_DMA_RESERVE", 1185 "DDI_DMA_RELEASE", 1186 "DDI_DMA_RESETH", 1187 "DDI_DMA_CKSYNC", 1188 "DDI_DMA_IOPB_ALLOC", 1189 "DDI_DMA_IOPB_FREE", 1190 "DDI_DMA_SMEM_ALLOC", 1191 "DDI_DMA_SMEM_FREE", 1192 "DDI_DMA_SET_SBUS64" 1193 }; 1194 #endif /* DEBUG */ 1195 1196 /* 1197 * bus dma control entry point: 1198 */ 1199 /*ARGSUSED*/ 1200 int 1201 px_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 1202 enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp, 1203 uint_t cache_flags) 1204 { 1205 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 1206 1207 #ifdef DEBUG 1208 DBG(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", px_dmactl_str[cmd], 1209 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1210 #endif /* DEBUG */ 1211 1212 switch (cmd) { 1213 case DDI_DMA_FREE: 1214 (void) px_dma_unbindhdl(dip, rdip, handle); 1215 (void) px_dma_freehdl(dip, rdip, handle); 1216 return (DDI_SUCCESS); 1217 case DDI_DMA_RESERVE: { 1218 px_t *px_p = DIP_TO_STATE(dip); 1219 return (px_fdvma_reserve(dip, rdip, px_p, 1220 (ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp)); 1221 } 1222 case DDI_DMA_RELEASE: { 1223 px_t *px_p = DIP_TO_STATE(dip); 1224 return (px_fdvma_release(dip, px_p, mp)); 1225 } 1226 default: 1227 break; 1228 } 1229 1230 switch (PX_DMA_TYPE(mp)) { 1231 case PX_DMAI_FLAGS_DVMA: 1232 return (px_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp, 1233 cache_flags)); 1234 case PX_DMAI_FLAGS_PTP: 1235 case PX_DMAI_FLAGS_BYPASS: 1236 return (px_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp, 1237 cache_flags)); 1238 default: 1239 cmn_err(CE_PANIC, "%s%d: px_dma_ctlops(%x):bad dma type %x", 1240 ddi_driver_name(rdip), ddi_get_instance(rdip), cmd, 1241 mp->dmai_flags); 1242 /*NOTREACHED*/ 1243 } 1244 return (0); 1245 } 1246 1247 /* 1248 * control ops entry point: 1249 * 1250 * Requests handled completely: 1251 * DDI_CTLOPS_INITCHILD see init_child() for details 1252 * DDI_CTLOPS_UNINITCHILD 1253 * DDI_CTLOPS_REPORTDEV see report_dev() for details 1254 * DDI_CTLOPS_IOMIN cache line size if streaming otherwise 1 1255 * DDI_CTLOPS_REGSIZE 1256 * DDI_CTLOPS_NREGS 1257 * DDI_CTLOPS_DVMAPAGESIZE 1258 * DDI_CTLOPS_POKE 1259 * DDI_CTLOPS_PEEK 1260 * 1261 * All others passed to parent. 1262 */ 1263 int 1264 px_ctlops(dev_info_t *dip, dev_info_t *rdip, 1265 ddi_ctl_enum_t op, void *arg, void *result) 1266 { 1267 px_t *px_p = DIP_TO_STATE(dip); 1268 struct detachspec *ds; 1269 struct attachspec *as; 1270 1271 switch (op) { 1272 case DDI_CTLOPS_INITCHILD: 1273 return (px_init_child(px_p, (dev_info_t *)arg)); 1274 1275 case DDI_CTLOPS_UNINITCHILD: 1276 return (px_uninit_child(px_p, (dev_info_t *)arg)); 1277 1278 case DDI_CTLOPS_ATTACH: 1279 if (!pcie_is_child(dip, rdip)) 1280 return (DDI_SUCCESS); 1281 1282 as = (struct attachspec *)arg; 1283 switch (as->when) { 1284 case DDI_PRE: 1285 if (as->cmd == DDI_ATTACH) { 1286 DBG(DBG_PWR, dip, "PRE_ATTACH for %s@%d\n", 1287 ddi_driver_name(rdip), 1288 ddi_get_instance(rdip)); 1289 return (pcie_pm_hold(dip)); 1290 } 1291 if (as->cmd == DDI_RESUME) { 1292 DBG(DBG_PWR, dip, "PRE_RESUME for %s@%d\n", 1293 ddi_driver_name(rdip), 1294 ddi_get_instance(rdip)); 1295 1296 pcie_clear_errors(rdip); 1297 } 1298 return (DDI_SUCCESS); 1299 1300 case DDI_POST: 1301 DBG(DBG_PWR, dip, "POST_ATTACH for %s@%d\n", 1302 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1303 if (as->cmd == DDI_ATTACH && 1304 as->result != DDI_SUCCESS) { 1305 /* 1306 * Attach failed for the child device. The child 1307 * driver may have made PM calls before the 1308 * attach failed. pcie_pm_remove_child() should 1309 * cleanup PM state and holds (if any) 1310 * associated with the child device. 1311 */ 1312 return (pcie_pm_remove_child(dip, rdip)); 1313 } 1314 1315 if (as->result == DDI_SUCCESS) 1316 pf_init(rdip, (void *)px_p->px_fm_ibc, as->cmd); 1317 1318 (void) pcie_postattach_child(rdip); 1319 1320 return (DDI_SUCCESS); 1321 default: 1322 break; 1323 } 1324 break; 1325 1326 case DDI_CTLOPS_DETACH: 1327 if (!pcie_is_child(dip, rdip)) 1328 return (DDI_SUCCESS); 1329 1330 ds = (struct detachspec *)arg; 1331 switch (ds->when) { 1332 case DDI_POST: 1333 if (ds->cmd == DDI_DETACH && 1334 ds->result == DDI_SUCCESS) { 1335 DBG(DBG_PWR, dip, "POST_DETACH for %s@%d\n", 1336 ddi_driver_name(rdip), 1337 ddi_get_instance(rdip)); 1338 return (pcie_pm_remove_child(dip, rdip)); 1339 } 1340 return (DDI_SUCCESS); 1341 case DDI_PRE: 1342 pf_fini(rdip, ds->cmd); 1343 return (DDI_SUCCESS); 1344 default: 1345 break; 1346 } 1347 break; 1348 1349 case DDI_CTLOPS_REPORTDEV: 1350 if (ddi_get_parent(rdip) == dip) 1351 return (px_report_dev(rdip)); 1352 1353 (void) px_lib_fabric_sync(rdip); 1354 return (DDI_SUCCESS); 1355 1356 case DDI_CTLOPS_IOMIN: 1357 return (DDI_SUCCESS); 1358 1359 case DDI_CTLOPS_REGSIZE: 1360 *((off_t *)result) = px_get_reg_set_size(rdip, *((int *)arg)); 1361 return (*((off_t *)result) == 0 ? DDI_FAILURE : DDI_SUCCESS); 1362 1363 case DDI_CTLOPS_NREGS: 1364 *((uint_t *)result) = px_get_nreg_set(rdip); 1365 return (DDI_SUCCESS); 1366 1367 case DDI_CTLOPS_DVMAPAGESIZE: 1368 *((ulong_t *)result) = MMU_PAGE_SIZE; 1369 return (DDI_SUCCESS); 1370 1371 case DDI_CTLOPS_POKE: /* platform dependent implementation. */ 1372 return (px_lib_ctlops_poke(dip, rdip, 1373 (peekpoke_ctlops_t *)arg)); 1374 1375 case DDI_CTLOPS_PEEK: /* platform dependent implementation. */ 1376 return (px_lib_ctlops_peek(dip, rdip, 1377 (peekpoke_ctlops_t *)arg, result)); 1378 1379 case DDI_CTLOPS_POWER: 1380 default: 1381 break; 1382 } 1383 1384 /* 1385 * Now pass the request up to our parent. 1386 */ 1387 DBG(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n", 1388 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1389 return (ddi_ctlops(dip, rdip, op, arg, result)); 1390 } 1391 1392 /* ARGSUSED */ 1393 int 1394 px_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op, 1395 ddi_intr_handle_impl_t *hdlp, void *result) 1396 { 1397 int intr_types, ret = DDI_SUCCESS; 1398 px_t *px_p = DIP_TO_STATE(dip); 1399 1400 DBG(DBG_INTROPS, dip, "px_intr_ops: rdip=%s%d\n", 1401 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1402 1403 /* Process DDI_INTROP_SUPPORTED_TYPES request here */ 1404 if (intr_op == DDI_INTROP_SUPPORTED_TYPES) { 1405 *(int *)result = i_ddi_get_intx_nintrs(rdip) ? 1406 DDI_INTR_TYPE_FIXED : 0; 1407 1408 if ((pci_msi_get_supported_type(rdip, 1409 &intr_types)) == DDI_SUCCESS) { 1410 /* 1411 * Double check supported interrupt types vs. 1412 * what the host bridge supports. 1413 */ 1414 *(int *)result |= intr_types; 1415 } 1416 1417 *(int *)result &= 1418 (px_force_intx_support ? 1419 (px_p->px_supp_intr_types | DDI_INTR_TYPE_FIXED) : 1420 px_p->px_supp_intr_types); 1421 return (*(int *)result ? DDI_SUCCESS : DDI_FAILURE); 1422 } 1423 1424 /* 1425 * PCI-E nexus driver supports fixed, MSI and MSI-X interrupts. 1426 * Return failure if interrupt type is not supported. 1427 */ 1428 switch (hdlp->ih_type) { 1429 case DDI_INTR_TYPE_FIXED: 1430 ret = px_intx_ops(dip, rdip, intr_op, hdlp, result); 1431 break; 1432 case DDI_INTR_TYPE_MSI: 1433 case DDI_INTR_TYPE_MSIX: 1434 ret = px_msix_ops(dip, rdip, intr_op, hdlp, result); 1435 break; 1436 default: 1437 ret = DDI_ENOTSUP; 1438 break; 1439 } 1440 1441 return (ret); 1442 } 1443 1444 static void 1445 px_set_mps(px_t *px_p) 1446 { 1447 dev_info_t *dip; 1448 pcie_bus_t *bus_p; 1449 int max_supported; 1450 1451 dip = px_p->px_dip; 1452 bus_p = PCIE_DIP2BUS(dip); 1453 1454 bus_p->bus_mps = -1; 1455 1456 if (pcie_root_port(dip) == DDI_FAILURE) { 1457 if (px_lib_get_root_complex_mps(px_p, dip, 1458 &max_supported) < 0) { 1459 1460 DBG(DBG_MPS, dip, "MPS: Can not get RC MPS\n"); 1461 return; 1462 } 1463 1464 DBG(DBG_MPS, dip, "MPS: Root Complex MPS Cap of = %x\n", 1465 max_supported); 1466 1467 if (pcie_max_mps < max_supported) 1468 max_supported = pcie_max_mps; 1469 1470 (void) pcie_get_fabric_mps(dip, ddi_get_child(dip), 1471 &max_supported); 1472 1473 bus_p->bus_mps = max_supported; 1474 1475 (void) px_lib_set_root_complex_mps(px_p, dip, bus_p->bus_mps); 1476 1477 DBG(DBG_MPS, dip, "MPS: Root Complex MPS Set to = %x\n", 1478 bus_p->bus_mps); 1479 } 1480 } 1481