1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * PCI Express nexus driver interface 30 */ 31 32 #include <sys/types.h> 33 #include <sys/conf.h> /* nulldev */ 34 #include <sys/stat.h> /* devctl */ 35 #include <sys/kmem.h> 36 #include <sys/sunddi.h> 37 #include <sys/sunndi.h> 38 #include <sys/ddi_impldefs.h> 39 #include <sys/ddi_subrdefs.h> 40 #include <sys/spl.h> 41 #include <sys/epm.h> 42 #include <sys/iommutsb.h> 43 #include <sys/hotplug/pci/pcihp.h> 44 #include <sys/hotplug/pci/pciehpc.h> 45 #include "px_obj.h" 46 #include <sys/pci_tools.h> 47 #include "px_tools_ext.h" 48 #include "pcie_pwr.h" 49 50 /*LINTLIBRARY*/ 51 52 /* 53 * function prototypes for dev ops routines: 54 */ 55 static int px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 56 static int px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 57 static int px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, 58 void *arg, void **result); 59 static int px_cb_attach(px_t *); 60 static void px_cb_detach(px_t *); 61 static int px_pwr_setup(dev_info_t *dip); 62 static void px_pwr_teardown(dev_info_t *dip); 63 64 extern errorq_t *pci_target_queue; 65 66 /* 67 * function prototypes for hotplug routines: 68 */ 69 static int px_init_hotplug(px_t *px_p); 70 static int px_uninit_hotplug(dev_info_t *dip); 71 72 /* 73 * bus ops and dev ops structures: 74 */ 75 static struct bus_ops px_bus_ops = { 76 BUSO_REV, 77 px_map, 78 0, 79 0, 80 0, 81 i_ddi_map_fault, 82 px_dma_setup, 83 px_dma_allochdl, 84 px_dma_freehdl, 85 px_dma_bindhdl, 86 px_dma_unbindhdl, 87 px_lib_dma_sync, 88 px_dma_win, 89 px_dma_ctlops, 90 px_ctlops, 91 ddi_bus_prop_op, 92 ndi_busop_get_eventcookie, 93 ndi_busop_add_eventcall, 94 ndi_busop_remove_eventcall, 95 ndi_post_event, 96 NULL, 97 NULL, /* (*bus_config)(); */ 98 NULL, /* (*bus_unconfig)(); */ 99 px_fm_init_child, /* (*bus_fm_init)(); */ 100 NULL, /* (*bus_fm_fini)(); */ 101 px_bus_enter, /* (*bus_fm_access_enter)(); */ 102 px_bus_exit, /* (*bus_fm_access_fini)(); */ 103 pcie_bus_power, /* (*bus_power)(); */ 104 px_intr_ops /* (*bus_intr_op)(); */ 105 }; 106 107 extern struct cb_ops px_cb_ops; 108 109 static struct dev_ops px_ops = { 110 DEVO_REV, 111 0, 112 px_info, 113 nulldev, 114 0, 115 px_attach, 116 px_detach, 117 nodev, 118 &px_cb_ops, 119 &px_bus_ops, 120 nulldev 121 }; 122 123 /* 124 * module definitions: 125 */ 126 #include <sys/modctl.h> 127 extern struct mod_ops mod_driverops; 128 129 static struct modldrv modldrv = { 130 &mod_driverops, /* Type of module - driver */ 131 "PCI Express nexus driver %I%", /* Name of module. */ 132 &px_ops, /* driver ops */ 133 }; 134 135 static struct modlinkage modlinkage = { 136 MODREV_1, (void *)&modldrv, NULL 137 }; 138 139 /* driver soft state */ 140 void *px_state_p; 141 142 int 143 _init(void) 144 { 145 int e; 146 147 /* 148 * Initialize per-px bus soft state pointer. 149 */ 150 e = ddi_soft_state_init(&px_state_p, sizeof (px_t), 1); 151 if (e != DDI_SUCCESS) 152 return (e); 153 154 /* 155 * Install the module. 156 */ 157 e = mod_install(&modlinkage); 158 if (e != DDI_SUCCESS) 159 ddi_soft_state_fini(&px_state_p); 160 return (e); 161 } 162 163 int 164 _fini(void) 165 { 166 int e; 167 168 /* 169 * Remove the module. 170 */ 171 e = mod_remove(&modlinkage); 172 if (e != DDI_SUCCESS) 173 return (e); 174 /* 175 * Destroy pci_target_queue, and set it to NULL. 176 */ 177 if (pci_target_queue) 178 errorq_destroy(pci_target_queue); 179 180 pci_target_queue = NULL; 181 182 /* Free px soft state */ 183 ddi_soft_state_fini(&px_state_p); 184 185 return (e); 186 } 187 188 int 189 _info(struct modinfo *modinfop) 190 { 191 return (mod_info(&modlinkage, modinfop)); 192 } 193 194 /* ARGSUSED */ 195 static int 196 px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 197 { 198 int instance = getminor((dev_t)arg); 199 px_t *px_p = INST_TO_STATE(instance); 200 201 /* 202 * Allow hotplug to deal with ones it manages 203 * Hot Plug will be done later. 204 */ 205 if (px_p && (px_p->px_dev_caps & PX_HOTPLUG_CAPABLE)) 206 return (pcihp_info(dip, infocmd, arg, result)); 207 208 /* non-hotplug or not attached */ 209 switch (infocmd) { 210 case DDI_INFO_DEVT2INSTANCE: 211 *result = (void *)(intptr_t)instance; 212 return (DDI_SUCCESS); 213 214 case DDI_INFO_DEVT2DEVINFO: 215 if (px_p == NULL) 216 return (DDI_FAILURE); 217 *result = (void *)px_p->px_dip; 218 return (DDI_SUCCESS); 219 220 default: 221 return (DDI_FAILURE); 222 } 223 } 224 225 /* device driver entry points */ 226 /* 227 * attach entry point: 228 */ 229 /*ARGSUSED*/ 230 static int 231 px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 232 { 233 px_t *px_p; /* per bus state pointer */ 234 int instance = DIP_TO_INST(dip); 235 int ret = DDI_SUCCESS; 236 devhandle_t dev_hdl = NULL; 237 238 switch (cmd) { 239 case DDI_ATTACH: 240 DBG(DBG_ATTACH, dip, "DDI_ATTACH\n"); 241 242 /* 243 * Allocate and get the per-px soft state structure. 244 */ 245 if (ddi_soft_state_zalloc(px_state_p, instance) 246 != DDI_SUCCESS) { 247 cmn_err(CE_WARN, "%s%d: can't allocate px state", 248 ddi_driver_name(dip), instance); 249 goto err_bad_px_softstate; 250 } 251 px_p = INST_TO_STATE(instance); 252 px_p->px_dip = dip; 253 mutex_init(&px_p->px_mutex, NULL, MUTEX_DRIVER, NULL); 254 px_p->px_soft_state = PX_SOFT_STATE_CLOSED; 255 px_p->px_open_count = 0; 256 257 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, 258 "device_type", "pciex"); 259 /* 260 * Get key properties of the pci bridge node and 261 * determine it's type (psycho, schizo, etc ...). 262 */ 263 if (px_get_props(px_p, dip) == DDI_FAILURE) 264 goto err_bad_px_prop; 265 266 if (px_lib_dev_init(dip, &dev_hdl) != DDI_SUCCESS) 267 goto err_bad_dev_init; 268 269 /* Initialize device handle */ 270 px_p->px_dev_hdl = dev_hdl; 271 272 /* 273 * Initialize interrupt block. Note that this 274 * initialize error handling for the PEC as well. 275 */ 276 if ((ret = px_ib_attach(px_p)) != DDI_SUCCESS) 277 goto err_bad_ib; 278 279 if (px_cb_attach(px_p) != DDI_SUCCESS) 280 goto err_bad_cb; 281 282 /* 283 * Start creating the modules. 284 * Note that attach() routines should 285 * register and enable their own interrupts. 286 */ 287 288 if ((px_mmu_attach(px_p)) != DDI_SUCCESS) 289 goto err_bad_mmu; 290 291 if ((px_msiq_attach(px_p)) != DDI_SUCCESS) 292 goto err_bad_msiq; 293 294 if ((px_msi_attach(px_p)) != DDI_SUCCESS) 295 goto err_bad_msi; 296 297 if ((px_pec_attach(px_p)) != DDI_SUCCESS) 298 goto err_bad_pec; 299 300 if ((px_dma_attach(px_p)) != DDI_SUCCESS) 301 goto err_bad_dma; /* nothing to uninitialize on DMA */ 302 303 if ((px_fm_attach(px_p)) != DDI_SUCCESS) 304 goto err_bad_dma; 305 306 /* 307 * All of the error handlers have been registered 308 * by now so it's time to activate the interrupt. 309 */ 310 if ((ret = px_err_add_intr(&px_p->px_fault)) != DDI_SUCCESS) 311 goto err_bad_intr; 312 313 (void) px_init_hotplug(px_p); 314 315 /* 316 * Create the "devctl" node for hotplug and pcitool support. 317 * For non-hotplug bus, we still need ":devctl" to 318 * support DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls. 319 */ 320 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 321 PCIHP_AP_MINOR_NUM(instance, PCIHP_DEVCTL_MINOR), 322 DDI_NT_NEXUS, 0) != DDI_SUCCESS) { 323 goto err_bad_devctl_node; 324 } 325 326 if (pxtool_init(dip) != DDI_SUCCESS) 327 goto err_bad_pcitool_node; 328 329 /* 330 * power management setup. Even if it fails, attach will 331 * succeed as this is a optional feature. Since we are 332 * always at full power, this is not critical. 333 */ 334 if (pwr_common_setup(dip) != DDI_SUCCESS) { 335 DBG(DBG_PWR, dip, "pwr_common_setup failed\n"); 336 } else if (px_pwr_setup(dip) != DDI_SUCCESS) { 337 DBG(DBG_PWR, dip, "px_pwr_setup failed \n"); 338 pwr_common_teardown(dip); 339 } 340 341 /* 342 * add cpr callback 343 */ 344 px_cpr_add_callb(px_p); 345 346 ddi_report_dev(dip); 347 348 px_p->px_state = PX_ATTACHED; 349 DBG(DBG_ATTACH, dip, "attach success\n"); 350 break; 351 352 err_bad_pcitool_node: 353 ddi_remove_minor_node(dip, "devctl"); 354 err_bad_devctl_node: 355 px_err_rem_intr(&px_p->px_fault); 356 err_bad_intr: 357 px_fm_detach(px_p); 358 err_bad_dma: 359 px_pec_detach(px_p); 360 err_bad_pec: 361 px_msi_detach(px_p); 362 err_bad_msi: 363 px_msiq_detach(px_p); 364 err_bad_msiq: 365 px_mmu_detach(px_p); 366 err_bad_mmu: 367 px_cb_detach(px_p); 368 err_bad_cb: 369 px_ib_detach(px_p); 370 err_bad_ib: 371 (void) px_lib_dev_fini(dip); 372 err_bad_dev_init: 373 px_free_props(px_p); 374 err_bad_px_prop: 375 mutex_destroy(&px_p->px_mutex); 376 ddi_soft_state_free(px_state_p, instance); 377 err_bad_px_softstate: 378 ret = DDI_FAILURE; 379 break; 380 381 case DDI_RESUME: 382 DBG(DBG_ATTACH, dip, "DDI_RESUME\n"); 383 384 px_p = INST_TO_STATE(instance); 385 386 mutex_enter(&px_p->px_mutex); 387 388 /* suspend might have not succeeded */ 389 if (px_p->px_state != PX_SUSPENDED) { 390 DBG(DBG_ATTACH, px_p->px_dip, 391 "instance NOT suspended\n"); 392 ret = DDI_FAILURE; 393 break; 394 } 395 396 px_msiq_resume(px_p); 397 px_lib_resume(dip); 398 (void) pcie_pwr_resume(dip); 399 px_p->px_state = PX_ATTACHED; 400 401 mutex_exit(&px_p->px_mutex); 402 403 break; 404 default: 405 DBG(DBG_ATTACH, dip, "unsupported attach op\n"); 406 ret = DDI_FAILURE; 407 break; 408 } 409 410 return (ret); 411 } 412 413 /* 414 * detach entry point: 415 */ 416 /*ARGSUSED*/ 417 static int 418 px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 419 { 420 int instance = ddi_get_instance(dip); 421 px_t *px_p = INST_TO_STATE(instance); 422 int ret; 423 424 /* 425 * Make sure we are currently attached 426 */ 427 if (px_p->px_state != PX_ATTACHED) { 428 DBG(DBG_DETACH, dip, "Instance not attached\n"); 429 return (DDI_FAILURE); 430 } 431 432 mutex_enter(&px_p->px_mutex); 433 434 switch (cmd) { 435 case DDI_DETACH: 436 DBG(DBG_DETACH, dip, "DDI_DETACH\n"); 437 438 /* 439 * remove cpr callback 440 */ 441 px_cpr_rem_callb(px_p); 442 443 if (px_p->px_dev_caps & PX_HOTPLUG_CAPABLE) 444 if (px_uninit_hotplug(dip) != DDI_SUCCESS) { 445 mutex_exit(&px_p->px_mutex); 446 return (DDI_FAILURE); 447 } 448 449 /* 450 * things which used to be done in obj_destroy 451 * are now in-lined here. 452 */ 453 454 px_p->px_state = PX_DETACHED; 455 456 pxtool_uninit(dip); 457 458 ddi_remove_minor_node(dip, "devctl"); 459 px_err_rem_intr(&px_p->px_fault); 460 px_fm_detach(px_p); 461 px_pec_detach(px_p); 462 px_pwr_teardown(dip); 463 pwr_common_teardown(dip); 464 px_msi_detach(px_p); 465 px_msiq_detach(px_p); 466 px_mmu_detach(px_p); 467 px_cb_detach(px_p); 468 px_ib_detach(px_p); 469 (void) px_lib_dev_fini(dip); 470 471 /* 472 * Free the px soft state structure and the rest of the 473 * resources it's using. 474 */ 475 px_free_props(px_p); 476 mutex_exit(&px_p->px_mutex); 477 mutex_destroy(&px_p->px_mutex); 478 479 /* Free the interrupt-priorities prop if we created it. */ { 480 int len; 481 482 if (ddi_getproplen(DDI_DEV_T_ANY, dip, 483 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 484 "interrupt-priorities", &len) == DDI_PROP_SUCCESS) 485 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, 486 "interrupt-priorities"); 487 } 488 489 px_p->px_dev_hdl = NULL; 490 ddi_soft_state_free(px_state_p, instance); 491 492 return (DDI_SUCCESS); 493 494 case DDI_SUSPEND: 495 if (pcie_pwr_suspend(dip) != DDI_SUCCESS) { 496 mutex_exit(&px_p->px_mutex); 497 return (DDI_FAILURE); 498 } 499 if ((ret = px_lib_suspend(dip)) == DDI_SUCCESS) 500 px_p->px_state = PX_SUSPENDED; 501 mutex_exit(&px_p->px_mutex); 502 503 return (ret); 504 505 default: 506 DBG(DBG_DETACH, dip, "unsupported detach op\n"); 507 mutex_exit(&px_p->px_mutex); 508 return (DDI_FAILURE); 509 } 510 } 511 512 int 513 px_cb_attach(px_t *px_p) 514 { 515 px_fault_t *fault_p = &px_p->px_cb_fault; 516 dev_info_t *dip = px_p->px_dip; 517 sysino_t sysino; 518 519 if (px_lib_intr_devino_to_sysino(dip, 520 px_p->px_inos[PX_INTR_XBC], &sysino) != DDI_SUCCESS) 521 return (DDI_FAILURE); 522 523 fault_p->px_fh_dip = dip; 524 fault_p->px_fh_sysino = sysino; 525 fault_p->px_err_func = px_err_cb_intr; 526 fault_p->px_intr_ino = px_p->px_inos[PX_INTR_XBC]; 527 528 return (px_cb_add_intr(fault_p)); 529 } 530 531 void 532 px_cb_detach(px_t *px_p) 533 { 534 px_cb_rem_intr(&px_p->px_cb_fault); 535 } 536 537 /* 538 * power management related initialization specific to px 539 * called by px_attach() 540 */ 541 static int 542 px_pwr_setup(dev_info_t *dip) 543 { 544 pcie_pwr_t *pwr_p; 545 int instance = ddi_get_instance(dip); 546 px_t *px_p = INST_TO_STATE(instance); 547 ddi_intr_handle_impl_t hdl; 548 549 ASSERT(PCIE_PMINFO(dip)); 550 pwr_p = PCIE_NEXUS_PMINFO(dip); 551 ASSERT(pwr_p); 552 553 /* 554 * indicate support LDI (Layered Driver Interface) 555 * Create the property, if it is not already there 556 */ 557 if (!ddi_prop_exists(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS, 558 DDI_KERNEL_IOCTL)) { 559 if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 560 DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS) { 561 DBG(DBG_PWR, dip, "can't create kernel ioctl prop\n"); 562 return (DDI_FAILURE); 563 } 564 } 565 /* No support for device PM. We are always at full power */ 566 pwr_p->pwr_func_lvl = PM_LEVEL_D0; 567 568 mutex_init(&px_p->px_l23ready_lock, NULL, MUTEX_DRIVER, 569 DDI_INTR_PRI(px_pwr_pil)); 570 cv_init(&px_p->px_l23ready_cv, NULL, CV_DRIVER, NULL); 571 572 /* Initialize handle */ 573 bzero(&hdl, sizeof (ddi_intr_handle_impl_t)); 574 hdl.ih_cb_arg1 = px_p; 575 hdl.ih_ver = DDI_INTR_VERSION; 576 hdl.ih_state = DDI_IHDL_STATE_ALLOC; 577 hdl.ih_dip = dip; 578 hdl.ih_pri = px_pwr_pil; 579 580 /* Add PME_TO_ACK message handler */ 581 hdl.ih_cb_func = (ddi_intr_handler_t *)px_pmeq_intr; 582 if (px_add_msiq_intr(dip, dip, &hdl, MSG_REC, 583 (msgcode_t)PCIE_PME_ACK_MSG, &px_p->px_pm_msiq_id) != DDI_SUCCESS) { 584 DBG(DBG_PWR, dip, "px_pwr_setup: couldn't add " 585 " PME_TO_ACK intr\n"); 586 goto pwr_setup_err1; 587 } 588 px_lib_msg_setmsiq(dip, PCIE_PME_ACK_MSG, px_p->px_pm_msiq_id); 589 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_VALID); 590 591 if (px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum, 592 px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil, 593 PX_INTR_STATE_ENABLE, MSG_REC, PCIE_PME_ACK_MSG) != DDI_SUCCESS) { 594 DBG(DBG_PWR, dip, "px_pwr_setup: PME_TO_ACK update interrupt" 595 " state failed\n"); 596 goto px_pwrsetup_err_state; 597 } 598 599 return (DDI_SUCCESS); 600 601 px_pwrsetup_err_state: 602 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID); 603 (void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG, 604 px_p->px_pm_msiq_id); 605 pwr_setup_err1: 606 mutex_destroy(&px_p->px_l23ready_lock); 607 cv_destroy(&px_p->px_l23ready_cv); 608 609 return (DDI_FAILURE); 610 } 611 612 /* 613 * undo whatever is done in px_pwr_setup. called by px_detach() 614 */ 615 static void 616 px_pwr_teardown(dev_info_t *dip) 617 { 618 int instance = ddi_get_instance(dip); 619 px_t *px_p = INST_TO_STATE(instance); 620 ddi_intr_handle_impl_t hdl; 621 622 if (!PCIE_PMINFO(dip) || !PCIE_NEXUS_PMINFO(dip)) 623 return; 624 625 /* Initialize handle */ 626 bzero(&hdl, sizeof (ddi_intr_handle_impl_t)); 627 hdl.ih_ver = DDI_INTR_VERSION; 628 hdl.ih_state = DDI_IHDL_STATE_ALLOC; 629 hdl.ih_dip = dip; 630 631 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID); 632 (void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG, 633 px_p->px_pm_msiq_id); 634 635 (void) px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum, 636 px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), px_pwr_pil, 637 PX_INTR_STATE_DISABLE, MSG_REC, PCIE_PME_ACK_MSG); 638 639 px_p->px_pm_msiq_id = (msiqid_t)-1; 640 641 cv_destroy(&px_p->px_l23ready_cv); 642 mutex_destroy(&px_p->px_l23ready_lock); 643 } 644 645 /* bus driver entry points */ 646 647 /* 648 * bus map entry point: 649 * 650 * if map request is for an rnumber 651 * get the corresponding regspec from device node 652 * build a new regspec in our parent's format 653 * build a new map_req with the new regspec 654 * call up the tree to complete the mapping 655 */ 656 int 657 px_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, 658 off_t off, off_t len, caddr_t *addrp) 659 { 660 px_t *px_p = DIP_TO_STATE(dip); 661 struct regspec p_regspec; 662 ddi_map_req_t p_mapreq; 663 int reglen, rval, r_no; 664 pci_regspec_t reloc_reg, *rp = &reloc_reg; 665 666 DBG(DBG_MAP, dip, "rdip=%s%d:", 667 ddi_driver_name(rdip), ddi_get_instance(rdip)); 668 669 if (mp->map_flags & DDI_MF_USER_MAPPING) 670 return (DDI_ME_UNIMPLEMENTED); 671 672 switch (mp->map_type) { 673 case DDI_MT_REGSPEC: 674 reloc_reg = *(pci_regspec_t *)mp->map_obj.rp; /* dup whole */ 675 break; 676 677 case DDI_MT_RNUMBER: 678 r_no = mp->map_obj.rnumber; 679 DBG(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no); 680 681 if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS, 682 "reg", (caddr_t)&rp, ®len) != DDI_SUCCESS) 683 return (DDI_ME_RNUMBER_RANGE); 684 685 if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) { 686 kmem_free(rp, reglen); 687 return (DDI_ME_RNUMBER_RANGE); 688 } 689 rp += r_no; 690 break; 691 692 default: 693 return (DDI_ME_INVAL); 694 } 695 DBG(DBG_MAP | DBG_CONT, dip, "\n"); 696 697 if ((rp->pci_phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) { 698 /* 699 * There may be a need to differentiate between PCI 700 * and PCI-Ex devices so the following range check is 701 * done correctly, depending on the implementation of 702 * px_pci bridge nexus driver. 703 */ 704 if ((off >= PCIE_CONF_HDR_SIZE) || 705 (len > PCIE_CONF_HDR_SIZE) || 706 (off + len > PCIE_CONF_HDR_SIZE)) 707 return (DDI_ME_INVAL); 708 /* 709 * the following function returning a DDI_FAILURE assumes 710 * that there are no virtual config space access services 711 * defined in this layer. Otherwise it is availed right 712 * here and we return. 713 */ 714 rval = px_lib_map_vconfig(dip, mp, off, rp, addrp); 715 if (rval == DDI_SUCCESS) 716 goto done; 717 } 718 719 /* 720 * No virtual config space services or we are mapping 721 * a region of memory mapped config/IO/memory space, so proceed 722 * to the parent. 723 */ 724 725 /* relocate within 64-bit pci space through "assigned-addresses" */ 726 if (rval = px_reloc_reg(dip, rdip, px_p, rp)) 727 goto done; 728 729 if (len) /* adjust regspec according to mapping request */ 730 rp->pci_size_low = len; /* MIN ? */ 731 rp->pci_phys_low += off; 732 733 /* translate relocated pci regspec into parent space through "ranges" */ 734 if (rval = px_xlate_reg(px_p, rp, &p_regspec)) 735 goto done; 736 737 p_mapreq = *mp; /* dup the whole structure */ 738 p_mapreq.map_type = DDI_MT_REGSPEC; 739 p_mapreq.map_obj.rp = &p_regspec; 740 px_lib_map_attr_check(&p_mapreq); 741 rval = ddi_map(dip, &p_mapreq, 0, 0, addrp); 742 743 if (rval == DDI_SUCCESS) { 744 /* 745 * Set-up access functions for FM access error capable drivers. 746 */ 747 if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) && 748 mp->map_handlep->ah_acc.devacc_attr_access != 749 DDI_DEFAULT_ACC) 750 px_fm_acc_setup(mp, rdip); 751 } 752 753 done: 754 if (mp->map_type == DDI_MT_RNUMBER) 755 kmem_free(rp - r_no, reglen); 756 757 return (rval); 758 } 759 760 /* 761 * bus dma map entry point 762 * return value: 763 * DDI_DMA_PARTIAL_MAP 1 764 * DDI_DMA_MAPOK 0 765 * DDI_DMA_MAPPED 0 766 * DDI_DMA_NORESOURCES -1 767 * DDI_DMA_NOMAPPING -2 768 * DDI_DMA_TOOBIG -3 769 */ 770 int 771 px_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq, 772 ddi_dma_handle_t *handlep) 773 { 774 px_t *px_p = DIP_TO_STATE(dip); 775 px_mmu_t *mmu_p = px_p->px_mmu_p; 776 ddi_dma_impl_t *mp; 777 int ret; 778 779 DBG(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n", 780 ddi_driver_name(rdip), ddi_get_instance(rdip), 781 handlep ? "alloc" : "advisory"); 782 783 if (!(mp = px_dma_lmts2hdl(dip, rdip, mmu_p, dmareq))) 784 return (DDI_DMA_NORESOURCES); 785 if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING) 786 return (DDI_DMA_NOMAPPING); 787 if (ret = px_dma_type(px_p, dmareq, mp)) 788 goto freehandle; 789 if (ret = px_dma_pfn(px_p, dmareq, mp)) 790 goto freehandle; 791 792 switch (PX_DMA_TYPE(mp)) { 793 case PX_DMAI_FLAGS_DVMA: /* LINTED E_EQUALITY_NOT_ASSIGNMENT */ 794 if ((ret = px_dvma_win(px_p, dmareq, mp)) || !handlep) 795 goto freehandle; 796 if (!PX_DMA_CANCACHE(mp)) { /* try fast track */ 797 if (PX_DMA_CANFAST(mp)) { 798 if (!px_dvma_map_fast(mmu_p, mp)) 799 break; 800 /* LINTED E_NOP_ELSE_STMT */ 801 } else { 802 PX_DVMA_FASTTRAK_PROF(mp); 803 } 804 } 805 if (ret = px_dvma_map(mp, dmareq, mmu_p)) 806 goto freehandle; 807 break; 808 case PX_DMAI_FLAGS_PTP: /* LINTED E_EQUALITY_NOT_ASSIGNMENT */ 809 if ((ret = px_dma_physwin(px_p, dmareq, mp)) || !handlep) 810 goto freehandle; 811 break; 812 case PX_DMAI_FLAGS_BYPASS: 813 default: 814 cmn_err(CE_PANIC, "%s%d: px_dma_setup: bad dma type 0x%x", 815 ddi_driver_name(rdip), ddi_get_instance(rdip), 816 PX_DMA_TYPE(mp)); 817 /*NOTREACHED*/ 818 } 819 *handlep = (ddi_dma_handle_t)mp; 820 mp->dmai_flags |= PX_DMAI_FLAGS_INUSE; 821 px_dump_dma_handle(DBG_DMA_MAP, dip, mp); 822 823 return ((mp->dmai_nwin == 1) ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP); 824 freehandle: 825 if (ret == DDI_DMA_NORESOURCES) 826 px_dma_freemp(mp); /* don't run_callback() */ 827 else 828 (void) px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp); 829 return (ret); 830 } 831 832 833 /* 834 * bus dma alloc handle entry point: 835 */ 836 int 837 px_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp, 838 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 839 { 840 px_t *px_p = DIP_TO_STATE(dip); 841 ddi_dma_impl_t *mp; 842 int rval; 843 844 DBG(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n", 845 ddi_driver_name(rdip), ddi_get_instance(rdip)); 846 847 if (attrp->dma_attr_version != DMA_ATTR_V0) 848 return (DDI_DMA_BADATTR); 849 850 if (!(mp = px_dma_allocmp(dip, rdip, waitfp, arg))) 851 return (DDI_DMA_NORESOURCES); 852 853 /* 854 * Save requestor's information 855 */ 856 mp->dmai_attr = *attrp; /* whole object - augmented later */ 857 *PX_DEV_ATTR(mp) = *attrp; /* whole object - device orig attr */ 858 DBG(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp); 859 860 /* check and convert dma attributes to handle parameters */ 861 if (rval = px_dma_attr2hdl(px_p, mp)) { 862 px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp); 863 *handlep = NULL; 864 return (rval); 865 } 866 *handlep = (ddi_dma_handle_t)mp; 867 return (DDI_SUCCESS); 868 } 869 870 871 /* 872 * bus dma free handle entry point: 873 */ 874 /*ARGSUSED*/ 875 int 876 px_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 877 { 878 DBG(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n", 879 ddi_driver_name(rdip), ddi_get_instance(rdip), handle); 880 px_dma_freemp((ddi_dma_impl_t *)handle); 881 882 if (px_kmem_clid) { 883 DBG(DBG_DMA_FREEH, dip, "run handle callback\n"); 884 ddi_run_callback(&px_kmem_clid); 885 } 886 return (DDI_SUCCESS); 887 } 888 889 890 /* 891 * bus dma bind handle entry point: 892 */ 893 int 894 px_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 895 ddi_dma_handle_t handle, ddi_dma_req_t *dmareq, 896 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 897 { 898 px_t *px_p = DIP_TO_STATE(dip); 899 px_mmu_t *mmu_p = px_p->px_mmu_p; 900 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 901 int ret; 902 903 DBG(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n", 904 ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq); 905 906 if (mp->dmai_flags & PX_DMAI_FLAGS_INUSE) 907 return (DDI_DMA_INUSE); 908 909 ASSERT((mp->dmai_flags & ~PX_DMAI_FLAGS_PRESERVE) == 0); 910 mp->dmai_flags |= PX_DMAI_FLAGS_INUSE; 911 912 if (ret = px_dma_type(px_p, dmareq, mp)) 913 goto err; 914 if (ret = px_dma_pfn(px_p, dmareq, mp)) 915 goto err; 916 917 switch (PX_DMA_TYPE(mp)) { 918 case PX_DMAI_FLAGS_DVMA: 919 if (ret = px_dvma_win(px_p, dmareq, mp)) 920 goto map_err; 921 if (!PX_DMA_CANCACHE(mp)) { /* try fast track */ 922 if (PX_DMA_CANFAST(mp)) { 923 if (!px_dvma_map_fast(mmu_p, mp)) 924 goto mapped; /*LINTED E_NOP_ELSE_STMT*/ 925 } else { 926 PX_DVMA_FASTTRAK_PROF(mp); 927 } 928 } 929 if (ret = px_dvma_map(mp, dmareq, mmu_p)) 930 goto map_err; 931 mapped: 932 *ccountp = 1; 933 MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size); 934 break; 935 case PX_DMAI_FLAGS_BYPASS: 936 case PX_DMAI_FLAGS_PTP: 937 if (ret = px_dma_physwin(px_p, dmareq, mp)) 938 goto map_err; 939 *ccountp = PX_WINLST(mp)->win_ncookies; 940 *cookiep = 941 *(ddi_dma_cookie_t *)(PX_WINLST(mp) + 1); /* wholeobj */ 942 break; 943 default: 944 cmn_err(CE_PANIC, "%s%d: px_dma_bindhdl(%p): bad dma type", 945 ddi_driver_name(rdip), ddi_get_instance(rdip), mp); 946 /*NOTREACHED*/ 947 } 948 DBG(DBG_DMA_BINDH, dip, "cookie %" PRIx64 "+%x\n", 949 cookiep->dmac_address, cookiep->dmac_size); 950 px_dump_dma_handle(DBG_DMA_MAP, dip, mp); 951 952 /* insert dma handle into FMA cache */ 953 if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) { 954 (void) ndi_fmc_insert(rdip, DMA_HANDLE, mp, NULL); 955 mp->dmai_error.err_cf = impl_dma_check; 956 } 957 958 return (mp->dmai_nwin == 1 ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP); 959 map_err: 960 px_dma_freepfn(mp); 961 err: 962 mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE; 963 return (ret); 964 } 965 966 967 /* 968 * bus dma unbind handle entry point: 969 */ 970 /*ARGSUSED*/ 971 int 972 px_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 973 { 974 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 975 px_t *px_p = DIP_TO_STATE(dip); 976 px_mmu_t *mmu_p = px_p->px_mmu_p; 977 978 DBG(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n", 979 ddi_driver_name(rdip), ddi_get_instance(rdip), handle); 980 if ((mp->dmai_flags & PX_DMAI_FLAGS_INUSE) == 0) { 981 DBG(DBG_DMA_UNBINDH, dip, "handle not inuse\n"); 982 return (DDI_FAILURE); 983 } 984 985 /* remove dma handle from FMA cache */ 986 if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) { 987 if (DEVI(rdip)->devi_fmhdl != NULL && 988 DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap)) { 989 (void) ndi_fmc_remove(rdip, DMA_HANDLE, mp); 990 } 991 } 992 993 /* 994 * Here if the handle is using the iommu. Unload all the iommu 995 * translations. 996 */ 997 switch (PX_DMA_TYPE(mp)) { 998 case PX_DMAI_FLAGS_DVMA: 999 px_mmu_unmap_window(mmu_p, mp); 1000 px_dvma_unmap(mmu_p, mp); 1001 px_dma_freepfn(mp); 1002 break; 1003 case PX_DMAI_FLAGS_BYPASS: 1004 case PX_DMAI_FLAGS_PTP: 1005 px_dma_freewin(mp); 1006 break; 1007 default: 1008 cmn_err(CE_PANIC, "%s%d: px_dma_unbindhdl:bad dma type %p", 1009 ddi_driver_name(rdip), ddi_get_instance(rdip), mp); 1010 /*NOTREACHED*/ 1011 } 1012 if (mmu_p->mmu_dvma_clid != 0) { 1013 DBG(DBG_DMA_UNBINDH, dip, "run dvma callback\n"); 1014 ddi_run_callback(&mmu_p->mmu_dvma_clid); 1015 } 1016 if (px_kmem_clid) { 1017 DBG(DBG_DMA_UNBINDH, dip, "run handle callback\n"); 1018 ddi_run_callback(&px_kmem_clid); 1019 } 1020 mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE; 1021 1022 return (DDI_SUCCESS); 1023 } 1024 1025 /* 1026 * bus dma win entry point: 1027 */ 1028 int 1029 px_dma_win(dev_info_t *dip, dev_info_t *rdip, 1030 ddi_dma_handle_t handle, uint_t win, off_t *offp, 1031 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 1032 { 1033 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 1034 int ret; 1035 1036 DBG(DBG_DMA_WIN, dip, "rdip=%s%d\n", 1037 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1038 1039 px_dump_dma_handle(DBG_DMA_WIN, dip, mp); 1040 if (win >= mp->dmai_nwin) { 1041 DBG(DBG_DMA_WIN, dip, "%x out of range\n", win); 1042 return (DDI_FAILURE); 1043 } 1044 1045 switch (PX_DMA_TYPE(mp)) { 1046 case PX_DMAI_FLAGS_DVMA: 1047 if (win != PX_DMA_CURWIN(mp)) { 1048 px_t *px_p = DIP_TO_STATE(dip); 1049 px_mmu_t *mmu_p = px_p->px_mmu_p; 1050 px_mmu_unmap_window(mmu_p, mp); 1051 1052 /* map_window sets dmai_mapping/size/offset */ 1053 px_mmu_map_window(mmu_p, mp, win); 1054 if ((ret = px_mmu_map_window(mmu_p, 1055 mp, win)) != DDI_SUCCESS) 1056 return (ret); 1057 } 1058 if (cookiep) 1059 MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, 1060 mp->dmai_size); 1061 if (ccountp) 1062 *ccountp = 1; 1063 break; 1064 case PX_DMAI_FLAGS_PTP: 1065 case PX_DMAI_FLAGS_BYPASS: { 1066 int i; 1067 ddi_dma_cookie_t *ck_p; 1068 px_dma_win_t *win_p = mp->dmai_winlst; 1069 1070 for (i = 0; i < win; win_p = win_p->win_next, i++); 1071 ck_p = (ddi_dma_cookie_t *)(win_p + 1); 1072 *cookiep = *ck_p; 1073 mp->dmai_offset = win_p->win_offset; 1074 mp->dmai_size = win_p->win_size; 1075 mp->dmai_mapping = ck_p->dmac_laddress; 1076 mp->dmai_cookie = ck_p + 1; 1077 win_p->win_curseg = 0; 1078 if (ccountp) 1079 *ccountp = win_p->win_ncookies; 1080 } 1081 break; 1082 default: 1083 cmn_err(CE_WARN, "%s%d: px_dma_win:bad dma type 0x%x", 1084 ddi_driver_name(rdip), ddi_get_instance(rdip), 1085 PX_DMA_TYPE(mp)); 1086 return (DDI_FAILURE); 1087 } 1088 if (cookiep) 1089 DBG(DBG_DMA_WIN, dip, 1090 "cookie - dmac_address=%x dmac_size=%x\n", 1091 cookiep->dmac_address, cookiep->dmac_size); 1092 if (offp) 1093 *offp = (off_t)mp->dmai_offset; 1094 if (lenp) 1095 *lenp = mp->dmai_size; 1096 return (DDI_SUCCESS); 1097 } 1098 1099 #ifdef DEBUG 1100 static char *px_dmactl_str[] = { 1101 "DDI_DMA_FREE", 1102 "DDI_DMA_SYNC", 1103 "DDI_DMA_HTOC", 1104 "DDI_DMA_KVADDR", 1105 "DDI_DMA_MOVWIN", 1106 "DDI_DMA_REPWIN", 1107 "DDI_DMA_GETERR", 1108 "DDI_DMA_COFF", 1109 "DDI_DMA_NEXTWIN", 1110 "DDI_DMA_NEXTSEG", 1111 "DDI_DMA_SEGTOC", 1112 "DDI_DMA_RESERVE", 1113 "DDI_DMA_RELEASE", 1114 "DDI_DMA_RESETH", 1115 "DDI_DMA_CKSYNC", 1116 "DDI_DMA_IOPB_ALLOC", 1117 "DDI_DMA_IOPB_FREE", 1118 "DDI_DMA_SMEM_ALLOC", 1119 "DDI_DMA_SMEM_FREE", 1120 "DDI_DMA_SET_SBUS64" 1121 }; 1122 #endif /* DEBUG */ 1123 1124 /* 1125 * bus dma control entry point: 1126 */ 1127 /*ARGSUSED*/ 1128 int 1129 px_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 1130 enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp, 1131 uint_t cache_flags) 1132 { 1133 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 1134 1135 #ifdef DEBUG 1136 DBG(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", px_dmactl_str[cmd], 1137 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1138 #endif /* DEBUG */ 1139 1140 switch (cmd) { 1141 case DDI_DMA_FREE: 1142 (void) px_dma_unbindhdl(dip, rdip, handle); 1143 (void) px_dma_freehdl(dip, rdip, handle); 1144 return (DDI_SUCCESS); 1145 case DDI_DMA_RESERVE: { 1146 px_t *px_p = DIP_TO_STATE(dip); 1147 return (px_fdvma_reserve(dip, rdip, px_p, 1148 (ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp)); 1149 } 1150 case DDI_DMA_RELEASE: { 1151 px_t *px_p = DIP_TO_STATE(dip); 1152 return (px_fdvma_release(dip, px_p, mp)); 1153 } 1154 default: 1155 break; 1156 } 1157 1158 switch (PX_DMA_TYPE(mp)) { 1159 case PX_DMAI_FLAGS_DVMA: 1160 return (px_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp, 1161 cache_flags)); 1162 case PX_DMAI_FLAGS_PTP: 1163 case PX_DMAI_FLAGS_BYPASS: 1164 return (px_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp, 1165 cache_flags)); 1166 default: 1167 cmn_err(CE_PANIC, "%s%d: px_dma_ctlops(%x):bad dma type %x", 1168 ddi_driver_name(rdip), ddi_get_instance(rdip), cmd, 1169 mp->dmai_flags); 1170 /*NOTREACHED*/ 1171 } 1172 return (0); 1173 } 1174 1175 /* 1176 * control ops entry point: 1177 * 1178 * Requests handled completely: 1179 * DDI_CTLOPS_INITCHILD see init_child() for details 1180 * DDI_CTLOPS_UNINITCHILD 1181 * DDI_CTLOPS_REPORTDEV see report_dev() for details 1182 * DDI_CTLOPS_IOMIN cache line size if streaming otherwise 1 1183 * DDI_CTLOPS_REGSIZE 1184 * DDI_CTLOPS_NREGS 1185 * DDI_CTLOPS_DVMAPAGESIZE 1186 * DDI_CTLOPS_POKE 1187 * DDI_CTLOPS_PEEK 1188 * 1189 * All others passed to parent. 1190 */ 1191 int 1192 px_ctlops(dev_info_t *dip, dev_info_t *rdip, 1193 ddi_ctl_enum_t op, void *arg, void *result) 1194 { 1195 px_t *px_p = DIP_TO_STATE(dip); 1196 struct detachspec *ds; 1197 struct attachspec *as; 1198 1199 switch (op) { 1200 case DDI_CTLOPS_INITCHILD: 1201 return (px_init_child(px_p, (dev_info_t *)arg)); 1202 1203 case DDI_CTLOPS_UNINITCHILD: 1204 return (px_uninit_child(px_p, (dev_info_t *)arg)); 1205 1206 case DDI_CTLOPS_ATTACH: 1207 as = (struct attachspec *)arg; 1208 switch (as->when) { 1209 case DDI_PRE: 1210 if (as->cmd == DDI_ATTACH) { 1211 DBG(DBG_PWR, dip, "PRE_ATTACH for %s@%d\n", 1212 ddi_driver_name(rdip), 1213 ddi_get_instance(rdip)); 1214 return (pcie_pm_hold(dip)); 1215 } 1216 if (as->cmd == DDI_RESUME) { 1217 ddi_acc_handle_t config_handle; 1218 DBG(DBG_PWR, dip, "PRE_RESUME for %s@%d\n", 1219 ddi_driver_name(rdip), 1220 ddi_get_instance(rdip)); 1221 1222 if (pci_config_setup(rdip, &config_handle) == 1223 DDI_SUCCESS) { 1224 pcie_clear_errors(rdip, config_handle); 1225 pci_config_teardown(&config_handle); 1226 } 1227 } 1228 return (DDI_SUCCESS); 1229 1230 case DDI_POST: 1231 DBG(DBG_PWR, dip, "POST_ATTACH for %s@%d\n", 1232 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1233 if (as->cmd == DDI_ATTACH && as->result != DDI_SUCCESS) 1234 pcie_pm_release(dip); 1235 1236 (void) pcie_postattach_child(rdip); 1237 1238 return (DDI_SUCCESS); 1239 default: 1240 break; 1241 } 1242 break; 1243 1244 case DDI_CTLOPS_DETACH: 1245 ds = (struct detachspec *)arg; 1246 switch (ds->when) { 1247 case DDI_POST: 1248 if (ds->cmd == DDI_DETACH && 1249 ds->result == DDI_SUCCESS) { 1250 DBG(DBG_PWR, dip, "POST_DETACH for %s@%d\n", 1251 ddi_driver_name(rdip), 1252 ddi_get_instance(rdip)); 1253 return (pcie_pm_remove_child(dip, rdip)); 1254 } 1255 return (DDI_SUCCESS); 1256 default: 1257 break; 1258 } 1259 break; 1260 1261 case DDI_CTLOPS_REPORTDEV: 1262 return (px_report_dev(rdip)); 1263 1264 case DDI_CTLOPS_IOMIN: 1265 return (DDI_SUCCESS); 1266 1267 case DDI_CTLOPS_REGSIZE: 1268 *((off_t *)result) = px_get_reg_set_size(rdip, *((int *)arg)); 1269 return (*((off_t *)result) == 0 ? DDI_FAILURE : DDI_SUCCESS); 1270 1271 case DDI_CTLOPS_NREGS: 1272 *((uint_t *)result) = px_get_nreg_set(rdip); 1273 return (DDI_SUCCESS); 1274 1275 case DDI_CTLOPS_DVMAPAGESIZE: 1276 *((ulong_t *)result) = MMU_PAGE_SIZE; 1277 return (DDI_SUCCESS); 1278 1279 case DDI_CTLOPS_POKE: /* platform dependent implementation. */ 1280 return (px_lib_ctlops_poke(dip, rdip, 1281 (peekpoke_ctlops_t *)arg)); 1282 1283 case DDI_CTLOPS_PEEK: /* platform dependent implementation. */ 1284 return (px_lib_ctlops_peek(dip, rdip, 1285 (peekpoke_ctlops_t *)arg, result)); 1286 1287 case DDI_CTLOPS_POWER: 1288 default: 1289 break; 1290 } 1291 1292 /* 1293 * Now pass the request up to our parent. 1294 */ 1295 DBG(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n", 1296 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1297 return (ddi_ctlops(dip, rdip, op, arg, result)); 1298 } 1299 1300 /* ARGSUSED */ 1301 int 1302 px_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op, 1303 ddi_intr_handle_impl_t *hdlp, void *result) 1304 { 1305 int intr_types, ret = DDI_SUCCESS; 1306 1307 DBG(DBG_INTROPS, dip, "px_intr_ops: rdip=%s%d\n", 1308 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1309 1310 /* Process DDI_INTROP_SUPPORTED_TYPES request here */ 1311 if (intr_op == DDI_INTROP_SUPPORTED_TYPES) { 1312 *(int *)result = i_ddi_get_intx_nintrs(rdip) ? 1313 DDI_INTR_TYPE_FIXED : 0; 1314 1315 if ((pci_msi_get_supported_type(rdip, 1316 &intr_types)) == DDI_SUCCESS) { 1317 /* 1318 * Double check supported interrupt types vs. 1319 * what the host bridge supports. 1320 */ 1321 *(int *)result |= intr_types; 1322 } 1323 1324 return (ret); 1325 } 1326 1327 /* 1328 * PCI-E nexus driver supports fixed, MSI and MSI-X interrupts. 1329 * Return failure if interrupt type is not supported. 1330 */ 1331 switch (hdlp->ih_type) { 1332 case DDI_INTR_TYPE_FIXED: 1333 ret = px_intx_ops(dip, rdip, intr_op, hdlp, result); 1334 break; 1335 case DDI_INTR_TYPE_MSI: 1336 case DDI_INTR_TYPE_MSIX: 1337 ret = px_msix_ops(dip, rdip, intr_op, hdlp, result); 1338 break; 1339 default: 1340 ret = DDI_ENOTSUP; 1341 break; 1342 } 1343 1344 return (ret); 1345 } 1346 1347 static int 1348 px_init_hotplug(px_t *px_p) 1349 { 1350 px_bus_range_t bus_range; 1351 dev_info_t *dip; 1352 pciehpc_regops_t regops; 1353 1354 dip = px_p->px_dip; 1355 1356 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1357 "hotplug-capable") == 0) 1358 return (DDI_FAILURE); 1359 1360 /* 1361 * Before initializing hotplug - open up bus range. The busra 1362 * module will initialize its pool of bus numbers from this. 1363 * "busra" will be the agent that keeps track of them during 1364 * hotplug. Also, note, that busra will remove any bus numbers 1365 * already in use from boot time. 1366 */ 1367 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1368 "bus-range") == 0) { 1369 cmn_err(CE_WARN, "%s%d: bus-range not found\n", 1370 ddi_driver_name(dip), ddi_get_instance(dip)); 1371 #ifdef DEBUG 1372 bus_range.lo = 0x0; 1373 bus_range.hi = 0xff; 1374 1375 if (ndi_prop_update_int_array(DDI_DEV_T_NONE, 1376 dip, "bus-range", (int *)&bus_range, 2) 1377 != DDI_PROP_SUCCESS) { 1378 return (DDI_FAILURE); 1379 } 1380 #else 1381 return (DDI_FAILURE); 1382 #endif 1383 } 1384 1385 if (px_lib_hotplug_init(dip, (void *)®ops) != DDI_SUCCESS) 1386 return (DDI_FAILURE); 1387 1388 if (pciehpc_init(dip, ®ops) != DDI_SUCCESS) { 1389 px_lib_hotplug_uninit(dip); 1390 return (DDI_FAILURE); 1391 } 1392 1393 if (pcihp_init(dip) != DDI_SUCCESS) { 1394 (void) pciehpc_uninit(dip); 1395 px_lib_hotplug_uninit(dip); 1396 return (DDI_FAILURE); 1397 } 1398 1399 if (pcihp_get_cb_ops() != NULL) { 1400 DBG(DBG_ATTACH, dip, "%s%d hotplug enabled", 1401 ddi_driver_name(dip), ddi_get_instance(dip)); 1402 px_p->px_dev_caps |= PX_HOTPLUG_CAPABLE; 1403 } 1404 1405 return (DDI_SUCCESS); 1406 } 1407 1408 static int 1409 px_uninit_hotplug(dev_info_t *dip) 1410 { 1411 if (pcihp_uninit(dip) != DDI_SUCCESS) 1412 return (DDI_FAILURE); 1413 1414 if (pciehpc_uninit(dip) != DDI_SUCCESS) 1415 return (DDI_FAILURE); 1416 1417 px_lib_hotplug_uninit(dip); 1418 1419 return (DDI_SUCCESS); 1420 } 1421