1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * PCI Express nexus driver interface 30 */ 31 32 #include <sys/types.h> 33 #include <sys/conf.h> /* nulldev */ 34 #include <sys/stat.h> /* devctl */ 35 #include <sys/kmem.h> 36 #include <sys/sunddi.h> 37 #include <sys/sunndi.h> 38 #include <sys/hotplug/pci/pcihp.h> 39 #include <sys/ddi_impldefs.h> 40 #include <sys/ddi_subrdefs.h> 41 #include <sys/spl.h> 42 #include <sys/epm.h> 43 #include <sys/iommutsb.h> 44 #include <sys/hotplug/pci/pcihp.h> 45 #include <sys/hotplug/pci/pciehpc.h> 46 #include "px_obj.h" 47 #include <sys/pci_tools.h> 48 #include "px_tools_ext.h" 49 #include "pcie_pwr.h" 50 51 /*LINTLIBRARY*/ 52 53 /* 54 * function prototypes for dev ops routines: 55 */ 56 static int px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 57 static int px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 58 static int px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, 59 void *arg, void **result); 60 static int px_cb_attach(px_t *); 61 static void px_cb_detach(px_t *); 62 static int px_pwr_setup(dev_info_t *dip); 63 static void px_pwr_teardown(dev_info_t *dip); 64 65 extern errorq_t *pci_target_queue; 66 67 /* 68 * function prototypes for hotplug routines: 69 */ 70 static uint_t px_init_hotplug(px_t *px_p); 71 static uint_t px_uninit_hotplug(dev_info_t *dip); 72 73 /* 74 * bus ops and dev ops structures: 75 */ 76 static struct bus_ops px_bus_ops = { 77 BUSO_REV, 78 px_map, 79 0, 80 0, 81 0, 82 i_ddi_map_fault, 83 px_dma_setup, 84 px_dma_allochdl, 85 px_dma_freehdl, 86 px_dma_bindhdl, 87 px_dma_unbindhdl, 88 px_lib_dma_sync, 89 px_dma_win, 90 px_dma_ctlops, 91 px_ctlops, 92 ddi_bus_prop_op, 93 ndi_busop_get_eventcookie, 94 ndi_busop_add_eventcall, 95 ndi_busop_remove_eventcall, 96 ndi_post_event, 97 NULL, 98 NULL, /* (*bus_config)(); */ 99 NULL, /* (*bus_unconfig)(); */ 100 px_fm_init_child, /* (*bus_fm_init)(); */ 101 NULL, /* (*bus_fm_fini)(); */ 102 px_bus_enter, /* (*bus_fm_access_enter)(); */ 103 px_bus_exit, /* (*bus_fm_access_fini)(); */ 104 pcie_bus_power, /* (*bus_power)(); */ 105 px_intr_ops /* (*bus_intr_op)(); */ 106 }; 107 108 extern struct cb_ops px_cb_ops; 109 110 static struct dev_ops px_ops = { 111 DEVO_REV, 112 0, 113 px_info, 114 nulldev, 115 0, 116 px_attach, 117 px_detach, 118 nodev, 119 &px_cb_ops, 120 &px_bus_ops, 121 nulldev 122 }; 123 124 /* 125 * module definitions: 126 */ 127 #include <sys/modctl.h> 128 extern struct mod_ops mod_driverops; 129 130 static struct modldrv modldrv = { 131 &mod_driverops, /* Type of module - driver */ 132 "PCI Express nexus driver %I%", /* Name of module. */ 133 &px_ops, /* driver ops */ 134 }; 135 136 static struct modlinkage modlinkage = { 137 MODREV_1, (void *)&modldrv, NULL 138 }; 139 140 /* driver soft state */ 141 void *px_state_p; 142 143 int 144 _init(void) 145 { 146 int e; 147 148 /* 149 * Initialize per-px bus soft state pointer. 150 */ 151 e = ddi_soft_state_init(&px_state_p, sizeof (px_t), 1); 152 if (e != DDI_SUCCESS) 153 return (e); 154 155 /* 156 * Install the module. 157 */ 158 e = mod_install(&modlinkage); 159 if (e != DDI_SUCCESS) 160 ddi_soft_state_fini(&px_state_p); 161 return (e); 162 } 163 164 int 165 _fini(void) 166 { 167 int e; 168 169 /* 170 * Remove the module. 171 */ 172 e = mod_remove(&modlinkage); 173 if (e != DDI_SUCCESS) 174 return (e); 175 /* 176 * Destroy pci_target_queue, and set it to NULL. 177 */ 178 if (pci_target_queue) 179 errorq_destroy(pci_target_queue); 180 181 pci_target_queue = NULL; 182 183 /* Free px soft state */ 184 ddi_soft_state_fini(&px_state_p); 185 186 return (e); 187 } 188 189 int 190 _info(struct modinfo *modinfop) 191 { 192 return (mod_info(&modlinkage, modinfop)); 193 } 194 195 /* ARGSUSED */ 196 static int 197 px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 198 { 199 int instance = getminor((dev_t)arg); 200 px_t *px_p = INST_TO_STATE(instance); 201 202 /* 203 * Allow hotplug to deal with ones it manages 204 * Hot Plug will be done later. 205 */ 206 if (px_p && (px_p->px_dev_caps & PX_HOTPLUG_CAPABLE)) 207 return (pcihp_info(dip, infocmd, arg, result)); 208 209 /* non-hotplug or not attached */ 210 switch (infocmd) { 211 case DDI_INFO_DEVT2INSTANCE: 212 *result = (void *)(intptr_t)instance; 213 return (DDI_SUCCESS); 214 215 case DDI_INFO_DEVT2DEVINFO: 216 if (px_p == NULL) 217 return (DDI_FAILURE); 218 *result = (void *)px_p->px_dip; 219 return (DDI_SUCCESS); 220 221 default: 222 return (DDI_FAILURE); 223 } 224 } 225 226 /* device driver entry points */ 227 /* 228 * attach entry point: 229 */ 230 /*ARGSUSED*/ 231 static int 232 px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 233 { 234 px_t *px_p; /* per bus state pointer */ 235 int instance = DIP_TO_INST(dip); 236 int ret = DDI_SUCCESS; 237 devhandle_t dev_hdl = NULL; 238 239 switch (cmd) { 240 case DDI_ATTACH: 241 DBG(DBG_ATTACH, dip, "DDI_ATTACH\n"); 242 243 /* 244 * Allocate and get the per-px soft state structure. 245 */ 246 if (ddi_soft_state_zalloc(px_state_p, instance) 247 != DDI_SUCCESS) { 248 cmn_err(CE_WARN, "%s%d: can't allocate px state", 249 ddi_driver_name(dip), instance); 250 goto err_bad_px_softstate; 251 } 252 px_p = INST_TO_STATE(instance); 253 px_p->px_dip = dip; 254 mutex_init(&px_p->px_mutex, NULL, MUTEX_DRIVER, NULL); 255 px_p->px_soft_state = PX_SOFT_STATE_CLOSED; 256 px_p->px_open_count = 0; 257 258 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, 259 "device_type", "pciex"); 260 /* 261 * Get key properties of the pci bridge node and 262 * determine it's type (psycho, schizo, etc ...). 263 */ 264 if (px_get_props(px_p, dip) == DDI_FAILURE) 265 goto err_bad_px_prop; 266 267 if (px_lib_dev_init(dip, &dev_hdl) != DDI_SUCCESS) 268 goto err_bad_dev_init; 269 270 /* Initialize device handle */ 271 px_p->px_dev_hdl = dev_hdl; 272 273 /* 274 * Initialize interrupt block. Note that this 275 * initialize error handling for the PEC as well. 276 */ 277 if ((ret = px_ib_attach(px_p)) != DDI_SUCCESS) 278 goto err_bad_ib; 279 280 if (px_cb_attach(px_p) != DDI_SUCCESS) 281 goto err_bad_cb; 282 283 /* 284 * Start creating the modules. 285 * Note that attach() routines should 286 * register and enable their own interrupts. 287 */ 288 289 if ((px_mmu_attach(px_p)) != DDI_SUCCESS) 290 goto err_bad_mmu; 291 292 if ((px_msiq_attach(px_p)) != DDI_SUCCESS) 293 goto err_bad_msiq; 294 295 if ((px_msi_attach(px_p)) != DDI_SUCCESS) 296 goto err_bad_msi; 297 298 if ((px_pec_attach(px_p)) != DDI_SUCCESS) 299 goto err_bad_pec; 300 301 if ((px_dma_attach(px_p)) != DDI_SUCCESS) 302 goto err_bad_dma; /* nothing to uninitialize on DMA */ 303 304 if ((px_fm_attach(px_p)) != DDI_SUCCESS) 305 goto err_bad_dma; 306 307 /* 308 * All of the error handlers have been registered 309 * by now so it's time to activate the interrupt. 310 */ 311 if ((ret = px_err_add_intr(&px_p->px_fault)) != DDI_SUCCESS) 312 goto err_bad_intr; 313 314 (void) px_init_hotplug(px_p); 315 316 /* 317 * Create the "devctl" node for hotplug and pcitool support. 318 * For non-hotplug bus, we still need ":devctl" to 319 * support DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls. 320 */ 321 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 322 PCIHP_AP_MINOR_NUM(instance, PCIHP_DEVCTL_MINOR), 323 DDI_NT_NEXUS, 0) != DDI_SUCCESS) { 324 goto err_bad_devctl_node; 325 } 326 327 if (pxtool_init(dip) != DDI_SUCCESS) 328 goto err_bad_pcitool_node; 329 330 /* 331 * power management setup. Even if it fails, attach will 332 * succeed as this is a optional feature. Since we are 333 * always at full power, this is not critical. 334 */ 335 if (pwr_common_setup(dip) != DDI_SUCCESS) { 336 DBG(DBG_PWR, dip, "pwr_common_setup failed\n"); 337 } else if (px_pwr_setup(dip) != DDI_SUCCESS) { 338 DBG(DBG_PWR, dip, "px_pwr_setup failed \n"); 339 pwr_common_teardown(dip); 340 } 341 342 /* 343 * add cpr callback 344 */ 345 px_cpr_add_callb(px_p); 346 347 ddi_report_dev(dip); 348 349 px_p->px_state = PX_ATTACHED; 350 DBG(DBG_ATTACH, dip, "attach success\n"); 351 break; 352 353 err_bad_pcitool_node: 354 ddi_remove_minor_node(dip, "devctl"); 355 err_bad_devctl_node: 356 px_err_rem_intr(&px_p->px_fault); 357 err_bad_intr: 358 px_fm_detach(px_p); 359 err_bad_dma: 360 px_pec_detach(px_p); 361 err_bad_pec: 362 px_msi_detach(px_p); 363 err_bad_msi: 364 px_msiq_detach(px_p); 365 err_bad_msiq: 366 px_mmu_detach(px_p); 367 err_bad_mmu: 368 px_cb_detach(px_p); 369 err_bad_cb: 370 px_ib_detach(px_p); 371 err_bad_ib: 372 (void) px_lib_dev_fini(dip); 373 err_bad_dev_init: 374 px_free_props(px_p); 375 err_bad_px_prop: 376 mutex_destroy(&px_p->px_mutex); 377 ddi_soft_state_free(px_state_p, instance); 378 err_bad_px_softstate: 379 ret = DDI_FAILURE; 380 break; 381 382 case DDI_RESUME: 383 DBG(DBG_ATTACH, dip, "DDI_RESUME\n"); 384 385 px_p = INST_TO_STATE(instance); 386 387 mutex_enter(&px_p->px_mutex); 388 389 /* suspend might have not succeeded */ 390 if (px_p->px_state != PX_SUSPENDED) { 391 DBG(DBG_ATTACH, px_p->px_dip, 392 "instance NOT suspended\n"); 393 ret = DDI_FAILURE; 394 break; 395 } 396 397 px_msiq_resume(px_p); 398 px_lib_resume(dip); 399 (void) pcie_pwr_resume(dip); 400 px_p->px_state = PX_ATTACHED; 401 402 mutex_exit(&px_p->px_mutex); 403 404 break; 405 default: 406 DBG(DBG_ATTACH, dip, "unsupported attach op\n"); 407 ret = DDI_FAILURE; 408 break; 409 } 410 411 return (ret); 412 } 413 414 /* 415 * detach entry point: 416 */ 417 /*ARGSUSED*/ 418 static int 419 px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 420 { 421 int instance = ddi_get_instance(dip); 422 px_t *px_p = INST_TO_STATE(instance); 423 int ret; 424 425 /* 426 * Make sure we are currently attached 427 */ 428 if (px_p->px_state != PX_ATTACHED) { 429 DBG(DBG_DETACH, dip, "Instance not attached\n"); 430 return (DDI_FAILURE); 431 } 432 433 mutex_enter(&px_p->px_mutex); 434 435 switch (cmd) { 436 case DDI_DETACH: 437 DBG(DBG_DETACH, dip, "DDI_DETACH\n"); 438 439 /* 440 * remove cpr callback 441 */ 442 px_cpr_rem_callb(px_p); 443 444 if (px_p->px_dev_caps & PX_HOTPLUG_CAPABLE) 445 if (px_uninit_hotplug(dip) != DDI_SUCCESS) { 446 mutex_exit(&px_p->px_mutex); 447 return (DDI_FAILURE); 448 } 449 450 /* 451 * things which used to be done in obj_destroy 452 * are now in-lined here. 453 */ 454 455 px_p->px_state = PX_DETACHED; 456 457 pxtool_uninit(dip); 458 459 ddi_remove_minor_node(dip, "devctl"); 460 px_err_rem_intr(&px_p->px_fault); 461 px_fm_detach(px_p); 462 px_pec_detach(px_p); 463 px_pwr_teardown(dip); 464 pwr_common_teardown(dip); 465 px_msi_detach(px_p); 466 px_msiq_detach(px_p); 467 px_mmu_detach(px_p); 468 px_cb_detach(px_p); 469 px_ib_detach(px_p); 470 (void) px_lib_dev_fini(dip); 471 472 /* 473 * Free the px soft state structure and the rest of the 474 * resources it's using. 475 */ 476 px_free_props(px_p); 477 mutex_exit(&px_p->px_mutex); 478 mutex_destroy(&px_p->px_mutex); 479 480 /* Free the interrupt-priorities prop if we created it. */ { 481 int len; 482 483 if (ddi_getproplen(DDI_DEV_T_ANY, dip, 484 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 485 "interrupt-priorities", &len) == DDI_PROP_SUCCESS) 486 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, 487 "interrupt-priorities"); 488 } 489 490 px_p->px_dev_hdl = NULL; 491 ddi_soft_state_free(px_state_p, instance); 492 493 return (DDI_SUCCESS); 494 495 case DDI_SUSPEND: 496 if (pcie_pwr_suspend(dip) != DDI_SUCCESS) { 497 mutex_exit(&px_p->px_mutex); 498 return (DDI_FAILURE); 499 } 500 if ((ret = px_lib_suspend(dip)) == DDI_SUCCESS) 501 px_p->px_state = PX_SUSPENDED; 502 mutex_exit(&px_p->px_mutex); 503 504 return (ret); 505 506 default: 507 DBG(DBG_DETACH, dip, "unsupported detach op\n"); 508 mutex_exit(&px_p->px_mutex); 509 return (DDI_FAILURE); 510 } 511 } 512 513 int 514 px_cb_attach(px_t *px_p) 515 { 516 px_fault_t *fault_p = &px_p->px_cb_fault; 517 dev_info_t *dip = px_p->px_dip; 518 sysino_t sysino; 519 520 if (px_lib_intr_devino_to_sysino(dip, 521 px_p->px_inos[PX_INTR_XBC], &sysino) != DDI_SUCCESS) 522 return (DDI_FAILURE); 523 524 fault_p->px_fh_dip = dip; 525 fault_p->px_fh_sysino = sysino; 526 fault_p->px_err_func = px_err_cb_intr; 527 fault_p->px_intr_ino = px_p->px_inos[PX_INTR_XBC]; 528 529 return (px_cb_add_intr(fault_p)); 530 } 531 532 void 533 px_cb_detach(px_t *px_p) 534 { 535 px_cb_rem_intr(&px_p->px_cb_fault); 536 } 537 538 /* 539 * power management related initialization specific to px 540 * called by px_attach() 541 */ 542 static int 543 px_pwr_setup(dev_info_t *dip) 544 { 545 pcie_pwr_t *pwr_p; 546 int instance = ddi_get_instance(dip); 547 px_t *px_p = INST_TO_STATE(instance); 548 ddi_intr_handle_impl_t hdl; 549 550 ASSERT(PCIE_PMINFO(dip)); 551 pwr_p = PCIE_NEXUS_PMINFO(dip); 552 ASSERT(pwr_p); 553 554 /* 555 * indicate support LDI (Layered Driver Interface) 556 * Create the property, if it is not already there 557 */ 558 if (!ddi_prop_exists(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS, 559 DDI_KERNEL_IOCTL)) { 560 if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 561 DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS) { 562 DBG(DBG_PWR, dip, "can't create kernel ioctl prop\n"); 563 return (DDI_FAILURE); 564 } 565 } 566 /* No support for device PM. We are always at full power */ 567 pwr_p->pwr_func_lvl = PM_LEVEL_D0; 568 569 mutex_init(&px_p->px_l23ready_lock, NULL, MUTEX_DRIVER, 570 DDI_INTR_PRI(px_pwr_pil)); 571 cv_init(&px_p->px_l23ready_cv, NULL, CV_DRIVER, NULL); 572 573 /* Initialize handle */ 574 bzero(&hdl, sizeof (ddi_intr_handle_impl_t)); 575 hdl.ih_cb_arg1 = px_p; 576 hdl.ih_ver = DDI_INTR_VERSION; 577 hdl.ih_state = DDI_IHDL_STATE_ALLOC; 578 hdl.ih_dip = dip; 579 hdl.ih_pri = px_pwr_pil; 580 581 /* Add PME_TO_ACK message handler */ 582 hdl.ih_cb_func = (ddi_intr_handler_t *)px_pmeq_intr; 583 if (px_add_msiq_intr(dip, dip, &hdl, MSG_REC, 584 (msgcode_t)PCIE_PME_ACK_MSG, &px_p->px_pm_msiq_id) != DDI_SUCCESS) { 585 DBG(DBG_PWR, dip, "px_pwr_setup: couldn't add " 586 " PME_TO_ACK intr\n"); 587 goto pwr_setup_err1; 588 } 589 px_lib_msg_setmsiq(dip, PCIE_PME_ACK_MSG, px_p->px_pm_msiq_id); 590 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_VALID); 591 592 if (px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum, 593 px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), 594 PX_INTR_STATE_ENABLE, MSG_REC, PCIE_PME_ACK_MSG) != DDI_SUCCESS) { 595 DBG(DBG_PWR, dip, "px_pwr_setup: PME_TO_ACK update interrupt" 596 " state failed\n"); 597 goto px_pwrsetup_err_state; 598 } 599 600 return (DDI_SUCCESS); 601 602 px_pwrsetup_err_state: 603 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID); 604 (void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG, 605 px_p->px_pm_msiq_id); 606 pwr_setup_err1: 607 mutex_destroy(&px_p->px_l23ready_lock); 608 cv_destroy(&px_p->px_l23ready_cv); 609 610 return (DDI_FAILURE); 611 } 612 613 /* 614 * undo whatever is done in px_pwr_setup. called by px_detach() 615 */ 616 static void 617 px_pwr_teardown(dev_info_t *dip) 618 { 619 int instance = ddi_get_instance(dip); 620 px_t *px_p = INST_TO_STATE(instance); 621 ddi_intr_handle_impl_t hdl; 622 623 if (!PCIE_PMINFO(dip) || !PCIE_NEXUS_PMINFO(dip)) 624 return; 625 626 /* Initialize handle */ 627 bzero(&hdl, sizeof (ddi_intr_handle_impl_t)); 628 hdl.ih_ver = DDI_INTR_VERSION; 629 hdl.ih_state = DDI_IHDL_STATE_ALLOC; 630 hdl.ih_dip = dip; 631 632 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID); 633 (void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG, 634 px_p->px_pm_msiq_id); 635 636 (void) px_ib_update_intr_state(px_p, px_p->px_dip, hdl.ih_inum, 637 px_msiqid_to_devino(px_p, px_p->px_pm_msiq_id), 638 PX_INTR_STATE_DISABLE, MSG_REC, PCIE_PME_ACK_MSG); 639 640 px_p->px_pm_msiq_id = -1; 641 642 cv_destroy(&px_p->px_l23ready_cv); 643 mutex_destroy(&px_p->px_l23ready_lock); 644 } 645 646 /* bus driver entry points */ 647 648 /* 649 * bus map entry point: 650 * 651 * if map request is for an rnumber 652 * get the corresponding regspec from device node 653 * build a new regspec in our parent's format 654 * build a new map_req with the new regspec 655 * call up the tree to complete the mapping 656 */ 657 int 658 px_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, 659 off_t off, off_t len, caddr_t *addrp) 660 { 661 px_t *px_p = DIP_TO_STATE(dip); 662 struct regspec p_regspec; 663 ddi_map_req_t p_mapreq; 664 int reglen, rval, r_no; 665 pci_regspec_t reloc_reg, *rp = &reloc_reg; 666 667 DBG(DBG_MAP, dip, "rdip=%s%d:", 668 ddi_driver_name(rdip), ddi_get_instance(rdip)); 669 670 if (mp->map_flags & DDI_MF_USER_MAPPING) 671 return (DDI_ME_UNIMPLEMENTED); 672 673 switch (mp->map_type) { 674 case DDI_MT_REGSPEC: 675 reloc_reg = *(pci_regspec_t *)mp->map_obj.rp; /* dup whole */ 676 break; 677 678 case DDI_MT_RNUMBER: 679 r_no = mp->map_obj.rnumber; 680 DBG(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no); 681 682 if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS, 683 "reg", (caddr_t)&rp, ®len) != DDI_SUCCESS) 684 return (DDI_ME_RNUMBER_RANGE); 685 686 if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) { 687 kmem_free(rp, reglen); 688 return (DDI_ME_RNUMBER_RANGE); 689 } 690 rp += r_no; 691 break; 692 693 default: 694 return (DDI_ME_INVAL); 695 } 696 DBG(DBG_MAP | DBG_CONT, dip, "\n"); 697 698 if ((rp->pci_phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) { 699 /* 700 * There may be a need to differentiate between PCI 701 * and PCI-Ex devices so the following range check is 702 * done correctly, depending on the implementation of 703 * px_pci bridge nexus driver. 704 */ 705 if ((off >= PCIE_CONF_HDR_SIZE) || 706 (len > PCIE_CONF_HDR_SIZE) || 707 (off + len > PCIE_CONF_HDR_SIZE)) 708 return (DDI_ME_INVAL); 709 /* 710 * the following function returning a DDI_FAILURE assumes 711 * that there are no virtual config space access services 712 * defined in this layer. Otherwise it is availed right 713 * here and we return. 714 */ 715 rval = px_lib_map_vconfig(dip, mp, off, rp, addrp); 716 if (rval == DDI_SUCCESS) 717 goto done; 718 } 719 720 /* 721 * No virtual config space services or we are mapping 722 * a region of memory mapped config/IO/memory space, so proceed 723 * to the parent. 724 */ 725 726 /* relocate within 64-bit pci space through "assigned-addresses" */ 727 if (rval = px_reloc_reg(dip, rdip, px_p, rp)) 728 goto done; 729 730 if (len) /* adjust regspec according to mapping request */ 731 rp->pci_size_low = len; /* MIN ? */ 732 rp->pci_phys_low += off; 733 734 /* translate relocated pci regspec into parent space through "ranges" */ 735 if (rval = px_xlate_reg(px_p, rp, &p_regspec)) 736 goto done; 737 738 p_mapreq = *mp; /* dup the whole structure */ 739 p_mapreq.map_type = DDI_MT_REGSPEC; 740 p_mapreq.map_obj.rp = &p_regspec; 741 px_lib_map_attr_check(&p_mapreq); 742 rval = ddi_map(dip, &p_mapreq, 0, 0, addrp); 743 744 if (rval == DDI_SUCCESS) { 745 /* 746 * Set-up access functions for FM access error capable drivers. 747 */ 748 if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) && 749 mp->map_handlep->ah_acc.devacc_attr_access != 750 DDI_DEFAULT_ACC) 751 px_fm_acc_setup(mp, rdip); 752 } 753 754 done: 755 if (mp->map_type == DDI_MT_RNUMBER) 756 kmem_free(rp - r_no, reglen); 757 758 return (rval); 759 } 760 761 /* 762 * bus dma map entry point 763 * return value: 764 * DDI_DMA_PARTIAL_MAP 1 765 * DDI_DMA_MAPOK 0 766 * DDI_DMA_MAPPED 0 767 * DDI_DMA_NORESOURCES -1 768 * DDI_DMA_NOMAPPING -2 769 * DDI_DMA_TOOBIG -3 770 */ 771 int 772 px_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq, 773 ddi_dma_handle_t *handlep) 774 { 775 px_t *px_p = DIP_TO_STATE(dip); 776 px_mmu_t *mmu_p = px_p->px_mmu_p; 777 ddi_dma_impl_t *mp; 778 int ret; 779 780 DBG(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n", 781 ddi_driver_name(rdip), ddi_get_instance(rdip), 782 handlep ? "alloc" : "advisory"); 783 784 if (!(mp = px_dma_lmts2hdl(dip, rdip, mmu_p, dmareq))) 785 return (DDI_DMA_NORESOURCES); 786 if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING) 787 return (DDI_DMA_NOMAPPING); 788 if (ret = px_dma_type(px_p, dmareq, mp)) 789 goto freehandle; 790 if (ret = px_dma_pfn(px_p, dmareq, mp)) 791 goto freehandle; 792 793 switch (PX_DMA_TYPE(mp)) { 794 case PX_DMAI_FLAGS_DVMA: /* LINTED E_EQUALITY_NOT_ASSIGNMENT */ 795 if ((ret = px_dvma_win(px_p, dmareq, mp)) || !handlep) 796 goto freehandle; 797 if (!PX_DMA_CANCACHE(mp)) { /* try fast track */ 798 if (PX_DMA_CANFAST(mp)) { 799 if (!px_dvma_map_fast(mmu_p, mp)) 800 break; 801 /* LINTED E_NOP_ELSE_STMT */ 802 } else { 803 PX_DVMA_FASTTRAK_PROF(mp); 804 } 805 } 806 if (ret = px_dvma_map(mp, dmareq, mmu_p)) 807 goto freehandle; 808 break; 809 case PX_DMAI_FLAGS_PTP: /* LINTED E_EQUALITY_NOT_ASSIGNMENT */ 810 if ((ret = px_dma_physwin(px_p, dmareq, mp)) || !handlep) 811 goto freehandle; 812 break; 813 case PX_DMAI_FLAGS_BYPASS: 814 default: 815 cmn_err(CE_PANIC, "%s%d: px_dma_setup: bad dma type 0x%x", 816 ddi_driver_name(rdip), ddi_get_instance(rdip), 817 PX_DMA_TYPE(mp)); 818 /*NOTREACHED*/ 819 } 820 *handlep = (ddi_dma_handle_t)mp; 821 mp->dmai_flags |= PX_DMAI_FLAGS_INUSE; 822 px_dump_dma_handle(DBG_DMA_MAP, dip, mp); 823 824 return ((mp->dmai_nwin == 1) ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP); 825 freehandle: 826 if (ret == DDI_DMA_NORESOURCES) 827 px_dma_freemp(mp); /* don't run_callback() */ 828 else 829 (void) px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp); 830 return (ret); 831 } 832 833 834 /* 835 * bus dma alloc handle entry point: 836 */ 837 int 838 px_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp, 839 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 840 { 841 px_t *px_p = DIP_TO_STATE(dip); 842 ddi_dma_impl_t *mp; 843 int rval; 844 845 DBG(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n", 846 ddi_driver_name(rdip), ddi_get_instance(rdip)); 847 848 if (attrp->dma_attr_version != DMA_ATTR_V0) 849 return (DDI_DMA_BADATTR); 850 851 if (!(mp = px_dma_allocmp(dip, rdip, waitfp, arg))) 852 return (DDI_DMA_NORESOURCES); 853 854 /* 855 * Save requestor's information 856 */ 857 mp->dmai_attr = *attrp; /* whole object - augmented later */ 858 *PX_DEV_ATTR(mp) = *attrp; /* whole object - device orig attr */ 859 DBG(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp); 860 861 /* check and convert dma attributes to handle parameters */ 862 if (rval = px_dma_attr2hdl(px_p, mp)) { 863 px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp); 864 *handlep = NULL; 865 return (rval); 866 } 867 *handlep = (ddi_dma_handle_t)mp; 868 return (DDI_SUCCESS); 869 } 870 871 872 /* 873 * bus dma free handle entry point: 874 */ 875 /*ARGSUSED*/ 876 int 877 px_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 878 { 879 DBG(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n", 880 ddi_driver_name(rdip), ddi_get_instance(rdip), handle); 881 px_dma_freemp((ddi_dma_impl_t *)handle); 882 883 if (px_kmem_clid) { 884 DBG(DBG_DMA_FREEH, dip, "run handle callback\n"); 885 ddi_run_callback(&px_kmem_clid); 886 } 887 return (DDI_SUCCESS); 888 } 889 890 891 /* 892 * bus dma bind handle entry point: 893 */ 894 int 895 px_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 896 ddi_dma_handle_t handle, ddi_dma_req_t *dmareq, 897 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 898 { 899 px_t *px_p = DIP_TO_STATE(dip); 900 px_mmu_t *mmu_p = px_p->px_mmu_p; 901 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 902 int ret; 903 904 DBG(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n", 905 ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq); 906 907 if (mp->dmai_flags & PX_DMAI_FLAGS_INUSE) 908 return (DDI_DMA_INUSE); 909 910 ASSERT((mp->dmai_flags & ~PX_DMAI_FLAGS_PRESERVE) == 0); 911 mp->dmai_flags |= PX_DMAI_FLAGS_INUSE; 912 913 if (ret = px_dma_type(px_p, dmareq, mp)) 914 goto err; 915 if (ret = px_dma_pfn(px_p, dmareq, mp)) 916 goto err; 917 918 switch (PX_DMA_TYPE(mp)) { 919 case PX_DMAI_FLAGS_DVMA: 920 if (ret = px_dvma_win(px_p, dmareq, mp)) 921 goto map_err; 922 if (!PX_DMA_CANCACHE(mp)) { /* try fast track */ 923 if (PX_DMA_CANFAST(mp)) { 924 if (!px_dvma_map_fast(mmu_p, mp)) 925 goto mapped; /*LINTED E_NOP_ELSE_STMT*/ 926 } else { 927 PX_DVMA_FASTTRAK_PROF(mp); 928 } 929 } 930 if (ret = px_dvma_map(mp, dmareq, mmu_p)) 931 goto map_err; 932 mapped: 933 *ccountp = 1; 934 MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size); 935 break; 936 case PX_DMAI_FLAGS_BYPASS: 937 case PX_DMAI_FLAGS_PTP: 938 if (ret = px_dma_physwin(px_p, dmareq, mp)) 939 goto map_err; 940 *ccountp = PX_WINLST(mp)->win_ncookies; 941 *cookiep = 942 *(ddi_dma_cookie_t *)(PX_WINLST(mp) + 1); /* wholeobj */ 943 break; 944 default: 945 cmn_err(CE_PANIC, "%s%d: px_dma_bindhdl(%p): bad dma type", 946 ddi_driver_name(rdip), ddi_get_instance(rdip), mp); 947 /*NOTREACHED*/ 948 } 949 DBG(DBG_DMA_BINDH, dip, "cookie %" PRIx64 "+%x\n", 950 cookiep->dmac_address, cookiep->dmac_size); 951 px_dump_dma_handle(DBG_DMA_MAP, dip, mp); 952 953 /* insert dma handle into FMA cache */ 954 if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) { 955 (void) ndi_fmc_insert(rdip, DMA_HANDLE, mp, NULL); 956 mp->dmai_error.err_cf = impl_dma_check; 957 } 958 959 return (mp->dmai_nwin == 1 ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP); 960 map_err: 961 px_dma_freepfn(mp); 962 err: 963 mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE; 964 return (ret); 965 } 966 967 968 /* 969 * bus dma unbind handle entry point: 970 */ 971 /*ARGSUSED*/ 972 int 973 px_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 974 { 975 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 976 px_t *px_p = DIP_TO_STATE(dip); 977 px_mmu_t *mmu_p = px_p->px_mmu_p; 978 979 DBG(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n", 980 ddi_driver_name(rdip), ddi_get_instance(rdip), handle); 981 if ((mp->dmai_flags & PX_DMAI_FLAGS_INUSE) == 0) { 982 DBG(DBG_DMA_UNBINDH, dip, "handle not inuse\n"); 983 return (DDI_FAILURE); 984 } 985 986 /* remove dma handle from FMA cache */ 987 if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) { 988 if (DEVI(rdip)->devi_fmhdl != NULL && 989 DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap)) { 990 (void) ndi_fmc_remove(rdip, DMA_HANDLE, mp); 991 } 992 } 993 994 /* 995 * Here if the handle is using the iommu. Unload all the iommu 996 * translations. 997 */ 998 switch (PX_DMA_TYPE(mp)) { 999 case PX_DMAI_FLAGS_DVMA: 1000 px_mmu_unmap_window(mmu_p, mp); 1001 px_dvma_unmap(mmu_p, mp); 1002 px_dma_freepfn(mp); 1003 break; 1004 case PX_DMAI_FLAGS_BYPASS: 1005 case PX_DMAI_FLAGS_PTP: 1006 px_dma_freewin(mp); 1007 break; 1008 default: 1009 cmn_err(CE_PANIC, "%s%d: px_dma_unbindhdl:bad dma type %p", 1010 ddi_driver_name(rdip), ddi_get_instance(rdip), mp); 1011 /*NOTREACHED*/ 1012 } 1013 if (mmu_p->mmu_dvma_clid != 0) { 1014 DBG(DBG_DMA_UNBINDH, dip, "run dvma callback\n"); 1015 ddi_run_callback(&mmu_p->mmu_dvma_clid); 1016 } 1017 if (px_kmem_clid) { 1018 DBG(DBG_DMA_UNBINDH, dip, "run handle callback\n"); 1019 ddi_run_callback(&px_kmem_clid); 1020 } 1021 mp->dmai_flags &= PX_DMAI_FLAGS_PRESERVE; 1022 1023 return (DDI_SUCCESS); 1024 } 1025 1026 /* 1027 * bus dma win entry point: 1028 */ 1029 int 1030 px_dma_win(dev_info_t *dip, dev_info_t *rdip, 1031 ddi_dma_handle_t handle, uint_t win, off_t *offp, 1032 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 1033 { 1034 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 1035 int ret; 1036 1037 DBG(DBG_DMA_WIN, dip, "rdip=%s%d\n", 1038 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1039 1040 px_dump_dma_handle(DBG_DMA_WIN, dip, mp); 1041 if (win >= mp->dmai_nwin) { 1042 DBG(DBG_DMA_WIN, dip, "%x out of range\n", win); 1043 return (DDI_FAILURE); 1044 } 1045 1046 switch (PX_DMA_TYPE(mp)) { 1047 case PX_DMAI_FLAGS_DVMA: 1048 if (win != PX_DMA_CURWIN(mp)) { 1049 px_t *px_p = DIP_TO_STATE(dip); 1050 px_mmu_t *mmu_p = px_p->px_mmu_p; 1051 px_mmu_unmap_window(mmu_p, mp); 1052 1053 /* map_window sets dmai_mapping/size/offset */ 1054 px_mmu_map_window(mmu_p, mp, win); 1055 if ((ret = px_mmu_map_window(mmu_p, 1056 mp, win)) != DDI_SUCCESS) 1057 return (ret); 1058 } 1059 if (cookiep) 1060 MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, 1061 mp->dmai_size); 1062 if (ccountp) 1063 *ccountp = 1; 1064 break; 1065 case PX_DMAI_FLAGS_PTP: 1066 case PX_DMAI_FLAGS_BYPASS: { 1067 int i; 1068 ddi_dma_cookie_t *ck_p; 1069 px_dma_win_t *win_p = mp->dmai_winlst; 1070 1071 for (i = 0; i < win; win_p = win_p->win_next, i++); 1072 ck_p = (ddi_dma_cookie_t *)(win_p + 1); 1073 *cookiep = *ck_p; 1074 mp->dmai_offset = win_p->win_offset; 1075 mp->dmai_size = win_p->win_size; 1076 mp->dmai_mapping = ck_p->dmac_laddress; 1077 mp->dmai_cookie = ck_p + 1; 1078 win_p->win_curseg = 0; 1079 if (ccountp) 1080 *ccountp = win_p->win_ncookies; 1081 } 1082 break; 1083 default: 1084 cmn_err(CE_WARN, "%s%d: px_dma_win:bad dma type 0x%x", 1085 ddi_driver_name(rdip), ddi_get_instance(rdip), 1086 PX_DMA_TYPE(mp)); 1087 return (DDI_FAILURE); 1088 } 1089 if (cookiep) 1090 DBG(DBG_DMA_WIN, dip, 1091 "cookie - dmac_address=%x dmac_size=%x\n", 1092 cookiep->dmac_address, cookiep->dmac_size); 1093 if (offp) 1094 *offp = (off_t)mp->dmai_offset; 1095 if (lenp) 1096 *lenp = mp->dmai_size; 1097 return (DDI_SUCCESS); 1098 } 1099 1100 #ifdef DEBUG 1101 static char *px_dmactl_str[] = { 1102 "DDI_DMA_FREE", 1103 "DDI_DMA_SYNC", 1104 "DDI_DMA_HTOC", 1105 "DDI_DMA_KVADDR", 1106 "DDI_DMA_MOVWIN", 1107 "DDI_DMA_REPWIN", 1108 "DDI_DMA_GETERR", 1109 "DDI_DMA_COFF", 1110 "DDI_DMA_NEXTWIN", 1111 "DDI_DMA_NEXTSEG", 1112 "DDI_DMA_SEGTOC", 1113 "DDI_DMA_RESERVE", 1114 "DDI_DMA_RELEASE", 1115 "DDI_DMA_RESETH", 1116 "DDI_DMA_CKSYNC", 1117 "DDI_DMA_IOPB_ALLOC", 1118 "DDI_DMA_IOPB_FREE", 1119 "DDI_DMA_SMEM_ALLOC", 1120 "DDI_DMA_SMEM_FREE", 1121 "DDI_DMA_SET_SBUS64" 1122 }; 1123 #endif /* DEBUG */ 1124 1125 /* 1126 * bus dma control entry point: 1127 */ 1128 /*ARGSUSED*/ 1129 int 1130 px_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 1131 enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp, 1132 uint_t cache_flags) 1133 { 1134 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 1135 1136 #ifdef DEBUG 1137 DBG(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", px_dmactl_str[cmd], 1138 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1139 #endif /* DEBUG */ 1140 1141 switch (cmd) { 1142 case DDI_DMA_FREE: 1143 (void) px_dma_unbindhdl(dip, rdip, handle); 1144 (void) px_dma_freehdl(dip, rdip, handle); 1145 return (DDI_SUCCESS); 1146 case DDI_DMA_RESERVE: { 1147 px_t *px_p = DIP_TO_STATE(dip); 1148 return (px_fdvma_reserve(dip, rdip, px_p, 1149 (ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp)); 1150 } 1151 case DDI_DMA_RELEASE: { 1152 px_t *px_p = DIP_TO_STATE(dip); 1153 return (px_fdvma_release(dip, px_p, mp)); 1154 } 1155 default: 1156 break; 1157 } 1158 1159 switch (PX_DMA_TYPE(mp)) { 1160 case PX_DMAI_FLAGS_DVMA: 1161 return (px_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp, 1162 cache_flags)); 1163 case PX_DMAI_FLAGS_PTP: 1164 case PX_DMAI_FLAGS_BYPASS: 1165 return (px_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp, 1166 cache_flags)); 1167 default: 1168 cmn_err(CE_PANIC, "%s%d: px_dma_ctlops(%x):bad dma type %x", 1169 ddi_driver_name(rdip), ddi_get_instance(rdip), cmd, 1170 mp->dmai_flags); 1171 /*NOTREACHED*/ 1172 } 1173 return (0); 1174 } 1175 1176 /* 1177 * control ops entry point: 1178 * 1179 * Requests handled completely: 1180 * DDI_CTLOPS_INITCHILD see init_child() for details 1181 * DDI_CTLOPS_UNINITCHILD 1182 * DDI_CTLOPS_REPORTDEV see report_dev() for details 1183 * DDI_CTLOPS_IOMIN cache line size if streaming otherwise 1 1184 * DDI_CTLOPS_REGSIZE 1185 * DDI_CTLOPS_NREGS 1186 * DDI_CTLOPS_DVMAPAGESIZE 1187 * DDI_CTLOPS_POKE 1188 * DDI_CTLOPS_PEEK 1189 * 1190 * All others passed to parent. 1191 */ 1192 int 1193 px_ctlops(dev_info_t *dip, dev_info_t *rdip, 1194 ddi_ctl_enum_t op, void *arg, void *result) 1195 { 1196 px_t *px_p = DIP_TO_STATE(dip); 1197 struct detachspec *ds; 1198 struct attachspec *as; 1199 1200 switch (op) { 1201 case DDI_CTLOPS_INITCHILD: 1202 return (px_init_child(px_p, (dev_info_t *)arg)); 1203 1204 case DDI_CTLOPS_UNINITCHILD: 1205 return (px_uninit_child(px_p, (dev_info_t *)arg)); 1206 1207 case DDI_CTLOPS_ATTACH: 1208 as = (struct attachspec *)arg; 1209 switch (as->when) { 1210 case DDI_PRE: 1211 if (as->cmd == DDI_ATTACH) { 1212 DBG(DBG_PWR, dip, "PRE_ATTACH for %s@%d\n", 1213 ddi_driver_name(rdip), 1214 ddi_get_instance(rdip)); 1215 return (pcie_pm_hold(dip)); 1216 } 1217 if (as->cmd == DDI_RESUME) { 1218 ddi_acc_handle_t config_handle; 1219 DBG(DBG_PWR, dip, "PRE_RESUME for %s@%d\n", 1220 ddi_driver_name(rdip), 1221 ddi_get_instance(rdip)); 1222 1223 if (pci_config_setup(rdip, &config_handle) == 1224 DDI_SUCCESS) { 1225 pcie_clear_errors(rdip, config_handle); 1226 pci_config_teardown(&config_handle); 1227 } 1228 } 1229 return (DDI_SUCCESS); 1230 1231 case DDI_POST: 1232 DBG(DBG_PWR, dip, "POST_ATTACH for %s@%d\n", 1233 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1234 if (as->cmd == DDI_ATTACH && as->result != DDI_SUCCESS) 1235 pcie_pm_release(dip); 1236 1237 (void) pcie_postattach_child(rdip); 1238 1239 return (DDI_SUCCESS); 1240 default: 1241 break; 1242 } 1243 break; 1244 1245 case DDI_CTLOPS_DETACH: 1246 ds = (struct detachspec *)arg; 1247 switch (ds->when) { 1248 case DDI_POST: 1249 if (ds->cmd == DDI_DETACH && 1250 ds->result == DDI_SUCCESS) { 1251 DBG(DBG_PWR, dip, "POST_DETACH for %s@%d\n", 1252 ddi_driver_name(rdip), 1253 ddi_get_instance(rdip)); 1254 return (pcie_pm_remove_child(dip, rdip)); 1255 } 1256 return (DDI_SUCCESS); 1257 default: 1258 break; 1259 } 1260 break; 1261 1262 case DDI_CTLOPS_REPORTDEV: 1263 return (px_report_dev(rdip)); 1264 1265 case DDI_CTLOPS_IOMIN: 1266 return (DDI_SUCCESS); 1267 1268 case DDI_CTLOPS_REGSIZE: 1269 *((off_t *)result) = px_get_reg_set_size(rdip, *((int *)arg)); 1270 return (*((off_t *)result) == 0 ? DDI_FAILURE : DDI_SUCCESS); 1271 1272 case DDI_CTLOPS_NREGS: 1273 *((uint_t *)result) = px_get_nreg_set(rdip); 1274 return (DDI_SUCCESS); 1275 1276 case DDI_CTLOPS_DVMAPAGESIZE: 1277 *((ulong_t *)result) = MMU_PAGE_SIZE; 1278 return (DDI_SUCCESS); 1279 1280 case DDI_CTLOPS_POKE: /* platform dependent implementation. */ 1281 return (px_lib_ctlops_poke(dip, rdip, 1282 (peekpoke_ctlops_t *)arg)); 1283 1284 case DDI_CTLOPS_PEEK: /* platform dependent implementation. */ 1285 return (px_lib_ctlops_peek(dip, rdip, 1286 (peekpoke_ctlops_t *)arg, result)); 1287 1288 case DDI_CTLOPS_POWER: 1289 default: 1290 break; 1291 } 1292 1293 /* 1294 * Now pass the request up to our parent. 1295 */ 1296 DBG(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n", 1297 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1298 return (ddi_ctlops(dip, rdip, op, arg, result)); 1299 } 1300 1301 /* ARGSUSED */ 1302 int 1303 px_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op, 1304 ddi_intr_handle_impl_t *hdlp, void *result) 1305 { 1306 int intr_types, ret = DDI_SUCCESS; 1307 1308 DBG(DBG_INTROPS, dip, "px_intr_ops: rdip=%s%d\n", 1309 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1310 1311 /* Process DDI_INTROP_SUPPORTED_TYPES request here */ 1312 if (intr_op == DDI_INTROP_SUPPORTED_TYPES) { 1313 *(int *)result = i_ddi_get_intx_nintrs(rdip) ? 1314 DDI_INTR_TYPE_FIXED : 0; 1315 1316 if ((pci_msi_get_supported_type(rdip, 1317 &intr_types)) == DDI_SUCCESS) { 1318 /* 1319 * Double check supported interrupt types vs. 1320 * what the host bridge supports. 1321 */ 1322 *(int *)result |= intr_types; 1323 } 1324 1325 return (ret); 1326 } 1327 1328 /* 1329 * PCI-E nexus driver supports fixed, MSI and MSI-X interrupts. 1330 * Return failure if interrupt type is not supported. 1331 */ 1332 switch (hdlp->ih_type) { 1333 case DDI_INTR_TYPE_FIXED: 1334 ret = px_intx_ops(dip, rdip, intr_op, hdlp, result); 1335 break; 1336 case DDI_INTR_TYPE_MSI: 1337 case DDI_INTR_TYPE_MSIX: 1338 ret = px_msix_ops(dip, rdip, intr_op, hdlp, result); 1339 break; 1340 default: 1341 ret = DDI_ENOTSUP; 1342 break; 1343 } 1344 1345 return (ret); 1346 } 1347 1348 static uint_t 1349 px_init_hotplug(px_t *px_p) 1350 { 1351 px_bus_range_t bus_range; 1352 dev_info_t *dip; 1353 pciehpc_regops_t regops; 1354 1355 dip = px_p->px_dip; 1356 1357 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1358 "hotplug-capable") == 0) 1359 return (DDI_FAILURE); 1360 1361 /* 1362 * Before initializing hotplug - open up bus range. The busra 1363 * module will initialize its pool of bus numbers from this. 1364 * "busra" will be the agent that keeps track of them during 1365 * hotplug. Also, note, that busra will remove any bus numbers 1366 * already in use from boot time. 1367 */ 1368 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1369 "bus-range") == 0) { 1370 cmn_err(CE_WARN, "%s%d: bus-range not found\n", 1371 ddi_driver_name(dip), ddi_get_instance(dip)); 1372 #ifdef DEBUG 1373 bus_range.lo = 0x0; 1374 bus_range.hi = 0xff; 1375 1376 if (ndi_prop_update_int_array(DDI_DEV_T_NONE, 1377 dip, "bus-range", (int *)&bus_range, 2) 1378 != DDI_PROP_SUCCESS) { 1379 return (DDI_FAILURE); 1380 } 1381 #else 1382 return (DDI_FAILURE); 1383 #endif 1384 } 1385 1386 if (px_lib_hotplug_init(dip, (void *)®ops) != DDI_SUCCESS) 1387 return (DDI_FAILURE); 1388 1389 if (pciehpc_init(dip, ®ops) != DDI_SUCCESS) { 1390 px_lib_hotplug_uninit(dip); 1391 return (DDI_FAILURE); 1392 } 1393 1394 if (pcihp_init(dip) != DDI_SUCCESS) { 1395 (void) pciehpc_uninit(dip); 1396 px_lib_hotplug_uninit(dip); 1397 return (DDI_FAILURE); 1398 } 1399 1400 if (pcihp_get_cb_ops() != NULL) { 1401 DBG(DBG_ATTACH, dip, "%s%d hotplug enabled", 1402 ddi_driver_name(dip), ddi_get_instance(dip)); 1403 px_p->px_dev_caps |= PX_HOTPLUG_CAPABLE; 1404 } 1405 1406 return (DDI_SUCCESS); 1407 } 1408 1409 static uint_t 1410 px_uninit_hotplug(dev_info_t *dip) 1411 { 1412 if (pcihp_uninit(dip) != DDI_SUCCESS) 1413 return (DDI_FAILURE); 1414 1415 if (pciehpc_uninit(dip) != DDI_SUCCESS) 1416 return (DDI_FAILURE); 1417 1418 px_lib_hotplug_uninit(dip); 1419 1420 return (DDI_SUCCESS); 1421 } 1422