1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * PCI Express nexus driver interface 31 */ 32 33 #include <sys/types.h> 34 #include <sys/conf.h> /* nulldev */ 35 #include <sys/stat.h> /* devctl */ 36 #include <sys/kmem.h> 37 #include <sys/sunddi.h> 38 #include <sys/sunndi.h> 39 #include <sys/hotplug/pci/pcihp.h> 40 #include <sys/ontrap.h> 41 #include <sys/ddi_impldefs.h> 42 #include <sys/ddi_subrdefs.h> 43 #include <sys/epm.h> 44 #include <sys/iommutsb.h> 45 #include "px_obj.h" 46 #include "pcie_pwr.h" 47 48 /*LINTLIBRARY*/ 49 50 /* 51 * function prototypes for dev ops routines: 52 */ 53 static int px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 54 static int px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 55 static int px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, 56 void *arg, void **result); 57 static int px_pwr_setup(dev_info_t *dip); 58 static void px_pwr_teardown(dev_info_t *dip); 59 60 /* 61 * bus ops and dev ops structures: 62 */ 63 static struct bus_ops px_bus_ops = { 64 BUSO_REV, 65 px_map, 66 0, 67 0, 68 0, 69 i_ddi_map_fault, 70 px_dma_setup, 71 px_dma_allochdl, 72 px_dma_freehdl, 73 px_dma_bindhdl, 74 px_dma_unbindhdl, 75 px_lib_dma_sync, 76 px_dma_win, 77 px_dma_ctlops, 78 px_ctlops, 79 ddi_bus_prop_op, 80 ndi_busop_get_eventcookie, 81 ndi_busop_add_eventcall, 82 ndi_busop_remove_eventcall, 83 ndi_post_event, 84 NULL, 85 NULL, /* (*bus_config)(); */ 86 NULL, /* (*bus_unconfig)(); */ 87 px_fm_init_child, /* (*bus_fm_init)(); */ 88 NULL, /* (*bus_fm_fini)(); */ 89 px_bus_enter, /* (*bus_fm_access_enter)(); */ 90 px_bus_exit, /* (*bus_fm_access_fini)(); */ 91 pcie_bus_power, /* (*bus_power)(); */ 92 px_intr_ops /* (*bus_intr_op)(); */ 93 }; 94 95 extern struct cb_ops px_cb_ops; 96 97 static struct dev_ops px_ops = { 98 DEVO_REV, 99 0, 100 px_info, 101 nulldev, 102 0, 103 px_attach, 104 px_detach, 105 nodev, 106 &px_cb_ops, 107 &px_bus_ops, 108 nulldev 109 }; 110 111 /* 112 * module definitions: 113 */ 114 #include <sys/modctl.h> 115 extern struct mod_ops mod_driverops; 116 117 static struct modldrv modldrv = { 118 &mod_driverops, /* Type of module - driver */ 119 "PCI Express nexus driver %I%", /* Name of module. */ 120 &px_ops, /* driver ops */ 121 }; 122 123 static struct modlinkage modlinkage = { 124 MODREV_1, (void *)&modldrv, NULL 125 }; 126 127 /* driver soft state */ 128 void *px_state_p; 129 130 int 131 _init(void) 132 { 133 int e; 134 135 /* 136 * Initialize per-px bus soft state pointer. 137 */ 138 e = ddi_soft_state_init(&px_state_p, sizeof (px_t), 1); 139 if (e != DDI_SUCCESS) 140 return (e); 141 142 /* 143 * Install the module. 144 */ 145 e = mod_install(&modlinkage); 146 if (e != DDI_SUCCESS) 147 ddi_soft_state_fini(&px_state_p); 148 return (e); 149 } 150 151 int 152 _fini(void) 153 { 154 int e; 155 156 /* 157 * Remove the module. 158 */ 159 e = mod_remove(&modlinkage); 160 if (e != DDI_SUCCESS) 161 return (e); 162 163 /* Free px soft state */ 164 ddi_soft_state_fini(&px_state_p); 165 166 return (e); 167 } 168 169 int 170 _info(struct modinfo *modinfop) 171 { 172 return (mod_info(&modlinkage, modinfop)); 173 } 174 175 /* ARGSUSED */ 176 static int 177 px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 178 { 179 int instance = getminor((dev_t)arg); 180 px_t *px_p = INST_TO_STATE(instance); 181 182 #ifdef HOTPLUG 183 /* 184 * Allow hotplug to deal with ones it manages 185 * Hot Plug will be done later. 186 */ 187 if (px_p && (px_p->hotplug_capable == B_TRUE)) 188 return (pcihp_info(dip, infocmd, arg, result)); 189 #endif /* HOTPLUG */ 190 191 /* non-hotplug or not attached */ 192 switch (infocmd) { 193 case DDI_INFO_DEVT2INSTANCE: 194 *result = (void *)instance; 195 return (DDI_SUCCESS); 196 197 case DDI_INFO_DEVT2DEVINFO: 198 if (px_p == NULL) 199 return (DDI_FAILURE); 200 *result = (void *)px_p->px_dip; 201 return (DDI_SUCCESS); 202 203 default: 204 return (DDI_FAILURE); 205 } 206 } 207 208 /* device driver entry points */ 209 /* 210 * attach entry point: 211 */ 212 /*ARGSUSED*/ 213 static int 214 px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 215 { 216 px_t *px_p; /* per bus state pointer */ 217 int instance = DIP_TO_INST(dip); 218 int ret = DDI_SUCCESS; 219 devhandle_t dev_hdl = NULL; 220 221 switch (cmd) { 222 case DDI_ATTACH: 223 DBG(DBG_ATTACH, dip, "DDI_ATTACH\n"); 224 225 /* 226 * Allocate and get the per-px soft state structure. 227 */ 228 if (ddi_soft_state_zalloc(px_state_p, instance) 229 != DDI_SUCCESS) { 230 cmn_err(CE_WARN, "%s%d: can't allocate px state", 231 ddi_driver_name(dip), instance); 232 goto err_bad_px_softstate; 233 } 234 px_p = INST_TO_STATE(instance); 235 px_p->px_dip = dip; 236 mutex_init(&px_p->px_mutex, NULL, MUTEX_DRIVER, NULL); 237 px_p->px_soft_state = PX_SOFT_STATE_CLOSED; 238 px_p->px_open_count = 0; 239 240 /* 241 * Get key properties of the pci bridge node and 242 * determine it's type (psycho, schizo, etc ...). 243 */ 244 if (px_get_props(px_p, dip) == DDI_FAILURE) 245 goto err_bad_px_prop; 246 247 if ((px_fm_attach(px_p)) != DDI_SUCCESS) 248 goto err_bad_fm; 249 250 if (px_lib_dev_init(dip, &dev_hdl) != DDI_SUCCESS) 251 goto err_bad_dev_init; 252 253 /* Initilize device handle */ 254 px_p->px_dev_hdl = dev_hdl; 255 256 /* 257 * Initialize interrupt block. Note that this 258 * initialize error handling for the PEC as well. 259 */ 260 if ((ret = px_ib_attach(px_p)) != DDI_SUCCESS) 261 goto err_bad_ib; 262 263 if (px_cb_attach(px_p) != DDI_SUCCESS) 264 goto err_bad_cb; 265 266 /* 267 * Start creating the modules. 268 * Note that attach() routines should 269 * register and enable their own interrupts. 270 */ 271 272 if ((px_mmu_attach(px_p)) != DDI_SUCCESS) 273 goto err_bad_mmu; 274 275 if ((px_msiq_attach(px_p)) != DDI_SUCCESS) 276 goto err_bad_msiq; 277 278 if ((px_msi_attach(px_p)) != DDI_SUCCESS) 279 goto err_bad_msi; 280 281 if ((px_pec_attach(px_p)) != DDI_SUCCESS) 282 goto err_bad_pec; 283 284 if ((px_dma_attach(px_p)) != DDI_SUCCESS) 285 goto err_bad_pec; /* nothing to uninitialize on DMA */ 286 287 /* 288 * All of the error handlers have been registered 289 * by now so it's time to activate the interrupt. 290 */ 291 if ((ret = px_err_add_intr(&px_p->px_fault)) != DDI_SUCCESS) 292 goto err_bad_pec_add_intr; 293 294 /* 295 * Create the "devctl" node for hotplug and pcitool support. 296 * For non-hotplug bus, we still need ":devctl" to 297 * support DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls. 298 */ 299 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, 300 PCIHP_AP_MINOR_NUM(instance, PCIHP_DEVCTL_MINOR), 301 DDI_NT_NEXUS, 0) != DDI_SUCCESS) { 302 goto err_bad_devctl_node; 303 } 304 /* 305 * power management setup. Even if it fails, attach will 306 * succeed as this is a optional feature. Since we are 307 * always at full power, this is not critical. 308 */ 309 if (pwr_common_setup(dip) != DDI_SUCCESS) { 310 DBG(DBG_PWR, dip, "pwr_common_setup failed\n"); 311 } else if (px_pwr_setup(dip) != DDI_SUCCESS) { 312 DBG(DBG_PWR, dip, "px_pwr_setup failed \n"); 313 pwr_common_teardown(dip); 314 } 315 316 ddi_report_dev(dip); 317 318 px_p->px_state = PX_ATTACHED; 319 DBG(DBG_ATTACH, dip, "attach success\n"); 320 break; 321 322 err_bad_devctl_node: 323 px_err_rem_intr(&px_p->px_fault); 324 err_bad_pec_add_intr: 325 px_pec_detach(px_p); 326 err_bad_pec: 327 px_msi_detach(px_p); 328 err_bad_msi: 329 px_msiq_detach(px_p); 330 err_bad_msiq: 331 px_mmu_detach(px_p); 332 err_bad_mmu: 333 px_cb_detach(px_p); 334 err_bad_cb: 335 px_ib_detach(px_p); 336 err_bad_ib: 337 (void) px_lib_dev_fini(dip); 338 err_bad_dev_init: 339 px_fm_detach(px_p); 340 err_bad_fm: 341 px_free_props(px_p); 342 err_bad_px_prop: 343 mutex_destroy(&px_p->px_mutex); 344 ddi_soft_state_free(px_state_p, instance); 345 err_bad_px_softstate: 346 ret = DDI_FAILURE; 347 break; 348 349 case DDI_RESUME: 350 DBG(DBG_ATTACH, dip, "DDI_RESUME\n"); 351 352 px_p = INST_TO_STATE(instance); 353 354 mutex_enter(&px_p->px_mutex); 355 356 /* suspend might have not succeeded */ 357 if (px_p->px_state != PX_SUSPENDED) { 358 DBG(DBG_ATTACH, px_p->px_dip, 359 "instance NOT suspended\n"); 360 ret = DDI_FAILURE; 361 break; 362 } 363 364 px_lib_resume(dip); 365 (void) pcie_pwr_resume(dip); 366 px_p->px_state = PX_ATTACHED; 367 368 mutex_exit(&px_p->px_mutex); 369 370 break; 371 default: 372 DBG(DBG_ATTACH, dip, "unsupported attach op\n"); 373 ret = DDI_FAILURE; 374 break; 375 } 376 377 return (ret); 378 } 379 380 /* 381 * detach entry point: 382 */ 383 /*ARGSUSED*/ 384 static int 385 px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 386 { 387 int instance = ddi_get_instance(dip); 388 px_t *px_p = INST_TO_STATE(instance); 389 int ret; 390 391 /* 392 * Make sure we are currently attached 393 */ 394 if (px_p->px_state != PX_ATTACHED) { 395 DBG(DBG_DETACH, dip, "failed - instance not attached\n"); 396 return (DDI_FAILURE); 397 } 398 399 mutex_enter(&px_p->px_mutex); 400 401 switch (cmd) { 402 case DDI_DETACH: 403 DBG(DBG_DETACH, dip, "DDI_DETACH\n"); 404 405 #ifdef HOTPLUG 406 /* 407 * Hot plug will be done later. 408 */ 409 if (px_p->hotplug_capable == B_TRUE) { 410 if (pxhp_uninit(dip) == DDI_FAILURE) { 411 mutex_exit(&px_p->px_mutex); 412 return (DDI_FAILURE); 413 } 414 } 415 #endif /* HOTPLUG */ 416 417 /* 418 * things which used to be done in obj_destroy 419 * are now in-lined here. 420 */ 421 422 px_p->px_state = PX_DETACHED; 423 424 ddi_remove_minor_node(dip, "devctl"); 425 px_err_rem_intr(&px_p->px_fault); 426 px_pec_detach(px_p); 427 px_msi_detach(px_p); 428 px_msiq_detach(px_p); 429 px_mmu_detach(px_p); 430 px_cb_detach(px_p); 431 px_ib_detach(px_p); 432 (void) px_lib_dev_fini(dip); 433 px_fm_detach(px_p); 434 435 /* 436 * Free the px soft state structure and the rest of the 437 * resources it's using. 438 */ 439 px_free_props(px_p); 440 px_pwr_teardown(dip); 441 pwr_common_teardown(dip); 442 mutex_exit(&px_p->px_mutex); 443 mutex_destroy(&px_p->px_mutex); 444 ddi_soft_state_free(px_state_p, instance); 445 446 /* Free the interrupt-priorities prop if we created it. */ { 447 int len; 448 449 if (ddi_getproplen(DDI_DEV_T_ANY, dip, 450 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 451 "interrupt-priorities", &len) == DDI_PROP_SUCCESS) 452 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, 453 "interrupt-priorities"); 454 } 455 456 px_p->px_dev_hdl = NULL; 457 458 return (DDI_SUCCESS); 459 460 case DDI_SUSPEND: 461 if (pcie_pwr_suspend(dip) != DDI_SUCCESS) { 462 mutex_exit(&px_p->px_mutex); 463 return (DDI_FAILURE); 464 } 465 if ((ret = px_lib_suspend(dip)) == DDI_SUCCESS) 466 px_p->px_state = PX_SUSPENDED; 467 mutex_exit(&px_p->px_mutex); 468 469 return (ret); 470 471 default: 472 DBG(DBG_DETACH, dip, "unsupported detach op\n"); 473 mutex_exit(&px_p->px_mutex); 474 return (DDI_FAILURE); 475 } 476 } 477 478 /* 479 * power management related initialization specific to px 480 * called by px_attach() 481 */ 482 static int 483 px_pwr_setup(dev_info_t *dip) 484 { 485 pcie_pwr_t *pwr_p; 486 ddi_intr_handle_impl_t hdl; 487 488 ASSERT(PCIE_PMINFO(dip)); 489 pwr_p = PCIE_NEXUS_PMINFO(dip); 490 ASSERT(pwr_p); 491 492 /* 493 * indicate support LDI (Layered Driver Interface) 494 * Create the property, if it is not already there 495 */ 496 if (!ddi_prop_exists(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS, 497 DDI_KERNEL_IOCTL)) { 498 if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 499 DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS) { 500 DBG(DBG_PWR, dip, "can't create kernel ioctl prop\n"); 501 return (DDI_FAILURE); 502 } 503 } 504 /* No support for device PM. We are always at full power */ 505 pwr_p->pwr_func_lvl = PM_LEVEL_D0; 506 507 /* we know that we are only a low pil interrupt */ 508 mutex_init(&pwr_p->pwr_intr_lock, NULL, MUTEX_DRIVER, NULL); 509 cv_init(&pwr_p->pwr_cv, NULL, CV_DRIVER, NULL); 510 511 /* Initilize handle */ 512 hdl.ih_ver = DDI_INTR_VERSION; 513 hdl.ih_state = DDI_IHDL_STATE_ALLOC; 514 hdl.ih_dip = dip; 515 hdl.ih_inum = 0; 516 hdl.ih_pri = px_pwr_pil; 517 518 /* Add PME_TO_ACK message handler */ 519 hdl.ih_cb_func = (ddi_intr_handler_t *)pcie_pwr_intr; 520 hdl.ih_cb_arg1 = pwr_p; 521 hdl.ih_cb_arg2 = NULL; 522 if (px_add_msiq_intr(dip, dip, &hdl, MSG_REC, 523 (msgcode_t)PCIE_PME_ACK_MSG, &pwr_p->pwr_msiq_id) != DDI_SUCCESS) { 524 DBG(DBG_PWR, dip, "px_pwr_setup: couldn't add intr\n"); 525 goto px_pwrsetup_err; 526 } 527 528 px_lib_msg_setmsiq(dip, PCIE_PME_ACK_MSG, pwr_p->pwr_msiq_id); 529 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_VALID); 530 531 return (DDI_SUCCESS); 532 533 px_pwrsetup_err: 534 cv_destroy(&pwr_p->pwr_cv); 535 mutex_destroy(&pwr_p->pwr_intr_lock); 536 return (DDI_FAILURE); 537 } 538 539 /* 540 * undo whatever is done in px_pwr_setup. called by px_detach() 541 */ 542 static void 543 px_pwr_teardown(dev_info_t *dip) 544 { 545 pcie_pwr_t *pwr_p; 546 ddi_intr_handle_impl_t hdl; 547 548 if (!PCIE_PMINFO(dip) || !(pwr_p = PCIE_NEXUS_PMINFO(dip))) 549 return; 550 551 DBG(DBG_MSG, dip, "px_pwr_teardown: msiq_id 0x%x\n", 552 pwr_p->pwr_msiq_id); 553 554 /* Initilize handle */ 555 hdl.ih_ver = DDI_INTR_VERSION; 556 hdl.ih_state = DDI_IHDL_STATE_ALLOC; 557 hdl.ih_dip = dip; 558 hdl.ih_inum = 0; 559 560 px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID); 561 (void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG, 562 pwr_p->pwr_msiq_id); 563 564 pwr_p->pwr_msiq_id = -1; 565 566 cv_destroy(&pwr_p->pwr_cv); 567 mutex_destroy(&pwr_p->pwr_intr_lock); 568 } 569 570 /* bus driver entry points */ 571 572 /* 573 * bus map entry point: 574 * 575 * if map request is for an rnumber 576 * get the corresponding regspec from device node 577 * build a new regspec in our parent's format 578 * build a new map_req with the new regspec 579 * call up the tree to complete the mapping 580 */ 581 int 582 px_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, 583 off_t off, off_t len, caddr_t *addrp) 584 { 585 px_t *px_p = DIP_TO_STATE(dip); 586 struct regspec p_regspec; 587 ddi_map_req_t p_mapreq; 588 int reglen, rval, r_no; 589 pci_regspec_t reloc_reg, *rp = &reloc_reg; 590 591 DBG(DBG_MAP, dip, "rdip=%s%d:", 592 ddi_driver_name(rdip), ddi_get_instance(rdip)); 593 594 if (mp->map_flags & DDI_MF_USER_MAPPING) 595 return (DDI_ME_UNIMPLEMENTED); 596 597 switch (mp->map_type) { 598 case DDI_MT_REGSPEC: 599 reloc_reg = *(pci_regspec_t *)mp->map_obj.rp; /* dup whole */ 600 break; 601 602 case DDI_MT_RNUMBER: 603 r_no = mp->map_obj.rnumber; 604 DBG(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no); 605 606 if (ddi_getlongprop(DDI_DEV_T_NONE, rdip, DDI_PROP_DONTPASS, 607 "reg", (caddr_t)&rp, ®len) != DDI_SUCCESS) 608 return (DDI_ME_RNUMBER_RANGE); 609 610 if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) { 611 kmem_free(rp, reglen); 612 return (DDI_ME_RNUMBER_RANGE); 613 } 614 rp += r_no; 615 break; 616 617 default: 618 return (DDI_ME_INVAL); 619 } 620 DBG(DBG_MAP | DBG_CONT, dip, "\n"); 621 622 if ((rp->pci_phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) { 623 /* 624 * There may be a need to differentiate between PCI 625 * and PCI-Ex devices so the following range check is 626 * done correctly, depending on the implementation of 627 * px_pci bridge nexus driver. 628 */ 629 if ((off >= PCIE_CONF_HDR_SIZE) || 630 (len > PCIE_CONF_HDR_SIZE) || 631 (off + len > PCIE_CONF_HDR_SIZE)) 632 return (DDI_ME_INVAL); 633 /* 634 * the following function returning a DDI_FAILURE assumes 635 * that there are no virtual config space access services 636 * defined in this layer. Otherwise it is availed right 637 * here and we return. 638 */ 639 rval = px_lib_map_vconfig(dip, mp, off, rp, addrp); 640 if (rval == DDI_SUCCESS) 641 goto done; 642 } 643 644 /* 645 * No virtual config space services or we are mapping 646 * a region of memory mapped config/IO/memory space, so proceed 647 * to the parent. 648 */ 649 650 /* relocate within 64-bit pci space through "assigned-addresses" */ 651 if (rval = px_reloc_reg(dip, rdip, px_p, rp)) 652 goto done; 653 654 if (len) /* adjust regspec according to mapping request */ 655 rp->pci_size_low = len; /* MIN ? */ 656 rp->pci_phys_low += off; 657 658 /* translate relocated pci regspec into parent space through "ranges" */ 659 if (rval = px_xlate_reg(px_p, rp, &p_regspec)) 660 goto done; 661 662 p_mapreq = *mp; /* dup the whole structure */ 663 p_mapreq.map_type = DDI_MT_REGSPEC; 664 p_mapreq.map_obj.rp = &p_regspec; 665 rval = ddi_map(dip, &p_mapreq, 0, 0, addrp); 666 667 if (rval == DDI_SUCCESS) { 668 /* 669 * Set-up access functions for FM access error capable drivers. 670 */ 671 if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) && 672 mp->map_handlep->ah_acc.devacc_attr_access != 673 DDI_DEFAULT_ACC) 674 px_fm_acc_setup(mp, rdip); 675 } 676 677 done: 678 if (mp->map_type == DDI_MT_RNUMBER) 679 kmem_free(rp - r_no, reglen); 680 681 return (rval); 682 } 683 684 /* 685 * bus dma map entry point 686 * return value: 687 * DDI_DMA_PARTIAL_MAP 1 688 * DDI_DMA_MAPOK 0 689 * DDI_DMA_MAPPED 0 690 * DDI_DMA_NORESOURCES -1 691 * DDI_DMA_NOMAPPING -2 692 * DDI_DMA_TOOBIG -3 693 */ 694 int 695 px_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq, 696 ddi_dma_handle_t *handlep) 697 { 698 px_t *px_p = DIP_TO_STATE(dip); 699 px_mmu_t *mmu_p = px_p->px_mmu_p; 700 ddi_dma_impl_t *mp; 701 int ret; 702 703 DBG(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n", 704 ddi_driver_name(rdip), ddi_get_instance(rdip), 705 handlep ? "alloc" : "advisory"); 706 707 if (!(mp = px_dma_lmts2hdl(dip, rdip, mmu_p, dmareq))) 708 return (DDI_DMA_NORESOURCES); 709 if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING) 710 return (DDI_DMA_NOMAPPING); 711 if (ret = px_dma_type(px_p, dmareq, mp)) 712 goto freehandle; 713 if (ret = px_dma_pfn(px_p, dmareq, mp)) 714 goto freehandle; 715 716 switch (PX_DMA_TYPE(mp)) { 717 case DMAI_FLAGS_DVMA: /* LINTED E_EQUALITY_NOT_ASSIGNMENT */ 718 if ((ret = px_dvma_win(px_p, dmareq, mp)) || !handlep) 719 goto freehandle; 720 if (!PX_DMA_CANCACHE(mp)) { /* try fast track */ 721 if (PX_DMA_CANFAST(mp)) { 722 if (!px_dvma_map_fast(mmu_p, mp)) 723 break; 724 /* LINTED E_NOP_ELSE_STMT */ 725 } else { 726 PX_DVMA_FASTTRAK_PROF(mp); 727 } 728 } 729 if (ret = px_dvma_map(mp, dmareq, mmu_p)) 730 goto freehandle; 731 break; 732 case DMAI_FLAGS_PTP: /* LINTED E_EQUALITY_NOT_ASSIGNMENT */ 733 if ((ret = px_dma_physwin(px_p, dmareq, mp)) || !handlep) 734 goto freehandle; 735 break; 736 case DMAI_FLAGS_BYPASS: 737 default: 738 cmn_err(CE_PANIC, "%s%d: px_dma_setup: bad dma type 0x%x", 739 ddi_driver_name(rdip), ddi_get_instance(rdip), 740 PX_DMA_TYPE(mp)); 741 /*NOTREACHED*/ 742 } 743 *handlep = (ddi_dma_handle_t)mp; 744 mp->dmai_flags |= DMAI_FLAGS_INUSE; 745 px_dump_dma_handle(DBG_DMA_MAP, dip, mp); 746 747 return ((mp->dmai_nwin == 1) ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP); 748 freehandle: 749 if (ret == DDI_DMA_NORESOURCES) 750 px_dma_freemp(mp); /* don't run_callback() */ 751 else 752 (void) px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp); 753 return (ret); 754 } 755 756 757 /* 758 * bus dma alloc handle entry point: 759 */ 760 int 761 px_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp, 762 int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 763 { 764 px_t *px_p = DIP_TO_STATE(dip); 765 ddi_dma_impl_t *mp; 766 int rval; 767 768 DBG(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n", 769 ddi_driver_name(rdip), ddi_get_instance(rdip)); 770 771 if (attrp->dma_attr_version != DMA_ATTR_V0) 772 return (DDI_DMA_BADATTR); 773 774 if (!(mp = px_dma_allocmp(dip, rdip, waitfp, arg))) 775 return (DDI_DMA_NORESOURCES); 776 777 /* 778 * Save requestor's information 779 */ 780 mp->dmai_attr = *attrp; /* whole object - augmented later */ 781 *DEV_ATTR(mp) = *attrp; /* whole object - device orig attr */ 782 DBG(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp); 783 784 /* check and convert dma attributes to handle parameters */ 785 if (rval = px_dma_attr2hdl(px_p, mp)) { 786 px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp); 787 *handlep = NULL; 788 return (rval); 789 } 790 *handlep = (ddi_dma_handle_t)mp; 791 return (DDI_SUCCESS); 792 } 793 794 795 /* 796 * bus dma free handle entry point: 797 */ 798 /*ARGSUSED*/ 799 int 800 px_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 801 { 802 DBG(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n", 803 ddi_driver_name(rdip), ddi_get_instance(rdip), handle); 804 px_dma_freemp((ddi_dma_impl_t *)handle); 805 806 if (px_kmem_clid) { 807 DBG(DBG_DMA_FREEH, dip, "run handle callback\n"); 808 ddi_run_callback(&px_kmem_clid); 809 } 810 return (DDI_SUCCESS); 811 } 812 813 814 /* 815 * bus dma bind handle entry point: 816 */ 817 int 818 px_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 819 ddi_dma_handle_t handle, ddi_dma_req_t *dmareq, 820 ddi_dma_cookie_t *cookiep, uint_t *ccountp) 821 { 822 px_t *px_p = DIP_TO_STATE(dip); 823 px_mmu_t *mmu_p = px_p->px_mmu_p; 824 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 825 int ret; 826 827 DBG(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n", 828 ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq); 829 830 if (mp->dmai_flags & DMAI_FLAGS_INUSE) 831 return (DDI_DMA_INUSE); 832 833 ASSERT((mp->dmai_flags & ~DMAI_FLAGS_PRESERVE) == 0); 834 mp->dmai_flags |= DMAI_FLAGS_INUSE; 835 836 if (ret = px_dma_type(px_p, dmareq, mp)) 837 goto err; 838 if (ret = px_dma_pfn(px_p, dmareq, mp)) 839 goto err; 840 841 switch (PX_DMA_TYPE(mp)) { 842 case DMAI_FLAGS_DVMA: 843 if (ret = px_dvma_win(px_p, dmareq, mp)) 844 goto map_err; 845 if (!PX_DMA_CANCACHE(mp)) { /* try fast track */ 846 if (PX_DMA_CANFAST(mp)) { 847 if (!px_dvma_map_fast(mmu_p, mp)) 848 goto mapped; /*LINTED E_NOP_ELSE_STMT*/ 849 } else { 850 PX_DVMA_FASTTRAK_PROF(mp); 851 } 852 } 853 if (ret = px_dvma_map(mp, dmareq, mmu_p)) 854 goto map_err; 855 mapped: 856 *ccountp = 1; 857 MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size); 858 break; 859 case DMAI_FLAGS_BYPASS: 860 case DMAI_FLAGS_PTP: 861 if (ret = px_dma_physwin(px_p, dmareq, mp)) 862 goto map_err; 863 *ccountp = WINLST(mp)->win_ncookies; 864 *cookiep = *(ddi_dma_cookie_t *)(WINLST(mp) + 1); /* wholeobj */ 865 break; 866 default: 867 cmn_err(CE_PANIC, "%s%d: px_dma_bindhdl(%p): bad dma type", 868 ddi_driver_name(rdip), ddi_get_instance(rdip), mp); 869 /*NOTREACHED*/ 870 } 871 DBG(DBG_DMA_BINDH, dip, "cookie %llx+%x\n", cookiep->dmac_address, 872 cookiep->dmac_size); 873 px_dump_dma_handle(DBG_DMA_MAP, dip, mp); 874 875 /* insert dma handle into FMA cache */ 876 if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) 877 (void) ndi_fmc_insert(rdip, DMA_HANDLE, mp, NULL); 878 879 return (mp->dmai_nwin == 1 ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP); 880 map_err: 881 px_dma_freepfn(mp); 882 err: 883 mp->dmai_flags &= DMAI_FLAGS_PRESERVE; 884 return (ret); 885 } 886 887 888 /* 889 * bus dma unbind handle entry point: 890 */ 891 /*ARGSUSED*/ 892 int 893 px_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 894 { 895 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 896 px_t *px_p = DIP_TO_STATE(dip); 897 px_mmu_t *mmu_p = px_p->px_mmu_p; 898 899 DBG(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n", 900 ddi_driver_name(rdip), ddi_get_instance(rdip), handle); 901 if ((mp->dmai_flags & DMAI_FLAGS_INUSE) == 0) { 902 DBG(DBG_DMA_UNBINDH, dip, "handle not inuse\n"); 903 return (DDI_FAILURE); 904 } 905 906 /* remove dma handle from FMA cache */ 907 if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) { 908 if (DEVI(rdip)->devi_fmhdl != NULL && 909 DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap)) { 910 (void) ndi_fmc_remove(rdip, DMA_HANDLE, mp); 911 } 912 } 913 914 /* 915 * Here if the handle is using the iommu. Unload all the iommu 916 * translations. 917 */ 918 switch (PX_DMA_TYPE(mp)) { 919 case DMAI_FLAGS_DVMA: 920 px_mmu_unmap_window(mmu_p, mp); 921 px_dvma_unmap(mmu_p, mp); 922 px_dma_freepfn(mp); 923 break; 924 case DMAI_FLAGS_BYPASS: 925 case DMAI_FLAGS_PTP: 926 px_dma_freewin(mp); 927 break; 928 default: 929 cmn_err(CE_PANIC, "%s%d: px_dma_unbindhdl:bad dma type %p", 930 ddi_driver_name(rdip), ddi_get_instance(rdip), mp); 931 /*NOTREACHED*/ 932 } 933 if (mmu_p->mmu_dvma_clid != 0) { 934 DBG(DBG_DMA_UNBINDH, dip, "run dvma callback\n"); 935 ddi_run_callback(&mmu_p->mmu_dvma_clid); 936 } 937 if (px_kmem_clid) { 938 DBG(DBG_DMA_UNBINDH, dip, "run handle callback\n"); 939 ddi_run_callback(&px_kmem_clid); 940 } 941 mp->dmai_flags &= DMAI_FLAGS_PRESERVE; 942 943 return (DDI_SUCCESS); 944 } 945 946 /* 947 * bus dma win entry point: 948 */ 949 int 950 px_dma_win(dev_info_t *dip, dev_info_t *rdip, 951 ddi_dma_handle_t handle, uint_t win, off_t *offp, 952 size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp) 953 { 954 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 955 int ret; 956 957 DBG(DBG_DMA_WIN, dip, "rdip=%s%d\n", 958 ddi_driver_name(rdip), ddi_get_instance(rdip)); 959 960 px_dump_dma_handle(DBG_DMA_WIN, dip, mp); 961 if (win >= mp->dmai_nwin) { 962 DBG(DBG_DMA_WIN, dip, "%x out of range\n", win); 963 return (DDI_FAILURE); 964 } 965 966 switch (PX_DMA_TYPE(mp)) { 967 case DMAI_FLAGS_DVMA: 968 if (win != PX_DMA_CURWIN(mp)) { 969 px_t *px_p = DIP_TO_STATE(dip); 970 px_mmu_t *mmu_p = px_p->px_mmu_p; 971 px_mmu_unmap_window(mmu_p, mp); 972 973 /* map_window sets dmai_mapping/size/offset */ 974 px_mmu_map_window(mmu_p, mp, win); 975 if ((ret = px_mmu_map_window(mmu_p, 976 mp, win)) != DDI_SUCCESS) 977 return (ret); 978 } 979 if (cookiep) 980 MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, 981 mp->dmai_size); 982 if (ccountp) 983 *ccountp = 1; 984 break; 985 case DMAI_FLAGS_PTP: 986 case DMAI_FLAGS_BYPASS: { 987 int i; 988 ddi_dma_cookie_t *ck_p; 989 px_dma_win_t *win_p = mp->dmai_winlst; 990 991 for (i = 0; i < win; win_p = win_p->win_next, i++); 992 ck_p = (ddi_dma_cookie_t *)(win_p + 1); 993 *cookiep = *ck_p; 994 mp->dmai_offset = win_p->win_offset; 995 mp->dmai_size = win_p->win_size; 996 mp->dmai_mapping = ck_p->dmac_laddress; 997 mp->dmai_cookie = ck_p + 1; 998 win_p->win_curseg = 0; 999 if (ccountp) 1000 *ccountp = win_p->win_ncookies; 1001 } 1002 break; 1003 default: 1004 cmn_err(CE_WARN, "%s%d: px_dma_win:bad dma type 0x%x", 1005 ddi_driver_name(rdip), ddi_get_instance(rdip), 1006 PX_DMA_TYPE(mp)); 1007 return (DDI_FAILURE); 1008 } 1009 if (cookiep) 1010 DBG(DBG_DMA_WIN, dip, 1011 "cookie - dmac_address=%x dmac_size=%x\n", 1012 cookiep->dmac_address, cookiep->dmac_size); 1013 if (offp) 1014 *offp = (off_t)mp->dmai_offset; 1015 if (lenp) 1016 *lenp = mp->dmai_size; 1017 return (DDI_SUCCESS); 1018 } 1019 1020 #ifdef DEBUG 1021 static char *px_dmactl_str[] = { 1022 "DDI_DMA_FREE", 1023 "DDI_DMA_SYNC", 1024 "DDI_DMA_HTOC", 1025 "DDI_DMA_KVADDR", 1026 "DDI_DMA_MOVWIN", 1027 "DDI_DMA_REPWIN", 1028 "DDI_DMA_GETERR", 1029 "DDI_DMA_COFF", 1030 "DDI_DMA_NEXTWIN", 1031 "DDI_DMA_NEXTSEG", 1032 "DDI_DMA_SEGTOC", 1033 "DDI_DMA_RESERVE", 1034 "DDI_DMA_RELEASE", 1035 "DDI_DMA_RESETH", 1036 "DDI_DMA_CKSYNC", 1037 "DDI_DMA_IOPB_ALLOC", 1038 "DDI_DMA_IOPB_FREE", 1039 "DDI_DMA_SMEM_ALLOC", 1040 "DDI_DMA_SMEM_FREE", 1041 "DDI_DMA_SET_SBUS64" 1042 }; 1043 #endif /* DEBUG */ 1044 1045 /* 1046 * bus dma control entry point: 1047 */ 1048 /*ARGSUSED*/ 1049 int 1050 px_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 1051 enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp, 1052 uint_t cache_flags) 1053 { 1054 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 1055 1056 #ifdef DEBUG 1057 DBG(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", px_dmactl_str[cmd], 1058 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1059 #endif /* DEBUG */ 1060 1061 switch (cmd) { 1062 case DDI_DMA_FREE: 1063 (void) px_dma_unbindhdl(dip, rdip, handle); 1064 (void) px_dma_freehdl(dip, rdip, handle); 1065 return (DDI_SUCCESS); 1066 case DDI_DMA_RESERVE: { 1067 px_t *px_p = DIP_TO_STATE(dip); 1068 return (px_fdvma_reserve(dip, rdip, px_p, 1069 (ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp)); 1070 } 1071 case DDI_DMA_RELEASE: { 1072 px_t *px_p = DIP_TO_STATE(dip); 1073 return (px_fdvma_release(dip, px_p, mp)); 1074 } 1075 default: 1076 break; 1077 } 1078 1079 switch (PX_DMA_TYPE(mp)) { 1080 case DMAI_FLAGS_DVMA: 1081 return (px_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp, 1082 cache_flags)); 1083 case DMAI_FLAGS_PTP: 1084 case DMAI_FLAGS_BYPASS: 1085 return (px_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp, 1086 cache_flags)); 1087 default: 1088 cmn_err(CE_PANIC, "%s%d: px_dma_ctlops(%x):bad dma type %x", 1089 ddi_driver_name(rdip), ddi_get_instance(rdip), cmd, 1090 mp->dmai_flags); 1091 /*NOTREACHED*/ 1092 } 1093 } 1094 1095 /* 1096 * control ops entry point: 1097 * 1098 * Requests handled completely: 1099 * DDI_CTLOPS_INITCHILD see init_child() for details 1100 * DDI_CTLOPS_UNINITCHILD 1101 * DDI_CTLOPS_REPORTDEV see report_dev() for details 1102 * DDI_CTLOPS_XLATE_INTRS nothing to do 1103 * DDI_CTLOPS_IOMIN cache line size if streaming otherwise 1 1104 * DDI_CTLOPS_REGSIZE 1105 * DDI_CTLOPS_NREGS 1106 * DDI_CTLOPS_NINTRS 1107 * DDI_CTLOPS_DVMAPAGESIZE 1108 * DDI_CTLOPS_POKE 1109 * DDI_CTLOPS_PEEK 1110 * 1111 * All others passed to parent. 1112 */ 1113 int 1114 px_ctlops(dev_info_t *dip, dev_info_t *rdip, 1115 ddi_ctl_enum_t op, void *arg, void *result) 1116 { 1117 px_t *px_p = DIP_TO_STATE(dip); 1118 struct detachspec *ds; 1119 struct attachspec *as; 1120 1121 switch (op) { 1122 case DDI_CTLOPS_INITCHILD: 1123 return (px_init_child(px_p, (dev_info_t *)arg)); 1124 1125 case DDI_CTLOPS_UNINITCHILD: 1126 return (px_uninit_child(px_p, (dev_info_t *)arg)); 1127 1128 case DDI_CTLOPS_ATTACH: 1129 as = (struct attachspec *)arg; 1130 switch (as->when) { 1131 case DDI_PRE: 1132 if (as->cmd == DDI_ATTACH) { 1133 DBG(DBG_PWR, dip, "PRE_ATTACH for %s@%d\n", 1134 ddi_driver_name(rdip), 1135 ddi_get_instance(rdip)); 1136 return (pcie_pm_hold(dip)); 1137 } 1138 return (DDI_SUCCESS); 1139 1140 case DDI_POST: 1141 DBG(DBG_PWR, dip, "POST_ATTACH for %s@%d\n", 1142 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1143 if (as->cmd == DDI_ATTACH && as->result != DDI_SUCCESS) 1144 pcie_pm_release(dip); 1145 return (DDI_SUCCESS); 1146 default: 1147 break; 1148 } 1149 break; 1150 1151 case DDI_CTLOPS_DETACH: 1152 ds = (struct detachspec *)arg; 1153 switch (ds->when) { 1154 case DDI_POST: 1155 if (ds->cmd == DDI_DETACH && 1156 ds->result == DDI_SUCCESS) { 1157 DBG(DBG_PWR, dip, "POST_DETACH for %s@%d\n", 1158 ddi_driver_name(rdip), 1159 ddi_get_instance(rdip)); 1160 return (pcie_pm_remove_child(dip, rdip)); 1161 } 1162 return (DDI_SUCCESS); 1163 default: 1164 break; 1165 } 1166 break; 1167 1168 case DDI_CTLOPS_REPORTDEV: 1169 return (px_report_dev(rdip)); 1170 1171 case DDI_CTLOPS_IOMIN: 1172 return (DDI_SUCCESS); 1173 1174 case DDI_CTLOPS_REGSIZE: 1175 *((off_t *)result) = px_get_reg_set_size(rdip, *((int *)arg)); 1176 return (*((off_t *)result) == 0 ? DDI_FAILURE : DDI_SUCCESS); 1177 1178 case DDI_CTLOPS_NREGS: 1179 *((uint_t *)result) = px_get_nreg_set(rdip); 1180 return (DDI_SUCCESS); 1181 1182 case DDI_CTLOPS_DVMAPAGESIZE: 1183 *((ulong_t *)result) = MMU_PAGE_SIZE; 1184 return (DDI_SUCCESS); 1185 1186 case DDI_CTLOPS_POKE: /* platform dependent implementation. */ 1187 return (px_lib_ctlops_poke(dip, rdip, 1188 (peekpoke_ctlops_t *)arg)); 1189 1190 case DDI_CTLOPS_PEEK: /* platform dependent implementation. */ 1191 return (px_lib_ctlops_peek(dip, rdip, 1192 (peekpoke_ctlops_t *)arg, result)); 1193 1194 case DDI_CTLOPS_POWER: 1195 default: 1196 break; 1197 } 1198 1199 /* 1200 * Now pass the request up to our parent. 1201 */ 1202 DBG(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n", 1203 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1204 return (ddi_ctlops(dip, rdip, op, arg, result)); 1205 } 1206 1207 /* ARGSUSED */ 1208 int 1209 px_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op, 1210 ddi_intr_handle_impl_t *hdlp, void *result) 1211 { 1212 int intr_types, ret = DDI_SUCCESS; 1213 1214 DBG(DBG_INTROPS, dip, "px_intr_ops: rdip=%s%d\n", 1215 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1216 1217 /* Process DDI_INTROP_SUPPORTED_TYPES request here */ 1218 if (intr_op == DDI_INTROP_SUPPORTED_TYPES) { 1219 px_t *px_p = DIP_TO_STATE(dip); 1220 px_msi_state_t *msi_state_p = &px_p->px_ib_p->ib_msi_state; 1221 1222 *(int *)result = i_ddi_get_nintrs(rdip) ? 1223 DDI_INTR_TYPE_FIXED : 0; 1224 1225 if ((pci_msi_get_supported_type(rdip, 1226 &intr_types)) == DDI_SUCCESS) { 1227 /* 1228 * Double check supported interrupt types vs. 1229 * what the host bridge supports. 1230 */ 1231 *(int *)result |= (intr_types & msi_state_p->msi_type); 1232 } 1233 1234 return (ret); 1235 } 1236 1237 /* 1238 * PCI-E nexus driver supports fixed, MSI and MSI-X interrupts. 1239 * Return failure if interrupt type is not supported. 1240 */ 1241 switch (hdlp->ih_type) { 1242 case DDI_INTR_TYPE_FIXED: 1243 ret = px_intx_ops(dip, rdip, intr_op, hdlp, result); 1244 break; 1245 case DDI_INTR_TYPE_MSI: 1246 case DDI_INTR_TYPE_MSIX: 1247 ret = px_msix_ops(dip, rdip, intr_op, hdlp, result); 1248 break; 1249 default: 1250 ret = DDI_ENOTSUP; 1251 break; 1252 } 1253 1254 return (ret); 1255 } 1256