/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * PCI Express nexus driver interface
 */

#include <sys/types.h>
#include <sys/conf.h>		/* nulldev */
#include <sys/stat.h>		/* devctl */
#include <sys/kmem.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/hotplug/pci/pcihp.h>
#include <sys/ontrap.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi_subrdefs.h>
#include <sys/epm.h>
#include <sys/iommutsb.h>
#include <px_regs.h>
#include "px_obj.h"
#include "pcie_pwr.h"

/*LINTLIBRARY*/

/*
 * function prototypes for dev ops routines:
 */
static int px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int px_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
	void *arg, void **result);
static int px_pwr_setup(dev_info_t *dip);
static void px_pwr_teardown(dev_info_t *dip);

/*
 * bus ops and dev ops structures:
 */
static struct bus_ops px_bus_ops = {
	BUSO_REV,
	px_map,
	0,
	0,
	0,
	i_ddi_map_fault,
	px_dma_setup,
	px_dma_allochdl,
	px_dma_freehdl,
	px_dma_bindhdl,
	px_dma_unbindhdl,
	px_lib_dma_sync,
	px_dma_win,
	px_dma_ctlops,
	px_ctlops,
	ddi_bus_prop_op,
	ndi_busop_get_eventcookie,
	ndi_busop_add_eventcall,
	ndi_busop_remove_eventcall,
	ndi_post_event,
	NULL,
	NULL,			/* (*bus_config)(); */
	NULL,			/* (*bus_unconfig)(); */
	px_fm_init_child,	/* (*bus_fm_init)(); */
	NULL,			/* (*bus_fm_fini)(); */
	NULL,			/* (*bus_fm_access_enter)(); */
	NULL,			/* (*bus_fm_access_fini)(); */
	pcie_bus_power,		/* (*bus_power)(); */
	px_intr_ops		/* (*bus_intr_op)(); */
};

extern struct cb_ops px_cb_ops;

static struct dev_ops px_ops = {
	DEVO_REV,
	0,
	px_info,
	nulldev,
	0,
	px_attach,
	px_detach,
	nodev,
	&px_cb_ops,
	&px_bus_ops,
	nulldev
};

/*
 * module definitions:
 */
#include <sys/modctl.h>
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,			/* Type of module - driver */
	"PCI Express nexus driver %I%",	/* Name of module. */
	&px_ops,			/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/* driver soft state */
void *px_state_p;
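/*
 * Soft state is looked up throughout this file via the INST_TO_STATE(),
 * DIP_TO_INST() and DIP_TO_STATE() macros from px_obj.h.  As a reading
 * aid only, they presumably reduce to something along these lines
 * (a sketch, not the authoritative definitions):
 *
 *	#define	DIP_TO_INST(dip)	ddi_get_instance(dip)
 *	#define	INST_TO_STATE(inst)	\
 *		((px_t *)ddi_get_soft_state(px_state_p, (inst)))
 *	#define	DIP_TO_STATE(dip)	INST_TO_STATE(DIP_TO_INST(dip))
 */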
int
_init(void)
{
	int e;

	/*
	 * Initialize per-px bus soft state pointer.
	 */
	e = ddi_soft_state_init(&px_state_p, sizeof (px_t), 1);
	if (e != DDI_SUCCESS)
		return (e);

	/*
	 * Install the module.
	 */
	e = mod_install(&modlinkage);
	if (e != DDI_SUCCESS)
		ddi_soft_state_fini(&px_state_p);
	return (e);
}

int
_fini(void)
{
	int e;

	/*
	 * Remove the module.
	 */
	e = mod_remove(&modlinkage);
	if (e != DDI_SUCCESS)
		return (e);

	/* Free px soft state */
	ddi_soft_state_fini(&px_state_p);

	return (e);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/* ARGSUSED */
static int
px_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int	instance = getminor((dev_t)arg);
	px_t	*px_p = INST_TO_STATE(instance);

#ifdef	HOTPLUG
	/*
	 * Allow hotplug to deal with the ones it manages.
	 * Hot Plug will be done later.
	 */
	if (px_p && (px_p->hotplug_capable == B_TRUE))
		return (pcihp_info(dip, infocmd, arg, result));
#endif	/* HOTPLUG */

	/* non-hotplug or not attached */
	switch (infocmd) {
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(intptr_t)instance;
		return (DDI_SUCCESS);

	case DDI_INFO_DEVT2DEVINFO:
		if (px_p == NULL)
			return (DDI_FAILURE);
		*result = (void *)px_p->px_dip;
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
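/*
 * A note on the getinfo entry point: the framework may invoke it with a
 * NULL dip, since the dev_t alone must be enough to identify the
 * instance.  That is why px_info() above works from getminor() and the
 * soft state rather than from its dip argument.
 */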
/* device driver entry points */

/*
 * attach entry point:
 */
/*ARGSUSED*/
static int
px_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	px_t		*px_p;	/* per bus state pointer */
	int		instance = DIP_TO_INST(dip);
	int		ret = DDI_SUCCESS;
	devhandle_t	dev_hdl = NULL;

	switch (cmd) {
	case DDI_ATTACH:
		DBG(DBG_ATTACH, dip, "DDI_ATTACH\n");

		/*
		 * Allocate and get the per-px soft state structure.
		 */
		if (ddi_soft_state_zalloc(px_state_p, instance)
		    != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: can't allocate px state",
			    ddi_driver_name(dip), instance);
			goto err_bad_px_softstate;
		}
		px_p = INST_TO_STATE(instance);
		px_p->px_dip = dip;
		mutex_init(&px_p->px_mutex, NULL, MUTEX_DRIVER, NULL);
		px_p->px_soft_state = PX_SOFT_STATE_CLOSED;
		px_p->px_open_count = 0;

		/*
		 * Get key properties of the pci bridge node and
		 * determine its type (psycho, schizo, etc ...).
		 */
		if (px_get_props(px_p, dip) == DDI_FAILURE)
			goto err_bad_px_prop;

		/*
		 * Map in the registers.
		 *
		 * Remove px_map_regs() from here and move it to the SUN4U
		 * library code, after complete virtualization
		 * (after porting MSI and Error handling code).
		 */
		if (px_map_regs(px_p, dip) == DDI_FAILURE)
			goto err_bad_reg_prop;

		if ((px_fm_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_fm;

		if (px_lib_dev_init(dip, &dev_hdl) != DDI_SUCCESS)
			goto err_bad_dev_init;

		/* Initialize device handle */
		px_p->px_dev_hdl = dev_hdl;

		/*
		 * Initialize interrupt block.  Note that this
		 * initializes error handling for the PEC as well.
		 */
		if ((ret = px_ib_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_ib;

		if (px_cb_attach(px_p) != DDI_SUCCESS)
			goto err_bad_cb;

		/*
		 * Start creating the modules.
		 * Note that attach() routines should
		 * register and enable their own interrupts.
		 */

		if ((px_mmu_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_mmu;

		if ((px_msiq_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_msiq;

		if ((px_msi_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_msi;

		if ((px_pec_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_pec;

		if ((px_dma_attach(px_p)) != DDI_SUCCESS)
			goto err_bad_pec; /* nothing to uninitialize on DMA */

		/*
		 * All of the error handlers have been registered
		 * by now so it's time to activate the interrupt.
		 */
		if ((ret = px_err_add_intr(px_p, &px_p->px_fault,
		    PX_FAULT_PEC)) != DDI_SUCCESS)
			goto err_bad_pec_add_intr;

		/*
		 * Create the "devctl" node for hotplug and pcitool support.
		 * For non-hotplug bus, we still need ":devctl" to
		 * support DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls.
		 *
		 * Hot Plug will be done at a later time...
		 */
		if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
		    PCIHP_AP_MINOR_NUM(instance, PCIHP_DEVCTL_MINOR),
		    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
			goto err_bad_devctl_node;
		}

		/*
		 * Power management setup.  Even if it fails, attach will
		 * succeed as this is an optional feature.  Since we are
		 * always at full power, this is not critical.
		 */
		if (pwr_common_setup(dip) != DDI_SUCCESS) {
			DBG(DBG_PWR, dip, "pwr_common_setup failed\n");
		} else if (px_pwr_setup(dip) != DDI_SUCCESS) {
			DBG(DBG_PWR, dip, "px_pwr_setup failed\n");
			pwr_common_teardown(dip);
		}

		ddi_report_dev(dip);

		px_p->px_state = PX_ATTACHED;
		DBG(DBG_ATTACH, dip, "attach success\n");
		break;

err_bad_devctl_node:
		px_err_rem_intr(px_p, PX_FAULT_PEC);
err_bad_pec_add_intr:
		px_pec_detach(px_p);
err_bad_pec:
		px_msi_detach(px_p);
err_bad_msi:
		px_msiq_detach(px_p);
err_bad_msiq:
		px_mmu_detach(px_p);
err_bad_mmu:
		px_cb_detach(px_p);
err_bad_cb:
		px_ib_detach(px_p);
err_bad_ib:
		(void) px_lib_dev_fini(dip);
err_bad_dev_init:
		px_fm_detach(px_p);
err_bad_fm:
		px_unmap_regs(px_p);
err_bad_reg_prop:
		px_free_props(px_p);
err_bad_px_prop:
		mutex_destroy(&px_p->px_mutex);
		ddi_soft_state_free(px_state_p, instance);
err_bad_px_softstate:
		ret = DDI_FAILURE;
		break;

	case DDI_RESUME:
		DBG(DBG_ATTACH, dip, "DDI_RESUME\n");

		px_p = INST_TO_STATE(instance);

		mutex_enter(&px_p->px_mutex);

		/* suspend might not have succeeded */
		if (px_p->px_state != PX_SUSPENDED) {
			DBG(DBG_ATTACH, px_p->px_dip,
			    "instance NOT suspended\n");
			ret = DDI_FAILURE;
			mutex_exit(&px_p->px_mutex);
			break;
		}

		px_lib_resume(dip);
		(void) pcie_pwr_resume(dip);
		px_p->px_state = PX_ATTACHED;

		mutex_exit(&px_p->px_mutex);

		break;
	default:
		DBG(DBG_ATTACH, dip, "unsupported attach op\n");
		ret = DDI_FAILURE;
		break;
	}

	return (ret);
}
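/*
 * The error labels in px_attach() unwind in strict reverse order of the
 * attach steps: control enters the stack at the label matching the
 * failed step and falls through the remaining labels, undoing every
 * step that had already succeeded.  A new attach step should therefore
 * always be paired with a label at the matching position above.
 */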
/*
 * detach entry point:
 */
/*ARGSUSED*/
static int
px_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		instance = ddi_get_instance(dip);
	px_t		*px_p = INST_TO_STATE(instance);
	int		ret;

	/*
	 * Make sure we are currently attached
	 */
	if (px_p->px_state != PX_ATTACHED) {
		DBG(DBG_DETACH, dip, "failed - instance not attached\n");
		return (DDI_FAILURE);
	}

	mutex_enter(&px_p->px_mutex);

	switch (cmd) {
	case DDI_DETACH:
		DBG(DBG_DETACH, dip, "DDI_DETACH\n");

#ifdef	HOTPLUG
		/*
		 * Hot plug will be done later.
		 */
		if (px_p->hotplug_capable == B_TRUE) {
			if (pxhp_uninit(dip) == DDI_FAILURE) {
				mutex_exit(&px_p->px_mutex);
				return (DDI_FAILURE);
			}
		}
#endif	/* HOTPLUG */

		/*
		 * things which used to be done in obj_destroy
		 * are now in-lined here.
		 */

		px_p->px_state = PX_DETACHED;

		ddi_remove_minor_node(dip, "devctl");
		px_err_rem_intr(px_p, PX_FAULT_PEC);
		px_pec_detach(px_p);
		px_msi_detach(px_p);
		px_msiq_detach(px_p);
		px_mmu_detach(px_p);
		px_cb_detach(px_p);
		px_ib_detach(px_p);
		(void) px_lib_dev_fini(dip);
		px_fm_detach(px_p);
		px_p->px_dev_hdl = NULL;

		/*
		 * Free the px soft state structure and the rest of the
		 * resources it's using.
		 */
		px_unmap_regs(px_p);
		px_free_props(px_p);
		px_pwr_teardown(dip);
		pwr_common_teardown(dip);
		mutex_exit(&px_p->px_mutex);
		mutex_destroy(&px_p->px_mutex);
		ddi_soft_state_free(px_state_p, instance);

		/* Free the interrupt-priorities prop if we created it. */ {
			int len;

			if (ddi_getproplen(DDI_DEV_T_ANY, dip,
			    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
			    "interrupt-priorities", &len) == DDI_PROP_SUCCESS)
				(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
				    "interrupt-priorities");
		}

		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		if (pcie_pwr_suspend(dip) != DDI_SUCCESS) {
			mutex_exit(&px_p->px_mutex);
			return (DDI_FAILURE);
		}
		if ((ret = px_lib_suspend(dip)) == DDI_SUCCESS)
			px_p->px_state = PX_SUSPENDED;
		mutex_exit(&px_p->px_mutex);

		return (ret);

	default:
		DBG(DBG_DETACH, dip, "unsupported detach op\n");
		mutex_exit(&px_p->px_mutex);
		return (DDI_FAILURE);
	}
}
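/*
 * A note on system power events: the framework drives suspend/resume
 * through these same entry points, calling px_detach(dip, DDI_SUSPEND)
 * on the way down and px_attach(dip, DDI_RESUME) on the way back up.
 * px_state tracks PX_SUSPENDED in between so that a resume without a
 * successful suspend is rejected.
 */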
/*
 * power management related initialization specific to px
 * called by px_attach()
 */
static int
px_pwr_setup(dev_info_t *dip)
{
	pcie_pwr_t *pwr_p;
	ddi_intr_handle_impl_t hdl;

	ASSERT(PCIE_PMINFO(dip));
	pwr_p = PCIE_NEXUS_PMINFO(dip);
	ASSERT(pwr_p);

	/*
	 * Indicate support for LDI (Layered Driver Interface).
	 * Create the property, if it is not already there.
	 */
	if (!ddi_prop_exists(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
	    DDI_KERNEL_IOCTL)) {
		if (ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		    DDI_KERNEL_IOCTL, NULL, 0) != DDI_PROP_SUCCESS) {
			DBG(DBG_PWR, dip, "can't create kernel ioctl prop\n");
			return (DDI_FAILURE);
		}
	}

	/* No support for device PM.  We are always at full power. */
	pwr_p->pwr_func_lvl = PM_LEVEL_D0;

	/* we know that we are only a low pil interrupt */
	mutex_init(&pwr_p->pwr_intr_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&pwr_p->pwr_cv, NULL, CV_DRIVER, NULL);

	/* Initialize handle */
	hdl.ih_ver = DDI_INTR_VERSION;
	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
	hdl.ih_dip = dip;
	hdl.ih_inum = 0;
	hdl.ih_pri = px_pwr_pil;

	/* Add PME_TO_ACK message handler */
	hdl.ih_cb_func = (ddi_intr_handler_t *)pcie_pwr_intr;
	hdl.ih_cb_arg1 = pwr_p;
	hdl.ih_cb_arg2 = NULL;

	if (px_add_msiq_intr(dip, dip, &hdl, MSG_REC,
	    (msgcode_t)PCIE_PME_ACK_MSG, &pwr_p->pwr_msiq_id) != DDI_SUCCESS) {
		DBG(DBG_PWR, dip, "px_pwr_setup: couldn't add intr\n");
		goto px_pwrsetup_err;
	}

	px_lib_msg_setmsiq(dip, PCIE_PME_ACK_MSG, pwr_p->pwr_msiq_id);
	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_VALID);

	return (DDI_SUCCESS);

px_pwrsetup_err:
	cv_destroy(&pwr_p->pwr_cv);
	mutex_destroy(&pwr_p->pwr_intr_lock);
	return (DDI_FAILURE);
}

/*
 * undo whatever is done in px_pwr_setup.  called by px_detach()
 */
static void
px_pwr_teardown(dev_info_t *dip)
{
	pcie_pwr_t *pwr_p;
	ddi_intr_handle_impl_t hdl;

	if (!PCIE_PMINFO(dip) || !(pwr_p = PCIE_NEXUS_PMINFO(dip)))
		return;

	DBG(DBG_MSG, dip, "px_pwr_teardown: msiq_id 0x%x\n",
	    pwr_p->pwr_msiq_id);

	/* Initialize handle */
	hdl.ih_ver = DDI_INTR_VERSION;
	hdl.ih_state = DDI_IHDL_STATE_ALLOC;
	hdl.ih_dip = dip;
	hdl.ih_inum = 0;

	px_lib_msg_setvalid(dip, PCIE_PME_ACK_MSG, PCIE_MSG_INVALID);
	(void) px_rem_msiq_intr(dip, dip, &hdl, MSG_REC, PCIE_PME_ACK_MSG,
	    pwr_p->pwr_msiq_id);

	pwr_p->pwr_msiq_id = -1;

	cv_destroy(&pwr_p->pwr_cv);
	mutex_destroy(&pwr_p->pwr_intr_lock);
}
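/*
 * Background on the PME_TO_ACK plumbing above (a hedged sketch of the
 * PCI Express protocol, not derived from this file): before the link to
 * a downstream component is powered off, a PME_Turn_Off message is
 * broadcast and the component answers with a PME_TO_Ack message.
 * px_pwr_setup() routes that inbound message to an MSI queue so that
 * pcie_pwr_intr() runs at px_pwr_pil when the acknowledgement arrives;
 * pwr_cv/pwr_intr_lock presumably let the power code wait for it.
 */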
/* bus driver entry points */

/*
 * bus map entry point:
 *
 *	if map request is for an rnumber
 *		get the corresponding regspec from device node
 *	build a new regspec in our parent's format
 *	build a new map_req with the new regspec
 *	call up the tree to complete the mapping
 */
int
px_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t off, off_t len, caddr_t *addrp)
{
	px_t *px_p = DIP_TO_STATE(dip);
	struct regspec p_regspec;
	ddi_map_req_t p_mapreq;
	int reglen, rval, r_no;
	pci_regspec_t reloc_reg, *rp = &reloc_reg;

	DBG(DBG_MAP, dip, "rdip=%s%d:",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (mp->map_flags & DDI_MF_USER_MAPPING)
		return (DDI_ME_UNIMPLEMENTED);

	switch (mp->map_type) {
	case DDI_MT_REGSPEC:
		reloc_reg = *(pci_regspec_t *)mp->map_obj.rp; /* dup whole */
		break;

	case DDI_MT_RNUMBER:
		r_no = mp->map_obj.rnumber;
		DBG(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no);

		if (ddi_getlongprop(DDI_DEV_T_NONE, rdip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&rp, &reglen) != DDI_SUCCESS)
			return (DDI_ME_RNUMBER_RANGE);

		if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) {
			kmem_free(rp, reglen);
			return (DDI_ME_RNUMBER_RANGE);
		}
		rp += r_no;
		break;

	default:
		return (DDI_ME_INVAL);
	}
	DBG(DBG_MAP | DBG_CONT, dip, "\n");

	if ((rp->pci_phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) {
		/*
		 * There may be a need to differentiate between PCI
		 * and PCI-Ex devices so that the following range check
		 * is done correctly; this depends on the implementation
		 * of the px_pci bridge nexus driver.
		 */
		if ((off >= PCIE_CONF_HDR_SIZE) ||
		    (len > PCIE_CONF_HDR_SIZE) ||
		    (off + len > PCIE_CONF_HDR_SIZE)) {
			rval = DDI_ME_INVAL;
			goto done;
		}
		/*
		 * If this layer defines no virtual config space access
		 * services, the following call returns DDI_FAILURE and
		 * we fall through to the parent.  Otherwise the mapping
		 * is satisfied right here and we return.
		 */
		rval = px_lib_map_vconfig(dip, mp, off, rp, addrp);
		if (rval == DDI_SUCCESS)
			goto done;
	}

	/*
	 * No virtual config space services or we are mapping
	 * a region of memory mapped config/IO/memory space, so proceed
	 * to the parent.
	 */

	/* relocate within 64-bit pci space through "assigned-addresses" */
	if (rval = px_reloc_reg(dip, rdip, px_p, rp))
		goto done;

	if (len)	/* adjust regspec according to mapping request */
		rp->pci_size_low = len;	/* MIN ? */
	rp->pci_phys_low += off;

	/* translate relocated pci regspec into parent space through "ranges" */
	if (rval = px_xlate_reg(px_p, rp, &p_regspec))
		goto done;

	p_mapreq = *mp;		/* dup the whole structure */
	p_mapreq.map_type = DDI_MT_REGSPEC;
	p_mapreq.map_obj.rp = &p_regspec;
	rval = ddi_map(dip, &p_mapreq, 0, 0, addrp);

	if (rval == DDI_SUCCESS) {
		/*
		 * Set-up access functions for FM access error capable drivers.
		 */
		if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
		    mp->map_handlep->ah_acc.devacc_attr_access !=
		    DDI_DEFAULT_ACC)
			px_fm_acc_setup(mp, rdip);
	}

done:
	if (mp->map_type == DDI_MT_RNUMBER)
		kmem_free(rp - r_no, reglen);

	return (rval);
}

/*
 * bus dma map entry point
 * return value:
 *	DDI_DMA_PARTIAL_MAP	 1
 *	DDI_DMA_MAPOK		 0
 *	DDI_DMA_MAPPED		 0
 *	DDI_DMA_NORESOURCES	-1
 *	DDI_DMA_NOMAPPING	-2
 *	DDI_DMA_TOOBIG		-3
 */
int
px_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq,
	ddi_dma_handle_t *handlep)
{
	px_t *px_p = DIP_TO_STATE(dip);
	px_mmu_t *mmu_p = px_p->px_mmu_p;
	ddi_dma_impl_t *mp;
	int ret;

	DBG(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip),
	    handlep ? "alloc" : "advisory");

	if (!(mp = px_dma_lmts2hdl(dip, rdip, mmu_p, dmareq)))
		return (DDI_DMA_NORESOURCES);
	if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING)
		return (DDI_DMA_NOMAPPING);
	if (ret = px_dma_type(px_p, dmareq, mp))
		goto freehandle;
	if (ret = px_dma_pfn(px_p, dmareq, mp))
		goto freehandle;

	switch (PX_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:	/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
		if ((ret = px_dvma_win(px_p, dmareq, mp)) || !handlep)
			goto freehandle;
		if (!PX_DMA_CANCACHE(mp)) {	/* try fast track */
			if (PX_DMA_CANFAST(mp)) {
				if (!px_dvma_map_fast(mmu_p, mp))
					break;
			/* LINTED E_NOP_ELSE_STMT */
			} else {
				PX_DVMA_FASTTRAK_PROF(mp);
			}
		}
		if (ret = px_dvma_map(mp, dmareq, mmu_p))
			goto freehandle;
		break;
	case DMAI_FLAGS_PTP:	/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
		if ((ret = px_dma_physwin(px_p, dmareq, mp)) || !handlep)
			goto freehandle;
		break;
	case DMAI_FLAGS_BYPASS:
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_setup: bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PX_DMA_TYPE(mp));
		/*NOTREACHED*/
	}
	*handlep = (ddi_dma_handle_t)mp;
	mp->dmai_flags |= DMAI_FLAGS_INUSE;
	px_dump_dma_handle(DBG_DMA_MAP, dip, mp);

	return ((mp->dmai_nwin == 1) ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
freehandle:
	if (ret == DDI_DMA_NORESOURCES)
		px_dma_freemp(mp);	/* don't run_callback() */
	else
		(void) px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
	return (ret);
}


/*
 * bus dma alloc handle entry point:
 */
int
px_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	px_t *px_p = DIP_TO_STATE(dip);
	ddi_dma_impl_t *mp;
	int rval;

	DBG(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (attrp->dma_attr_version != DMA_ATTR_V0)
		return (DDI_DMA_BADATTR);

	if (!(mp = px_dma_allocmp(dip, rdip, waitfp, arg)))
		return (DDI_DMA_NORESOURCES);

	/*
	 * Save requestor's information
	 */
	mp->dmai_attr = *attrp;	/* whole object - augmented later */
	*DEV_ATTR(mp) = *attrp;	/* whole object - device orig attr */
	DBG(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp);

	/* check and convert dma attributes to handle parameters */
	if (rval = px_dma_attr2hdl(px_p, mp)) {
		px_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
		*handlep = NULL;
		return (rval);
	}
	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}


/*
 * bus dma free handle entry point:
 */
/*ARGSUSED*/
int
px_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	DBG(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	px_dma_freemp((ddi_dma_impl_t *)handle);

	if (px_kmem_clid) {
		DBG(DBG_DMA_FREEH, dip, "run handle callback\n");
		ddi_run_callback(&px_kmem_clid);
	}
	return (DDI_SUCCESS);
}

/*
 * bus dma bind handle entry point:
 */
int
px_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
	ddi_dma_handle_t handle, ddi_dma_req_t *dmareq,
	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	px_t *px_p = DIP_TO_STATE(dip);
	px_mmu_t *mmu_p = px_p->px_mmu_p;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	int ret;

	DBG(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq);

	if (mp->dmai_flags & DMAI_FLAGS_INUSE)
		return (DDI_DMA_INUSE);

	ASSERT((mp->dmai_flags & ~DMAI_FLAGS_PRESERVE) == 0);
	mp->dmai_flags |= DMAI_FLAGS_INUSE;

	if (ret = px_dma_type(px_p, dmareq, mp))
		goto err;
	if (ret = px_dma_pfn(px_p, dmareq, mp))
		goto err;

	switch (PX_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		if (ret = px_dvma_win(px_p, dmareq, mp))
			goto map_err;
		if (!PX_DMA_CANCACHE(mp)) {	/* try fast track */
			if (PX_DMA_CANFAST(mp)) {
				if (!px_dvma_map_fast(mmu_p, mp))
					goto mapped; /*LINTED E_NOP_ELSE_STMT*/
			} else {
				PX_DVMA_FASTTRAK_PROF(mp);
			}
		}
		if (ret = px_dvma_map(mp, dmareq, mmu_p))
			goto map_err;
mapped:
		*ccountp = 1;
		MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size);
		break;
	case DMAI_FLAGS_BYPASS:
	case DMAI_FLAGS_PTP:
		if (ret = px_dma_physwin(px_p, dmareq, mp))
			goto map_err;
		*ccountp = WINLST(mp)->win_ncookies;
		*cookiep = *(ddi_dma_cookie_t *)(WINLST(mp) + 1); /* wholeobj */
		break;
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_bindhdl(%p): bad dma type",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
		/*NOTREACHED*/
	}
	DBG(DBG_DMA_BINDH, dip, "cookie %llx+%x\n", cookiep->dmac_address,
	    cookiep->dmac_size);
	px_dump_dma_handle(DBG_DMA_MAP, dip, mp);
	return (mp->dmai_nwin == 1 ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
map_err:
	px_dma_freepfn(mp);
err:
	mp->dmai_flags &= DMAI_FLAGS_PRESERVE;
	return (ret);
}
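/*
 * For illustration, the usual leaf driver sequence that ends up in
 * px_dma_allochdl() and px_dma_bindhdl() above (the leaf-side names
 * here are hypothetical).  Note that px_dma_setup() serves the older
 * one-shot ddi_dma_setup()-style interfaces instead:
 *
 *	ddi_dma_handle_t dh;
 *	ddi_dma_cookie_t dc;
 *	uint_t ccnt;
 *
 *	if (ddi_dma_alloc_handle(rdip, &attr, DDI_DMA_SLEEP, NULL,
 *	    &dh) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	if (ddi_dma_addr_bind_handle(dh, NULL, kaddr, size,
 *	    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 *	    &dc, &ccnt) != DDI_DMA_MAPPED) {
 *		ddi_dma_free_handle(&dh);
 *		return (DDI_FAILURE);
 *	}
 *
 * The first cookie is returned through *cookiep above; any remaining
 * cookies in the window are consumed with ddi_dma_nextcookie().
 */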

/*
 * bus dma unbind handle entry point:
 */
/*ARGSUSED*/
int
px_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	px_t *px_p = DIP_TO_STATE(dip);
	px_mmu_t *mmu_p = px_p->px_mmu_p;

	DBG(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	if ((mp->dmai_flags & DMAI_FLAGS_INUSE) == 0) {
		DBG(DBG_DMA_UNBINDH, dip, "handle not inuse\n");
		return (DDI_FAILURE);
	}

	/*
	 * If the handle is mapped through the IOMMU, unload all of its
	 * IOMMU translations here.
	 */
	switch (PX_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		px_mmu_unmap_window(mmu_p, mp);
		px_dvma_unmap(mmu_p, mp);
		px_dma_freepfn(mp);
		break;
	case DMAI_FLAGS_BYPASS:
	case DMAI_FLAGS_PTP:
		px_dma_freewin(mp);
		break;
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_unbindhdl:bad dma type %p",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
		/*NOTREACHED*/
	}
	if (mmu_p->mmu_dvma_clid != 0) {
		DBG(DBG_DMA_UNBINDH, dip, "run dvma callback\n");
		ddi_run_callback(&mmu_p->mmu_dvma_clid);
	}
	if (px_kmem_clid) {
		DBG(DBG_DMA_UNBINDH, dip, "run handle callback\n");
		ddi_run_callback(&px_kmem_clid);
	}
	mp->dmai_flags &= DMAI_FLAGS_PRESERVE;
	return (DDI_SUCCESS);
}

/*
 * bus dma win entry point:
 */
int
px_dma_win(dev_info_t *dip, dev_info_t *rdip,
	ddi_dma_handle_t handle, uint_t win, off_t *offp,
	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)handle;
	int		ret;

	DBG(DBG_DMA_WIN, dip, "rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	px_dump_dma_handle(DBG_DMA_WIN, dip, mp);
	if (win >= mp->dmai_nwin) {
		DBG(DBG_DMA_WIN, dip, "%x out of range\n", win);
		return (DDI_FAILURE);
	}

	switch (PX_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		if (win != PX_DMA_CURWIN(mp)) {
			px_t *px_p = DIP_TO_STATE(dip);
			px_mmu_t *mmu_p = px_p->px_mmu_p;
			px_mmu_unmap_window(mmu_p, mp);

			/* map_window sets dmai_mapping/size/offset */
			if ((ret = px_mmu_map_window(mmu_p,
			    mp, win)) != DDI_SUCCESS)
				return (ret);
		}
		if (cookiep)
			MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping,
			    mp->dmai_size);
		if (ccountp)
			*ccountp = 1;
		break;
	case DMAI_FLAGS_PTP:
	case DMAI_FLAGS_BYPASS: {
		int i;
		ddi_dma_cookie_t *ck_p;
		px_dma_win_t *win_p = mp->dmai_winlst;

		for (i = 0; i < win; win_p = win_p->win_next, i++)
			;
		ck_p = (ddi_dma_cookie_t *)(win_p + 1);
		*cookiep = *ck_p;
		mp->dmai_offset = win_p->win_offset;
		mp->dmai_size = win_p->win_size;
		mp->dmai_mapping = ck_p->dmac_laddress;
		mp->dmai_cookie = ck_p + 1;
		win_p->win_curseg = 0;
		if (ccountp)
			*ccountp = win_p->win_ncookies;
		}
		break;
	default:
		cmn_err(CE_WARN, "%s%d: px_dma_win:bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PX_DMA_TYPE(mp));
		return (DDI_FAILURE);
	}
	if (cookiep)
		DBG(DBG_DMA_WIN, dip,
		    "cookie - dmac_address=%x dmac_size=%x\n",
		    cookiep->dmac_address, cookiep->dmac_size);
	if (offp)
		*offp = (off_t)mp->dmai_offset;
	if (lenp)
		*lenp = mp->dmai_size;
	return (DDI_SUCCESS);
}
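/*
 * For illustration, a leaf driver that bound with DDI_DMA_PARTIAL
 * iterates over the windows with ddi_dma_getwin(), which is what lands
 * in px_dma_win() above (the leaf-side names here are hypothetical):
 *
 *	uint_t win, nwin, ccnt;
 *	off_t off;
 *	size_t len;
 *	ddi_dma_cookie_t dc;
 *
 *	(void) ddi_dma_numwin(dh, &nwin);
 *	for (win = 0; win < nwin; win++) {
 *		if (ddi_dma_getwin(dh, win, &off, &len, &dc, &ccnt) !=
 *		    DDI_SUCCESS)
 *			break;
 *		... program the device with dc and the remaining
 *		    ccnt - 1 cookies ...
 *	}
 */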

#ifdef	DEBUG
static char *px_dmactl_str[] = {
	"DDI_DMA_FREE",
	"DDI_DMA_SYNC",
	"DDI_DMA_HTOC",
	"DDI_DMA_KVADDR",
	"DDI_DMA_MOVWIN",
	"DDI_DMA_REPWIN",
	"DDI_DMA_GETERR",
	"DDI_DMA_COFF",
	"DDI_DMA_NEXTWIN",
	"DDI_DMA_NEXTSEG",
	"DDI_DMA_SEGTOC",
	"DDI_DMA_RESERVE",
	"DDI_DMA_RELEASE",
	"DDI_DMA_RESETH",
	"DDI_DMA_CKSYNC",
	"DDI_DMA_IOPB_ALLOC",
	"DDI_DMA_IOPB_FREE",
	"DDI_DMA_SMEM_ALLOC",
	"DDI_DMA_SMEM_FREE",
	"DDI_DMA_SET_SBUS64"
};
#endif	/* DEBUG */

/*
 * bus dma control entry point:
 */
/*ARGSUSED*/
int
px_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
	enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp,
	uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;

#ifdef	DEBUG
	DBG(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", px_dmactl_str[cmd],
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
#endif	/* DEBUG */

	switch (cmd) {
	case DDI_DMA_FREE:
		(void) px_dma_unbindhdl(dip, rdip, handle);
		(void) px_dma_freehdl(dip, rdip, handle);
		return (DDI_SUCCESS);
	case DDI_DMA_RESERVE: {
		px_t *px_p = DIP_TO_STATE(dip);
		return (px_fdvma_reserve(dip, rdip, px_p,
		    (ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp));
		}
	case DDI_DMA_RELEASE: {
		px_t *px_p = DIP_TO_STATE(dip);
		return (px_fdvma_release(dip, px_p, mp));
		}
	default:
		break;
	}

	switch (PX_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		return (px_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	case DMAI_FLAGS_PTP:
	case DMAI_FLAGS_BYPASS:
		return (px_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	default:
		cmn_err(CE_PANIC, "%s%d: px_dma_ctlops(%x):bad dma type %x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), cmd,
		    mp->dmai_flags);
		/*NOTREACHED*/
	}
}
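/*
 * A note on DDI_DMA_RESERVE/DDI_DMA_RELEASE above: these back the
 * "fast DVMA" interface (dvma_reserve(9F)/dvma_kaddr_load(9F)), where a
 * driver reserves a range of DVMA space once and then loads and unloads
 * mappings into it with low per-transfer overhead, bypassing the
 * general bind path; hence the px_fdvma_* routines.
 */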

/*
 * control ops entry point:
 *
 * Requests handled completely:
 *	DDI_CTLOPS_INITCHILD	see init_child() for details
 *	DDI_CTLOPS_UNINITCHILD
 *	DDI_CTLOPS_REPORTDEV	see report_dev() for details
 *	DDI_CTLOPS_XLATE_INTRS	nothing to do
 *	DDI_CTLOPS_IOMIN	cache line size if streaming otherwise 1
 *	DDI_CTLOPS_REGSIZE
 *	DDI_CTLOPS_NREGS
 *	DDI_CTLOPS_NINTRS
 *	DDI_CTLOPS_DVMAPAGESIZE
 *	DDI_CTLOPS_POKE
 *	DDI_CTLOPS_PEEK
 *
 * All others passed to parent.
 */
int
px_ctlops(dev_info_t *dip, dev_info_t *rdip,
	ddi_ctl_enum_t op, void *arg, void *result)
{
	px_t *px_p = DIP_TO_STATE(dip);
	struct detachspec *ds;
	struct attachspec *as;

	switch (op) {
	case DDI_CTLOPS_INITCHILD:
		return (px_init_child(px_p, (dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		return (px_uninit_child(px_p, (dev_info_t *)arg));

	case DDI_CTLOPS_ATTACH:
		as = (struct attachspec *)arg;
		switch (as->when) {
		case DDI_PRE:
			if (as->cmd == DDI_ATTACH) {
				DBG(DBG_PWR, dip, "PRE_ATTACH for %s@%d\n",
				    ddi_driver_name(rdip),
				    ddi_get_instance(rdip));
				return (pcie_pm_hold(dip));
			}
			return (DDI_SUCCESS);

		case DDI_POST:
			DBG(DBG_PWR, dip, "POST_ATTACH for %s@%d\n",
			    ddi_driver_name(rdip), ddi_get_instance(rdip));
			if (as->cmd == DDI_ATTACH && as->result != DDI_SUCCESS)
				pcie_pm_release(dip);
			return (DDI_SUCCESS);
		default:
			break;
		}
		break;

	case DDI_CTLOPS_DETACH:
		ds = (struct detachspec *)arg;
		switch (ds->when) {
		case DDI_POST:
			if (ds->cmd == DDI_DETACH &&
			    ds->result == DDI_SUCCESS) {
				DBG(DBG_PWR, dip, "POST_DETACH for %s@%d\n",
				    ddi_driver_name(rdip),
				    ddi_get_instance(rdip));
				return (pcie_pm_remove_child(dip, rdip));
			}
			return (DDI_SUCCESS);
		default:
			break;
		}
		break;

	case DDI_CTLOPS_REPORTDEV:
		return (px_report_dev(rdip));

	case DDI_CTLOPS_IOMIN:
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
		*((off_t *)result) = px_get_reg_set_size(rdip, *((int *)arg));
		return (*((off_t *)result) == -1 ? DDI_FAILURE : DDI_SUCCESS);

	case DDI_CTLOPS_NREGS:
		*((uint_t *)result) = px_get_nreg_set(rdip);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_DVMAPAGESIZE:
		*((ulong_t *)result) = MMU_PAGE_SIZE;
		return (DDI_SUCCESS);

	case DDI_CTLOPS_POKE:	/* platform dependent implementation. */
		return (px_lib_ctlops_poke(dip, rdip,
		    (peekpoke_ctlops_t *)arg));

	case DDI_CTLOPS_PEEK:	/* platform dependent implementation. */
		return (px_lib_ctlops_peek(dip, rdip,
		    (peekpoke_ctlops_t *)arg, result));

	case DDI_CTLOPS_POWER:
	default:
		break;
	}

	/*
	 * Now pass the request up to our parent.
	 */
	DBG(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
	return (ddi_ctlops(dip, rdip, op, arg, result));
}

/* ARGSUSED */
int
px_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
	ddi_intr_handle_impl_t *hdlp, void *result)
{
	int	intr_types, ret = DDI_SUCCESS;

	DBG(DBG_INTROPS, dip, "px_intr_ops: rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	/* Process DDI_INTROP_SUPPORTED_TYPES request here */
	if (intr_op == DDI_INTROP_SUPPORTED_TYPES) {
		px_t		*px_p = DIP_TO_STATE(dip);
		px_msi_state_t	*msi_state_p = &px_p->px_ib_p->ib_msi_state;

		*(int *)result = i_ddi_get_nintrs(rdip) ?
		    DDI_INTR_TYPE_FIXED : 0;

		if ((pci_msi_get_supported_type(rdip,
		    &intr_types)) == DDI_SUCCESS) {
			/*
			 * Double check supported interrupt types vs.
			 * what the host bridge supports.
			 */
			*(int *)result |= (intr_types & msi_state_p->msi_type);
		}

		return (ret);
	}

	/*
	 * PCI-E nexus driver supports fixed, MSI and MSI-X interrupts.
	 * Return failure if interrupt type is not supported.
	 */
	switch (hdlp->ih_type) {
	case DDI_INTR_TYPE_FIXED:
		ret = px_intx_ops(dip, rdip, intr_op, hdlp, result);
		break;
	case DDI_INTR_TYPE_MSI:
	case DDI_INTR_TYPE_MSIX:
		ret = px_msix_ops(dip, rdip, intr_op, hdlp, result);
		break;
	default:
		ret = DDI_ENOTSUP;
		break;
	}

	return (ret);
}
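/*
 * For illustration, a leaf driver's interrupt registration such as (the
 * leaf-side names here are hypothetical):
 *
 *	ddi_intr_handle_t ih;
 *	int navail;
 *
 *	if (ddi_intr_alloc(rdip, &ih, DDI_INTR_TYPE_MSI, 0, 1, &navail,
 *	    DDI_INTR_ALLOC_NORMAL) == DDI_SUCCESS &&
 *	    ddi_intr_add_handler(ih, my_isr, my_arg, NULL) == DDI_SUCCESS)
 *		(void) ddi_intr_enable(ih);
 *
 * is decomposed by the DDI interrupt framework into DDI_INTROP_*
 * requests that arrive at px_intr_ops() above and are dispatched to
 * px_intx_ops() or px_msix_ops() according to hdlp->ih_type.
 */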