/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * PCI nexus driver interface
 */

/*
 * Copyright 2019 Peter Tribble.
 */

#include <sys/types.h>
#include <sys/conf.h>		/* nulldev */
#include <sys/stat.h>		/* devctl */
#include <sys/kmem.h>
#include <sys/async.h>		/* ecc_flt for pci_ecc.h */
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndifm.h>
#include <sys/ontrap.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi_subrdefs.h>
#include <sys/epm.h>
#include <sys/hotplug/pci/pcihp.h>
#include <sys/pci/pci_tools_ext.h>
#include <sys/spl.h>
#include <sys/pci/pci_obj.h>

/*LINTLIBRARY*/

/*
 * function prototype for hotplug routine:
 */
static void
pci_init_hotplug(struct pci *);

/*
 * function prototypes for dev ops routines:
 */
static int pci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int pci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int pci_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
	void *arg, void **result);
static int pci_ctlops_poke(pci_t *pci_p, peekpoke_ctlops_t *in_args);
static int pci_ctlops_peek(pci_t *pci_p, peekpoke_ctlops_t *in_args,
	void *result);
static off_t get_reg_set_size(dev_info_t *child, int rnumber);

/*
 * bus ops and dev ops structures:
 */
static struct bus_ops pci_bus_ops = {
	BUSO_REV,
	pci_map,
	0,
	0,
	0,
	i_ddi_map_fault,
	pci_dma_setup,
	pci_dma_allochdl,
	pci_dma_freehdl,
	pci_dma_bindhdl,
	pci_dma_unbindhdl,
	pci_dma_sync,
	pci_dma_win,
	pci_dma_ctlops,
	pci_ctlops,
	ddi_bus_prop_op,
	ndi_busop_get_eventcookie,	/* (*bus_get_eventcookie)(); */
	ndi_busop_add_eventcall,	/* (*bus_add_eventcall)(); */
	ndi_busop_remove_eventcall,	/* (*bus_remove_eventcall)(); */
	ndi_post_event,			/* (*bus_post_event)(); */
	NULL,				/* (*bus_intr_ctl)(); */
	NULL,				/* (*bus_config)(); */
	NULL,				/* (*bus_unconfig)(); */
	pci_fm_init_child,		/* (*bus_fm_init)(); */
	NULL,				/* (*bus_fm_fini)(); */
	pci_bus_enter,			/* (*bus_fm_access_enter)(); */
	pci_bus_exit,			/* (*bus_fm_access_fini)(); */
	NULL,				/* (*bus_power)(); */
	pci_intr_ops			/* (*bus_intr_op)(); */
};

extern struct cb_ops pci_cb_ops;

static struct dev_ops pci_ops = {
	DEVO_REV,
	0,
	pci_info,
	nulldev,
	0,
	pci_attach,
	pci_detach,
	nodev,
	&pci_cb_ops,
	&pci_bus_ops,
	0,
	ddi_quiesce_not_supported,	/* devo_quiesce */
};
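
/*
 * Illustrative sketch (not part of the original source): the DDI
 * framework dispatches a child driver's generic 9F calls through the
 * ops tables above.  For example, a hypothetical leaf driver call
 *
 *	off_t regsize;
 *
 *	if (ddi_dev_regsize(child_dip, 0, &regsize) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 * arrives at this nexus as a DDI_CTLOPS_REGSIZE request, which
 * pci_ctlops() below answers via get_reg_set_size().
 */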
/*
 * module definitions:
 */
#include <sys/modctl.h>
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,				/* Type of module - driver */
	"Sun4u Host to PCI nexus driver",	/* Name of module. */
	&pci_ops,				/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/*
 * driver global data:
 */
void *per_pci_state;			/* per-pbm soft state pointer */
void *per_pci_common_state;		/* per-psycho soft state pointer */
kmutex_t pci_global_mutex;		/* attach/detach common struct lock */
errorq_t *pci_ecc_queue = NULL;		/* per-system ecc handling queue */
extern errorq_t *pci_target_queue;
struct cb_ops *pcihp_ops = NULL;	/* hotplug module cb ops */

extern void pci_child_cfg_save(dev_info_t *dip);
extern void pci_child_cfg_restore(dev_info_t *dip);

int
_init(void)
{
	int e;

	/*
	 * Initialize per-pci bus soft state pointer.
	 */
	e = ddi_soft_state_init(&per_pci_state, sizeof (pci_t), 1);
	if (e != 0)
		return (e);

	/*
	 * Initialize per-psycho soft state pointer.
	 */
	e = ddi_soft_state_init(&per_pci_common_state,
	    sizeof (pci_common_t), 1);
	if (e != 0) {
		ddi_soft_state_fini(&per_pci_state);
		return (e);
	}

	/*
	 * Initialize global mutexes.
	 */
	mutex_init(&pci_global_mutex, NULL, MUTEX_DRIVER, NULL);
	pci_reloc_init();

	/*
	 * Create the performance kstats.
	 */
	pci_kstat_init();

	/*
	 * Install the module.
	 */
	e = mod_install(&modlinkage);
	if (e != 0) {
		ddi_soft_state_fini(&per_pci_state);
		ddi_soft_state_fini(&per_pci_common_state);
		mutex_destroy(&pci_global_mutex);
	}
	return (e);
}
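
/*
 * Illustrative sketch (assumption: alloc_pci_soft_state() and
 * get_pci_soft_state() are macros defined in pci_obj.h): the
 * per-instance state anchored by per_pci_state above is the standard
 * soft state mechanism, roughly:
 *
 *	pci_t *pci_p;
 *
 *	if (ddi_soft_state_zalloc(per_pci_state, instance) !=
 *	    DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	pci_p = ddi_get_soft_state(per_pci_state, instance);
 */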
int
_fini(void)
{
	int e;

	/*
	 * Remove the module.
	 */
	e = mod_remove(&modlinkage);
	if (e != 0)
		return (e);

	/*
	 * Destroy pci_ecc_queue, and set it to NULL.
	 */
	if (pci_ecc_queue)
		errorq_destroy(pci_ecc_queue);

	pci_ecc_queue = NULL;

	/*
	 * Destroy pci_target_queue, and set it to NULL.
	 */
	if (pci_target_queue)
		errorq_destroy(pci_target_queue);

	pci_target_queue = NULL;

	/*
	 * Destroy the performance kstats.
	 */
	pci_kstat_fini();

	/*
	 * Free the per-pci and per-psycho soft state info and destroy
	 * the mutex for the per-psycho soft state.
	 */
	ddi_soft_state_fini(&per_pci_state);
	ddi_soft_state_fini(&per_pci_common_state);
	mutex_destroy(&pci_global_mutex);
	pci_reloc_fini();
	return (e);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*ARGSUSED*/
static int
pci_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int instance = PCIHP_AP_MINOR_NUM_TO_INSTANCE(getminor((dev_t)arg));
	pci_t *pci_p = get_pci_soft_state(instance);

	/* allow hotplug to deal with ones it manages */
	if (pci_p && (pci_p->hotplug_capable == B_TRUE))
		return (pcihp_info(dip, infocmd, arg, result));

	/* non-hotplug or not attached */
	switch (infocmd) {
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		return (DDI_SUCCESS);

	case DDI_INFO_DEVT2DEVINFO:
		if (pci_p == NULL)
			return (DDI_FAILURE);
		*result = (void *)pci_p->pci_dip;
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}


/* device driver entry points */

/*
 * attach entry point:
 */
static int
pci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	pci_t *pci_p;			/* per bus state pointer */
	int instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		DEBUG0(DBG_ATTACH, dip, "DDI_ATTACH\n");

		/*
		 * Allocate and get the per-pci soft state structure.
		 */
		if (alloc_pci_soft_state(instance) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: can't allocate pci state",
			    ddi_driver_name(dip), instance);
			goto err_bad_pci_softstate;
		}
		pci_p = get_pci_soft_state(instance);
		pci_p->pci_dip = dip;
		mutex_init(&pci_p->pci_mutex, NULL, MUTEX_DRIVER, NULL);
		pci_p->pci_soft_state = PCI_SOFT_STATE_CLOSED;

		/*
		 * Get key properties of the pci bridge node and
		 * determine its type (psycho, schizo, etc ...).
		 */
		if (get_pci_properties(pci_p, dip) == DDI_FAILURE)
			goto err_bad_pci_prop;

		/*
		 * Map in the registers.
		 */
		if (map_pci_registers(pci_p, dip) == DDI_FAILURE)
			goto err_bad_reg_prop;

		if (pci_obj_setup(pci_p) != DDI_SUCCESS)
			goto err_bad_objs;

		/*
		 * If this PCI leaf has hotplug and this platform
		 * loads hotplug modules, then initialize the
		 * hotplug framework.
		 */
		pci_init_hotplug(pci_p);

		/*
		 * Create the "devctl" node for hotplug support.
		 * For a non-hotplug bus, we still need ":devctl" to
		 * support the DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls.
		 */
		if (pci_p->hotplug_capable == B_FALSE) {
			if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
			    PCIHP_AP_MINOR_NUM(instance, PCIHP_DEVCTL_MINOR),
			    DDI_NT_NEXUS, 0) != DDI_SUCCESS)
				goto err_bad_devctl_node;
		}

		/*
		 * Create pcitool nodes for register access and interrupt
		 * routing.
		 */
		if (pcitool_init(dip) != DDI_SUCCESS) {
			goto err_bad_pcitool_nodes;
		}
		ddi_report_dev(dip);

		pci_p->pci_state = PCI_ATTACHED;
		DEBUG0(DBG_ATTACH, dip, "attach success\n");
		break;

err_bad_pcitool_nodes:
		if (pci_p->hotplug_capable == B_FALSE)
			ddi_remove_minor_node(dip, "devctl");
		else
			(void) pcihp_uninit(dip);
err_bad_devctl_node:
		pci_obj_destroy(pci_p);
err_bad_objs:
		unmap_pci_registers(pci_p);
err_bad_reg_prop:
		free_pci_properties(pci_p);
err_bad_pci_prop:
		mutex_destroy(&pci_p->pci_mutex);
		free_pci_soft_state(instance);
err_bad_pci_softstate:
		return (DDI_FAILURE);

	case DDI_RESUME:
		DEBUG0(DBG_ATTACH, dip, "DDI_RESUME\n");

		/*
		 * Make sure the Psycho control registers and IOMMU
		 * are configured properly.
		 */
		pci_p = get_pci_soft_state(instance);
		mutex_enter(&pci_p->pci_mutex);

		/*
		 * Make sure this instance has been suspended.
		 */
		if (pci_p->pci_state != PCI_SUSPENDED) {
			DEBUG0(DBG_ATTACH, dip, "instance NOT suspended\n");
			mutex_exit(&pci_p->pci_mutex);
			return (DDI_FAILURE);
		}
		pci_obj_resume(pci_p);
		pci_p->pci_state = PCI_ATTACHED;

		pci_child_cfg_restore(dip);

		mutex_exit(&pci_p->pci_mutex);
		break;

	default:
		DEBUG0(DBG_ATTACH, dip, "unsupported attach op\n");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
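
/*
 * Illustrative sketch (userland, hedged): the ":devctl" minor node
 * created above is normally driven through libdevice(3LIB); the
 * device path below is hypothetical:
 *
 *	devctl_hdl_t hdl;
 *	uint_t state;
 *
 *	if ((hdl = devctl_bus_acquire("/devices/pci@1f,4000:devctl",
 *	    0)) != NULL) {
 *		if (devctl_bus_getstate(hdl, &state) == 0)
 *			(void) printf("bus state 0x%x\n", state);
 *		devctl_release(hdl);
 *	}
 */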
/*
 * detach entry point:
 */
static int
pci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance = ddi_get_instance(dip);
	pci_t *pci_p = get_pci_soft_state(instance);

	/*
	 * Make sure we are currently attached.
	 */
	if (pci_p->pci_state != PCI_ATTACHED) {
		DEBUG0(DBG_ATTACH, dip, "failed - instance not attached\n");
		return (DDI_FAILURE);
	}

	mutex_enter(&pci_p->pci_mutex);

	switch (cmd) {
	case DDI_DETACH:
		DEBUG0(DBG_DETACH, dip, "DDI_DETACH\n");

		if (pci_p->hotplug_capable == B_TRUE)
			if (pcihp_uninit(dip) == DDI_FAILURE) {
				mutex_exit(&pci_p->pci_mutex);
				return (DDI_FAILURE);
			}

		pcitool_uninit(dip);

		pci_obj_destroy(pci_p);

		/*
		 * Free the pci soft state structure and the rest of the
		 * resources it's using.
		 */
		free_pci_properties(pci_p);
		unmap_pci_registers(pci_p);
		mutex_exit(&pci_p->pci_mutex);
		mutex_destroy(&pci_p->pci_mutex);
		free_pci_soft_state(instance);

		/* Free the interrupt-priorities prop if we created it. */
		{
			int len;

			if (ddi_getproplen(DDI_DEV_T_ANY, dip,
			    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
			    "interrupt-priorities", &len) == DDI_PROP_SUCCESS)
				(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
				    "interrupt-priorities");
		}
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		pci_child_cfg_save(dip);
		pci_obj_suspend(pci_p);
		pci_p->pci_state = PCI_SUSPENDED;

		mutex_exit(&pci_p->pci_mutex);
		return (DDI_SUCCESS);

	default:
		DEBUG0(DBG_DETACH, dip, "unsupported detach op\n");
		mutex_exit(&pci_p->pci_mutex);
		return (DDI_FAILURE);
	}
}


/* bus driver entry points */

/*
 * bus map entry point:
 *
 *	if map request is for an rnumber
 *		get the corresponding regspec from device node
 *	build a new regspec in our parent's format
 *	build a new map_req with the new regspec
 *	call up the tree to complete the mapping
 */
int
pci_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t off, off_t len, caddr_t *addrp)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	struct regspec p_regspec;
	ddi_map_req_t p_mapreq;
	int reglen, rval, r_no;
	pci_regspec_t reloc_reg, *rp = &reloc_reg;

	DEBUG2(DBG_MAP, dip, "rdip=%s%d:",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (mp->map_flags & DDI_MF_USER_MAPPING)
		return (DDI_ME_UNIMPLEMENTED);

	switch (mp->map_type) {
	case DDI_MT_REGSPEC:
		reloc_reg = *(pci_regspec_t *)mp->map_obj.rp;	/* dup whole */
		break;

	case DDI_MT_RNUMBER:
		r_no = mp->map_obj.rnumber;
		DEBUG1(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no);

		if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&rp, &reglen) != DDI_SUCCESS)
			return (DDI_ME_RNUMBER_RANGE);

		if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) {
			kmem_free(rp, reglen);
			return (DDI_ME_RNUMBER_RANGE);
		}
		rp += r_no;
		break;

	default:
		return (DDI_ME_INVAL);
	}
	DEBUG0(DBG_MAP | DBG_CONT, dip, "\n");

	/* use "assigned-addresses" to relocate regspec within pci space */
	if (rval = pci_reloc_reg(dip, rdip, pci_p, rp))
		goto done;

	if (len)	/* adjust regspec according to mapping request */
		rp->pci_size_low = len;
	rp->pci_phys_low += off;

	/* use "ranges" to translate relocated pci regspec into parent space */
	if (rval = pci_xlate_reg(pci_p, rp, &p_regspec))
		goto done;

	p_mapreq = *mp;		/* dup the whole structure */
	p_mapreq.map_type = DDI_MT_REGSPEC;
	p_mapreq.map_obj.rp = &p_regspec;
	rval = ddi_map(dip, &p_mapreq, 0, 0, addrp);

	if (rval == DDI_SUCCESS) {
		/*
		 * Set up access functions for FM access error capable
		 * drivers.
		 */
		if (DDI_FM_ACC_ERR_CAP(pci_p->pci_fm_cap) &&
		    DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
		    mp->map_handlep->ah_acc.devacc_attr_access !=
		    DDI_DEFAULT_ACC)
			pci_fm_acc_setup(mp, rdip);
	}

done:
	if (mp->map_type == DDI_MT_RNUMBER)
		kmem_free(rp - r_no, reglen);

	return (rval);
}
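
/*
 * Illustrative sketch: a hypothetical leaf driver reaches pci_map()
 * above through ddi_regs_map_setup(9F):
 *
 *	ddi_device_acc_attr_t attr;
 *	ddi_acc_handle_t h;
 *	caddr_t regs;
 *
 *	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
 *	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
 *	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
 *
 *	if (ddi_regs_map_setup(dip, 1, &regs, 0, 0, &attr, &h) !=
 *	    DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 * The framework turns this into a DDI_MT_RNUMBER map request and
 * walks it up the device tree to this nexus.
 */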
/*
 * bus dma map entry point
 * return value:
 *	DDI_DMA_PARTIAL_MAP	 1
 *	DDI_DMA_MAPOK		 0
 *	DDI_DMA_MAPPED		 0
 *	DDI_DMA_NORESOURCES	-1
 *	DDI_DMA_NOMAPPING	-2
 *	DDI_DMA_TOOBIG		-3
 */
int
pci_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq,
	ddi_dma_handle_t *handlep)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	ddi_dma_impl_t *mp;
	int ret;

	DEBUG3(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip),
	    handlep ? "alloc" : "advisory");

	if (!(mp = pci_dma_lmts2hdl(dip, rdip, iommu_p, dmareq)))
		return (DDI_DMA_NORESOURCES);
	if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING)
		return (DDI_DMA_NOMAPPING);
	if (ret = pci_dma_type(pci_p, dmareq, mp))
		goto freehandle;
	if (ret = pci_dma_pfn(pci_p, dmareq, mp))
		goto freehandle;

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:	/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
		if ((ret = pci_dvma_win(pci_p, dmareq, mp)) || !handlep)
			goto freehandle;
		if (!PCI_DMA_CANCACHE(mp)) {	/* try fast track */
			if (PCI_DMA_CANFAST(mp)) {
				if (!pci_dvma_map_fast(iommu_p, mp))
					break;
			/* LINTED E_NOP_ELSE_STMT */
			} else {
				PCI_DVMA_FASTTRAK_PROF(mp);
			}
		}
		if (ret = pci_dvma_map(mp, dmareq, iommu_p))
			goto freehandle;
		break;
	case DMAI_FLAGS_PEER_TO_PEER:	/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
		if ((ret = pci_dma_physwin(pci_p, dmareq, mp)) || !handlep)
			goto freehandle;
		break;
	case DMAI_FLAGS_BYPASS:
	default:
		panic("%s%d: pci_dma_setup: bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PCI_DMA_TYPE(mp));
		/*NOTREACHED*/
	}
	*handlep = (ddi_dma_handle_t)mp;
	mp->dmai_flags |= (DMAI_FLAGS_INUSE | DMAI_FLAGS_MAPPED);
	dump_dma_handle(DBG_DMA_MAP, dip, mp);

	return ((mp->dmai_nwin == 1) ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
freehandle:
	if (ret == DDI_DMA_NORESOURCES)
		pci_dma_freemp(mp);	/* don't run_callback() */
	else
		(void) pci_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
	return (ret);
}


/*
 * bus dma alloc handle entry point:
 */
int
pci_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	ddi_dma_impl_t *mp;
	int rval;

	DEBUG2(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (attrp->dma_attr_version != DMA_ATTR_V0)
		return (DDI_DMA_BADATTR);

	if (!(mp = pci_dma_allocmp(dip, rdip, waitfp, arg)))
		return (DDI_DMA_NORESOURCES);

	/*
	 * Save the requestor's information.
	 */
	mp->dmai_attr = *attrp;		/* whole object - augmented later */
	*DEV_ATTR(mp) = *attrp;		/* whole object - device orig attr */
	DEBUG1(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp);

	/* check and convert dma attributes to handle parameters */
	if (rval = pci_dma_attr2hdl(pci_p, mp)) {
		pci_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
		*handlep = NULL;
		return (rval);
	}
	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}
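
/*
 * Illustrative sketch: a leaf driver reaches pci_dma_allochdl()
 * above through ddi_dma_alloc_handle(9F); the attribute values are
 * hypothetical:
 *
 *	static ddi_dma_attr_t dma_attr = {
 *		DMA_ATTR_V0,		dma_attr_version
 *		0,			dma_attr_addr_lo
 *		0xffffffffull,		dma_attr_addr_hi
 *		0xffffffffull,		dma_attr_count_max
 *		1,			dma_attr_align
 *		1,			dma_attr_burstsizes
 *		1,			dma_attr_minxfer
 *		0xffffffffull,		dma_attr_maxxfer
 *		0xffffffffull,		dma_attr_seg
 *		1,			dma_attr_sgllen
 *		1,			dma_attr_granular
 *		0			dma_attr_flags
 *	};
 *	ddi_dma_handle_t h;
 *
 *	if (ddi_dma_alloc_handle(dip, &dma_attr, DDI_DMA_SLEEP, NULL,
 *	    &h) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 */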
/*
 * bus dma free handle entry point:
 */
/*ARGSUSED*/
int
pci_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	DEBUG3(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	pci_dma_freemp((ddi_dma_impl_t *)handle);

	if (pci_kmem_clid) {
		DEBUG0(DBG_DMA_FREEH, dip, "run handle callback\n");
		ddi_run_callback(&pci_kmem_clid);
	}
	return (DDI_SUCCESS);
}


/*
 * bus dma bind handle entry point:
 */
int
pci_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
	ddi_dma_handle_t handle, ddi_dma_req_t *dmareq,
	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	int ret;

	DEBUG4(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq);

	if (mp->dmai_flags & DMAI_FLAGS_INUSE)
		return (DDI_DMA_INUSE);

	ASSERT((mp->dmai_flags & ~DMAI_FLAGS_PRESERVE) == 0);
	mp->dmai_flags |= DMAI_FLAGS_INUSE;

	if (ret = pci_dma_type(pci_p, dmareq, mp))
		goto err;
	if (ret = pci_dma_pfn(pci_p, dmareq, mp))
		goto err;

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		if (ret = pci_dvma_win(pci_p, dmareq, mp))
			goto map_err;
		if (!PCI_DMA_CANCACHE(mp)) {	/* try fast track */
			if (PCI_DMA_CANFAST(mp)) {
				if (!pci_dvma_map_fast(iommu_p, mp))
					goto mapped; /*LINTED E_NOP_ELSE_STMT*/
			} else {
				PCI_DVMA_FASTTRAK_PROF(mp);
			}
		}
		if (ret = pci_dvma_map(mp, dmareq, iommu_p))
			goto map_err;
mapped:
		*ccountp = 1;
		MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size);
		mp->dmai_ncookies = 1;
		mp->dmai_curcookie = 1;
		break;
	case DMAI_FLAGS_BYPASS:
	case DMAI_FLAGS_PEER_TO_PEER:
		if (ret = pci_dma_physwin(pci_p, dmareq, mp))
			goto map_err;
		*ccountp = WINLST(mp)->win_ncookies;
		*cookiep = *(ddi_dma_cookie_t *)(WINLST(mp) + 1); /* wholeobj */
		/*
		 * mp->dmai_ncookies and mp->dmai_curcookie are set by
		 * pci_dma_physwin().
		 */
		break;
	default:
		panic("%s%d: pci_dma_bindhdl(%p): bad dma type",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
		/*NOTREACHED*/
	}
	DEBUG2(DBG_DMA_BINDH, dip, "cookie %x+%x\n", cookiep->dmac_address,
	    cookiep->dmac_size);
	dump_dma_handle(DBG_DMA_MAP, dip, mp);

	if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR)
		mp->dmai_error.err_cf = impl_dma_check;

	mp->dmai_flags |= DMAI_FLAGS_MAPPED;
	return (mp->dmai_nwin == 1 ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
map_err:
	pci_dvma_unregister_callbacks(pci_p, mp);
	pci_dma_freepfn(mp);
err:
	mp->dmai_flags &= DMAI_FLAGS_PRESERVE;
	return (ret);
}
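
/*
 * Illustrative sketch: a leaf driver drives pci_dma_bindhdl() above
 * through ddi_dma_addr_bind_handle(9F); kva and len are hypothetical:
 *
 *	ddi_dma_cookie_t cookie;
 *	uint_t ccount;
 *	int rv;
 *
 *	rv = ddi_dma_addr_bind_handle(h, NULL, kva, len,
 *	    DDI_DMA_READ | DDI_DMA_PARTIAL, DDI_DMA_SLEEP, NULL,
 *	    &cookie, &ccount);
 *
 * A DDI_DMA_PARTIAL_MAP return corresponds to mp->dmai_nwin > 1
 * here; the caller then walks the windows (see the sketch after
 * pci_dma_win() below).
 */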
/*
 * bus dma unbind handle entry point:
 */
/*ARGSUSED*/
int
pci_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	iommu_t *iommu_p = pci_p->pci_iommu_p;

	DEBUG3(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	if ((mp->dmai_flags & DMAI_FLAGS_INUSE) == 0) {
		DEBUG0(DBG_DMA_UNBINDH, dip, "handle not in use\n");
		return (DDI_FAILURE);
	}

	mp->dmai_flags &= ~DMAI_FLAGS_MAPPED;

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		pci_dvma_unregister_callbacks(pci_p, mp);
		pci_dma_sync_unmap(dip, rdip, mp);
		pci_dvma_unmap(iommu_p, mp);
		pci_dma_freepfn(mp);
		break;
	case DMAI_FLAGS_BYPASS:
	case DMAI_FLAGS_PEER_TO_PEER:
		pci_dma_freewin(mp);
		break;
	default:
		panic("%s%d: pci_dma_unbindhdl:bad dma type %p",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
		/*NOTREACHED*/
	}
	if (iommu_p->iommu_dvma_clid != 0) {
		DEBUG0(DBG_DMA_UNBINDH, dip, "run dvma callback\n");
		ddi_run_callback(&iommu_p->iommu_dvma_clid);
	}
	if (pci_kmem_clid) {
		DEBUG0(DBG_DMA_UNBINDH, dip, "run handle callback\n");
		ddi_run_callback(&pci_kmem_clid);
	}
	mp->dmai_flags &= DMAI_FLAGS_PRESERVE;
	SYNC_BUF_PA(mp) = 0;

	mp->dmai_error.err_cf = NULL;
	mp->dmai_ncookies = 0;
	mp->dmai_curcookie = 0;

	return (DDI_SUCCESS);
}


/*
 * bus dma win entry point:
 */
int
pci_dma_win(dev_info_t *dip, dev_info_t *rdip,
	ddi_dma_handle_t handle, uint_t win, off_t *offp,
	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;

	DEBUG2(DBG_DMA_WIN, dip, "rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
	dump_dma_handle(DBG_DMA_WIN, dip, mp);
	if (win >= mp->dmai_nwin) {
		DEBUG1(DBG_DMA_WIN, dip, "%x out of range\n", win);
		return (DDI_FAILURE);
	}

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		if (win != PCI_DMA_CURWIN(mp)) {
			pci_t *pci_p =
			    get_pci_soft_state(ddi_get_instance(dip));
			pci_dma_sync_unmap(dip, rdip, mp);
			/* map_window sets dmai_mapping/size/offset */
			iommu_map_window(pci_p->pci_iommu_p, mp, win);
		}
		if (cookiep)
			MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping,
			    mp->dmai_size);
		if (ccountp)
			*ccountp = 1;
		mp->dmai_ncookies = 1;
		mp->dmai_curcookie = 1;
		break;
	case DMAI_FLAGS_PEER_TO_PEER:
	case DMAI_FLAGS_BYPASS: {
		int i;
		ddi_dma_cookie_t *ck_p;
		pci_dma_win_t *win_p = mp->dmai_winlst;

		for (i = 0; i < win; win_p = win_p->win_next, i++)
			;
		ck_p = (ddi_dma_cookie_t *)(win_p + 1);
		*cookiep = *ck_p;
		mp->dmai_offset = win_p->win_offset;
		mp->dmai_size = win_p->win_size;
		mp->dmai_mapping = ck_p->dmac_laddress;
		mp->dmai_cookie = ck_p + 1;
		win_p->win_curseg = 0;
		if (ccountp)
			*ccountp = win_p->win_ncookies;
		mp->dmai_ncookies = win_p->win_ncookies;
		mp->dmai_curcookie = 1;
		}
		break;
	default:
		cmn_err(CE_WARN, "%s%d: pci_dma_win:bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PCI_DMA_TYPE(mp));
		return (DDI_FAILURE);
	}
	if (cookiep)
		DEBUG2(DBG_DMA_WIN, dip,
		    "cookie - dmac_address=%x dmac_size=%x\n",
		    cookiep->dmac_address, cookiep->dmac_size);
	if (offp)
		*offp = (off_t)mp->dmai_offset;
	if (lenp)
		*lenp = mp->dmai_size;
	return (DDI_SUCCESS);
}
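
/*
 * Illustrative sketch: for a partial binding, a leaf driver walks
 * the windows with ddi_dma_numwin(9F)/ddi_dma_getwin(9F), which the
 * framework resolves to pci_dma_win() above:
 *
 *	uint_t nwin, win, ccount;
 *	off_t off;
 *	size_t sz;
 *	ddi_dma_cookie_t cookie;
 *
 *	(void) ddi_dma_numwin(h, &nwin);
 *	for (win = 0; win < nwin; win++) {
 *		if (ddi_dma_getwin(h, win, &off, &sz, &cookie,
 *		    &ccount) != DDI_SUCCESS)
 *			break;
 *		(transfer the ccount cookies of this window)
 *	}
 */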
#ifdef DEBUG
static char *pci_dmactl_str[] = {
	"DDI_DMA_FREE",
	"DDI_DMA_SYNC",
	"DDI_DMA_HTOC",
	"DDI_DMA_KVADDR",
	"DDI_DMA_MOVWIN",
	"DDI_DMA_REPWIN",
	"DDI_DMA_GETERR",
	"DDI_DMA_COFF",
	"DDI_DMA_NEXTWIN",
	"DDI_DMA_NEXTSEG",
	"DDI_DMA_SEGTOC",
	"DDI_DMA_RESERVE",
	"DDI_DMA_RELEASE",
	"DDI_DMA_RESETH",
	"DDI_DMA_CKSYNC",
	"DDI_DMA_IOPB_ALLOC",
	"DDI_DMA_IOPB_FREE",
	"DDI_DMA_SMEM_ALLOC",
	"DDI_DMA_SMEM_FREE",
	"DDI_DMA_SET_SBUS64",
	"DDI_DMA_REMAP"
};
#endif

/*
 * bus dma control entry point:
 */
int
pci_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
	enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp,
	uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;

	DEBUG3(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", pci_dmactl_str[cmd],
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	switch (cmd) {
	case DDI_DMA_FREE:
		(void) pci_dma_unbindhdl(dip, rdip, handle);
		(void) pci_dma_freehdl(dip, rdip, handle);
		return (DDI_SUCCESS);
	case DDI_DMA_RESERVE: {
		pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
		return (pci_fdvma_reserve(dip, rdip, pci_p,
		    (ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp));
		}
	case DDI_DMA_RELEASE: {
		pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
		return (pci_fdvma_release(dip, pci_p, mp));
		}
	default:
		break;
	}

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		return (pci_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	case DMAI_FLAGS_PEER_TO_PEER:
	case DMAI_FLAGS_BYPASS:
		return (pci_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	default:
		panic("%s%d: pci_dma_ctlops(%x):bad dma type %x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), cmd,
		    mp->dmai_flags);
		/*NOTREACHED*/
	}
}
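
/*
 * Illustrative sketch: the DDI_DMA_FREE ctlop above is the legacy
 * ddi_dma_free(9F) path; a current leaf driver performs the same
 * teardown with the explicit pair:
 *
 *	(void) ddi_dma_unbind_handle(h);
 *	ddi_dma_free_handle(&h);
 */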
#ifdef DEBUG
int pci_peekfault_cnt = 0;
int pci_pokefault_cnt = 0;
#endif /* DEBUG */

static int
pci_do_poke(pci_t *pci_p, peekpoke_ctlops_t *in_args)
{
	pbm_t *pbm_p = pci_p->pci_pbm_p;
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	mutex_enter(&pbm_p->pbm_pokefault_mutex);
	pbm_p->pbm_ontrap_data = &otd;

	/* Set up protected environment. */
	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		otd.ot_trampoline = (uintptr_t)&poke_fault;
		err = do_poke(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else
		err = DDI_FAILURE;

	/*
	 * Read the async fault register for the PBM to see if it saw
	 * a master-abort.
	 */
	pbm_clear_error(pbm_p);

	if (otd.ot_trap & OT_DATA_ACCESS)
		err = DDI_FAILURE;

	/* Take down protected environment. */
	no_trap();

	pbm_p->pbm_ontrap_data = NULL;
	mutex_exit(&pbm_p->pbm_pokefault_mutex);

#ifdef DEBUG
	if (err == DDI_FAILURE)
		pci_pokefault_cnt++;
#endif
	return (err);
}
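
/*
 * Illustrative sketch: a leaf driver exercises this path with
 * ddi_poke8(9F) and friends; dip and addr are hypothetical:
 *
 *	uint8_t v = 0xff;
 *
 *	if (ddi_poke8(dip, (int8_t *)addr, (int8_t)v) != DDI_SUCCESS)
 *		(the device did not accept the write)
 *
 * The framework forwards the poke as a DDI_CTLOPS_POKE request to
 * pci_ctlops(), which lands in pci_ctlops_poke() below.
 */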
static int
pci_do_caut_put(pci_t *pci_p, peekpoke_ctlops_t *cautacc_ctlops_arg)
{
	size_t size = cautacc_ctlops_arg->size;
	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
	size_t repcount = cautacc_ctlops_arg->repcount;
	uint_t flags = cautacc_ctlops_arg->flags;

	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;

	/*
	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
	 * mutex.
	 */
	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);

	if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
		for (; repcount; repcount--) {
			switch (size) {

			case sizeof (uint8_t):
				i_ddi_put8(hp, (uint8_t *)dev_addr,
				    *(uint8_t *)host_addr);
				break;

			case sizeof (uint16_t):
				i_ddi_put16(hp, (uint16_t *)dev_addr,
				    *(uint16_t *)host_addr);
				break;

			case sizeof (uint32_t):
				i_ddi_put32(hp, (uint32_t *)dev_addr,
				    *(uint32_t *)host_addr);
				break;

			case sizeof (uint64_t):
				i_ddi_put64(hp, (uint64_t *)dev_addr,
				    *(uint64_t *)host_addr);
				break;
			}

			host_addr += size;

			if (flags == DDI_DEV_AUTOINCR)
				dev_addr += size;
		}
	}

	i_ddi_notrap((ddi_acc_handle_t)hp);
	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;

	if (hp->ahi_err->err_status != DDI_FM_OK) {
		/* Clear the expected fault from the handle before returning */
		hp->ahi_err->err_status = DDI_FM_OK;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}


static int
pci_ctlops_poke(pci_t *pci_p, peekpoke_ctlops_t *in_args)
{
	return (in_args->handle ? pci_do_caut_put(pci_p, in_args) :
	    pci_do_poke(pci_p, in_args));
}
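
/*
 * Illustrative sketch: pci_do_caut_put() is taken when the child
 * mapped its registers with a cautious access attribute, e.g.:
 *
 *	ddi_device_acc_attr_t attr;
 *
 *	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
 *	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
 *	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
 *	attr.devacc_attr_access = DDI_CAUTIOUS_ACC;
 *
 * A ddi_put8(9F) through such a handle arrives here with a non-NULL
 * in_args->handle; an ordinary poke arrives with a NULL handle and
 * takes the pci_do_poke() path instead.
 */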
static int
pci_do_peek(pci_t *pci_p, peekpoke_ctlops_t *in_args)
{
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		otd.ot_trampoline = (uintptr_t)&peek_fault;
		err = do_peek(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else
		err = DDI_FAILURE;

	no_trap();

#ifdef DEBUG
	if (err == DDI_FAILURE)
		pci_peekfault_cnt++;
#endif
	return (err);
}

static int
pci_do_caut_get(pci_t *pci_p, peekpoke_ctlops_t *cautacc_ctlops_arg)
{
	size_t size = cautacc_ctlops_arg->size;
	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
	size_t repcount = cautacc_ctlops_arg->repcount;
	uint_t flags = cautacc_ctlops_arg->flags;

	int err = DDI_SUCCESS;

	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);

	if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
		for (; repcount; repcount--) {
			i_ddi_caut_get(size, (void *)dev_addr,
			    (void *)host_addr);

			host_addr += size;

			if (flags == DDI_DEV_AUTOINCR)
				dev_addr += size;
		}
	} else {
		int i;
		uint8_t *ff_addr = (uint8_t *)host_addr;

		for (i = 0; i < size; i++)
			*ff_addr++ = 0xff;

		err = DDI_FAILURE;
	}

	i_ddi_notrap((ddi_acc_handle_t)hp);
	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;

	return (err);
}


static int
pci_ctlops_peek(pci_t *pci_p, peekpoke_ctlops_t *in_args, void *result)
{
	result = (void *)in_args->host_addr;
	return (in_args->handle ? pci_do_caut_get(pci_p, in_args) :
	    pci_do_peek(pci_p, in_args));
}
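
/*
 * Illustrative sketch: the peek path mirrors the poke path; a leaf
 * driver probes a register it is not sure exists with ddi_peek32(9F)
 * (dip and addr are hypothetical):
 *
 *	uint32_t v;
 *
 *	if (ddi_peek32(dip, (int32_t *)addr, (int32_t *)&v) !=
 *	    DDI_SUCCESS)
 *		(nothing responded at addr)
 *
 * On a faulted cautious read above, the caller's buffer is instead
 * filled with 0xff bytes, mimicking what PCI returns on a
 * master-abort.
 */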
/*
 * get_reg_set_size
 *
 * Given a dev info pointer to a pci child and a register number, this
 * routine returns the size element of that reg set property.
 *
 * return value: size of reg set on success, -1 on error
 */
static off_t
get_reg_set_size(dev_info_t *child, int rnumber)
{
	pci_regspec_t *pci_rp;
	off_t size;
	int i;

	if (rnumber < 0)
		return (-1);

	/*
	 * Get the reg property for the device.
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS, "reg",
	    (caddr_t)&pci_rp, &i) != DDI_SUCCESS)
		return (-1);

	if (rnumber >= (i / (int)sizeof (pci_regspec_t))) {
		kmem_free(pci_rp, i);
		return (-1);
	}

	size = pci_rp[rnumber].pci_size_low |
	    ((uint64_t)pci_rp[rnumber].pci_size_hi << 32);
	kmem_free(pci_rp, i);
	return (size);
}


/*
 * control ops entry point:
 *
 * Requests handled completely:
 *	DDI_CTLOPS_INITCHILD	see init_child() for details
 *	DDI_CTLOPS_UNINITCHILD
 *	DDI_CTLOPS_REPORTDEV	see report_dev() for details
 *	DDI_CTLOPS_IOMIN	cache line size if streaming, otherwise 1
 *	DDI_CTLOPS_REGSIZE
 *	DDI_CTLOPS_NREGS
 *	DDI_CTLOPS_DVMAPAGESIZE
 *	DDI_CTLOPS_POKE
 *	DDI_CTLOPS_PEEK
 *	DDI_CTLOPS_QUIESCE
 *	DDI_CTLOPS_UNQUIESCE
 *
 * All others are passed to the parent.
 */
int
pci_ctlops(dev_info_t *dip, dev_info_t *rdip,
	ddi_ctl_enum_t op, void *arg, void *result)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));

	switch (op) {
	case DDI_CTLOPS_INITCHILD:
		return (init_child(pci_p, (dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		return (uninit_child(pci_p, (dev_info_t *)arg));

	case DDI_CTLOPS_REPORTDEV:
		return (report_dev(rdip));

	case DDI_CTLOPS_IOMIN:
		/*
		 * If we are using the streaming cache, align at
		 * least on a cache line boundary.  Otherwise use
		 * whatever alignment is passed in.
		 */
		if ((uintptr_t)arg) {
			int val = *((int *)result);

			val = maxbit(val, PCI_SBUF_LINE_SIZE);
			*((int *)result) = val;
		}
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
		*((off_t *)result) = get_reg_set_size(rdip, *((int *)arg));
		return (*((off_t *)result) == -1 ? DDI_FAILURE : DDI_SUCCESS);

	case DDI_CTLOPS_NREGS:
		*((uint_t *)result) = get_nreg_set(rdip);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_DVMAPAGESIZE:
		*((ulong_t *)result) = IOMMU_PAGE_SIZE;
		return (DDI_SUCCESS);

	case DDI_CTLOPS_POKE:
		return (pci_ctlops_poke(pci_p, (peekpoke_ctlops_t *)arg));

	case DDI_CTLOPS_PEEK:
		return (pci_ctlops_peek(pci_p, (peekpoke_ctlops_t *)arg,
		    result));

	case DDI_CTLOPS_AFFINITY:
		break;

	case DDI_CTLOPS_QUIESCE:
		return (pci_bus_quiesce(pci_p, rdip, result));

	case DDI_CTLOPS_UNQUIESCE:
		return (pci_bus_unquiesce(pci_p, rdip, result));

	default:
		break;
	}
1286 */ 1287 DEBUG2(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n", 1288 ddi_driver_name(rdip), ddi_get_instance(rdip)); 1289 return (ddi_ctlops(dip, rdip, op, arg, result)); 1290 } 1291 1292 1293 /* ARGSUSED */ 1294 int 1295 pci_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op, 1296 ddi_intr_handle_impl_t *hdlp, void *result) 1297 { 1298 pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip)); 1299 ib_ino_t ino; 1300 int ret = DDI_SUCCESS; 1301 1302 switch (intr_op) { 1303 case DDI_INTROP_GETCAP: 1304 /* GetCap will always fail for all non PCI devices */ 1305 (void) pci_intx_get_cap(rdip, (int *)result); 1306 break; 1307 case DDI_INTROP_SETCAP: 1308 ret = DDI_ENOTSUP; 1309 break; 1310 case DDI_INTROP_ALLOC: 1311 *(int *)result = hdlp->ih_scratch1; 1312 break; 1313 case DDI_INTROP_FREE: 1314 break; 1315 case DDI_INTROP_GETPRI: 1316 *(int *)result = hdlp->ih_pri ? 1317 hdlp->ih_pri : pci_class_to_pil(rdip); 1318 break; 1319 case DDI_INTROP_SETPRI: 1320 break; 1321 case DDI_INTROP_ADDISR: 1322 ret = pci_add_intr(dip, rdip, hdlp); 1323 break; 1324 case DDI_INTROP_REMISR: 1325 ret = pci_remove_intr(dip, rdip, hdlp); 1326 break; 1327 case DDI_INTROP_GETTARGET: 1328 ino = IB_MONDO_TO_INO(pci_xlate_intr(dip, rdip, 1329 pci_p->pci_ib_p, IB_MONDO_TO_INO(hdlp->ih_vector))); 1330 ret = ib_get_intr_target(pci_p, ino, (int *)result); 1331 break; 1332 case DDI_INTROP_SETTARGET: 1333 ret = DDI_ENOTSUP; 1334 break; 1335 case DDI_INTROP_ENABLE: 1336 ret = ib_update_intr_state(pci_p, rdip, hdlp, 1337 PCI_INTR_STATE_ENABLE); 1338 break; 1339 case DDI_INTROP_DISABLE: 1340 ret = ib_update_intr_state(pci_p, rdip, hdlp, 1341 PCI_INTR_STATE_DISABLE); 1342 break; 1343 case DDI_INTROP_SETMASK: 1344 ret = pci_intx_set_mask(rdip); 1345 break; 1346 case DDI_INTROP_CLRMASK: 1347 ret = pci_intx_clr_mask(rdip); 1348 break; 1349 case DDI_INTROP_GETPENDING: 1350 ret = pci_intx_get_pending(rdip, (int *)result); 1351 break; 1352 case DDI_INTROP_NINTRS: 1353 case DDI_INTROP_NAVAIL: 1354 *(int *)result = i_ddi_get_intx_nintrs(rdip); 1355 break; 1356 case DDI_INTROP_SUPPORTED_TYPES: 1357 /* PCI nexus driver supports only fixed interrupts */ 1358 *(int *)result = i_ddi_get_intx_nintrs(rdip) ? 1359 DDI_INTR_TYPE_FIXED : 0; 1360 break; 1361 default: 1362 ret = DDI_ENOTSUP; 1363 break; 1364 } 1365 1366 return (ret); 1367 } 1368 1369 static void 1370 pci_init_hotplug(struct pci *pci_p) 1371 { 1372 pci_bus_range_t bus_range; 1373 dev_info_t *dip; 1374 1375 /* 1376 * Before initializing hotplug - open up 1377 * bus range. The busra module will 1378 * initialize its pool of bus numbers from 1379 * this. "busra" will be the agent that keeps 1380 * track of them during hotplug. Also, note, 1381 * that busra will remove any bus numbers 1382 * already in use from boot time. 1383 */ 1384 bus_range.lo = 0x0; 1385 bus_range.hi = 0xff; 1386 dip = pci_p->pci_dip; 1387 pci_p->hotplug_capable = B_FALSE; 1388 1389 /* 1390 * If this property exists, this nexus has hot-plug 1391 * slots. 1392 */ 1393 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1394 "hotplug-capable")) { 1395 if (ndi_prop_update_int_array(DDI_DEV_T_NONE, 1396 dip, "bus-range", 1397 (int *)&bus_range, 1398 2) != DDI_PROP_SUCCESS) { 1399 return; 1400 } 1401 1402 if (pcihp_init(dip) != DDI_SUCCESS) { 1403 return; 1404 } 1405 1406 if ((pcihp_ops = pcihp_get_cb_ops()) != NULL) { 1407 DEBUG2(DBG_ATTACH, dip, "%s%d hotplug enabled", 1408 ddi_driver_name(dip), ddi_get_instance(dip)); 1409 pci_p->hotplug_capable = B_TRUE; 1410 } 1411 } 1412 } 1413