/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * PCI nexus driver interface
 */

#include <sys/types.h>
#include <sys/conf.h>		/* nulldev */
#include <sys/stat.h>		/* devctl */
#include <sys/kmem.h>
#include <sys/async.h>		/* ecc_flt for pci_ecc.h */
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndifm.h>
#include <sys/ontrap.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi_subrdefs.h>
#include <sys/epm.h>
#include <sys/hotplug/pci/pcihp.h>
#include <sys/pci/pci_tools_ext.h>
#include <sys/spl.h>
#include <sys/pci/pci_obj.h>

/*LINTLIBRARY*/

/*
 * function prototype for hotplug routine:
 */
static void pci_init_hotplug(struct pci *);

/*
 * function prototypes for dev ops routines:
 */
static int pci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int pci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int pci_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
	void *arg, void **result);
static int pci_ctlops_poke(pci_t *pci_p, peekpoke_ctlops_t *in_args);
static int pci_ctlops_peek(pci_t *pci_p, peekpoke_ctlops_t *in_args,
	void *result);
static off_t get_reg_set_size(dev_info_t *child, int rnumber);

/*
 * bus ops and dev ops structures:
 */
static struct bus_ops pci_bus_ops = {
	BUSO_REV,
	pci_map,
	0,
	0,
	0,
	i_ddi_map_fault,
	pci_dma_setup,
	pci_dma_allochdl,
	pci_dma_freehdl,
	pci_dma_bindhdl,
	pci_dma_unbindhdl,
	pci_dma_sync,
	pci_dma_win,
	pci_dma_ctlops,
	pci_ctlops,
	ddi_bus_prop_op,
	ndi_busop_get_eventcookie,	/* (*bus_get_eventcookie)(); */
	ndi_busop_add_eventcall,	/* (*bus_add_eventcall)(); */
	ndi_busop_remove_eventcall,	/* (*bus_remove_eventcall)(); */
	ndi_post_event,			/* (*bus_post_event)(); */
	NULL,				/* (*bus_intr_ctl)(); */
	NULL,				/* (*bus_config)(); */
	NULL,				/* (*bus_unconfig)(); */
	pci_fm_init_child,		/* (*bus_fm_init)(); */
	NULL,				/* (*bus_fm_fini)(); */
	pci_bus_enter,			/* (*bus_fm_access_enter)(); */
	pci_bus_exit,			/* (*bus_fm_access_fini)(); */
	NULL,				/* (*bus_power)(); */
	pci_intr_ops			/* (*bus_intr_op)(); */
};

extern struct cb_ops pci_cb_ops;

static struct dev_ops pci_ops = {
	DEVO_REV,
	0,
	pci_info,
	nulldev,
	0,
	pci_attach,
	pci_detach,
	nodev,
	&pci_cb_ops,
	&pci_bus_ops,
	0,
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

/*
 * module definitions:
 */
#include <sys/modctl.h>
extern struct mod_ops mod_driverops;
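/*
 * Standard loadable-module linkage: the modldrv/modlinkage structures
 * below are handed to mod_install()/mod_remove()/mod_info() by the
 * _init(), _fini() and _info() entry points further down.
 */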
static struct modldrv modldrv = {
	&mod_driverops,			/* Type of module - driver */
	"PCI Bus nexus driver",		/* Name of module. */
	&pci_ops,			/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/*
 * driver global data:
 */
void *per_pci_state;		/* per-pbm soft state pointer */
void *per_pci_common_state;	/* per-psycho soft state pointer */
kmutex_t pci_global_mutex;	/* attach/detach common struct lock */
errorq_t *pci_ecc_queue = NULL;	/* per-system ecc handling queue */
extern errorq_t *pci_target_queue;
struct cb_ops *pcihp_ops = NULL;	/* hotplug module cb ops */

extern void pci_child_cfg_save(dev_info_t *dip);
extern void pci_child_cfg_restore(dev_info_t *dip);

int
_init(void)
{
	int e;

	/*
	 * Initialize per-pci bus soft state pointer.
	 */
	e = ddi_soft_state_init(&per_pci_state, sizeof (pci_t), 1);
	if (e != 0)
		return (e);

	/*
	 * Initialize per-psycho soft state pointer.
	 */
	e = ddi_soft_state_init(&per_pci_common_state,
	    sizeof (pci_common_t), 1);
	if (e != 0) {
		ddi_soft_state_fini(&per_pci_state);
		return (e);
	}

	/*
	 * Initialize global mutexes.
	 */
	mutex_init(&pci_global_mutex, NULL, MUTEX_DRIVER, NULL);
	pci_reloc_init();

	/*
	 * Create the performance kstats.
	 */
	pci_kstat_init();

	/*
	 * Install the module.
	 */
	e = mod_install(&modlinkage);
	if (e != 0) {
		ddi_soft_state_fini(&per_pci_state);
		ddi_soft_state_fini(&per_pci_common_state);
		mutex_destroy(&pci_global_mutex);
	}
	return (e);
}
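/*
 * _fini() below tears down what _init() set up, in reverse order.
 * The two error queues are destroyed only if they exist: they are not
 * created in _init() but elsewhere in the driver, presumably once the
 * ECC/target error handling code first needs them.
 */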
int
_fini(void)
{
	int e;

	/*
	 * Remove the module.
	 */
	e = mod_remove(&modlinkage);
	if (e != 0)
		return (e);

	/*
	 * Destroy pci_ecc_queue, and set it to NULL.
	 */
	if (pci_ecc_queue)
		errorq_destroy(pci_ecc_queue);

	pci_ecc_queue = NULL;

	/*
	 * Destroy pci_target_queue, and set it to NULL.
	 */
	if (pci_target_queue)
		errorq_destroy(pci_target_queue);

	pci_target_queue = NULL;

	/*
	 * Destroy the performance kstats.
	 */
	pci_kstat_fini();

	/*
	 * Free the per-pci and per-psycho soft state info and destroy
	 * the global attach/detach mutex.
	 */
	ddi_soft_state_fini(&per_pci_state);
	ddi_soft_state_fini(&per_pci_common_state);
	mutex_destroy(&pci_global_mutex);
	pci_reloc_fini();
	return (e);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*ARGSUSED*/
static int
pci_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int instance = PCIHP_AP_MINOR_NUM_TO_INSTANCE(getminor((dev_t)arg));
	pci_t *pci_p = get_pci_soft_state(instance);

	/* allow hotplug to deal with ones it manages */
	if (pci_p && (pci_p->hotplug_capable == B_TRUE))
		return (pcihp_info(dip, infocmd, arg, result));

	/* non-hotplug or not attached */
	switch (infocmd) {
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		return (DDI_SUCCESS);

	case DDI_INFO_DEVT2DEVINFO:
		if (pci_p == NULL)
			return (DDI_FAILURE);
		*result = (void *)pci_p->pci_dip;
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
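/*
 * Note on the error handling in pci_attach() below: resources are
 * acquired in a fixed order, and on failure control jumps into the
 * err_* label ladder, which releases, in reverse order, exactly those
 * resources that were successfully set up before the failing step.
 */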
/* device driver entry points */

/*
 * attach entry point:
 */
static int
pci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	pci_t *pci_p;			/* per bus state pointer */
	int instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		DEBUG0(DBG_ATTACH, dip, "DDI_ATTACH\n");

		/*
		 * Allocate and get the per-pci soft state structure.
		 */
		if (alloc_pci_soft_state(instance) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: can't allocate pci state",
			    ddi_driver_name(dip), instance);
			goto err_bad_pci_softstate;
		}
		pci_p = get_pci_soft_state(instance);
		pci_p->pci_dip = dip;
		mutex_init(&pci_p->pci_mutex, NULL, MUTEX_DRIVER, NULL);
		pci_p->pci_soft_state = PCI_SOFT_STATE_CLOSED;
		pci_p->pci_open_count = 0;

		/*
		 * Get key properties of the pci bridge node and
		 * determine its type (psycho, schizo, etc ...).
		 */
		if (get_pci_properties(pci_p, dip) == DDI_FAILURE)
			goto err_bad_pci_prop;

		/*
		 * Map in the registers.
		 */
		if (map_pci_registers(pci_p, dip) == DDI_FAILURE)
			goto err_bad_reg_prop;

		if (pci_obj_setup(pci_p) != DDI_SUCCESS)
			goto err_bad_objs;

		/*
		 * If this PCI leaf has hotplug slots and this platform
		 * loads hotplug modules, initialize the hotplug framework.
		 */
		pci_init_hotplug(pci_p);

		/*
		 * Create the "devctl" node for hotplug support.
		 * For a non-hotplug bus, we still need ":devctl" to
		 * support the DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls.
		 */
		if (pci_p->hotplug_capable == B_FALSE) {
			if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
			    PCIHP_AP_MINOR_NUM(instance, PCIHP_DEVCTL_MINOR),
			    DDI_NT_NEXUS, 0) != DDI_SUCCESS)
				goto err_bad_devctl_node;
		}

		/*
		 * Create pcitool nodes for register access and interrupt
		 * routing.
		 */
		if (pcitool_init(dip) != DDI_SUCCESS) {
			goto err_bad_pcitool_nodes;
		}
		ddi_report_dev(dip);

		pci_p->pci_state = PCI_ATTACHED;
		DEBUG0(DBG_ATTACH, dip, "attach success\n");
		break;

err_bad_pcitool_nodes:
		if (pci_p->hotplug_capable == B_FALSE)
			ddi_remove_minor_node(dip, "devctl");
		else
			(void) pcihp_uninit(dip);
err_bad_devctl_node:
		pci_obj_destroy(pci_p);
err_bad_objs:
		unmap_pci_registers(pci_p);
err_bad_reg_prop:
		free_pci_properties(pci_p);
err_bad_pci_prop:
		mutex_destroy(&pci_p->pci_mutex);
		free_pci_soft_state(instance);
err_bad_pci_softstate:
		return (DDI_FAILURE);

	case DDI_RESUME:
		DEBUG0(DBG_ATTACH, dip, "DDI_RESUME\n");

		/*
		 * Make sure the Psycho control registers and IOMMU
		 * are configured properly.
		 */
		pci_p = get_pci_soft_state(instance);
		mutex_enter(&pci_p->pci_mutex);

		/*
		 * Make sure this instance has been suspended.
		 */
		if (pci_p->pci_state != PCI_SUSPENDED) {
			DEBUG0(DBG_ATTACH, dip, "instance NOT suspended\n");
			mutex_exit(&pci_p->pci_mutex);
			return (DDI_FAILURE);
		}
		pci_obj_resume(pci_p);
		pci_p->pci_state = PCI_ATTACHED;

		pci_child_cfg_restore(dip);

		mutex_exit(&pci_p->pci_mutex);
		break;

	default:
		DEBUG0(DBG_ATTACH, dip, "unsupported attach op\n");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
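/*
 * pci_detach() below is the mirror image of pci_attach(): DDI_DETACH
 * releases resources in roughly the reverse order DDI_ATTACH acquired
 * them, and DDI_SUSPEND is the counterpart of the DDI_RESUME case
 * above, saving child config space before suspending the hardware
 * objects.
 */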
/*
 * detach entry point:
 */
static int
pci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance = ddi_get_instance(dip);
	pci_t *pci_p = get_pci_soft_state(instance);

	/*
	 * Make sure we are currently attached.
	 */
	if (pci_p->pci_state != PCI_ATTACHED) {
		DEBUG0(DBG_ATTACH, dip, "failed - instance not attached\n");
		return (DDI_FAILURE);
	}

	mutex_enter(&pci_p->pci_mutex);

	switch (cmd) {
	case DDI_DETACH:
		DEBUG0(DBG_DETACH, dip, "DDI_DETACH\n");

		if (pci_p->hotplug_capable == B_TRUE)
			if (pcihp_uninit(dip) == DDI_FAILURE) {
				mutex_exit(&pci_p->pci_mutex);
				return (DDI_FAILURE);
			}

		pcitool_uninit(dip);

		pci_obj_destroy(pci_p);

		/*
		 * Free the pci soft state structure and the rest of the
		 * resources it's using.
		 */
		free_pci_properties(pci_p);
		unmap_pci_registers(pci_p);
		mutex_exit(&pci_p->pci_mutex);
		mutex_destroy(&pci_p->pci_mutex);
		free_pci_soft_state(instance);

		/* Free the interrupt-priorities prop if we created it. */
		{
			int len;

			if (ddi_getproplen(DDI_DEV_T_ANY, dip,
			    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
			    "interrupt-priorities", &len) == DDI_PROP_SUCCESS)
				(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
				    "interrupt-priorities");
		}
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		pci_child_cfg_save(dip);
		pci_obj_suspend(pci_p);
		pci_p->pci_state = PCI_SUSPENDED;

		mutex_exit(&pci_p->pci_mutex);
		return (DDI_SUCCESS);

	default:
		DEBUG0(DBG_DETACH, dip, "unsupported detach op\n");
		mutex_exit(&pci_p->pci_mutex);
		return (DDI_FAILURE);
	}
}
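/*
 * Note for the DDI_MT_RNUMBER case in pci_map() below: rp is advanced
 * by r_no into the kmem-allocated "reg" property array, so the
 * matching kmem_free() at the "done" label must rewind the pointer
 * (rp - r_no) to free the original allocation.
 */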
/* bus driver entry points */

/*
 * bus map entry point:
 *
 *	if map request is for an rnumber
 *		get the corresponding regspec from device node
 *	build a new regspec in our parent's format
 *	build a new map_req with the new regspec
 *	call up the tree to complete the mapping
 */
int
pci_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t off, off_t len, caddr_t *addrp)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	struct regspec p_regspec;
	ddi_map_req_t p_mapreq;
	int reglen, rval, r_no;
	pci_regspec_t reloc_reg, *rp = &reloc_reg;

	DEBUG2(DBG_MAP, dip, "rdip=%s%d:",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (mp->map_flags & DDI_MF_USER_MAPPING)
		return (DDI_ME_UNIMPLEMENTED);

	switch (mp->map_type) {
	case DDI_MT_REGSPEC:
		reloc_reg = *(pci_regspec_t *)mp->map_obj.rp;	/* dup whole */
		break;

	case DDI_MT_RNUMBER:
		r_no = mp->map_obj.rnumber;
		DEBUG1(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no);

		if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&rp, &reglen) != DDI_SUCCESS)
			return (DDI_ME_RNUMBER_RANGE);

		if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) {
			kmem_free(rp, reglen);
			return (DDI_ME_RNUMBER_RANGE);
		}
		rp += r_no;
		break;

	default:
		return (DDI_ME_INVAL);
	}
	DEBUG0(DBG_MAP | DBG_CONT, dip, "\n");

	/* use "assigned-addresses" to relocate regspec within pci space */
	if (rval = pci_reloc_reg(dip, rdip, pci_p, rp))
		goto done;

	if (len)	/* adjust regspec according to mapping request */
		rp->pci_size_low = len;
	rp->pci_phys_low += off;

	/* use "ranges" to translate relocated pci regspec into parent space */
	if (rval = pci_xlate_reg(pci_p, rp, &p_regspec))
		goto done;

	p_mapreq = *mp;		/* dup the whole structure */
	p_mapreq.map_type = DDI_MT_REGSPEC;
	p_mapreq.map_obj.rp = &p_regspec;
	rval = ddi_map(dip, &p_mapreq, 0, 0, addrp);

	if (rval == DDI_SUCCESS) {
		/*
		 * Set up access functions for FM access error capable
		 * drivers.  The axq workaround prevents fault management
		 * support.
		 */
		if (DDI_FM_ACC_ERR_CAP(pci_p->pci_fm_cap) &&
		    DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
		    mp->map_handlep->ah_acc.devacc_attr_access !=
		    DDI_DEFAULT_ACC)
			pci_fm_acc_setup(mp, rdip);
		pci_axq_setup(mp, pci_p->pci_pbm_p);
	}

done:
	if (mp->map_type == DDI_MT_RNUMBER)
		kmem_free(rp - r_no, reglen);

	return (rval);
}
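/*
 * The DMA entry points below distinguish three transfer types, encoded
 * in the handle flags: DVMA (IOMMU-translated), peer-to-peer, and
 * bypass.  For reference, a leaf driver's typical DMA sequence maps
 * onto these entry points roughly as follows (hypothetical sketch;
 * "h", "kva", "len" and "cb" are illustrative only):
 *
 *	ddi_dma_alloc_handle(dip, &attr, cb, NULL, &h); -> pci_dma_allochdl
 *	ddi_dma_addr_bind_handle(h, NULL, kva, len,
 *	    DDI_DMA_READ | DDI_DMA_CONSISTENT, cb, NULL,
 *	    &cookie, &ccount);				-> pci_dma_bindhdl
 *	ddi_dma_sync(h, 0, 0, DDI_DMA_SYNC_FORDEV);	-> pci_dma_sync
 *	ddi_dma_unbind_handle(h);			-> pci_dma_unbindhdl
 *	ddi_dma_free_handle(&h);			-> pci_dma_freehdl
 */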
/*
 * bus dma map entry point
 * return value:
 *	DDI_DMA_PARTIAL_MAP	 1
 *	DDI_DMA_MAPOK		 0
 *	DDI_DMA_MAPPED		 0
 *	DDI_DMA_NORESOURCES	-1
 *	DDI_DMA_NOMAPPING	-2
 *	DDI_DMA_TOOBIG		-3
 */
int
pci_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq,
	ddi_dma_handle_t *handlep)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	ddi_dma_impl_t *mp;
	int ret;

	DEBUG3(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip),
	    handlep ? "alloc" : "advisory");

	if (!(mp = pci_dma_lmts2hdl(dip, rdip, iommu_p, dmareq)))
		return (DDI_DMA_NORESOURCES);
	if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING)
		return (DDI_DMA_NOMAPPING);
	if (ret = pci_dma_type(pci_p, dmareq, mp))
		goto freehandle;
	if (ret = pci_dma_pfn(pci_p, dmareq, mp))
		goto freehandle;

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
		if ((ret = pci_dvma_win(pci_p, dmareq, mp)) || !handlep)
			goto freehandle;
		if (!PCI_DMA_CANCACHE(mp)) {	/* try fast track */
			if (PCI_DMA_CANFAST(mp)) {
				if (!pci_dvma_map_fast(iommu_p, mp))
					break;
			/* LINTED E_NOP_ELSE_STMT */
			} else {
				PCI_DVMA_FASTTRAK_PROF(mp);
			}
		}
		if (ret = pci_dvma_map(mp, dmareq, iommu_p))
			goto freehandle;
		break;
	case DMAI_FLAGS_PEER_TO_PEER:
		/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
		if ((ret = pci_dma_physwin(pci_p, dmareq, mp)) || !handlep)
			goto freehandle;
		break;
	case DMAI_FLAGS_BYPASS:
	default:
		panic("%s%d: pci_dma_setup: bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PCI_DMA_TYPE(mp));
		/*NOTREACHED*/
	}
	*handlep = (ddi_dma_handle_t)mp;
	mp->dmai_flags |= (DMAI_FLAGS_INUSE | DMAI_FLAGS_MAPPED);
	dump_dma_handle(DBG_DMA_MAP, dip, mp);

	return ((mp->dmai_nwin == 1) ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
freehandle:
	if (ret == DDI_DMA_NORESOURCES)
		pci_dma_freemp(mp);	/* don't run_callback() */
	else
		(void) pci_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
	return (ret);
}


/*
 * bus dma alloc handle entry point:
 */
int
pci_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	ddi_dma_impl_t *mp;
	int rval;

	DEBUG2(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (attrp->dma_attr_version != DMA_ATTR_V0)
		return (DDI_DMA_BADATTR);

	if (!(mp = pci_dma_allocmp(dip, rdip, waitfp, arg)))
		return (DDI_DMA_NORESOURCES);

	/*
	 * Save requestor's information
	 */
	mp->dmai_attr = *attrp;	/* whole object - augmented later */
	*DEV_ATTR(mp) = *attrp;	/* whole object - device orig attr */
	DEBUG1(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp);

	/* check and convert dma attributes to handle parameters */
	if (rval = pci_dma_attr2hdl(pci_p, mp)) {
		pci_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
		*handlep = NULL;
		return (rval);
	}
	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}


/*
 * bus dma free handle entry point:
 */
/*ARGSUSED*/
int
pci_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	DEBUG3(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	pci_dma_freemp((ddi_dma_impl_t *)handle);

	if (pci_kmem_clid) {
		DEBUG0(DBG_DMA_FREEH, dip, "run handle callback\n");
		ddi_run_callback(&pci_kmem_clid);
	}
	return (DDI_SUCCESS);
}
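/*
 * pci_dma_bindhdl() below follows the same type dispatch as
 * pci_dma_setup(): a DVMA binding yields a single cookie per window,
 * while peer-to-peer and bypass bindings return the cookie list
 * precomputed for the first window.  FLAGERR-capable handles are also
 * registered with the FM cache via ndi_fmc_insert() so that DMA
 * errors can later be matched back to the owning handle.
 */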
/*
 * bus dma bind handle entry point:
 */
int
pci_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
	ddi_dma_handle_t handle, ddi_dma_req_t *dmareq,
	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	int ret;

	DEBUG4(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq);

	if (mp->dmai_flags & DMAI_FLAGS_INUSE)
		return (DDI_DMA_INUSE);

	ASSERT((mp->dmai_flags & ~DMAI_FLAGS_PRESERVE) == 0);
	mp->dmai_flags |= DMAI_FLAGS_INUSE;

	if (ret = pci_dma_type(pci_p, dmareq, mp))
		goto err;
	if (ret = pci_dma_pfn(pci_p, dmareq, mp))
		goto err;

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		if (ret = pci_dvma_win(pci_p, dmareq, mp))
			goto map_err;
		if (!PCI_DMA_CANCACHE(mp)) {	/* try fast track */
			if (PCI_DMA_CANFAST(mp)) {
				if (!pci_dvma_map_fast(iommu_p, mp))
					goto mapped;
			/*LINTED E_NOP_ELSE_STMT*/
			} else {
				PCI_DVMA_FASTTRAK_PROF(mp);
			}
		}
		if (ret = pci_dvma_map(mp, dmareq, iommu_p))
			goto map_err;
mapped:
		*ccountp = 1;
		MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size);
		break;
	case DMAI_FLAGS_BYPASS:
	case DMAI_FLAGS_PEER_TO_PEER:
		if (ret = pci_dma_physwin(pci_p, dmareq, mp))
			goto map_err;
		*ccountp = WINLST(mp)->win_ncookies;
		*cookiep = *(ddi_dma_cookie_t *)(WINLST(mp) + 1); /* wholeobj */
		break;
	default:
		panic("%s%d: pci_dma_bindhdl(%p): bad dma type",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
		/*NOTREACHED*/
	}
	DEBUG2(DBG_DMA_BINDH, dip, "cookie %x+%x\n", cookiep->dmac_address,
	    cookiep->dmac_size);
	dump_dma_handle(DBG_DMA_MAP, dip, mp);

	if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
		(void) ndi_fmc_insert(rdip, DMA_HANDLE, mp, NULL);
		mp->dmai_error.err_cf = impl_dma_check;
	}

	mp->dmai_flags |= DMAI_FLAGS_MAPPED;
	return (mp->dmai_nwin == 1 ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
map_err:
	pci_dvma_unregister_callbacks(pci_p, mp);
	pci_dma_freepfn(mp);
err:
	mp->dmai_flags &= DMAI_FLAGS_PRESERVE;
	return (ret);
}

/*
 * bus dma unbind handle entry point:
 */
/*ARGSUSED*/
int
pci_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	iommu_t *iommu_p = pci_p->pci_iommu_p;

	DEBUG3(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	if ((mp->dmai_flags & DMAI_FLAGS_INUSE) == 0) {
		DEBUG0(DBG_DMA_UNBINDH, dip, "handle not in use\n");
		return (DDI_FAILURE);
	}

	mp->dmai_flags &= ~DMAI_FLAGS_MAPPED;

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		pci_dvma_unregister_callbacks(pci_p, mp);
		pci_dma_sync_unmap(dip, rdip, mp);
		pci_dvma_unmap(iommu_p, mp);
		pci_dma_freepfn(mp);
		break;
	case DMAI_FLAGS_BYPASS:
	case DMAI_FLAGS_PEER_TO_PEER:
		pci_dma_freewin(mp);
		break;
	default:
		panic("%s%d: pci_dma_unbindhdl: bad dma type %p",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
		/*NOTREACHED*/
	}
	if (iommu_p->iommu_dvma_clid != 0) {
		DEBUG0(DBG_DMA_UNBINDH, dip, "run dvma callback\n");
		ddi_run_callback(&iommu_p->iommu_dvma_clid);
	}
	if (pci_kmem_clid) {
		DEBUG0(DBG_DMA_UNBINDH, dip, "run handle callback\n");
		ddi_run_callback(&pci_kmem_clid);
	}
	mp->dmai_flags &= DMAI_FLAGS_PRESERVE;
	SYNC_BUF_PA(mp) = 0;

	if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
		if (DEVI(rdip)->devi_fmhdl != NULL &&
		    DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap)) {
			(void) ndi_fmc_remove(rdip, DMA_HANDLE, mp);
		}
	}

	return (DDI_SUCCESS);
}
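/*
 * pci_dma_win() below selects one window of a partial mapping.  For
 * DVMA the window is remapped in the IOMMU in place; for the other
 * types the per-window state (offset, size, cookie list) is simply
 * copied out of the window list that was built at bind time.
 */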
/*
 * bus dma win entry point:
 */
int
pci_dma_win(dev_info_t *dip, dev_info_t *rdip,
	ddi_dma_handle_t handle, uint_t win, off_t *offp,
	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	DEBUG2(DBG_DMA_WIN, dip, "rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
	dump_dma_handle(DBG_DMA_WIN, dip, mp);
	if (win >= mp->dmai_nwin) {
		DEBUG1(DBG_DMA_WIN, dip, "%x out of range\n", win);
		return (DDI_FAILURE);
	}

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		if (win != PCI_DMA_CURWIN(mp)) {
			pci_t *pci_p =
			    get_pci_soft_state(ddi_get_instance(dip));
			pci_dma_sync_unmap(dip, rdip, mp);
			/* map_window sets dmai_mapping/size/offset */
			iommu_map_window(pci_p->pci_iommu_p, mp, win);
		}
		if (cookiep)
			MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping,
			    mp->dmai_size);
		if (ccountp)
			*ccountp = 1;
		break;
	case DMAI_FLAGS_PEER_TO_PEER:
	case DMAI_FLAGS_BYPASS: {
		int i;
		ddi_dma_cookie_t *ck_p;
		pci_dma_win_t *win_p = mp->dmai_winlst;

		for (i = 0; i < win; win_p = win_p->win_next, i++)
			;
		ck_p = (ddi_dma_cookie_t *)(win_p + 1);
		*cookiep = *ck_p;
		mp->dmai_offset = win_p->win_offset;
		mp->dmai_size = win_p->win_size;
		mp->dmai_mapping = ck_p->dmac_laddress;
		mp->dmai_cookie = ck_p + 1;
		win_p->win_curseg = 0;
		if (ccountp)
			*ccountp = win_p->win_ncookies;
		}
		break;
	default:
		cmn_err(CE_WARN, "%s%d: pci_dma_win: bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PCI_DMA_TYPE(mp));
		return (DDI_FAILURE);
	}
	if (cookiep)
		DEBUG2(DBG_DMA_WIN, dip,
		    "cookie - dmac_address=%x dmac_size=%x\n",
		    cookiep->dmac_address, cookiep->dmac_size);
	if (offp)
		*offp = (off_t)mp->dmai_offset;
	if (lenp)
		*lenp = mp->dmai_size;
	return (DDI_SUCCESS);
}

#ifdef DEBUG
static char *pci_dmactl_str[] = {
	"DDI_DMA_FREE",
	"DDI_DMA_SYNC",
	"DDI_DMA_HTOC",
	"DDI_DMA_KVADDR",
	"DDI_DMA_MOVWIN",
	"DDI_DMA_REPWIN",
	"DDI_DMA_GETERR",
	"DDI_DMA_COFF",
	"DDI_DMA_NEXTWIN",
	"DDI_DMA_NEXTSEG",
	"DDI_DMA_SEGTOC",
	"DDI_DMA_RESERVE",
	"DDI_DMA_RELEASE",
	"DDI_DMA_RESETH",
	"DDI_DMA_CKSYNC",
	"DDI_DMA_IOPB_ALLOC",
	"DDI_DMA_IOPB_FREE",
	"DDI_DMA_SMEM_ALLOC",
	"DDI_DMA_SMEM_FREE",
	"DDI_DMA_SET_SBUS64",
	"DDI_DMA_REMAP"
};
#endif

/*
 * bus dma control entry point:
 */
int
pci_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
	enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp,
	uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	DEBUG3(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", pci_dmactl_str[cmd],
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	switch (cmd) {
	case DDI_DMA_FREE:
		(void) pci_dma_unbindhdl(dip, rdip, handle);
		(void) pci_dma_freehdl(dip, rdip, handle);
		return (DDI_SUCCESS);
	case DDI_DMA_RESERVE: {
		pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
		return (pci_fdvma_reserve(dip, rdip, pci_p,
		    (ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp));
		}
	case DDI_DMA_RELEASE: {
		pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
		return (pci_fdvma_release(dip, pci_p, mp));
		}
	default:
		break;
	}

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		return (pci_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	case DMAI_FLAGS_PEER_TO_PEER:
	case DMAI_FLAGS_BYPASS:
		return (pci_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	default:
		panic("%s%d: pci_dma_ctlops(%x): bad dma type %x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), cmd,
		    mp->dmai_flags);
		/*NOTREACHED*/
	}
}
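/*
 * The poke/peek implementations that follow use on_trap(OT_DATA_ACCESS)
 * to survive faulting accesses: the trap trampoline is temporarily
 * pointed at poke_fault/peek_fault so that a master-abort or similar
 * error surfaces as DDI_FAILURE rather than a panic.
 */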
#ifdef DEBUG
int pci_peekfault_cnt = 0;
int pci_pokefault_cnt = 0;
#endif	/* DEBUG */

static int
pci_do_poke(pci_t *pci_p, peekpoke_ctlops_t *in_args)
{
	pbm_t *pbm_p = pci_p->pci_pbm_p;
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	mutex_enter(&pbm_p->pbm_pokefault_mutex);
	pbm_p->pbm_ontrap_data = &otd;

	/* Set up protected environment. */
	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		otd.ot_trampoline = (uintptr_t)&poke_fault;
		err = do_poke(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else
		err = DDI_FAILURE;

	/*
	 * Read the async fault register for the PBM to see if it sees
	 * a master-abort.
	 */
	pbm_clear_error(pbm_p);

	if (otd.ot_trap & OT_DATA_ACCESS)
		err = DDI_FAILURE;

	/* Take down protected environment. */
	no_trap();

	pbm_p->pbm_ontrap_data = NULL;
	mutex_exit(&pbm_p->pbm_pokefault_mutex);

#ifdef DEBUG
	if (err == DDI_FAILURE)
		pci_pokefault_cnt++;
#endif
	return (err);
}


static int
pci_do_caut_put(pci_t *pci_p, peekpoke_ctlops_t *cautacc_ctlops_arg)
{
	size_t size = cautacc_ctlops_arg->size;
	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
	size_t repcount = cautacc_ctlops_arg->repcount;
	uint_t flags = cautacc_ctlops_arg->flags;

	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;

	/*
	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
	 * mutex.
	 */
	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);

	if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
		for (; repcount; repcount--) {
			switch (size) {

			case sizeof (uint8_t):
				i_ddi_put8(hp, (uint8_t *)dev_addr,
				    *(uint8_t *)host_addr);
				break;

			case sizeof (uint16_t):
				i_ddi_put16(hp, (uint16_t *)dev_addr,
				    *(uint16_t *)host_addr);
				break;

			case sizeof (uint32_t):
				i_ddi_put32(hp, (uint32_t *)dev_addr,
				    *(uint32_t *)host_addr);
				break;

			case sizeof (uint64_t):
				i_ddi_put64(hp, (uint64_t *)dev_addr,
				    *(uint64_t *)host_addr);
				break;
			}

			host_addr += size;

			if (flags == DDI_DEV_AUTOINCR)
				dev_addr += size;
		}
	}

	i_ddi_notrap((ddi_acc_handle_t)hp);
	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;

	if (hp->ahi_err->err_status != DDI_FM_OK) {
		/* Clear the expected fault from the handle before returning */
		hp->ahi_err->err_status = DDI_FM_OK;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}


static int
pci_ctlops_poke(pci_t *pci_p, peekpoke_ctlops_t *in_args)
{
	return (in_args->handle ? pci_do_caut_put(pci_p, in_args) :
	    pci_do_poke(pci_p, in_args));
}
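/*
 * The peek side below parallels the poke side above.  On a trapped
 * cautious get, pci_do_caut_get() fills the caller's buffer with 0xff
 * bytes, mimicking the all-ones data a faulted PCI read returns on
 * the bus, before reporting DDI_FAILURE.
 */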
static int
pci_do_peek(pci_t *pci_p, peekpoke_ctlops_t *in_args)
{
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		otd.ot_trampoline = (uintptr_t)&peek_fault;
		err = do_peek(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else
		err = DDI_FAILURE;

	no_trap();

#ifdef DEBUG
	if (err == DDI_FAILURE)
		pci_peekfault_cnt++;
#endif
	return (err);
}

static int
pci_do_caut_get(pci_t *pci_p, peekpoke_ctlops_t *cautacc_ctlops_arg)
{
	size_t size = cautacc_ctlops_arg->size;
	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
	size_t repcount = cautacc_ctlops_arg->repcount;
	uint_t flags = cautacc_ctlops_arg->flags;

	int err = DDI_SUCCESS;

	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);

	if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
		for (; repcount; repcount--) {
			i_ddi_caut_get(size, (void *)dev_addr,
			    (void *)host_addr);

			host_addr += size;

			if (flags == DDI_DEV_AUTOINCR)
				dev_addr += size;
		}
	} else {
		int i;
		uint8_t *ff_addr = (uint8_t *)host_addr;
		for (i = 0; i < size; i++)
			*ff_addr++ = 0xff;

		err = DDI_FAILURE;
	}

	i_ddi_notrap((ddi_acc_handle_t)hp);
	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;

	return (err);
}


static int
pci_ctlops_peek(pci_t *pci_p, peekpoke_ctlops_t *in_args, void *result)
{
	result = (void *)in_args->host_addr;
	return (in_args->handle ? pci_do_caut_get(pci_p, in_args) :
	    pci_do_peek(pci_p, in_args));
}
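/*
 * get_reg_set_size() below assembles the size of one "reg" entry as a
 * 64-bit value from pci_size_low and pci_size_hi, although the result
 * is ultimately returned as an off_t.
 */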
/*
 * get_reg_set_size
 *
 * Given a dev info pointer to a pci child and a register number, this
 * routine returns the size element of that reg set property.
 *
 * return value: size of reg set on success, -1 on error
 */
static off_t
get_reg_set_size(dev_info_t *child, int rnumber)
{
	pci_regspec_t *pci_rp;
	off_t size;
	int i;

	if (rnumber < 0)
		return (-1);

	/*
	 * Get the reg property for the device.
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS, "reg",
	    (caddr_t)&pci_rp, &i) != DDI_SUCCESS)
		return (-1);

	if (rnumber >= (i / (int)sizeof (pci_regspec_t))) {
		kmem_free(pci_rp, i);
		return (-1);
	}

	size = pci_rp[rnumber].pci_size_low |
	    ((uint64_t)pci_rp[rnumber].pci_size_hi << 32);
	kmem_free(pci_rp, i);
	return (size);
}


/*
 * control ops entry point:
 *
 * Requests handled completely:
 *	DDI_CTLOPS_INITCHILD	see init_child() for details
 *	DDI_CTLOPS_UNINITCHILD
 *	DDI_CTLOPS_REPORTDEV	see report_dev() for details
 *	DDI_CTLOPS_IOMIN	cache line size if streaming otherwise 1
 *	DDI_CTLOPS_REGSIZE
 *	DDI_CTLOPS_NREGS
 *	DDI_CTLOPS_DVMAPAGESIZE
 *	DDI_CTLOPS_POKE
 *	DDI_CTLOPS_PEEK
 *	DDI_CTLOPS_QUIESCE
 *	DDI_CTLOPS_UNQUIESCE
 *
 * All others are passed up to the parent.
 */
int
pci_ctlops(dev_info_t *dip, dev_info_t *rdip,
	ddi_ctl_enum_t op, void *arg, void *result)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));

	switch (op) {
	case DDI_CTLOPS_INITCHILD:
		return (init_child(pci_p, (dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		return (uninit_child(pci_p, (dev_info_t *)arg));

	case DDI_CTLOPS_REPORTDEV:
		return (report_dev(rdip));

	case DDI_CTLOPS_IOMIN:
		/*
		 * If we are using the streaming cache, align at
		 * least on a cache line boundary.  Otherwise use
		 * whatever alignment is passed in.
		 */
		if ((uintptr_t)arg) {
			int val = *((int *)result);

			val = maxbit(val, PCI_SBUF_LINE_SIZE);
			*((int *)result) = val;
		}
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
		*((off_t *)result) = get_reg_set_size(rdip, *((int *)arg));
		return (*((off_t *)result) == -1 ? DDI_FAILURE : DDI_SUCCESS);

	case DDI_CTLOPS_NREGS:
		*((uint_t *)result) = get_nreg_set(rdip);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_DVMAPAGESIZE:
		*((ulong_t *)result) = IOMMU_PAGE_SIZE;
		return (DDI_SUCCESS);

	case DDI_CTLOPS_POKE:
		return (pci_ctlops_poke(pci_p, (peekpoke_ctlops_t *)arg));

	case DDI_CTLOPS_PEEK:
		return (pci_ctlops_peek(pci_p, (peekpoke_ctlops_t *)arg,
		    result));

	case DDI_CTLOPS_AFFINITY:
		break;

	case DDI_CTLOPS_QUIESCE:
		return (pci_bus_quiesce(pci_p, rdip, result));

	case DDI_CTLOPS_UNQUIESCE:
		return (pci_bus_unquiesce(pci_p, rdip, result));

	default:
		break;
	}

	/*
	 * Now pass the request up to our parent.
	 */
	DEBUG2(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
	return (ddi_ctlops(dip, rdip, op, arg, result));
}
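/*
 * pci_intr_ops() below handles only fixed (INTx-style) interrupts;
 * this nexus does not support MSI, as the DDI_INTROP_SUPPORTED_TYPES
 * case reflects by reporting DDI_INTR_TYPE_FIXED only.
 */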
/* ARGSUSED */
int
pci_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
	ddi_intr_handle_impl_t *hdlp, void *result)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	ib_ino_t ino;
	int ret = DDI_SUCCESS;

	switch (intr_op) {
	case DDI_INTROP_GETCAP:
		/* GetCap will always fail for all non-PCI devices */
		(void) pci_intx_get_cap(rdip, (int *)result);
		break;
	case DDI_INTROP_SETCAP:
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ALLOC:
		*(int *)result = hdlp->ih_scratch1;
		break;
	case DDI_INTROP_FREE:
		break;
	case DDI_INTROP_GETPRI:
		*(int *)result = hdlp->ih_pri ?
		    hdlp->ih_pri : pci_class_to_pil(rdip);
		break;
	case DDI_INTROP_SETPRI:
		break;
	case DDI_INTROP_ADDISR:
		ret = pci_add_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_REMISR:
		ret = pci_remove_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_GETTARGET:
		ino = IB_MONDO_TO_INO(pci_xlate_intr(dip, rdip,
		    pci_p->pci_ib_p, IB_MONDO_TO_INO(hdlp->ih_vector)));
		ret = ib_get_intr_target(pci_p, ino, (int *)result);
		break;
	case DDI_INTROP_SETTARGET:
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ENABLE:
		ret = ib_update_intr_state(pci_p, rdip, hdlp,
		    PCI_INTR_STATE_ENABLE);
		break;
	case DDI_INTROP_DISABLE:
		ret = ib_update_intr_state(pci_p, rdip, hdlp,
		    PCI_INTR_STATE_DISABLE);
		break;
	case DDI_INTROP_SETMASK:
		ret = pci_intx_set_mask(rdip);
		break;
	case DDI_INTROP_CLRMASK:
		ret = pci_intx_clr_mask(rdip);
		break;
	case DDI_INTROP_GETPENDING:
		ret = pci_intx_get_pending(rdip, (int *)result);
		break;
	case DDI_INTROP_NINTRS:
	case DDI_INTROP_NAVAIL:
		*(int *)result = i_ddi_get_intx_nintrs(rdip);
		break;
	case DDI_INTROP_SUPPORTED_TYPES:
		/* PCI nexus driver supports only fixed interrupts */
		*(int *)result = i_ddi_get_intx_nintrs(rdip) ?
		    DDI_INTR_TYPE_FIXED : 0;
		break;
	default:
		ret = DDI_ENOTSUP;
		break;
	}

	return (ret);
}

static void
pci_init_hotplug(struct pci *pci_p)
{
	pci_bus_range_t bus_range;
	dev_info_t *dip;

	/*
	 * Before initializing hotplug, open up the bus range.  The
	 * busra module will initialize its pool of bus numbers from
	 * this; "busra" is the agent that keeps track of them during
	 * hotplug.  Note also that busra will remove any bus numbers
	 * already in use from boot time.
	 */
	bus_range.lo = 0x0;
	bus_range.hi = 0xff;
	dip = pci_p->pci_dip;
	pci_p->hotplug_capable = B_FALSE;

	/*
	 * If this property exists, this nexus has hot-plug slots.
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "hotplug-capable")) {
		if (ndi_prop_update_int_array(DDI_DEV_T_NONE,
		    dip, "bus-range",
		    (int *)&bus_range,
		    2) != DDI_PROP_SUCCESS) {
			return;
		}

		if (pcihp_init(dip) != DDI_SUCCESS) {
			return;
		}

		if ((pcihp_ops = pcihp_get_cb_ops()) != NULL) {
			DEBUG2(DBG_ATTACH, dip, "%s%d hotplug enabled",
			    ddi_driver_name(dip), ddi_get_instance(dip));
			pci_p->hotplug_capable = B_TRUE;
		}
	}
}