/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * PCI nexus driver interface
 */

#include <sys/types.h>
#include <sys/conf.h>		/* nulldev */
#include <sys/stat.h>		/* devctl */
#include <sys/kmem.h>
#include <sys/async.h>		/* ecc_flt for pci_ecc.h */
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndifm.h>
#include <sys/ontrap.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi_subrdefs.h>
#include <sys/epm.h>
#include <sys/hotplug/pci/pcihp.h>
#include <sys/pci/pci_tools_ext.h>
#include <sys/spl.h>
#include <sys/pci/pci_obj.h>

/*LINTLIBRARY*/

/*
 * function prototype for hotplug routine:
 */
static void pci_init_hotplug(struct pci *);

/*
 * function prototypes for dev ops routines:
 */
static int pci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int pci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int pci_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
	void *arg, void **result);
static int pci_ctlops_poke(pci_t *pci_p, peekpoke_ctlops_t *in_args);
static int pci_ctlops_peek(pci_t *pci_p, peekpoke_ctlops_t *in_args,
	void *result);
static off_t get_reg_set_size(dev_info_t *child, int rnumber);

/*
 * bus ops and dev ops structures:
 */
static struct bus_ops pci_bus_ops = {
	BUSO_REV,
	pci_map,			/* (*bus_map)(); */
	0,				/* (*bus_get_intrspec)(); */
	0,				/* (*bus_add_intrspec)(); */
	0,				/* (*bus_remove_intrspec)(); */
	i_ddi_map_fault,		/* (*bus_map_fault)(); */
	pci_dma_setup,			/* (*bus_dma_map)(); */
	pci_dma_allochdl,		/* (*bus_dma_allochdl)(); */
	pci_dma_freehdl,		/* (*bus_dma_freehdl)(); */
	pci_dma_bindhdl,		/* (*bus_dma_bindhdl)(); */
	pci_dma_unbindhdl,		/* (*bus_dma_unbindhdl)(); */
	pci_dma_sync,			/* (*bus_dma_flush)(); */
	pci_dma_win,			/* (*bus_dma_win)(); */
	pci_dma_ctlops,			/* (*bus_dma_ctl)(); */
	pci_ctlops,			/* (*bus_ctl)(); */
	ddi_bus_prop_op,		/* (*bus_prop_op)(); */
	ndi_busop_get_eventcookie,	/* (*bus_get_eventcookie)(); */
	ndi_busop_add_eventcall,	/* (*bus_add_eventcall)(); */
	ndi_busop_remove_eventcall,	/* (*bus_remove_eventcall)(); */
	ndi_post_event,			/* (*bus_post_event)(); */
	NULL,				/* (*bus_intr_ctl)(); */
	NULL,				/* (*bus_config)(); */
	NULL,				/* (*bus_unconfig)(); */
	pci_fm_init_child,		/* (*bus_fm_init)(); */
	NULL,				/* (*bus_fm_fini)(); */
	pci_bus_enter,			/* (*bus_fm_access_enter)(); */
	pci_bus_exit,			/* (*bus_fm_access_fini)(); */
	NULL,				/* (*bus_power)(); */
	pci_intr_ops			/* (*bus_intr_op)(); */
};

extern struct cb_ops pci_cb_ops;

static struct dev_ops pci_ops = {
	DEVO_REV,
	0,				/* devo_refcnt */
	pci_info,			/* devo_getinfo */
	nulldev,			/* devo_identify */
	0,				/* devo_probe */
	pci_attach,			/* devo_attach */
	pci_detach,			/* devo_detach */
	nodev,				/* devo_reset */
	&pci_cb_ops,			/* devo_cb_ops */
	&pci_bus_ops,			/* devo_bus_ops */
	0,				/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

/*
 * module definitions:
 */
#include <sys/modctl.h>
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,				/* Type of module - driver */
	"Sun4u Host to PCI nexus driver",	/* Name of module. */
	&pci_ops,				/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/*
 * driver global data:
 */
void *per_pci_state;			/* per-pbm soft state pointer */
void *per_pci_common_state;		/* per-psycho soft state pointer */
kmutex_t pci_global_mutex;		/* attach/detach common struct lock */
errorq_t *pci_ecc_queue = NULL;		/* per-system ecc handling queue */
extern errorq_t *pci_target_queue;
struct cb_ops *pcihp_ops = NULL;	/* hotplug module cb ops */

extern void pci_child_cfg_save(dev_info_t *dip);
extern void pci_child_cfg_restore(dev_info_t *dip);

int
_init(void)
{
	int e;

	/*
	 * Initialize per-pci bus soft state pointer.
	 */
	e = ddi_soft_state_init(&per_pci_state, sizeof (pci_t), 1);
	if (e != 0)
		return (e);

	/*
	 * Initialize per-psycho soft state pointer.
	 */
	e = ddi_soft_state_init(&per_pci_common_state,
	    sizeof (pci_common_t), 1);
	if (e != 0) {
		ddi_soft_state_fini(&per_pci_state);
		return (e);
	}

	/*
	 * Initialize global mutexes.
	 */
	mutex_init(&pci_global_mutex, NULL, MUTEX_DRIVER, NULL);
	pci_reloc_init();

	/*
	 * Create the performance kstats.
	 */
	pci_kstat_init();

	/*
	 * Install the module.
	 */
	e = mod_install(&modlinkage);
	if (e != 0) {
		ddi_soft_state_fini(&per_pci_state);
		ddi_soft_state_fini(&per_pci_common_state);
		mutex_destroy(&pci_global_mutex);
	}
	return (e);
}

int
_fini(void)
{
	int e;

	/*
	 * Remove the module.
	 */
	e = mod_remove(&modlinkage);
	if (e != 0)
		return (e);

	/*
	 * Destroy pci_ecc_queue, and set it to NULL.
	 */
	if (pci_ecc_queue)
		errorq_destroy(pci_ecc_queue);

	pci_ecc_queue = NULL;

	/*
	 * Destroy pci_target_queue, and set it to NULL.
	 */
	if (pci_target_queue)
		errorq_destroy(pci_target_queue);

	pci_target_queue = NULL;

	/*
	 * Destroy the performance kstats.
	 */
	pci_kstat_fini();

	/*
	 * Free the per-pci and per-psycho soft state info and destroy
	 * the mutex protecting the per-psycho soft state.
	 */
	ddi_soft_state_fini(&per_pci_state);
	ddi_soft_state_fini(&per_pci_common_state);
	mutex_destroy(&pci_global_mutex);
	pci_reloc_fini();
	return (e);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * getinfo entry point: map a dev_t to its instance or dev_info pointer.
 */
/*ARGSUSED*/
static int
pci_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int instance = PCIHP_AP_MINOR_NUM_TO_INSTANCE(getminor((dev_t)arg));
	pci_t *pci_p = get_pci_soft_state(instance);

	/* allow hotplug to deal with ones it manages */
	if (pci_p && (pci_p->hotplug_capable == B_TRUE))
		return (pcihp_info(dip, infocmd, arg, result));

	/* non-hotplug or not attached */
	switch (infocmd) {
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		return (DDI_SUCCESS);

	case DDI_INFO_DEVT2DEVINFO:
		if (pci_p == NULL)
			return (DDI_FAILURE);
		*result = (void *)pci_p->pci_dip;
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
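
/*
 * Note on minor numbers: both the hotplug attachment-point nodes and the
 * ":devctl" node created in pci_attach() encode the driver instance in
 * the minor number (see the PCIHP_AP_MINOR_NUM* macros in
 * <sys/hotplug/pci/pcihp.h>); this is how pci_info() above recovers the
 * instance from a dev_t.  A minimal sketch of the decode step:
 *
 *	minor_t minor = getminor(dev);
 *	int instance = PCIHP_AP_MINOR_NUM_TO_INSTANCE(minor);
 *	pci_t *pci_p = get_pci_soft_state(instance);
 */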

/* device driver entry points */

/*
 * attach entry point:
 */
static int
pci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	pci_t *pci_p;			/* per bus state pointer */
	int instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		DEBUG0(DBG_ATTACH, dip, "DDI_ATTACH\n");

		/*
		 * Allocate and get the per-pci soft state structure.
		 */
		if (alloc_pci_soft_state(instance) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: can't allocate pci state",
			    ddi_driver_name(dip), instance);
			goto err_bad_pci_softstate;
		}
		pci_p = get_pci_soft_state(instance);
		pci_p->pci_dip = dip;
		mutex_init(&pci_p->pci_mutex, NULL, MUTEX_DRIVER, NULL);
		pci_p->pci_soft_state = PCI_SOFT_STATE_CLOSED;

		/*
		 * Get key properties of the pci bridge node and
		 * determine its type (psycho, schizo, etc ...).
		 */
		if (get_pci_properties(pci_p, dip) == DDI_FAILURE)
			goto err_bad_pci_prop;

		/*
		 * Map in the registers.
		 */
		if (map_pci_registers(pci_p, dip) == DDI_FAILURE)
			goto err_bad_reg_prop;

		if (pci_obj_setup(pci_p) != DDI_SUCCESS)
			goto err_bad_objs;

		/*
		 * If this PCI leaf has hotplug and this platform
		 * loads hotplug modules then initialize the
		 * hotplug framework.
		 */
		pci_init_hotplug(pci_p);

		/*
		 * Create the "devctl" node for hotplug support.
		 * For a non-hotplug bus, we still need ":devctl" to
		 * support the DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls.
		 */
		if (pci_p->hotplug_capable == B_FALSE) {
			if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
			    PCIHP_AP_MINOR_NUM(instance, PCIHP_DEVCTL_MINOR),
			    DDI_NT_NEXUS, 0) != DDI_SUCCESS)
				goto err_bad_devctl_node;
		}

		/*
		 * Create pcitool nodes for register access and interrupt
		 * routing.
		 */
		if (pcitool_init(dip) != DDI_SUCCESS) {
			goto err_bad_pcitool_nodes;
		}
		ddi_report_dev(dip);

		pci_p->pci_state = PCI_ATTACHED;
		DEBUG0(DBG_ATTACH, dip, "attach success\n");
		break;

		/*
		 * The labels below unwind the setup steps above in
		 * reverse order; each label undoes everything that
		 * succeeded before the failing step.
		 */
err_bad_pcitool_nodes:
		if (pci_p->hotplug_capable == B_FALSE)
			ddi_remove_minor_node(dip, "devctl");
		else
			(void) pcihp_uninit(dip);
err_bad_devctl_node:
		pci_obj_destroy(pci_p);
err_bad_objs:
		unmap_pci_registers(pci_p);
err_bad_reg_prop:
		free_pci_properties(pci_p);
err_bad_pci_prop:
		mutex_destroy(&pci_p->pci_mutex);
		free_pci_soft_state(instance);
err_bad_pci_softstate:
		return (DDI_FAILURE);

	case DDI_RESUME:
		DEBUG0(DBG_ATTACH, dip, "DDI_RESUME\n");

		/*
		 * Make sure the Psycho control registers and IOMMU
		 * are configured properly.
		 */
		pci_p = get_pci_soft_state(instance);
		mutex_enter(&pci_p->pci_mutex);

		/*
		 * Make sure this instance has been suspended.
		 */
		if (pci_p->pci_state != PCI_SUSPENDED) {
			DEBUG0(DBG_ATTACH, dip, "instance NOT suspended\n");
			mutex_exit(&pci_p->pci_mutex);
			return (DDI_FAILURE);
		}
		pci_obj_resume(pci_p);
		pci_p->pci_state = PCI_ATTACHED;

		pci_child_cfg_restore(dip);

		mutex_exit(&pci_p->pci_mutex);
		break;

	default:
		DEBUG0(DBG_ATTACH, dip, "unsupported attach op\n");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
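
/*
 * For reference, the ":devctl" minor node created by pci_attach() accepts
 * the DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls whether or not the bus is
 * hotplug capable.  A hedged userland sketch using libdevice(3LIB); the
 * device path is hypothetical:
 *
 *	devctl_hdl_t hdl;
 *
 *	hdl = devctl_bus_acquire("/devices/pci@1f,4000:devctl", 0);
 *	if (hdl != NULL) {
 *		(void) devctl_bus_quiesce(hdl);
 *		(void) devctl_bus_unquiesce(hdl);
 *		devctl_release(hdl);
 *	}
 */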

/*
 * detach entry point:
 */
static int
pci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance = ddi_get_instance(dip);
	pci_t *pci_p = get_pci_soft_state(instance);

	/*
	 * Make sure we are currently attached
	 */
	if (pci_p->pci_state != PCI_ATTACHED) {
		DEBUG0(DBG_ATTACH, dip, "failed - instance not attached\n");
		return (DDI_FAILURE);
	}

	mutex_enter(&pci_p->pci_mutex);

	switch (cmd) {
	case DDI_DETACH:
		DEBUG0(DBG_DETACH, dip, "DDI_DETACH\n");

		if (pci_p->hotplug_capable == B_TRUE)
			if (pcihp_uninit(dip) == DDI_FAILURE) {
				mutex_exit(&pci_p->pci_mutex);
				return (DDI_FAILURE);
			}

		pcitool_uninit(dip);

		pci_obj_destroy(pci_p);

		/*
		 * Free the pci soft state structure and the rest of the
		 * resources it's using.
		 */
		free_pci_properties(pci_p);
		unmap_pci_registers(pci_p);
		mutex_exit(&pci_p->pci_mutex);
		mutex_destroy(&pci_p->pci_mutex);
		free_pci_soft_state(instance);

		/* Free the interrupt-priorities prop if we created it. */
		{
			int len;

			if (ddi_getproplen(DDI_DEV_T_ANY, dip,
			    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
			    "interrupt-priorities", &len) == DDI_PROP_SUCCESS)
				(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
				    "interrupt-priorities");
		}
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		pci_child_cfg_save(dip);
		pci_obj_suspend(pci_p);
		pci_p->pci_state = PCI_SUSPENDED;

		mutex_exit(&pci_p->pci_mutex);
		return (DDI_SUCCESS);

	default:
		DEBUG0(DBG_DETACH, dip, "unsupported detach op\n");
		mutex_exit(&pci_p->pci_mutex);
		return (DDI_FAILURE);
	}
}
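
/*
 * For reference, a register mapping request from a child driver normally
 * originates in ddi_regs_map_setup(9F); the rnumber argument indexes the
 * child's "reg" property and arrives at pci_map() below as a
 * DDI_MT_RNUMBER map request.  A minimal leaf-side sketch ("dip" is the
 * child's dev_info pointer, and register set 1 is assumed to exist):
 *
 *	ddi_device_acc_attr_t attr;
 *	ddi_acc_handle_t handle;
 *	caddr_t regs;
 *
 *	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
 *	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
 *	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
 *	if (ddi_regs_map_setup(dip, 1, &regs, 0, 0, &attr,
 *	    &handle) == DDI_SUCCESS) {
 *		uint32_t v = ddi_get32(handle, (uint32_t *)regs);
 *		ddi_regs_map_free(&handle);
 *	}
 */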

/* bus driver entry points */

/*
 * bus map entry point:
 *
 *	if map request is for an rnumber
 *		get the corresponding regspec from device node
 *	build a new regspec in our parent's format
 *	build a new map_req with the new regspec
 *	call up the tree to complete the mapping
 */
int
pci_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t off, off_t len, caddr_t *addrp)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	struct regspec p_regspec;
	ddi_map_req_t p_mapreq;
	int reglen, rval, r_no;
	pci_regspec_t reloc_reg, *rp = &reloc_reg;

	DEBUG2(DBG_MAP, dip, "rdip=%s%d:",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (mp->map_flags & DDI_MF_USER_MAPPING)
		return (DDI_ME_UNIMPLEMENTED);

	switch (mp->map_type) {
	case DDI_MT_REGSPEC:
		reloc_reg = *(pci_regspec_t *)mp->map_obj.rp;	/* dup whole */
		break;

	case DDI_MT_RNUMBER:
		r_no = mp->map_obj.rnumber;
		DEBUG1(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no);

		if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&rp, &reglen) != DDI_SUCCESS)
			return (DDI_ME_RNUMBER_RANGE);

		if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) {
			kmem_free(rp, reglen);
			return (DDI_ME_RNUMBER_RANGE);
		}
		rp += r_no;
		break;

	default:
		return (DDI_ME_INVAL);
	}
	DEBUG0(DBG_MAP | DBG_CONT, dip, "\n");

	/* use "assigned-addresses" to relocate regspec within pci space */
	if (rval = pci_reloc_reg(dip, rdip, pci_p, rp))
		goto done;

	if (len)	/* adjust regspec according to mapping request */
		rp->pci_size_low = len;
	rp->pci_phys_low += off;

	/* use "ranges" to translate relocated pci regspec into parent space */
	if (rval = pci_xlate_reg(pci_p, rp, &p_regspec))
		goto done;

	p_mapreq = *mp;		/* dup the whole structure */
	p_mapreq.map_type = DDI_MT_REGSPEC;
	p_mapreq.map_obj.rp = &p_regspec;
	rval = ddi_map(dip, &p_mapreq, 0, 0, addrp);

	if (rval == DDI_SUCCESS) {
		/*
		 * Set up access functions for FM-access-error-capable
		 * drivers.  The axq workaround prevents fault management
		 * support.
		 */
		if (DDI_FM_ACC_ERR_CAP(pci_p->pci_fm_cap) &&
		    DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
		    mp->map_handlep->ah_acc.devacc_attr_access !=
		    DDI_DEFAULT_ACC)
			pci_fm_acc_setup(mp, rdip);
		pci_axq_setup(mp, pci_p->pci_pbm_p);
	}

done:
	if (mp->map_type == DDI_MT_RNUMBER)
		kmem_free(rp - r_no, reglen);

	return (rval);
}
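
/*
 * The entry points below implement the handle-based DDI DMA model for
 * this nexus.  For reference, a leaf driver reaches pci_dma_allochdl()
 * and pci_dma_bindhdl() through the generic interfaces; a minimal sketch
 * (the attribute structure and buffer are hypothetical):
 *
 *	ddi_dma_handle_t dh;
 *	ddi_dma_cookie_t cookie;
 *	uint_t ccount;
 *
 *	if (ddi_dma_alloc_handle(dip, &dma_attr, DDI_DMA_SLEEP, NULL,
 *	    &dh) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	if (ddi_dma_addr_bind_handle(dh, NULL, buf, len,
 *	    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 *	    &cookie, &ccount) == DDI_DMA_MAPPED) {
 *		... program the device with cookie.dmac_laddress ...
 *		(void) ddi_dma_unbind_handle(dh);
 *	}
 *	ddi_dma_free_handle(&dh);
 */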

/*
 * bus dma map entry point
 * return value:
 *	DDI_DMA_PARTIAL_MAP	 1
 *	DDI_DMA_MAPOK		 0
 *	DDI_DMA_MAPPED		 0
 *	DDI_DMA_NORESOURCES	-1
 *	DDI_DMA_NOMAPPING	-2
 *	DDI_DMA_TOOBIG		-3
 */
int
pci_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq,
	ddi_dma_handle_t *handlep)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	ddi_dma_impl_t *mp;
	int ret;

	DEBUG3(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip),
	    handlep ? "alloc" : "advisory");

	if (!(mp = pci_dma_lmts2hdl(dip, rdip, iommu_p, dmareq)))
		return (DDI_DMA_NORESOURCES);
	if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING)
		return (DDI_DMA_NOMAPPING);
	if (ret = pci_dma_type(pci_p, dmareq, mp))
		goto freehandle;
	if (ret = pci_dma_pfn(pci_p, dmareq, mp))
		goto freehandle;

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
		if ((ret = pci_dvma_win(pci_p, dmareq, mp)) || !handlep)
			goto freehandle;
		if (!PCI_DMA_CANCACHE(mp)) {	/* try fast track */
			if (PCI_DMA_CANFAST(mp)) {
				if (!pci_dvma_map_fast(iommu_p, mp))
					break;
			/* LINTED E_NOP_ELSE_STMT */
			} else {
				PCI_DVMA_FASTTRAK_PROF(mp);
			}
		}
		if (ret = pci_dvma_map(mp, dmareq, iommu_p))
			goto freehandle;
		break;
	case DMAI_FLAGS_PEER_TO_PEER:
		/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
		if ((ret = pci_dma_physwin(pci_p, dmareq, mp)) || !handlep)
			goto freehandle;
		break;
	case DMAI_FLAGS_BYPASS:
	default:
		panic("%s%d: pci_dma_setup: bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PCI_DMA_TYPE(mp));
		/*NOTREACHED*/
	}
	*handlep = (ddi_dma_handle_t)mp;
	mp->dmai_flags |= (DMAI_FLAGS_INUSE | DMAI_FLAGS_MAPPED);
	dump_dma_handle(DBG_DMA_MAP, dip, mp);

	return ((mp->dmai_nwin == 1) ?
	    DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
freehandle:
	if (ret == DDI_DMA_NORESOURCES)
		pci_dma_freemp(mp);	/* don't run_callback() */
	else
		(void) pci_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
	return (ret);
}


/*
 * bus dma alloc handle entry point:
 */
int
pci_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	ddi_dma_impl_t *mp;
	int rval;

	DEBUG2(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (attrp->dma_attr_version != DMA_ATTR_V0)
		return (DDI_DMA_BADATTR);

	if (!(mp = pci_dma_allocmp(dip, rdip, waitfp, arg)))
		return (DDI_DMA_NORESOURCES);

	/*
	 * Save the requestor's information
	 */
	mp->dmai_attr = *attrp;		/* whole object - augmented later */
	*DEV_ATTR(mp) = *attrp;		/* whole object - device orig attr */
	DEBUG1(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp);

	/* check and convert dma attributes to handle parameters */
	if (rval = pci_dma_attr2hdl(pci_p, mp)) {
		pci_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
		*handlep = NULL;
		return (rval);
	}
	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}


/*
 * bus dma free handle entry point:
 */
/*ARGSUSED*/
int
pci_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	DEBUG3(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	pci_dma_freemp((ddi_dma_impl_t *)handle);

	if (pci_kmem_clid) {
		DEBUG0(DBG_DMA_FREEH, dip, "run handle callback\n");
		ddi_run_callback(&pci_kmem_clid);
	}
	return (DDI_SUCCESS);
}
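
/*
 * pci_dma_bindhdl() below distinguishes the transfer types classified by
 * pci_dma_type(): DVMA (IOMMU-translated), peer-to-peer, and bypass.
 * For DVMA transfers that do not use the streaming cache, the fast-track
 * mapper pci_dvma_map_fast() is attempted before falling back to the
 * general pci_dvma_map() path.
 */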

/*
 * bus dma bind handle entry point:
 */
int
pci_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
	ddi_dma_handle_t handle, ddi_dma_req_t *dmareq,
	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	int ret;

	DEBUG4(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq);

	if (mp->dmai_flags & DMAI_FLAGS_INUSE)
		return (DDI_DMA_INUSE);

	ASSERT((mp->dmai_flags & ~DMAI_FLAGS_PRESERVE) == 0);
	mp->dmai_flags |= DMAI_FLAGS_INUSE;

	if (ret = pci_dma_type(pci_p, dmareq, mp))
		goto err;
	if (ret = pci_dma_pfn(pci_p, dmareq, mp))
		goto err;

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		if (ret = pci_dvma_win(pci_p, dmareq, mp))
			goto map_err;
		if (!PCI_DMA_CANCACHE(mp)) {	/* try fast track */
			if (PCI_DMA_CANFAST(mp)) {
				if (!pci_dvma_map_fast(iommu_p, mp))
					goto mapped; /*LINTED E_NOP_ELSE_STMT*/
			} else {
				PCI_DVMA_FASTTRAK_PROF(mp);
			}
		}
		if (ret = pci_dvma_map(mp, dmareq, iommu_p))
			goto map_err;
mapped:
		*ccountp = 1;
		MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size);
		break;
	case DMAI_FLAGS_BYPASS:
	case DMAI_FLAGS_PEER_TO_PEER:
		if (ret = pci_dma_physwin(pci_p, dmareq, mp))
			goto map_err;
		*ccountp = WINLST(mp)->win_ncookies;
		*cookiep = *(ddi_dma_cookie_t *)(WINLST(mp) + 1); /* wholeobj */
		break;
	default:
		panic("%s%d: pci_dma_bindhdl(%p): bad dma type",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
		/*NOTREACHED*/
	}
	DEBUG2(DBG_DMA_BINDH, dip, "cookie %x+%x\n", cookiep->dmac_address,
	    cookiep->dmac_size);
	dump_dma_handle(DBG_DMA_MAP, dip, mp);

	if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
		(void) ndi_fmc_insert(rdip, DMA_HANDLE, mp, NULL);
		mp->dmai_error.err_cf = impl_dma_check;
	}

	mp->dmai_flags |= DMAI_FLAGS_MAPPED;
	return (mp->dmai_nwin == 1 ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
map_err:
	pci_dvma_unregister_callbacks(pci_p, mp);
	pci_dma_freepfn(mp);
err:
	mp->dmai_flags &= DMAI_FLAGS_PRESERVE;
	return (ret);
}

/*
 * bus dma unbind handle entry point:
 */
/*ARGSUSED*/
int
pci_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	iommu_t *iommu_p = pci_p->pci_iommu_p;

	DEBUG3(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	if ((mp->dmai_flags & DMAI_FLAGS_INUSE) == 0) {
		DEBUG0(DBG_DMA_UNBINDH, dip, "handle not in use\n");
		return (DDI_FAILURE);
	}

	mp->dmai_flags &= ~DMAI_FLAGS_MAPPED;

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		pci_dvma_unregister_callbacks(pci_p, mp);
		pci_dma_sync_unmap(dip, rdip, mp);
		pci_dvma_unmap(iommu_p, mp);
		pci_dma_freepfn(mp);
		break;
	case DMAI_FLAGS_BYPASS:
	case DMAI_FLAGS_PEER_TO_PEER:
		pci_dma_freewin(mp);
		break;
	default:
		panic("%s%d: pci_dma_unbindhdl:bad dma type %p",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
		/*NOTREACHED*/
	}
	if (iommu_p->iommu_dvma_clid != 0) {
		DEBUG0(DBG_DMA_UNBINDH, dip, "run dvma callback\n");
		ddi_run_callback(&iommu_p->iommu_dvma_clid);
	}
	if (pci_kmem_clid) {
		DEBUG0(DBG_DMA_UNBINDH, dip, "run handle callback\n");
		ddi_run_callback(&pci_kmem_clid);
	}
	mp->dmai_flags &= DMAI_FLAGS_PRESERVE;
	SYNC_BUF_PA(mp) = 0;

	if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
		if (DEVI(rdip)->devi_fmhdl != NULL &&
		    DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap)) {
			(void) ndi_fmc_remove(rdip, DMA_HANDLE, mp);
		}
	}

	return (DDI_SUCCESS);
}
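
/*
 * When a bind returns DDI_DMA_PARTIAL_MAP, the caller walks the mapping
 * windows via ddi_dma_getwin(9F), which arrives here as pci_dma_win().
 * A minimal sketch, assuming "nwin" was obtained with ddi_dma_numwin(9F):
 *
 *	off_t off;
 *	size_t len;
 *	ddi_dma_cookie_t cookie;
 *	uint_t ccount, win;
 *
 *	for (win = 0; win < nwin; win++) {
 *		if (ddi_dma_getwin(dh, win, &off, &len, &cookie,
 *		    &ccount) != DDI_SUCCESS)
 *			break;
 *		... transfer this window ...
 *	}
 */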

/*
 * bus dma win entry point:
 */
int
pci_dma_win(dev_info_t *dip, dev_info_t *rdip,
	ddi_dma_handle_t handle, uint_t win, off_t *offp,
	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	DEBUG2(DBG_DMA_WIN, dip, "rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
	dump_dma_handle(DBG_DMA_WIN, dip, mp);
	if (win >= mp->dmai_nwin) {
		DEBUG1(DBG_DMA_WIN, dip, "%x out of range\n", win);
		return (DDI_FAILURE);
	}

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		if (win != PCI_DMA_CURWIN(mp)) {
			pci_t *pci_p =
			    get_pci_soft_state(ddi_get_instance(dip));
			pci_dma_sync_unmap(dip, rdip, mp);
			/* map_window sets dmai_mapping/size/offset */
			iommu_map_window(pci_p->pci_iommu_p, mp, win);
		}
		if (cookiep)
			MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping,
			    mp->dmai_size);
		if (ccountp)
			*ccountp = 1;
		break;
	case DMAI_FLAGS_PEER_TO_PEER:
	case DMAI_FLAGS_BYPASS: {
		int i;
		ddi_dma_cookie_t *ck_p;
		pci_dma_win_t *win_p = mp->dmai_winlst;

		for (i = 0; i < win; win_p = win_p->win_next, i++)
			;
		ck_p = (ddi_dma_cookie_t *)(win_p + 1);
		*cookiep = *ck_p;
		mp->dmai_offset = win_p->win_offset;
		mp->dmai_size = win_p->win_size;
		mp->dmai_mapping = ck_p->dmac_laddress;
		mp->dmai_cookie = ck_p + 1;
		win_p->win_curseg = 0;
		if (ccountp)
			*ccountp = win_p->win_ncookies;
		}
		break;
	default:
		cmn_err(CE_WARN, "%s%d: pci_dma_win:bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PCI_DMA_TYPE(mp));
		return (DDI_FAILURE);
	}
	if (cookiep)
		DEBUG2(DBG_DMA_WIN, dip,
		    "cookie - dmac_address=%x dmac_size=%x\n",
		    cookiep->dmac_address, cookiep->dmac_size);
	if (offp)
		*offp = (off_t)mp->dmai_offset;
	if (lenp)
		*lenp = mp->dmai_size;
	return (DDI_SUCCESS);
}

#ifdef DEBUG
static char *pci_dmactl_str[] = {
	"DDI_DMA_FREE",
	"DDI_DMA_SYNC",
	"DDI_DMA_HTOC",
	"DDI_DMA_KVADDR",
	"DDI_DMA_MOVWIN",
	"DDI_DMA_REPWIN",
	"DDI_DMA_GETERR",
	"DDI_DMA_COFF",
	"DDI_DMA_NEXTWIN",
	"DDI_DMA_NEXTSEG",
	"DDI_DMA_SEGTOC",
	"DDI_DMA_RESERVE",
	"DDI_DMA_RELEASE",
	"DDI_DMA_RESETH",
	"DDI_DMA_CKSYNC",
	"DDI_DMA_IOPB_ALLOC",
	"DDI_DMA_IOPB_FREE",
	"DDI_DMA_SMEM_ALLOC",
	"DDI_DMA_SMEM_FREE",
	"DDI_DMA_SET_SBUS64",
	"DDI_DMA_REMAP"
};
#endif

/*
 * bus dma control entry point:
 */
int
pci_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
	enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp,
	uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	DEBUG3(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", pci_dmactl_str[cmd],
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	switch (cmd) {
	case DDI_DMA_FREE:
		(void) pci_dma_unbindhdl(dip, rdip, handle);
		(void) pci_dma_freehdl(dip, rdip, handle);
		return (DDI_SUCCESS);
	case DDI_DMA_RESERVE: {
		pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
		return (pci_fdvma_reserve(dip, rdip, pci_p,
		    (ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp));
		}
	case DDI_DMA_RELEASE: {
		pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
		return (pci_fdvma_release(dip, pci_p, mp));
		}
	default:
		break;
	}

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		return (pci_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	case DMAI_FLAGS_PEER_TO_PEER:
	case DMAI_FLAGS_BYPASS:
		return (pci_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	default:
		panic("%s%d: pci_dma_ctlops(%x):bad dma type %x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), cmd,
		    mp->dmai_flags);
		/*NOTREACHED*/
	}
}

#ifdef DEBUG
int pci_peekfault_cnt = 0;
int pci_pokefault_cnt = 0;
#endif	/* DEBUG */
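
/*
 * pci_do_poke() below performs a protected write: on_trap(9F) arms an
 * OT_DATA_ACCESS trap region so that a bus error raised by the access
 * (e.g. a master-abort) is caught by the poke_fault trampoline instead
 * of panicking the system; the PBM error state is then inspected and
 * cleared under pbm_pokefault_mutex.
 */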

static int
pci_do_poke(pci_t *pci_p, peekpoke_ctlops_t *in_args)
{
	pbm_t *pbm_p = pci_p->pci_pbm_p;
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	mutex_enter(&pbm_p->pbm_pokefault_mutex);
	pbm_p->pbm_ontrap_data = &otd;

	/* Set up protected environment. */
	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		otd.ot_trampoline = (uintptr_t)&poke_fault;
		err = do_poke(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else
		err = DDI_FAILURE;

	/*
	 * Read the async fault register for the PBM to see whether it
	 * saw a master-abort.
	 */
	pbm_clear_error(pbm_p);

	if (otd.ot_trap & OT_DATA_ACCESS)
		err = DDI_FAILURE;

	/* Take down protected environment. */
	no_trap();

	pbm_p->pbm_ontrap_data = NULL;
	mutex_exit(&pbm_p->pbm_pokefault_mutex);

#ifdef DEBUG
	if (err == DDI_FAILURE)
		pci_pokefault_cnt++;
#endif
	return (err);
}


static int
pci_do_caut_put(pci_t *pci_p, peekpoke_ctlops_t *cautacc_ctlops_arg)
{
	size_t size = cautacc_ctlops_arg->size;
	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
	size_t repcount = cautacc_ctlops_arg->repcount;
	uint_t flags = cautacc_ctlops_arg->flags;

	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;

	/*
	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
	 * mutex.
	 */
	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);

	if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
		for (; repcount; repcount--) {
			switch (size) {

			case sizeof (uint8_t):
				i_ddi_put8(hp, (uint8_t *)dev_addr,
				    *(uint8_t *)host_addr);
				break;

			case sizeof (uint16_t):
				i_ddi_put16(hp, (uint16_t *)dev_addr,
				    *(uint16_t *)host_addr);
				break;

			case sizeof (uint32_t):
				i_ddi_put32(hp, (uint32_t *)dev_addr,
				    *(uint32_t *)host_addr);
				break;

			case sizeof (uint64_t):
				i_ddi_put64(hp, (uint64_t *)dev_addr,
				    *(uint64_t *)host_addr);
				break;
			}

			host_addr += size;

			if (flags == DDI_DEV_AUTOINCR)
				dev_addr += size;
		}
	}

	i_ddi_notrap((ddi_acc_handle_t)hp);
	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;

	if (hp->ahi_err->err_status != DDI_FM_OK) {
		/* Clear the expected fault from the handle before returning */
		hp->ahi_err->err_status = DDI_FM_OK;
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
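
/*
 * The cautious-access routines above and below are reached when a child
 * maps its registers with devacc_attr_access set to DDI_CAUTIOUS_ACC;
 * see ddi_device_acc_attr(9S).  A hedged sketch of the leaf-side setup:
 *
 *	ddi_device_acc_attr_t attr;
 *
 *	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
 *	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
 *	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
 *	attr.devacc_attr_access = DDI_CAUTIOUS_ACC;
 *
 * Handle-based accesses (ddi_get8(9F) and friends) made through a handle
 * set up this way are routed to pci_do_caut_get()/pci_do_caut_put().
 */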

static int
pci_ctlops_poke(pci_t *pci_p, peekpoke_ctlops_t *in_args)
{
	return (in_args->handle ? pci_do_caut_put(pci_p, in_args) :
	    pci_do_poke(pci_p, in_args));
}


static int
pci_do_peek(pci_t *pci_p, peekpoke_ctlops_t *in_args)
{
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		otd.ot_trampoline = (uintptr_t)&peek_fault;
		err = do_peek(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else
		err = DDI_FAILURE;

	no_trap();

#ifdef DEBUG
	if (err == DDI_FAILURE)
		pci_peekfault_cnt++;
#endif
	return (err);
}

static int
pci_do_caut_get(pci_t *pci_p, peekpoke_ctlops_t *cautacc_ctlops_arg)
{
	size_t size = cautacc_ctlops_arg->size;
	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
	size_t repcount = cautacc_ctlops_arg->repcount;
	uint_t flags = cautacc_ctlops_arg->flags;

	int err = DDI_SUCCESS;

	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);

	if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
		for (; repcount; repcount--) {
			i_ddi_caut_get(size, (void *)dev_addr,
			    (void *)host_addr);

			host_addr += size;

			if (flags == DDI_DEV_AUTOINCR)
				dev_addr += size;
		}
	} else {
		int i;
		uint8_t *ff_addr = (uint8_t *)host_addr;
		for (i = 0; i < size; i++)
			*ff_addr++ = 0xff;

		err = DDI_FAILURE;
	}

	i_ddi_notrap((ddi_acc_handle_t)hp);
	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;

	return (err);
}


static int
pci_ctlops_peek(pci_t *pci_p, peekpoke_ctlops_t *in_args, void *result)
{
	result = (void *)in_args->host_addr;
	return (in_args->handle ? pci_do_caut_get(pci_p, in_args) :
	    pci_do_peek(pci_p, in_args));
}
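
/*
 * get_reg_set_size() below decodes the 64-bit size of a single "reg"
 * entry.  Each pci_regspec_t carries its size as a hi/lo pair of 32-bit
 * cells; for example, an entry with pci_size_hi == 0x1 and
 * pci_size_low == 0x0 describes a 4GB register set:
 * (0x1ULL << 32) | 0x0 == 0x100000000.
 */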

/*
 * get_reg_set_size
 *
 * Given a dev info pointer to a pci child and a register number, this
 * routine returns the size element of that reg set property.
 * return value: size of reg set on success, -1 on error
 */
static off_t
get_reg_set_size(dev_info_t *child, int rnumber)
{
	pci_regspec_t *pci_rp;
	off_t size;
	int i;

	if (rnumber < 0)
		return (-1);

	/*
	 * Get the reg property for the device.
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS, "reg",
	    (caddr_t)&pci_rp, &i) != DDI_SUCCESS)
		return (-1);

	if (rnumber >= (i / (int)sizeof (pci_regspec_t))) {
		kmem_free(pci_rp, i);
		return (-1);
	}

	size = pci_rp[rnumber].pci_size_low |
	    ((uint64_t)pci_rp[rnumber].pci_size_hi << 32);
	kmem_free(pci_rp, i);
	return (size);
}
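
/*
 * The DDI_CTLOPS_REGSIZE and DDI_CTLOPS_NREGS cases in pci_ctlops()
 * below back the leaf-visible interfaces ddi_dev_regsize(9F) and
 * ddi_dev_nregs(9F).  A minimal sketch from a child's perspective:
 *
 *	off_t size;
 *	int nregs;
 *
 *	if (ddi_dev_regsize(dip, 1, &size) == DDI_SUCCESS &&
 *	    ddi_dev_nregs(dip, &nregs) == DDI_SUCCESS) {
 *		... register set 1 spans "size" bytes, out of
 *		... "nregs" register sets on the node
 *	}
 */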

/*
 * control ops entry point:
 *
 * Requests handled completely:
 *	DDI_CTLOPS_INITCHILD	see init_child() for details
 *	DDI_CTLOPS_UNINITCHILD
 *	DDI_CTLOPS_REPORTDEV	see report_dev() for details
 *	DDI_CTLOPS_IOMIN	cache line size if streaming otherwise 1
 *	DDI_CTLOPS_REGSIZE
 *	DDI_CTLOPS_NREGS
 *	DDI_CTLOPS_DVMAPAGESIZE
 *	DDI_CTLOPS_POKE
 *	DDI_CTLOPS_PEEK
 *	DDI_CTLOPS_QUIESCE
 *	DDI_CTLOPS_UNQUIESCE
 *
 * All others passed to parent.
 */
int
pci_ctlops(dev_info_t *dip, dev_info_t *rdip,
	ddi_ctl_enum_t op, void *arg, void *result)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));

	switch (op) {
	case DDI_CTLOPS_INITCHILD:
		return (init_child(pci_p, (dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		return (uninit_child(pci_p, (dev_info_t *)arg));

	case DDI_CTLOPS_REPORTDEV:
		return (report_dev(rdip));

	case DDI_CTLOPS_IOMIN:
		/*
		 * If we are using the streaming cache, align at
		 * least on a cache line boundary. Otherwise use
		 * whatever alignment is passed in.
		 */
		if ((uintptr_t)arg) {
			int val = *((int *)result);

			val = maxbit(val, PCI_SBUF_LINE_SIZE);
			*((int *)result) = val;
		}
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
		*((off_t *)result) = get_reg_set_size(rdip, *((int *)arg));
		return (*((off_t *)result) == -1 ? DDI_FAILURE : DDI_SUCCESS);

	case DDI_CTLOPS_NREGS:
		*((uint_t *)result) = get_nreg_set(rdip);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_DVMAPAGESIZE:
		*((ulong_t *)result) = IOMMU_PAGE_SIZE;
		return (DDI_SUCCESS);

	case DDI_CTLOPS_POKE:
		return (pci_ctlops_poke(pci_p, (peekpoke_ctlops_t *)arg));

	case DDI_CTLOPS_PEEK:
		return (pci_ctlops_peek(pci_p, (peekpoke_ctlops_t *)arg,
		    result));

	case DDI_CTLOPS_AFFINITY:
		break;

	case DDI_CTLOPS_QUIESCE:
		return (pci_bus_quiesce(pci_p, rdip, result));

	case DDI_CTLOPS_UNQUIESCE:
		return (pci_bus_unquiesce(pci_p, rdip, result));

	default:
		break;
	}

	/*
	 * Now pass the request up to our parent.
	 */
	DEBUG2(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
	return (ddi_ctlops(dip, rdip, op, arg, result));
}


/*
 * bus interrupt ops entry point:
 */
/* ARGSUSED */
int
pci_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
	ddi_intr_handle_impl_t *hdlp, void *result)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	ib_ino_t ino;
	int ret = DDI_SUCCESS;

	switch (intr_op) {
	case DDI_INTROP_GETCAP:
		/* GetCap will always fail for all non-PCI devices */
		(void) pci_intx_get_cap(rdip, (int *)result);
		break;
	case DDI_INTROP_SETCAP:
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ALLOC:
		*(int *)result = hdlp->ih_scratch1;
		break;
	case DDI_INTROP_FREE:
		break;
	case DDI_INTROP_GETPRI:
		*(int *)result = hdlp->ih_pri ?
		    hdlp->ih_pri : pci_class_to_pil(rdip);
		break;
	case DDI_INTROP_SETPRI:
		break;
	case DDI_INTROP_ADDISR:
		ret = pci_add_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_REMISR:
		ret = pci_remove_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_GETTARGET:
		ino = IB_MONDO_TO_INO(pci_xlate_intr(dip, rdip,
		    pci_p->pci_ib_p, IB_MONDO_TO_INO(hdlp->ih_vector)));
		ret = ib_get_intr_target(pci_p, ino, (int *)result);
		break;
	case DDI_INTROP_SETTARGET:
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ENABLE:
		ret = ib_update_intr_state(pci_p, rdip, hdlp,
		    PCI_INTR_STATE_ENABLE);
		break;
	case DDI_INTROP_DISABLE:
		ret = ib_update_intr_state(pci_p, rdip, hdlp,
		    PCI_INTR_STATE_DISABLE);
		break;
	case DDI_INTROP_SETMASK:
		ret = pci_intx_set_mask(rdip);
		break;
	case DDI_INTROP_CLRMASK:
		ret = pci_intx_clr_mask(rdip);
		break;
	case DDI_INTROP_GETPENDING:
		ret = pci_intx_get_pending(rdip, (int *)result);
		break;
	case DDI_INTROP_NINTRS:
	case DDI_INTROP_NAVAIL:
		*(int *)result = i_ddi_get_intx_nintrs(rdip);
		break;
	case DDI_INTROP_SUPPORTED_TYPES:
		/* PCI nexus driver supports only fixed interrupts */
		*(int *)result = i_ddi_get_intx_nintrs(rdip) ?
		    DDI_INTR_TYPE_FIXED : 0;
		break;
	default:
		ret = DDI_ENOTSUP;
		break;
	}

	return (ret);
}

static void
pci_init_hotplug(struct pci *pci_p)
{
	pci_bus_range_t bus_range;
	dev_info_t *dip;

	/*
	 * Before initializing hotplug - open up
	 * the bus range. The busra module will
	 * initialize its pool of bus numbers from
	 * this. "busra" will be the agent that keeps
	 * track of them during hotplug. Also note
	 * that busra will remove any bus numbers
	 * already in use from boot time.
	 */
	bus_range.lo = 0x0;
	bus_range.hi = 0xff;
	dip = pci_p->pci_dip;
	pci_p->hotplug_capable = B_FALSE;

	/*
	 * If this property exists, this nexus has hot-plug
	 * slots.
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "hotplug-capable")) {
		if (ndi_prop_update_int_array(DDI_DEV_T_NONE,
		    dip, "bus-range",
		    (int *)&bus_range,
		    2) != DDI_PROP_SUCCESS) {
			return;
		}

		if (pcihp_init(dip) != DDI_SUCCESS) {
			return;
		}

		if ((pcihp_ops = pcihp_get_cb_ops()) != NULL) {
			DEBUG2(DBG_ATTACH, dip, "%s%d hotplug enabled",
			    ddi_driver_name(dip), ddi_get_instance(dip));
			pci_p->hotplug_capable = B_TRUE;
		}
	}
}