/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * PCI nexus driver interface
 */

#include <sys/types.h>
#include <sys/conf.h>		/* nulldev */
#include <sys/stat.h>		/* devctl */
#include <sys/kmem.h>
#include <sys/async.h>		/* ecc_flt for pci_ecc.h */
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndifm.h>
#include <sys/ontrap.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi_subrdefs.h>
#include <sys/epm.h>
#include <sys/hotplug/pci/pcihp.h>
#include <sys/spl.h>
#include <sys/pci/pci_obj.h>

/*LINTLIBRARY*/

/*
 * function prototype for hotplug routine:
 */
static void pci_init_hotplug(struct pci *);

/*
 * function prototypes for dev ops routines:
 */
static int pci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int pci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int pci_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
	void *arg, void **result);
static int pci_ctlops_poke(pci_t *pci_p, peekpoke_ctlops_t *in_args);
static int pci_ctlops_peek(pci_t *pci_p, peekpoke_ctlops_t *in_args,
	void *result);
static off_t get_reg_set_size(dev_info_t *child, int rnumber);

/*
 * bus ops and dev ops structures:
 */
static struct bus_ops pci_bus_ops = {
	BUSO_REV,
	pci_map,
	0,
	0,
	0,
	i_ddi_map_fault,
	pci_dma_setup,
	pci_dma_allochdl,
	pci_dma_freehdl,
	pci_dma_bindhdl,
	pci_dma_unbindhdl,
	pci_dma_sync,
	pci_dma_win,
	pci_dma_ctlops,
	pci_ctlops,
	ddi_bus_prop_op,
	ndi_busop_get_eventcookie,	/* (*bus_get_eventcookie)(); */
	ndi_busop_add_eventcall,	/* (*bus_add_eventcall)(); */
	ndi_busop_remove_eventcall,	/* (*bus_remove_eventcall)(); */
	ndi_post_event,			/* (*bus_post_event)(); */
	NULL,				/* (*bus_intr_ctl)(); */
	NULL,				/* (*bus_config)(); */
	NULL,				/* (*bus_unconfig)(); */
	pci_fm_init_child,		/* (*bus_fm_init)(); */
	NULL,				/* (*bus_fm_fini)(); */
	pci_bus_enter,			/* (*bus_fm_access_enter)(); */
	pci_bus_exit,			/* (*bus_fm_access_fini)(); */
	NULL,				/* (*bus_power)(); */
	pci_intr_ops			/* (*bus_intr_op)(); */
};

extern struct cb_ops pci_cb_ops;

static struct dev_ops pci_ops = {
	DEVO_REV,
	0,
	pci_info,
	nulldev,
	0,
	pci_attach,
	pci_detach,
	nodev,
	&pci_cb_ops,
	&pci_bus_ops,
	0
};

/*
 * module definitions:
 */
#include <sys/modctl.h>
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,			/* Type of module - driver */
	"PCI Bus nexus driver %I%",	/* Name of module. */
	&pci_ops,			/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/*
 * driver global data:
 */
void *per_pci_state;		/* per-pbm soft state pointer */
void *per_pci_common_state;	/* per-psycho soft state pointer */
kmutex_t pci_global_mutex;	/* attach/detach common struct lock */
errorq_t *pci_ecc_queue = NULL;	/* per-system ecc handling queue */
errorq_t *pci_target_queue = NULL;	/* per-system target handling queue */

extern void pci_child_cfg_save(dev_info_t *dip);
extern void pci_child_cfg_restore(dev_info_t *dip);

int
_init(void)
{
	int e;

	/*
	 * Initialize per-pci bus soft state pointer.
	 */
	e = ddi_soft_state_init(&per_pci_state, sizeof (pci_t), 1);
	if (e != 0)
		return (e);

	/*
	 * Initialize per-psycho soft state pointer.
	 */
	e = ddi_soft_state_init(&per_pci_common_state,
	    sizeof (pci_common_t), 1);
	if (e != 0) {
		ddi_soft_state_fini(&per_pci_state);
		return (e);
	}

	/*
	 * Initialize global mutexes.
	 */
	mutex_init(&pci_global_mutex, NULL, MUTEX_DRIVER, NULL);
	pci_reloc_init();

	/*
	 * Create the performance kstats.
	 */
	pci_kstat_init();

	/*
	 * Install the module.
	 */
	e = mod_install(&modlinkage);
	if (e != 0) {
		ddi_soft_state_fini(&per_pci_state);
		ddi_soft_state_fini(&per_pci_common_state);
		mutex_destroy(&pci_global_mutex);
	}
	return (e);
}

int
_fini(void)
{
	int e;

	/*
	 * Remove the module.
	 */
	e = mod_remove(&modlinkage);
	if (e != 0)
		return (e);

	/*
	 * Destroy pci_ecc_queue, and set it to NULL.
	 */
	if (pci_ecc_queue)
		errorq_destroy(pci_ecc_queue);

	pci_ecc_queue = NULL;

	/*
	 * Destroy pci_target_queue, and set it to NULL.
	 */
	if (pci_target_queue)
		errorq_destroy(pci_target_queue);

	pci_target_queue = NULL;

	/*
	 * Destroy the performance kstats.
	 */
	pci_kstat_fini();

	/*
	 * Free the per-pci and per-psycho soft state info and destroy
	 * mutex for per-psycho soft state.
	 */
	ddi_soft_state_fini(&per_pci_state);
	ddi_soft_state_fini(&per_pci_common_state);
	mutex_destroy(&pci_global_mutex);
	pci_reloc_fini();
	return (e);
}
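
/*
 * Note: _fini() checks pci_ecc_queue and pci_target_queue for NULL before
 * destroying them because they are not allocated in _init(); they are
 * created on demand elsewhere in the driver, so they may never have been
 * set up if no bus instance needed them.
 */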

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*ARGSUSED*/
static int
pci_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int instance = PCIHP_AP_MINOR_NUM_TO_INSTANCE(getminor((dev_t)arg));
	pci_t *pci_p = get_pci_soft_state(instance);

	/* allow hotplug to deal with ones it manages */
	if (pci_p && (pci_p->hotplug_capable == B_TRUE))
		return (pcihp_info(dip, infocmd, arg, result));

	/* non-hotplug or not attached */
	switch (infocmd) {
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)instance;
		return (DDI_SUCCESS);

	case DDI_INFO_DEVT2DEVINFO:
		if (pci_p == NULL)
			return (DDI_FAILURE);
		*result = (void *)pci_p->pci_dip;
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}


/* device driver entry points */

/*
 * attach entry point:
 */
static int
pci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	pci_t *pci_p;			/* per bus state pointer */
	int instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		DEBUG0(DBG_ATTACH, dip, "DDI_ATTACH\n");

		/*
		 * Allocate and get the per-pci soft state structure.
		 */
		if (alloc_pci_soft_state(instance) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: can't allocate pci state",
			    ddi_driver_name(dip), instance);
			goto err_bad_pci_softstate;
		}
		pci_p = get_pci_soft_state(instance);
		pci_p->pci_dip = dip;
		mutex_init(&pci_p->pci_mutex, NULL, MUTEX_DRIVER, NULL);
		pci_p->pci_soft_state = PCI_SOFT_STATE_CLOSED;
		pci_p->pci_open_count = 0;

		/*
		 * Get key properties of the pci bridge node and
		 * determine its type (psycho, schizo, etc ...).
		 */
		if (get_pci_properties(pci_p, dip) == DDI_FAILURE)
			goto err_bad_pci_prop;

		/*
		 * Map in the registers.
		 */
		if (map_pci_registers(pci_p, dip) == DDI_FAILURE)
			goto err_bad_reg_prop;

		if (pci_obj_setup(pci_p) != DDI_SUCCESS)
			goto err_bad_objs;

		/*
		 * If this PCI leaf has hotplug and this platform
		 * loads hotplug modules then initialize the
		 * hotplug framework.
		 */
		pci_init_hotplug(pci_p);

		/*
		 * Create the "devctl" node for hotplug support.
		 * For non-hotplug bus, we still need ":devctl" to
		 * support DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls.
		 */
		if (pci_p->hotplug_capable == B_FALSE) {
			if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
			    PCIHP_AP_MINOR_NUM(instance, PCIHP_DEVCTL_MINOR),
			    DDI_NT_NEXUS, 0) != DDI_SUCCESS)
				goto err_bad_devctl_node;
		}

		/*
		 * Due to unresolved hardware issues, disable PCIPM until
		 * the problem is fully understood.
		 *
		 * pci_pwr_setup(pci_p, dip);
		 */

		ddi_report_dev(dip);

		pci_p->pci_state = PCI_ATTACHED;
		DEBUG0(DBG_ATTACH, dip, "attach success\n");
		break;

err_bad_objs:
		ddi_remove_minor_node(dip, "devctl");
err_bad_devctl_node:
		unmap_pci_registers(pci_p);
err_bad_reg_prop:
		free_pci_properties(pci_p);
err_bad_pci_prop:
		mutex_destroy(&pci_p->pci_mutex);
		free_pci_soft_state(instance);
err_bad_pci_softstate:
		return (DDI_FAILURE);

	case DDI_RESUME:
		DEBUG0(DBG_ATTACH, dip, "DDI_RESUME\n");

		/*
		 * Make sure the Psycho control registers and IOMMU
		 * are configured properly.
		 */
		pci_p = get_pci_soft_state(instance);
		mutex_enter(&pci_p->pci_mutex);

		/*
		 * Make sure this instance has been suspended.
		 */
		if (pci_p->pci_state != PCI_SUSPENDED) {
			DEBUG0(DBG_ATTACH, dip, "instance NOT suspended\n");
			mutex_exit(&pci_p->pci_mutex);
			return (DDI_FAILURE);
		}
		pci_obj_resume(pci_p);
		pci_p->pci_state = PCI_ATTACHED;

		pci_child_cfg_restore(dip);

		mutex_exit(&pci_p->pci_mutex);
		break;

	default:
		DEBUG0(DBG_ATTACH, dip, "unsupported attach op\n");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
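
/*
 * Note: the DDI_RESUME case above is the counterpart of the DDI_SUSPEND
 * case in pci_detach() below; pci_child_cfg_save()/pci_child_cfg_restore()
 * preserve the children's PCI config space state across the cycle.
 */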

/*
 * detach entry point:
 */
static int
pci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance = ddi_get_instance(dip);
	pci_t *pci_p = get_pci_soft_state(instance);

	/*
	 * Make sure we are currently attached
	 */
	if (pci_p->pci_state != PCI_ATTACHED) {
		DEBUG0(DBG_ATTACH, dip, "failed - instance not attached\n");
		return (DDI_FAILURE);
	}

	mutex_enter(&pci_p->pci_mutex);

	switch (cmd) {
	case DDI_DETACH:
		DEBUG0(DBG_DETACH, dip, "DDI_DETACH\n");

		if (pci_p->hotplug_capable == B_TRUE)
			if (pcihp_uninit(dip) == DDI_FAILURE) {
				mutex_exit(&pci_p->pci_mutex);
				return (DDI_FAILURE);
			}

		pci_obj_destroy(pci_p);

		/*
		 * Free the pci soft state structure and the rest of the
		 * resources it's using.
		 */
		free_pci_properties(pci_p);
		unmap_pci_registers(pci_p);
		mutex_exit(&pci_p->pci_mutex);
		mutex_destroy(&pci_p->pci_mutex);
		free_pci_soft_state(instance);

		/* Free the interrupt-priorities prop if we created it. */
		{
			int len;

			if (ddi_getproplen(DDI_DEV_T_ANY, dip,
			    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
			    "interrupt-priorities", &len) == DDI_PROP_SUCCESS)
				(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
				    "interrupt-priorities");
		}
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		pci_child_cfg_save(dip);
		pci_obj_suspend(pci_p);
		pci_p->pci_state = PCI_SUSPENDED;

		mutex_exit(&pci_p->pci_mutex);
		return (DDI_SUCCESS);

	default:
		DEBUG0(DBG_DETACH, dip, "unsupported detach op\n");
		mutex_exit(&pci_p->pci_mutex);
		return (DDI_FAILURE);
	}
}


/* bus driver entry points */

/*
 * bus map entry point:
 *
 *	if map request is for an rnumber
 *		get the corresponding regspec from device node
 *	build a new regspec in our parent's format
 *	build a new map_req with the new regspec
 *	call up the tree to complete the mapping
 */
int
pci_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t off, off_t len, caddr_t *addrp)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	struct regspec p_regspec;
	ddi_map_req_t p_mapreq;
	int reglen, rval, r_no;
	pci_regspec_t reloc_reg, *rp = &reloc_reg;

	DEBUG2(DBG_MAP, dip, "rdip=%s%d:",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (mp->map_flags & DDI_MF_USER_MAPPING)
		return (DDI_ME_UNIMPLEMENTED);

	switch (mp->map_type) {
	case DDI_MT_REGSPEC:
		reloc_reg = *(pci_regspec_t *)mp->map_obj.rp;	/* dup whole */
		break;

	case DDI_MT_RNUMBER:
		r_no = mp->map_obj.rnumber;
		DEBUG1(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no);

		if (ddi_getlongprop(DDI_DEV_T_NONE, rdip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&rp, &reglen) != DDI_SUCCESS)
			return (DDI_ME_RNUMBER_RANGE);

		if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) {
			kmem_free(rp, reglen);
			return (DDI_ME_RNUMBER_RANGE);
		}
		rp += r_no;
		break;

	default:
		return (DDI_ME_INVAL);
	}
	DEBUG0(DBG_MAP | DBG_CONT, dip, "\n");

	/* use "assigned-addresses" to relocate regspec within pci space */
	if (rval = pci_reloc_reg(dip, rdip, pci_p, rp))
		goto done;

	if (len)	/* adjust regspec according to mapping request */
		rp->pci_size_low = len;
	rp->pci_phys_low += off;

	/* use "ranges" to translate relocated pci regspec into parent space */
	if (rval = pci_xlate_reg(pci_p, rp, &p_regspec))
		goto done;

	p_mapreq = *mp;		/* dup the whole structure */
	p_mapreq.map_type = DDI_MT_REGSPEC;
	p_mapreq.map_obj.rp = &p_regspec;
	rval = ddi_map(dip, &p_mapreq, 0, 0, addrp);

	if (rval == DDI_SUCCESS) {
		/*
		 * Set-up access functions for FM access error capable drivers.
		 * The axq workaround prevents fault management support.
		 */
		if (DDI_FM_ACC_ERR_CAP(pci_p->pci_fm_cap) &&
		    DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
		    mp->map_handlep->ah_acc.devacc_attr_access !=
		    DDI_DEFAULT_ACC)
			pci_fm_acc_setup(mp, rdip);
		pci_axq_setup(mp, pci_p->pci_pbm_p);
	}

done:
	if (mp->map_type == DDI_MT_RNUMBER)
		kmem_free(rp - r_no, reglen);

	return (rval);
}
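
/*
 * Illustration (not part of this driver): a register-mapping request from a
 * leaf driver typically arrives at pci_map() above as a DDI_MT_RNUMBER map
 * request.  A minimal sketch, assuming a hypothetical child driver mapping
 * register set 1 of its "reg" property:
 *
 *	ddi_device_acc_attr_t attr;
 *	ddi_acc_handle_t handle;
 *	caddr_t regs;
 *
 *	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
 *	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
 *	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
 *	if (ddi_regs_map_setup(dip, 1, &regs, 0, 0, &attr,
 *	    &handle) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 * The framework turns this into a map request against rnumber 1, which this
 * nexus relocates and translates before passing it up the device tree.
 */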

/*
 * bus dma map entry point
 * return value:
 *	DDI_DMA_PARTIAL_MAP	 1
 *	DDI_DMA_MAPOK		 0
 *	DDI_DMA_MAPPED		 0
 *	DDI_DMA_NORESOURCES	-1
 *	DDI_DMA_NOMAPPING	-2
 *	DDI_DMA_TOOBIG		-3
 */
int
pci_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq,
	ddi_dma_handle_t *handlep)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	ddi_dma_impl_t *mp;
	int ret;

	DEBUG3(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip),
	    handlep ? "alloc" : "advisory");

	if (!(mp = pci_dma_lmts2hdl(dip, rdip, iommu_p, dmareq)))
		return (DDI_DMA_NORESOURCES);
	if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING)
		return (DDI_DMA_NOMAPPING);
	if (ret = pci_dma_type(pci_p, dmareq, mp))
		goto freehandle;
	if (ret = pci_dma_pfn(pci_p, dmareq, mp))
		goto freehandle;

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:	/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
		if ((ret = pci_dvma_win(pci_p, dmareq, mp)) || !handlep)
			goto freehandle;
		if (!PCI_DMA_CANCACHE(mp)) {	/* try fast track */
			if (PCI_DMA_CANFAST(mp)) {
				if (!pci_dvma_map_fast(iommu_p, mp))
					break;
			/* LINTED E_NOP_ELSE_STMT */
			} else {
				PCI_DVMA_FASTTRAK_PROF(mp);
			}
		}
		if (ret = pci_dvma_map(mp, dmareq, iommu_p))
			goto freehandle;
		break;
	case DMAI_FLAGS_PEER_TO_PEER:	/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
		if ((ret = pci_dma_physwin(pci_p, dmareq, mp)) || !handlep)
			goto freehandle;
		break;
	case DMAI_FLAGS_BYPASS:
	default:
		panic("%s%d: pci_dma_setup: bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PCI_DMA_TYPE(mp));
		/*NOTREACHED*/
	}
	*handlep = (ddi_dma_handle_t)mp;
	mp->dmai_flags |= (DMAI_FLAGS_INUSE | DMAI_FLAGS_MAPPED);
	dump_dma_handle(DBG_DMA_MAP, dip, mp);

	return ((mp->dmai_nwin == 1) ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
freehandle:
	if (ret == DDI_DMA_NORESOURCES)
		pci_dma_freemp(mp);	/* don't run_callback() */
	else
		(void) pci_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
	return (ret);
}
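
/*
 * Note: pci_dma_setup() sits in the bus_dma_map slot of pci_bus_ops and
 * services requests from the older ddi_dma_setup(9F)-style DMA interfaces;
 * drivers using the current ddi_dma_alloc_handle(9F)/
 * ddi_dma_addr_bind_handle(9F) interfaces enter through pci_dma_allochdl()
 * and pci_dma_bindhdl() below.
 */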

/*
 * bus dma alloc handle entry point:
 */
int
pci_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	ddi_dma_impl_t *mp;
	int rval;

	DEBUG2(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (attrp->dma_attr_version != DMA_ATTR_V0)
		return (DDI_DMA_BADATTR);

	if (!(mp = pci_dma_allocmp(dip, rdip, waitfp, arg)))
		return (DDI_DMA_NORESOURCES);

	/*
	 * Save requestor's information
	 */
	mp->dmai_attr = *attrp;		/* whole object - augmented later */
	*DEV_ATTR(mp) = *attrp;		/* whole object - device orig attr */
	DEBUG1(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp);

	/* check and convert dma attributes to handle parameters */
	if (rval = pci_dma_attr2hdl(pci_p, mp)) {
		pci_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
		*handlep = NULL;
		return (rval);
	}
	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}


/*
 * bus dma free handle entry point:
 */
/*ARGSUSED*/
int
pci_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	DEBUG3(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	pci_dma_freemp((ddi_dma_impl_t *)handle);

	if (pci_kmem_clid) {
		DEBUG0(DBG_DMA_FREEH, dip, "run handle callback\n");
		ddi_run_callback(&pci_kmem_clid);
	}
	return (DDI_SUCCESS);
}


/*
 * bus dma bind handle entry point:
 */
int
pci_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
	ddi_dma_handle_t handle, ddi_dma_req_t *dmareq,
	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	int ret;

	DEBUG4(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq);

	if (mp->dmai_flags & DMAI_FLAGS_INUSE)
		return (DDI_DMA_INUSE);

	ASSERT((mp->dmai_flags & ~DMAI_FLAGS_PRESERVE) == 0);
	mp->dmai_flags |= DMAI_FLAGS_INUSE;

	if (ret = pci_dma_type(pci_p, dmareq, mp))
		goto err;
	if (ret = pci_dma_pfn(pci_p, dmareq, mp))
		goto err;

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		if (ret = pci_dvma_win(pci_p, dmareq, mp))
			goto map_err;
		if (!PCI_DMA_CANCACHE(mp)) {	/* try fast track */
			if (PCI_DMA_CANFAST(mp)) {
				if (!pci_dvma_map_fast(iommu_p, mp))
					goto mapped; /*LINTED E_NOP_ELSE_STMT*/
			} else {
				PCI_DVMA_FASTTRAK_PROF(mp);
			}
		}
		if (ret = pci_dvma_map(mp, dmareq, iommu_p))
			goto map_err;
mapped:
		*ccountp = 1;
		MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size);
		break;
	case DMAI_FLAGS_BYPASS:
	case DMAI_FLAGS_PEER_TO_PEER:
		if (ret = pci_dma_physwin(pci_p, dmareq, mp))
			goto map_err;
		*ccountp = WINLST(mp)->win_ncookies;
		*cookiep = *(ddi_dma_cookie_t *)(WINLST(mp) + 1); /* wholeobj */
		break;
	default:
		panic("%s%d: pci_dma_bindhdl(%p): bad dma type",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
		/*NOTREACHED*/
	}
	DEBUG2(DBG_DMA_BINDH, dip, "cookie %x+%x\n", cookiep->dmac_address,
	    cookiep->dmac_size);
	dump_dma_handle(DBG_DMA_MAP, dip, mp);

	if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR)
		(void) ndi_fmc_insert(rdip, DMA_HANDLE, mp, NULL);

	mp->dmai_flags |= DMAI_FLAGS_MAPPED;
	return (mp->dmai_nwin == 1 ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
map_err:
	pci_dvma_unregister_callbacks(pci_p, mp);
	pci_dma_freepfn(mp);
err:
	mp->dmai_flags &= DMAI_FLAGS_PRESERVE;
	return (ret);
}
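
/*
 * Illustration (not part of this driver): the usual leaf-driver DMA
 * sequence that exercises the alloc/bind entry points above and the
 * unbind/free entry points below.  A minimal sketch, assuming a
 * hypothetical attribute structure xx_dma_attr and a kernel buffer
 * buf of len bytes:
 *
 *	ddi_dma_handle_t h;
 *	ddi_dma_cookie_t cookie;
 *	uint_t ccount;
 *
 *	if (ddi_dma_alloc_handle(dip, &xx_dma_attr, DDI_DMA_SLEEP,
 *	    NULL, &h) != DDI_SUCCESS)		... enters pci_dma_allochdl()
 *		return (DDI_FAILURE);
 *	if (ddi_dma_addr_bind_handle(h, NULL, buf, len,
 *	    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 *	    &cookie, &ccount) != DDI_DMA_MAPPED) {	... pci_dma_bindhdl()
 *		ddi_dma_free_handle(&h);	... pci_dma_freehdl()
 *		return (DDI_FAILURE);
 *	}
 *	... program the device with cookie.dmac_address ...
 *	(void) ddi_dma_unbind_handle(h);	... pci_dma_unbindhdl()
 *	ddi_dma_free_handle(&h);		... pci_dma_freehdl()
 */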

/*
 * bus dma unbind handle entry point:
 */
/*ARGSUSED*/
int
pci_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	iommu_t *iommu_p = pci_p->pci_iommu_p;

	DEBUG3(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	if ((mp->dmai_flags & DMAI_FLAGS_INUSE) == 0) {
		DEBUG0(DBG_DMA_UNBINDH, dip, "handle not in use\n");
		return (DDI_FAILURE);
	}

	mp->dmai_flags &= ~DMAI_FLAGS_MAPPED;

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		pci_dvma_unregister_callbacks(pci_p, mp);
		pci_dma_sync_unmap(dip, rdip, mp);
		pci_dvma_unmap(iommu_p, mp);
		pci_dma_freepfn(mp);
		break;
	case DMAI_FLAGS_BYPASS:
	case DMAI_FLAGS_PEER_TO_PEER:
		pci_dma_freewin(mp);
		break;
	default:
		panic("%s%d: pci_dma_unbindhdl:bad dma type %p",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
		/*NOTREACHED*/
	}
	if (iommu_p->iommu_dvma_clid != 0) {
		DEBUG0(DBG_DMA_UNBINDH, dip, "run dvma callback\n");
		ddi_run_callback(&iommu_p->iommu_dvma_clid);
	}
	if (pci_kmem_clid) {
		DEBUG0(DBG_DMA_UNBINDH, dip, "run handle callback\n");
		ddi_run_callback(&pci_kmem_clid);
	}
	mp->dmai_flags &= DMAI_FLAGS_PRESERVE;
	SYNC_BUF_PA(mp) = 0;

	if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
		if (DEVI(rdip)->devi_fmhdl != NULL &&
		    DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap)) {
			(void) ndi_fmc_remove(rdip, DMA_HANDLE, mp);
		}
	}

	return (DDI_SUCCESS);
}


/*
 * bus dma win entry point:
 */
int
pci_dma_win(dev_info_t *dip, dev_info_t *rdip,
	ddi_dma_handle_t handle, uint_t win, off_t *offp,
	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	DEBUG2(DBG_DMA_WIN, dip, "rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
	dump_dma_handle(DBG_DMA_WIN, dip, mp);
	if (win >= mp->dmai_nwin) {
		DEBUG1(DBG_DMA_WIN, dip, "%x out of range\n", win);
		return (DDI_FAILURE);
	}

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		if (win != PCI_DMA_CURWIN(mp)) {
			pci_t *pci_p =
			    get_pci_soft_state(ddi_get_instance(dip));
			pci_dma_sync_unmap(dip, rdip, mp);
			/* map_window sets dmai_mapping/size/offset */
			iommu_map_window(pci_p->pci_iommu_p, mp, win);
		}
		if (cookiep)
			MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping,
			    mp->dmai_size);
		if (ccountp)
			*ccountp = 1;
		break;
	case DMAI_FLAGS_PEER_TO_PEER:
	case DMAI_FLAGS_BYPASS: {
		int i;
		ddi_dma_cookie_t *ck_p;
		pci_dma_win_t *win_p = mp->dmai_winlst;

		for (i = 0; i < win; win_p = win_p->win_next, i++);
		ck_p = (ddi_dma_cookie_t *)(win_p + 1);
		*cookiep = *ck_p;
		mp->dmai_offset = win_p->win_offset;
		mp->dmai_size = win_p->win_size;
		mp->dmai_mapping = ck_p->dmac_laddress;
		mp->dmai_cookie = ck_p + 1;
		win_p->win_curseg = 0;
		if (ccountp)
			*ccountp = win_p->win_ncookies;
		}
		break;
	default:
		cmn_err(CE_WARN, "%s%d: pci_dma_win:bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PCI_DMA_TYPE(mp));
		return (DDI_FAILURE);
	}
	if (cookiep)
		DEBUG2(DBG_DMA_WIN, dip,
		    "cookie - dmac_address=%x dmac_size=%x\n",
		    cookiep->dmac_address, cookiep->dmac_size);
	if (offp)
		*offp = (off_t)mp->dmai_offset;
	if (lenp)
		*lenp = mp->dmai_size;
	return (DDI_SUCCESS);
}
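
/*
 * Note: a bind that returns DDI_DMA_PARTIAL_MAP leaves the object mapped
 * one window at a time; consumers step through the windows with
 * ddi_dma_getwin(9F), which arrives here as pci_dma_win() above.
 */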

#ifdef DEBUG
static char *pci_dmactl_str[] = {
	"DDI_DMA_FREE",
	"DDI_DMA_SYNC",
	"DDI_DMA_HTOC",
	"DDI_DMA_KVADDR",
	"DDI_DMA_MOVWIN",
	"DDI_DMA_REPWIN",
	"DDI_DMA_GETERR",
	"DDI_DMA_COFF",
	"DDI_DMA_NEXTWIN",
	"DDI_DMA_NEXTSEG",
	"DDI_DMA_SEGTOC",
	"DDI_DMA_RESERVE",
	"DDI_DMA_RELEASE",
	"DDI_DMA_RESETH",
	"DDI_DMA_CKSYNC",
	"DDI_DMA_IOPB_ALLOC",
	"DDI_DMA_IOPB_FREE",
	"DDI_DMA_SMEM_ALLOC",
	"DDI_DMA_SMEM_FREE",
	"DDI_DMA_SET_SBUS64",
	"DDI_DMA_REMAP"
};
#endif

/*
 * bus dma control entry point:
 */
int
pci_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
	enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp,
	uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	DEBUG3(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", pci_dmactl_str[cmd],
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	switch (cmd) {
	case DDI_DMA_FREE:
		(void) pci_dma_unbindhdl(dip, rdip, handle);
		(void) pci_dma_freehdl(dip, rdip, handle);
		return (DDI_SUCCESS);
	case DDI_DMA_RESERVE: {
		pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
		return (pci_fdvma_reserve(dip, rdip, pci_p,
		    (ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp));
		}
	case DDI_DMA_RELEASE: {
		pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
		return (pci_fdvma_release(dip, pci_p, mp));
		}
	default:
		break;
	}

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		return (pci_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	case DMAI_FLAGS_PEER_TO_PEER:
	case DMAI_FLAGS_BYPASS:
		return (pci_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	default:
		panic("%s%d: pci_dma_ctlops(%x):bad dma type %x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), cmd,
		    mp->dmai_flags);
		/*NOTREACHED*/
	}
}

#ifdef DEBUG
int pci_peekfault_cnt = 0;
int pci_pokefault_cnt = 0;
#endif	/* DEBUG */

static int
pci_do_poke(pci_t *pci_p, peekpoke_ctlops_t *in_args)
{
	pbm_t *pbm_p = pci_p->pci_pbm_p;
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	mutex_enter(&pbm_p->pbm_pokefault_mutex);
	pbm_p->pbm_ontrap_data = &otd;

	/* Set up protected environment. */
	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		otd.ot_trampoline = (uintptr_t)&poke_fault;
		err = do_poke(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else
		err = DDI_FAILURE;

	/*
	 * Read the async fault register for the PBM to see if it sees
	 * a master-abort.
	 */
	pbm_clear_error(pbm_p);

	if (otd.ot_trap & OT_DATA_ACCESS)
		err = DDI_FAILURE;

	/* Take down protected environment. */
	no_trap();

	pbm_p->pbm_ontrap_data = NULL;
	mutex_exit(&pbm_p->pbm_pokefault_mutex);

#ifdef DEBUG
	if (err == DDI_FAILURE)
		pci_pokefault_cnt++;
#endif
	return (err);
}
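
/*
 * Note: ddi_poke8(9F) and its relatives are implemented in terms of the
 * DDI_CTLOPS_POKE control op, which pci_ctlops() below hands to
 * pci_ctlops_poke()/pci_do_poke() above.  The on_trap() protection lets a
 * master-abort be caught and reported as DDI_FAILURE rather than taking
 * the system down.
 */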

static int
pci_do_caut_put(pci_t *pci_p, peekpoke_ctlops_t *cautacc_ctlops_arg)
{
	size_t size = cautacc_ctlops_arg->size;
	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
	size_t repcount = cautacc_ctlops_arg->repcount;
	uint_t flags = cautacc_ctlops_arg->flags;

	pbm_t *pbm_p = pci_p->pci_pbm_p;
	int err = DDI_SUCCESS;

	/* Use ontrap data in handle set up by FMA */
	pbm_p->pbm_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;

	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;

	/*
	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
	 * mutex.
	 */
	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);

	if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
		for (; repcount; repcount--) {
			switch (size) {

			case sizeof (uint8_t):
				i_ddi_put8(hp, (uint8_t *)dev_addr,
				    *(uint8_t *)host_addr);
				break;

			case sizeof (uint16_t):
				i_ddi_put16(hp, (uint16_t *)dev_addr,
				    *(uint16_t *)host_addr);
				break;

			case sizeof (uint32_t):
				i_ddi_put32(hp, (uint32_t *)dev_addr,
				    *(uint32_t *)host_addr);
				break;

			case sizeof (uint64_t):
				i_ddi_put64(hp, (uint64_t *)dev_addr,
				    *(uint64_t *)host_addr);
				break;
			}

			host_addr += size;

			if (flags == DDI_DEV_AUTOINCR)
				dev_addr += size;

			/*
			 * Read the async fault register for the PBM to see if
			 * it sees a master-abort.
			 */
			pbm_clear_error(pbm_p);

			if (pbm_p->pbm_ontrap_data->ot_trap & OT_DATA_ACCESS) {
				err = DDI_FAILURE;
#ifdef DEBUG
				pci_pokefault_cnt++;
#endif
				break;
			}
		}
	}

	i_ddi_notrap((ddi_acc_handle_t)hp);
	pbm_p->pbm_ontrap_data = NULL;
	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;

	return (err);
}


static int
pci_ctlops_poke(pci_t *pci_p, peekpoke_ctlops_t *in_args)
{
	return (in_args->handle ? pci_do_caut_put(pci_p, in_args) :
	    pci_do_poke(pci_p, in_args));
}
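
/*
 * Note: the handle-based (cautious) put/get routines are used for accesses
 * made through a handle whose devacc_attr_access was set to
 * DDI_CAUTIOUS_ACC; for those, the FMA framework supplies the on_trap data
 * in the access handle instead of this driver setting it up on the stack.
 */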

static int
pci_do_peek(pci_t *pci_p, peekpoke_ctlops_t *in_args)
{
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		otd.ot_trampoline = (uintptr_t)&peek_fault;
		err = do_peek(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else
		err = DDI_FAILURE;

	no_trap();

#ifdef DEBUG
	if (err == DDI_FAILURE)
		pci_peekfault_cnt++;
#endif
	return (err);
}

static int
pci_do_caut_get(pci_t *pci_p, peekpoke_ctlops_t *cautacc_ctlops_arg)
{
	size_t size = cautacc_ctlops_arg->size;
	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
	size_t repcount = cautacc_ctlops_arg->repcount;
	uint_t flags = cautacc_ctlops_arg->flags;

	pbm_t *pbm_p = pci_p->pci_pbm_p;
	int err = DDI_SUCCESS;

	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);

	/* Can this code be optimized? */

	if (repcount == 1) {
		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
			i_ddi_caut_get(size, (void *)dev_addr,
			    (void *)host_addr);
		} else {
			int i;
			uint8_t *ff_addr = (uint8_t *)host_addr;
			for (i = 0; i < size; i++)
				*ff_addr++ = 0xff;

			err = DDI_FAILURE;
#ifdef DEBUG
			pci_peekfault_cnt++;
#endif
		}
	} else {
		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
			for (; repcount; repcount--) {
				i_ddi_caut_get(size, (void *)dev_addr,
				    (void *)host_addr);

				host_addr += size;

				if (flags == DDI_DEV_AUTOINCR)
					dev_addr += size;
			}
		} else {
			err = DDI_FAILURE;
#ifdef DEBUG
			pci_peekfault_cnt++;
#endif
		}
	}

	i_ddi_notrap((ddi_acc_handle_t)hp);
	pbm_p->pbm_ontrap_data = NULL;
	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;

	return (err);
}


static int
pci_ctlops_peek(pci_t *pci_p, peekpoke_ctlops_t *in_args, void *result)
{
	result = (void *)in_args->host_addr;
	return (in_args->handle ? pci_do_caut_get(pci_p, in_args) :
	    pci_do_peek(pci_p, in_args));
}

/*
 * get_reg_set_size
 *
 * Given a dev info pointer to a pci child and a register number, this
 * routine returns the size element of that reg set property.
 *
 * return value: size of reg set on success, -1 on error
 */
static off_t
get_reg_set_size(dev_info_t *child, int rnumber)
{
	pci_regspec_t *pci_rp;
	off_t size;
	int i;

	if (rnumber < 0)
		return (-1);

	/*
	 * Get the reg property for the device.
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS, "reg",
	    (caddr_t)&pci_rp, &i) != DDI_SUCCESS)
		return (-1);

	if (rnumber >= (i / (int)sizeof (pci_regspec_t))) {
		kmem_free(pci_rp, i);
		return (-1);
	}

	size = pci_rp[rnumber].pci_size_low |
	    ((uint64_t)pci_rp[rnumber].pci_size_hi << 32);
	kmem_free(pci_rp, i);
	return (size);
}


/*
 * control ops entry point:
 *
 * Requests handled completely:
 *	DDI_CTLOPS_INITCHILD	see init_child() for details
 *	DDI_CTLOPS_UNINITCHILD
 *	DDI_CTLOPS_REPORTDEV	see report_dev() for details
 *	DDI_CTLOPS_XLATE_INTRS	nothing to do
 *	DDI_CTLOPS_IOMIN	cache line size if streaming otherwise 1
 *	DDI_CTLOPS_REGSIZE
 *	DDI_CTLOPS_NREGS
 *	DDI_CTLOPS_NINTRS
 *	DDI_CTLOPS_DVMAPAGESIZE
 *	DDI_CTLOPS_POKE
 *	DDI_CTLOPS_PEEK
 *	DDI_CTLOPS_QUIESCE
 *	DDI_CTLOPS_UNQUIESCE
 *
 * All others passed to parent.
 */
int
pci_ctlops(dev_info_t *dip, dev_info_t *rdip,
	ddi_ctl_enum_t op, void *arg, void *result)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));

	switch (op) {
	case DDI_CTLOPS_INITCHILD:
		return (init_child(pci_p, (dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		return (uninit_child(pci_p, (dev_info_t *)arg));

	case DDI_CTLOPS_REPORTDEV:
		return (report_dev(rdip));

	case DDI_CTLOPS_IOMIN:
		/*
		 * If we are using the streaming cache, align at
		 * least on a cache line boundary.  Otherwise use
		 * whatever alignment is passed in.
		 */
		if ((int)arg) {
			int val = *((int *)result);

			val = maxbit(val, PCI_SBUF_LINE_SIZE);
			*((int *)result) = val;
		}
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
		*((off_t *)result) = get_reg_set_size(rdip, *((int *)arg));
		return (*((off_t *)result) == -1 ? DDI_FAILURE : DDI_SUCCESS);

	case DDI_CTLOPS_NREGS:
		*((uint_t *)result) = get_nreg_set(rdip);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_DVMAPAGESIZE:
		*((ulong_t *)result) = IOMMU_PAGE_SIZE;
		return (DDI_SUCCESS);

	case DDI_CTLOPS_POKE:
		return (pci_ctlops_poke(pci_p, (peekpoke_ctlops_t *)arg));

	case DDI_CTLOPS_PEEK:
		return (pci_ctlops_peek(pci_p, (peekpoke_ctlops_t *)arg,
		    result));

	case DDI_CTLOPS_AFFINITY:
		break;

	case DDI_CTLOPS_QUIESCE:
		return (pci_bus_quiesce(pci_p, rdip, result));

	case DDI_CTLOPS_UNQUIESCE:
		return (pci_bus_unquiesce(pci_p, rdip, result));

	default:
		break;
	}

	/*
	 * Now pass the request up to our parent.
	 */
	DEBUG2(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
	return (ddi_ctlops(dip, rdip, op, arg, result));
}
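
/*
 * Note: the DDI_CTLOPS_REGSIZE and DDI_CTLOPS_NREGS cases above are what
 * back the ddi_dev_regsize(9F) and ddi_dev_nregs(9F) interfaces for
 * children of this nexus.
 */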

/* ARGSUSED */
int
pci_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
	ddi_intr_handle_impl_t *hdlp, void *result)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	ddi_ispec_t *ip = (ddi_ispec_t *)hdlp->ih_private;
	int ret = DDI_SUCCESS;

	switch (intr_op) {
	case DDI_INTROP_GETCAP:
		/* GetCap will always fail for all non-PCI devices */
		(void) pci_intx_get_cap(rdip, (int *)result);
		break;
	case DDI_INTROP_SETCAP:
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ALLOC:
		*(int *)result = hdlp->ih_scratch1;
		break;
	case DDI_INTROP_FREE:
		break;
	case DDI_INTROP_GETPRI:
		*(int *)result = ip->is_pil ?
		    ip->is_pil : pci_class_to_pil(rdip);
		break;
	case DDI_INTROP_SETPRI:
		ip->is_pil = (*(int *)result);
		break;
	case DDI_INTROP_ADDISR:
		hdlp->ih_vector = *ip->is_intr;

		ret = pci_add_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_REMISR:
		hdlp->ih_vector = *ip->is_intr;

		ret = pci_remove_intr(dip, rdip, hdlp);
		break;
	case DDI_INTROP_ENABLE:
		ret = ib_update_intr_state(pci_p, rdip, hdlp,
		    PCI_INTR_STATE_ENABLE);
		break;
	case DDI_INTROP_DISABLE:
		ret = ib_update_intr_state(pci_p, rdip, hdlp,
		    PCI_INTR_STATE_DISABLE);
		break;
	case DDI_INTROP_SETMASK:
		ret = pci_intx_set_mask(rdip);
		break;
	case DDI_INTROP_CLRMASK:
		ret = pci_intx_clr_mask(rdip);
		break;
	case DDI_INTROP_GETPENDING:
		ret = pci_intx_get_pending(rdip, (int *)result);
		break;
	case DDI_INTROP_NINTRS:
	case DDI_INTROP_NAVAIL:
		*(int *)result = i_ddi_get_nintrs(rdip);
		break;
	case DDI_INTROP_SUPPORTED_TYPES:
		/* PCI nexus driver supports only fixed interrupts */
		*(int *)result = i_ddi_get_nintrs(rdip) ?
		    DDI_INTR_TYPE_FIXED : 0;
		break;
	default:
		ret = DDI_ENOTSUP;
		break;
	}

	return (ret);
}

static void
pci_init_hotplug(struct pci *pci_p)
{
	pci_bus_range_t bus_range;
	dev_info_t *dip;
	struct cb_ops *ops;

	/*
	 * Before initializing hotplug - open up bus range.  The busra
	 * module will initialize its pool of bus numbers from this.
	 * "busra" will be the agent that keeps track of them during
	 * hotplug.  Also note that busra will remove any bus numbers
	 * already in use from boot time.
	 */
	bus_range.lo = 0x0;
	bus_range.hi = 0xff;
	dip = pci_p->pci_dip;
	pci_p->hotplug_capable = B_FALSE;

	/*
	 * If this property exists, this nexus has hot-plug slots.
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "hotplug-capable")) {
		if (ndi_prop_update_int_array(DDI_DEV_T_NONE,
		    dip, "bus-range",
		    (int *)&bus_range,
		    2) != DDI_PROP_SUCCESS) {
			return;
		}

		if (pcihp_init(dip) != DDI_SUCCESS) {
			return;
		}

		if (ops = pcihp_get_cb_ops()) {
			pci_ops.devo_cb_ops = ops;
			DEBUG2(DBG_ATTACH, dip, "%s%d hotplug enabled",
			    ddi_driver_name(dip), ddi_get_instance(dip));
		} else {
			return;
		}

		pci_p->hotplug_capable = B_TRUE;
	}
}