/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * PCI nexus driver interface
 */

#include <sys/types.h>
#include <sys/conf.h>		/* nulldev */
#include <sys/stat.h>		/* devctl */
#include <sys/kmem.h>
#include <sys/async.h>		/* ecc_flt for pci_ecc.h */
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndifm.h>
#include <sys/ontrap.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi_subrdefs.h>
#include <sys/epm.h>
#include <sys/hotplug/pci/pcihp.h>
#include <sys/pci_tools_var.h>
#include <sys/spl.h>
#include <sys/pci/pci_obj.h>

/*LINTLIBRARY*/

/*
 * function prototype for hotplug routine:
 */
static void
pci_init_hotplug(struct pci *);

/*
 * function prototypes for dev ops routines:
 */
static int pci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int pci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int pci_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
	void *arg, void **result);
static int pci_ctlops_poke(pci_t *pci_p, peekpoke_ctlops_t *in_args);
static int pci_ctlops_peek(pci_t *pci_p, peekpoke_ctlops_t *in_args,
	void *result);
static off_t get_reg_set_size(dev_info_t *child, int rnumber);

/*
 * bus ops and dev ops structures:
 */
static struct bus_ops pci_bus_ops = {
	BUSO_REV,
	pci_map,
	0,
	0,
	0,
	i_ddi_map_fault,
	pci_dma_setup,
	pci_dma_allochdl,
	pci_dma_freehdl,
	pci_dma_bindhdl,
	pci_dma_unbindhdl,
	pci_dma_sync,
	pci_dma_win,
	pci_dma_ctlops,
	pci_ctlops,
	ddi_bus_prop_op,
	ndi_busop_get_eventcookie,	/* (*bus_get_eventcookie)(); */
	ndi_busop_add_eventcall,	/* (*bus_add_eventcall)(); */
	ndi_busop_remove_eventcall,	/* (*bus_remove_eventcall)(); */
	ndi_post_event,			/* (*bus_post_event)(); */
	NULL,				/* (*bus_intr_ctl)(); */
	NULL,				/* (*bus_config)(); */
	NULL,				/* (*bus_unconfig)(); */
	pci_fm_init_child,		/* (*bus_fm_init)(); */
	NULL,				/* (*bus_fm_fini)(); */
	pci_bus_enter,			/* (*bus_fm_access_enter)(); */
	pci_bus_exit,			/* (*bus_fm_access_fini)(); */
	NULL,				/* (*bus_power)(); */
	pci_intr_ops			/* (*bus_intr_op)(); */
};

extern struct cb_ops pci_cb_ops;

static struct dev_ops pci_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	pci_info,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	0,			/* devo_probe */
	pci_attach,		/* devo_attach */
	pci_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&pci_cb_ops,		/* devo_cb_ops */
	&pci_bus_ops,		/* devo_bus_ops */
	0			/* devo_power */
};

/*
 * module definitions:
 */
#include <sys/modctl.h>
extern struct mod_ops mod_driverops;

static struct modldrv modldrv = {
	&mod_driverops,			/* Type of module - driver */
	"PCI Bus nexus driver %I%",	/* Name of module.
 */
	&pci_ops,	/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};

/*
 * driver global data:
 */
void *per_pci_state;		/* per-pbm soft state pointer */
void *per_pci_common_state;	/* per-psycho soft state pointer */
kmutex_t pci_global_mutex;	/* attach/detach common struct lock */
errorq_t *pci_ecc_queue = NULL;	/* per-system ecc handling queue */
errorq_t *pci_target_queue = NULL;	/* per-system target handling queue */

extern void pci_child_cfg_save(dev_info_t *dip);
extern void pci_child_cfg_restore(dev_info_t *dip);

/*
 * _init: module load entry point.
 *
 * Sets up the two soft state anchors, the global attach/detach mutex,
 * the relocation support and the performance kstats before installing
 * the module; partially unwinds if mod_install() fails.
 */
int
_init(void)
{
	int e;

	/*
	 * Initialize per-pci bus soft state pointer.
	 */
	e = ddi_soft_state_init(&per_pci_state, sizeof (pci_t), 1);
	if (e != 0)
		return (e);

	/*
	 * Initialize per-psycho soft state pointer.
	 */
	e = ddi_soft_state_init(&per_pci_common_state,
	    sizeof (pci_common_t), 1);
	if (e != 0) {
		ddi_soft_state_fini(&per_pci_state);
		return (e);
	}

	/*
	 * Initialize global mutexes.
	 */
	mutex_init(&pci_global_mutex, NULL, MUTEX_DRIVER, NULL);
	pci_reloc_init();

	/*
	 * Create the performance kstats.
	 */
	pci_kstat_init();

	/*
	 * Install the module.
	 *
	 * NOTE(review): on mod_install() failure the kstats and the
	 * pci_reloc state initialized above are not torn down here --
	 * TODO confirm this is intentional.
	 */
	e = mod_install(&modlinkage);
	if (e != 0) {
		ddi_soft_state_fini(&per_pci_state);
		ddi_soft_state_fini(&per_pci_common_state);
		mutex_destroy(&pci_global_mutex);
	}
	return (e);
}

/*
 * _fini: module unload entry point.
 *
 * Removes the module first; only on success does it destroy the error
 * queues, kstats, soft state anchors and global mutex.
 */
int
_fini(void)
{
	int e;

	/*
	 * Remove the module.
	 */
	e = mod_remove(&modlinkage);
	if (e != 0)
		return (e);

	/*
	 * Destroy pci_ecc_queue, and set it to NULL.
	 */
	if (pci_ecc_queue)
		errorq_destroy(pci_ecc_queue);

	pci_ecc_queue = NULL;

	/*
	 * Destroy pci_target_queue, and set it to NULL.
	 */
	if (pci_target_queue)
		errorq_destroy(pci_target_queue);

	pci_target_queue = NULL;

	/*
	 * Destroy the performance kstats.
	 */
	pci_kstat_fini();

	/*
	 * Free the per-pci and per-psycho soft state info and destroy
	 * mutex for per-psycho soft state.
	 */
	ddi_soft_state_fini(&per_pci_state);
	ddi_soft_state_fini(&per_pci_common_state);
	mutex_destroy(&pci_global_mutex);
	pci_reloc_fini();
	return (e);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * pci_info: getinfo(9E) entry point.
 *
 * Decodes the instance from the minor number; hotplug-capable
 * instances are delegated to pcihp_info().
 */
/*ARGSUSED*/
static int
pci_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int instance = PCIHP_AP_MINOR_NUM_TO_INSTANCE(getminor((dev_t)arg));
	pci_t *pci_p = get_pci_soft_state(instance);

	/* allow hotplug to deal with ones it manages */
	if (pci_p && (pci_p->hotplug_capable == B_TRUE))
		return (pcihp_info(dip, infocmd, arg, result));

	/* non-hotplug or not attached */
	switch (infocmd) {
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)instance;
		return (DDI_SUCCESS);

	case DDI_INFO_DEVT2DEVINFO:
		if (pci_p == NULL)
			return (DDI_FAILURE);
		*result = (void *)pci_p->pci_dip;
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}


/* device driver entry points */
/*
 * attach entry point:
 */
static int
pci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	pci_t *pci_p;			/* per bus state pointer */
	int instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_ATTACH:
		DEBUG0(DBG_ATTACH, dip, "DDI_ATTACH\n");

		/*
		 * Allocate and get the per-pci soft state structure.
		 */
		if (alloc_pci_soft_state(instance) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: can't allocate pci state",
			    ddi_driver_name(dip), instance);
			goto err_bad_pci_softstate;
		}
		pci_p = get_pci_soft_state(instance);
		pci_p->pci_dip = dip;
		mutex_init(&pci_p->pci_mutex, NULL, MUTEX_DRIVER, NULL);
		pci_p->pci_soft_state = PCI_SOFT_STATE_CLOSED;
		pci_p->pci_open_count = 0;

		/*
		 * Get key properties of the pci bridge node and
		 * determine its type (psycho, schizo, etc ...).
		 */
		if (get_pci_properties(pci_p, dip) == DDI_FAILURE)
			goto err_bad_pci_prop;

		/*
		 * Map in the registers.
		 */
		if (map_pci_registers(pci_p, dip) == DDI_FAILURE)
			goto err_bad_reg_prop;

		if (pci_obj_setup(pci_p) != DDI_SUCCESS)
			goto err_bad_objs;

		/*
		 * If this PCI leaf has hotplug and this platform
		 * loads hotplug modules then initialize the
		 * hotplug framework.
		 */
		pci_init_hotplug(pci_p);

		/*
		 * Create the "devctl" node for hotplug support.
		 * For non-hotplug bus, we still need ":devctl" to
		 * support DEVCTL_DEVICE_* and DEVCTL_BUS_* ioctls.
		 */
		if (pci_p->hotplug_capable == B_FALSE) {
			if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
			    PCIHP_AP_MINOR_NUM(instance, PCIHP_DEVCTL_MINOR),
			    DDI_NT_NEXUS, 0) != DDI_SUCCESS)
				goto err_bad_devctl_node;
		}

		/*
		 * Create pcitool nodes for register access and interrupt
		 * routing.
		 */
		if (pcitool_init(dip) != DDI_SUCCESS) {
			goto err_bad_pcitool_nodes;
		}

		/*
		 * Due to unresolved hardware issues, disable PCIPM until
		 * the problem is fully understood.
		 *
		 * pci_pwr_setup(pci_p, dip);
		 */

		ddi_report_dev(dip);

		pci_p->pci_state = PCI_ATTACHED;
		DEBUG0(DBG_ATTACH, dip, "attach success\n");
		break;

		/*
		 * Error unwind: each label undoes the steps that
		 * succeeded before the corresponding failure point.
		 */
err_bad_pcitool_nodes:
		if (pci_p->hotplug_capable == B_FALSE)
			ddi_remove_minor_node(dip, "devctl");
		else
			(void) pcihp_uninit(dip);
err_bad_devctl_node:
		pci_obj_destroy(pci_p);
err_bad_objs:
		unmap_pci_registers(pci_p);
err_bad_reg_prop:
		free_pci_properties(pci_p);
err_bad_pci_prop:
		mutex_destroy(&pci_p->pci_mutex);
		free_pci_soft_state(instance);
err_bad_pci_softstate:
		return (DDI_FAILURE);

	case DDI_RESUME:
		DEBUG0(DBG_ATTACH, dip, "DDI_RESUME\n");

		/*
		 * Make sure the Psycho control registers and IOMMU
		 * are configured properly.
		 */
		pci_p = get_pci_soft_state(instance);
		mutex_enter(&pci_p->pci_mutex);

		/*
		 * Make sure this instance has been suspended.
		 */
		if (pci_p->pci_state != PCI_SUSPENDED) {
			DEBUG0(DBG_ATTACH, dip, "instance NOT suspended\n");
			mutex_exit(&pci_p->pci_mutex);
			return (DDI_FAILURE);
		}
		pci_obj_resume(pci_p);
		pci_p->pci_state = PCI_ATTACHED;

		pci_child_cfg_restore(dip);

		mutex_exit(&pci_p->pci_mutex);
		break;

	default:
		DEBUG0(DBG_ATTACH, dip, "unsupported attach op\n");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * detach entry point:
 */
static int
pci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance = ddi_get_instance(dip);
	pci_t *pci_p = get_pci_soft_state(instance);

	/*
	 * Make sure we are currently attached
	 *
	 * NOTE(review): pci_p is dereferenced without a NULL check;
	 * presumably detach can only be called on an attached instance
	 * -- TODO confirm.
	 */
	if (pci_p->pci_state != PCI_ATTACHED) {
		DEBUG0(DBG_ATTACH, dip, "failed - instance not attached\n");
		return (DDI_FAILURE);
	}

	mutex_enter(&pci_p->pci_mutex);

	switch (cmd) {
	case DDI_DETACH:
		DEBUG0(DBG_DETACH, dip, "DDI_DETACH\n");

		if (pci_p->hotplug_capable ==
		    B_TRUE)
			if (pcihp_uninit(dip) == DDI_FAILURE) {
				mutex_exit(&pci_p->pci_mutex);
				return (DDI_FAILURE);
			}

		pcitool_uninit(dip);

		pci_obj_destroy(pci_p);

		/*
		 * Free the pci soft state structure and the rest of the
		 * resources it's using.
		 */
		free_pci_properties(pci_p);
		unmap_pci_registers(pci_p);
		mutex_exit(&pci_p->pci_mutex);
		mutex_destroy(&pci_p->pci_mutex);
		free_pci_soft_state(instance);

		/* Free the interrupt-priorities prop if we created it. */ {
			int len;

			if (ddi_getproplen(DDI_DEV_T_ANY, dip,
			    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
			    "interrupt-priorities", &len) == DDI_PROP_SUCCESS)
				(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
				    "interrupt-priorities");
		}
		return (DDI_SUCCESS);

	case DDI_SUSPEND:
		pci_child_cfg_save(dip);
		pci_obj_suspend(pci_p);
		pci_p->pci_state = PCI_SUSPENDED;

		mutex_exit(&pci_p->pci_mutex);
		return (DDI_SUCCESS);

	default:
		DEBUG0(DBG_DETACH, dip, "unsupported detach op\n");
		mutex_exit(&pci_p->pci_mutex);
		return (DDI_FAILURE);
	}
}


/* bus driver entry points */

/*
 * bus map entry point:
 *
 * 	if map request is for an rnumber
 *		get the corresponding regspec from device node
 * 	build a new regspec in our parent's format
 *	build a new map_req with the new regspec
 *	call up the tree to complete the mapping
 */
int
pci_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
	off_t off, off_t len, caddr_t *addrp)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	struct regspec p_regspec;
	ddi_map_req_t p_mapreq;
	int reglen, rval, r_no;
	pci_regspec_t reloc_reg, *rp = &reloc_reg;

	DEBUG2(DBG_MAP, dip, "rdip=%s%d:",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (mp->map_flags & DDI_MF_USER_MAPPING)
		return (DDI_ME_UNIMPLEMENTED);

	switch (mp->map_type) {
	case DDI_MT_REGSPEC:
		reloc_reg = *(pci_regspec_t *)mp->map_obj.rp;	/* dup whole */
		break;

	case DDI_MT_RNUMBER:
		r_no = mp->map_obj.rnumber;
		DEBUG1(DBG_MAP | DBG_CONT, dip, " r#=%x", r_no);

		if (ddi_getlongprop(DDI_DEV_T_NONE, rdip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&rp, &reglen) != DDI_SUCCESS)
			return (DDI_ME_RNUMBER_RANGE);

		if (r_no < 0 || r_no >= reglen / sizeof (pci_regspec_t)) {
			kmem_free(rp, reglen);
			return (DDI_ME_RNUMBER_RANGE);
		}
		rp += r_no;
		break;

	default:
		return (DDI_ME_INVAL);
	}
	DEBUG0(DBG_MAP | DBG_CONT, dip, "\n");

	/* use "assigned-addresses" to relocate regspec within pci space */
	if (rval = pci_reloc_reg(dip, rdip, pci_p, rp))
		goto done;

	if (len)	/* adjust regspec according to mapping request */
		rp->pci_size_low = len;
	rp->pci_phys_low += off;

	/* use "ranges" to translate relocated pci regspec into parent space */
	if (rval = pci_xlate_reg(pci_p, rp, &p_regspec))
		goto done;

	p_mapreq = *mp;		/* dup the whole structure */
	p_mapreq.map_type = DDI_MT_REGSPEC;
	p_mapreq.map_obj.rp = &p_regspec;
	rval = ddi_map(dip, &p_mapreq, 0, 0, addrp);

	if (rval == DDI_SUCCESS) {
		/*
		 * Set-up access functions for FM access error capable drivers.
		 * The axq workaround prevents fault management support
		 */
		if (DDI_FM_ACC_ERR_CAP(pci_p->pci_fm_cap) &&
		    DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
		    mp->map_handlep->ah_acc.devacc_attr_access !=
		    DDI_DEFAULT_ACC)
			pci_fm_acc_setup(mp, rdip);
		pci_axq_setup(mp, pci_p->pci_pbm_p);
	}

done:
	if (mp->map_type == DDI_MT_RNUMBER)
		kmem_free(rp - r_no, reglen);

	return (rval);
}

/*
 * bus dma map entry point
 * return value:
 *	DDI_DMA_PARTIAL_MAP	 1
 *	DDI_DMA_MAPOK		 0
 *	DDI_DMA_MAPPED		 0
 *	DDI_DMA_NORESOURCES	-1
 *	DDI_DMA_NOMAPPING	-2
 *	DDI_DMA_TOOBIG		-3
 */
int
pci_dma_setup(dev_info_t *dip, dev_info_t *rdip, ddi_dma_req_t *dmareq,
	ddi_dma_handle_t *handlep)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	ddi_dma_impl_t *mp;
	int ret;

	DEBUG3(DBG_DMA_MAP, dip, "mapping - rdip=%s%d type=%s\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip),
	    handlep ? "alloc" : "advisory");

	if (!(mp = pci_dma_lmts2hdl(dip, rdip, iommu_p, dmareq)))
		return (DDI_DMA_NORESOURCES);
	if (mp == (ddi_dma_impl_t *)DDI_DMA_NOMAPPING)
		return (DDI_DMA_NOMAPPING);
	if (ret = pci_dma_type(pci_p, dmareq, mp))
		goto freehandle;
	if (ret = pci_dma_pfn(pci_p, dmareq, mp))
		goto freehandle;

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:	/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
		if ((ret = pci_dvma_win(pci_p, dmareq, mp)) || !handlep)
			goto freehandle;
		if (!PCI_DMA_CANCACHE(mp)) {	/* try fast track */
			if (PCI_DMA_CANFAST(mp)) {
				if (!pci_dvma_map_fast(iommu_p, mp))
					break;
			/* LINTED E_NOP_ELSE_STMT */
			} else {
				PCI_DVMA_FASTTRAK_PROF(mp);
			}
		}
		if (ret = pci_dvma_map(mp, dmareq, iommu_p))
			goto freehandle;
		break;
	case DMAI_FLAGS_PEER_TO_PEER:	/* LINTED E_EQUALITY_NOT_ASSIGNMENT */
		if ((ret = pci_dma_physwin(pci_p, dmareq, mp)) || !handlep)
			goto freehandle;
		break;
	case DMAI_FLAGS_BYPASS:
	default:
		panic("%s%d: pci_dma_setup: bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PCI_DMA_TYPE(mp));
		/*NOTREACHED*/
	}
	*handlep = (ddi_dma_handle_t)mp;
	mp->dmai_flags |= (DMAI_FLAGS_INUSE | DMAI_FLAGS_MAPPED);
	dump_dma_handle(DBG_DMA_MAP, dip, mp);

	return ((mp->dmai_nwin == 1) ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
freehandle:
	if (ret == DDI_DMA_NORESOURCES)
		pci_dma_freemp(mp);	/* don't run_callback() */
	else
		(void) pci_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
	return (ret);
}


/*
 * bus dma alloc handle entry point:
 */
int
pci_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attrp,
	int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	ddi_dma_impl_t *mp;
	int rval;

	DEBUG2(DBG_DMA_ALLOCH, dip, "rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	if (attrp->dma_attr_version != DMA_ATTR_V0)
		return (DDI_DMA_BADATTR);

	if (!(mp = pci_dma_allocmp(dip, rdip, waitfp, arg)))
		return (DDI_DMA_NORESOURCES);

	/*
	 * Save requestor's information
	 */
	mp->dmai_attr = *attrp;	/* whole object - augmented later  */
	*DEV_ATTR(mp) = *attrp;	/* whole object - device orig attr */
	DEBUG1(DBG_DMA_ALLOCH, dip, "mp=%p\n", mp);

	/* check and convert dma attributes to handle parameters */
	if (rval = pci_dma_attr2hdl(pci_p, mp)) {
		pci_dma_freehdl(dip, rdip, (ddi_dma_handle_t)mp);
		*handlep = NULL;
		return (rval);
	}
	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}


/*
 * bus dma free handle entry point:
 */
/*ARGSUSED*/
int
pci_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	DEBUG3(DBG_DMA_FREEH, dip, "rdip=%s%d mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	pci_dma_freemp((ddi_dma_impl_t *)handle);

	if (pci_kmem_clid) {
		DEBUG0(DBG_DMA_FREEH, dip, "run handle callback\n");
		ddi_run_callback(&pci_kmem_clid);
	}
	return (DDI_SUCCESS);
}


/*
 * bus dma bind handle entry point:
 */
int
pci_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
	ddi_dma_handle_t handle,
	ddi_dma_req_t *dmareq,
	ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	iommu_t *iommu_p = pci_p->pci_iommu_p;
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	int ret;

	DEBUG4(DBG_DMA_BINDH, dip, "rdip=%s%d mp=%p dmareq=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), mp, dmareq);

	if (mp->dmai_flags & DMAI_FLAGS_INUSE)
		return (DDI_DMA_INUSE);

	ASSERT((mp->dmai_flags & ~DMAI_FLAGS_PRESERVE) == 0);
	mp->dmai_flags |= DMAI_FLAGS_INUSE;

	if (ret = pci_dma_type(pci_p, dmareq, mp))
		goto err;
	if (ret = pci_dma_pfn(pci_p, dmareq, mp))
		goto err;

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		if (ret = pci_dvma_win(pci_p, dmareq, mp))
			goto map_err;
		if (!PCI_DMA_CANCACHE(mp)) {	/* try fast track */
			if (PCI_DMA_CANFAST(mp)) {
				if (!pci_dvma_map_fast(iommu_p, mp))
					goto mapped; /*LINTED E_NOP_ELSE_STMT*/
			} else {
				PCI_DVMA_FASTTRAK_PROF(mp);
			}
		}
		if (ret = pci_dvma_map(mp, dmareq, iommu_p))
			goto map_err;
mapped:
		*ccountp = 1;
		MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping, mp->dmai_size);
		break;
	case DMAI_FLAGS_BYPASS:
	case DMAI_FLAGS_PEER_TO_PEER:
		if (ret = pci_dma_physwin(pci_p, dmareq, mp))
			goto map_err;
		*ccountp = WINLST(mp)->win_ncookies;
		*cookiep = *(ddi_dma_cookie_t *)(WINLST(mp) + 1); /* wholeobj */
		break;
	default:
		panic("%s%d: pci_dma_bindhdl(%p): bad dma type",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
		/*NOTREACHED*/
	}
	DEBUG2(DBG_DMA_BINDH, dip, "cookie %x+%x\n", cookiep->dmac_address,
	    cookiep->dmac_size);
	dump_dma_handle(DBG_DMA_MAP, dip, mp);

	/* register FM-capable handles for DMA error detection */
	if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR)
		(void) ndi_fmc_insert(rdip, DMA_HANDLE, mp, NULL);

	mp->dmai_flags |= DMAI_FLAGS_MAPPED;
	return (mp->dmai_nwin == 1 ? DDI_DMA_MAPPED : DDI_DMA_PARTIAL_MAP);
map_err:
	pci_dvma_unregister_callbacks(pci_p, mp);
	pci_dma_freepfn(mp);
err:
	mp->dmai_flags &= DMAI_FLAGS_PRESERVE;
	return (ret);
}

/*
 * bus dma unbind handle entry point:
 */
/*ARGSUSED*/
int
pci_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	iommu_t *iommu_p = pci_p->pci_iommu_p;

	DEBUG3(DBG_DMA_UNBINDH, dip, "rdip=%s%d, mp=%p\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), handle);
	if ((mp->dmai_flags & DMAI_FLAGS_INUSE) == 0) {
		DEBUG0(DBG_DMA_UNBINDH, dip, "handle not in use\n");
		return (DDI_FAILURE);
	}

	mp->dmai_flags &= ~DMAI_FLAGS_MAPPED;

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		pci_dvma_unregister_callbacks(pci_p, mp);
		pci_dma_sync_unmap(dip, rdip, mp);
		pci_dvma_unmap(iommu_p, mp);
		pci_dma_freepfn(mp);
		break;
	case DMAI_FLAGS_BYPASS:
	case DMAI_FLAGS_PEER_TO_PEER:
		pci_dma_freewin(mp);
		break;
	default:
		panic("%s%d: pci_dma_unbindhdl:bad dma type %p",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), mp);
		/*NOTREACHED*/
	}
	if (iommu_p->iommu_dvma_clid != 0) {
		DEBUG0(DBG_DMA_UNBINDH, dip, "run dvma callback\n");
		ddi_run_callback(&iommu_p->iommu_dvma_clid);
	}
	if (pci_kmem_clid) {
		DEBUG0(DBG_DMA_UNBINDH, dip, "run handle callback\n");
		ddi_run_callback(&pci_kmem_clid);
	}
	mp->dmai_flags &= DMAI_FLAGS_PRESERVE;
	SYNC_BUF_PA(mp) = 0;

	/* drop the FM registration made at bind time, if any */
	if (mp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
		if (DEVI(rdip)->devi_fmhdl != NULL &&
		    DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap)) {
			(void) ndi_fmc_remove(rdip, DMA_HANDLE, mp);
		}
	}

	return (DDI_SUCCESS);
}


/*
 * bus dma win entry point:
 */
int
pci_dma_win(dev_info_t *dip, dev_info_t *rdip,
	ddi_dma_handle_t handle, uint_t win, off_t *offp,
	size_t *lenp, ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	DEBUG2(DBG_DMA_WIN, dip, "rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
	dump_dma_handle(DBG_DMA_WIN, dip, mp);
	if (win >= mp->dmai_nwin) {
		DEBUG1(DBG_DMA_WIN, dip, "%x out of range\n", win);
		return (DDI_FAILURE);
	}

	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		if (win != PCI_DMA_CURWIN(mp)) {
			pci_t *pci_p =
			    get_pci_soft_state(ddi_get_instance(dip));
			pci_dma_sync_unmap(dip, rdip, mp);
			/* map_window sets dmai_mapping/size/offset */
			iommu_map_window(pci_p->pci_iommu_p, mp, win);
		}
		if (cookiep)
			MAKE_DMA_COOKIE(cookiep, mp->dmai_mapping,
			    mp->dmai_size);
		if (ccountp)
			*ccountp = 1;
		break;
	case DMAI_FLAGS_PEER_TO_PEER:
	case DMAI_FLAGS_BYPASS: {
		int i;
		ddi_dma_cookie_t *ck_p;
		pci_dma_win_t *win_p = mp->dmai_winlst;

		/* walk the window list to the requested window */
		for (i = 0; i < win; win_p = win_p->win_next, i++);
		ck_p = (ddi_dma_cookie_t *)(win_p + 1);
		*cookiep = *ck_p;
		mp->dmai_offset = win_p->win_offset;
		mp->dmai_size   = win_p->win_size;
		mp->dmai_mapping = ck_p->dmac_laddress;
		mp->dmai_cookie = ck_p + 1;
		win_p->win_curseg = 0;
		if (ccountp)
			*ccountp = win_p->win_ncookies;
		}
		break;
	default:
		cmn_err(CE_WARN, "%s%d: pci_dma_win:bad dma type 0x%x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    PCI_DMA_TYPE(mp));
		return (DDI_FAILURE);
	}
	if (cookiep)
		DEBUG2(DBG_DMA_WIN, dip,
		    "cookie - dmac_address=%x dmac_size=%x\n",
		    cookiep->dmac_address, cookiep->dmac_size);
	if (offp)
		*offp = (off_t)mp->dmai_offset;
	if (lenp)
		*lenp = mp->dmai_size;
	return (DDI_SUCCESS);
}

#ifdef DEBUG
static char *pci_dmactl_str[] = {
	"DDI_DMA_FREE",
	"DDI_DMA_SYNC",
	"DDI_DMA_HTOC",
	"DDI_DMA_KVADDR",
	"DDI_DMA_MOVWIN",
	"DDI_DMA_REPWIN",
	"DDI_DMA_GETERR",
	"DDI_DMA_COFF",
	"DDI_DMA_NEXTWIN",
	"DDI_DMA_NEXTSEG",
	"DDI_DMA_SEGTOC",
	"DDI_DMA_RESERVE",
	"DDI_DMA_RELEASE",
	"DDI_DMA_RESETH",
	"DDI_DMA_CKSYNC",
	"DDI_DMA_IOPB_ALLOC",
	"DDI_DMA_IOPB_FREE",
	"DDI_DMA_SMEM_ALLOC",
	"DDI_DMA_SMEM_FREE",
	"DDI_DMA_SET_SBUS64",
	"DDI_DMA_REMAP"
};
#endif

/*
 * bus dma control entry point:
 */
int
pci_dma_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
	enum ddi_dma_ctlops cmd, off_t *offp, size_t *lenp, caddr_t *objp,
	uint_t cache_flags)
{
	ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;
	DEBUG3(DBG_DMA_CTL, dip, "%s: rdip=%s%d\n", pci_dmactl_str[cmd],
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	switch (cmd) {
	case DDI_DMA_FREE:
		(void) pci_dma_unbindhdl(dip, rdip, handle);
		(void) pci_dma_freehdl(dip, rdip, handle);
		return (DDI_SUCCESS);
	case DDI_DMA_RESERVE: {
		pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
		return (pci_fdvma_reserve(dip, rdip, pci_p,
		    (ddi_dma_req_t *)offp, (ddi_dma_handle_t *)objp));
		}
	case DDI_DMA_RELEASE: {
		pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
		return (pci_fdvma_release(dip, pci_p, mp));
		}
	default:
		break;
	}

	/* remaining commands are dispatched by the handle's dma type */
	switch (PCI_DMA_TYPE(mp)) {
	case DMAI_FLAGS_DVMA:
		return (pci_dvma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	case DMAI_FLAGS_PEER_TO_PEER:
	case DMAI_FLAGS_BYPASS:
		return (pci_dma_ctl(dip, rdip, mp, cmd, offp, lenp, objp,
		    cache_flags));
	default:
		panic("%s%d: pci_dma_ctlops(%x):bad dma type %x",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), cmd,
		    mp->dmai_flags);
		/*NOTREACHED*/
	}
}

#ifdef DEBUG
int	pci_peekfault_cnt = 0;
int	pci_pokefault_cnt = 0;
#endif /* DEBUG */

/*
 * pci_do_poke: perform a protected (on_trap) poke to device space.
 *
 * The PBM's ontrap data is published so the nexus error handlers can
 * recognize an expected fault while the poke is in flight.
 */
static int
pci_do_poke(pci_t *pci_p, peekpoke_ctlops_t *in_args)
{
	pbm_t *pbm_p = pci_p->pci_pbm_p;
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	mutex_enter(&pbm_p->pbm_pokefault_mutex);
	pbm_p->pbm_ontrap_data = &otd;

	/* Set up protected environment. */
	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		otd.ot_trampoline = (uintptr_t)&poke_fault;
		err = do_poke(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else
		err = DDI_FAILURE;

	/*
	 * Read the async fault register for the PBM to see if it sees
	 * a master-abort.
	 */
	pbm_clear_error(pbm_p);

	if (otd.ot_trap & OT_DATA_ACCESS)
		err = DDI_FAILURE;

	/* Take down protected environment. */
	no_trap();

	pbm_p->pbm_ontrap_data = NULL;
	mutex_exit(&pbm_p->pbm_pokefault_mutex);

#ifdef  DEBUG
	if (err == DDI_FAILURE)
		pci_pokefault_cnt++;
#endif
	return (err);
}


/*
 * pci_do_caut_put: cautious-access put for an FM access handle.
 *
 * Uses the ontrap data already set up by FMA in the handle and marks
 * faults as expected for the duration of the access.
 */
static int
pci_do_caut_put(pci_t *pci_p, peekpoke_ctlops_t *cautacc_ctlops_arg)
{
	size_t size = cautacc_ctlops_arg->size;
	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
	size_t repcount = cautacc_ctlops_arg->repcount;
	uint_t flags = cautacc_ctlops_arg->flags;

	pbm_t *pbm_p = pci_p->pci_pbm_p;
	int err = DDI_SUCCESS;

	/* Use ontrap data in handle set up by FMA */
	pbm_p->pbm_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;

	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;

	/*
	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
	 * mutex.
	 */
	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);

	if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
		for (; repcount; repcount--) {
			switch (size) {

			case sizeof (uint8_t):
				i_ddi_put8(hp, (uint8_t *)dev_addr,
				    *(uint8_t *)host_addr);
				break;

			case sizeof (uint16_t):
				i_ddi_put16(hp, (uint16_t *)dev_addr,
				    *(uint16_t *)host_addr);
				break;

			case sizeof (uint32_t):
				i_ddi_put32(hp, (uint32_t *)dev_addr,
				    *(uint32_t *)host_addr);
				break;

			case sizeof (uint64_t):
				i_ddi_put64(hp, (uint64_t *)dev_addr,
				    *(uint64_t *)host_addr);
				break;
			}

			host_addr += size;

			if (flags == DDI_DEV_AUTOINCR)
				dev_addr += size;

			/*
			 * Read the async fault register for the PBM to see if
			 * it sees a master-abort.
			 */
			pbm_clear_error(pbm_p);

			if (pbm_p->pbm_ontrap_data->ot_trap & OT_DATA_ACCESS) {
				err = DDI_FAILURE;
#ifdef  DEBUG
				pci_pokefault_cnt++;
#endif
				break;
			}
		}
	}

	i_ddi_notrap((ddi_acc_handle_t)hp);
	pbm_p->pbm_ontrap_data = NULL;
	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;

	return (err);
}


/*
 * pci_ctlops_poke: DDI_CTLOPS_POKE dispatcher -- cautious path when an
 * access handle is supplied, classic protected poke otherwise.
 */
static int
pci_ctlops_poke(pci_t *pci_p, peekpoke_ctlops_t *in_args)
{
	return (in_args->handle ?
	    pci_do_caut_put(pci_p, in_args) :
	    pci_do_poke(pci_p, in_args));
}


/*
 * pci_do_peek: perform a protected (on_trap) peek from device space.
 */
static int
pci_do_peek(pci_t *pci_p, peekpoke_ctlops_t *in_args)
{
	int err = DDI_SUCCESS;
	on_trap_data_t otd;

	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		otd.ot_trampoline = (uintptr_t)&peek_fault;
		err = do_peek(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else
		err = DDI_FAILURE;

	no_trap();

#ifdef  DEBUG
	if (err == DDI_FAILURE)
		pci_peekfault_cnt++;
#endif
	return (err);
}

/*
 * pci_do_caut_get: cautious-access get for an FM access handle.
 *
 * On a faulted single read, the host buffer is filled with 0xff
 * (all-ones, the classic PCI master-abort read value).
 */
static int
pci_do_caut_get(pci_t *pci_p, peekpoke_ctlops_t *cautacc_ctlops_arg)
{
	size_t size = cautacc_ctlops_arg->size;
	uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
	uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
	size_t repcount = cautacc_ctlops_arg->repcount;
	uint_t flags = cautacc_ctlops_arg->flags;

	pbm_t *pbm_p = pci_p->pci_pbm_p;
	int err = DDI_SUCCESS;

	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);

	/* Can this code be optimized? */

	if (repcount == 1) {
		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
			i_ddi_caut_get(size, (void *)dev_addr,
			    (void *)host_addr);
		} else {
			int i;
			uint8_t *ff_addr = (uint8_t *)host_addr;
			for (i = 0; i < size; i++)
				*ff_addr++ = 0xff;

			err = DDI_FAILURE;
#ifdef  DEBUG
			pci_peekfault_cnt++;
#endif
		}
	} else {
		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
			for (; repcount; repcount--) {
				i_ddi_caut_get(size, (void *)dev_addr,
				    (void *)host_addr);

				host_addr += size;

				if (flags == DDI_DEV_AUTOINCR)
					dev_addr += size;
			}
		} else {
			err = DDI_FAILURE;
#ifdef  DEBUG
			pci_peekfault_cnt++;
#endif
		}
	}

	i_ddi_notrap((ddi_acc_handle_t)hp);
	/*
	 * NOTE(review): pbm_ontrap_data is cleared here but this
	 * function never set it (unlike pci_do_caut_put) -- TODO
	 * confirm this is intentional.
	 */
	pbm_p->pbm_ontrap_data = NULL;
	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;

	return (err);
}


/*
 * pci_ctlops_peek: DDI_CTLOPS_PEEK dispatcher -- cautious path when an
 * access handle is supplied, classic protected peek otherwise.
 */
static int
pci_ctlops_peek(pci_t *pci_p, peekpoke_ctlops_t *in_args, void *result)
{
	/*
	 * NOTE(review): this assigns only the local parameter 'result'
	 * and therefore has no effect on the caller; the peeked value
	 * is delivered through in_args->host_addr -- TODO confirm the
	 * assignment below is intentionally a no-op.
	 */
	result = (void *)in_args->host_addr;
	return (in_args->handle ? pci_do_caut_get(pci_p, in_args) :
	    pci_do_peek(pci_p, in_args));
}

/*
 * get_reg_set_size
 *
 * Given a dev info pointer to a pci child and a register number, this
 * routine returns the size element of that reg set property.
 * return value: size of reg set on success, -1 on error
 */
static off_t
get_reg_set_size(dev_info_t *child, int rnumber)
{
	pci_regspec_t *pci_rp;
	off_t size;
	int i;

	if (rnumber < 0)
		return (-1);

	/*
	 * Get the reg property for the device.
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS, "reg",
	    (caddr_t)&pci_rp, &i) != DDI_SUCCESS)
		return (-1);

	if (rnumber >= (i / (int)sizeof (pci_regspec_t))) {
		kmem_free(pci_rp, i);
		return (-1);
	}

	size = pci_rp[rnumber].pci_size_low |
	    ((uint64_t)pci_rp[rnumber].pci_size_hi << 32);
	kmem_free(pci_rp, i);
	return (size);
}


/*
 * control ops entry point:
 *
 * Requests handled completely:
 *	DDI_CTLOPS_INITCHILD	see init_child() for details
 *	DDI_CTLOPS_UNINITCHILD
 *	DDI_CTLOPS_REPORTDEV	see report_dev() for details
 *	DDI_CTLOPS_XLATE_INTRS	nothing to do
 *	DDI_CTLOPS_IOMIN	cache line size if streaming otherwise 1
 *	DDI_CTLOPS_REGSIZE
 *	DDI_CTLOPS_NREGS
 *	DDI_CTLOPS_NINTRS
 *	DDI_CTLOPS_DVMAPAGESIZE
 *	DDI_CTLOPS_POKE
 *	DDI_CTLOPS_PEEK
 *	DDI_CTLOPS_QUIESCE
 *	DDI_CTLOPS_UNQUIESCE
 *
 * All others passed to parent.
 */
int
pci_ctlops(dev_info_t *dip, dev_info_t *rdip,
	ddi_ctl_enum_t op, void *arg, void *result)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));

	switch (op) {
	case DDI_CTLOPS_INITCHILD:
		return (init_child(pci_p, (dev_info_t *)arg));

	case DDI_CTLOPS_UNINITCHILD:
		return (uninit_child(pci_p, (dev_info_t *)arg));

	case DDI_CTLOPS_REPORTDEV:
		return (report_dev(rdip));

	case DDI_CTLOPS_IOMIN:

		/*
		 * If we are using the streaming cache, align at
		 * least on a cache line boundary. Otherwise use
		 * whatever alignment is passed in.
		 */

		if ((int)arg) {
			int val = *((int *)result);

			val = maxbit(val, PCI_SBUF_LINE_SIZE);
			*((int *)result) = val;
		}
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
		*((off_t *)result) = get_reg_set_size(rdip, *((int *)arg));
		return (*((off_t *)result) == -1 ? DDI_FAILURE : DDI_SUCCESS);

	case DDI_CTLOPS_NREGS:
		*((uint_t *)result) = get_nreg_set(rdip);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_DVMAPAGESIZE:
		*((ulong_t *)result) = IOMMU_PAGE_SIZE;
		return (DDI_SUCCESS);

	case DDI_CTLOPS_POKE:
		return (pci_ctlops_poke(pci_p, (peekpoke_ctlops_t *)arg));

	case DDI_CTLOPS_PEEK:
		return (pci_ctlops_peek(pci_p, (peekpoke_ctlops_t *)arg,
		    result));

	case DDI_CTLOPS_AFFINITY:
		break;

	case DDI_CTLOPS_QUIESCE:
		return (pci_bus_quiesce(pci_p, rdip, result));

	case DDI_CTLOPS_UNQUIESCE:
		return (pci_bus_unquiesce(pci_p, rdip, result));

	default:
		break;
	}

	/*
	 * Now pass the request up to our parent.
	 */
	DEBUG2(DBG_CTLOPS, dip, "passing request to parent: rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));
	return (ddi_ctlops(dip, rdip, op, arg, result));
}


/* ARGSUSED */
int
pci_intr_ops(dev_info_t *dip, dev_info_t *rdip, ddi_intr_op_t intr_op,
	ddi_intr_handle_impl_t *hdlp, void *result)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	ddi_ispec_t *ip = (ddi_ispec_t *)hdlp->ih_private;
	int ret = DDI_SUCCESS;

	switch (intr_op) {
	case DDI_INTROP_GETCAP:
		/* GetCap will always fail for all non PCI devices */
		(void) pci_intx_get_cap(rdip, (int *)result);
		break;
	case DDI_INTROP_SETCAP:
		ret = DDI_ENOTSUP;
		break;
	case DDI_INTROP_ALLOC:
		*(int *)result = hdlp->ih_scratch1;
		break;
	case DDI_INTROP_FREE:
		break;
	case DDI_INTROP_GETPRI:
		*(int *)result = ip->is_pil ?
1352 ip->is_pil : pci_class_to_pil(rdip); 1353 break; 1354 case DDI_INTROP_SETPRI: 1355 ip->is_pil = (*(int *)result); 1356 break; 1357 case DDI_INTROP_ADDISR: 1358 hdlp->ih_vector = *ip->is_intr; 1359 1360 ret = pci_add_intr(dip, rdip, hdlp); 1361 break; 1362 case DDI_INTROP_REMISR: 1363 hdlp->ih_vector = *ip->is_intr; 1364 1365 ret = pci_remove_intr(dip, rdip, hdlp); 1366 break; 1367 case DDI_INTROP_ENABLE: 1368 ret = ib_update_intr_state(pci_p, rdip, hdlp, 1369 PCI_INTR_STATE_ENABLE); 1370 break; 1371 case DDI_INTROP_DISABLE: 1372 ret = ib_update_intr_state(pci_p, rdip, hdlp, 1373 PCI_INTR_STATE_DISABLE); 1374 break; 1375 case DDI_INTROP_SETMASK: 1376 ret = pci_intx_set_mask(rdip); 1377 break; 1378 case DDI_INTROP_CLRMASK: 1379 ret = pci_intx_clr_mask(rdip); 1380 break; 1381 case DDI_INTROP_GETPENDING: 1382 ret = pci_intx_get_pending(rdip, (int *)result); 1383 break; 1384 case DDI_INTROP_NINTRS: 1385 case DDI_INTROP_NAVAIL: 1386 *(int *)result = i_ddi_get_nintrs(rdip); 1387 break; 1388 case DDI_INTROP_SUPPORTED_TYPES: 1389 /* PCI nexus driver supports only fixed interrupts */ 1390 *(int *)result = i_ddi_get_nintrs(rdip) ? 1391 DDI_INTR_TYPE_FIXED : 0; 1392 break; 1393 default: 1394 ret = DDI_ENOTSUP; 1395 break; 1396 } 1397 1398 return (ret); 1399 } 1400 1401 static void 1402 pci_init_hotplug(struct pci *pci_p) 1403 { 1404 pci_bus_range_t bus_range; 1405 dev_info_t *dip; 1406 struct cb_ops *ops; 1407 1408 /* 1409 * Before initializing hotplug - open up 1410 * bus range. The busra module will 1411 * initialize its pool of bus numbers from 1412 * this. "busra" will be the agent that keeps 1413 * track of them during hotplug. Also, note, 1414 * that busra will remove any bus numbers 1415 * already in use from boot time. 1416 */ 1417 bus_range.lo = 0x0; 1418 bus_range.hi = 0xff; 1419 dip = pci_p->pci_dip; 1420 pci_p->hotplug_capable = B_FALSE; 1421 1422 /* 1423 * If this property exists, this nexus has hot-plug 1424 * slots. 
1425 */ 1426 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1427 "hotplug-capable")) { 1428 if (ndi_prop_update_int_array(DDI_DEV_T_NONE, 1429 dip, "bus-range", 1430 (int *)&bus_range, 1431 2) != DDI_PROP_SUCCESS) { 1432 return; 1433 } 1434 1435 if (pcihp_init(dip) != DDI_SUCCESS) { 1436 return; 1437 } 1438 1439 if (ops = pcihp_get_cb_ops()) { 1440 pci_ops.devo_cb_ops = ops; 1441 DEBUG2(DBG_ATTACH, dip, "%s%d hotplug enabled", 1442 ddi_driver_name(dip), ddi_get_instance(dip)); 1443 } else { 1444 return; 1445 } 1446 1447 pci_p->hotplug_capable = B_TRUE; 1448 } 1449 } 1450