/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2022 Oxide Computer Company
 */

/*
 * File that has code which is common between pci(4D) and npe(4D).
 * It shares the following:
 *	- interrupt code
 *	- pci_tools ioctl code
 *	- name_child code
 *	- set_parent_private_data code
 */

#include <sys/conf.h>
#include <sys/pci.h>
#include <sys/sunndi.h>
#include <sys/mach_intr.h>
#include <sys/pci_intr_lib.h>
#include <sys/psm.h>
#include <sys/policy.h>
#include <sys/sysmacros.h>
#include <sys/clock.h>
#include <sys/apic.h>
#include <sys/pci_tools.h>
#include <io/pci/pci_var.h>
#include <io/pci/pci_tools_ext.h>
#include <io/pci/pci_common.h>
#include <sys/pci_cfgspace.h>
#include <sys/pci_impl.h>
#include <sys/pci_cap.h>

/*
 * Function prototypes
 */
static int	pci_get_priority(dev_info_t *, ddi_intr_handle_impl_t *, int *);
static int	pci_enable_intr(dev_info_t *, dev_info_t *,
		    ddi_intr_handle_impl_t *, uint32_t);
static void	pci_disable_intr(dev_info_t *, dev_info_t *,
		    ddi_intr_handle_impl_t *, uint32_t);
static int	pci_alloc_intr_fixed(dev_info_t *, dev_info_t *,
		    ddi_intr_handle_impl_t *, void *);
static int	pci_free_intr_fixed(dev_info_t *, dev_info_t *,
		    ddi_intr_handle_impl_t *);

/* Extern declarations for PSM module */
extern int	(*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
		    psm_intr_op_t, int *);
extern ddi_irm_pool_t *apix_irm_pool_p;

/*
 * pci_common_name_child:
 *
 * Assign the address portion of the node name
 */
int
pci_common_name_child(dev_info_t *child, char *name, int namelen)
{
	int		dev, func, length;
	char		**unit_addr;
	uint_t		n;
	pci_regspec_t	*pci_rp;

	if (ndi_dev_is_persistent_node(child) == 0) {
		/*
		 * For .conf node, use "unit-address" property
		 */
		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, child,
		    DDI_PROP_DONTPASS, "unit-address", &unit_addr, &n) !=
		    DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "cannot find unit-address in %s.conf",
			    ddi_get_name(child));
			return (DDI_FAILURE);
		}
		if (n != 1 || *unit_addr == NULL || **unit_addr == 0) {
			cmn_err(CE_WARN, "unit-address property in %s.conf"
			    " not well-formed", ddi_get_name(child));
			ddi_prop_free(unit_addr);
			return (DDI_FAILURE);
		}
		(void) snprintf(name, namelen, "%s", *unit_addr);
		ddi_prop_free(unit_addr);
		return (DDI_SUCCESS);
	}

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "reg", (int **)&pci_rp, (uint_t *)&length) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cannot find reg property in %s",
		    ddi_get_name(child));
		return (DDI_FAILURE);
	}

	/* copy the device identifications */
	dev = PCI_REG_DEV_G(pci_rp->pci_phys_hi);
	func = PCI_REG_FUNC_G(pci_rp->pci_phys_hi);

	/*
	 * free the memory allocated by ddi_prop_lookup_int_array
	 */
	ddi_prop_free(pci_rp);

	if (func != 0) {
		(void) snprintf(name, namelen, "%x,%x", dev, func);
	} else {
		(void) snprintf(name, namelen, "%x", dev);
	}

	return (DDI_SUCCESS);
}

/*
 * Interrupt related code:
 *
 * The following busop is common to npe and pci drivers
 *	bus_introp
 */

/*
 * Create the ddi_parent_private_data for a pseudo child.
 */
void
pci_common_set_parent_private_data(dev_info_t *dip)
{
	struct ddi_parent_private_data *pdptr;

	pdptr = (struct ddi_parent_private_data *)kmem_zalloc(
	    (sizeof (struct ddi_parent_private_data) +
	    sizeof (struct intrspec)), KM_SLEEP);
	pdptr->par_intr = (struct intrspec *)(pdptr + 1);
	pdptr->par_nintr = 1;
	ddi_set_parent_data(dip, pdptr);
}

/*
 * pci_get_priority:
 *	Figure out the priority of the device
 */
static int
pci_get_priority(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp, int *pri)
{
	struct intrspec *ispec;

	DDI_INTR_NEXDBG((CE_CONT, "pci_get_priority: dip = 0x%p, hdlp = %p\n",
	    (void *)dip, (void *)hdlp));

	if ((ispec = (struct intrspec *)pci_intx_get_ispec(dip, dip,
	    hdlp->ih_inum)) == NULL) {
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type)) {
			*pri = pci_class_to_pil(dip);
			pci_common_set_parent_private_data(hdlp->ih_dip);
			ispec = (struct intrspec *)pci_intx_get_ispec(dip, dip,
			    hdlp->ih_inum);
			return (DDI_SUCCESS);
		}
		return (DDI_FAILURE);
	}

	*pri = ispec->intrspec_pri;
	return (DDI_SUCCESS);
}


static int	pcieb_intr_pri_counter = 0;

/*
 * pci_common_intr_ops: bus_intr_op() function for interrupt support
 */
int
pci_common_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	int			priority = 0;
	int			psm_status = 0;
	int			pci_status = 0;
	int			pci_rval, psm_rval = PSM_FAILURE;
	int			types = 0;
	int			pciepci = 0;
	int			i, j, count;
	int			rv;
	int			behavior;
	int			cap_ptr;
	uint16_t		msi_cap_base, msix_cap_base, cap_ctrl;
	char			*prop;
	ddi_intrspec_t		isp;
	struct intrspec		*ispec;
	ddi_intr_handle_impl_t	tmp_hdl;
	ddi_intr_msix_t		*msix_p;
	ihdl_plat_t		*ihdl_plat_datap;
	ddi_intr_handle_t	*h_array;
	ddi_acc_handle_t	handle;
	apic_get_intr_t		intrinfo;

	DDI_INTR_NEXDBG((CE_CONT,
	    "pci_common_intr_ops: pdip 0x%p, rdip 0x%p, op %x handle 0x%p\n",
	    (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));

	/* Process the request */
	switch (intr_op) {
	case DDI_INTROP_SUPPORTED_TYPES:
		/*
		 * First we determine the interrupt types supported by the
		 * device itself, then we filter them through what the OS
		 * and system supports.  We determine system-level
		 * interrupt type support for anything other than fixed intrs
		 * through the psm_intr_ops vector
		 */
		rv = DDI_FAILURE;

		/* Fixed supported by default */
		types = DDI_INTR_TYPE_FIXED;

		if (psm_intr_ops == NULL) {
			*(int *)result = types;
			return (DDI_SUCCESS);
		}
		if (pci_config_setup(rdip, &handle) != DDI_SUCCESS)
			return (DDI_FAILURE);

		/* Sanity test cap control values if found */

		if (PCI_CAP_LOCATE(handle, PCI_CAP_ID_MSI, &msi_cap_base) ==
		    DDI_SUCCESS) {
			cap_ctrl = PCI_CAP_GET16(handle, 0, msi_cap_base,
			    PCI_MSI_CTRL);
			if (cap_ctrl == PCI_CAP_EINVAL16)
				goto SUPPORTED_TYPES_OUT;

			types |= DDI_INTR_TYPE_MSI;
		}

		if (PCI_CAP_LOCATE(handle, PCI_CAP_ID_MSI_X, &msix_cap_base) ==
		    DDI_SUCCESS) {
			cap_ctrl = PCI_CAP_GET16(handle, 0, msix_cap_base,
			    PCI_MSIX_CTRL);
			if (cap_ctrl == PCI_CAP_EINVAL16)
				goto SUPPORTED_TYPES_OUT;

			types |= DDI_INTR_TYPE_MSIX;
		}

		/*
		 * Filter device-level types through system-level support
		 */
		tmp_hdl.ih_type = types;
		if ((*psm_intr_ops)(rdip, &tmp_hdl, PSM_INTR_OP_CHECK_MSI,
		    &types) != PSM_SUCCESS)
			goto SUPPORTED_TYPES_OUT;

		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "rdip: 0x%p supported types: 0x%x\n", (void *)rdip,
		    types));

		/*
		 * Export any MSI/MSI-X cap locations via properties
		 */
		if (types & DDI_INTR_TYPE_MSI) {
			if (ndi_prop_update_int(DDI_DEV_T_NONE, rdip,
			    "pci-msi-capid-pointer", (int)msi_cap_base) !=
			    DDI_PROP_SUCCESS)
				goto SUPPORTED_TYPES_OUT;
		}
		if (types & DDI_INTR_TYPE_MSIX) {
			if (ndi_prop_update_int(DDI_DEV_T_NONE, rdip,
			    "pci-msix-capid-pointer", (int)msix_cap_base) !=
			    DDI_PROP_SUCCESS)
				goto SUPPORTED_TYPES_OUT;
		}

		rv = DDI_SUCCESS;

SUPPORTED_TYPES_OUT:
		*(int *)result = types;
		pci_config_teardown(&handle);
		return (rv);

	case DDI_INTROP_NAVAIL:
	case DDI_INTROP_NINTRS:
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type)) {
			if (pci_msi_get_nintrs(hdlp->ih_dip, hdlp->ih_type,
			    result) != DDI_SUCCESS)
				return (DDI_FAILURE);
		} else {
			*(int *)result = i_ddi_get_intx_nintrs(hdlp->ih_dip);
			if (*(int *)result == 0)
				return (DDI_FAILURE);
		}
		break;
	case DDI_INTROP_ALLOC:

		/*
		 * FIXED type
		 */
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			return (pci_alloc_intr_fixed(pdip, rdip, hdlp, result));
		/*
		 * MSI or MSIX (figure out number of vectors available)
		 */
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type) &&
		    (psm_intr_ops != NULL) &&
		    (pci_get_priority(rdip, hdlp, &priority) == DDI_SUCCESS)) {
			/*
			 * Following check is a special case for 'pcieb'.
			 * This makes sure vectors with the right priority
			 * are allocated for pcieb during ALLOC time.
			 */
			if (strcmp(ddi_driver_name(rdip), "pcieb") == 0) {
				hdlp->ih_pri =
				    (pcieb_intr_pri_counter % 2) ? 4 : 7;
				pciepci = 1;
			} else
				hdlp->ih_pri = priority;
			behavior = (int)(uintptr_t)hdlp->ih_scratch2;

			/*
			 * Cache in the config handle and cap_ptr
			 */
			if (i_ddi_get_pci_config_handle(rdip) == NULL) {
				if (pci_config_setup(rdip, &handle) !=
				    DDI_SUCCESS)
					return (DDI_FAILURE);
				i_ddi_set_pci_config_handle(rdip, handle);
			}

			prop = NULL;
			cap_ptr = 0;
			if (hdlp->ih_type == DDI_INTR_TYPE_MSI)
				prop = "pci-msi-capid-pointer";
			else if (hdlp->ih_type == DDI_INTR_TYPE_MSIX)
				prop = "pci-msix-capid-pointer";

			/*
			 * Enforce the calling of DDI_INTROP_SUPPORTED_TYPES
			 * for MSI(X) before allocation
			 */
			if (prop != NULL) {
				cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, rdip,
				    DDI_PROP_DONTPASS, prop, 0);
				if (cap_ptr == 0) {
					DDI_INTR_NEXDBG((CE_CONT,
					    "pci_common_intr_ops: rdip: 0x%p "
					    "attempted MSI(X) alloc without "
					    "cap property\n", (void *)rdip));
					return (DDI_FAILURE);
				}
			}
			i_ddi_set_msi_msix_cap_ptr(rdip, cap_ptr);

			/*
			 * Allocate interrupt vectors
			 */
			(void) (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_ALLOC_VECTORS, result);

			if (*(int *)result == 0)
				return (DDI_INTR_NOTFOUND);

			/* verify behavior flag and take appropriate action */
			if ((behavior == DDI_INTR_ALLOC_STRICT) &&
			    (*(int *)result < hdlp->ih_scratch1)) {
				DDI_INTR_NEXDBG((CE_CONT,
				    "pci_common_intr_ops: behavior %x, "
				    "couldn't get enough intrs\n", behavior));
				hdlp->ih_scratch1 = *(int *)result;
				(void) (*psm_intr_ops)(rdip, hdlp,
				    PSM_INTR_OP_FREE_VECTORS, NULL);
				return (DDI_EAGAIN);
			}

			if (hdlp->ih_type == DDI_INTR_TYPE_MSIX) {
				if (!(msix_p = i_ddi_get_msix(hdlp->ih_dip))) {
					msix_p = pci_msix_init(hdlp->ih_dip);
					if (msix_p) {
						i_ddi_set_msix(hdlp->ih_dip,
						    msix_p);
					} else {
						DDI_INTR_NEXDBG((CE_CONT,
						    "pci_common_intr_ops: "
						    "MSI-X table "
						    "initialization failed"
						    ", rdip 0x%p inum 0x%x\n",
						    (void *)rdip,
						    hdlp->ih_inum));

						(void) (*psm_intr_ops)(rdip,
						    hdlp,
						    PSM_INTR_OP_FREE_VECTORS,
						    NULL);

						return (DDI_FAILURE);
					}
				}
			}

			if (pciepci) {
				/* update priority in ispec */
				isp = pci_intx_get_ispec(pdip, rdip,
				    (int)hdlp->ih_inum);
				ispec = (struct intrspec *)isp;
				if (ispec)
					ispec->intrspec_pri = hdlp->ih_pri;
				++pcieb_intr_pri_counter;
			}

		} else
			return (DDI_FAILURE);
		break;
	case DDI_INTROP_FREE:
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type) &&
		    (psm_intr_ops != NULL)) {
			if (i_ddi_intr_get_current_nintrs(hdlp->ih_dip) - 1 ==
			    0) {
				if (handle = i_ddi_get_pci_config_handle(
				    rdip)) {
					(void) pci_config_teardown(&handle);
					i_ddi_set_pci_config_handle(rdip, NULL);
				}
				if (cap_ptr = i_ddi_get_msi_msix_cap_ptr(rdip))
					i_ddi_set_msi_msix_cap_ptr(rdip, 0);
			}

			(void) (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_FREE_VECTORS, NULL);

			if (hdlp->ih_type == DDI_INTR_TYPE_MSIX) {
				msix_p = i_ddi_get_msix(hdlp->ih_dip);
				if (msix_p &&
				    (i_ddi_intr_get_current_nintrs(
				    hdlp->ih_dip) - 1) == 0) {
					pci_msix_fini(msix_p);
					i_ddi_set_msix(hdlp->ih_dip, NULL);
				}
			}
		} else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED) {
			return (pci_free_intr_fixed(pdip, rdip, hdlp));
		} else
			return (DDI_FAILURE);
		break;
	case DDI_INTROP_GETPRI:
		/* Get the priority */
		if (pci_get_priority(rdip, hdlp, &priority) != DDI_SUCCESS)
			return (DDI_FAILURE);
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "priority = 0x%x\n", priority));
		*(int *)result = priority;
		break;
	case DDI_INTROP_SETPRI:
		/* Validate the interrupt priority passed */
		if (*(int *)result > LOCK_LEVEL)
			return (DDI_FAILURE);

		/* Ensure that PSM is all initialized */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		isp = pci_intx_get_ispec(pdip, rdip, (int)hdlp->ih_inum);
		ispec = (struct intrspec *)isp;
		if (ispec == NULL)
			return (DDI_FAILURE);

		/* For fixed interrupts */
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED) {
			/* if interrupt is shared, return failure */
			((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
			psm_rval = (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_GET_SHARED, &psm_status);
			/*
			 * For fixed interrupts, the irq may not have been
			 * allocated when SET_PRI is called, and the above
			 * GET_SHARED op may return PSM_FAILURE.  This is not
			 * a real error and is ignored below.
			 */
			if ((psm_rval != PSM_FAILURE) && (psm_status == 1)) {
				DDI_INTR_NEXDBG((CE_CONT,
				    "pci_common_intr_ops: "
				    "dip 0x%p cannot setpri, psm_rval=%d,"
				    "psm_status=%d\n", (void *)rdip, psm_rval,
				    psm_status));
				return (DDI_FAILURE);
			}
		}

		/* Change the priority */
		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) ==
		    PSM_FAILURE)
			return (DDI_FAILURE);

		/* update ispec */
		ispec->intrspec_pri = *(int *)result;
		break;
	case DDI_INTROP_ADDISR:
		/* update ispec */
		isp = pci_intx_get_ispec(pdip, rdip, (int)hdlp->ih_inum);
		ispec = (struct intrspec *)isp;
		if (ispec) {
			ispec->intrspec_func = hdlp->ih_cb_func;
			ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;
			pci_kstat_create(&ihdl_plat_datap->ip_ksp, pdip, hdlp);
		}
		break;
	case DDI_INTROP_REMISR:
		/* Get the interrupt structure pointer */
		isp = pci_intx_get_ispec(pdip, rdip, (int)hdlp->ih_inum);
		ispec = (struct intrspec *)isp;
		if (ispec) {
			ispec->intrspec_func = (uint_t (*)()) 0;
			ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;
			if (ihdl_plat_datap->ip_ksp != NULL)
				pci_kstat_delete(ihdl_plat_datap->ip_ksp);
		}
		break;
	case DDI_INTROP_GETCAP:
		/*
		 * First check the config space and/or
		 * MSI capability register(s)
		 */
		pci_rval = DDI_FAILURE;
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
			pci_rval = pci_msi_get_cap(rdip, hdlp->ih_type,
			    &pci_status);
		else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			pci_rval = pci_intx_get_cap(rdip, &pci_status);

		/* next check with PSM module */
		if (psm_intr_ops != NULL)
			psm_rval = (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_GET_CAP, &psm_status);

		DDI_INTR_NEXDBG((CE_CONT, "pci: GETCAP returned psm_rval = %x, "
		    "psm_status = %x, pci_rval = %x, pci_status = %x\n",
		    psm_rval, psm_status, pci_rval, pci_status));

		if (psm_rval == PSM_FAILURE && pci_rval == DDI_FAILURE) {
			*(int *)result = 0;
			return (DDI_FAILURE);
		}

		if (psm_rval == PSM_SUCCESS)
			*(int *)result = psm_status;

		if (pci_rval == DDI_SUCCESS)
			*(int *)result |= pci_status;

		DDI_INTR_NEXDBG((CE_CONT, "pci: GETCAP returned = %x\n",
		    *(int *)result));
		break;
	case DDI_INTROP_SETCAP:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "SETCAP cap=0x%x\n", *(int *)result));
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result)) {
			DDI_INTR_NEXDBG((CE_CONT, "SETCAP: psm_intr_ops"
			    " returned failure\n"));
			return (DDI_FAILURE);
		}
		break;
	case DDI_INTROP_ENABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: ENABLE\n"));
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		if (pci_enable_intr(pdip, rdip, hdlp, hdlp->ih_inum) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);

		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: ENABLE "
		    "vector=0x%x\n", hdlp->ih_vector));
		break;
	case DDI_INTROP_DISABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: DISABLE\n"));
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		pci_disable_intr(pdip, rdip, hdlp, hdlp->ih_inum);
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: DISABLE "
		    "vector = %x\n", hdlp->ih_vector));
		break;
	case DDI_INTROP_BLOCKENABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "BLOCKENABLE\n"));
		if (hdlp->ih_type != DDI_INTR_TYPE_MSI) {
			DDI_INTR_NEXDBG((CE_CONT, "BLOCKENABLE: not MSI\n"));
			return (DDI_FAILURE);
		}

		/* Check if psm_intr_ops is NULL? */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		count = hdlp->ih_scratch1;
		h_array = (ddi_intr_handle_t *)hdlp->ih_scratch2;
		for (i = 0; i < count; i++) {
			hdlp = (ddi_intr_handle_impl_t *)h_array[i];
			if (pci_enable_intr(pdip, rdip, hdlp,
			    hdlp->ih_inum) != DDI_SUCCESS) {
				DDI_INTR_NEXDBG((CE_CONT, "BLOCKENABLE: "
				    "pci_enable_intr failed for %d\n", i));
				for (j = 0; j < i; j++) {
					hdlp = (ddi_intr_handle_impl_t *)
					    h_array[j];
					pci_disable_intr(pdip, rdip, hdlp,
					    hdlp->ih_inum);
				}
				return (DDI_FAILURE);
			}
			DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
			    "BLOCKENABLE inum %x done\n", hdlp->ih_inum));
		}
		break;
	case DDI_INTROP_BLOCKDISABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "BLOCKDISABLE\n"));
		if (hdlp->ih_type != DDI_INTR_TYPE_MSI) {
			DDI_INTR_NEXDBG((CE_CONT, "BLOCKDISABLE: not MSI\n"));
			return (DDI_FAILURE);
		}

		/* Check if psm_intr_ops is present */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		count = hdlp->ih_scratch1;
		h_array = (ddi_intr_handle_t *)hdlp->ih_scratch2;
		for (i = 0; i < count; i++) {
			hdlp = (ddi_intr_handle_impl_t *)h_array[i];
			pci_disable_intr(pdip, rdip, hdlp, hdlp->ih_inum);
			DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
			    "BLOCKDISABLE inum %x done\n", hdlp->ih_inum));
		}
		break;
	case DDI_INTROP_SETMASK:
	case DDI_INTROP_CLRMASK:
		/*
		 * First handle in the config space
		 */
		if (intr_op == DDI_INTROP_SETMASK) {
			if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
				pci_status = pci_msi_set_mask(rdip,
				    hdlp->ih_type, hdlp->ih_inum);
			else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
				pci_status = pci_intx_set_mask(rdip);
		} else {
			if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
				pci_status = pci_msi_clr_mask(rdip,
				    hdlp->ih_type, hdlp->ih_inum);
			else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
				pci_status = pci_intx_clr_mask(rdip);
		}

		/* For MSI/X; no need to check with PSM module */
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (pci_status);

		/* For fixed interrupts only: handle config space first */
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED &&
		    pci_status == DDI_SUCCESS)
			break;

		/* For fixed interrupts only: confer with PSM module next */
		if (psm_intr_ops != NULL) {
			/* If interrupt is shared; do nothing */
			psm_rval = (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_GET_SHARED, &psm_status);

			if (psm_rval == PSM_FAILURE || psm_status == 1)
				return (pci_status);

			/* Now, PSM module should try to set/clear the mask */
			if (intr_op == DDI_INTROP_SETMASK)
				psm_rval = (*psm_intr_ops)(rdip, hdlp,
				    PSM_INTR_OP_SET_MASK, NULL);
			else
				psm_rval = (*psm_intr_ops)(rdip, hdlp,
				    PSM_INTR_OP_CLEAR_MASK, NULL);
		}
		return ((psm_rval == PSM_FAILURE) ? DDI_FAILURE : DDI_SUCCESS);
	case DDI_INTROP_GETPENDING:
		/*
		 * First check the config space and/or
		 * MSI capability register(s)
		 */
		pci_rval = DDI_FAILURE;
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
			pci_rval = pci_msi_get_pending(rdip, hdlp->ih_type,
			    hdlp->ih_inum, &pci_status);
		else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			pci_rval = pci_intx_get_pending(rdip, &pci_status);

		/* On failure; next try with PSM module */
		if (pci_rval != DDI_SUCCESS && psm_intr_ops != NULL)
			psm_rval = (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_GET_PENDING, &psm_status);

		DDI_INTR_NEXDBG((CE_CONT, "pci: GETPENDING returned "
		    "psm_rval = %x, psm_status = %x, pci_rval = %x, "
		    "pci_status = %x\n", psm_rval, psm_status, pci_rval,
		    pci_status));
		if (psm_rval == PSM_FAILURE && pci_rval == DDI_FAILURE) {
			*(int *)result = 0;
			return (DDI_FAILURE);
		}

		if (psm_rval != PSM_FAILURE)
			*(int *)result = psm_status;
		else if (pci_rval != DDI_FAILURE)
			*(int *)result = pci_status;
		DDI_INTR_NEXDBG((CE_CONT, "pci: GETPENDING returned = %x\n",
		    *(int *)result));
		break;
	case DDI_INTROP_GETTARGET:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: GETTARGET\n"));

		bcopy(hdlp, &tmp_hdl, sizeof (ddi_intr_handle_impl_t));
		tmp_hdl.ih_private = (void *)&intrinfo;
		intrinfo.avgi_req_flags = PSMGI_INTRBY_DEFAULT;
		intrinfo.avgi_req_flags |= PSMGI_REQ_CPUID;

		if ((*psm_intr_ops)(rdip, &tmp_hdl, PSM_INTR_OP_GET_INTR,
		    NULL) == PSM_FAILURE)
			return (DDI_FAILURE);

		*(int *)result = intrinfo.avgi_cpu_id;
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: GETTARGET "
		    "vector = 0x%x, cpu = 0x%x\n", hdlp->ih_vector,
		    *(int *)result));
		break;
	case DDI_INTROP_SETTARGET:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: SETTARGET\n"));

		bcopy(hdlp, &tmp_hdl, sizeof (ddi_intr_handle_impl_t));
		tmp_hdl.ih_private = (void *)(uintptr_t)*(int *)result;
		tmp_hdl.ih_flags = PSMGI_INTRBY_DEFAULT;

		if ((*psm_intr_ops)(rdip, &tmp_hdl, PSM_INTR_OP_SET_CPU,
		    &psm_status) == PSM_FAILURE)
			return (DDI_FAILURE);

		hdlp->ih_vector = tmp_hdl.ih_vector;
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: SETTARGET "
		    "vector = 0x%x\n", hdlp->ih_vector));
		break;
	case DDI_INTROP_GETPOOL:
		/*
		 * For MSI/X interrupts use global IRM pool if available.
		 */
		if (apix_irm_pool_p && DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type)) {
			*(ddi_irm_pool_t **)result = apix_irm_pool_p;
			return (DDI_SUCCESS);
		}
		return (DDI_ENOTSUP);
	default:
		return (i_ddi_intr_ops(pdip, rdip, intr_op, hdlp, result));
	}

	return (DDI_SUCCESS);
}
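
/*
 * Reader's note (illustrative, not a contract): the DDI_INTROP_* cases above
 * are not normally invoked directly.  A leaf driver typically calls the
 * generic DDI interrupt interfaces (ddi_intr_get_supported_types(9F),
 * ddi_intr_alloc(9F), ddi_intr_add_handler(9F), ddi_intr_enable(9F), ...),
 * and the interrupt framework routes the corresponding bus_intr_op requests
 * (SUPPORTED_TYPES, ALLOC, ADDISR, ENABLE, ...) down to this nexus via
 * i_ddi_intr_ops().
 */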

/*
 * Allocate a vector for FIXED type interrupt.
 */
int
pci_alloc_intr_fixed(dev_info_t *pdip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	struct intrspec		*ispec;
	ddi_intr_handle_impl_t	info_hdl;
	int			ret;
	int			free_phdl = 0;
	int			pci_rval;
	int			pci_status = 0;
	apic_get_type_t		type_info;

	if (psm_intr_ops == NULL)
		return (DDI_FAILURE);

	/* Figure out if this device supports MASKING */
	pci_rval = pci_intx_get_cap(rdip, &pci_status);
	if (pci_rval == DDI_SUCCESS && pci_status)
		hdlp->ih_cap |= pci_status;

	/*
	 * If the PSM module is "APIX" then pass the request for
	 * allocating the vector now.
	 */
	bzero(&info_hdl, sizeof (ddi_intr_handle_impl_t));
	info_hdl.ih_private = &type_info;
	if ((*psm_intr_ops)(NULL, &info_hdl, PSM_INTR_OP_APIC_TYPE, NULL) ==
	    PSM_SUCCESS && strcmp(type_info.avgi_type, APIC_APIX_NAME) == 0) {
		ispec = (struct intrspec *)pci_intx_get_ispec(pdip, rdip,
		    (int)hdlp->ih_inum);
		if (ispec == NULL)
			return (DDI_FAILURE);
		if (hdlp->ih_private == NULL) { /* allocate phdl structure */
			free_phdl = 1;
			i_ddi_alloc_intr_phdl(hdlp);
		}
		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
		ret = (*psm_intr_ops)(rdip, hdlp,
		    PSM_INTR_OP_ALLOC_VECTORS, result);
		if (free_phdl) { /* free up the phdl structure */
			free_phdl = 0;
			i_ddi_free_intr_phdl(hdlp);
			hdlp->ih_private = NULL;
		}
	} else {
		/*
		 * No APIX module; fall back to the old scheme where the
		 * interrupt vector is allocated during ddi_enable_intr() call.
		 */
		*(int *)result = 1;
		ret = DDI_SUCCESS;
	}

	return (ret);
}

/*
 * Free up the vector for FIXED (legacy) type interrupt.
 */
static int
pci_free_intr_fixed(dev_info_t *pdip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp)
{
	struct intrspec		*ispec;
	ddi_intr_handle_impl_t	info_hdl;
	int			ret;
	apic_get_type_t		type_info;

	if (psm_intr_ops == NULL)
		return (DDI_FAILURE);

	/*
	 * If the PSM module is "APIX" then pass the request to it
	 * to free up the vector now.
	 */
	bzero(&info_hdl, sizeof (ddi_intr_handle_impl_t));
	info_hdl.ih_private = &type_info;
	if ((*psm_intr_ops)(NULL, &info_hdl, PSM_INTR_OP_APIC_TYPE, NULL) ==
	    PSM_SUCCESS && strcmp(type_info.avgi_type, APIC_APIX_NAME) == 0) {
		ispec = (struct intrspec *)pci_intx_get_ispec(pdip, rdip,
		    (int)hdlp->ih_inum);
		if (ispec == NULL)
			return (DDI_FAILURE);
		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
		ret = (*psm_intr_ops)(rdip, hdlp,
		    PSM_INTR_OP_FREE_VECTORS, NULL);
	} else {
		/*
		 * No APIX module; fall back to the old scheme where
		 * the interrupt vector was already freed during
		 * ddi_disable_intr() call.
		 */
		ret = DDI_SUCCESS;
	}

	return (ret);
}
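
/*
 * Look up interrupt information for a given vector or IRQ (as selected by
 * is_irq and the caller's avgi_req_flags) by querying the PSM module
 * directly.
 */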

int
pci_get_intr_from_vecirq(apic_get_intr_t *intrinfo_p,
    int vecirq, boolean_t is_irq)
{
	ddi_intr_handle_impl_t	get_info_ii_hdl;

	if (is_irq)
		intrinfo_p->avgi_req_flags |= PSMGI_INTRBY_IRQ;

	/*
	 * For this locally-declared and used handle, ih_private will contain a
	 * pointer to apic_get_intr_t, not an ihdl_plat_t as used for
	 * global interrupt handling.
	 */
	get_info_ii_hdl.ih_private = intrinfo_p;
	get_info_ii_hdl.ih_vector = vecirq;

	if ((*psm_intr_ops)(NULL, &get_info_ii_hdl,
	    PSM_INTR_OP_GET_INTR, NULL) == PSM_FAILURE)
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}


int
pci_get_cpu_from_vecirq(int vecirq, boolean_t is_irq)
{
	int rval;
	apic_get_intr_t intrinfo;

	intrinfo.avgi_req_flags = PSMGI_REQ_CPUID;
	rval = pci_get_intr_from_vecirq(&intrinfo, vecirq, is_irq);

	if (rval == DDI_SUCCESS)
		return (intrinfo.avgi_cpu_id);
	else
		return (-1);
}
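
/*
 * Enable an interrupt: bind the ispec into the platform handle, have the PSM
 * module translate (hdlp, inum) to an IRQ, and register the handler via
 * add_avintr().
 */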

static int
pci_enable_intr(dev_info_t *pdip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, uint32_t inum)
{
	struct intrspec	*ispec;
	int		irq;
	ihdl_plat_t	*ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;

	DDI_INTR_NEXDBG((CE_CONT, "pci_enable_intr: hdlp %p inum %x\n",
	    (void *)hdlp, inum));

	/* Translate the interrupt if needed */
	ispec = (struct intrspec *)pci_intx_get_ispec(pdip, rdip, (int)inum);
	if (ispec == NULL)
		return (DDI_FAILURE);
	if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type)) {
		ispec->intrspec_vec = inum;
		ispec->intrspec_pri = hdlp->ih_pri;
	}
	ihdl_plat_datap->ip_ispecp = ispec;

	/* translate the interrupt if needed */
	if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR, &irq) ==
	    PSM_FAILURE)
		return (DDI_FAILURE);
	DDI_INTR_NEXDBG((CE_CONT, "pci_enable_intr: priority=%x irq=%x\n",
	    hdlp->ih_pri, irq));

	/* Add the interrupt handler */
	if (!add_avintr((void *)hdlp, hdlp->ih_pri, hdlp->ih_cb_func,
	    DEVI(rdip)->devi_name, irq, hdlp->ih_cb_arg1,
	    hdlp->ih_cb_arg2, &ihdl_plat_datap->ip_ticks, rdip))
		return (DDI_FAILURE);

	hdlp->ih_vector = irq;

	return (DDI_SUCCESS);
}
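
/*
 * Disable an interrupt: undo pci_enable_intr() by translating the vector
 * again and removing the handler with rem_avintr().
 */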

static void
pci_disable_intr(dev_info_t *pdip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, uint32_t inum)
{
	int		irq;
	struct intrspec	*ispec;
	ihdl_plat_t	*ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;

	DDI_INTR_NEXDBG((CE_CONT, "pci_disable_intr: \n"));
	ispec = (struct intrspec *)pci_intx_get_ispec(pdip, rdip, (int)inum);
	if (ispec == NULL)
		return;
	if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type)) {
		ispec->intrspec_vec = inum;
		ispec->intrspec_pri = hdlp->ih_pri;
	}
	ihdl_plat_datap->ip_ispecp = ispec;

	/* translate the interrupt if needed */
	(void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR, &irq);

	/* Disable the interrupt handler */
	rem_avintr((void *)hdlp, hdlp->ih_pri, hdlp->ih_cb_func, irq);
	ihdl_plat_datap->ip_ispecp = NULL;
}

/*
 * Miscellaneous library function
 */
int
pci_common_get_reg_prop(dev_info_t *dip, pci_regspec_t *pci_rp)
{
	int		i;
	int		number;
	int		assigned_addr_len;
	uint_t		phys_hi = pci_rp->pci_phys_hi;
	pci_regspec_t	*assigned_addr;

	if (((phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) ||
	    (phys_hi & PCI_RELOCAT_B))
		return (DDI_SUCCESS);

	/*
	 * the "reg" property specifies relocatable, get and interpret the
	 * "assigned-addresses" property.
	 */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "assigned-addresses", (int **)&assigned_addr,
	    (uint_t *)&assigned_addr_len) != DDI_PROP_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Scan the "assigned-addresses" for one that matches the specified
	 * "reg" property entry.
	 */
	phys_hi &= PCI_CONF_ADDR_MASK;
	number = assigned_addr_len / (sizeof (pci_regspec_t) / sizeof (int));
	for (i = 0; i < number; i++) {
		if ((assigned_addr[i].pci_phys_hi & PCI_CONF_ADDR_MASK) ==
		    phys_hi) {
			/*
			 * When the system does not manage to allocate PCI
			 * resources for a device, then the value that is
			 * stored in assigned addresses ends up being the
			 * hardware default reset value of '0'. On currently
			 * supported platforms, physical address zero is
			 * associated with memory; however, on other platforms
			 * this may be the exception vector table (ARM), etc.
			 * and so we opt to generally keep the idea in PCI
			 * that the reset value will not be used for actual
			 * MMIO allocations. If such a platform comes around
			 * where it is worth using that bit of MMIO for PCI
			 * then we should make this check platform-specific.
			 *
			 * Note, the +1 in the print statement is because a
			 * given regs[0] describes B/D/F information for the
			 * device.
			 */
			if (assigned_addr[i].pci_phys_mid == 0 &&
			    assigned_addr[i].pci_phys_low == 0) {
				dev_err(dip, CE_WARN, "regs[%u] does not have "
				    "a valid MMIO address", i + 1);
				goto err;
			}

			pci_rp->pci_phys_mid = assigned_addr[i].pci_phys_mid;
			pci_rp->pci_phys_low = assigned_addr[i].pci_phys_low;
			ddi_prop_free(assigned_addr);
			return (DDI_SUCCESS);
		}
	}

err:
	ddi_prop_free(assigned_addr);
	return (DDI_FAILURE);
}


/*
 * To handle PCI tool ioctls
 */

/*ARGSUSED*/
int
pci_common_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg,
    int mode, cred_t *credp, int *rvalp)
{
	minor_t	minor = getminor(dev);
	int	rv = ENOTTY;

	switch (PCI_MINOR_NUM_TO_PCI_DEVNUM(minor)) {
	case PCI_TOOL_REG_MINOR_NUM:

		switch (cmd) {
		case PCITOOL_DEVICE_SET_REG:
		case PCITOOL_DEVICE_GET_REG:

			/* Require full privileges. */
			if (secpolicy_kmdb(credp))
				rv = EPERM;
			else
				rv = pcitool_dev_reg_ops(dip, (void *)arg,
				    cmd, mode);
			break;

		case PCITOOL_NEXUS_SET_REG:
		case PCITOOL_NEXUS_GET_REG:

			/* Require full privileges. */
			if (secpolicy_kmdb(credp))
				rv = EPERM;
			else
				rv = pcitool_bus_reg_ops(dip, (void *)arg,
				    cmd, mode);
			break;
		}
		break;

	case PCI_TOOL_INTR_MINOR_NUM:

		switch (cmd) {
		case PCITOOL_DEVICE_SET_INTR:

			/* Require PRIV_SYS_RES_CONFIG, same as psradm */
			if (secpolicy_ponline(credp)) {
				rv = EPERM;
				break;
			}

			/*FALLTHRU*/
			/* These require no special privileges. */
		case PCITOOL_DEVICE_GET_INTR:
		case PCITOOL_SYSTEM_INTR_INFO:
			rv = pcitool_intr_admn(dip, (void *)arg, cmd, mode);
			break;
		}
		break;

	default:
		break;
	}

	return (rv);
}
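
/*
 * DDI_CTLOPS_POKE handler: perform one cautious write per repetition,
 * dispatching on config space, I/O space, or memory space based on the
 * access handle attributes.
 */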

int
pci_common_ctlops_poke(peekpoke_ctlops_t *in_args)
{
	size_t size = in_args->size;
	uintptr_t dev_addr = in_args->dev_addr;
	uintptr_t host_addr = in_args->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)in_args->handle;
	ddi_acc_hdl_t *hdlp = (ddi_acc_hdl_t *)in_args->handle;
	size_t repcount = in_args->repcount;
	uint_t flags = in_args->flags;
	int err = DDI_SUCCESS;

	/*
	 * if no handle then this is a poke. We have to return failure here
	 * as we have no way of knowing whether this is a MEM or IO space
	 * access
	 */
	if (in_args->handle == NULL)
		return (DDI_FAILURE);

	/*
	 * rest of this function is actually for cautious puts
	 */
	for (; repcount; repcount--) {
		if (hp->ahi_acc_attr == DDI_ACCATTR_CONFIG_SPACE) {
			switch (size) {
			case sizeof (uint8_t):
				pci_config_wr8(hp, (uint8_t *)dev_addr,
				    *(uint8_t *)host_addr);
				break;
			case sizeof (uint16_t):
				pci_config_wr16(hp, (uint16_t *)dev_addr,
				    *(uint16_t *)host_addr);
				break;
			case sizeof (uint32_t):
				pci_config_wr32(hp, (uint32_t *)dev_addr,
				    *(uint32_t *)host_addr);
				break;
			case sizeof (uint64_t):
				pci_config_wr64(hp, (uint64_t *)dev_addr,
				    *(uint64_t *)host_addr);
				break;
			default:
				err = DDI_FAILURE;
				break;
			}
		} else if (hp->ahi_acc_attr & DDI_ACCATTR_IO_SPACE) {
			if (hdlp->ah_acc.devacc_attr_endian_flags ==
			    DDI_STRUCTURE_BE_ACC) {
				switch (size) {
				case sizeof (uint8_t):
					i_ddi_io_put8(hp,
					    (uint8_t *)dev_addr,
					    *(uint8_t *)host_addr);
					break;
				case sizeof (uint16_t):
					i_ddi_io_swap_put16(hp,
					    (uint16_t *)dev_addr,
					    *(uint16_t *)host_addr);
					break;
				case sizeof (uint32_t):
					i_ddi_io_swap_put32(hp,
					    (uint32_t *)dev_addr,
					    *(uint32_t *)host_addr);
					break;
				/*
				 * note the 64-bit case is a dummy
				 * function - so no need to swap
				 */
				case sizeof (uint64_t):
					i_ddi_io_put64(hp,
					    (uint64_t *)dev_addr,
					    *(uint64_t *)host_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			} else {
				switch (size) {
				case sizeof (uint8_t):
					i_ddi_io_put8(hp,
					    (uint8_t *)dev_addr,
					    *(uint8_t *)host_addr);
					break;
				case sizeof (uint16_t):
					i_ddi_io_put16(hp,
					    (uint16_t *)dev_addr,
					    *(uint16_t *)host_addr);
					break;
				case sizeof (uint32_t):
					i_ddi_io_put32(hp,
					    (uint32_t *)dev_addr,
					    *(uint32_t *)host_addr);
					break;
				case sizeof (uint64_t):
					i_ddi_io_put64(hp,
					    (uint64_t *)dev_addr,
					    *(uint64_t *)host_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			}
		} else {
			if (hdlp->ah_acc.devacc_attr_endian_flags ==
			    DDI_STRUCTURE_BE_ACC) {
				switch (size) {
				case sizeof (uint8_t):
					*(uint8_t *)dev_addr =
					    *(uint8_t *)host_addr;
					break;
				case sizeof (uint16_t):
					*(uint16_t *)dev_addr =
					    ddi_swap16(*(uint16_t *)host_addr);
					break;
				case sizeof (uint32_t):
					*(uint32_t *)dev_addr =
					    ddi_swap32(*(uint32_t *)host_addr);
					break;
				case sizeof (uint64_t):
					*(uint64_t *)dev_addr =
					    ddi_swap64(*(uint64_t *)host_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			} else {
				switch (size) {
				case sizeof (uint8_t):
					*(uint8_t *)dev_addr =
					    *(uint8_t *)host_addr;
					break;
				case sizeof (uint16_t):
					*(uint16_t *)dev_addr =
					    *(uint16_t *)host_addr;
					break;
				case sizeof (uint32_t):
					*(uint32_t *)dev_addr =
					    *(uint32_t *)host_addr;
					break;
				case sizeof (uint64_t):
					*(uint64_t *)dev_addr =
					    *(uint64_t *)host_addr;
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			}
		}
		host_addr += size;
		if (flags == DDI_DEV_AUTOINCR)
			dev_addr += size;
	}
	return (err);
}
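
/*
 * Set up an access handle for PCI config space so that gets (and, for
 * DDI_CAUTIOUS_ACC handles, puts) go through the FMA-cautious access
 * routines.
 */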

int
pci_fm_acc_setup(ddi_acc_hdl_t *hp, off_t offset, off_t len)
{
	ddi_acc_impl_t	*ap = (ddi_acc_impl_t *)hp->ah_platform_private;

	/* endian-ness check */
	if (hp->ah_acc.devacc_attr_endian_flags == DDI_STRUCTURE_BE_ACC)
		return (DDI_FAILURE);

	/*
	 * range check
	 */
	if ((offset >= PCI_CONF_HDR_SIZE) ||
	    (len > PCI_CONF_HDR_SIZE) ||
	    (offset + len > PCI_CONF_HDR_SIZE))
		return (DDI_FAILURE);

	ap->ahi_acc_attr |= DDI_ACCATTR_CONFIG_SPACE;
	/*
	 * always use cautious mechanism for config space gets
	 */
	ap->ahi_get8 = i_ddi_caut_get8;
	ap->ahi_get16 = i_ddi_caut_get16;
	ap->ahi_get32 = i_ddi_caut_get32;
	ap->ahi_get64 = i_ddi_caut_get64;
	ap->ahi_rep_get8 = i_ddi_caut_rep_get8;
	ap->ahi_rep_get16 = i_ddi_caut_rep_get16;
	ap->ahi_rep_get32 = i_ddi_caut_rep_get32;
	ap->ahi_rep_get64 = i_ddi_caut_rep_get64;
	if (hp->ah_acc.devacc_attr_access == DDI_CAUTIOUS_ACC) {
		ap->ahi_put8 = i_ddi_caut_put8;
		ap->ahi_put16 = i_ddi_caut_put16;
		ap->ahi_put32 = i_ddi_caut_put32;
		ap->ahi_put64 = i_ddi_caut_put64;
		ap->ahi_rep_put8 = i_ddi_caut_rep_put8;
		ap->ahi_rep_put16 = i_ddi_caut_rep_put16;
		ap->ahi_rep_put32 = i_ddi_caut_rep_put32;
		ap->ahi_rep_put64 = i_ddi_caut_rep_put64;
	} else {
		ap->ahi_put8 = pci_config_wr8;
		ap->ahi_put16 = pci_config_wr16;
		ap->ahi_put32 = pci_config_wr32;
		ap->ahi_put64 = pci_config_wr64;
		ap->ahi_rep_put8 = pci_config_rep_wr8;
		ap->ahi_rep_put16 = pci_config_rep_wr16;
		ap->ahi_rep_put32 = pci_config_rep_wr32;
		ap->ahi_rep_put64 = pci_config_rep_wr64;
	}

	/* Initialize to default check/notify functions */
	ap->ahi_fault_check = i_ddi_acc_fault_check;
	ap->ahi_fault_notify = i_ddi_acc_fault_notify;
	ap->ahi_fault = 0;
	impl_acc_err_init(hp);
	return (DDI_SUCCESS);
}
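
/*
 * DDI_CTLOPS_PEEK handler: the read-side counterpart of
 * pci_common_ctlops_poke() above.
 */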

int
pci_common_ctlops_peek(peekpoke_ctlops_t *in_args)
{
	size_t size = in_args->size;
	uintptr_t dev_addr = in_args->dev_addr;
	uintptr_t host_addr = in_args->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)in_args->handle;
	ddi_acc_hdl_t *hdlp = (ddi_acc_hdl_t *)in_args->handle;
	size_t repcount = in_args->repcount;
	uint_t flags = in_args->flags;
	int err = DDI_SUCCESS;

	/*
	 * if no handle then this is a peek. We have to return failure here
	 * as we have no way of knowing whether this is a MEM or IO space
	 * access
	 */
	if (in_args->handle == NULL)
		return (DDI_FAILURE);

	for (; repcount; repcount--) {
		if (hp->ahi_acc_attr == DDI_ACCATTR_CONFIG_SPACE) {
			switch (size) {
			case sizeof (uint8_t):
				*(uint8_t *)host_addr = pci_config_rd8(hp,
				    (uint8_t *)dev_addr);
				break;
			case sizeof (uint16_t):
				*(uint16_t *)host_addr = pci_config_rd16(hp,
				    (uint16_t *)dev_addr);
				break;
			case sizeof (uint32_t):
				*(uint32_t *)host_addr = pci_config_rd32(hp,
				    (uint32_t *)dev_addr);
				break;
			case sizeof (uint64_t):
				*(uint64_t *)host_addr = pci_config_rd64(hp,
				    (uint64_t *)dev_addr);
				break;
			default:
				err = DDI_FAILURE;
				break;
			}
		} else if (hp->ahi_acc_attr & DDI_ACCATTR_IO_SPACE) {
			if (hdlp->ah_acc.devacc_attr_endian_flags ==
			    DDI_STRUCTURE_BE_ACC) {
				switch (size) {
				case sizeof (uint8_t):
					*(uint8_t *)host_addr =
					    i_ddi_io_get8(hp,
					    (uint8_t *)dev_addr);
					break;
				case sizeof (uint16_t):
					*(uint16_t *)host_addr =
					    i_ddi_io_swap_get16(hp,
					    (uint16_t *)dev_addr);
					break;
				case sizeof (uint32_t):
					*(uint32_t *)host_addr =
					    i_ddi_io_swap_get32(hp,
					    (uint32_t *)dev_addr);
					break;
				/*
				 * note the 64-bit case is a dummy
				 * function - so no need to swap
				 */
				case sizeof (uint64_t):
					*(uint64_t *)host_addr =
					    i_ddi_io_get64(hp,
					    (uint64_t *)dev_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			} else {
				switch (size) {
				case sizeof (uint8_t):
					*(uint8_t *)host_addr =
					    i_ddi_io_get8(hp,
					    (uint8_t *)dev_addr);
					break;
				case sizeof (uint16_t):
					*(uint16_t *)host_addr =
					    i_ddi_io_get16(hp,
					    (uint16_t *)dev_addr);
					break;
				case sizeof (uint32_t):
					*(uint32_t *)host_addr =
					    i_ddi_io_get32(hp,
					    (uint32_t *)dev_addr);
					break;
				case sizeof (uint64_t):
					*(uint64_t *)host_addr =
					    i_ddi_io_get64(hp,
					    (uint64_t *)dev_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			}
		} else {
			if (hdlp->ah_acc.devacc_attr_endian_flags ==
			    DDI_STRUCTURE_BE_ACC) {
				switch (in_args->size) {
				case sizeof (uint8_t):
					*(uint8_t *)host_addr =
					    *(uint8_t *)dev_addr;
					break;
				case sizeof (uint16_t):
					*(uint16_t *)host_addr =
					    ddi_swap16(*(uint16_t *)dev_addr);
					break;
				case sizeof (uint32_t):
					*(uint32_t *)host_addr =
					    ddi_swap32(*(uint32_t *)dev_addr);
					break;
				case sizeof (uint64_t):
					*(uint64_t *)host_addr =
					    ddi_swap64(*(uint64_t *)dev_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			} else {
				switch (in_args->size) {
				case sizeof (uint8_t):
					*(uint8_t *)host_addr =
					    *(uint8_t *)dev_addr;
					break;
				case sizeof (uint16_t):
					*(uint16_t *)host_addr =
					    *(uint16_t *)dev_addr;
					break;
				case sizeof (uint32_t):
					*(uint32_t *)host_addr =
					    *(uint32_t *)dev_addr;
					break;
				case sizeof (uint64_t):
					*(uint64_t *)host_addr =
					    *(uint64_t *)dev_addr;
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			}
		}
		host_addr += size;
		if (flags == DDI_DEV_AUTOINCR)
			dev_addr += size;
	}
	return (err);
}
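
/*
 * Dispatch DDI_CTLOPS_PEEK/DDI_CTLOPS_POKE bus ctlops to the handlers above.
 */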

/*ARGSUSED*/
int
pci_common_peekpoke(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t ctlop, void *arg, void *result)
{
	if (ctlop == DDI_CTLOPS_PEEK)
		return (pci_common_ctlops_peek((peekpoke_ctlops_t *)arg));
	else
		return (pci_common_ctlops_poke((peekpoke_ctlops_t *)arg));
}

/*
 * These are the get and put functions to be shared with drivers. The
 * mutex locking is done inside the functions referenced, rather than
 * here, and is thus shared across PCI child drivers and any other
 * consumers of PCI config space (such as the ACPI subsystem).
 *
 * The configuration space addresses come in as pointers.  This is fine on
 * a 32-bit system, where the VM space and configuration space are the same
 * size.  It's not such a good idea on a 64-bit system, where memory
 * addresses are twice as large as configuration space addresses.  At some
 * point in the call tree we need to take a stand and say "you are 32-bit
 * from this time forth", and this seems like a nice self-contained place.
 */

uint8_t
pci_config_rd8(ddi_acc_impl_t *hdlp, uint8_t *addr)
{
	pci_acc_cfblk_t *cfp;
	uint8_t	rval;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	rval = (*pci_getb_func)(cfp->c_busnum, cfp->c_devnum, cfp->c_funcnum,
	    reg);

	return (rval);
}

void
pci_config_rep_rd8(ddi_acc_impl_t *hdlp, uint8_t *host_addr,
    uint8_t *dev_addr, size_t repcount, uint_t flags)
{
	uint8_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*h++ = pci_config_rd8(hdlp, d++);
	else
		for (; repcount; repcount--)
			*h++ = pci_config_rd8(hdlp, d);
}

uint16_t
pci_config_rd16(ddi_acc_impl_t *hdlp, uint16_t *addr)
{
	pci_acc_cfblk_t *cfp;
	uint16_t rval;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	rval = (*pci_getw_func)(cfp->c_busnum, cfp->c_devnum, cfp->c_funcnum,
	    reg);

	return (rval);
}

void
pci_config_rep_rd16(ddi_acc_impl_t *hdlp, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	uint16_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*h++ = pci_config_rd16(hdlp, d++);
	else
		for (; repcount; repcount--)
			*h++ = pci_config_rd16(hdlp, d);
}

uint32_t
pci_config_rd32(ddi_acc_impl_t *hdlp, uint32_t *addr)
{
	pci_acc_cfblk_t *cfp;
	uint32_t rval;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	rval = (*pci_getl_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg);

	return (rval);
}

void
pci_config_rep_rd32(ddi_acc_impl_t *hdlp, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	uint32_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*h++ = pci_config_rd32(hdlp, d++);
	else
		for (; repcount; repcount--)
			*h++ = pci_config_rd32(hdlp, d);
}


void
pci_config_wr8(ddi_acc_impl_t *hdlp, uint8_t *addr, uint8_t value)
{
	pci_acc_cfblk_t *cfp;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	(*pci_putb_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg, value);
}

void
pci_config_rep_wr8(ddi_acc_impl_t *hdlp, uint8_t *host_addr,
    uint8_t *dev_addr, size_t repcount, uint_t flags)
{
	uint8_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			pci_config_wr8(hdlp, d++, *h++);
	else
		for (; repcount; repcount--)
			pci_config_wr8(hdlp, d, *h++);
}

void
pci_config_wr16(ddi_acc_impl_t *hdlp, uint16_t *addr, uint16_t value)
{
	pci_acc_cfblk_t *cfp;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	(*pci_putw_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg, value);
}

void
pci_config_rep_wr16(ddi_acc_impl_t *hdlp, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	uint16_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			pci_config_wr16(hdlp, d++, *h++);
	else
		for (; repcount; repcount--)
			pci_config_wr16(hdlp, d, *h++);
}

void
pci_config_wr32(ddi_acc_impl_t *hdlp, uint32_t *addr, uint32_t value)
{
	pci_acc_cfblk_t *cfp;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	(*pci_putl_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg, value);
}

void
pci_config_rep_wr32(ddi_acc_impl_t *hdlp, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	uint32_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			pci_config_wr32(hdlp, d++, *h++);
	else
		for (; repcount; repcount--)
			pci_config_wr32(hdlp, d, *h++);
}
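
/*
 * The 64-bit config accessors below are composed of two 32-bit accesses
 * (low word first, then high word) and are therefore not atomic.
 */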

uint64_t
pci_config_rd64(ddi_acc_impl_t *hdlp, uint64_t *addr)
{
	uint32_t lw_val;
	uint32_t hi_val;
	uint32_t *dp;
	uint64_t val;

	dp = (uint32_t *)addr;
	lw_val = pci_config_rd32(hdlp, dp);
	dp++;
	hi_val = pci_config_rd32(hdlp, dp);
	val = ((uint64_t)hi_val << 32) | lw_val;
	return (val);
}

void
pci_config_wr64(ddi_acc_impl_t *hdlp, uint64_t *addr, uint64_t value)
{
	uint32_t lw_val;
	uint32_t hi_val;
	uint32_t *dp;

	dp = (uint32_t *)addr;
	lw_val = (uint32_t)(value & 0xffffffff);
	hi_val = (uint32_t)(value >> 32);
	pci_config_wr32(hdlp, dp, lw_val);
	dp++;
	pci_config_wr32(hdlp, dp, hi_val);
}

void
pci_config_rep_rd64(ddi_acc_impl_t *hdlp, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR) {
		for (; repcount; repcount--)
			*host_addr++ = pci_config_rd64(hdlp, dev_addr++);
	} else {
		for (; repcount; repcount--)
			*host_addr++ = pci_config_rd64(hdlp, dev_addr);
	}
}

void
pci_config_rep_wr64(ddi_acc_impl_t *hdlp, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	/*
	 * As with the narrower rep_wr routines, data flows from host_addr
	 * to dev_addr; DDI_DEV_AUTOINCR advances the device address.
	 */
	if (flags == DDI_DEV_AUTOINCR) {
		for (; repcount; repcount--)
			pci_config_wr64(hdlp, dev_addr++, *host_addr++);
	} else {
		for (; repcount; repcount--)
			pci_config_wr64(hdlp, dev_addr, *host_addr++);
	}
}