/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * File that has code which is common between pci(7d) and npe(7d)
 * It shares the following:
 *	- interrupt code
 *	- pci_tools ioctl code
 *	- name_child code
 *	- set_parent_private_data code
 */

#include <sys/conf.h>
#include <sys/pci.h>
#include <sys/sunndi.h>
#include <sys/mach_intr.h>
#include <sys/hotplug/pci/pcihp.h>
#include <sys/pci_intr_lib.h>
#include <sys/psm.h>
#include <sys/policy.h>
#include <sys/sysmacros.h>
#include <sys/clock.h>
#include <io/pcplusmp/apic.h>
#include <sys/pci_tools.h>
#include <io/pci/pci_var.h>
#include <io/pci/pci_tools_ext.h>
#include <io/pci/pci_common.h>
#include <sys/pci_cfgspace.h>
#include <sys/pci_impl.h>

/*
 * Function prototypes
 */
static int	pci_get_priority(dev_info_t *, ddi_intr_handle_impl_t *, int *);
static int	pci_enable_intr(dev_info_t *, dev_info_t *,
		    ddi_intr_handle_impl_t *, uint32_t);
static void	pci_disable_intr(dev_info_t *, dev_info_t *,
		    ddi_intr_handle_impl_t *, uint32_t);

/* Extern declaration for pcplusmp module */
extern int	(*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
		    psm_intr_op_t, int *);


/*
 * pci_common_name_child:
 *
 *	Assign the address portion of the node name
 */
int
pci_common_name_child(dev_info_t *child, char *name, int namelen)
{
	int		dev, func, length;
	char		**unit_addr;
	uint_t		n;
	pci_regspec_t	*pci_rp;

	if (ndi_dev_is_persistent_node(child) == 0) {
		/*
		 * For .conf node, use "unit-address" property
		 */
		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, child,
		    DDI_PROP_DONTPASS, "unit-address", &unit_addr, &n) !=
		    DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "cannot find unit-address in %s.conf",
			    ddi_get_name(child));
			return (DDI_FAILURE);
		}
		if (n != 1 || *unit_addr == NULL || **unit_addr == 0) {
			cmn_err(CE_WARN, "unit-address property in %s.conf"
			    " not well-formed", ddi_get_name(child));
			ddi_prop_free(unit_addr);
			return (DDI_FAILURE);
		}
		(void) snprintf(name, namelen, "%s", *unit_addr);
		ddi_prop_free(unit_addr);
		return (DDI_SUCCESS);
	}

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "reg", (int **)&pci_rp, (uint_t *)&length) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cannot find reg property in %s",
		    ddi_get_name(child));
		return (DDI_FAILURE);
	}

	/* copy the device identifications */
	dev = PCI_REG_DEV_G(pci_rp->pci_phys_hi);
	func = PCI_REG_FUNC_G(pci_rp->pci_phys_hi);

	/*
	 * free the memory allocated by ddi_prop_lookup_int_array
	 */
	ddi_prop_free(pci_rp);

	if (func != 0) {
		(void) snprintf(name, namelen, "%x,%x", dev, func);
	} else {
		(void) snprintf(name, namelen, "%x", dev);
	}

	return (DDI_SUCCESS);
}

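/*
 * For example, a persistent child at device 0x3, function 0x2 gets the
 * unit-address "3,2" above, while a function-0 child is named by device
 * number alone ("3").
 */
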
/*
 * Interrupt related code:
 *
 * The following busop is common to npe and pci drivers
 *	bus_introp
 */

/*
 * Create the ddi_parent_private_data for a pseudo child.
 */
void
pci_common_set_parent_private_data(dev_info_t *dip)
{
	struct ddi_parent_private_data *pdptr;

	pdptr = (struct ddi_parent_private_data *)kmem_zalloc(
	    (sizeof (struct ddi_parent_private_data) +
	    sizeof (struct intrspec)), KM_SLEEP);
	pdptr->par_intr = (struct intrspec *)(pdptr + 1);
	pdptr->par_nintr = 1;
	ddi_set_parent_data(dip, pdptr);
}

/*
 * pci_get_priority:
 *	Figure out the priority of the device
 */
static int
pci_get_priority(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp, int *pri)
{
	struct intrspec *ispec;

	DDI_INTR_NEXDBG((CE_CONT, "pci_get_priority: dip = 0x%p, hdlp = %p\n",
	    (void *)dip, (void *)hdlp));

	if ((ispec = (struct intrspec *)pci_intx_get_ispec(dip, dip,
	    hdlp->ih_inum)) == NULL) {
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type)) {
			int class = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "class-code", -1);

			*pri = (class == -1) ? 1 : pci_devclass_to_ipl(class);
			pci_common_set_parent_private_data(hdlp->ih_dip);
			ispec = (struct intrspec *)pci_intx_get_ispec(dip, dip,
			    hdlp->ih_inum);
			return (DDI_SUCCESS);
		}
		return (DDI_FAILURE);
	}

	*pri = ispec->intrspec_pri;
	return (DDI_SUCCESS);
}



static int pcie_pci_intr_pri_counter = 0;

/*
 * pci_common_intr_ops: bus_intr_op() function for interrupt support
 */
int
pci_common_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	int			priority = 0;
	int			psm_status = 0;
	int			pci_status = 0;
	int			pci_rval, psm_rval = PSM_FAILURE;
	int			types = 0;
	int			pciepci = 0;
	int			i, j, count;
	int			behavior;
	int			cap_ptr;
	ddi_intrspec_t		isp;
	struct intrspec		*ispec;
	ddi_intr_handle_impl_t	tmp_hdl;
	ddi_intr_msix_t		*msix_p;
	ihdl_plat_t		*ihdl_plat_datap;
	ddi_intr_handle_t	*h_array;
	ddi_acc_handle_t	handle;

	DDI_INTR_NEXDBG((CE_CONT,
	    "pci_common_intr_ops: pdip 0x%p, rdip 0x%p, op %x handle 0x%p\n",
	    (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));

	/* Process the request */
	switch (intr_op) {
	case DDI_INTROP_SUPPORTED_TYPES:
		/* Fixed supported by default */
		*(int *)result = DDI_INTR_TYPE_FIXED;

		/* Figure out if MSI or MSI-X is supported? */
		if (pci_msi_get_supported_type(rdip, &types) != DDI_SUCCESS)
			return (DDI_SUCCESS);

		if (psm_intr_ops != NULL) {
			/*
			 * Only support MSI for now, OR it in
			 */
			*(int *)result |= (types & DDI_INTR_TYPE_MSI);

			tmp_hdl.ih_type = *(int *)result;
			(void) (*psm_intr_ops)(rdip, &tmp_hdl,
			    PSM_INTR_OP_CHECK_MSI, result);
			DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
			    "rdip: 0x%p supported types: 0x%x\n", (void *)rdip,
			    *(int *)result));
		}
		break;
	case DDI_INTROP_NAVAIL:
	case DDI_INTROP_NINTRS:
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type)) {
			if (pci_msi_get_nintrs(hdlp->ih_dip, hdlp->ih_type,
			    result) != DDI_SUCCESS)
				return (DDI_FAILURE);
		} else {
			*(int *)result = i_ddi_get_intx_nintrs(hdlp->ih_dip);
			if (*(int *)result == 0)
				return (DDI_FAILURE);
		}
		break;
	case DDI_INTROP_ALLOC:
		/*
		 * MSI or MSIX (figure out number of vectors available)
		 * FIXED interrupts: just return available interrupts
		 */
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type) &&
		    (psm_intr_ops != NULL) &&
		    (pci_get_priority(rdip, hdlp, &priority) == DDI_SUCCESS)) {
			/*
			 * Following check is a special case for 'pcie_pci'.
			 * This makes sure vectors with the right priority
			 * are allocated for pcie_pci during ALLOC time.
			 */
			if (strcmp(ddi_driver_name(rdip), "pcie_pci") == 0) {
				hdlp->ih_pri =
				    (pcie_pci_intr_pri_counter % 2) ? 4 : 7;
				pciepci = 1;
			} else
				hdlp->ih_pri = priority;
			behavior = (int)(uintptr_t)hdlp->ih_scratch2;

			/*
			 * Cache in the config handle and cap_ptr
			 */
			if (i_ddi_get_pci_config_handle(rdip) == NULL) {
				if (pci_config_setup(rdip, &handle) !=
				    DDI_SUCCESS)
					return (DDI_FAILURE);
				i_ddi_set_pci_config_handle(rdip, handle);
			}

			if (i_ddi_get_msi_msix_cap_ptr(rdip) == 0) {
				char *prop =
				    (hdlp->ih_type == DDI_INTR_TYPE_MSI) ?
				    "pci-msi-capid-pointer" :
				    "pci-msix-capid-pointer";

				cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, rdip,
				    DDI_PROP_DONTPASS, prop,
				    PCI_CAP_NEXT_PTR_NULL);
				i_ddi_set_msi_msix_cap_ptr(rdip, cap_ptr);
			}


			(void) (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_ALLOC_VECTORS, result);

			/* verify behavior flag and take appropriate action */
			if ((behavior == DDI_INTR_ALLOC_STRICT) &&
			    (*(int *)result < hdlp->ih_scratch1)) {
				DDI_INTR_NEXDBG((CE_CONT,
				    "pci_common_intr_ops: behavior %x, "
				    "couldn't get enough intrs\n", behavior));
				hdlp->ih_scratch1 = *(int *)result;
				(void) (*psm_intr_ops)(rdip, hdlp,
				    PSM_INTR_OP_FREE_VECTORS, NULL);
				return (DDI_EAGAIN);
			}

			if (hdlp->ih_type == DDI_INTR_TYPE_MSIX) {
				if (!(msix_p = i_ddi_get_msix(hdlp->ih_dip))) {
					msix_p = pci_msix_init(hdlp->ih_dip);
					if (msix_p)
						i_ddi_set_msix(hdlp->ih_dip,
						    msix_p);
				}
			}

			if (pciepci) {
				/* update priority in ispec */
				isp = pci_intx_get_ispec(pdip, rdip,
				    (int)hdlp->ih_inum);
				ispec = (struct intrspec *)isp;
				if (ispec)
					ispec->intrspec_pri = hdlp->ih_pri;
				++pcie_pci_intr_pri_counter;
			}

		} else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED) {
			/* Figure out if this device supports MASKING */
			pci_rval = pci_intx_get_cap(rdip, &pci_status);
			if (pci_rval == DDI_SUCCESS && pci_status)
				hdlp->ih_cap |= pci_status;
			*(int *)result = 1;	/* DDI_INTR_TYPE_FIXED */
		} else
			return (DDI_FAILURE);
		break;
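	/*
	 * FREE: release the vectors with the PSM and, once the last
	 * interrupt for the device is being freed, tear down the cached
	 * config handle, capability pointer and any MSI-X state that was
	 * set up at ALLOC time.
	 */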
	case DDI_INTROP_FREE:
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type) &&
		    (psm_intr_ops != NULL)) {
			if (i_ddi_intr_get_current_nintrs(hdlp->ih_dip) - 1 ==
			    0) {
				if (handle = i_ddi_get_pci_config_handle(
				    rdip)) {
					(void) pci_config_teardown(&handle);
					i_ddi_set_pci_config_handle(rdip, NULL);
				}
				if (cap_ptr = i_ddi_get_msi_msix_cap_ptr(rdip))
					i_ddi_set_msi_msix_cap_ptr(rdip, 0);
			}

			(void) (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_FREE_VECTORS, NULL);

			if (hdlp->ih_type == DDI_INTR_TYPE_MSIX) {
				msix_p = i_ddi_get_msix(hdlp->ih_dip);
				if (msix_p &&
				    (i_ddi_intr_get_current_nintrs(
				    hdlp->ih_dip) - 1) == 0) {
					pci_msix_fini(msix_p);
					i_ddi_set_msix(hdlp->ih_dip, NULL);
				}
			}
		}
		break;
	case DDI_INTROP_GETPRI:
		/* Get the priority */
		if (pci_get_priority(rdip, hdlp, &priority) != DDI_SUCCESS)
			return (DDI_FAILURE);
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "priority = 0x%x\n", priority));
		*(int *)result = priority;
		break;
	case DDI_INTROP_SETPRI:
		/* Validate the interrupt priority passed */
		if (*(int *)result > LOCK_LEVEL)
			return (DDI_FAILURE);

		/* Ensure that PSM is all initialized */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		/* Change the priority */
		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) ==
		    PSM_FAILURE)
			return (DDI_FAILURE);

		/* update ispec */
		isp = pci_intx_get_ispec(pdip, rdip, (int)hdlp->ih_inum);
		ispec = (struct intrspec *)isp;
		if (ispec)
			ispec->intrspec_pri = *(int *)result;
		break;
	case DDI_INTROP_ADDISR:
		/* update ispec */
		isp = pci_intx_get_ispec(pdip, rdip, (int)hdlp->ih_inum);
		ispec = (struct intrspec *)isp;
		if (ispec) {
			ispec->intrspec_func = hdlp->ih_cb_func;
			ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;
			pci_kstat_create(&ihdl_plat_datap->ip_ksp, pdip, hdlp);
		}
		break;
	case DDI_INTROP_REMISR:
		/* Get the interrupt structure pointer */
		isp = pci_intx_get_ispec(pdip, rdip, (int)hdlp->ih_inum);
		ispec = (struct intrspec *)isp;
		if (ispec) {
			ispec->intrspec_func = (uint_t (*)()) 0;
			ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;
			if (ihdl_plat_datap->ip_ksp != NULL)
				pci_kstat_delete(ihdl_plat_datap->ip_ksp);
		}
		break;
	case DDI_INTROP_GETCAP:
		/*
		 * First check the config space and/or
		 * MSI capability register(s)
		 */
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
			pci_rval = pci_msi_get_cap(rdip, hdlp->ih_type,
			    &pci_status);
		else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			pci_rval = pci_intx_get_cap(rdip, &pci_status);

		/* next check with pcplusmp */
		if (psm_intr_ops != NULL)
			psm_rval = (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_GET_CAP, &psm_status);

		DDI_INTR_NEXDBG((CE_CONT, "pci: GETCAP returned psm_rval = %x, "
		    "psm_status = %x, pci_rval = %x, pci_status = %x\n",
		    psm_rval, psm_status, pci_rval, pci_status));

		if (psm_rval == PSM_FAILURE && pci_rval == DDI_FAILURE) {
			*(int *)result = 0;
			return (DDI_FAILURE);
		}

		if (psm_rval == PSM_SUCCESS)
			*(int *)result = psm_status;

		if (pci_rval == DDI_SUCCESS)
			*(int *)result |= pci_status;

		DDI_INTR_NEXDBG((CE_CONT, "pci: GETCAP returned = %x\n",
		    *(int *)result));
		break;
	case DDI_INTROP_SETCAP:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "SETCAP cap=0x%x\n", *(int *)result));
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result)) {
			DDI_INTR_NEXDBG((CE_CONT, "SETCAP: psm_intr_ops"
			    " returned failure\n"));
			return (DDI_FAILURE);
		}
		break;
	case DDI_INTROP_ENABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: ENABLE\n"));
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		if (pci_enable_intr(pdip, rdip, hdlp, hdlp->ih_inum) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);

		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: ENABLE "
		    "vector=0x%x\n", hdlp->ih_vector));
		break;
	case DDI_INTROP_DISABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: DISABLE\n"));
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		pci_disable_intr(pdip, rdip, hdlp, hdlp->ih_inum);
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: DISABLE "
		    "vector = %x\n", hdlp->ih_vector));
		break;
	case DDI_INTROP_BLOCKENABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "BLOCKENABLE\n"));
		if (hdlp->ih_type != DDI_INTR_TYPE_MSI) {
			DDI_INTR_NEXDBG((CE_CONT, "BLOCKENABLE: not MSI\n"));
			return (DDI_FAILURE);
		}

		/* Check if psm_intr_ops is NULL? */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		count = hdlp->ih_scratch1;
		h_array = (ddi_intr_handle_t *)hdlp->ih_scratch2;
		for (i = 0; i < count; i++) {
			hdlp = (ddi_intr_handle_impl_t *)h_array[i];
			if (pci_enable_intr(pdip, rdip, hdlp,
			    hdlp->ih_inum) != DDI_SUCCESS) {
				DDI_INTR_NEXDBG((CE_CONT, "BLOCKENABLE: "
				    "pci_enable_intr failed for %d\n", i));
				for (j = 0; j < i; j++) {
					hdlp = (ddi_intr_handle_impl_t *)
					    h_array[j];
					pci_disable_intr(pdip, rdip, hdlp,
					    hdlp->ih_inum);
				}
				return (DDI_FAILURE);
			}
			DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
			    "BLOCKENABLE inum %x done\n", hdlp->ih_inum));
		}
		break;
	case DDI_INTROP_BLOCKDISABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "BLOCKDISABLE\n"));
		if (hdlp->ih_type != DDI_INTR_TYPE_MSI) {
			DDI_INTR_NEXDBG((CE_CONT, "BLOCKDISABLE: not MSI\n"));
			return (DDI_FAILURE);
		}

		/* Check if psm_intr_ops is present */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		count = hdlp->ih_scratch1;
		h_array = (ddi_intr_handle_t *)hdlp->ih_scratch2;
		for (i = 0; i < count; i++) {
			hdlp = (ddi_intr_handle_impl_t *)h_array[i];
			pci_disable_intr(pdip, rdip, hdlp, hdlp->ih_inum);
			DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
			    "BLOCKDISABLE inum %x done\n", hdlp->ih_inum));
		}
		break;
	case DDI_INTROP_SETMASK:
	case DDI_INTROP_CLRMASK:
		/*
		 * First handle in the config space
		 */
		if (intr_op == DDI_INTROP_SETMASK) {
			if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
				pci_status = pci_msi_set_mask(rdip,
				    hdlp->ih_type, hdlp->ih_inum);
			else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
				pci_status = pci_intx_set_mask(rdip);
		} else {
			if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
				pci_status = pci_msi_clr_mask(rdip,
				    hdlp->ih_type, hdlp->ih_inum);
			else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
				pci_status = pci_intx_clr_mask(rdip);
		}

		/* For MSI/X; no need to check with pcplusmp */
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (pci_status);

		/* For fixed interrupts only: handle config space first */
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED &&
		    pci_status == DDI_SUCCESS)
			break;

		/* For fixed interrupts only: confer with pcplusmp next */
		if (psm_intr_ops != NULL) {
			/* If interrupt is shared; do nothing */
			psm_rval = (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_GET_SHARED, &psm_status);

			if (psm_rval == PSM_FAILURE || psm_status == 1)
				return (pci_status);

			/* Now, pcplusmp should try to set/clear the mask */
			if (intr_op == DDI_INTROP_SETMASK)
				psm_rval = (*psm_intr_ops)(rdip, hdlp,
				    PSM_INTR_OP_SET_MASK, NULL);
			else
				psm_rval = (*psm_intr_ops)(rdip, hdlp,
				    PSM_INTR_OP_CLEAR_MASK, NULL);
		}
		return ((psm_rval == PSM_FAILURE) ? DDI_FAILURE : DDI_SUCCESS);
	case DDI_INTROP_GETPENDING:
		/*
		 * First check the config space and/or
		 * MSI capability register(s)
		 */
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
			pci_rval = pci_msi_get_pending(rdip, hdlp->ih_type,
			    hdlp->ih_inum, &pci_status);
		else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			pci_rval = pci_intx_get_pending(rdip, &pci_status);

		/* On failure; next try with pcplusmp */
		if (pci_rval != DDI_SUCCESS && psm_intr_ops != NULL)
			psm_rval = (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_GET_PENDING, &psm_status);

		DDI_INTR_NEXDBG((CE_CONT, "pci: GETPENDING returned "
		    "psm_rval = %x, psm_status = %x, pci_rval = %x, "
		    "pci_status = %x\n", psm_rval, psm_status, pci_rval,
		    pci_status));
		if (psm_rval == PSM_FAILURE && pci_rval == DDI_FAILURE) {
			*(int *)result = 0;
			return (DDI_FAILURE);
		}

		if (psm_rval != PSM_FAILURE)
			*(int *)result = psm_status;
		else if (pci_rval != DDI_FAILURE)
			*(int *)result = pci_status;
		DDI_INTR_NEXDBG((CE_CONT, "pci: GETPENDING returned = %x\n",
		    *(int *)result));
		break;
	default:
		return (i_ddi_intr_ops(pdip, rdip, intr_op, hdlp, result));
	}

	return (DDI_SUCCESS);
}

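/*
 * pci_get_intr_from_vecirq:
 *	Ask the PSM (pcplusmp) for information about the interrupt that owns
 *	the given vector or IRQ; the answers are filled into *intrinfo_p.
 */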
int
pci_get_intr_from_vecirq(apic_get_intr_t *intrinfo_p,
    int vecirq, boolean_t is_irq)
{
	ddi_intr_handle_impl_t	get_info_ii_hdl;

	if (is_irq)
		intrinfo_p->avgi_req_flags |= PSMGI_INTRBY_IRQ;

	/*
	 * For this locally-declared and used handle, ih_private will contain a
	 * pointer to apic_get_intr_t, not an ihdl_plat_t as used for
	 * global interrupt handling.
	 */
	get_info_ii_hdl.ih_private = intrinfo_p;
	get_info_ii_hdl.ih_vector = (ushort_t)vecirq;

	if ((*psm_intr_ops)(NULL, &get_info_ii_hdl,
	    PSM_INTR_OP_GET_INTR, NULL) == PSM_FAILURE)
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}


int
pci_get_cpu_from_vecirq(int vecirq, boolean_t is_irq)
{
	int rval;

	apic_get_intr_t intrinfo;
	intrinfo.avgi_req_flags = PSMGI_REQ_CPUID;
	rval = pci_get_intr_from_vecirq(&intrinfo, vecirq, is_irq);

	if (rval == DDI_SUCCESS)
		return (intrinfo.avgi_cpu_id);
	else
		return (-1);
}

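/*
 * pci_enable_intr/pci_disable_intr:
 *	Translate the interrupt through the PSM (PSM_INTR_OP_XLATE_VECTOR)
 *	and then add or remove the autovect handler for the resulting irq.
 */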
static int
pci_enable_intr(dev_info_t *pdip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, uint32_t inum)
{
	struct intrspec	*ispec;
	int		irq;
	ihdl_plat_t	*ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;

	DDI_INTR_NEXDBG((CE_CONT, "pci_enable_intr: hdlp %p inum %x\n",
	    (void *)hdlp, inum));

	/* Translate the interrupt if needed */
	ispec = (struct intrspec *)pci_intx_get_ispec(pdip, rdip, (int)inum);
	if (ispec == NULL)
		return (DDI_FAILURE);
	if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
		ispec->intrspec_vec = inum;
	ihdl_plat_datap->ip_ispecp = ispec;

	/* translate the interrupt if needed */
	(void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR, &irq);
	DDI_INTR_NEXDBG((CE_CONT, "pci_enable_intr: priority=%x irq=%x\n",
	    hdlp->ih_pri, irq));

	/* Add the interrupt handler */
	if (!add_avintr((void *)hdlp, hdlp->ih_pri, hdlp->ih_cb_func,
	    DEVI(rdip)->devi_name, irq, hdlp->ih_cb_arg1,
	    hdlp->ih_cb_arg2, &ihdl_plat_datap->ip_ticks, rdip))
		return (DDI_FAILURE);

	/* Note this really is an irq. */
	hdlp->ih_vector = (ushort_t)irq;

	return (DDI_SUCCESS);
}


static void
pci_disable_intr(dev_info_t *pdip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, uint32_t inum)
{
	int		irq;
	struct intrspec	*ispec;
	ihdl_plat_t	*ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;

	DDI_INTR_NEXDBG((CE_CONT, "pci_disable_intr: \n"));
	ispec = (struct intrspec *)pci_intx_get_ispec(pdip, rdip, (int)inum);
	if (ispec == NULL)
		return;
	if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
		ispec->intrspec_vec = inum;
	ihdl_plat_datap->ip_ispecp = ispec;

	/* translate the interrupt if needed */
	(void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR, &irq);

	/* Disable the interrupt handler */
	rem_avintr((void *)hdlp, hdlp->ih_pri, hdlp->ih_cb_func, irq);
	ihdl_plat_datap->ip_ispecp = NULL;
}

/*
 * Miscellaneous library function
 */
int
pci_common_get_reg_prop(dev_info_t *dip, pci_regspec_t *pci_rp)
{
	int		i;
	int		number;
	int		assigned_addr_len;
	uint_t		phys_hi = pci_rp->pci_phys_hi;
	pci_regspec_t	*assigned_addr;

	if (((phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) ||
	    (phys_hi & PCI_RELOCAT_B))
		return (DDI_SUCCESS);

	/*
	 * the "reg" property specifies relocatable, get and interpret the
	 * "assigned-addresses" property.
	 */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "assigned-addresses", (int **)&assigned_addr,
	    (uint_t *)&assigned_addr_len) != DDI_PROP_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Scan the "assigned-addresses" for one that matches the specified
	 * "reg" property entry.
	 */
	phys_hi &= PCI_CONF_ADDR_MASK;
	number = assigned_addr_len / (sizeof (pci_regspec_t) / sizeof (int));
	for (i = 0; i < number; i++) {
		if ((assigned_addr[i].pci_phys_hi & PCI_CONF_ADDR_MASK) ==
		    phys_hi) {
			pci_rp->pci_phys_mid = assigned_addr[i].pci_phys_mid;
			pci_rp->pci_phys_low = assigned_addr[i].pci_phys_low;
			ddi_prop_free(assigned_addr);
			return (DDI_SUCCESS);
		}
	}

	ddi_prop_free(assigned_addr);
	return (DDI_FAILURE);
}


/*
 * For pci_tools
 */

int
pci_common_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg,
    int mode, cred_t *credp, int *rvalp)
{
	int rv = ENOTTY;

	minor_t minor = getminor(dev);

	switch (PCIHP_AP_MINOR_NUM_TO_PCI_DEVNUM(minor)) {
	case PCI_TOOL_REG_MINOR_NUM:

		switch (cmd) {
		case PCITOOL_DEVICE_SET_REG:
		case PCITOOL_DEVICE_GET_REG:

			/* Require full privileges. */
			if (secpolicy_kmdb(credp))
				rv = EPERM;
			else
				rv = pcitool_dev_reg_ops(dip, (void *)arg,
				    cmd, mode);
			break;

		case PCITOOL_NEXUS_SET_REG:
		case PCITOOL_NEXUS_GET_REG:

			/* Require full privileges. */
			if (secpolicy_kmdb(credp))
				rv = EPERM;
			else
				rv = pcitool_bus_reg_ops(dip, (void *)arg,
				    cmd, mode);
			break;
		}
		break;

	case PCI_TOOL_INTR_MINOR_NUM:

		switch (cmd) {
		case PCITOOL_DEVICE_SET_INTR:

			/* Require PRIV_SYS_RES_CONFIG, same as psradm */
			if (secpolicy_ponline(credp)) {
				rv = EPERM;
				break;
			}

		/*FALLTHRU*/
		/* These require no special privileges. */
		case PCITOOL_DEVICE_GET_INTR:
		case PCITOOL_DEVICE_NUM_INTR:
			rv = pcitool_intr_admn(dip, (void *)arg, cmd, mode);
			break;
		}
		break;

	/*
	 * All non-PCItool ioctls go through here, including:
	 *   devctl ioctls with minor number PCIHP_DEVCTL_MINOR and
	 *   those for attachment points, where the minor number is the
	 *   device number.
	 */
	default:
		rv = (pcihp_get_cb_ops())->cb_ioctl(dev, cmd, arg, mode,
		    credp, rvalp);
		break;
	}

	return (rv);
}


int
pci_common_ctlops_poke(peekpoke_ctlops_t *in_args)
{
	size_t size = in_args->size;
	uintptr_t dev_addr = in_args->dev_addr;
	uintptr_t host_addr = in_args->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)in_args->handle;
	ddi_acc_hdl_t *hdlp = (ddi_acc_hdl_t *)in_args->handle;
	size_t repcount = in_args->repcount;
	uint_t flags = in_args->flags;
	int err = DDI_SUCCESS;

	/*
	 * if no handle then this is a poke. We have to return failure here
	 * as we have no way of knowing whether this is a MEM or IO space access
	 */
	if (in_args->handle == NULL)
		return (DDI_FAILURE);

	/*
	 * rest of this function is actually for cautious puts
	 */
	for (; repcount; repcount--) {
		if (hp->ahi_acc_attr == DDI_ACCATTR_CONFIG_SPACE) {
			switch (size) {
			case sizeof (uint8_t):
				pci_config_wr8(hp, (uint8_t *)dev_addr,
				    *(uint8_t *)host_addr);
				break;
			case sizeof (uint16_t):
				pci_config_wr16(hp, (uint16_t *)dev_addr,
				    *(uint16_t *)host_addr);
				break;
			case sizeof (uint32_t):
				pci_config_wr32(hp, (uint32_t *)dev_addr,
				    *(uint32_t *)host_addr);
				break;
			case sizeof (uint64_t):
				pci_config_wr64(hp, (uint64_t *)dev_addr,
				    *(uint64_t *)host_addr);
				break;
			default:
				err = DDI_FAILURE;
				break;
			}
		} else if (hp->ahi_acc_attr & DDI_ACCATTR_IO_SPACE) {
			if (hdlp->ah_acc.devacc_attr_endian_flags ==
			    DDI_STRUCTURE_BE_ACC) {
				switch (size) {
				case sizeof (uint8_t):
					i_ddi_io_put8(hp,
					    (uint8_t *)dev_addr,
					    *(uint8_t *)host_addr);
					break;
				case sizeof (uint16_t):
					i_ddi_io_swap_put16(hp,
					    (uint16_t *)dev_addr,
					    *(uint16_t *)host_addr);
					break;
				case sizeof (uint32_t):
					i_ddi_io_swap_put32(hp,
					    (uint32_t *)dev_addr,
					    *(uint32_t *)host_addr);
					break;
				/*
				 * note the 64-bit case is a dummy
				 * function - so no need to swap
				 */
				case sizeof (uint64_t):
					i_ddi_io_put64(hp,
					    (uint64_t *)dev_addr,
					    *(uint64_t *)host_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			} else {
				switch (size) {
				case sizeof (uint8_t):
					i_ddi_io_put8(hp,
					    (uint8_t *)dev_addr,
					    *(uint8_t *)host_addr);
					break;
				case sizeof (uint16_t):
					i_ddi_io_put16(hp,
					    (uint16_t *)dev_addr,
					    *(uint16_t *)host_addr);
					break;
				case sizeof (uint32_t):
					i_ddi_io_put32(hp,
					    (uint32_t *)dev_addr,
					    *(uint32_t *)host_addr);
					break;
				case sizeof (uint64_t):
					i_ddi_io_put64(hp,
					    (uint64_t *)dev_addr,
					    *(uint64_t *)host_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			}
		} else {
			if (hdlp->ah_acc.devacc_attr_endian_flags ==
			    DDI_STRUCTURE_BE_ACC) {
				switch (size) {
				case sizeof (uint8_t):
					*(uint8_t *)dev_addr =
					    *(uint8_t *)host_addr;
					break;
				case sizeof (uint16_t):
					*(uint16_t *)dev_addr =
					    ddi_swap16(*(uint16_t *)host_addr);
					break;
				case sizeof (uint32_t):
					*(uint32_t *)dev_addr =
					    ddi_swap32(*(uint32_t *)host_addr);
					break;
				case sizeof (uint64_t):
					*(uint64_t *)dev_addr =
					    ddi_swap64(*(uint64_t *)host_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			} else {
				switch (size) {
				case sizeof (uint8_t):
					*(uint8_t *)dev_addr =
					    *(uint8_t *)host_addr;
					break;
				case sizeof (uint16_t):
					*(uint16_t *)dev_addr =
					    *(uint16_t *)host_addr;
					break;
				case sizeof (uint32_t):
					*(uint32_t *)dev_addr =
					    *(uint32_t *)host_addr;
					break;
				case sizeof (uint64_t):
					*(uint64_t *)dev_addr =
					    *(uint64_t *)host_addr;
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			}
		}
		host_addr += size;
		if (flags == DDI_DEV_AUTOINCR)
			dev_addr += size;
	}
	return (err);
}

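/*
 * pci_fm_acc_setup:
 *	Set up the access functions for a config space handle: gets always go
 *	through the cautious (fault-protected) routines, while puts do so only
 *	when the handle was mapped with DDI_CAUTIOUS_ACC.
 */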
int
pci_fm_acc_setup(ddi_acc_hdl_t *hp, off_t offset, off_t len)
{
	ddi_acc_impl_t	*ap = (ddi_acc_impl_t *)hp->ah_platform_private;

	/* endian-ness check */
	if (hp->ah_acc.devacc_attr_endian_flags == DDI_STRUCTURE_BE_ACC)
		return (DDI_FAILURE);

	/*
	 * range check
	 */
	if ((offset >= PCI_CONF_HDR_SIZE) ||
	    (len > PCI_CONF_HDR_SIZE) ||
	    (offset + len > PCI_CONF_HDR_SIZE))
		return (DDI_FAILURE);

	ap->ahi_acc_attr |= DDI_ACCATTR_CONFIG_SPACE;
	/*
	 * always use cautious mechanism for config space gets
	 */
	ap->ahi_get8 = i_ddi_caut_get8;
	ap->ahi_get16 = i_ddi_caut_get16;
	ap->ahi_get32 = i_ddi_caut_get32;
	ap->ahi_get64 = i_ddi_caut_get64;
	ap->ahi_rep_get8 = i_ddi_caut_rep_get8;
	ap->ahi_rep_get16 = i_ddi_caut_rep_get16;
	ap->ahi_rep_get32 = i_ddi_caut_rep_get32;
	ap->ahi_rep_get64 = i_ddi_caut_rep_get64;
	if (hp->ah_acc.devacc_attr_access == DDI_CAUTIOUS_ACC) {
		ap->ahi_put8 = i_ddi_caut_put8;
		ap->ahi_put16 = i_ddi_caut_put16;
		ap->ahi_put32 = i_ddi_caut_put32;
		ap->ahi_put64 = i_ddi_caut_put64;
		ap->ahi_rep_put8 = i_ddi_caut_rep_put8;
		ap->ahi_rep_put16 = i_ddi_caut_rep_put16;
		ap->ahi_rep_put32 = i_ddi_caut_rep_put32;
		ap->ahi_rep_put64 = i_ddi_caut_rep_put64;
	} else {
		ap->ahi_put8 = pci_config_wr8;
		ap->ahi_put16 = pci_config_wr16;
		ap->ahi_put32 = pci_config_wr32;
		ap->ahi_put64 = pci_config_wr64;
		ap->ahi_rep_put8 = pci_config_rep_wr8;
		ap->ahi_rep_put16 = pci_config_rep_wr16;
		ap->ahi_rep_put32 = pci_config_rep_wr32;
		ap->ahi_rep_put64 = pci_config_rep_wr64;
	}

	/* Initialize to default check/notify functions */
	ap->ahi_fault_check = i_ddi_acc_fault_check;
	ap->ahi_fault_notify = i_ddi_acc_fault_notify;
	ap->ahi_fault = 0;
	impl_acc_err_init(hp);
	return (DDI_SUCCESS);
}


int
pci_common_ctlops_peek(peekpoke_ctlops_t *in_args)
{
	size_t size = in_args->size;
	uintptr_t dev_addr = in_args->dev_addr;
	uintptr_t host_addr = in_args->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)in_args->handle;
	ddi_acc_hdl_t *hdlp = (ddi_acc_hdl_t *)in_args->handle;
	size_t repcount = in_args->repcount;
	uint_t flags = in_args->flags;
	int err = DDI_SUCCESS;

	/*
	 * if no handle then this is a peek. We have to return failure here
	 * as we have no way of knowing whether this is a MEM or IO space access
	 */
	if (in_args->handle == NULL)
		return (DDI_FAILURE);

	for (; repcount; repcount--) {
		if (hp->ahi_acc_attr == DDI_ACCATTR_CONFIG_SPACE) {
			switch (size) {
			case sizeof (uint8_t):
				*(uint8_t *)host_addr = pci_config_rd8(hp,
				    (uint8_t *)dev_addr);
				break;
			case sizeof (uint16_t):
				*(uint16_t *)host_addr = pci_config_rd16(hp,
				    (uint16_t *)dev_addr);
				break;
			case sizeof (uint32_t):
				*(uint32_t *)host_addr = pci_config_rd32(hp,
				    (uint32_t *)dev_addr);
				break;
			case sizeof (uint64_t):
				*(uint64_t *)host_addr = pci_config_rd64(hp,
				    (uint64_t *)dev_addr);
				break;
			default:
				err = DDI_FAILURE;
				break;
			}
		} else if (hp->ahi_acc_attr & DDI_ACCATTR_IO_SPACE) {
			if (hdlp->ah_acc.devacc_attr_endian_flags ==
			    DDI_STRUCTURE_BE_ACC) {
				switch (size) {
				case sizeof (uint8_t):
					*(uint8_t *)host_addr =
					    i_ddi_io_get8(hp,
					    (uint8_t *)dev_addr);
					break;
				case sizeof (uint16_t):
					*(uint16_t *)host_addr =
					    i_ddi_io_swap_get16(hp,
					    (uint16_t *)dev_addr);
					break;
				case sizeof (uint32_t):
					*(uint32_t *)host_addr =
					    i_ddi_io_swap_get32(hp,
					    (uint32_t *)dev_addr);
					break;
				/*
				 * note the 64-bit case is a dummy
				 * function - so no need to swap
				 */
				case sizeof (uint64_t):
					*(uint64_t *)host_addr =
					    i_ddi_io_get64(hp,
					    (uint64_t *)dev_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			} else {
				switch (size) {
				case sizeof (uint8_t):
					*(uint8_t *)host_addr =
					    i_ddi_io_get8(hp,
					    (uint8_t *)dev_addr);
					break;
				case sizeof (uint16_t):
					*(uint16_t *)host_addr =
					    i_ddi_io_get16(hp,
					    (uint16_t *)dev_addr);
					break;
				case sizeof (uint32_t):
					*(uint32_t *)host_addr =
					    i_ddi_io_get32(hp,
					    (uint32_t *)dev_addr);
					break;
				case sizeof (uint64_t):
					*(uint64_t *)host_addr =
					    i_ddi_io_get64(hp,
					    (uint64_t *)dev_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			}
		} else {
			if (hdlp->ah_acc.devacc_attr_endian_flags ==
			    DDI_STRUCTURE_BE_ACC) {
				switch (in_args->size) {
				case sizeof (uint8_t):
					*(uint8_t *)host_addr =
					    *(uint8_t *)dev_addr;
					break;
				case sizeof (uint16_t):
					*(uint16_t *)host_addr =
					    ddi_swap16(*(uint16_t *)dev_addr);
					break;
				case sizeof (uint32_t):
					*(uint32_t *)host_addr =
					    ddi_swap32(*(uint32_t *)dev_addr);
					break;
				case sizeof (uint64_t):
					*(uint64_t *)host_addr =
					    ddi_swap64(*(uint64_t *)dev_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			} else {
				switch (in_args->size) {
				case sizeof (uint8_t):
					*(uint8_t *)host_addr =
					    *(uint8_t *)dev_addr;
					break;
				case sizeof (uint16_t):
					*(uint16_t *)host_addr =
					    *(uint16_t *)dev_addr;
					break;
				case sizeof (uint32_t):
					*(uint32_t *)host_addr =
					    *(uint32_t *)dev_addr;
					break;
				case sizeof (uint64_t):
					*(uint64_t *)host_addr =
					    *(uint64_t *)dev_addr;
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			}
		}
		host_addr += size;
		if (flags == DDI_DEV_AUTOINCR)
			dev_addr += size;
	}
	return (err);
}

/*ARGSUSED*/
int
pci_common_peekpoke(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t ctlop, void *arg, void *result)
{
	if (ctlop == DDI_CTLOPS_PEEK)
		return (pci_common_ctlops_peek((peekpoke_ctlops_t *)arg));
	else
		return (pci_common_ctlops_poke((peekpoke_ctlops_t *)arg));
}

/*
 * These are the get and put functions to be shared with drivers. The
 * mutex locking is done inside the functions referenced, rather than
 * here, and is thus shared across PCI child drivers and any other
 * consumers of PCI config space (such as the ACPI subsystem).
 *
 * The configuration space addresses come in as pointers.  This is fine on
 * a 32-bit system, where the VM space and configuration space are the same
 * size.  It's not such a good idea on a 64-bit system, where memory
 * addresses are twice as large as configuration space addresses.  At some
 * point in the call tree we need to take a stand and say "you are 32-bit
 * from this time forth", and this seems like a nice self-contained place.
 */
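/*
 * Concretely, the "addr" argument below carries only the config space
 * register offset (e.g. 0x0 for the vendor ID), while the bus, device and
 * function numbers come from the pci_acc_cfblk_t stashed in the handle's
 * ah_bus_private, so truncating the pointer to an int loses nothing.
 */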

uint8_t
pci_config_rd8(ddi_acc_impl_t *hdlp, uint8_t *addr)
{
	pci_acc_cfblk_t *cfp;
	uint8_t	rval;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	rval = (*pci_getb_func)(cfp->c_busnum, cfp->c_devnum, cfp->c_funcnum,
	    reg);

	return (rval);
}

void
pci_config_rep_rd8(ddi_acc_impl_t *hdlp, uint8_t *host_addr,
    uint8_t *dev_addr, size_t repcount, uint_t flags)
{
	uint8_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*h++ = pci_config_rd8(hdlp, d++);
	else
		for (; repcount; repcount--)
			*h++ = pci_config_rd8(hdlp, d);
}

uint16_t
pci_config_rd16(ddi_acc_impl_t *hdlp, uint16_t *addr)
{
	pci_acc_cfblk_t *cfp;
	uint16_t rval;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	rval = (*pci_getw_func)(cfp->c_busnum, cfp->c_devnum, cfp->c_funcnum,
	    reg);

	return (rval);
}

void
pci_config_rep_rd16(ddi_acc_impl_t *hdlp, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	uint16_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*h++ = pci_config_rd16(hdlp, d++);
	else
		for (; repcount; repcount--)
			*h++ = pci_config_rd16(hdlp, d);
}

uint32_t
pci_config_rd32(ddi_acc_impl_t *hdlp, uint32_t *addr)
{
	pci_acc_cfblk_t *cfp;
	uint32_t rval;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	rval = (*pci_getl_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg);

	return (rval);
}

void
pci_config_rep_rd32(ddi_acc_impl_t *hdlp, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	uint32_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*h++ = pci_config_rd32(hdlp, d++);
	else
		for (; repcount; repcount--)
			*h++ = pci_config_rd32(hdlp, d);
}

void
pci_config_wr8(ddi_acc_impl_t *hdlp, uint8_t *addr, uint8_t value)
{
	pci_acc_cfblk_t *cfp;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	(*pci_putb_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg, value);
}

void
pci_config_rep_wr8(ddi_acc_impl_t *hdlp, uint8_t *host_addr,
    uint8_t *dev_addr, size_t repcount, uint_t flags)
{
	uint8_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			pci_config_wr8(hdlp, d++, *h++);
	else
		for (; repcount; repcount--)
			pci_config_wr8(hdlp, d, *h++);
}

void
pci_config_wr16(ddi_acc_impl_t *hdlp, uint16_t *addr, uint16_t value)
{
	pci_acc_cfblk_t *cfp;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	(*pci_putw_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg, value);
}

void
pci_config_rep_wr16(ddi_acc_impl_t *hdlp, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	uint16_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			pci_config_wr16(hdlp, d++, *h++);
	else
		for (; repcount; repcount--)
			pci_config_wr16(hdlp, d, *h++);
}

void
pci_config_wr32(ddi_acc_impl_t *hdlp, uint32_t *addr, uint32_t value)
{
	pci_acc_cfblk_t *cfp;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	(*pci_putl_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg, value);
}

void
pci_config_rep_wr32(ddi_acc_impl_t *hdlp, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	uint32_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			pci_config_wr32(hdlp, d++, *h++);
	else
		for (; repcount; repcount--)
			pci_config_wr32(hdlp, d, *h++);
}

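/*
 * The 64-bit config accessors below are composed of two 32-bit accesses
 * (low dword first, then high), so a 64-bit config read or write is not
 * atomic with respect to the device.
 */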
uint64_t
pci_config_rd64(ddi_acc_impl_t *hdlp, uint64_t *addr)
{
	uint32_t lw_val;
	uint32_t hi_val;
	uint32_t *dp;
	uint64_t val;

	dp = (uint32_t *)addr;
	lw_val = pci_config_rd32(hdlp, dp);
	dp++;
	hi_val = pci_config_rd32(hdlp, dp);
	val = ((uint64_t)hi_val << 32) | lw_val;
	return (val);
}

void
pci_config_wr64(ddi_acc_impl_t *hdlp, uint64_t *addr, uint64_t value)
{
	uint32_t lw_val;
	uint32_t hi_val;
	uint32_t *dp;

	dp = (uint32_t *)addr;
	lw_val = (uint32_t)(value & 0xffffffff);
	hi_val = (uint32_t)(value >> 32);
	pci_config_wr32(hdlp, dp, lw_val);
	dp++;
	pci_config_wr32(hdlp, dp, hi_val);
}

void
pci_config_rep_rd64(ddi_acc_impl_t *hdlp, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR) {
		for (; repcount; repcount--)
			*host_addr++ = pci_config_rd64(hdlp, dev_addr++);
	} else {
		for (; repcount; repcount--)
			*host_addr++ = pci_config_rd64(hdlp, dev_addr);
	}
}

void
pci_config_rep_wr64(ddi_acc_impl_t *hdlp, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR) {
		for (; repcount; repcount--)
			pci_config_wr64(hdlp, dev_addr++, *host_addr++);
	} else {
		for (; repcount; repcount--)
			pci_config_wr64(hdlp, dev_addr, *host_addr++);
	}
}


/*
 * Enable Legacy PCI config space access for the following four north bridges
 *	Host bridge: AMD HyperTransport Technology Configuration
 *	Host bridge: AMD Address Map
 *	Host bridge: AMD DRAM Controller
 *	Host bridge: AMD Miscellaneous Control
 */
int
is_amd_northbridge(dev_info_t *dip)
{
	int vendor_id, device_id;

	vendor_id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "vendor-id", -1);
	device_id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "device-id", -1);

	if (IS_AMD_NTBRIDGE(vendor_id, device_id))
		return (0);

	return (1);
}