/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Code that is common between pci(7d) and npe(7d).
 * It shares the following:
 *	- interrupt code
 *	- pci_tools ioctl code
 *	- name_child code
 *	- set_parent_private_data code
 */

#include <sys/conf.h>
#include <sys/pci.h>
#include <sys/sunndi.h>
#include <sys/mach_intr.h>
#include <sys/hotplug/pci/pcihp.h>
#include <sys/pci_intr_lib.h>
#include <sys/psm.h>
#include <sys/policy.h>
#include <sys/sysmacros.h>
#include <sys/clock.h>
#include <io/pcplusmp/apic.h>
#include <sys/pci_tools.h>
#include <io/pci/pci_var.h>
#include <io/pci/pci_tools_ext.h>
#include <io/pci/pci_common.h>
#include <sys/pci_cfgspace.h>
#include <sys/pci_impl.h>

/*
 * Function prototypes
 */
static int	pci_get_priority(dev_info_t *, ddi_intr_handle_impl_t *, int *);
static int	pci_get_nintrs(dev_info_t *, int, int *);
static int	pci_enable_intr(dev_info_t *, dev_info_t *,
		    ddi_intr_handle_impl_t *, uint32_t);
static void	pci_disable_intr(dev_info_t *, dev_info_t *,
		    ddi_intr_handle_impl_t *, uint32_t);

/* Extern declaration for pcplusmp module */
extern int	(*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
		    psm_intr_op_t, int *);


/*
 * pci_common_name_child:
 *
 * Assign the address portion of the node name
 */
int
pci_common_name_child(dev_info_t *child, char *name, int namelen)
{
	int		dev, func, length;
	char		**unit_addr;
	uint_t		n;
	pci_regspec_t	*pci_rp;

	if (ndi_dev_is_persistent_node(child) == 0) {
		/*
		 * For .conf nodes, use the "unit-address" property
		 */
		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, child,
		    DDI_PROP_DONTPASS, "unit-address", &unit_addr, &n) !=
		    DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "cannot find unit-address in %s.conf",
			    ddi_get_name(child));
			return (DDI_FAILURE);
		}
		if (n != 1 || *unit_addr == NULL || **unit_addr == 0) {
			cmn_err(CE_WARN, "unit-address property in %s.conf"
			    " not well-formed", ddi_get_name(child));
			ddi_prop_free(unit_addr);
			return (DDI_FAILURE);
		}
		(void) snprintf(name, namelen, "%s", *unit_addr);
		ddi_prop_free(unit_addr);
		return (DDI_SUCCESS);
	}

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "reg", (int **)&pci_rp, (uint_t *)&length) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cannot find reg property in %s",
		    ddi_get_name(child));
		return (DDI_FAILURE);
	}

	/* copy the device identifications */
	dev = PCI_REG_DEV_G(pci_rp->pci_phys_hi);
	func = PCI_REG_FUNC_G(pci_rp->pci_phys_hi);

	/*
	 * free the memory allocated by ddi_prop_lookup_int_array
	 */
	ddi_prop_free(pci_rp);

	if (func != 0) {
		(void) snprintf(name, namelen, "%x,%x", dev, func);
	} else {
		(void) snprintf(name, namelen, "%x", dev);
	}

	return (DDI_SUCCESS);
}

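/*
 * Illustrative sketch only (guarded out, not compiled): how a parent nexus
 * might use pci_common_name_child() from its DDI_CTLOPS_INITCHILD handling.
 * The xxx_initchild name and the scratch buffer size are hypothetical; the
 * real pci/npe initchild paths differ in detail.
 */
#if 0
static int
xxx_initchild(dev_info_t *child)
{
	char name[80];		/* hypothetical scratch buffer */

	/* Build the "devnum,funcnum" address string, e.g. "6,1" */
	if (pci_common_name_child(child, name, sizeof (name)) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* Attach the address portion to the child node name */
	ddi_set_name_addr(child, name);

	/* .conf (pseudo) children also get parent private data */
	if (ndi_dev_is_persistent_node(child) == 0)
		pci_common_set_parent_private_data(child);

	return (DDI_SUCCESS);
}
#endif
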
/*
 * Interrupt related code:
 *
 * The following busop is common to npe and pci drivers
 *	bus_introp
 */

/*
 * Create the ddi_parent_private_data for a pseudo child.
 */
void
pci_common_set_parent_private_data(dev_info_t *dip)
{
	struct ddi_parent_private_data *pdptr;

	pdptr = (struct ddi_parent_private_data *)kmem_zalloc(
	    (sizeof (struct ddi_parent_private_data) +
	    sizeof (struct intrspec)), KM_SLEEP);
	pdptr->par_intr = (struct intrspec *)(pdptr + 1);
	pdptr->par_nintr = 1;
	ddi_set_parent_data(dip, pdptr);
}

/*
 * pci_get_priority:
 *	Figure out the priority of the device
 */
static int
pci_get_priority(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp, int *pri)
{
	struct intrspec *ispec;

	DDI_INTR_NEXDBG((CE_CONT, "pci_get_priority: dip = 0x%p, hdlp = %p\n",
	    (void *)dip, (void *)hdlp));

	if ((ispec = (struct intrspec *)pci_intx_get_ispec(dip, dip,
	    hdlp->ih_inum)) == NULL) {
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type)) {
			int class = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "class-code", -1);

			*pri = (class == -1) ? 1 : pci_devclass_to_ipl(class);
			pci_common_set_parent_private_data(hdlp->ih_dip);
			ispec = (struct intrspec *)pci_intx_get_ispec(dip, dip,
			    hdlp->ih_inum);
			return (DDI_SUCCESS);
		}
		return (DDI_FAILURE);
	}

	*pri = ispec->intrspec_pri;
	return (DDI_SUCCESS);
}


/*
 * pci_get_nintrs:
 *	Figure out how many interrupts the device supports
 */
static int
pci_get_nintrs(dev_info_t *dip, int type, int *nintrs)
{
	int	ret;

	*nintrs = 0;

	if (DDI_INTR_IS_MSI_OR_MSIX(type))
		ret = pci_msi_get_nintrs(dip, type, nintrs);
	else {
		ret = DDI_FAILURE;
		if (ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "interrupts", -1) != -1) {
			*nintrs = 1;
			ret = DDI_SUCCESS;
		}
	}

	return (ret);
}

static int pcie_pci_intr_pri_counter = 0;

/*
 * pci_common_intr_ops: bus_intr_op() function for interrupt support
 */
int
pci_common_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	int			priority = 0;
	int			psm_status = 0;
	int			pci_status = 0;
	int			pci_rval, psm_rval = PSM_FAILURE;
	int			types = 0;
	int			pciepci = 0;
	int			i, j, count;
	int			behavior;
	int			cap_ptr;
	ddi_intrspec_t		isp;
	struct intrspec		*ispec;
	ddi_intr_handle_impl_t	tmp_hdl;
	ddi_intr_msix_t		*msix_p;
	ihdl_plat_t		*ihdl_plat_datap;
	ddi_intr_handle_t	*h_array;
	ddi_acc_handle_t	handle;

	DDI_INTR_NEXDBG((CE_CONT,
	    "pci_common_intr_ops: pdip 0x%p, rdip 0x%p, op %x handle 0x%p\n",
	    (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));

	/* Process the request */
	switch (intr_op) {
	case DDI_INTROP_SUPPORTED_TYPES:
		/* Fixed is supported by default */
		*(int *)result = DDI_INTR_TYPE_FIXED;

		/* Figure out if MSI or MSI-X is supported */
		if (pci_msi_get_supported_type(rdip, &types) != DDI_SUCCESS)
			return (DDI_SUCCESS);

		if (psm_intr_ops != NULL) {
			/*
			 * Only support MSI for now, OR it in
			 */
			*(int *)result |= (types & DDI_INTR_TYPE_MSI);

			tmp_hdl.ih_type = *(int *)result;
			(void) (*psm_intr_ops)(rdip, &tmp_hdl,
			    PSM_INTR_OP_CHECK_MSI, result);
			DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
			    "rdip: 0x%p supported types: 0x%x\n", (void *)rdip,
			    *(int *)result));
		}
		break;
	case DDI_INTROP_NINTRS:
		if (pci_get_nintrs(rdip, hdlp->ih_type, result) != DDI_SUCCESS)
			return (DDI_FAILURE);
		break;
	case DDI_INTROP_ALLOC:
		/*
		 * MSI or MSIX (figure out number of vectors available)
		 * FIXED interrupts: just return available interrupts
		 */
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type) &&
		    (psm_intr_ops != NULL) &&
		    (pci_get_priority(rdip, hdlp, &priority) == DDI_SUCCESS)) {
			/*
			 * The following check is a special case for
			 * 'pcie_pci'.  It makes sure vectors with the
			 * right priority are allocated for pcie_pci
			 * during ALLOC time.
			 */
			if (strcmp(ddi_driver_name(rdip), "pcie_pci") == 0) {
				hdlp->ih_pri =
				    (pcie_pci_intr_pri_counter % 2) ? 4 : 7;
				pciepci = 1;
			} else
				hdlp->ih_pri = priority;
			behavior = (int)(uintptr_t)hdlp->ih_scratch2;

			/*
			 * Cache the config handle and cap_ptr
			 */
			if (i_ddi_get_pci_config_handle(rdip) == NULL) {
				if (pci_config_setup(rdip, &handle) !=
				    DDI_SUCCESS)
					return (DDI_FAILURE);
				i_ddi_set_pci_config_handle(rdip, handle);
			}

			if (i_ddi_get_msi_msix_cap_ptr(rdip) == 0) {
				char *prop =
				    (hdlp->ih_type == DDI_INTR_TYPE_MSI) ?
				    "pci-msi-capid-pointer" :
				    "pci-msix-capid-pointer";

				cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, rdip,
				    DDI_PROP_DONTPASS, prop,
				    PCI_CAP_NEXT_PTR_NULL);
				i_ddi_set_msi_msix_cap_ptr(rdip, cap_ptr);
			}

			(void) (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_ALLOC_VECTORS, result);

			/* verify behavior flag and take appropriate action */
			if ((behavior == DDI_INTR_ALLOC_STRICT) &&
			    (*(int *)result < hdlp->ih_scratch1)) {
				DDI_INTR_NEXDBG((CE_CONT,
				    "pci_common_intr_ops: behavior %x, "
				    "couldn't get enough intrs\n", behavior));
				hdlp->ih_scratch1 = *(int *)result;
				(void) (*psm_intr_ops)(rdip, hdlp,
				    PSM_INTR_OP_FREE_VECTORS, NULL);
				return (DDI_EAGAIN);
			}

			if (hdlp->ih_type == DDI_INTR_TYPE_MSIX) {
				if (!(msix_p = i_ddi_get_msix(hdlp->ih_dip))) {
					msix_p = pci_msix_init(hdlp->ih_dip);
					if (msix_p)
						i_ddi_set_msix(hdlp->ih_dip,
						    msix_p);
				}
			}

			if (pciepci) {
				/* update priority in ispec */
				isp = pci_intx_get_ispec(pdip, rdip,
				    (int)hdlp->ih_inum);
				ispec = (struct intrspec *)isp;
				if (ispec)
					ispec->intrspec_pri = hdlp->ih_pri;
				++pcie_pci_intr_pri_counter;
			}

		} else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED) {
			/* Figure out if this device supports MASKING */
			pci_rval = pci_intx_get_cap(rdip, &pci_status);
			if (pci_rval == DDI_SUCCESS && pci_status)
				hdlp->ih_cap |= pci_status;
			*(int *)result = 1;	/* DDI_INTR_TYPE_FIXED */
		} else
			return (DDI_FAILURE);
		break;
	case DDI_INTROP_FREE:
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type) &&
		    (psm_intr_ops != NULL)) {
			if (i_ddi_intr_get_current_nintrs(hdlp->ih_dip) - 1 ==
			    0) {
				if (handle = i_ddi_get_pci_config_handle(
				    rdip)) {
					(void) pci_config_teardown(&handle);
					i_ddi_set_pci_config_handle(rdip, NULL);
				}
				if (cap_ptr = i_ddi_get_msi_msix_cap_ptr(rdip))
					i_ddi_set_msi_msix_cap_ptr(rdip, 0);
			}

			(void) (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_FREE_VECTORS, NULL);

			if (hdlp->ih_type == DDI_INTR_TYPE_MSIX) {
				msix_p = i_ddi_get_msix(hdlp->ih_dip);
				if (msix_p &&
				    (i_ddi_intr_get_current_nintrs(
				    hdlp->ih_dip) - 1) == 0) {
					pci_msix_fini(msix_p);
					i_ddi_set_msix(hdlp->ih_dip, NULL);
				}
			}
		}
		break;
	case DDI_INTROP_GETPRI:
		/* Get the priority */
		if (pci_get_priority(rdip, hdlp, &priority) != DDI_SUCCESS)
			return (DDI_FAILURE);
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "priority = 0x%x\n", priority));
		*(int *)result = priority;
		break;
	case DDI_INTROP_SETPRI:
		/* Validate the interrupt priority passed */
		if (*(int *)result > LOCK_LEVEL)
			return (DDI_FAILURE);

		/* Ensure that PSM is all initialized */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		/* Change the priority */
		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) ==
		    PSM_FAILURE)
			return (DDI_FAILURE);

		/* update ispec */
		isp = pci_intx_get_ispec(pdip, rdip, (int)hdlp->ih_inum);
		ispec = (struct intrspec *)isp;
		if (ispec)
			ispec->intrspec_pri = *(int *)result;
		break;
	case DDI_INTROP_ADDISR:
		/* update ispec */
		isp = pci_intx_get_ispec(pdip, rdip, (int)hdlp->ih_inum);
		ispec = (struct intrspec *)isp;
		if (ispec) {
			ispec->intrspec_func = hdlp->ih_cb_func;
			ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;
			pci_kstat_create(&ihdl_plat_datap->ip_ksp, pdip, hdlp);
		}
		break;
	case DDI_INTROP_REMISR:
		/* Get the interrupt structure pointer */
		isp = pci_intx_get_ispec(pdip, rdip, (int)hdlp->ih_inum);
		ispec = (struct intrspec *)isp;
		if (ispec) {
			ispec->intrspec_func = (uint_t (*)()) 0;
			ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;
			if (ihdl_plat_datap->ip_ksp != NULL)
				pci_kstat_delete(ihdl_plat_datap->ip_ksp);
		}
		break;
	case DDI_INTROP_GETCAP:
		/*
		 * First check the config space and/or
		 * MSI capability register(s)
		 */
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
			pci_rval = pci_msi_get_cap(rdip, hdlp->ih_type,
			    &pci_status);
		else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			pci_rval = pci_intx_get_cap(rdip, &pci_status);

		/* next check with pcplusmp */
		if (psm_intr_ops != NULL)
			psm_rval = (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_GET_CAP, &psm_status);

		DDI_INTR_NEXDBG((CE_CONT, "pci: GETCAP returned psm_rval = %x, "
		    "psm_status = %x, pci_rval = %x, pci_status = %x\n",
		    psm_rval, psm_status, pci_rval, pci_status));

		if (psm_rval == PSM_FAILURE && pci_rval == DDI_FAILURE) {
			*(int *)result = 0;
			return (DDI_FAILURE);
		}

		if (psm_rval == PSM_SUCCESS)
			*(int *)result = psm_status;

		if (pci_rval == DDI_SUCCESS)
			*(int *)result |= pci_status;

		DDI_INTR_NEXDBG((CE_CONT, "pci: GETCAP returned = %x\n",
		    *(int *)result));
		break;
	case DDI_INTROP_SETCAP:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "SETCAP cap=0x%x\n", *(int *)result));
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result)) {
			DDI_INTR_NEXDBG((CE_CONT, "SETCAP: psm_intr_ops"
			    " returned failure\n"));
			return (DDI_FAILURE);
		}
		break;
	case DDI_INTROP_ENABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: ENABLE\n"));
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		if (pci_enable_intr(pdip, rdip, hdlp, hdlp->ih_inum) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);

		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: ENABLE "
		    "vector=0x%x\n", hdlp->ih_vector));
		break;
	case DDI_INTROP_DISABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: DISABLE\n"));
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		pci_disable_intr(pdip, rdip, hdlp, hdlp->ih_inum);
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: DISABLE "
		    "vector = %x\n", hdlp->ih_vector));
		break;
	case DDI_INTROP_BLOCKENABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "BLOCKENABLE\n"));
		if (hdlp->ih_type != DDI_INTR_TYPE_MSI) {
			DDI_INTR_NEXDBG((CE_CONT, "BLOCKENABLE: not MSI\n"));
			return (DDI_FAILURE);
		}

		/* Check if psm_intr_ops is present */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		count = hdlp->ih_scratch1;
		h_array = (ddi_intr_handle_t *)hdlp->ih_scratch2;
		for (i = 0; i < count; i++) {
			hdlp = (ddi_intr_handle_impl_t *)h_array[i];
			if (pci_enable_intr(pdip, rdip, hdlp,
			    hdlp->ih_inum) != DDI_SUCCESS) {
				DDI_INTR_NEXDBG((CE_CONT, "BLOCKENABLE: "
				    "pci_enable_intr failed for %d\n", i));
				for (j = 0; j < i; j++) {
					hdlp = (ddi_intr_handle_impl_t *)
					    h_array[j];
					pci_disable_intr(pdip, rdip, hdlp,
					    hdlp->ih_inum);
				}
				return (DDI_FAILURE);
			}
			DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
			    "BLOCKENABLE inum %x done\n", hdlp->ih_inum));
		}
		break;
	case DDI_INTROP_BLOCKDISABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "BLOCKDISABLE\n"));
		if (hdlp->ih_type != DDI_INTR_TYPE_MSI) {
			DDI_INTR_NEXDBG((CE_CONT, "BLOCKDISABLE: not MSI\n"));
			return (DDI_FAILURE);
		}

		/* Check if psm_intr_ops is present */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		count = hdlp->ih_scratch1;
		h_array = (ddi_intr_handle_t *)hdlp->ih_scratch2;
		for (i = 0; i < count; i++) {
			hdlp = (ddi_intr_handle_impl_t *)h_array[i];
			pci_disable_intr(pdip, rdip, hdlp, hdlp->ih_inum);
			DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
			    "BLOCKDISABLE inum %x done\n", hdlp->ih_inum));
		}
		break;
	case DDI_INTROP_SETMASK:
	case DDI_INTROP_CLRMASK:
		/*
		 * First handle in the config space
		 */
		if (intr_op == DDI_INTROP_SETMASK) {
			if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
				pci_status = pci_msi_set_mask(rdip,
				    hdlp->ih_type, hdlp->ih_inum);
			else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
				pci_status = pci_intx_set_mask(rdip);
		} else {
			if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
				pci_status = pci_msi_clr_mask(rdip,
				    hdlp->ih_type, hdlp->ih_inum);
			else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
				pci_status = pci_intx_clr_mask(rdip);
		}

		/* For MSI/X; no need to check with pcplusmp */
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (pci_status);

		/* For fixed interrupts only: handle config space first */
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED &&
		    pci_status == DDI_SUCCESS)
			break;

		/* For fixed interrupts only: confer with pcplusmp next */
		if (psm_intr_ops != NULL) {
			/* If interrupt is shared; do nothing */
			psm_rval = (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_GET_SHARED, &psm_status);

			if (psm_rval == PSM_FAILURE || psm_status == 1)
				return (pci_status);

			/* Now, pcplusmp should try to set/clear the mask */
			if (intr_op == DDI_INTROP_SETMASK)
				psm_rval = (*psm_intr_ops)(rdip, hdlp,
				    PSM_INTR_OP_SET_MASK, NULL);
			else
				psm_rval = (*psm_intr_ops)(rdip, hdlp,
				    PSM_INTR_OP_CLEAR_MASK, NULL);
		}
		return ((psm_rval == PSM_FAILURE) ? DDI_FAILURE : DDI_SUCCESS);
	case DDI_INTROP_GETPENDING:
		/*
		 * First check the config space and/or
		 * MSI capability register(s)
		 */
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
			pci_rval = pci_msi_get_pending(rdip, hdlp->ih_type,
			    hdlp->ih_inum, &pci_status);
		else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			pci_rval = pci_intx_get_pending(rdip, &pci_status);

		/* On failure; next try with pcplusmp */
		if (pci_rval != DDI_SUCCESS && psm_intr_ops != NULL)
			psm_rval = (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_GET_PENDING, &psm_status);

		DDI_INTR_NEXDBG((CE_CONT, "pci: GETPENDING returned "
		    "psm_rval = %x, psm_status = %x, pci_rval = %x, "
		    "pci_status = %x\n", psm_rval, psm_status, pci_rval,
		    pci_status));
		if (psm_rval == PSM_FAILURE && pci_rval == DDI_FAILURE) {
			*(int *)result = 0;
			return (DDI_FAILURE);
		}

		if (psm_rval != PSM_FAILURE)
			*(int *)result = psm_status;
		else if (pci_rval != DDI_FAILURE)
			*(int *)result = pci_status;
		DDI_INTR_NEXDBG((CE_CONT, "pci: GETPENDING returned = %x\n",
		    *(int *)result));
		break;
	case DDI_INTROP_NAVAIL:
		if ((psm_intr_ops != NULL) && (pci_get_priority(rdip,
		    hdlp, &priority) == DDI_SUCCESS)) {
			/* Priority in the handle not initialized yet */
			hdlp->ih_pri = priority;
			(void) (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_NAVAIL_VECTORS, result);
		} else {
			*(int *)result = 1;
		}
		DDI_INTR_NEXDBG((CE_CONT, "pci: NAVAIL returned = %x\n",
		    *(int *)result));
		break;
	default:
		return (i_ddi_intr_ops(pdip, rdip, intr_op, hdlp, result));
	}

	return (DDI_SUCCESS);
}

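/*
 * The cases above are normally reached through the generic DDI interrupt
 * interfaces used by a leaf driver.  The following is an illustrative
 * sketch only (guarded out, hypothetical xxx_* names): a leaf driver
 * allocating a single MSI, which flows through DDI_INTROP_SUPPORTED_TYPES,
 * DDI_INTROP_ALLOC, DDI_INTROP_GETPRI, DDI_INTROP_ADDISR and
 * DDI_INTROP_ENABLE in pci_common_intr_ops() above.
 */
#if 0
static int
xxx_add_msi(dev_info_t *dip, ddi_intr_handle_t *hdl,
    uint_t (*xxx_isr)(caddr_t, caddr_t), void *arg)
{
	int	types, actual;
	uint_t	pri;

	/* DDI_INTROP_SUPPORTED_TYPES */
	if (ddi_intr_get_supported_types(dip, &types) != DDI_SUCCESS ||
	    !(types & DDI_INTR_TYPE_MSI))
		return (DDI_FAILURE);

	/* DDI_INTROP_ALLOC; the behavior flag ends up in ih_scratch2 */
	if (ddi_intr_alloc(dip, hdl, DDI_INTR_TYPE_MSI, 0, 1, &actual,
	    DDI_INTR_ALLOC_NORMAL) != DDI_SUCCESS || actual == 0)
		return (DDI_FAILURE);

	/* DDI_INTROP_GETPRI */
	(void) ddi_intr_get_pri(hdl[0], &pri);

	/* DDI_INTROP_ADDISR and DDI_INTROP_ENABLE (pci_enable_intr() below) */
	if (ddi_intr_add_handler(hdl[0], xxx_isr, arg, NULL) != DDI_SUCCESS ||
	    ddi_intr_enable(hdl[0]) != DDI_SUCCESS) {
		(void) ddi_intr_free(hdl[0]);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
#endif
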
int
pci_get_intr_from_vecirq(apic_get_intr_t *intrinfo_p,
    int vecirq, boolean_t is_irq)
{
	ddi_intr_handle_impl_t	get_info_ii_hdl;

	if (is_irq)
		intrinfo_p->avgi_req_flags |= PSMGI_INTRBY_IRQ;

	/*
	 * For this locally-declared and used handle, ih_private will contain
	 * a pointer to apic_get_intr_t, not an ihdl_plat_t as used for
	 * global interrupt handling.
	 */
	get_info_ii_hdl.ih_private = intrinfo_p;
	get_info_ii_hdl.ih_vector = (ushort_t)vecirq;

	if ((*psm_intr_ops)(NULL, &get_info_ii_hdl,
	    PSM_INTR_OP_GET_INTR, NULL) == PSM_FAILURE)
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}


int
pci_get_cpu_from_vecirq(int vecirq, boolean_t is_irq)
{
	int rval;

	apic_get_intr_t intrinfo;
	intrinfo.avgi_req_flags = PSMGI_REQ_CPUID;
	rval = pci_get_intr_from_vecirq(&intrinfo, vecirq, is_irq);

	if (rval == DDI_SUCCESS)
		return (intrinfo.avgi_cpu_id);
	else
		return (-1);
}


static int
pci_enable_intr(dev_info_t *pdip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, uint32_t inum)
{
	struct intrspec	*ispec;
	int		irq;
	ihdl_plat_t	*ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;

	DDI_INTR_NEXDBG((CE_CONT, "pci_enable_intr: hdlp %p inum %x\n",
	    (void *)hdlp, inum));

	/* Look up the interrupt spec and cache it in the handle */
	ispec = (struct intrspec *)pci_intx_get_ispec(pdip, rdip, (int)inum);
	if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type) && ispec)
		ispec->intrspec_vec = inum;
	ihdl_plat_datap->ip_ispecp = ispec;

	/* translate the interrupt if needed */
	(void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR, &irq);
	DDI_INTR_NEXDBG((CE_CONT, "pci_enable_intr: priority=%x irq=%x\n",
	    hdlp->ih_pri, irq));

	/* Add the interrupt handler */
	if (!add_avintr((void *)hdlp, hdlp->ih_pri, hdlp->ih_cb_func,
	    DEVI(rdip)->devi_name, irq, hdlp->ih_cb_arg1,
	    hdlp->ih_cb_arg2, &ihdl_plat_datap->ip_ticks, rdip))
		return (DDI_FAILURE);

	/* Note this really is an irq. */
	hdlp->ih_vector = (ushort_t)irq;

	return (DDI_SUCCESS);
}


static void
pci_disable_intr(dev_info_t *pdip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, uint32_t inum)
{
	int		irq;
	struct intrspec	*ispec;
	ihdl_plat_t	*ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;

	DDI_INTR_NEXDBG((CE_CONT, "pci_disable_intr:\n"));
	ispec = (struct intrspec *)pci_intx_get_ispec(pdip, rdip, (int)inum);
	if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type) && ispec)
		ispec->intrspec_vec = inum;
	ihdl_plat_datap->ip_ispecp = ispec;

	/* translate the interrupt if needed */
	(void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR, &irq);

	/* Disable the interrupt handler */
	rem_avintr((void *)hdlp, hdlp->ih_pri, hdlp->ih_cb_func, irq);
	ihdl_plat_datap->ip_ispecp = NULL;
}

/*
 * Miscellaneous library function
 */
int
pci_common_get_reg_prop(dev_info_t *dip, pci_regspec_t *pci_rp)
{
	int		i;
	int		number;
	int		assigned_addr_len;
	uint_t		phys_hi = pci_rp->pci_phys_hi;
	pci_regspec_t	*assigned_addr;

	if (((phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) ||
	    (phys_hi & PCI_RELOCAT_B))
		return (DDI_SUCCESS);

	/*
	 * The "reg" entry is relocatable; get and interpret the
	 * "assigned-addresses" property.
	 */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "assigned-addresses", (int **)&assigned_addr,
	    (uint_t *)&assigned_addr_len) != DDI_PROP_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Scan the "assigned-addresses" for one that matches the specified
	 * "reg" property entry.
	 */
	phys_hi &= PCI_CONF_ADDR_MASK;
	number = assigned_addr_len / (sizeof (pci_regspec_t) / sizeof (int));
	for (i = 0; i < number; i++) {
		if ((assigned_addr[i].pci_phys_hi & PCI_CONF_ADDR_MASK) ==
		    phys_hi) {
			pci_rp->pci_phys_mid = assigned_addr[i].pci_phys_mid;
			pci_rp->pci_phys_low = assigned_addr[i].pci_phys_low;
			ddi_prop_free(assigned_addr);
			return (DDI_SUCCESS);
		}
	}

	ddi_prop_free(assigned_addr);
	return (DDI_FAILURE);
}

/*
 * For pci_tools
 */

int
pci_common_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg,
    int mode, cred_t *credp, int *rvalp)
{
	int rv = ENOTTY;

	minor_t minor = getminor(dev);

	switch (PCIHP_AP_MINOR_NUM_TO_PCI_DEVNUM(minor)) {
	case PCI_TOOL_REG_MINOR_NUM:

		switch (cmd) {
		case PCITOOL_DEVICE_SET_REG:
		case PCITOOL_DEVICE_GET_REG:

			/* Require full privileges. */
			if (secpolicy_kmdb(credp))
				rv = EPERM;
			else
				rv = pcitool_dev_reg_ops(dip, (void *)arg,
				    cmd, mode);
			break;

		case PCITOOL_NEXUS_SET_REG:
		case PCITOOL_NEXUS_GET_REG:

			/* Require full privileges. */
			if (secpolicy_kmdb(credp))
				rv = EPERM;
			else
				rv = pcitool_bus_reg_ops(dip, (void *)arg,
				    cmd, mode);
			break;
		}
		break;

	case PCI_TOOL_INTR_MINOR_NUM:

		switch (cmd) {
		case PCITOOL_DEVICE_SET_INTR:

			/* Require PRIV_SYS_RES_CONFIG, same as psradm */
			if (secpolicy_ponline(credp)) {
				rv = EPERM;
				break;
			}

		/*FALLTHRU*/
		/* These require no special privileges. */
		case PCITOOL_DEVICE_GET_INTR:
		case PCITOOL_DEVICE_NUM_INTR:
			rv = pcitool_intr_admn(dip, (void *)arg, cmd, mode);
			break;
		}
		break;

	/*
	 * All non-PCItool ioctls go through here, including:
	 *	devctl ioctls with minor number PCIHP_DEVCTL_MINOR and
	 *	those for attachment points, where the minor number is
	 *	the device number.
	 */
	default:
		rv = (pcihp_get_cb_ops())->cb_ioctl(dev, cmd, arg, mode,
		    credp, rvalp);
		break;
	}

	return (rv);
}


int
pci_common_ctlops_poke(peekpoke_ctlops_t *in_args)
{
	size_t size = in_args->size;
	uintptr_t dev_addr = in_args->dev_addr;
	uintptr_t host_addr = in_args->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)in_args->handle;
	ddi_acc_hdl_t *hdlp = (ddi_acc_hdl_t *)in_args->handle;
	size_t repcount = in_args->repcount;
	uint_t flags = in_args->flags;
	int err = DDI_SUCCESS;

	/*
	 * If there is no handle then this is a poke.  We have to return
	 * failure here as we have no way of knowing whether this is a
	 * MEM or IO space access.
	 */
	if (in_args->handle == NULL)
		return (DDI_FAILURE);

	/*
	 * The rest of this function is actually for cautious puts.
	 */
	for (; repcount; repcount--) {
		if (hp->ahi_acc_attr == DDI_ACCATTR_CONFIG_SPACE) {
			switch (size) {
			case sizeof (uint8_t):
				pci_config_wr8(hp, (uint8_t *)dev_addr,
				    *(uint8_t *)host_addr);
				break;
			case sizeof (uint16_t):
				pci_config_wr16(hp, (uint16_t *)dev_addr,
				    *(uint16_t *)host_addr);
				break;
			case sizeof (uint32_t):
				pci_config_wr32(hp, (uint32_t *)dev_addr,
				    *(uint32_t *)host_addr);
				break;
			case sizeof (uint64_t):
				pci_config_wr64(hp, (uint64_t *)dev_addr,
				    *(uint64_t *)host_addr);
				break;
			default:
				err = DDI_FAILURE;
				break;
			}
		} else if (hp->ahi_acc_attr & DDI_ACCATTR_IO_SPACE) {
			if (hdlp->ah_acc.devacc_attr_endian_flags ==
			    DDI_STRUCTURE_BE_ACC) {
				switch (size) {
				case sizeof (uint8_t):
					i_ddi_io_put8(hp,
					    (uint8_t *)dev_addr,
					    *(uint8_t *)host_addr);
					break;
				case sizeof (uint16_t):
					i_ddi_io_swap_put16(hp,
					    (uint16_t *)dev_addr,
					    *(uint16_t *)host_addr);
					break;
				case sizeof (uint32_t):
					i_ddi_io_swap_put32(hp,
					    (uint32_t *)dev_addr,
					    *(uint32_t *)host_addr);
					break;
				/*
				 * note the 64-bit case is a dummy
				 * function - so no need to swap
				 */
				case sizeof (uint64_t):
					i_ddi_io_put64(hp,
					    (uint64_t *)dev_addr,
					    *(uint64_t *)host_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			} else {
				switch (size) {
				case sizeof (uint8_t):
					i_ddi_io_put8(hp,
					    (uint8_t *)dev_addr,
					    *(uint8_t *)host_addr);
					break;
				case sizeof (uint16_t):
					i_ddi_io_put16(hp,
					    (uint16_t *)dev_addr,
					    *(uint16_t *)host_addr);
					break;
				case sizeof (uint32_t):
					i_ddi_io_put32(hp,
					    (uint32_t *)dev_addr,
					    *(uint32_t *)host_addr);
					break;
				case sizeof (uint64_t):
					i_ddi_io_put64(hp,
					    (uint64_t *)dev_addr,
					    *(uint64_t *)host_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			}
		} else {
			if (hdlp->ah_acc.devacc_attr_endian_flags ==
			    DDI_STRUCTURE_BE_ACC) {
				switch (size) {
				case sizeof (uint8_t):
					*(uint8_t *)dev_addr =
					    *(uint8_t *)host_addr;
					break;
				case sizeof (uint16_t):
					*(uint16_t *)dev_addr =
					    ddi_swap16(*(uint16_t *)host_addr);
					break;
				case sizeof (uint32_t):
					*(uint32_t *)dev_addr =
					    ddi_swap32(*(uint32_t *)host_addr);
					break;
				case sizeof (uint64_t):
					*(uint64_t *)dev_addr =
					    ddi_swap64(*(uint64_t *)host_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			} else {
				switch (size) {
				case sizeof (uint8_t):
					*(uint8_t *)dev_addr =
					    *(uint8_t *)host_addr;
					break;
				case sizeof (uint16_t):
					*(uint16_t *)dev_addr =
					    *(uint16_t *)host_addr;
					break;
				case sizeof (uint32_t):
					*(uint32_t *)dev_addr =
					    *(uint32_t *)host_addr;
					break;
				case sizeof (uint64_t):
					*(uint64_t *)dev_addr =
					    *(uint64_t *)host_addr;
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			}
		}
		host_addr += size;
		if (flags == DDI_DEV_AUTOINCR)
			dev_addr += size;
	}
	return (err);
}


int
pci_fm_acc_setup(ddi_acc_hdl_t *hp, off_t offset, off_t len)
{
	ddi_acc_impl_t	*ap = (ddi_acc_impl_t *)hp->ah_platform_private;

	/* endian-ness check */
	if (hp->ah_acc.devacc_attr_endian_flags == DDI_STRUCTURE_BE_ACC)
		return (DDI_FAILURE);

	/*
	 * range check
	 */
	if ((offset >= PCI_CONF_HDR_SIZE) ||
	    (len > PCI_CONF_HDR_SIZE) ||
	    (offset + len > PCI_CONF_HDR_SIZE))
		return (DDI_FAILURE);

	ap->ahi_acc_attr |= DDI_ACCATTR_CONFIG_SPACE;
	/*
	 * always use cautious mechanism for config space gets
	 */
	ap->ahi_get8 = i_ddi_caut_get8;
	ap->ahi_get16 = i_ddi_caut_get16;
	ap->ahi_get32 = i_ddi_caut_get32;
	ap->ahi_get64 = i_ddi_caut_get64;
	ap->ahi_rep_get8 = i_ddi_caut_rep_get8;
	ap->ahi_rep_get16 = i_ddi_caut_rep_get16;
	ap->ahi_rep_get32 = i_ddi_caut_rep_get32;
	ap->ahi_rep_get64 = i_ddi_caut_rep_get64;
	if (hp->ah_acc.devacc_attr_access == DDI_CAUTIOUS_ACC) {
		ap->ahi_put8 = i_ddi_caut_put8;
		ap->ahi_put16 = i_ddi_caut_put16;
		ap->ahi_put32 = i_ddi_caut_put32;
		ap->ahi_put64 = i_ddi_caut_put64;
		ap->ahi_rep_put8 = i_ddi_caut_rep_put8;
		ap->ahi_rep_put16 = i_ddi_caut_rep_put16;
		ap->ahi_rep_put32 = i_ddi_caut_rep_put32;
		ap->ahi_rep_put64 = i_ddi_caut_rep_put64;
	} else {
		ap->ahi_put8 = pci_config_wr8;
		ap->ahi_put16 = pci_config_wr16;
		ap->ahi_put32 = pci_config_wr32;
		ap->ahi_put64 = pci_config_wr64;
		ap->ahi_rep_put8 = pci_config_rep_wr8;
		ap->ahi_rep_put16 = pci_config_rep_wr16;
		ap->ahi_rep_put32 = pci_config_rep_wr32;
		ap->ahi_rep_put64 = pci_config_rep_wr64;
	}

	/* Initialize to default check/notify functions */
	ap->ahi_fault_check = i_ddi_acc_fault_check;
	ap->ahi_fault_notify = i_ddi_acc_fault_notify;
	ap->ahi_fault = 0;
	impl_acc_err_init(hp);
	return (DDI_SUCCESS);
}


int
pci_common_ctlops_peek(peekpoke_ctlops_t *in_args)
{
	size_t size = in_args->size;
	uintptr_t dev_addr = in_args->dev_addr;
	uintptr_t host_addr = in_args->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)in_args->handle;
	ddi_acc_hdl_t *hdlp = (ddi_acc_hdl_t *)in_args->handle;
	size_t repcount = in_args->repcount;
	uint_t flags = in_args->flags;
	int err = DDI_SUCCESS;

	/*
	 * If there is no handle then this is a peek.  We have to return
	 * failure here as we have no way of knowing whether this is a
	 * MEM or IO space access.
	 */
	if (in_args->handle == NULL)
		return (DDI_FAILURE);

	for (; repcount; repcount--) {
		if (hp->ahi_acc_attr == DDI_ACCATTR_CONFIG_SPACE) {
			switch (size) {
			case sizeof (uint8_t):
				*(uint8_t *)host_addr = pci_config_rd8(hp,
				    (uint8_t *)dev_addr);
				break;
			case sizeof (uint16_t):
				*(uint16_t *)host_addr = pci_config_rd16(hp,
				    (uint16_t *)dev_addr);
				break;
			case sizeof (uint32_t):
				*(uint32_t *)host_addr = pci_config_rd32(hp,
				    (uint32_t *)dev_addr);
				break;
			case sizeof (uint64_t):
				*(uint64_t *)host_addr = pci_config_rd64(hp,
				    (uint64_t *)dev_addr);
				break;
			default:
				err = DDI_FAILURE;
				break;
			}
		} else if (hp->ahi_acc_attr & DDI_ACCATTR_IO_SPACE) {
			if (hdlp->ah_acc.devacc_attr_endian_flags ==
			    DDI_STRUCTURE_BE_ACC) {
				switch (size) {
				case sizeof (uint8_t):
					*(uint8_t *)host_addr =
					    i_ddi_io_get8(hp,
					    (uint8_t *)dev_addr);
					break;
				case sizeof (uint16_t):
					*(uint16_t *)host_addr =
					    i_ddi_io_swap_get16(hp,
					    (uint16_t *)dev_addr);
					break;
				case sizeof (uint32_t):
					*(uint32_t *)host_addr =
					    i_ddi_io_swap_get32(hp,
					    (uint32_t *)dev_addr);
					break;
				/*
				 * note the 64-bit case is a dummy
				 * function - so no need to swap
				 */
				case sizeof (uint64_t):
					*(uint64_t *)host_addr =
					    i_ddi_io_get64(hp,
					    (uint64_t *)dev_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			} else {
				switch (size) {
				case sizeof (uint8_t):
					*(uint8_t *)host_addr =
					    i_ddi_io_get8(hp,
					    (uint8_t *)dev_addr);
					break;
				case sizeof (uint16_t):
					*(uint16_t *)host_addr =
					    i_ddi_io_get16(hp,
					    (uint16_t *)dev_addr);
					break;
				case sizeof (uint32_t):
					*(uint32_t *)host_addr =
					    i_ddi_io_get32(hp,
					    (uint32_t *)dev_addr);
					break;
				case sizeof (uint64_t):
					*(uint64_t *)host_addr =
					    i_ddi_io_get64(hp,
					    (uint64_t *)dev_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			}
		} else {
			if (hdlp->ah_acc.devacc_attr_endian_flags ==
			    DDI_STRUCTURE_BE_ACC) {
				switch (in_args->size) {
				case sizeof (uint8_t):
					*(uint8_t *)host_addr =
					    *(uint8_t *)dev_addr;
					break;
				case sizeof (uint16_t):
					*(uint16_t *)host_addr =
					    ddi_swap16(*(uint16_t *)dev_addr);
					break;
				case sizeof (uint32_t):
					*(uint32_t *)host_addr =
					    ddi_swap32(*(uint32_t *)dev_addr);
					break;
				case sizeof (uint64_t):
					*(uint64_t *)host_addr =
					    ddi_swap64(*(uint64_t *)dev_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			} else {
				switch (in_args->size) {
				case sizeof (uint8_t):
					*(uint8_t *)host_addr =
					    *(uint8_t *)dev_addr;
					break;
				case sizeof (uint16_t):
					*(uint16_t *)host_addr =
					    *(uint16_t *)dev_addr;
					break;
				case sizeof (uint32_t):
					*(uint32_t *)host_addr =
					    *(uint32_t *)dev_addr;
					break;
				case sizeof (uint64_t):
					*(uint64_t *)host_addr =
					    *(uint64_t *)dev_addr;
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			}
		}
		host_addr += size;
		if (flags == DDI_DEV_AUTOINCR)
			dev_addr += size;
	}
	return (err);
}

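/*
 * The peek/poke ctlops above are reached when a child driver accesses a
 * register through a cautious access handle (devacc_attr_access set to
 * DDI_CAUTIOUS_ACC) or uses ddi_peek(9F)/ddi_poke(9F).  The sketch below is
 * illustrative only (guarded out, hypothetical xxx_* names and register
 * number): a leaf driver mapping a register set cautiously, so that each
 * ddi_get32() is routed up the devinfo tree and eventually lands in
 * pci_common_ctlops_peek().
 */
#if 0
static int
xxx_cautious_read(dev_info_t *dip, uint32_t *valp)
{
	ddi_device_acc_attr_t	attr;
	ddi_acc_handle_t	hdl;
	caddr_t			base;

	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	attr.devacc_attr_access = DDI_CAUTIOUS_ACC;	/* cautious access */

	/* rnumber 1, full register set (offset 0, len 0) */
	if (ddi_regs_map_setup(dip, 1, &base, 0, 0, &attr,
	    &hdl) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* This get is performed as a cautious (protected) access */
	*valp = ddi_get32(hdl, (uint32_t *)base);

	ddi_regs_map_free(&hdl);
	return (DDI_SUCCESS);
}
#endif
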
/*ARGSUSED*/
int
pci_common_peekpoke(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t ctlop, void *arg, void *result)
{
	if (ctlop == DDI_CTLOPS_PEEK)
		return (pci_common_ctlops_peek((peekpoke_ctlops_t *)arg));
	else
		return (pci_common_ctlops_poke((peekpoke_ctlops_t *)arg));
}

/*
 * These are the get and put functions to be shared with drivers.  The
 * mutex locking is done inside the functions referenced, rather than
 * here, and is thus shared across PCI child drivers and any other
 * consumers of PCI config space (such as the ACPI subsystem).
 *
 * The configuration space addresses come in as pointers.  This is fine on
 * a 32-bit system, where the VM space and configuration space are the same
 * size.  It's not such a good idea on a 64-bit system, where memory
 * addresses are twice as large as configuration space addresses.  At some
 * point in the call tree we need to take a stand and say "you are 32-bit
 * from this time forth", and this seems like a nice self-contained place.
 */

uint8_t
pci_config_rd8(ddi_acc_impl_t *hdlp, uint8_t *addr)
{
	pci_acc_cfblk_t *cfp;
	uint8_t	rval;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	rval = (*pci_getb_func)(cfp->c_busnum, cfp->c_devnum, cfp->c_funcnum,
	    reg);

	return (rval);
}

void
pci_config_rep_rd8(ddi_acc_impl_t *hdlp, uint8_t *host_addr,
    uint8_t *dev_addr, size_t repcount, uint_t flags)
{
	uint8_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*h++ = pci_config_rd8(hdlp, d++);
	else
		for (; repcount; repcount--)
			*h++ = pci_config_rd8(hdlp, d);
}

uint16_t
pci_config_rd16(ddi_acc_impl_t *hdlp, uint16_t *addr)
{
	pci_acc_cfblk_t *cfp;
	uint16_t rval;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	rval = (*pci_getw_func)(cfp->c_busnum, cfp->c_devnum, cfp->c_funcnum,
	    reg);

	return (rval);
}

void
pci_config_rep_rd16(ddi_acc_impl_t *hdlp, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	uint16_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*h++ = pci_config_rd16(hdlp, d++);
	else
		for (; repcount; repcount--)
			*h++ = pci_config_rd16(hdlp, d);
}

uint32_t
pci_config_rd32(ddi_acc_impl_t *hdlp, uint32_t *addr)
{
	pci_acc_cfblk_t *cfp;
	uint32_t rval;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	rval = (*pci_getl_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg);

	return (rval);
}

void
pci_config_rep_rd32(ddi_acc_impl_t *hdlp, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	uint32_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*h++ = pci_config_rd32(hdlp, d++);
	else
		for (; repcount; repcount--)
			*h++ = pci_config_rd32(hdlp, d);
}

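/*
 * Illustrative sketch only (guarded out): the "address" argument of the
 * config access routines above and below carries nothing but the
 * configuration space offset, while bus/device/function come from the
 * pci_acc_cfblk_t stashed in the handle's ah_bus_private.  Reading the
 * vendor/device ID word through such a handle therefore looks like the
 * hypothetical helper below; the handle is assumed to have been prepared
 * by the nexus config-space map code.
 */
#if 0
static uint32_t
xxx_read_vendev(ddi_acc_impl_t *hdlp)
{
	/*
	 * PCI_CONF_VENID (offset 0) is passed as a "pointer";
	 * pci_config_rd32() turns it back into an int offset and the
	 * ASSERT64 above checks that the upper 32 bits are unused.
	 */
	return (pci_config_rd32(hdlp, (uint32_t *)(uintptr_t)PCI_CONF_VENID));
}
#endif
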
void
pci_config_wr8(ddi_acc_impl_t *hdlp, uint8_t *addr, uint8_t value)
{
	pci_acc_cfblk_t *cfp;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	(*pci_putb_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg, value);
}

void
pci_config_rep_wr8(ddi_acc_impl_t *hdlp, uint8_t *host_addr,
    uint8_t *dev_addr, size_t repcount, uint_t flags)
{
	uint8_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			pci_config_wr8(hdlp, d++, *h++);
	else
		for (; repcount; repcount--)
			pci_config_wr8(hdlp, d, *h++);
}

void
pci_config_wr16(ddi_acc_impl_t *hdlp, uint16_t *addr, uint16_t value)
{
	pci_acc_cfblk_t *cfp;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	(*pci_putw_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg, value);
}

void
pci_config_rep_wr16(ddi_acc_impl_t *hdlp, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	uint16_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			pci_config_wr16(hdlp, d++, *h++);
	else
		for (; repcount; repcount--)
			pci_config_wr16(hdlp, d, *h++);
}

void
pci_config_wr32(ddi_acc_impl_t *hdlp, uint32_t *addr, uint32_t value)
{
	pci_acc_cfblk_t *cfp;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	(*pci_putl_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg, value);
}

void
pci_config_rep_wr32(ddi_acc_impl_t *hdlp, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	uint32_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			pci_config_wr32(hdlp, d++, *h++);
	else
		for (; repcount; repcount--)
			pci_config_wr32(hdlp, d, *h++);
}

uint64_t
pci_config_rd64(ddi_acc_impl_t *hdlp, uint64_t *addr)
{
	uint32_t lw_val;
	uint32_t hi_val;
	uint32_t *dp;
	uint64_t val;

	dp = (uint32_t *)addr;
	lw_val = pci_config_rd32(hdlp, dp);
	dp++;
	hi_val = pci_config_rd32(hdlp, dp);
	val = ((uint64_t)hi_val << 32) | lw_val;
	return (val);
}

void
pci_config_wr64(ddi_acc_impl_t *hdlp, uint64_t *addr, uint64_t value)
{
	uint32_t lw_val;
	uint32_t hi_val;
	uint32_t *dp;

	dp = (uint32_t *)addr;
	lw_val = (uint32_t)(value & 0xffffffff);
	hi_val = (uint32_t)(value >> 32);
	pci_config_wr32(hdlp, dp, lw_val);
	dp++;
	pci_config_wr32(hdlp, dp, hi_val);
}

void
pci_config_rep_rd64(ddi_acc_impl_t *hdlp, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR) {
		for (; repcount; repcount--)
			*host_addr++ = pci_config_rd64(hdlp, dev_addr++);
	} else {
		for (; repcount; repcount--)
			*host_addr++ = pci_config_rd64(hdlp, dev_addr);
	}
}

void
pci_config_rep_wr64(ddi_acc_impl_t *hdlp, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	/* write from the host buffer to the device address, as in rep_wr32 */
	if (flags == DDI_DEV_AUTOINCR) {
		for (; repcount; repcount--)
			pci_config_wr64(hdlp, dev_addr++, *host_addr++);
	} else {
		for (; repcount; repcount--)
			pci_config_wr64(hdlp, dev_addr, *host_addr++);
	}
}


/*
 * Enable legacy PCI config space access for the following four north bridges
 *	Host bridge: AMD HyperTransport Technology Configuration
 *	Host bridge: AMD Address Map
 *	Host bridge: AMD DRAM Controller
 *	Host bridge: AMD Miscellaneous Control
 */
int
is_amd_northbridge(dev_info_t *dip)
{
	int vendor_id, device_id;

	vendor_id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "vendor-id", -1);
	device_id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "device-id", -1);

	if (IS_AMD_NTBRIDGE(vendor_id, device_id))
		return (0);

	return (1);
}
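
/*
 * Note that is_amd_northbridge() follows a "0 means match" convention: it
 * returns 0 when the node is one of the AMD north bridge functions listed
 * above and 1 otherwise.  An illustrative caller sketch (guarded out,
 * hypothetical xxx_cfg_access_allowed name):
 */
#if 0
static boolean_t
xxx_cfg_access_allowed(dev_info_t *dip)
{
	/* Legacy config access is enabled for AMD north bridge functions */
	if (is_amd_northbridge(dip) == 0)
		return (B_TRUE);

	return (B_FALSE);
}
#endif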