/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Code that is common between pci(7d) and npe(7d).
 * It shares the following:
 *	- interrupt code
 *	- pci_tools ioctl code
 *	- name_child code
 *	- set_parent_private_data code
 */

#include <sys/conf.h>
#include <sys/pci.h>
#include <sys/sunndi.h>
#include <sys/mach_intr.h>
#include <sys/hotplug/pci/pcihp.h>
#include <sys/pci_intr_lib.h>
#include <sys/psm.h>
#include <sys/policy.h>
#include <sys/sysmacros.h>
#include <sys/clock.h>
#include <io/pcplusmp/apic.h>
#include <sys/pci_tools.h>
#include <io/pci/pci_var.h>
#include <io/pci/pci_tools_ext.h>
#include <io/pci/pci_common.h>
#include <sys/pci_cfgspace.h>
#include <sys/pci_impl.h>

/*
 * Function prototypes
 */
static int	pci_get_priority(dev_info_t *, ddi_intr_handle_impl_t *, int *);
static int	pci_get_nintrs(dev_info_t *, int, int *);
static int	pci_enable_intr(dev_info_t *, dev_info_t *,
		    ddi_intr_handle_impl_t *, uint32_t);
static void	pci_disable_intr(dev_info_t *, dev_info_t *,
		    ddi_intr_handle_impl_t *, uint32_t);

/* Extern declaration for the pcplusmp module */
extern int	(*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
		    psm_intr_op_t, int *);


/*
 * pci_common_name_child:
 *
 * Assign the address portion of the node name
 */
int
pci_common_name_child(dev_info_t *child, char *name, int namelen)
{
	int		dev, func, length;
	char		**unit_addr;
	uint_t		n;
	pci_regspec_t	*pci_rp;

	if (ndi_dev_is_persistent_node(child) == 0) {
		/*
		 * For .conf nodes, use the "unit-address" property
		 */
		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, child,
		    DDI_PROP_DONTPASS, "unit-address", &unit_addr, &n) !=
		    DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "cannot find unit-address in %s.conf",
			    ddi_get_name(child));
			return (DDI_FAILURE);
		}
		if (n != 1 || *unit_addr == NULL || **unit_addr == 0) {
			cmn_err(CE_WARN, "unit-address property in %s.conf"
			    " not well-formed", ddi_get_name(child));
			ddi_prop_free(unit_addr);
			return (DDI_FAILURE);
		}
		(void) snprintf(name, namelen, "%s", *unit_addr);
		ddi_prop_free(unit_addr);
		return (DDI_SUCCESS);
	}

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "reg", (int **)&pci_rp, (uint_t *)&length) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cannot find reg property in %s",
		    ddi_get_name(child));
		return (DDI_FAILURE);
	}

	/* copy the device identifications */
	dev = PCI_REG_DEV_G(pci_rp->pci_phys_hi);
	func = PCI_REG_FUNC_G(pci_rp->pci_phys_hi);

	/*
	 * free the memory allocated by ddi_prop_lookup_int_array
	 */
	ddi_prop_free(pci_rp);

	if (func != 0) {
		(void) snprintf(name, namelen, "%x,%x", dev, func);
	} else {
		(void) snprintf(name, namelen, "%x", dev);
	}

	return (DDI_SUCCESS);
}
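
/*
 * For example, with the "reg"-based naming above, function 0 of the
 * device at PCI device number 0x1f gets the address "1f", while function
 * 2 of the same device gets "1f,2".
 */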

/*
 * Interrupt related code:
 *
 * The following busop is common to the npe and pci drivers
 *	bus_introp
 */

/*
 * Create the ddi_parent_private_data for a pseudo child.
 */
void
pci_common_set_parent_private_data(dev_info_t *dip)
{
	struct ddi_parent_private_data *pdptr;

	pdptr = (struct ddi_parent_private_data *)kmem_zalloc(
	    (sizeof (struct ddi_parent_private_data) +
	    sizeof (struct intrspec)), KM_SLEEP);
	pdptr->par_intr = (struct intrspec *)(pdptr + 1);
	pdptr->par_nintr = 1;
	ddi_set_parent_data(dip, pdptr);
}

/*
 * pci_get_priority:
 *	Figure out the priority of the device
 */
static int
pci_get_priority(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp, int *pri)
{
	struct intrspec *ispec;

	DDI_INTR_NEXDBG((CE_CONT, "pci_get_priority: dip = 0x%p, hdlp = %p\n",
	    (void *)dip, (void *)hdlp));

	if ((ispec = (struct intrspec *)pci_intx_get_ispec(dip, dip,
	    hdlp->ih_inum)) == NULL) {
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type)) {
			int class = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "class-code", -1);

			*pri = (class == -1) ? 1 : pci_devclass_to_ipl(class);
			pci_common_set_parent_private_data(hdlp->ih_dip);
			ispec = (struct intrspec *)pci_intx_get_ispec(dip, dip,
			    hdlp->ih_inum);
			return (DDI_SUCCESS);
		}
		return (DDI_FAILURE);
	}

	*pri = ispec->intrspec_pri;
	return (DDI_SUCCESS);
}


/*
 * pci_get_nintrs:
 *	Figure out how many interrupts the device supports
 */
static int
pci_get_nintrs(dev_info_t *dip, int type, int *nintrs)
{
	int	ret;

	*nintrs = 0;

	if (DDI_INTR_IS_MSI_OR_MSIX(type))
		ret = pci_msi_get_nintrs(dip, type, nintrs);
	else {
		ret = DDI_FAILURE;
		if (ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    "interrupts", -1) != -1) {
			*nintrs = 1;
			ret = DDI_SUCCESS;
		}
	}

	return (ret);
}

static int pcie_pci_intr_pri_counter = 0;

/*
 * pci_common_intr_ops: bus_intr_op() function for interrupt support
 */
int
pci_common_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	int			priority = 0;
	int			psm_status = 0;
	int			pci_status = 0;
	int			pci_rval, psm_rval = PSM_FAILURE;
	int			types = 0;
	int			pciepci = 0;
	int			i, j, count;
	int			behavior;
	int			cap_ptr;
	ddi_intrspec_t		isp;
	struct intrspec		*ispec;
	ddi_intr_handle_impl_t	tmp_hdl;
	ddi_intr_msix_t		*msix_p;
	ihdl_plat_t		*ihdl_plat_datap;
	ddi_intr_handle_t	*h_array;
	ddi_acc_handle_t	handle;

	DDI_INTR_NEXDBG((CE_CONT,
	    "pci_common_intr_ops: pdip 0x%p, rdip 0x%p, op %x handle 0x%p\n",
	    (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));

	/* Process the request */
	switch (intr_op) {
	case DDI_INTROP_SUPPORTED_TYPES:
		/* Fixed is supported by default */
		*(int *)result = DDI_INTR_TYPE_FIXED;

		/* Figure out whether MSI or MSI-X is also supported */
		if (pci_msi_get_supported_type(rdip, &types) != DDI_SUCCESS)
			return (DDI_SUCCESS);

		if (psm_intr_ops != NULL) {
			/*
			 * Only support MSI for now, OR it in
			 */
			*(int *)result |= (types & DDI_INTR_TYPE_MSI);

			tmp_hdl.ih_type = *(int *)result;
			(void) (*psm_intr_ops)(rdip, &tmp_hdl,
			    PSM_INTR_OP_CHECK_MSI, result);
			DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
			    "rdip: 0x%p supported types: 0x%x\n", (void *)rdip,
			    *(int *)result));
		}
		break;
	case DDI_INTROP_NINTRS:
		if (pci_get_nintrs(rdip, hdlp->ih_type, result) != DDI_SUCCESS)
			return (DDI_FAILURE);
		break;
	case DDI_INTROP_ALLOC:
		/*
		 * MSI or MSI-X: figure out the number of vectors available.
		 * FIXED interrupts: just return the available interrupts.
		 */
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type) &&
		    (psm_intr_ops != NULL) &&
		    (pci_get_priority(rdip, hdlp, &priority) == DDI_SUCCESS)) {
			/*
			 * The following check is a special case for
			 * 'pcie_pci'.  It makes sure vectors with the right
			 * priority are allocated for pcie_pci at ALLOC time.
			 */
			if (strcmp(ddi_driver_name(rdip), "pcie_pci") == 0) {
				hdlp->ih_pri =
				    (pcie_pci_intr_pri_counter % 2) ? 4 : 7;
				pciepci = 1;
			} else
				hdlp->ih_pri = priority;
			behavior = (int)(uintptr_t)hdlp->ih_scratch2;

			/*
			 * Cache the config handle and cap_ptr
			 */
			if (i_ddi_get_pci_config_handle(rdip) == NULL) {
				if (pci_config_setup(rdip, &handle) !=
				    DDI_SUCCESS)
					return (DDI_FAILURE);
				i_ddi_set_pci_config_handle(rdip, handle);
			}

			if (i_ddi_get_msi_msix_cap_ptr(rdip) == 0) {
				char *prop =
				    (hdlp->ih_type == DDI_INTR_TYPE_MSI) ?
				    "pci-msi-capid-pointer" :
				    "pci-msix-capid-pointer";

				cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, rdip,
				    DDI_PROP_DONTPASS, prop,
				    PCI_CAP_NEXT_PTR_NULL);
				i_ddi_set_msi_msix_cap_ptr(rdip, cap_ptr);
			}


			(void) (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_ALLOC_VECTORS, result);

			/* verify behavior flag and take appropriate action */
			if ((behavior == DDI_INTR_ALLOC_STRICT) &&
			    (*(int *)result < hdlp->ih_scratch1)) {
				DDI_INTR_NEXDBG((CE_CONT,
				    "pci_common_intr_ops: behavior %x, "
				    "couldn't get enough intrs\n", behavior));
				hdlp->ih_scratch1 = *(int *)result;
				(void) (*psm_intr_ops)(rdip, hdlp,
				    PSM_INTR_OP_FREE_VECTORS, NULL);
				return (DDI_EAGAIN);
			}

			if (hdlp->ih_type == DDI_INTR_TYPE_MSIX) {
				if (!(msix_p = i_ddi_get_msix(hdlp->ih_dip))) {
					msix_p = pci_msix_init(hdlp->ih_dip);
					if (msix_p)
						i_ddi_set_msix(hdlp->ih_dip,
						    msix_p);
				}
			}

			if (pciepci) {
				/* update priority in ispec */
				isp = pci_intx_get_ispec(pdip, rdip,
				    (int)hdlp->ih_inum);
				ispec = (struct intrspec *)isp;
				if (ispec)
					ispec->intrspec_pri = hdlp->ih_pri;
				++pcie_pci_intr_pri_counter;
			}

		} else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED) {
			/* Figure out if this device supports MASKING */
			pci_rval = pci_intx_get_cap(rdip, &pci_status);
			if (pci_rval == DDI_SUCCESS && pci_status)
				hdlp->ih_cap |= pci_status;
			*(int *)result = 1;	/* DDI_INTR_TYPE_FIXED */
		} else
			return (DDI_FAILURE);
		break;
	case DDI_INTROP_FREE:
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type) &&
		    (psm_intr_ops != NULL)) {
			if (i_ddi_intr_get_current_nintrs(hdlp->ih_dip) == 0) {
				if (handle = i_ddi_get_pci_config_handle(
				    rdip)) {
					(void) pci_config_teardown(&handle);
					i_ddi_set_pci_config_handle(rdip, NULL);
				}
				if (cap_ptr = i_ddi_get_msi_msix_cap_ptr(rdip))
					i_ddi_set_msi_msix_cap_ptr(rdip, 0);
			}

			(void) (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_FREE_VECTORS, NULL);

			if (hdlp->ih_type == DDI_INTR_TYPE_MSIX) {
				msix_p = i_ddi_get_msix(hdlp->ih_dip);
				if (msix_p &&
				    i_ddi_intr_get_current_nintrs(hdlp->ih_dip)
				    == 0) {
					pci_msix_fini(msix_p);
					i_ddi_set_msix(hdlp->ih_dip, NULL);
				}
			}
		}
		break;
	case DDI_INTROP_GETPRI:
		/* Get the priority */
		if (pci_get_priority(rdip, hdlp, &priority) != DDI_SUCCESS)
			return (DDI_FAILURE);
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "priority = 0x%x\n", priority));
		*(int *)result = priority;
		break;
	case DDI_INTROP_SETPRI:
		/* Validate the interrupt priority passed */
		if (*(int *)result > LOCK_LEVEL)
			return (DDI_FAILURE);

		/* Ensure that the PSM is initialized */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		/* Change the priority */
		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) ==
		    PSM_FAILURE)
			return (DDI_FAILURE);

		/* update ispec */
		isp = pci_intx_get_ispec(pdip, rdip, (int)hdlp->ih_inum);
		ispec = (struct intrspec *)isp;
		if (ispec)
			ispec->intrspec_pri = *(int *)result;
		break;
	case DDI_INTROP_ADDISR:
		/* update ispec */
		isp = pci_intx_get_ispec(pdip, rdip, (int)hdlp->ih_inum);
		ispec = (struct intrspec *)isp;
		if (ispec) {
			ispec->intrspec_func = hdlp->ih_cb_func;
			ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;
			pci_kstat_create(&ihdl_plat_datap->ip_ksp, pdip, hdlp);
		}
		break;
	case DDI_INTROP_REMISR:
		/* Get the interrupt structure pointer */
		isp = pci_intx_get_ispec(pdip, rdip, (int)hdlp->ih_inum);
		ispec = (struct intrspec *)isp;
		if (ispec) {
			ispec->intrspec_func = (uint_t (*)()) 0;
			ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;
			if (ihdl_plat_datap->ip_ksp != NULL)
				pci_kstat_delete(ihdl_plat_datap->ip_ksp);
		}
		break;
	case DDI_INTROP_GETCAP:
		/*
		 * First check the config space and/or
		 * MSI capability register(s)
		 */
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
			pci_rval = pci_msi_get_cap(rdip, hdlp->ih_type,
			    &pci_status);
		else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			pci_rval = pci_intx_get_cap(rdip, &pci_status);

		/* next check with pcplusmp */
		if (psm_intr_ops != NULL)
			psm_rval = (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_GET_CAP, &psm_status);

		DDI_INTR_NEXDBG((CE_CONT, "pci: GETCAP returned psm_rval = %x, "
		    "psm_status = %x, pci_rval = %x, pci_status = %x\n",
		    psm_rval, psm_status, pci_rval, pci_status));

		if (psm_rval == PSM_FAILURE && pci_rval == DDI_FAILURE) {
			*(int *)result = 0;
			return (DDI_FAILURE);
		}

		if (psm_rval == PSM_SUCCESS)
			*(int *)result = psm_status;

		if (pci_rval == DDI_SUCCESS)
			*(int *)result |= pci_status;

		DDI_INTR_NEXDBG((CE_CONT, "pci: GETCAP returned = %x\n",
		    *(int *)result));
		break;
	case DDI_INTROP_SETCAP:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "SETCAP cap=0x%x\n", *(int *)result));
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result)) {
			DDI_INTR_NEXDBG((CE_CONT, "SETCAP: psm_intr_ops"
			    " returned failure\n"));
			return (DDI_FAILURE);
		}
		break;
	case DDI_INTROP_ENABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: ENABLE\n"));
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		if (pci_enable_intr(pdip, rdip, hdlp, hdlp->ih_inum) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);

		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: ENABLE "
		    "vector=0x%x\n", hdlp->ih_vector));
		break;
	case DDI_INTROP_DISABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: DISABLE\n"));
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		pci_disable_intr(pdip, rdip, hdlp, hdlp->ih_inum);
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: DISABLE "
		    "vector = %x\n", hdlp->ih_vector));
		break;
	case DDI_INTROP_BLOCKENABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "BLOCKENABLE\n"));
		if (hdlp->ih_type != DDI_INTR_TYPE_MSI) {
			DDI_INTR_NEXDBG((CE_CONT, "BLOCKENABLE: not MSI\n"));
			return (DDI_FAILURE);
		}

		/* Check if psm_intr_ops is present */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		count = hdlp->ih_scratch1;
		h_array = (ddi_intr_handle_t *)hdlp->ih_scratch2;
		for (i = 0; i < count; i++) {
			hdlp = (ddi_intr_handle_impl_t *)h_array[i];
			if (pci_enable_intr(pdip, rdip, hdlp,
			    hdlp->ih_inum) != DDI_SUCCESS) {
				DDI_INTR_NEXDBG((CE_CONT, "BLOCKENABLE: "
				    "pci_enable_intr failed for %d\n", i));
				for (j = 0; j < i; j++) {
					hdlp = (ddi_intr_handle_impl_t *)h_array[j];
					pci_disable_intr(pdip, rdip, hdlp,
					    hdlp->ih_inum);
				}
				return (DDI_FAILURE);
			}
			DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
			    "BLOCKENABLE inum %x done\n", hdlp->ih_inum));
		}
		break;
	case DDI_INTROP_BLOCKDISABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "BLOCKDISABLE\n"));
		if (hdlp->ih_type != DDI_INTR_TYPE_MSI) {
			DDI_INTR_NEXDBG((CE_CONT, "BLOCKDISABLE: not MSI\n"));
			return (DDI_FAILURE);
		}

		/* Check if psm_intr_ops is present */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		count = hdlp->ih_scratch1;
		h_array = (ddi_intr_handle_t *)hdlp->ih_scratch2;
		for (i = 0; i < count; i++) {
			hdlp = (ddi_intr_handle_impl_t *)h_array[i];
			pci_disable_intr(pdip, rdip, hdlp, hdlp->ih_inum);
			DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
			    "BLOCKDISABLE inum %x done\n", hdlp->ih_inum));
		}
		break;
	case DDI_INTROP_SETMASK:
	case DDI_INTROP_CLRMASK:
		/*
		 * First handle the config space
		 */
		if (intr_op == DDI_INTROP_SETMASK) {
			if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
				pci_status = pci_msi_set_mask(rdip,
				    hdlp->ih_type, hdlp->ih_inum);
			else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
				pci_status = pci_intx_set_mask(rdip);
		} else {
			if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
				pci_status = pci_msi_clr_mask(rdip,
				    hdlp->ih_type, hdlp->ih_inum);
			else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
				pci_status = pci_intx_clr_mask(rdip);
		}

		/* For MSI/X; no need to check with pcplusmp */
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (pci_status);

		/* For fixed interrupts only: handle config space first */
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED &&
		    pci_status == DDI_SUCCESS)
			break;

		/* For fixed interrupts only: confer with pcplusmp next */
		if (psm_intr_ops != NULL) {
			/* If the interrupt is shared; do nothing */
			psm_rval = (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_GET_SHARED, &psm_status);

			if (psm_rval == PSM_FAILURE || psm_status == 1)
				return (pci_status);

			/* Now, pcplusmp should try to set/clear the mask */
			if (intr_op == DDI_INTROP_SETMASK)
				psm_rval = (*psm_intr_ops)(rdip, hdlp,
				    PSM_INTR_OP_SET_MASK, NULL);
			else
				psm_rval = (*psm_intr_ops)(rdip, hdlp,
				    PSM_INTR_OP_CLEAR_MASK, NULL);
		}
		return ((psm_rval == PSM_FAILURE) ? DDI_FAILURE : DDI_SUCCESS);
	case DDI_INTROP_GETPENDING:
		/*
		 * First check the config space and/or
		 * MSI capability register(s)
		 */
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
			pci_rval = pci_msi_get_pending(rdip, hdlp->ih_type,
			    hdlp->ih_inum, &pci_status);
		else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			pci_rval = pci_intx_get_pending(rdip, &pci_status);

		/* On failure; next try with pcplusmp */
		if (pci_rval != DDI_SUCCESS && psm_intr_ops != NULL)
			psm_rval = (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_GET_PENDING, &psm_status);

		DDI_INTR_NEXDBG((CE_CONT, "pci: GETPENDING returned "
		    "psm_rval = %x, psm_status = %x, pci_rval = %x, "
		    "pci_status = %x\n", psm_rval, psm_status, pci_rval,
		    pci_status));
		if (psm_rval == PSM_FAILURE && pci_rval == DDI_FAILURE) {
			*(int *)result = 0;
			return (DDI_FAILURE);
		}

		if (psm_rval != PSM_FAILURE)
			*(int *)result = psm_status;
		else if (pci_rval != DDI_FAILURE)
			*(int *)result = pci_status;
		DDI_INTR_NEXDBG((CE_CONT, "pci: GETPENDING returned = %x\n",
		    *(int *)result));
		break;
	case DDI_INTROP_NAVAIL:
		if ((psm_intr_ops != NULL) && (pci_get_priority(rdip,
		    hdlp, &priority) == DDI_SUCCESS)) {
			/* Priority in the handle not initialized yet */
			hdlp->ih_pri = priority;
			(void) (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_NAVAIL_VECTORS, result);
		} else {
			*(int *)result = 1;
		}
		DDI_INTR_NEXDBG((CE_CONT, "pci: NAVAIL returned = %x\n",
		    *(int *)result));
		break;
	default:
		return (i_ddi_intr_ops(pdip, rdip, intr_op, hdlp, result));
	}

	return (DDI_SUCCESS);
}
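
/*
 * The nexus drivers that share this file are expected to call
 * pci_common_intr_ops() from their bus_ops bus_intr_op entry point.
 * A minimal sketch (the "xxx" names below are illustrative only):
 *
 *	static int
 *	xxx_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t op,
 *	    ddi_intr_handle_impl_t *hdlp, void *result)
 *	{
 *		return (pci_common_intr_ops(pdip, rdip, op, hdlp, result));
 *	}
 */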

int
pci_get_intr_from_vecirq(apic_get_intr_t *intrinfo_p,
    int vecirq, boolean_t is_irq)
{
	ddi_intr_handle_impl_t	get_info_ii_hdl;

	if (is_irq)
		intrinfo_p->avgi_req_flags |= PSMGI_INTRBY_IRQ;

	/*
	 * For this locally-declared and used handle, ih_private will contain a
	 * pointer to apic_get_intr_t, not an ihdl_plat_t as used for
	 * global interrupt handling.
	 */
	get_info_ii_hdl.ih_private = intrinfo_p;
	get_info_ii_hdl.ih_vector = (ushort_t)vecirq;

	if ((*psm_intr_ops)(NULL, &get_info_ii_hdl,
	    PSM_INTR_OP_GET_INTR, NULL) == PSM_FAILURE)
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}


int
pci_get_cpu_from_vecirq(int vecirq, boolean_t is_irq)
{
	int rval;

	apic_get_intr_t intrinfo;
	intrinfo.avgi_req_flags = PSMGI_REQ_CPUID;
	rval = pci_get_intr_from_vecirq(&intrinfo, vecirq, is_irq);

	if (rval == DDI_SUCCESS)
		return (intrinfo.avgi_cpu_id);
	else
		return (-1);
}


static int
pci_enable_intr(dev_info_t *pdip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, uint32_t inum)
{
	struct intrspec	*ispec;
	int		irq;
	ihdl_plat_t	*ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;

	DDI_INTR_NEXDBG((CE_CONT, "pci_enable_intr: hdlp %p inum %x\n",
	    (void *)hdlp, inum));

	/* Translate the interrupt if needed */
	ispec = (struct intrspec *)pci_intx_get_ispec(pdip, rdip, (int)inum);
	if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type) && ispec)
		ispec->intrspec_vec = inum;
	ihdl_plat_datap->ip_ispecp = ispec;

	/* translate the interrupt if needed */
	(void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR, &irq);
	DDI_INTR_NEXDBG((CE_CONT, "pci_enable_intr: priority=%x irq=%x\n",
	    hdlp->ih_pri, irq));

	/* Add the interrupt handler */
	if (!add_avintr((void *)hdlp, hdlp->ih_pri, hdlp->ih_cb_func,
	    DEVI(rdip)->devi_name, irq, hdlp->ih_cb_arg1,
	    hdlp->ih_cb_arg2, &ihdl_plat_datap->ip_ticks, rdip))
		return (DDI_FAILURE);

	/* Note this really is an irq. */
	hdlp->ih_vector = (ushort_t)irq;

	return (DDI_SUCCESS);
}


static void
pci_disable_intr(dev_info_t *pdip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, uint32_t inum)
{
	int		irq;
	struct intrspec	*ispec;
	ihdl_plat_t	*ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;

	DDI_INTR_NEXDBG((CE_CONT, "pci_disable_intr: \n"));
	ispec = (struct intrspec *)pci_intx_get_ispec(pdip, rdip, (int)inum);
	if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type) && ispec)
		ispec->intrspec_vec = inum;
	ihdl_plat_datap->ip_ispecp = ispec;

	/* translate the interrupt if needed */
	(void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR, &irq);

	/* Disable the interrupt handler */
	rem_avintr((void *)hdlp, hdlp->ih_pri, hdlp->ih_cb_func, irq);
	ihdl_plat_datap->ip_ispecp = NULL;
}

/*
 * Miscellaneous library function
 */
int
pci_common_get_reg_prop(dev_info_t *dip, pci_regspec_t *pci_rp)
{
	int		i;
	int		number;
	int		assigned_addr_len;
	uint_t		phys_hi = pci_rp->pci_phys_hi;
	pci_regspec_t	*assigned_addr;

	if (((phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) ||
	    (phys_hi & PCI_RELOCAT_B))
		return (DDI_SUCCESS);

	/*
	 * The "reg" property specifies a relocatable address, so get and
	 * interpret the "assigned-addresses" property.
	 */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "assigned-addresses", (int **)&assigned_addr,
	    (uint_t *)&assigned_addr_len) != DDI_PROP_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Scan the "assigned-addresses" for one that matches the specified
	 * "reg" property entry.
	 */
	phys_hi &= PCI_CONF_ADDR_MASK;
	number = assigned_addr_len / (sizeof (pci_regspec_t) / sizeof (int));
	for (i = 0; i < number; i++) {
		if ((assigned_addr[i].pci_phys_hi & PCI_CONF_ADDR_MASK) ==
		    phys_hi) {
			pci_rp->pci_phys_mid = assigned_addr[i].pci_phys_mid;
			pci_rp->pci_phys_low = assigned_addr[i].pci_phys_low;
			ddi_prop_free(assigned_addr);
			return (DDI_SUCCESS);
		}
	}

	ddi_prop_free(assigned_addr);
	return (DDI_FAILURE);
}


/*
 * For pci_tools
 */

int
pci_common_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg,
    int mode, cred_t *credp, int *rvalp)
{
	int rv = ENOTTY;

	minor_t minor = getminor(dev);

	switch (PCIHP_AP_MINOR_NUM_TO_PCI_DEVNUM(minor)) {
	case PCI_TOOL_REG_MINOR_NUM:

		switch (cmd) {
		case PCITOOL_DEVICE_SET_REG:
		case PCITOOL_DEVICE_GET_REG:

			/* Require full privileges. */
			if (secpolicy_kmdb(credp))
				rv = EPERM;
			else
				rv = pcitool_dev_reg_ops(dip, (void *)arg,
				    cmd, mode);
			break;

		case PCITOOL_NEXUS_SET_REG:
		case PCITOOL_NEXUS_GET_REG:

			/* Require full privileges. */
			if (secpolicy_kmdb(credp))
				rv = EPERM;
			else
				rv = pcitool_bus_reg_ops(dip, (void *)arg,
				    cmd, mode);
			break;
		}
		break;

	case PCI_TOOL_INTR_MINOR_NUM:

		switch (cmd) {
		case PCITOOL_DEVICE_SET_INTR:

			/* Require PRIV_SYS_RES_CONFIG, same as psradm */
			if (secpolicy_ponline(credp)) {
				rv = EPERM;
				break;
			}

		/*FALLTHRU*/
		/* These require no special privileges. */
		case PCITOOL_DEVICE_GET_INTR:
		case PCITOOL_DEVICE_NUM_INTR:
			rv = pcitool_intr_admn(dip, (void *)arg, cmd, mode);
			break;
		}
		break;

	/*
	 * All non-PCItool ioctls go through here, including:
	 * devctl ioctls with minor number PCIHP_DEVCTL_MINOR and
	 * those for attachment points, where the minor number is the
	 * device number.
	 */
	default:
		rv = (pcihp_get_cb_ops())->cb_ioctl(dev, cmd, arg, mode,
		    credp, rvalp);
		break;
	}

	return (rv);
}


int
pci_common_ctlops_poke(peekpoke_ctlops_t *in_args)
{
	size_t size = in_args->size;
	uintptr_t dev_addr = in_args->dev_addr;
	uintptr_t host_addr = in_args->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)in_args->handle;
	ddi_acc_hdl_t *hdlp = (ddi_acc_hdl_t *)in_args->handle;
	size_t repcount = in_args->repcount;
	uint_t flags = in_args->flags;
	int err = DDI_SUCCESS;

	/*
	 * If there is no handle, then this is a poke.  We have to return
	 * failure here, as we have no way of knowing whether this is a MEM
	 * or IO space access.
	 */
	if (in_args->handle == NULL)
		return (DDI_FAILURE);

	/*
	 * The rest of this function is actually for cautious puts.
	 */
	for (; repcount; repcount--) {
		if (hp->ahi_acc_attr == DDI_ACCATTR_CONFIG_SPACE) {
			switch (size) {
			case sizeof (uint8_t):
				pci_config_wr8(hp, (uint8_t *)dev_addr,
				    *(uint8_t *)host_addr);
				break;
			case sizeof (uint16_t):
				pci_config_wr16(hp, (uint16_t *)dev_addr,
				    *(uint16_t *)host_addr);
				break;
			case sizeof (uint32_t):
				pci_config_wr32(hp, (uint32_t *)dev_addr,
				    *(uint32_t *)host_addr);
				break;
			case sizeof (uint64_t):
				pci_config_wr64(hp, (uint64_t *)dev_addr,
				    *(uint64_t *)host_addr);
				break;
			default:
				err = DDI_FAILURE;
				break;
			}
		} else if (hp->ahi_acc_attr & DDI_ACCATTR_IO_SPACE) {
			if (hdlp->ah_acc.devacc_attr_endian_flags ==
			    DDI_STRUCTURE_BE_ACC) {
				switch (size) {
				case sizeof (uint8_t):
					i_ddi_io_put8(hp,
					    (uint8_t *)dev_addr,
					    *(uint8_t *)host_addr);
					break;
				case sizeof (uint16_t):
					i_ddi_io_swap_put16(hp,
					    (uint16_t *)dev_addr,
					    *(uint16_t *)host_addr);
					break;
				case sizeof (uint32_t):
					i_ddi_io_swap_put32(hp,
					    (uint32_t *)dev_addr,
					    *(uint32_t *)host_addr);
					break;
				/*
				 * note the 64-bit case is a dummy
				 * function - so no need to swap
				 */
				case sizeof (uint64_t):
					i_ddi_io_put64(hp,
					    (uint64_t *)dev_addr,
					    *(uint64_t *)host_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			} else {
				switch (size) {
				case sizeof (uint8_t):
					i_ddi_io_put8(hp,
					    (uint8_t *)dev_addr,
					    *(uint8_t *)host_addr);
					break;
				case sizeof (uint16_t):
					i_ddi_io_put16(hp,
					    (uint16_t *)dev_addr,
					    *(uint16_t *)host_addr);
					break;
				case sizeof (uint32_t):
					i_ddi_io_put32(hp,
					    (uint32_t *)dev_addr,
					    *(uint32_t *)host_addr);
					break;
				case sizeof (uint64_t):
					i_ddi_io_put64(hp,
					    (uint64_t *)dev_addr,
					    *(uint64_t *)host_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			}
		} else {
			if (hdlp->ah_acc.devacc_attr_endian_flags ==
			    DDI_STRUCTURE_BE_ACC) {
				switch (size) {
				case sizeof (uint8_t):
					*(uint8_t *)dev_addr =
					    *(uint8_t *)host_addr;
					break;
				case sizeof (uint16_t):
					*(uint16_t *)dev_addr =
					    ddi_swap16(*(uint16_t *)host_addr);
					break;
				case sizeof (uint32_t):
					*(uint32_t *)dev_addr =
					    ddi_swap32(*(uint32_t *)host_addr);
					break;
				case sizeof (uint64_t):
					*(uint64_t *)dev_addr =
					    ddi_swap64(*(uint64_t *)host_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			} else {
				switch (size) {
				case sizeof (uint8_t):
					*(uint8_t *)dev_addr =
					    *(uint8_t *)host_addr;
					break;
				case sizeof (uint16_t):
					*(uint16_t *)dev_addr =
					    *(uint16_t *)host_addr;
					break;
				case sizeof (uint32_t):
					*(uint32_t *)dev_addr =
					    *(uint32_t *)host_addr;
					break;
				case sizeof (uint64_t):
					*(uint64_t *)dev_addr =
					    *(uint64_t *)host_addr;
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			}
		}
		host_addr += size;
		if (flags == DDI_DEV_AUTOINCR)
			dev_addr += size;
	}
	return (err);
}


int
pci_fm_acc_setup(ddi_acc_hdl_t *hp, off_t offset, off_t len)
{
	ddi_acc_impl_t	*ap = (ddi_acc_impl_t *)hp->ah_platform_private;

	/* endian-ness check */
	if (hp->ah_acc.devacc_attr_endian_flags == DDI_STRUCTURE_BE_ACC)
		return (DDI_FAILURE);

	/*
	 * range check
	 */
	if ((offset >= PCI_CONF_HDR_SIZE) ||
	    (len > PCI_CONF_HDR_SIZE) ||
	    (offset + len > PCI_CONF_HDR_SIZE))
		return (DDI_FAILURE);

	ap->ahi_acc_attr |= DDI_ACCATTR_CONFIG_SPACE;
	/*
	 * always use cautious mechanism for config space gets
	 */
	ap->ahi_get8 = i_ddi_caut_get8;
	ap->ahi_get16 = i_ddi_caut_get16;
	ap->ahi_get32 = i_ddi_caut_get32;
	ap->ahi_get64 = i_ddi_caut_get64;
	ap->ahi_rep_get8 = i_ddi_caut_rep_get8;
	ap->ahi_rep_get16 = i_ddi_caut_rep_get16;
	ap->ahi_rep_get32 = i_ddi_caut_rep_get32;
	ap->ahi_rep_get64 = i_ddi_caut_rep_get64;
	if (hp->ah_acc.devacc_attr_access == DDI_CAUTIOUS_ACC) {
		ap->ahi_put8 = i_ddi_caut_put8;
		ap->ahi_put16 = i_ddi_caut_put16;
		ap->ahi_put32 = i_ddi_caut_put32;
		ap->ahi_put64 = i_ddi_caut_put64;
		ap->ahi_rep_put8 = i_ddi_caut_rep_put8;
		ap->ahi_rep_put16 = i_ddi_caut_rep_put16;
		ap->ahi_rep_put32 = i_ddi_caut_rep_put32;
		ap->ahi_rep_put64 = i_ddi_caut_rep_put64;
	} else {
		ap->ahi_put8 = pci_config_wr8;
		ap->ahi_put16 = pci_config_wr16;
		ap->ahi_put32 = pci_config_wr32;
		ap->ahi_put64 = pci_config_wr64;
		ap->ahi_rep_put8 = pci_config_rep_wr8;
		ap->ahi_rep_put16 = pci_config_rep_wr16;
		ap->ahi_rep_put32 = pci_config_rep_wr32;
		ap->ahi_rep_put64 = pci_config_rep_wr64;
	}

	/* Initialize to default check/notify functions */
	ap->ahi_fault_check = i_ddi_acc_fault_check;
	ap->ahi_fault_notify = i_ddi_acc_fault_notify;
	ap->ahi_fault = 0;
	impl_acc_err_init(hp);
	return (DDI_SUCCESS);
}
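
/*
 * pci_fm_acc_setup() above is intended to be called from a nexus driver's
 * config-space mapping path (typically its bus_map routine) so that a
 * child's config-space access handle uses the cautious, FMA-protected get
 * routines installed here.
 */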

int
pci_common_ctlops_peek(peekpoke_ctlops_t *in_args)
{
	size_t size = in_args->size;
	uintptr_t dev_addr = in_args->dev_addr;
	uintptr_t host_addr = in_args->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)in_args->handle;
	ddi_acc_hdl_t *hdlp = (ddi_acc_hdl_t *)in_args->handle;
	size_t repcount = in_args->repcount;
	uint_t flags = in_args->flags;
	int err = DDI_SUCCESS;

	/*
	 * If there is no handle, then this is a peek.  We have to return
	 * failure here, as we have no way of knowing whether this is a MEM
	 * or IO space access.
	 */
	if (in_args->handle == NULL)
		return (DDI_FAILURE);

	for (; repcount; repcount--) {
		if (hp->ahi_acc_attr == DDI_ACCATTR_CONFIG_SPACE) {
			switch (size) {
			case sizeof (uint8_t):
				*(uint8_t *)host_addr = pci_config_rd8(hp,
				    (uint8_t *)dev_addr);
				break;
			case sizeof (uint16_t):
				*(uint16_t *)host_addr = pci_config_rd16(hp,
				    (uint16_t *)dev_addr);
				break;
			case sizeof (uint32_t):
				*(uint32_t *)host_addr = pci_config_rd32(hp,
				    (uint32_t *)dev_addr);
				break;
			case sizeof (uint64_t):
				*(uint64_t *)host_addr = pci_config_rd64(hp,
				    (uint64_t *)dev_addr);
				break;
			default:
				err = DDI_FAILURE;
				break;
			}
		} else if (hp->ahi_acc_attr & DDI_ACCATTR_IO_SPACE) {
			if (hdlp->ah_acc.devacc_attr_endian_flags ==
			    DDI_STRUCTURE_BE_ACC) {
				switch (size) {
				case sizeof (uint8_t):
					*(uint8_t *)host_addr =
					    i_ddi_io_get8(hp,
					    (uint8_t *)dev_addr);
					break;
				case sizeof (uint16_t):
					*(uint16_t *)host_addr =
					    i_ddi_io_swap_get16(hp,
					    (uint16_t *)dev_addr);
					break;
				case sizeof (uint32_t):
					*(uint32_t *)host_addr =
					    i_ddi_io_swap_get32(hp,
					    (uint32_t *)dev_addr);
					break;
				/*
				 * note the 64-bit case is a dummy
				 * function - so no need to swap
				 */
				case sizeof (uint64_t):
					*(uint64_t *)host_addr =
					    i_ddi_io_get64(hp,
					    (uint64_t *)dev_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			} else {
				switch (size) {
				case sizeof (uint8_t):
					*(uint8_t *)host_addr =
					    i_ddi_io_get8(hp,
					    (uint8_t *)dev_addr);
					break;
				case sizeof (uint16_t):
					*(uint16_t *)host_addr =
					    i_ddi_io_get16(hp,
					    (uint16_t *)dev_addr);
					break;
				case sizeof (uint32_t):
					*(uint32_t *)host_addr =
					    i_ddi_io_get32(hp,
					    (uint32_t *)dev_addr);
					break;
				case sizeof (uint64_t):
					*(uint64_t *)host_addr =
					    i_ddi_io_get64(hp,
					    (uint64_t *)dev_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			}
		} else {
			if (hdlp->ah_acc.devacc_attr_endian_flags ==
			    DDI_STRUCTURE_BE_ACC) {
				switch (in_args->size) {
				case sizeof (uint8_t):
					*(uint8_t *)host_addr =
					    *(uint8_t *)dev_addr;
					break;
				case sizeof (uint16_t):
					*(uint16_t *)host_addr =
					    ddi_swap16(*(uint16_t *)dev_addr);
					break;
				case sizeof (uint32_t):
					*(uint32_t *)host_addr =
					    ddi_swap32(*(uint32_t *)dev_addr);
					break;
				case sizeof (uint64_t):
					*(uint64_t *)host_addr =
					    ddi_swap64(*(uint64_t *)dev_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			} else {
				switch (in_args->size) {
				case sizeof (uint8_t):
					*(uint8_t *)host_addr =
					    *(uint8_t *)dev_addr;
					break;
				case sizeof (uint16_t):
					*(uint16_t *)host_addr =
					    *(uint16_t *)dev_addr;
					break;
				case sizeof (uint32_t):
					*(uint32_t *)host_addr =
					    *(uint32_t *)dev_addr;
					break;
				case sizeof (uint64_t):
					*(uint64_t *)host_addr =
					    *(uint64_t *)dev_addr;
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			}
		}
		host_addr += size;
		if (flags == DDI_DEV_AUTOINCR)
			dev_addr += size;
	}
	return (err);
}
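
/*
 * A nexus driver's bus_ctl entry point typically routes DDI_CTLOPS_PEEK and
 * DDI_CTLOPS_POKE to pci_common_peekpoke() below, for example (illustrative
 * only):
 *
 *	case DDI_CTLOPS_PEEK:
 *	case DDI_CTLOPS_POKE:
 *		return (pci_common_peekpoke(dip, rdip, ctlop, arg, result));
 */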

/*ARGSUSED*/
int
pci_common_peekpoke(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t ctlop, void *arg, void *result)
{
	if (ctlop == DDI_CTLOPS_PEEK)
		return (pci_common_ctlops_peek((peekpoke_ctlops_t *)arg));
	else
		return (pci_common_ctlops_poke((peekpoke_ctlops_t *)arg));
}

/*
 * These are the get and put functions to be shared with drivers.  The
 * mutex locking is done inside the functions referenced, rather than
 * here, and is thus shared across PCI child drivers and any other
 * consumers of PCI config space (such as the ACPI subsystem).
 *
 * The configuration space addresses come in as pointers.  This is fine on
 * a 32-bit system, where the VM space and configuration space are the same
 * size.  It's not such a good idea on a 64-bit system, where memory
 * addresses are twice as large as configuration space addresses.  At some
 * point in the call tree we need to take a stand and say "you are 32-bit
 * from this time forth", and this seems like a nice self-contained place.
 */

uint8_t
pci_config_rd8(ddi_acc_impl_t *hdlp, uint8_t *addr)
{
	pci_acc_cfblk_t *cfp;
	uint8_t	rval;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	rval = (*pci_getb_func)(cfp->c_busnum, cfp->c_devnum, cfp->c_funcnum,
	    reg);

	return (rval);
}

void
pci_config_rep_rd8(ddi_acc_impl_t *hdlp, uint8_t *host_addr,
    uint8_t *dev_addr, size_t repcount, uint_t flags)
{
	uint8_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*h++ = pci_config_rd8(hdlp, d++);
	else
		for (; repcount; repcount--)
			*h++ = pci_config_rd8(hdlp, d);
}

uint16_t
pci_config_rd16(ddi_acc_impl_t *hdlp, uint16_t *addr)
{
	pci_acc_cfblk_t *cfp;
	uint16_t rval;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	rval = (*pci_getw_func)(cfp->c_busnum, cfp->c_devnum, cfp->c_funcnum,
	    reg);

	return (rval);
}

void
pci_config_rep_rd16(ddi_acc_impl_t *hdlp, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	uint16_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*h++ = pci_config_rd16(hdlp, d++);
	else
		for (; repcount; repcount--)
			*h++ = pci_config_rd16(hdlp, d);
}

uint32_t
pci_config_rd32(ddi_acc_impl_t *hdlp, uint32_t *addr)
{
	pci_acc_cfblk_t *cfp;
	uint32_t rval;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	rval = (*pci_getl_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg);

	return (rval);
}

void
pci_config_rep_rd32(ddi_acc_impl_t *hdlp, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	uint32_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*h++ = pci_config_rd32(hdlp, d++);
	else
		for (; repcount; repcount--)
			*h++ = pci_config_rd32(hdlp, d);
}


void
pci_config_wr8(ddi_acc_impl_t *hdlp, uint8_t *addr, uint8_t value)
{
	pci_acc_cfblk_t *cfp;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	(*pci_putb_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg, value);
}

void
pci_config_rep_wr8(ddi_acc_impl_t *hdlp, uint8_t *host_addr,
    uint8_t *dev_addr, size_t repcount, uint_t flags)
{
	uint8_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			pci_config_wr8(hdlp, d++, *h++);
	else
		for (; repcount; repcount--)
			pci_config_wr8(hdlp, d, *h++);
}

void
pci_config_wr16(ddi_acc_impl_t *hdlp, uint16_t *addr, uint16_t value)
{
	pci_acc_cfblk_t *cfp;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	(*pci_putw_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg, value);
}

void
pci_config_rep_wr16(ddi_acc_impl_t *hdlp, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	uint16_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			pci_config_wr16(hdlp, d++, *h++);
	else
		for (; repcount; repcount--)
			pci_config_wr16(hdlp, d, *h++);
}

void
pci_config_wr32(ddi_acc_impl_t *hdlp, uint32_t *addr, uint32_t value)
{
	pci_acc_cfblk_t *cfp;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	(*pci_putl_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg, value);
}

void
pci_config_rep_wr32(ddi_acc_impl_t *hdlp, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	uint32_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			pci_config_wr32(hdlp, d++, *h++);
	else
		for (; repcount; repcount--)
			pci_config_wr32(hdlp, d, *h++);
}
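
/*
 * The 64-bit config-space accesses below are synthesized from two 32-bit
 * accesses (low dword at the given offset, high dword at offset + 4), so
 * they are not atomic with respect to other config-space traffic.
 */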

uint64_t
pci_config_rd64(ddi_acc_impl_t *hdlp, uint64_t *addr)
{
	uint32_t lw_val;
	uint32_t hi_val;
	uint32_t *dp;
	uint64_t val;

	dp = (uint32_t *)addr;
	lw_val = pci_config_rd32(hdlp, dp);
	dp++;
	hi_val = pci_config_rd32(hdlp, dp);
	val = ((uint64_t)hi_val << 32) | lw_val;
	return (val);
}

void
pci_config_wr64(ddi_acc_impl_t *hdlp, uint64_t *addr, uint64_t value)
{
	uint32_t lw_val;
	uint32_t hi_val;
	uint32_t *dp;

	dp = (uint32_t *)addr;
	lw_val = (uint32_t)(value & 0xffffffff);
	hi_val = (uint32_t)(value >> 32);
	pci_config_wr32(hdlp, dp, lw_val);
	dp++;
	pci_config_wr32(hdlp, dp, hi_val);
}

void
pci_config_rep_rd64(ddi_acc_impl_t *hdlp, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR) {
		for (; repcount; repcount--)
			*host_addr++ = pci_config_rd64(hdlp, dev_addr++);
	} else {
		for (; repcount; repcount--)
			*host_addr++ = pci_config_rd64(hdlp, dev_addr);
	}
}

void
pci_config_rep_wr64(ddi_acc_impl_t *hdlp, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	/*
	 * As with the 8-, 16- and 32-bit variants above, dev_addr is the
	 * config-space destination and host_addr supplies the data.
	 */
	if (flags == DDI_DEV_AUTOINCR) {
		for (; repcount; repcount--)
			pci_config_wr64(hdlp, dev_addr++, *host_addr++);
	} else {
		for (; repcount; repcount--)
			pci_config_wr64(hdlp, dev_addr, *host_addr++);
	}
}


/*
 * Enable Legacy PCI config space access for the following four north bridges
 *	Host bridge: AMD HyperTransport Technology Configuration
 *	Host bridge: AMD Address Map
 *	Host bridge: AMD DRAM Controller
 *	Host bridge: AMD Miscellaneous Control
 */
int
is_amd_northbridge(dev_info_t *dip)
{
	int vendor_id, device_id;

	vendor_id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "vendor-id", -1);
	device_id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "device-id", -1);

	if (IS_AMD_NTBRIDGE(vendor_id, device_id))
		return (0);

	return (1);
}