1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <sys/types.h> 29 #include <sys/kmem.h> 30 #include <sys/conf.h> 31 #include <sys/ddi.h> 32 #include <sys/sunddi.h> 33 #include <sys/fm/protocol.h> 34 #include <sys/fm/util.h> 35 #include <sys/modctl.h> 36 #include <sys/disp.h> 37 #include <sys/stat.h> 38 #include <sys/ddi_impldefs.h> 39 #include <sys/vmem.h> 40 #include <sys/iommutsb.h> 41 #include <sys/cpuvar.h> 42 #include <sys/ivintr.h> 43 #include <sys/byteorder.h> 44 #include <sys/hotplug/pci/pciehpc.h> 45 #include <px_obj.h> 46 #include <pcie_pwr.h> 47 #include "px_tools_var.h" 48 #include <px_regs.h> 49 #include <px_csr.h> 50 #include <sys/machsystm.h> 51 #include "px_lib4u.h" 52 #include "px_err.h" 53 #include "oberon_regs.h" 54 55 #pragma weak jbus_stst_order 56 57 extern void jbus_stst_order(); 58 59 ulong_t px_mmu_dvma_end = 0xfffffffful; 60 uint_t px_ranges_phi_mask = 0xfffffffful; 61 uint64_t *px_oberon_ubc_scratch_regs; 62 uint64_t px_paddr_mask; 63 64 static int px_goto_l23ready(px_t *px_p); 65 static int px_goto_l0(px_t *px_p); 66 static int px_pre_pwron_check(px_t *px_p); 67 static uint32_t px_identity_chip(px_t *px_p); 68 static boolean_t px_cpr_callb(void *arg, int code); 69 static uint_t px_cb_intr(caddr_t arg); 70 71 /* 72 * px_lib_map_registers 73 * 74 * This function is called from the attach routine to map the registers 75 * accessed by this driver. 76 * 77 * used by: px_attach() 78 * 79 * return value: DDI_FAILURE on failure 80 */ 81 int 82 px_lib_map_regs(pxu_t *pxu_p, dev_info_t *dip) 83 { 84 ddi_device_acc_attr_t attr; 85 px_reg_bank_t reg_bank = PX_REG_CSR; 86 87 DBG(DBG_ATTACH, dip, "px_lib_map_regs: pxu_p:0x%p, dip 0x%p\n", 88 pxu_p, dip); 89 90 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 91 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 92 attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC; 93 94 /* 95 * PCI CSR Base 96 */ 97 if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank], 98 0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) { 99 goto fail; 100 } 101 102 reg_bank++; 103 104 /* 105 * XBUS CSR Base 106 */ 107 if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank], 108 0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) { 109 goto fail; 110 } 111 112 pxu_p->px_address[reg_bank] -= FIRE_CONTROL_STATUS; 113 114 done: 115 for (; reg_bank >= PX_REG_CSR; reg_bank--) { 116 DBG(DBG_ATTACH, dip, "reg_bank 0x%x address 0x%p\n", 117 reg_bank, pxu_p->px_address[reg_bank]); 118 } 119 120 return (DDI_SUCCESS); 121 122 fail: 123 cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n", 124 ddi_driver_name(dip), ddi_get_instance(dip), reg_bank); 125 126 for (reg_bank--; reg_bank >= PX_REG_CSR; reg_bank--) { 127 pxu_p->px_address[reg_bank] = NULL; 128 ddi_regs_map_free(&pxu_p->px_ac[reg_bank]); 129 } 130 131 return (DDI_FAILURE); 132 } 133 134 /* 135 * px_lib_unmap_regs: 136 * 137 * This routine unmaps the registers mapped by map_px_registers. 138 * 139 * used by: px_detach(), and error conditions in px_attach() 140 * 141 * return value: none 142 */ 143 void 144 px_lib_unmap_regs(pxu_t *pxu_p) 145 { 146 int i; 147 148 for (i = 0; i < PX_REG_MAX; i++) { 149 if (pxu_p->px_ac[i]) 150 ddi_regs_map_free(&pxu_p->px_ac[i]); 151 } 152 } 153 154 int 155 px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl) 156 { 157 px_t *px_p = DIP_TO_STATE(dip); 158 caddr_t xbc_csr_base, csr_base; 159 px_dvma_range_prop_t px_dvma_range; 160 uint32_t chip_id; 161 pxu_t *pxu_p; 162 163 DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p\n", dip); 164 165 if ((chip_id = px_identity_chip(px_p)) == PX_CHIP_UNIDENTIFIED) 166 return (DDI_FAILURE); 167 168 switch (chip_id) { 169 case FIRE_VER_10: 170 cmn_err(CE_WARN, "FIRE Hardware Version 1.0 is not supported"); 171 return (DDI_FAILURE); 172 case FIRE_VER_20: 173 DBG(DBG_ATTACH, dip, "FIRE Hardware Version 2.0\n"); 174 px_paddr_mask = MMU_FIRE_PADDR_MASK; 175 break; 176 case OBERON_VER_10: 177 DBG(DBG_ATTACH, dip, "Oberon Hardware Version 1.0\n"); 178 px_paddr_mask = MMU_OBERON_PADDR_MASK; 179 break; 180 default: 181 cmn_err(CE_WARN, "%s%d: PX Hardware Version Unknown\n", 182 ddi_driver_name(dip), ddi_get_instance(dip)); 183 return (DDI_FAILURE); 184 } 185 186 /* 187 * Allocate platform specific structure and link it to 188 * the px state structure. 189 */ 190 pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP); 191 pxu_p->chip_id = chip_id; 192 pxu_p->portid = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 193 "portid", -1); 194 195 /* Map in the registers */ 196 if (px_lib_map_regs(pxu_p, dip) == DDI_FAILURE) { 197 kmem_free(pxu_p, sizeof (pxu_t)); 198 199 return (DDI_FAILURE); 200 } 201 202 xbc_csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC]; 203 csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR]; 204 205 pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid); 206 pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie); 207 pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie); 208 209 pxu_p->tsb_paddr = va_to_pa(pxu_p->tsb_vaddr); 210 211 /* 212 * Create "virtual-dma" property to support child devices 213 * needing to know DVMA range. 214 */ 215 px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1 216 - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT); 217 px_dvma_range.dvma_len = (uint32_t) 218 px_mmu_dvma_end - px_dvma_range.dvma_base + 1; 219 220 (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 221 "virtual-dma", (caddr_t)&px_dvma_range, 222 sizeof (px_dvma_range_prop_t)); 223 /* 224 * Initilize all fire hardware specific blocks. 225 */ 226 hvio_cb_init(xbc_csr_base, pxu_p); 227 hvio_ib_init(csr_base, pxu_p); 228 hvio_pec_init(csr_base, pxu_p); 229 hvio_mmu_init(csr_base, pxu_p); 230 231 px_p->px_plat_p = (void *)pxu_p; 232 233 /* 234 * Initialize all the interrupt handlers 235 */ 236 switch (PX_CHIP_TYPE(pxu_p)) { 237 case PX_CHIP_OBERON: 238 px_err_reg_enable(px_p, PX_ERR_UBC); 239 px_err_reg_enable(px_p, PX_ERR_MMU); 240 px_err_reg_enable(px_p, PX_ERR_IMU); 241 px_err_reg_enable(px_p, PX_ERR_TLU_UE); 242 px_err_reg_enable(px_p, PX_ERR_TLU_CE); 243 px_err_reg_enable(px_p, PX_ERR_TLU_OE); 244 245 /* 246 * Oberon hotplug uses SPARE3 field in ILU Error Log Enable 247 * register to indicate the status of leaf reset, 248 * we need to preserve the value of this bit, and keep it in 249 * px_ilu_log_mask to reflect the state of the bit 250 */ 251 if (CSR_BR(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3)) 252 px_ilu_log_mask |= (1ull << 253 ILU_ERROR_LOG_ENABLE_SPARE3); 254 else 255 px_ilu_log_mask &= ~(1ull << 256 ILU_ERROR_LOG_ENABLE_SPARE3); 257 px_err_reg_enable(px_p, PX_ERR_ILU); 258 259 px_fabric_die_rc_ue |= PCIE_AER_UCE_UC; 260 break; 261 262 case PX_CHIP_FIRE: 263 px_err_reg_enable(px_p, PX_ERR_JBC); 264 px_err_reg_enable(px_p, PX_ERR_MMU); 265 px_err_reg_enable(px_p, PX_ERR_IMU); 266 px_err_reg_enable(px_p, PX_ERR_TLU_UE); 267 px_err_reg_enable(px_p, PX_ERR_TLU_CE); 268 px_err_reg_enable(px_p, PX_ERR_TLU_OE); 269 px_err_reg_enable(px_p, PX_ERR_ILU); 270 px_err_reg_enable(px_p, PX_ERR_LPU_LINK); 271 px_err_reg_enable(px_p, PX_ERR_LPU_PHY); 272 px_err_reg_enable(px_p, PX_ERR_LPU_RX); 273 px_err_reg_enable(px_p, PX_ERR_LPU_TX); 274 px_err_reg_enable(px_p, PX_ERR_LPU_LTSSM); 275 px_err_reg_enable(px_p, PX_ERR_LPU_GIGABLZ); 276 break; 277 default: 278 cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n", 279 ddi_driver_name(dip), ddi_get_instance(dip)); 280 return (DDI_FAILURE); 281 } 282 283 /* Initilize device handle */ 284 *dev_hdl = (devhandle_t)csr_base; 285 286 DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl); 287 288 return (DDI_SUCCESS); 289 } 290 291 int 292 px_lib_dev_fini(dev_info_t *dip) 293 { 294 px_t *px_p = DIP_TO_STATE(dip); 295 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 296 297 DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip); 298 299 /* 300 * Deinitialize all the interrupt handlers 301 */ 302 switch (PX_CHIP_TYPE(pxu_p)) { 303 case PX_CHIP_OBERON: 304 px_err_reg_disable(px_p, PX_ERR_UBC); 305 px_err_reg_disable(px_p, PX_ERR_MMU); 306 px_err_reg_disable(px_p, PX_ERR_IMU); 307 px_err_reg_disable(px_p, PX_ERR_TLU_UE); 308 px_err_reg_disable(px_p, PX_ERR_TLU_CE); 309 px_err_reg_disable(px_p, PX_ERR_TLU_OE); 310 px_err_reg_disable(px_p, PX_ERR_ILU); 311 break; 312 case PX_CHIP_FIRE: 313 px_err_reg_disable(px_p, PX_ERR_JBC); 314 px_err_reg_disable(px_p, PX_ERR_MMU); 315 px_err_reg_disable(px_p, PX_ERR_IMU); 316 px_err_reg_disable(px_p, PX_ERR_TLU_UE); 317 px_err_reg_disable(px_p, PX_ERR_TLU_CE); 318 px_err_reg_disable(px_p, PX_ERR_TLU_OE); 319 px_err_reg_disable(px_p, PX_ERR_ILU); 320 px_err_reg_disable(px_p, PX_ERR_LPU_LINK); 321 px_err_reg_disable(px_p, PX_ERR_LPU_PHY); 322 px_err_reg_disable(px_p, PX_ERR_LPU_RX); 323 px_err_reg_disable(px_p, PX_ERR_LPU_TX); 324 px_err_reg_disable(px_p, PX_ERR_LPU_LTSSM); 325 px_err_reg_disable(px_p, PX_ERR_LPU_GIGABLZ); 326 break; 327 default: 328 cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n", 329 ddi_driver_name(dip), ddi_get_instance(dip)); 330 return (DDI_FAILURE); 331 } 332 333 iommu_tsb_free(pxu_p->tsb_cookie); 334 335 px_lib_unmap_regs((pxu_t *)px_p->px_plat_p); 336 kmem_free(px_p->px_plat_p, sizeof (pxu_t)); 337 px_p->px_plat_p = NULL; 338 339 return (DDI_SUCCESS); 340 } 341 342 /*ARGSUSED*/ 343 int 344 px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino, 345 sysino_t *sysino) 346 { 347 px_t *px_p = DIP_TO_STATE(dip); 348 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 349 uint64_t ret; 350 351 DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p " 352 "devino 0x%x\n", dip, devino); 353 354 if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip), 355 pxu_p, devino, sysino)) != H_EOK) { 356 DBG(DBG_LIB_INT, dip, 357 "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret); 358 return (DDI_FAILURE); 359 } 360 361 DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n", 362 *sysino); 363 364 return (DDI_SUCCESS); 365 } 366 367 /*ARGSUSED*/ 368 int 369 px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino, 370 intr_valid_state_t *intr_valid_state) 371 { 372 uint64_t ret; 373 374 DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n", 375 dip, sysino); 376 377 if ((ret = hvio_intr_getvalid(DIP_TO_HANDLE(dip), 378 sysino, intr_valid_state)) != H_EOK) { 379 DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n", 380 ret); 381 return (DDI_FAILURE); 382 } 383 384 DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n", 385 *intr_valid_state); 386 387 return (DDI_SUCCESS); 388 } 389 390 /*ARGSUSED*/ 391 int 392 px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino, 393 intr_valid_state_t intr_valid_state) 394 { 395 uint64_t ret; 396 397 DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx " 398 "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state); 399 400 if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip), 401 sysino, intr_valid_state)) != H_EOK) { 402 DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n", 403 ret); 404 return (DDI_FAILURE); 405 } 406 407 return (DDI_SUCCESS); 408 } 409 410 /*ARGSUSED*/ 411 int 412 px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino, 413 intr_state_t *intr_state) 414 { 415 uint64_t ret; 416 417 DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n", 418 dip, sysino); 419 420 if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip), 421 sysino, intr_state)) != H_EOK) { 422 DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n", 423 ret); 424 return (DDI_FAILURE); 425 } 426 427 DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n", 428 *intr_state); 429 430 return (DDI_SUCCESS); 431 } 432 433 /*ARGSUSED*/ 434 int 435 px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino, 436 intr_state_t intr_state) 437 { 438 uint64_t ret; 439 440 DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx " 441 "intr_state 0x%x\n", dip, sysino, intr_state); 442 443 if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip), 444 sysino, intr_state)) != H_EOK) { 445 DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n", 446 ret); 447 return (DDI_FAILURE); 448 } 449 450 return (DDI_SUCCESS); 451 } 452 453 /*ARGSUSED*/ 454 int 455 px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid) 456 { 457 px_t *px_p = DIP_TO_STATE(dip); 458 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 459 uint64_t ret; 460 461 DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n", 462 dip, sysino); 463 464 if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip), pxu_p, 465 sysino, cpuid)) != H_EOK) { 466 DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n", 467 ret); 468 return (DDI_FAILURE); 469 } 470 471 DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", cpuid); 472 473 return (DDI_SUCCESS); 474 } 475 476 /*ARGSUSED*/ 477 int 478 px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid) 479 { 480 px_t *px_p = DIP_TO_STATE(dip); 481 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 482 uint64_t ret; 483 484 DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx " 485 "cpuid 0x%x\n", dip, sysino, cpuid); 486 487 if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip), pxu_p, 488 sysino, cpuid)) != H_EOK) { 489 DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n", 490 ret); 491 return (DDI_FAILURE); 492 } 493 494 return (DDI_SUCCESS); 495 } 496 497 /*ARGSUSED*/ 498 int 499 px_lib_intr_reset(dev_info_t *dip) 500 { 501 devino_t ino; 502 sysino_t sysino; 503 504 DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip); 505 506 /* Reset all Interrupts */ 507 for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) { 508 if (px_lib_intr_devino_to_sysino(dip, ino, 509 &sysino) != DDI_SUCCESS) 510 return (BF_FATAL); 511 512 if (px_lib_intr_setstate(dip, sysino, 513 INTR_IDLE_STATE) != DDI_SUCCESS) 514 return (BF_FATAL); 515 } 516 517 return (BF_NONE); 518 } 519 520 /*ARGSUSED*/ 521 int 522 px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages, 523 io_attributes_t attr, void *addr, size_t pfn_index, int flags) 524 { 525 px_t *px_p = DIP_TO_STATE(dip); 526 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 527 uint64_t ret; 528 529 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx " 530 "pages 0x%x attr 0x%x addr 0x%p pfn_index 0x%llx flags 0x%x\n", 531 dip, tsbid, pages, attr, addr, pfn_index, flags); 532 533 if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages, 534 attr, addr, pfn_index, flags)) != H_EOK) { 535 DBG(DBG_LIB_DMA, dip, 536 "px_lib_iommu_map failed, ret 0x%lx\n", ret); 537 return (DDI_FAILURE); 538 } 539 540 return (DDI_SUCCESS); 541 } 542 543 /*ARGSUSED*/ 544 int 545 px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages) 546 { 547 px_t *px_p = DIP_TO_STATE(dip); 548 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 549 uint64_t ret; 550 551 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx " 552 "pages 0x%x\n", dip, tsbid, pages); 553 554 if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, tsbid, pages)) 555 != H_EOK) { 556 DBG(DBG_LIB_DMA, dip, 557 "px_lib_iommu_demap failed, ret 0x%lx\n", ret); 558 559 return (DDI_FAILURE); 560 } 561 562 return (DDI_SUCCESS); 563 } 564 565 /*ARGSUSED*/ 566 int 567 px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, io_attributes_t *attr_p, 568 r_addr_t *r_addr_p) 569 { 570 px_t *px_p = DIP_TO_STATE(dip); 571 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 572 uint64_t ret; 573 574 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n", 575 dip, tsbid); 576 577 if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid, 578 attr_p, r_addr_p)) != H_EOK) { 579 DBG(DBG_LIB_DMA, dip, 580 "hvio_iommu_getmap failed, ret 0x%lx\n", ret); 581 582 return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING:DDI_FAILURE); 583 } 584 585 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%x r_addr 0x%llx\n", 586 *attr_p, *r_addr_p); 587 588 return (DDI_SUCCESS); 589 } 590 591 592 /* 593 * Checks dma attributes against system bypass ranges 594 * The bypass range is determined by the hardware. Return them so the 595 * common code can do generic checking against them. 596 */ 597 /*ARGSUSED*/ 598 int 599 px_lib_dma_bypass_rngchk(dev_info_t *dip, ddi_dma_attr_t *attr_p, 600 uint64_t *lo_p, uint64_t *hi_p) 601 { 602 px_t *px_p = DIP_TO_STATE(dip); 603 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 604 605 *lo_p = hvio_get_bypass_base(pxu_p); 606 *hi_p = hvio_get_bypass_end(pxu_p); 607 608 return (DDI_SUCCESS); 609 } 610 611 612 /*ARGSUSED*/ 613 int 614 px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra, io_attributes_t attr, 615 io_addr_t *io_addr_p) 616 { 617 uint64_t ret; 618 px_t *px_p = DIP_TO_STATE(dip); 619 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 620 621 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx " 622 "attr 0x%x\n", dip, ra, attr); 623 624 if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), pxu_p, ra, 625 attr, io_addr_p)) != H_EOK) { 626 DBG(DBG_LIB_DMA, dip, 627 "hvio_iommu_getbypass failed, ret 0x%lx\n", ret); 628 return (DDI_FAILURE); 629 } 630 631 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n", 632 *io_addr_p); 633 634 return (DDI_SUCCESS); 635 } 636 637 /* 638 * bus dma sync entry point. 639 */ 640 /*ARGSUSED*/ 641 int 642 px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 643 off_t off, size_t len, uint_t cache_flags) 644 { 645 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 646 px_t *px_p = DIP_TO_STATE(dip); 647 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 648 649 DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p " 650 "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n", 651 dip, rdip, handle, off, len, cache_flags); 652 653 /* 654 * No flush needed for Oberon 655 */ 656 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) 657 return (DDI_SUCCESS); 658 659 /* 660 * jbus_stst_order is found only in certain cpu modules. 661 * Just return success if not present. 662 */ 663 if (&jbus_stst_order == NULL) 664 return (DDI_SUCCESS); 665 666 if (!(mp->dmai_flags & PX_DMAI_FLAGS_INUSE)) { 667 cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.", 668 ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp); 669 670 return (DDI_FAILURE); 671 } 672 673 if (mp->dmai_flags & PX_DMAI_FLAGS_NOSYNC) 674 return (DDI_SUCCESS); 675 676 /* 677 * No flush needed when sending data from memory to device. 678 * Nothing to do to "sync" memory to what device would already see. 679 */ 680 if (!(mp->dmai_rflags & DDI_DMA_READ) || 681 ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV)) 682 return (DDI_SUCCESS); 683 684 /* 685 * Perform necessary cpu workaround to ensure jbus ordering. 686 * CPU's internal "invalidate FIFOs" are flushed. 687 */ 688 689 #if !defined(lint) 690 kpreempt_disable(); 691 #endif 692 jbus_stst_order(); 693 #if !defined(lint) 694 kpreempt_enable(); 695 #endif 696 return (DDI_SUCCESS); 697 } 698 699 /* 700 * MSIQ Functions: 701 */ 702 /*ARGSUSED*/ 703 int 704 px_lib_msiq_init(dev_info_t *dip) 705 { 706 px_t *px_p = DIP_TO_STATE(dip); 707 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 708 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 709 caddr_t msiq_addr; 710 px_dvma_addr_t pg_index; 711 size_t size; 712 int ret; 713 714 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip); 715 716 /* 717 * Map the EQ memory into the Fire MMU (has to be 512KB aligned) 718 * and then initialize the base address register. 719 * 720 * Allocate entries from Fire IOMMU so that the resulting address 721 * is properly aligned. Calculate the index of the first allocated 722 * entry. Note: The size of the mapping is assumed to be a multiple 723 * of the page size. 724 */ 725 msiq_addr = (caddr_t)(((uint64_t)msiq_state_p->msiq_buf_p + 726 (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT); 727 728 size = msiq_state_p->msiq_cnt * 729 msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 730 731 pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map, 732 size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT); 733 734 if (pxu_p->msiq_mapped_p == NULL) 735 return (DDI_FAILURE); 736 737 pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p, 738 MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p)); 739 740 if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index), 741 MMU_BTOP(size), PCI_MAP_ATTR_WRITE, (void *)msiq_addr, 0, 742 MMU_MAP_BUF)) != DDI_SUCCESS) { 743 DBG(DBG_LIB_MSIQ, dip, 744 "hvio_msiq_init failed, ret 0x%lx\n", ret); 745 746 (void) px_lib_msiq_fini(dip); 747 return (DDI_FAILURE); 748 } 749 750 (void) hvio_msiq_init(DIP_TO_HANDLE(dip), pxu_p); 751 752 return (DDI_SUCCESS); 753 } 754 755 /*ARGSUSED*/ 756 int 757 px_lib_msiq_fini(dev_info_t *dip) 758 { 759 px_t *px_p = DIP_TO_STATE(dip); 760 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 761 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 762 px_dvma_addr_t pg_index; 763 size_t size; 764 765 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip); 766 767 /* 768 * Unmap and free the EQ memory that had been mapped 769 * into the Fire IOMMU. 770 */ 771 size = msiq_state_p->msiq_cnt * 772 msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 773 774 pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p, 775 MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p)); 776 777 (void) px_lib_iommu_demap(px_p->px_dip, 778 PCI_TSBID(0, pg_index), MMU_BTOP(size)); 779 780 /* Free the entries from the Fire MMU */ 781 vmem_xfree(px_p->px_mmu_p->mmu_dvma_map, 782 (void *)pxu_p->msiq_mapped_p, size); 783 784 return (DDI_SUCCESS); 785 } 786 787 /*ARGSUSED*/ 788 int 789 px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p, 790 uint_t *msiq_rec_cnt_p) 791 { 792 px_t *px_p = DIP_TO_STATE(dip); 793 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 794 uint64_t *msiq_addr; 795 size_t msiq_size; 796 797 DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n", 798 dip, msiq_id); 799 800 msiq_addr = (uint64_t *)(((uint64_t)msiq_state_p->msiq_buf_p + 801 (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT); 802 msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 803 ra_p = (r_addr_t *)((caddr_t)msiq_addr + (msiq_id * msiq_size)); 804 805 *msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt; 806 807 DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%p msiq_rec_cnt 0x%x\n", 808 ra_p, *msiq_rec_cnt_p); 809 810 return (DDI_SUCCESS); 811 } 812 813 /*ARGSUSED*/ 814 int 815 px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id, 816 pci_msiq_valid_state_t *msiq_valid_state) 817 { 818 uint64_t ret; 819 820 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n", 821 dip, msiq_id); 822 823 if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip), 824 msiq_id, msiq_valid_state)) != H_EOK) { 825 DBG(DBG_LIB_MSIQ, dip, 826 "hvio_msiq_getvalid failed, ret 0x%lx\n", ret); 827 return (DDI_FAILURE); 828 } 829 830 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n", 831 *msiq_valid_state); 832 833 return (DDI_SUCCESS); 834 } 835 836 /*ARGSUSED*/ 837 int 838 px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id, 839 pci_msiq_valid_state_t msiq_valid_state) 840 { 841 uint64_t ret; 842 843 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x " 844 "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state); 845 846 if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip), 847 msiq_id, msiq_valid_state)) != H_EOK) { 848 DBG(DBG_LIB_MSIQ, dip, 849 "hvio_msiq_setvalid failed, ret 0x%lx\n", ret); 850 return (DDI_FAILURE); 851 } 852 853 return (DDI_SUCCESS); 854 } 855 856 /*ARGSUSED*/ 857 int 858 px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id, 859 pci_msiq_state_t *msiq_state) 860 { 861 uint64_t ret; 862 863 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n", 864 dip, msiq_id); 865 866 if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip), 867 msiq_id, msiq_state)) != H_EOK) { 868 DBG(DBG_LIB_MSIQ, dip, 869 "hvio_msiq_getstate failed, ret 0x%lx\n", ret); 870 return (DDI_FAILURE); 871 } 872 873 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n", 874 *msiq_state); 875 876 return (DDI_SUCCESS); 877 } 878 879 /*ARGSUSED*/ 880 int 881 px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id, 882 pci_msiq_state_t msiq_state) 883 { 884 uint64_t ret; 885 886 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x " 887 "msiq_state 0x%x\n", dip, msiq_id, msiq_state); 888 889 if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip), 890 msiq_id, msiq_state)) != H_EOK) { 891 DBG(DBG_LIB_MSIQ, dip, 892 "hvio_msiq_setstate failed, ret 0x%lx\n", ret); 893 return (DDI_FAILURE); 894 } 895 896 return (DDI_SUCCESS); 897 } 898 899 /*ARGSUSED*/ 900 int 901 px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id, 902 msiqhead_t *msiq_head) 903 { 904 uint64_t ret; 905 906 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n", 907 dip, msiq_id); 908 909 if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip), 910 msiq_id, msiq_head)) != H_EOK) { 911 DBG(DBG_LIB_MSIQ, dip, 912 "hvio_msiq_gethead failed, ret 0x%lx\n", ret); 913 return (DDI_FAILURE); 914 } 915 916 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n", 917 *msiq_head); 918 919 return (DDI_SUCCESS); 920 } 921 922 /*ARGSUSED*/ 923 int 924 px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id, 925 msiqhead_t msiq_head) 926 { 927 uint64_t ret; 928 929 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x " 930 "msiq_head 0x%x\n", dip, msiq_id, msiq_head); 931 932 if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip), 933 msiq_id, msiq_head)) != H_EOK) { 934 DBG(DBG_LIB_MSIQ, dip, 935 "hvio_msiq_sethead failed, ret 0x%lx\n", ret); 936 return (DDI_FAILURE); 937 } 938 939 return (DDI_SUCCESS); 940 } 941 942 /*ARGSUSED*/ 943 int 944 px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id, 945 msiqtail_t *msiq_tail) 946 { 947 uint64_t ret; 948 949 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n", 950 dip, msiq_id); 951 952 if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip), 953 msiq_id, msiq_tail)) != H_EOK) { 954 DBG(DBG_LIB_MSIQ, dip, 955 "hvio_msiq_gettail failed, ret 0x%lx\n", ret); 956 return (DDI_FAILURE); 957 } 958 959 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n", 960 *msiq_tail); 961 962 return (DDI_SUCCESS); 963 } 964 965 /*ARGSUSED*/ 966 void 967 px_lib_get_msiq_rec(dev_info_t *dip, px_msiq_t *msiq_p, msiq_rec_t *msiq_rec_p) 968 { 969 eq_rec_t *eq_rec_p = (eq_rec_t *)msiq_p->msiq_curr; 970 971 DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n", 972 dip, eq_rec_p); 973 974 if (!eq_rec_p->eq_rec_fmt_type) { 975 /* Set msiq_rec_type to zero */ 976 msiq_rec_p->msiq_rec_type = 0; 977 978 return; 979 } 980 981 DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, " 982 "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx " 983 "eq_rec_len 0x%llx eq_rec_addr0 0x%llx " 984 "eq_rec_addr1 0x%llx eq_rec_data0 0x%llx " 985 "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid, 986 eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len, 987 eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1, 988 eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1); 989 990 /* 991 * Only upper 4 bits of eq_rec_fmt_type is used 992 * to identify the EQ record type. 993 */ 994 switch (eq_rec_p->eq_rec_fmt_type >> 3) { 995 case EQ_REC_MSI32: 996 msiq_rec_p->msiq_rec_type = MSI32_REC; 997 998 msiq_rec_p->msiq_rec_data.msi.msi_data = 999 eq_rec_p->eq_rec_data0; 1000 break; 1001 case EQ_REC_MSI64: 1002 msiq_rec_p->msiq_rec_type = MSI64_REC; 1003 1004 msiq_rec_p->msiq_rec_data.msi.msi_data = 1005 eq_rec_p->eq_rec_data0; 1006 break; 1007 case EQ_REC_MSG: 1008 msiq_rec_p->msiq_rec_type = MSG_REC; 1009 1010 msiq_rec_p->msiq_rec_data.msg.msg_route = 1011 eq_rec_p->eq_rec_fmt_type & 7; 1012 msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid; 1013 msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0; 1014 break; 1015 default: 1016 cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: " 1017 "0x%x is an unknown EQ record type", 1018 ddi_driver_name(dip), ddi_get_instance(dip), 1019 (int)eq_rec_p->eq_rec_fmt_type); 1020 break; 1021 } 1022 1023 msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid; 1024 msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) | 1025 (eq_rec_p->eq_rec_addr0 << 2)); 1026 1027 /* Zero out eq_rec_fmt_type field */ 1028 eq_rec_p->eq_rec_fmt_type = 0; 1029 } 1030 1031 /* 1032 * MSI Functions: 1033 */ 1034 /*ARGSUSED*/ 1035 int 1036 px_lib_msi_init(dev_info_t *dip) 1037 { 1038 px_t *px_p = DIP_TO_STATE(dip); 1039 px_msi_state_t *msi_state_p = &px_p->px_ib_p->ib_msi_state; 1040 uint64_t ret; 1041 1042 DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip); 1043 1044 if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip), 1045 msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) { 1046 DBG(DBG_LIB_MSIQ, dip, "px_lib_msi_init failed, ret 0x%lx\n", 1047 ret); 1048 return (DDI_FAILURE); 1049 } 1050 1051 return (DDI_SUCCESS); 1052 } 1053 1054 /*ARGSUSED*/ 1055 int 1056 px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num, 1057 msiqid_t *msiq_id) 1058 { 1059 uint64_t ret; 1060 1061 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n", 1062 dip, msi_num); 1063 1064 if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip), 1065 msi_num, msiq_id)) != H_EOK) { 1066 DBG(DBG_LIB_MSI, dip, 1067 "hvio_msi_getmsiq failed, ret 0x%lx\n", ret); 1068 return (DDI_FAILURE); 1069 } 1070 1071 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n", 1072 *msiq_id); 1073 1074 return (DDI_SUCCESS); 1075 } 1076 1077 /*ARGSUSED*/ 1078 int 1079 px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num, 1080 msiqid_t msiq_id, msi_type_t msitype) 1081 { 1082 uint64_t ret; 1083 1084 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x " 1085 "msq_id 0x%x\n", dip, msi_num, msiq_id); 1086 1087 if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip), 1088 msi_num, msiq_id)) != H_EOK) { 1089 DBG(DBG_LIB_MSI, dip, 1090 "hvio_msi_setmsiq failed, ret 0x%lx\n", ret); 1091 return (DDI_FAILURE); 1092 } 1093 1094 return (DDI_SUCCESS); 1095 } 1096 1097 /*ARGSUSED*/ 1098 int 1099 px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num, 1100 pci_msi_valid_state_t *msi_valid_state) 1101 { 1102 uint64_t ret; 1103 1104 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n", 1105 dip, msi_num); 1106 1107 if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip), 1108 msi_num, msi_valid_state)) != H_EOK) { 1109 DBG(DBG_LIB_MSI, dip, 1110 "hvio_msi_getvalid failed, ret 0x%lx\n", ret); 1111 return (DDI_FAILURE); 1112 } 1113 1114 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msiq_id 0x%x\n", 1115 *msi_valid_state); 1116 1117 return (DDI_SUCCESS); 1118 } 1119 1120 /*ARGSUSED*/ 1121 int 1122 px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num, 1123 pci_msi_valid_state_t msi_valid_state) 1124 { 1125 uint64_t ret; 1126 1127 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x " 1128 "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state); 1129 1130 if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip), 1131 msi_num, msi_valid_state)) != H_EOK) { 1132 DBG(DBG_LIB_MSI, dip, 1133 "hvio_msi_setvalid failed, ret 0x%lx\n", ret); 1134 return (DDI_FAILURE); 1135 } 1136 1137 return (DDI_SUCCESS); 1138 } 1139 1140 /*ARGSUSED*/ 1141 int 1142 px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num, 1143 pci_msi_state_t *msi_state) 1144 { 1145 uint64_t ret; 1146 1147 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n", 1148 dip, msi_num); 1149 1150 if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip), 1151 msi_num, msi_state)) != H_EOK) { 1152 DBG(DBG_LIB_MSI, dip, 1153 "hvio_msi_getstate failed, ret 0x%lx\n", ret); 1154 return (DDI_FAILURE); 1155 } 1156 1157 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n", 1158 *msi_state); 1159 1160 return (DDI_SUCCESS); 1161 } 1162 1163 /*ARGSUSED*/ 1164 int 1165 px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num, 1166 pci_msi_state_t msi_state) 1167 { 1168 uint64_t ret; 1169 1170 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x " 1171 "msi_state 0x%x\n", dip, msi_num, msi_state); 1172 1173 if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip), 1174 msi_num, msi_state)) != H_EOK) { 1175 DBG(DBG_LIB_MSI, dip, 1176 "hvio_msi_setstate failed, ret 0x%lx\n", ret); 1177 return (DDI_FAILURE); 1178 } 1179 1180 return (DDI_SUCCESS); 1181 } 1182 1183 /* 1184 * MSG Functions: 1185 */ 1186 /*ARGSUSED*/ 1187 int 1188 px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type, 1189 msiqid_t *msiq_id) 1190 { 1191 uint64_t ret; 1192 1193 DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n", 1194 dip, msg_type); 1195 1196 if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip), 1197 msg_type, msiq_id)) != H_EOK) { 1198 DBG(DBG_LIB_MSG, dip, 1199 "hvio_msg_getmsiq failed, ret 0x%lx\n", ret); 1200 return (DDI_FAILURE); 1201 } 1202 1203 DBG(DBG_LIB_MSI, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n", 1204 *msiq_id); 1205 1206 return (DDI_SUCCESS); 1207 } 1208 1209 /*ARGSUSED*/ 1210 int 1211 px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type, 1212 msiqid_t msiq_id) 1213 { 1214 uint64_t ret; 1215 1216 DBG(DBG_LIB_MSG, dip, "px_lib_msi_setstate: dip 0x%p msg_type 0x%x " 1217 "msiq_id 0x%x\n", dip, msg_type, msiq_id); 1218 1219 if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip), 1220 msg_type, msiq_id)) != H_EOK) { 1221 DBG(DBG_LIB_MSG, dip, 1222 "hvio_msg_setmsiq failed, ret 0x%lx\n", ret); 1223 return (DDI_FAILURE); 1224 } 1225 1226 return (DDI_SUCCESS); 1227 } 1228 1229 /*ARGSUSED*/ 1230 int 1231 px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type, 1232 pcie_msg_valid_state_t *msg_valid_state) 1233 { 1234 uint64_t ret; 1235 1236 DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n", 1237 dip, msg_type); 1238 1239 if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type, 1240 msg_valid_state)) != H_EOK) { 1241 DBG(DBG_LIB_MSG, dip, 1242 "hvio_msg_getvalid failed, ret 0x%lx\n", ret); 1243 return (DDI_FAILURE); 1244 } 1245 1246 DBG(DBG_LIB_MSI, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n", 1247 *msg_valid_state); 1248 1249 return (DDI_SUCCESS); 1250 } 1251 1252 /*ARGSUSED*/ 1253 int 1254 px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type, 1255 pcie_msg_valid_state_t msg_valid_state) 1256 { 1257 uint64_t ret; 1258 1259 DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x " 1260 "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state); 1261 1262 if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type, 1263 msg_valid_state)) != H_EOK) { 1264 DBG(DBG_LIB_MSG, dip, 1265 "hvio_msg_setvalid failed, ret 0x%lx\n", ret); 1266 return (DDI_FAILURE); 1267 } 1268 1269 return (DDI_SUCCESS); 1270 } 1271 1272 /* 1273 * Suspend/Resume Functions: 1274 * Currently unsupported by hypervisor 1275 */ 1276 int 1277 px_lib_suspend(dev_info_t *dip) 1278 { 1279 px_t *px_p = DIP_TO_STATE(dip); 1280 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1281 px_cb_t *cb_p = PX2CB(px_p); 1282 devhandle_t dev_hdl, xbus_dev_hdl; 1283 uint64_t ret = H_EOK; 1284 1285 DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip); 1286 1287 dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR]; 1288 xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC]; 1289 1290 if ((ret = hvio_suspend(dev_hdl, pxu_p)) != H_EOK) 1291 goto fail; 1292 1293 if (--cb_p->attachcnt == 0) { 1294 ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p); 1295 if (ret != H_EOK) 1296 cb_p->attachcnt++; 1297 } 1298 1299 fail: 1300 return ((ret != H_EOK) ? DDI_FAILURE: DDI_SUCCESS); 1301 } 1302 1303 void 1304 px_lib_resume(dev_info_t *dip) 1305 { 1306 px_t *px_p = DIP_TO_STATE(dip); 1307 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1308 px_cb_t *cb_p = PX2CB(px_p); 1309 devhandle_t dev_hdl, xbus_dev_hdl; 1310 devino_t pec_ino = px_p->px_inos[PX_INTR_PEC]; 1311 devino_t xbc_ino = px_p->px_inos[PX_INTR_XBC]; 1312 1313 DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip); 1314 1315 dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR]; 1316 xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC]; 1317 1318 if (++cb_p->attachcnt == 1) 1319 hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p); 1320 1321 hvio_resume(dev_hdl, pec_ino, pxu_p); 1322 } 1323 1324 /* 1325 * Generate a unique Oberon UBC ID based on the Logicial System Board and 1326 * the IO Channel from the portid property field. 1327 */ 1328 static uint64_t 1329 oberon_get_ubc_id(dev_info_t *dip) 1330 { 1331 px_t *px_p = DIP_TO_STATE(dip); 1332 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1333 uint64_t ubc_id; 1334 1335 /* 1336 * Generate a unique 6 bit UBC ID using the 2 IO_Channel#[1:0] bits and 1337 * the 4 LSB_ID[3:0] bits from the Oberon's portid property. 1338 */ 1339 ubc_id = (((pxu_p->portid >> OBERON_PORT_ID_IOC) & 1340 OBERON_PORT_ID_IOC_MASK) | (((pxu_p->portid >> 1341 OBERON_PORT_ID_LSB) & OBERON_PORT_ID_LSB_MASK) 1342 << OBERON_UBC_ID_LSB)); 1343 1344 return (ubc_id); 1345 } 1346 1347 /* 1348 * Oberon does not have a UBC scratch register, so alloc an array of scratch 1349 * registers when needed and use a unique UBC ID as an index. This code 1350 * can be simplified if we use a pre-allocated array. They are currently 1351 * being dynamically allocated because it's only needed by the Oberon. 1352 */ 1353 static void 1354 oberon_set_cb(dev_info_t *dip, uint64_t val) 1355 { 1356 uint64_t ubc_id; 1357 1358 if (px_oberon_ubc_scratch_regs == NULL) 1359 px_oberon_ubc_scratch_regs = 1360 (uint64_t *)kmem_zalloc(sizeof (uint64_t)* 1361 OBERON_UBC_ID_MAX, KM_SLEEP); 1362 1363 ubc_id = oberon_get_ubc_id(dip); 1364 1365 px_oberon_ubc_scratch_regs[ubc_id] = val; 1366 1367 /* 1368 * Check if any scratch registers are still in use. If all scratch 1369 * registers are currently set to zero, then deallocate the scratch 1370 * register array. 1371 */ 1372 for (ubc_id = 0; ubc_id < OBERON_UBC_ID_MAX; ubc_id++) { 1373 if (px_oberon_ubc_scratch_regs[ubc_id] != NULL) 1374 return; 1375 } 1376 1377 /* 1378 * All scratch registers are set to zero so deallocate the scratch 1379 * register array and set the pointer to NULL. 1380 */ 1381 kmem_free(px_oberon_ubc_scratch_regs, 1382 (sizeof (uint64_t)*OBERON_UBC_ID_MAX)); 1383 1384 px_oberon_ubc_scratch_regs = NULL; 1385 } 1386 1387 /* 1388 * Oberon does not have a UBC scratch register, so use an allocated array of 1389 * scratch registers and use the unique UBC ID as an index into that array. 1390 */ 1391 static uint64_t 1392 oberon_get_cb(dev_info_t *dip) 1393 { 1394 uint64_t ubc_id; 1395 1396 if (px_oberon_ubc_scratch_regs == NULL) 1397 return (0); 1398 1399 ubc_id = oberon_get_ubc_id(dip); 1400 1401 return (px_oberon_ubc_scratch_regs[ubc_id]); 1402 } 1403 1404 /* 1405 * Misc Functions: 1406 * Currently unsupported by hypervisor 1407 */ 1408 static uint64_t 1409 px_get_cb(dev_info_t *dip) 1410 { 1411 px_t *px_p = DIP_TO_STATE(dip); 1412 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1413 1414 /* 1415 * Oberon does not currently have Scratchpad registers. 1416 */ 1417 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) 1418 return (oberon_get_cb(dip)); 1419 1420 return (CSR_XR((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1)); 1421 } 1422 1423 static void 1424 px_set_cb(dev_info_t *dip, uint64_t val) 1425 { 1426 px_t *px_p = DIP_TO_STATE(dip); 1427 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1428 1429 /* 1430 * Oberon does not currently have Scratchpad registers. 1431 */ 1432 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) { 1433 oberon_set_cb(dip, val); 1434 return; 1435 } 1436 1437 CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, val); 1438 } 1439 1440 /*ARGSUSED*/ 1441 int 1442 px_lib_map_vconfig(dev_info_t *dip, 1443 ddi_map_req_t *mp, pci_config_offset_t off, 1444 pci_regspec_t *rp, caddr_t *addrp) 1445 { 1446 /* 1447 * No special config space access services in this layer. 1448 */ 1449 return (DDI_FAILURE); 1450 } 1451 1452 void 1453 px_lib_map_attr_check(ddi_map_req_t *mp) 1454 { 1455 ddi_acc_hdl_t *hp = mp->map_handlep; 1456 1457 /* fire does not accept byte masks from PIO store merge */ 1458 if (hp->ah_acc.devacc_attr_dataorder == DDI_STORECACHING_OK_ACC) 1459 hp->ah_acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 1460 } 1461 1462 void 1463 px_lib_clr_errs(px_t *px_p) 1464 { 1465 px_pec_t *pec_p = px_p->px_pec_p; 1466 dev_info_t *rpdip = px_p->px_dip; 1467 int err = PX_OK, ret; 1468 int acctype = pec_p->pec_safeacc_type; 1469 ddi_fm_error_t derr; 1470 1471 /* Create the derr */ 1472 bzero(&derr, sizeof (ddi_fm_error_t)); 1473 derr.fme_version = DDI_FME_VERSION; 1474 derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1); 1475 derr.fme_flag = acctype; 1476 1477 if (acctype == DDI_FM_ERR_EXPECTED) { 1478 derr.fme_status = DDI_FM_NONFATAL; 1479 ndi_fm_acc_err_set(pec_p->pec_acc_hdl, &derr); 1480 } 1481 1482 mutex_enter(&px_p->px_fm_mutex); 1483 1484 /* send ereport/handle/clear fire registers */ 1485 err = px_err_handle(px_p, &derr, PX_LIB_CALL, B_TRUE); 1486 1487 /* Check all child devices for errors */ 1488 ret = ndi_fm_handler_dispatch(rpdip, NULL, &derr); 1489 1490 mutex_exit(&px_p->px_fm_mutex); 1491 1492 /* 1493 * PX_FATAL_HW indicates a condition recovered from Fatal-Reset, 1494 * therefore it does not cause panic. 1495 */ 1496 if ((err & (PX_FATAL_GOS | PX_FATAL_SW)) || (ret == DDI_FM_FATAL)) 1497 PX_FM_PANIC("Fatal System Port Error has occurred\n"); 1498 } 1499 1500 #ifdef DEBUG 1501 int px_peekfault_cnt = 0; 1502 int px_pokefault_cnt = 0; 1503 #endif /* DEBUG */ 1504 1505 /*ARGSUSED*/ 1506 static int 1507 px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip, 1508 peekpoke_ctlops_t *in_args) 1509 { 1510 px_t *px_p = DIP_TO_STATE(dip); 1511 px_pec_t *pec_p = px_p->px_pec_p; 1512 int err = DDI_SUCCESS; 1513 on_trap_data_t otd; 1514 1515 mutex_enter(&pec_p->pec_pokefault_mutex); 1516 pec_p->pec_ontrap_data = &otd; 1517 pec_p->pec_safeacc_type = DDI_FM_ERR_POKE; 1518 1519 /* Set up protected environment. */ 1520 if (!on_trap(&otd, OT_DATA_ACCESS)) { 1521 uintptr_t tramp = otd.ot_trampoline; 1522 1523 otd.ot_trampoline = (uintptr_t)&poke_fault; 1524 err = do_poke(in_args->size, (void *)in_args->dev_addr, 1525 (void *)in_args->host_addr); 1526 otd.ot_trampoline = tramp; 1527 } else 1528 err = DDI_FAILURE; 1529 1530 px_lib_clr_errs(px_p); 1531 1532 if (otd.ot_trap & OT_DATA_ACCESS) 1533 err = DDI_FAILURE; 1534 1535 /* Take down protected environment. */ 1536 no_trap(); 1537 1538 pec_p->pec_ontrap_data = NULL; 1539 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1540 mutex_exit(&pec_p->pec_pokefault_mutex); 1541 1542 #ifdef DEBUG 1543 if (err == DDI_FAILURE) 1544 px_pokefault_cnt++; 1545 #endif 1546 return (err); 1547 } 1548 1549 /*ARGSUSED*/ 1550 static int 1551 px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip, 1552 peekpoke_ctlops_t *cautacc_ctlops_arg) 1553 { 1554 size_t size = cautacc_ctlops_arg->size; 1555 uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr; 1556 uintptr_t host_addr = cautacc_ctlops_arg->host_addr; 1557 ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle; 1558 size_t repcount = cautacc_ctlops_arg->repcount; 1559 uint_t flags = cautacc_ctlops_arg->flags; 1560 1561 px_t *px_p = DIP_TO_STATE(dip); 1562 px_pec_t *pec_p = px_p->px_pec_p; 1563 int err = DDI_SUCCESS; 1564 1565 /* 1566 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault 1567 * mutex. 1568 */ 1569 i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1570 1571 pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap; 1572 pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED; 1573 hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED; 1574 1575 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1576 for (; repcount; repcount--) { 1577 switch (size) { 1578 1579 case sizeof (uint8_t): 1580 i_ddi_put8(hp, (uint8_t *)dev_addr, 1581 *(uint8_t *)host_addr); 1582 break; 1583 1584 case sizeof (uint16_t): 1585 i_ddi_put16(hp, (uint16_t *)dev_addr, 1586 *(uint16_t *)host_addr); 1587 break; 1588 1589 case sizeof (uint32_t): 1590 i_ddi_put32(hp, (uint32_t *)dev_addr, 1591 *(uint32_t *)host_addr); 1592 break; 1593 1594 case sizeof (uint64_t): 1595 i_ddi_put64(hp, (uint64_t *)dev_addr, 1596 *(uint64_t *)host_addr); 1597 break; 1598 } 1599 1600 host_addr += size; 1601 1602 if (flags == DDI_DEV_AUTOINCR) 1603 dev_addr += size; 1604 1605 px_lib_clr_errs(px_p); 1606 1607 if (pec_p->pec_ontrap_data->ot_trap & OT_DATA_ACCESS) { 1608 err = DDI_FAILURE; 1609 #ifdef DEBUG 1610 px_pokefault_cnt++; 1611 #endif 1612 break; 1613 } 1614 } 1615 } 1616 1617 i_ddi_notrap((ddi_acc_handle_t)hp); 1618 pec_p->pec_ontrap_data = NULL; 1619 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1620 i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1621 hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED; 1622 1623 return (err); 1624 } 1625 1626 1627 int 1628 px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip, 1629 peekpoke_ctlops_t *in_args) 1630 { 1631 return (in_args->handle ? px_lib_do_caut_put(dip, rdip, in_args) : 1632 px_lib_do_poke(dip, rdip, in_args)); 1633 } 1634 1635 1636 /*ARGSUSED*/ 1637 static int 1638 px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args) 1639 { 1640 px_t *px_p = DIP_TO_STATE(dip); 1641 px_pec_t *pec_p = px_p->px_pec_p; 1642 int err = DDI_SUCCESS; 1643 on_trap_data_t otd; 1644 1645 mutex_enter(&pec_p->pec_pokefault_mutex); 1646 pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK; 1647 1648 if (!on_trap(&otd, OT_DATA_ACCESS)) { 1649 uintptr_t tramp = otd.ot_trampoline; 1650 1651 otd.ot_trampoline = (uintptr_t)&peek_fault; 1652 err = do_peek(in_args->size, (void *)in_args->dev_addr, 1653 (void *)in_args->host_addr); 1654 otd.ot_trampoline = tramp; 1655 } else 1656 err = DDI_FAILURE; 1657 1658 no_trap(); 1659 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1660 mutex_exit(&pec_p->pec_pokefault_mutex); 1661 1662 #ifdef DEBUG 1663 if (err == DDI_FAILURE) 1664 px_peekfault_cnt++; 1665 #endif 1666 return (err); 1667 } 1668 1669 1670 static int 1671 px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg) 1672 { 1673 size_t size = cautacc_ctlops_arg->size; 1674 uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr; 1675 uintptr_t host_addr = cautacc_ctlops_arg->host_addr; 1676 ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle; 1677 size_t repcount = cautacc_ctlops_arg->repcount; 1678 uint_t flags = cautacc_ctlops_arg->flags; 1679 1680 px_t *px_p = DIP_TO_STATE(dip); 1681 px_pec_t *pec_p = px_p->px_pec_p; 1682 int err = DDI_SUCCESS; 1683 1684 /* 1685 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault 1686 * mutex. 1687 */ 1688 i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1689 1690 pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap; 1691 pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED; 1692 hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED; 1693 1694 if (repcount == 1) { 1695 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1696 i_ddi_caut_get(size, (void *)dev_addr, 1697 (void *)host_addr); 1698 } else { 1699 int i; 1700 uint8_t *ff_addr = (uint8_t *)host_addr; 1701 for (i = 0; i < size; i++) 1702 *ff_addr++ = 0xff; 1703 1704 err = DDI_FAILURE; 1705 #ifdef DEBUG 1706 px_peekfault_cnt++; 1707 #endif 1708 } 1709 } else { 1710 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1711 for (; repcount; repcount--) { 1712 i_ddi_caut_get(size, (void *)dev_addr, 1713 (void *)host_addr); 1714 1715 host_addr += size; 1716 1717 if (flags == DDI_DEV_AUTOINCR) 1718 dev_addr += size; 1719 } 1720 } else { 1721 err = DDI_FAILURE; 1722 #ifdef DEBUG 1723 px_peekfault_cnt++; 1724 #endif 1725 } 1726 } 1727 1728 i_ddi_notrap((ddi_acc_handle_t)hp); 1729 pec_p->pec_ontrap_data = NULL; 1730 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1731 i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1732 hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED; 1733 1734 return (err); 1735 } 1736 1737 /*ARGSUSED*/ 1738 int 1739 px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip, 1740 peekpoke_ctlops_t *in_args, void *result) 1741 { 1742 result = (void *)in_args->host_addr; 1743 return (in_args->handle ? px_lib_do_caut_get(dip, in_args) : 1744 px_lib_do_peek(dip, in_args)); 1745 } 1746 1747 /* 1748 * implements PPM interface 1749 */ 1750 int 1751 px_lib_pmctl(int cmd, px_t *px_p) 1752 { 1753 ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ); 1754 switch (cmd) { 1755 case PPMREQ_PRE_PWR_OFF: 1756 /* 1757 * Currently there is no device power management for 1758 * the root complex (fire). When there is we need to make 1759 * sure that it is at full power before trying to send the 1760 * PME_Turn_Off message. 1761 */ 1762 DBG(DBG_PWR, px_p->px_dip, 1763 "ioctl: request to send PME_Turn_Off\n"); 1764 return (px_goto_l23ready(px_p)); 1765 1766 case PPMREQ_PRE_PWR_ON: 1767 DBG(DBG_PWR, px_p->px_dip, "ioctl: PRE_PWR_ON request\n"); 1768 return (px_pre_pwron_check(px_p)); 1769 1770 case PPMREQ_POST_PWR_ON: 1771 DBG(DBG_PWR, px_p->px_dip, "ioctl: POST_PWR_ON request\n"); 1772 return (px_goto_l0(px_p)); 1773 1774 default: 1775 return (DDI_FAILURE); 1776 } 1777 } 1778 1779 /* 1780 * sends PME_Turn_Off message to put the link in L2/L3 ready state. 1781 * called by px_ioctl. 1782 * returns DDI_SUCCESS or DDI_FAILURE 1783 * 1. Wait for link to be in L1 state (link status reg) 1784 * 2. write to PME_Turn_off reg to boradcast 1785 * 3. set timeout 1786 * 4. If timeout, return failure. 1787 * 5. If PM_TO_Ack, wait till link is in L2/L3 ready 1788 */ 1789 static int 1790 px_goto_l23ready(px_t *px_p) 1791 { 1792 pcie_pwr_t *pwr_p; 1793 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1794 caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR]; 1795 int ret = DDI_SUCCESS; 1796 clock_t end, timeleft; 1797 int mutex_held = 1; 1798 1799 /* If no PM info, return failure */ 1800 if (!PCIE_PMINFO(px_p->px_dip) || 1801 !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip))) 1802 return (DDI_FAILURE); 1803 1804 mutex_enter(&pwr_p->pwr_lock); 1805 mutex_enter(&px_p->px_l23ready_lock); 1806 /* Clear the PME_To_ACK receieved flag */ 1807 px_p->px_pm_flags &= ~PX_PMETOACK_RECVD; 1808 /* 1809 * When P25 is the downstream device, after receiving 1810 * PME_To_ACK, fire will go to Detect state, which causes 1811 * the link down event. Inform FMA that this is expected. 1812 * In case of all other cards complaint with the pci express 1813 * spec, this will happen when the power is re-applied. FMA 1814 * code will clear this flag after one instance of LDN. Since 1815 * there will not be a LDN event for the spec compliant cards, 1816 * we need to clear the flag after receiving PME_To_ACK. 1817 */ 1818 px_p->px_pm_flags |= PX_LDN_EXPECTED; 1819 if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) { 1820 ret = DDI_FAILURE; 1821 goto l23ready_done; 1822 } 1823 px_p->px_pm_flags |= PX_PME_TURNOFF_PENDING; 1824 1825 end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout); 1826 while (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) { 1827 timeleft = cv_timedwait(&px_p->px_l23ready_cv, 1828 &px_p->px_l23ready_lock, end); 1829 /* 1830 * if cv_timedwait returns -1, it is either 1831 * 1) timed out or 1832 * 2) there was a pre-mature wakeup but by the time 1833 * cv_timedwait is called again end < lbolt i.e. 1834 * end is in the past. 1835 * 3) By the time we make first cv_timedwait call, 1836 * end < lbolt is true. 1837 */ 1838 if (timeleft == -1) 1839 break; 1840 } 1841 if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) { 1842 /* 1843 * Either timedout or interrupt didn't get a 1844 * chance to grab the mutex and set the flag. 1845 * release the mutex and delay for sometime. 1846 * This will 1) give a chance for interrupt to 1847 * set the flag 2) creates a delay between two 1848 * consequetive requests. 1849 */ 1850 mutex_exit(&px_p->px_l23ready_lock); 1851 delay(drv_usectohz(50 * PX_MSEC_TO_USEC)); 1852 mutex_held = 0; 1853 if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) { 1854 ret = DDI_FAILURE; 1855 DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting" 1856 " for PME_TO_ACK\n"); 1857 } 1858 } 1859 px_p->px_pm_flags &= 1860 ~(PX_PME_TURNOFF_PENDING | PX_PMETOACK_RECVD | PX_LDN_EXPECTED); 1861 1862 l23ready_done: 1863 if (mutex_held) 1864 mutex_exit(&px_p->px_l23ready_lock); 1865 /* 1866 * Wait till link is in L1 idle, if sending PME_Turn_Off 1867 * was succesful. 1868 */ 1869 if (ret == DDI_SUCCESS) { 1870 if (px_link_wait4l1idle(csr_base) != DDI_SUCCESS) { 1871 DBG(DBG_PWR, px_p->px_dip, " Link is not at L1" 1872 " even though we received PME_To_ACK.\n"); 1873 /* 1874 * Workaround for hardware bug with P25. 1875 * Due to a hardware bug with P25, link state 1876 * will be Detect state rather than L1 after 1877 * link is transitioned to L23Ready state. Since 1878 * we don't know whether link is L23ready state 1879 * without Fire's state being L1_idle, we delay 1880 * here just to make sure that we wait till link 1881 * is transitioned to L23Ready state. 1882 */ 1883 delay(drv_usectohz(100 * PX_MSEC_TO_USEC)); 1884 } 1885 pwr_p->pwr_link_lvl = PM_LEVEL_L3; 1886 1887 } 1888 mutex_exit(&pwr_p->pwr_lock); 1889 return (ret); 1890 } 1891 1892 /* 1893 * Message interrupt handler intended to be shared for both 1894 * PME and PME_TO_ACK msg handling, currently only handles 1895 * PME_To_ACK message. 1896 */ 1897 uint_t 1898 px_pmeq_intr(caddr_t arg) 1899 { 1900 px_t *px_p = (px_t *)arg; 1901 1902 DBG(DBG_PWR, px_p->px_dip, " PME_To_ACK received \n"); 1903 mutex_enter(&px_p->px_l23ready_lock); 1904 cv_broadcast(&px_p->px_l23ready_cv); 1905 if (px_p->px_pm_flags & PX_PME_TURNOFF_PENDING) { 1906 px_p->px_pm_flags |= PX_PMETOACK_RECVD; 1907 } else { 1908 /* 1909 * This maybe the second ack received. If so then, 1910 * we should be receiving it during wait4L1 stage. 1911 */ 1912 px_p->px_pmetoack_ignored++; 1913 } 1914 mutex_exit(&px_p->px_l23ready_lock); 1915 return (DDI_INTR_CLAIMED); 1916 } 1917 1918 static int 1919 px_pre_pwron_check(px_t *px_p) 1920 { 1921 pcie_pwr_t *pwr_p; 1922 1923 /* If no PM info, return failure */ 1924 if (!PCIE_PMINFO(px_p->px_dip) || 1925 !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip))) 1926 return (DDI_FAILURE); 1927 1928 /* 1929 * For the spec compliant downstream cards link down 1930 * is expected when the device is powered on. 1931 */ 1932 px_p->px_pm_flags |= PX_LDN_EXPECTED; 1933 return (pwr_p->pwr_link_lvl == PM_LEVEL_L3 ? DDI_SUCCESS : DDI_FAILURE); 1934 } 1935 1936 static int 1937 px_goto_l0(px_t *px_p) 1938 { 1939 pcie_pwr_t *pwr_p; 1940 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1941 caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR]; 1942 int ret = DDI_SUCCESS; 1943 uint64_t time_spent = 0; 1944 1945 /* If no PM info, return failure */ 1946 if (!PCIE_PMINFO(px_p->px_dip) || 1947 !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip))) 1948 return (DDI_FAILURE); 1949 1950 mutex_enter(&pwr_p->pwr_lock); 1951 /* 1952 * The following link retrain activity will cause LDN and LUP event. 1953 * Receiving LDN prior to receiving LUP is expected, not an error in 1954 * this case. Receiving LUP indicates link is fully up to support 1955 * powering up down stream device, and of course any further LDN and 1956 * LUP outside this context will be error. 1957 */ 1958 px_p->px_lup_pending = 1; 1959 if (px_link_retrain(csr_base) != DDI_SUCCESS) { 1960 ret = DDI_FAILURE; 1961 goto l0_done; 1962 } 1963 1964 /* LUP event takes the order of 15ms amount of time to occur */ 1965 for (; px_p->px_lup_pending && (time_spent < px_lup_poll_to); 1966 time_spent += px_lup_poll_interval) 1967 drv_usecwait(px_lup_poll_interval); 1968 if (px_p->px_lup_pending) 1969 ret = DDI_FAILURE; 1970 l0_done: 1971 px_enable_detect_quiet(csr_base); 1972 if (ret == DDI_SUCCESS) 1973 pwr_p->pwr_link_lvl = PM_LEVEL_L0; 1974 mutex_exit(&pwr_p->pwr_lock); 1975 return (ret); 1976 } 1977 1978 /* 1979 * Extract the drivers binding name to identify which chip we're binding to. 1980 * Whenever a new bus bridge is created, the driver alias entry should be 1981 * added here to identify the device if needed. If a device isn't added, 1982 * the identity defaults to PX_CHIP_UNIDENTIFIED. 1983 */ 1984 static uint32_t 1985 px_identity_chip(px_t *px_p) 1986 { 1987 dev_info_t *dip = px_p->px_dip; 1988 char *name = ddi_binding_name(dip); 1989 uint32_t revision = 0; 1990 1991 revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1992 "module-revision#", 0); 1993 1994 /* Check for Fire driver binding name */ 1995 if ((strcmp(name, "pci108e,80f0") == 0) || 1996 (strcmp(name, "pciex108e,80f0") == 0)) { 1997 DBG(DBG_ATTACH, dip, "px_identity_chip: %s%d: " 1998 "name %s module-revision %d\n", ddi_driver_name(dip), 1999 ddi_get_instance(dip), name, revision); 2000 2001 return (PX_CHIP_ID(PX_CHIP_FIRE, revision, 0x00)); 2002 } 2003 2004 /* Check for Oberon driver binding name */ 2005 if (strcmp(name, "pciex108e,80f8") == 0) { 2006 DBG(DBG_ATTACH, dip, "px_identity_chip: %s%d: " 2007 "name %s module-revision %d\n", ddi_driver_name(dip), 2008 ddi_get_instance(dip), name, revision); 2009 2010 return (PX_CHIP_ID(PX_CHIP_OBERON, revision, 0x00)); 2011 } 2012 2013 DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n", 2014 ddi_driver_name(dip), ddi_get_instance(dip), name, revision); 2015 2016 return (PX_CHIP_UNIDENTIFIED); 2017 } 2018 2019 int 2020 px_err_add_intr(px_fault_t *px_fault_p) 2021 { 2022 dev_info_t *dip = px_fault_p->px_fh_dip; 2023 px_t *px_p = DIP_TO_STATE(dip); 2024 2025 VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL, 2026 px_fault_p->px_err_func, (caddr_t)px_fault_p, NULL) == 0); 2027 2028 px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino); 2029 2030 return (DDI_SUCCESS); 2031 } 2032 2033 void 2034 px_err_rem_intr(px_fault_t *px_fault_p) 2035 { 2036 dev_info_t *dip = px_fault_p->px_fh_dip; 2037 px_t *px_p = DIP_TO_STATE(dip); 2038 2039 px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino, 2040 IB_INTR_WAIT); 2041 2042 rem_ivintr(px_fault_p->px_fh_sysino, NULL); 2043 } 2044 2045 /* 2046 * px_cb_add_intr() - Called from attach(9E) to create CB if not yet 2047 * created, to add CB interrupt vector always, but enable only once. 2048 */ 2049 int 2050 px_cb_add_intr(px_fault_t *fault_p) 2051 { 2052 px_t *px_p = DIP_TO_STATE(fault_p->px_fh_dip); 2053 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 2054 px_cb_t *cb_p = (px_cb_t *)px_get_cb(fault_p->px_fh_dip); 2055 px_cb_list_t *pxl, *pxl_new; 2056 cpuid_t cpuid; 2057 2058 2059 if (cb_p == NULL) { 2060 cb_p = kmem_zalloc(sizeof (px_cb_t), KM_SLEEP); 2061 mutex_init(&cb_p->cb_mutex, NULL, MUTEX_DRIVER, NULL); 2062 cb_p->px_cb_func = px_cb_intr; 2063 pxu_p->px_cb_p = cb_p; 2064 px_set_cb(fault_p->px_fh_dip, (uint64_t)cb_p); 2065 } else 2066 pxu_p->px_cb_p = cb_p; 2067 2068 mutex_enter(&cb_p->cb_mutex); 2069 2070 VERIFY(add_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL, 2071 cb_p->px_cb_func, (caddr_t)cb_p, NULL) == 0); 2072 2073 if (cb_p->pxl == NULL) { 2074 2075 cpuid = intr_dist_cpuid(), 2076 px_ib_intr_enable(px_p, cpuid, fault_p->px_intr_ino); 2077 2078 pxl = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP); 2079 pxl->pxp = px_p; 2080 2081 cb_p->pxl = pxl; 2082 cb_p->sysino = fault_p->px_fh_sysino; 2083 cb_p->cpuid = cpuid; 2084 2085 } else { 2086 /* 2087 * Find the last pxl or 2088 * stop short at encoutering a redundent, or 2089 * both. 2090 */ 2091 pxl = cb_p->pxl; 2092 for (; !(pxl->pxp == px_p) && pxl->next; pxl = pxl->next); 2093 if (pxl->pxp == px_p) { 2094 cmn_err(CE_WARN, "px_cb_add_intr: reregister sysino " 2095 "%lx by px_p 0x%p\n", cb_p->sysino, (void *)px_p); 2096 return (DDI_FAILURE); 2097 } 2098 2099 /* add to linked list */ 2100 pxl_new = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP); 2101 pxl_new->pxp = px_p; 2102 pxl->next = pxl_new; 2103 } 2104 cb_p->attachcnt++; 2105 2106 mutex_exit(&cb_p->cb_mutex); 2107 2108 return (DDI_SUCCESS); 2109 } 2110 2111 /* 2112 * px_cb_rem_intr() - Called from detach(9E) to remove its CB 2113 * interrupt vector, to shift proxy to the next available px, 2114 * or disable CB interrupt when itself is the last. 2115 */ 2116 void 2117 px_cb_rem_intr(px_fault_t *fault_p) 2118 { 2119 px_t *px_p = DIP_TO_STATE(fault_p->px_fh_dip), *pxp; 2120 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 2121 px_cb_t *cb_p = PX2CB(px_p); 2122 px_cb_list_t *pxl, *prev; 2123 px_fault_t *f_p; 2124 2125 ASSERT(cb_p->pxl); 2126 2127 /* De-list the target px, move the next px up */ 2128 2129 mutex_enter(&cb_p->cb_mutex); 2130 2131 pxl = cb_p->pxl; 2132 if (pxl->pxp == px_p) { 2133 cb_p->pxl = pxl->next; 2134 } else { 2135 prev = pxl; 2136 pxl = pxl->next; 2137 for (; pxl && (pxl->pxp != px_p); prev = pxl, pxl = pxl->next); 2138 if (!pxl) { 2139 cmn_err(CE_WARN, "px_cb_rem_intr: can't find px_p 0x%p " 2140 "in registered CB list.", (void *)px_p); 2141 return; 2142 } 2143 prev->next = pxl->next; 2144 } 2145 kmem_free(pxl, sizeof (px_cb_list_t)); 2146 2147 if (fault_p->px_fh_sysino == cb_p->sysino) { 2148 px_ib_intr_disable(px_p->px_ib_p, fault_p->px_intr_ino, 2149 IB_INTR_WAIT); 2150 2151 if (cb_p->pxl) { 2152 pxp = cb_p->pxl->pxp; 2153 f_p = &pxp->px_cb_fault; 2154 cb_p->sysino = f_p->px_fh_sysino; 2155 2156 PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid); 2157 (void) px_lib_intr_setstate(pxp->px_dip, cb_p->sysino, 2158 INTR_IDLE_STATE); 2159 } 2160 } 2161 2162 rem_ivintr(fault_p->px_fh_sysino, NULL); 2163 pxu_p->px_cb_p = NULL; 2164 cb_p->attachcnt--; 2165 if (cb_p->pxl) { 2166 mutex_exit(&cb_p->cb_mutex); 2167 return; 2168 } 2169 mutex_exit(&cb_p->cb_mutex); 2170 2171 mutex_destroy(&cb_p->cb_mutex); 2172 px_set_cb(fault_p->px_fh_dip, 0ull); 2173 kmem_free(cb_p, sizeof (px_cb_t)); 2174 } 2175 2176 /* 2177 * px_cb_intr() - sun4u only, CB interrupt dispatcher 2178 */ 2179 uint_t 2180 px_cb_intr(caddr_t arg) 2181 { 2182 px_cb_t *cb_p = (px_cb_t *)arg; 2183 px_cb_list_t *pxl = cb_p->pxl; 2184 px_t *pxp = pxl ? pxl->pxp : NULL; 2185 px_fault_t *fault_p; 2186 2187 while (pxl && pxp && (pxp->px_state != PX_ATTACHED)) { 2188 pxl = pxl->next; 2189 pxp = (pxl) ? pxl->pxp : NULL; 2190 } 2191 2192 if (pxp) { 2193 fault_p = &pxp->px_cb_fault; 2194 return (fault_p->px_err_func((caddr_t)fault_p)); 2195 } else 2196 return (DDI_INTR_UNCLAIMED); 2197 } 2198 2199 /* 2200 * px_cb_intr_redist() - sun4u only, CB interrupt redistribution 2201 */ 2202 void 2203 px_cb_intr_redist(px_t *px_p) 2204 { 2205 px_fault_t *f_p = &px_p->px_cb_fault; 2206 px_cb_t *cb_p = PX2CB(px_p); 2207 devino_t ino = px_p->px_inos[PX_INTR_XBC]; 2208 cpuid_t cpuid; 2209 2210 mutex_enter(&cb_p->cb_mutex); 2211 2212 if (cb_p->sysino != f_p->px_fh_sysino) { 2213 mutex_exit(&cb_p->cb_mutex); 2214 return; 2215 } 2216 2217 cb_p->cpuid = cpuid = intr_dist_cpuid(); 2218 px_ib_intr_dist_en(px_p->px_dip, cpuid, ino, B_FALSE); 2219 2220 mutex_exit(&cb_p->cb_mutex); 2221 } 2222 2223 #ifdef FMA 2224 void 2225 px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status) 2226 { 2227 /* populate the rc_status by reading the registers - TBD */ 2228 } 2229 #endif /* FMA */ 2230 2231 /* 2232 * Unprotected raw reads/writes of fabric device's config space. 2233 * Only used for temporary PCI-E Fabric Error Handling. 2234 */ 2235 uint32_t 2236 px_fab_get(px_t *px_p, pcie_req_id_t bdf, uint16_t offset) 2237 { 2238 px_ranges_t *rp = px_p->px_ranges_p; 2239 uint64_t range_prop, base_addr; 2240 int bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG); 2241 uint32_t val; 2242 2243 /* Get Fire's Physical Base Address */ 2244 range_prop = px_get_range_prop(px_p, rp, bank); 2245 2246 /* Get config space first. */ 2247 base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset); 2248 2249 val = ldphysio(base_addr); 2250 2251 return (LE_32(val)); 2252 } 2253 2254 void 2255 px_fab_set(px_t *px_p, pcie_req_id_t bdf, uint16_t offset, 2256 uint32_t val) { 2257 px_ranges_t *rp = px_p->px_ranges_p; 2258 uint64_t range_prop, base_addr; 2259 int bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG); 2260 2261 /* Get Fire's Physical Base Address */ 2262 range_prop = px_get_range_prop(px_p, rp, bank); 2263 2264 /* Get config space first. */ 2265 base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset); 2266 2267 stphysio(base_addr, LE_32(val)); 2268 } 2269 2270 /* 2271 * cpr callback 2272 * 2273 * disable fabric error msg interrupt prior to suspending 2274 * all device drivers; re-enable fabric error msg interrupt 2275 * after all devices are resumed. 2276 */ 2277 static boolean_t 2278 px_cpr_callb(void *arg, int code) 2279 { 2280 px_t *px_p = (px_t *)arg; 2281 px_ib_t *ib_p = px_p->px_ib_p; 2282 px_pec_t *pec_p = px_p->px_pec_p; 2283 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 2284 caddr_t csr_base; 2285 devino_t ce_ino, nf_ino, f_ino; 2286 px_ib_ino_info_t *ce_ino_p, *nf_ino_p, *f_ino_p; 2287 uint64_t imu_log_enable, imu_intr_enable; 2288 uint64_t imu_log_mask, imu_intr_mask; 2289 2290 ce_ino = px_msiqid_to_devino(px_p, pec_p->pec_corr_msg_msiq_id); 2291 nf_ino = px_msiqid_to_devino(px_p, pec_p->pec_non_fatal_msg_msiq_id); 2292 f_ino = px_msiqid_to_devino(px_p, pec_p->pec_fatal_msg_msiq_id); 2293 csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR]; 2294 2295 imu_log_enable = CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE); 2296 imu_intr_enable = CSR_XR(csr_base, IMU_INTERRUPT_ENABLE); 2297 2298 imu_log_mask = BITMASK(IMU_ERROR_LOG_ENABLE_FATAL_MES_NOT_EN_LOG_EN) | 2299 BITMASK(IMU_ERROR_LOG_ENABLE_NONFATAL_MES_NOT_EN_LOG_EN) | 2300 BITMASK(IMU_ERROR_LOG_ENABLE_COR_MES_NOT_EN_LOG_EN); 2301 2302 imu_intr_mask = 2303 BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_S_INT_EN) | 2304 BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_S_INT_EN) | 2305 BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_S_INT_EN) | 2306 BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_P_INT_EN) | 2307 BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_P_INT_EN) | 2308 BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_P_INT_EN); 2309 2310 switch (code) { 2311 case CB_CODE_CPR_CHKPT: 2312 /* disable imu rbne on corr/nonfatal/fatal errors */ 2313 CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, 2314 imu_log_enable & (~imu_log_mask)); 2315 2316 CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, 2317 imu_intr_enable & (~imu_intr_mask)); 2318 2319 /* disable CORR intr mapping */ 2320 px_ib_intr_disable(ib_p, ce_ino, IB_INTR_NOWAIT); 2321 2322 /* disable NON FATAL intr mapping */ 2323 px_ib_intr_disable(ib_p, nf_ino, IB_INTR_NOWAIT); 2324 2325 /* disable FATAL intr mapping */ 2326 px_ib_intr_disable(ib_p, f_ino, IB_INTR_NOWAIT); 2327 2328 break; 2329 2330 case CB_CODE_CPR_RESUME: 2331 mutex_enter(&ib_p->ib_ino_lst_mutex); 2332 2333 ce_ino_p = px_ib_locate_ino(ib_p, ce_ino); 2334 nf_ino_p = px_ib_locate_ino(ib_p, nf_ino); 2335 f_ino_p = px_ib_locate_ino(ib_p, f_ino); 2336 2337 /* enable CORR intr mapping */ 2338 if (ce_ino_p) 2339 px_ib_intr_enable(px_p, ce_ino_p->ino_cpuid, ce_ino); 2340 else 2341 cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to " 2342 "reenable PCIe Correctable msg intr.\n"); 2343 2344 /* enable NON FATAL intr mapping */ 2345 if (nf_ino_p) 2346 px_ib_intr_enable(px_p, nf_ino_p->ino_cpuid, nf_ino); 2347 else 2348 cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to " 2349 "reenable PCIe Non Fatal msg intr.\n"); 2350 2351 /* enable FATAL intr mapping */ 2352 if (f_ino_p) 2353 px_ib_intr_enable(px_p, f_ino_p->ino_cpuid, f_ino); 2354 else 2355 cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to " 2356 "reenable PCIe Fatal msg intr.\n"); 2357 2358 mutex_exit(&ib_p->ib_ino_lst_mutex); 2359 2360 /* enable corr/nonfatal/fatal not enable error */ 2361 CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, (imu_log_enable | 2362 (imu_log_mask & px_imu_log_mask))); 2363 CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, (imu_intr_enable | 2364 (imu_intr_mask & px_imu_intr_mask))); 2365 2366 break; 2367 } 2368 2369 return (B_TRUE); 2370 } 2371 2372 uint64_t 2373 px_get_rng_parent_hi_mask(px_t *px_p) 2374 { 2375 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 2376 uint64_t mask; 2377 2378 switch (PX_CHIP_TYPE(pxu_p)) { 2379 case PX_CHIP_OBERON: 2380 mask = OBERON_RANGE_PROP_MASK; 2381 break; 2382 case PX_CHIP_FIRE: 2383 mask = PX_RANGE_PROP_MASK; 2384 break; 2385 default: 2386 mask = PX_RANGE_PROP_MASK; 2387 } 2388 2389 return (mask); 2390 } 2391 2392 /* 2393 * fetch chip's range propery's value 2394 */ 2395 uint64_t 2396 px_get_range_prop(px_t *px_p, px_ranges_t *rp, int bank) 2397 { 2398 uint64_t mask, range_prop; 2399 2400 mask = px_get_rng_parent_hi_mask(px_p); 2401 range_prop = (((uint64_t)(rp[bank].parent_high & mask)) << 32) | 2402 rp[bank].parent_low; 2403 2404 return (range_prop); 2405 } 2406 2407 /* 2408 * add cpr callback 2409 */ 2410 void 2411 px_cpr_add_callb(px_t *px_p) 2412 { 2413 px_p->px_cprcb_id = callb_add(px_cpr_callb, (void *)px_p, 2414 CB_CL_CPR_POST_USER, "px_cpr"); 2415 } 2416 2417 /* 2418 * remove cpr callback 2419 */ 2420 void 2421 px_cpr_rem_callb(px_t *px_p) 2422 { 2423 (void) callb_delete(px_p->px_cprcb_id); 2424 } 2425 2426 /*ARGSUSED*/ 2427 static uint_t 2428 px_hp_intr(caddr_t arg1, caddr_t arg2) 2429 { 2430 px_t *px_p = (px_t *)arg1; 2431 int rval; 2432 2433 rval = pciehpc_intr(px_p->px_dip); 2434 2435 #ifdef DEBUG 2436 if (rval == DDI_INTR_UNCLAIMED) 2437 cmn_err(CE_WARN, "%s%d: UNCLAIMED intr\n", 2438 ddi_driver_name(px_p->px_dip), 2439 ddi_get_instance(px_p->px_dip)); 2440 #endif 2441 2442 return (rval); 2443 } 2444 2445 int 2446 px_lib_hotplug_init(dev_info_t *dip, void *arg) 2447 { 2448 px_t *px_p = DIP_TO_STATE(dip); 2449 uint64_t ret; 2450 2451 if ((ret = hvio_hotplug_init(dip, arg)) == DDI_SUCCESS) { 2452 sysino_t sysino; 2453 2454 if (px_lib_intr_devino_to_sysino(px_p->px_dip, 2455 px_p->px_inos[PX_INTR_HOTPLUG], &sysino) != 2456 DDI_SUCCESS) { 2457 #ifdef DEBUG 2458 cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n", 2459 ddi_driver_name(px_p->px_dip), 2460 ddi_get_instance(px_p->px_dip)); 2461 #endif 2462 return (DDI_FAILURE); 2463 } 2464 2465 VERIFY(add_ivintr(sysino, PX_PCIEHP_PIL, 2466 (intrfunc)px_hp_intr, (caddr_t)px_p, NULL) == 0); 2467 } 2468 2469 return (ret); 2470 } 2471 2472 void 2473 px_lib_hotplug_uninit(dev_info_t *dip) 2474 { 2475 if (hvio_hotplug_uninit(dip) == DDI_SUCCESS) { 2476 px_t *px_p = DIP_TO_STATE(dip); 2477 sysino_t sysino; 2478 2479 if (px_lib_intr_devino_to_sysino(px_p->px_dip, 2480 px_p->px_inos[PX_INTR_HOTPLUG], &sysino) != 2481 DDI_SUCCESS) { 2482 #ifdef DEBUG 2483 cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n", 2484 ddi_driver_name(px_p->px_dip), 2485 ddi_get_instance(px_p->px_dip)); 2486 #endif 2487 return; 2488 } 2489 2490 rem_ivintr(sysino, NULL); 2491 } 2492 } 2493