1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <sys/types.h> 29 #include <sys/kmem.h> 30 #include <sys/conf.h> 31 #include <sys/ddi.h> 32 #include <sys/sunddi.h> 33 #include <sys/fm/protocol.h> 34 #include <sys/fm/util.h> 35 #include <sys/modctl.h> 36 #include <sys/disp.h> 37 #include <sys/stat.h> 38 #include <sys/ddi_impldefs.h> 39 #include <sys/vmem.h> 40 #include <sys/iommutsb.h> 41 #include <sys/cpuvar.h> 42 #include <sys/ivintr.h> 43 #include <sys/byteorder.h> 44 #include <sys/hotplug/pci/pciehpc.h> 45 #include <px_obj.h> 46 #include <pcie_pwr.h> 47 #include "px_tools_var.h" 48 #include <px_regs.h> 49 #include <px_csr.h> 50 #include <sys/machsystm.h> 51 #include "px_lib4u.h" 52 #include "px_err.h" 53 #include "oberon_regs.h" 54 55 #pragma weak jbus_stst_order 56 57 extern void jbus_stst_order(); 58 59 ulong_t px_mmu_dvma_end = 0xfffffffful; 60 uint_t px_ranges_phi_mask = 0xfffffffful; 61 uint64_t *px_oberon_ubc_scratch_regs; 62 uint64_t px_paddr_mask; 63 64 static int px_goto_l23ready(px_t *px_p); 65 static int px_goto_l0(px_t *px_p); 66 static int px_pre_pwron_check(px_t *px_p); 67 static uint32_t px_identity_init(px_t *px_p); 68 static boolean_t px_cpr_callb(void *arg, int code); 69 static uint_t px_cb_intr(caddr_t arg); 70 71 /* 72 * px_lib_map_registers 73 * 74 * This function is called from the attach routine to map the registers 75 * accessed by this driver. 
76 * 77 * used by: px_attach() 78 * 79 * return value: DDI_FAILURE on failure 80 */ 81 int 82 px_lib_map_regs(pxu_t *pxu_p, dev_info_t *dip) 83 { 84 ddi_device_acc_attr_t attr; 85 px_reg_bank_t reg_bank = PX_REG_CSR; 86 87 DBG(DBG_ATTACH, dip, "px_lib_map_regs: pxu_p:0x%p, dip 0x%p\n", 88 pxu_p, dip); 89 90 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 91 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 92 attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC; 93 94 /* 95 * PCI CSR Base 96 */ 97 if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank], 98 0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) { 99 goto fail; 100 } 101 102 reg_bank++; 103 104 /* 105 * XBUS CSR Base 106 */ 107 if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank], 108 0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) { 109 goto fail; 110 } 111 112 pxu_p->px_address[reg_bank] -= FIRE_CONTROL_STATUS; 113 114 done: 115 for (; reg_bank >= PX_REG_CSR; reg_bank--) { 116 DBG(DBG_ATTACH, dip, "reg_bank 0x%x address 0x%p\n", 117 reg_bank, pxu_p->px_address[reg_bank]); 118 } 119 120 return (DDI_SUCCESS); 121 122 fail: 123 cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n", 124 ddi_driver_name(dip), ddi_get_instance(dip), reg_bank); 125 126 for (reg_bank--; reg_bank >= PX_REG_CSR; reg_bank--) { 127 pxu_p->px_address[reg_bank] = NULL; 128 ddi_regs_map_free(&pxu_p->px_ac[reg_bank]); 129 } 130 131 return (DDI_FAILURE); 132 } 133 134 /* 135 * px_lib_unmap_regs: 136 * 137 * This routine unmaps the registers mapped by map_px_registers. 138 * 139 * used by: px_detach(), and error conditions in px_attach() 140 * 141 * return value: none 142 */ 143 void 144 px_lib_unmap_regs(pxu_t *pxu_p) 145 { 146 int i; 147 148 for (i = 0; i < PX_REG_MAX; i++) { 149 if (pxu_p->px_ac[i]) 150 ddi_regs_map_free(&pxu_p->px_ac[i]); 151 } 152 } 153 154 int 155 px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl) 156 { 157 px_t *px_p = DIP_TO_STATE(dip); 158 caddr_t xbc_csr_base, csr_base; 159 px_dvma_range_prop_t px_dvma_range; 160 px_chip_type_t chip_type = px_identity_init(px_p); 161 pxu_t *pxu_p; 162 163 DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p", dip); 164 165 if (chip_type == PX_CHIP_UNIDENTIFIED) { 166 cmn_err(CE_WARN, "%s%d: Unrecognized Hardware Version\n", 167 NAMEINST(dip)); 168 return (DDI_FAILURE); 169 } 170 171 px_paddr_mask = (chip_type == PX_CHIP_FIRE) ? MMU_FIRE_PADDR_MASK : 172 MMU_OBERON_PADDR_MASK; 173 174 /* 175 * Allocate platform specific structure and link it to 176 * the px state structure. 177 */ 178 pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP); 179 pxu_p->chip_type = chip_type; 180 pxu_p->portid = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 181 "portid", -1); 182 183 /* Map in the registers */ 184 if (px_lib_map_regs(pxu_p, dip) == DDI_FAILURE) { 185 kmem_free(pxu_p, sizeof (pxu_t)); 186 187 return (DDI_FAILURE); 188 } 189 190 xbc_csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC]; 191 csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR]; 192 193 pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid); 194 pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie); 195 pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie); 196 197 pxu_p->tsb_paddr = va_to_pa(pxu_p->tsb_vaddr); 198 199 /* 200 * Create "virtual-dma" property to support child devices 201 * needing to know DVMA range. 
 */
	px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1
	    - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT);
	px_dvma_range.dvma_len = (uint32_t)
	    px_mmu_dvma_end - px_dvma_range.dvma_base + 1;

	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    "virtual-dma", (caddr_t)&px_dvma_range,
	    sizeof (px_dvma_range_prop_t));
	/*
	 * Initialize all fire hardware specific blocks.
	 */
	hvio_cb_init(xbc_csr_base, pxu_p);
	hvio_ib_init(csr_base, pxu_p);
	hvio_pec_init(csr_base, pxu_p);
	hvio_mmu_init(csr_base, pxu_p);

	px_p->px_plat_p = (void *)pxu_p;

	/*
	 * Initialize all the interrupt handlers
	 */
	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		px_err_reg_enable(px_p, PX_ERR_UBC);
		px_err_reg_enable(px_p, PX_ERR_MMU);
		px_err_reg_enable(px_p, PX_ERR_IMU);
		px_err_reg_enable(px_p, PX_ERR_TLU_UE);
		px_err_reg_enable(px_p, PX_ERR_TLU_CE);
		px_err_reg_enable(px_p, PX_ERR_TLU_OE);

		/*
		 * Oberon hotplug uses the SPARE3 field in the ILU Error Log
		 * Enable register to indicate the status of leaf reset.
		 * We need to preserve the value of this bit and keep it in
		 * px_ilu_log_mask to reflect the state of the bit.
		 */
		if (CSR_BR(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3))
			px_ilu_log_mask |= (1ull <<
			    ILU_ERROR_LOG_ENABLE_SPARE3);
		else
			px_ilu_log_mask &= ~(1ull <<
			    ILU_ERROR_LOG_ENABLE_SPARE3);
		px_err_reg_enable(px_p, PX_ERR_ILU);

		px_fabric_die_rc_ue |= PCIE_AER_UCE_UC;
		break;

	case PX_CHIP_FIRE:
		px_err_reg_enable(px_p, PX_ERR_JBC);
		px_err_reg_enable(px_p, PX_ERR_MMU);
		px_err_reg_enable(px_p, PX_ERR_IMU);
		px_err_reg_enable(px_p, PX_ERR_TLU_UE);
		px_err_reg_enable(px_p, PX_ERR_TLU_CE);
		px_err_reg_enable(px_p, PX_ERR_TLU_OE);
		px_err_reg_enable(px_p, PX_ERR_ILU);
		px_err_reg_enable(px_p, PX_ERR_LPU_LINK);
		px_err_reg_enable(px_p, PX_ERR_LPU_PHY);
		px_err_reg_enable(px_p, PX_ERR_LPU_RX);
		px_err_reg_enable(px_p, PX_ERR_LPU_TX);
		px_err_reg_enable(px_p, PX_ERR_LPU_LTSSM);
		px_err_reg_enable(px_p, PX_ERR_LPU_GIGABLZ);
		break;
	default:
		cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	/* Initialize the device handle */
	*dev_hdl = (devhandle_t)csr_base;

	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl);

	return (DDI_SUCCESS);
}

int
px_lib_dev_fini(dev_info_t *dip)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;

	DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip);

	/*
	 * Deinitialize all the interrupt handlers
	 */
	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		px_err_reg_disable(px_p, PX_ERR_UBC);
		px_err_reg_disable(px_p, PX_ERR_MMU);
		px_err_reg_disable(px_p, PX_ERR_IMU);
		px_err_reg_disable(px_p, PX_ERR_TLU_UE);
		px_err_reg_disable(px_p, PX_ERR_TLU_CE);
		px_err_reg_disable(px_p, PX_ERR_TLU_OE);
		px_err_reg_disable(px_p, PX_ERR_ILU);
		break;
	case PX_CHIP_FIRE:
		px_err_reg_disable(px_p, PX_ERR_JBC);
		px_err_reg_disable(px_p, PX_ERR_MMU);
		px_err_reg_disable(px_p, PX_ERR_IMU);
		px_err_reg_disable(px_p, PX_ERR_TLU_UE);
		px_err_reg_disable(px_p, PX_ERR_TLU_CE);
		px_err_reg_disable(px_p, PX_ERR_TLU_OE);
		px_err_reg_disable(px_p, PX_ERR_ILU);
		px_err_reg_disable(px_p, PX_ERR_LPU_LINK);
		px_err_reg_disable(px_p, PX_ERR_LPU_PHY);
px_err_reg_disable(px_p, PX_ERR_LPU_RX); 311 px_err_reg_disable(px_p, PX_ERR_LPU_TX); 312 px_err_reg_disable(px_p, PX_ERR_LPU_LTSSM); 313 px_err_reg_disable(px_p, PX_ERR_LPU_GIGABLZ); 314 break; 315 default: 316 cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n", 317 ddi_driver_name(dip), ddi_get_instance(dip)); 318 return (DDI_FAILURE); 319 } 320 321 iommu_tsb_free(pxu_p->tsb_cookie); 322 323 px_lib_unmap_regs((pxu_t *)px_p->px_plat_p); 324 kmem_free(px_p->px_plat_p, sizeof (pxu_t)); 325 px_p->px_plat_p = NULL; 326 327 return (DDI_SUCCESS); 328 } 329 330 /*ARGSUSED*/ 331 int 332 px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino, 333 sysino_t *sysino) 334 { 335 px_t *px_p = DIP_TO_STATE(dip); 336 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 337 uint64_t ret; 338 339 DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p " 340 "devino 0x%x\n", dip, devino); 341 342 if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip), 343 pxu_p, devino, sysino)) != H_EOK) { 344 DBG(DBG_LIB_INT, dip, 345 "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret); 346 return (DDI_FAILURE); 347 } 348 349 DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n", 350 *sysino); 351 352 return (DDI_SUCCESS); 353 } 354 355 /*ARGSUSED*/ 356 int 357 px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino, 358 intr_valid_state_t *intr_valid_state) 359 { 360 uint64_t ret; 361 362 DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n", 363 dip, sysino); 364 365 if ((ret = hvio_intr_getvalid(DIP_TO_HANDLE(dip), 366 sysino, intr_valid_state)) != H_EOK) { 367 DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n", 368 ret); 369 return (DDI_FAILURE); 370 } 371 372 DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n", 373 *intr_valid_state); 374 375 return (DDI_SUCCESS); 376 } 377 378 /*ARGSUSED*/ 379 int 380 px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino, 381 intr_valid_state_t intr_valid_state) 382 { 383 uint64_t ret; 384 385 DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx " 386 "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state); 387 388 if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip), 389 sysino, intr_valid_state)) != H_EOK) { 390 DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n", 391 ret); 392 return (DDI_FAILURE); 393 } 394 395 return (DDI_SUCCESS); 396 } 397 398 /*ARGSUSED*/ 399 int 400 px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino, 401 intr_state_t *intr_state) 402 { 403 uint64_t ret; 404 405 DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n", 406 dip, sysino); 407 408 if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip), 409 sysino, intr_state)) != H_EOK) { 410 DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n", 411 ret); 412 return (DDI_FAILURE); 413 } 414 415 DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n", 416 *intr_state); 417 418 return (DDI_SUCCESS); 419 } 420 421 /*ARGSUSED*/ 422 int 423 px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino, 424 intr_state_t intr_state) 425 { 426 uint64_t ret; 427 428 DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx " 429 "intr_state 0x%x\n", dip, sysino, intr_state); 430 431 if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip), 432 sysino, intr_state)) != H_EOK) { 433 DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n", 434 ret); 435 return (DDI_FAILURE); 436 } 437 438 return (DDI_SUCCESS); 439 } 440 441 /*ARGSUSED*/ 442 int 443 px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, 
cpuid_t *cpuid) 444 { 445 px_t *px_p = DIP_TO_STATE(dip); 446 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 447 uint64_t ret; 448 449 DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n", 450 dip, sysino); 451 452 if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip), pxu_p, 453 sysino, cpuid)) != H_EOK) { 454 DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n", 455 ret); 456 return (DDI_FAILURE); 457 } 458 459 DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", cpuid); 460 461 return (DDI_SUCCESS); 462 } 463 464 /*ARGSUSED*/ 465 int 466 px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid) 467 { 468 px_t *px_p = DIP_TO_STATE(dip); 469 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 470 uint64_t ret; 471 472 DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx " 473 "cpuid 0x%x\n", dip, sysino, cpuid); 474 475 if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip), pxu_p, 476 sysino, cpuid)) != H_EOK) { 477 DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n", 478 ret); 479 return (DDI_FAILURE); 480 } 481 482 return (DDI_SUCCESS); 483 } 484 485 /*ARGSUSED*/ 486 int 487 px_lib_intr_reset(dev_info_t *dip) 488 { 489 devino_t ino; 490 sysino_t sysino; 491 492 DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip); 493 494 /* Reset all Interrupts */ 495 for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) { 496 if (px_lib_intr_devino_to_sysino(dip, ino, 497 &sysino) != DDI_SUCCESS) 498 return (BF_FATAL); 499 500 if (px_lib_intr_setstate(dip, sysino, 501 INTR_IDLE_STATE) != DDI_SUCCESS) 502 return (BF_FATAL); 503 } 504 505 return (BF_NONE); 506 } 507 508 /*ARGSUSED*/ 509 int 510 px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages, 511 io_attributes_t attr, void *addr, size_t pfn_index, int flags) 512 { 513 px_t *px_p = DIP_TO_STATE(dip); 514 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 515 uint64_t ret; 516 517 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx " 518 "pages 0x%x attr 0x%x addr 0x%p pfn_index 0x%llx flags 0x%x\n", 519 dip, tsbid, pages, attr, addr, pfn_index, flags); 520 521 if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages, 522 attr, addr, pfn_index, flags)) != H_EOK) { 523 DBG(DBG_LIB_DMA, dip, 524 "px_lib_iommu_map failed, ret 0x%lx\n", ret); 525 return (DDI_FAILURE); 526 } 527 528 return (DDI_SUCCESS); 529 } 530 531 /*ARGSUSED*/ 532 int 533 px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages) 534 { 535 px_t *px_p = DIP_TO_STATE(dip); 536 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 537 uint64_t ret; 538 539 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx " 540 "pages 0x%x\n", dip, tsbid, pages); 541 542 if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, tsbid, pages)) 543 != H_EOK) { 544 DBG(DBG_LIB_DMA, dip, 545 "px_lib_iommu_demap failed, ret 0x%lx\n", ret); 546 547 return (DDI_FAILURE); 548 } 549 550 return (DDI_SUCCESS); 551 } 552 553 /*ARGSUSED*/ 554 int 555 px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, io_attributes_t *attr_p, 556 r_addr_t *r_addr_p) 557 { 558 px_t *px_p = DIP_TO_STATE(dip); 559 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 560 uint64_t ret; 561 562 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n", 563 dip, tsbid); 564 565 if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid, 566 attr_p, r_addr_p)) != H_EOK) { 567 DBG(DBG_LIB_DMA, dip, 568 "hvio_iommu_getmap failed, ret 0x%lx\n", ret); 569 570 return ((ret == H_ENOMAP) ? 
DDI_DMA_NOMAPPING:DDI_FAILURE); 571 } 572 573 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%x r_addr 0x%llx\n", 574 *attr_p, *r_addr_p); 575 576 return (DDI_SUCCESS); 577 } 578 579 580 /* 581 * Checks dma attributes against system bypass ranges 582 * The bypass range is determined by the hardware. Return them so the 583 * common code can do generic checking against them. 584 */ 585 /*ARGSUSED*/ 586 int 587 px_lib_dma_bypass_rngchk(dev_info_t *dip, ddi_dma_attr_t *attr_p, 588 uint64_t *lo_p, uint64_t *hi_p) 589 { 590 px_t *px_p = DIP_TO_STATE(dip); 591 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 592 593 *lo_p = hvio_get_bypass_base(pxu_p); 594 *hi_p = hvio_get_bypass_end(pxu_p); 595 596 return (DDI_SUCCESS); 597 } 598 599 600 /*ARGSUSED*/ 601 int 602 px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra, io_attributes_t attr, 603 io_addr_t *io_addr_p) 604 { 605 uint64_t ret; 606 px_t *px_p = DIP_TO_STATE(dip); 607 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 608 609 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx " 610 "attr 0x%x\n", dip, ra, attr); 611 612 if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), pxu_p, ra, 613 attr, io_addr_p)) != H_EOK) { 614 DBG(DBG_LIB_DMA, dip, 615 "hvio_iommu_getbypass failed, ret 0x%lx\n", ret); 616 return (DDI_FAILURE); 617 } 618 619 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n", 620 *io_addr_p); 621 622 return (DDI_SUCCESS); 623 } 624 625 /* 626 * bus dma sync entry point. 627 */ 628 /*ARGSUSED*/ 629 int 630 px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 631 off_t off, size_t len, uint_t cache_flags) 632 { 633 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 634 px_t *px_p = DIP_TO_STATE(dip); 635 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 636 637 DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p " 638 "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n", 639 dip, rdip, handle, off, len, cache_flags); 640 641 /* 642 * No flush needed for Oberon 643 */ 644 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) 645 return (DDI_SUCCESS); 646 647 /* 648 * jbus_stst_order is found only in certain cpu modules. 649 * Just return success if not present. 650 */ 651 if (&jbus_stst_order == NULL) 652 return (DDI_SUCCESS); 653 654 if (!(mp->dmai_flags & PX_DMAI_FLAGS_INUSE)) { 655 cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.", 656 ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp); 657 658 return (DDI_FAILURE); 659 } 660 661 if (mp->dmai_flags & PX_DMAI_FLAGS_NOSYNC) 662 return (DDI_SUCCESS); 663 664 /* 665 * No flush needed when sending data from memory to device. 666 * Nothing to do to "sync" memory to what device would already see. 667 */ 668 if (!(mp->dmai_rflags & DDI_DMA_READ) || 669 ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV)) 670 return (DDI_SUCCESS); 671 672 /* 673 * Perform necessary cpu workaround to ensure jbus ordering. 674 * CPU's internal "invalidate FIFOs" are flushed. 
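 * (Preemption is disabled around the call below, presumably so the flush
 * executes entirely on the CPU that issued the stores and the thread
 * cannot migrate mid-flush; this is an inference from the code, not a
 * documented requirement.)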
675 */ 676 677 #if !defined(lint) 678 kpreempt_disable(); 679 #endif 680 jbus_stst_order(); 681 #if !defined(lint) 682 kpreempt_enable(); 683 #endif 684 return (DDI_SUCCESS); 685 } 686 687 /* 688 * MSIQ Functions: 689 */ 690 /*ARGSUSED*/ 691 int 692 px_lib_msiq_init(dev_info_t *dip) 693 { 694 px_t *px_p = DIP_TO_STATE(dip); 695 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 696 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 697 caddr_t msiq_addr; 698 px_dvma_addr_t pg_index; 699 size_t size; 700 int ret; 701 702 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip); 703 704 /* 705 * Map the EQ memory into the Fire MMU (has to be 512KB aligned) 706 * and then initialize the base address register. 707 * 708 * Allocate entries from Fire IOMMU so that the resulting address 709 * is properly aligned. Calculate the index of the first allocated 710 * entry. Note: The size of the mapping is assumed to be a multiple 711 * of the page size. 712 */ 713 msiq_addr = (caddr_t)(((uint64_t)msiq_state_p->msiq_buf_p + 714 (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT); 715 716 size = msiq_state_p->msiq_cnt * 717 msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 718 719 pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map, 720 size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT); 721 722 if (pxu_p->msiq_mapped_p == NULL) 723 return (DDI_FAILURE); 724 725 pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p, 726 MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p)); 727 728 if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index), 729 MMU_BTOP(size), PCI_MAP_ATTR_WRITE, (void *)msiq_addr, 0, 730 MMU_MAP_BUF)) != DDI_SUCCESS) { 731 DBG(DBG_LIB_MSIQ, dip, 732 "hvio_msiq_init failed, ret 0x%lx\n", ret); 733 734 (void) px_lib_msiq_fini(dip); 735 return (DDI_FAILURE); 736 } 737 738 (void) hvio_msiq_init(DIP_TO_HANDLE(dip), pxu_p); 739 740 return (DDI_SUCCESS); 741 } 742 743 /*ARGSUSED*/ 744 int 745 px_lib_msiq_fini(dev_info_t *dip) 746 { 747 px_t *px_p = DIP_TO_STATE(dip); 748 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 749 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 750 px_dvma_addr_t pg_index; 751 size_t size; 752 753 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip); 754 755 /* 756 * Unmap and free the EQ memory that had been mapped 757 * into the Fire IOMMU. 
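 * The size and page index below are recomputed exactly as in
 * px_lib_msiq_init(), so the demap and the vmem_xfree() cover the same
 * DVMA span that was set up at init time.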
758 */ 759 size = msiq_state_p->msiq_cnt * 760 msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 761 762 pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p, 763 MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p)); 764 765 (void) px_lib_iommu_demap(px_p->px_dip, 766 PCI_TSBID(0, pg_index), MMU_BTOP(size)); 767 768 /* Free the entries from the Fire MMU */ 769 vmem_xfree(px_p->px_mmu_p->mmu_dvma_map, 770 (void *)pxu_p->msiq_mapped_p, size); 771 772 return (DDI_SUCCESS); 773 } 774 775 /*ARGSUSED*/ 776 int 777 px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p, 778 uint_t *msiq_rec_cnt_p) 779 { 780 px_t *px_p = DIP_TO_STATE(dip); 781 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 782 uint64_t *msiq_addr; 783 size_t msiq_size; 784 785 DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n", 786 dip, msiq_id); 787 788 msiq_addr = (uint64_t *)(((uint64_t)msiq_state_p->msiq_buf_p + 789 (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT); 790 msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 791 ra_p = (r_addr_t *)((caddr_t)msiq_addr + (msiq_id * msiq_size)); 792 793 *msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt; 794 795 DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%p msiq_rec_cnt 0x%x\n", 796 ra_p, *msiq_rec_cnt_p); 797 798 return (DDI_SUCCESS); 799 } 800 801 /*ARGSUSED*/ 802 int 803 px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id, 804 pci_msiq_valid_state_t *msiq_valid_state) 805 { 806 uint64_t ret; 807 808 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n", 809 dip, msiq_id); 810 811 if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip), 812 msiq_id, msiq_valid_state)) != H_EOK) { 813 DBG(DBG_LIB_MSIQ, dip, 814 "hvio_msiq_getvalid failed, ret 0x%lx\n", ret); 815 return (DDI_FAILURE); 816 } 817 818 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n", 819 *msiq_valid_state); 820 821 return (DDI_SUCCESS); 822 } 823 824 /*ARGSUSED*/ 825 int 826 px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id, 827 pci_msiq_valid_state_t msiq_valid_state) 828 { 829 uint64_t ret; 830 831 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x " 832 "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state); 833 834 if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip), 835 msiq_id, msiq_valid_state)) != H_EOK) { 836 DBG(DBG_LIB_MSIQ, dip, 837 "hvio_msiq_setvalid failed, ret 0x%lx\n", ret); 838 return (DDI_FAILURE); 839 } 840 841 return (DDI_SUCCESS); 842 } 843 844 /*ARGSUSED*/ 845 int 846 px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id, 847 pci_msiq_state_t *msiq_state) 848 { 849 uint64_t ret; 850 851 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n", 852 dip, msiq_id); 853 854 if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip), 855 msiq_id, msiq_state)) != H_EOK) { 856 DBG(DBG_LIB_MSIQ, dip, 857 "hvio_msiq_getstate failed, ret 0x%lx\n", ret); 858 return (DDI_FAILURE); 859 } 860 861 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n", 862 *msiq_state); 863 864 return (DDI_SUCCESS); 865 } 866 867 /*ARGSUSED*/ 868 int 869 px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id, 870 pci_msiq_state_t msiq_state) 871 { 872 uint64_t ret; 873 874 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x " 875 "msiq_state 0x%x\n", dip, msiq_id, msiq_state); 876 877 if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip), 878 msiq_id, msiq_state)) != H_EOK) { 879 DBG(DBG_LIB_MSIQ, dip, 880 "hvio_msiq_setstate failed, ret 0x%lx\n", ret); 881 return (DDI_FAILURE); 882 } 883 884 return (DDI_SUCCESS); 
885 } 886 887 /*ARGSUSED*/ 888 int 889 px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id, 890 msiqhead_t *msiq_head) 891 { 892 uint64_t ret; 893 894 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n", 895 dip, msiq_id); 896 897 if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip), 898 msiq_id, msiq_head)) != H_EOK) { 899 DBG(DBG_LIB_MSIQ, dip, 900 "hvio_msiq_gethead failed, ret 0x%lx\n", ret); 901 return (DDI_FAILURE); 902 } 903 904 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n", 905 *msiq_head); 906 907 return (DDI_SUCCESS); 908 } 909 910 /*ARGSUSED*/ 911 int 912 px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id, 913 msiqhead_t msiq_head) 914 { 915 uint64_t ret; 916 917 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x " 918 "msiq_head 0x%x\n", dip, msiq_id, msiq_head); 919 920 if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip), 921 msiq_id, msiq_head)) != H_EOK) { 922 DBG(DBG_LIB_MSIQ, dip, 923 "hvio_msiq_sethead failed, ret 0x%lx\n", ret); 924 return (DDI_FAILURE); 925 } 926 927 return (DDI_SUCCESS); 928 } 929 930 /*ARGSUSED*/ 931 int 932 px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id, 933 msiqtail_t *msiq_tail) 934 { 935 uint64_t ret; 936 937 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n", 938 dip, msiq_id); 939 940 if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip), 941 msiq_id, msiq_tail)) != H_EOK) { 942 DBG(DBG_LIB_MSIQ, dip, 943 "hvio_msiq_gettail failed, ret 0x%lx\n", ret); 944 return (DDI_FAILURE); 945 } 946 947 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n", 948 *msiq_tail); 949 950 return (DDI_SUCCESS); 951 } 952 953 /*ARGSUSED*/ 954 void 955 px_lib_get_msiq_rec(dev_info_t *dip, px_msiq_t *msiq_p, msiq_rec_t *msiq_rec_p) 956 { 957 eq_rec_t *eq_rec_p = (eq_rec_t *)msiq_p->msiq_curr; 958 959 DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n", 960 dip, eq_rec_p); 961 962 if (!eq_rec_p->eq_rec_fmt_type) { 963 /* Set msiq_rec_type to zero */ 964 msiq_rec_p->msiq_rec_type = 0; 965 966 return; 967 } 968 969 DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, " 970 "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx " 971 "eq_rec_len 0x%llx eq_rec_addr0 0x%llx " 972 "eq_rec_addr1 0x%llx eq_rec_data0 0x%llx " 973 "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid, 974 eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len, 975 eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1, 976 eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1); 977 978 /* 979 * Only upper 4 bits of eq_rec_fmt_type is used 980 * to identify the EQ record type. 
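 * (As used below: eq_rec_fmt_type >> 3 selects the record type, and for
 * PCIe message records the low three bits, eq_rec_fmt_type & 7, carry the
 * message routing field.)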
981 */ 982 switch (eq_rec_p->eq_rec_fmt_type >> 3) { 983 case EQ_REC_MSI32: 984 msiq_rec_p->msiq_rec_type = MSI32_REC; 985 986 msiq_rec_p->msiq_rec_data.msi.msi_data = 987 eq_rec_p->eq_rec_data0; 988 break; 989 case EQ_REC_MSI64: 990 msiq_rec_p->msiq_rec_type = MSI64_REC; 991 992 msiq_rec_p->msiq_rec_data.msi.msi_data = 993 eq_rec_p->eq_rec_data0; 994 break; 995 case EQ_REC_MSG: 996 msiq_rec_p->msiq_rec_type = MSG_REC; 997 998 msiq_rec_p->msiq_rec_data.msg.msg_route = 999 eq_rec_p->eq_rec_fmt_type & 7; 1000 msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid; 1001 msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0; 1002 break; 1003 default: 1004 cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: " 1005 "0x%x is an unknown EQ record type", 1006 ddi_driver_name(dip), ddi_get_instance(dip), 1007 (int)eq_rec_p->eq_rec_fmt_type); 1008 break; 1009 } 1010 1011 msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid; 1012 msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) | 1013 (eq_rec_p->eq_rec_addr0 << 2)); 1014 1015 /* Zero out eq_rec_fmt_type field */ 1016 eq_rec_p->eq_rec_fmt_type = 0; 1017 } 1018 1019 /* 1020 * MSI Functions: 1021 */ 1022 /*ARGSUSED*/ 1023 int 1024 px_lib_msi_init(dev_info_t *dip) 1025 { 1026 px_t *px_p = DIP_TO_STATE(dip); 1027 px_msi_state_t *msi_state_p = &px_p->px_ib_p->ib_msi_state; 1028 uint64_t ret; 1029 1030 DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip); 1031 1032 if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip), 1033 msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) { 1034 DBG(DBG_LIB_MSIQ, dip, "px_lib_msi_init failed, ret 0x%lx\n", 1035 ret); 1036 return (DDI_FAILURE); 1037 } 1038 1039 return (DDI_SUCCESS); 1040 } 1041 1042 /*ARGSUSED*/ 1043 int 1044 px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num, 1045 msiqid_t *msiq_id) 1046 { 1047 uint64_t ret; 1048 1049 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n", 1050 dip, msi_num); 1051 1052 if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip), 1053 msi_num, msiq_id)) != H_EOK) { 1054 DBG(DBG_LIB_MSI, dip, 1055 "hvio_msi_getmsiq failed, ret 0x%lx\n", ret); 1056 return (DDI_FAILURE); 1057 } 1058 1059 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n", 1060 *msiq_id); 1061 1062 return (DDI_SUCCESS); 1063 } 1064 1065 /*ARGSUSED*/ 1066 int 1067 px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num, 1068 msiqid_t msiq_id, msi_type_t msitype) 1069 { 1070 uint64_t ret; 1071 1072 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x " 1073 "msq_id 0x%x\n", dip, msi_num, msiq_id); 1074 1075 if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip), 1076 msi_num, msiq_id)) != H_EOK) { 1077 DBG(DBG_LIB_MSI, dip, 1078 "hvio_msi_setmsiq failed, ret 0x%lx\n", ret); 1079 return (DDI_FAILURE); 1080 } 1081 1082 return (DDI_SUCCESS); 1083 } 1084 1085 /*ARGSUSED*/ 1086 int 1087 px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num, 1088 pci_msi_valid_state_t *msi_valid_state) 1089 { 1090 uint64_t ret; 1091 1092 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n", 1093 dip, msi_num); 1094 1095 if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip), 1096 msi_num, msi_valid_state)) != H_EOK) { 1097 DBG(DBG_LIB_MSI, dip, 1098 "hvio_msi_getvalid failed, ret 0x%lx\n", ret); 1099 return (DDI_FAILURE); 1100 } 1101 1102 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msiq_id 0x%x\n", 1103 *msi_valid_state); 1104 1105 return (DDI_SUCCESS); 1106 } 1107 1108 /*ARGSUSED*/ 1109 int 1110 px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num, 1111 pci_msi_valid_state_t 
msi_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x "
	    "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state);

	if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip),
	    msi_num, msi_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_setvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num,
    pci_msi_state_t *msi_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n",
	    dip, msi_num);

	if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip),
	    msi_num, msi_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_getstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n",
	    *msi_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num,
    pci_msi_state_t msi_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x "
	    "msi_state 0x%x\n", dip, msi_num, msi_state);

	if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip),
	    msi_num, msi_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_setstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * MSG Functions:
 */
/*ARGSUSED*/
int
px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
    msiqid_t *msiq_id)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n",
	    dip, msg_type);

	if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip),
	    msg_type, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_getmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n",
	    *msiq_id);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type,
    msiqid_t msiq_id)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setmsiq: dip 0x%p msg_type 0x%x "
	    "msiq_id 0x%x\n", dip, msg_type, msiq_id);

	if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip),
	    msg_type, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_setmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t *msg_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n",
	    dip, msg_type);

	if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type,
	    msg_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_getvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n",
	    *msg_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t msg_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x "
	    "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state);

	if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type,
	    msg_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_setvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * Suspend/Resume Functions:
 * Currently unsupported by hypervisor
 */
int
px_lib_suspend(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	px_cb_t		*cb_p = PX2CB(px_p);
	devhandle_t	dev_hdl, xbus_dev_hdl;
	uint64_t	ret = H_EOK;

	DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip);

	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];

	if ((ret = hvio_suspend(dev_hdl, pxu_p)) != H_EOK)
		goto fail;

	if (--cb_p->attachcnt == 0) {
		ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p);
		if (ret != H_EOK)
			cb_p->attachcnt++;
	}

fail:
	return ((ret != H_EOK) ? DDI_FAILURE: DDI_SUCCESS);
}

void
px_lib_resume(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	px_cb_t		*cb_p = PX2CB(px_p);
	devhandle_t	dev_hdl, xbus_dev_hdl;
	devino_t	pec_ino = px_p->px_inos[PX_INTR_PEC];
	devino_t	xbc_ino = px_p->px_inos[PX_INTR_XBC];

	DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip);

	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];

	if (++cb_p->attachcnt == 1)
		hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p);

	hvio_resume(dev_hdl, pec_ino, pxu_p);
}

/*
 * Generate a unique Oberon UBC ID based on the Logical System Board and
 * the IO Channel from the portid property field.
 */
static uint64_t
oberon_get_ubc_id(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ubc_id;

	/*
	 * Generate a unique 6 bit UBC ID using the 2 IO_Channel#[1:0] bits and
	 * the 4 LSB_ID[3:0] bits from the Oberon's portid property.
	 */
	ubc_id = (((pxu_p->portid >> OBERON_PORT_ID_IOC) &
	    OBERON_PORT_ID_IOC_MASK) | (((pxu_p->portid >>
	    OBERON_PORT_ID_LSB) & OBERON_PORT_ID_LSB_MASK)
	    << OBERON_UBC_ID_LSB));

	return (ubc_id);
}

/*
 * Oberon does not have a UBC scratch register, so allocate an array of
 * scratch registers when needed and use a unique UBC ID as an index.
 * This code could be simplified by using a pre-allocated array; the array
 * is currently allocated dynamically because it is only needed by Oberon.
 */
static void
oberon_set_cb(dev_info_t *dip, uint64_t val)
{
	uint64_t	ubc_id;

	if (px_oberon_ubc_scratch_regs == NULL)
		px_oberon_ubc_scratch_regs =
		    (uint64_t *)kmem_zalloc(sizeof (uint64_t)*
		    OBERON_UBC_ID_MAX, KM_SLEEP);

	ubc_id = oberon_get_ubc_id(dip);

	px_oberon_ubc_scratch_regs[ubc_id] = val;

	/*
	 * Check if any scratch registers are still in use. If all scratch
	 * registers are currently set to zero, then deallocate the scratch
	 * register array.
1359 */ 1360 for (ubc_id = 0; ubc_id < OBERON_UBC_ID_MAX; ubc_id++) { 1361 if (px_oberon_ubc_scratch_regs[ubc_id] != NULL) 1362 return; 1363 } 1364 1365 /* 1366 * All scratch registers are set to zero so deallocate the scratch 1367 * register array and set the pointer to NULL. 1368 */ 1369 kmem_free(px_oberon_ubc_scratch_regs, 1370 (sizeof (uint64_t)*OBERON_UBC_ID_MAX)); 1371 1372 px_oberon_ubc_scratch_regs = NULL; 1373 } 1374 1375 /* 1376 * Oberon does not have a UBC scratch register, so use an allocated array of 1377 * scratch registers and use the unique UBC ID as an index into that array. 1378 */ 1379 static uint64_t 1380 oberon_get_cb(dev_info_t *dip) 1381 { 1382 uint64_t ubc_id; 1383 1384 if (px_oberon_ubc_scratch_regs == NULL) 1385 return (0); 1386 1387 ubc_id = oberon_get_ubc_id(dip); 1388 1389 return (px_oberon_ubc_scratch_regs[ubc_id]); 1390 } 1391 1392 /* 1393 * Misc Functions: 1394 * Currently unsupported by hypervisor 1395 */ 1396 static uint64_t 1397 px_get_cb(dev_info_t *dip) 1398 { 1399 px_t *px_p = DIP_TO_STATE(dip); 1400 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1401 1402 /* 1403 * Oberon does not currently have Scratchpad registers. 1404 */ 1405 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) 1406 return (oberon_get_cb(dip)); 1407 1408 return (CSR_XR((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1)); 1409 } 1410 1411 static void 1412 px_set_cb(dev_info_t *dip, uint64_t val) 1413 { 1414 px_t *px_p = DIP_TO_STATE(dip); 1415 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1416 1417 /* 1418 * Oberon does not currently have Scratchpad registers. 1419 */ 1420 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) { 1421 oberon_set_cb(dip, val); 1422 return; 1423 } 1424 1425 CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, val); 1426 } 1427 1428 /*ARGSUSED*/ 1429 int 1430 px_lib_map_vconfig(dev_info_t *dip, 1431 ddi_map_req_t *mp, pci_config_offset_t off, 1432 pci_regspec_t *rp, caddr_t *addrp) 1433 { 1434 /* 1435 * No special config space access services in this layer. 1436 */ 1437 return (DDI_FAILURE); 1438 } 1439 1440 void 1441 px_lib_map_attr_check(ddi_map_req_t *mp) 1442 { 1443 ddi_acc_hdl_t *hp = mp->map_handlep; 1444 1445 /* fire does not accept byte masks from PIO store merge */ 1446 if (hp->ah_acc.devacc_attr_dataorder == DDI_STORECACHING_OK_ACC) 1447 hp->ah_acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 1448 } 1449 1450 void 1451 px_lib_clr_errs(px_t *px_p) 1452 { 1453 px_pec_t *pec_p = px_p->px_pec_p; 1454 dev_info_t *rpdip = px_p->px_dip; 1455 int err = PX_OK, ret; 1456 int acctype = pec_p->pec_safeacc_type; 1457 ddi_fm_error_t derr; 1458 1459 /* Create the derr */ 1460 bzero(&derr, sizeof (ddi_fm_error_t)); 1461 derr.fme_version = DDI_FME_VERSION; 1462 derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1); 1463 derr.fme_flag = acctype; 1464 1465 if (acctype == DDI_FM_ERR_EXPECTED) { 1466 derr.fme_status = DDI_FM_NONFATAL; 1467 ndi_fm_acc_err_set(pec_p->pec_acc_hdl, &derr); 1468 } 1469 1470 mutex_enter(&px_p->px_fm_mutex); 1471 1472 /* send ereport/handle/clear fire registers */ 1473 err = px_err_handle(px_p, &derr, PX_LIB_CALL, B_TRUE); 1474 1475 /* Check all child devices for errors */ 1476 ret = ndi_fm_handler_dispatch(rpdip, NULL, &derr); 1477 1478 mutex_exit(&px_p->px_fm_mutex); 1479 1480 /* 1481 * PX_FATAL_HW indicates a condition recovered from Fatal-Reset, 1482 * therefore it does not cause panic. 
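 * (Accordingly, the check below panics only on PX_FATAL_GOS or
 * PX_FATAL_SW, or when a child handler reports DDI_FM_FATAL.)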
1483 */ 1484 if ((err & (PX_FATAL_GOS | PX_FATAL_SW)) || (ret == DDI_FM_FATAL)) 1485 PX_FM_PANIC("Fatal System Port Error has occurred\n"); 1486 } 1487 1488 #ifdef DEBUG 1489 int px_peekfault_cnt = 0; 1490 int px_pokefault_cnt = 0; 1491 #endif /* DEBUG */ 1492 1493 /*ARGSUSED*/ 1494 static int 1495 px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip, 1496 peekpoke_ctlops_t *in_args) 1497 { 1498 px_t *px_p = DIP_TO_STATE(dip); 1499 px_pec_t *pec_p = px_p->px_pec_p; 1500 int err = DDI_SUCCESS; 1501 on_trap_data_t otd; 1502 1503 mutex_enter(&pec_p->pec_pokefault_mutex); 1504 pec_p->pec_ontrap_data = &otd; 1505 pec_p->pec_safeacc_type = DDI_FM_ERR_POKE; 1506 1507 /* Set up protected environment. */ 1508 if (!on_trap(&otd, OT_DATA_ACCESS)) { 1509 uintptr_t tramp = otd.ot_trampoline; 1510 1511 otd.ot_trampoline = (uintptr_t)&poke_fault; 1512 err = do_poke(in_args->size, (void *)in_args->dev_addr, 1513 (void *)in_args->host_addr); 1514 otd.ot_trampoline = tramp; 1515 } else 1516 err = DDI_FAILURE; 1517 1518 px_lib_clr_errs(px_p); 1519 1520 if (otd.ot_trap & OT_DATA_ACCESS) 1521 err = DDI_FAILURE; 1522 1523 /* Take down protected environment. */ 1524 no_trap(); 1525 1526 pec_p->pec_ontrap_data = NULL; 1527 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1528 mutex_exit(&pec_p->pec_pokefault_mutex); 1529 1530 #ifdef DEBUG 1531 if (err == DDI_FAILURE) 1532 px_pokefault_cnt++; 1533 #endif 1534 return (err); 1535 } 1536 1537 /*ARGSUSED*/ 1538 static int 1539 px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip, 1540 peekpoke_ctlops_t *cautacc_ctlops_arg) 1541 { 1542 size_t size = cautacc_ctlops_arg->size; 1543 uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr; 1544 uintptr_t host_addr = cautacc_ctlops_arg->host_addr; 1545 ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle; 1546 size_t repcount = cautacc_ctlops_arg->repcount; 1547 uint_t flags = cautacc_ctlops_arg->flags; 1548 1549 px_t *px_p = DIP_TO_STATE(dip); 1550 px_pec_t *pec_p = px_p->px_pec_p; 1551 int err = DDI_SUCCESS; 1552 1553 /* 1554 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault 1555 * mutex. 
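 * (Cautious puts are therefore serialized with px_lib_do_poke(), which
 * takes the same pec_pokefault_mutex directly.)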
1556 */ 1557 i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1558 1559 pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap; 1560 pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED; 1561 hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED; 1562 1563 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1564 for (; repcount; repcount--) { 1565 switch (size) { 1566 1567 case sizeof (uint8_t): 1568 i_ddi_put8(hp, (uint8_t *)dev_addr, 1569 *(uint8_t *)host_addr); 1570 break; 1571 1572 case sizeof (uint16_t): 1573 i_ddi_put16(hp, (uint16_t *)dev_addr, 1574 *(uint16_t *)host_addr); 1575 break; 1576 1577 case sizeof (uint32_t): 1578 i_ddi_put32(hp, (uint32_t *)dev_addr, 1579 *(uint32_t *)host_addr); 1580 break; 1581 1582 case sizeof (uint64_t): 1583 i_ddi_put64(hp, (uint64_t *)dev_addr, 1584 *(uint64_t *)host_addr); 1585 break; 1586 } 1587 1588 host_addr += size; 1589 1590 if (flags == DDI_DEV_AUTOINCR) 1591 dev_addr += size; 1592 1593 px_lib_clr_errs(px_p); 1594 1595 if (pec_p->pec_ontrap_data->ot_trap & OT_DATA_ACCESS) { 1596 err = DDI_FAILURE; 1597 #ifdef DEBUG 1598 px_pokefault_cnt++; 1599 #endif 1600 break; 1601 } 1602 } 1603 } 1604 1605 i_ddi_notrap((ddi_acc_handle_t)hp); 1606 pec_p->pec_ontrap_data = NULL; 1607 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1608 i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1609 hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED; 1610 1611 return (err); 1612 } 1613 1614 1615 int 1616 px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip, 1617 peekpoke_ctlops_t *in_args) 1618 { 1619 return (in_args->handle ? px_lib_do_caut_put(dip, rdip, in_args) : 1620 px_lib_do_poke(dip, rdip, in_args)); 1621 } 1622 1623 1624 /*ARGSUSED*/ 1625 static int 1626 px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args) 1627 { 1628 px_t *px_p = DIP_TO_STATE(dip); 1629 px_pec_t *pec_p = px_p->px_pec_p; 1630 int err = DDI_SUCCESS; 1631 on_trap_data_t otd; 1632 1633 mutex_enter(&pec_p->pec_pokefault_mutex); 1634 pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK; 1635 1636 if (!on_trap(&otd, OT_DATA_ACCESS)) { 1637 uintptr_t tramp = otd.ot_trampoline; 1638 1639 otd.ot_trampoline = (uintptr_t)&peek_fault; 1640 err = do_peek(in_args->size, (void *)in_args->dev_addr, 1641 (void *)in_args->host_addr); 1642 otd.ot_trampoline = tramp; 1643 } else 1644 err = DDI_FAILURE; 1645 1646 no_trap(); 1647 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1648 mutex_exit(&pec_p->pec_pokefault_mutex); 1649 1650 #ifdef DEBUG 1651 if (err == DDI_FAILURE) 1652 px_peekfault_cnt++; 1653 #endif 1654 return (err); 1655 } 1656 1657 1658 static int 1659 px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg) 1660 { 1661 size_t size = cautacc_ctlops_arg->size; 1662 uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr; 1663 uintptr_t host_addr = cautacc_ctlops_arg->host_addr; 1664 ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle; 1665 size_t repcount = cautacc_ctlops_arg->repcount; 1666 uint_t flags = cautacc_ctlops_arg->flags; 1667 1668 px_t *px_p = DIP_TO_STATE(dip); 1669 px_pec_t *pec_p = px_p->px_pec_p; 1670 int err = DDI_SUCCESS; 1671 1672 /* 1673 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault 1674 * mutex. 
 */
	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);

	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;

	if (repcount == 1) {
		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
			i_ddi_caut_get(size, (void *)dev_addr,
			    (void *)host_addr);
		} else {
			int i;
			uint8_t *ff_addr = (uint8_t *)host_addr;
			for (i = 0; i < size; i++)
				*ff_addr++ = 0xff;

			err = DDI_FAILURE;
#ifdef DEBUG
			px_peekfault_cnt++;
#endif
		}
	} else {
		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
			for (; repcount; repcount--) {
				i_ddi_caut_get(size, (void *)dev_addr,
				    (void *)host_addr);

				host_addr += size;

				if (flags == DDI_DEV_AUTOINCR)
					dev_addr += size;
			}
		} else {
			err = DDI_FAILURE;
#ifdef DEBUG
			px_peekfault_cnt++;
#endif
		}
	}

	i_ddi_notrap((ddi_acc_handle_t)hp);
	pec_p->pec_ontrap_data = NULL;
	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;

	return (err);
}

/*ARGSUSED*/
int
px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *in_args, void *result)
{
	result = (void *)in_args->host_addr;
	return (in_args->handle ? px_lib_do_caut_get(dip, in_args) :
	    px_lib_do_peek(dip, in_args));
}

/*
 * implements PPM interface
 */
int
px_lib_pmctl(int cmd, px_t *px_p)
{
	ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ);
	switch (cmd) {
	case PPMREQ_PRE_PWR_OFF:
		/*
		 * Currently there is no device power management for
		 * the root complex (fire). When there is we need to make
		 * sure that it is at full power before trying to send the
		 * PME_Turn_Off message.
		 */
		DBG(DBG_PWR, px_p->px_dip,
		    "ioctl: request to send PME_Turn_Off\n");
		return (px_goto_l23ready(px_p));

	case PPMREQ_PRE_PWR_ON:
		DBG(DBG_PWR, px_p->px_dip, "ioctl: PRE_PWR_ON request\n");
		return (px_pre_pwron_check(px_p));

	case PPMREQ_POST_PWR_ON:
		DBG(DBG_PWR, px_p->px_dip, "ioctl: POST_PWR_ON request\n");
		return (px_goto_l0(px_p));

	default:
		return (DDI_FAILURE);
	}
}

/*
 * Sends the PME_Turn_Off message to put the link in L2/L3 Ready state.
 * Called by px_ioctl.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 * 1. Wait for the link to be in L1 state (link status reg).
 * 2. Write to the PME_Turn_Off register to broadcast the message.
 * 3. Set a timeout.
 * 4. If it times out, return failure.
 * 5. If PME_To_Ack is received, wait until the link is in L2/L3 Ready.
 */
static int
px_goto_l23ready(px_t *px_p)
{
	pcie_pwr_t	*pwr_p;
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
	int		ret = DDI_SUCCESS;
	clock_t		end, timeleft;
	int		mutex_held = 1;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	mutex_enter(&pwr_p->pwr_lock);
	mutex_enter(&px_p->px_l23ready_lock);
	/* Clear the PME_To_ACK received flag */
	px_p->px_pm_flags &= ~PX_PMETOACK_RECVD;
	/*
	 * When P25 is the downstream device, after receiving
	 * PME_To_ACK, fire will go to Detect state, which causes
	 * the link down event. Inform FMA that this is expected.
	 * For all other cards compliant with the PCI Express
	 * spec, this will happen when the power is re-applied. FMA
	 * code will clear this flag after one instance of LDN. Since
	 * there will not be an LDN event for the spec-compliant cards,
	 * we need to clear the flag after receiving PME_To_ACK.
	 */
	px_p->px_pm_flags |= PX_LDN_EXPECTED;
	if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) {
		ret = DDI_FAILURE;
		goto l23ready_done;
	}
	px_p->px_pm_flags |= PX_PME_TURNOFF_PENDING;

	end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout);
	while (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
		timeleft = cv_timedwait(&px_p->px_l23ready_cv,
		    &px_p->px_l23ready_lock, end);
		/*
		 * If cv_timedwait returns -1, it is either
		 * 1) timed out, or
		 * 2) there was a premature wakeup but by the time
		 * cv_timedwait is called again end < lbolt, i.e.
		 * end is in the past, or
		 * 3) by the time we make the first cv_timedwait call,
		 * end < lbolt is already true.
		 */
		if (timeleft == -1)
			break;
	}
	if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
		/*
		 * Either we timed out or the interrupt didn't get a
		 * chance to grab the mutex and set the flag.
		 * Release the mutex and delay for some time.
		 * This will 1) give the interrupt a chance to
		 * set the flag, and 2) create a delay between two
		 * consecutive requests.
		 */
		mutex_exit(&px_p->px_l23ready_lock);
		delay(drv_usectohz(50 * PX_MSEC_TO_USEC));
		mutex_held = 0;
		if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
			ret = DDI_FAILURE;
			DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting"
			    " for PME_TO_ACK\n");
		}
	}
	px_p->px_pm_flags &=
	    ~(PX_PME_TURNOFF_PENDING | PX_PMETOACK_RECVD | PX_LDN_EXPECTED);

l23ready_done:
	if (mutex_held)
		mutex_exit(&px_p->px_l23ready_lock);
	/*
	 * Wait till the link is in L1 idle, if sending PME_Turn_Off
	 * was successful.
	 */
	if (ret == DDI_SUCCESS) {
		if (px_link_wait4l1idle(csr_base) != DDI_SUCCESS) {
			DBG(DBG_PWR, px_p->px_dip, " Link is not at L1"
			    " even though we received PME_To_ACK.\n");
			/*
			 * Workaround for hardware bug with P25.
			 * Due to a hardware bug with P25, the link state
			 * will be Detect state rather than L1 after the
			 * link is transitioned to L23Ready state. Since
			 * we don't know whether the link is in L23Ready
			 * state without Fire's state being L1_idle, we
			 * delay here just to make sure that we wait till
			 * the link is transitioned to L23Ready state.
 */
			delay(drv_usectohz(100 * PX_MSEC_TO_USEC));
		}
		pwr_p->pwr_link_lvl = PM_LEVEL_L3;

	}
	mutex_exit(&pwr_p->pwr_lock);
	return (ret);
}

/*
 * Message interrupt handler intended to be shared for both
 * PME and PME_TO_ACK msg handling; currently it only handles
 * the PME_To_ACK message.
 */
uint_t
px_pmeq_intr(caddr_t arg)
{
	px_t	*px_p = (px_t *)arg;

	DBG(DBG_PWR, px_p->px_dip, " PME_To_ACK received \n");
	mutex_enter(&px_p->px_l23ready_lock);
	cv_broadcast(&px_p->px_l23ready_cv);
	if (px_p->px_pm_flags & PX_PME_TURNOFF_PENDING) {
		px_p->px_pm_flags |= PX_PMETOACK_RECVD;
	} else {
		/*
		 * This may be the second ack received. If so, then
		 * we should be receiving it during the wait4L1 stage.
		 */
		px_p->px_pmetoack_ignored++;
	}
	mutex_exit(&px_p->px_l23ready_lock);
	return (DDI_INTR_CLAIMED);
}

static int
px_pre_pwron_check(px_t *px_p)
{
	pcie_pwr_t	*pwr_p;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	/*
	 * For the spec-compliant downstream cards, link down
	 * is expected when the device is powered on.
	 */
	px_p->px_pm_flags |= PX_LDN_EXPECTED;
	return (pwr_p->pwr_link_lvl == PM_LEVEL_L3 ? DDI_SUCCESS : DDI_FAILURE);
}

static int
px_goto_l0(px_t *px_p)
{
	pcie_pwr_t	*pwr_p;
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
	int		ret = DDI_SUCCESS;
	uint64_t	time_spent = 0;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	mutex_enter(&pwr_p->pwr_lock);
	/*
	 * The following link retrain activity will cause LDN and LUP events.
	 * Receiving LDN prior to receiving LUP is expected, not an error in
	 * this case. Receiving LUP indicates the link is fully up to support
	 * powering up the downstream device, and of course any further LDN
	 * and LUP outside this context will be an error.
	 */
	px_p->px_lup_pending = 1;
	if (px_link_retrain(csr_base) != DDI_SUCCESS) {
		ret = DDI_FAILURE;
		goto l0_done;
	}

	/* The LUP event takes on the order of 15ms to occur */
	for (; px_p->px_lup_pending && (time_spent < px_lup_poll_to);
	    time_spent += px_lup_poll_interval)
		drv_usecwait(px_lup_poll_interval);
	if (px_p->px_lup_pending)
		ret = DDI_FAILURE;
l0_done:
	px_enable_detect_quiet(csr_base);
	if (ret == DDI_SUCCESS)
		pwr_p->pwr_link_lvl = PM_LEVEL_L0;
	mutex_exit(&pwr_p->pwr_lock);
	return (ret);
}

/*
 * Extract the driver's binding name to identify which chip we're binding to.
 * Whenever a new bus bridge is created, the driver alias entry should be
 * added here to identify the device if needed. If a device isn't added,
 * the identity defaults to PX_CHIP_UNIDENTIFIED.
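 * Binding names currently recognized below: "pciex108e,80f0" (Fire,
 * accepted only at module-revision FIRE_MOD_REV_20 or later) and
 * "pciex108e,80f8" (Oberon).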
 */
static uint32_t
px_identity_init(px_t *px_p)
{
	dev_info_t	*dip = px_p->px_dip;
	char		*name = ddi_binding_name(dip);
	uint32_t	revision = 0;

	revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "module-revision#", 0);

	/* Check for Fire driver binding name */
	if (strcmp(name, "pciex108e,80f0") == 0) {
		DBG(DBG_ATTACH, dip, "px_identity_init: %s%d: "
		    "(FIRE), module-revision %d\n", NAMEINST(dip),
		    revision);

		return ((revision >= FIRE_MOD_REV_20) ?
		    PX_CHIP_FIRE : PX_CHIP_UNIDENTIFIED);
	}

	/* Check for Oberon driver binding name */
	if (strcmp(name, "pciex108e,80f8") == 0) {
		DBG(DBG_ATTACH, dip, "px_identity_init: %s%d: "
		    "(OBERON), module-revision %d\n", NAMEINST(dip),
		    revision);

		return (PX_CHIP_OBERON);
	}

	DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n",
	    ddi_driver_name(dip), ddi_get_instance(dip), name, revision);

	return (PX_CHIP_UNIDENTIFIED);
}

int
px_err_add_intr(px_fault_t *px_fault_p)
{
	dev_info_t	*dip = px_fault_p->px_fh_dip;
	px_t		*px_p = DIP_TO_STATE(dip);

	VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL,
	    px_fault_p->px_err_func, (caddr_t)px_fault_p, NULL) == 0);

	px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino);

	return (DDI_SUCCESS);
}

void
px_err_rem_intr(px_fault_t *px_fault_p)
{
	dev_info_t	*dip = px_fault_p->px_fh_dip;
	px_t		*px_p = DIP_TO_STATE(dip);

	px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino,
	    IB_INTR_WAIT);

	rem_ivintr(px_fault_p->px_fh_sysino, NULL);
}

/*
 * px_cb_add_intr() - Called from attach(9E) to create CB if not yet
 * created, to add CB interrupt vector always, but enable only once.
 */
int
px_cb_add_intr(px_fault_t *fault_p)
{
	px_t		*px_p = DIP_TO_STATE(fault_p->px_fh_dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	px_cb_t		*cb_p = (px_cb_t *)px_get_cb(fault_p->px_fh_dip);
	px_cb_list_t	*pxl, *pxl_new;
	cpuid_t		cpuid;


	if (cb_p == NULL) {
		cb_p = kmem_zalloc(sizeof (px_cb_t), KM_SLEEP);
		mutex_init(&cb_p->cb_mutex, NULL, MUTEX_DRIVER, NULL);
		cb_p->px_cb_func = px_cb_intr;
		pxu_p->px_cb_p = cb_p;
		px_set_cb(fault_p->px_fh_dip, (uint64_t)cb_p);
	} else
		pxu_p->px_cb_p = cb_p;

	mutex_enter(&cb_p->cb_mutex);

	VERIFY(add_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL,
	    cb_p->px_cb_func, (caddr_t)cb_p, NULL) == 0);

	if (cb_p->pxl == NULL) {

		cpuid = intr_dist_cpuid();
		px_ib_intr_enable(px_p, cpuid, fault_p->px_intr_ino);

		pxl = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
		pxl->pxp = px_p;

		cb_p->pxl = pxl;
		cb_p->sysino = fault_p->px_fh_sysino;
		cb_p->cpuid = cpuid;

	} else {
		/*
		 * Find the last pxl, or
		 * stop short on encountering a duplicate entry, or
		 * both.
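		 * (The walk below stops at the matching entry if one
		 * exists, otherwise at the list tail, so the duplicate
		 * check and the append share a single traversal.)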
		 */
		pxl = cb_p->pxl;
		for (; !(pxl->pxp == px_p) && pxl->next; pxl = pxl->next);
		if (pxl->pxp == px_p) {
			cmn_err(CE_WARN, "px_cb_add_intr: reregister sysino "
			    "%lx by px_p 0x%p\n", cb_p->sysino, (void *)px_p);
			mutex_exit(&cb_p->cb_mutex);
			return (DDI_FAILURE);
		}

		/* add to linked list */
		pxl_new = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
		pxl_new->pxp = px_p;
		pxl->next = pxl_new;
	}
	cb_p->attachcnt++;

	mutex_exit(&cb_p->cb_mutex);

	return (DDI_SUCCESS);
}

/*
 * px_cb_rem_intr() - Called from detach(9E) to remove its CB
 * interrupt vector, to shift the interrupt proxy to the next available px,
 * or to disable the CB interrupt when it is the last one attached.
 */
void
px_cb_rem_intr(px_fault_t *fault_p)
{
	px_t		*px_p = DIP_TO_STATE(fault_p->px_fh_dip), *pxp;
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	px_cb_t		*cb_p = PX2CB(px_p);
	px_cb_list_t	*pxl, *prev;
	px_fault_t	*f_p;

	ASSERT(cb_p->pxl);

	/* De-list the target px; move the next px up */

	mutex_enter(&cb_p->cb_mutex);

	pxl = cb_p->pxl;
	if (pxl->pxp == px_p) {
		cb_p->pxl = pxl->next;
	} else {
		prev = pxl;
		pxl = pxl->next;
		for (; pxl && (pxl->pxp != px_p); prev = pxl, pxl = pxl->next);
		if (!pxl) {
			cmn_err(CE_WARN, "px_cb_rem_intr: can't find px_p 0x%p "
			    "in registered CB list.", (void *)px_p);
			mutex_exit(&cb_p->cb_mutex);
			return;
		}
		prev->next = pxl->next;
	}
	kmem_free(pxl, sizeof (px_cb_list_t));

	if (fault_p->px_fh_sysino == cb_p->sysino) {
		px_ib_intr_disable(px_p->px_ib_p, fault_p->px_intr_ino,
		    IB_INTR_WAIT);

		if (cb_p->pxl) {
			pxp = cb_p->pxl->pxp;
			f_p = &pxp->px_cb_fault;
			cb_p->sysino = f_p->px_fh_sysino;

			PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid);
			(void) px_lib_intr_setstate(pxp->px_dip, cb_p->sysino,
			    INTR_IDLE_STATE);
		}
	}

	rem_ivintr(fault_p->px_fh_sysino, NULL);
	pxu_p->px_cb_p = NULL;
	cb_p->attachcnt--;
	if (cb_p->pxl) {
		mutex_exit(&cb_p->cb_mutex);
		return;
	}
	mutex_exit(&cb_p->cb_mutex);

	mutex_destroy(&cb_p->cb_mutex);
	px_set_cb(fault_p->px_fh_dip, 0ull);
	kmem_free(cb_p, sizeof (px_cb_t));
}

/*
 * px_cb_intr() - sun4u only, CB interrupt dispatcher
 */
uint_t
px_cb_intr(caddr_t arg)
{
	px_cb_t		*cb_p = (px_cb_t *)arg;
	px_cb_list_t	*pxl = cb_p->pxl;
	px_t		*pxp = pxl ? pxl->pxp : NULL;
	px_fault_t	*fault_p;

	/* Skip over any px instances that are not fully attached */
	while (pxl && pxp && (pxp->px_state != PX_ATTACHED)) {
		pxl = pxl->next;
		pxp = (pxl) ? pxl->pxp : NULL;
	}

	if (pxp) {
		fault_p = &pxp->px_cb_fault;
		return (fault_p->px_err_func((caddr_t)fault_p));
	} else
		return (DDI_INTR_UNCLAIMED);
}

/*
 * px_cb_intr_redist() - sun4u only, CB interrupt redistribution
 */
void
px_cb_intr_redist(px_t *px_p)
{
	px_fault_t	*f_p = &px_p->px_cb_fault;
	px_cb_t		*cb_p = PX2CB(px_p);
	devino_t	ino = px_p->px_inos[PX_INTR_XBC];
	cpuid_t		cpuid;

	mutex_enter(&cb_p->cb_mutex);

	if (cb_p->sysino != f_p->px_fh_sysino) {
		mutex_exit(&cb_p->cb_mutex);
		return;
	}

	cb_p->cpuid = cpuid = intr_dist_cpuid();
	px_ib_intr_dist_en(px_p->px_dip, cpuid, ino, B_FALSE);

	mutex_exit(&cb_p->cb_mutex);
}

#ifdef FMA
void
px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status)
{
	/* populate the rc_status by reading the registers - TBD */
}
#endif /* FMA */

/*
 * Unprotected raw reads/writes of a fabric device's config space.
 * Only used for temporary PCI-E Fabric Error Handling.
 */
uint32_t
px_fab_get(px_t *px_p, pcie_req_id_t bdf, uint16_t offset)
{
	px_ranges_t	*rp = px_p->px_ranges_p;
	uint64_t	range_prop, base_addr;
	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
	uint32_t	val;

	/* Get Fire's Physical Base Address */
	range_prop = px_get_range_prop(px_p, rp, bank);

	/* Get config space first. */
	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);

	val = ldphysio(base_addr);

	return (LE_32(val));
}

void
px_fab_set(px_t *px_p, pcie_req_id_t bdf, uint16_t offset,
    uint32_t val)
{
	px_ranges_t	*rp = px_p->px_ranges_p;
	uint64_t	range_prop, base_addr;
	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);

	/* Get Fire's Physical Base Address */
	range_prop = px_get_range_prop(px_p, rp, bank);

	/* Get config space first. */
	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);

	stphysio(base_addr, LE_32(val));
}

/*
 * cpr callback
 *
 * Disable the fabric error msg interrupt prior to suspending
 * all device drivers; re-enable the fabric error msg interrupt
 * after all devices are resumed.
 */
static boolean_t
px_cpr_callb(void *arg, int code)
{
	px_t		*px_p = (px_t *)arg;
	px_ib_t		*ib_p = px_p->px_ib_p;
	px_pec_t	*pec_p = px_p->px_pec_p;
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t		csr_base;
	devino_t	ce_ino, nf_ino, f_ino;
	px_ib_ino_info_t	*ce_ino_p, *nf_ino_p, *f_ino_p;
	uint64_t	imu_log_enable, imu_intr_enable;
	uint64_t	imu_log_mask, imu_intr_mask;

	ce_ino = px_msiqid_to_devino(px_p, pec_p->pec_corr_msg_msiq_id);
	nf_ino = px_msiqid_to_devino(px_p, pec_p->pec_non_fatal_msg_msiq_id);
	f_ino = px_msiqid_to_devino(px_p, pec_p->pec_fatal_msg_msiq_id);
	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];

	imu_log_enable = CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE);
	imu_intr_enable = CSR_XR(csr_base, IMU_INTERRUPT_ENABLE);

	imu_log_mask = BITMASK(IMU_ERROR_LOG_ENABLE_FATAL_MES_NOT_EN_LOG_EN) |
	    BITMASK(IMU_ERROR_LOG_ENABLE_NONFATAL_MES_NOT_EN_LOG_EN) |
	    BITMASK(IMU_ERROR_LOG_ENABLE_COR_MES_NOT_EN_LOG_EN);

	imu_intr_mask =
	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_S_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_S_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_S_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_P_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_P_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_P_INT_EN);

	switch (code) {
	case CB_CODE_CPR_CHKPT:
		/* disable imu rbne on corr/nonfatal/fatal errors */
		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE,
		    imu_log_enable & (~imu_log_mask));

		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE,
		    imu_intr_enable & (~imu_intr_mask));

		/* disable CORR intr mapping */
		px_ib_intr_disable(ib_p, ce_ino, IB_INTR_NOWAIT);

		/* disable NON FATAL intr mapping */
		px_ib_intr_disable(ib_p, nf_ino, IB_INTR_NOWAIT);

		/* disable FATAL intr mapping */
		px_ib_intr_disable(ib_p, f_ino, IB_INTR_NOWAIT);

		break;

	case CB_CODE_CPR_RESUME:
		mutex_enter(&ib_p->ib_ino_lst_mutex);

		ce_ino_p = px_ib_locate_ino(ib_p, ce_ino);
		nf_ino_p = px_ib_locate_ino(ib_p, nf_ino);
		f_ino_p = px_ib_locate_ino(ib_p, f_ino);

		/* enable CORR intr mapping */
		if (ce_ino_p)
			px_ib_intr_enable(px_p, ce_ino_p->ino_cpuid, ce_ino);
		else
			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
			    "reenable PCIe Correctable msg intr.\n");

		/* enable NON FATAL intr mapping */
		if (nf_ino_p)
			px_ib_intr_enable(px_p, nf_ino_p->ino_cpuid, nf_ino);
		else
			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
			    "reenable PCIe Non Fatal msg intr.\n");

		/* enable FATAL intr mapping */
		if (f_ino_p)
			px_ib_intr_enable(px_p, f_ino_p->ino_cpuid, f_ino);
		else
			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
			    "reenable PCIe Fatal msg intr.\n");

		mutex_exit(&ib_p->ib_ino_lst_mutex);

		/* re-enable logging/intrs for corr/nonfatal/fatal */
		/* "message not enabled" errors */
		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, (imu_log_enable |
		    (imu_log_mask & px_imu_log_mask)));
		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, (imu_intr_enable |
		    (imu_intr_mask & px_imu_intr_mask)));

		break;
	}

	return (B_TRUE);
}

uint64_t
px_get_rng_parent_hi_mask(px_t *px_p)
{
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	mask;

	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		mask = OBERON_RANGE_PROP_MASK;
		break;
	case PX_CHIP_FIRE:
		mask = PX_RANGE_PROP_MASK;
		break;
	default:
		mask = PX_RANGE_PROP_MASK;
	}

	return (mask);
}

/*
 * Fetch the chip's range property value.
 */
uint64_t
px_get_range_prop(px_t *px_p, px_ranges_t *rp, int bank)
{
	uint64_t	mask, range_prop;

	mask = px_get_rng_parent_hi_mask(px_p);
	range_prop = (((uint64_t)(rp[bank].parent_high & mask)) << 32) |
	    rp[bank].parent_low;

	return (range_prop);
}

/*
 * add cpr callback
 */
void
px_cpr_add_callb(px_t *px_p)
{
	px_p->px_cprcb_id = callb_add(px_cpr_callb, (void *)px_p,
	    CB_CL_CPR_POST_USER, "px_cpr");
}

/*
 * remove cpr callback
 */
void
px_cpr_rem_callb(px_t *px_p)
{
	(void) callb_delete(px_p->px_cprcb_id);
}

/*ARGSUSED*/
static uint_t
px_hp_intr(caddr_t arg1, caddr_t arg2)
{
	px_t	*px_p = (px_t *)arg1;
	int	rval;

	rval = pciehpc_intr(px_p->px_dip);

#ifdef DEBUG
	if (rval == DDI_INTR_UNCLAIMED)
		cmn_err(CE_WARN, "%s%d: UNCLAIMED intr\n",
		    ddi_driver_name(px_p->px_dip),
		    ddi_get_instance(px_p->px_dip));
#endif

	return (rval);
}

int
px_lib_hotplug_init(dev_info_t *dip, void *arg)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	uint64_t	ret;

	if ((ret = hvio_hotplug_init(dip, arg)) == DDI_SUCCESS) {
		sysino_t	sysino;

		if (px_lib_intr_devino_to_sysino(px_p->px_dip,
		    px_p->px_inos[PX_INTR_HOTPLUG], &sysino) !=
		    DDI_SUCCESS) {
#ifdef DEBUG
			cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n",
			    ddi_driver_name(px_p->px_dip),
			    ddi_get_instance(px_p->px_dip));
#endif
			return (DDI_FAILURE);
		}

		VERIFY(add_ivintr(sysino, PX_PCIEHP_PIL,
		    (intrfunc)px_hp_intr, (caddr_t)px_p, NULL) == 0);
	}

	return (ret);
}

void
px_lib_hotplug_uninit(dev_info_t *dip)
{
	if (hvio_hotplug_uninit(dip) == DDI_SUCCESS) {
		px_t	*px_p = DIP_TO_STATE(dip);
		sysino_t	sysino;

		if (px_lib_intr_devino_to_sysino(px_p->px_dip,
		    px_p->px_inos[PX_INTR_HOTPLUG], &sysino) !=
		    DDI_SUCCESS) {
#ifdef DEBUG
			cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n",
			    ddi_driver_name(px_p->px_dip),
			    ddi_get_instance(px_p->px_dip));
#endif
			return;
		}

		rem_ivintr(sysino, NULL);
	}
}

boolean_t
px_lib_is_in_drain_state(px_t *px_p)
{
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
	uint64_t	drain_status;

	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
		drain_status = CSR_BR(csr_base, DRAIN_CONTROL_STATUS, DRAIN);
	} else {
		drain_status = CSR_BR(csr_base, TLU_STATUS, DRAIN);
	}

	return (drain_status);
}