1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <sys/types.h> 29 #include <sys/kmem.h> 30 #include <sys/conf.h> 31 #include <sys/ddi.h> 32 #include <sys/sunddi.h> 33 #include <sys/fm/protocol.h> 34 #include <sys/fm/util.h> 35 #include <sys/modctl.h> 36 #include <sys/disp.h> 37 #include <sys/stat.h> 38 #include <sys/ddi_impldefs.h> 39 #include <sys/vmem.h> 40 #include <sys/iommutsb.h> 41 #include <sys/cpuvar.h> 42 #include <sys/ivintr.h> 43 #include <sys/byteorder.h> 44 #include <sys/hotplug/pci/pciehpc.h> 45 #include <px_obj.h> 46 #include <pcie_pwr.h> 47 #include "px_tools_var.h" 48 #include <px_regs.h> 49 #include <px_csr.h> 50 #include <sys/machsystm.h> 51 #include "px_lib4u.h" 52 #include "px_err.h" 53 #include "oberon_regs.h" 54 55 #pragma weak jbus_stst_order 56 57 extern void jbus_stst_order(); 58 59 ulong_t px_mmu_dvma_end = 0xfffffffful; 60 uint_t px_ranges_phi_mask = 0xfffffffful; 61 uint64_t *px_oberon_ubc_scratch_regs; 62 63 static int px_goto_l23ready(px_t *px_p); 64 static int px_goto_l0(px_t *px_p); 65 static int px_pre_pwron_check(px_t *px_p); 66 static uint32_t px_identity_chip(px_t *px_p); 67 static boolean_t px_cpr_callb(void *arg, int code); 68 static uint_t px_cb_intr(caddr_t arg); 69 70 /* 71 * px_lib_map_registers 72 * 73 * This function is called from the attach routine to map the registers 74 * accessed by this driver. 
75 * 76 * used by: px_attach() 77 * 78 * return value: DDI_FAILURE on failure 79 */ 80 int 81 px_lib_map_regs(pxu_t *pxu_p, dev_info_t *dip) 82 { 83 ddi_device_acc_attr_t attr; 84 px_reg_bank_t reg_bank = PX_REG_CSR; 85 86 DBG(DBG_ATTACH, dip, "px_lib_map_regs: pxu_p:0x%p, dip 0x%p\n", 87 pxu_p, dip); 88 89 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 90 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 91 attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC; 92 93 /* 94 * PCI CSR Base 95 */ 96 if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank], 97 0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) { 98 goto fail; 99 } 100 101 reg_bank++; 102 103 /* 104 * XBUS CSR Base 105 */ 106 if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank], 107 0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) { 108 goto fail; 109 } 110 111 pxu_p->px_address[reg_bank] -= FIRE_CONTROL_STATUS; 112 113 done: 114 for (; reg_bank >= PX_REG_CSR; reg_bank--) { 115 DBG(DBG_ATTACH, dip, "reg_bank 0x%x address 0x%p\n", 116 reg_bank, pxu_p->px_address[reg_bank]); 117 } 118 119 return (DDI_SUCCESS); 120 121 fail: 122 cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n", 123 ddi_driver_name(dip), ddi_get_instance(dip), reg_bank); 124 125 for (reg_bank--; reg_bank >= PX_REG_CSR; reg_bank--) { 126 pxu_p->px_address[reg_bank] = NULL; 127 ddi_regs_map_free(&pxu_p->px_ac[reg_bank]); 128 } 129 130 return (DDI_FAILURE); 131 } 132 133 /* 134 * px_lib_unmap_regs: 135 * 136 * This routine unmaps the registers mapped by map_px_registers. 137 * 138 * used by: px_detach(), and error conditions in px_attach() 139 * 140 * return value: none 141 */ 142 void 143 px_lib_unmap_regs(pxu_t *pxu_p) 144 { 145 int i; 146 147 for (i = 0; i < PX_REG_MAX; i++) { 148 if (pxu_p->px_ac[i]) 149 ddi_regs_map_free(&pxu_p->px_ac[i]); 150 } 151 } 152 153 int 154 px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl) 155 { 156 px_t *px_p = DIP_TO_STATE(dip); 157 caddr_t xbc_csr_base, csr_base; 158 px_dvma_range_prop_t px_dvma_range; 159 uint32_t chip_id; 160 pxu_t *pxu_p; 161 162 DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p\n", dip); 163 164 if ((chip_id = px_identity_chip(px_p)) == PX_CHIP_UNIDENTIFIED) 165 return (DDI_FAILURE); 166 167 switch (chip_id) { 168 case FIRE_VER_10: 169 cmn_err(CE_WARN, "FIRE Hardware Version 1.0 is not supported"); 170 return (DDI_FAILURE); 171 case FIRE_VER_20: 172 DBG(DBG_ATTACH, dip, "FIRE Hardware Version 2.0\n"); 173 break; 174 case OBERON_VER_10: 175 DBG(DBG_ATTACH, dip, "Oberon Hardware Version 1.0\n"); 176 break; 177 default: 178 cmn_err(CE_WARN, "%s%d: PX Hardware Version Unknown\n", 179 ddi_driver_name(dip), ddi_get_instance(dip)); 180 return (DDI_FAILURE); 181 } 182 183 /* 184 * Allocate platform specific structure and link it to 185 * the px state structure. 
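 * The pxu_t allocated below carries the chip id, portid, mapped register addresses and the IOMMU TSB information used by the rest of this module.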
186 */ 187 pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP); 188 pxu_p->chip_id = chip_id; 189 pxu_p->portid = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 190 "portid", -1); 191 192 /* Map in the registers */ 193 if (px_lib_map_regs(pxu_p, dip) == DDI_FAILURE) { 194 kmem_free(pxu_p, sizeof (pxu_t)); 195 196 return (DDI_FAILURE); 197 } 198 199 xbc_csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC]; 200 csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR]; 201 202 pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid); 203 pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie); 204 pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie); 205 206 pxu_p->tsb_paddr = va_to_pa(pxu_p->tsb_vaddr); 207 208 /* 209 * Create "virtual-dma" property to support child devices 210 * needing to know DVMA range. 211 */ 212 px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1 213 - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT); 214 px_dvma_range.dvma_len = (uint32_t) 215 px_mmu_dvma_end - px_dvma_range.dvma_base + 1; 216 217 (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 218 "virtual-dma", (caddr_t)&px_dvma_range, 219 sizeof (px_dvma_range_prop_t)); 220 /* 221 * Initialize all Fire hardware-specific blocks. 222 */ 223 hvio_cb_init(xbc_csr_base, pxu_p); 224 hvio_ib_init(csr_base, pxu_p); 225 hvio_pec_init(csr_base, pxu_p); 226 hvio_mmu_init(csr_base, pxu_p); 227 228 px_p->px_plat_p = (void *)pxu_p; 229 230 /* 231 * Initialize all the interrupt handlers 232 */ 233 switch (PX_CHIP_TYPE(pxu_p)) { 234 case PX_CHIP_OBERON: 235 px_err_reg_enable(px_p, PX_ERR_UBC); 236 px_err_reg_enable(px_p, PX_ERR_MMU); 237 px_err_reg_enable(px_p, PX_ERR_IMU); 238 px_err_reg_enable(px_p, PX_ERR_TLU_UE); 239 px_err_reg_enable(px_p, PX_ERR_TLU_CE); 240 px_err_reg_enable(px_p, PX_ERR_TLU_OE); 241 242 /* 243 * Oberon hotplug uses the SPARE3 field in the ILU Error Log Enable 244 * register to indicate the status of leaf reset. We need to 245 * preserve the value of this bit, and keep it in 246 * px_ilu_log_mask to reflect the state of the bit. 247 */ 248 if (CSR_BR(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3)) 249 px_ilu_log_mask |= (1ull << 250 ILU_ERROR_LOG_ENABLE_SPARE3); 251 else 252 px_ilu_log_mask &= ~(1ull << 253 ILU_ERROR_LOG_ENABLE_SPARE3); 254 px_err_reg_enable(px_p, PX_ERR_ILU); 255 256 px_fabric_die_rc_ue |= PCIE_AER_UCE_UC; 257 break; 258 259 case PX_CHIP_FIRE: 260 px_err_reg_enable(px_p, PX_ERR_JBC); 261 px_err_reg_enable(px_p, PX_ERR_MMU); 262 px_err_reg_enable(px_p, PX_ERR_IMU); 263 px_err_reg_enable(px_p, PX_ERR_TLU_UE); 264 px_err_reg_enable(px_p, PX_ERR_TLU_CE); 265 px_err_reg_enable(px_p, PX_ERR_TLU_OE); 266 px_err_reg_enable(px_p, PX_ERR_ILU); 267 px_err_reg_enable(px_p, PX_ERR_LPU_LINK); 268 px_err_reg_enable(px_p, PX_ERR_LPU_PHY); 269 px_err_reg_enable(px_p, PX_ERR_LPU_RX); 270 px_err_reg_enable(px_p, PX_ERR_LPU_TX); 271 px_err_reg_enable(px_p, PX_ERR_LPU_LTSSM); 272 px_err_reg_enable(px_p, PX_ERR_LPU_GIGABLZ); 273 break; 274 default: 275 cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n", 276 ddi_driver_name(dip), ddi_get_instance(dip)); 277 return (DDI_FAILURE); 278 } 279 280 /* Initialize device handle */ 281 *dev_hdl = (devhandle_t)csr_base; 282 283 DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl); 284 285 return (DDI_SUCCESS); 286 } 287 288 int 289 px_lib_dev_fini(dev_info_t *dip) 290 { 291 px_t *px_p = DIP_TO_STATE(dip); 292 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 293 294 DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip); 295 296 /* 297 * Deinitialize all the interrupt
handlers 298 */ 299 switch (PX_CHIP_TYPE(pxu_p)) { 300 case PX_CHIP_OBERON: 301 px_err_reg_disable(px_p, PX_ERR_UBC); 302 px_err_reg_disable(px_p, PX_ERR_MMU); 303 px_err_reg_disable(px_p, PX_ERR_IMU); 304 px_err_reg_disable(px_p, PX_ERR_TLU_UE); 305 px_err_reg_disable(px_p, PX_ERR_TLU_CE); 306 px_err_reg_disable(px_p, PX_ERR_TLU_OE); 307 px_err_reg_disable(px_p, PX_ERR_ILU); 308 break; 309 case PX_CHIP_FIRE: 310 px_err_reg_disable(px_p, PX_ERR_JBC); 311 px_err_reg_disable(px_p, PX_ERR_MMU); 312 px_err_reg_disable(px_p, PX_ERR_IMU); 313 px_err_reg_disable(px_p, PX_ERR_TLU_UE); 314 px_err_reg_disable(px_p, PX_ERR_TLU_CE); 315 px_err_reg_disable(px_p, PX_ERR_TLU_OE); 316 px_err_reg_disable(px_p, PX_ERR_ILU); 317 px_err_reg_disable(px_p, PX_ERR_LPU_LINK); 318 px_err_reg_disable(px_p, PX_ERR_LPU_PHY); 319 px_err_reg_disable(px_p, PX_ERR_LPU_RX); 320 px_err_reg_disable(px_p, PX_ERR_LPU_TX); 321 px_err_reg_disable(px_p, PX_ERR_LPU_LTSSM); 322 px_err_reg_disable(px_p, PX_ERR_LPU_GIGABLZ); 323 break; 324 default: 325 cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n", 326 ddi_driver_name(dip), ddi_get_instance(dip)); 327 return (DDI_FAILURE); 328 } 329 330 iommu_tsb_free(pxu_p->tsb_cookie); 331 332 px_lib_unmap_regs((pxu_t *)px_p->px_plat_p); 333 kmem_free(px_p->px_plat_p, sizeof (pxu_t)); 334 px_p->px_plat_p = NULL; 335 336 return (DDI_SUCCESS); 337 } 338 339 /*ARGSUSED*/ 340 int 341 px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino, 342 sysino_t *sysino) 343 { 344 px_t *px_p = DIP_TO_STATE(dip); 345 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 346 uint64_t ret; 347 348 DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p " 349 "devino 0x%x\n", dip, devino); 350 351 if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip), 352 pxu_p, devino, sysino)) != H_EOK) { 353 DBG(DBG_LIB_INT, dip, 354 "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret); 355 return (DDI_FAILURE); 356 } 357 358 DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n", 359 *sysino); 360 361 return (DDI_SUCCESS); 362 } 363 364 /*ARGSUSED*/ 365 int 366 px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino, 367 intr_valid_state_t *intr_valid_state) 368 { 369 uint64_t ret; 370 371 DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n", 372 dip, sysino); 373 374 if ((ret = hvio_intr_getvalid(DIP_TO_HANDLE(dip), 375 sysino, intr_valid_state)) != H_EOK) { 376 DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n", 377 ret); 378 return (DDI_FAILURE); 379 } 380 381 DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n", 382 *intr_valid_state); 383 384 return (DDI_SUCCESS); 385 } 386 387 /*ARGSUSED*/ 388 int 389 px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino, 390 intr_valid_state_t intr_valid_state) 391 { 392 uint64_t ret; 393 394 DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx " 395 "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state); 396 397 if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip), 398 sysino, intr_valid_state)) != H_EOK) { 399 DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n", 400 ret); 401 return (DDI_FAILURE); 402 } 403 404 return (DDI_SUCCESS); 405 } 406 407 /*ARGSUSED*/ 408 int 409 px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino, 410 intr_state_t *intr_state) 411 { 412 uint64_t ret; 413 414 DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n", 415 dip, sysino); 416 417 if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip), 418 sysino, intr_state)) != H_EOK) { 419 DBG(DBG_LIB_INT, dip, 
"hvio_intr_getstate failed, ret 0x%lx\n", 420 ret); 421 return (DDI_FAILURE); 422 } 423 424 DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n", 425 *intr_state); 426 427 return (DDI_SUCCESS); 428 } 429 430 /*ARGSUSED*/ 431 int 432 px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino, 433 intr_state_t intr_state) 434 { 435 uint64_t ret; 436 437 DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx " 438 "intr_state 0x%x\n", dip, sysino, intr_state); 439 440 if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip), 441 sysino, intr_state)) != H_EOK) { 442 DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n", 443 ret); 444 return (DDI_FAILURE); 445 } 446 447 return (DDI_SUCCESS); 448 } 449 450 /*ARGSUSED*/ 451 int 452 px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid) 453 { 454 px_t *px_p = DIP_TO_STATE(dip); 455 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 456 uint64_t ret; 457 458 DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n", 459 dip, sysino); 460 461 if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip), pxu_p, 462 sysino, cpuid)) != H_EOK) { 463 DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n", 464 ret); 465 return (DDI_FAILURE); 466 } 467 468 DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", cpuid); 469 470 return (DDI_SUCCESS); 471 } 472 473 /*ARGSUSED*/ 474 int 475 px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid) 476 { 477 px_t *px_p = DIP_TO_STATE(dip); 478 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 479 uint64_t ret; 480 481 DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx " 482 "cpuid 0x%x\n", dip, sysino, cpuid); 483 484 if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip), pxu_p, 485 sysino, cpuid)) != H_EOK) { 486 DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n", 487 ret); 488 return (DDI_FAILURE); 489 } 490 491 return (DDI_SUCCESS); 492 } 493 494 /*ARGSUSED*/ 495 int 496 px_lib_intr_reset(dev_info_t *dip) 497 { 498 devino_t ino; 499 sysino_t sysino; 500 501 DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip); 502 503 /* Reset all Interrupts */ 504 for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) { 505 if (px_lib_intr_devino_to_sysino(dip, ino, 506 &sysino) != DDI_SUCCESS) 507 return (BF_FATAL); 508 509 if (px_lib_intr_setstate(dip, sysino, 510 INTR_IDLE_STATE) != DDI_SUCCESS) 511 return (BF_FATAL); 512 } 513 514 return (BF_NONE); 515 } 516 517 /*ARGSUSED*/ 518 int 519 px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages, 520 io_attributes_t attr, void *addr, size_t pfn_index, int flags) 521 { 522 px_t *px_p = DIP_TO_STATE(dip); 523 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 524 uint64_t ret; 525 526 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx " 527 "pages 0x%x attr 0x%x addr 0x%p pfn_index 0x%llx flags 0x%x\n", 528 dip, tsbid, pages, attr, addr, pfn_index, flags); 529 530 if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages, 531 attr, addr, pfn_index, flags)) != H_EOK) { 532 DBG(DBG_LIB_DMA, dip, 533 "px_lib_iommu_map failed, ret 0x%lx\n", ret); 534 return (DDI_FAILURE); 535 } 536 537 return (DDI_SUCCESS); 538 } 539 540 /*ARGSUSED*/ 541 int 542 px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages) 543 { 544 px_t *px_p = DIP_TO_STATE(dip); 545 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 546 uint64_t ret; 547 548 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx " 549 "pages 0x%x\n", dip, tsbid, pages); 550 551 if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, 
tsbid, pages)) 552 != H_EOK) { 553 DBG(DBG_LIB_DMA, dip, 554 "px_lib_iommu_demap failed, ret 0x%lx\n", ret); 555 556 return (DDI_FAILURE); 557 } 558 559 return (DDI_SUCCESS); 560 } 561 562 /*ARGSUSED*/ 563 int 564 px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, io_attributes_t *attr_p, 565 r_addr_t *r_addr_p) 566 { 567 px_t *px_p = DIP_TO_STATE(dip); 568 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 569 uint64_t ret; 570 571 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n", 572 dip, tsbid); 573 574 if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid, 575 attr_p, r_addr_p)) != H_EOK) { 576 DBG(DBG_LIB_DMA, dip, 577 "hvio_iommu_getmap failed, ret 0x%lx\n", ret); 578 579 return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING:DDI_FAILURE); 580 } 581 582 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%x r_addr 0x%llx\n", 583 *attr_p, *r_addr_p); 584 585 return (DDI_SUCCESS); 586 } 587 588 589 /* 590 * Checks dma attributes against system bypass ranges 591 * The bypass range is determined by the hardware. Return them so the 592 * common code can do generic checking against them. 593 */ 594 /*ARGSUSED*/ 595 int 596 px_lib_dma_bypass_rngchk(dev_info_t *dip, ddi_dma_attr_t *attr_p, 597 uint64_t *lo_p, uint64_t *hi_p) 598 { 599 px_t *px_p = DIP_TO_STATE(dip); 600 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 601 602 *lo_p = hvio_get_bypass_base(pxu_p); 603 *hi_p = hvio_get_bypass_end(pxu_p); 604 605 return (DDI_SUCCESS); 606 } 607 608 609 /*ARGSUSED*/ 610 int 611 px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra, io_attributes_t attr, 612 io_addr_t *io_addr_p) 613 { 614 uint64_t ret; 615 px_t *px_p = DIP_TO_STATE(dip); 616 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 617 618 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx " 619 "attr 0x%x\n", dip, ra, attr); 620 621 if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), pxu_p, ra, 622 attr, io_addr_p)) != H_EOK) { 623 DBG(DBG_LIB_DMA, dip, 624 "hvio_iommu_getbypass failed, ret 0x%lx\n", ret); 625 return (DDI_FAILURE); 626 } 627 628 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n", 629 *io_addr_p); 630 631 return (DDI_SUCCESS); 632 } 633 634 /* 635 * bus dma sync entry point. 636 */ 637 /*ARGSUSED*/ 638 int 639 px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 640 off_t off, size_t len, uint_t cache_flags) 641 { 642 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 643 px_t *px_p = DIP_TO_STATE(dip); 644 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 645 646 DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p " 647 "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n", 648 dip, rdip, handle, off, len, cache_flags); 649 650 /* 651 * No flush needed for Oberon 652 */ 653 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) 654 return (DDI_SUCCESS); 655 656 /* 657 * jbus_stst_order is found only in certain cpu modules. 658 * Just return success if not present. 659 */ 660 if (&jbus_stst_order == NULL) 661 return (DDI_SUCCESS); 662 663 if (!(mp->dmai_flags & PX_DMAI_FLAGS_INUSE)) { 664 cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.", 665 ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp); 666 667 return (DDI_FAILURE); 668 } 669 670 if (mp->dmai_flags & PX_DMAI_FLAGS_NOSYNC) 671 return (DDI_SUCCESS); 672 673 /* 674 * No flush needed when sending data from memory to device. 675 * Nothing to do to "sync" memory to what device would already see. 
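 * Only a sync for the CPU on a DDI_DMA_READ mapping requires the jbus_stst_order() flush performed below.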
676 */ 677 if (!(mp->dmai_rflags & DDI_DMA_READ) || 678 ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV)) 679 return (DDI_SUCCESS); 680 681 /* 682 * Perform necessary cpu workaround to ensure jbus ordering. 683 * CPU's internal "invalidate FIFOs" are flushed. 684 */ 685 686 #if !defined(lint) 687 kpreempt_disable(); 688 #endif 689 jbus_stst_order(); 690 #if !defined(lint) 691 kpreempt_enable(); 692 #endif 693 return (DDI_SUCCESS); 694 } 695 696 /* 697 * MSIQ Functions: 698 */ 699 /*ARGSUSED*/ 700 int 701 px_lib_msiq_init(dev_info_t *dip) 702 { 703 px_t *px_p = DIP_TO_STATE(dip); 704 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 705 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 706 caddr_t msiq_addr; 707 px_dvma_addr_t pg_index; 708 size_t size; 709 int ret; 710 711 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip); 712 713 /* 714 * Map the EQ memory into the Fire MMU (has to be 512KB aligned) 715 * and then initialize the base address register. 716 * 717 * Allocate entries from Fire IOMMU so that the resulting address 718 * is properly aligned. Calculate the index of the first allocated 719 * entry. Note: The size of the mapping is assumed to be a multiple 720 * of the page size. 721 */ 722 msiq_addr = (caddr_t)(((uint64_t)msiq_state_p->msiq_buf_p + 723 (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT); 724 725 size = msiq_state_p->msiq_cnt * 726 msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 727 728 pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map, 729 size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT); 730 731 if (pxu_p->msiq_mapped_p == NULL) 732 return (DDI_FAILURE); 733 734 pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p, 735 MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p)); 736 737 if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index), 738 MMU_BTOP(size), PCI_MAP_ATTR_WRITE, (void *)msiq_addr, 0, 739 MMU_MAP_BUF)) != DDI_SUCCESS) { 740 DBG(DBG_LIB_MSIQ, dip, 741 "hvio_msiq_init failed, ret 0x%lx\n", ret); 742 743 (void) px_lib_msiq_fini(dip); 744 return (DDI_FAILURE); 745 } 746 747 (void) hvio_msiq_init(DIP_TO_HANDLE(dip), pxu_p); 748 749 return (DDI_SUCCESS); 750 } 751 752 /*ARGSUSED*/ 753 int 754 px_lib_msiq_fini(dev_info_t *dip) 755 { 756 px_t *px_p = DIP_TO_STATE(dip); 757 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 758 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 759 px_dvma_addr_t pg_index; 760 size_t size; 761 762 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip); 763 764 /* 765 * Unmap and free the EQ memory that had been mapped 766 * into the Fire IOMMU. 
767 */ 768 size = msiq_state_p->msiq_cnt * 769 msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 770 771 pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p, 772 MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p)); 773 774 (void) px_lib_iommu_demap(px_p->px_dip, 775 PCI_TSBID(0, pg_index), MMU_BTOP(size)); 776 777 /* Free the entries from the Fire MMU */ 778 vmem_xfree(px_p->px_mmu_p->mmu_dvma_map, 779 (void *)pxu_p->msiq_mapped_p, size); 780 781 return (DDI_SUCCESS); 782 } 783 784 /*ARGSUSED*/ 785 int 786 px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p, 787 uint_t *msiq_rec_cnt_p) 788 { 789 px_t *px_p = DIP_TO_STATE(dip); 790 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 791 uint64_t *msiq_addr; 792 size_t msiq_size; 793 794 DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n", 795 dip, msiq_id); 796 797 msiq_addr = (uint64_t *)(((uint64_t)msiq_state_p->msiq_buf_p + 798 (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT); 799 msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 800 ra_p = (r_addr_t *)((caddr_t)msiq_addr + (msiq_id * msiq_size)); 801 802 *msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt; 803 804 DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%p msiq_rec_cnt 0x%x\n", 805 ra_p, *msiq_rec_cnt_p); 806 807 return (DDI_SUCCESS); 808 } 809 810 /*ARGSUSED*/ 811 int 812 px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id, 813 pci_msiq_valid_state_t *msiq_valid_state) 814 { 815 uint64_t ret; 816 817 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n", 818 dip, msiq_id); 819 820 if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip), 821 msiq_id, msiq_valid_state)) != H_EOK) { 822 DBG(DBG_LIB_MSIQ, dip, 823 "hvio_msiq_getvalid failed, ret 0x%lx\n", ret); 824 return (DDI_FAILURE); 825 } 826 827 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n", 828 *msiq_valid_state); 829 830 return (DDI_SUCCESS); 831 } 832 833 /*ARGSUSED*/ 834 int 835 px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id, 836 pci_msiq_valid_state_t msiq_valid_state) 837 { 838 uint64_t ret; 839 840 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x " 841 "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state); 842 843 if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip), 844 msiq_id, msiq_valid_state)) != H_EOK) { 845 DBG(DBG_LIB_MSIQ, dip, 846 "hvio_msiq_setvalid failed, ret 0x%lx\n", ret); 847 return (DDI_FAILURE); 848 } 849 850 return (DDI_SUCCESS); 851 } 852 853 /*ARGSUSED*/ 854 int 855 px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id, 856 pci_msiq_state_t *msiq_state) 857 { 858 uint64_t ret; 859 860 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n", 861 dip, msiq_id); 862 863 if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip), 864 msiq_id, msiq_state)) != H_EOK) { 865 DBG(DBG_LIB_MSIQ, dip, 866 "hvio_msiq_getstate failed, ret 0x%lx\n", ret); 867 return (DDI_FAILURE); 868 } 869 870 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n", 871 *msiq_state); 872 873 return (DDI_SUCCESS); 874 } 875 876 /*ARGSUSED*/ 877 int 878 px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id, 879 pci_msiq_state_t msiq_state) 880 { 881 uint64_t ret; 882 883 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x " 884 "msiq_state 0x%x\n", dip, msiq_id, msiq_state); 885 886 if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip), 887 msiq_id, msiq_state)) != H_EOK) { 888 DBG(DBG_LIB_MSIQ, dip, 889 "hvio_msiq_setstate failed, ret 0x%lx\n", ret); 890 return (DDI_FAILURE); 891 } 892 893 return (DDI_SUCCESS); 
894 } 895 896 /*ARGSUSED*/ 897 int 898 px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id, 899 msiqhead_t *msiq_head) 900 { 901 uint64_t ret; 902 903 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n", 904 dip, msiq_id); 905 906 if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip), 907 msiq_id, msiq_head)) != H_EOK) { 908 DBG(DBG_LIB_MSIQ, dip, 909 "hvio_msiq_gethead failed, ret 0x%lx\n", ret); 910 return (DDI_FAILURE); 911 } 912 913 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n", 914 *msiq_head); 915 916 return (DDI_SUCCESS); 917 } 918 919 /*ARGSUSED*/ 920 int 921 px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id, 922 msiqhead_t msiq_head) 923 { 924 uint64_t ret; 925 926 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x " 927 "msiq_head 0x%x\n", dip, msiq_id, msiq_head); 928 929 if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip), 930 msiq_id, msiq_head)) != H_EOK) { 931 DBG(DBG_LIB_MSIQ, dip, 932 "hvio_msiq_sethead failed, ret 0x%lx\n", ret); 933 return (DDI_FAILURE); 934 } 935 936 return (DDI_SUCCESS); 937 } 938 939 /*ARGSUSED*/ 940 int 941 px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id, 942 msiqtail_t *msiq_tail) 943 { 944 uint64_t ret; 945 946 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n", 947 dip, msiq_id); 948 949 if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip), 950 msiq_id, msiq_tail)) != H_EOK) { 951 DBG(DBG_LIB_MSIQ, dip, 952 "hvio_msiq_gettail failed, ret 0x%lx\n", ret); 953 return (DDI_FAILURE); 954 } 955 956 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n", 957 *msiq_tail); 958 959 return (DDI_SUCCESS); 960 } 961 962 /*ARGSUSED*/ 963 void 964 px_lib_get_msiq_rec(dev_info_t *dip, px_msiq_t *msiq_p, msiq_rec_t *msiq_rec_p) 965 { 966 eq_rec_t *eq_rec_p = (eq_rec_t *)msiq_p->msiq_curr; 967 968 DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n", 969 dip, eq_rec_p); 970 971 if (!eq_rec_p->eq_rec_fmt_type) { 972 /* Set msiq_rec_type to zero */ 973 msiq_rec_p->msiq_rec_type = 0; 974 975 return; 976 } 977 978 DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, " 979 "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx " 980 "eq_rec_len 0x%llx eq_rec_addr0 0x%llx " 981 "eq_rec_addr1 0x%llx eq_rec_data0 0x%llx " 982 "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid, 983 eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len, 984 eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1, 985 eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1); 986 987 /* 988 * Only upper 4 bits of eq_rec_fmt_type is used 989 * to identify the EQ record type. 
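 * (The switch below uses eq_rec_fmt_type >> 3 to select MSI32, MSI64 or MSG; for MSG records the low three bits carry the message routing code.)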
990 */ 991 switch (eq_rec_p->eq_rec_fmt_type >> 3) { 992 case EQ_REC_MSI32: 993 msiq_rec_p->msiq_rec_type = MSI32_REC; 994 995 msiq_rec_p->msiq_rec_data.msi.msi_data = 996 eq_rec_p->eq_rec_data0; 997 break; 998 case EQ_REC_MSI64: 999 msiq_rec_p->msiq_rec_type = MSI64_REC; 1000 1001 msiq_rec_p->msiq_rec_data.msi.msi_data = 1002 eq_rec_p->eq_rec_data0; 1003 break; 1004 case EQ_REC_MSG: 1005 msiq_rec_p->msiq_rec_type = MSG_REC; 1006 1007 msiq_rec_p->msiq_rec_data.msg.msg_route = 1008 eq_rec_p->eq_rec_fmt_type & 7; 1009 msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid; 1010 msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0; 1011 break; 1012 default: 1013 cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: " 1014 "0x%x is an unknown EQ record type", 1015 ddi_driver_name(dip), ddi_get_instance(dip), 1016 (int)eq_rec_p->eq_rec_fmt_type); 1017 break; 1018 } 1019 1020 msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid; 1021 msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) | 1022 (eq_rec_p->eq_rec_addr0 << 2)); 1023 1024 /* Zero out eq_rec_fmt_type field */ 1025 eq_rec_p->eq_rec_fmt_type = 0; 1026 } 1027 1028 /* 1029 * MSI Functions: 1030 */ 1031 /*ARGSUSED*/ 1032 int 1033 px_lib_msi_init(dev_info_t *dip) 1034 { 1035 px_t *px_p = DIP_TO_STATE(dip); 1036 px_msi_state_t *msi_state_p = &px_p->px_ib_p->ib_msi_state; 1037 uint64_t ret; 1038 1039 DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip); 1040 1041 if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip), 1042 msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) { 1043 DBG(DBG_LIB_MSIQ, dip, "px_lib_msi_init failed, ret 0x%lx\n", 1044 ret); 1045 return (DDI_FAILURE); 1046 } 1047 1048 return (DDI_SUCCESS); 1049 } 1050 1051 /*ARGSUSED*/ 1052 int 1053 px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num, 1054 msiqid_t *msiq_id) 1055 { 1056 uint64_t ret; 1057 1058 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n", 1059 dip, msi_num); 1060 1061 if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip), 1062 msi_num, msiq_id)) != H_EOK) { 1063 DBG(DBG_LIB_MSI, dip, 1064 "hvio_msi_getmsiq failed, ret 0x%lx\n", ret); 1065 return (DDI_FAILURE); 1066 } 1067 1068 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n", 1069 *msiq_id); 1070 1071 return (DDI_SUCCESS); 1072 } 1073 1074 /*ARGSUSED*/ 1075 int 1076 px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num, 1077 msiqid_t msiq_id, msi_type_t msitype) 1078 { 1079 uint64_t ret; 1080 1081 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x " 1082 "msq_id 0x%x\n", dip, msi_num, msiq_id); 1083 1084 if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip), 1085 msi_num, msiq_id)) != H_EOK) { 1086 DBG(DBG_LIB_MSI, dip, 1087 "hvio_msi_setmsiq failed, ret 0x%lx\n", ret); 1088 return (DDI_FAILURE); 1089 } 1090 1091 return (DDI_SUCCESS); 1092 } 1093 1094 /*ARGSUSED*/ 1095 int 1096 px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num, 1097 pci_msi_valid_state_t *msi_valid_state) 1098 { 1099 uint64_t ret; 1100 1101 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n", 1102 dip, msi_num); 1103 1104 if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip), 1105 msi_num, msi_valid_state)) != H_EOK) { 1106 DBG(DBG_LIB_MSI, dip, 1107 "hvio_msi_getvalid failed, ret 0x%lx\n", ret); 1108 return (DDI_FAILURE); 1109 } 1110 1111 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msiq_id 0x%x\n", 1112 *msi_valid_state); 1113 1114 return (DDI_SUCCESS); 1115 } 1116 1117 /*ARGSUSED*/ 1118 int 1119 px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num, 1120 pci_msi_valid_state_t 
msi_valid_state) 1121 { 1122 uint64_t ret; 1123 1124 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x " 1125 "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state); 1126 1127 if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip), 1128 msi_num, msi_valid_state)) != H_EOK) { 1129 DBG(DBG_LIB_MSI, dip, 1130 "hvio_msi_setvalid failed, ret 0x%lx\n", ret); 1131 return (DDI_FAILURE); 1132 } 1133 1134 return (DDI_SUCCESS); 1135 } 1136 1137 /*ARGSUSED*/ 1138 int 1139 px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num, 1140 pci_msi_state_t *msi_state) 1141 { 1142 uint64_t ret; 1143 1144 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n", 1145 dip, msi_num); 1146 1147 if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip), 1148 msi_num, msi_state)) != H_EOK) { 1149 DBG(DBG_LIB_MSI, dip, 1150 "hvio_msi_getstate failed, ret 0x%lx\n", ret); 1151 return (DDI_FAILURE); 1152 } 1153 1154 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n", 1155 *msi_state); 1156 1157 return (DDI_SUCCESS); 1158 } 1159 1160 /*ARGSUSED*/ 1161 int 1162 px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num, 1163 pci_msi_state_t msi_state) 1164 { 1165 uint64_t ret; 1166 1167 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x " 1168 "msi_state 0x%x\n", dip, msi_num, msi_state); 1169 1170 if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip), 1171 msi_num, msi_state)) != H_EOK) { 1172 DBG(DBG_LIB_MSI, dip, 1173 "hvio_msi_setstate failed, ret 0x%lx\n", ret); 1174 return (DDI_FAILURE); 1175 } 1176 1177 return (DDI_SUCCESS); 1178 } 1179 1180 /* 1181 * MSG Functions: 1182 */ 1183 /*ARGSUSED*/ 1184 int 1185 px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type, 1186 msiqid_t *msiq_id) 1187 { 1188 uint64_t ret; 1189 1190 DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n", 1191 dip, msg_type); 1192 1193 if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip), 1194 msg_type, msiq_id)) != H_EOK) { 1195 DBG(DBG_LIB_MSG, dip, 1196 "hvio_msg_getmsiq failed, ret 0x%lx\n", ret); 1197 return (DDI_FAILURE); 1198 } 1199 1200 DBG(DBG_LIB_MSI, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n", 1201 *msiq_id); 1202 1203 return (DDI_SUCCESS); 1204 } 1205 1206 /*ARGSUSED*/ 1207 int 1208 px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type, 1209 msiqid_t msiq_id) 1210 { 1211 uint64_t ret; 1212 1213 DBG(DBG_LIB_MSG, dip, "px_lib_msi_setstate: dip 0x%p msg_type 0x%x " 1214 "msiq_id 0x%x\n", dip, msg_type, msiq_id); 1215 1216 if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip), 1217 msg_type, msiq_id)) != H_EOK) { 1218 DBG(DBG_LIB_MSG, dip, 1219 "hvio_msg_setmsiq failed, ret 0x%lx\n", ret); 1220 return (DDI_FAILURE); 1221 } 1222 1223 return (DDI_SUCCESS); 1224 } 1225 1226 /*ARGSUSED*/ 1227 int 1228 px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type, 1229 pcie_msg_valid_state_t *msg_valid_state) 1230 { 1231 uint64_t ret; 1232 1233 DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n", 1234 dip, msg_type); 1235 1236 if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type, 1237 msg_valid_state)) != H_EOK) { 1238 DBG(DBG_LIB_MSG, dip, 1239 "hvio_msg_getvalid failed, ret 0x%lx\n", ret); 1240 return (DDI_FAILURE); 1241 } 1242 1243 DBG(DBG_LIB_MSI, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n", 1244 *msg_valid_state); 1245 1246 return (DDI_SUCCESS); 1247 } 1248 1249 /*ARGSUSED*/ 1250 int 1251 px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type, 1252 pcie_msg_valid_state_t msg_valid_state) 1253 { 1254 uint64_t ret; 1255 1256 DBG(DBG_LIB_MSG, dip, 
"px_lib_msg_setvalid: dip 0x%p msg_type 0x%x " 1257 "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state); 1258 1259 if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type, 1260 msg_valid_state)) != H_EOK) { 1261 DBG(DBG_LIB_MSG, dip, 1262 "hvio_msg_setvalid failed, ret 0x%lx\n", ret); 1263 return (DDI_FAILURE); 1264 } 1265 1266 return (DDI_SUCCESS); 1267 } 1268 1269 /* 1270 * Suspend/Resume Functions: 1271 * Currently unsupported by hypervisor 1272 */ 1273 int 1274 px_lib_suspend(dev_info_t *dip) 1275 { 1276 px_t *px_p = DIP_TO_STATE(dip); 1277 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1278 px_cb_t *cb_p = PX2CB(px_p); 1279 devhandle_t dev_hdl, xbus_dev_hdl; 1280 uint64_t ret = H_EOK; 1281 1282 DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip); 1283 1284 dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR]; 1285 xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC]; 1286 1287 if ((ret = hvio_suspend(dev_hdl, pxu_p)) != H_EOK) 1288 goto fail; 1289 1290 if (--cb_p->attachcnt == 0) { 1291 ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p); 1292 if (ret != H_EOK) 1293 cb_p->attachcnt++; 1294 } 1295 1296 fail: 1297 return ((ret != H_EOK) ? DDI_FAILURE: DDI_SUCCESS); 1298 } 1299 1300 void 1301 px_lib_resume(dev_info_t *dip) 1302 { 1303 px_t *px_p = DIP_TO_STATE(dip); 1304 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1305 px_cb_t *cb_p = PX2CB(px_p); 1306 devhandle_t dev_hdl, xbus_dev_hdl; 1307 devino_t pec_ino = px_p->px_inos[PX_INTR_PEC]; 1308 devino_t xbc_ino = px_p->px_inos[PX_INTR_XBC]; 1309 1310 DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip); 1311 1312 dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR]; 1313 xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC]; 1314 1315 if (++cb_p->attachcnt == 1) 1316 hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p); 1317 1318 hvio_resume(dev_hdl, pec_ino, pxu_p); 1319 } 1320 1321 /* 1322 * Generate a unique Oberon UBC ID based on the Logicial System Board and 1323 * the IO Channel from the portid property field. 1324 */ 1325 static uint64_t 1326 oberon_get_ubc_id(dev_info_t *dip) 1327 { 1328 px_t *px_p = DIP_TO_STATE(dip); 1329 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1330 uint64_t ubc_id; 1331 1332 /* 1333 * Generate a unique 6 bit UBC ID using the 2 IO_Channel#[1:0] bits and 1334 * the 4 LSB_ID[3:0] bits from the Oberon's portid property. 1335 */ 1336 ubc_id = (((pxu_p->portid >> OBERON_PORT_ID_IOC) & 1337 OBERON_PORT_ID_IOC_MASK) | (((pxu_p->portid >> 1338 OBERON_PORT_ID_LSB) & OBERON_PORT_ID_LSB_MASK) 1339 << OBERON_UBC_ID_LSB)); 1340 1341 return (ubc_id); 1342 } 1343 1344 /* 1345 * Oberon does not have a UBC scratch register, so alloc an array of scratch 1346 * registers when needed and use a unique UBC ID as an index. This code 1347 * can be simplified if we use a pre-allocated array. They are currently 1348 * being dynamically allocated because it's only needed by the Oberon. 1349 */ 1350 static void 1351 oberon_set_cb(dev_info_t *dip, uint64_t val) 1352 { 1353 uint64_t ubc_id; 1354 1355 if (px_oberon_ubc_scratch_regs == NULL) 1356 px_oberon_ubc_scratch_regs = 1357 (uint64_t *)kmem_zalloc(sizeof (uint64_t)* 1358 OBERON_UBC_ID_MAX, KM_SLEEP); 1359 1360 ubc_id = oberon_get_ubc_id(dip); 1361 1362 px_oberon_ubc_scratch_regs[ubc_id] = val; 1363 1364 /* 1365 * Check if any scratch registers are still in use. If all scratch 1366 * registers are currently set to zero, then deallocate the scratch 1367 * register array. 
1368 */ 1369 for (ubc_id = 0; ubc_id < OBERON_UBC_ID_MAX; ubc_id++) { 1370 if (px_oberon_ubc_scratch_regs[ubc_id] != NULL) 1371 return; 1372 } 1373 1374 /* 1375 * All scratch registers are set to zero so deallocate the scratch 1376 * register array and set the pointer to NULL. 1377 */ 1378 kmem_free(px_oberon_ubc_scratch_regs, 1379 (sizeof (uint64_t)*OBERON_UBC_ID_MAX)); 1380 1381 px_oberon_ubc_scratch_regs = NULL; 1382 } 1383 1384 /* 1385 * Oberon does not have a UBC scratch register, so use an allocated array of 1386 * scratch registers and use the unique UBC ID as an index into that array. 1387 */ 1388 static uint64_t 1389 oberon_get_cb(dev_info_t *dip) 1390 { 1391 uint64_t ubc_id; 1392 1393 if (px_oberon_ubc_scratch_regs == NULL) 1394 return (0); 1395 1396 ubc_id = oberon_get_ubc_id(dip); 1397 1398 return (px_oberon_ubc_scratch_regs[ubc_id]); 1399 } 1400 1401 /* 1402 * Misc Functions: 1403 * Currently unsupported by hypervisor 1404 */ 1405 static uint64_t 1406 px_get_cb(dev_info_t *dip) 1407 { 1408 px_t *px_p = DIP_TO_STATE(dip); 1409 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1410 1411 /* 1412 * Oberon does not currently have Scratchpad registers. 1413 */ 1414 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) 1415 return (oberon_get_cb(dip)); 1416 1417 return (CSR_XR((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1)); 1418 } 1419 1420 static void 1421 px_set_cb(dev_info_t *dip, uint64_t val) 1422 { 1423 px_t *px_p = DIP_TO_STATE(dip); 1424 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1425 1426 /* 1427 * Oberon does not currently have Scratchpad registers. 1428 */ 1429 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) { 1430 oberon_set_cb(dip, val); 1431 return; 1432 } 1433 1434 CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, val); 1435 } 1436 1437 /*ARGSUSED*/ 1438 int 1439 px_lib_map_vconfig(dev_info_t *dip, 1440 ddi_map_req_t *mp, pci_config_offset_t off, 1441 pci_regspec_t *rp, caddr_t *addrp) 1442 { 1443 /* 1444 * No special config space access services in this layer. 1445 */ 1446 return (DDI_FAILURE); 1447 } 1448 1449 void 1450 px_lib_map_attr_check(ddi_map_req_t *mp) 1451 { 1452 ddi_acc_hdl_t *hp = mp->map_handlep; 1453 1454 /* fire does not accept byte masks from PIO store merge */ 1455 if (hp->ah_acc.devacc_attr_dataorder == DDI_STORECACHING_OK_ACC) 1456 hp->ah_acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 1457 } 1458 1459 void 1460 px_lib_clr_errs(px_t *px_p) 1461 { 1462 px_pec_t *pec_p = px_p->px_pec_p; 1463 dev_info_t *rpdip = px_p->px_dip; 1464 int err = PX_OK, ret; 1465 int acctype = pec_p->pec_safeacc_type; 1466 ddi_fm_error_t derr; 1467 1468 /* Create the derr */ 1469 bzero(&derr, sizeof (ddi_fm_error_t)); 1470 derr.fme_version = DDI_FME_VERSION; 1471 derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1); 1472 derr.fme_flag = acctype; 1473 1474 if (acctype == DDI_FM_ERR_EXPECTED) { 1475 derr.fme_status = DDI_FM_NONFATAL; 1476 ndi_fm_acc_err_set(pec_p->pec_acc_hdl, &derr); 1477 } 1478 1479 mutex_enter(&px_p->px_fm_mutex); 1480 1481 /* send ereport/handle/clear fire registers */ 1482 err = px_err_handle(px_p, &derr, PX_LIB_CALL, B_TRUE); 1483 1484 /* Check all child devices for errors */ 1485 ret = ndi_fm_handler_dispatch(rpdip, NULL, &derr); 1486 1487 mutex_exit(&px_p->px_fm_mutex); 1488 1489 /* 1490 * PX_FATAL_HW indicates a condition recovered from Fatal-Reset, 1491 * therefore it does not cause panic. 
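 * Only PX_FATAL_GOS, PX_FATAL_SW, or a DDI_FM_FATAL result from a child handler triggers the panic below.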
1492 */ 1493 if ((err & (PX_FATAL_GOS | PX_FATAL_SW)) || (ret == DDI_FM_FATAL)) 1494 PX_FM_PANIC("Fatal System Port Error has occurred\n"); 1495 } 1496 1497 #ifdef DEBUG 1498 int px_peekfault_cnt = 0; 1499 int px_pokefault_cnt = 0; 1500 #endif /* DEBUG */ 1501 1502 /*ARGSUSED*/ 1503 static int 1504 px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip, 1505 peekpoke_ctlops_t *in_args) 1506 { 1507 px_t *px_p = DIP_TO_STATE(dip); 1508 px_pec_t *pec_p = px_p->px_pec_p; 1509 int err = DDI_SUCCESS; 1510 on_trap_data_t otd; 1511 1512 mutex_enter(&pec_p->pec_pokefault_mutex); 1513 pec_p->pec_ontrap_data = &otd; 1514 pec_p->pec_safeacc_type = DDI_FM_ERR_POKE; 1515 1516 /* Set up protected environment. */ 1517 if (!on_trap(&otd, OT_DATA_ACCESS)) { 1518 uintptr_t tramp = otd.ot_trampoline; 1519 1520 otd.ot_trampoline = (uintptr_t)&poke_fault; 1521 err = do_poke(in_args->size, (void *)in_args->dev_addr, 1522 (void *)in_args->host_addr); 1523 otd.ot_trampoline = tramp; 1524 } else 1525 err = DDI_FAILURE; 1526 1527 px_lib_clr_errs(px_p); 1528 1529 if (otd.ot_trap & OT_DATA_ACCESS) 1530 err = DDI_FAILURE; 1531 1532 /* Take down protected environment. */ 1533 no_trap(); 1534 1535 pec_p->pec_ontrap_data = NULL; 1536 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1537 mutex_exit(&pec_p->pec_pokefault_mutex); 1538 1539 #ifdef DEBUG 1540 if (err == DDI_FAILURE) 1541 px_pokefault_cnt++; 1542 #endif 1543 return (err); 1544 } 1545 1546 /*ARGSUSED*/ 1547 static int 1548 px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip, 1549 peekpoke_ctlops_t *cautacc_ctlops_arg) 1550 { 1551 size_t size = cautacc_ctlops_arg->size; 1552 uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr; 1553 uintptr_t host_addr = cautacc_ctlops_arg->host_addr; 1554 ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle; 1555 size_t repcount = cautacc_ctlops_arg->repcount; 1556 uint_t flags = cautacc_ctlops_arg->flags; 1557 1558 px_t *px_p = DIP_TO_STATE(dip); 1559 px_pec_t *pec_p = px_p->px_pec_p; 1560 int err = DDI_SUCCESS; 1561 1562 /* 1563 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault 1564 * mutex. 
1565 */ 1566 i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1567 1568 pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap; 1569 pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED; 1570 hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED; 1571 1572 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1573 for (; repcount; repcount--) { 1574 switch (size) { 1575 1576 case sizeof (uint8_t): 1577 i_ddi_put8(hp, (uint8_t *)dev_addr, 1578 *(uint8_t *)host_addr); 1579 break; 1580 1581 case sizeof (uint16_t): 1582 i_ddi_put16(hp, (uint16_t *)dev_addr, 1583 *(uint16_t *)host_addr); 1584 break; 1585 1586 case sizeof (uint32_t): 1587 i_ddi_put32(hp, (uint32_t *)dev_addr, 1588 *(uint32_t *)host_addr); 1589 break; 1590 1591 case sizeof (uint64_t): 1592 i_ddi_put64(hp, (uint64_t *)dev_addr, 1593 *(uint64_t *)host_addr); 1594 break; 1595 } 1596 1597 host_addr += size; 1598 1599 if (flags == DDI_DEV_AUTOINCR) 1600 dev_addr += size; 1601 1602 px_lib_clr_errs(px_p); 1603 1604 if (pec_p->pec_ontrap_data->ot_trap & OT_DATA_ACCESS) { 1605 err = DDI_FAILURE; 1606 #ifdef DEBUG 1607 px_pokefault_cnt++; 1608 #endif 1609 break; 1610 } 1611 } 1612 } 1613 1614 i_ddi_notrap((ddi_acc_handle_t)hp); 1615 pec_p->pec_ontrap_data = NULL; 1616 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1617 i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1618 hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED; 1619 1620 return (err); 1621 } 1622 1623 1624 int 1625 px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip, 1626 peekpoke_ctlops_t *in_args) 1627 { 1628 return (in_args->handle ? px_lib_do_caut_put(dip, rdip, in_args) : 1629 px_lib_do_poke(dip, rdip, in_args)); 1630 } 1631 1632 1633 /*ARGSUSED*/ 1634 static int 1635 px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args) 1636 { 1637 px_t *px_p = DIP_TO_STATE(dip); 1638 px_pec_t *pec_p = px_p->px_pec_p; 1639 int err = DDI_SUCCESS; 1640 on_trap_data_t otd; 1641 1642 mutex_enter(&pec_p->pec_pokefault_mutex); 1643 pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK; 1644 1645 if (!on_trap(&otd, OT_DATA_ACCESS)) { 1646 uintptr_t tramp = otd.ot_trampoline; 1647 1648 otd.ot_trampoline = (uintptr_t)&peek_fault; 1649 err = do_peek(in_args->size, (void *)in_args->dev_addr, 1650 (void *)in_args->host_addr); 1651 otd.ot_trampoline = tramp; 1652 } else 1653 err = DDI_FAILURE; 1654 1655 no_trap(); 1656 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1657 mutex_exit(&pec_p->pec_pokefault_mutex); 1658 1659 #ifdef DEBUG 1660 if (err == DDI_FAILURE) 1661 px_peekfault_cnt++; 1662 #endif 1663 return (err); 1664 } 1665 1666 1667 static int 1668 px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg) 1669 { 1670 size_t size = cautacc_ctlops_arg->size; 1671 uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr; 1672 uintptr_t host_addr = cautacc_ctlops_arg->host_addr; 1673 ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle; 1674 size_t repcount = cautacc_ctlops_arg->repcount; 1675 uint_t flags = cautacc_ctlops_arg->flags; 1676 1677 px_t *px_p = DIP_TO_STATE(dip); 1678 px_pec_t *pec_p = px_p->px_pec_p; 1679 int err = DDI_SUCCESS; 1680 1681 /* 1682 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault 1683 * mutex. 
1684 */ 1685 i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1686 1687 pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap; 1688 pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED; 1689 hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED; 1690 1691 if (repcount == 1) { 1692 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1693 i_ddi_caut_get(size, (void *)dev_addr, 1694 (void *)host_addr); 1695 } else { 1696 int i; 1697 uint8_t *ff_addr = (uint8_t *)host_addr; 1698 for (i = 0; i < size; i++) 1699 *ff_addr++ = 0xff; 1700 1701 err = DDI_FAILURE; 1702 #ifdef DEBUG 1703 px_peekfault_cnt++; 1704 #endif 1705 } 1706 } else { 1707 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1708 for (; repcount; repcount--) { 1709 i_ddi_caut_get(size, (void *)dev_addr, 1710 (void *)host_addr); 1711 1712 host_addr += size; 1713 1714 if (flags == DDI_DEV_AUTOINCR) 1715 dev_addr += size; 1716 } 1717 } else { 1718 err = DDI_FAILURE; 1719 #ifdef DEBUG 1720 px_peekfault_cnt++; 1721 #endif 1722 } 1723 } 1724 1725 i_ddi_notrap((ddi_acc_handle_t)hp); 1726 pec_p->pec_ontrap_data = NULL; 1727 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1728 i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1729 hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED; 1730 1731 return (err); 1732 } 1733 1734 /*ARGSUSED*/ 1735 int 1736 px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip, 1737 peekpoke_ctlops_t *in_args, void *result) 1738 { 1739 result = (void *)in_args->host_addr; 1740 return (in_args->handle ? px_lib_do_caut_get(dip, in_args) : 1741 px_lib_do_peek(dip, in_args)); 1742 } 1743 1744 /* 1745 * Implements the PPM interface. 1746 */ 1747 int 1748 px_lib_pmctl(int cmd, px_t *px_p) 1749 { 1750 ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ); 1751 switch (cmd) { 1752 case PPMREQ_PRE_PWR_OFF: 1753 /* 1754 * Currently there is no device power management for 1755 * the root complex (fire). When there is, we need to make 1756 * sure that it is at full power before trying to send the 1757 * PME_Turn_Off message. 1758 */ 1759 DBG(DBG_PWR, px_p->px_dip, 1760 "ioctl: request to send PME_Turn_Off\n"); 1761 return (px_goto_l23ready(px_p)); 1762 1763 case PPMREQ_PRE_PWR_ON: 1764 DBG(DBG_PWR, px_p->px_dip, "ioctl: PRE_PWR_ON request\n"); 1765 return (px_pre_pwron_check(px_p)); 1766 1767 case PPMREQ_POST_PWR_ON: 1768 DBG(DBG_PWR, px_p->px_dip, "ioctl: POST_PWR_ON request\n"); 1769 return (px_goto_l0(px_p)); 1770 1771 default: 1772 return (DDI_FAILURE); 1773 } 1774 } 1775 1776 /* 1777 * Sends the PME_Turn_Off message to put the link in L2/L3 ready state. 1778 * Called by px_ioctl. 1779 * Returns DDI_SUCCESS or DDI_FAILURE. 1780 * 1. Wait for link to be in L1 state (link status reg) 1781 * 2. Write to PME_Turn_off reg to broadcast 1782 * 3. Set timeout 1783 * 4. If timeout, return failure. 1784 * 5.
If PM_TO_Ack is received, wait till the link is in L2/L3 ready 1785 */ 1786 static int 1787 px_goto_l23ready(px_t *px_p) 1788 { 1789 pcie_pwr_t *pwr_p; 1790 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1791 caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR]; 1792 int ret = DDI_SUCCESS; 1793 clock_t end, timeleft; 1794 int mutex_held = 1; 1795 1796 /* If no PM info, return failure */ 1797 if (!PCIE_PMINFO(px_p->px_dip) || 1798 !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip))) 1799 return (DDI_FAILURE); 1800 1801 mutex_enter(&pwr_p->pwr_lock); 1802 mutex_enter(&px_p->px_l23ready_lock); 1803 /* Clear the PME_To_ACK received flag */ 1804 px_p->px_pm_flags &= ~PX_PMETOACK_RECVD; 1805 /* 1806 * When P25 is the downstream device, after receiving 1807 * PME_To_ACK, fire will go to Detect state, which causes 1808 * the link down event. Inform FMA that this is expected. 1809 * For all other cards compliant with the PCI Express 1810 * spec, this will happen when the power is re-applied. FMA 1811 * code will clear this flag after one instance of LDN. Since 1812 * there will not be a LDN event for the spec-compliant cards, 1813 * we need to clear the flag after receiving PME_To_ACK. 1814 */ 1815 px_p->px_pm_flags |= PX_LDN_EXPECTED; 1816 if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) { 1817 ret = DDI_FAILURE; 1818 goto l23ready_done; 1819 } 1820 px_p->px_pm_flags |= PX_PME_TURNOFF_PENDING; 1821 1822 end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout); 1823 while (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) { 1824 timeleft = cv_timedwait(&px_p->px_l23ready_cv, 1825 &px_p->px_l23ready_lock, end); 1826 /* 1827 * If cv_timedwait returns -1, it is either 1828 * 1) timed out, or 1829 * 2) there was a premature wakeup but by the time 1830 * cv_timedwait is called again end < lbolt, i.e. 1831 * end is in the past, or 1832 * 3) by the time we make the first cv_timedwait call, 1833 * end < lbolt is already true. 1834 */ 1835 if (timeleft == -1) 1836 break; 1837 } 1838 if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) { 1839 /* 1840 * Either we timed out or the interrupt didn't get a 1841 * chance to grab the mutex and set the flag. 1842 * Release the mutex and delay for some time. 1843 * This will 1) give the interrupt a chance to 1844 * set the flag and 2) create a delay between two 1845 * consecutive requests. 1846 */ 1847 mutex_exit(&px_p->px_l23ready_lock); 1848 delay(drv_usectohz(50 * PX_MSEC_TO_USEC)); 1849 mutex_held = 0; 1850 if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) { 1851 ret = DDI_FAILURE; 1852 DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting" 1853 " for PME_TO_ACK\n"); 1854 } 1855 } 1856 px_p->px_pm_flags &= 1857 ~(PX_PME_TURNOFF_PENDING | PX_PMETOACK_RECVD | PX_LDN_EXPECTED); 1858 1859 l23ready_done: 1860 if (mutex_held) 1861 mutex_exit(&px_p->px_l23ready_lock); 1862 /* 1863 * Wait till the link is in L1 idle, if sending PME_Turn_Off 1864 * was successful. 1865 */ 1866 if (ret == DDI_SUCCESS) { 1867 if (px_link_wait4l1idle(csr_base) != DDI_SUCCESS) { 1868 DBG(DBG_PWR, px_p->px_dip, " Link is not at L1" 1869 " even though we received PME_To_ACK.\n"); 1870 /* 1871 * Workaround for hardware bug with P25. 1872 * Due to a hardware bug with P25, the link state 1873 * will be Detect state rather than L1 after the 1874 * link is transitioned to L23Ready state. Since 1875 * we don't know whether the link is in L23Ready state 1876 * without Fire's state being L1_idle, we delay 1877 * here just to make sure that we wait till the link 1878 * is transitioned to L23Ready state.
1879 */ 1880 delay(drv_usectohz(100 * PX_MSEC_TO_USEC)); 1881 } 1882 pwr_p->pwr_link_lvl = PM_LEVEL_L3; 1883 1884 } 1885 mutex_exit(&pwr_p->pwr_lock); 1886 return (ret); 1887 } 1888 1889 /* 1890 * Message interrupt handler intended to be shared for both 1891 * PME and PME_TO_ACK msg handling; currently it only handles 1892 * the PME_To_ACK message. 1893 */ 1894 uint_t 1895 px_pmeq_intr(caddr_t arg) 1896 { 1897 px_t *px_p = (px_t *)arg; 1898 1899 DBG(DBG_PWR, px_p->px_dip, " PME_To_ACK received \n"); 1900 mutex_enter(&px_p->px_l23ready_lock); 1901 cv_broadcast(&px_p->px_l23ready_cv); 1902 if (px_p->px_pm_flags & PX_PME_TURNOFF_PENDING) { 1903 px_p->px_pm_flags |= PX_PMETOACK_RECVD; 1904 } else { 1905 /* 1906 * This may be the second ack received. If so, 1907 * we should be receiving it during the wait4L1 stage. 1908 */ 1909 px_p->px_pmetoack_ignored++; 1910 } 1911 mutex_exit(&px_p->px_l23ready_lock); 1912 return (DDI_INTR_CLAIMED); 1913 } 1914 1915 static int 1916 px_pre_pwron_check(px_t *px_p) 1917 { 1918 pcie_pwr_t *pwr_p; 1919 1920 /* If no PM info, return failure */ 1921 if (!PCIE_PMINFO(px_p->px_dip) || 1922 !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip))) 1923 return (DDI_FAILURE); 1924 1925 /* 1926 * For spec-compliant downstream cards, link down 1927 * is expected when the device is powered on. 1928 */ 1929 px_p->px_pm_flags |= PX_LDN_EXPECTED; 1930 return (pwr_p->pwr_link_lvl == PM_LEVEL_L3 ? DDI_SUCCESS : DDI_FAILURE); 1931 } 1932 1933 static int 1934 px_goto_l0(px_t *px_p) 1935 { 1936 pcie_pwr_t *pwr_p; 1937 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1938 caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR]; 1939 int ret = DDI_SUCCESS; 1940 uint64_t time_spent = 0; 1941 1942 /* If no PM info, return failure */ 1943 if (!PCIE_PMINFO(px_p->px_dip) || 1944 !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip))) 1945 return (DDI_FAILURE); 1946 1947 mutex_enter(&pwr_p->pwr_lock); 1948 /* 1949 * The following link retrain activity will cause LDN and LUP events. 1950 * Receiving LDN prior to receiving LUP is expected, not an error, in 1951 * this case. Receiving LUP indicates the link is fully up to support 1952 * powering up the downstream device, and any further LDN and 1953 * LUP outside this context will be an error. 1954 */ 1955 px_p->px_lup_pending = 1; 1956 if (px_link_retrain(csr_base) != DDI_SUCCESS) { 1957 ret = DDI_FAILURE; 1958 goto l0_done; 1959 } 1960 1961 /* The LUP event takes on the order of 15 ms to occur */ 1962 for (; px_p->px_lup_pending && (time_spent < px_lup_poll_to); 1963 time_spent += px_lup_poll_interval) 1964 drv_usecwait(px_lup_poll_interval); 1965 if (px_p->px_lup_pending) 1966 ret = DDI_FAILURE; 1967 l0_done: 1968 px_enable_detect_quiet(csr_base); 1969 if (ret == DDI_SUCCESS) 1970 pwr_p->pwr_link_lvl = PM_LEVEL_L0; 1971 mutex_exit(&pwr_p->pwr_lock); 1972 return (ret); 1973 } 1974 1975 /* 1976 * Extract the driver's binding name to identify which chip we're binding to. 1977 * Whenever a new bus bridge is created, the driver alias entry should be 1978 * added here to identify the device if needed. If a device isn't added, 1979 * the identity defaults to PX_CHIP_UNIDENTIFIED.
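 * Fire currently binds as "pci108e,80f0" or "pciex108e,80f0" and Oberon as "pciex108e,80f8"; the "module-revision#" property supplies the revision.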
1980 */ 1981 static uint32_t 1982 px_identity_chip(px_t *px_p) 1983 { 1984 dev_info_t *dip = px_p->px_dip; 1985 char *name = ddi_binding_name(dip); 1986 uint32_t revision = 0; 1987 1988 revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1989 "module-revision#", 0); 1990 1991 /* Check for Fire driver binding name */ 1992 if ((strcmp(name, "pci108e,80f0") == 0) || 1993 (strcmp(name, "pciex108e,80f0") == 0)) { 1994 DBG(DBG_ATTACH, dip, "px_identity_chip: %s%d: " 1995 "name %s module-revision %d\n", ddi_driver_name(dip), 1996 ddi_get_instance(dip), name, revision); 1997 1998 return (PX_CHIP_ID(PX_CHIP_FIRE, revision, 0x00)); 1999 } 2000 2001 /* Check for Oberon driver binding name */ 2002 if (strcmp(name, "pciex108e,80f8") == 0) { 2003 DBG(DBG_ATTACH, dip, "px_identity_chip: %s%d: " 2004 "name %s module-revision %d\n", ddi_driver_name(dip), 2005 ddi_get_instance(dip), name, revision); 2006 2007 return (PX_CHIP_ID(PX_CHIP_OBERON, revision, 0x00)); 2008 } 2009 2010 DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n", 2011 ddi_driver_name(dip), ddi_get_instance(dip), name, revision); 2012 2013 return (PX_CHIP_UNIDENTIFIED); 2014 } 2015 2016 int 2017 px_err_add_intr(px_fault_t *px_fault_p) 2018 { 2019 dev_info_t *dip = px_fault_p->px_fh_dip; 2020 px_t *px_p = DIP_TO_STATE(dip); 2021 2022 VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL, 2023 px_fault_p->px_err_func, (caddr_t)px_fault_p, NULL) == 0); 2024 2025 px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino); 2026 2027 return (DDI_SUCCESS); 2028 } 2029 2030 void 2031 px_err_rem_intr(px_fault_t *px_fault_p) 2032 { 2033 dev_info_t *dip = px_fault_p->px_fh_dip; 2034 px_t *px_p = DIP_TO_STATE(dip); 2035 2036 px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino, 2037 IB_INTR_WAIT); 2038 2039 rem_ivintr(px_fault_p->px_fh_sysino, NULL); 2040 } 2041 2042 /* 2043 * px_cb_add_intr() - Called from attach(9E) to create the CB if not yet 2044 * created, always add the CB interrupt vector, but enable it only once. 2045 */ 2046 int 2047 px_cb_add_intr(px_fault_t *fault_p) 2048 { 2049 px_t *px_p = DIP_TO_STATE(fault_p->px_fh_dip); 2050 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 2051 px_cb_t *cb_p = (px_cb_t *)px_get_cb(fault_p->px_fh_dip); 2052 px_cb_list_t *pxl, *pxl_new; 2053 cpuid_t cpuid; 2054 2055 2056 if (cb_p == NULL) { 2057 cb_p = kmem_zalloc(sizeof (px_cb_t), KM_SLEEP); 2058 mutex_init(&cb_p->cb_mutex, NULL, MUTEX_DRIVER, NULL); 2059 cb_p->px_cb_func = px_cb_intr; 2060 pxu_p->px_cb_p = cb_p; 2061 px_set_cb(fault_p->px_fh_dip, (uint64_t)cb_p); 2062 } else 2063 pxu_p->px_cb_p = cb_p; 2064 2065 mutex_enter(&cb_p->cb_mutex); 2066 2067 VERIFY(add_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL, 2068 cb_p->px_cb_func, (caddr_t)cb_p, NULL) == 0); 2069 2070 if (cb_p->pxl == NULL) { 2071 2072 cpuid = intr_dist_cpuid(), 2073 px_ib_intr_enable(px_p, cpuid, fault_p->px_intr_ino); 2074 2075 pxl = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP); 2076 pxl->pxp = px_p; 2077 2078 cb_p->pxl = pxl; 2079 cb_p->sysino = fault_p->px_fh_sysino; 2080 cb_p->cpuid = cpuid; 2081 2082 } else { 2083 /* 2084 * Find the last pxl or 2085 * stop short on encountering a redundant entry, or 2086 * both.
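 * (A redundant entry means this px_p already registered the CB sysino.)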
		pxl = cb_p->pxl;
		for (; !(pxl->pxp == px_p) && pxl->next; pxl = pxl->next);
		if (pxl->pxp == px_p) {
			cmn_err(CE_WARN, "px_cb_add_intr: reregister sysino "
			    "%lx by px_p 0x%p\n", cb_p->sysino, (void *)px_p);
			mutex_exit(&cb_p->cb_mutex);
			return (DDI_FAILURE);
		}

		/* add to linked list */
		pxl_new = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
		pxl_new->pxp = px_p;
		pxl->next = pxl_new;
	}
	cb_p->attachcnt++;

	mutex_exit(&cb_p->cb_mutex);

	return (DDI_SUCCESS);
}

/*
 * px_cb_rem_intr() - Called from detach(9E) to remove its CB interrupt
 * vector, shifting the interrupt proxy to the next available px, or
 * disabling the CB interrupt when this px is the last one attached.
 */
void
px_cb_rem_intr(px_fault_t *fault_p)
{
	px_t *px_p = DIP_TO_STATE(fault_p->px_fh_dip), *pxp;
	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
	px_cb_t *cb_p = PX2CB(px_p);
	px_cb_list_t *pxl, *prev;
	px_fault_t *f_p;

	ASSERT(cb_p->pxl);

	/* De-list the target px, move the next px up */

	mutex_enter(&cb_p->cb_mutex);

	pxl = cb_p->pxl;
	if (pxl->pxp == px_p) {
		cb_p->pxl = pxl->next;
	} else {
		prev = pxl;
		pxl = pxl->next;
		for (; pxl && (pxl->pxp != px_p); prev = pxl, pxl = pxl->next);
		if (!pxl) {
			cmn_err(CE_WARN, "px_cb_rem_intr: can't find px_p 0x%p "
			    "in registered CB list.", (void *)px_p);
			mutex_exit(&cb_p->cb_mutex);
			return;
		}
		prev->next = pxl->next;
	}
	kmem_free(pxl, sizeof (px_cb_list_t));

	if (fault_p->px_fh_sysino == cb_p->sysino) {
		px_ib_intr_disable(px_p->px_ib_p, fault_p->px_intr_ino,
		    IB_INTR_WAIT);

		if (cb_p->pxl) {
			pxp = cb_p->pxl->pxp;
			f_p = &pxp->px_cb_fault;
			cb_p->sysino = f_p->px_fh_sysino;

			PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid);
			(void) px_lib_intr_setstate(pxp->px_dip, cb_p->sysino,
			    INTR_IDLE_STATE);
		}
	}

	rem_ivintr(fault_p->px_fh_sysino, NULL);
	pxu_p->px_cb_p = NULL;
	cb_p->attachcnt--;
	if (cb_p->pxl) {
		mutex_exit(&cb_p->cb_mutex);
		return;
	}
	mutex_exit(&cb_p->cb_mutex);

	mutex_destroy(&cb_p->cb_mutex);
	px_set_cb(fault_p->px_fh_dip, 0ull);
	kmem_free(cb_p, sizeof (px_cb_t));
}
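/*
 * Summary of the shared CB handling above and below (descriptive only):
 * every px leaf sharing a common block is linked on cb_p->pxl; the px whose
 * fault sysino equals cb_p->sysino currently owns the enabled CB interrupt.
 * px_cb_rem_intr() hands that ownership to the next list entry when the
 * owner detaches, and px_cb_intr() below dispatches the interrupt to the
 * first attached px on the list.
 */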
/*
 * px_cb_intr() - sun4u only, CB interrupt dispatcher
 */
uint_t
px_cb_intr(caddr_t arg)
{
	px_cb_t *cb_p = (px_cb_t *)arg;
	px_cb_list_t *pxl = cb_p->pxl;
	px_t *pxp = pxl ? pxl->pxp : NULL;
	px_fault_t *fault_p;

	while (pxl && pxp && (pxp->px_state != PX_ATTACHED)) {
		pxl = pxl->next;
		pxp = (pxl) ? pxl->pxp : NULL;
	}

	if (pxp) {
		fault_p = &pxp->px_cb_fault;
		return (fault_p->px_err_func((caddr_t)fault_p));
	} else
		return (DDI_INTR_UNCLAIMED);
}

/*
 * px_cb_intr_redist() - sun4u only, CB interrupt redistribution
 */
void
px_cb_intr_redist(px_t *px_p)
{
	px_fault_t *f_p = &px_p->px_cb_fault;
	px_cb_t *cb_p = PX2CB(px_p);
	devino_t ino = px_p->px_inos[PX_INTR_XBC];
	cpuid_t cpuid;

	mutex_enter(&cb_p->cb_mutex);

	if (cb_p->sysino != f_p->px_fh_sysino) {
		mutex_exit(&cb_p->cb_mutex);
		return;
	}

	cb_p->cpuid = cpuid = intr_dist_cpuid();
	px_ib_intr_dist_en(px_p->px_dip, cpuid, ino, B_FALSE);

	mutex_exit(&cb_p->cb_mutex);
}

#ifdef FMA
void
px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status)
{
	/* populate the rc_status by reading the registers - TBD */
}
#endif /* FMA */

/*
 * Unprotected raw reads/writes of fabric device's config space.
 * Only used for temporary PCI-E Fabric Error Handling.
 */
uint32_t
px_fab_get(px_t *px_p, pcie_req_id_t bdf, uint16_t offset)
{
	px_ranges_t *rp = px_p->px_ranges_p;
	uint64_t range_prop, base_addr;
	int bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
	uint32_t val;

	/* Get Fire's Physical Base Address */
	range_prop = px_get_range_prop(px_p, rp, bank);

	/* Get config space first. */
	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);

	val = ldphysio(base_addr);

	return (LE_32(val));
}

void
px_fab_set(px_t *px_p, pcie_req_id_t bdf, uint16_t offset,
    uint32_t val)
{
	px_ranges_t *rp = px_p->px_ranges_p;
	uint64_t range_prop, base_addr;
	int bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);

	/* Get Fire's Physical Base Address */
	range_prop = px_get_range_prop(px_p, rp, bank);

	/* Get config space first. */
	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);

	stphysio(base_addr, LE_32(val));
}
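/*
 * Illustrative sketch only, not code from this driver: a fabric error
 * handler holding a valid px_p and bdf is assumed to use the raw accessors
 * above along the lines of
 *
 *	uint32_t id = px_fab_get(px_p, bdf, PCI_CONF_VENID);
 *
 * to read the 32-bit vendor/device ID dword at config offset 0, with
 * px_fab_set() used symmetrically for writes.  No locking or access checking
 * is performed, which is why these accessors are reserved for the temporary
 * fabric error handling code.
 */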
/*
 * cpr callback
 *
 * Disable the fabric error message interrupts prior to suspending all
 * device drivers; re-enable them after all devices have resumed.
 */
static boolean_t
px_cpr_callb(void *arg, int code)
{
	px_t *px_p = (px_t *)arg;
	px_ib_t *ib_p = px_p->px_ib_p;
	px_pec_t *pec_p = px_p->px_pec_p;
	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t csr_base;
	devino_t ce_ino, nf_ino, f_ino;
	px_ib_ino_info_t *ce_ino_p, *nf_ino_p, *f_ino_p;
	uint64_t imu_log_enable, imu_intr_enable;
	uint64_t imu_log_mask, imu_intr_mask;

	ce_ino = px_msiqid_to_devino(px_p, pec_p->pec_corr_msg_msiq_id);
	nf_ino = px_msiqid_to_devino(px_p, pec_p->pec_non_fatal_msg_msiq_id);
	f_ino = px_msiqid_to_devino(px_p, pec_p->pec_fatal_msg_msiq_id);
	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];

	imu_log_enable = CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE);
	imu_intr_enable = CSR_XR(csr_base, IMU_INTERRUPT_ENABLE);

	imu_log_mask = BITMASK(IMU_ERROR_LOG_ENABLE_FATAL_MES_NOT_EN_LOG_EN) |
	    BITMASK(IMU_ERROR_LOG_ENABLE_NONFATAL_MES_NOT_EN_LOG_EN) |
	    BITMASK(IMU_ERROR_LOG_ENABLE_COR_MES_NOT_EN_LOG_EN);

	imu_intr_mask =
	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_S_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_S_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_S_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_P_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_P_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_P_INT_EN);

	switch (code) {
	case CB_CODE_CPR_CHKPT:
		/* disable imu rbne on corr/nonfatal/fatal errors */
		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE,
		    imu_log_enable & (~imu_log_mask));

		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE,
		    imu_intr_enable & (~imu_intr_mask));

		/* disable CORR intr mapping */
		px_ib_intr_disable(ib_p, ce_ino, IB_INTR_NOWAIT);

		/* disable NON FATAL intr mapping */
		px_ib_intr_disable(ib_p, nf_ino, IB_INTR_NOWAIT);

		/* disable FATAL intr mapping */
		px_ib_intr_disable(ib_p, f_ino, IB_INTR_NOWAIT);

		break;

	case CB_CODE_CPR_RESUME:
		mutex_enter(&ib_p->ib_ino_lst_mutex);

		ce_ino_p = px_ib_locate_ino(ib_p, ce_ino);
		nf_ino_p = px_ib_locate_ino(ib_p, nf_ino);
		f_ino_p = px_ib_locate_ino(ib_p, f_ino);

		/* enable CORR intr mapping */
		if (ce_ino_p)
			px_ib_intr_enable(px_p, ce_ino_p->ino_cpuid, ce_ino);
		else
			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
			    "reenable PCIe Correctable msg intr.\n");

		/* enable NON FATAL intr mapping */
		if (nf_ino_p)
			px_ib_intr_enable(px_p, nf_ino_p->ino_cpuid, nf_ino);
		else
			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
			    "reenable PCIe Non Fatal msg intr.\n");

		/* enable FATAL intr mapping */
		if (f_ino_p)
			px_ib_intr_enable(px_p, f_ino_p->ino_cpuid, f_ino);
		else
			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
			    "reenable PCIe Fatal msg intr.\n");

		mutex_exit(&ib_p->ib_ino_lst_mutex);

		/* re-enable corr/nonfatal/fatal "msg not enabled" errors */
		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, (imu_log_enable |
		    (imu_log_mask & px_imu_log_mask)));
		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, (imu_intr_enable |
		    (imu_intr_mask & px_imu_intr_mask)));

		break;
	}

	return (B_TRUE);
}
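/*
 * Note on the masking in px_cpr_callb() above (descriptive only): at
 * checkpoint time the three "message not enabled" log/interrupt enable bits
 * are cleared with "enable & ~mask"; at resume time they are restored with
 * "enable | (mask & px_imu_log_mask)" (respectively px_imu_intr_mask), so a
 * bit is turned back on only if the corresponding driver-wide setting,
 * assumed here to be a tunable, also allows it.
 */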
uint64_t
px_get_rng_parent_hi_mask(px_t *px_p)
{
	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t mask;

	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		mask = OBERON_RANGE_PROP_MASK;
		break;
	case PX_CHIP_FIRE:
		mask = PX_RANGE_PROP_MASK;
		break;
	default:
		mask = PX_RANGE_PROP_MASK;
		break;
	}

	return (mask);
}

/*
 * Fetch the value of the chip's "ranges" property.
 */
uint64_t
px_get_range_prop(px_t *px_p, px_ranges_t *rp, int bank)
{
	uint64_t mask, range_prop;

	mask = px_get_rng_parent_hi_mask(px_p);
	range_prop = (((uint64_t)(rp[bank].parent_high & mask)) << 32) |
	    rp[bank].parent_low;

	return (range_prop);
}

/*
 * add cpr callback
 */
void
px_cpr_add_callb(px_t *px_p)
{
	px_p->px_cprcb_id = callb_add(px_cpr_callb, (void *)px_p,
	    CB_CL_CPR_POST_USER, "px_cpr");
}

/*
 * remove cpr callback
 */
void
px_cpr_rem_callb(px_t *px_p)
{
	(void) callb_delete(px_p->px_cprcb_id);
}

/*ARGSUSED*/
static uint_t
px_hp_intr(caddr_t arg1, caddr_t arg2)
{
	px_t *px_p = (px_t *)arg1;
	int rval;

	rval = pciehpc_intr(px_p->px_dip);

#ifdef DEBUG
	if (rval == DDI_INTR_UNCLAIMED)
		cmn_err(CE_WARN, "%s%d: UNCLAIMED intr\n",
		    ddi_driver_name(px_p->px_dip),
		    ddi_get_instance(px_p->px_dip));
#endif

	return (rval);
}

int
px_lib_hotplug_init(dev_info_t *dip, void *arg)
{
	px_t *px_p = DIP_TO_STATE(dip);
	uint64_t ret;

	if ((ret = hvio_hotplug_init(dip, arg)) == DDI_SUCCESS) {
		sysino_t sysino;

		if (px_lib_intr_devino_to_sysino(px_p->px_dip,
		    px_p->px_inos[PX_INTR_HOTPLUG], &sysino) !=
		    DDI_SUCCESS) {
#ifdef DEBUG
			cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n",
			    ddi_driver_name(px_p->px_dip),
			    ddi_get_instance(px_p->px_dip));
#endif
			return (DDI_FAILURE);
		}

		VERIFY(add_ivintr(sysino, PX_PCIEHP_PIL,
		    (intrfunc)px_hp_intr, (caddr_t)px_p, NULL) == 0);
	}

	return (ret);
}

void
px_lib_hotplug_uninit(dev_info_t *dip)
{
	if (hvio_hotplug_uninit(dip) == DDI_SUCCESS) {
		px_t *px_p = DIP_TO_STATE(dip);
		sysino_t sysino;

		if (px_lib_intr_devino_to_sysino(px_p->px_dip,
		    px_p->px_inos[PX_INTR_HOTPLUG], &sysino) !=
		    DDI_SUCCESS) {
#ifdef DEBUG
			cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n",
			    ddi_driver_name(px_p->px_dip),
			    ddi_get_instance(px_p->px_dip));
#endif
			return;
		}

		rem_ivintr(sysino, NULL);
	}
}
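/*
 * Illustrative note only (an assumption, not code from this driver): the
 * hotplug routines above are expected to be used as a pair, with
 * px_lib_hotplug_init() called from the attach path and
 * px_lib_hotplug_uninit() from the teardown path.  Both translate the
 * PX_INTR_HOTPLUG ino to the same sysino so that add_ivintr() and
 * rem_ivintr() operate on the same interrupt vector.
 */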