1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <sys/types.h> 29 #include <sys/kmem.h> 30 #include <sys/conf.h> 31 #include <sys/ddi.h> 32 #include <sys/sunddi.h> 33 #include <sys/fm/protocol.h> 34 #include <sys/fm/util.h> 35 #include <sys/modctl.h> 36 #include <sys/disp.h> 37 #include <sys/stat.h> 38 #include <sys/ddi_impldefs.h> 39 #include <sys/vmem.h> 40 #include <sys/iommutsb.h> 41 #include <sys/cpuvar.h> 42 #include <sys/ivintr.h> 43 #include <sys/byteorder.h> 44 #include <sys/hotplug/pci/pciehpc.h> 45 #include <px_obj.h> 46 #include <pcie_pwr.h> 47 #include "px_tools_var.h" 48 #include <px_regs.h> 49 #include <px_csr.h> 50 #include <sys/machsystm.h> 51 #include "px_lib4u.h" 52 #include "px_err.h" 53 #include "oberon_regs.h" 54 55 #pragma weak jbus_stst_order 56 57 extern void jbus_stst_order(); 58 59 ulong_t px_mmu_dvma_end = 0xfffffffful; 60 uint_t px_ranges_phi_mask = 0xfffffffful; 61 uint64_t *px_oberon_ubc_scratch_regs; 62 uint64_t px_paddr_mask; 63 64 static int px_goto_l23ready(px_t *px_p); 65 static int px_goto_l0(px_t *px_p); 66 static int px_pre_pwron_check(px_t *px_p); 67 static uint32_t px_identity_init(px_t *px_p); 68 static boolean_t px_cpr_callb(void *arg, int code); 69 static uint_t px_cb_intr(caddr_t arg); 70 71 /* 72 * px_lib_map_registers 73 * 74 * This function is called from the attach routine to map the registers 75 * accessed by this driver. 
76 * 77 * used by: px_attach() 78 * 79 * return value: DDI_FAILURE on failure 80 */ 81 int 82 px_lib_map_regs(pxu_t *pxu_p, dev_info_t *dip) 83 { 84 ddi_device_acc_attr_t attr; 85 px_reg_bank_t reg_bank = PX_REG_CSR; 86 87 DBG(DBG_ATTACH, dip, "px_lib_map_regs: pxu_p:0x%p, dip 0x%p\n", 88 pxu_p, dip); 89 90 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 91 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 92 attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC; 93 94 /* 95 * PCI CSR Base 96 */ 97 if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank], 98 0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) { 99 goto fail; 100 } 101 102 reg_bank++; 103 104 /* 105 * XBUS CSR Base 106 */ 107 if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank], 108 0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) { 109 goto fail; 110 } 111 112 pxu_p->px_address[reg_bank] -= FIRE_CONTROL_STATUS; 113 114 done: 115 for (; reg_bank >= PX_REG_CSR; reg_bank--) { 116 DBG(DBG_ATTACH, dip, "reg_bank 0x%x address 0x%p\n", 117 reg_bank, pxu_p->px_address[reg_bank]); 118 } 119 120 return (DDI_SUCCESS); 121 122 fail: 123 cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n", 124 ddi_driver_name(dip), ddi_get_instance(dip), reg_bank); 125 126 for (reg_bank--; reg_bank >= PX_REG_CSR; reg_bank--) { 127 pxu_p->px_address[reg_bank] = NULL; 128 ddi_regs_map_free(&pxu_p->px_ac[reg_bank]); 129 } 130 131 return (DDI_FAILURE); 132 } 133 134 /* 135 * px_lib_unmap_regs: 136 * 137 * This routine unmaps the registers mapped by map_px_registers. 138 * 139 * used by: px_detach(), and error conditions in px_attach() 140 * 141 * return value: none 142 */ 143 void 144 px_lib_unmap_regs(pxu_t *pxu_p) 145 { 146 int i; 147 148 for (i = 0; i < PX_REG_MAX; i++) { 149 if (pxu_p->px_ac[i]) 150 ddi_regs_map_free(&pxu_p->px_ac[i]); 151 } 152 } 153 154 int 155 px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl) 156 { 157 158 caddr_t xbc_csr_base, csr_base; 159 px_dvma_range_prop_t px_dvma_range; 160 pxu_t *pxu_p; 161 uint8_t chip_mask; 162 px_t *px_p = DIP_TO_STATE(dip); 163 px_chip_type_t chip_type = px_identity_init(px_p); 164 165 DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p", dip); 166 167 if (chip_type == PX_CHIP_UNIDENTIFIED) { 168 cmn_err(CE_WARN, "%s%d: Unrecognized Hardware Version\n", 169 NAMEINST(dip)); 170 return (DDI_FAILURE); 171 } 172 173 chip_mask = BITMASK(chip_type); 174 px_paddr_mask = (chip_type == PX_CHIP_FIRE) ? MMU_FIRE_PADDR_MASK : 175 MMU_OBERON_PADDR_MASK; 176 177 /* 178 * Allocate platform specific structure and link it to 179 * the px state structure. 180 */ 181 pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP); 182 pxu_p->chip_type = chip_type; 183 pxu_p->portid = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 184 "portid", -1); 185 186 /* Map in the registers */ 187 if (px_lib_map_regs(pxu_p, dip) == DDI_FAILURE) { 188 kmem_free(pxu_p, sizeof (pxu_t)); 189 190 return (DDI_FAILURE); 191 } 192 193 xbc_csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC]; 194 csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR]; 195 196 pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid); 197 pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie); 198 pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie); 199 200 pxu_p->tsb_paddr = va_to_pa(pxu_p->tsb_vaddr); 201 202 /* 203 * Create "virtual-dma" property to support child devices 204 * needing to know DVMA range. 
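 * For example (illustrative numbers only, assuming 8K IOMMU pages, i.e. MMU_PAGE_SHIFT of 13): a 64KB TSB holds 8K eight-byte entries, so the window below covers 8K * 8KB = 64MB, giving dvma_base = 0xffffffff + 1 - 0x4000000 = 0xfc000000 and dvma_len = 0x4000000.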
205 */ 206 px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1 207 - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT); 208 px_dvma_range.dvma_len = (uint32_t) 209 px_mmu_dvma_end - px_dvma_range.dvma_base + 1; 210 211 (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 212 "virtual-dma", (caddr_t)&px_dvma_range, 213 sizeof (px_dvma_range_prop_t)); 214 /* 215 * Initialize all fire hardware specific blocks. 216 */ 217 hvio_cb_init(xbc_csr_base, pxu_p); 218 hvio_ib_init(csr_base, pxu_p); 219 hvio_pec_init(csr_base, pxu_p); 220 hvio_mmu_init(csr_base, pxu_p); 221 222 px_p->px_plat_p = (void *)pxu_p; 223 224 /* 225 * Initialize all the interrupt handlers 226 */ 227 switch (PX_CHIP_TYPE(pxu_p)) { 228 case PX_CHIP_OBERON: 229 /* 230 * Oberon hotplug uses the SPARE3 field in the ILU Error Log Enable 231 * register to indicate the status of leaf reset; 232 * we need to preserve the value of this bit and keep it in 233 * px_ilu_log_mask to reflect the state of the bit. 234 */ 235 if (CSR_BR(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3)) 236 px_ilu_log_mask |= (1ull << 237 ILU_ERROR_LOG_ENABLE_SPARE3); 238 else 239 px_ilu_log_mask &= ~(1ull << 240 ILU_ERROR_LOG_ENABLE_SPARE3); 241 242 px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_ENABLE); 243 px_fabric_die_rc_ue |= PCIE_AER_UCE_UC; 244 break; 245 246 case PX_CHIP_FIRE: 247 px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_ENABLE); 248 break; 249 250 default: 251 cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n", 252 ddi_driver_name(dip), ddi_get_instance(dip)); 253 return (DDI_FAILURE); 254 } 255 256 /* Initialize device handle */ 257 *dev_hdl = (devhandle_t)csr_base; 258 259 DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl); 260 261 return (DDI_SUCCESS); 262 } 263 264 int 265 px_lib_dev_fini(dev_info_t *dip) 266 { 267 caddr_t csr_base; 268 uint8_t chip_mask; 269 px_t *px_p = DIP_TO_STATE(dip); 270 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 271 272 DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip); 273 274 /* 275 * Deinitialize all the interrupt handlers 276 */ 277 switch (PX_CHIP_TYPE(pxu_p)) { 278 case PX_CHIP_OBERON: 279 case PX_CHIP_FIRE: 280 chip_mask = BITMASK(PX_CHIP_TYPE(pxu_p)); 281 csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR]; 282 px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_DISABLE); 283 break; 284 285 default: 286 cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n", 287 ddi_driver_name(dip), ddi_get_instance(dip)); 288 return (DDI_FAILURE); 289 } 290 291 iommu_tsb_free(pxu_p->tsb_cookie); 292 293 px_lib_unmap_regs((pxu_t *)px_p->px_plat_p); 294 kmem_free(px_p->px_plat_p, sizeof (pxu_t)); 295 px_p->px_plat_p = NULL; 296 297 return (DDI_SUCCESS); 298 } 299 300 /*ARGSUSED*/ 301 int 302 px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino, 303 sysino_t *sysino) 304 { 305 px_t *px_p = DIP_TO_STATE(dip); 306 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 307 uint64_t ret; 308 309 DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p " 310 "devino 0x%x\n", dip, devino); 311 312 if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip), 313 pxu_p, devino, sysino)) != H_EOK) { 314 DBG(DBG_LIB_INT, dip, 315 "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret); 316 return (DDI_FAILURE); 317 } 318 319 DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n", 320 *sysino); 321 322 return (DDI_SUCCESS); 323 } 324 325 /*ARGSUSED*/ 326 int 327 px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino, 328 intr_valid_state_t *intr_valid_state) 329 { 330 uint64_t ret; 331 332 DBG(DBG_LIB_INT, dip, 
"px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n", 333 dip, sysino); 334 335 if ((ret = hvio_intr_getvalid(DIP_TO_HANDLE(dip), 336 sysino, intr_valid_state)) != H_EOK) { 337 DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n", 338 ret); 339 return (DDI_FAILURE); 340 } 341 342 DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n", 343 *intr_valid_state); 344 345 return (DDI_SUCCESS); 346 } 347 348 /*ARGSUSED*/ 349 int 350 px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino, 351 intr_valid_state_t intr_valid_state) 352 { 353 uint64_t ret; 354 355 DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx " 356 "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state); 357 358 if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip), 359 sysino, intr_valid_state)) != H_EOK) { 360 DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n", 361 ret); 362 return (DDI_FAILURE); 363 } 364 365 return (DDI_SUCCESS); 366 } 367 368 /*ARGSUSED*/ 369 int 370 px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino, 371 intr_state_t *intr_state) 372 { 373 uint64_t ret; 374 375 DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n", 376 dip, sysino); 377 378 if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip), 379 sysino, intr_state)) != H_EOK) { 380 DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n", 381 ret); 382 return (DDI_FAILURE); 383 } 384 385 DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n", 386 *intr_state); 387 388 return (DDI_SUCCESS); 389 } 390 391 /*ARGSUSED*/ 392 int 393 px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino, 394 intr_state_t intr_state) 395 { 396 uint64_t ret; 397 398 DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx " 399 "intr_state 0x%x\n", dip, sysino, intr_state); 400 401 if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip), 402 sysino, intr_state)) != H_EOK) { 403 DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n", 404 ret); 405 return (DDI_FAILURE); 406 } 407 408 return (DDI_SUCCESS); 409 } 410 411 /*ARGSUSED*/ 412 int 413 px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid) 414 { 415 px_t *px_p = DIP_TO_STATE(dip); 416 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 417 uint64_t ret; 418 419 DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n", 420 dip, sysino); 421 422 if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip), pxu_p, 423 sysino, cpuid)) != H_EOK) { 424 DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n", 425 ret); 426 return (DDI_FAILURE); 427 } 428 429 DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", cpuid); 430 431 return (DDI_SUCCESS); 432 } 433 434 /*ARGSUSED*/ 435 int 436 px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid) 437 { 438 px_t *px_p = DIP_TO_STATE(dip); 439 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 440 uint64_t ret; 441 442 DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx " 443 "cpuid 0x%x\n", dip, sysino, cpuid); 444 445 if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip), pxu_p, 446 sysino, cpuid)) != H_EOK) { 447 DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n", 448 ret); 449 return (DDI_FAILURE); 450 } 451 452 return (DDI_SUCCESS); 453 } 454 455 /*ARGSUSED*/ 456 int 457 px_lib_intr_reset(dev_info_t *dip) 458 { 459 devino_t ino; 460 sysino_t sysino; 461 462 DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip); 463 464 /* Reset all Interrupts */ 465 for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) { 466 if 
(px_lib_intr_devino_to_sysino(dip, ino, 467 &sysino) != DDI_SUCCESS) 468 return (BF_FATAL); 469 470 if (px_lib_intr_setstate(dip, sysino, 471 INTR_IDLE_STATE) != DDI_SUCCESS) 472 return (BF_FATAL); 473 } 474 475 return (BF_NONE); 476 } 477 478 /*ARGSUSED*/ 479 int 480 px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages, 481 io_attributes_t attr, void *addr, size_t pfn_index, int flags) 482 { 483 px_t *px_p = DIP_TO_STATE(dip); 484 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 485 uint64_t ret; 486 487 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx " 488 "pages 0x%x attr 0x%x addr 0x%p pfn_index 0x%llx flags 0x%x\n", 489 dip, tsbid, pages, attr, addr, pfn_index, flags); 490 491 if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages, 492 attr, addr, pfn_index, flags)) != H_EOK) { 493 DBG(DBG_LIB_DMA, dip, 494 "px_lib_iommu_map failed, ret 0x%lx\n", ret); 495 return (DDI_FAILURE); 496 } 497 498 return (DDI_SUCCESS); 499 } 500 501 /*ARGSUSED*/ 502 int 503 px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages) 504 { 505 px_t *px_p = DIP_TO_STATE(dip); 506 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 507 uint64_t ret; 508 509 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx " 510 "pages 0x%x\n", dip, tsbid, pages); 511 512 if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, tsbid, pages)) 513 != H_EOK) { 514 DBG(DBG_LIB_DMA, dip, 515 "px_lib_iommu_demap failed, ret 0x%lx\n", ret); 516 517 return (DDI_FAILURE); 518 } 519 520 return (DDI_SUCCESS); 521 } 522 523 /*ARGSUSED*/ 524 int 525 px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, io_attributes_t *attr_p, 526 r_addr_t *r_addr_p) 527 { 528 px_t *px_p = DIP_TO_STATE(dip); 529 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 530 uint64_t ret; 531 532 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n", 533 dip, tsbid); 534 535 if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid, 536 attr_p, r_addr_p)) != H_EOK) { 537 DBG(DBG_LIB_DMA, dip, 538 "hvio_iommu_getmap failed, ret 0x%lx\n", ret); 539 540 return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING:DDI_FAILURE); 541 } 542 543 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%x r_addr 0x%llx\n", 544 *attr_p, *r_addr_p); 545 546 return (DDI_SUCCESS); 547 } 548 549 550 /* 551 * Checks dma attributes against system bypass ranges 552 * The bypass range is determined by the hardware. Return them so the 553 * common code can do generic checking against them. 554 */ 555 /*ARGSUSED*/ 556 int 557 px_lib_dma_bypass_rngchk(dev_info_t *dip, ddi_dma_attr_t *attr_p, 558 uint64_t *lo_p, uint64_t *hi_p) 559 { 560 px_t *px_p = DIP_TO_STATE(dip); 561 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 562 563 *lo_p = hvio_get_bypass_base(pxu_p); 564 *hi_p = hvio_get_bypass_end(pxu_p); 565 566 return (DDI_SUCCESS); 567 } 568 569 570 /*ARGSUSED*/ 571 int 572 px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra, io_attributes_t attr, 573 io_addr_t *io_addr_p) 574 { 575 uint64_t ret; 576 px_t *px_p = DIP_TO_STATE(dip); 577 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 578 579 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx " 580 "attr 0x%x\n", dip, ra, attr); 581 582 if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), pxu_p, ra, 583 attr, io_addr_p)) != H_EOK) { 584 DBG(DBG_LIB_DMA, dip, 585 "hvio_iommu_getbypass failed, ret 0x%lx\n", ret); 586 return (DDI_FAILURE); 587 } 588 589 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n", 590 *io_addr_p); 591 592 return (DDI_SUCCESS); 593 } 594 595 /* 596 * bus dma sync entry point. 
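 * A flush is only needed on pre-Oberon (Fire/JBus) chips, and only when the CPU is about to look at DMA'ed data (a DDI_DMA_READ handle synced FORCPU/FORKERNEL); in that case the CPU's invalidate FIFOs are drained via jbus_stst_order() below.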
597 */ 598 /*ARGSUSED*/ 599 int 600 px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 601 off_t off, size_t len, uint_t cache_flags) 602 { 603 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 604 px_t *px_p = DIP_TO_STATE(dip); 605 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 606 607 DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p " 608 "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n", 609 dip, rdip, handle, off, len, cache_flags); 610 611 /* 612 * No flush needed for Oberon 613 */ 614 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) 615 return (DDI_SUCCESS); 616 617 /* 618 * jbus_stst_order is found only in certain cpu modules. 619 * Just return success if not present. 620 */ 621 if (&jbus_stst_order == NULL) 622 return (DDI_SUCCESS); 623 624 if (!(mp->dmai_flags & PX_DMAI_FLAGS_INUSE)) { 625 cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.", 626 ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp); 627 628 return (DDI_FAILURE); 629 } 630 631 if (mp->dmai_flags & PX_DMAI_FLAGS_NOSYNC) 632 return (DDI_SUCCESS); 633 634 /* 635 * No flush needed when sending data from memory to device. 636 * Nothing to do to "sync" memory to what device would already see. 637 */ 638 if (!(mp->dmai_rflags & DDI_DMA_READ) || 639 ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV)) 640 return (DDI_SUCCESS); 641 642 /* 643 * Perform necessary cpu workaround to ensure jbus ordering. 644 * CPU's internal "invalidate FIFOs" are flushed. 645 */ 646 647 #if !defined(lint) 648 kpreempt_disable(); 649 #endif 650 jbus_stst_order(); 651 #if !defined(lint) 652 kpreempt_enable(); 653 #endif 654 return (DDI_SUCCESS); 655 } 656 657 /* 658 * MSIQ Functions: 659 */ 660 /*ARGSUSED*/ 661 int 662 px_lib_msiq_init(dev_info_t *dip) 663 { 664 px_t *px_p = DIP_TO_STATE(dip); 665 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 666 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 667 caddr_t msiq_addr; 668 px_dvma_addr_t pg_index; 669 size_t size; 670 int ret; 671 672 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip); 673 674 /* 675 * Map the EQ memory into the Fire MMU (has to be 512KB aligned) 676 * and then initialize the base address register. 677 * 678 * Allocate entries from Fire IOMMU so that the resulting address 679 * is properly aligned. Calculate the index of the first allocated 680 * entry. Note: The size of the mapping is assumed to be a multiple 681 * of the page size. 
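 * The 512KB alignment comes from the vmem_xalloc() call below (align argument of 512 * 1024 on the mmu_dvma_map arena); once the buffer is mapped at that DVMA address, hvio_msiq_init() programs the EQ base address register.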
682 */ 683 msiq_addr = (caddr_t)(((uint64_t)msiq_state_p->msiq_buf_p + 684 (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT); 685 686 size = msiq_state_p->msiq_cnt * 687 msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 688 689 pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map, 690 size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT); 691 692 if (pxu_p->msiq_mapped_p == NULL) 693 return (DDI_FAILURE); 694 695 pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p, 696 MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p)); 697 698 if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index), 699 MMU_BTOP(size), PCI_MAP_ATTR_WRITE, (void *)msiq_addr, 0, 700 MMU_MAP_BUF)) != DDI_SUCCESS) { 701 DBG(DBG_LIB_MSIQ, dip, 702 "hvio_msiq_init failed, ret 0x%lx\n", ret); 703 704 (void) px_lib_msiq_fini(dip); 705 return (DDI_FAILURE); 706 } 707 708 (void) hvio_msiq_init(DIP_TO_HANDLE(dip), pxu_p); 709 710 return (DDI_SUCCESS); 711 } 712 713 /*ARGSUSED*/ 714 int 715 px_lib_msiq_fini(dev_info_t *dip) 716 { 717 px_t *px_p = DIP_TO_STATE(dip); 718 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 719 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 720 px_dvma_addr_t pg_index; 721 size_t size; 722 723 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip); 724 725 /* 726 * Unmap and free the EQ memory that had been mapped 727 * into the Fire IOMMU. 728 */ 729 size = msiq_state_p->msiq_cnt * 730 msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 731 732 pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p, 733 MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p)); 734 735 (void) px_lib_iommu_demap(px_p->px_dip, 736 PCI_TSBID(0, pg_index), MMU_BTOP(size)); 737 738 /* Free the entries from the Fire MMU */ 739 vmem_xfree(px_p->px_mmu_p->mmu_dvma_map, 740 (void *)pxu_p->msiq_mapped_p, size); 741 742 return (DDI_SUCCESS); 743 } 744 745 /*ARGSUSED*/ 746 int 747 px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p, 748 uint_t *msiq_rec_cnt_p) 749 { 750 px_t *px_p = DIP_TO_STATE(dip); 751 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 752 uint64_t *msiq_addr; 753 size_t msiq_size; 754 755 DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n", 756 dip, msiq_id); 757 758 msiq_addr = (uint64_t *)(((uint64_t)msiq_state_p->msiq_buf_p + 759 (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT); 760 msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 761 ra_p = (r_addr_t *)((caddr_t)msiq_addr + (msiq_id * msiq_size)); 762 763 *msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt; 764 765 DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%p msiq_rec_cnt 0x%x\n", 766 ra_p, *msiq_rec_cnt_p); 767 768 return (DDI_SUCCESS); 769 } 770 771 /*ARGSUSED*/ 772 int 773 px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id, 774 pci_msiq_valid_state_t *msiq_valid_state) 775 { 776 uint64_t ret; 777 778 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n", 779 dip, msiq_id); 780 781 if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip), 782 msiq_id, msiq_valid_state)) != H_EOK) { 783 DBG(DBG_LIB_MSIQ, dip, 784 "hvio_msiq_getvalid failed, ret 0x%lx\n", ret); 785 return (DDI_FAILURE); 786 } 787 788 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n", 789 *msiq_valid_state); 790 791 return (DDI_SUCCESS); 792 } 793 794 /*ARGSUSED*/ 795 int 796 px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id, 797 pci_msiq_valid_state_t msiq_valid_state) 798 { 799 uint64_t ret; 800 801 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x " 802 "msiq_valid_state 0x%x\n", dip, 
msiq_id, msiq_valid_state); 803 804 if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip), 805 msiq_id, msiq_valid_state)) != H_EOK) { 806 DBG(DBG_LIB_MSIQ, dip, 807 "hvio_msiq_setvalid failed, ret 0x%lx\n", ret); 808 return (DDI_FAILURE); 809 } 810 811 return (DDI_SUCCESS); 812 } 813 814 /*ARGSUSED*/ 815 int 816 px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id, 817 pci_msiq_state_t *msiq_state) 818 { 819 uint64_t ret; 820 821 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n", 822 dip, msiq_id); 823 824 if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip), 825 msiq_id, msiq_state)) != H_EOK) { 826 DBG(DBG_LIB_MSIQ, dip, 827 "hvio_msiq_getstate failed, ret 0x%lx\n", ret); 828 return (DDI_FAILURE); 829 } 830 831 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n", 832 *msiq_state); 833 834 return (DDI_SUCCESS); 835 } 836 837 /*ARGSUSED*/ 838 int 839 px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id, 840 pci_msiq_state_t msiq_state) 841 { 842 uint64_t ret; 843 844 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x " 845 "msiq_state 0x%x\n", dip, msiq_id, msiq_state); 846 847 if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip), 848 msiq_id, msiq_state)) != H_EOK) { 849 DBG(DBG_LIB_MSIQ, dip, 850 "hvio_msiq_setstate failed, ret 0x%lx\n", ret); 851 return (DDI_FAILURE); 852 } 853 854 return (DDI_SUCCESS); 855 } 856 857 /*ARGSUSED*/ 858 int 859 px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id, 860 msiqhead_t *msiq_head) 861 { 862 uint64_t ret; 863 864 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n", 865 dip, msiq_id); 866 867 if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip), 868 msiq_id, msiq_head)) != H_EOK) { 869 DBG(DBG_LIB_MSIQ, dip, 870 "hvio_msiq_gethead failed, ret 0x%lx\n", ret); 871 return (DDI_FAILURE); 872 } 873 874 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n", 875 *msiq_head); 876 877 return (DDI_SUCCESS); 878 } 879 880 /*ARGSUSED*/ 881 int 882 px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id, 883 msiqhead_t msiq_head) 884 { 885 uint64_t ret; 886 887 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x " 888 "msiq_head 0x%x\n", dip, msiq_id, msiq_head); 889 890 if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip), 891 msiq_id, msiq_head)) != H_EOK) { 892 DBG(DBG_LIB_MSIQ, dip, 893 "hvio_msiq_sethead failed, ret 0x%lx\n", ret); 894 return (DDI_FAILURE); 895 } 896 897 return (DDI_SUCCESS); 898 } 899 900 /*ARGSUSED*/ 901 int 902 px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id, 903 msiqtail_t *msiq_tail) 904 { 905 uint64_t ret; 906 907 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n", 908 dip, msiq_id); 909 910 if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip), 911 msiq_id, msiq_tail)) != H_EOK) { 912 DBG(DBG_LIB_MSIQ, dip, 913 "hvio_msiq_gettail failed, ret 0x%lx\n", ret); 914 return (DDI_FAILURE); 915 } 916 917 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n", 918 *msiq_tail); 919 920 return (DDI_SUCCESS); 921 } 922 923 /*ARGSUSED*/ 924 void 925 px_lib_get_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p, 926 msiq_rec_t *msiq_rec_p) 927 { 928 eq_rec_t *eq_rec_p = (eq_rec_t *)msiq_head_p; 929 930 DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n", 931 dip, eq_rec_p); 932 933 if (!eq_rec_p->eq_rec_fmt_type) { 934 /* Set msiq_rec_type to zero */ 935 msiq_rec_p->msiq_rec_type = 0; 936 937 return; 938 } 939 940 DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, " 941 "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx " 942 
"eq_rec_len 0x%llx eq_rec_addr0 0x%llx " 943 "eq_rec_addr1 0x%llx eq_rec_data0 0x%llx " 944 "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid, 945 eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len, 946 eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1, 947 eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1); 948 949 /* 950 * Only upper 4 bits of eq_rec_fmt_type is used 951 * to identify the EQ record type. 952 */ 953 switch (eq_rec_p->eq_rec_fmt_type >> 3) { 954 case EQ_REC_MSI32: 955 msiq_rec_p->msiq_rec_type = MSI32_REC; 956 957 msiq_rec_p->msiq_rec_data.msi.msi_data = 958 eq_rec_p->eq_rec_data0; 959 break; 960 case EQ_REC_MSI64: 961 msiq_rec_p->msiq_rec_type = MSI64_REC; 962 963 msiq_rec_p->msiq_rec_data.msi.msi_data = 964 eq_rec_p->eq_rec_data0; 965 break; 966 case EQ_REC_MSG: 967 msiq_rec_p->msiq_rec_type = MSG_REC; 968 969 msiq_rec_p->msiq_rec_data.msg.msg_route = 970 eq_rec_p->eq_rec_fmt_type & 7; 971 msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid; 972 msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0; 973 break; 974 default: 975 cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: " 976 "0x%x is an unknown EQ record type", 977 ddi_driver_name(dip), ddi_get_instance(dip), 978 (int)eq_rec_p->eq_rec_fmt_type); 979 break; 980 } 981 982 msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid; 983 msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) | 984 (eq_rec_p->eq_rec_addr0 << 2)); 985 986 /* Zero out eq_rec_fmt_type field */ 987 eq_rec_p->eq_rec_fmt_type = 0; 988 } 989 990 /* 991 * MSI Functions: 992 */ 993 /*ARGSUSED*/ 994 int 995 px_lib_msi_init(dev_info_t *dip) 996 { 997 px_t *px_p = DIP_TO_STATE(dip); 998 px_msi_state_t *msi_state_p = &px_p->px_ib_p->ib_msi_state; 999 uint64_t ret; 1000 1001 DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip); 1002 1003 if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip), 1004 msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) { 1005 DBG(DBG_LIB_MSIQ, dip, "px_lib_msi_init failed, ret 0x%lx\n", 1006 ret); 1007 return (DDI_FAILURE); 1008 } 1009 1010 return (DDI_SUCCESS); 1011 } 1012 1013 /*ARGSUSED*/ 1014 int 1015 px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num, 1016 msiqid_t *msiq_id) 1017 { 1018 uint64_t ret; 1019 1020 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n", 1021 dip, msi_num); 1022 1023 if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip), 1024 msi_num, msiq_id)) != H_EOK) { 1025 DBG(DBG_LIB_MSI, dip, 1026 "hvio_msi_getmsiq failed, ret 0x%lx\n", ret); 1027 return (DDI_FAILURE); 1028 } 1029 1030 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n", 1031 *msiq_id); 1032 1033 return (DDI_SUCCESS); 1034 } 1035 1036 /*ARGSUSED*/ 1037 int 1038 px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num, 1039 msiqid_t msiq_id, msi_type_t msitype) 1040 { 1041 uint64_t ret; 1042 1043 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x " 1044 "msq_id 0x%x\n", dip, msi_num, msiq_id); 1045 1046 if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip), 1047 msi_num, msiq_id)) != H_EOK) { 1048 DBG(DBG_LIB_MSI, dip, 1049 "hvio_msi_setmsiq failed, ret 0x%lx\n", ret); 1050 return (DDI_FAILURE); 1051 } 1052 1053 return (DDI_SUCCESS); 1054 } 1055 1056 /*ARGSUSED*/ 1057 int 1058 px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num, 1059 pci_msi_valid_state_t *msi_valid_state) 1060 { 1061 uint64_t ret; 1062 1063 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n", 1064 dip, msi_num); 1065 1066 if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip), 1067 msi_num, msi_valid_state)) != H_EOK) { 1068 
DBG(DBG_LIB_MSI, dip, 1069 "hvio_msi_getvalid failed, ret 0x%lx\n", ret); 1070 return (DDI_FAILURE); 1071 } 1072 1073 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msiq_id 0x%x\n", 1074 *msi_valid_state); 1075 1076 return (DDI_SUCCESS); 1077 } 1078 1079 /*ARGSUSED*/ 1080 int 1081 px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num, 1082 pci_msi_valid_state_t msi_valid_state) 1083 { 1084 uint64_t ret; 1085 1086 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x " 1087 "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state); 1088 1089 if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip), 1090 msi_num, msi_valid_state)) != H_EOK) { 1091 DBG(DBG_LIB_MSI, dip, 1092 "hvio_msi_setvalid failed, ret 0x%lx\n", ret); 1093 return (DDI_FAILURE); 1094 } 1095 1096 return (DDI_SUCCESS); 1097 } 1098 1099 /*ARGSUSED*/ 1100 int 1101 px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num, 1102 pci_msi_state_t *msi_state) 1103 { 1104 uint64_t ret; 1105 1106 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n", 1107 dip, msi_num); 1108 1109 if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip), 1110 msi_num, msi_state)) != H_EOK) { 1111 DBG(DBG_LIB_MSI, dip, 1112 "hvio_msi_getstate failed, ret 0x%lx\n", ret); 1113 return (DDI_FAILURE); 1114 } 1115 1116 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n", 1117 *msi_state); 1118 1119 return (DDI_SUCCESS); 1120 } 1121 1122 /*ARGSUSED*/ 1123 int 1124 px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num, 1125 pci_msi_state_t msi_state) 1126 { 1127 uint64_t ret; 1128 1129 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x " 1130 "msi_state 0x%x\n", dip, msi_num, msi_state); 1131 1132 if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip), 1133 msi_num, msi_state)) != H_EOK) { 1134 DBG(DBG_LIB_MSI, dip, 1135 "hvio_msi_setstate failed, ret 0x%lx\n", ret); 1136 return (DDI_FAILURE); 1137 } 1138 1139 return (DDI_SUCCESS); 1140 } 1141 1142 /* 1143 * MSG Functions: 1144 */ 1145 /*ARGSUSED*/ 1146 int 1147 px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type, 1148 msiqid_t *msiq_id) 1149 { 1150 uint64_t ret; 1151 1152 DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n", 1153 dip, msg_type); 1154 1155 if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip), 1156 msg_type, msiq_id)) != H_EOK) { 1157 DBG(DBG_LIB_MSG, dip, 1158 "hvio_msg_getmsiq failed, ret 0x%lx\n", ret); 1159 return (DDI_FAILURE); 1160 } 1161 1162 DBG(DBG_LIB_MSI, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n", 1163 *msiq_id); 1164 1165 return (DDI_SUCCESS); 1166 } 1167 1168 /*ARGSUSED*/ 1169 int 1170 px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type, 1171 msiqid_t msiq_id) 1172 { 1173 uint64_t ret; 1174 1175 DBG(DBG_LIB_MSG, dip, "px_lib_msi_setstate: dip 0x%p msg_type 0x%x " 1176 "msiq_id 0x%x\n", dip, msg_type, msiq_id); 1177 1178 if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip), 1179 msg_type, msiq_id)) != H_EOK) { 1180 DBG(DBG_LIB_MSG, dip, 1181 "hvio_msg_setmsiq failed, ret 0x%lx\n", ret); 1182 return (DDI_FAILURE); 1183 } 1184 1185 return (DDI_SUCCESS); 1186 } 1187 1188 /*ARGSUSED*/ 1189 int 1190 px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type, 1191 pcie_msg_valid_state_t *msg_valid_state) 1192 { 1193 uint64_t ret; 1194 1195 DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n", 1196 dip, msg_type); 1197 1198 if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type, 1199 msg_valid_state)) != H_EOK) { 1200 DBG(DBG_LIB_MSG, dip, 1201 "hvio_msg_getvalid failed, ret 0x%lx\n", ret); 1202 return (DDI_FAILURE); 1203 } 
1204 1205 DBG(DBG_LIB_MSI, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n", 1206 *msg_valid_state); 1207 1208 return (DDI_SUCCESS); 1209 } 1210 1211 /*ARGSUSED*/ 1212 int 1213 px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type, 1214 pcie_msg_valid_state_t msg_valid_state) 1215 { 1216 uint64_t ret; 1217 1218 DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x " 1219 "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state); 1220 1221 if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type, 1222 msg_valid_state)) != H_EOK) { 1223 DBG(DBG_LIB_MSG, dip, 1224 "hvio_msg_setvalid failed, ret 0x%lx\n", ret); 1225 return (DDI_FAILURE); 1226 } 1227 1228 return (DDI_SUCCESS); 1229 } 1230 1231 /* 1232 * Suspend/Resume Functions: 1233 * Currently unsupported by hypervisor 1234 */ 1235 int 1236 px_lib_suspend(dev_info_t *dip) 1237 { 1238 px_t *px_p = DIP_TO_STATE(dip); 1239 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1240 px_cb_t *cb_p = PX2CB(px_p); 1241 devhandle_t dev_hdl, xbus_dev_hdl; 1242 uint64_t ret = H_EOK; 1243 1244 DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip); 1245 1246 dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR]; 1247 xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC]; 1248 1249 if ((ret = hvio_suspend(dev_hdl, pxu_p)) != H_EOK) 1250 goto fail; 1251 1252 if (--cb_p->attachcnt == 0) { 1253 ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p); 1254 if (ret != H_EOK) 1255 cb_p->attachcnt++; 1256 } 1257 1258 fail: 1259 return ((ret != H_EOK) ? DDI_FAILURE: DDI_SUCCESS); 1260 } 1261 1262 void 1263 px_lib_resume(dev_info_t *dip) 1264 { 1265 px_t *px_p = DIP_TO_STATE(dip); 1266 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1267 px_cb_t *cb_p = PX2CB(px_p); 1268 devhandle_t dev_hdl, xbus_dev_hdl; 1269 devino_t pec_ino = px_p->px_inos[PX_INTR_PEC]; 1270 devino_t xbc_ino = px_p->px_inos[PX_INTR_XBC]; 1271 1272 DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip); 1273 1274 dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR]; 1275 xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC]; 1276 1277 if (++cb_p->attachcnt == 1) 1278 hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p); 1279 1280 hvio_resume(dev_hdl, pec_ino, pxu_p); 1281 } 1282 1283 /* 1284 * Generate a unique Oberon UBC ID based on the Logical System Board and 1285 * the IO Channel from the portid property field. 1286 */ 1287 static uint64_t 1288 oberon_get_ubc_id(dev_info_t *dip) 1289 { 1290 px_t *px_p = DIP_TO_STATE(dip); 1291 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1292 uint64_t ubc_id; 1293 1294 /* 1295 * Generate a unique 6 bit UBC ID using the 2 IO_Channel#[1:0] bits and 1296 * the 4 LSB_ID[3:0] bits from the Oberon's portid property. 1297 */ 1298 ubc_id = (((pxu_p->portid >> OBERON_PORT_ID_IOC) & 1299 OBERON_PORT_ID_IOC_MASK) | (((pxu_p->portid >> 1300 OBERON_PORT_ID_LSB) & OBERON_PORT_ID_LSB_MASK) 1301 << OBERON_UBC_ID_LSB)); 1302 1303 return (ubc_id); 1304 } 1305 1306 /* 1307 * Oberon does not have a UBC scratch register, so allocate an array of scratch 1308 * registers when needed and use a unique UBC ID as an index. This code 1309 * could be simplified by using a pre-allocated array; the array is currently 1310 * allocated dynamically because it is only needed by the Oberon. 
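 * The 6-bit UBC ID returned by oberon_get_ubc_id() is used directly as the array index, so the array holds OBERON_UBC_ID_MAX entries and a given leaf's saved value lives at px_oberon_ubc_scratch_regs[ubc_id].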
1311 */ 1312 static void 1313 oberon_set_cb(dev_info_t *dip, uint64_t val) 1314 { 1315 uint64_t ubc_id; 1316 1317 if (px_oberon_ubc_scratch_regs == NULL) 1318 px_oberon_ubc_scratch_regs = 1319 (uint64_t *)kmem_zalloc(sizeof (uint64_t)* 1320 OBERON_UBC_ID_MAX, KM_SLEEP); 1321 1322 ubc_id = oberon_get_ubc_id(dip); 1323 1324 px_oberon_ubc_scratch_regs[ubc_id] = val; 1325 1326 /* 1327 * Check if any scratch registers are still in use. If all scratch 1328 * registers are currently set to zero, then deallocate the scratch 1329 * register array. 1330 */ 1331 for (ubc_id = 0; ubc_id < OBERON_UBC_ID_MAX; ubc_id++) { 1332 if (px_oberon_ubc_scratch_regs[ubc_id] != NULL) 1333 return; 1334 } 1335 1336 /* 1337 * All scratch registers are set to zero so deallocate the scratch 1338 * register array and set the pointer to NULL. 1339 */ 1340 kmem_free(px_oberon_ubc_scratch_regs, 1341 (sizeof (uint64_t)*OBERON_UBC_ID_MAX)); 1342 1343 px_oberon_ubc_scratch_regs = NULL; 1344 } 1345 1346 /* 1347 * Oberon does not have a UBC scratch register, so use an allocated array of 1348 * scratch registers and use the unique UBC ID as an index into that array. 1349 */ 1350 static uint64_t 1351 oberon_get_cb(dev_info_t *dip) 1352 { 1353 uint64_t ubc_id; 1354 1355 if (px_oberon_ubc_scratch_regs == NULL) 1356 return (0); 1357 1358 ubc_id = oberon_get_ubc_id(dip); 1359 1360 return (px_oberon_ubc_scratch_regs[ubc_id]); 1361 } 1362 1363 /* 1364 * Misc Functions: 1365 * Currently unsupported by hypervisor 1366 */ 1367 static uint64_t 1368 px_get_cb(dev_info_t *dip) 1369 { 1370 px_t *px_p = DIP_TO_STATE(dip); 1371 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1372 1373 /* 1374 * Oberon does not currently have Scratchpad registers. 1375 */ 1376 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) 1377 return (oberon_get_cb(dip)); 1378 1379 return (CSR_XR((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1)); 1380 } 1381 1382 static void 1383 px_set_cb(dev_info_t *dip, uint64_t val) 1384 { 1385 px_t *px_p = DIP_TO_STATE(dip); 1386 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1387 1388 /* 1389 * Oberon does not currently have Scratchpad registers. 1390 */ 1391 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) { 1392 oberon_set_cb(dip, val); 1393 return; 1394 } 1395 1396 CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, val); 1397 } 1398 1399 /*ARGSUSED*/ 1400 int 1401 px_lib_map_vconfig(dev_info_t *dip, 1402 ddi_map_req_t *mp, pci_config_offset_t off, 1403 pci_regspec_t *rp, caddr_t *addrp) 1404 { 1405 /* 1406 * No special config space access services in this layer. 
1407 */ 1408 return (DDI_FAILURE); 1409 } 1410 1411 void 1412 px_lib_map_attr_check(ddi_map_req_t *mp) 1413 { 1414 ddi_acc_hdl_t *hp = mp->map_handlep; 1415 1416 /* fire does not accept byte masks from PIO store merge */ 1417 if (hp->ah_acc.devacc_attr_dataorder == DDI_STORECACHING_OK_ACC) 1418 hp->ah_acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 1419 } 1420 1421 void 1422 px_lib_clr_errs(px_t *px_p) 1423 { 1424 px_pec_t *pec_p = px_p->px_pec_p; 1425 dev_info_t *rpdip = px_p->px_dip; 1426 int err = PX_OK, ret; 1427 int acctype = pec_p->pec_safeacc_type; 1428 ddi_fm_error_t derr; 1429 1430 /* Create the derr */ 1431 bzero(&derr, sizeof (ddi_fm_error_t)); 1432 derr.fme_version = DDI_FME_VERSION; 1433 derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1); 1434 derr.fme_flag = acctype; 1435 1436 if (acctype == DDI_FM_ERR_EXPECTED) { 1437 derr.fme_status = DDI_FM_NONFATAL; 1438 ndi_fm_acc_err_set(pec_p->pec_acc_hdl, &derr); 1439 } 1440 1441 mutex_enter(&px_p->px_fm_mutex); 1442 1443 /* send ereport/handle/clear fire registers */ 1444 err = px_err_handle(px_p, &derr, PX_LIB_CALL, B_TRUE); 1445 1446 /* Check all child devices for errors */ 1447 ret = ndi_fm_handler_dispatch(rpdip, NULL, &derr); 1448 1449 mutex_exit(&px_p->px_fm_mutex); 1450 1451 /* 1452 * PX_FATAL_HW indicates a condition recovered from Fatal-Reset, 1453 * therefore it does not cause panic. 1454 */ 1455 if ((err & (PX_FATAL_GOS | PX_FATAL_SW)) || (ret == DDI_FM_FATAL)) 1456 PX_FM_PANIC("Fatal System Port Error has occurred\n"); 1457 } 1458 1459 #ifdef DEBUG 1460 int px_peekfault_cnt = 0; 1461 int px_pokefault_cnt = 0; 1462 #endif /* DEBUG */ 1463 1464 /*ARGSUSED*/ 1465 static int 1466 px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip, 1467 peekpoke_ctlops_t *in_args) 1468 { 1469 px_t *px_p = DIP_TO_STATE(dip); 1470 px_pec_t *pec_p = px_p->px_pec_p; 1471 int err = DDI_SUCCESS; 1472 on_trap_data_t otd; 1473 1474 mutex_enter(&pec_p->pec_pokefault_mutex); 1475 pec_p->pec_ontrap_data = &otd; 1476 pec_p->pec_safeacc_type = DDI_FM_ERR_POKE; 1477 1478 /* Set up protected environment. */ 1479 if (!on_trap(&otd, OT_DATA_ACCESS)) { 1480 uintptr_t tramp = otd.ot_trampoline; 1481 1482 otd.ot_trampoline = (uintptr_t)&poke_fault; 1483 err = do_poke(in_args->size, (void *)in_args->dev_addr, 1484 (void *)in_args->host_addr); 1485 otd.ot_trampoline = tramp; 1486 } else 1487 err = DDI_FAILURE; 1488 1489 px_lib_clr_errs(px_p); 1490 1491 if (otd.ot_trap & OT_DATA_ACCESS) 1492 err = DDI_FAILURE; 1493 1494 /* Take down protected environment. */ 1495 no_trap(); 1496 1497 pec_p->pec_ontrap_data = NULL; 1498 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1499 mutex_exit(&pec_p->pec_pokefault_mutex); 1500 1501 #ifdef DEBUG 1502 if (err == DDI_FAILURE) 1503 px_pokefault_cnt++; 1504 #endif 1505 return (err); 1506 } 1507 1508 /*ARGSUSED*/ 1509 static int 1510 px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip, 1511 peekpoke_ctlops_t *cautacc_ctlops_arg) 1512 { 1513 size_t size = cautacc_ctlops_arg->size; 1514 uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr; 1515 uintptr_t host_addr = cautacc_ctlops_arg->host_addr; 1516 ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle; 1517 size_t repcount = cautacc_ctlops_arg->repcount; 1518 uint_t flags = cautacc_ctlops_arg->flags; 1519 1520 px_t *px_p = DIP_TO_STATE(dip); 1521 px_pec_t *pec_p = px_p->px_pec_p; 1522 int err = DDI_SUCCESS; 1523 1524 /* 1525 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault 1526 * mutex. 
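 * so, unlike px_lib_do_poke(), this routine does not (and must not) take pec_pokefault_mutex directly.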
1527 */ 1528 i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1529 1530 pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap; 1531 pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED; 1532 hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED; 1533 1534 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1535 for (; repcount; repcount--) { 1536 switch (size) { 1537 1538 case sizeof (uint8_t): 1539 i_ddi_put8(hp, (uint8_t *)dev_addr, 1540 *(uint8_t *)host_addr); 1541 break; 1542 1543 case sizeof (uint16_t): 1544 i_ddi_put16(hp, (uint16_t *)dev_addr, 1545 *(uint16_t *)host_addr); 1546 break; 1547 1548 case sizeof (uint32_t): 1549 i_ddi_put32(hp, (uint32_t *)dev_addr, 1550 *(uint32_t *)host_addr); 1551 break; 1552 1553 case sizeof (uint64_t): 1554 i_ddi_put64(hp, (uint64_t *)dev_addr, 1555 *(uint64_t *)host_addr); 1556 break; 1557 } 1558 1559 host_addr += size; 1560 1561 if (flags == DDI_DEV_AUTOINCR) 1562 dev_addr += size; 1563 1564 px_lib_clr_errs(px_p); 1565 1566 if (pec_p->pec_ontrap_data->ot_trap & OT_DATA_ACCESS) { 1567 err = DDI_FAILURE; 1568 #ifdef DEBUG 1569 px_pokefault_cnt++; 1570 #endif 1571 break; 1572 } 1573 } 1574 } 1575 1576 i_ddi_notrap((ddi_acc_handle_t)hp); 1577 pec_p->pec_ontrap_data = NULL; 1578 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1579 i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1580 hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED; 1581 1582 return (err); 1583 } 1584 1585 1586 int 1587 px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip, 1588 peekpoke_ctlops_t *in_args) 1589 { 1590 return (in_args->handle ? px_lib_do_caut_put(dip, rdip, in_args) : 1591 px_lib_do_poke(dip, rdip, in_args)); 1592 } 1593 1594 1595 /*ARGSUSED*/ 1596 static int 1597 px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args) 1598 { 1599 px_t *px_p = DIP_TO_STATE(dip); 1600 px_pec_t *pec_p = px_p->px_pec_p; 1601 int err = DDI_SUCCESS; 1602 on_trap_data_t otd; 1603 1604 mutex_enter(&pec_p->pec_pokefault_mutex); 1605 pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK; 1606 1607 if (!on_trap(&otd, OT_DATA_ACCESS)) { 1608 uintptr_t tramp = otd.ot_trampoline; 1609 1610 otd.ot_trampoline = (uintptr_t)&peek_fault; 1611 err = do_peek(in_args->size, (void *)in_args->dev_addr, 1612 (void *)in_args->host_addr); 1613 otd.ot_trampoline = tramp; 1614 } else 1615 err = DDI_FAILURE; 1616 1617 no_trap(); 1618 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1619 mutex_exit(&pec_p->pec_pokefault_mutex); 1620 1621 #ifdef DEBUG 1622 if (err == DDI_FAILURE) 1623 px_peekfault_cnt++; 1624 #endif 1625 return (err); 1626 } 1627 1628 1629 static int 1630 px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg) 1631 { 1632 size_t size = cautacc_ctlops_arg->size; 1633 uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr; 1634 uintptr_t host_addr = cautacc_ctlops_arg->host_addr; 1635 ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle; 1636 size_t repcount = cautacc_ctlops_arg->repcount; 1637 uint_t flags = cautacc_ctlops_arg->flags; 1638 1639 px_t *px_p = DIP_TO_STATE(dip); 1640 px_pec_t *pec_p = px_p->px_pec_p; 1641 int err = DDI_SUCCESS; 1642 1643 /* 1644 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault 1645 * mutex. 
1646 */ 1647 i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1648 1649 pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap; 1650 pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED; 1651 hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED; 1652 1653 if (repcount == 1) { 1654 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1655 i_ddi_caut_get(size, (void *)dev_addr, 1656 (void *)host_addr); 1657 } else { 1658 int i; 1659 uint8_t *ff_addr = (uint8_t *)host_addr; 1660 for (i = 0; i < size; i++) 1661 *ff_addr++ = 0xff; 1662 1663 err = DDI_FAILURE; 1664 #ifdef DEBUG 1665 px_peekfault_cnt++; 1666 #endif 1667 } 1668 } else { 1669 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1670 for (; repcount; repcount--) { 1671 i_ddi_caut_get(size, (void *)dev_addr, 1672 (void *)host_addr); 1673 1674 host_addr += size; 1675 1676 if (flags == DDI_DEV_AUTOINCR) 1677 dev_addr += size; 1678 } 1679 } else { 1680 err = DDI_FAILURE; 1681 #ifdef DEBUG 1682 px_peekfault_cnt++; 1683 #endif 1684 } 1685 } 1686 1687 i_ddi_notrap((ddi_acc_handle_t)hp); 1688 pec_p->pec_ontrap_data = NULL; 1689 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1690 i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1691 hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED; 1692 1693 return (err); 1694 } 1695 1696 /*ARGSUSED*/ 1697 int 1698 px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip, 1699 peekpoke_ctlops_t *in_args, void *result) 1700 { 1701 result = (void *)in_args->host_addr; 1702 return (in_args->handle ? px_lib_do_caut_get(dip, in_args) : 1703 px_lib_do_peek(dip, in_args)); 1704 } 1705 1706 /* 1707 * Implements the PPM interface. 1708 */ 1709 int 1710 px_lib_pmctl(int cmd, px_t *px_p) 1711 { 1712 ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ); 1713 switch (cmd) { 1714 case PPMREQ_PRE_PWR_OFF: 1715 /* 1716 * Currently there is no device power management for 1717 * the root complex (fire). When there is, we need to make 1718 * sure that it is at full power before trying to send the 1719 * PME_Turn_Off message. 1720 */ 1721 DBG(DBG_PWR, px_p->px_dip, 1722 "ioctl: request to send PME_Turn_Off\n"); 1723 return (px_goto_l23ready(px_p)); 1724 1725 case PPMREQ_PRE_PWR_ON: 1726 DBG(DBG_PWR, px_p->px_dip, "ioctl: PRE_PWR_ON request\n"); 1727 return (px_pre_pwron_check(px_p)); 1728 1729 case PPMREQ_POST_PWR_ON: 1730 DBG(DBG_PWR, px_p->px_dip, "ioctl: POST_PWR_ON request\n"); 1731 return (px_goto_l0(px_p)); 1732 1733 default: 1734 return (DDI_FAILURE); 1735 } 1736 } 1737 1738 /* 1739 * Sends a PME_Turn_Off message to put the link in L2/L3 ready state. 1740 * Called by px_ioctl. 1741 * Returns DDI_SUCCESS or DDI_FAILURE. 1742 * 1. Wait for link to be in L1 state (link status reg) 1743 * 2. Write to the PME_Turn_Off reg to broadcast 1744 * 3. Set timeout 1745 * 4. If timeout, return failure. 1746 * 5. 
If PME_To_ACK is received, wait till link is in L2/L3 ready 1747 */ 1748 static int 1749 px_goto_l23ready(px_t *px_p) 1750 { 1751 pcie_pwr_t *pwr_p; 1752 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1753 caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR]; 1754 int ret = DDI_SUCCESS; 1755 clock_t end, timeleft; 1756 int mutex_held = 1; 1757 1758 /* If no PM info, return failure */ 1759 if (!PCIE_PMINFO(px_p->px_dip) || 1760 !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip))) 1761 return (DDI_FAILURE); 1762 1763 mutex_enter(&pwr_p->pwr_lock); 1764 mutex_enter(&px_p->px_l23ready_lock); 1765 /* Clear the PME_To_ACK received flag */ 1766 px_p->px_pm_flags &= ~PX_PMETOACK_RECVD; 1767 /* 1768 * When P25 is the downstream device, after receiving 1769 * PME_To_ACK, Fire will go to Detect state, which causes 1770 * the link down event. Inform FMA that this is expected. 1771 * In case of all other cards compliant with the PCI Express 1772 * spec, this will happen when the power is re-applied. FMA 1773 * code will clear this flag after one instance of LDN. Since 1774 * there will not be an LDN event for the spec compliant cards, 1775 * we need to clear the flag after receiving PME_To_ACK. 1776 */ 1777 px_p->px_pm_flags |= PX_LDN_EXPECTED; 1778 if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) { 1779 ret = DDI_FAILURE; 1780 goto l23ready_done; 1781 } 1782 px_p->px_pm_flags |= PX_PME_TURNOFF_PENDING; 1783 1784 end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout); 1785 while (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) { 1786 timeleft = cv_timedwait(&px_p->px_l23ready_cv, 1787 &px_p->px_l23ready_lock, end); 1788 /* 1789 * If cv_timedwait returns -1, it is either: 1790 * 1) timed out, or 1791 * 2) there was a premature wakeup, but by the time 1792 * cv_timedwait is called again end < lbolt, i.e. 1793 * end is in the past, or 1794 * 3) by the time we make the first cv_timedwait call, 1795 * end < lbolt is already true. 1796 */ 1797 if (timeleft == -1) 1798 break; 1799 } 1800 if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) { 1801 /* 1802 * Either we timed out or the interrupt didn't get a 1803 * chance to grab the mutex and set the flag. 1804 * Release the mutex and delay for some time. 1805 * This will 1) give the interrupt a chance to 1806 * set the flag and 2) create a delay between two 1807 * consecutive requests. 1808 */ 1809 mutex_exit(&px_p->px_l23ready_lock); 1810 delay(drv_usectohz(50 * PX_MSEC_TO_USEC)); 1811 mutex_held = 0; 1812 if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) { 1813 ret = DDI_FAILURE; 1814 DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting" 1815 " for PME_TO_ACK\n"); 1816 } 1817 } 1818 px_p->px_pm_flags &= 1819 ~(PX_PME_TURNOFF_PENDING | PX_PMETOACK_RECVD | PX_LDN_EXPECTED); 1820 1821 l23ready_done: 1822 if (mutex_held) 1823 mutex_exit(&px_p->px_l23ready_lock); 1824 /* 1825 * Wait till link is in L1 idle, if sending PME_Turn_Off 1826 * was successful. 1827 */ 1828 if (ret == DDI_SUCCESS) { 1829 if (px_link_wait4l1idle(csr_base) != DDI_SUCCESS) { 1830 DBG(DBG_PWR, px_p->px_dip, " Link is not at L1" 1831 " even though we received PME_To_ACK.\n"); 1832 /* 1833 * Workaround for hardware bug with P25. 1834 * Due to a hardware bug with P25, the link state 1835 * will be Detect rather than L1 after the 1836 * link is transitioned to L23Ready state. Since 1837 * we can't tell whether the link is in L23Ready state 1838 * unless Fire's state is L1_idle, we delay 1839 * here just to make sure that we wait till the link 1840 * is transitioned to L23Ready state. 
1841 */ 1842 delay(drv_usectohz(100 * PX_MSEC_TO_USEC)); 1843 } 1844 pwr_p->pwr_link_lvl = PM_LEVEL_L3; 1845 1846 } 1847 mutex_exit(&pwr_p->pwr_lock); 1848 return (ret); 1849 } 1850 1851 /* 1852 * Message interrupt handler intended to be shared for both 1853 * PME and PME_TO_ACK msg handling, currently only handles 1854 * PME_To_ACK message. 1855 */ 1856 uint_t 1857 px_pmeq_intr(caddr_t arg) 1858 { 1859 px_t *px_p = (px_t *)arg; 1860 1861 DBG(DBG_PWR, px_p->px_dip, " PME_To_ACK received \n"); 1862 mutex_enter(&px_p->px_l23ready_lock); 1863 cv_broadcast(&px_p->px_l23ready_cv); 1864 if (px_p->px_pm_flags & PX_PME_TURNOFF_PENDING) { 1865 px_p->px_pm_flags |= PX_PMETOACK_RECVD; 1866 } else { 1867 /* 1868 * This maybe the second ack received. If so then, 1869 * we should be receiving it during wait4L1 stage. 1870 */ 1871 px_p->px_pmetoack_ignored++; 1872 } 1873 mutex_exit(&px_p->px_l23ready_lock); 1874 return (DDI_INTR_CLAIMED); 1875 } 1876 1877 static int 1878 px_pre_pwron_check(px_t *px_p) 1879 { 1880 pcie_pwr_t *pwr_p; 1881 1882 /* If no PM info, return failure */ 1883 if (!PCIE_PMINFO(px_p->px_dip) || 1884 !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip))) 1885 return (DDI_FAILURE); 1886 1887 /* 1888 * For the spec compliant downstream cards link down 1889 * is expected when the device is powered on. 1890 */ 1891 px_p->px_pm_flags |= PX_LDN_EXPECTED; 1892 return (pwr_p->pwr_link_lvl == PM_LEVEL_L3 ? DDI_SUCCESS : DDI_FAILURE); 1893 } 1894 1895 static int 1896 px_goto_l0(px_t *px_p) 1897 { 1898 pcie_pwr_t *pwr_p; 1899 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1900 caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR]; 1901 int ret = DDI_SUCCESS; 1902 uint64_t time_spent = 0; 1903 1904 /* If no PM info, return failure */ 1905 if (!PCIE_PMINFO(px_p->px_dip) || 1906 !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip))) 1907 return (DDI_FAILURE); 1908 1909 mutex_enter(&pwr_p->pwr_lock); 1910 /* 1911 * The following link retrain activity will cause LDN and LUP event. 1912 * Receiving LDN prior to receiving LUP is expected, not an error in 1913 * this case. Receiving LUP indicates link is fully up to support 1914 * powering up down stream device, and of course any further LDN and 1915 * LUP outside this context will be error. 1916 */ 1917 px_p->px_lup_pending = 1; 1918 if (px_link_retrain(csr_base) != DDI_SUCCESS) { 1919 ret = DDI_FAILURE; 1920 goto l0_done; 1921 } 1922 1923 /* LUP event takes the order of 15ms amount of time to occur */ 1924 for (; px_p->px_lup_pending && (time_spent < px_lup_poll_to); 1925 time_spent += px_lup_poll_interval) 1926 drv_usecwait(px_lup_poll_interval); 1927 if (px_p->px_lup_pending) 1928 ret = DDI_FAILURE; 1929 l0_done: 1930 px_enable_detect_quiet(csr_base); 1931 if (ret == DDI_SUCCESS) 1932 pwr_p->pwr_link_lvl = PM_LEVEL_L0; 1933 mutex_exit(&pwr_p->pwr_lock); 1934 return (ret); 1935 } 1936 1937 /* 1938 * Extract the drivers binding name to identify which chip we're binding to. 1939 * Whenever a new bus bridge is created, the driver alias entry should be 1940 * added here to identify the device if needed. If a device isn't added, 1941 * the identity defaults to PX_CHIP_UNIDENTIFIED. 
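 * For example, a hypothetical new bridge bound as "pciex108e,xxxx" would get its own strcmp() check below, returning the appropriate PX_CHIP_* value (optionally gated on "module-revision#", as is done for Fire).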
1942 */ 1943 static uint32_t 1944 px_identity_init(px_t *px_p) 1945 { 1946 dev_info_t *dip = px_p->px_dip; 1947 char *name = ddi_binding_name(dip); 1948 uint32_t revision = 0; 1949 1950 revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1951 "module-revision#", 0); 1952 1953 /* Check for Fire driver binding name */ 1954 if (strcmp(name, "pciex108e,80f0") == 0) { 1955 DBG(DBG_ATTACH, dip, "px_identity_init: %s%d: " 1956 "(FIRE), module-revision %d\n", NAMEINST(dip), 1957 revision); 1958 1959 return ((revision >= FIRE_MOD_REV_20) ? 1960 PX_CHIP_FIRE : PX_CHIP_UNIDENTIFIED); 1961 } 1962 1963 /* Check for Oberon driver binding name */ 1964 if (strcmp(name, "pciex108e,80f8") == 0) { 1965 DBG(DBG_ATTACH, dip, "px_identity_init: %s%d: " 1966 "(OBERON), module-revision %d\n", NAMEINST(dip), 1967 revision); 1968 1969 return (PX_CHIP_OBERON); 1970 } 1971 1972 DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n", 1973 ddi_driver_name(dip), ddi_get_instance(dip), name, revision); 1974 1975 return (PX_CHIP_UNIDENTIFIED); 1976 } 1977 1978 int 1979 px_err_add_intr(px_fault_t *px_fault_p) 1980 { 1981 dev_info_t *dip = px_fault_p->px_fh_dip; 1982 px_t *px_p = DIP_TO_STATE(dip); 1983 1984 VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL, 1985 px_fault_p->px_err_func, (caddr_t)px_fault_p, NULL) == 0); 1986 1987 px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino); 1988 1989 return (DDI_SUCCESS); 1990 } 1991 1992 void 1993 px_err_rem_intr(px_fault_t *px_fault_p) 1994 { 1995 dev_info_t *dip = px_fault_p->px_fh_dip; 1996 px_t *px_p = DIP_TO_STATE(dip); 1997 1998 px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino, 1999 IB_INTR_WAIT); 2000 2001 rem_ivintr(px_fault_p->px_fh_sysino, NULL); 2002 } 2003 2004 /* 2005 * px_cb_add_intr() - Called from attach(9E) to create CB if not yet 2006 * created, to add CB interrupt vector always, but enable only once. 2007 */ 2008 int 2009 px_cb_add_intr(px_fault_t *fault_p) 2010 { 2011 px_t *px_p = DIP_TO_STATE(fault_p->px_fh_dip); 2012 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 2013 px_cb_t *cb_p = (px_cb_t *)px_get_cb(fault_p->px_fh_dip); 2014 px_cb_list_t *pxl, *pxl_new; 2015 cpuid_t cpuid; 2016 2017 2018 if (cb_p == NULL) { 2019 cb_p = kmem_zalloc(sizeof (px_cb_t), KM_SLEEP); 2020 mutex_init(&cb_p->cb_mutex, NULL, MUTEX_DRIVER, NULL); 2021 cb_p->px_cb_func = px_cb_intr; 2022 pxu_p->px_cb_p = cb_p; 2023 px_set_cb(fault_p->px_fh_dip, (uint64_t)cb_p); 2024 2025 /* px_lib_dev_init allows only FIRE and OBERON */ 2026 px_err_reg_enable( 2027 (pxu_p->chip_type == PX_CHIP_FIRE) ? 2028 PX_ERR_JBC : PX_ERR_UBC, 2029 pxu_p->px_address[PX_REG_XBC]); 2030 } else 2031 pxu_p->px_cb_p = cb_p; 2032 2033 mutex_enter(&cb_p->cb_mutex); 2034 2035 VERIFY(add_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL, 2036 cb_p->px_cb_func, (caddr_t)cb_p, NULL) == 0); 2037 2038 if (cb_p->pxl == NULL) { 2039 2040 cpuid = intr_dist_cpuid(); 2041 px_ib_intr_enable(px_p, cpuid, fault_p->px_intr_ino); 2042 2043 pxl = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP); 2044 pxl->pxp = px_p; 2045 2046 cb_p->pxl = pxl; 2047 cb_p->sysino = fault_p->px_fh_sysino; 2048 cb_p->cpuid = cpuid; 2049 2050 } else { 2051 /* 2052 * Find the last pxl, or 2053 * stop short on encountering a redundant (already registered) px, or 2054 * both. 
/*
 * px_cb_add_intr() - Called from attach(9E) to create the CB if it does not
 * yet exist and to add the CB interrupt vector; the vector is always added
 * but enabled only once.
 */
int
px_cb_add_intr(px_fault_t *fault_p)
{
	px_t		*px_p = DIP_TO_STATE(fault_p->px_fh_dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	px_cb_t		*cb_p = (px_cb_t *)px_get_cb(fault_p->px_fh_dip);
	px_cb_list_t	*pxl, *pxl_new;
	cpuid_t		cpuid;

	if (cb_p == NULL) {
		cb_p = kmem_zalloc(sizeof (px_cb_t), KM_SLEEP);
		mutex_init(&cb_p->cb_mutex, NULL, MUTEX_DRIVER, NULL);
		cb_p->px_cb_func = px_cb_intr;
		pxu_p->px_cb_p = cb_p;
		px_set_cb(fault_p->px_fh_dip, (uint64_t)cb_p);

		/* px_lib_dev_init allows only FIRE and OBERON */
		px_err_reg_enable(
		    (pxu_p->chip_type == PX_CHIP_FIRE) ?
			PX_ERR_JBC : PX_ERR_UBC,
		    pxu_p->px_address[PX_REG_XBC]);
	} else
		pxu_p->px_cb_p = cb_p;

	mutex_enter(&cb_p->cb_mutex);

	VERIFY(add_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL,
	    cb_p->px_cb_func, (caddr_t)cb_p, NULL) == 0);

	if (cb_p->pxl == NULL) {

		cpuid = intr_dist_cpuid();
		px_ib_intr_enable(px_p, cpuid, fault_p->px_intr_ino);

		pxl = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
		pxl->pxp = px_p;

		cb_p->pxl = pxl;
		cb_p->sysino = fault_p->px_fh_sysino;
		cb_p->cpuid = cpuid;

	} else {
		/*
		 * Walk to the last pxl, stopping short if a redundant
		 * entry (this px_p) is encountered.
		 */
		pxl = cb_p->pxl;
		for (; !(pxl->pxp == px_p) && pxl->next; pxl = pxl->next)
			;
		if (pxl->pxp == px_p) {
			cmn_err(CE_WARN, "px_cb_add_intr: reregister sysino "
			    "%lx by px_p 0x%p\n", cb_p->sysino, (void *)px_p);
			mutex_exit(&cb_p->cb_mutex);
			return (DDI_FAILURE);
		}

		/* add to linked list */
		pxl_new = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
		pxl_new->pxp = px_p;
		pxl->next = pxl_new;
	}
	cb_p->attachcnt++;

	mutex_exit(&cb_p->cb_mutex);

	return (DDI_SUCCESS);
}

/*
 * px_cb_rem_intr() - Called from detach(9E) to remove this px's CB
 * interrupt vector, shift the interrupt proxy to the next available px,
 * or disable the CB interrupt when this is the last px attached.
 */
void
px_cb_rem_intr(px_fault_t *fault_p)
{
	px_t		*px_p = DIP_TO_STATE(fault_p->px_fh_dip), *pxp;
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	px_cb_t		*cb_p = PX2CB(px_p);
	px_cb_list_t	*pxl, *prev;
	px_fault_t	*f_p;

	ASSERT(cb_p->pxl);

	/* De-list the target px, move the next px up */

	mutex_enter(&cb_p->cb_mutex);

	pxl = cb_p->pxl;
	if (pxl->pxp == px_p) {
		cb_p->pxl = pxl->next;
	} else {
		prev = pxl;
		pxl = pxl->next;
		for (; pxl && (pxl->pxp != px_p); prev = pxl, pxl = pxl->next)
			;
		if (!pxl) {
			cmn_err(CE_WARN, "px_cb_rem_intr: can't find px_p 0x%p "
			    "in registered CB list.", (void *)px_p);
			mutex_exit(&cb_p->cb_mutex);
			return;
		}
		prev->next = pxl->next;
	}
	kmem_free(pxl, sizeof (px_cb_list_t));

	if (fault_p->px_fh_sysino == cb_p->sysino) {
		px_ib_intr_disable(px_p->px_ib_p, fault_p->px_intr_ino,
		    IB_INTR_WAIT);

		if (cb_p->pxl) {
			pxp = cb_p->pxl->pxp;
			f_p = &pxp->px_cb_fault;
			cb_p->sysino = f_p->px_fh_sysino;

			PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid);
			(void) px_lib_intr_setstate(pxp->px_dip, cb_p->sysino,
			    INTR_IDLE_STATE);
		}
	}

	rem_ivintr(fault_p->px_fh_sysino, NULL);
	pxu_p->px_cb_p = NULL;
	cb_p->attachcnt--;
	if (cb_p->pxl) {
		mutex_exit(&cb_p->cb_mutex);
		return;
	}
	mutex_exit(&cb_p->cb_mutex);

	/* px_lib_dev_init allows only FIRE and OBERON */
	px_err_reg_disable(
	    (pxu_p->chip_type == PX_CHIP_FIRE) ? PX_ERR_JBC : PX_ERR_UBC,
	    pxu_p->px_address[PX_REG_XBC]);

	mutex_destroy(&cb_p->cb_mutex);
	px_set_cb(fault_p->px_fh_dip, 0ull);
	kmem_free(cb_p, sizeof (px_cb_t));
}

/*
 * px_cb_intr() - sun4u only, CB interrupt dispatcher
 */
uint_t
px_cb_intr(caddr_t arg)
{
	px_cb_t		*cb_p = (px_cb_t *)arg;
	px_cb_list_t	*pxl = cb_p->pxl;
	px_t		*pxp = pxl ? pxl->pxp : NULL;
	px_fault_t	*fault_p;

	while (pxl && pxp && (pxp->px_state != PX_ATTACHED)) {
		pxl = pxl->next;
		pxp = (pxl) ? pxl->pxp : NULL;
	}

	if (pxp) {
		fault_p = &pxp->px_cb_fault;
		return (fault_p->px_err_func((caddr_t)fault_p));
	} else
		return (DDI_INTR_UNCLAIMED);
}

/*
 * px_cb_intr_redist() - sun4u only, CB interrupt redistribution
 */
void
px_cb_intr_redist(px_t *px_p)
{
	px_fault_t	*f_p = &px_p->px_cb_fault;
	px_cb_t		*cb_p = PX2CB(px_p);
	devino_t	ino = px_p->px_inos[PX_INTR_XBC];
	cpuid_t		cpuid;

	mutex_enter(&cb_p->cb_mutex);

	if (cb_p->sysino != f_p->px_fh_sysino) {
		mutex_exit(&cb_p->cb_mutex);
		return;
	}

	cb_p->cpuid = cpuid = intr_dist_cpuid();
	px_ib_intr_dist_en(px_p->px_dip, cpuid, ino, B_FALSE);

	mutex_exit(&cb_p->cb_mutex);
}

#ifdef FMA
void
px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status)
{
	/* populate the rc_status by reading the registers - TBD */
}
#endif /* FMA */

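/*
 * The raw config accessors below compute the physical address of a config
 * register as the parent base address from the "ranges" property (config
 * space bank) plus an offset derived from the bus/device/function and the
 * register offset via PX_BDF_TO_CFGADDR().  The LE_32() conversions account
 * for PCI config space being little-endian while ldphysio()/stphysio()
 * access physical memory in host (big-endian) byte order.
 */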
/*
 * Unprotected raw reads/writes of a fabric device's config space.
 * Only used for temporary PCI-E Fabric Error Handling.
 */
uint32_t
px_fab_get(px_t *px_p, pcie_req_id_t bdf, uint16_t offset)
{
	px_ranges_t	*rp = px_p->px_ranges_p;
	uint64_t	range_prop, base_addr;
	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
	uint32_t	val;

	/* Get Fire's Physical Base Address */
	range_prop = px_get_range_prop(px_p, rp, bank);

	/* Get config space first. */
	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);

	val = ldphysio(base_addr);

	return (LE_32(val));
}

void
px_fab_set(px_t *px_p, pcie_req_id_t bdf, uint16_t offset,
    uint32_t val)
{
	px_ranges_t	*rp = px_p->px_ranges_p;
	uint64_t	range_prop, base_addr;
	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);

	/* Get Fire's Physical Base Address */
	range_prop = px_get_range_prop(px_p, rp, bank);

	/* Get config space first. */
	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);

	stphysio(base_addr, LE_32(val));
}

/*
 * cpr callback
 *
 * Disable the fabric error msg interrupts prior to suspending
 * all device drivers; re-enable them after all devices are
 * resumed.
 */
static boolean_t
px_cpr_callb(void *arg, int code)
{
	px_t		*px_p = (px_t *)arg;
	px_ib_t		*ib_p = px_p->px_ib_p;
	px_pec_t	*pec_p = px_p->px_pec_p;
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t		csr_base;
	devino_t	ce_ino, nf_ino, f_ino;
	px_ib_ino_info_t	*ce_ino_p, *nf_ino_p, *f_ino_p;
	uint64_t	imu_log_enable, imu_intr_enable;
	uint64_t	imu_log_mask, imu_intr_mask;

	ce_ino = px_msiqid_to_devino(px_p, pec_p->pec_corr_msg_msiq_id);
	nf_ino = px_msiqid_to_devino(px_p, pec_p->pec_non_fatal_msg_msiq_id);
	f_ino = px_msiqid_to_devino(px_p, pec_p->pec_fatal_msg_msiq_id);
	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];

	imu_log_enable = CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE);
	imu_intr_enable = CSR_XR(csr_base, IMU_INTERRUPT_ENABLE);

	imu_log_mask = BITMASK(IMU_ERROR_LOG_ENABLE_FATAL_MES_NOT_EN_LOG_EN) |
	    BITMASK(IMU_ERROR_LOG_ENABLE_NONFATAL_MES_NOT_EN_LOG_EN) |
	    BITMASK(IMU_ERROR_LOG_ENABLE_COR_MES_NOT_EN_LOG_EN);

	imu_intr_mask =
	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_S_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_S_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_S_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_P_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_P_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_P_INT_EN);

	switch (code) {
	case CB_CODE_CPR_CHKPT:
		/* disable imu rbne on corr/nonfatal/fatal errors */
		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE,
		    imu_log_enable & (~imu_log_mask));

		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE,
		    imu_intr_enable & (~imu_intr_mask));

		/* disable CORR intr mapping */
		px_ib_intr_disable(ib_p, ce_ino, IB_INTR_NOWAIT);

		/* disable NON FATAL intr mapping */
		px_ib_intr_disable(ib_p, nf_ino, IB_INTR_NOWAIT);

		/* disable FATAL intr mapping */
		px_ib_intr_disable(ib_p, f_ino, IB_INTR_NOWAIT);

		break;

	case CB_CODE_CPR_RESUME:
		mutex_enter(&ib_p->ib_ino_lst_mutex);

		ce_ino_p = px_ib_locate_ino(ib_p, ce_ino);
		nf_ino_p = px_ib_locate_ino(ib_p, nf_ino);
		f_ino_p = px_ib_locate_ino(ib_p, f_ino);

		/* enable CORR intr mapping */
		if (ce_ino_p)
			px_ib_intr_enable(px_p, ce_ino_p->ino_cpuid, ce_ino);
		else
			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
			    "reenable PCIe Correctable msg intr.\n");

		/* enable NON FATAL intr mapping */
		if (nf_ino_p)
			px_ib_intr_enable(px_p, nf_ino_p->ino_cpuid, nf_ino);
		else
			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
			    "reenable PCIe Non Fatal msg intr.\n");

		/* enable FATAL intr mapping */
		if (f_ino_p)
			px_ib_intr_enable(px_p, f_ino_p->ino_cpuid, f_ino);
		else
			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
			    "reenable PCIe Fatal msg intr.\n");

		mutex_exit(&ib_p->ib_ino_lst_mutex);

		/* re-enable imu rbne on corr/nonfatal/fatal errors */
		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, (imu_log_enable |
		    (imu_log_mask & px_imu_log_mask)));
		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, (imu_intr_enable |
		    (imu_intr_mask & px_imu_intr_mask)));

		break;
	}

	return (B_TRUE);
}

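/*
 * The "ranges" property stores the parent (host) physical base address as a
 * parent_high/parent_low pair of 32-bit cells.  Only part of parent_high
 * contributes address bits, and that mask differs between Fire
 * (PX_RANGE_PROP_MASK) and Oberon (OBERON_RANGE_PROP_MASK); the helpers
 * below pick the proper mask for the chip and assemble the 64-bit base.
 */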
uint64_t
px_get_rng_parent_hi_mask(px_t *px_p)
{
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t mask;

	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		mask = OBERON_RANGE_PROP_MASK;
		break;
	case PX_CHIP_FIRE:
		mask = PX_RANGE_PROP_MASK;
		break;
	default:
		mask = PX_RANGE_PROP_MASK;
		break;
	}

	return (mask);
}

/*
 * Fetch the parent base address from the chip's "ranges" property entry.
 */
uint64_t
px_get_range_prop(px_t *px_p, px_ranges_t *rp, int bank)
{
	uint64_t mask, range_prop;

	mask = px_get_rng_parent_hi_mask(px_p);
	range_prop = (((uint64_t)(rp[bank].parent_high & mask)) << 32) |
	    rp[bank].parent_low;

	return (range_prop);
}

/*
 * add cpr callback
 */
void
px_cpr_add_callb(px_t *px_p)
{
	px_p->px_cprcb_id = callb_add(px_cpr_callb, (void *)px_p,
	    CB_CL_CPR_POST_USER, "px_cpr");
}

/*
 * remove cpr callback
 */
void
px_cpr_rem_callb(px_t *px_p)
{
	(void) callb_delete(px_p->px_cprcb_id);
}

/*ARGSUSED*/
static uint_t
px_hp_intr(caddr_t arg1, caddr_t arg2)
{
	px_t	*px_p = (px_t *)arg1;
	int	rval;

	rval = pciehpc_intr(px_p->px_dip);

#ifdef DEBUG
	if (rval == DDI_INTR_UNCLAIMED)
		cmn_err(CE_WARN, "%s%d: UNCLAIMED intr\n",
		    ddi_driver_name(px_p->px_dip),
		    ddi_get_instance(px_p->px_dip));
#endif

	return (rval);
}

int
px_lib_hotplug_init(dev_info_t *dip, void *arg)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	uint64_t ret;

	if ((ret = hvio_hotplug_init(dip, arg)) == DDI_SUCCESS) {
		sysino_t sysino;

		if (px_lib_intr_devino_to_sysino(px_p->px_dip,
		    px_p->px_inos[PX_INTR_HOTPLUG], &sysino) !=
		    DDI_SUCCESS) {
#ifdef DEBUG
			cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n",
			    ddi_driver_name(px_p->px_dip),
			    ddi_get_instance(px_p->px_dip));
#endif
			return (DDI_FAILURE);
		}

		VERIFY(add_ivintr(sysino, PX_PCIEHP_PIL,
		    (intrfunc)px_hp_intr, (caddr_t)px_p, NULL) == 0);
	}

	return (ret);
}

void
px_lib_hotplug_uninit(dev_info_t *dip)
{
	if (hvio_hotplug_uninit(dip) == DDI_SUCCESS) {
		px_t	*px_p = DIP_TO_STATE(dip);
		sysino_t sysino;

		if (px_lib_intr_devino_to_sysino(px_p->px_dip,
		    px_p->px_inos[PX_INTR_HOTPLUG], &sysino) !=
		    DDI_SUCCESS) {
#ifdef DEBUG
			cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n",
			    ddi_driver_name(px_p->px_dip),
			    ddi_get_instance(px_p->px_dip));
#endif
			return;
		}

		rem_ivintr(sysino, NULL);
	}
}

boolean_t
px_lib_is_in_drain_state(px_t *px_p)
{
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
	uint64_t drain_status;

	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
		drain_status = CSR_BR(csr_base, DRAIN_CONTROL_STATUS, DRAIN);
	} else {
		drain_status = CSR_BR(csr_base, TLU_STATUS, DRAIN);
	}

	return (drain_status);
}