/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/modctl.h>
#include <sys/disp.h>
#include <sys/stat.h>
#include <sys/ddi_impldefs.h>
#include <sys/vmem.h>
#include <sys/iommutsb.h>
#include <sys/cpuvar.h>
#include <sys/ivintr.h>
#include <sys/byteorder.h>
#include <sys/hotplug/pci/pciehpc.h>
#include <sys/spl.h>
#include <px_obj.h>
#include <pcie_pwr.h>
#include "px_tools_var.h"
#include <px_regs.h>
#include <px_csr.h>
#include <sys/machsystm.h>
#include "px_lib4u.h"
#include "px_err.h"
#include "oberon_regs.h"

#pragma weak jbus_stst_order

extern void jbus_stst_order();

ulong_t px_mmu_dvma_end = 0xfffffffful;
uint_t px_ranges_phi_mask = 0xfffffffful;
uint64_t *px_oberon_ubc_scratch_regs;
uint64_t px_paddr_mask;

static int px_goto_l23ready(px_t *px_p);
static int px_goto_l0(px_t *px_p);
static int px_pre_pwron_check(px_t *px_p);
static uint32_t px_identity_init(px_t *px_p);
static boolean_t px_cpr_callb(void *arg, int code);
static uint_t px_cb_intr(caddr_t arg);

/*
 * px_lib_map_registers
 *
 * This function is called from the attach routine to map the registers
 * accessed by this driver.
 *
 * used by: px_attach()
 *
 * return value: DDI_FAILURE on failure
 */
int
px_lib_map_regs(pxu_t *pxu_p, dev_info_t *dip)
{
	ddi_device_acc_attr_t	attr;
	px_reg_bank_t		reg_bank = PX_REG_CSR;

	DBG(DBG_ATTACH, dip, "px_lib_map_regs: pxu_p:0x%p, dip 0x%p\n",
	    pxu_p, dip);

	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;

	/*
	 * PCI CSR Base
	 */
	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
		goto fail;
	}

	reg_bank++;

	/*
	 * XBUS CSR Base
	 */
	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
		goto fail;
	}

	pxu_p->px_address[reg_bank] -= FIRE_CONTROL_STATUS;

done:
	for (; reg_bank >= PX_REG_CSR; reg_bank--) {
		DBG(DBG_ATTACH, dip, "reg_bank 0x%x address 0x%p\n",
		    reg_bank, pxu_p->px_address[reg_bank]);
	}

	return (DDI_SUCCESS);

fail:
	cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n",
	    ddi_driver_name(dip), ddi_get_instance(dip), reg_bank);

	for (reg_bank--; reg_bank >= PX_REG_CSR; reg_bank--) {
		pxu_p->px_address[reg_bank] = NULL;
		ddi_regs_map_free(&pxu_p->px_ac[reg_bank]);
	}

	return (DDI_FAILURE);
}

/*
 * px_lib_unmap_regs:
 *
 * This routine unmaps the registers mapped by px_lib_map_regs.
 *
 * used by: px_detach(), and error conditions in px_attach()
 *
 * return value: none
 */
void
px_lib_unmap_regs(pxu_t *pxu_p)
{
	int i;

	for (i = 0; i < PX_REG_MAX; i++) {
		if (pxu_p->px_ac[i])
			ddi_regs_map_free(&pxu_p->px_ac[i]);
	}
}

int
px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
{

	caddr_t			xbc_csr_base, csr_base;
	px_dvma_range_prop_t	px_dvma_range;
	pxu_t			*pxu_p;
	uint8_t			chip_mask;
	px_t			*px_p = DIP_TO_STATE(dip);
	px_chip_type_t		chip_type = px_identity_init(px_p);

	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p", dip);

	if (chip_type == PX_CHIP_UNIDENTIFIED) {
		cmn_err(CE_WARN, "%s%d: Unrecognized Hardware Version\n",
		    NAMEINST(dip));
		return (DDI_FAILURE);
	}

	chip_mask = BITMASK(chip_type);
	px_paddr_mask = (chip_type == PX_CHIP_FIRE) ? MMU_FIRE_PADDR_MASK :
	    MMU_OBERON_PADDR_MASK;

	/*
	 * Allocate platform specific structure and link it to
	 * the px state structure.
	 */
	pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP);
	pxu_p->chip_type = chip_type;
	pxu_p->portid = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "portid", -1);

	/* Map in the registers */
	if (px_lib_map_regs(pxu_p, dip) == DDI_FAILURE) {
		kmem_free(pxu_p, sizeof (pxu_t));

		return (DDI_FAILURE);
	}

	xbc_csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC];
	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];

	pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid);
	pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie);
	pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie);

	pxu_p->tsb_paddr = va_to_pa(pxu_p->tsb_vaddr);
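
	/*
	 * Editorial note -- a worked example of the DVMA window computed
	 * below.  The figures assume (not guaranteed by this file) an
	 * 8 KB IOMMU page (MMU_PAGE_SHIFT == 13) and a 256 KB TSB
	 * returned by iommu_tsb_alloc():
	 *
	 *	entries   = tsb_size >> 3             = 0x8000 (8-byte TTEs)
	 *	span      = entries << MMU_PAGE_SHIFT = 0x10000000 (256 MB)
	 *	dvma_base = 0xffffffff + 1 - span     = 0xf0000000
	 *	dvma_len  = 0xffffffff - dvma_base + 1 = 0x10000000
	 *
	 * i.e. the window is carved out of the top of the 32-bit address
	 * space, one IOMMU page per TSB entry.
	 */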

	/*
	 * Create "virtual-dma" property to support child devices
	 * needing to know DVMA range.
	 */
	px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1
	    - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT);
	px_dvma_range.dvma_len = (uint32_t)
	    px_mmu_dvma_end - px_dvma_range.dvma_base + 1;

	(void) ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
	    "virtual-dma", (int *)&px_dvma_range,
	    sizeof (px_dvma_range_prop_t) / sizeof (int));
	/*
	 * Initialize all fire hardware specific blocks.
	 */
	hvio_cb_init(xbc_csr_base, pxu_p);
	hvio_ib_init(csr_base, pxu_p);
	hvio_pec_init(csr_base, pxu_p);
	hvio_mmu_init(csr_base, pxu_p);

	px_p->px_plat_p = (void *)pxu_p;

	/*
	 * Initialize all the interrupt handlers
	 */
	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		/*
		 * Oberon hotplug uses SPARE3 field in ILU Error Log Enable
		 * register to indicate the status of leaf reset,
		 * we need to preserve the value of this bit, and keep it in
		 * px_ilu_log_mask to reflect the state of the bit
		 */
		if (CSR_BR(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3))
			px_ilu_log_mask |= (1ull <<
			    ILU_ERROR_LOG_ENABLE_SPARE3);
		else
			px_ilu_log_mask &= ~(1ull <<
			    ILU_ERROR_LOG_ENABLE_SPARE3);

		px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_ENABLE);
		break;

	case PX_CHIP_FIRE:
		px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_ENABLE);
		break;

	default:
		cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	/* Initialize device handle */
	*dev_hdl = (devhandle_t)csr_base;

	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl);

	return (DDI_SUCCESS);
}

int
px_lib_dev_fini(dev_info_t *dip)
{
	caddr_t		csr_base;
	uint8_t		chip_mask;
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;

	DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip);

	/*
	 * Deinitialize all the interrupt handlers
	 */
	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
	case PX_CHIP_FIRE:
		chip_mask = BITMASK(PX_CHIP_TYPE(pxu_p));
		csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
		px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_DISABLE);
		break;

	default:
		cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	iommu_tsb_free(pxu_p->tsb_cookie);

	px_lib_unmap_regs((pxu_t *)px_p->px_plat_p);
	kmem_free(px_p->px_plat_p, sizeof (pxu_t));
	px_p->px_plat_p = NULL;
	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "virtual-dma");

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino,
    sysino_t *sysino)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p "
	    "devino 0x%x\n", dip, devino);

	if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip),
	    pxu_p, devino, sysino)) != H_EOK) {
		DBG(DBG_LIB_INT, dip,
		    "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n",
	    *sysino);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino,
    intr_valid_state_t *intr_valid_state)
{
	uint64_t	ret;

DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n", 335 dip, sysino); 336 337 if ((ret = hvio_intr_getvalid(DIP_TO_HANDLE(dip), 338 sysino, intr_valid_state)) != H_EOK) { 339 DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n", 340 ret); 341 return (DDI_FAILURE); 342 } 343 344 DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n", 345 *intr_valid_state); 346 347 return (DDI_SUCCESS); 348 } 349 350 /*ARGSUSED*/ 351 int 352 px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino, 353 intr_valid_state_t intr_valid_state) 354 { 355 uint64_t ret; 356 357 DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx " 358 "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state); 359 360 if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip), 361 sysino, intr_valid_state)) != H_EOK) { 362 DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n", 363 ret); 364 return (DDI_FAILURE); 365 } 366 367 return (DDI_SUCCESS); 368 } 369 370 /*ARGSUSED*/ 371 int 372 px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino, 373 intr_state_t *intr_state) 374 { 375 uint64_t ret; 376 377 DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n", 378 dip, sysino); 379 380 if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip), 381 sysino, intr_state)) != H_EOK) { 382 DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n", 383 ret); 384 return (DDI_FAILURE); 385 } 386 387 DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n", 388 *intr_state); 389 390 return (DDI_SUCCESS); 391 } 392 393 /*ARGSUSED*/ 394 int 395 px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino, 396 intr_state_t intr_state) 397 { 398 uint64_t ret; 399 400 DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx " 401 "intr_state 0x%x\n", dip, sysino, intr_state); 402 403 if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip), 404 sysino, intr_state)) != H_EOK) { 405 DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n", 406 ret); 407 return (DDI_FAILURE); 408 } 409 410 return (DDI_SUCCESS); 411 } 412 413 /*ARGSUSED*/ 414 int 415 px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid) 416 { 417 px_t *px_p = DIP_TO_STATE(dip); 418 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 419 uint64_t ret; 420 421 DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n", 422 dip, sysino); 423 424 if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip), pxu_p, 425 sysino, cpuid)) != H_EOK) { 426 DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n", 427 ret); 428 return (DDI_FAILURE); 429 } 430 431 DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", cpuid); 432 433 return (DDI_SUCCESS); 434 } 435 436 /*ARGSUSED*/ 437 int 438 px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid) 439 { 440 px_t *px_p = DIP_TO_STATE(dip); 441 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 442 uint64_t ret; 443 444 DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx " 445 "cpuid 0x%x\n", dip, sysino, cpuid); 446 447 if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip), pxu_p, 448 sysino, cpuid)) != H_EOK) { 449 DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n", 450 ret); 451 return (DDI_FAILURE); 452 } 453 454 return (DDI_SUCCESS); 455 } 456 457 /*ARGSUSED*/ 458 int 459 px_lib_intr_reset(dev_info_t *dip) 460 { 461 devino_t ino; 462 sysino_t sysino; 463 464 DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip); 465 466 /* Reset all Interrupts */ 467 for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) { 468 if 
(px_lib_intr_devino_to_sysino(dip, ino, 469 &sysino) != DDI_SUCCESS) 470 return (BF_FATAL); 471 472 if (px_lib_intr_setstate(dip, sysino, 473 INTR_IDLE_STATE) != DDI_SUCCESS) 474 return (BF_FATAL); 475 } 476 477 return (BF_NONE); 478 } 479 480 /*ARGSUSED*/ 481 int 482 px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages, 483 io_attributes_t attr, void *addr, size_t pfn_index, int flags) 484 { 485 px_t *px_p = DIP_TO_STATE(dip); 486 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 487 uint64_t ret; 488 489 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx " 490 "pages 0x%x attr 0x%x addr 0x%p pfn_index 0x%llx flags 0x%x\n", 491 dip, tsbid, pages, attr, addr, pfn_index, flags); 492 493 if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages, 494 attr, addr, pfn_index, flags)) != H_EOK) { 495 DBG(DBG_LIB_DMA, dip, 496 "px_lib_iommu_map failed, ret 0x%lx\n", ret); 497 return (DDI_FAILURE); 498 } 499 500 return (DDI_SUCCESS); 501 } 502 503 /*ARGSUSED*/ 504 int 505 px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages) 506 { 507 px_t *px_p = DIP_TO_STATE(dip); 508 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 509 uint64_t ret; 510 511 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx " 512 "pages 0x%x\n", dip, tsbid, pages); 513 514 if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, tsbid, pages)) 515 != H_EOK) { 516 DBG(DBG_LIB_DMA, dip, 517 "px_lib_iommu_demap failed, ret 0x%lx\n", ret); 518 519 return (DDI_FAILURE); 520 } 521 522 return (DDI_SUCCESS); 523 } 524 525 /*ARGSUSED*/ 526 int 527 px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, io_attributes_t *attr_p, 528 r_addr_t *r_addr_p) 529 { 530 px_t *px_p = DIP_TO_STATE(dip); 531 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 532 uint64_t ret; 533 534 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n", 535 dip, tsbid); 536 537 if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid, 538 attr_p, r_addr_p)) != H_EOK) { 539 DBG(DBG_LIB_DMA, dip, 540 "hvio_iommu_getmap failed, ret 0x%lx\n", ret); 541 542 return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING:DDI_FAILURE); 543 } 544 545 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%x r_addr 0x%llx\n", 546 *attr_p, *r_addr_p); 547 548 return (DDI_SUCCESS); 549 } 550 551 552 /* 553 * Checks dma attributes against system bypass ranges 554 * The bypass range is determined by the hardware. Return them so the 555 * common code can do generic checking against them. 556 */ 557 /*ARGSUSED*/ 558 int 559 px_lib_dma_bypass_rngchk(dev_info_t *dip, ddi_dma_attr_t *attr_p, 560 uint64_t *lo_p, uint64_t *hi_p) 561 { 562 px_t *px_p = DIP_TO_STATE(dip); 563 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 564 565 *lo_p = hvio_get_bypass_base(pxu_p); 566 *hi_p = hvio_get_bypass_end(pxu_p); 567 568 return (DDI_SUCCESS); 569 } 570 571 572 /*ARGSUSED*/ 573 int 574 px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra, io_attributes_t attr, 575 io_addr_t *io_addr_p) 576 { 577 uint64_t ret; 578 px_t *px_p = DIP_TO_STATE(dip); 579 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 580 581 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx " 582 "attr 0x%x\n", dip, ra, attr); 583 584 if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), pxu_p, ra, 585 attr, io_addr_p)) != H_EOK) { 586 DBG(DBG_LIB_DMA, dip, 587 "hvio_iommu_getbypass failed, ret 0x%lx\n", ret); 588 return (DDI_FAILURE); 589 } 590 591 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n", 592 *io_addr_p); 593 594 return (DDI_SUCCESS); 595 } 596 597 /* 598 * bus dma sync entry point. 
599 */ 600 /*ARGSUSED*/ 601 int 602 px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 603 off_t off, size_t len, uint_t cache_flags) 604 { 605 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 606 px_t *px_p = DIP_TO_STATE(dip); 607 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 608 609 DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p " 610 "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n", 611 dip, rdip, handle, off, len, cache_flags); 612 613 /* 614 * No flush needed for Oberon 615 */ 616 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) 617 return (DDI_SUCCESS); 618 619 /* 620 * jbus_stst_order is found only in certain cpu modules. 621 * Just return success if not present. 622 */ 623 if (&jbus_stst_order == NULL) 624 return (DDI_SUCCESS); 625 626 if (!(mp->dmai_flags & PX_DMAI_FLAGS_INUSE)) { 627 cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.", 628 ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp); 629 630 return (DDI_FAILURE); 631 } 632 633 if (mp->dmai_flags & PX_DMAI_FLAGS_NOSYNC) 634 return (DDI_SUCCESS); 635 636 /* 637 * No flush needed when sending data from memory to device. 638 * Nothing to do to "sync" memory to what device would already see. 639 */ 640 if (!(mp->dmai_rflags & DDI_DMA_READ) || 641 ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV)) 642 return (DDI_SUCCESS); 643 644 /* 645 * Perform necessary cpu workaround to ensure jbus ordering. 646 * CPU's internal "invalidate FIFOs" are flushed. 647 */ 648 649 #if !defined(lint) 650 kpreempt_disable(); 651 #endif 652 jbus_stst_order(); 653 #if !defined(lint) 654 kpreempt_enable(); 655 #endif 656 return (DDI_SUCCESS); 657 } 658 659 /* 660 * MSIQ Functions: 661 */ 662 /*ARGSUSED*/ 663 int 664 px_lib_msiq_init(dev_info_t *dip) 665 { 666 px_t *px_p = DIP_TO_STATE(dip); 667 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 668 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 669 px_dvma_addr_t pg_index; 670 size_t size; 671 int ret; 672 673 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip); 674 675 /* 676 * Map the EQ memory into the Fire MMU (has to be 512KB aligned) 677 * and then initialize the base address register. 678 * 679 * Allocate entries from Fire IOMMU so that the resulting address 680 * is properly aligned. Calculate the index of the first allocated 681 * entry. Note: The size of the mapping is assumed to be a multiple 682 * of the page size. 
683 */ 684 size = msiq_state_p->msiq_cnt * 685 msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 686 687 pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map, 688 size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT); 689 690 if (pxu_p->msiq_mapped_p == NULL) 691 return (DDI_FAILURE); 692 693 pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p, 694 MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p)); 695 696 if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index), 697 MMU_BTOP(size), PCI_MAP_ATTR_WRITE, msiq_state_p->msiq_buf_p, 698 0, MMU_MAP_BUF)) != DDI_SUCCESS) { 699 DBG(DBG_LIB_MSIQ, dip, 700 "px_lib_msiq_init: px_lib_iommu_map failed, " 701 "ret 0x%lx\n", ret); 702 703 (void) px_lib_msiq_fini(dip); 704 return (DDI_FAILURE); 705 } 706 707 if ((ret = hvio_msiq_init(DIP_TO_HANDLE(dip), 708 pxu_p)) != H_EOK) { 709 DBG(DBG_LIB_MSIQ, dip, 710 "hvio_msiq_init failed, ret 0x%lx\n", ret); 711 712 (void) px_lib_msiq_fini(dip); 713 return (DDI_FAILURE); 714 } 715 716 return (DDI_SUCCESS); 717 } 718 719 /*ARGSUSED*/ 720 int 721 px_lib_msiq_fini(dev_info_t *dip) 722 { 723 px_t *px_p = DIP_TO_STATE(dip); 724 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 725 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 726 px_dvma_addr_t pg_index; 727 size_t size; 728 729 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip); 730 731 /* 732 * Unmap and free the EQ memory that had been mapped 733 * into the Fire IOMMU. 734 */ 735 size = msiq_state_p->msiq_cnt * 736 msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 737 738 pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p, 739 MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p)); 740 741 (void) px_lib_iommu_demap(px_p->px_dip, 742 PCI_TSBID(0, pg_index), MMU_BTOP(size)); 743 744 /* Free the entries from the Fire MMU */ 745 vmem_xfree(px_p->px_mmu_p->mmu_dvma_map, 746 (void *)pxu_p->msiq_mapped_p, size); 747 748 return (DDI_SUCCESS); 749 } 750 751 /*ARGSUSED*/ 752 int 753 px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p, 754 uint_t *msiq_rec_cnt_p) 755 { 756 px_t *px_p = DIP_TO_STATE(dip); 757 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 758 size_t msiq_size; 759 760 DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n", 761 dip, msiq_id); 762 763 msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 764 ra_p = (r_addr_t *)((caddr_t)msiq_state_p->msiq_buf_p + 765 (msiq_id * msiq_size)); 766 767 *msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt; 768 769 DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%p msiq_rec_cnt 0x%x\n", 770 ra_p, *msiq_rec_cnt_p); 771 772 return (DDI_SUCCESS); 773 } 774 775 /*ARGSUSED*/ 776 int 777 px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id, 778 pci_msiq_valid_state_t *msiq_valid_state) 779 { 780 uint64_t ret; 781 782 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n", 783 dip, msiq_id); 784 785 if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip), 786 msiq_id, msiq_valid_state)) != H_EOK) { 787 DBG(DBG_LIB_MSIQ, dip, 788 "hvio_msiq_getvalid failed, ret 0x%lx\n", ret); 789 return (DDI_FAILURE); 790 } 791 792 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n", 793 *msiq_valid_state); 794 795 return (DDI_SUCCESS); 796 } 797 798 /*ARGSUSED*/ 799 int 800 px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id, 801 pci_msiq_valid_state_t msiq_valid_state) 802 { 803 uint64_t ret; 804 805 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x " 806 "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state); 807 808 if ((ret = 
hvio_msiq_setvalid(DIP_TO_HANDLE(dip), 809 msiq_id, msiq_valid_state)) != H_EOK) { 810 DBG(DBG_LIB_MSIQ, dip, 811 "hvio_msiq_setvalid failed, ret 0x%lx\n", ret); 812 return (DDI_FAILURE); 813 } 814 815 return (DDI_SUCCESS); 816 } 817 818 /*ARGSUSED*/ 819 int 820 px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id, 821 pci_msiq_state_t *msiq_state) 822 { 823 uint64_t ret; 824 825 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n", 826 dip, msiq_id); 827 828 if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip), 829 msiq_id, msiq_state)) != H_EOK) { 830 DBG(DBG_LIB_MSIQ, dip, 831 "hvio_msiq_getstate failed, ret 0x%lx\n", ret); 832 return (DDI_FAILURE); 833 } 834 835 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n", 836 *msiq_state); 837 838 return (DDI_SUCCESS); 839 } 840 841 /*ARGSUSED*/ 842 int 843 px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id, 844 pci_msiq_state_t msiq_state) 845 { 846 uint64_t ret; 847 848 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x " 849 "msiq_state 0x%x\n", dip, msiq_id, msiq_state); 850 851 if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip), 852 msiq_id, msiq_state)) != H_EOK) { 853 DBG(DBG_LIB_MSIQ, dip, 854 "hvio_msiq_setstate failed, ret 0x%lx\n", ret); 855 return (DDI_FAILURE); 856 } 857 858 return (DDI_SUCCESS); 859 } 860 861 /*ARGSUSED*/ 862 int 863 px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id, 864 msiqhead_t *msiq_head) 865 { 866 uint64_t ret; 867 868 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n", 869 dip, msiq_id); 870 871 if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip), 872 msiq_id, msiq_head)) != H_EOK) { 873 DBG(DBG_LIB_MSIQ, dip, 874 "hvio_msiq_gethead failed, ret 0x%lx\n", ret); 875 return (DDI_FAILURE); 876 } 877 878 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n", 879 *msiq_head); 880 881 return (DDI_SUCCESS); 882 } 883 884 /*ARGSUSED*/ 885 int 886 px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id, 887 msiqhead_t msiq_head) 888 { 889 uint64_t ret; 890 891 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x " 892 "msiq_head 0x%x\n", dip, msiq_id, msiq_head); 893 894 if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip), 895 msiq_id, msiq_head)) != H_EOK) { 896 DBG(DBG_LIB_MSIQ, dip, 897 "hvio_msiq_sethead failed, ret 0x%lx\n", ret); 898 return (DDI_FAILURE); 899 } 900 901 return (DDI_SUCCESS); 902 } 903 904 /*ARGSUSED*/ 905 int 906 px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id, 907 msiqtail_t *msiq_tail) 908 { 909 uint64_t ret; 910 911 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n", 912 dip, msiq_id); 913 914 if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip), 915 msiq_id, msiq_tail)) != H_EOK) { 916 DBG(DBG_LIB_MSIQ, dip, 917 "hvio_msiq_gettail failed, ret 0x%lx\n", ret); 918 return (DDI_FAILURE); 919 } 920 921 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n", 922 *msiq_tail); 923 924 return (DDI_SUCCESS); 925 } 926 927 /*ARGSUSED*/ 928 void 929 px_lib_get_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p, 930 msiq_rec_t *msiq_rec_p) 931 { 932 eq_rec_t *eq_rec_p = (eq_rec_t *)msiq_head_p; 933 934 DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n", 935 dip, eq_rec_p); 936 937 if (!eq_rec_p->eq_rec_fmt_type) { 938 /* Set msiq_rec_type to zero */ 939 msiq_rec_p->msiq_rec_type = 0; 940 941 return; 942 } 943 944 DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, " 945 "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx " 946 "eq_rec_len 0x%llx eq_rec_addr0 0x%llx " 947 
"eq_rec_addr1 0x%llx eq_rec_data0 0x%llx " 948 "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid, 949 eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len, 950 eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1, 951 eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1); 952 953 /* 954 * Only upper 4 bits of eq_rec_fmt_type is used 955 * to identify the EQ record type. 956 */ 957 switch (eq_rec_p->eq_rec_fmt_type >> 3) { 958 case EQ_REC_MSI32: 959 msiq_rec_p->msiq_rec_type = MSI32_REC; 960 961 msiq_rec_p->msiq_rec_data.msi.msi_data = 962 eq_rec_p->eq_rec_data0; 963 break; 964 case EQ_REC_MSI64: 965 msiq_rec_p->msiq_rec_type = MSI64_REC; 966 967 msiq_rec_p->msiq_rec_data.msi.msi_data = 968 eq_rec_p->eq_rec_data0; 969 break; 970 case EQ_REC_MSG: 971 msiq_rec_p->msiq_rec_type = MSG_REC; 972 973 msiq_rec_p->msiq_rec_data.msg.msg_route = 974 eq_rec_p->eq_rec_fmt_type & 7; 975 msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid; 976 msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0; 977 break; 978 default: 979 cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: " 980 "0x%x is an unknown EQ record type", 981 ddi_driver_name(dip), ddi_get_instance(dip), 982 (int)eq_rec_p->eq_rec_fmt_type); 983 break; 984 } 985 986 msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid; 987 msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) | 988 (eq_rec_p->eq_rec_addr0 << 2)); 989 } 990 991 /*ARGSUSED*/ 992 void 993 px_lib_clr_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p) 994 { 995 eq_rec_t *eq_rec_p = (eq_rec_t *)msiq_head_p; 996 997 DBG(DBG_LIB_MSIQ, dip, "px_lib_clr_msiq_rec: dip 0x%p eq_rec_p 0x%p\n", 998 dip, eq_rec_p); 999 1000 if (eq_rec_p->eq_rec_fmt_type) { 1001 /* Zero out eq_rec_fmt_type field */ 1002 eq_rec_p->eq_rec_fmt_type = 0; 1003 } 1004 } 1005 1006 /* 1007 * MSI Functions: 1008 */ 1009 /*ARGSUSED*/ 1010 int 1011 px_lib_msi_init(dev_info_t *dip) 1012 { 1013 px_t *px_p = DIP_TO_STATE(dip); 1014 px_msi_state_t *msi_state_p = &px_p->px_ib_p->ib_msi_state; 1015 uint64_t ret; 1016 1017 DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip); 1018 1019 if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip), 1020 msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) { 1021 DBG(DBG_LIB_MSIQ, dip, "px_lib_msi_init failed, ret 0x%lx\n", 1022 ret); 1023 return (DDI_FAILURE); 1024 } 1025 1026 return (DDI_SUCCESS); 1027 } 1028 1029 /*ARGSUSED*/ 1030 int 1031 px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num, 1032 msiqid_t *msiq_id) 1033 { 1034 uint64_t ret; 1035 1036 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n", 1037 dip, msi_num); 1038 1039 if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip), 1040 msi_num, msiq_id)) != H_EOK) { 1041 DBG(DBG_LIB_MSI, dip, 1042 "hvio_msi_getmsiq failed, ret 0x%lx\n", ret); 1043 return (DDI_FAILURE); 1044 } 1045 1046 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n", 1047 *msiq_id); 1048 1049 return (DDI_SUCCESS); 1050 } 1051 1052 /*ARGSUSED*/ 1053 int 1054 px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num, 1055 msiqid_t msiq_id, msi_type_t msitype) 1056 { 1057 uint64_t ret; 1058 1059 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x " 1060 "msq_id 0x%x\n", dip, msi_num, msiq_id); 1061 1062 if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip), 1063 msi_num, msiq_id)) != H_EOK) { 1064 DBG(DBG_LIB_MSI, dip, 1065 "hvio_msi_setmsiq failed, ret 0x%lx\n", ret); 1066 return (DDI_FAILURE); 1067 } 1068 1069 return (DDI_SUCCESS); 1070 } 1071 1072 /*ARGSUSED*/ 1073 int 1074 px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num, 1075 
pci_msi_valid_state_t *msi_valid_state) 1076 { 1077 uint64_t ret; 1078 1079 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n", 1080 dip, msi_num); 1081 1082 if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip), 1083 msi_num, msi_valid_state)) != H_EOK) { 1084 DBG(DBG_LIB_MSI, dip, 1085 "hvio_msi_getvalid failed, ret 0x%lx\n", ret); 1086 return (DDI_FAILURE); 1087 } 1088 1089 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msiq_id 0x%x\n", 1090 *msi_valid_state); 1091 1092 return (DDI_SUCCESS); 1093 } 1094 1095 /*ARGSUSED*/ 1096 int 1097 px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num, 1098 pci_msi_valid_state_t msi_valid_state) 1099 { 1100 uint64_t ret; 1101 1102 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x " 1103 "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state); 1104 1105 if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip), 1106 msi_num, msi_valid_state)) != H_EOK) { 1107 DBG(DBG_LIB_MSI, dip, 1108 "hvio_msi_setvalid failed, ret 0x%lx\n", ret); 1109 return (DDI_FAILURE); 1110 } 1111 1112 return (DDI_SUCCESS); 1113 } 1114 1115 /*ARGSUSED*/ 1116 int 1117 px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num, 1118 pci_msi_state_t *msi_state) 1119 { 1120 uint64_t ret; 1121 1122 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n", 1123 dip, msi_num); 1124 1125 if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip), 1126 msi_num, msi_state)) != H_EOK) { 1127 DBG(DBG_LIB_MSI, dip, 1128 "hvio_msi_getstate failed, ret 0x%lx\n", ret); 1129 return (DDI_FAILURE); 1130 } 1131 1132 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n", 1133 *msi_state); 1134 1135 return (DDI_SUCCESS); 1136 } 1137 1138 /*ARGSUSED*/ 1139 int 1140 px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num, 1141 pci_msi_state_t msi_state) 1142 { 1143 uint64_t ret; 1144 1145 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x " 1146 "msi_state 0x%x\n", dip, msi_num, msi_state); 1147 1148 if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip), 1149 msi_num, msi_state)) != H_EOK) { 1150 DBG(DBG_LIB_MSI, dip, 1151 "hvio_msi_setstate failed, ret 0x%lx\n", ret); 1152 return (DDI_FAILURE); 1153 } 1154 1155 return (DDI_SUCCESS); 1156 } 1157 1158 /* 1159 * MSG Functions: 1160 */ 1161 /*ARGSUSED*/ 1162 int 1163 px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type, 1164 msiqid_t *msiq_id) 1165 { 1166 uint64_t ret; 1167 1168 DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n", 1169 dip, msg_type); 1170 1171 if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip), 1172 msg_type, msiq_id)) != H_EOK) { 1173 DBG(DBG_LIB_MSG, dip, 1174 "hvio_msg_getmsiq failed, ret 0x%lx\n", ret); 1175 return (DDI_FAILURE); 1176 } 1177 1178 DBG(DBG_LIB_MSI, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n", 1179 *msiq_id); 1180 1181 return (DDI_SUCCESS); 1182 } 1183 1184 /*ARGSUSED*/ 1185 int 1186 px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type, 1187 msiqid_t msiq_id) 1188 { 1189 uint64_t ret; 1190 1191 DBG(DBG_LIB_MSG, dip, "px_lib_msi_setstate: dip 0x%p msg_type 0x%x " 1192 "msiq_id 0x%x\n", dip, msg_type, msiq_id); 1193 1194 if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip), 1195 msg_type, msiq_id)) != H_EOK) { 1196 DBG(DBG_LIB_MSG, dip, 1197 "hvio_msg_setmsiq failed, ret 0x%lx\n", ret); 1198 return (DDI_FAILURE); 1199 } 1200 1201 return (DDI_SUCCESS); 1202 } 1203 1204 /*ARGSUSED*/ 1205 int 1206 px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type, 1207 pcie_msg_valid_state_t *msg_valid_state) 1208 { 1209 uint64_t ret; 1210 1211 DBG(DBG_LIB_MSG, dip, 
"px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n", 1212 dip, msg_type); 1213 1214 if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type, 1215 msg_valid_state)) != H_EOK) { 1216 DBG(DBG_LIB_MSG, dip, 1217 "hvio_msg_getvalid failed, ret 0x%lx\n", ret); 1218 return (DDI_FAILURE); 1219 } 1220 1221 DBG(DBG_LIB_MSI, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n", 1222 *msg_valid_state); 1223 1224 return (DDI_SUCCESS); 1225 } 1226 1227 /*ARGSUSED*/ 1228 int 1229 px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type, 1230 pcie_msg_valid_state_t msg_valid_state) 1231 { 1232 uint64_t ret; 1233 1234 DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x " 1235 "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state); 1236 1237 if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type, 1238 msg_valid_state)) != H_EOK) { 1239 DBG(DBG_LIB_MSG, dip, 1240 "hvio_msg_setvalid failed, ret 0x%lx\n", ret); 1241 return (DDI_FAILURE); 1242 } 1243 1244 return (DDI_SUCCESS); 1245 } 1246 1247 /* 1248 * Suspend/Resume Functions: 1249 * Currently unsupported by hypervisor 1250 */ 1251 int 1252 px_lib_suspend(dev_info_t *dip) 1253 { 1254 px_t *px_p = DIP_TO_STATE(dip); 1255 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1256 px_cb_t *cb_p = PX2CB(px_p); 1257 devhandle_t dev_hdl, xbus_dev_hdl; 1258 uint64_t ret = H_EOK; 1259 1260 DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip); 1261 1262 dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR]; 1263 xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC]; 1264 1265 if ((ret = hvio_suspend(dev_hdl, pxu_p)) != H_EOK) 1266 goto fail; 1267 1268 if (--cb_p->attachcnt == 0) { 1269 ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p); 1270 if (ret != H_EOK) 1271 cb_p->attachcnt++; 1272 } 1273 pxu_p->cpr_flag = PX_ENTERED_CPR; 1274 1275 fail: 1276 return ((ret != H_EOK) ? DDI_FAILURE: DDI_SUCCESS); 1277 } 1278 1279 void 1280 px_lib_resume(dev_info_t *dip) 1281 { 1282 px_t *px_p = DIP_TO_STATE(dip); 1283 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1284 px_cb_t *cb_p = PX2CB(px_p); 1285 devhandle_t dev_hdl, xbus_dev_hdl; 1286 devino_t pec_ino = px_p->px_inos[PX_INTR_PEC]; 1287 devino_t xbc_ino = px_p->px_inos[PX_INTR_XBC]; 1288 1289 DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip); 1290 1291 dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR]; 1292 xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC]; 1293 1294 if (++cb_p->attachcnt == 1) 1295 hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p); 1296 1297 hvio_resume(dev_hdl, pec_ino, pxu_p); 1298 } 1299 1300 /* 1301 * Generate a unique Oberon UBC ID based on the Logicial System Board and 1302 * the IO Channel from the portid property field. 1303 */ 1304 static uint64_t 1305 oberon_get_ubc_id(dev_info_t *dip) 1306 { 1307 px_t *px_p = DIP_TO_STATE(dip); 1308 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1309 uint64_t ubc_id; 1310 1311 /* 1312 * Generate a unique 6 bit UBC ID using the 2 IO_Channel#[1:0] bits and 1313 * the 4 LSB_ID[3:0] bits from the Oberon's portid property. 1314 */ 1315 ubc_id = (((pxu_p->portid >> OBERON_PORT_ID_IOC) & 1316 OBERON_PORT_ID_IOC_MASK) | (((pxu_p->portid >> 1317 OBERON_PORT_ID_LSB) & OBERON_PORT_ID_LSB_MASK) 1318 << OBERON_UBC_ID_LSB)); 1319 1320 return (ubc_id); 1321 } 1322 1323 /* 1324 * Oberon does not have a UBC scratch register, so alloc an array of scratch 1325 * registers when needed and use a unique UBC ID as an index. This code 1326 * can be simplified if we use a pre-allocated array. 

/*
 * Oberon does not have a UBC scratch register, so alloc an array of scratch
 * registers when needed and use a unique UBC ID as an index. This code
 * can be simplified if we use a pre-allocated array. They are currently
 * being dynamically allocated because it's only needed by the Oberon.
 */
static void
oberon_set_cb(dev_info_t *dip, uint64_t val)
{
	uint64_t	ubc_id;

	if (px_oberon_ubc_scratch_regs == NULL)
		px_oberon_ubc_scratch_regs =
		    (uint64_t *)kmem_zalloc(sizeof (uint64_t)*
		    OBERON_UBC_ID_MAX, KM_SLEEP);

	ubc_id = oberon_get_ubc_id(dip);

	px_oberon_ubc_scratch_regs[ubc_id] = val;

	/*
	 * Check if any scratch registers are still in use. If all scratch
	 * registers are currently set to zero, then deallocate the scratch
	 * register array.
	 */
	for (ubc_id = 0; ubc_id < OBERON_UBC_ID_MAX; ubc_id++) {
		if (px_oberon_ubc_scratch_regs[ubc_id] != NULL)
			return;
	}

	/*
	 * All scratch registers are set to zero so deallocate the scratch
	 * register array and set the pointer to NULL.
	 */
	kmem_free(px_oberon_ubc_scratch_regs,
	    (sizeof (uint64_t)*OBERON_UBC_ID_MAX));

	px_oberon_ubc_scratch_regs = NULL;
}

/*
 * Oberon does not have a UBC scratch register, so use an allocated array of
 * scratch registers and use the unique UBC ID as an index into that array.
 */
static uint64_t
oberon_get_cb(dev_info_t *dip)
{
	uint64_t	ubc_id;

	if (px_oberon_ubc_scratch_regs == NULL)
		return (0);

	ubc_id = oberon_get_ubc_id(dip);

	return (px_oberon_ubc_scratch_regs[ubc_id]);
}

/*
 * Misc Functions:
 * Currently unsupported by hypervisor
 */
static uint64_t
px_get_cb(dev_info_t *dip)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;

	/*
	 * Oberon does not currently have Scratchpad registers.
	 */
	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
		return (oberon_get_cb(dip));

	return (CSR_XR((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1));
}

static void
px_set_cb(dev_info_t *dip, uint64_t val)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;

	/*
	 * Oberon does not currently have Scratchpad registers.
	 */
	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
		oberon_set_cb(dip, val);
		return;
	}

	CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, val);
}

/*ARGSUSED*/
int
px_lib_map_vconfig(dev_info_t *dip,
	ddi_map_req_t *mp, pci_config_offset_t off,
	pci_regspec_t *rp, caddr_t *addrp)
{
	/*
	 * No special config space access services in this layer.
	 */
	return (DDI_FAILURE);
}

void
px_lib_map_attr_check(ddi_map_req_t *mp)
{
	ddi_acc_hdl_t *hp = mp->map_handlep;

	/* fire does not accept byte masks from PIO store merge */
	if (hp->ah_acc.devacc_attr_dataorder == DDI_STORECACHING_OK_ACC)
		hp->ah_acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
}

/* This function is called only by poke, caut put and pxtool poke.
*/ 1439 void 1440 px_lib_clr_errs(px_t *px_p, dev_info_t *rdip, uint64_t addr) 1441 { 1442 px_pec_t *pec_p = px_p->px_pec_p; 1443 dev_info_t *rpdip = px_p->px_dip; 1444 int rc_err, fab_err, i; 1445 int acctype = pec_p->pec_safeacc_type; 1446 ddi_fm_error_t derr; 1447 px_ranges_t *ranges_p; 1448 int range_len; 1449 uint32_t addr_high, addr_low; 1450 pcie_req_id_t bdf = 0; 1451 1452 /* Create the derr */ 1453 bzero(&derr, sizeof (ddi_fm_error_t)); 1454 derr.fme_version = DDI_FME_VERSION; 1455 derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1); 1456 derr.fme_flag = acctype; 1457 1458 if (acctype == DDI_FM_ERR_EXPECTED) { 1459 derr.fme_status = DDI_FM_NONFATAL; 1460 ndi_fm_acc_err_set(pec_p->pec_acc_hdl, &derr); 1461 } 1462 1463 if (px_fm_enter(px_p) != DDI_SUCCESS) 1464 return; 1465 1466 /* send ereport/handle/clear fire registers */ 1467 rc_err = px_err_cmn_intr(px_p, &derr, PX_LIB_CALL, PX_FM_BLOCK_ALL); 1468 1469 /* Figure out if this is a cfg or mem32 access */ 1470 addr_high = (uint32_t)(addr >> 32); 1471 addr_low = (uint32_t)addr; 1472 range_len = px_p->px_ranges_length / sizeof (px_ranges_t); 1473 i = 0; 1474 for (ranges_p = px_p->px_ranges_p; i < range_len; i++, ranges_p++) { 1475 if (ranges_p->parent_high == addr_high) { 1476 switch (ranges_p->child_high & PCI_ADDR_MASK) { 1477 case PCI_ADDR_CONFIG: 1478 bdf = (pcie_req_id_t)(addr_low >> 12); 1479 addr_low = 0; 1480 break; 1481 case PCI_ADDR_MEM32: 1482 if (rdip) 1483 bdf = PCI_GET_BDF(rdip); 1484 else 1485 bdf = NULL; 1486 break; 1487 } 1488 break; 1489 } 1490 } 1491 1492 px_rp_en_q(px_p, bdf, addr_low, NULL); 1493 1494 /* 1495 * XXX - Current code scans the fabric for all px_tool accesses. 1496 * In future, do not scan fabric for px_tool access to IO Root Nexus 1497 */ 1498 fab_err = px_scan_fabric(px_p, rpdip, &derr); 1499 1500 px_err_panic(rc_err, PX_RC, fab_err, B_TRUE); 1501 px_fm_exit(px_p); 1502 px_err_panic(rc_err, PX_RC, fab_err, B_FALSE); 1503 } 1504 1505 #ifdef DEBUG 1506 int px_peekfault_cnt = 0; 1507 int px_pokefault_cnt = 0; 1508 #endif /* DEBUG */ 1509 1510 /*ARGSUSED*/ 1511 static int 1512 px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip, 1513 peekpoke_ctlops_t *in_args) 1514 { 1515 px_t *px_p = DIP_TO_STATE(dip); 1516 px_pec_t *pec_p = px_p->px_pec_p; 1517 int err = DDI_SUCCESS; 1518 on_trap_data_t otd; 1519 1520 mutex_enter(&pec_p->pec_pokefault_mutex); 1521 pec_p->pec_ontrap_data = &otd; 1522 pec_p->pec_safeacc_type = DDI_FM_ERR_POKE; 1523 1524 /* Set up protected environment. */ 1525 if (!on_trap(&otd, OT_DATA_ACCESS)) { 1526 uintptr_t tramp = otd.ot_trampoline; 1527 1528 otd.ot_trampoline = (uintptr_t)&poke_fault; 1529 err = do_poke(in_args->size, (void *)in_args->dev_addr, 1530 (void *)in_args->host_addr); 1531 otd.ot_trampoline = tramp; 1532 } else 1533 err = DDI_FAILURE; 1534 1535 px_lib_clr_errs(px_p, rdip, in_args->dev_addr); 1536 1537 if (otd.ot_trap & OT_DATA_ACCESS) 1538 err = DDI_FAILURE; 1539 1540 /* Take down protected environment. 
*/ 1541 no_trap(); 1542 1543 pec_p->pec_ontrap_data = NULL; 1544 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1545 mutex_exit(&pec_p->pec_pokefault_mutex); 1546 1547 #ifdef DEBUG 1548 if (err == DDI_FAILURE) 1549 px_pokefault_cnt++; 1550 #endif 1551 return (err); 1552 } 1553 1554 /*ARGSUSED*/ 1555 static int 1556 px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip, 1557 peekpoke_ctlops_t *cautacc_ctlops_arg) 1558 { 1559 size_t size = cautacc_ctlops_arg->size; 1560 uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr; 1561 uintptr_t host_addr = cautacc_ctlops_arg->host_addr; 1562 ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle; 1563 size_t repcount = cautacc_ctlops_arg->repcount; 1564 uint_t flags = cautacc_ctlops_arg->flags; 1565 1566 px_t *px_p = DIP_TO_STATE(dip); 1567 px_pec_t *pec_p = px_p->px_pec_p; 1568 int err = DDI_SUCCESS; 1569 1570 /* 1571 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault 1572 * mutex. 1573 */ 1574 i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1575 1576 pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap; 1577 pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED; 1578 hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED; 1579 1580 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1581 for (; repcount; repcount--) { 1582 switch (size) { 1583 1584 case sizeof (uint8_t): 1585 i_ddi_put8(hp, (uint8_t *)dev_addr, 1586 *(uint8_t *)host_addr); 1587 break; 1588 1589 case sizeof (uint16_t): 1590 i_ddi_put16(hp, (uint16_t *)dev_addr, 1591 *(uint16_t *)host_addr); 1592 break; 1593 1594 case sizeof (uint32_t): 1595 i_ddi_put32(hp, (uint32_t *)dev_addr, 1596 *(uint32_t *)host_addr); 1597 break; 1598 1599 case sizeof (uint64_t): 1600 i_ddi_put64(hp, (uint64_t *)dev_addr, 1601 *(uint64_t *)host_addr); 1602 break; 1603 } 1604 1605 host_addr += size; 1606 1607 if (flags == DDI_DEV_AUTOINCR) 1608 dev_addr += size; 1609 1610 px_lib_clr_errs(px_p, rdip, dev_addr); 1611 1612 if (pec_p->pec_ontrap_data->ot_trap & OT_DATA_ACCESS) { 1613 err = DDI_FAILURE; 1614 #ifdef DEBUG 1615 px_pokefault_cnt++; 1616 #endif 1617 break; 1618 } 1619 } 1620 } 1621 1622 i_ddi_notrap((ddi_acc_handle_t)hp); 1623 pec_p->pec_ontrap_data = NULL; 1624 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1625 i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1626 hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED; 1627 1628 return (err); 1629 } 1630 1631 1632 int 1633 px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip, 1634 peekpoke_ctlops_t *in_args) 1635 { 1636 return (in_args->handle ? 
px_lib_do_caut_put(dip, rdip, in_args) : 1637 px_lib_do_poke(dip, rdip, in_args)); 1638 } 1639 1640 1641 /*ARGSUSED*/ 1642 static int 1643 px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args) 1644 { 1645 px_t *px_p = DIP_TO_STATE(dip); 1646 px_pec_t *pec_p = px_p->px_pec_p; 1647 int err = DDI_SUCCESS; 1648 on_trap_data_t otd; 1649 1650 mutex_enter(&pec_p->pec_pokefault_mutex); 1651 if (px_fm_enter(px_p) != DDI_SUCCESS) 1652 return (DDI_FAILURE); 1653 pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK; 1654 px_fm_exit(px_p); 1655 1656 if (!on_trap(&otd, OT_DATA_ACCESS)) { 1657 uintptr_t tramp = otd.ot_trampoline; 1658 1659 otd.ot_trampoline = (uintptr_t)&peek_fault; 1660 err = do_peek(in_args->size, (void *)in_args->dev_addr, 1661 (void *)in_args->host_addr); 1662 otd.ot_trampoline = tramp; 1663 } else 1664 err = DDI_FAILURE; 1665 1666 no_trap(); 1667 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1668 mutex_exit(&pec_p->pec_pokefault_mutex); 1669 1670 #ifdef DEBUG 1671 if (err == DDI_FAILURE) 1672 px_peekfault_cnt++; 1673 #endif 1674 return (err); 1675 } 1676 1677 1678 static int 1679 px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg) 1680 { 1681 size_t size = cautacc_ctlops_arg->size; 1682 uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr; 1683 uintptr_t host_addr = cautacc_ctlops_arg->host_addr; 1684 ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle; 1685 size_t repcount = cautacc_ctlops_arg->repcount; 1686 uint_t flags = cautacc_ctlops_arg->flags; 1687 1688 px_t *px_p = DIP_TO_STATE(dip); 1689 px_pec_t *pec_p = px_p->px_pec_p; 1690 int err = DDI_SUCCESS; 1691 1692 /* 1693 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault 1694 * mutex. 1695 */ 1696 i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1697 1698 pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap; 1699 pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED; 1700 hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED; 1701 1702 if (repcount == 1) { 1703 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1704 i_ddi_caut_get(size, (void *)dev_addr, 1705 (void *)host_addr); 1706 } else { 1707 int i; 1708 uint8_t *ff_addr = (uint8_t *)host_addr; 1709 for (i = 0; i < size; i++) 1710 *ff_addr++ = 0xff; 1711 1712 err = DDI_FAILURE; 1713 #ifdef DEBUG 1714 px_peekfault_cnt++; 1715 #endif 1716 } 1717 } else { 1718 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1719 for (; repcount; repcount--) { 1720 i_ddi_caut_get(size, (void *)dev_addr, 1721 (void *)host_addr); 1722 1723 host_addr += size; 1724 1725 if (flags == DDI_DEV_AUTOINCR) 1726 dev_addr += size; 1727 } 1728 } else { 1729 err = DDI_FAILURE; 1730 #ifdef DEBUG 1731 px_peekfault_cnt++; 1732 #endif 1733 } 1734 } 1735 1736 i_ddi_notrap((ddi_acc_handle_t)hp); 1737 pec_p->pec_ontrap_data = NULL; 1738 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1739 i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1740 hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED; 1741 1742 return (err); 1743 } 1744 1745 /*ARGSUSED*/ 1746 int 1747 px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip, 1748 peekpoke_ctlops_t *in_args, void *result) 1749 { 1750 result = (void *)in_args->host_addr; 1751 return (in_args->handle ? 
	    px_lib_do_caut_get(dip, in_args) :
	    px_lib_do_peek(dip, in_args));
}

/*
 * implements PPM interface
 */
int
px_lib_pmctl(int cmd, px_t *px_p)
{
	ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ);
	switch (cmd) {
	case PPMREQ_PRE_PWR_OFF:
		/*
		 * Currently there is no device power management for
		 * the root complex (fire). When there is we need to make
		 * sure that it is at full power before trying to send the
		 * PME_Turn_Off message.
		 */
		DBG(DBG_PWR, px_p->px_dip,
		    "ioctl: request to send PME_Turn_Off\n");
		return (px_goto_l23ready(px_p));

	case PPMREQ_PRE_PWR_ON:
		DBG(DBG_PWR, px_p->px_dip, "ioctl: PRE_PWR_ON request\n");
		return (px_pre_pwron_check(px_p));

	case PPMREQ_POST_PWR_ON:
		DBG(DBG_PWR, px_p->px_dip, "ioctl: POST_PWR_ON request\n");
		return (px_goto_l0(px_p));

	default:
		return (DDI_FAILURE);
	}
}

/*
 * sends PME_Turn_Off message to put the link in L2/L3 ready state.
 * called by px_ioctl.
 * returns DDI_SUCCESS or DDI_FAILURE
 * 1. Wait for link to be in L1 state (link status reg)
 * 2. write to PME_Turn_off reg to broadcast
 * 3. set timeout
 * 4. If timeout, return failure.
 * 5. If PM_TO_Ack, wait till link is in L2/L3 ready
 */
static int
px_goto_l23ready(px_t *px_p)
{
	pcie_pwr_t	*pwr_p;
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
	int		ret = DDI_SUCCESS;
	clock_t		end, timeleft;
	int		mutex_held = 1;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	mutex_enter(&pwr_p->pwr_lock);
	mutex_enter(&px_p->px_l23ready_lock);
	/* Clear the PME_To_ACK received flag */
	px_p->px_pm_flags &= ~PX_PMETOACK_RECVD;
	/*
	 * When P25 is the downstream device, after receiving
	 * PME_To_ACK, fire will go to Detect state, which causes
	 * the link down event. Inform FMA that this is expected.
	 * In case of all other cards compliant with the pci express
	 * spec, this will happen when the power is re-applied. FMA
	 * code will clear this flag after one instance of LDN. Since
	 * there will not be a LDN event for the spec compliant cards,
	 * we need to clear the flag after receiving PME_To_ACK.
	 */
	px_p->px_pm_flags |= PX_LDN_EXPECTED;
	if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) {
		ret = DDI_FAILURE;
		goto l23ready_done;
	}
	px_p->px_pm_flags |= PX_PME_TURNOFF_PENDING;

	end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout);
	while (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
		timeleft = cv_timedwait(&px_p->px_l23ready_cv,
		    &px_p->px_l23ready_lock, end);
		/*
		 * if cv_timedwait returns -1, it is either
		 * 1) timed out or
		 * 2) there was a pre-mature wakeup but by the time
		 * cv_timedwait is called again end < lbolt i.e.
		 * end is in the past.
		 * 3) By the time we make first cv_timedwait call,
		 * end < lbolt is true.
		 */
		if (timeleft == -1)
			break;
	}
	if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
		/*
		 * Either timed out or the interrupt didn't get a
		 * chance to grab the mutex and set the flag.
		 * Release the mutex and delay for some time.
		 * This will 1) give a chance for the interrupt to
		 * set the flag 2) create a delay between two
		 * consecutive requests.
		 */
		mutex_exit(&px_p->px_l23ready_lock);
		delay(drv_usectohz(50 * PX_MSEC_TO_USEC));
		mutex_held = 0;
		if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
			ret = DDI_FAILURE;
			DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting"
			    " for PME_TO_ACK\n");
		}
	}
	px_p->px_pm_flags &=
	    ~(PX_PME_TURNOFF_PENDING | PX_PMETOACK_RECVD | PX_LDN_EXPECTED);

l23ready_done:
	if (mutex_held)
		mutex_exit(&px_p->px_l23ready_lock);
	/*
	 * Wait till link is in L1 idle, if sending PME_Turn_Off
	 * was successful.
	 */
	if (ret == DDI_SUCCESS) {
		if (px_link_wait4l1idle(csr_base) != DDI_SUCCESS) {
			DBG(DBG_PWR, px_p->px_dip, " Link is not at L1"
			    " even though we received PME_To_ACK.\n");
			/*
			 * Workaround for hardware bug with P25.
			 * Due to a hardware bug with P25, link state
			 * will be Detect state rather than L1 after
			 * link is transitioned to L23Ready state. Since
			 * we don't know whether link is L23ready state
			 * without Fire's state being L1_idle, we delay
			 * here just to make sure that we wait till link
			 * is transitioned to L23Ready state.
			 */
			delay(drv_usectohz(100 * PX_MSEC_TO_USEC));
		}
		pwr_p->pwr_link_lvl = PM_LEVEL_L3;

	}
	mutex_exit(&pwr_p->pwr_lock);
	return (ret);
}

/*
 * Message interrupt handler intended to be shared for both
 * PME and PME_TO_ACK msg handling, currently only handles
 * PME_To_ACK message.
 */
uint_t
px_pmeq_intr(caddr_t arg)
{
	px_t	*px_p = (px_t *)arg;

	DBG(DBG_PWR, px_p->px_dip, " PME_To_ACK received \n");
	mutex_enter(&px_p->px_l23ready_lock);
	cv_broadcast(&px_p->px_l23ready_cv);
	if (px_p->px_pm_flags & PX_PME_TURNOFF_PENDING) {
		px_p->px_pm_flags |= PX_PMETOACK_RECVD;
	} else {
		/*
		 * This may be the second ack received. If so then,
		 * we should be receiving it during wait4L1 stage.
		 */
		px_p->px_pmetoack_ignored++;
	}
	mutex_exit(&px_p->px_l23ready_lock);
	return (DDI_INTR_CLAIMED);
}

static int
px_pre_pwron_check(px_t *px_p)
{
	pcie_pwr_t	*pwr_p;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	/*
	 * For the spec compliant downstream cards link down
	 * is expected when the device is powered on.
	 */
	px_p->px_pm_flags |= PX_LDN_EXPECTED;
	return (pwr_p->pwr_link_lvl == PM_LEVEL_L3 ? DDI_SUCCESS : DDI_FAILURE);
}

static int
px_goto_l0(px_t *px_p)
{
	pcie_pwr_t	*pwr_p;
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
	int		ret = DDI_SUCCESS;
	uint64_t	time_spent = 0;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	mutex_enter(&pwr_p->pwr_lock);
	/*
	 * The following link retrain activity will cause LDN and LUP events.
	 * Receiving LDN prior to receiving LUP is expected, not an error in
	 * this case. Receiving LUP indicates the link is fully up to support
	 * powering up the downstream device, and of course any further LDN
	 * and LUP outside this context will be an error.
1965 */ 1966 px_p->px_lup_pending = 1; 1967 if (px_link_retrain(csr_base) != DDI_SUCCESS) { 1968 ret = DDI_FAILURE; 1969 goto l0_done; 1970 } 1971 1972 /* LUP event takes the order of 15ms amount of time to occur */ 1973 for (; px_p->px_lup_pending && (time_spent < px_lup_poll_to); 1974 time_spent += px_lup_poll_interval) 1975 drv_usecwait(px_lup_poll_interval); 1976 if (px_p->px_lup_pending) 1977 ret = DDI_FAILURE; 1978 l0_done: 1979 px_enable_detect_quiet(csr_base); 1980 if (ret == DDI_SUCCESS) 1981 pwr_p->pwr_link_lvl = PM_LEVEL_L0; 1982 mutex_exit(&pwr_p->pwr_lock); 1983 return (ret); 1984 } 1985 1986 /* 1987 * Extract the drivers binding name to identify which chip we're binding to. 1988 * Whenever a new bus bridge is created, the driver alias entry should be 1989 * added here to identify the device if needed. If a device isn't added, 1990 * the identity defaults to PX_CHIP_UNIDENTIFIED. 1991 */ 1992 static uint32_t 1993 px_identity_init(px_t *px_p) 1994 { 1995 dev_info_t *dip = px_p->px_dip; 1996 char *name = ddi_binding_name(dip); 1997 uint32_t revision = 0; 1998 1999 revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 2000 "module-revision#", 0); 2001 2002 /* Check for Fire driver binding name */ 2003 if (strcmp(name, "pciex108e,80f0") == 0) { 2004 DBG(DBG_ATTACH, dip, "px_identity_init: %s%d: " 2005 "(FIRE), module-revision %d\n", NAMEINST(dip), 2006 revision); 2007 2008 return ((revision >= FIRE_MOD_REV_20) ? 2009 PX_CHIP_FIRE : PX_CHIP_UNIDENTIFIED); 2010 } 2011 2012 /* Check for Oberon driver binding name */ 2013 if (strcmp(name, "pciex108e,80f8") == 0) { 2014 DBG(DBG_ATTACH, dip, "px_identity_init: %s%d: " 2015 "(OBERON), module-revision %d\n", NAMEINST(dip), 2016 revision); 2017 2018 return (PX_CHIP_OBERON); 2019 } 2020 2021 DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n", 2022 ddi_driver_name(dip), ddi_get_instance(dip), name, revision); 2023 2024 return (PX_CHIP_UNIDENTIFIED); 2025 } 2026 2027 int 2028 px_err_add_intr(px_fault_t *px_fault_p) 2029 { 2030 dev_info_t *dip = px_fault_p->px_fh_dip; 2031 px_t *px_p = DIP_TO_STATE(dip); 2032 2033 VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL, 2034 (intrfunc)px_fault_p->px_err_func, (caddr_t)px_fault_p, 2035 NULL, NULL) == 0); 2036 2037 px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino); 2038 2039 return (DDI_SUCCESS); 2040 } 2041 2042 void 2043 px_err_rem_intr(px_fault_t *px_fault_p) 2044 { 2045 dev_info_t *dip = px_fault_p->px_fh_dip; 2046 px_t *px_p = DIP_TO_STATE(dip); 2047 2048 px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino, 2049 IB_INTR_WAIT); 2050 2051 VERIFY(rem_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL) == 0); 2052 } 2053 2054 /* 2055 * px_cb_intr_redist() - sun4u only, CB interrupt redistribution 2056 */ 2057 void 2058 px_cb_intr_redist(void *arg) 2059 { 2060 px_cb_t *cb_p = (px_cb_t *)arg; 2061 px_cb_list_t *pxl; 2062 px_t *pxp = NULL; 2063 px_fault_t *f_p = NULL; 2064 uint32_t new_cpuid; 2065 intr_valid_state_t enabled = 0; 2066 2067 mutex_enter(&cb_p->cb_mutex); 2068 2069 pxl = cb_p->pxl; 2070 if (!pxl) 2071 goto cb_done; 2072 2073 pxp = pxl->pxp; 2074 f_p = &pxp->px_cb_fault; 2075 for (; pxl && (f_p->px_fh_sysino != cb_p->sysino); ) { 2076 pxl = pxl->next; 2077 pxp = pxl->pxp; 2078 f_p = &pxp->px_cb_fault; 2079 } 2080 if (pxl == NULL) 2081 goto cb_done; 2082 2083 new_cpuid = intr_dist_cpuid(); 2084 if (new_cpuid == cb_p->cpuid) 2085 goto cb_done; 2086 2087 if ((px_lib_intr_getvalid(pxp->px_dip, f_p->px_fh_sysino, &enabled) 2088 != DDI_SUCCESS) 
|| !enabled) { 2089 DBG(DBG_IB, pxp->px_dip, "px_cb_intr_redist: CB not enabled, " 2090 "sysino(0x%x)\n", f_p->px_fh_sysino); 2091 goto cb_done; 2092 } 2093 2094 PX_INTR_DISABLE(pxp->px_dip, f_p->px_fh_sysino); 2095 2096 cb_p->cpuid = new_cpuid; 2097 cb_p->sysino = f_p->px_fh_sysino; 2098 PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid); 2099 2100 cb_done: 2101 mutex_exit(&cb_p->cb_mutex); 2102 } 2103 2104 /* 2105 * px_cb_add_intr() - Called from attach(9E) to create the CB if it does 2106 * not yet exist, to always add the CB interrupt vector, but to enable it only once. 2107 */ 2108 int 2109 px_cb_add_intr(px_fault_t *fault_p) 2110 { 2111 px_t *px_p = DIP_TO_STATE(fault_p->px_fh_dip); 2112 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 2113 px_cb_t *cb_p = (px_cb_t *)px_get_cb(fault_p->px_fh_dip); 2114 px_cb_list_t *pxl, *pxl_new; 2115 boolean_t is_proxy = B_FALSE; 2116 2117 /* create cb */ 2118 if (cb_p == NULL) { 2119 cb_p = kmem_zalloc(sizeof (px_cb_t), KM_SLEEP); 2120 2121 mutex_init(&cb_p->cb_mutex, NULL, MUTEX_DRIVER, 2122 (void *) ipltospl(FM_ERR_PIL)); 2123 2124 cb_p->px_cb_func = px_cb_intr; 2125 pxu_p->px_cb_p = cb_p; 2126 px_set_cb(fault_p->px_fh_dip, (uint64_t)cb_p); 2127 2128 /* px_lib_dev_init allows only FIRE and OBERON */ 2129 px_err_reg_enable( 2130 (pxu_p->chip_type == PX_CHIP_FIRE) ? 2131 PX_ERR_JBC : PX_ERR_UBC, 2132 pxu_p->px_address[PX_REG_XBC]); 2133 } else 2134 pxu_p->px_cb_p = cb_p; 2135 2136 /* register cb interrupt */ 2137 VERIFY(add_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL, 2138 (intrfunc)cb_p->px_cb_func, (caddr_t)cb_p, NULL, NULL) == 0); 2139 2140 2141 /* update cb list */ 2142 mutex_enter(&cb_p->cb_mutex); 2143 if (cb_p->pxl == NULL) { 2144 is_proxy = B_TRUE; 2145 pxl = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP); 2146 pxl->pxp = px_p; 2147 cb_p->pxl = pxl; 2148 cb_p->sysino = fault_p->px_fh_sysino; 2149 cb_p->cpuid = intr_dist_cpuid(); 2150 } else { 2151 /* 2152 * Find the last pxl, or 2153 * stop short on encountering a redundant entry, or 2154 * both. 2155 */ 2156 pxl = cb_p->pxl; 2157 for (; !(pxl->pxp == px_p) && pxl->next; pxl = pxl->next) {}; 2158 ASSERT(pxl->pxp != px_p); 2159 2160 /* add to linked list */ 2161 pxl_new = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP); 2162 pxl_new->pxp = px_p; 2163 pxl->next = pxl_new; 2164 } 2165 cb_p->attachcnt++; 2166 mutex_exit(&cb_p->cb_mutex); 2167 2168 if (is_proxy) { 2169 /* add to interrupt redistribution list */ 2170 intr_dist_add(px_cb_intr_redist, cb_p); 2171 2172 /* enable cb hw interrupt */ 2173 px_ib_intr_enable(px_p, cb_p->cpuid, fault_p->px_intr_ino); 2174 } 2175 2176 return (DDI_SUCCESS); 2177 } 2178 2179 /* 2180 * px_cb_rem_intr() - Called from detach(9E) to remove this px's CB 2181 * interrupt vector, to shift the proxy role to the next available px, 2182 * or to disable the CB interrupt when this is the last px.
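 * The sequence below: unlink this px from the CB list; if this px currently owns the proxy sysino, disable the CB interrupt; remove this px's interrupt vector; then either hand the proxy role to the next px on the list or, when the list becomes empty, tear the CB down completely.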
2183 */ 2184 void 2185 px_cb_rem_intr(px_fault_t *fault_p) 2186 { 2187 px_t *px_p = DIP_TO_STATE(fault_p->px_fh_dip), *pxp; 2188 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 2189 px_cb_t *cb_p = PX2CB(px_p); 2190 px_cb_list_t *pxl, *prev; 2191 px_fault_t *f_p; 2192 2193 ASSERT(cb_p->pxl); 2194 2195 /* find and remove this px, and update cb list */ 2196 mutex_enter(&cb_p->cb_mutex); 2197 2198 pxl = cb_p->pxl; 2199 if (pxl->pxp == px_p) { 2200 cb_p->pxl = pxl->next; 2201 } else { 2202 prev = pxl; 2203 pxl = pxl->next; 2204 for (; pxl && (pxl->pxp != px_p); prev = pxl, pxl = pxl->next) { 2205 }; 2206 if (!pxl) { 2207 cmn_err(CE_WARN, "px_cb_rem_intr: can't find px_p 0x%p " 2208 "in registered CB list.", (void *)px_p); 2209 mutex_exit(&cb_p->cb_mutex); 2210 return; 2211 } 2212 prev->next = pxl->next; 2213 } 2214 pxu_p->px_cb_p = NULL; 2215 cb_p->attachcnt--; 2216 kmem_free(pxl, sizeof (px_cb_list_t)); 2217 mutex_exit(&cb_p->cb_mutex); 2218 2219 /* disable cb hw interrupt */ 2220 if (fault_p->px_fh_sysino == cb_p->sysino) 2221 px_ib_intr_disable(px_p->px_ib_p, fault_p->px_intr_ino, 2222 IB_INTR_WAIT); 2223 2224 /* if last px, remove from interrupt redistribution list */ 2225 if (cb_p->pxl == NULL) 2226 intr_dist_rem(px_cb_intr_redist, cb_p); 2227 2228 /* de-register interrupt */ 2229 VERIFY(rem_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL) == 0); 2230 2231 /* if not last px, assign next px to manage cb */ 2232 mutex_enter(&cb_p->cb_mutex); 2233 if (cb_p->pxl) { 2234 if (fault_p->px_fh_sysino == cb_p->sysino) { 2235 pxp = cb_p->pxl->pxp; 2236 f_p = &pxp->px_cb_fault; 2237 cb_p->sysino = f_p->px_fh_sysino; 2238 2239 PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid); 2240 (void) px_lib_intr_setstate(pxp->px_dip, cb_p->sysino, 2241 INTR_IDLE_STATE); 2242 } 2243 mutex_exit(&cb_p->cb_mutex); 2244 return; 2245 } 2246 2247 /* clean up after the last px */ 2248 mutex_exit(&cb_p->cb_mutex); 2249 2250 /* px_lib_dev_init allows only FIRE and OBERON */ 2251 px_err_reg_disable( 2252 (pxu_p->chip_type == PX_CHIP_FIRE) ? PX_ERR_JBC : PX_ERR_UBC, 2253 pxu_p->px_address[PX_REG_XBC]); 2254 2255 mutex_destroy(&cb_p->cb_mutex); 2256 px_set_cb(fault_p->px_fh_dip, 0ull); 2257 kmem_free(cb_p, sizeof (px_cb_t)); 2258 } 2259 2260 /* 2261 * px_cb_intr() - sun4u only, CB interrupt dispatcher 2262 */ 2263 uint_t 2264 px_cb_intr(caddr_t arg) 2265 { 2266 px_cb_t *cb_p = (px_cb_t *)arg; 2267 px_t *pxp; 2268 px_fault_t *f_p; 2269 int ret; 2270 2271 mutex_enter(&cb_p->cb_mutex); 2272 2273 if (!cb_p->pxl) { 2274 mutex_exit(&cb_p->cb_mutex); 2275 return (DDI_INTR_UNCLAIMED); 2276 } 2277 2278 pxp = cb_p->pxl->pxp; 2279 f_p = &pxp->px_cb_fault; 2280 2281 ret = f_p->px_err_func((caddr_t)f_p); 2282 2283 mutex_exit(&cb_p->cb_mutex); 2284 return (ret); 2285 } 2286 2287 #ifdef FMA 2288 void 2289 px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status) 2290 { 2291 /* populate the rc_status by reading the registers - TBD */ 2292 } 2293 #endif /* FMA */ 2294 2295 /* 2296 * Unprotected raw reads/writes of fabric device's config space. 2297 * Only used for temporary PCI-E Fabric Error Handling. 2298 */ 2299 uint32_t 2300 px_fab_get(px_t *px_p, pcie_req_id_t bdf, uint16_t offset) 2301 { 2302 px_ranges_t *rp = px_p->px_ranges_p; 2303 uint64_t range_prop, base_addr; 2304 int bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG); 2305 uint32_t val; 2306 2307 /* Get Fire's Physical Base Address */ 2308 range_prop = px_get_range_prop(px_p, rp, bank); 2309 2310 /* Get config space first. 
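 * The config-space physical address is formed below by adding the BDF/register-offset encoding from PX_BDF_TO_CFGADDR() to the ranges-derived base.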
*/ 2311 base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset); 2312 2313 val = ldphysio(base_addr); 2314 2315 return (LE_32(val)); 2316 } 2317 2318 void 2319 px_fab_set(px_t *px_p, pcie_req_id_t bdf, uint16_t offset, 2320 uint32_t val) { 2321 px_ranges_t *rp = px_p->px_ranges_p; 2322 uint64_t range_prop, base_addr; 2323 int bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG); 2324 2325 /* Get Fire's Physical Base Address */ 2326 range_prop = px_get_range_prop(px_p, rp, bank); 2327 2328 /* Get config space first. */ 2329 base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset); 2330 2331 stphysio(base_addr, LE_32(val)); 2332 } 2333 2334 /* 2335 * cpr callback 2336 * 2337 * disable fabric error msg interrupt prior to suspending 2338 * all device drivers; re-enable fabric error msg interrupt 2339 * after all devices are resumed. 2340 */ 2341 static boolean_t 2342 px_cpr_callb(void *arg, int code) 2343 { 2344 px_t *px_p = (px_t *)arg; 2345 px_ib_t *ib_p = px_p->px_ib_p; 2346 px_pec_t *pec_p = px_p->px_pec_p; 2347 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 2348 caddr_t csr_base; 2349 devino_t ce_ino, nf_ino, f_ino; 2350 px_ino_t *ce_ino_p, *nf_ino_p, *f_ino_p; 2351 uint64_t imu_log_enable, imu_intr_enable; 2352 uint64_t imu_log_mask, imu_intr_mask; 2353 2354 ce_ino = px_msiqid_to_devino(px_p, pec_p->pec_corr_msg_msiq_id); 2355 nf_ino = px_msiqid_to_devino(px_p, pec_p->pec_non_fatal_msg_msiq_id); 2356 f_ino = px_msiqid_to_devino(px_p, pec_p->pec_fatal_msg_msiq_id); 2357 csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR]; 2358 2359 imu_log_enable = CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE); 2360 imu_intr_enable = CSR_XR(csr_base, IMU_INTERRUPT_ENABLE); 2361 2362 imu_log_mask = BITMASK(IMU_ERROR_LOG_ENABLE_FATAL_MES_NOT_EN_LOG_EN) | 2363 BITMASK(IMU_ERROR_LOG_ENABLE_NONFATAL_MES_NOT_EN_LOG_EN) | 2364 BITMASK(IMU_ERROR_LOG_ENABLE_COR_MES_NOT_EN_LOG_EN); 2365 2366 imu_intr_mask = 2367 BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_S_INT_EN) | 2368 BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_S_INT_EN) | 2369 BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_S_INT_EN) | 2370 BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_P_INT_EN) | 2371 BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_P_INT_EN) | 2372 BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_P_INT_EN); 2373 2374 switch (code) { 2375 case CB_CODE_CPR_CHKPT: 2376 /* disable imu rbne on corr/nonfatal/fatal errors */ 2377 CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, 2378 imu_log_enable & (~imu_log_mask)); 2379 2380 CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, 2381 imu_intr_enable & (~imu_intr_mask)); 2382 2383 /* disable CORR intr mapping */ 2384 px_ib_intr_disable(ib_p, ce_ino, IB_INTR_NOWAIT); 2385 2386 /* disable NON FATAL intr mapping */ 2387 px_ib_intr_disable(ib_p, nf_ino, IB_INTR_NOWAIT); 2388 2389 /* disable FATAL intr mapping */ 2390 px_ib_intr_disable(ib_p, f_ino, IB_INTR_NOWAIT); 2391 2392 break; 2393 2394 case CB_CODE_CPR_RESUME: 2395 pxu_p->cpr_flag = PX_NOT_CPR; 2396 mutex_enter(&ib_p->ib_ino_lst_mutex); 2397 2398 ce_ino_p = px_ib_locate_ino(ib_p, ce_ino); 2399 nf_ino_p = px_ib_locate_ino(ib_p, nf_ino); 2400 f_ino_p = px_ib_locate_ino(ib_p, f_ino); 2401 2402 /* enable CORR intr mapping */ 2403 if (ce_ino_p) 2404 px_ib_intr_enable(px_p, ce_ino_p->ino_cpuid, ce_ino); 2405 else 2406 cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to " 2407 "reenable PCIe Correctable msg intr.\n"); 2408 2409 /* enable NON FATAL intr mapping */ 2410 if (nf_ino_p) 2411 px_ib_intr_enable(px_p, nf_ino_p->ino_cpuid, nf_ino); 2412 else 2413 cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to " 2414 
"reenable PCIe Non Fatal msg intr.\n"); 2415 2416 /* enable FATAL intr mapping */ 2417 if (f_ino_p) 2418 px_ib_intr_enable(px_p, f_ino_p->ino_cpuid, f_ino); 2419 else 2420 cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to " 2421 "reenable PCIe Fatal msg intr.\n"); 2422 2423 mutex_exit(&ib_p->ib_ino_lst_mutex); 2424 2425 /* enable corr/nonfatal/fatal not enable error */ 2426 CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, (imu_log_enable | 2427 (imu_log_mask & px_imu_log_mask))); 2428 CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, (imu_intr_enable | 2429 (imu_intr_mask & px_imu_intr_mask))); 2430 2431 break; 2432 } 2433 2434 return (B_TRUE); 2435 } 2436 2437 uint64_t 2438 px_get_rng_parent_hi_mask(px_t *px_p) 2439 { 2440 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 2441 uint64_t mask; 2442 2443 switch (PX_CHIP_TYPE(pxu_p)) { 2444 case PX_CHIP_OBERON: 2445 mask = OBERON_RANGE_PROP_MASK; 2446 break; 2447 case PX_CHIP_FIRE: 2448 mask = PX_RANGE_PROP_MASK; 2449 break; 2450 default: 2451 mask = PX_RANGE_PROP_MASK; 2452 } 2453 2454 return (mask); 2455 } 2456 2457 /* 2458 * fetch chip's range propery's value 2459 */ 2460 uint64_t 2461 px_get_range_prop(px_t *px_p, px_ranges_t *rp, int bank) 2462 { 2463 uint64_t mask, range_prop; 2464 2465 mask = px_get_rng_parent_hi_mask(px_p); 2466 range_prop = (((uint64_t)(rp[bank].parent_high & mask)) << 32) | 2467 rp[bank].parent_low; 2468 2469 return (range_prop); 2470 } 2471 2472 /* 2473 * add cpr callback 2474 */ 2475 void 2476 px_cpr_add_callb(px_t *px_p) 2477 { 2478 px_p->px_cprcb_id = callb_add(px_cpr_callb, (void *)px_p, 2479 CB_CL_CPR_POST_USER, "px_cpr"); 2480 } 2481 2482 /* 2483 * remove cpr callback 2484 */ 2485 void 2486 px_cpr_rem_callb(px_t *px_p) 2487 { 2488 (void) callb_delete(px_p->px_cprcb_id); 2489 } 2490 2491 /*ARGSUSED*/ 2492 static uint_t 2493 px_hp_intr(caddr_t arg1, caddr_t arg2) 2494 { 2495 px_t *px_p = (px_t *)arg1; 2496 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 2497 int rval; 2498 2499 rval = pciehpc_intr(px_p->px_dip); 2500 2501 #ifdef DEBUG 2502 if (rval == DDI_INTR_UNCLAIMED) 2503 cmn_err(CE_WARN, "%s%d: UNCLAIMED intr\n", 2504 ddi_driver_name(px_p->px_dip), 2505 ddi_get_instance(px_p->px_dip)); 2506 #endif 2507 2508 /* Set the interrupt state to idle */ 2509 if (px_lib_intr_setstate(px_p->px_dip, 2510 pxu_p->hp_sysino, INTR_IDLE_STATE) != DDI_SUCCESS) 2511 return (DDI_INTR_UNCLAIMED); 2512 2513 return (rval); 2514 } 2515 2516 int 2517 px_lib_hotplug_init(dev_info_t *dip, void *arg) 2518 { 2519 px_t *px_p = DIP_TO_STATE(dip); 2520 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 2521 uint64_t ret; 2522 2523 if ((ret = hvio_hotplug_init(dip, arg)) == DDI_SUCCESS) { 2524 if (px_lib_intr_devino_to_sysino(px_p->px_dip, 2525 px_p->px_inos[PX_INTR_HOTPLUG], &pxu_p->hp_sysino) != 2526 DDI_SUCCESS) { 2527 #ifdef DEBUG 2528 cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n", 2529 ddi_driver_name(px_p->px_dip), 2530 ddi_get_instance(px_p->px_dip)); 2531 #endif 2532 return (DDI_FAILURE); 2533 } 2534 2535 VERIFY(add_ivintr(pxu_p->hp_sysino, PX_PCIEHP_PIL, 2536 (intrfunc)px_hp_intr, (caddr_t)px_p, NULL, NULL) == 0); 2537 2538 px_ib_intr_enable(px_p, intr_dist_cpuid(), 2539 px_p->px_inos[PX_INTR_HOTPLUG]); 2540 } 2541 2542 return (ret); 2543 } 2544 2545 void 2546 px_lib_hotplug_uninit(dev_info_t *dip) 2547 { 2548 if (hvio_hotplug_uninit(dip) == DDI_SUCCESS) { 2549 px_t *px_p = DIP_TO_STATE(dip); 2550 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 2551 2552 px_ib_intr_disable(px_p->px_ib_p, 2553 px_p->px_inos[PX_INTR_HOTPLUG], IB_INTR_WAIT); 2554 2555 
VERIFY(rem_ivintr(pxu_p->hp_sysino, PX_PCIEHP_PIL) == 0); 2556 } 2557 } 2558 2559 /* 2560 * px_hp_intr_redist() - sun4u only, HP interrupt redistribution 2561 */ 2562 void 2563 px_hp_intr_redist(px_t *px_p) 2564 { 2565 if (px_p && (px_p->px_dev_caps & PX_HOTPLUG_CAPABLE)) { 2566 px_ib_intr_dist_en(px_p->px_dip, intr_dist_cpuid(), 2567 px_p->px_inos[PX_INTR_HOTPLUG], B_FALSE); 2568 } 2569 } 2570 2571 boolean_t 2572 px_lib_is_in_drain_state(px_t *px_p) 2573 { 2574 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 2575 caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR]; 2576 uint64_t drain_status; 2577 2578 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) { 2579 drain_status = CSR_BR(csr_base, DRAIN_CONTROL_STATUS, DRAIN); 2580 } else { 2581 drain_status = CSR_BR(csr_base, TLU_STATUS, DRAIN); 2582 } 2583 2584 return (drain_status); 2585 } 2586 2587 pcie_req_id_t 2588 px_lib_get_bdf(px_t *px_p) 2589 { 2590 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 2591 caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR]; 2592 pcie_req_id_t bdf; 2593 2594 bdf = CSR_BR(csr_base, DMC_PCI_EXPRESS_CONFIGURATION, REQ_ID); 2595 2596 return (bdf); 2597 } 2598
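/*
 * Illustrative sketch only (not part of the driver and not compiled): one way
 * the raw config-space accessors above, px_fab_get()/px_fab_set(), together
 * with px_lib_get_bdf(), could be exercised. The helper name, the
 * caller-supplied bdf, and the use of the standard PCI config-space offsets
 * (dword 0 = vendor/device ID, dword at 0x4 = command/status) are assumptions
 * made for this example, not part of this file.
 */
#if 0
static void
px_fab_dump_ids(px_t *px_p, pcie_req_id_t bdf)
{
	uint32_t id, cmd_stat;

	/* dword 0 of config space: device ID (31:16), vendor ID (15:0) */
	id = px_fab_get(px_p, bdf, 0x0);

	cmn_err(CE_CONT, "bdf 0x%x: vendor 0x%x device 0x%x\n",
	    bdf, id & 0xffff, (id >> 16) & 0xffff);

	/*
	 * Read the command/status dword and write it back unchanged,
	 * purely to show px_fab_set()'s calling convention.
	 */
	cmd_stat = px_fab_get(px_p, bdf, 0x4);
	px_fab_set(px_p, bdf, 0x4, cmd_stat);
}
#endif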