1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include <sys/types.h> 27 #include <sys/kmem.h> 28 #include <sys/conf.h> 29 #include <sys/ddi.h> 30 #include <sys/sunddi.h> 31 #include <sys/sunndi.h> 32 #include <sys/fm/protocol.h> 33 #include <sys/fm/util.h> 34 #include <sys/modctl.h> 35 #include <sys/disp.h> 36 #include <sys/stat.h> 37 #include <sys/ddi_impldefs.h> 38 #include <sys/vmem.h> 39 #include <sys/iommutsb.h> 40 #include <sys/cpuvar.h> 41 #include <sys/ivintr.h> 42 #include <sys/byteorder.h> 43 #include <sys/spl.h> 44 #include <px_obj.h> 45 #include <sys/pcie_pwr.h> 46 #include "px_tools_var.h" 47 #include <px_regs.h> 48 #include <px_csr.h> 49 #include <sys/machsystm.h> 50 #include "px_lib4u.h" 51 #include "px_err.h" 52 #include "oberon_regs.h" 53 #include <sys/hotplug/pci/pcie_hp.h> 54 55 #pragma weak jbus_stst_order 56 57 extern void jbus_stst_order(); 58 59 ulong_t px_mmu_dvma_end = 0xfffffffful; 60 uint_t px_ranges_phi_mask = 0xfffffffful; 61 uint64_t *px_oberon_ubc_scratch_regs; 62 uint64_t px_paddr_mask; 63 64 static int px_goto_l23ready(px_t *px_p); 65 static int px_goto_l0(px_t *px_p); 66 static int px_pre_pwron_check(px_t *px_p); 67 static uint32_t px_identity_init(px_t *px_p); 68 static boolean_t px_cpr_callb(void *arg, int code); 69 static uint_t px_cb_intr(caddr_t arg); 70 71 /* 72 * ACKNAK Latency Threshold Table. 73 * See Fire PRM 2.0 section 1.2.12.2, table 1-17. 74 */ 75 int px_acknak_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = { 76 {0xED, 0x49, 0x43, 0x30}, 77 {0x1A0, 0x76, 0x6B, 0x48}, 78 {0x22F, 0x9A, 0x56, 0x56}, 79 {0x42F, 0x11A, 0x96, 0x96}, 80 {0x82F, 0x21A, 0x116, 0x116}, 81 {0x102F, 0x41A, 0x216, 0x216} 82 }; 83 84 /* 85 * TxLink Replay Timer Latency Table 86 * See Fire PRM 2.0 sections 1.2.12.3, table 1-18. 87 */ 88 int px_replay_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE] = { 89 {0x379, 0x112, 0xFC, 0xB4}, 90 {0x618, 0x1BA, 0x192, 0x10E}, 91 {0x831, 0x242, 0x143, 0x143}, 92 {0xFB1, 0x422, 0x233, 0x233}, 93 {0x1EB0, 0x7E1, 0x412, 0x412}, 94 {0x3CB0, 0xF61, 0x7D2, 0x7D2} 95 }; 96 /* 97 * px_lib_map_registers 98 * 99 * This function is called from the attach routine to map the registers 100 * accessed by this driver. 
101 * 102 * used by: px_attach() 103 * 104 * return value: DDI_FAILURE on failure 105 */ 106 int 107 px_lib_map_regs(pxu_t *pxu_p, dev_info_t *dip) 108 { 109 ddi_device_acc_attr_t attr; 110 px_reg_bank_t reg_bank = PX_REG_CSR; 111 112 DBG(DBG_ATTACH, dip, "px_lib_map_regs: pxu_p:0x%p, dip 0x%p\n", 113 pxu_p, dip); 114 115 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 116 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 117 attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC; 118 119 /* 120 * PCI CSR Base 121 */ 122 if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank], 123 0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) { 124 goto fail; 125 } 126 127 reg_bank++; 128 129 /* 130 * XBUS CSR Base 131 */ 132 if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank], 133 0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) { 134 goto fail; 135 } 136 137 pxu_p->px_address[reg_bank] -= FIRE_CONTROL_STATUS; 138 139 done: 140 for (; reg_bank >= PX_REG_CSR; reg_bank--) { 141 DBG(DBG_ATTACH, dip, "reg_bank 0x%x address 0x%p\n", 142 reg_bank, pxu_p->px_address[reg_bank]); 143 } 144 145 return (DDI_SUCCESS); 146 147 fail: 148 cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n", 149 ddi_driver_name(dip), ddi_get_instance(dip), reg_bank); 150 151 for (reg_bank--; reg_bank >= PX_REG_CSR; reg_bank--) { 152 pxu_p->px_address[reg_bank] = NULL; 153 ddi_regs_map_free(&pxu_p->px_ac[reg_bank]); 154 } 155 156 return (DDI_FAILURE); 157 } 158 159 /* 160 * px_lib_unmap_regs: 161 * 162 * This routine unmaps the registers mapped by map_px_registers. 163 * 164 * used by: px_detach(), and error conditions in px_attach() 165 * 166 * return value: none 167 */ 168 void 169 px_lib_unmap_regs(pxu_t *pxu_p) 170 { 171 int i; 172 173 for (i = 0; i < PX_REG_MAX; i++) { 174 if (pxu_p->px_ac[i]) 175 ddi_regs_map_free(&pxu_p->px_ac[i]); 176 } 177 } 178 179 int 180 px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl) 181 { 182 183 caddr_t xbc_csr_base, csr_base; 184 px_dvma_range_prop_t px_dvma_range; 185 pxu_t *pxu_p; 186 uint8_t chip_mask; 187 px_t *px_p = DIP_TO_STATE(dip); 188 px_chip_type_t chip_type = px_identity_init(px_p); 189 190 DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p", dip); 191 192 if (chip_type == PX_CHIP_UNIDENTIFIED) { 193 cmn_err(CE_WARN, "%s%d: Unrecognized Hardware Version\n", 194 NAMEINST(dip)); 195 return (DDI_FAILURE); 196 } 197 198 chip_mask = BITMASK(chip_type); 199 px_paddr_mask = (chip_type == PX_CHIP_FIRE) ? MMU_FIRE_PADDR_MASK : 200 MMU_OBERON_PADDR_MASK; 201 202 /* 203 * Allocate platform specific structure and link it to 204 * the px state structure. 205 */ 206 pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP); 207 pxu_p->chip_type = chip_type; 208 pxu_p->portid = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 209 "portid", -1); 210 211 /* Map in the registers */ 212 if (px_lib_map_regs(pxu_p, dip) == DDI_FAILURE) { 213 kmem_free(pxu_p, sizeof (pxu_t)); 214 215 return (DDI_FAILURE); 216 } 217 218 xbc_csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC]; 219 csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR]; 220 221 pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid); 222 pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie); 223 pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie); 224 225 pxu_p->tsb_paddr = va_to_pa(pxu_p->tsb_vaddr); 226 227 /* 228 * Create "virtual-dma" property to support child devices 229 * needing to know DVMA range. 
 */
	px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1
	    - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT);
	px_dvma_range.dvma_len = (uint32_t)
	    px_mmu_dvma_end - px_dvma_range.dvma_base + 1;

	(void) ddi_prop_update_int_array(DDI_DEV_T_NONE, dip,
	    "virtual-dma", (int *)&px_dvma_range,
	    sizeof (px_dvma_range_prop_t) / sizeof (int));
	/*
	 * Initialize all Fire hardware-specific blocks.
	 */
	hvio_cb_init(xbc_csr_base, pxu_p);
	hvio_ib_init(csr_base, pxu_p);
	hvio_pec_init(csr_base, pxu_p);
	hvio_mmu_init(csr_base, pxu_p);

	px_p->px_plat_p = (void *)pxu_p;

	/*
	 * Initialize all the interrupt handlers
	 */
	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		/*
		 * Oberon hotplug uses the SPARE3 field in the ILU Error Log
		 * Enable register to indicate the status of leaf reset.
		 * Preserve the value of this bit and keep it in
		 * px_ilu_log_mask so the mask reflects the state of the bit.
		 */
		if (CSR_BR(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3))
			px_ilu_log_mask |= (1ull <<
			    ILU_ERROR_LOG_ENABLE_SPARE3);
		else
			px_ilu_log_mask &= ~(1ull <<
			    ILU_ERROR_LOG_ENABLE_SPARE3);

		px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_ENABLE);
		break;

	case PX_CHIP_FIRE:
		px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_ENABLE);
		break;

	default:
		cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	/* Initialize device handle */
	*dev_hdl = (devhandle_t)csr_base;

	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl);

	return (DDI_SUCCESS);
}

int
px_lib_dev_fini(dev_info_t *dip)
{
	caddr_t csr_base;
	uint8_t chip_mask;
	px_t *px_p = DIP_TO_STATE(dip);
	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;

	DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip);

	/*
	 * Deinitialize all the interrupt handlers
	 */
	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
	case PX_CHIP_FIRE:
		chip_mask = BITMASK(PX_CHIP_TYPE(pxu_p));
		csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
		px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_DISABLE);
		break;

	default:
		cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	iommu_tsb_free(pxu_p->tsb_cookie);

	px_lib_unmap_regs((pxu_t *)px_p->px_plat_p);
	kmem_free(px_p->px_plat_p, sizeof (pxu_t));
	px_p->px_plat_p = NULL;
	(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "virtual-dma");

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino,
    sysino_t *sysino)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p "
	    "devino 0x%x\n", dip, devino);

	if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip),
	    pxu_p, devino, sysino)) != H_EOK) {
		DBG(DBG_LIB_INT, dip,
		    "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n",
	    *sysino);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino,
    intr_valid_state_t *intr_valid_state)
{
	uint64_t	ret;

DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n", 358 dip, sysino); 359 360 if ((ret = hvio_intr_getvalid(DIP_TO_HANDLE(dip), 361 sysino, intr_valid_state)) != H_EOK) { 362 DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n", 363 ret); 364 return (DDI_FAILURE); 365 } 366 367 DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n", 368 *intr_valid_state); 369 370 return (DDI_SUCCESS); 371 } 372 373 /*ARGSUSED*/ 374 int 375 px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino, 376 intr_valid_state_t intr_valid_state) 377 { 378 uint64_t ret; 379 380 DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx " 381 "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state); 382 383 if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip), 384 sysino, intr_valid_state)) != H_EOK) { 385 DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n", 386 ret); 387 return (DDI_FAILURE); 388 } 389 390 return (DDI_SUCCESS); 391 } 392 393 /*ARGSUSED*/ 394 int 395 px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino, 396 intr_state_t *intr_state) 397 { 398 uint64_t ret; 399 400 DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n", 401 dip, sysino); 402 403 if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip), 404 sysino, intr_state)) != H_EOK) { 405 DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n", 406 ret); 407 return (DDI_FAILURE); 408 } 409 410 DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n", 411 *intr_state); 412 413 return (DDI_SUCCESS); 414 } 415 416 /*ARGSUSED*/ 417 int 418 px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino, 419 intr_state_t intr_state) 420 { 421 uint64_t ret; 422 423 DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx " 424 "intr_state 0x%x\n", dip, sysino, intr_state); 425 426 if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip), 427 sysino, intr_state)) != H_EOK) { 428 DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n", 429 ret); 430 return (DDI_FAILURE); 431 } 432 433 return (DDI_SUCCESS); 434 } 435 436 /*ARGSUSED*/ 437 int 438 px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid) 439 { 440 px_t *px_p = DIP_TO_STATE(dip); 441 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 442 uint64_t ret; 443 444 DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n", 445 dip, sysino); 446 447 if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip), pxu_p, 448 sysino, cpuid)) != H_EOK) { 449 DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n", 450 ret); 451 return (DDI_FAILURE); 452 } 453 454 DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", cpuid); 455 456 return (DDI_SUCCESS); 457 } 458 459 /*ARGSUSED*/ 460 int 461 px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid) 462 { 463 px_t *px_p = DIP_TO_STATE(dip); 464 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 465 uint64_t ret; 466 467 DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx " 468 "cpuid 0x%x\n", dip, sysino, cpuid); 469 470 if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip), pxu_p, 471 sysino, cpuid)) != H_EOK) { 472 DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n", 473 ret); 474 return (DDI_FAILURE); 475 } 476 477 return (DDI_SUCCESS); 478 } 479 480 /*ARGSUSED*/ 481 int 482 px_lib_intr_reset(dev_info_t *dip) 483 { 484 devino_t ino; 485 sysino_t sysino; 486 487 DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip); 488 489 /* Reset all Interrupts */ 490 for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) { 491 if 
(px_lib_intr_devino_to_sysino(dip, ino, 492 &sysino) != DDI_SUCCESS) 493 return (BF_FATAL); 494 495 if (px_lib_intr_setstate(dip, sysino, 496 INTR_IDLE_STATE) != DDI_SUCCESS) 497 return (BF_FATAL); 498 } 499 500 return (BF_NONE); 501 } 502 503 /*ARGSUSED*/ 504 int 505 px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages, 506 io_attributes_t attr, void *addr, size_t pfn_index, int flags) 507 { 508 px_t *px_p = DIP_TO_STATE(dip); 509 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 510 uint64_t ret; 511 512 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx " 513 "pages 0x%x attr 0x%llx addr 0x%p pfn_index 0x%llx flags 0x%x\n", 514 dip, tsbid, pages, attr, addr, pfn_index, flags); 515 516 if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages, 517 attr, addr, pfn_index, flags)) != H_EOK) { 518 DBG(DBG_LIB_DMA, dip, 519 "px_lib_iommu_map failed, ret 0x%lx\n", ret); 520 return (DDI_FAILURE); 521 } 522 523 return (DDI_SUCCESS); 524 } 525 526 /*ARGSUSED*/ 527 int 528 px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages) 529 { 530 px_t *px_p = DIP_TO_STATE(dip); 531 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 532 uint64_t ret; 533 534 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx " 535 "pages 0x%x\n", dip, tsbid, pages); 536 537 if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, tsbid, pages)) 538 != H_EOK) { 539 DBG(DBG_LIB_DMA, dip, 540 "px_lib_iommu_demap failed, ret 0x%lx\n", ret); 541 542 return (DDI_FAILURE); 543 } 544 545 return (DDI_SUCCESS); 546 } 547 548 /*ARGSUSED*/ 549 int 550 px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, io_attributes_t *attr_p, 551 r_addr_t *r_addr_p) 552 { 553 px_t *px_p = DIP_TO_STATE(dip); 554 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 555 uint64_t ret; 556 557 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n", 558 dip, tsbid); 559 560 if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid, 561 attr_p, r_addr_p)) != H_EOK) { 562 DBG(DBG_LIB_DMA, dip, 563 "hvio_iommu_getmap failed, ret 0x%lx\n", ret); 564 565 return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING:DDI_FAILURE); 566 } 567 568 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%llx " 569 "r_addr 0x%llx\n", *attr_p, *r_addr_p); 570 571 return (DDI_SUCCESS); 572 } 573 574 575 /* 576 * Checks dma attributes against system bypass ranges 577 * The bypass range is determined by the hardware. Return them so the 578 * common code can do generic checking against them. 
579 */ 580 /*ARGSUSED*/ 581 int 582 px_lib_dma_bypass_rngchk(dev_info_t *dip, ddi_dma_attr_t *attr_p, 583 uint64_t *lo_p, uint64_t *hi_p) 584 { 585 px_t *px_p = DIP_TO_STATE(dip); 586 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 587 588 *lo_p = hvio_get_bypass_base(pxu_p); 589 *hi_p = hvio_get_bypass_end(pxu_p); 590 591 return (DDI_SUCCESS); 592 } 593 594 595 /*ARGSUSED*/ 596 int 597 px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra, io_attributes_t attr, 598 io_addr_t *io_addr_p) 599 { 600 uint64_t ret; 601 px_t *px_p = DIP_TO_STATE(dip); 602 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 603 604 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx " 605 "attr 0x%llx\n", dip, ra, attr); 606 607 if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), pxu_p, ra, 608 attr, io_addr_p)) != H_EOK) { 609 DBG(DBG_LIB_DMA, dip, 610 "hvio_iommu_getbypass failed, ret 0x%lx\n", ret); 611 return (DDI_FAILURE); 612 } 613 614 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n", 615 *io_addr_p); 616 617 return (DDI_SUCCESS); 618 } 619 620 /* 621 * Returns any needed IO address bit(s) for relaxed ordering in IOMMU 622 * bypass mode. 623 */ 624 uint64_t 625 px_lib_ro_bypass(dev_info_t *dip, io_attributes_t attr, uint64_t ioaddr) 626 { 627 px_t *px_p = DIP_TO_STATE(dip); 628 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 629 630 if ((PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) && (attr & PCI_MAP_ATTR_RO)) 631 return (MMU_OBERON_BYPASS_RO | ioaddr); 632 else 633 return (ioaddr); 634 } 635 636 /* 637 * bus dma sync entry point. 638 */ 639 /*ARGSUSED*/ 640 int 641 px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 642 off_t off, size_t len, uint_t cache_flags) 643 { 644 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 645 px_t *px_p = DIP_TO_STATE(dip); 646 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 647 648 DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p " 649 "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n", 650 dip, rdip, handle, off, len, cache_flags); 651 652 /* 653 * No flush needed for Oberon 654 */ 655 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) 656 return (DDI_SUCCESS); 657 658 /* 659 * jbus_stst_order is found only in certain cpu modules. 660 * Just return success if not present. 661 */ 662 if (&jbus_stst_order == NULL) 663 return (DDI_SUCCESS); 664 665 if (!(mp->dmai_flags & PX_DMAI_FLAGS_INUSE)) { 666 cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.", 667 ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp); 668 669 return (DDI_FAILURE); 670 } 671 672 if (mp->dmai_flags & PX_DMAI_FLAGS_NOSYNC) 673 return (DDI_SUCCESS); 674 675 /* 676 * No flush needed when sending data from memory to device. 677 * Nothing to do to "sync" memory to what device would already see. 678 */ 679 if (!(mp->dmai_rflags & DDI_DMA_READ) || 680 ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV)) 681 return (DDI_SUCCESS); 682 683 /* 684 * Perform necessary cpu workaround to ensure jbus ordering. 685 * CPU's internal "invalidate FIFOs" are flushed. 
686 */ 687 688 #if !defined(lint) 689 kpreempt_disable(); 690 #endif 691 jbus_stst_order(); 692 #if !defined(lint) 693 kpreempt_enable(); 694 #endif 695 return (DDI_SUCCESS); 696 } 697 698 /* 699 * MSIQ Functions: 700 */ 701 /*ARGSUSED*/ 702 int 703 px_lib_msiq_init(dev_info_t *dip) 704 { 705 px_t *px_p = DIP_TO_STATE(dip); 706 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 707 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 708 px_dvma_addr_t pg_index; 709 size_t q_sz = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 710 size_t size; 711 int i, ret; 712 713 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip); 714 715 /* must aligned on q_sz (happens to be !!! page) boundary */ 716 ASSERT(q_sz == 8 * 1024); 717 718 /* 719 * Map the EQ memory into the Fire MMU (has to be 512KB aligned) 720 * and then initialize the base address register. 721 * 722 * Allocate entries from Fire IOMMU so that the resulting address 723 * is properly aligned. Calculate the index of the first allocated 724 * entry. Note: The size of the mapping is assumed to be a multiple 725 * of the page size. 726 */ 727 size = msiq_state_p->msiq_cnt * q_sz; 728 729 msiq_state_p->msiq_buf_p = kmem_zalloc(size, KM_SLEEP); 730 731 for (i = 0; i < msiq_state_p->msiq_cnt; i++) 732 msiq_state_p->msiq_p[i].msiq_base_p = (msiqhead_t *) 733 ((caddr_t)msiq_state_p->msiq_buf_p + (i * q_sz)); 734 735 pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map, 736 size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT); 737 738 if (pxu_p->msiq_mapped_p == NULL) 739 return (DDI_FAILURE); 740 741 pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p, 742 MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p)); 743 744 if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index), 745 MMU_BTOP(size), PCI_MAP_ATTR_WRITE, msiq_state_p->msiq_buf_p, 746 0, MMU_MAP_BUF)) != DDI_SUCCESS) { 747 DBG(DBG_LIB_MSIQ, dip, 748 "px_lib_msiq_init: px_lib_iommu_map failed, " 749 "ret 0x%lx\n", ret); 750 751 (void) px_lib_msiq_fini(dip); 752 return (DDI_FAILURE); 753 } 754 755 if ((ret = hvio_msiq_init(DIP_TO_HANDLE(dip), 756 pxu_p)) != H_EOK) { 757 DBG(DBG_LIB_MSIQ, dip, 758 "hvio_msiq_init failed, ret 0x%lx\n", ret); 759 760 (void) px_lib_msiq_fini(dip); 761 return (DDI_FAILURE); 762 } 763 764 return (DDI_SUCCESS); 765 } 766 767 /*ARGSUSED*/ 768 int 769 px_lib_msiq_fini(dev_info_t *dip) 770 { 771 px_t *px_p = DIP_TO_STATE(dip); 772 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 773 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 774 px_dvma_addr_t pg_index; 775 size_t size; 776 777 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip); 778 779 /* 780 * Unmap and free the EQ memory that had been mapped 781 * into the Fire IOMMU. 
782 */ 783 size = msiq_state_p->msiq_cnt * 784 msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 785 786 pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p, 787 MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p)); 788 789 (void) px_lib_iommu_demap(px_p->px_dip, 790 PCI_TSBID(0, pg_index), MMU_BTOP(size)); 791 792 /* Free the entries from the Fire MMU */ 793 vmem_xfree(px_p->px_mmu_p->mmu_dvma_map, 794 (void *)pxu_p->msiq_mapped_p, size); 795 796 kmem_free(msiq_state_p->msiq_buf_p, msiq_state_p->msiq_cnt * 797 msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t)); 798 799 return (DDI_SUCCESS); 800 } 801 802 /*ARGSUSED*/ 803 int 804 px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p, 805 uint_t *msiq_rec_cnt_p) 806 { 807 px_t *px_p = DIP_TO_STATE(dip); 808 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 809 size_t msiq_size; 810 811 DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n", 812 dip, msiq_id); 813 814 msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 815 ra_p = (r_addr_t *)((caddr_t)msiq_state_p->msiq_buf_p + 816 (msiq_id * msiq_size)); 817 818 *msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt; 819 820 DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%p msiq_rec_cnt 0x%x\n", 821 ra_p, *msiq_rec_cnt_p); 822 823 return (DDI_SUCCESS); 824 } 825 826 /*ARGSUSED*/ 827 int 828 px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id, 829 pci_msiq_valid_state_t *msiq_valid_state) 830 { 831 uint64_t ret; 832 833 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n", 834 dip, msiq_id); 835 836 if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip), 837 msiq_id, msiq_valid_state)) != H_EOK) { 838 DBG(DBG_LIB_MSIQ, dip, 839 "hvio_msiq_getvalid failed, ret 0x%lx\n", ret); 840 return (DDI_FAILURE); 841 } 842 843 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n", 844 *msiq_valid_state); 845 846 return (DDI_SUCCESS); 847 } 848 849 /*ARGSUSED*/ 850 int 851 px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id, 852 pci_msiq_valid_state_t msiq_valid_state) 853 { 854 uint64_t ret; 855 856 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x " 857 "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state); 858 859 if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip), 860 msiq_id, msiq_valid_state)) != H_EOK) { 861 DBG(DBG_LIB_MSIQ, dip, 862 "hvio_msiq_setvalid failed, ret 0x%lx\n", ret); 863 return (DDI_FAILURE); 864 } 865 866 return (DDI_SUCCESS); 867 } 868 869 /*ARGSUSED*/ 870 int 871 px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id, 872 pci_msiq_state_t *msiq_state) 873 { 874 uint64_t ret; 875 876 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n", 877 dip, msiq_id); 878 879 if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip), 880 msiq_id, msiq_state)) != H_EOK) { 881 DBG(DBG_LIB_MSIQ, dip, 882 "hvio_msiq_getstate failed, ret 0x%lx\n", ret); 883 return (DDI_FAILURE); 884 } 885 886 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n", 887 *msiq_state); 888 889 return (DDI_SUCCESS); 890 } 891 892 /*ARGSUSED*/ 893 int 894 px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id, 895 pci_msiq_state_t msiq_state) 896 { 897 uint64_t ret; 898 899 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x " 900 "msiq_state 0x%x\n", dip, msiq_id, msiq_state); 901 902 if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip), 903 msiq_id, msiq_state)) != H_EOK) { 904 DBG(DBG_LIB_MSIQ, dip, 905 "hvio_msiq_setstate failed, ret 0x%lx\n", ret); 906 return (DDI_FAILURE); 907 } 908 909 return (DDI_SUCCESS); 910 } 911 912 
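/*
 * Illustrative sketch (documentation only, not compiled code): a minimal
 * example of how the MSIQ accessors above and the head/tail and record
 * routines below are typically combined to drain one event queue.  The
 * helper name drain_one_msiq() and the record handler handle_rec() are
 * hypothetical; the real consumer of these interfaces is the common px
 * MSIQ interrupt code.
 *
 *	static void
 *	drain_one_msiq(dev_info_t *dip, msiqid_t msiq_id,
 *	    msiqhead_t *q_base_p, uint_t rec_cnt)
 *	{
 *		msiqhead_t	head;
 *		msiqtail_t	tail;
 *		msiq_rec_t	rec;
 *		msiqhead_t	*rec_p;
 *
 *		(void) px_lib_msiq_gethead(dip, msiq_id, &head);
 *		(void) px_lib_msiq_gettail(dip, msiq_id, &tail);
 *
 *		while (head != tail) {
 *			rec_p = (msiqhead_t *)((caddr_t)q_base_p +
 *			    head * sizeof (msiq_rec_t));
 *
 *			px_lib_get_msiq_rec(dip, rec_p, &rec);
 *			if (rec.msiq_rec_type != 0)
 *				handle_rec(&rec);
 *			px_lib_clr_msiq_rec(dip, rec_p);
 *
 *			head = (head + 1) % rec_cnt;
 *		}
 *
 *		(void) px_lib_msiq_sethead(dip, msiq_id, head);
 *	}
 */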
/*ARGSUSED*/ 913 int 914 px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id, 915 msiqhead_t *msiq_head) 916 { 917 uint64_t ret; 918 919 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n", 920 dip, msiq_id); 921 922 if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip), 923 msiq_id, msiq_head)) != H_EOK) { 924 DBG(DBG_LIB_MSIQ, dip, 925 "hvio_msiq_gethead failed, ret 0x%lx\n", ret); 926 return (DDI_FAILURE); 927 } 928 929 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n", 930 *msiq_head); 931 932 return (DDI_SUCCESS); 933 } 934 935 /*ARGSUSED*/ 936 int 937 px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id, 938 msiqhead_t msiq_head) 939 { 940 uint64_t ret; 941 942 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x " 943 "msiq_head 0x%x\n", dip, msiq_id, msiq_head); 944 945 if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip), 946 msiq_id, msiq_head)) != H_EOK) { 947 DBG(DBG_LIB_MSIQ, dip, 948 "hvio_msiq_sethead failed, ret 0x%lx\n", ret); 949 return (DDI_FAILURE); 950 } 951 952 return (DDI_SUCCESS); 953 } 954 955 /*ARGSUSED*/ 956 int 957 px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id, 958 msiqtail_t *msiq_tail) 959 { 960 uint64_t ret; 961 962 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n", 963 dip, msiq_id); 964 965 if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip), 966 msiq_id, msiq_tail)) != H_EOK) { 967 DBG(DBG_LIB_MSIQ, dip, 968 "hvio_msiq_gettail failed, ret 0x%lx\n", ret); 969 return (DDI_FAILURE); 970 } 971 972 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n", 973 *msiq_tail); 974 975 return (DDI_SUCCESS); 976 } 977 978 /*ARGSUSED*/ 979 void 980 px_lib_get_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p, 981 msiq_rec_t *msiq_rec_p) 982 { 983 eq_rec_t *eq_rec_p = (eq_rec_t *)msiq_head_p; 984 985 DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n", 986 dip, eq_rec_p); 987 988 if (!eq_rec_p->eq_rec_fmt_type) { 989 /* Set msiq_rec_type to zero */ 990 msiq_rec_p->msiq_rec_type = 0; 991 992 return; 993 } 994 995 DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, " 996 "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx " 997 "eq_rec_len 0x%llx eq_rec_addr0 0x%llx " 998 "eq_rec_addr1 0x%llx eq_rec_data0 0x%llx " 999 "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid, 1000 eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len, 1001 eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1, 1002 eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1); 1003 1004 /* 1005 * Only upper 4 bits of eq_rec_fmt_type is used 1006 * to identify the EQ record type. 
1007 */ 1008 switch (eq_rec_p->eq_rec_fmt_type >> 3) { 1009 case EQ_REC_MSI32: 1010 msiq_rec_p->msiq_rec_type = MSI32_REC; 1011 1012 msiq_rec_p->msiq_rec_data.msi.msi_data = 1013 eq_rec_p->eq_rec_data0; 1014 break; 1015 case EQ_REC_MSI64: 1016 msiq_rec_p->msiq_rec_type = MSI64_REC; 1017 1018 msiq_rec_p->msiq_rec_data.msi.msi_data = 1019 eq_rec_p->eq_rec_data0; 1020 break; 1021 case EQ_REC_MSG: 1022 msiq_rec_p->msiq_rec_type = MSG_REC; 1023 1024 msiq_rec_p->msiq_rec_data.msg.msg_route = 1025 eq_rec_p->eq_rec_fmt_type & 7; 1026 msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid; 1027 msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0; 1028 break; 1029 default: 1030 cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: " 1031 "0x%x is an unknown EQ record type", 1032 ddi_driver_name(dip), ddi_get_instance(dip), 1033 (int)eq_rec_p->eq_rec_fmt_type); 1034 break; 1035 } 1036 1037 msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid; 1038 msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) | 1039 (eq_rec_p->eq_rec_addr0 << 2)); 1040 } 1041 1042 /*ARGSUSED*/ 1043 void 1044 px_lib_clr_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p) 1045 { 1046 eq_rec_t *eq_rec_p = (eq_rec_t *)msiq_head_p; 1047 1048 DBG(DBG_LIB_MSIQ, dip, "px_lib_clr_msiq_rec: dip 0x%p eq_rec_p 0x%p\n", 1049 dip, eq_rec_p); 1050 1051 if (eq_rec_p->eq_rec_fmt_type) { 1052 /* Zero out eq_rec_fmt_type field */ 1053 eq_rec_p->eq_rec_fmt_type = 0; 1054 } 1055 } 1056 1057 /* 1058 * MSI Functions: 1059 */ 1060 /*ARGSUSED*/ 1061 int 1062 px_lib_msi_init(dev_info_t *dip) 1063 { 1064 px_t *px_p = DIP_TO_STATE(dip); 1065 px_msi_state_t *msi_state_p = &px_p->px_ib_p->ib_msi_state; 1066 uint64_t ret; 1067 1068 DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip); 1069 1070 if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip), 1071 msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) { 1072 DBG(DBG_LIB_MSIQ, dip, "px_lib_msi_init failed, ret 0x%lx\n", 1073 ret); 1074 return (DDI_FAILURE); 1075 } 1076 1077 return (DDI_SUCCESS); 1078 } 1079 1080 /*ARGSUSED*/ 1081 int 1082 px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num, 1083 msiqid_t *msiq_id) 1084 { 1085 uint64_t ret; 1086 1087 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n", 1088 dip, msi_num); 1089 1090 if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip), 1091 msi_num, msiq_id)) != H_EOK) { 1092 DBG(DBG_LIB_MSI, dip, 1093 "hvio_msi_getmsiq failed, ret 0x%lx\n", ret); 1094 return (DDI_FAILURE); 1095 } 1096 1097 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n", 1098 *msiq_id); 1099 1100 return (DDI_SUCCESS); 1101 } 1102 1103 /*ARGSUSED*/ 1104 int 1105 px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num, 1106 msiqid_t msiq_id, msi_type_t msitype) 1107 { 1108 uint64_t ret; 1109 1110 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x " 1111 "msq_id 0x%x\n", dip, msi_num, msiq_id); 1112 1113 if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip), 1114 msi_num, msiq_id)) != H_EOK) { 1115 DBG(DBG_LIB_MSI, dip, 1116 "hvio_msi_setmsiq failed, ret 0x%lx\n", ret); 1117 return (DDI_FAILURE); 1118 } 1119 1120 return (DDI_SUCCESS); 1121 } 1122 1123 /*ARGSUSED*/ 1124 int 1125 px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num, 1126 pci_msi_valid_state_t *msi_valid_state) 1127 { 1128 uint64_t ret; 1129 1130 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n", 1131 dip, msi_num); 1132 1133 if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip), 1134 msi_num, msi_valid_state)) != H_EOK) { 1135 DBG(DBG_LIB_MSI, dip, 1136 
"hvio_msi_getvalid failed, ret 0x%lx\n", ret); 1137 return (DDI_FAILURE); 1138 } 1139 1140 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msiq_id 0x%x\n", 1141 *msi_valid_state); 1142 1143 return (DDI_SUCCESS); 1144 } 1145 1146 /*ARGSUSED*/ 1147 int 1148 px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num, 1149 pci_msi_valid_state_t msi_valid_state) 1150 { 1151 uint64_t ret; 1152 1153 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x " 1154 "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state); 1155 1156 if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip), 1157 msi_num, msi_valid_state)) != H_EOK) { 1158 DBG(DBG_LIB_MSI, dip, 1159 "hvio_msi_setvalid failed, ret 0x%lx\n", ret); 1160 return (DDI_FAILURE); 1161 } 1162 1163 return (DDI_SUCCESS); 1164 } 1165 1166 /*ARGSUSED*/ 1167 int 1168 px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num, 1169 pci_msi_state_t *msi_state) 1170 { 1171 uint64_t ret; 1172 1173 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n", 1174 dip, msi_num); 1175 1176 if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip), 1177 msi_num, msi_state)) != H_EOK) { 1178 DBG(DBG_LIB_MSI, dip, 1179 "hvio_msi_getstate failed, ret 0x%lx\n", ret); 1180 return (DDI_FAILURE); 1181 } 1182 1183 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n", 1184 *msi_state); 1185 1186 return (DDI_SUCCESS); 1187 } 1188 1189 /*ARGSUSED*/ 1190 int 1191 px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num, 1192 pci_msi_state_t msi_state) 1193 { 1194 uint64_t ret; 1195 1196 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x " 1197 "msi_state 0x%x\n", dip, msi_num, msi_state); 1198 1199 if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip), 1200 msi_num, msi_state)) != H_EOK) { 1201 DBG(DBG_LIB_MSI, dip, 1202 "hvio_msi_setstate failed, ret 0x%lx\n", ret); 1203 return (DDI_FAILURE); 1204 } 1205 1206 return (DDI_SUCCESS); 1207 } 1208 1209 /* 1210 * MSG Functions: 1211 */ 1212 /*ARGSUSED*/ 1213 int 1214 px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type, 1215 msiqid_t *msiq_id) 1216 { 1217 uint64_t ret; 1218 1219 DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n", 1220 dip, msg_type); 1221 1222 if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip), 1223 msg_type, msiq_id)) != H_EOK) { 1224 DBG(DBG_LIB_MSG, dip, 1225 "hvio_msg_getmsiq failed, ret 0x%lx\n", ret); 1226 return (DDI_FAILURE); 1227 } 1228 1229 DBG(DBG_LIB_MSI, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n", 1230 *msiq_id); 1231 1232 return (DDI_SUCCESS); 1233 } 1234 1235 /*ARGSUSED*/ 1236 int 1237 px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type, 1238 msiqid_t msiq_id) 1239 { 1240 uint64_t ret; 1241 1242 DBG(DBG_LIB_MSG, dip, "px_lib_msi_setstate: dip 0x%p msg_type 0x%x " 1243 "msiq_id 0x%x\n", dip, msg_type, msiq_id); 1244 1245 if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip), 1246 msg_type, msiq_id)) != H_EOK) { 1247 DBG(DBG_LIB_MSG, dip, 1248 "hvio_msg_setmsiq failed, ret 0x%lx\n", ret); 1249 return (DDI_FAILURE); 1250 } 1251 1252 return (DDI_SUCCESS); 1253 } 1254 1255 /*ARGSUSED*/ 1256 int 1257 px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type, 1258 pcie_msg_valid_state_t *msg_valid_state) 1259 { 1260 uint64_t ret; 1261 1262 DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n", 1263 dip, msg_type); 1264 1265 if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type, 1266 msg_valid_state)) != H_EOK) { 1267 DBG(DBG_LIB_MSG, dip, 1268 "hvio_msg_getvalid failed, ret 0x%lx\n", ret); 1269 return (DDI_FAILURE); 1270 } 1271 1272 DBG(DBG_LIB_MSI, 
dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n", 1273 *msg_valid_state); 1274 1275 return (DDI_SUCCESS); 1276 } 1277 1278 /*ARGSUSED*/ 1279 int 1280 px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type, 1281 pcie_msg_valid_state_t msg_valid_state) 1282 { 1283 uint64_t ret; 1284 1285 DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x " 1286 "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state); 1287 1288 if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type, 1289 msg_valid_state)) != H_EOK) { 1290 DBG(DBG_LIB_MSG, dip, 1291 "hvio_msg_setvalid failed, ret 0x%lx\n", ret); 1292 return (DDI_FAILURE); 1293 } 1294 1295 return (DDI_SUCCESS); 1296 } 1297 1298 /* 1299 * Suspend/Resume Functions: 1300 * Currently unsupported by hypervisor 1301 */ 1302 int 1303 px_lib_suspend(dev_info_t *dip) 1304 { 1305 px_t *px_p = DIP_TO_STATE(dip); 1306 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1307 px_cb_t *cb_p = PX2CB(px_p); 1308 devhandle_t dev_hdl, xbus_dev_hdl; 1309 uint64_t ret = H_EOK; 1310 1311 DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip); 1312 1313 dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR]; 1314 xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC]; 1315 1316 if ((ret = hvio_suspend(dev_hdl, pxu_p)) != H_EOK) 1317 goto fail; 1318 1319 if (--cb_p->attachcnt == 0) { 1320 ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p); 1321 if (ret != H_EOK) 1322 cb_p->attachcnt++; 1323 } 1324 pxu_p->cpr_flag = PX_ENTERED_CPR; 1325 1326 fail: 1327 return ((ret != H_EOK) ? DDI_FAILURE: DDI_SUCCESS); 1328 } 1329 1330 void 1331 px_lib_resume(dev_info_t *dip) 1332 { 1333 px_t *px_p = DIP_TO_STATE(dip); 1334 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1335 px_cb_t *cb_p = PX2CB(px_p); 1336 devhandle_t dev_hdl, xbus_dev_hdl; 1337 devino_t pec_ino = px_p->px_inos[PX_INTR_PEC]; 1338 devino_t xbc_ino = px_p->px_inos[PX_INTR_XBC]; 1339 1340 DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip); 1341 1342 dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR]; 1343 xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC]; 1344 1345 if (++cb_p->attachcnt == 1) 1346 hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p); 1347 1348 hvio_resume(dev_hdl, pec_ino, pxu_p); 1349 } 1350 1351 /* 1352 * Generate a unique Oberon UBC ID based on the Logicial System Board and 1353 * the IO Channel from the portid property field. 1354 */ 1355 static uint64_t 1356 oberon_get_ubc_id(dev_info_t *dip) 1357 { 1358 px_t *px_p = DIP_TO_STATE(dip); 1359 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1360 uint64_t ubc_id; 1361 1362 /* 1363 * Generate a unique 6 bit UBC ID using the 2 IO_Channel#[1:0] bits and 1364 * the 4 LSB_ID[3:0] bits from the Oberon's portid property. 1365 */ 1366 ubc_id = (((pxu_p->portid >> OBERON_PORT_ID_IOC) & 1367 OBERON_PORT_ID_IOC_MASK) | (((pxu_p->portid >> 1368 OBERON_PORT_ID_LSB) & OBERON_PORT_ID_LSB_MASK) 1369 << OBERON_UBC_ID_LSB)); 1370 1371 return (ubc_id); 1372 } 1373 1374 /* 1375 * Oberon does not have a UBC scratch register, so alloc an array of scratch 1376 * registers when needed and use a unique UBC ID as an index. This code 1377 * can be simplified if we use a pre-allocated array. They are currently 1378 * being dynamically allocated because it's only needed by the Oberon. 
1379 */ 1380 static void 1381 oberon_set_cb(dev_info_t *dip, uint64_t val) 1382 { 1383 uint64_t ubc_id; 1384 1385 if (px_oberon_ubc_scratch_regs == NULL) 1386 px_oberon_ubc_scratch_regs = 1387 (uint64_t *)kmem_zalloc(sizeof (uint64_t)* 1388 OBERON_UBC_ID_MAX, KM_SLEEP); 1389 1390 ubc_id = oberon_get_ubc_id(dip); 1391 1392 px_oberon_ubc_scratch_regs[ubc_id] = val; 1393 1394 /* 1395 * Check if any scratch registers are still in use. If all scratch 1396 * registers are currently set to zero, then deallocate the scratch 1397 * register array. 1398 */ 1399 for (ubc_id = 0; ubc_id < OBERON_UBC_ID_MAX; ubc_id++) { 1400 if (px_oberon_ubc_scratch_regs[ubc_id] != NULL) 1401 return; 1402 } 1403 1404 /* 1405 * All scratch registers are set to zero so deallocate the scratch 1406 * register array and set the pointer to NULL. 1407 */ 1408 kmem_free(px_oberon_ubc_scratch_regs, 1409 (sizeof (uint64_t)*OBERON_UBC_ID_MAX)); 1410 1411 px_oberon_ubc_scratch_regs = NULL; 1412 } 1413 1414 /* 1415 * Oberon does not have a UBC scratch register, so use an allocated array of 1416 * scratch registers and use the unique UBC ID as an index into that array. 1417 */ 1418 static uint64_t 1419 oberon_get_cb(dev_info_t *dip) 1420 { 1421 uint64_t ubc_id; 1422 1423 if (px_oberon_ubc_scratch_regs == NULL) 1424 return (0); 1425 1426 ubc_id = oberon_get_ubc_id(dip); 1427 1428 return (px_oberon_ubc_scratch_regs[ubc_id]); 1429 } 1430 1431 /* 1432 * Misc Functions: 1433 * Currently unsupported by hypervisor 1434 */ 1435 static uint64_t 1436 px_get_cb(dev_info_t *dip) 1437 { 1438 px_t *px_p = DIP_TO_STATE(dip); 1439 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1440 1441 /* 1442 * Oberon does not currently have Scratchpad registers. 1443 */ 1444 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) 1445 return (oberon_get_cb(dip)); 1446 1447 return (CSR_XR((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1)); 1448 } 1449 1450 static void 1451 px_set_cb(dev_info_t *dip, uint64_t val) 1452 { 1453 px_t *px_p = DIP_TO_STATE(dip); 1454 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1455 1456 /* 1457 * Oberon does not currently have Scratchpad registers. 1458 */ 1459 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) { 1460 oberon_set_cb(dip, val); 1461 return; 1462 } 1463 1464 CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, val); 1465 } 1466 1467 /*ARGSUSED*/ 1468 int 1469 px_lib_map_vconfig(dev_info_t *dip, 1470 ddi_map_req_t *mp, pci_config_offset_t off, 1471 pci_regspec_t *rp, caddr_t *addrp) 1472 { 1473 /* 1474 * No special config space access services in this layer. 1475 */ 1476 return (DDI_FAILURE); 1477 } 1478 1479 void 1480 px_lib_map_attr_check(ddi_map_req_t *mp) 1481 { 1482 ddi_acc_hdl_t *hp = mp->map_handlep; 1483 1484 /* fire does not accept byte masks from PIO store merge */ 1485 if (hp->ah_acc.devacc_attr_dataorder == DDI_STORECACHING_OK_ACC) 1486 hp->ah_acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 1487 } 1488 1489 /* This function is called only by poke, caut put and pxtool poke. 
*/ 1490 void 1491 px_lib_clr_errs(px_t *px_p, dev_info_t *rdip, uint64_t addr) 1492 { 1493 px_pec_t *pec_p = px_p->px_pec_p; 1494 dev_info_t *rpdip = px_p->px_dip; 1495 int rc_err, fab_err, i; 1496 int acctype = pec_p->pec_safeacc_type; 1497 ddi_fm_error_t derr; 1498 pci_ranges_t *ranges_p; 1499 int range_len; 1500 uint32_t addr_high, addr_low; 1501 pcie_req_id_t bdf = PCIE_INVALID_BDF; 1502 1503 /* Create the derr */ 1504 bzero(&derr, sizeof (ddi_fm_error_t)); 1505 derr.fme_version = DDI_FME_VERSION; 1506 derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1); 1507 derr.fme_flag = acctype; 1508 1509 if (acctype == DDI_FM_ERR_EXPECTED) { 1510 derr.fme_status = DDI_FM_NONFATAL; 1511 ndi_fm_acc_err_set(pec_p->pec_acc_hdl, &derr); 1512 } 1513 1514 if (px_fm_enter(px_p) != DDI_SUCCESS) 1515 return; 1516 1517 /* send ereport/handle/clear fire registers */ 1518 rc_err = px_err_cmn_intr(px_p, &derr, PX_LIB_CALL, PX_FM_BLOCK_ALL); 1519 1520 /* Figure out if this is a cfg or mem32 access */ 1521 addr_high = (uint32_t)(addr >> 32); 1522 addr_low = (uint32_t)addr; 1523 range_len = px_p->px_ranges_length / sizeof (pci_ranges_t); 1524 i = 0; 1525 for (ranges_p = px_p->px_ranges_p; i < range_len; i++, ranges_p++) { 1526 if (ranges_p->parent_high == addr_high) { 1527 switch (ranges_p->child_high & PCI_ADDR_MASK) { 1528 case PCI_ADDR_CONFIG: 1529 bdf = (pcie_req_id_t)(addr_low >> 12); 1530 addr_low = 0; 1531 break; 1532 case PCI_ADDR_MEM32: 1533 if (rdip) 1534 bdf = PCI_GET_BDF(rdip); 1535 else 1536 bdf = PCIE_INVALID_BDF; 1537 break; 1538 } 1539 break; 1540 } 1541 } 1542 1543 px_rp_en_q(px_p, bdf, addr_low, NULL); 1544 1545 /* 1546 * XXX - Current code scans the fabric for all px_tool accesses. 1547 * In future, do not scan fabric for px_tool access to IO Root Nexus 1548 */ 1549 fab_err = px_scan_fabric(px_p, rpdip, &derr); 1550 1551 px_err_panic(rc_err, PX_RC, fab_err, B_TRUE); 1552 px_fm_exit(px_p); 1553 px_err_panic(rc_err, PX_RC, fab_err, B_FALSE); 1554 } 1555 1556 #ifdef DEBUG 1557 int px_peekfault_cnt = 0; 1558 int px_pokefault_cnt = 0; 1559 #endif /* DEBUG */ 1560 1561 /*ARGSUSED*/ 1562 static int 1563 px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip, 1564 peekpoke_ctlops_t *in_args) 1565 { 1566 px_t *px_p = DIP_TO_STATE(dip); 1567 px_pec_t *pec_p = px_p->px_pec_p; 1568 int err = DDI_SUCCESS; 1569 on_trap_data_t otd; 1570 1571 mutex_enter(&pec_p->pec_pokefault_mutex); 1572 pec_p->pec_ontrap_data = &otd; 1573 pec_p->pec_safeacc_type = DDI_FM_ERR_POKE; 1574 1575 /* Set up protected environment. */ 1576 if (!on_trap(&otd, OT_DATA_ACCESS)) { 1577 uintptr_t tramp = otd.ot_trampoline; 1578 1579 otd.ot_trampoline = (uintptr_t)&poke_fault; 1580 err = do_poke(in_args->size, (void *)in_args->dev_addr, 1581 (void *)in_args->host_addr); 1582 otd.ot_trampoline = tramp; 1583 } else 1584 err = DDI_FAILURE; 1585 1586 px_lib_clr_errs(px_p, rdip, in_args->dev_addr); 1587 1588 if (otd.ot_trap & OT_DATA_ACCESS) 1589 err = DDI_FAILURE; 1590 1591 /* Take down protected environment. 
*/ 1592 no_trap(); 1593 1594 pec_p->pec_ontrap_data = NULL; 1595 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1596 mutex_exit(&pec_p->pec_pokefault_mutex); 1597 1598 #ifdef DEBUG 1599 if (err == DDI_FAILURE) 1600 px_pokefault_cnt++; 1601 #endif 1602 return (err); 1603 } 1604 1605 /*ARGSUSED*/ 1606 static int 1607 px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip, 1608 peekpoke_ctlops_t *cautacc_ctlops_arg) 1609 { 1610 size_t size = cautacc_ctlops_arg->size; 1611 uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr; 1612 uintptr_t host_addr = cautacc_ctlops_arg->host_addr; 1613 ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle; 1614 size_t repcount = cautacc_ctlops_arg->repcount; 1615 uint_t flags = cautacc_ctlops_arg->flags; 1616 1617 px_t *px_p = DIP_TO_STATE(dip); 1618 px_pec_t *pec_p = px_p->px_pec_p; 1619 int err = DDI_SUCCESS; 1620 1621 /* 1622 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault 1623 * mutex. 1624 */ 1625 i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1626 1627 pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap; 1628 pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED; 1629 hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED; 1630 1631 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1632 for (; repcount; repcount--) { 1633 switch (size) { 1634 1635 case sizeof (uint8_t): 1636 i_ddi_put8(hp, (uint8_t *)dev_addr, 1637 *(uint8_t *)host_addr); 1638 break; 1639 1640 case sizeof (uint16_t): 1641 i_ddi_put16(hp, (uint16_t *)dev_addr, 1642 *(uint16_t *)host_addr); 1643 break; 1644 1645 case sizeof (uint32_t): 1646 i_ddi_put32(hp, (uint32_t *)dev_addr, 1647 *(uint32_t *)host_addr); 1648 break; 1649 1650 case sizeof (uint64_t): 1651 i_ddi_put64(hp, (uint64_t *)dev_addr, 1652 *(uint64_t *)host_addr); 1653 break; 1654 } 1655 1656 host_addr += size; 1657 1658 if (flags == DDI_DEV_AUTOINCR) 1659 dev_addr += size; 1660 1661 px_lib_clr_errs(px_p, rdip, dev_addr); 1662 1663 if (pec_p->pec_ontrap_data->ot_trap & OT_DATA_ACCESS) { 1664 err = DDI_FAILURE; 1665 #ifdef DEBUG 1666 px_pokefault_cnt++; 1667 #endif 1668 break; 1669 } 1670 } 1671 } 1672 1673 i_ddi_notrap((ddi_acc_handle_t)hp); 1674 pec_p->pec_ontrap_data = NULL; 1675 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1676 i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1677 hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED; 1678 1679 return (err); 1680 } 1681 1682 1683 int 1684 px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip, 1685 peekpoke_ctlops_t *in_args) 1686 { 1687 return (in_args->handle ? 
px_lib_do_caut_put(dip, rdip, in_args) : 1688 px_lib_do_poke(dip, rdip, in_args)); 1689 } 1690 1691 1692 /*ARGSUSED*/ 1693 static int 1694 px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args) 1695 { 1696 px_t *px_p = DIP_TO_STATE(dip); 1697 px_pec_t *pec_p = px_p->px_pec_p; 1698 int err = DDI_SUCCESS; 1699 on_trap_data_t otd; 1700 1701 mutex_enter(&pec_p->pec_pokefault_mutex); 1702 if (px_fm_enter(px_p) != DDI_SUCCESS) 1703 return (DDI_FAILURE); 1704 pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK; 1705 px_fm_exit(px_p); 1706 1707 if (!on_trap(&otd, OT_DATA_ACCESS)) { 1708 uintptr_t tramp = otd.ot_trampoline; 1709 1710 otd.ot_trampoline = (uintptr_t)&peek_fault; 1711 err = do_peek(in_args->size, (void *)in_args->dev_addr, 1712 (void *)in_args->host_addr); 1713 otd.ot_trampoline = tramp; 1714 } else 1715 err = DDI_FAILURE; 1716 1717 no_trap(); 1718 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1719 mutex_exit(&pec_p->pec_pokefault_mutex); 1720 1721 #ifdef DEBUG 1722 if (err == DDI_FAILURE) 1723 px_peekfault_cnt++; 1724 #endif 1725 return (err); 1726 } 1727 1728 1729 static int 1730 px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg) 1731 { 1732 size_t size = cautacc_ctlops_arg->size; 1733 uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr; 1734 uintptr_t host_addr = cautacc_ctlops_arg->host_addr; 1735 ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle; 1736 size_t repcount = cautacc_ctlops_arg->repcount; 1737 uint_t flags = cautacc_ctlops_arg->flags; 1738 1739 px_t *px_p = DIP_TO_STATE(dip); 1740 px_pec_t *pec_p = px_p->px_pec_p; 1741 int err = DDI_SUCCESS; 1742 1743 /* 1744 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault 1745 * mutex. 1746 */ 1747 i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1748 1749 pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap; 1750 pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED; 1751 hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED; 1752 1753 if (repcount == 1) { 1754 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1755 i_ddi_caut_get(size, (void *)dev_addr, 1756 (void *)host_addr); 1757 } else { 1758 int i; 1759 uint8_t *ff_addr = (uint8_t *)host_addr; 1760 for (i = 0; i < size; i++) 1761 *ff_addr++ = 0xff; 1762 1763 err = DDI_FAILURE; 1764 #ifdef DEBUG 1765 px_peekfault_cnt++; 1766 #endif 1767 } 1768 } else { 1769 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1770 for (; repcount; repcount--) { 1771 i_ddi_caut_get(size, (void *)dev_addr, 1772 (void *)host_addr); 1773 1774 host_addr += size; 1775 1776 if (flags == DDI_DEV_AUTOINCR) 1777 dev_addr += size; 1778 } 1779 } else { 1780 err = DDI_FAILURE; 1781 #ifdef DEBUG 1782 px_peekfault_cnt++; 1783 #endif 1784 } 1785 } 1786 1787 i_ddi_notrap((ddi_acc_handle_t)hp); 1788 pec_p->pec_ontrap_data = NULL; 1789 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1790 i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1791 hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED; 1792 1793 return (err); 1794 } 1795 1796 /*ARGSUSED*/ 1797 int 1798 px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip, 1799 peekpoke_ctlops_t *in_args, void *result) 1800 { 1801 result = (void *)in_args->host_addr; 1802 return (in_args->handle ? 
px_lib_do_caut_get(dip, in_args) :
	    px_lib_do_peek(dip, in_args));
}

/*
 * Implements the PPM interface.
 */
int
px_lib_pmctl(int cmd, px_t *px_p)
{
	ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ);
	switch (cmd) {
	case PPMREQ_PRE_PWR_OFF:
		/*
		 * Currently there is no device power management for
		 * the root complex (fire). When there is we need to make
		 * sure that it is at full power before trying to send the
		 * PME_Turn_Off message.
		 */
		DBG(DBG_PWR, px_p->px_dip,
		    "ioctl: request to send PME_Turn_Off\n");
		return (px_goto_l23ready(px_p));

	case PPMREQ_PRE_PWR_ON:
		DBG(DBG_PWR, px_p->px_dip, "ioctl: PRE_PWR_ON request\n");
		return (px_pre_pwron_check(px_p));

	case PPMREQ_POST_PWR_ON:
		DBG(DBG_PWR, px_p->px_dip, "ioctl: POST_PWR_ON request\n");
		return (px_goto_l0(px_p));

	default:
		return (DDI_FAILURE);
	}
}

/*
 * Sends a PME_Turn_Off message to put the link into the L2/L3 Ready state.
 * Called by px_ioctl.  Returns DDI_SUCCESS or DDI_FAILURE.
 * 1. Wait for the link to be in the L1 state (link status reg).
 * 2. Write to the PME_Turn_Off reg to broadcast the message.
 * 3. Set a timeout.
 * 4. If it times out, return failure.
 * 5. If PME_To_ACK is received, wait until the link is in L2/L3 Ready.
 */
static int
px_goto_l23ready(px_t *px_p)
{
	pcie_pwr_t	*pwr_p;
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
	int		ret = DDI_SUCCESS;
	clock_t		end, timeleft;
	int		mutex_held = 1;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	mutex_enter(&pwr_p->pwr_lock);
	mutex_enter(&px_p->px_l23ready_lock);
	/* Clear the PME_To_ACK received flag */
	px_p->px_pm_flags &= ~PX_PMETOACK_RECVD;
	/*
	 * When P25 is the downstream device, after receiving
	 * PME_To_ACK, fire will go to the Detect state, which causes
	 * a link down event. Inform FMA that this is expected.
	 * For all other cards compliant with the PCI Express
	 * spec, this will happen when the power is re-applied. FMA
	 * code will clear this flag after one instance of LDN. Since
	 * there will not be an LDN event for the spec-compliant cards,
	 * we need to clear the flag after receiving PME_To_ACK.
	 */
	px_p->px_pm_flags |= PX_LDN_EXPECTED;
	if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) {
		ret = DDI_FAILURE;
		goto l23ready_done;
	}
	px_p->px_pm_flags |= PX_PME_TURNOFF_PENDING;

	end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout);
	while (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
		timeleft = cv_timedwait(&px_p->px_l23ready_cv,
		    &px_p->px_l23ready_lock, end);
		/*
		 * cv_timedwait returns -1 when either
		 * 1) it timed out, or
		 * 2) there was a premature wakeup but by the time
		 *    cv_timedwait is called again end < lbolt, i.e.
		 *    end is in the past, or
		 * 3) end < lbolt was already true by the time the
		 *    first cv_timedwait call was made.
		 */
		if (timeleft == -1)
			break;
	}
	if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
		/*
		 * Either we timed out, or the interrupt handler didn't
		 * get a chance to grab the mutex and set the flag.
		 * Release the mutex and delay for some time.
		 * This will 1) give the interrupt handler a chance to
		 * set the flag and 2) create a delay between two
		 * consecutive requests.
		 */
		mutex_exit(&px_p->px_l23ready_lock);
		delay(drv_usectohz(50 * PX_MSEC_TO_USEC));
		mutex_held = 0;
		if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
			ret = DDI_FAILURE;
			DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting"
			    " for PME_TO_ACK\n");
		}
	}
	px_p->px_pm_flags &=
	    ~(PX_PME_TURNOFF_PENDING | PX_PMETOACK_RECVD | PX_LDN_EXPECTED);

l23ready_done:
	if (mutex_held)
		mutex_exit(&px_p->px_l23ready_lock);
	/*
	 * Wait until the link is in L1 idle, if sending PME_Turn_Off
	 * was successful.
	 */
	if (ret == DDI_SUCCESS) {
		if (px_link_wait4l1idle(csr_base) != DDI_SUCCESS) {
			DBG(DBG_PWR, px_p->px_dip, " Link is not at L1"
			    " even though we received PME_To_ACK.\n");
			/*
			 * Workaround for a hardware bug in P25.
			 * Due to this bug, the link state will be
			 * Detect rather than L1 after the link has
			 * transitioned to the L23Ready state. Since
			 * we don't know whether the link is in L23Ready
			 * without Fire's state being L1_idle, delay
			 * here just to make sure that the link has
			 * transitioned to L23Ready.
			 */
			delay(drv_usectohz(100 * PX_MSEC_TO_USEC));
		}
		pwr_p->pwr_link_lvl = PM_LEVEL_L3;

	}
	mutex_exit(&pwr_p->pwr_lock);
	return (ret);
}

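/*
 * Illustrative timeline (documentation only) of the PME_Turn_Off handshake
 * implemented by px_goto_l23ready() above and px_pmeq_intr() below; the
 * exact interleaving depends on when the PME_To_ACK message arrives.
 *
 *	px_goto_l23ready()			px_pmeq_intr()
 *	------------------			--------------
 *	set PX_LDN_EXPECTED
 *	px_send_pme_turnoff()
 *	set PX_PME_TURNOFF_PENDING
 *	cv_timedwait(px_l23ready_cv)	<--	PME_To_ACK arrives,
 *						sets PX_PMETOACK_RECVD,
 *						cv_broadcast(px_l23ready_cv)
 *	wake up, see PX_PMETOACK_RECVD
 *	px_link_wait4l1idle()
 *	pwr_link_lvl = PM_LEVEL_L3
 */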
/*
 * Message interrupt handler intended to be shared for both
 * PME and PME_TO_ACK msg handling; currently it only handles
 * the PME_To_ACK message.
 */
uint_t
px_pmeq_intr(caddr_t arg)
{
	px_t	*px_p = (px_t *)arg;

	DBG(DBG_PWR, px_p->px_dip, " PME_To_ACK received \n");
	mutex_enter(&px_p->px_l23ready_lock);
	cv_broadcast(&px_p->px_l23ready_cv);
	if (px_p->px_pm_flags & PX_PME_TURNOFF_PENDING) {
		px_p->px_pm_flags |= PX_PMETOACK_RECVD;
	} else {
		/*
		 * This may be the second ACK received. If so,
		 * we should be receiving it during the wait4L1 stage.
		 */
		px_p->px_pmetoack_ignored++;
	}
	mutex_exit(&px_p->px_l23ready_lock);
	return (DDI_INTR_CLAIMED);
}

static int
px_pre_pwron_check(px_t *px_p)
{
	pcie_pwr_t	*pwr_p;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	/*
	 * For spec-compliant downstream cards, link down
	 * is expected when the device is powered on.
	 */
	px_p->px_pm_flags |= PX_LDN_EXPECTED;
	return (pwr_p->pwr_link_lvl == PM_LEVEL_L3 ? DDI_SUCCESS : DDI_FAILURE);
}

static int
px_goto_l0(px_t *px_p)
{
	pcie_pwr_t	*pwr_p;
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
	int		ret = DDI_SUCCESS;
	uint64_t	time_spent = 0;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	mutex_enter(&pwr_p->pwr_lock);
	/*
	 * The following link retrain activity will cause LDN and LUP events.
	 * Receiving LDN prior to receiving LUP is expected, not an error, in
	 * this case.  Receiving LUP indicates the link is fully up to support
	 * powering up the downstream device; any further LDN and
	 * LUP outside this context will be an error.

static int
px_goto_l0(px_t *px_p)
{
    pcie_pwr_t *pwr_p;
    pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
    caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
    int ret = DDI_SUCCESS;
    uint64_t time_spent = 0;

    /* If no PM info, return failure */
    if (!PCIE_PMINFO(px_p->px_dip) ||
        !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
        return (DDI_FAILURE);

    mutex_enter(&pwr_p->pwr_lock);
    /*
     * The following link retrain activity will cause LDN and LUP events.
     * Receiving an LDN prior to receiving an LUP is expected, not an
     * error, in this case. Receiving an LUP indicates the link is fully
     * up to support powering up the downstream device; any further LDN
     * or LUP outside this context will be an error.
     */
    px_p->px_lup_pending = 1;
    if (px_link_retrain(csr_base) != DDI_SUCCESS) {
        ret = DDI_FAILURE;
        goto l0_done;
    }

    /* The LUP event typically takes on the order of 15 ms to occur */
    for (; px_p->px_lup_pending && (time_spent < px_lup_poll_to);
        time_spent += px_lup_poll_interval)
        drv_usecwait(px_lup_poll_interval);
    if (px_p->px_lup_pending)
        ret = DDI_FAILURE;
l0_done:
    px_enable_detect_quiet(csr_base);
    if (ret == DDI_SUCCESS)
        pwr_p->pwr_link_lvl = PM_LEVEL_L0;
    mutex_exit(&pwr_p->pwr_lock);
    return (ret);
}

/*
 * Extract the driver's binding name to identify which chip we're binding to.
 * Whenever a new bus bridge is created, the driver alias entry should be
 * added here to identify the device if needed. If a device isn't added,
 * the identity defaults to PX_CHIP_UNIDENTIFIED.
 */
static uint32_t
px_identity_init(px_t *px_p)
{
    dev_info_t *dip = px_p->px_dip;
    char *name = ddi_binding_name(dip);
    uint32_t revision = 0;

    revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
        "module-revision#", 0);

    /* Check for Fire driver binding name */
    if (strcmp(name, "pciex108e,80f0") == 0) {
        DBG(DBG_ATTACH, dip, "px_identity_init: %s%d: "
            "(FIRE), module-revision %d\n", NAMEINST(dip),
            revision);

        return ((revision >= FIRE_MOD_REV_20) ?
            PX_CHIP_FIRE : PX_CHIP_UNIDENTIFIED);
    }

    /* Check for Oberon driver binding name */
    if (strcmp(name, "pciex108e,80f8") == 0) {
        DBG(DBG_ATTACH, dip, "px_identity_init: %s%d: "
            "(OBERON), module-revision %d\n", NAMEINST(dip),
            revision);

        return (PX_CHIP_OBERON);
    }

    DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n",
        ddi_driver_name(dip), ddi_get_instance(dip), name, revision);

    return (PX_CHIP_UNIDENTIFIED);
}
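
/*
 * The binding names checked above are the generic PCIe compatible forms
 * "pciex<vendor-id>,<device-id>": vendor 108e is Sun Microsystems, and
 * devices 80f0 and 80f8 are the Fire and Oberon host bridges, as the
 * debug messages indicate.  Supporting a new host bridge would
 * presumably mean adding another strcmp() case here along with a new
 * PX_CHIP_* identity.
 */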

int
px_err_add_intr(px_fault_t *px_fault_p)
{
    dev_info_t *dip = px_fault_p->px_fh_dip;
    px_t *px_p = DIP_TO_STATE(dip);

    VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL,
        (intrfunc)px_fault_p->px_err_func, (caddr_t)px_fault_p,
        NULL, NULL) == 0);

    px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino);

    return (DDI_SUCCESS);
}

void
px_err_rem_intr(px_fault_t *px_fault_p)
{
    dev_info_t *dip = px_fault_p->px_fh_dip;
    px_t *px_p = DIP_TO_STATE(dip);

    px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino,
        IB_INTR_WAIT);

    VERIFY(rem_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL) == 0);
}

/*
 * px_cb_intr_redist() - sun4u only, CB interrupt redistribution
 */
void
px_cb_intr_redist(void *arg)
{
    px_cb_t *cb_p = (px_cb_t *)arg;
    px_cb_list_t *pxl;
    px_t *pxp = NULL;
    px_fault_t *f_p = NULL;
    uint32_t new_cpuid;
    intr_valid_state_t enabled = 0;

    mutex_enter(&cb_p->cb_mutex);

    pxl = cb_p->pxl;
    if (!pxl)
        goto cb_done;

    pxp = pxl->pxp;
    f_p = &pxp->px_cb_fault;
    for (; pxl && (f_p->px_fh_sysino != cb_p->sysino); ) {
        pxl = pxl->next;
        pxp = pxl->pxp;
        f_p = &pxp->px_cb_fault;
    }
    if (pxl == NULL)
        goto cb_done;

    new_cpuid = intr_dist_cpuid();
    if (new_cpuid == cb_p->cpuid)
        goto cb_done;

    if ((px_lib_intr_getvalid(pxp->px_dip, f_p->px_fh_sysino, &enabled)
        != DDI_SUCCESS) || !enabled) {
        DBG(DBG_IB, pxp->px_dip, "px_cb_intr_redist: CB not enabled, "
            "sysino(0x%x)\n", f_p->px_fh_sysino);
        goto cb_done;
    }

    PX_INTR_DISABLE(pxp->px_dip, f_p->px_fh_sysino);

    cb_p->cpuid = new_cpuid;
    cb_p->sysino = f_p->px_fh_sysino;
    PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid);

cb_done:
    mutex_exit(&cb_p->cb_mutex);
}

/*
 * px_cb_add_intr() - Called from attach(9E) to create the CB if it does
 * not yet exist, to always add the CB interrupt vector, but to enable it
 * only once.
 */
int
px_cb_add_intr(px_fault_t *fault_p)
{
    px_t *px_p = DIP_TO_STATE(fault_p->px_fh_dip);
    pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
    px_cb_t *cb_p = (px_cb_t *)px_get_cb(fault_p->px_fh_dip);
    px_cb_list_t *pxl, *pxl_new;
    boolean_t is_proxy = B_FALSE;

    /* create cb */
    if (cb_p == NULL) {
        cb_p = kmem_zalloc(sizeof (px_cb_t), KM_SLEEP);

        mutex_init(&cb_p->cb_mutex, NULL, MUTEX_DRIVER,
            (void *) ipltospl(FM_ERR_PIL));

        cb_p->px_cb_func = px_cb_intr;
        pxu_p->px_cb_p = cb_p;
        px_set_cb(fault_p->px_fh_dip, (uint64_t)cb_p);

        /* px_lib_dev_init allows only FIRE and OBERON */
        px_err_reg_enable(
            (pxu_p->chip_type == PX_CHIP_FIRE) ?
            PX_ERR_JBC : PX_ERR_UBC,
            pxu_p->px_address[PX_REG_XBC]);
    } else
        pxu_p->px_cb_p = cb_p;

    /* register cb interrupt */
    VERIFY(add_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL,
        (intrfunc)cb_p->px_cb_func, (caddr_t)cb_p, NULL, NULL) == 0);

    /* update cb list */
    mutex_enter(&cb_p->cb_mutex);
    if (cb_p->pxl == NULL) {
        is_proxy = B_TRUE;
        pxl = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
        pxl->pxp = px_p;
        cb_p->pxl = pxl;
        cb_p->sysino = fault_p->px_fh_sysino;
        cb_p->cpuid = intr_dist_cpuid();
    } else {
        /*
         * Walk to the last pxl entry, stopping early if a
         * redundant entry (one that already refers to this px)
         * is encountered.
         */
        pxl = cb_p->pxl;
        for (; !(pxl->pxp == px_p) && pxl->next; pxl = pxl->next) {};
        ASSERT(pxl->pxp != px_p);

        /* add to linked list */
        pxl_new = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
        pxl_new->pxp = px_p;
        pxl->next = pxl_new;
    }
    cb_p->attachcnt++;
    mutex_exit(&cb_p->cb_mutex);

    if (is_proxy) {
        /* add to interrupt redistribution list */
        intr_dist_add(px_cb_intr_redist, cb_p);

        /* enable cb hw interrupt */
        px_ib_intr_enable(px_p, cb_p->cpuid, fault_p->px_intr_ino);
    }

    return (DDI_SUCCESS);
}
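
/*
 * Note on the CB ("common block") interrupt scheme used by the routines
 * above and below: px leaves sharing one JBC/UBC block share a single
 * px_cb_t.  The first px to attach becomes the proxy that owns the
 * enabled interrupt vector (cb_p->sysino / cb_p->cpuid); later instances
 * only register their vectors on the cb_p->pxl list.  px_cb_rem_intr()
 * below shifts the proxy role to the next px on that list when the
 * current proxy detaches, and frees the px_cb_t when the last px goes
 * away.
 */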

/*
 * px_cb_rem_intr() - Called from detach(9E) to remove its CB
 * interrupt vector, to shift the proxy role to the next available px,
 * or to disable the CB interrupt when it is the last one.
 */
void
px_cb_rem_intr(px_fault_t *fault_p)
{
    px_t *px_p = DIP_TO_STATE(fault_p->px_fh_dip), *pxp;
    pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
    px_cb_t *cb_p = PX2CB(px_p);
    px_cb_list_t *pxl, *prev;
    px_fault_t *f_p;

    ASSERT(cb_p->pxl);

    /* find and remove this px, and update cb list */
    mutex_enter(&cb_p->cb_mutex);

    pxl = cb_p->pxl;
    if (pxl->pxp == px_p) {
        cb_p->pxl = pxl->next;
    } else {
        prev = pxl;
        pxl = pxl->next;
        for (; pxl && (pxl->pxp != px_p); prev = pxl, pxl = pxl->next) {
        };
        if (!pxl) {
            cmn_err(CE_WARN, "px_cb_rem_intr: can't find px_p 0x%p "
                "in registered CB list.", (void *)px_p);
            mutex_exit(&cb_p->cb_mutex);
            return;
        }
        prev->next = pxl->next;
    }
    pxu_p->px_cb_p = NULL;
    cb_p->attachcnt--;
    kmem_free(pxl, sizeof (px_cb_list_t));
    mutex_exit(&cb_p->cb_mutex);

    /* disable cb hw interrupt */
    if (fault_p->px_fh_sysino == cb_p->sysino)
        px_ib_intr_disable(px_p->px_ib_p, fault_p->px_intr_ino,
            IB_INTR_WAIT);

    /* if last px, remove from interrupt redistribution list */
    if (cb_p->pxl == NULL)
        intr_dist_rem(px_cb_intr_redist, cb_p);

    /* de-register interrupt */
    VERIFY(rem_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL) == 0);

    /* if not last px, assign next px to manage cb */
    mutex_enter(&cb_p->cb_mutex);
    if (cb_p->pxl) {
        if (fault_p->px_fh_sysino == cb_p->sysino) {
            pxp = cb_p->pxl->pxp;
            f_p = &pxp->px_cb_fault;
            cb_p->sysino = f_p->px_fh_sysino;

            PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid);
            (void) px_lib_intr_setstate(pxp->px_dip, cb_p->sysino,
                INTR_IDLE_STATE);
        }
        mutex_exit(&cb_p->cb_mutex);
        return;
    }

    /* clean up after the last px */
    mutex_exit(&cb_p->cb_mutex);

    /* px_lib_dev_init allows only FIRE and OBERON */
    px_err_reg_disable(
        (pxu_p->chip_type == PX_CHIP_FIRE) ? PX_ERR_JBC : PX_ERR_UBC,
        pxu_p->px_address[PX_REG_XBC]);

    mutex_destroy(&cb_p->cb_mutex);
    px_set_cb(fault_p->px_fh_dip, 0ull);
    kmem_free(cb_p, sizeof (px_cb_t));
}

/*
 * px_cb_intr() - sun4u only, CB interrupt dispatcher
 */
uint_t
px_cb_intr(caddr_t arg)
{
    px_cb_t *cb_p = (px_cb_t *)arg;
    px_t *pxp;
    px_fault_t *f_p;
    int ret;

    mutex_enter(&cb_p->cb_mutex);

    if (!cb_p->pxl) {
        mutex_exit(&cb_p->cb_mutex);
        return (DDI_INTR_UNCLAIMED);
    }

    pxp = cb_p->pxl->pxp;
    f_p = &pxp->px_cb_fault;

    ret = f_p->px_err_func((caddr_t)f_p);

    mutex_exit(&cb_p->cb_mutex);
    return (ret);
}

#ifdef FMA
void
px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status)
{
    /* populate the rc_status by reading the registers - TBD */
}
#endif /* FMA */

/*
 * Unprotected raw reads/writes of a fabric device's config space.
 * Only used for temporary PCI-E Fabric Error Handling.
 */
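
/*
 * Note on the accessors below.  PX_BDF_TO_CFGADDR() presumably forms an
 * ECAM-style offset from the bus/device/function number and the register
 * offset, roughly (bdf << 12) | offset, i.e. a 4 KB config window per
 * function below the config-space base taken from the ranges property.
 * The LE_32() conversions account for PCI config space being
 * little-endian while the SPARC host is big-endian.
 *
 * Usage sketch (hypothetical, for illustration only): reading the vendor
 * and device IDs of the function at 'bdf' would look roughly like
 *
 *	uint32_t id = px_fab_get(px_p, bdf, PCI_CONF_VENID);
 *
 * returning the vendor ID in the low 16 bits and the device ID in the
 * high 16 bits.  No locking is implied; as noted above, these accesses
 * are unprotected and intended only for the temporary fabric error
 * handling path.
 */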

uint32_t
px_fab_get(px_t *px_p, pcie_req_id_t bdf, uint16_t offset)
{
    pci_ranges_t *rp = px_p->px_ranges_p;
    uint64_t range_prop, base_addr;
    int bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
    uint32_t val;

    /* Get Fire's physical base address */
    range_prop = px_get_range_prop(px_p, rp, bank);

    /* Compute the config space address */
    base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);

    val = ldphysio(base_addr);

    return (LE_32(val));
}

void
px_fab_set(px_t *px_p, pcie_req_id_t bdf, uint16_t offset, uint32_t val)
{
    pci_ranges_t *rp = px_p->px_ranges_p;
    uint64_t range_prop, base_addr;
    int bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);

    /* Get Fire's physical base address */
    range_prop = px_get_range_prop(px_p, rp, bank);

    /* Compute the config space address */
    base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);

    stphysio(base_addr, LE_32(val));
}

/*
 * cpr callback
 *
 * Disable the fabric error msg interrupts prior to suspending
 * all device drivers; re-enable the fabric error msg interrupts
 * after all devices are resumed.
 */
static boolean_t
px_cpr_callb(void *arg, int code)
{
    px_t *px_p = (px_t *)arg;
    px_ib_t *ib_p = px_p->px_ib_p;
    px_pec_t *pec_p = px_p->px_pec_p;
    pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
    caddr_t csr_base;
    devino_t ce_ino, nf_ino, f_ino;
    px_ino_t *ce_ino_p, *nf_ino_p, *f_ino_p;
    uint64_t imu_log_enable, imu_intr_enable;
    uint64_t imu_log_mask, imu_intr_mask;

    ce_ino = px_msiqid_to_devino(px_p, pec_p->pec_corr_msg_msiq_id);
    nf_ino = px_msiqid_to_devino(px_p, pec_p->pec_non_fatal_msg_msiq_id);
    f_ino = px_msiqid_to_devino(px_p, pec_p->pec_fatal_msg_msiq_id);
    csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];

    imu_log_enable = CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE);
    imu_intr_enable = CSR_XR(csr_base, IMU_INTERRUPT_ENABLE);

    imu_log_mask = BITMASK(IMU_ERROR_LOG_ENABLE_FATAL_MES_NOT_EN_LOG_EN) |
        BITMASK(IMU_ERROR_LOG_ENABLE_NONFATAL_MES_NOT_EN_LOG_EN) |
        BITMASK(IMU_ERROR_LOG_ENABLE_COR_MES_NOT_EN_LOG_EN);

    imu_intr_mask =
        BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_S_INT_EN) |
        BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_S_INT_EN) |
        BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_S_INT_EN) |
        BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_P_INT_EN) |
        BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_P_INT_EN) |
        BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_P_INT_EN);

    switch (code) {
    case CB_CODE_CPR_CHKPT:
        /* disable imu rbne on corr/nonfatal/fatal errors */
        CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE,
            imu_log_enable & (~imu_log_mask));

        CSR_XS(csr_base, IMU_INTERRUPT_ENABLE,
            imu_intr_enable & (~imu_intr_mask));

        /* disable CORR intr mapping */
        px_ib_intr_disable(ib_p, ce_ino, IB_INTR_NOWAIT);

        /* disable NON FATAL intr mapping */
        px_ib_intr_disable(ib_p, nf_ino, IB_INTR_NOWAIT);

        /* disable FATAL intr mapping */
        px_ib_intr_disable(ib_p, f_ino, IB_INTR_NOWAIT);

        break;

    case CB_CODE_CPR_RESUME:
        pxu_p->cpr_flag = PX_NOT_CPR;
        mutex_enter(&ib_p->ib_ino_lst_mutex);

        ce_ino_p = px_ib_locate_ino(ib_p, ce_ino);
        nf_ino_p = px_ib_locate_ino(ib_p, nf_ino);
        f_ino_p = px_ib_locate_ino(ib_p, f_ino);

        /* enable CORR intr mapping */
        if (ce_ino_p)
            px_ib_intr_enable(px_p, ce_ino_p->ino_cpuid, ce_ino);
        else
            cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
                "reenable PCIe Correctable msg intr.\n");

        /* enable NON FATAL intr mapping */
        if (nf_ino_p)
            px_ib_intr_enable(px_p, nf_ino_p->ino_cpuid, nf_ino);
        else
            cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
                "reenable PCIe Non Fatal msg intr.\n");

        /* enable FATAL intr mapping */
        if (f_ino_p)
            px_ib_intr_enable(px_p, f_ino_p->ino_cpuid, f_ino);
        else
            cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
                "reenable PCIe Fatal msg intr.\n");

        mutex_exit(&ib_p->ib_ino_lst_mutex);

        /* re-enable corr/nonfatal/fatal "message not enabled" errors */
        CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, (imu_log_enable |
            (imu_log_mask & px_imu_log_mask)));
        CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, (imu_intr_enable |
            (imu_intr_mask & px_imu_intr_mask)));

        break;
    }

    return (B_TRUE);
}

uint64_t
px_get_rng_parent_hi_mask(px_t *px_p)
{
    pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
    uint64_t mask;

    switch (PX_CHIP_TYPE(pxu_p)) {
    case PX_CHIP_OBERON:
        mask = OBERON_RANGE_PROP_MASK;
        break;
    case PX_CHIP_FIRE:
        mask = PX_RANGE_PROP_MASK;
        break;
    default:
        mask = PX_RANGE_PROP_MASK;
    }

    return (mask);
}

/*
 * Fetch the chip's range property's value.
 */
uint64_t
px_get_range_prop(px_t *px_p, pci_ranges_t *rp, int bank)
{
    uint64_t mask, range_prop;

    mask = px_get_rng_parent_hi_mask(px_p);
    range_prop = (((uint64_t)(rp[bank].parent_high & mask)) << 32) |
        rp[bank].parent_low;

    return (range_prop);
}

/*
 * add cpr callback
 */
void
px_cpr_add_callb(px_t *px_p)
{
    px_p->px_cprcb_id = callb_add(px_cpr_callb, (void *)px_p,
        CB_CL_CPR_POST_USER, "px_cpr");
}

/*
 * remove cpr callback
 */
void
px_cpr_rem_callb(px_t *px_p)
{
    (void) callb_delete(px_p->px_cprcb_id);
}

/*ARGSUSED*/
static uint_t
px_hp_intr(caddr_t arg1, caddr_t arg2)
{
    px_t *px_p = (px_t *)arg1;
    pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
    int rval;

    rval = pcie_intr(px_p->px_dip);

#ifdef DEBUG
    if (rval == DDI_INTR_UNCLAIMED)
        cmn_err(CE_WARN, "%s%d: UNCLAIMED intr\n",
            ddi_driver_name(px_p->px_dip),
            ddi_get_instance(px_p->px_dip));
#endif

    /* Set the interrupt state to idle */
    if (px_lib_intr_setstate(px_p->px_dip,
        pxu_p->hp_sysino, INTR_IDLE_STATE) != DDI_SUCCESS)
        return (DDI_INTR_UNCLAIMED);

    return (rval);
}

int
px_lib_hotplug_init(dev_info_t *dip, void *arg)
{
    px_t *px_p = DIP_TO_STATE(dip);
    pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
    uint64_t ret;

    if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
        "hotplug-capable") == 0)
        return (DDI_FAILURE);

    if ((ret = hvio_hotplug_init(dip, arg)) == DDI_SUCCESS) {
        if (px_lib_intr_devino_to_sysino(px_p->px_dip,
            px_p->px_inos[PX_INTR_HOTPLUG], &pxu_p->hp_sysino) !=
            DDI_SUCCESS) {
#ifdef DEBUG
            cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n",
                ddi_driver_name(px_p->px_dip),
                ddi_get_instance(px_p->px_dip));
#endif
            return (DDI_FAILURE);
        }

        VERIFY(add_ivintr(pxu_p->hp_sysino, PCIE_INTR_PRI,
            (intrfunc)px_hp_intr, (caddr_t)px_p, NULL, NULL) == 0);

        px_ib_intr_enable(px_p, intr_dist_cpuid(),
            px_p->px_inos[PX_INTR_HOTPLUG]);
    }

    return (ret);
}

void
px_lib_hotplug_uninit(dev_info_t *dip)
{
    if (hvio_hotplug_uninit(dip) == DDI_SUCCESS) {
        px_t *px_p = DIP_TO_STATE(dip);
        pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;

        px_ib_intr_disable(px_p->px_ib_p,
            px_p->px_inos[PX_INTR_HOTPLUG], IB_INTR_WAIT);

        VERIFY(rem_ivintr(pxu_p->hp_sysino, PCIE_INTR_PRI) == 0);
    }
}

/*
 * px_hp_intr_redist() - sun4u only, HP interrupt redistribution
 */
void
px_hp_intr_redist(px_t *px_p)
{
    pcie_bus_t *bus_p = PCIE_DIP2BUS(px_p->px_dip);

    if (px_p && PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
        px_ib_intr_dist_en(px_p->px_dip, intr_dist_cpuid(),
            px_p->px_inos[PX_INTR_HOTPLUG], B_FALSE);
    }
}

boolean_t
px_lib_is_in_drain_state(px_t *px_p)
{
    pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
    caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
    uint64_t drain_status;

    if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
        drain_status = CSR_BR(csr_base, DRAIN_CONTROL_STATUS, DRAIN);
    } else {
        drain_status = CSR_BR(csr_base, TLU_STATUS, DRAIN);
    }

    return (drain_status);
}

pcie_req_id_t
px_lib_get_bdf(px_t *px_p)
{
    pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
    caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
    pcie_req_id_t bdf;

    bdf = CSR_BR(csr_base, DMC_PCI_EXPRESS_CONFIGURATION, REQ_ID);

    return (bdf);
}

/*ARGSUSED*/
int
px_lib_get_root_complex_mps(px_t *px_p, dev_info_t *dip, int *mps)
{
    pxu_t *pxu_p;
    caddr_t csr_base;

    pxu_p = (pxu_t *)px_p->px_plat_p;

    if (pxu_p == NULL)
        return (DDI_FAILURE);

    csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];

    *mps = CSR_XR(csr_base, TLU_DEVICE_CAPABILITIES) &
        TLU_DEVICE_CAPABILITIES_MPS_MASK;

    return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_set_root_complex_mps(px_t *px_p, dev_info_t *dip, int mps)
{
    pxu_t *pxu_p;
    caddr_t csr_base;
    uint64_t dev_ctrl;
    int link_width, val;
    px_chip_type_t chip_type = px_identity_init(px_p);

    pxu_p = (pxu_t *)px_p->px_plat_p;

    if (pxu_p == NULL)
        return (DDI_FAILURE);

    csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];

    dev_ctrl = CSR_XR(csr_base, TLU_DEVICE_CONTROL);
    dev_ctrl |= (mps << TLU_DEVICE_CONTROL_MPS);

    CSR_XS(csr_base, TLU_DEVICE_CONTROL, dev_ctrl);

    link_width = CSR_FR(csr_base, TLU_LINK_STATUS, WIDTH);

    /*
     * Convert link_width to match timer array configuration.
     */
    switch (link_width) {
    case 1:
        link_width = 0;
        break;
    case 4:
        link_width = 1;
        break;
    case 8:
        link_width = 2;
        break;
    case 16:
        link_width = 3;
        break;
    default:
        link_width = 0;
    }

    val = px_replay_timer_table[mps][link_width];
    CSR_XS(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD, val);

    if (chip_type == PX_CHIP_OBERON)
        return (DDI_SUCCESS);

    val = px_acknak_timer_table[mps][link_width];
    CSR_XS(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, val);

    return (DDI_SUCCESS);
}
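
/*
 * Note on the table indexing above: the PCIe Max_Payload_Size field is
 * encoded as 0 = 128B, 1 = 256B, 2 = 512B, 3 = 1024B, 4 = 2048B and
 * 5 = 4096B, which is presumably why the replay/ACKNAK timer tables
 * carry six rows, while the negotiated link width (x1/x4/x8/x16) is
 * remapped by the switch statement above to column indices 0..3.  For
 * example, an MPS encoding of 1 (256B) on an x8 link selects row 1,
 * column 2 of each table.
 */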