1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include <sys/types.h> 27 #include <sys/kmem.h> 28 #include <sys/conf.h> 29 #include <sys/ddi.h> 30 #include <sys/sunddi.h> 31 #include <sys/sunndi.h> 32 #include <sys/fm/protocol.h> 33 #include <sys/fm/util.h> 34 #include <sys/modctl.h> 35 #include <sys/disp.h> 36 #include <sys/stat.h> 37 #include <sys/ddi_impldefs.h> 38 #include <sys/vmem.h> 39 #include <sys/iommutsb.h> 40 #include <sys/cpuvar.h> 41 #include <sys/ivintr.h> 42 #include <sys/byteorder.h> 43 #include <sys/hotplug/pci/pciehpc.h> 44 #include <sys/spl.h> 45 #include <px_obj.h> 46 #include <pcie_pwr.h> 47 #include "px_tools_var.h" 48 #include <px_regs.h> 49 #include <px_csr.h> 50 #include <sys/machsystm.h> 51 #include "px_lib4u.h" 52 #include "px_err.h" 53 #include "oberon_regs.h" 54 55 #pragma weak jbus_stst_order 56 57 extern void jbus_stst_order(); 58 59 ulong_t px_mmu_dvma_end = 0xfffffffful; 60 uint_t px_ranges_phi_mask = 0xfffffffful; 61 uint64_t *px_oberon_ubc_scratch_regs; 62 uint64_t px_paddr_mask; 63 64 static int px_goto_l23ready(px_t *px_p); 65 static int px_goto_l0(px_t *px_p); 66 static int px_pre_pwron_check(px_t *px_p); 67 static uint32_t px_identity_init(px_t *px_p); 68 static boolean_t px_cpr_callb(void *arg, int code); 69 static uint_t px_cb_intr(caddr_t arg); 70 71 /* 72 * px_lib_map_registers 73 * 74 * This function is called from the attach routine to map the registers 75 * accessed by this driver. 
76 * 77 * used by: px_attach() 78 * 79 * return value: DDI_FAILURE on failure 80 */ 81 int 82 px_lib_map_regs(pxu_t *pxu_p, dev_info_t *dip) 83 { 84 ddi_device_acc_attr_t attr; 85 px_reg_bank_t reg_bank = PX_REG_CSR; 86 87 DBG(DBG_ATTACH, dip, "px_lib_map_regs: pxu_p:0x%p, dip 0x%p\n", 88 pxu_p, dip); 89 90 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 91 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 92 attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC; 93 94 /* 95 * PCI CSR Base 96 */ 97 if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank], 98 0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) { 99 goto fail; 100 } 101 102 reg_bank++; 103 104 /* 105 * XBUS CSR Base 106 */ 107 if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank], 108 0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) { 109 goto fail; 110 } 111 112 pxu_p->px_address[reg_bank] -= FIRE_CONTROL_STATUS; 113 114 done: 115 for (; reg_bank >= PX_REG_CSR; reg_bank--) { 116 DBG(DBG_ATTACH, dip, "reg_bank 0x%x address 0x%p\n", 117 reg_bank, pxu_p->px_address[reg_bank]); 118 } 119 120 return (DDI_SUCCESS); 121 122 fail: 123 cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n", 124 ddi_driver_name(dip), ddi_get_instance(dip), reg_bank); 125 126 for (reg_bank--; reg_bank >= PX_REG_CSR; reg_bank--) { 127 pxu_p->px_address[reg_bank] = NULL; 128 ddi_regs_map_free(&pxu_p->px_ac[reg_bank]); 129 } 130 131 return (DDI_FAILURE); 132 } 133 134 /* 135 * px_lib_unmap_regs: 136 * 137 * This routine unmaps the registers mapped by map_px_registers. 138 * 139 * used by: px_detach(), and error conditions in px_attach() 140 * 141 * return value: none 142 */ 143 void 144 px_lib_unmap_regs(pxu_t *pxu_p) 145 { 146 int i; 147 148 for (i = 0; i < PX_REG_MAX; i++) { 149 if (pxu_p->px_ac[i]) 150 ddi_regs_map_free(&pxu_p->px_ac[i]); 151 } 152 } 153 154 int 155 px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl) 156 { 157 158 caddr_t xbc_csr_base, csr_base; 159 px_dvma_range_prop_t px_dvma_range; 160 pxu_t *pxu_p; 161 uint8_t chip_mask; 162 px_t *px_p = DIP_TO_STATE(dip); 163 px_chip_type_t chip_type = px_identity_init(px_p); 164 165 DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p", dip); 166 167 if (chip_type == PX_CHIP_UNIDENTIFIED) { 168 cmn_err(CE_WARN, "%s%d: Unrecognized Hardware Version\n", 169 NAMEINST(dip)); 170 return (DDI_FAILURE); 171 } 172 173 chip_mask = BITMASK(chip_type); 174 px_paddr_mask = (chip_type == PX_CHIP_FIRE) ? MMU_FIRE_PADDR_MASK : 175 MMU_OBERON_PADDR_MASK; 176 177 /* 178 * Allocate platform specific structure and link it to 179 * the px state structure. 180 */ 181 pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP); 182 pxu_p->chip_type = chip_type; 183 pxu_p->portid = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 184 "portid", -1); 185 186 /* Map in the registers */ 187 if (px_lib_map_regs(pxu_p, dip) == DDI_FAILURE) { 188 kmem_free(pxu_p, sizeof (pxu_t)); 189 190 return (DDI_FAILURE); 191 } 192 193 xbc_csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC]; 194 csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR]; 195 196 pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid); 197 pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie); 198 pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie); 199 200 pxu_p->tsb_paddr = va_to_pa(pxu_p->tsb_vaddr); 201 202 /* 203 * Create "virtual-dma" property to support child devices 204 * needing to know DVMA range. 
205 */ 206 px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1 207 - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT); 208 px_dvma_range.dvma_len = (uint32_t) 209 px_mmu_dvma_end - px_dvma_range.dvma_base + 1; 210 211 (void) ddi_prop_update_int_array(DDI_DEV_T_NONE, dip, 212 "virtual-dma", (int *)&px_dvma_range, 213 sizeof (px_dvma_range_prop_t) / sizeof (int)); 214 /* 215 * Initilize all fire hardware specific blocks. 216 */ 217 hvio_cb_init(xbc_csr_base, pxu_p); 218 hvio_ib_init(csr_base, pxu_p); 219 hvio_pec_init(csr_base, pxu_p); 220 hvio_mmu_init(csr_base, pxu_p); 221 222 px_p->px_plat_p = (void *)pxu_p; 223 224 /* 225 * Initialize all the interrupt handlers 226 */ 227 switch (PX_CHIP_TYPE(pxu_p)) { 228 case PX_CHIP_OBERON: 229 /* 230 * Oberon hotplug uses SPARE3 field in ILU Error Log Enable 231 * register to indicate the status of leaf reset, 232 * we need to preserve the value of this bit, and keep it in 233 * px_ilu_log_mask to reflect the state of the bit 234 */ 235 if (CSR_BR(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3)) 236 px_ilu_log_mask |= (1ull << 237 ILU_ERROR_LOG_ENABLE_SPARE3); 238 else 239 px_ilu_log_mask &= ~(1ull << 240 ILU_ERROR_LOG_ENABLE_SPARE3); 241 242 px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_ENABLE); 243 break; 244 245 case PX_CHIP_FIRE: 246 px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_ENABLE); 247 break; 248 249 default: 250 cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n", 251 ddi_driver_name(dip), ddi_get_instance(dip)); 252 return (DDI_FAILURE); 253 } 254 255 /* Initilize device handle */ 256 *dev_hdl = (devhandle_t)csr_base; 257 258 DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl); 259 260 return (DDI_SUCCESS); 261 } 262 263 int 264 px_lib_dev_fini(dev_info_t *dip) 265 { 266 caddr_t csr_base; 267 uint8_t chip_mask; 268 px_t *px_p = DIP_TO_STATE(dip); 269 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 270 271 DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip); 272 273 /* 274 * Deinitialize all the interrupt handlers 275 */ 276 switch (PX_CHIP_TYPE(pxu_p)) { 277 case PX_CHIP_OBERON: 278 case PX_CHIP_FIRE: 279 chip_mask = BITMASK(PX_CHIP_TYPE(pxu_p)); 280 csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR]; 281 px_err_reg_setup_pcie(chip_mask, csr_base, PX_ERR_DISABLE); 282 break; 283 284 default: 285 cmn_err(CE_WARN, "%s%d: PX primary bus Unknown\n", 286 ddi_driver_name(dip), ddi_get_instance(dip)); 287 return (DDI_FAILURE); 288 } 289 290 iommu_tsb_free(pxu_p->tsb_cookie); 291 292 px_lib_unmap_regs((pxu_t *)px_p->px_plat_p); 293 kmem_free(px_p->px_plat_p, sizeof (pxu_t)); 294 px_p->px_plat_p = NULL; 295 (void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "virtual-dma"); 296 297 return (DDI_SUCCESS); 298 } 299 300 /*ARGSUSED*/ 301 int 302 px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino, 303 sysino_t *sysino) 304 { 305 px_t *px_p = DIP_TO_STATE(dip); 306 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 307 uint64_t ret; 308 309 DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p " 310 "devino 0x%x\n", dip, devino); 311 312 if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip), 313 pxu_p, devino, sysino)) != H_EOK) { 314 DBG(DBG_LIB_INT, dip, 315 "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret); 316 return (DDI_FAILURE); 317 } 318 319 DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n", 320 *sysino); 321 322 return (DDI_SUCCESS); 323 } 324 325 /*ARGSUSED*/ 326 int 327 px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino, 328 intr_valid_state_t *intr_valid_state) 329 { 330 uint64_t ret; 331 332 
DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n", 333 dip, sysino); 334 335 if ((ret = hvio_intr_getvalid(DIP_TO_HANDLE(dip), 336 sysino, intr_valid_state)) != H_EOK) { 337 DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n", 338 ret); 339 return (DDI_FAILURE); 340 } 341 342 DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n", 343 *intr_valid_state); 344 345 return (DDI_SUCCESS); 346 } 347 348 /*ARGSUSED*/ 349 int 350 px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino, 351 intr_valid_state_t intr_valid_state) 352 { 353 uint64_t ret; 354 355 DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx " 356 "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state); 357 358 if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip), 359 sysino, intr_valid_state)) != H_EOK) { 360 DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n", 361 ret); 362 return (DDI_FAILURE); 363 } 364 365 return (DDI_SUCCESS); 366 } 367 368 /*ARGSUSED*/ 369 int 370 px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino, 371 intr_state_t *intr_state) 372 { 373 uint64_t ret; 374 375 DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n", 376 dip, sysino); 377 378 if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip), 379 sysino, intr_state)) != H_EOK) { 380 DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n", 381 ret); 382 return (DDI_FAILURE); 383 } 384 385 DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n", 386 *intr_state); 387 388 return (DDI_SUCCESS); 389 } 390 391 /*ARGSUSED*/ 392 int 393 px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino, 394 intr_state_t intr_state) 395 { 396 uint64_t ret; 397 398 DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx " 399 "intr_state 0x%x\n", dip, sysino, intr_state); 400 401 if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip), 402 sysino, intr_state)) != H_EOK) { 403 DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n", 404 ret); 405 return (DDI_FAILURE); 406 } 407 408 return (DDI_SUCCESS); 409 } 410 411 /*ARGSUSED*/ 412 int 413 px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid) 414 { 415 px_t *px_p = DIP_TO_STATE(dip); 416 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 417 uint64_t ret; 418 419 DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n", 420 dip, sysino); 421 422 if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip), pxu_p, 423 sysino, cpuid)) != H_EOK) { 424 DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n", 425 ret); 426 return (DDI_FAILURE); 427 } 428 429 DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", cpuid); 430 431 return (DDI_SUCCESS); 432 } 433 434 /*ARGSUSED*/ 435 int 436 px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid) 437 { 438 px_t *px_p = DIP_TO_STATE(dip); 439 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 440 uint64_t ret; 441 442 DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx " 443 "cpuid 0x%x\n", dip, sysino, cpuid); 444 445 if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip), pxu_p, 446 sysino, cpuid)) != H_EOK) { 447 DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n", 448 ret); 449 return (DDI_FAILURE); 450 } 451 452 return (DDI_SUCCESS); 453 } 454 455 /*ARGSUSED*/ 456 int 457 px_lib_intr_reset(dev_info_t *dip) 458 { 459 devino_t ino; 460 sysino_t sysino; 461 462 DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip); 463 464 /* Reset all Interrupts */ 465 for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) { 466 if 
(px_lib_intr_devino_to_sysino(dip, ino, 467 &sysino) != DDI_SUCCESS) 468 return (BF_FATAL); 469 470 if (px_lib_intr_setstate(dip, sysino, 471 INTR_IDLE_STATE) != DDI_SUCCESS) 472 return (BF_FATAL); 473 } 474 475 return (BF_NONE); 476 } 477 478 /*ARGSUSED*/ 479 int 480 px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages, 481 io_attributes_t attr, void *addr, size_t pfn_index, int flags) 482 { 483 px_t *px_p = DIP_TO_STATE(dip); 484 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 485 uint64_t ret; 486 487 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx " 488 "pages 0x%x attr 0x%x addr 0x%p pfn_index 0x%llx flags 0x%x\n", 489 dip, tsbid, pages, attr, addr, pfn_index, flags); 490 491 if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages, 492 attr, addr, pfn_index, flags)) != H_EOK) { 493 DBG(DBG_LIB_DMA, dip, 494 "px_lib_iommu_map failed, ret 0x%lx\n", ret); 495 return (DDI_FAILURE); 496 } 497 498 return (DDI_SUCCESS); 499 } 500 501 /*ARGSUSED*/ 502 int 503 px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages) 504 { 505 px_t *px_p = DIP_TO_STATE(dip); 506 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 507 uint64_t ret; 508 509 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx " 510 "pages 0x%x\n", dip, tsbid, pages); 511 512 if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, tsbid, pages)) 513 != H_EOK) { 514 DBG(DBG_LIB_DMA, dip, 515 "px_lib_iommu_demap failed, ret 0x%lx\n", ret); 516 517 return (DDI_FAILURE); 518 } 519 520 return (DDI_SUCCESS); 521 } 522 523 /*ARGSUSED*/ 524 int 525 px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, io_attributes_t *attr_p, 526 r_addr_t *r_addr_p) 527 { 528 px_t *px_p = DIP_TO_STATE(dip); 529 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 530 uint64_t ret; 531 532 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n", 533 dip, tsbid); 534 535 if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid, 536 attr_p, r_addr_p)) != H_EOK) { 537 DBG(DBG_LIB_DMA, dip, 538 "hvio_iommu_getmap failed, ret 0x%lx\n", ret); 539 540 return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING:DDI_FAILURE); 541 } 542 543 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%x r_addr 0x%llx\n", 544 *attr_p, *r_addr_p); 545 546 return (DDI_SUCCESS); 547 } 548 549 550 /* 551 * Checks dma attributes against system bypass ranges 552 * The bypass range is determined by the hardware. Return them so the 553 * common code can do generic checking against them. 554 */ 555 /*ARGSUSED*/ 556 int 557 px_lib_dma_bypass_rngchk(dev_info_t *dip, ddi_dma_attr_t *attr_p, 558 uint64_t *lo_p, uint64_t *hi_p) 559 { 560 px_t *px_p = DIP_TO_STATE(dip); 561 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 562 563 *lo_p = hvio_get_bypass_base(pxu_p); 564 *hi_p = hvio_get_bypass_end(pxu_p); 565 566 return (DDI_SUCCESS); 567 } 568 569 570 /*ARGSUSED*/ 571 int 572 px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra, io_attributes_t attr, 573 io_addr_t *io_addr_p) 574 { 575 uint64_t ret; 576 px_t *px_p = DIP_TO_STATE(dip); 577 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 578 579 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx " 580 "attr 0x%x\n", dip, ra, attr); 581 582 if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), pxu_p, ra, 583 attr, io_addr_p)) != H_EOK) { 584 DBG(DBG_LIB_DMA, dip, 585 "hvio_iommu_getbypass failed, ret 0x%lx\n", ret); 586 return (DDI_FAILURE); 587 } 588 589 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n", 590 *io_addr_p); 591 592 return (DDI_SUCCESS); 593 } 594 595 /* 596 * bus dma sync entry point. 
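 *
 * Added summary (an interpretation of the code below, not authoritative):
 * this is typically reached via a leaf driver's ddi_dma_sync(9F) call on a
 * handle bound through this nexus. On Oberon no flush is needed; on Fire,
 * for DMA-read handles, the jbus_stst_order() CPU workaround below drains
 * the CPU's invalidate FIFOs before the host consumes the DMA'ed data.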
597 */ 598 /*ARGSUSED*/ 599 int 600 px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 601 off_t off, size_t len, uint_t cache_flags) 602 { 603 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 604 px_t *px_p = DIP_TO_STATE(dip); 605 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 606 607 DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p " 608 "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n", 609 dip, rdip, handle, off, len, cache_flags); 610 611 /* 612 * No flush needed for Oberon 613 */ 614 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) 615 return (DDI_SUCCESS); 616 617 /* 618 * jbus_stst_order is found only in certain cpu modules. 619 * Just return success if not present. 620 */ 621 if (&jbus_stst_order == NULL) 622 return (DDI_SUCCESS); 623 624 if (!(mp->dmai_flags & PX_DMAI_FLAGS_INUSE)) { 625 cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.", 626 ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp); 627 628 return (DDI_FAILURE); 629 } 630 631 if (mp->dmai_flags & PX_DMAI_FLAGS_NOSYNC) 632 return (DDI_SUCCESS); 633 634 /* 635 * No flush needed when sending data from memory to device. 636 * Nothing to do to "sync" memory to what device would already see. 637 */ 638 if (!(mp->dmai_rflags & DDI_DMA_READ) || 639 ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV)) 640 return (DDI_SUCCESS); 641 642 /* 643 * Perform necessary cpu workaround to ensure jbus ordering. 644 * CPU's internal "invalidate FIFOs" are flushed. 645 */ 646 647 #if !defined(lint) 648 kpreempt_disable(); 649 #endif 650 jbus_stst_order(); 651 #if !defined(lint) 652 kpreempt_enable(); 653 #endif 654 return (DDI_SUCCESS); 655 } 656 657 /* 658 * MSIQ Functions: 659 */ 660 /*ARGSUSED*/ 661 int 662 px_lib_msiq_init(dev_info_t *dip) 663 { 664 px_t *px_p = DIP_TO_STATE(dip); 665 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 666 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 667 px_dvma_addr_t pg_index; 668 size_t q_sz = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 669 size_t size; 670 int i, ret; 671 672 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip); 673 674 /* must aligned on q_sz (happens to be !!! page) boundary */ 675 ASSERT(q_sz == 8 * 1024); 676 677 /* 678 * Map the EQ memory into the Fire MMU (has to be 512KB aligned) 679 * and then initialize the base address register. 680 * 681 * Allocate entries from Fire IOMMU so that the resulting address 682 * is properly aligned. Calculate the index of the first allocated 683 * entry. Note: The size of the mapping is assumed to be a multiple 684 * of the page size. 
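	 *
	 * Rough arithmetic sketch (illustrative only; the real counts come
	 * from msiq_state_p at run time): each queue is
	 * q_sz = msiq_rec_cnt * sizeof (msiq_rec_t), asserted above to be
	 * exactly 8KB (one MMU page), so a hypothetical 36-queue
	 * configuration would need 36 * 8KB = 288KB of buffer space, mapped
	 * at the 512KB-aligned DVMA address that vmem_xalloc() returns below.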
685 */ 686 size = msiq_state_p->msiq_cnt * q_sz; 687 688 msiq_state_p->msiq_buf_p = kmem_zalloc(size, KM_SLEEP); 689 690 for (i = 0; i < msiq_state_p->msiq_cnt; i++) 691 msiq_state_p->msiq_p[i].msiq_base_p = (msiqhead_t *) 692 ((caddr_t)msiq_state_p->msiq_buf_p + (i * q_sz)); 693 694 pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map, 695 size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT); 696 697 if (pxu_p->msiq_mapped_p == NULL) 698 return (DDI_FAILURE); 699 700 pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p, 701 MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p)); 702 703 if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index), 704 MMU_BTOP(size), PCI_MAP_ATTR_WRITE, msiq_state_p->msiq_buf_p, 705 0, MMU_MAP_BUF)) != DDI_SUCCESS) { 706 DBG(DBG_LIB_MSIQ, dip, 707 "px_lib_msiq_init: px_lib_iommu_map failed, " 708 "ret 0x%lx\n", ret); 709 710 (void) px_lib_msiq_fini(dip); 711 return (DDI_FAILURE); 712 } 713 714 if ((ret = hvio_msiq_init(DIP_TO_HANDLE(dip), 715 pxu_p)) != H_EOK) { 716 DBG(DBG_LIB_MSIQ, dip, 717 "hvio_msiq_init failed, ret 0x%lx\n", ret); 718 719 (void) px_lib_msiq_fini(dip); 720 return (DDI_FAILURE); 721 } 722 723 return (DDI_SUCCESS); 724 } 725 726 /*ARGSUSED*/ 727 int 728 px_lib_msiq_fini(dev_info_t *dip) 729 { 730 px_t *px_p = DIP_TO_STATE(dip); 731 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 732 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 733 px_dvma_addr_t pg_index; 734 size_t size; 735 736 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip); 737 738 /* 739 * Unmap and free the EQ memory that had been mapped 740 * into the Fire IOMMU. 741 */ 742 size = msiq_state_p->msiq_cnt * 743 msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 744 745 pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p, 746 MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p)); 747 748 (void) px_lib_iommu_demap(px_p->px_dip, 749 PCI_TSBID(0, pg_index), MMU_BTOP(size)); 750 751 /* Free the entries from the Fire MMU */ 752 vmem_xfree(px_p->px_mmu_p->mmu_dvma_map, 753 (void *)pxu_p->msiq_mapped_p, size); 754 755 kmem_free(msiq_state_p->msiq_buf_p, msiq_state_p->msiq_cnt * 756 msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t)); 757 758 return (DDI_SUCCESS); 759 } 760 761 /*ARGSUSED*/ 762 int 763 px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p, 764 uint_t *msiq_rec_cnt_p) 765 { 766 px_t *px_p = DIP_TO_STATE(dip); 767 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 768 size_t msiq_size; 769 770 DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n", 771 dip, msiq_id); 772 773 msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 774 ra_p = (r_addr_t *)((caddr_t)msiq_state_p->msiq_buf_p + 775 (msiq_id * msiq_size)); 776 777 *msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt; 778 779 DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%p msiq_rec_cnt 0x%x\n", 780 ra_p, *msiq_rec_cnt_p); 781 782 return (DDI_SUCCESS); 783 } 784 785 /*ARGSUSED*/ 786 int 787 px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id, 788 pci_msiq_valid_state_t *msiq_valid_state) 789 { 790 uint64_t ret; 791 792 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n", 793 dip, msiq_id); 794 795 if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip), 796 msiq_id, msiq_valid_state)) != H_EOK) { 797 DBG(DBG_LIB_MSIQ, dip, 798 "hvio_msiq_getvalid failed, ret 0x%lx\n", ret); 799 return (DDI_FAILURE); 800 } 801 802 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n", 803 *msiq_valid_state); 804 805 return (DDI_SUCCESS); 806 } 807 808 /*ARGSUSED*/ 809 int 810 
px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id, 811 pci_msiq_valid_state_t msiq_valid_state) 812 { 813 uint64_t ret; 814 815 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x " 816 "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state); 817 818 if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip), 819 msiq_id, msiq_valid_state)) != H_EOK) { 820 DBG(DBG_LIB_MSIQ, dip, 821 "hvio_msiq_setvalid failed, ret 0x%lx\n", ret); 822 return (DDI_FAILURE); 823 } 824 825 return (DDI_SUCCESS); 826 } 827 828 /*ARGSUSED*/ 829 int 830 px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id, 831 pci_msiq_state_t *msiq_state) 832 { 833 uint64_t ret; 834 835 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n", 836 dip, msiq_id); 837 838 if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip), 839 msiq_id, msiq_state)) != H_EOK) { 840 DBG(DBG_LIB_MSIQ, dip, 841 "hvio_msiq_getstate failed, ret 0x%lx\n", ret); 842 return (DDI_FAILURE); 843 } 844 845 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n", 846 *msiq_state); 847 848 return (DDI_SUCCESS); 849 } 850 851 /*ARGSUSED*/ 852 int 853 px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id, 854 pci_msiq_state_t msiq_state) 855 { 856 uint64_t ret; 857 858 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x " 859 "msiq_state 0x%x\n", dip, msiq_id, msiq_state); 860 861 if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip), 862 msiq_id, msiq_state)) != H_EOK) { 863 DBG(DBG_LIB_MSIQ, dip, 864 "hvio_msiq_setstate failed, ret 0x%lx\n", ret); 865 return (DDI_FAILURE); 866 } 867 868 return (DDI_SUCCESS); 869 } 870 871 /*ARGSUSED*/ 872 int 873 px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id, 874 msiqhead_t *msiq_head) 875 { 876 uint64_t ret; 877 878 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n", 879 dip, msiq_id); 880 881 if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip), 882 msiq_id, msiq_head)) != H_EOK) { 883 DBG(DBG_LIB_MSIQ, dip, 884 "hvio_msiq_gethead failed, ret 0x%lx\n", ret); 885 return (DDI_FAILURE); 886 } 887 888 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n", 889 *msiq_head); 890 891 return (DDI_SUCCESS); 892 } 893 894 /*ARGSUSED*/ 895 int 896 px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id, 897 msiqhead_t msiq_head) 898 { 899 uint64_t ret; 900 901 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x " 902 "msiq_head 0x%x\n", dip, msiq_id, msiq_head); 903 904 if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip), 905 msiq_id, msiq_head)) != H_EOK) { 906 DBG(DBG_LIB_MSIQ, dip, 907 "hvio_msiq_sethead failed, ret 0x%lx\n", ret); 908 return (DDI_FAILURE); 909 } 910 911 return (DDI_SUCCESS); 912 } 913 914 /*ARGSUSED*/ 915 int 916 px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id, 917 msiqtail_t *msiq_tail) 918 { 919 uint64_t ret; 920 921 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n", 922 dip, msiq_id); 923 924 if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip), 925 msiq_id, msiq_tail)) != H_EOK) { 926 DBG(DBG_LIB_MSIQ, dip, 927 "hvio_msiq_gettail failed, ret 0x%lx\n", ret); 928 return (DDI_FAILURE); 929 } 930 931 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n", 932 *msiq_tail); 933 934 return (DDI_SUCCESS); 935 } 936 937 /*ARGSUSED*/ 938 void 939 px_lib_get_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p, 940 msiq_rec_t *msiq_rec_p) 941 { 942 eq_rec_t *eq_rec_p = (eq_rec_t *)msiq_head_p; 943 944 DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n", 945 dip, eq_rec_p); 946 947 if 
(!eq_rec_p->eq_rec_fmt_type) { 948 /* Set msiq_rec_type to zero */ 949 msiq_rec_p->msiq_rec_type = 0; 950 951 return; 952 } 953 954 DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, " 955 "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx " 956 "eq_rec_len 0x%llx eq_rec_addr0 0x%llx " 957 "eq_rec_addr1 0x%llx eq_rec_data0 0x%llx " 958 "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid, 959 eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len, 960 eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1, 961 eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1); 962 963 /* 964 * Only upper 4 bits of eq_rec_fmt_type is used 965 * to identify the EQ record type. 966 */ 967 switch (eq_rec_p->eq_rec_fmt_type >> 3) { 968 case EQ_REC_MSI32: 969 msiq_rec_p->msiq_rec_type = MSI32_REC; 970 971 msiq_rec_p->msiq_rec_data.msi.msi_data = 972 eq_rec_p->eq_rec_data0; 973 break; 974 case EQ_REC_MSI64: 975 msiq_rec_p->msiq_rec_type = MSI64_REC; 976 977 msiq_rec_p->msiq_rec_data.msi.msi_data = 978 eq_rec_p->eq_rec_data0; 979 break; 980 case EQ_REC_MSG: 981 msiq_rec_p->msiq_rec_type = MSG_REC; 982 983 msiq_rec_p->msiq_rec_data.msg.msg_route = 984 eq_rec_p->eq_rec_fmt_type & 7; 985 msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid; 986 msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0; 987 break; 988 default: 989 cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: " 990 "0x%x is an unknown EQ record type", 991 ddi_driver_name(dip), ddi_get_instance(dip), 992 (int)eq_rec_p->eq_rec_fmt_type); 993 break; 994 } 995 996 msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid; 997 msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) | 998 (eq_rec_p->eq_rec_addr0 << 2)); 999 } 1000 1001 /*ARGSUSED*/ 1002 void 1003 px_lib_clr_msiq_rec(dev_info_t *dip, msiqhead_t *msiq_head_p) 1004 { 1005 eq_rec_t *eq_rec_p = (eq_rec_t *)msiq_head_p; 1006 1007 DBG(DBG_LIB_MSIQ, dip, "px_lib_clr_msiq_rec: dip 0x%p eq_rec_p 0x%p\n", 1008 dip, eq_rec_p); 1009 1010 if (eq_rec_p->eq_rec_fmt_type) { 1011 /* Zero out eq_rec_fmt_type field */ 1012 eq_rec_p->eq_rec_fmt_type = 0; 1013 } 1014 } 1015 1016 /* 1017 * MSI Functions: 1018 */ 1019 /*ARGSUSED*/ 1020 int 1021 px_lib_msi_init(dev_info_t *dip) 1022 { 1023 px_t *px_p = DIP_TO_STATE(dip); 1024 px_msi_state_t *msi_state_p = &px_p->px_ib_p->ib_msi_state; 1025 uint64_t ret; 1026 1027 DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip); 1028 1029 if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip), 1030 msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) { 1031 DBG(DBG_LIB_MSIQ, dip, "px_lib_msi_init failed, ret 0x%lx\n", 1032 ret); 1033 return (DDI_FAILURE); 1034 } 1035 1036 return (DDI_SUCCESS); 1037 } 1038 1039 /*ARGSUSED*/ 1040 int 1041 px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num, 1042 msiqid_t *msiq_id) 1043 { 1044 uint64_t ret; 1045 1046 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n", 1047 dip, msi_num); 1048 1049 if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip), 1050 msi_num, msiq_id)) != H_EOK) { 1051 DBG(DBG_LIB_MSI, dip, 1052 "hvio_msi_getmsiq failed, ret 0x%lx\n", ret); 1053 return (DDI_FAILURE); 1054 } 1055 1056 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n", 1057 *msiq_id); 1058 1059 return (DDI_SUCCESS); 1060 } 1061 1062 /*ARGSUSED*/ 1063 int 1064 px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num, 1065 msiqid_t msiq_id, msi_type_t msitype) 1066 { 1067 uint64_t ret; 1068 1069 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x " 1070 "msq_id 0x%x\n", dip, msi_num, msiq_id); 1071 1072 if ((ret = 
hvio_msi_setmsiq(DIP_TO_HANDLE(dip), 1073 msi_num, msiq_id)) != H_EOK) { 1074 DBG(DBG_LIB_MSI, dip, 1075 "hvio_msi_setmsiq failed, ret 0x%lx\n", ret); 1076 return (DDI_FAILURE); 1077 } 1078 1079 return (DDI_SUCCESS); 1080 } 1081 1082 /*ARGSUSED*/ 1083 int 1084 px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num, 1085 pci_msi_valid_state_t *msi_valid_state) 1086 { 1087 uint64_t ret; 1088 1089 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n", 1090 dip, msi_num); 1091 1092 if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip), 1093 msi_num, msi_valid_state)) != H_EOK) { 1094 DBG(DBG_LIB_MSI, dip, 1095 "hvio_msi_getvalid failed, ret 0x%lx\n", ret); 1096 return (DDI_FAILURE); 1097 } 1098 1099 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msiq_id 0x%x\n", 1100 *msi_valid_state); 1101 1102 return (DDI_SUCCESS); 1103 } 1104 1105 /*ARGSUSED*/ 1106 int 1107 px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num, 1108 pci_msi_valid_state_t msi_valid_state) 1109 { 1110 uint64_t ret; 1111 1112 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x " 1113 "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state); 1114 1115 if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip), 1116 msi_num, msi_valid_state)) != H_EOK) { 1117 DBG(DBG_LIB_MSI, dip, 1118 "hvio_msi_setvalid failed, ret 0x%lx\n", ret); 1119 return (DDI_FAILURE); 1120 } 1121 1122 return (DDI_SUCCESS); 1123 } 1124 1125 /*ARGSUSED*/ 1126 int 1127 px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num, 1128 pci_msi_state_t *msi_state) 1129 { 1130 uint64_t ret; 1131 1132 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n", 1133 dip, msi_num); 1134 1135 if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip), 1136 msi_num, msi_state)) != H_EOK) { 1137 DBG(DBG_LIB_MSI, dip, 1138 "hvio_msi_getstate failed, ret 0x%lx\n", ret); 1139 return (DDI_FAILURE); 1140 } 1141 1142 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n", 1143 *msi_state); 1144 1145 return (DDI_SUCCESS); 1146 } 1147 1148 /*ARGSUSED*/ 1149 int 1150 px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num, 1151 pci_msi_state_t msi_state) 1152 { 1153 uint64_t ret; 1154 1155 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x " 1156 "msi_state 0x%x\n", dip, msi_num, msi_state); 1157 1158 if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip), 1159 msi_num, msi_state)) != H_EOK) { 1160 DBG(DBG_LIB_MSI, dip, 1161 "hvio_msi_setstate failed, ret 0x%lx\n", ret); 1162 return (DDI_FAILURE); 1163 } 1164 1165 return (DDI_SUCCESS); 1166 } 1167 1168 /* 1169 * MSG Functions: 1170 */ 1171 /*ARGSUSED*/ 1172 int 1173 px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type, 1174 msiqid_t *msiq_id) 1175 { 1176 uint64_t ret; 1177 1178 DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n", 1179 dip, msg_type); 1180 1181 if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip), 1182 msg_type, msiq_id)) != H_EOK) { 1183 DBG(DBG_LIB_MSG, dip, 1184 "hvio_msg_getmsiq failed, ret 0x%lx\n", ret); 1185 return (DDI_FAILURE); 1186 } 1187 1188 DBG(DBG_LIB_MSI, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n", 1189 *msiq_id); 1190 1191 return (DDI_SUCCESS); 1192 } 1193 1194 /*ARGSUSED*/ 1195 int 1196 px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type, 1197 msiqid_t msiq_id) 1198 { 1199 uint64_t ret; 1200 1201 DBG(DBG_LIB_MSG, dip, "px_lib_msi_setstate: dip 0x%p msg_type 0x%x " 1202 "msiq_id 0x%x\n", dip, msg_type, msiq_id); 1203 1204 if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip), 1205 msg_type, msiq_id)) != H_EOK) { 1206 DBG(DBG_LIB_MSG, dip, 1207 "hvio_msg_setmsiq 
failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t *msg_valid_state)
{
	uint64_t ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n",
	    dip, msg_type);

	if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type,
	    msg_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_getvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n",
	    *msg_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t msg_valid_state)
{
	uint64_t ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x "
	    "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state);

	if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type,
	    msg_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_setvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * Suspend/Resume Functions:
 * Currently unsupported by hypervisor
 */
int
px_lib_suspend(dev_info_t *dip)
{
	px_t *px_p = DIP_TO_STATE(dip);
	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
	px_cb_t *cb_p = PX2CB(px_p);
	devhandle_t dev_hdl, xbus_dev_hdl;
	uint64_t ret = H_EOK;

	DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip);

	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];

	if ((ret = hvio_suspend(dev_hdl, pxu_p)) != H_EOK)
		goto fail;

	if (--cb_p->attachcnt == 0) {
		ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p);
		if (ret != H_EOK)
			cb_p->attachcnt++;
	}
	pxu_p->cpr_flag = PX_ENTERED_CPR;

fail:
	return ((ret != H_EOK) ? DDI_FAILURE : DDI_SUCCESS);
}

void
px_lib_resume(dev_info_t *dip)
{
	px_t *px_p = DIP_TO_STATE(dip);
	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
	px_cb_t *cb_p = PX2CB(px_p);
	devhandle_t dev_hdl, xbus_dev_hdl;
	devino_t pec_ino = px_p->px_inos[PX_INTR_PEC];
	devino_t xbc_ino = px_p->px_inos[PX_INTR_XBC];

	DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip);

	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];

	if (++cb_p->attachcnt == 1)
		hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p);

	hvio_resume(dev_hdl, pec_ino, pxu_p);
}

/*
 * Generate a unique Oberon UBC ID based on the Logical System Board and
 * the IO Channel from the portid property field.
 */
static uint64_t
oberon_get_ubc_id(dev_info_t *dip)
{
	px_t *px_p = DIP_TO_STATE(dip);
	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t ubc_id;

	/*
	 * Generate a unique 6 bit UBC ID using the 2 IO_Channel#[1:0] bits and
	 * the 4 LSB_ID[3:0] bits from the Oberon's portid property.
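	 *
	 * Illustrative layout only (the exact shift/mask values come from
	 * oberon_regs.h, so treat these numbers as an assumption):
	 * IO_Channel# occupies ubc_id[1:0] and LSB_ID occupies ubc_id[5:2],
	 * e.g. LSB_ID 3 on IO Channel 1 would yield (3 << 2) | 1 = 0xd.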
1324 */ 1325 ubc_id = (((pxu_p->portid >> OBERON_PORT_ID_IOC) & 1326 OBERON_PORT_ID_IOC_MASK) | (((pxu_p->portid >> 1327 OBERON_PORT_ID_LSB) & OBERON_PORT_ID_LSB_MASK) 1328 << OBERON_UBC_ID_LSB)); 1329 1330 return (ubc_id); 1331 } 1332 1333 /* 1334 * Oberon does not have a UBC scratch register, so alloc an array of scratch 1335 * registers when needed and use a unique UBC ID as an index. This code 1336 * can be simplified if we use a pre-allocated array. They are currently 1337 * being dynamically allocated because it's only needed by the Oberon. 1338 */ 1339 static void 1340 oberon_set_cb(dev_info_t *dip, uint64_t val) 1341 { 1342 uint64_t ubc_id; 1343 1344 if (px_oberon_ubc_scratch_regs == NULL) 1345 px_oberon_ubc_scratch_regs = 1346 (uint64_t *)kmem_zalloc(sizeof (uint64_t)* 1347 OBERON_UBC_ID_MAX, KM_SLEEP); 1348 1349 ubc_id = oberon_get_ubc_id(dip); 1350 1351 px_oberon_ubc_scratch_regs[ubc_id] = val; 1352 1353 /* 1354 * Check if any scratch registers are still in use. If all scratch 1355 * registers are currently set to zero, then deallocate the scratch 1356 * register array. 1357 */ 1358 for (ubc_id = 0; ubc_id < OBERON_UBC_ID_MAX; ubc_id++) { 1359 if (px_oberon_ubc_scratch_regs[ubc_id] != NULL) 1360 return; 1361 } 1362 1363 /* 1364 * All scratch registers are set to zero so deallocate the scratch 1365 * register array and set the pointer to NULL. 1366 */ 1367 kmem_free(px_oberon_ubc_scratch_regs, 1368 (sizeof (uint64_t)*OBERON_UBC_ID_MAX)); 1369 1370 px_oberon_ubc_scratch_regs = NULL; 1371 } 1372 1373 /* 1374 * Oberon does not have a UBC scratch register, so use an allocated array of 1375 * scratch registers and use the unique UBC ID as an index into that array. 1376 */ 1377 static uint64_t 1378 oberon_get_cb(dev_info_t *dip) 1379 { 1380 uint64_t ubc_id; 1381 1382 if (px_oberon_ubc_scratch_regs == NULL) 1383 return (0); 1384 1385 ubc_id = oberon_get_ubc_id(dip); 1386 1387 return (px_oberon_ubc_scratch_regs[ubc_id]); 1388 } 1389 1390 /* 1391 * Misc Functions: 1392 * Currently unsupported by hypervisor 1393 */ 1394 static uint64_t 1395 px_get_cb(dev_info_t *dip) 1396 { 1397 px_t *px_p = DIP_TO_STATE(dip); 1398 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1399 1400 /* 1401 * Oberon does not currently have Scratchpad registers. 1402 */ 1403 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) 1404 return (oberon_get_cb(dip)); 1405 1406 return (CSR_XR((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1)); 1407 } 1408 1409 static void 1410 px_set_cb(dev_info_t *dip, uint64_t val) 1411 { 1412 px_t *px_p = DIP_TO_STATE(dip); 1413 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1414 1415 /* 1416 * Oberon does not currently have Scratchpad registers. 1417 */ 1418 if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) { 1419 oberon_set_cb(dip, val); 1420 return; 1421 } 1422 1423 CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, val); 1424 } 1425 1426 /*ARGSUSED*/ 1427 int 1428 px_lib_map_vconfig(dev_info_t *dip, 1429 ddi_map_req_t *mp, pci_config_offset_t off, 1430 pci_regspec_t *rp, caddr_t *addrp) 1431 { 1432 /* 1433 * No special config space access services in this layer. 1434 */ 1435 return (DDI_FAILURE); 1436 } 1437 1438 void 1439 px_lib_map_attr_check(ddi_map_req_t *mp) 1440 { 1441 ddi_acc_hdl_t *hp = mp->map_handlep; 1442 1443 /* fire does not accept byte masks from PIO store merge */ 1444 if (hp->ah_acc.devacc_attr_dataorder == DDI_STORECACHING_OK_ACC) 1445 hp->ah_acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 1446 } 1447 1448 /* This function is called only by poke, caut put and pxtool poke. 
*/ 1449 void 1450 px_lib_clr_errs(px_t *px_p, dev_info_t *rdip, uint64_t addr) 1451 { 1452 px_pec_t *pec_p = px_p->px_pec_p; 1453 dev_info_t *rpdip = px_p->px_dip; 1454 int rc_err, fab_err, i; 1455 int acctype = pec_p->pec_safeacc_type; 1456 ddi_fm_error_t derr; 1457 px_ranges_t *ranges_p; 1458 int range_len; 1459 uint32_t addr_high, addr_low; 1460 pcie_req_id_t bdf = 0; 1461 1462 /* Create the derr */ 1463 bzero(&derr, sizeof (ddi_fm_error_t)); 1464 derr.fme_version = DDI_FME_VERSION; 1465 derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1); 1466 derr.fme_flag = acctype; 1467 1468 if (acctype == DDI_FM_ERR_EXPECTED) { 1469 derr.fme_status = DDI_FM_NONFATAL; 1470 ndi_fm_acc_err_set(pec_p->pec_acc_hdl, &derr); 1471 } 1472 1473 if (px_fm_enter(px_p) != DDI_SUCCESS) 1474 return; 1475 1476 /* send ereport/handle/clear fire registers */ 1477 rc_err = px_err_cmn_intr(px_p, &derr, PX_LIB_CALL, PX_FM_BLOCK_ALL); 1478 1479 /* Figure out if this is a cfg or mem32 access */ 1480 addr_high = (uint32_t)(addr >> 32); 1481 addr_low = (uint32_t)addr; 1482 range_len = px_p->px_ranges_length / sizeof (px_ranges_t); 1483 i = 0; 1484 for (ranges_p = px_p->px_ranges_p; i < range_len; i++, ranges_p++) { 1485 if (ranges_p->parent_high == addr_high) { 1486 switch (ranges_p->child_high & PCI_ADDR_MASK) { 1487 case PCI_ADDR_CONFIG: 1488 bdf = (pcie_req_id_t)(addr_low >> 12); 1489 addr_low = 0; 1490 break; 1491 case PCI_ADDR_MEM32: 1492 if (rdip) 1493 bdf = PCI_GET_BDF(rdip); 1494 else 1495 bdf = NULL; 1496 break; 1497 } 1498 break; 1499 } 1500 } 1501 1502 px_rp_en_q(px_p, bdf, addr_low, NULL); 1503 1504 /* 1505 * XXX - Current code scans the fabric for all px_tool accesses. 1506 * In future, do not scan fabric for px_tool access to IO Root Nexus 1507 */ 1508 fab_err = px_scan_fabric(px_p, rpdip, &derr); 1509 1510 px_err_panic(rc_err, PX_RC, fab_err, B_TRUE); 1511 px_fm_exit(px_p); 1512 px_err_panic(rc_err, PX_RC, fab_err, B_FALSE); 1513 } 1514 1515 #ifdef DEBUG 1516 int px_peekfault_cnt = 0; 1517 int px_pokefault_cnt = 0; 1518 #endif /* DEBUG */ 1519 1520 /*ARGSUSED*/ 1521 static int 1522 px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip, 1523 peekpoke_ctlops_t *in_args) 1524 { 1525 px_t *px_p = DIP_TO_STATE(dip); 1526 px_pec_t *pec_p = px_p->px_pec_p; 1527 int err = DDI_SUCCESS; 1528 on_trap_data_t otd; 1529 1530 mutex_enter(&pec_p->pec_pokefault_mutex); 1531 pec_p->pec_ontrap_data = &otd; 1532 pec_p->pec_safeacc_type = DDI_FM_ERR_POKE; 1533 1534 /* Set up protected environment. */ 1535 if (!on_trap(&otd, OT_DATA_ACCESS)) { 1536 uintptr_t tramp = otd.ot_trampoline; 1537 1538 otd.ot_trampoline = (uintptr_t)&poke_fault; 1539 err = do_poke(in_args->size, (void *)in_args->dev_addr, 1540 (void *)in_args->host_addr); 1541 otd.ot_trampoline = tramp; 1542 } else 1543 err = DDI_FAILURE; 1544 1545 px_lib_clr_errs(px_p, rdip, in_args->dev_addr); 1546 1547 if (otd.ot_trap & OT_DATA_ACCESS) 1548 err = DDI_FAILURE; 1549 1550 /* Take down protected environment. 
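	 * (Note: no_trap() and the pec_ontrap_data reset below happen before
	 * pec_pokefault_mutex is released, presumably so a later, unrelated
	 * fault is not attributed to this poke.)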
*/ 1551 no_trap(); 1552 1553 pec_p->pec_ontrap_data = NULL; 1554 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1555 mutex_exit(&pec_p->pec_pokefault_mutex); 1556 1557 #ifdef DEBUG 1558 if (err == DDI_FAILURE) 1559 px_pokefault_cnt++; 1560 #endif 1561 return (err); 1562 } 1563 1564 /*ARGSUSED*/ 1565 static int 1566 px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip, 1567 peekpoke_ctlops_t *cautacc_ctlops_arg) 1568 { 1569 size_t size = cautacc_ctlops_arg->size; 1570 uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr; 1571 uintptr_t host_addr = cautacc_ctlops_arg->host_addr; 1572 ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle; 1573 size_t repcount = cautacc_ctlops_arg->repcount; 1574 uint_t flags = cautacc_ctlops_arg->flags; 1575 1576 px_t *px_p = DIP_TO_STATE(dip); 1577 px_pec_t *pec_p = px_p->px_pec_p; 1578 int err = DDI_SUCCESS; 1579 1580 /* 1581 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault 1582 * mutex. 1583 */ 1584 i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1585 1586 pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap; 1587 pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED; 1588 hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED; 1589 1590 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1591 for (; repcount; repcount--) { 1592 switch (size) { 1593 1594 case sizeof (uint8_t): 1595 i_ddi_put8(hp, (uint8_t *)dev_addr, 1596 *(uint8_t *)host_addr); 1597 break; 1598 1599 case sizeof (uint16_t): 1600 i_ddi_put16(hp, (uint16_t *)dev_addr, 1601 *(uint16_t *)host_addr); 1602 break; 1603 1604 case sizeof (uint32_t): 1605 i_ddi_put32(hp, (uint32_t *)dev_addr, 1606 *(uint32_t *)host_addr); 1607 break; 1608 1609 case sizeof (uint64_t): 1610 i_ddi_put64(hp, (uint64_t *)dev_addr, 1611 *(uint64_t *)host_addr); 1612 break; 1613 } 1614 1615 host_addr += size; 1616 1617 if (flags == DDI_DEV_AUTOINCR) 1618 dev_addr += size; 1619 1620 px_lib_clr_errs(px_p, rdip, dev_addr); 1621 1622 if (pec_p->pec_ontrap_data->ot_trap & OT_DATA_ACCESS) { 1623 err = DDI_FAILURE; 1624 #ifdef DEBUG 1625 px_pokefault_cnt++; 1626 #endif 1627 break; 1628 } 1629 } 1630 } 1631 1632 i_ddi_notrap((ddi_acc_handle_t)hp); 1633 pec_p->pec_ontrap_data = NULL; 1634 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1635 i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1636 hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED; 1637 1638 return (err); 1639 } 1640 1641 1642 int 1643 px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip, 1644 peekpoke_ctlops_t *in_args) 1645 { 1646 return (in_args->handle ? 
px_lib_do_caut_put(dip, rdip, in_args) : 1647 px_lib_do_poke(dip, rdip, in_args)); 1648 } 1649 1650 1651 /*ARGSUSED*/ 1652 static int 1653 px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args) 1654 { 1655 px_t *px_p = DIP_TO_STATE(dip); 1656 px_pec_t *pec_p = px_p->px_pec_p; 1657 int err = DDI_SUCCESS; 1658 on_trap_data_t otd; 1659 1660 mutex_enter(&pec_p->pec_pokefault_mutex); 1661 if (px_fm_enter(px_p) != DDI_SUCCESS) 1662 return (DDI_FAILURE); 1663 pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK; 1664 px_fm_exit(px_p); 1665 1666 if (!on_trap(&otd, OT_DATA_ACCESS)) { 1667 uintptr_t tramp = otd.ot_trampoline; 1668 1669 otd.ot_trampoline = (uintptr_t)&peek_fault; 1670 err = do_peek(in_args->size, (void *)in_args->dev_addr, 1671 (void *)in_args->host_addr); 1672 otd.ot_trampoline = tramp; 1673 } else 1674 err = DDI_FAILURE; 1675 1676 no_trap(); 1677 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1678 mutex_exit(&pec_p->pec_pokefault_mutex); 1679 1680 #ifdef DEBUG 1681 if (err == DDI_FAILURE) 1682 px_peekfault_cnt++; 1683 #endif 1684 return (err); 1685 } 1686 1687 1688 static int 1689 px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg) 1690 { 1691 size_t size = cautacc_ctlops_arg->size; 1692 uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr; 1693 uintptr_t host_addr = cautacc_ctlops_arg->host_addr; 1694 ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle; 1695 size_t repcount = cautacc_ctlops_arg->repcount; 1696 uint_t flags = cautacc_ctlops_arg->flags; 1697 1698 px_t *px_p = DIP_TO_STATE(dip); 1699 px_pec_t *pec_p = px_p->px_pec_p; 1700 int err = DDI_SUCCESS; 1701 1702 /* 1703 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault 1704 * mutex. 1705 */ 1706 i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1707 1708 pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap; 1709 pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED; 1710 hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED; 1711 1712 if (repcount == 1) { 1713 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1714 i_ddi_caut_get(size, (void *)dev_addr, 1715 (void *)host_addr); 1716 } else { 1717 int i; 1718 uint8_t *ff_addr = (uint8_t *)host_addr; 1719 for (i = 0; i < size; i++) 1720 *ff_addr++ = 0xff; 1721 1722 err = DDI_FAILURE; 1723 #ifdef DEBUG 1724 px_peekfault_cnt++; 1725 #endif 1726 } 1727 } else { 1728 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1729 for (; repcount; repcount--) { 1730 i_ddi_caut_get(size, (void *)dev_addr, 1731 (void *)host_addr); 1732 1733 host_addr += size; 1734 1735 if (flags == DDI_DEV_AUTOINCR) 1736 dev_addr += size; 1737 } 1738 } else { 1739 err = DDI_FAILURE; 1740 #ifdef DEBUG 1741 px_peekfault_cnt++; 1742 #endif 1743 } 1744 } 1745 1746 i_ddi_notrap((ddi_acc_handle_t)hp); 1747 pec_p->pec_ontrap_data = NULL; 1748 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1749 i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1750 hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED; 1751 1752 return (err); 1753 } 1754 1755 /*ARGSUSED*/ 1756 int 1757 px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip, 1758 peekpoke_ctlops_t *in_args, void *result) 1759 { 1760 result = (void *)in_args->host_addr; 1761 return (in_args->handle ? 
	    px_lib_do_caut_get(dip, in_args) :
	    px_lib_do_peek(dip, in_args));
}

/*
 * implements PPM interface
 */
int
px_lib_pmctl(int cmd, px_t *px_p)
{
	ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ);
	switch (cmd) {
	case PPMREQ_PRE_PWR_OFF:
		/*
		 * Currently there is no device power management for
		 * the root complex (fire). When there is we need to make
		 * sure that it is at full power before trying to send the
		 * PME_Turn_Off message.
		 */
		DBG(DBG_PWR, px_p->px_dip,
		    "ioctl: request to send PME_Turn_Off\n");
		return (px_goto_l23ready(px_p));

	case PPMREQ_PRE_PWR_ON:
		DBG(DBG_PWR, px_p->px_dip, "ioctl: PRE_PWR_ON request\n");
		return (px_pre_pwron_check(px_p));

	case PPMREQ_POST_PWR_ON:
		DBG(DBG_PWR, px_p->px_dip, "ioctl: POST_PWR_ON request\n");
		return (px_goto_l0(px_p));

	default:
		return (DDI_FAILURE);
	}
}

/*
 * Sends the PME_Turn_Off message to put the link in L2/L3 ready state.
 * Called by px_ioctl.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 * 1. Wait for link to be in L1 state (link status reg)
 * 2. Write to the PME_Turn_off reg to broadcast
 * 3. Set timeout
 * 4. If timeout, return failure.
 * 5. If PM_TO_Ack, wait till link is in L2/L3 ready
 */
static int
px_goto_l23ready(px_t *px_p)
{
	pcie_pwr_t *pwr_p;
	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
	int ret = DDI_SUCCESS;
	clock_t end, timeleft;
	int mutex_held = 1;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	mutex_enter(&pwr_p->pwr_lock);
	mutex_enter(&px_p->px_l23ready_lock);
	/* Clear the PME_To_ACK received flag */
	px_p->px_pm_flags &= ~PX_PMETOACK_RECVD;
	/*
	 * When P25 is the downstream device, after receiving
	 * PME_To_ACK, fire will go to Detect state, which causes
	 * the link down event. Inform FMA that this is expected.
	 * For all other cards compliant with the PCI Express
	 * spec, this will happen when the power is re-applied. FMA
	 * code will clear this flag after one instance of LDN. Since
	 * there will not be a LDN event for the spec compliant cards,
	 * we need to clear the flag after receiving PME_To_ACK.
	 */
	px_p->px_pm_flags |= PX_LDN_EXPECTED;
	if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) {
		ret = DDI_FAILURE;
		goto l23ready_done;
	}
	px_p->px_pm_flags |= PX_PME_TURNOFF_PENDING;

	end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout);
	while (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
		timeleft = cv_timedwait(&px_p->px_l23ready_cv,
		    &px_p->px_l23ready_lock, end);
		/*
		 * If cv_timedwait returns -1, it is either
		 * 1) timed out, or
		 * 2) there was a premature wakeup, but by the time
		 *    cv_timedwait is called again end < lbolt, i.e.
		 *    end is in the past, or
		 * 3) by the time we make the first cv_timedwait call,
		 *    end < lbolt is already true.
		 */
		if (timeleft == -1)
			break;
	}
	if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
		/*
		 * Either we timed out or the interrupt didn't get a
		 * chance to grab the mutex and set the flag.
		 * Release the mutex and delay for some time.
		 * This will 1) give the interrupt a chance to
		 * set the flag and 2) create a delay between two
		 * consecutive requests.
		 */
		mutex_exit(&px_p->px_l23ready_lock);
		delay(drv_usectohz(50 * PX_MSEC_TO_USEC));
		mutex_held = 0;
		if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
			ret = DDI_FAILURE;
			DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting"
			    " for PME_TO_ACK\n");
		}
	}
	px_p->px_pm_flags &=
	    ~(PX_PME_TURNOFF_PENDING | PX_PMETOACK_RECVD | PX_LDN_EXPECTED);

l23ready_done:
	if (mutex_held)
		mutex_exit(&px_p->px_l23ready_lock);
	/*
	 * Wait till the link is in L1 idle, if sending PME_Turn_Off
	 * was successful.
	 */
	if (ret == DDI_SUCCESS) {
		if (px_link_wait4l1idle(csr_base) != DDI_SUCCESS) {
			DBG(DBG_PWR, px_p->px_dip, " Link is not at L1"
			    " even though we received PME_To_ACK.\n");
			/*
			 * Workaround for hardware bug with P25.
			 * Due to a hardware bug with P25, link state
			 * will be Detect state rather than L1 after
			 * link is transitioned to L23Ready state. Since
			 * we don't know whether the link is in L23Ready state
			 * without Fire's state being L1_idle, we delay
			 * here just to make sure that we wait till the link
			 * is transitioned to L23Ready state.
			 */
			delay(drv_usectohz(100 * PX_MSEC_TO_USEC));
		}
		pwr_p->pwr_link_lvl = PM_LEVEL_L3;

	}
	mutex_exit(&pwr_p->pwr_lock);
	return (ret);
}

/*
 * Message interrupt handler intended to be shared for both
 * PME and PME_TO_ACK msg handling; currently it only handles
 * the PME_To_ACK message.
 */
uint_t
px_pmeq_intr(caddr_t arg)
{
	px_t *px_p = (px_t *)arg;

	DBG(DBG_PWR, px_p->px_dip, " PME_To_ACK received \n");
	mutex_enter(&px_p->px_l23ready_lock);
	cv_broadcast(&px_p->px_l23ready_cv);
	if (px_p->px_pm_flags & PX_PME_TURNOFF_PENDING) {
		px_p->px_pm_flags |= PX_PMETOACK_RECVD;
	} else {
		/*
		 * This may be the second ack received. If so,
		 * we should be receiving it during the wait4L1 stage.
		 */
		px_p->px_pmetoack_ignored++;
	}
	mutex_exit(&px_p->px_l23ready_lock);
	return (DDI_INTR_CLAIMED);
}

static int
px_pre_pwron_check(px_t *px_p)
{
	pcie_pwr_t *pwr_p;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	/*
	 * For the spec compliant downstream cards, link down
	 * is expected when the device is powered on.
	 */
	px_p->px_pm_flags |= PX_LDN_EXPECTED;
	return (pwr_p->pwr_link_lvl == PM_LEVEL_L3 ? DDI_SUCCESS : DDI_FAILURE);
}

static int
px_goto_l0(px_t *px_p)
{
	pcie_pwr_t *pwr_p;
	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
	int ret = DDI_SUCCESS;
	uint64_t time_spent = 0;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	mutex_enter(&pwr_p->pwr_lock);
	/*
	 * The following link retrain activity will cause LDN and LUP events.
	 * Receiving LDN prior to receiving LUP is expected, not an error, in
	 * this case. Receiving LUP indicates the link is fully up and able to
	 * support powering up the downstream device; any further LDN or
	 * LUP outside this context will be an error.
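	 *
	 * Sketch of the sequence below (px_lup_pending is assumed to be
	 * cleared by the driver's LUP message handler elsewhere): set
	 * px_lup_pending, retrain the link, then poll the flag every
	 * px_lup_poll_interval microseconds until px_lup_poll_to expires.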
1975 */ 1976 px_p->px_lup_pending = 1; 1977 if (px_link_retrain(csr_base) != DDI_SUCCESS) { 1978 ret = DDI_FAILURE; 1979 goto l0_done; 1980 } 1981 1982 /* LUP event takes the order of 15ms amount of time to occur */ 1983 for (; px_p->px_lup_pending && (time_spent < px_lup_poll_to); 1984 time_spent += px_lup_poll_interval) 1985 drv_usecwait(px_lup_poll_interval); 1986 if (px_p->px_lup_pending) 1987 ret = DDI_FAILURE; 1988 l0_done: 1989 px_enable_detect_quiet(csr_base); 1990 if (ret == DDI_SUCCESS) 1991 pwr_p->pwr_link_lvl = PM_LEVEL_L0; 1992 mutex_exit(&pwr_p->pwr_lock); 1993 return (ret); 1994 } 1995 1996 /* 1997 * Extract the drivers binding name to identify which chip we're binding to. 1998 * Whenever a new bus bridge is created, the driver alias entry should be 1999 * added here to identify the device if needed. If a device isn't added, 2000 * the identity defaults to PX_CHIP_UNIDENTIFIED. 2001 */ 2002 static uint32_t 2003 px_identity_init(px_t *px_p) 2004 { 2005 dev_info_t *dip = px_p->px_dip; 2006 char *name = ddi_binding_name(dip); 2007 uint32_t revision = 0; 2008 2009 revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 2010 "module-revision#", 0); 2011 2012 /* Check for Fire driver binding name */ 2013 if (strcmp(name, "pciex108e,80f0") == 0) { 2014 DBG(DBG_ATTACH, dip, "px_identity_init: %s%d: " 2015 "(FIRE), module-revision %d\n", NAMEINST(dip), 2016 revision); 2017 2018 return ((revision >= FIRE_MOD_REV_20) ? 2019 PX_CHIP_FIRE : PX_CHIP_UNIDENTIFIED); 2020 } 2021 2022 /* Check for Oberon driver binding name */ 2023 if (strcmp(name, "pciex108e,80f8") == 0) { 2024 DBG(DBG_ATTACH, dip, "px_identity_init: %s%d: " 2025 "(OBERON), module-revision %d\n", NAMEINST(dip), 2026 revision); 2027 2028 return (PX_CHIP_OBERON); 2029 } 2030 2031 DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n", 2032 ddi_driver_name(dip), ddi_get_instance(dip), name, revision); 2033 2034 return (PX_CHIP_UNIDENTIFIED); 2035 } 2036 2037 int 2038 px_err_add_intr(px_fault_t *px_fault_p) 2039 { 2040 dev_info_t *dip = px_fault_p->px_fh_dip; 2041 px_t *px_p = DIP_TO_STATE(dip); 2042 2043 VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL, 2044 (intrfunc)px_fault_p->px_err_func, (caddr_t)px_fault_p, 2045 NULL, NULL) == 0); 2046 2047 px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino); 2048 2049 return (DDI_SUCCESS); 2050 } 2051 2052 void 2053 px_err_rem_intr(px_fault_t *px_fault_p) 2054 { 2055 dev_info_t *dip = px_fault_p->px_fh_dip; 2056 px_t *px_p = DIP_TO_STATE(dip); 2057 2058 px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino, 2059 IB_INTR_WAIT); 2060 2061 VERIFY(rem_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL) == 0); 2062 } 2063 2064 /* 2065 * px_cb_intr_redist() - sun4u only, CB interrupt redistribution 2066 */ 2067 void 2068 px_cb_intr_redist(void *arg) 2069 { 2070 px_cb_t *cb_p = (px_cb_t *)arg; 2071 px_cb_list_t *pxl; 2072 px_t *pxp = NULL; 2073 px_fault_t *f_p = NULL; 2074 uint32_t new_cpuid; 2075 intr_valid_state_t enabled = 0; 2076 2077 mutex_enter(&cb_p->cb_mutex); 2078 2079 pxl = cb_p->pxl; 2080 if (!pxl) 2081 goto cb_done; 2082 2083 pxp = pxl->pxp; 2084 f_p = &pxp->px_cb_fault; 2085 for (; pxl && (f_p->px_fh_sysino != cb_p->sysino); ) { 2086 pxl = pxl->next; 2087 pxp = pxl->pxp; 2088 f_p = &pxp->px_cb_fault; 2089 } 2090 if (pxl == NULL) 2091 goto cb_done; 2092 2093 new_cpuid = intr_dist_cpuid(); 2094 if (new_cpuid == cb_p->cpuid) 2095 goto cb_done; 2096 2097 if ((px_lib_intr_getvalid(pxp->px_dip, f_p->px_fh_sysino, &enabled) 2098 != DDI_SUCCESS) 
2037 int
2038 px_err_add_intr(px_fault_t *px_fault_p)
2039 {
2040 	dev_info_t *dip = px_fault_p->px_fh_dip;
2041 	px_t *px_p = DIP_TO_STATE(dip);
2042 
2043 	VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL,
2044 	    (intrfunc)px_fault_p->px_err_func, (caddr_t)px_fault_p,
2045 	    NULL, NULL) == 0);
2046 
2047 	px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino);
2048 
2049 	return (DDI_SUCCESS);
2050 }
2051 
2052 void
2053 px_err_rem_intr(px_fault_t *px_fault_p)
2054 {
2055 	dev_info_t *dip = px_fault_p->px_fh_dip;
2056 	px_t *px_p = DIP_TO_STATE(dip);
2057 
2058 	px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino,
2059 	    IB_INTR_WAIT);
2060 
2061 	VERIFY(rem_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL) == 0);
2062 }
2063 
2064 /*
2065  * px_cb_intr_redist() - sun4u only, CB interrupt redistribution
2066  */
2067 void
2068 px_cb_intr_redist(void *arg)
2069 {
2070 	px_cb_t *cb_p = (px_cb_t *)arg;
2071 	px_cb_list_t *pxl;
2072 	px_t *pxp = NULL;
2073 	px_fault_t *f_p = NULL;
2074 	uint32_t new_cpuid;
2075 	intr_valid_state_t enabled = 0;
2076 
2077 	mutex_enter(&cb_p->cb_mutex);
2078 
2079 	pxl = cb_p->pxl;
2080 	if (!pxl)
2081 		goto cb_done;
2082 
2083 	pxp = pxl->pxp;
2084 	f_p = &pxp->px_cb_fault;
2085 	for (; pxl && (f_p->px_fh_sysino != cb_p->sysino); ) {
2086 		pxl = pxl->next;
2087 		pxp = pxl->pxp;
2088 		f_p = &pxp->px_cb_fault;
2089 	}
2090 	if (pxl == NULL)
2091 		goto cb_done;
2092 
2093 	new_cpuid = intr_dist_cpuid();
2094 	if (new_cpuid == cb_p->cpuid)
2095 		goto cb_done;
2096 
2097 	if ((px_lib_intr_getvalid(pxp->px_dip, f_p->px_fh_sysino, &enabled)
2098 	    != DDI_SUCCESS) || !enabled) {
2099 		DBG(DBG_IB, pxp->px_dip, "px_cb_intr_redist: CB not enabled, "
2100 		    "sysino(0x%x)\n", f_p->px_fh_sysino);
2101 		goto cb_done;
2102 	}
2103 
2104 	PX_INTR_DISABLE(pxp->px_dip, f_p->px_fh_sysino);
2105 
2106 	cb_p->cpuid = new_cpuid;
2107 	cb_p->sysino = f_p->px_fh_sysino;
2108 	PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid);
2109 
2110 cb_done:
2111 	mutex_exit(&cb_p->cb_mutex);
2112 }
2113 
2114 /*
2115  * px_cb_add_intr() - Called from attach(9E) to create the CB if it is not
2116  * yet created, to always add the CB interrupt vector, but to enable it only once.
2117  */
2118 int
2119 px_cb_add_intr(px_fault_t *fault_p)
2120 {
2121 	px_t *px_p = DIP_TO_STATE(fault_p->px_fh_dip);
2122 	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2123 	px_cb_t *cb_p = (px_cb_t *)px_get_cb(fault_p->px_fh_dip);
2124 	px_cb_list_t *pxl, *pxl_new;
2125 	boolean_t is_proxy = B_FALSE;
2126 
2127 	/* create cb */
2128 	if (cb_p == NULL) {
2129 		cb_p = kmem_zalloc(sizeof (px_cb_t), KM_SLEEP);
2130 
2131 		mutex_init(&cb_p->cb_mutex, NULL, MUTEX_DRIVER,
2132 		    (void *) ipltospl(FM_ERR_PIL));
2133 
2134 		cb_p->px_cb_func = px_cb_intr;
2135 		pxu_p->px_cb_p = cb_p;
2136 		px_set_cb(fault_p->px_fh_dip, (uint64_t)cb_p);
2137 
2138 		/* px_lib_dev_init allows only FIRE and OBERON */
2139 		px_err_reg_enable(
2140 		    (pxu_p->chip_type == PX_CHIP_FIRE) ?
2141 		    PX_ERR_JBC : PX_ERR_UBC,
2142 		    pxu_p->px_address[PX_REG_XBC]);
2143 	} else
2144 		pxu_p->px_cb_p = cb_p;
2145 
2146 	/* register cb interrupt */
2147 	VERIFY(add_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL,
2148 	    (intrfunc)cb_p->px_cb_func, (caddr_t)cb_p, NULL, NULL) == 0);
2149 
2150 
2151 	/* update cb list */
2152 	mutex_enter(&cb_p->cb_mutex);
2153 	if (cb_p->pxl == NULL) {
2154 		is_proxy = B_TRUE;
2155 		pxl = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
2156 		pxl->pxp = px_p;
2157 		cb_p->pxl = pxl;
2158 		cb_p->sysino = fault_p->px_fh_sysino;
2159 		cb_p->cpuid = intr_dist_cpuid();
2160 	} else {
2161 		/*
2162 		 * Find the last pxl, or stop short
2163 		 * if a redundant entry is encountered,
2164 		 * or both.
2165 		 */
2166 		pxl = cb_p->pxl;
2167 		for (; !(pxl->pxp == px_p) && pxl->next; pxl = pxl->next) {};
2168 		ASSERT(pxl->pxp != px_p);
2169 
2170 		/* add to linked list */
2171 		pxl_new = kmem_zalloc(sizeof (px_cb_list_t), KM_SLEEP);
2172 		pxl_new->pxp = px_p;
2173 		pxl->next = pxl_new;
2174 	}
2175 	cb_p->attachcnt++;
2176 	mutex_exit(&cb_p->cb_mutex);
2177 
2178 	if (is_proxy) {
2179 		/* add to interrupt redistribution list */
2180 		intr_dist_add(px_cb_intr_redist, cb_p);
2181 
2182 		/* enable cb hw interrupt */
2183 		px_ib_intr_enable(px_p, cb_p->cpuid, fault_p->px_intr_ino);
2184 	}
2185 
2186 	return (DDI_SUCCESS);
2187 }
2188 
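/*
 * Summary of the CB bookkeeping set up by px_cb_add_intr() above and
 * torn down by px_cb_rem_intr() below: the first px to attach becomes
 * the proxy; it is placed at the head of cb_p->pxl, its sysino/cpuid
 * are recorded in cb_p, and only its hardware interrupt is enabled.
 * Later px instances are appended to the list and share the same CB
 * interrupt, with cb_p->attachcnt tracking how many are attached.
 */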
2189 /*
2190  * px_cb_rem_intr() - Called from detach(9E) to remove its CB
2191  * interrupt vector, to shift the proxy role to the next available px,
2192  * or to disable the CB interrupt when this px is the last one.
2193  */
2194 void
2195 px_cb_rem_intr(px_fault_t *fault_p)
2196 {
2197 	px_t *px_p = DIP_TO_STATE(fault_p->px_fh_dip), *pxp;
2198 	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2199 	px_cb_t *cb_p = PX2CB(px_p);
2200 	px_cb_list_t *pxl, *prev;
2201 	px_fault_t *f_p;
2202 
2203 	ASSERT(cb_p->pxl);
2204 
2205 	/* find and remove this px, and update cb list */
2206 	mutex_enter(&cb_p->cb_mutex);
2207 
2208 	pxl = cb_p->pxl;
2209 	if (pxl->pxp == px_p) {
2210 		cb_p->pxl = pxl->next;
2211 	} else {
2212 		prev = pxl;
2213 		pxl = pxl->next;
2214 		for (; pxl && (pxl->pxp != px_p); prev = pxl, pxl = pxl->next) {
2215 		};
2216 		if (!pxl) {
2217 			cmn_err(CE_WARN, "px_cb_rem_intr: can't find px_p 0x%p "
2218 			    "in registered CB list.", (void *)px_p);
2219 			mutex_exit(&cb_p->cb_mutex);
2220 			return;
2221 		}
2222 		prev->next = pxl->next;
2223 	}
2224 	pxu_p->px_cb_p = NULL;
2225 	cb_p->attachcnt--;
2226 	kmem_free(pxl, sizeof (px_cb_list_t));
2227 	mutex_exit(&cb_p->cb_mutex);
2228 
2229 	/* disable cb hw interrupt */
2230 	if (fault_p->px_fh_sysino == cb_p->sysino)
2231 		px_ib_intr_disable(px_p->px_ib_p, fault_p->px_intr_ino,
2232 		    IB_INTR_WAIT);
2233 
2234 	/* if last px, remove from interrupt redistribution list */
2235 	if (cb_p->pxl == NULL)
2236 		intr_dist_rem(px_cb_intr_redist, cb_p);
2237 
2238 	/* de-register interrupt */
2239 	VERIFY(rem_ivintr(fault_p->px_fh_sysino, PX_ERR_PIL) == 0);
2240 
2241 	/* if not last px, assign next px to manage cb */
2242 	mutex_enter(&cb_p->cb_mutex);
2243 	if (cb_p->pxl) {
2244 		if (fault_p->px_fh_sysino == cb_p->sysino) {
2245 			pxp = cb_p->pxl->pxp;
2246 			f_p = &pxp->px_cb_fault;
2247 			cb_p->sysino = f_p->px_fh_sysino;
2248 
2249 			PX_INTR_ENABLE(pxp->px_dip, cb_p->sysino, cb_p->cpuid);
2250 			(void) px_lib_intr_setstate(pxp->px_dip, cb_p->sysino,
2251 			    INTR_IDLE_STATE);
2252 		}
2253 		mutex_exit(&cb_p->cb_mutex);
2254 		return;
2255 	}
2256 
2257 	/* clean up after the last px */
2258 	mutex_exit(&cb_p->cb_mutex);
2259 
2260 	/* px_lib_dev_init allows only FIRE and OBERON */
2261 	px_err_reg_disable(
2262 	    (pxu_p->chip_type == PX_CHIP_FIRE) ? PX_ERR_JBC : PX_ERR_UBC,
2263 	    pxu_p->px_address[PX_REG_XBC]);
2264 
2265 	mutex_destroy(&cb_p->cb_mutex);
2266 	px_set_cb(fault_p->px_fh_dip, 0ull);
2267 	kmem_free(cb_p, sizeof (px_cb_t));
2268 }
2269 
2270 /*
2271  * px_cb_intr() - sun4u only, CB interrupt dispatcher
2272  */
2273 uint_t
2274 px_cb_intr(caddr_t arg)
2275 {
2276 	px_cb_t *cb_p = (px_cb_t *)arg;
2277 	px_t *pxp;
2278 	px_fault_t *f_p;
2279 	int ret;
2280 
2281 	mutex_enter(&cb_p->cb_mutex);
2282 
2283 	if (!cb_p->pxl) {
2284 		mutex_exit(&cb_p->cb_mutex);
2285 		return (DDI_INTR_UNCLAIMED);
2286 	}
2287 
2288 	pxp = cb_p->pxl->pxp;
2289 	f_p = &pxp->px_cb_fault;
2290 
2291 	ret = f_p->px_err_func((caddr_t)f_p);
2292 
2293 	mutex_exit(&cb_p->cb_mutex);
2294 	return (ret);
2295 }
2296 
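/*
 * Note on the dispatcher above: px_cb_intr() always forwards to the
 * fault handler of the current list head (the proxy px) while holding
 * cb_mutex, so px_cb_intr_redist() and px_cb_rem_intr() can safely
 * retarget or replace the proxy without racing the interrupt path.
 */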
2297 #ifdef FMA
2298 void
2299 px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status)
2300 {
2301 	/* populate the rc_status by reading the registers - TBD */
2302 }
2303 #endif /* FMA */
2304 
2305 /*
2306  * Unprotected raw reads/writes of fabric device's config space.
2307  * Only used for temporary PCI-E Fabric Error Handling.
2308  */
2309 uint32_t
2310 px_fab_get(px_t *px_p, pcie_req_id_t bdf, uint16_t offset)
2311 {
2312 	px_ranges_t *rp = px_p->px_ranges_p;
2313 	uint64_t range_prop, base_addr;
2314 	int bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
2315 	uint32_t val;
2316 
2317 	/* Get Fire's Physical Base Address */
2318 	range_prop = px_get_range_prop(px_p, rp, bank);
2319 
2320 	/* Get config space first. */
2321 	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);
2322 
2323 	val = ldphysio(base_addr);
2324 
2325 	return (LE_32(val));
2326 }
2327 
2328 void
2329 px_fab_set(px_t *px_p, pcie_req_id_t bdf, uint16_t offset,
2330     uint32_t val) {
2331 	px_ranges_t *rp = px_p->px_ranges_p;
2332 	uint64_t range_prop, base_addr;
2333 	int bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
2334 
2335 	/* Get Fire's Physical Base Address */
2336 	range_prop = px_get_range_prop(px_p, rp, bank);
2337 
2338 	/* Get config space first. */
2339 	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);
2340 
2341 	stphysio(base_addr, LE_32(val));
2342 }
2343 
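/*
 * Illustrative sketch only (kept out of the build): shows the intended
 * read-modify-write usage of the unprotected px_fab_get()/px_fab_set()
 * accessors above.  px_fab_get() returns the register in host byte
 * order and px_fab_set() converts back to the little-endian config
 * space layout, so a caller can operate on plain host-order values.
 * The helper name and its use are assumptions, not existing code.
 */
#if 0
static void
px_fab_set_bits(px_t *px_p, pcie_req_id_t bdf, uint16_t offset,
    uint32_t bits)
{
	uint32_t val;

	/* raw config read, byte-swapped to host order by px_fab_get() */
	val = px_fab_get(px_p, bdf, offset);

	/* write back with the requested bits set */
	px_fab_set(px_p, bdf, offset, val | bits);
}
#endif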
"reenable PCIe Non Fatal msg intr.\n"); 2425 2426 /* enable FATAL intr mapping */ 2427 if (f_ino_p) 2428 px_ib_intr_enable(px_p, f_ino_p->ino_cpuid, f_ino); 2429 else 2430 cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to " 2431 "reenable PCIe Fatal msg intr.\n"); 2432 2433 mutex_exit(&ib_p->ib_ino_lst_mutex); 2434 2435 /* enable corr/nonfatal/fatal not enable error */ 2436 CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, (imu_log_enable | 2437 (imu_log_mask & px_imu_log_mask))); 2438 CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, (imu_intr_enable | 2439 (imu_intr_mask & px_imu_intr_mask))); 2440 2441 break; 2442 } 2443 2444 return (B_TRUE); 2445 } 2446 2447 uint64_t 2448 px_get_rng_parent_hi_mask(px_t *px_p) 2449 { 2450 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 2451 uint64_t mask; 2452 2453 switch (PX_CHIP_TYPE(pxu_p)) { 2454 case PX_CHIP_OBERON: 2455 mask = OBERON_RANGE_PROP_MASK; 2456 break; 2457 case PX_CHIP_FIRE: 2458 mask = PX_RANGE_PROP_MASK; 2459 break; 2460 default: 2461 mask = PX_RANGE_PROP_MASK; 2462 } 2463 2464 return (mask); 2465 } 2466 2467 /* 2468 * fetch chip's range propery's value 2469 */ 2470 uint64_t 2471 px_get_range_prop(px_t *px_p, px_ranges_t *rp, int bank) 2472 { 2473 uint64_t mask, range_prop; 2474 2475 mask = px_get_rng_parent_hi_mask(px_p); 2476 range_prop = (((uint64_t)(rp[bank].parent_high & mask)) << 32) | 2477 rp[bank].parent_low; 2478 2479 return (range_prop); 2480 } 2481 2482 /* 2483 * add cpr callback 2484 */ 2485 void 2486 px_cpr_add_callb(px_t *px_p) 2487 { 2488 px_p->px_cprcb_id = callb_add(px_cpr_callb, (void *)px_p, 2489 CB_CL_CPR_POST_USER, "px_cpr"); 2490 } 2491 2492 /* 2493 * remove cpr callback 2494 */ 2495 void 2496 px_cpr_rem_callb(px_t *px_p) 2497 { 2498 (void) callb_delete(px_p->px_cprcb_id); 2499 } 2500 2501 /*ARGSUSED*/ 2502 static uint_t 2503 px_hp_intr(caddr_t arg1, caddr_t arg2) 2504 { 2505 px_t *px_p = (px_t *)arg1; 2506 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 2507 int rval; 2508 2509 rval = pciehpc_intr(px_p->px_dip); 2510 2511 #ifdef DEBUG 2512 if (rval == DDI_INTR_UNCLAIMED) 2513 cmn_err(CE_WARN, "%s%d: UNCLAIMED intr\n", 2514 ddi_driver_name(px_p->px_dip), 2515 ddi_get_instance(px_p->px_dip)); 2516 #endif 2517 2518 /* Set the interrupt state to idle */ 2519 if (px_lib_intr_setstate(px_p->px_dip, 2520 pxu_p->hp_sysino, INTR_IDLE_STATE) != DDI_SUCCESS) 2521 return (DDI_INTR_UNCLAIMED); 2522 2523 return (rval); 2524 } 2525 2526 int 2527 px_lib_hotplug_init(dev_info_t *dip, void *arg) 2528 { 2529 px_t *px_p = DIP_TO_STATE(dip); 2530 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 2531 uint64_t ret; 2532 2533 if ((ret = hvio_hotplug_init(dip, arg)) == DDI_SUCCESS) { 2534 if (px_lib_intr_devino_to_sysino(px_p->px_dip, 2535 px_p->px_inos[PX_INTR_HOTPLUG], &pxu_p->hp_sysino) != 2536 DDI_SUCCESS) { 2537 #ifdef DEBUG 2538 cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n", 2539 ddi_driver_name(px_p->px_dip), 2540 ddi_get_instance(px_p->px_dip)); 2541 #endif 2542 return (DDI_FAILURE); 2543 } 2544 2545 VERIFY(add_ivintr(pxu_p->hp_sysino, PX_PCIEHP_PIL, 2546 (intrfunc)px_hp_intr, (caddr_t)px_p, NULL, NULL) == 0); 2547 2548 px_ib_intr_enable(px_p, intr_dist_cpuid(), 2549 px_p->px_inos[PX_INTR_HOTPLUG]); 2550 } 2551 2552 return (ret); 2553 } 2554 2555 void 2556 px_lib_hotplug_uninit(dev_info_t *dip) 2557 { 2558 if (hvio_hotplug_uninit(dip) == DDI_SUCCESS) { 2559 px_t *px_p = DIP_TO_STATE(dip); 2560 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 2561 2562 px_ib_intr_disable(px_p->px_ib_p, 2563 px_p->px_inos[PX_INTR_HOTPLUG], IB_INTR_WAIT); 2564 2565 
2482 /*
2483  * add cpr callback
2484  */
2485 void
2486 px_cpr_add_callb(px_t *px_p)
2487 {
2488 	px_p->px_cprcb_id = callb_add(px_cpr_callb, (void *)px_p,
2489 	    CB_CL_CPR_POST_USER, "px_cpr");
2490 }
2491 
2492 /*
2493  * remove cpr callback
2494  */
2495 void
2496 px_cpr_rem_callb(px_t *px_p)
2497 {
2498 	(void) callb_delete(px_p->px_cprcb_id);
2499 }
2500 
2501 /*ARGSUSED*/
2502 static uint_t
2503 px_hp_intr(caddr_t arg1, caddr_t arg2)
2504 {
2505 	px_t *px_p = (px_t *)arg1;
2506 	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2507 	int rval;
2508 
2509 	rval = pciehpc_intr(px_p->px_dip);
2510 
2511 #ifdef DEBUG
2512 	if (rval == DDI_INTR_UNCLAIMED)
2513 		cmn_err(CE_WARN, "%s%d: UNCLAIMED intr\n",
2514 		    ddi_driver_name(px_p->px_dip),
2515 		    ddi_get_instance(px_p->px_dip));
2516 #endif
2517 
2518 	/* Set the interrupt state to idle */
2519 	if (px_lib_intr_setstate(px_p->px_dip,
2520 	    pxu_p->hp_sysino, INTR_IDLE_STATE) != DDI_SUCCESS)
2521 		return (DDI_INTR_UNCLAIMED);
2522 
2523 	return (rval);
2524 }
2525 
2526 int
2527 px_lib_hotplug_init(dev_info_t *dip, void *arg)
2528 {
2529 	px_t *px_p = DIP_TO_STATE(dip);
2530 	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2531 	uint64_t ret;
2532 
2533 	if ((ret = hvio_hotplug_init(dip, arg)) == DDI_SUCCESS) {
2534 		if (px_lib_intr_devino_to_sysino(px_p->px_dip,
2535 		    px_p->px_inos[PX_INTR_HOTPLUG], &pxu_p->hp_sysino) !=
2536 		    DDI_SUCCESS) {
2537 #ifdef DEBUG
2538 			cmn_err(CE_WARN, "%s%d: devino_to_sysino fails\n",
2539 			    ddi_driver_name(px_p->px_dip),
2540 			    ddi_get_instance(px_p->px_dip));
2541 #endif
2542 			return (DDI_FAILURE);
2543 		}
2544 
2545 		VERIFY(add_ivintr(pxu_p->hp_sysino, PX_PCIEHP_PIL,
2546 		    (intrfunc)px_hp_intr, (caddr_t)px_p, NULL, NULL) == 0);
2547 
2548 		px_ib_intr_enable(px_p, intr_dist_cpuid(),
2549 		    px_p->px_inos[PX_INTR_HOTPLUG]);
2550 	}
2551 
2552 	return (ret);
2553 }
2554 
2555 void
2556 px_lib_hotplug_uninit(dev_info_t *dip)
2557 {
2558 	if (hvio_hotplug_uninit(dip) == DDI_SUCCESS) {
2559 		px_t *px_p = DIP_TO_STATE(dip);
2560 		pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2561 
2562 		px_ib_intr_disable(px_p->px_ib_p,
2563 		    px_p->px_inos[PX_INTR_HOTPLUG], IB_INTR_WAIT);
2564 
2565 		VERIFY(rem_ivintr(pxu_p->hp_sysino, PX_PCIEHP_PIL) == 0);
2566 	}
2567 }
2568 
2569 /*
2570  * px_hp_intr_redist() - sun4u only, HP interrupt redistribution
2571  */
2572 void
2573 px_hp_intr_redist(px_t *px_p)
2574 {
2575 	if (px_p && (px_p->px_dev_caps & PX_HOTPLUG_CAPABLE)) {
2576 		px_ib_intr_dist_en(px_p->px_dip, intr_dist_cpuid(),
2577 		    px_p->px_inos[PX_INTR_HOTPLUG], B_FALSE);
2578 	}
2579 }
2580 
2581 boolean_t
2582 px_lib_is_in_drain_state(px_t *px_p)
2583 {
2584 	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2585 	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2586 	uint64_t drain_status;
2587 
2588 	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
2589 		drain_status = CSR_BR(csr_base, DRAIN_CONTROL_STATUS, DRAIN);
2590 	} else {
2591 		drain_status = CSR_BR(csr_base, TLU_STATUS, DRAIN);
2592 	}
2593 
2594 	return (drain_status);
2595 }
2596 
2597 pcie_req_id_t
2598 px_lib_get_bdf(px_t *px_p)
2599 {
2600 	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
2601 	caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
2602 	pcie_req_id_t bdf;
2603 
2604 	bdf = CSR_BR(csr_base, DMC_PCI_EXPRESS_CONFIGURATION, REQ_ID);
2605 
2606 	return (bdf);
2607 }
2608 
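/*
 * Illustrative sketch only (kept out of the build): a hypothetical
 * debug helper combining the two accessors above.  The function name
 * and the DBG flag chosen here are assumptions, not part of the driver.
 */
#if 0
static void
px_lib_dbg_link_state(px_t *px_p)
{
	pcie_req_id_t bdf = px_lib_get_bdf(px_p);

	/* report the root port's requester id and drain status */
	DBG(DBG_ATTACH, px_p->px_dip, "bdf 0x%x, in drain state %d\n",
	    bdf, px_lib_is_in_drain_state(px_p) ? 1 : 0);
}
#endif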