/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/modctl.h>
#include <sys/disp.h>
#include <sys/stat.h>
#include <sys/ddi_impldefs.h>
#include <sys/vmem.h>
#include <sys/iommutsb.h>
#include <sys/cpuvar.h>
#include <sys/ivintr.h>
#include <sys/byteorder.h>
#include <px_obj.h>
#include <pcie_pwr.h>
#include <px_regs.h>
#include <px_csr.h>
#include <sys/machsystm.h>
#include "px_lib4u.h"
#include "px_err.h"

#pragma weak jbus_stst_order

extern void jbus_stst_order();

ulong_t px_mmu_dvma_end = 0xfffffffful;
uint_t px_ranges_phi_mask = 0xfffffffful;

static int px_goto_l23ready(px_t *px_p);
static int px_goto_l0(px_t *px_p);
static int px_pre_pwron_check(px_t *px_p);
static uint32_t px_identity_chip(px_t *px_p);
static boolean_t px_cpr_callb(void *arg, int code);
/*
 * px_lib_map_regs
 *
 * This function is called from the attach routine to map the registers
 * accessed by this driver.
 *
 * used by: px_attach()
 *
 * return value: DDI_FAILURE on failure
 */
int
px_lib_map_regs(pxu_t *pxu_p, dev_info_t *dip)
{
	ddi_device_acc_attr_t	attr;
	px_reg_bank_t		reg_bank = PX_REG_CSR;

	DBG(DBG_ATTACH, dip, "px_lib_map_regs: pxu_p:0x%p, dip 0x%p\n",
	    pxu_p, dip);

	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;

	/*
	 * PCI CSR Base
	 */
	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
		goto fail;
	}

	reg_bank++;

	/*
	 * XBUS CSR Base
	 */
	if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank],
	    0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) {
		goto fail;
	}

	pxu_p->px_address[reg_bank] -= FIRE_CONTROL_STATUS;

done:
	for (; reg_bank >= PX_REG_CSR; reg_bank--) {
		DBG(DBG_ATTACH, dip, "reg_bank 0x%x address 0x%p\n",
		    reg_bank, pxu_p->px_address[reg_bank]);
	}

	return (DDI_SUCCESS);

fail:
	cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n",
	    ddi_driver_name(dip), ddi_get_instance(dip), reg_bank);

	for (reg_bank--; reg_bank >= PX_REG_CSR; reg_bank--) {
		pxu_p->px_address[reg_bank] = NULL;
		ddi_regs_map_free(&pxu_p->px_ac[reg_bank]);
	}

	return (DDI_FAILURE);
}

/*
 * px_lib_unmap_regs:
 *
 * This routine unmaps the registers mapped by px_lib_map_regs.
 *
 * used by: px_detach(), and error conditions in px_attach()
 *
 * return value: none
 */
void
px_lib_unmap_regs(pxu_t *pxu_p)
{
	int	i;

	for (i = 0; i < PX_REG_MAX; i++) {
		if (pxu_p->px_ac[i])
			ddi_regs_map_free(&pxu_p->px_ac[i]);
	}
}

int
px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	caddr_t		xbc_csr_base, csr_base;
	px_dvma_range_prop_t	px_dvma_range;
	uint32_t	chip_id;
	pxu_t		*pxu_p;

	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p\n", dip);

	if ((chip_id = px_identity_chip(px_p)) == PX_CHIP_UNIDENTIFIED)
		return (DDI_FAILURE);

	switch (chip_id) {
	case FIRE_VER_10:
		cmn_err(CE_WARN, "FIRE Hardware Version 1.0 is not supported");
		return (DDI_FAILURE);
	case FIRE_VER_20:
		DBG(DBG_ATTACH, dip, "FIRE Hardware Version 2.0\n");
		break;
	default:
		cmn_err(CE_WARN, "%s%d: FIRE Hardware Version Unknown\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	/*
	 * Allocate platform specific structure and link it to
	 * the px state structure.
	 */
	pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP);
	pxu_p->chip_id = chip_id;
	pxu_p->portid = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "portid", -1);

	/* Map in the registers */
	if (px_lib_map_regs(pxu_p, dip) == DDI_FAILURE) {
		kmem_free(pxu_p, sizeof (pxu_t));

		return (DDI_FAILURE);
	}

	xbc_csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC];
	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];

	pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid);
	pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie);
	pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie);

	/*
	 * Create "virtual-dma" property to support child devices
	 * needing to know DVMA range.
	 */
	px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1
	    - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT);
	px_dvma_range.dvma_len = (uint32_t)
	    px_mmu_dvma_end - px_dvma_range.dvma_base + 1;

	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    "virtual-dma", (caddr_t)&px_dvma_range,
	    sizeof (px_dvma_range_prop_t));
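
	/*
	 * A worked example of the window arithmetic above (illustrative
	 * numbers, not a statement about any particular configuration):
	 * tsb_size is in bytes, so tsb_size >> 3 is the number of 8-byte
	 * TSB entries, and shifting that count by MMU_PAGE_SHIFT gives the
	 * bytes of DVMA space the TSB can map.  Assuming 8K pages
	 * (MMU_PAGE_SHIFT == 13) and a 256KB TSB, 32K entries map
	 * 32K * 8K = 256MB, so dvma_base = 0x100000000 - 0x10000000 =
	 * 0xf0000000 and dvma_len = 0x10000000.
	 */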
	/*
	 * Initialize all Fire hardware specific blocks.
	 */
	hvio_cb_init(xbc_csr_base, pxu_p);
	hvio_ib_init(csr_base, pxu_p);
	hvio_pec_init(csr_base, pxu_p);
	hvio_mmu_init(csr_base, pxu_p);

	px_p->px_plat_p = (void *)pxu_p;

	/*
	 * Initialize all the interrupt handlers
	 */
	px_err_reg_enable(px_p, PX_ERR_JBC);
	px_err_reg_enable(px_p, PX_ERR_MMU);
	px_err_reg_enable(px_p, PX_ERR_IMU);
	px_err_reg_enable(px_p, PX_ERR_TLU_UE);
	px_err_reg_enable(px_p, PX_ERR_TLU_CE);
	px_err_reg_enable(px_p, PX_ERR_TLU_OE);
	px_err_reg_enable(px_p, PX_ERR_ILU);
	px_err_reg_enable(px_p, PX_ERR_LPU_LINK);
	px_err_reg_enable(px_p, PX_ERR_LPU_PHY);
	px_err_reg_enable(px_p, PX_ERR_LPU_RX);
	px_err_reg_enable(px_p, PX_ERR_LPU_TX);
	px_err_reg_enable(px_p, PX_ERR_LPU_LTSSM);
	px_err_reg_enable(px_p, PX_ERR_LPU_GIGABLZ);

	/* Initialize device handle */
	*dev_hdl = (devhandle_t)csr_base;

	DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl);

	return (DDI_SUCCESS);
}

int
px_lib_dev_fini(dev_info_t *dip)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;

	DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip);

	/*
	 * Deinitialize all the interrupt handlers
	 */
	px_err_reg_disable(px_p, PX_ERR_JBC);
	px_err_reg_disable(px_p, PX_ERR_MMU);
	px_err_reg_disable(px_p, PX_ERR_IMU);
	px_err_reg_disable(px_p, PX_ERR_TLU_UE);
	px_err_reg_disable(px_p, PX_ERR_TLU_CE);
	px_err_reg_disable(px_p, PX_ERR_TLU_OE);
	px_err_reg_disable(px_p, PX_ERR_ILU);
	px_err_reg_disable(px_p, PX_ERR_LPU_LINK);
	px_err_reg_disable(px_p, PX_ERR_LPU_PHY);
	px_err_reg_disable(px_p, PX_ERR_LPU_RX);
	px_err_reg_disable(px_p, PX_ERR_LPU_TX);
	px_err_reg_disable(px_p, PX_ERR_LPU_LTSSM);
	px_err_reg_disable(px_p, PX_ERR_LPU_GIGABLZ);

	iommu_tsb_free(pxu_p->tsb_cookie);

	px_lib_unmap_regs((pxu_t *)px_p->px_plat_p);
	kmem_free(px_p->px_plat_p, sizeof (pxu_t));
	px_p->px_plat_p = NULL;

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino,
    sysino_t *sysino)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ret;

	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p "
	    "devino 0x%x\n", dip, devino);

	if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip),
	    pxu_p, devino, sysino)) != H_EOK) {
		DBG(DBG_LIB_INT, dip,
		    "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n",
	    *sysino);

	return (DDI_SUCCESS);
}
"hvio_intr_getvalid failed, ret 0x%lx\n", 318 ret); 319 return (DDI_FAILURE); 320 } 321 322 DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n", 323 *intr_valid_state); 324 325 return (DDI_SUCCESS); 326 } 327 328 /*ARGSUSED*/ 329 int 330 px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino, 331 intr_valid_state_t intr_valid_state) 332 { 333 uint64_t ret; 334 335 DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx " 336 "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state); 337 338 if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip), 339 sysino, intr_valid_state)) != H_EOK) { 340 DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n", 341 ret); 342 return (DDI_FAILURE); 343 } 344 345 return (DDI_SUCCESS); 346 } 347 348 /*ARGSUSED*/ 349 int 350 px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino, 351 intr_state_t *intr_state) 352 { 353 uint64_t ret; 354 355 DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n", 356 dip, sysino); 357 358 if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip), 359 sysino, intr_state)) != H_EOK) { 360 DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n", 361 ret); 362 return (DDI_FAILURE); 363 } 364 365 DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n", 366 *intr_state); 367 368 return (DDI_SUCCESS); 369 } 370 371 /*ARGSUSED*/ 372 int 373 px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino, 374 intr_state_t intr_state) 375 { 376 uint64_t ret; 377 378 DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx " 379 "intr_state 0x%x\n", dip, sysino, intr_state); 380 381 if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip), 382 sysino, intr_state)) != H_EOK) { 383 DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n", 384 ret); 385 return (DDI_FAILURE); 386 } 387 388 return (DDI_SUCCESS); 389 } 390 391 /*ARGSUSED*/ 392 int 393 px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid) 394 { 395 uint64_t ret; 396 397 DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n", 398 dip, sysino); 399 400 if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip), 401 sysino, cpuid)) != H_EOK) { 402 DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n", 403 ret); 404 return (DDI_FAILURE); 405 } 406 407 DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", cpuid); 408 409 return (DDI_SUCCESS); 410 } 411 412 /*ARGSUSED*/ 413 int 414 px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid) 415 { 416 uint64_t ret; 417 418 DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx " 419 "cpuid 0x%x\n", dip, sysino, cpuid); 420 421 if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip), 422 sysino, cpuid)) != H_EOK) { 423 DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n", 424 ret); 425 return (DDI_FAILURE); 426 } 427 428 return (DDI_SUCCESS); 429 } 430 431 /*ARGSUSED*/ 432 int 433 px_lib_intr_reset(dev_info_t *dip) 434 { 435 devino_t ino; 436 sysino_t sysino; 437 438 DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip); 439 440 /* Reset all Interrupts */ 441 for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) { 442 if (px_lib_intr_devino_to_sysino(dip, ino, 443 &sysino) != DDI_SUCCESS) 444 return (BF_FATAL); 445 446 if (px_lib_intr_setstate(dip, sysino, 447 INTR_IDLE_STATE) != DDI_SUCCESS) 448 return (BF_FATAL); 449 } 450 451 return (BF_NONE); 452 } 453 454 /*ARGSUSED*/ 455 int 456 px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages, 457 io_attributes_t io_attributes, void *addr, size_t 
/*ARGSUSED*/
int
px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages,
    io_attributes_t io_attributes, void *addr, size_t pfn_index,
    int flag)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx "
	    "pages 0x%x attr 0x%x addr 0x%p pfn_index 0x%llx, flag 0x%x\n",
	    dip, tsbid, pages, io_attributes, addr, pfn_index, flag);

	if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages,
	    io_attributes, addr, pfn_index, flag)) != H_EOK) {
		DBG(DBG_LIB_DMA, dip,
		    "px_lib_iommu_map failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx "
	    "pages 0x%x\n", dip, tsbid, pages);

	if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, tsbid, pages))
	    != H_EOK) {
		DBG(DBG_LIB_DMA, dip,
		    "px_lib_iommu_demap failed, ret 0x%lx\n", ret);

		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid,
    io_attributes_t *attributes_p, r_addr_t *r_addr_p)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n",
	    dip, tsbid);

	if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid,
	    attributes_p, r_addr_p)) != H_EOK) {
		DBG(DBG_LIB_DMA, dip,
		    "hvio_iommu_getmap failed, ret 0x%lx\n", ret);

		return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING : DDI_FAILURE);
	}

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%x r_addr 0x%llx\n",
	    *attributes_p, *r_addr_p);

	return (DDI_SUCCESS);
}


/*
 * Checks dma attributes against system bypass ranges.
 * The bypass range is determined by the hardware. Return it so the
 * common code can do generic checking against it.
 */
/*ARGSUSED*/
int
px_lib_dma_bypass_rngchk(ddi_dma_attr_t *attrp, uint64_t *lo_p, uint64_t *hi_p)
{
	*lo_p = MMU_BYPASS_BASE;
	*hi_p = MMU_BYPASS_END;

	return (DDI_SUCCESS);
}


/*ARGSUSED*/
int
px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra,
    io_attributes_t io_attributes, io_addr_t *io_addr_p)
{
	uint64_t	ret;

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx "
	    "attr 0x%x\n", dip, ra, io_attributes);

	if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), ra,
	    io_attributes, io_addr_p)) != H_EOK) {
		DBG(DBG_LIB_DMA, dip,
		    "hvio_iommu_getbypass failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n",
	    *io_addr_p);

	return (DDI_SUCCESS);
}
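
/*
 * Note on px_lib_dma_sync() below: jbus_stst_order is declared with
 * #pragma weak above, so on cpu modules that do not provide the routine
 * the symbol resolves to address zero.  The "&jbus_stst_order == NULL"
 * test below is therefore a runtime presence check for the workaround
 * routine, not a comparison of its result.
 */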
/*
 * bus dma sync entry point.
 */
/*ARGSUSED*/
int
px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    off_t off, size_t len, uint_t cache_flags)
{
	ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)handle;

	DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p "
	    "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n",
	    dip, rdip, handle, off, len, cache_flags);

	/*
	 * jbus_stst_order is found only in certain cpu modules.
	 * Just return success if not present.
	 */
	if (&jbus_stst_order == NULL)
		return (DDI_SUCCESS);

	if (!(mp->dmai_flags & PX_DMAI_FLAGS_INUSE)) {
		cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.",
		    ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp);

		return (DDI_FAILURE);
	}

	if (mp->dmai_flags & PX_DMAI_FLAGS_NOSYNC)
		return (DDI_SUCCESS);

	/*
	 * No flush needed when sending data from memory to device.
	 * Nothing to do to "sync" memory to what device would already see.
	 */
	if (!(mp->dmai_rflags & DDI_DMA_READ) ||
	    ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV))
		return (DDI_SUCCESS);

	/*
	 * Perform necessary cpu workaround to ensure jbus ordering.
	 * CPU's internal "invalidate FIFOs" are flushed.
	 */

#if !defined(lint)
	kpreempt_disable();
#endif
	jbus_stst_order();
#if !defined(lint)
	kpreempt_enable();
#endif
	return (DDI_SUCCESS);
}

/*
 * MSIQ Functions:
 */
/*ARGSUSED*/
int
px_lib_msiq_init(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	caddr_t		msiq_addr;
	px_dvma_addr_t	pg_index;
	size_t		size;
	int		ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip);

	/*
	 * Map the EQ memory into the Fire MMU (has to be 512KB aligned)
	 * and then initialize the base address register.
	 *
	 * Allocate entries from Fire IOMMU so that the resulting address
	 * is properly aligned.  Calculate the index of the first allocated
	 * entry.  Note: The size of the mapping is assumed to be a multiple
	 * of the page size.
	 */
	msiq_addr = (caddr_t)(((uint64_t)msiq_state_p->msiq_buf_p +
	    (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT);

	size = msiq_state_p->msiq_cnt *
	    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);

	pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map,
	    size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT);

	if (pxu_p->msiq_mapped_p == NULL)
		return (DDI_FAILURE);

	pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
	    MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));

	if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index),
	    MMU_BTOP(size), PCI_MAP_ATTR_WRITE, (void *)msiq_addr, 0,
	    MMU_MAP_BUF)) != DDI_SUCCESS) {
		DBG(DBG_LIB_MSIQ, dip,
		    "px_lib_iommu_map failed, ret 0x%x\n", ret);

		(void) px_lib_msiq_fini(dip);
		return (DDI_FAILURE);
	}

	(void) hvio_msiq_init(DIP_TO_HANDLE(dip), pxu_p);

	return (DDI_SUCCESS);
}
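
/*
 * A sizing sketch for the mapping above, using purely hypothetical
 * numbers: if msiq_cnt were 36 queues of msiq_rec_cnt = 128 records and
 * sizeof (msiq_rec_t) were 64 bytes, size would be 36 * 128 * 64 =
 * 294912 bytes, carved out of the DVMA map on a 512KB-aligned boundary
 * to satisfy the EQ base address register alignment noted above.
 */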
/*ARGSUSED*/
int
px_lib_msiq_fini(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	px_dvma_addr_t	pg_index;
	size_t		size;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip);

	/*
	 * Unmap and free the EQ memory that had been mapped
	 * into the Fire IOMMU.
	 */
	size = msiq_state_p->msiq_cnt *
	    msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);

	pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
	    MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));

	(void) px_lib_iommu_demap(px_p->px_dip,
	    PCI_TSBID(0, pg_index), MMU_BTOP(size));

	/* Free the entries from the Fire MMU */
	vmem_xfree(px_p->px_mmu_p->mmu_dvma_map,
	    (void *)pxu_p->msiq_mapped_p, size);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p,
    uint_t *msiq_rec_cnt_p)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_msiq_state_t	*msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
	uint64_t	*msiq_addr;
	size_t		msiq_size;

	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	msiq_addr = (uint64_t *)(((uint64_t)msiq_state_p->msiq_buf_p +
	    (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT);
	msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
	*ra_p = (r_addr_t)((caddr_t)msiq_addr + (msiq_id * msiq_size));

	*msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt;

	DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%llx msiq_rec_cnt 0x%x\n",
	    *ra_p, *msiq_rec_cnt_p);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_valid_state_t *msiq_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_getvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n",
	    *msiq_valid_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_valid_state_t msiq_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x "
	    "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state);

	if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_setvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_state_t *msiq_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_getstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n",
	    *msiq_state);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_state_t msiq_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x "
	    "msiq_state 0x%x\n", dip, msiq_id, msiq_state);

	if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_state)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_setstate failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
/*ARGSUSED*/
int
px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id,
    msiqhead_t *msiq_head)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_head)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_gethead failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n",
	    *msiq_head);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id,
    msiqhead_t msiq_head)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x "
	    "msiq_head 0x%x\n", dip, msiq_id, msiq_head);

	if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_head)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_sethead failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id,
    msiqtail_t *msiq_tail)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n",
	    dip, msiq_id);

	if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip),
	    msiq_id, msiq_tail)) != H_EOK) {
		DBG(DBG_LIB_MSIQ, dip,
		    "hvio_msiq_gettail failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n",
	    *msiq_tail);

	return (DDI_SUCCESS);
}
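
/*
 * Note on px_lib_get_msiq_rec() below (layout inferred from the code,
 * not from a Fire programming manual): eq_rec_fmt_type packs the record
 * type in its upper four bits and the message routing code in its low
 * three bits, which is why the switch shifts right by 3 and the MSG case
 * masks with 7.  The MSI address is likewise delivered split across two
 * fields, with eq_rec_addr0 carrying address bits 15:2 and eq_rec_addr1
 * the bits above bit 15, hence (addr1 << 16) | (addr0 << 2).
 */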
/*ARGSUSED*/
void
px_lib_get_msiq_rec(dev_info_t *dip, px_msiq_t *msiq_p, msiq_rec_t *msiq_rec_p)
{
	eq_rec_t	*eq_rec_p = (eq_rec_t *)msiq_p->msiq_curr;

	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n",
	    dip, eq_rec_p);

	if (!eq_rec_p->eq_rec_fmt_type) {
		/* Set msiq_rec_type to zero */
		msiq_rec_p->msiq_rec_type = 0;

		return;
	}

	DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, "
	    "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx "
	    "eq_rec_len 0x%llx eq_rec_addr0 0x%llx "
	    "eq_rec_addr1 0x%llx eq_rec_data0 0x%llx "
	    "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid,
	    eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len,
	    eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1,
	    eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1);

	/*
	 * Only the upper 4 bits of eq_rec_fmt_type are used
	 * to identify the EQ record type.
	 */
	switch (eq_rec_p->eq_rec_fmt_type >> 3) {
	case EQ_REC_MSI32:
		msiq_rec_p->msiq_rec_type = MSI32_REC;

		msiq_rec_p->msiq_rec_data.msi.msi_data =
		    eq_rec_p->eq_rec_data0;
		break;
	case EQ_REC_MSI64:
		msiq_rec_p->msiq_rec_type = MSI64_REC;

		msiq_rec_p->msiq_rec_data.msi.msi_data =
		    eq_rec_p->eq_rec_data0;
		break;
	case EQ_REC_MSG:
		msiq_rec_p->msiq_rec_type = MSG_REC;

		msiq_rec_p->msiq_rec_data.msg.msg_route =
		    eq_rec_p->eq_rec_fmt_type & 7;
		msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid;
		msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0;
		break;
	default:
		cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: "
		    "0x%x is an unknown EQ record type",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    (int)eq_rec_p->eq_rec_fmt_type);
		break;
	}

	msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid;
	msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) |
	    (eq_rec_p->eq_rec_addr0 << 2));

	/* Zero out eq_rec_fmt_type field */
	eq_rec_p->eq_rec_fmt_type = 0;
}

/*
 * MSI Functions:
 */
/*ARGSUSED*/
int
px_lib_msi_init(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	px_msi_state_t	*msi_state_p = &px_p->px_ib_p->ib_msi_state;
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip);

	if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip),
	    msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip, "px_lib_msi_init failed, ret 0x%lx\n",
		    ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num,
    msiqid_t *msiq_id)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n",
	    dip, msi_num);

	if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip),
	    msi_num, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_getmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n",
	    *msiq_id);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num,
    msiqid_t msiq_id, msi_type_t msitype)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x "
	    "msiq_id 0x%x\n", dip, msi_num, msiq_id);

	if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip),
	    msi_num, msiq_id)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_setmsiq failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num,
    pci_msi_valid_state_t *msi_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n",
	    dip, msi_num);

	if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip),
	    msi_num, msi_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSI, dip,
		    "hvio_msi_getvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msi_valid_state 0x%x\n",
	    *msi_valid_state);

	return (DDI_SUCCESS);
}
"px_lib_msi_setvalid: dip 0x%p msi_num 0x%x " 1049 "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state); 1050 1051 if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip), 1052 msi_num, msi_valid_state)) != H_EOK) { 1053 DBG(DBG_LIB_MSI, dip, 1054 "hvio_msi_setvalid failed, ret 0x%lx\n", ret); 1055 return (DDI_FAILURE); 1056 } 1057 1058 return (DDI_SUCCESS); 1059 } 1060 1061 /*ARGSUSED*/ 1062 int 1063 px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num, 1064 pci_msi_state_t *msi_state) 1065 { 1066 uint64_t ret; 1067 1068 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n", 1069 dip, msi_num); 1070 1071 if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip), 1072 msi_num, msi_state)) != H_EOK) { 1073 DBG(DBG_LIB_MSI, dip, 1074 "hvio_msi_getstate failed, ret 0x%lx\n", ret); 1075 return (DDI_FAILURE); 1076 } 1077 1078 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n", 1079 *msi_state); 1080 1081 return (DDI_SUCCESS); 1082 } 1083 1084 /*ARGSUSED*/ 1085 int 1086 px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num, 1087 pci_msi_state_t msi_state) 1088 { 1089 uint64_t ret; 1090 1091 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x " 1092 "msi_state 0x%x\n", dip, msi_num, msi_state); 1093 1094 if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip), 1095 msi_num, msi_state)) != H_EOK) { 1096 DBG(DBG_LIB_MSI, dip, 1097 "hvio_msi_setstate failed, ret 0x%lx\n", ret); 1098 return (DDI_FAILURE); 1099 } 1100 1101 return (DDI_SUCCESS); 1102 } 1103 1104 /* 1105 * MSG Functions: 1106 */ 1107 /*ARGSUSED*/ 1108 int 1109 px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type, 1110 msiqid_t *msiq_id) 1111 { 1112 uint64_t ret; 1113 1114 DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n", 1115 dip, msg_type); 1116 1117 if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip), 1118 msg_type, msiq_id)) != H_EOK) { 1119 DBG(DBG_LIB_MSG, dip, 1120 "hvio_msg_getmsiq failed, ret 0x%lx\n", ret); 1121 return (DDI_FAILURE); 1122 } 1123 1124 DBG(DBG_LIB_MSI, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n", 1125 *msiq_id); 1126 1127 return (DDI_SUCCESS); 1128 } 1129 1130 /*ARGSUSED*/ 1131 int 1132 px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type, 1133 msiqid_t msiq_id) 1134 { 1135 uint64_t ret; 1136 1137 DBG(DBG_LIB_MSG, dip, "px_lib_msi_setstate: dip 0x%p msg_type 0x%x " 1138 "msiq_id 0x%x\n", dip, msg_type, msiq_id); 1139 1140 if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip), 1141 msg_type, msiq_id)) != H_EOK) { 1142 DBG(DBG_LIB_MSG, dip, 1143 "hvio_msg_setmsiq failed, ret 0x%lx\n", ret); 1144 return (DDI_FAILURE); 1145 } 1146 1147 return (DDI_SUCCESS); 1148 } 1149 1150 /*ARGSUSED*/ 1151 int 1152 px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type, 1153 pcie_msg_valid_state_t *msg_valid_state) 1154 { 1155 uint64_t ret; 1156 1157 DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n", 1158 dip, msg_type); 1159 1160 if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type, 1161 msg_valid_state)) != H_EOK) { 1162 DBG(DBG_LIB_MSG, dip, 1163 "hvio_msg_getvalid failed, ret 0x%lx\n", ret); 1164 return (DDI_FAILURE); 1165 } 1166 1167 DBG(DBG_LIB_MSI, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n", 1168 *msg_valid_state); 1169 1170 return (DDI_SUCCESS); 1171 } 1172 1173 /*ARGSUSED*/ 1174 int 1175 px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type, 1176 pcie_msg_valid_state_t msg_valid_state) 1177 { 1178 uint64_t ret; 1179 1180 DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x " 1181 "msg_valid_state 0x%x\n", dip, msg_type, 
/*ARGSUSED*/
int
px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t msg_valid_state)
{
	uint64_t	ret;

	DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x "
	    "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state);

	if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type,
	    msg_valid_state)) != H_EOK) {
		DBG(DBG_LIB_MSG, dip,
		    "hvio_msg_setvalid failed, ret 0x%lx\n", ret);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * Suspend/Resume Functions:
 * Currently unsupported by hypervisor
 */
int
px_lib_suspend(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	devhandle_t	dev_hdl, xbus_dev_hdl;
	uint64_t	ret;

	DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip);

	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];

	if ((ret = hvio_suspend(dev_hdl, pxu_p)) == H_EOK) {
		px_p->px_cb_p->xbc_attachcnt--;
		if (px_p->px_cb_p->xbc_attachcnt == 0)
			if ((ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p))
			    != H_EOK)
				px_p->px_cb_p->xbc_attachcnt++;
	}

	return ((ret != H_EOK) ? DDI_FAILURE : DDI_SUCCESS);
}

void
px_lib_resume(dev_info_t *dip)
{
	px_t		*px_p = DIP_TO_STATE(dip);
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	devhandle_t	dev_hdl, xbus_dev_hdl;
	devino_t	pec_ino = px_p->px_inos[PX_INTR_PEC];
	devino_t	xbc_ino = px_p->px_inos[PX_INTR_XBC];

	DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip);

	dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR];
	xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC];

	px_p->px_cb_p->xbc_attachcnt++;
	if (px_p->px_cb_p->xbc_attachcnt == 1)
		hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p);
	hvio_resume(dev_hdl, pec_ino, pxu_p);
}

/*
 * Misc Functions:
 * Currently unsupported by hypervisor
 */
uint64_t
px_lib_get_cb(dev_info_t *dip)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;

	return (CSR_XR((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1));
}

void
px_lib_set_cb(dev_info_t *dip, uint64_t val)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	pxu_t	*pxu_p = (pxu_t *)px_p->px_plat_p;

	CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, val);
}

/*ARGSUSED*/
int
px_lib_map_vconfig(dev_info_t *dip,
    ddi_map_req_t *mp, pci_config_offset_t off,
    pci_regspec_t *rp, caddr_t *addrp)
{
	/*
	 * No special config space access services in this layer.
	 */
	return (DDI_FAILURE);
}
void
px_lib_map_attr_check(ddi_map_req_t *mp)
{
	ddi_acc_hdl_t	*hp = mp->map_handlep;

	/* fire does not accept byte masks from PIO store merge */
	if (hp->ah_acc.devacc_attr_dataorder == DDI_STORECACHING_OK_ACC)
		hp->ah_acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
}

void
px_lib_clr_errs(px_t *px_p)
{
	px_pec_t	*pec_p = px_p->px_pec_p;
	dev_info_t	*rpdip = px_p->px_dip;
	px_cb_t		*cb_p = px_p->px_cb_p;
	int		err = PX_OK, ret;
	int		acctype = pec_p->pec_safeacc_type;
	ddi_fm_error_t	derr;

	/* Create the derr */
	bzero(&derr, sizeof (ddi_fm_error_t));
	derr.fme_version = DDI_FME_VERSION;
	derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
	derr.fme_flag = acctype;

	if (acctype == DDI_FM_ERR_EXPECTED) {
		derr.fme_status = DDI_FM_NONFATAL;
		ndi_fm_acc_err_set(pec_p->pec_acc_hdl, &derr);
	}

	mutex_enter(&cb_p->xbc_fm_mutex);

	/* send ereport/handle/clear fire registers */
	err = px_err_handle(px_p, &derr, PX_LIB_CALL, B_TRUE);

	/* Check all child devices for errors */
	ret = ndi_fm_handler_dispatch(rpdip, NULL, &derr);

	mutex_exit(&cb_p->xbc_fm_mutex);

	/*
	 * PX_FATAL_HW indicates a condition recovered from Fatal-Reset,
	 * therefore it does not cause a panic.
	 */
	if ((err & (PX_FATAL_GOS | PX_FATAL_SW)) || (ret == DDI_FM_FATAL))
		PX_FM_PANIC("Fatal System Port Error has occurred\n");
}

#ifdef  DEBUG
int	px_peekfault_cnt = 0;
int	px_pokefault_cnt = 0;
#endif  /* DEBUG */

/*ARGSUSED*/
static int
px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *in_args)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	px_pec_t	*pec_p = px_p->px_pec_p;
	int	err = DDI_SUCCESS;
	on_trap_data_t	otd;

	mutex_enter(&pec_p->pec_pokefault_mutex);
	pec_p->pec_ontrap_data = &otd;
	pec_p->pec_safeacc_type = DDI_FM_ERR_POKE;

	/* Set up protected environment. */
	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		otd.ot_trampoline = (uintptr_t)&poke_fault;
		err = do_poke(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else
		err = DDI_FAILURE;

	px_lib_clr_errs(px_p);

	if (otd.ot_trap & OT_DATA_ACCESS)
		err = DDI_FAILURE;

	/* Take down protected environment. */
	no_trap();

	pec_p->pec_ontrap_data = NULL;
	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
	mutex_exit(&pec_p->pec_pokefault_mutex);

#ifdef  DEBUG
	if (err == DDI_FAILURE)
		px_pokefault_cnt++;
#endif
	return (err);
}
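
/*
 * Note on the on_trap() pattern used above and below: like setjmp(),
 * on_trap() returns zero when it arms the per-thread OT_DATA_ACCESS
 * protection and nonzero when control returns there via a trap.
 * Pointing ot_trampoline at poke_fault (or peek_fault) while the access
 * is made lets a faulting PIO access unwind into the fault handler, so
 * the routine reports DDI_FAILURE instead of panicking the system.
 */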
/*ARGSUSED*/
static int
px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *cautacc_ctlops_arg)
{
	size_t	size = cautacc_ctlops_arg->size;
	uintptr_t	dev_addr = cautacc_ctlops_arg->dev_addr;
	uintptr_t	host_addr = cautacc_ctlops_arg->host_addr;
	ddi_acc_impl_t	*hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
	size_t	repcount = cautacc_ctlops_arg->repcount;
	uint_t	flags = cautacc_ctlops_arg->flags;

	px_t	*px_p = DIP_TO_STATE(dip);
	px_pec_t	*pec_p = px_p->px_pec_p;
	int	err = DDI_SUCCESS;

	/*
	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
	 * mutex.
	 */
	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);

	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;

	if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
		for (; repcount; repcount--) {
			switch (size) {

			case sizeof (uint8_t):
				i_ddi_put8(hp, (uint8_t *)dev_addr,
				    *(uint8_t *)host_addr);
				break;

			case sizeof (uint16_t):
				i_ddi_put16(hp, (uint16_t *)dev_addr,
				    *(uint16_t *)host_addr);
				break;

			case sizeof (uint32_t):
				i_ddi_put32(hp, (uint32_t *)dev_addr,
				    *(uint32_t *)host_addr);
				break;

			case sizeof (uint64_t):
				i_ddi_put64(hp, (uint64_t *)dev_addr,
				    *(uint64_t *)host_addr);
				break;
			}

			host_addr += size;

			if (flags == DDI_DEV_AUTOINCR)
				dev_addr += size;

			px_lib_clr_errs(px_p);

			if (pec_p->pec_ontrap_data->ot_trap & OT_DATA_ACCESS) {
				err = DDI_FAILURE;
#ifdef  DEBUG
				px_pokefault_cnt++;
#endif
				break;
			}
		}
	}

	i_ddi_notrap((ddi_acc_handle_t)hp);
	pec_p->pec_ontrap_data = NULL;
	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;

	return (err);
}


int
px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *in_args)
{
	return (in_args->handle ? px_lib_do_caut_put(dip, rdip, in_args) :
	    px_lib_do_poke(dip, rdip, in_args));
}


/*ARGSUSED*/
static int
px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args)
{
	px_t	*px_p = DIP_TO_STATE(dip);
	px_pec_t	*pec_p = px_p->px_pec_p;
	int	err = DDI_SUCCESS;
	on_trap_data_t	otd;

	mutex_enter(&pec_p->pec_pokefault_mutex);
	pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK;

	if (!on_trap(&otd, OT_DATA_ACCESS)) {
		uintptr_t tramp = otd.ot_trampoline;

		otd.ot_trampoline = (uintptr_t)&peek_fault;
		err = do_peek(in_args->size, (void *)in_args->dev_addr,
		    (void *)in_args->host_addr);
		otd.ot_trampoline = tramp;
	} else
		err = DDI_FAILURE;

	no_trap();
	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
	mutex_exit(&pec_p->pec_pokefault_mutex);

#ifdef  DEBUG
	if (err == DDI_FAILURE)
		px_peekfault_cnt++;
#endif
	return (err);
}


static int
px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg)
{
	size_t	size = cautacc_ctlops_arg->size;
	uintptr_t	dev_addr = cautacc_ctlops_arg->dev_addr;
	uintptr_t	host_addr = cautacc_ctlops_arg->host_addr;
	ddi_acc_impl_t	*hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
	size_t	repcount = cautacc_ctlops_arg->repcount;
	uint_t	flags = cautacc_ctlops_arg->flags;

	px_t	*px_p = DIP_TO_STATE(dip);
	px_pec_t	*pec_p = px_p->px_pec_p;
	int	err = DDI_SUCCESS;

	/*
	 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault
	 * mutex.
	 */
	i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);

	pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;
	pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED;
	hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;

	if (repcount == 1) {
		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
			i_ddi_caut_get(size, (void *)dev_addr,
			    (void *)host_addr);
		} else {
			int i;
			uint8_t *ff_addr = (uint8_t *)host_addr;
			for (i = 0; i < size; i++)
				*ff_addr++ = 0xff;

			err = DDI_FAILURE;
#ifdef  DEBUG
			px_peekfault_cnt++;
#endif
		}
	} else {
		if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
			for (; repcount; repcount--) {
				i_ddi_caut_get(size, (void *)dev_addr,
				    (void *)host_addr);

				host_addr += size;

				if (flags == DDI_DEV_AUTOINCR)
					dev_addr += size;
			}
		} else {
			err = DDI_FAILURE;
#ifdef  DEBUG
			px_peekfault_cnt++;
#endif
		}
	}

	i_ddi_notrap((ddi_acc_handle_t)hp);
	pec_p->pec_ontrap_data = NULL;
	pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED;
	i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
	hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;

	return (err);
}

/*ARGSUSED*/
int
px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *in_args, void *result)
{
	result = (void *)in_args->host_addr;
	return (in_args->handle ? px_lib_do_caut_get(dip, in_args) :
	    px_lib_do_peek(dip, in_args));
}
/*
 * implements PPM interface
 */
int
px_lib_pmctl(int cmd, px_t *px_p)
{
	ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ);
	switch (cmd) {
	case PPMREQ_PRE_PWR_OFF:
		/*
		 * Currently there is no device power management for
		 * the root complex (fire). When there is we need to make
		 * sure that it is at full power before trying to send the
		 * PME_Turn_Off message.
		 */
		DBG(DBG_PWR, px_p->px_dip,
		    "ioctl: request to send PME_Turn_Off\n");
		return (px_goto_l23ready(px_p));

	case PPMREQ_PRE_PWR_ON:
		DBG(DBG_PWR, px_p->px_dip, "ioctl: PRE_PWR_ON request\n");
		return (px_pre_pwron_check(px_p));

	case PPMREQ_POST_PWR_ON:
		DBG(DBG_PWR, px_p->px_dip, "ioctl: POST_PWR_ON request\n");
		return (px_goto_l0(px_p));

	default:
		return (DDI_FAILURE);
	}
}

/*
 * Sends PME_Turn_Off message to put the link in L2/L3 ready state.
 * Called by px_ioctl.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 * 1. Wait for link to be in L1 state (link status reg)
 * 2. Write to PME_Turn_off reg to broadcast
 * 3. Set timeout
 * 4. If timeout, return failure.
 * 5. If PM_TO_Ack, wait till link is in L2/L3 ready
 */
static int
px_goto_l23ready(px_t *px_p)
{
	pcie_pwr_t	*pwr_p;
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t		csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
	int		ret = DDI_SUCCESS;
	clock_t		end, timeleft;
	int		mutex_held = 1;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	mutex_enter(&pwr_p->pwr_lock);
	mutex_enter(&px_p->px_l23ready_lock);
	/* Clear the PME_To_ACK received flag */
	px_p->px_pm_flags &= ~PX_PMETOACK_RECVD;
	/*
	 * When P25 is the downstream device, after receiving
	 * PME_To_ACK, fire will go to Detect state, which causes
	 * the link down event. Inform FMA that this is expected.
	 * In case of all other cards compliant with the PCI Express
	 * spec, this will happen when the power is re-applied. FMA
	 * code will clear this flag after one instance of LDN. Since
	 * there will not be an LDN event for the spec-compliant cards,
	 * we need to clear the flag after receiving PME_To_ACK.
	 */
	px_p->px_pm_flags |= PX_LDN_EXPECTED;
	if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) {
		ret = DDI_FAILURE;
		goto l23ready_done;
	}
	px_p->px_pm_flags |= PX_PME_TURNOFF_PENDING;

	end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout);
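	/*
	 * cv_timedwait() takes an absolute wakeup time expressed in lbolt
	 * ticks, which is why the deadline is computed once here rather
	 * than inside the loop; drv_usectohz() converts the microsecond
	 * timeout value into ticks.
	 */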
	while (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
		timeleft = cv_timedwait(&px_p->px_l23ready_cv,
		    &px_p->px_l23ready_lock, end);
		/*
		 * if cv_timedwait returns -1, it is either
		 * 1) timed out or
		 * 2) there was a premature wakeup but by the time
		 * cv_timedwait is called again end < lbolt i.e.
		 * end is in the past.
		 * 3) By the time we make the first cv_timedwait call,
		 * end < lbolt is true.
		 */
		if (timeleft == -1)
			break;
	}
	if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
		/*
		 * Either timed out or the interrupt didn't get a
		 * chance to grab the mutex and set the flag.
		 * Release the mutex and delay for some time.
		 * This will 1) give a chance for the interrupt to
		 * set the flag 2) create a delay between two
		 * consecutive requests.
		 */
		mutex_exit(&px_p->px_l23ready_lock);
		delay(drv_usectohz(50 * PX_MSEC_TO_USEC));
		mutex_held = 0;
		if (!(px_p->px_pm_flags & PX_PMETOACK_RECVD)) {
			ret = DDI_FAILURE;
			DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting"
			    " for PME_TO_ACK\n");
		}
	}
	px_p->px_pm_flags &=
	    ~(PX_PME_TURNOFF_PENDING | PX_PMETOACK_RECVD | PX_LDN_EXPECTED);

l23ready_done:
	if (mutex_held)
		mutex_exit(&px_p->px_l23ready_lock);
	/*
	 * Wait till link is in L1 idle, if sending PME_Turn_Off
	 * was successful.
	 */
	if (ret == DDI_SUCCESS) {
		if (px_link_wait4l1idle(csr_base) != DDI_SUCCESS) {
			DBG(DBG_PWR, px_p->px_dip, " Link is not at L1"
			    " even though we received PME_To_ACK.\n");
			/*
			 * Workaround for hardware bug with P25.
			 * Due to a hardware bug with P25, the link state
			 * will be Detect state rather than L1 after the
			 * link is transitioned to L23Ready state. Since
			 * we don't know whether the link is in L23Ready
			 * state without Fire's state being L1_idle, we
			 * delay here just to make sure that we wait till
			 * the link is transitioned to L23Ready state.
			 */
			delay(drv_usectohz(100 * PX_MSEC_TO_USEC));
		}
		pwr_p->pwr_link_lvl = PM_LEVEL_L3;

	}
	mutex_exit(&pwr_p->pwr_lock);
	return (ret);
}

/*
 * Message interrupt handler intended to be shared for both
 * PME and PME_TO_ACK msg handling; currently it only handles
 * the PME_To_ACK message.
 */
uint_t
px_pmeq_intr(caddr_t arg)
{
	px_t	*px_p = (px_t *)arg;

	DBG(DBG_PWR, px_p->px_dip, " PME_To_ACK received \n");
	mutex_enter(&px_p->px_l23ready_lock);
	cv_broadcast(&px_p->px_l23ready_cv);
	if (px_p->px_pm_flags & PX_PME_TURNOFF_PENDING) {
		px_p->px_pm_flags |= PX_PMETOACK_RECVD;
	} else {
		/*
		 * This may be the second ack received. If so then,
		 * we should be receiving it during the wait4L1 stage.
		 */
		px_p->px_pmetoack_ignored++;
	}
	mutex_exit(&px_p->px_l23ready_lock);
	return (DDI_INTR_CLAIMED);
}

static int
px_pre_pwron_check(px_t *px_p)
{
	pcie_pwr_t	*pwr_p;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	/*
	 * For the spec-compliant downstream cards, link down
	 * is expected when the device is powered on.
	 */
	px_p->px_pm_flags |= PX_LDN_EXPECTED;
	return (pwr_p->pwr_link_lvl == PM_LEVEL_L3 ? DDI_SUCCESS : DDI_FAILURE);
}

static int
px_goto_l0(px_t *px_p)
{
	pcie_pwr_t	*pwr_p;
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t		csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];
	int		ret = DDI_SUCCESS;
	uint64_t	time_spent = 0;

	/* If no PM info, return failure */
	if (!PCIE_PMINFO(px_p->px_dip) ||
	    !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
		return (DDI_FAILURE);

	mutex_enter(&pwr_p->pwr_lock);
	/*
	 * The following link retrain activity will cause LDN and LUP
	 * events. Receiving an LDN prior to receiving an LUP is expected,
	 * not an error in this case. Receiving an LUP indicates the link
	 * is fully up and able to support powering up the downstream
	 * device, and of course any further LDN and LUP outside this
	 * context will be errors.
	 */
	px_p->px_lup_pending = 1;
	if (px_link_retrain(csr_base) != DDI_SUCCESS) {
		ret = DDI_FAILURE;
		goto l0_done;
	}

	/* An LUP event takes on the order of 15ms to occur */
	for (; px_p->px_lup_pending && (time_spent < px_lup_poll_to);
	    time_spent += px_lup_poll_interval)
		drv_usecwait(px_lup_poll_interval);
	if (px_p->px_lup_pending)
		ret = DDI_FAILURE;
l0_done:
	px_enable_detect_quiet(csr_base);
	if (ret == DDI_SUCCESS)
		pwr_p->pwr_link_lvl = PM_LEVEL_L0;
	mutex_exit(&pwr_p->pwr_lock);
	return (ret);
}
/*
 * Extract the driver's binding name to identify which chip we're binding to.
 * Whenever a new bus bridge is created, the driver alias entry should be
 * added here to identify the device if needed. If a device isn't added,
 * the identity defaults to PX_CHIP_UNIDENTIFIED.
 */
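/*
 * For reference (standard 1275 binding-name encoding, not specific to
 * this file): names of the form "pci108e,80f0" and "pciex108e,80f0"
 * encode the PCI vendor ID 0x108e (Sun Microsystems) followed by the
 * device ID 0x80f0, so the strcmp() calls below match the Fire host
 * bridge by its vendor/device ID pair.
 */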
static uint32_t
px_identity_chip(px_t *px_p)
{
	dev_info_t	*dip = px_p->px_dip;
	char		*name = ddi_binding_name(dip);
	uint32_t	revision = 0;

	revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "module-revision#", 0);

	/* Check for Fire driver binding name */
	if ((strcmp(name, "pci108e,80f0") == 0) ||
	    (strcmp(name, "pciex108e,80f0") == 0)) {
		DBG(DBG_ATTACH, dip, "px_identity_chip: %s%d: "
		    "name %s module-revision %d\n", ddi_driver_name(dip),
		    ddi_get_instance(dip), name, revision);

		return (PX_CHIP_ID(PX_CHIP_FIRE, revision, 0x00));
	}

	DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n",
	    ddi_driver_name(dip), ddi_get_instance(dip), name, revision);

	return (PX_CHIP_UNIDENTIFIED);
}

int
px_err_add_intr(px_fault_t *px_fault_p)
{
	dev_info_t	*dip = px_fault_p->px_fh_dip;
	px_t		*px_p = DIP_TO_STATE(dip);

	VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL,
	    px_fault_p->px_err_func, (caddr_t)px_fault_p, NULL) == 0);

	px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino);

	return (DDI_SUCCESS);
}

void
px_err_rem_intr(px_fault_t *px_fault_p)
{
	dev_info_t	*dip = px_fault_p->px_fh_dip;
	px_t		*px_p = DIP_TO_STATE(dip);

	px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino,
	    IB_INTR_WAIT);

	rem_ivintr(px_fault_p->px_fh_sysino, NULL);
}

#ifdef FMA
void
px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status)
{
	/* populate the rc_status by reading the registers - TBD */
}
#endif /* FMA */

/*
 * Unprotected raw reads/writes of fabric device's config space.
 * Only used for temporary PCI-E Fabric Error Handling.
 */
uint32_t
px_fab_get(px_t *px_p, pcie_req_id_t bdf, uint16_t offset)
{
	px_ranges_t	*rp = px_p->px_ranges_p;
	uint64_t	range_prop, base_addr;
	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);
	uint32_t	val;

	/* Get Fire's Physical Base Address */
	range_prop = (((uint64_t)(rp[bank].parent_high & 0x7ff)) << 32) |
	    rp[bank].parent_low;

	/* Get config space first. */
	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);

	val = ldphysio(base_addr);

	return (LE_32(val));
}

void
px_fab_set(px_t *px_p, pcie_req_id_t bdf, uint16_t offset,
    uint32_t val)
{
	px_ranges_t	*rp = px_p->px_ranges_p;
	uint64_t	range_prop, base_addr;
	int		bank = PCI_REG_ADDR_G(PCI_ADDR_CONFIG);

	/* Get Fire's Physical Base Address */
	range_prop = (((uint64_t)(rp[bank].parent_high & 0x7ff)) << 32) |
	    rp[bank].parent_low;

	/* Get config space first. */
	base_addr = range_prop + PX_BDF_TO_CFGADDR(bdf, offset);

	stphysio(base_addr, LE_32(val));
}
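
/*
 * Byte-order note for px_fab_get()/px_fab_set() above: PCI configuration
 * space data is little-endian, while these raw physical accesses are
 * made in the host's native byte order, so LE_32() converts between the
 * two (a byte swap on big-endian SPARC, a no-op on a little-endian
 * host).
 */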
/*
 * cpr callback
 *
 * disable fabric error msg interrupt prior to suspending
 * all device drivers; re-enable fabric error msg interrupt
 * after all devices are resumed.
 */
static boolean_t
px_cpr_callb(void *arg, int code)
{
	px_t		*px_p = (px_t *)arg;
	px_ib_t		*ib_p = px_p->px_ib_p;
	px_pec_t	*pec_p = px_p->px_pec_p;
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;
	caddr_t		csr_base;
	devino_t	ce_ino, nf_ino, f_ino;
	px_ib_ino_info_t	*ce_ino_p, *nf_ino_p, *f_ino_p;
	uint64_t	imu_log_enable, imu_intr_enable;
	uint64_t	imu_log_mask, imu_intr_mask;

	ce_ino = px_msiqid_to_devino(px_p, pec_p->pec_corr_msg_msiq_id);
	nf_ino = px_msiqid_to_devino(px_p, pec_p->pec_non_fatal_msg_msiq_id);
	f_ino = px_msiqid_to_devino(px_p, pec_p->pec_fatal_msg_msiq_id);
	csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR];

	imu_log_enable = CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE);
	imu_intr_enable = CSR_XR(csr_base, IMU_INTERRUPT_ENABLE);

	imu_log_mask = BITMASK(IMU_ERROR_LOG_ENABLE_FATAL_MES_NOT_EN_LOG_EN) |
	    BITMASK(IMU_ERROR_LOG_ENABLE_NONFATAL_MES_NOT_EN_LOG_EN) |
	    BITMASK(IMU_ERROR_LOG_ENABLE_COR_MES_NOT_EN_LOG_EN);

	imu_intr_mask =
	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_S_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_S_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_S_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_FATAL_MES_NOT_EN_P_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_NONFATAL_MES_NOT_EN_P_INT_EN) |
	    BITMASK(IMU_INTERRUPT_ENABLE_COR_MES_NOT_EN_P_INT_EN);

	switch (code) {
	case CB_CODE_CPR_CHKPT:
		/* disable imu rbne on corr/nonfatal/fatal errors */
		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE,
		    imu_log_enable & (~imu_log_mask));

		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE,
		    imu_intr_enable & (~imu_intr_mask));

		/* disable CORR intr mapping */
		px_ib_intr_disable(ib_p, ce_ino, IB_INTR_NOWAIT);

		/* disable NON FATAL intr mapping */
		px_ib_intr_disable(ib_p, nf_ino, IB_INTR_NOWAIT);

		/* disable FATAL intr mapping */
		px_ib_intr_disable(ib_p, f_ino, IB_INTR_NOWAIT);

		break;

	case CB_CODE_CPR_RESUME:
		mutex_enter(&ib_p->ib_ino_lst_mutex);

		ce_ino_p = px_ib_locate_ino(ib_p, ce_ino);
		nf_ino_p = px_ib_locate_ino(ib_p, nf_ino);
		f_ino_p = px_ib_locate_ino(ib_p, f_ino);

		/* enable CORR intr mapping */
		if (ce_ino_p)
			px_ib_intr_enable(px_p, ce_ino_p->ino_cpuid, ce_ino);
		else
			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
			    "reenable PCIe Correctable msg intr.\n");

		/* enable NON FATAL intr mapping */
		if (nf_ino_p)
			px_ib_intr_enable(px_p, nf_ino_p->ino_cpuid, nf_ino);
		else
			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
			    "reenable PCIe Non Fatal msg intr.\n");

		/* enable FATAL intr mapping */
		if (f_ino_p)
			px_ib_intr_enable(px_p, f_ino_p->ino_cpuid, f_ino);
		else
			cmn_err(CE_WARN, "px_cpr_callb: RESUME unable to "
			    "reenable PCIe Fatal msg intr.\n");

		mutex_exit(&ib_p->ib_ino_lst_mutex);

		/* re-enable corr/nonfatal/fatal "msg not enabled" errors */
		CSR_XS(csr_base, IMU_ERROR_LOG_ENABLE, (imu_log_enable |
		    (imu_log_mask & px_imu_log_mask)));
		CSR_XS(csr_base, IMU_INTERRUPT_ENABLE, (imu_intr_enable |
		    (imu_intr_mask & px_imu_intr_mask)));

		break;
	}

	return (B_TRUE);
}

/*
 * add cpr callback
 */
void
px_cpr_add_callb(px_t *px_p)
{
	px_p->px_cprcb_id = callb_add(px_cpr_callb, (void *)px_p,
	    CB_CL_CPR_POST_USER, "px_cpr");
}
/*
 * remove cpr callback
 */
void
px_cpr_rem_callb(px_t *px_p)
{
	(void) callb_delete(px_p->px_cprcb_id);
}