/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/disp.h>
#include <sys/stat.h>
#include <sys/ddi_impldefs.h>
#include <sys/vmem.h>
#include <sys/iommutsb.h>
#include <sys/cpuvar.h>
#include <px_obj.h>
#include <pcie_pwr.h>
#include "px_tools_var.h"
#include <px_regs.h>
#include <px_csr.h>
#include "px_lib4u.h"

#pragma weak jbus_stst_order

extern void jbus_stst_order();

ulong_t px_mmu_dvma_end = 0xfffffffful;
uint_t px_ranges_phi_mask = 0xfffffffful;

static int px_goto_l23ready(px_t *px_p);
static uint32_t px_identity_chip(px_t *px_p);

int
px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl)
{
        px_t *px_p = DIP_TO_STATE(dip);
        caddr_t xbc_csr_base = (caddr_t)px_p->px_address[PX_REG_XBC];
        caddr_t csr_base = (caddr_t)px_p->px_address[PX_REG_CSR];
        px_dvma_range_prop_t px_dvma_range;
        uint32_t chip_id;
        pxu_t *pxu_p;

        DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p\n", dip);

        if ((chip_id = px_identity_chip(px_p)) == PX_CHIP_UNIDENTIFIED)
                return (DDI_FAILURE);

        switch (chip_id) {
        case FIRE_VER_10:
                DBG(DBG_ATTACH, dip, "FIRE Hardware Version 1.0\n");
                break;
        case FIRE_VER_20:
                DBG(DBG_ATTACH, dip, "FIRE Hardware Version 2.0\n");
                break;
        default:
                cmn_err(CE_WARN, "%s(%d): FIRE Hardware Version Unknown\n",
                    ddi_driver_name(dip), ddi_get_instance(dip));
                return (DDI_FAILURE);
        }

        /*
         * Allocate platform specific structure and link it to
         * the px state structure.
         */
        pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP);

        pxu_p->chip_id = chip_id;
        pxu_p->portid = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
            "portid", -1);

        /*
         * XXX - Move all ddi_regs_map_setup() from px_util.c
         * to this file before complete virtualization.
         */
        pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid);
        pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie);
        pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie);

        /*
         * Create "virtual-dma" property to support child devices
         * needing to know DVMA range.
         */
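        /*
         * Worked example (illustrative numbers, not read from hardware):
         * assuming 8K IOMMU pages (MMU_PAGE_SHIFT == 13), a 128KB TSB
         * holds 0x20000 >> 3 == 16K TTEs and therefore maps
         * 16K << 13 == 0x8000000 (128MB) of DVMA space.  The window is
         * carved from the top of the 32-bit DVMA range, giving
         * dvma_base == 0xffffffff + 1 - 0x8000000 == 0xf8000000 and
         * dvma_len == 0x8000000.
         */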
        px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1
            - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT);
        px_dvma_range.dvma_len = (uint32_t)
            px_mmu_dvma_end - px_dvma_range.dvma_base + 1;

        (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
            "virtual-dma", (caddr_t)&px_dvma_range,
            sizeof (px_dvma_range_prop_t));

        /*
         * Initialize all fire hardware specific blocks.
         */
        hvio_cb_init(xbc_csr_base, pxu_p);
        hvio_ib_init(csr_base, pxu_p);
        hvio_pec_init(csr_base, pxu_p);
        hvio_mmu_init(csr_base, pxu_p);

        px_p->px_plat_p = (void *)pxu_p;

        /* Initialize device handle */
        *dev_hdl = (devhandle_t)csr_base;

        DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl);

        return (DDI_SUCCESS);
}

int
px_lib_dev_fini(dev_info_t *dip)
{
        px_t *px_p = DIP_TO_STATE(dip);
        pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;

        DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip);

        iommu_tsb_free(pxu_p->tsb_cookie);

        px_p->px_plat_p = NULL;
        kmem_free(pxu_p, sizeof (pxu_t));

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino,
    sysino_t *sysino)
{
        px_t *px_p = DIP_TO_STATE(dip);
        pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
        uint64_t ret;

        DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p "
            "devino 0x%x\n", dip, devino);

        if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip),
            pxu_p, devino, sysino)) != H_EOK) {
                DBG(DBG_LIB_INT, dip,
                    "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n",
            *sysino);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino,
    intr_valid_state_t *intr_valid_state)
{
        uint64_t ret;

        DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n",
            dip, sysino);

        if ((ret = hvio_intr_getvalid(DIP_TO_HANDLE(dip),
            sysino, intr_valid_state)) != H_EOK) {
                DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n",
                    ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n",
            *intr_valid_state);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino,
    intr_valid_state_t intr_valid_state)
{
        uint64_t ret;

        DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx "
            "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state);

        if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip),
            sysino, intr_valid_state)) != H_EOK) {
                DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n",
                    ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino,
    intr_state_t *intr_state)
{
        uint64_t ret;

        DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n",
            dip, sysino);

        if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip),
            sysino, intr_state)) != H_EOK) {
                DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n",
                    ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n",
            *intr_state);

        return (DDI_SUCCESS);
}

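/*
 * Usage sketch (illustrative only; the valid-state enum names are
 * assumptions, not taken from this file, though INTR_IDLE_STATE is used
 * below): a caller retargeting an interrupt would compose these
 * primitives roughly as
 *
 *      (void) px_lib_intr_setvalid(dip, sysino, INTR_NOTVALID);
 *      (void) px_lib_intr_settarget(dip, sysino, new_cpuid);
 *      (void) px_lib_intr_setstate(dip, sysino, INTR_IDLE_STATE);
 *      (void) px_lib_intr_setvalid(dip, sysino, INTR_VALID);
 */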
/*ARGSUSED*/
int
px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino,
    intr_state_t intr_state)
{
        uint64_t ret;

        DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx "
            "intr_state 0x%x\n", dip, sysino, intr_state);

        if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip),
            sysino, intr_state)) != H_EOK) {
                DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n",
                    ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid)
{
        uint64_t ret;

        DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino "
            "0x%llx\n", dip, sysino);

        if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip),
            sysino, cpuid)) != H_EOK) {
                DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, "
                    "ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", *cpuid);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid)
{
        uint64_t ret;

        DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx "
            "cpuid 0x%x\n", dip, sysino, cpuid);

        if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip),
            sysino, cpuid)) != H_EOK) {
                DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, "
                    "ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_intr_reset(dev_info_t *dip)
{
        devino_t ino;
        sysino_t sysino;

        DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip);

        /* Reset all Interrupts */
        for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) {
                if (px_lib_intr_devino_to_sysino(dip, ino,
                    &sysino) != DDI_SUCCESS)
                        return (BF_FATAL);

                if (px_lib_intr_setstate(dip, sysino,
                    INTR_IDLE_STATE) != DDI_SUCCESS)
                        return (BF_FATAL);
        }

        return (BF_NONE);
}

/*ARGSUSED*/
int
px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages,
    io_attributes_t io_attributes, void *addr, size_t pfn_index,
    int flag)
{
        px_t *px_p = DIP_TO_STATE(dip);
        pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
        uint64_t ret;

        DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx "
            "pages 0x%x attr 0x%x addr 0x%p pfn_index 0x%llx, flag 0x%x\n",
            dip, tsbid, pages, io_attributes, addr, pfn_index, flag);

        if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages,
            io_attributes, addr, pfn_index, flag)) != H_EOK) {
                DBG(DBG_LIB_DMA, dip,
                    "px_lib_iommu_map failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages)
{
        px_t *px_p = DIP_TO_STATE(dip);
        pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
        uint64_t ret;

        DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx "
            "pages 0x%x\n", dip, tsbid, pages);

        if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, tsbid, pages))
            != H_EOK) {
                DBG(DBG_LIB_DMA, dip,
                    "px_lib_iommu_demap failed, ret 0x%lx\n", ret);

                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid,
    io_attributes_t *attributes_p, r_addr_t *r_addr_p)
{
        px_t *px_p = DIP_TO_STATE(dip);
        pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
        uint64_t ret;

        DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n",
            dip, tsbid);

        if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid,
            attributes_p, r_addr_p)) != H_EOK) {
                DBG(DBG_LIB_DMA, dip,
                    "hvio_iommu_getmap failed, ret 0x%lx\n", ret);

                return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING : DDI_FAILURE);
        }

        DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%x r_addr 0x%llx\n",
            *attributes_p, *r_addr_p);

        return (DDI_SUCCESS);
}


/*
 * Checks dma attributes against system bypass ranges.
 * The bypass range is determined by the hardware.  Return them so the
 * common code can do generic checking against them.
 */
/*ARGSUSED*/
int
px_lib_dma_bypass_rngchk(ddi_dma_attr_t *attrp, uint64_t *lo_p, uint64_t *hi_p)
{
        *lo_p = MMU_BYPASS_BASE;
        *hi_p = MMU_BYPASS_END;

        return (DDI_SUCCESS);
}


/*ARGSUSED*/
int
px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra,
    io_attributes_t io_attributes, io_addr_t *io_addr_p)
{
        uint64_t ret;

        DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx "
            "attr 0x%x\n", dip, ra, io_attributes);

        if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), ra,
            io_attributes, io_addr_p)) != H_EOK) {
                DBG(DBG_LIB_DMA, dip,
                    "hvio_iommu_getbypass failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n",
            *io_addr_p);

        return (DDI_SUCCESS);
}

/*
 * bus dma sync entry point.
 */
/*ARGSUSED*/
int
px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    off_t off, size_t len, uint_t cache_flags)
{
        ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle;

        DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p "
            "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n",
            dip, rdip, handle, off, len, cache_flags);

        /*
         * jbus_stst_order is found only in certain cpu modules.
         * Just return success if not present.
         */
        if (&jbus_stst_order == NULL)
                return (DDI_SUCCESS);

        if (!(mp->dmai_flags & DMAI_FLAGS_INUSE)) {
                cmn_err(CE_WARN, "Unbound dma handle %p from %s%d", (void *)mp,
                    ddi_driver_name(rdip), ddi_get_instance(rdip));
                return (DDI_FAILURE);
        }

        if (mp->dmai_flags & DMAI_FLAGS_NOSYNC)
                return (DDI_SUCCESS);

        /*
         * No flush needed when sending data from memory to device.
         * Nothing to do to "sync" memory to what device would already see.
         */
        if (!(mp->dmai_rflags & DDI_DMA_READ) ||
            ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV))
                return (DDI_SUCCESS);

        /*
         * Perform necessary cpu workaround to ensure jbus ordering.
         * CPU's internal "invalidate FIFOs" are flushed.
         */
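        /*
         * jbus_stst_order() is declared with #pragma weak above, so on
         * cpu modules that don't provide it the symbol resolves to NULL
         * and the address check earlier in this function returns early.
         * Preemption is presumably disabled so the flush completes on
         * the current cpu; the lint guards simply hide the kpreempt
         * calls from lint.
         */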
#if !defined(lint)
        kpreempt_disable();
#endif
        jbus_stst_order();
#if !defined(lint)
        kpreempt_enable();
#endif
        return (DDI_SUCCESS);
}

/*
 * MSIQ Functions:
 */
/*ARGSUSED*/
int
px_lib_msiq_init(dev_info_t *dip)
{
        px_t *px_p = DIP_TO_STATE(dip);
        pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
        px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
        caddr_t msiq_addr;
        px_dvma_addr_t pg_index;
        size_t size;
        int ret;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip);

        /*
         * Map the EQ memory into the Fire MMU (has to be 512KB aligned)
         * and then initialize the base address register.
         *
         * Allocate entries from Fire IOMMU so that the resulting address
         * is properly aligned.  Calculate the index of the first allocated
         * entry.  Note: The size of the mapping is assumed to be a multiple
         * of the page size.
         */
        msiq_addr = (caddr_t)(((uint64_t)msiq_state_p->msiq_buf_p +
            (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT);

        size = msiq_state_p->msiq_cnt *
            msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);

        pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map,
            size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT);

        if (pxu_p->msiq_mapped_p == NULL)
                return (DDI_FAILURE);

        pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
            MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));

        if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index),
            MMU_BTOP(size), PCI_MAP_ATTR_WRITE, (void *)msiq_addr, 0,
            MMU_MAP_BUF)) != DDI_SUCCESS) {
                DBG(DBG_LIB_MSIQ, dip,
                    "px_lib_iommu_map failed, ret 0x%lx\n", ret);

                (void) px_lib_msiq_fini(dip);
                return (DDI_FAILURE);
        }

        (void) hvio_msiq_init(DIP_TO_HANDLE(dip), pxu_p);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_fini(dev_info_t *dip)
{
        px_t *px_p = DIP_TO_STATE(dip);
        pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
        px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
        px_dvma_addr_t pg_index;
        size_t size;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip);

        /*
         * Unmap and free the EQ memory that had been mapped
         * into the Fire IOMMU.
         */
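        /*
         * Sizing sketch (illustrative numbers, not read from this file):
         * with msiq_cnt == 36 queues of msiq_rec_cnt == 128 records each,
         * size covers 36 * 128 * sizeof (msiq_rec_t) bytes, and
         * MMU_BTOP(size) IOMMU pages are demapped starting at the page
         * index derived from the mapped address below.
         */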
        size = msiq_state_p->msiq_cnt *
            msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);

        pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p,
            MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p));

        (void) px_lib_iommu_demap(px_p->px_dip,
            PCI_TSBID(0, pg_index), MMU_BTOP(size));

        /* Free the entries from the Fire MMU */
        vmem_xfree(px_p->px_mmu_p->mmu_dvma_map,
            (void *)pxu_p->msiq_mapped_p, size);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p,
    uint_t *msiq_rec_cnt_p)
{
        px_t *px_p = DIP_TO_STATE(dip);
        px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state;
        uint64_t *msiq_addr;
        size_t msiq_size;

        DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n",
            dip, msiq_id);

        msiq_addr = (uint64_t *)(((uint64_t)msiq_state_p->msiq_buf_p +
            (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT);
        msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t);
        *ra_p = (r_addr_t)((caddr_t)msiq_addr + (msiq_id * msiq_size));

        *msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt;

        DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%llx msiq_rec_cnt 0x%x\n",
            *ra_p, *msiq_rec_cnt_p);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_valid_state_t *msiq_valid_state)
{
        uint64_t ret;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n",
            dip, msiq_id);

        if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip),
            msiq_id, msiq_valid_state)) != H_EOK) {
                DBG(DBG_LIB_MSIQ, dip,
                    "hvio_msiq_getvalid failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n",
            *msiq_valid_state);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_valid_state_t msiq_valid_state)
{
        uint64_t ret;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x "
            "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state);

        if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip),
            msiq_id, msiq_valid_state)) != H_EOK) {
                DBG(DBG_LIB_MSIQ, dip,
                    "hvio_msiq_setvalid failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_state_t *msiq_state)
{
        uint64_t ret;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n",
            dip, msiq_id);

        if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip),
            msiq_id, msiq_state)) != H_EOK) {
                DBG(DBG_LIB_MSIQ, dip,
                    "hvio_msiq_getstate failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n",
            *msiq_state);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id,
    pci_msiq_state_t msiq_state)
{
        uint64_t ret;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x "
            "msiq_state 0x%x\n", dip, msiq_id, msiq_state);

        if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip),
            msiq_id, msiq_state)) != H_EOK) {
                DBG(DBG_LIB_MSIQ, dip,
                    "hvio_msiq_setstate failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

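/*
 * Head/tail usage sketch (inferred from the interfaces below, not stated
 * in this file): an EQ consumer reads the hardware tail, walks records
 * from its cached head up to that tail via px_lib_get_msiq_rec(), then
 * retires them by writing the new head back with px_lib_msiq_sethead().
 */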
/*ARGSUSED*/
int
px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id,
    msiqhead_t *msiq_head)
{
        uint64_t ret;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n",
            dip, msiq_id);

        if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip),
            msiq_id, msiq_head)) != H_EOK) {
                DBG(DBG_LIB_MSIQ, dip,
                    "hvio_msiq_gethead failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n",
            *msiq_head);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id,
    msiqhead_t msiq_head)
{
        uint64_t ret;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x "
            "msiq_head 0x%x\n", dip, msiq_id, msiq_head);

        if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip),
            msiq_id, msiq_head)) != H_EOK) {
                DBG(DBG_LIB_MSIQ, dip,
                    "hvio_msiq_sethead failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id,
    msiqtail_t *msiq_tail)
{
        uint64_t ret;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n",
            dip, msiq_id);

        if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip),
            msiq_id, msiq_tail)) != H_EOK) {
                DBG(DBG_LIB_MSIQ, dip,
                    "hvio_msiq_gettail failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n",
            *msiq_tail);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
void
px_lib_get_msiq_rec(dev_info_t *dip, px_msiq_t *msiq_p, msiq_rec_t *msiq_rec_p)
{
        px_t *px_p = DIP_TO_STATE(dip);
        pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
        eq_rec_t *eq_rec_p = (eq_rec_t *)msiq_p->msiq_curr;

        DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n",
            dip, eq_rec_p);

        if (!eq_rec_p->eq_rec_rid) {
                /* Set msiq_rec_rid to zero */
                msiq_rec_p->msiq_rec_rid = 0;

                return;
        }

        DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, "
            "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx "
            "eq_rec_len 0x%llx eq_rec_addr0 0x%llx "
            "eq_rec_addr1 0x%llx eq_rec_data0 0x%llx "
            "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid,
            eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len,
            eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1,
            eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1);

        /*
         * Only the upper 4 bits of eq_rec_fmt_type are used
         * to identify the EQ record type.
         */
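        /*
         * Decode sketch (field widths inferred from the shifts and masks
         * used here, not from a Fire spec): with a 7-bit eq_rec_fmt_type
         * of 0b0101011, (0b0101011 >> 3) == 0b0101 is the record type
         * matched below, while (0b0101011 & 7) == 0b011 is kept as the
         * message routing code for MSG records.
         */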
        switch (eq_rec_p->eq_rec_fmt_type >> 3) {
        case EQ_REC_MSI32:
                msiq_rec_p->msiq_rec_type = MSI32_REC;

                if (pxu_p->chip_id == FIRE_VER_10) {
                        msiq_rec_p->msiq_rec_data.msi.msi_data =
                            (eq_rec_p->eq_rec_data0 & 0xFF) << 8 |
                            (eq_rec_p->eq_rec_data0 & 0xFF00) >> 8;
                } else {
                        /* Default case is FIRE2.0 */
                        msiq_rec_p->msiq_rec_data.msi.msi_data =
                            eq_rec_p->eq_rec_data0;
                }

                break;
        case EQ_REC_MSI64:
                msiq_rec_p->msiq_rec_type = MSI64_REC;

                if (pxu_p->chip_id == FIRE_VER_10) {
                        msiq_rec_p->msiq_rec_data.msi.msi_data =
                            (eq_rec_p->eq_rec_data0 & 0xFF) << 8 |
                            (eq_rec_p->eq_rec_data0 & 0xFF00) >> 8;
                } else {
                        /* Default case is FIRE2.0 */
                        msiq_rec_p->msiq_rec_data.msi.msi_data =
                            eq_rec_p->eq_rec_data0;
                }

                break;
        case EQ_REC_MSG:
                msiq_rec_p->msiq_rec_type = MSG_REC;

                msiq_rec_p->msiq_rec_data.msg.msg_route =
                    eq_rec_p->eq_rec_fmt_type & 7;
                msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid;
                msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0;
                break;
        default:
                cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: "
                    "0x%lx is an unknown EQ record type",
                    ddi_driver_name(dip), ddi_get_instance(dip),
                    eq_rec_p->eq_rec_fmt_type);
                break;
        }

        msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid;
        msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) |
            (eq_rec_p->eq_rec_addr0 << 2));

        /* Zero out eq_rec_rid field */
        eq_rec_p->eq_rec_rid = 0;
}

/*
 * MSI Functions:
 */
/*ARGSUSED*/
int
px_lib_msi_init(dev_info_t *dip)
{
        px_t *px_p = DIP_TO_STATE(dip);
        px_msi_state_t *msi_state_p = &px_p->px_ib_p->ib_msi_state;
        uint64_t ret;

        DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip);

        if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip),
            msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) {
                DBG(DBG_LIB_MSI, dip, "px_lib_msi_init failed, ret 0x%lx\n",
                    ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num,
    msiqid_t *msiq_id)
{
        uint64_t ret;

        DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n",
            dip, msi_num);

        if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip),
            msi_num, msiq_id)) != H_EOK) {
                DBG(DBG_LIB_MSI, dip,
                    "hvio_msi_getmsiq failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n",
            *msiq_id);

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num,
    msiqid_t msiq_id, msi_type_t msitype)
{
        uint64_t ret;

        DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x "
            "msiq_id 0x%x\n", dip, msi_num, msiq_id);

        if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip),
            msi_num, msiq_id)) != H_EOK) {
                DBG(DBG_LIB_MSI, dip,
                    "hvio_msi_setmsiq failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num,
    pci_msi_valid_state_t *msi_valid_state)
{
        uint64_t ret;

        DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n",
            dip, msi_num);

        if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip),
            msi_num, msi_valid_state)) != H_EOK) {
                DBG(DBG_LIB_MSI, dip,
"hvio_msi_getvalid failed, ret 0x%lx\n", ret); 919 return (DDI_FAILURE); 920 } 921 922 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msiq_id 0x%x\n", 923 *msi_valid_state); 924 925 return (DDI_SUCCESS); 926 } 927 928 /*ARGSUSED*/ 929 int 930 px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num, 931 pci_msi_valid_state_t msi_valid_state) 932 { 933 uint64_t ret; 934 935 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x " 936 "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state); 937 938 if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip), 939 msi_num, msi_valid_state)) != H_EOK) { 940 DBG(DBG_LIB_MSI, dip, 941 "hvio_msi_setvalid failed, ret 0x%lx\n", ret); 942 return (DDI_FAILURE); 943 } 944 945 return (DDI_SUCCESS); 946 } 947 948 /*ARGSUSED*/ 949 int 950 px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num, 951 pci_msi_state_t *msi_state) 952 { 953 uint64_t ret; 954 955 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n", 956 dip, msi_num); 957 958 if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip), 959 msi_num, msi_state)) != H_EOK) { 960 DBG(DBG_LIB_MSI, dip, 961 "hvio_msi_getstate failed, ret 0x%lx\n", ret); 962 return (DDI_FAILURE); 963 } 964 965 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n", 966 *msi_state); 967 968 return (DDI_SUCCESS); 969 } 970 971 /*ARGSUSED*/ 972 int 973 px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num, 974 pci_msi_state_t msi_state) 975 { 976 uint64_t ret; 977 978 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x " 979 "msi_state 0x%x\n", dip, msi_num, msi_state); 980 981 if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip), 982 msi_num, msi_state)) != H_EOK) { 983 DBG(DBG_LIB_MSI, dip, 984 "hvio_msi_setstate failed, ret 0x%lx\n", ret); 985 return (DDI_FAILURE); 986 } 987 988 return (DDI_SUCCESS); 989 } 990 991 /* 992 * MSG Functions: 993 */ 994 /*ARGSUSED*/ 995 int 996 px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type, 997 msiqid_t *msiq_id) 998 { 999 uint64_t ret; 1000 1001 DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n", 1002 dip, msg_type); 1003 1004 if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip), 1005 msg_type, msiq_id)) != H_EOK) { 1006 DBG(DBG_LIB_MSG, dip, 1007 "hvio_msg_getmsiq failed, ret 0x%lx\n", ret); 1008 return (DDI_FAILURE); 1009 } 1010 1011 DBG(DBG_LIB_MSI, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n", 1012 *msiq_id); 1013 1014 return (DDI_SUCCESS); 1015 } 1016 1017 /*ARGSUSED*/ 1018 int 1019 px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type, 1020 msiqid_t msiq_id) 1021 { 1022 uint64_t ret; 1023 1024 DBG(DBG_LIB_MSG, dip, "px_lib_msi_setstate: dip 0x%p msg_type 0x%x " 1025 "msiq_id 0x%x\n", dip, msg_type, msiq_id); 1026 1027 if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip), 1028 msg_type, msiq_id)) != H_EOK) { 1029 DBG(DBG_LIB_MSG, dip, 1030 "hvio_msg_setmsiq failed, ret 0x%lx\n", ret); 1031 return (DDI_FAILURE); 1032 } 1033 1034 return (DDI_SUCCESS); 1035 } 1036 1037 /*ARGSUSED*/ 1038 int 1039 px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type, 1040 pcie_msg_valid_state_t *msg_valid_state) 1041 { 1042 uint64_t ret; 1043 1044 DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n", 1045 dip, msg_type); 1046 1047 if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type, 1048 msg_valid_state)) != H_EOK) { 1049 DBG(DBG_LIB_MSG, dip, 1050 "hvio_msg_getvalid failed, ret 0x%lx\n", ret); 1051 return (DDI_FAILURE); 1052 } 1053 1054 DBG(DBG_LIB_MSI, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n", 1055 *msg_valid_state); 1056 
        return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t msg_valid_state)
{
        uint64_t ret;

        DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x "
            "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state);

        if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type,
            msg_valid_state)) != H_EOK) {
                DBG(DBG_LIB_MSG, dip,
                    "hvio_msg_setvalid failed, ret 0x%lx\n", ret);
                return (DDI_FAILURE);
        }

        return (DDI_SUCCESS);
}

/*
 * Suspend/Resume Functions:
 * Currently unsupported by hypervisor
 */
int
px_lib_suspend(dev_info_t *dip)
{
        px_t *px_p = DIP_TO_STATE(dip);
        pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
        devhandle_t dev_hdl, xbus_dev_hdl;
        uint64_t ret;

        DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip);

        dev_hdl = (devhandle_t)px_p->px_address[PX_REG_CSR];
        xbus_dev_hdl = (devhandle_t)px_p->px_address[PX_REG_XBC];

        if ((ret = hvio_suspend(dev_hdl, pxu_p)) == H_EOK) {
                px_p->px_cb_p->xbc_attachcnt--;
                if (px_p->px_cb_p->xbc_attachcnt == 0)
                        if ((ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p))
                            != H_EOK)
                                px_p->px_cb_p->xbc_attachcnt++;
        }

        return ((ret != H_EOK) ? DDI_FAILURE : DDI_SUCCESS);
}

void
px_lib_resume(dev_info_t *dip)
{
        px_t *px_p = DIP_TO_STATE(dip);
        pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
        devhandle_t dev_hdl, xbus_dev_hdl;
        devino_t pec_ino = px_p->px_inos[PX_INTR_PEC];
        devino_t xbc_ino = px_p->px_inos[PX_INTR_XBC];

        DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip);

        dev_hdl = (devhandle_t)px_p->px_address[PX_REG_CSR];
        xbus_dev_hdl = (devhandle_t)px_p->px_address[PX_REG_XBC];

        px_p->px_cb_p->xbc_attachcnt++;
        if (px_p->px_cb_p->xbc_attachcnt == 1)
                hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p);
        hvio_resume(dev_hdl, pec_ino, pxu_p);
}

/*
 * PCI tool Functions:
 * Currently unsupported by hypervisor
 */
/*ARGSUSED*/
int
px_lib_tools_dev_reg_ops(dev_info_t *dip, void *arg, int cmd, int mode)
{
        px_t *px_p = DIP_TO_STATE(dip);

        DBG(DBG_TOOLS, dip, "px_lib_tools_dev_reg_ops: dip 0x%p arg 0x%p "
            "cmd 0x%x mode 0x%x\n", dip, arg, cmd, mode);

        return (px_dev_reg_ops(dip, arg, cmd, mode, px_p));
}

/*ARGSUSED*/
int
px_lib_tools_bus_reg_ops(dev_info_t *dip, void *arg, int cmd, int mode)
{
        DBG(DBG_TOOLS, dip, "px_lib_tools_bus_reg_ops: dip 0x%p arg 0x%p "
            "cmd 0x%x mode 0x%x\n", dip, arg, cmd, mode);

        return (px_bus_reg_ops(dip, arg, cmd, mode));
}

/*ARGSUSED*/
int
px_lib_tools_intr_admn(dev_info_t *dip, void *arg, int cmd, int mode)
{
        px_t *px_p = DIP_TO_STATE(dip);

        DBG(DBG_TOOLS, dip, "px_lib_tools_intr_admn: dip 0x%p arg 0x%p "
            "cmd 0x%x mode 0x%x\n", dip, arg, cmd, mode);

        return (px_intr_admn(dip, arg, cmd, mode, px_p));
}

/*
 * Misc Functions:
 * Currently unsupported by hypervisor
 */
uint64_t
px_lib_get_cb(caddr_t csr)
{
        return (CSR_XR(csr, JBUS_SCRATCH_1));
}

void
px_lib_set_cb(caddr_t csr, uint64_t val)
{
        CSR_XS(csr, JBUS_SCRATCH_1, val);
}

/*ARGSUSED*/
int
px_lib_map_vconfig(dev_info_t *dip,
    ddi_map_req_t *mp, pci_config_offset_t off,
    pci_regspec_t *rp, caddr_t *addrp)
{
        /*
         * No special config space access services in this layer.
         */
        return (DDI_FAILURE);
}

#ifdef DEBUG
int px_peekfault_cnt = 0;
int px_pokefault_cnt = 0;
#endif /* DEBUG */

/*ARGSUSED*/
static int
px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *in_args)
{
        px_t *px_p = DIP_TO_STATE(dip);
        px_pec_t *pec_p = px_p->px_pec_p;
        int err = DDI_SUCCESS;
        on_trap_data_t otd;

        mutex_enter(&pec_p->pec_pokefault_mutex);
        pec_p->pec_ontrap_data = &otd;

        /* Set up protected environment. */
        if (!on_trap(&otd, OT_DATA_ACCESS)) {
                uintptr_t tramp = otd.ot_trampoline;

                otd.ot_trampoline = (uintptr_t)&poke_fault;
                err = do_poke(in_args->size, (void *)in_args->dev_addr,
                    (void *)in_args->host_addr);
                otd.ot_trampoline = tramp;
        } else
                err = DDI_FAILURE;

        /*
         * Read the async fault register for the PEC to see if it saw
         * a master-abort.
         *
         * XXX check if we need to clear errors at this point.
         */
        if (otd.ot_trap & OT_DATA_ACCESS)
                err = DDI_FAILURE;

        /* Take down protected environment. */
        no_trap();

        pec_p->pec_ontrap_data = NULL;
        mutex_exit(&pec_p->pec_pokefault_mutex);

#ifdef DEBUG
        if (err == DDI_FAILURE)
                px_pokefault_cnt++;
#endif
        return (err);
}

/*ARGSUSED*/
static int
px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *cautacc_ctlops_arg)
{
        size_t size = cautacc_ctlops_arg->size;
        uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
        uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
        ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
        size_t repcount = cautacc_ctlops_arg->repcount;
        uint_t flags = cautacc_ctlops_arg->flags;

        px_t *px_p = DIP_TO_STATE(dip);
        px_pec_t *pec_p = px_p->px_pec_p;
        int err = DDI_SUCCESS;

        /* Use ontrap data in handle set up by FMA */
        pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap;

        hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
        i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);

        mutex_enter(&pec_p->pec_pokefault_mutex);

        if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
                for (; repcount; repcount--) {
                        switch (size) {

                        case sizeof (uint8_t):
                                i_ddi_put8(hp, (uint8_t *)dev_addr,
                                    *(uint8_t *)host_addr);
                                break;

                        case sizeof (uint16_t):
                                i_ddi_put16(hp, (uint16_t *)dev_addr,
                                    *(uint16_t *)host_addr);
                                break;

                        case sizeof (uint32_t):
                                i_ddi_put32(hp, (uint32_t *)dev_addr,
                                    *(uint32_t *)host_addr);
                                break;

                        case sizeof (uint64_t):
                                i_ddi_put64(hp, (uint64_t *)dev_addr,
                                    *(uint64_t *)host_addr);
                                break;
                        }

                        host_addr += size;

                        if (flags == DDI_DEV_AUTOINCR)
                                dev_addr += size;

                        /*
                         * Read the async fault register for the PEC to
                         * see if it saw a master-abort.
                         *
                         * XXX check if we need to clear errors at this
                         * point.
                         */
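                        /*
                         * ot_trap accumulates the trap types seen while
                         * the handle's on_trap protection is armed, so a
                         * set OT_DATA_ACCESS bit here means this
                         * iteration's store faulted and the loop stops
                         * early.
                         */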
                        if (pec_p->pec_ontrap_data->ot_trap &
                            OT_DATA_ACCESS) {
                                err = DDI_FAILURE;
#ifdef DEBUG
                                px_pokefault_cnt++;
#endif
                                break;
                        }
                }
        }

        i_ddi_notrap((ddi_acc_handle_t)hp);
        pec_p->pec_ontrap_data = NULL;
        mutex_exit(&pec_p->pec_pokefault_mutex);
        i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
        hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;

        return (err);
}


int
px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *in_args)
{
        return (in_args->handle ? px_lib_do_caut_put(dip, rdip, in_args) :
            px_lib_do_poke(dip, rdip, in_args));
}


/*ARGSUSED*/
static int
px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args)
{
        int err = DDI_SUCCESS;
        on_trap_data_t otd;

        if (!on_trap(&otd, OT_DATA_ACCESS)) {
                uintptr_t tramp = otd.ot_trampoline;

                otd.ot_trampoline = (uintptr_t)&peek_fault;
                err = do_peek(in_args->size, (void *)in_args->dev_addr,
                    (void *)in_args->host_addr);
                otd.ot_trampoline = tramp;
        } else
                err = DDI_FAILURE;

        no_trap();

#ifdef DEBUG
        if (err == DDI_FAILURE)
                px_peekfault_cnt++;
#endif
        return (err);
}


static int
px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg)
{
        size_t size = cautacc_ctlops_arg->size;
        uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr;
        uintptr_t host_addr = cautacc_ctlops_arg->host_addr;
        ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle;
        size_t repcount = cautacc_ctlops_arg->repcount;
        uint_t flags = cautacc_ctlops_arg->flags;

        px_t *px_p = DIP_TO_STATE(dip);
        px_pec_t *pec_p = px_p->px_pec_p;
        int err = DDI_SUCCESS;

        hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED;
        i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);

        if (repcount == 1) {
                if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
                        i_ddi_caut_get(size, (void *)dev_addr,
                            (void *)host_addr);
                } else {
                        int i;
                        uint8_t *ff_addr = (uint8_t *)host_addr;
                        for (i = 0; i < size; i++)
                                *ff_addr++ = 0xff;

                        err = DDI_FAILURE;
#ifdef DEBUG
                        px_peekfault_cnt++;
#endif
                }
        } else {
                if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) {
                        for (; repcount; repcount--) {
                                i_ddi_caut_get(size, (void *)dev_addr,
                                    (void *)host_addr);

                                host_addr += size;

                                if (flags == DDI_DEV_AUTOINCR)
                                        dev_addr += size;
                        }
                } else {
                        err = DDI_FAILURE;
#ifdef DEBUG
                        px_peekfault_cnt++;
#endif
                }
        }

        i_ddi_notrap((ddi_acc_handle_t)hp);
        pec_p->pec_ontrap_data = NULL;
        i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp);
        hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED;

        return (err);
}

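/*
 * The 0xff fill in px_lib_do_caut_get() above presumably mirrors the
 * all-ones data an aborted PCI read returns, so a cautious caller sees
 * the conventional error pattern.
 */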
/*ARGSUSED*/
int
px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip,
    peekpoke_ctlops_t *in_args, void *result)
{
        result = (void *)in_args->host_addr;
        return (in_args->handle ? px_lib_do_caut_get(dip, in_args) :
            px_lib_do_peek(dip, in_args));
}

/*
 * implements PPM interface
 */
int
px_lib_pmctl(int cmd, px_t *px_p)
{
        ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ);
        switch (cmd) {
        case PPMREQ_PRE_PWR_OFF:
                /*
                 * Currently there is no device power management for
                 * the root complex (fire). When there is we need to make
                 * sure that it is at full power before trying to send the
                 * PME_Turn_Off message.
                 */
                DBG(DBG_PWR, px_p->px_dip,
                    "ioctl: request to send PME_Turn_Off\n");
                return (px_goto_l23ready(px_p));

        case PPMREQ_PRE_PWR_ON:
        case PPMREQ_POST_PWR_ON:
                /* code to be written for Fire 2.0. return failure for now */
                return (DDI_FAILURE);

        default:
                return (DDI_FAILURE);
        }
}

/*
 * sends PME_Turn_Off message to put the link in L2/L3 ready state.
 * called by px_ioctl.
 * returns DDI_SUCCESS or DDI_FAILURE
 * 1. Wait for link to be in L1 state (link status reg)
 * 2. write to PME_Turn_off reg to broadcast
 * 3. set timeout
 * 4. If timeout, return failure.
 * 5. If PM_TO_Ack, wait till link is in L2/L3 ready
 */
static int
px_goto_l23ready(px_t *px_p)
{
        pcie_pwr_t *pwr_p;
        caddr_t csr_base = (caddr_t)px_p->px_address[PX_REG_CSR];
        int ret = DDI_SUCCESS;
        clock_t end, timeleft;

        /* If no PM info, return failure */
        if (!PCIE_PMINFO(px_p->px_dip) ||
            !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip)))
                return (DDI_FAILURE);

        mutex_enter(&pwr_p->pwr_lock);
        mutex_enter(&pwr_p->pwr_intr_lock);
        /* Clear the PME_To_ACK received flag */
        pwr_p->pwr_flags &= ~PCIE_PMETOACK_RECVD;
        if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) {
                ret = DDI_FAILURE;
                goto l23ready_done;
        }
        pwr_p->pwr_flags |= PCIE_PME_TURNOFF_PENDING;

        end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout);
        while (!(pwr_p->pwr_flags & PCIE_PMETOACK_RECVD)) {
                timeleft = cv_timedwait(&pwr_p->pwr_cv,
                    &pwr_p->pwr_intr_lock, end);
                /*
                 * if cv_timedwait returns -1, it is either
                 * 1) timed out or
                 * 2) there was a premature wakeup but by the time
                 * cv_timedwait is called again end < lbolt i.e.
                 * end is in the past.
                 * 3) By the time we make first cv_timedwait call,
                 * end < lbolt is true.
                 */
                if (timeleft == -1)
                        break;
        }
        if (!(pwr_p->pwr_flags & PCIE_PMETOACK_RECVD)) {
                /*
                 * Either timed out or the interrupt didn't get a
                 * chance to grab the mutex and set the flag.
                 * release the mutex and delay for sometime.
                 * This will 1) give a chance for interrupt to
                 * set the flag 2) create a delay between two
                 * consecutive requests.
                 */
                mutex_exit(&pwr_p->pwr_intr_lock);
                delay(5);
                mutex_enter(&pwr_p->pwr_intr_lock);
                if (!(pwr_p->pwr_flags & PCIE_PMETOACK_RECVD)) {
                        ret = DDI_FAILURE;
                        DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting"
                            " for PME_TO_ACK\n");
                }
        }
        /* PME_To_ACK received */
        pwr_p->pwr_flags &= ~(PCIE_PME_TURNOFF_PENDING | PCIE_PMETOACK_RECVD);

        /* TBD: wait till link is in L2/L3 ready (link status reg) */

l23ready_done:
        mutex_exit(&pwr_p->pwr_intr_lock);
        mutex_exit(&pwr_p->pwr_lock);
        return (ret);
}

/*
 * Extract the driver's binding name to identify which chip we're binding to.
 * Whenever a new bus bridge is created, the driver alias entry should be
 * added here to identify the device if needed.  If a device isn't added,
 * the identity defaults to PX_CHIP_UNIDENTIFIED.
 */
static uint32_t
px_identity_chip(px_t *px_p)
{
        dev_info_t *dip = px_p->px_dip;
        char *name = ddi_binding_name(dip);
        uint32_t revision = 0;

        revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
            "module-revision#", 0);

        /* Check for Fire driver binding name */
        if (strcmp(name, "pci108e,80f0") == 0) {
                DBG(DBG_ATTACH, dip, "px_identity_chip: %s%d: "
                    "name %s module-revision %d\n", ddi_driver_name(dip),
                    ddi_get_instance(dip), name, revision);

                return (PX_CHIP_ID(PX_CHIP_FIRE, revision, 0x00));
        }

        DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n",
            ddi_driver_name(dip), ddi_get_instance(dip), name, revision);

        return (PX_CHIP_UNIDENTIFIED);
}