1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 #include <sys/types.h> 30 #include <sys/kmem.h> 31 #include <sys/conf.h> 32 #include <sys/ddi.h> 33 #include <sys/sunddi.h> 34 #include <sys/fm/protocol.h> 35 #include <sys/fm/util.h> 36 #include <sys/modctl.h> 37 #include <sys/disp.h> 38 #include <sys/stat.h> 39 #include <sys/ddi_impldefs.h> 40 #include <sys/vmem.h> 41 #include <sys/iommutsb.h> 42 #include <sys/cpuvar.h> 43 #include <sys/ivintr.h> 44 #include <px_obj.h> 45 #include <pcie_pwr.h> 46 #include "px_tools_var.h" 47 #include <px_regs.h> 48 #include <px_csr.h> 49 #include <sys/machsystm.h> 50 #include "px_lib4u.h" 51 #include "px_err.h" 52 53 #pragma weak jbus_stst_order 54 55 extern void jbus_stst_order(); 56 57 ulong_t px_mmu_dvma_end = 0xfffffffful; 58 uint_t px_ranges_phi_mask = 0xfffffffful; 59 60 static int px_goto_l23ready(px_t *px_p); 61 static uint32_t px_identity_chip(px_t *px_p); 62 static void px_lib_clr_errs(px_t *px_p, px_pec_t *pec_p); 63 64 /* 65 * px_lib_map_registers 66 * 67 * This function is called from the attach routine to map the registers 68 * accessed by this driver. 69 * 70 * used by: px_attach() 71 * 72 * return value: DDI_FAILURE on failure 73 */ 74 int 75 px_lib_map_regs(pxu_t *pxu_p, dev_info_t *dip) 76 { 77 ddi_device_acc_attr_t attr; 78 px_reg_bank_t reg_bank = PX_REG_CSR; 79 80 DBG(DBG_ATTACH, dip, "px_lib_map_regs: pxu_p:0x%p, dip 0x%p\n", 81 pxu_p, dip); 82 83 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0; 84 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; 85 attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC; 86 87 /* 88 * PCI CSR Base 89 */ 90 if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank], 91 0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) { 92 goto fail; 93 } 94 95 reg_bank++; 96 97 /* 98 * XBUS CSR Base 99 */ 100 if (ddi_regs_map_setup(dip, reg_bank, &pxu_p->px_address[reg_bank], 101 0, 0, &attr, &pxu_p->px_ac[reg_bank]) != DDI_SUCCESS) { 102 goto fail; 103 } 104 105 pxu_p->px_address[reg_bank] -= FIRE_CONTROL_STATUS; 106 107 done: 108 for (; reg_bank >= PX_REG_CSR; reg_bank--) { 109 DBG(DBG_ATTACH, dip, "reg_bank 0x%x address 0x%p\n", 110 reg_bank, pxu_p->px_address[reg_bank]); 111 } 112 113 return (DDI_SUCCESS); 114 115 fail: 116 cmn_err(CE_WARN, "%s%d: unable to map reg entry %d\n", 117 ddi_driver_name(dip), ddi_get_instance(dip), reg_bank); 118 119 for (reg_bank--; reg_bank >= PX_REG_CSR; reg_bank--) { 120 pxu_p->px_address[reg_bank] = NULL; 121 ddi_regs_map_free(&pxu_p->px_ac[reg_bank]); 122 } 123 124 return (DDI_FAILURE); 125 } 126 127 /* 128 * px_lib_unmap_regs: 129 * 130 * This routine unmaps the registers mapped by map_px_registers. 131 * 132 * used by: px_detach(), and error conditions in px_attach() 133 * 134 * return value: none 135 */ 136 void 137 px_lib_unmap_regs(pxu_t *pxu_p) 138 { 139 int i; 140 141 for (i = 0; i < PX_REG_MAX; i++) { 142 if (pxu_p->px_ac[i]) 143 ddi_regs_map_free(&pxu_p->px_ac[i]); 144 } 145 } 146 147 int 148 px_lib_dev_init(dev_info_t *dip, devhandle_t *dev_hdl) 149 { 150 px_t *px_p = DIP_TO_STATE(dip); 151 caddr_t xbc_csr_base, csr_base; 152 px_dvma_range_prop_t px_dvma_range; 153 uint32_t chip_id; 154 pxu_t *pxu_p; 155 156 DBG(DBG_ATTACH, dip, "px_lib_dev_init: dip 0x%p\n", dip); 157 158 if ((chip_id = px_identity_chip(px_p)) == PX_CHIP_UNIDENTIFIED) 159 return (DDI_FAILURE); 160 161 switch (chip_id) { 162 case FIRE_VER_10: 163 DBG(DBG_ATTACH, dip, "FIRE Hardware Version 1.0\n"); 164 break; 165 case FIRE_VER_20: 166 DBG(DBG_ATTACH, dip, "FIRE Hardware Version 2.0\n"); 167 break; 168 default: 169 cmn_err(CE_WARN, "%s%d: FIRE Hardware Version Unknown\n", 170 ddi_driver_name(dip), ddi_get_instance(dip)); 171 return (DDI_FAILURE); 172 } 173 174 /* 175 * Allocate platform specific structure and link it to 176 * the px state structure. 177 */ 178 pxu_p = kmem_zalloc(sizeof (pxu_t), KM_SLEEP); 179 pxu_p->chip_id = chip_id; 180 pxu_p->portid = ddi_getprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 181 "portid", -1); 182 183 /* Map in the registers */ 184 if (px_lib_map_regs(pxu_p, dip) == DDI_FAILURE) { 185 kmem_free(pxu_p, sizeof (pxu_t)); 186 187 return (DDI_FAILURE); 188 } 189 190 xbc_csr_base = (caddr_t)pxu_p->px_address[PX_REG_XBC]; 191 csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR]; 192 193 pxu_p->tsb_cookie = iommu_tsb_alloc(pxu_p->portid); 194 pxu_p->tsb_size = iommu_tsb_cookie_to_size(pxu_p->tsb_cookie); 195 pxu_p->tsb_vaddr = iommu_tsb_cookie_to_va(pxu_p->tsb_cookie); 196 197 /* 198 * Create "virtual-dma" property to support child devices 199 * needing to know DVMA range. 200 */ 201 px_dvma_range.dvma_base = (uint32_t)px_mmu_dvma_end + 1 202 - ((pxu_p->tsb_size >> 3) << MMU_PAGE_SHIFT); 203 px_dvma_range.dvma_len = (uint32_t) 204 px_mmu_dvma_end - px_dvma_range.dvma_base + 1; 205 206 (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 207 "virtual-dma", (caddr_t)&px_dvma_range, 208 sizeof (px_dvma_range_prop_t)); 209 /* 210 * Initilize all fire hardware specific blocks. 211 */ 212 hvio_cb_init(xbc_csr_base, pxu_p); 213 hvio_ib_init(csr_base, pxu_p); 214 hvio_pec_init(csr_base, pxu_p); 215 hvio_mmu_init(csr_base, pxu_p); 216 217 px_p->px_plat_p = (void *)pxu_p; 218 219 /* 220 * Initialize all the interrupt handlers 221 */ 222 px_err_reg_enable(px_p, PX_ERR_JBC); 223 px_err_reg_enable(px_p, PX_ERR_MMU); 224 px_err_reg_enable(px_p, PX_ERR_IMU); 225 px_err_reg_enable(px_p, PX_ERR_TLU_UE); 226 px_err_reg_enable(px_p, PX_ERR_TLU_CE); 227 px_err_reg_enable(px_p, PX_ERR_TLU_OE); 228 px_err_reg_enable(px_p, PX_ERR_ILU); 229 px_err_reg_enable(px_p, PX_ERR_LPU_LINK); 230 px_err_reg_enable(px_p, PX_ERR_LPU_PHY); 231 px_err_reg_enable(px_p, PX_ERR_LPU_RX); 232 px_err_reg_enable(px_p, PX_ERR_LPU_TX); 233 px_err_reg_enable(px_p, PX_ERR_LPU_LTSSM); 234 px_err_reg_enable(px_p, PX_ERR_LPU_GIGABLZ); 235 236 /* Initilize device handle */ 237 *dev_hdl = (devhandle_t)csr_base; 238 239 DBG(DBG_ATTACH, dip, "px_lib_dev_init: dev_hdl 0x%llx\n", *dev_hdl); 240 241 return (DDI_SUCCESS); 242 } 243 244 int 245 px_lib_dev_fini(dev_info_t *dip) 246 { 247 px_t *px_p = DIP_TO_STATE(dip); 248 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 249 250 DBG(DBG_DETACH, dip, "px_lib_dev_fini: dip 0x%p\n", dip); 251 252 /* 253 * Deinitialize all the interrupt handlers 254 */ 255 px_err_reg_disable(px_p, PX_ERR_JBC); 256 px_err_reg_disable(px_p, PX_ERR_MMU); 257 px_err_reg_disable(px_p, PX_ERR_IMU); 258 px_err_reg_disable(px_p, PX_ERR_TLU_UE); 259 px_err_reg_disable(px_p, PX_ERR_TLU_CE); 260 px_err_reg_disable(px_p, PX_ERR_TLU_OE); 261 px_err_reg_disable(px_p, PX_ERR_ILU); 262 px_err_reg_disable(px_p, PX_ERR_LPU_LINK); 263 px_err_reg_disable(px_p, PX_ERR_LPU_PHY); 264 px_err_reg_disable(px_p, PX_ERR_LPU_RX); 265 px_err_reg_disable(px_p, PX_ERR_LPU_TX); 266 px_err_reg_disable(px_p, PX_ERR_LPU_LTSSM); 267 px_err_reg_disable(px_p, PX_ERR_LPU_GIGABLZ); 268 269 iommu_tsb_free(pxu_p->tsb_cookie); 270 271 px_lib_unmap_regs((pxu_t *)px_p->px_plat_p); 272 kmem_free(px_p->px_plat_p, sizeof (pxu_t)); 273 px_p->px_plat_p = NULL; 274 275 return (DDI_SUCCESS); 276 } 277 278 /*ARGSUSED*/ 279 int 280 px_lib_intr_devino_to_sysino(dev_info_t *dip, devino_t devino, 281 sysino_t *sysino) 282 { 283 px_t *px_p = DIP_TO_STATE(dip); 284 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 285 uint64_t ret; 286 287 DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: dip 0x%p " 288 "devino 0x%x\n", dip, devino); 289 290 if ((ret = hvio_intr_devino_to_sysino(DIP_TO_HANDLE(dip), 291 pxu_p, devino, sysino)) != H_EOK) { 292 DBG(DBG_LIB_INT, dip, 293 "hvio_intr_devino_to_sysino failed, ret 0x%lx\n", ret); 294 return (DDI_FAILURE); 295 } 296 297 DBG(DBG_LIB_INT, dip, "px_lib_intr_devino_to_sysino: sysino 0x%llx\n", 298 *sysino); 299 300 return (DDI_SUCCESS); 301 } 302 303 /*ARGSUSED*/ 304 int 305 px_lib_intr_getvalid(dev_info_t *dip, sysino_t sysino, 306 intr_valid_state_t *intr_valid_state) 307 { 308 uint64_t ret; 309 310 DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: dip 0x%p sysino 0x%llx\n", 311 dip, sysino); 312 313 if ((ret = hvio_intr_getvalid(DIP_TO_HANDLE(dip), 314 sysino, intr_valid_state)) != H_EOK) { 315 DBG(DBG_LIB_INT, dip, "hvio_intr_getvalid failed, ret 0x%lx\n", 316 ret); 317 return (DDI_FAILURE); 318 } 319 320 DBG(DBG_LIB_INT, dip, "px_lib_intr_getvalid: intr_valid_state 0x%x\n", 321 *intr_valid_state); 322 323 return (DDI_SUCCESS); 324 } 325 326 /*ARGSUSED*/ 327 int 328 px_lib_intr_setvalid(dev_info_t *dip, sysino_t sysino, 329 intr_valid_state_t intr_valid_state) 330 { 331 uint64_t ret; 332 333 DBG(DBG_LIB_INT, dip, "px_lib_intr_setvalid: dip 0x%p sysino 0x%llx " 334 "intr_valid_state 0x%x\n", dip, sysino, intr_valid_state); 335 336 if ((ret = hvio_intr_setvalid(DIP_TO_HANDLE(dip), 337 sysino, intr_valid_state)) != H_EOK) { 338 DBG(DBG_LIB_INT, dip, "hvio_intr_setvalid failed, ret 0x%lx\n", 339 ret); 340 return (DDI_FAILURE); 341 } 342 343 return (DDI_SUCCESS); 344 } 345 346 /*ARGSUSED*/ 347 int 348 px_lib_intr_getstate(dev_info_t *dip, sysino_t sysino, 349 intr_state_t *intr_state) 350 { 351 uint64_t ret; 352 353 DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: dip 0x%p sysino 0x%llx\n", 354 dip, sysino); 355 356 if ((ret = hvio_intr_getstate(DIP_TO_HANDLE(dip), 357 sysino, intr_state)) != H_EOK) { 358 DBG(DBG_LIB_INT, dip, "hvio_intr_getstate failed, ret 0x%lx\n", 359 ret); 360 return (DDI_FAILURE); 361 } 362 363 DBG(DBG_LIB_INT, dip, "px_lib_intr_getstate: intr_state 0x%x\n", 364 *intr_state); 365 366 return (DDI_SUCCESS); 367 } 368 369 /*ARGSUSED*/ 370 int 371 px_lib_intr_setstate(dev_info_t *dip, sysino_t sysino, 372 intr_state_t intr_state) 373 { 374 uint64_t ret; 375 376 DBG(DBG_LIB_INT, dip, "px_lib_intr_setstate: dip 0x%p sysino 0x%llx " 377 "intr_state 0x%x\n", dip, sysino, intr_state); 378 379 if ((ret = hvio_intr_setstate(DIP_TO_HANDLE(dip), 380 sysino, intr_state)) != H_EOK) { 381 DBG(DBG_LIB_INT, dip, "hvio_intr_setstate failed, ret 0x%lx\n", 382 ret); 383 return (DDI_FAILURE); 384 } 385 386 return (DDI_SUCCESS); 387 } 388 389 /*ARGSUSED*/ 390 int 391 px_lib_intr_gettarget(dev_info_t *dip, sysino_t sysino, cpuid_t *cpuid) 392 { 393 uint64_t ret; 394 395 DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: dip 0x%p sysino 0x%llx\n", 396 dip, sysino); 397 398 if ((ret = hvio_intr_gettarget(DIP_TO_HANDLE(dip), 399 sysino, cpuid)) != H_EOK) { 400 DBG(DBG_LIB_INT, dip, "hvio_intr_gettarget failed, ret 0x%lx\n", 401 ret); 402 return (DDI_FAILURE); 403 } 404 405 DBG(DBG_LIB_INT, dip, "px_lib_intr_gettarget: cpuid 0x%x\n", cpuid); 406 407 return (DDI_SUCCESS); 408 } 409 410 /*ARGSUSED*/ 411 int 412 px_lib_intr_settarget(dev_info_t *dip, sysino_t sysino, cpuid_t cpuid) 413 { 414 uint64_t ret; 415 416 DBG(DBG_LIB_INT, dip, "px_lib_intr_settarget: dip 0x%p sysino 0x%llx " 417 "cpuid 0x%x\n", dip, sysino, cpuid); 418 419 if ((ret = hvio_intr_settarget(DIP_TO_HANDLE(dip), 420 sysino, cpuid)) != H_EOK) { 421 DBG(DBG_LIB_INT, dip, "hvio_intr_settarget failed, ret 0x%lx\n", 422 ret); 423 return (DDI_FAILURE); 424 } 425 426 return (DDI_SUCCESS); 427 } 428 429 /*ARGSUSED*/ 430 int 431 px_lib_intr_reset(dev_info_t *dip) 432 { 433 devino_t ino; 434 sysino_t sysino; 435 436 DBG(DBG_LIB_INT, dip, "px_lib_intr_reset: dip 0x%p\n", dip); 437 438 /* Reset all Interrupts */ 439 for (ino = 0; ino < INTERRUPT_MAPPING_ENTRIES; ino++) { 440 if (px_lib_intr_devino_to_sysino(dip, ino, 441 &sysino) != DDI_SUCCESS) 442 return (BF_FATAL); 443 444 if (px_lib_intr_setstate(dip, sysino, 445 INTR_IDLE_STATE) != DDI_SUCCESS) 446 return (BF_FATAL); 447 } 448 449 return (BF_NONE); 450 } 451 452 /*ARGSUSED*/ 453 int 454 px_lib_iommu_map(dev_info_t *dip, tsbid_t tsbid, pages_t pages, 455 io_attributes_t io_attributes, void *addr, size_t pfn_index, 456 int flag) 457 { 458 px_t *px_p = DIP_TO_STATE(dip); 459 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 460 uint64_t ret; 461 462 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_map: dip 0x%p tsbid 0x%llx " 463 "pages 0x%x atrr 0x%x addr 0x%p pfn_index 0x%llx, flag 0x%x\n", 464 dip, tsbid, pages, io_attributes, addr, pfn_index, flag); 465 466 if ((ret = hvio_iommu_map(px_p->px_dev_hdl, pxu_p, tsbid, pages, 467 io_attributes, addr, pfn_index, flag)) != H_EOK) { 468 DBG(DBG_LIB_DMA, dip, 469 "px_lib_iommu_map failed, ret 0x%lx\n", ret); 470 return (DDI_FAILURE); 471 } 472 473 return (DDI_SUCCESS); 474 } 475 476 /*ARGSUSED*/ 477 int 478 px_lib_iommu_demap(dev_info_t *dip, tsbid_t tsbid, pages_t pages) 479 { 480 px_t *px_p = DIP_TO_STATE(dip); 481 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 482 uint64_t ret; 483 484 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_demap: dip 0x%p tsbid 0x%llx " 485 "pages 0x%x\n", dip, tsbid, pages); 486 487 if ((ret = hvio_iommu_demap(px_p->px_dev_hdl, pxu_p, tsbid, pages)) 488 != H_EOK) { 489 DBG(DBG_LIB_DMA, dip, 490 "px_lib_iommu_demap failed, ret 0x%lx\n", ret); 491 492 return (DDI_FAILURE); 493 } 494 495 return (DDI_SUCCESS); 496 } 497 498 /*ARGSUSED*/ 499 int 500 px_lib_iommu_getmap(dev_info_t *dip, tsbid_t tsbid, 501 io_attributes_t *attributes_p, r_addr_t *r_addr_p) 502 { 503 px_t *px_p = DIP_TO_STATE(dip); 504 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 505 uint64_t ret; 506 507 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: dip 0x%p tsbid 0x%llx\n", 508 dip, tsbid); 509 510 if ((ret = hvio_iommu_getmap(DIP_TO_HANDLE(dip), pxu_p, tsbid, 511 attributes_p, r_addr_p)) != H_EOK) { 512 DBG(DBG_LIB_DMA, dip, 513 "hvio_iommu_getmap failed, ret 0x%lx\n", ret); 514 515 return ((ret == H_ENOMAP) ? DDI_DMA_NOMAPPING:DDI_FAILURE); 516 } 517 518 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getmap: attr 0x%x r_addr 0x%llx\n", 519 *attributes_p, *r_addr_p); 520 521 return (DDI_SUCCESS); 522 } 523 524 525 /* 526 * Checks dma attributes against system bypass ranges 527 * The bypass range is determined by the hardware. Return them so the 528 * common code can do generic checking against them. 529 */ 530 /*ARGSUSED*/ 531 int 532 px_lib_dma_bypass_rngchk(ddi_dma_attr_t *attrp, uint64_t *lo_p, uint64_t *hi_p) 533 { 534 *lo_p = MMU_BYPASS_BASE; 535 *hi_p = MMU_BYPASS_END; 536 537 return (DDI_SUCCESS); 538 } 539 540 541 /*ARGSUSED*/ 542 int 543 px_lib_iommu_getbypass(dev_info_t *dip, r_addr_t ra, 544 io_attributes_t io_attributes, io_addr_t *io_addr_p) 545 { 546 uint64_t ret; 547 548 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: dip 0x%p ra 0x%llx " 549 "attr 0x%x\n", dip, ra, io_attributes); 550 551 if ((ret = hvio_iommu_getbypass(DIP_TO_HANDLE(dip), ra, 552 io_attributes, io_addr_p)) != H_EOK) { 553 DBG(DBG_LIB_DMA, dip, 554 "hvio_iommu_getbypass failed, ret 0x%lx\n", ret); 555 return (DDI_FAILURE); 556 } 557 558 DBG(DBG_LIB_DMA, dip, "px_lib_iommu_getbypass: io_addr 0x%llx\n", 559 *io_addr_p); 560 561 return (DDI_SUCCESS); 562 } 563 564 /* 565 * bus dma sync entry point. 566 */ 567 /*ARGSUSED*/ 568 int 569 px_lib_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 570 off_t off, size_t len, uint_t cache_flags) 571 { 572 ddi_dma_impl_t *mp = (ddi_dma_impl_t *)handle; 573 574 DBG(DBG_LIB_DMA, dip, "px_lib_dma_sync: dip 0x%p rdip 0x%p " 575 "handle 0x%llx off 0x%x len 0x%x flags 0x%x\n", 576 dip, rdip, handle, off, len, cache_flags); 577 578 /* 579 * jbus_stst_order is found only in certain cpu modules. 580 * Just return success if not present. 581 */ 582 if (&jbus_stst_order == NULL) 583 return (DDI_SUCCESS); 584 585 if (!(mp->dmai_flags & DMAI_FLAGS_INUSE)) { 586 cmn_err(CE_WARN, "%s%d: Unbound dma handle %p.", 587 ddi_driver_name(rdip), ddi_get_instance(rdip), (void *)mp); 588 589 return (DDI_FAILURE); 590 } 591 592 if (mp->dmai_flags & DMAI_FLAGS_NOSYNC) 593 return (DDI_SUCCESS); 594 595 /* 596 * No flush needed when sending data from memory to device. 597 * Nothing to do to "sync" memory to what device would already see. 598 */ 599 if (!(mp->dmai_rflags & DDI_DMA_READ) || 600 ((cache_flags & PX_DMA_SYNC_DDI_FLAGS) == DDI_DMA_SYNC_FORDEV)) 601 return (DDI_SUCCESS); 602 603 /* 604 * Perform necessary cpu workaround to ensure jbus ordering. 605 * CPU's internal "invalidate FIFOs" are flushed. 606 */ 607 608 #if !defined(lint) 609 kpreempt_disable(); 610 #endif 611 jbus_stst_order(); 612 #if !defined(lint) 613 kpreempt_enable(); 614 #endif 615 return (DDI_SUCCESS); 616 } 617 618 /* 619 * MSIQ Functions: 620 */ 621 /*ARGSUSED*/ 622 int 623 px_lib_msiq_init(dev_info_t *dip) 624 { 625 px_t *px_p = DIP_TO_STATE(dip); 626 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 627 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 628 caddr_t msiq_addr; 629 px_dvma_addr_t pg_index; 630 size_t size; 631 int ret; 632 633 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_init: dip 0x%p\n", dip); 634 635 /* 636 * Map the EQ memory into the Fire MMU (has to be 512KB aligned) 637 * and then initialize the base address register. 638 * 639 * Allocate entries from Fire IOMMU so that the resulting address 640 * is properly aligned. Calculate the index of the first allocated 641 * entry. Note: The size of the mapping is assumed to be a multiple 642 * of the page size. 643 */ 644 msiq_addr = (caddr_t)(((uint64_t)msiq_state_p->msiq_buf_p + 645 (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT); 646 647 size = msiq_state_p->msiq_cnt * 648 msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 649 650 pxu_p->msiq_mapped_p = vmem_xalloc(px_p->px_mmu_p->mmu_dvma_map, 651 size, (512 * 1024), 0, 0, NULL, NULL, VM_NOSLEEP | VM_BESTFIT); 652 653 if (pxu_p->msiq_mapped_p == NULL) 654 return (DDI_FAILURE); 655 656 pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p, 657 MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p)); 658 659 if ((ret = px_lib_iommu_map(px_p->px_dip, PCI_TSBID(0, pg_index), 660 MMU_BTOP(size), PCI_MAP_ATTR_WRITE, (void *)msiq_addr, 0, 661 MMU_MAP_BUF)) != DDI_SUCCESS) { 662 DBG(DBG_LIB_MSIQ, dip, 663 "hvio_msiq_init failed, ret 0x%lx\n", ret); 664 665 (void) px_lib_msiq_fini(dip); 666 return (DDI_FAILURE); 667 } 668 669 (void) hvio_msiq_init(DIP_TO_HANDLE(dip), pxu_p); 670 671 return (DDI_SUCCESS); 672 } 673 674 /*ARGSUSED*/ 675 int 676 px_lib_msiq_fini(dev_info_t *dip) 677 { 678 px_t *px_p = DIP_TO_STATE(dip); 679 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 680 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 681 px_dvma_addr_t pg_index; 682 size_t size; 683 684 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_fini: dip 0x%p\n", dip); 685 686 /* 687 * Unmap and free the EQ memory that had been mapped 688 * into the Fire IOMMU. 689 */ 690 size = msiq_state_p->msiq_cnt * 691 msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 692 693 pg_index = MMU_PAGE_INDEX(px_p->px_mmu_p, 694 MMU_BTOP((ulong_t)pxu_p->msiq_mapped_p)); 695 696 (void) px_lib_iommu_demap(px_p->px_dip, 697 PCI_TSBID(0, pg_index), MMU_BTOP(size)); 698 699 /* Free the entries from the Fire MMU */ 700 vmem_xfree(px_p->px_mmu_p->mmu_dvma_map, 701 (void *)pxu_p->msiq_mapped_p, size); 702 703 return (DDI_SUCCESS); 704 } 705 706 /*ARGSUSED*/ 707 int 708 px_lib_msiq_info(dev_info_t *dip, msiqid_t msiq_id, r_addr_t *ra_p, 709 uint_t *msiq_rec_cnt_p) 710 { 711 px_t *px_p = DIP_TO_STATE(dip); 712 px_msiq_state_t *msiq_state_p = &px_p->px_ib_p->ib_msiq_state; 713 uint64_t *msiq_addr; 714 size_t msiq_size; 715 716 DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: dip 0x%p msiq_id 0x%x\n", 717 dip, msiq_id); 718 719 msiq_addr = (uint64_t *)(((uint64_t)msiq_state_p->msiq_buf_p + 720 (MMU_PAGE_SIZE - 1)) >> MMU_PAGE_SHIFT << MMU_PAGE_SHIFT); 721 msiq_size = msiq_state_p->msiq_rec_cnt * sizeof (msiq_rec_t); 722 ra_p = (r_addr_t *)((caddr_t)msiq_addr + (msiq_id * msiq_size)); 723 724 *msiq_rec_cnt_p = msiq_state_p->msiq_rec_cnt; 725 726 DBG(DBG_LIB_MSIQ, dip, "px_msiq_info: ra_p 0x%p msiq_rec_cnt 0x%x\n", 727 ra_p, *msiq_rec_cnt_p); 728 729 return (DDI_SUCCESS); 730 } 731 732 /*ARGSUSED*/ 733 int 734 px_lib_msiq_getvalid(dev_info_t *dip, msiqid_t msiq_id, 735 pci_msiq_valid_state_t *msiq_valid_state) 736 { 737 uint64_t ret; 738 739 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: dip 0x%p msiq_id 0x%x\n", 740 dip, msiq_id); 741 742 if ((ret = hvio_msiq_getvalid(DIP_TO_HANDLE(dip), 743 msiq_id, msiq_valid_state)) != H_EOK) { 744 DBG(DBG_LIB_MSIQ, dip, 745 "hvio_msiq_getvalid failed, ret 0x%lx\n", ret); 746 return (DDI_FAILURE); 747 } 748 749 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getvalid: msiq_valid_state 0x%x\n", 750 *msiq_valid_state); 751 752 return (DDI_SUCCESS); 753 } 754 755 /*ARGSUSED*/ 756 int 757 px_lib_msiq_setvalid(dev_info_t *dip, msiqid_t msiq_id, 758 pci_msiq_valid_state_t msiq_valid_state) 759 { 760 uint64_t ret; 761 762 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setvalid: dip 0x%p msiq_id 0x%x " 763 "msiq_valid_state 0x%x\n", dip, msiq_id, msiq_valid_state); 764 765 if ((ret = hvio_msiq_setvalid(DIP_TO_HANDLE(dip), 766 msiq_id, msiq_valid_state)) != H_EOK) { 767 DBG(DBG_LIB_MSIQ, dip, 768 "hvio_msiq_setvalid failed, ret 0x%lx\n", ret); 769 return (DDI_FAILURE); 770 } 771 772 return (DDI_SUCCESS); 773 } 774 775 /*ARGSUSED*/ 776 int 777 px_lib_msiq_getstate(dev_info_t *dip, msiqid_t msiq_id, 778 pci_msiq_state_t *msiq_state) 779 { 780 uint64_t ret; 781 782 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: dip 0x%p msiq_id 0x%x\n", 783 dip, msiq_id); 784 785 if ((ret = hvio_msiq_getstate(DIP_TO_HANDLE(dip), 786 msiq_id, msiq_state)) != H_EOK) { 787 DBG(DBG_LIB_MSIQ, dip, 788 "hvio_msiq_getstate failed, ret 0x%lx\n", ret); 789 return (DDI_FAILURE); 790 } 791 792 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_getstate: msiq_state 0x%x\n", 793 *msiq_state); 794 795 return (DDI_SUCCESS); 796 } 797 798 /*ARGSUSED*/ 799 int 800 px_lib_msiq_setstate(dev_info_t *dip, msiqid_t msiq_id, 801 pci_msiq_state_t msiq_state) 802 { 803 uint64_t ret; 804 805 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_setstate: dip 0x%p msiq_id 0x%x " 806 "msiq_state 0x%x\n", dip, msiq_id, msiq_state); 807 808 if ((ret = hvio_msiq_setstate(DIP_TO_HANDLE(dip), 809 msiq_id, msiq_state)) != H_EOK) { 810 DBG(DBG_LIB_MSIQ, dip, 811 "hvio_msiq_setstate failed, ret 0x%lx\n", ret); 812 return (DDI_FAILURE); 813 } 814 815 return (DDI_SUCCESS); 816 } 817 818 /*ARGSUSED*/ 819 int 820 px_lib_msiq_gethead(dev_info_t *dip, msiqid_t msiq_id, 821 msiqhead_t *msiq_head) 822 { 823 uint64_t ret; 824 825 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: dip 0x%p msiq_id 0x%x\n", 826 dip, msiq_id); 827 828 if ((ret = hvio_msiq_gethead(DIP_TO_HANDLE(dip), 829 msiq_id, msiq_head)) != H_EOK) { 830 DBG(DBG_LIB_MSIQ, dip, 831 "hvio_msiq_gethead failed, ret 0x%lx\n", ret); 832 return (DDI_FAILURE); 833 } 834 835 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gethead: msiq_head 0x%x\n", 836 *msiq_head); 837 838 return (DDI_SUCCESS); 839 } 840 841 /*ARGSUSED*/ 842 int 843 px_lib_msiq_sethead(dev_info_t *dip, msiqid_t msiq_id, 844 msiqhead_t msiq_head) 845 { 846 uint64_t ret; 847 848 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_sethead: dip 0x%p msiq_id 0x%x " 849 "msiq_head 0x%x\n", dip, msiq_id, msiq_head); 850 851 if ((ret = hvio_msiq_sethead(DIP_TO_HANDLE(dip), 852 msiq_id, msiq_head)) != H_EOK) { 853 DBG(DBG_LIB_MSIQ, dip, 854 "hvio_msiq_sethead failed, ret 0x%lx\n", ret); 855 return (DDI_FAILURE); 856 } 857 858 return (DDI_SUCCESS); 859 } 860 861 /*ARGSUSED*/ 862 int 863 px_lib_msiq_gettail(dev_info_t *dip, msiqid_t msiq_id, 864 msiqtail_t *msiq_tail) 865 { 866 uint64_t ret; 867 868 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: dip 0x%p msiq_id 0x%x\n", 869 dip, msiq_id); 870 871 if ((ret = hvio_msiq_gettail(DIP_TO_HANDLE(dip), 872 msiq_id, msiq_tail)) != H_EOK) { 873 DBG(DBG_LIB_MSIQ, dip, 874 "hvio_msiq_gettail failed, ret 0x%lx\n", ret); 875 return (DDI_FAILURE); 876 } 877 878 DBG(DBG_LIB_MSIQ, dip, "px_lib_msiq_gettail: msiq_tail 0x%x\n", 879 *msiq_tail); 880 881 return (DDI_SUCCESS); 882 } 883 884 /*ARGSUSED*/ 885 void 886 px_lib_get_msiq_rec(dev_info_t *dip, px_msiq_t *msiq_p, msiq_rec_t *msiq_rec_p) 887 { 888 px_t *px_p = DIP_TO_STATE(dip); 889 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 890 eq_rec_t *eq_rec_p = (eq_rec_t *)msiq_p->msiq_curr; 891 892 DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: dip 0x%p eq_rec_p 0x%p\n", 893 dip, eq_rec_p); 894 895 if (!eq_rec_p->eq_rec_rid) { 896 /* Set msiq_rec_rid to zero */ 897 msiq_rec_p->msiq_rec_rid = 0; 898 899 return; 900 } 901 902 DBG(DBG_LIB_MSIQ, dip, "px_lib_get_msiq_rec: EQ RECORD, " 903 "eq_rec_rid 0x%llx eq_rec_fmt_type 0x%llx " 904 "eq_rec_len 0x%llx eq_rec_addr0 0x%llx " 905 "eq_rec_addr1 0x%llx eq_rec_data0 0x%llx " 906 "eq_rec_data1 0x%llx\n", eq_rec_p->eq_rec_rid, 907 eq_rec_p->eq_rec_fmt_type, eq_rec_p->eq_rec_len, 908 eq_rec_p->eq_rec_addr0, eq_rec_p->eq_rec_addr1, 909 eq_rec_p->eq_rec_data0, eq_rec_p->eq_rec_data1); 910 911 /* 912 * Only upper 4 bits of eq_rec_fmt_type is used 913 * to identify the EQ record type. 914 */ 915 switch (eq_rec_p->eq_rec_fmt_type >> 3) { 916 case EQ_REC_MSI32: 917 msiq_rec_p->msiq_rec_type = MSI32_REC; 918 919 if (pxu_p->chip_id == FIRE_VER_10) { 920 msiq_rec_p->msiq_rec_data.msi.msi_data = 921 (eq_rec_p->eq_rec_data0 & 0xFF) << 8 | 922 (eq_rec_p->eq_rec_data0 & 0xFF00) >> 8; 923 } else { 924 /* Default case is FIRE2.0 */ 925 msiq_rec_p->msiq_rec_data.msi.msi_data = 926 eq_rec_p->eq_rec_data0; 927 } 928 929 break; 930 case EQ_REC_MSI64: 931 msiq_rec_p->msiq_rec_type = MSI64_REC; 932 933 if (pxu_p->chip_id == FIRE_VER_10) { 934 msiq_rec_p->msiq_rec_data.msi.msi_data = 935 (eq_rec_p->eq_rec_data0 & 0xFF) << 8 | 936 (eq_rec_p->eq_rec_data0 & 0xFF00) >> 8; 937 } else { 938 /* Default case is FIRE2.0 */ 939 msiq_rec_p->msiq_rec_data.msi.msi_data = 940 eq_rec_p->eq_rec_data0; 941 } 942 943 break; 944 case EQ_REC_MSG: 945 msiq_rec_p->msiq_rec_type = MSG_REC; 946 947 msiq_rec_p->msiq_rec_data.msg.msg_route = 948 eq_rec_p->eq_rec_fmt_type & 7; 949 msiq_rec_p->msiq_rec_data.msg.msg_targ = eq_rec_p->eq_rec_rid; 950 msiq_rec_p->msiq_rec_data.msg.msg_code = eq_rec_p->eq_rec_data0; 951 break; 952 default: 953 cmn_err(CE_WARN, "%s%d: px_lib_get_msiq_rec: " 954 "0x%lx is an unknown EQ record type", 955 ddi_driver_name(dip), ddi_get_instance(dip), 956 eq_rec_p->eq_rec_fmt_type); 957 break; 958 } 959 960 msiq_rec_p->msiq_rec_rid = eq_rec_p->eq_rec_rid; 961 msiq_rec_p->msiq_rec_msi_addr = ((eq_rec_p->eq_rec_addr1 << 16) | 962 (eq_rec_p->eq_rec_addr0 << 2)); 963 964 /* Zero out eq_rec_rid field */ 965 eq_rec_p->eq_rec_rid = 0; 966 } 967 968 /* 969 * MSI Functions: 970 */ 971 /*ARGSUSED*/ 972 int 973 px_lib_msi_init(dev_info_t *dip) 974 { 975 px_t *px_p = DIP_TO_STATE(dip); 976 px_msi_state_t *msi_state_p = &px_p->px_ib_p->ib_msi_state; 977 uint64_t ret; 978 979 DBG(DBG_LIB_MSI, dip, "px_lib_msi_init: dip 0x%p\n", dip); 980 981 if ((ret = hvio_msi_init(DIP_TO_HANDLE(dip), 982 msi_state_p->msi_addr32, msi_state_p->msi_addr64)) != H_EOK) { 983 DBG(DBG_LIB_MSIQ, dip, "px_lib_msi_init failed, ret 0x%lx\n", 984 ret); 985 return (DDI_FAILURE); 986 } 987 988 return (DDI_SUCCESS); 989 } 990 991 /*ARGSUSED*/ 992 int 993 px_lib_msi_getmsiq(dev_info_t *dip, msinum_t msi_num, 994 msiqid_t *msiq_id) 995 { 996 uint64_t ret; 997 998 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: dip 0x%p msi_num 0x%x\n", 999 dip, msi_num); 1000 1001 if ((ret = hvio_msi_getmsiq(DIP_TO_HANDLE(dip), 1002 msi_num, msiq_id)) != H_EOK) { 1003 DBG(DBG_LIB_MSI, dip, 1004 "hvio_msi_getmsiq failed, ret 0x%lx\n", ret); 1005 return (DDI_FAILURE); 1006 } 1007 1008 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getmsiq: msiq_id 0x%x\n", 1009 *msiq_id); 1010 1011 return (DDI_SUCCESS); 1012 } 1013 1014 /*ARGSUSED*/ 1015 int 1016 px_lib_msi_setmsiq(dev_info_t *dip, msinum_t msi_num, 1017 msiqid_t msiq_id, msi_type_t msitype) 1018 { 1019 uint64_t ret; 1020 1021 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setmsiq: dip 0x%p msi_num 0x%x " 1022 "msq_id 0x%x\n", dip, msi_num, msiq_id); 1023 1024 if ((ret = hvio_msi_setmsiq(DIP_TO_HANDLE(dip), 1025 msi_num, msiq_id)) != H_EOK) { 1026 DBG(DBG_LIB_MSI, dip, 1027 "hvio_msi_setmsiq failed, ret 0x%lx\n", ret); 1028 return (DDI_FAILURE); 1029 } 1030 1031 return (DDI_SUCCESS); 1032 } 1033 1034 /*ARGSUSED*/ 1035 int 1036 px_lib_msi_getvalid(dev_info_t *dip, msinum_t msi_num, 1037 pci_msi_valid_state_t *msi_valid_state) 1038 { 1039 uint64_t ret; 1040 1041 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: dip 0x%p msi_num 0x%x\n", 1042 dip, msi_num); 1043 1044 if ((ret = hvio_msi_getvalid(DIP_TO_HANDLE(dip), 1045 msi_num, msi_valid_state)) != H_EOK) { 1046 DBG(DBG_LIB_MSI, dip, 1047 "hvio_msi_getvalid failed, ret 0x%lx\n", ret); 1048 return (DDI_FAILURE); 1049 } 1050 1051 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getvalid: msiq_id 0x%x\n", 1052 *msi_valid_state); 1053 1054 return (DDI_SUCCESS); 1055 } 1056 1057 /*ARGSUSED*/ 1058 int 1059 px_lib_msi_setvalid(dev_info_t *dip, msinum_t msi_num, 1060 pci_msi_valid_state_t msi_valid_state) 1061 { 1062 uint64_t ret; 1063 1064 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setvalid: dip 0x%p msi_num 0x%x " 1065 "msi_valid_state 0x%x\n", dip, msi_num, msi_valid_state); 1066 1067 if ((ret = hvio_msi_setvalid(DIP_TO_HANDLE(dip), 1068 msi_num, msi_valid_state)) != H_EOK) { 1069 DBG(DBG_LIB_MSI, dip, 1070 "hvio_msi_setvalid failed, ret 0x%lx\n", ret); 1071 return (DDI_FAILURE); 1072 } 1073 1074 return (DDI_SUCCESS); 1075 } 1076 1077 /*ARGSUSED*/ 1078 int 1079 px_lib_msi_getstate(dev_info_t *dip, msinum_t msi_num, 1080 pci_msi_state_t *msi_state) 1081 { 1082 uint64_t ret; 1083 1084 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: dip 0x%p msi_num 0x%x\n", 1085 dip, msi_num); 1086 1087 if ((ret = hvio_msi_getstate(DIP_TO_HANDLE(dip), 1088 msi_num, msi_state)) != H_EOK) { 1089 DBG(DBG_LIB_MSI, dip, 1090 "hvio_msi_getstate failed, ret 0x%lx\n", ret); 1091 return (DDI_FAILURE); 1092 } 1093 1094 DBG(DBG_LIB_MSI, dip, "px_lib_msi_getstate: msi_state 0x%x\n", 1095 *msi_state); 1096 1097 return (DDI_SUCCESS); 1098 } 1099 1100 /*ARGSUSED*/ 1101 int 1102 px_lib_msi_setstate(dev_info_t *dip, msinum_t msi_num, 1103 pci_msi_state_t msi_state) 1104 { 1105 uint64_t ret; 1106 1107 DBG(DBG_LIB_MSI, dip, "px_lib_msi_setstate: dip 0x%p msi_num 0x%x " 1108 "msi_state 0x%x\n", dip, msi_num, msi_state); 1109 1110 if ((ret = hvio_msi_setstate(DIP_TO_HANDLE(dip), 1111 msi_num, msi_state)) != H_EOK) { 1112 DBG(DBG_LIB_MSI, dip, 1113 "hvio_msi_setstate failed, ret 0x%lx\n", ret); 1114 return (DDI_FAILURE); 1115 } 1116 1117 return (DDI_SUCCESS); 1118 } 1119 1120 /* 1121 * MSG Functions: 1122 */ 1123 /*ARGSUSED*/ 1124 int 1125 px_lib_msg_getmsiq(dev_info_t *dip, pcie_msg_type_t msg_type, 1126 msiqid_t *msiq_id) 1127 { 1128 uint64_t ret; 1129 1130 DBG(DBG_LIB_MSG, dip, "px_lib_msg_getmsiq: dip 0x%p msg_type 0x%x\n", 1131 dip, msg_type); 1132 1133 if ((ret = hvio_msg_getmsiq(DIP_TO_HANDLE(dip), 1134 msg_type, msiq_id)) != H_EOK) { 1135 DBG(DBG_LIB_MSG, dip, 1136 "hvio_msg_getmsiq failed, ret 0x%lx\n", ret); 1137 return (DDI_FAILURE); 1138 } 1139 1140 DBG(DBG_LIB_MSI, dip, "px_lib_msg_getmsiq: msiq_id 0x%x\n", 1141 *msiq_id); 1142 1143 return (DDI_SUCCESS); 1144 } 1145 1146 /*ARGSUSED*/ 1147 int 1148 px_lib_msg_setmsiq(dev_info_t *dip, pcie_msg_type_t msg_type, 1149 msiqid_t msiq_id) 1150 { 1151 uint64_t ret; 1152 1153 DBG(DBG_LIB_MSG, dip, "px_lib_msi_setstate: dip 0x%p msg_type 0x%x " 1154 "msiq_id 0x%x\n", dip, msg_type, msiq_id); 1155 1156 if ((ret = hvio_msg_setmsiq(DIP_TO_HANDLE(dip), 1157 msg_type, msiq_id)) != H_EOK) { 1158 DBG(DBG_LIB_MSG, dip, 1159 "hvio_msg_setmsiq failed, ret 0x%lx\n", ret); 1160 return (DDI_FAILURE); 1161 } 1162 1163 return (DDI_SUCCESS); 1164 } 1165 1166 /*ARGSUSED*/ 1167 int 1168 px_lib_msg_getvalid(dev_info_t *dip, pcie_msg_type_t msg_type, 1169 pcie_msg_valid_state_t *msg_valid_state) 1170 { 1171 uint64_t ret; 1172 1173 DBG(DBG_LIB_MSG, dip, "px_lib_msg_getvalid: dip 0x%p msg_type 0x%x\n", 1174 dip, msg_type); 1175 1176 if ((ret = hvio_msg_getvalid(DIP_TO_HANDLE(dip), msg_type, 1177 msg_valid_state)) != H_EOK) { 1178 DBG(DBG_LIB_MSG, dip, 1179 "hvio_msg_getvalid failed, ret 0x%lx\n", ret); 1180 return (DDI_FAILURE); 1181 } 1182 1183 DBG(DBG_LIB_MSI, dip, "px_lib_msg_getvalid: msg_valid_state 0x%x\n", 1184 *msg_valid_state); 1185 1186 return (DDI_SUCCESS); 1187 } 1188 1189 /*ARGSUSED*/ 1190 int 1191 px_lib_msg_setvalid(dev_info_t *dip, pcie_msg_type_t msg_type, 1192 pcie_msg_valid_state_t msg_valid_state) 1193 { 1194 uint64_t ret; 1195 1196 DBG(DBG_LIB_MSG, dip, "px_lib_msg_setvalid: dip 0x%p msg_type 0x%x " 1197 "msg_valid_state 0x%x\n", dip, msg_type, msg_valid_state); 1198 1199 if ((ret = hvio_msg_setvalid(DIP_TO_HANDLE(dip), msg_type, 1200 msg_valid_state)) != H_EOK) { 1201 DBG(DBG_LIB_MSG, dip, 1202 "hvio_msg_setvalid failed, ret 0x%lx\n", ret); 1203 return (DDI_FAILURE); 1204 } 1205 1206 return (DDI_SUCCESS); 1207 } 1208 1209 /* 1210 * Suspend/Resume Functions: 1211 * Currently unsupported by hypervisor 1212 */ 1213 int 1214 px_lib_suspend(dev_info_t *dip) 1215 { 1216 px_t *px_p = DIP_TO_STATE(dip); 1217 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1218 devhandle_t dev_hdl, xbus_dev_hdl; 1219 uint64_t ret; 1220 1221 DBG(DBG_DETACH, dip, "px_lib_suspend: dip 0x%p\n", dip); 1222 1223 dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR]; 1224 xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC]; 1225 1226 if ((ret = hvio_suspend(dev_hdl, pxu_p)) == H_EOK) { 1227 px_p->px_cb_p->xbc_attachcnt--; 1228 if (px_p->px_cb_p->xbc_attachcnt == 0) 1229 if ((ret = hvio_cb_suspend(xbus_dev_hdl, pxu_p)) 1230 != H_EOK) 1231 px_p->px_cb_p->xbc_attachcnt++; 1232 } 1233 1234 return ((ret != H_EOK) ? DDI_FAILURE: DDI_SUCCESS); 1235 } 1236 1237 void 1238 px_lib_resume(dev_info_t *dip) 1239 { 1240 px_t *px_p = DIP_TO_STATE(dip); 1241 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1242 devhandle_t dev_hdl, xbus_dev_hdl; 1243 devino_t pec_ino = px_p->px_inos[PX_INTR_PEC]; 1244 devino_t xbc_ino = px_p->px_inos[PX_INTR_XBC]; 1245 1246 DBG(DBG_ATTACH, dip, "px_lib_resume: dip 0x%p\n", dip); 1247 1248 dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_CSR]; 1249 xbus_dev_hdl = (devhandle_t)pxu_p->px_address[PX_REG_XBC]; 1250 1251 px_p->px_cb_p->xbc_attachcnt++; 1252 if (px_p->px_cb_p->xbc_attachcnt == 1) 1253 hvio_cb_resume(dev_hdl, xbus_dev_hdl, xbc_ino, pxu_p); 1254 hvio_resume(dev_hdl, pec_ino, pxu_p); 1255 } 1256 1257 /* 1258 * PCI tool Functions: 1259 * Currently unsupported by hypervisor 1260 */ 1261 /*ARGSUSED*/ 1262 int 1263 px_lib_tools_dev_reg_ops(dev_info_t *dip, void *arg, int cmd, int mode) 1264 { 1265 px_t *px_p = DIP_TO_STATE(dip); 1266 1267 DBG(DBG_TOOLS, dip, "px_lib_tools_dev_reg_ops: dip 0x%p arg 0x%p " 1268 "cmd 0x%x mode 0x%x\n", dip, arg, cmd, mode); 1269 1270 return (px_dev_reg_ops(dip, arg, cmd, mode, px_p)); 1271 } 1272 1273 /*ARGSUSED*/ 1274 int 1275 px_lib_tools_bus_reg_ops(dev_info_t *dip, void *arg, int cmd, int mode) 1276 { 1277 DBG(DBG_TOOLS, dip, "px_lib_tools_bus_reg_ops: dip 0x%p arg 0x%p " 1278 "cmd 0x%x mode 0x%x\n", dip, arg, cmd, mode); 1279 1280 return (px_bus_reg_ops(dip, arg, cmd, mode)); 1281 } 1282 1283 /*ARGSUSED*/ 1284 int 1285 px_lib_tools_intr_admn(dev_info_t *dip, void *arg, int cmd, int mode) 1286 { 1287 px_t *px_p = DIP_TO_STATE(dip); 1288 1289 DBG(DBG_TOOLS, dip, "px_lib_tools_intr_admn: dip 0x%p arg 0x%p " 1290 "cmd 0x%x mode 0x%x\n", dip, arg, cmd, mode); 1291 1292 return (px_intr_admn(dip, arg, cmd, mode, px_p)); 1293 } 1294 1295 /* 1296 * Misc Functions: 1297 * Currently unsupported by hypervisor 1298 */ 1299 uint64_t 1300 px_lib_get_cb(dev_info_t *dip) 1301 { 1302 px_t *px_p = DIP_TO_STATE(dip); 1303 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1304 1305 return (CSR_XR((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1)); 1306 } 1307 1308 void 1309 px_lib_set_cb(dev_info_t *dip, uint64_t val) 1310 { 1311 px_t *px_p = DIP_TO_STATE(dip); 1312 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1313 1314 CSR_XS((caddr_t)pxu_p->px_address[PX_REG_XBC], JBUS_SCRATCH_1, val); 1315 } 1316 1317 /*ARGSUSED*/ 1318 int 1319 px_lib_map_vconfig(dev_info_t *dip, 1320 ddi_map_req_t *mp, pci_config_offset_t off, 1321 pci_regspec_t *rp, caddr_t *addrp) 1322 { 1323 /* 1324 * No special config space access services in this layer. 1325 */ 1326 return (DDI_FAILURE); 1327 } 1328 1329 static void 1330 px_lib_clr_errs(px_t *px_p, px_pec_t *pec_p) 1331 { 1332 dev_info_t *rpdip = px_p->px_dip; 1333 px_cb_t *cb_p = px_p->px_cb_p; 1334 int err = PX_OK, ret; 1335 int acctype = pec_p->pec_safeacc_type; 1336 ddi_fm_error_t derr; 1337 1338 /* Create the derr */ 1339 bzero(&derr, sizeof (ddi_fm_error_t)); 1340 derr.fme_version = DDI_FME_VERSION; 1341 derr.fme_ena = fm_ena_generate(0, FM_ENA_FMT1); 1342 derr.fme_flag = acctype; 1343 1344 if (acctype == DDI_FM_ERR_EXPECTED) { 1345 derr.fme_status = DDI_FM_NONFATAL; 1346 ndi_fm_acc_err_set(pec_p->pec_acc_hdl, &derr); 1347 } 1348 1349 mutex_enter(&cb_p->xbc_fm_mutex); 1350 1351 /* send ereport/handle/clear fire registers */ 1352 err = px_err_handle(px_p, &derr, PX_LIB_CALL, B_TRUE); 1353 1354 /* Check all child devices for errors */ 1355 ret = ndi_fm_handler_dispatch(rpdip, NULL, &derr); 1356 1357 mutex_exit(&cb_p->xbc_fm_mutex); 1358 1359 /* 1360 * PX_FATAL_HW indicates a condition recovered from Fatal-Reset, 1361 * therefore it does not cause panic. 1362 */ 1363 if ((err & (PX_FATAL_GOS | PX_FATAL_SW)) || (ret == DDI_FM_FATAL)) 1364 fm_panic("Fatal System Port Error has occurred\n"); 1365 } 1366 1367 #ifdef DEBUG 1368 int px_peekfault_cnt = 0; 1369 int px_pokefault_cnt = 0; 1370 #endif /* DEBUG */ 1371 1372 /*ARGSUSED*/ 1373 static int 1374 px_lib_do_poke(dev_info_t *dip, dev_info_t *rdip, 1375 peekpoke_ctlops_t *in_args) 1376 { 1377 px_t *px_p = DIP_TO_STATE(dip); 1378 px_pec_t *pec_p = px_p->px_pec_p; 1379 int err = DDI_SUCCESS; 1380 on_trap_data_t otd; 1381 1382 mutex_enter(&pec_p->pec_pokefault_mutex); 1383 pec_p->pec_ontrap_data = &otd; 1384 pec_p->pec_safeacc_type = DDI_FM_ERR_POKE; 1385 1386 /* Set up protected environment. */ 1387 if (!on_trap(&otd, OT_DATA_ACCESS)) { 1388 uintptr_t tramp = otd.ot_trampoline; 1389 1390 otd.ot_trampoline = (uintptr_t)&poke_fault; 1391 err = do_poke(in_args->size, (void *)in_args->dev_addr, 1392 (void *)in_args->host_addr); 1393 otd.ot_trampoline = tramp; 1394 } else 1395 err = DDI_FAILURE; 1396 1397 px_lib_clr_errs(px_p, pec_p); 1398 1399 if (otd.ot_trap & OT_DATA_ACCESS) 1400 err = DDI_FAILURE; 1401 1402 /* Take down protected environment. */ 1403 no_trap(); 1404 1405 pec_p->pec_ontrap_data = NULL; 1406 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1407 mutex_exit(&pec_p->pec_pokefault_mutex); 1408 1409 #ifdef DEBUG 1410 if (err == DDI_FAILURE) 1411 px_pokefault_cnt++; 1412 #endif 1413 return (err); 1414 } 1415 1416 /*ARGSUSED*/ 1417 static int 1418 px_lib_do_caut_put(dev_info_t *dip, dev_info_t *rdip, 1419 peekpoke_ctlops_t *cautacc_ctlops_arg) 1420 { 1421 size_t size = cautacc_ctlops_arg->size; 1422 uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr; 1423 uintptr_t host_addr = cautacc_ctlops_arg->host_addr; 1424 ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle; 1425 size_t repcount = cautacc_ctlops_arg->repcount; 1426 uint_t flags = cautacc_ctlops_arg->flags; 1427 1428 px_t *px_p = DIP_TO_STATE(dip); 1429 px_pec_t *pec_p = px_p->px_pec_p; 1430 int err = DDI_SUCCESS; 1431 1432 /* 1433 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault 1434 * mutex. 1435 */ 1436 i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1437 1438 pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap; 1439 pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED; 1440 hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED; 1441 1442 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1443 for (; repcount; repcount--) { 1444 switch (size) { 1445 1446 case sizeof (uint8_t): 1447 i_ddi_put8(hp, (uint8_t *)dev_addr, 1448 *(uint8_t *)host_addr); 1449 break; 1450 1451 case sizeof (uint16_t): 1452 i_ddi_put16(hp, (uint16_t *)dev_addr, 1453 *(uint16_t *)host_addr); 1454 break; 1455 1456 case sizeof (uint32_t): 1457 i_ddi_put32(hp, (uint32_t *)dev_addr, 1458 *(uint32_t *)host_addr); 1459 break; 1460 1461 case sizeof (uint64_t): 1462 i_ddi_put64(hp, (uint64_t *)dev_addr, 1463 *(uint64_t *)host_addr); 1464 break; 1465 } 1466 1467 host_addr += size; 1468 1469 if (flags == DDI_DEV_AUTOINCR) 1470 dev_addr += size; 1471 1472 px_lib_clr_errs(px_p, pec_p); 1473 1474 if (pec_p->pec_ontrap_data->ot_trap & OT_DATA_ACCESS) { 1475 err = DDI_FAILURE; 1476 #ifdef DEBUG 1477 px_pokefault_cnt++; 1478 #endif 1479 break; 1480 } 1481 } 1482 } 1483 1484 i_ddi_notrap((ddi_acc_handle_t)hp); 1485 pec_p->pec_ontrap_data = NULL; 1486 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1487 i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1488 hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED; 1489 1490 return (err); 1491 } 1492 1493 1494 int 1495 px_lib_ctlops_poke(dev_info_t *dip, dev_info_t *rdip, 1496 peekpoke_ctlops_t *in_args) 1497 { 1498 return (in_args->handle ? px_lib_do_caut_put(dip, rdip, in_args) : 1499 px_lib_do_poke(dip, rdip, in_args)); 1500 } 1501 1502 1503 /*ARGSUSED*/ 1504 static int 1505 px_lib_do_peek(dev_info_t *dip, peekpoke_ctlops_t *in_args) 1506 { 1507 px_t *px_p = DIP_TO_STATE(dip); 1508 px_pec_t *pec_p = px_p->px_pec_p; 1509 int err = DDI_SUCCESS; 1510 on_trap_data_t otd; 1511 1512 mutex_enter(&pec_p->pec_pokefault_mutex); 1513 pec_p->pec_safeacc_type = DDI_FM_ERR_PEEK; 1514 1515 if (!on_trap(&otd, OT_DATA_ACCESS)) { 1516 uintptr_t tramp = otd.ot_trampoline; 1517 1518 otd.ot_trampoline = (uintptr_t)&peek_fault; 1519 err = do_peek(in_args->size, (void *)in_args->dev_addr, 1520 (void *)in_args->host_addr); 1521 otd.ot_trampoline = tramp; 1522 } else 1523 err = DDI_FAILURE; 1524 1525 no_trap(); 1526 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1527 mutex_exit(&pec_p->pec_pokefault_mutex); 1528 1529 #ifdef DEBUG 1530 if (err == DDI_FAILURE) 1531 px_peekfault_cnt++; 1532 #endif 1533 return (err); 1534 } 1535 1536 1537 static int 1538 px_lib_do_caut_get(dev_info_t *dip, peekpoke_ctlops_t *cautacc_ctlops_arg) 1539 { 1540 size_t size = cautacc_ctlops_arg->size; 1541 uintptr_t dev_addr = cautacc_ctlops_arg->dev_addr; 1542 uintptr_t host_addr = cautacc_ctlops_arg->host_addr; 1543 ddi_acc_impl_t *hp = (ddi_acc_impl_t *)cautacc_ctlops_arg->handle; 1544 size_t repcount = cautacc_ctlops_arg->repcount; 1545 uint_t flags = cautacc_ctlops_arg->flags; 1546 1547 px_t *px_p = DIP_TO_STATE(dip); 1548 px_pec_t *pec_p = px_p->px_pec_p; 1549 int err = DDI_SUCCESS; 1550 1551 /* 1552 * Note that i_ndi_busop_access_enter ends up grabbing the pokefault 1553 * mutex. 1554 */ 1555 i_ndi_busop_access_enter(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1556 1557 pec_p->pec_ontrap_data = (on_trap_data_t *)hp->ahi_err->err_ontrap; 1558 pec_p->pec_safeacc_type = DDI_FM_ERR_EXPECTED; 1559 hp->ahi_err->err_expected = DDI_FM_ERR_EXPECTED; 1560 1561 if (repcount == 1) { 1562 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1563 i_ddi_caut_get(size, (void *)dev_addr, 1564 (void *)host_addr); 1565 } else { 1566 int i; 1567 uint8_t *ff_addr = (uint8_t *)host_addr; 1568 for (i = 0; i < size; i++) 1569 *ff_addr++ = 0xff; 1570 1571 err = DDI_FAILURE; 1572 #ifdef DEBUG 1573 px_peekfault_cnt++; 1574 #endif 1575 } 1576 } else { 1577 if (!i_ddi_ontrap((ddi_acc_handle_t)hp)) { 1578 for (; repcount; repcount--) { 1579 i_ddi_caut_get(size, (void *)dev_addr, 1580 (void *)host_addr); 1581 1582 host_addr += size; 1583 1584 if (flags == DDI_DEV_AUTOINCR) 1585 dev_addr += size; 1586 } 1587 } else { 1588 err = DDI_FAILURE; 1589 #ifdef DEBUG 1590 px_peekfault_cnt++; 1591 #endif 1592 } 1593 } 1594 1595 i_ddi_notrap((ddi_acc_handle_t)hp); 1596 pec_p->pec_ontrap_data = NULL; 1597 pec_p->pec_safeacc_type = DDI_FM_ERR_UNEXPECTED; 1598 i_ndi_busop_access_exit(hp->ahi_common.ah_dip, (ddi_acc_handle_t)hp); 1599 hp->ahi_err->err_expected = DDI_FM_ERR_UNEXPECTED; 1600 1601 return (err); 1602 } 1603 1604 /*ARGSUSED*/ 1605 int 1606 px_lib_ctlops_peek(dev_info_t *dip, dev_info_t *rdip, 1607 peekpoke_ctlops_t *in_args, void *result) 1608 { 1609 result = (void *)in_args->host_addr; 1610 return (in_args->handle ? px_lib_do_caut_get(dip, in_args) : 1611 px_lib_do_peek(dip, in_args)); 1612 } 1613 /* 1614 * implements PPM interface 1615 */ 1616 int 1617 px_lib_pmctl(int cmd, px_t *px_p) 1618 { 1619 ASSERT((cmd & ~PPMREQ_MASK) == PPMREQ); 1620 switch (cmd) { 1621 case PPMREQ_PRE_PWR_OFF: 1622 /* 1623 * Currently there is no device power management for 1624 * the root complex (fire). When there is we need to make 1625 * sure that it is at full power before trying to send the 1626 * PME_Turn_Off message. 1627 */ 1628 DBG(DBG_PWR, px_p->px_dip, 1629 "ioctl: request to send PME_Turn_Off\n"); 1630 return (px_goto_l23ready(px_p)); 1631 1632 case PPMREQ_PRE_PWR_ON: 1633 case PPMREQ_POST_PWR_ON: 1634 /* code to be written for Fire 2.0. return failure for now */ 1635 return (DDI_FAILURE); 1636 1637 default: 1638 return (DDI_FAILURE); 1639 } 1640 } 1641 1642 /* 1643 * sends PME_Turn_Off message to put the link in L2/L3 ready state. 1644 * called by px_ioctl. 1645 * returns DDI_SUCCESS or DDI_FAILURE 1646 * 1. Wait for link to be in L1 state (link status reg) 1647 * 2. write to PME_Turn_off reg to boradcast 1648 * 3. set timeout 1649 * 4. If timeout, return failure. 1650 * 5. If PM_TO_Ack, wait till link is in L2/L3 ready 1651 */ 1652 static int 1653 px_goto_l23ready(px_t *px_p) 1654 { 1655 pcie_pwr_t *pwr_p; 1656 pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p; 1657 caddr_t csr_base = (caddr_t)pxu_p->px_address[PX_REG_CSR]; 1658 int ret = DDI_SUCCESS; 1659 clock_t end, timeleft; 1660 1661 /* If no PM info, return failure */ 1662 if (!PCIE_PMINFO(px_p->px_dip) || 1663 !(pwr_p = PCIE_NEXUS_PMINFO(px_p->px_dip))) 1664 return (DDI_FAILURE); 1665 1666 mutex_enter(&pwr_p->pwr_lock); 1667 mutex_enter(&pwr_p->pwr_intr_lock); 1668 /* Clear the PME_To_ACK receieved flag */ 1669 pwr_p->pwr_flags &= ~PCIE_PMETOACK_RECVD; 1670 if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS) { 1671 ret = DDI_FAILURE; 1672 goto l23ready_done; 1673 } 1674 pwr_p->pwr_flags |= PCIE_PME_TURNOFF_PENDING; 1675 1676 end = ddi_get_lbolt() + drv_usectohz(px_pme_to_ack_timeout); 1677 while (!(pwr_p->pwr_flags & PCIE_PMETOACK_RECVD)) { 1678 timeleft = cv_timedwait(&pwr_p->pwr_cv, 1679 &pwr_p->pwr_intr_lock, end); 1680 /* 1681 * if cv_timedwait returns -1, it is either 1682 * 1) timed out or 1683 * 2) there was a pre-mature wakeup but by the time 1684 * cv_timedwait is called again end < lbolt i.e. 1685 * end is in the past. 1686 * 3) By the time we make first cv_timedwait call, 1687 * end < lbolt is true. 1688 */ 1689 if (timeleft == -1) 1690 break; 1691 } 1692 if (!(pwr_p->pwr_flags & PCIE_PMETOACK_RECVD)) { 1693 /* 1694 * Either timedout or interrupt didn't get a 1695 * chance to grab the mutex and set the flag. 1696 * release the mutex and delay for sometime. 1697 * This will 1) give a chance for interrupt to 1698 * set the flag 2) creates a delay between two 1699 * consequetive requests. 1700 */ 1701 mutex_exit(&pwr_p->pwr_intr_lock); 1702 delay(5); 1703 mutex_enter(&pwr_p->pwr_intr_lock); 1704 if (!(pwr_p->pwr_flags & PCIE_PMETOACK_RECVD)) { 1705 ret = DDI_FAILURE; 1706 DBG(DBG_PWR, px_p->px_dip, " Timed out while waiting" 1707 " for PME_TO_ACK\n"); 1708 } 1709 } 1710 /* PME_To_ACK receieved */ 1711 pwr_p->pwr_flags &= ~(PCIE_PME_TURNOFF_PENDING | PCIE_PMETOACK_RECVD); 1712 1713 /* TBD: wait till link is in L2/L3 ready (link status reg) */ 1714 1715 l23ready_done: 1716 mutex_exit(&pwr_p->pwr_intr_lock); 1717 mutex_exit(&pwr_p->pwr_lock); 1718 return (ret); 1719 } 1720 1721 1722 /* 1723 * Extract the drivers binding name to identify which chip we're binding to. 1724 * Whenever a new bus bridge is created, the driver alias entry should be 1725 * added here to identify the device if needed. If a device isn't added, 1726 * the identity defaults to PX_CHIP_UNIDENTIFIED. 1727 */ 1728 static uint32_t 1729 px_identity_chip(px_t *px_p) 1730 { 1731 dev_info_t *dip = px_p->px_dip; 1732 char *name = ddi_binding_name(dip); 1733 uint32_t revision = 0; 1734 1735 revision = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1736 "module-revision#", 0); 1737 1738 /* Check for Fire driver binding name */ 1739 if (strcmp(name, "pci108e,80f0") == 0) { 1740 DBG(DBG_ATTACH, dip, "px_identity_chip: %s%d: " 1741 "name %s module-revision %d\n", ddi_driver_name(dip), 1742 ddi_get_instance(dip), name, revision); 1743 1744 return (PX_CHIP_ID(PX_CHIP_FIRE, revision, 0x00)); 1745 } 1746 1747 DBG(DBG_ATTACH, dip, "%s%d: Unknown PCI Express Host bridge %s %x\n", 1748 ddi_driver_name(dip), ddi_get_instance(dip), name, revision); 1749 1750 return (PX_CHIP_UNIDENTIFIED); 1751 } 1752 1753 int 1754 px_err_add_intr(px_fault_t *px_fault_p) 1755 { 1756 dev_info_t *dip = px_fault_p->px_fh_dip; 1757 px_t *px_p = DIP_TO_STATE(dip); 1758 1759 VERIFY(add_ivintr(px_fault_p->px_fh_sysino, PX_ERR_PIL, 1760 px_fault_p->px_err_func, (caddr_t)px_fault_p, NULL) == 0); 1761 1762 px_ib_intr_enable(px_p, intr_dist_cpuid(), px_fault_p->px_intr_ino); 1763 1764 return (DDI_SUCCESS); 1765 } 1766 1767 void 1768 px_err_rem_intr(px_fault_t *px_fault_p) 1769 { 1770 dev_info_t *dip = px_fault_p->px_fh_dip; 1771 px_t *px_p = DIP_TO_STATE(dip); 1772 1773 rem_ivintr(px_fault_p->px_fh_sysino, NULL); 1774 1775 px_ib_intr_disable(px_p->px_ib_p, px_fault_p->px_intr_ino, 1776 IB_INTR_WAIT); 1777 } 1778 1779 #ifdef FMA 1780 void 1781 px_fill_rc_status(px_fault_t *px_fault_p, pciex_rc_error_regs_t *rc_status) 1782 { 1783 /* populate the rc_status by reading the registers - TBD */ 1784 } 1785 #endif /* FMA */ 1786