1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <hxge_impl.h> 29 30 lb_property_t lb_normal = {normal, "normal", hxge_lb_normal}; 31 lb_property_t lb_mac10g = {internal, "mac10g", hxge_lb_mac10g}; 32 33 uint32_t hxge_lb_dbg = 1; 34 35 extern uint32_t hxge_jumbo_mtu; 36 extern boolean_t hxge_jumbo_enable; 37 38 static void hxge_rtrace_ioctl(p_hxge_t, queue_t *, mblk_t *, struct iocblk *); 39 40 void 41 hxge_global_reset(p_hxge_t hxgep) 42 { 43 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_global_reset")); 44 45 (void) hxge_intr_hw_disable(hxgep); 46 47 if (hxgep->suspended) 48 (void) hxge_link_init(hxgep); 49 50 (void) hxge_vmac_init(hxgep); 51 52 (void) hxge_intr_hw_enable(hxgep); 53 54 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_global_reset")); 55 } 56 57 58 void 59 hxge_hw_id_init(p_hxge_t hxgep) 60 { 61 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_id_init")); 62 63 /* 64 * Set up initial hardware parameters required such as mac mtu size. 65 */ 66 hxgep->vmac.is_jumbo = B_FALSE; 67 /* 1518 + 4 + 16 */ 68 hxgep->vmac.maxframesize = STD_FRAME_SIZE + TX_PKT_HEADER_SIZE; 69 if (hxgep->param_arr[param_accept_jumbo].value || hxge_jumbo_enable) { 70 hxgep->vmac.maxframesize = (uint16_t)hxge_jumbo_mtu; 71 hxgep->vmac.is_jumbo = B_TRUE; 72 } 73 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_id_init: maxframesize %d", 74 hxgep->vmac.maxframesize)); 75 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_id_init")); 76 } 77 78 void 79 hxge_hw_init_niu_common(p_hxge_t hxgep) 80 { 81 p_hxge_hw_list_t hw_p; 82 83 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_init_niu_common")); 84 85 if ((hw_p = hxgep->hxge_hw_p) == NULL) { 86 return; 87 } 88 89 MUTEX_ENTER(&hw_p->hxge_cfg_lock); 90 if (hw_p->flags & COMMON_INIT_DONE) { 91 HXGE_DEBUG_MSG((hxgep, MOD_CTL, "hxge_hw_init_niu_common" 92 " already done for dip $%p exiting", hw_p->parent_devp)); 93 MUTEX_EXIT(&hw_p->hxge_cfg_lock); 94 return; 95 } 96 97 hw_p->flags = COMMON_INIT_START; 98 HXGE_DEBUG_MSG((hxgep, MOD_CTL, 99 "hxge_hw_init_niu_common Started for device id %x", 100 hw_p->parent_devp)); 101 102 (void) hxge_pfc_hw_reset(hxgep); 103 hw_p->flags = COMMON_INIT_DONE; 104 MUTEX_EXIT(&hw_p->hxge_cfg_lock); 105 106 HXGE_DEBUG_MSG((hxgep, MOD_CTL, 107 "hxge_hw_init_niu_common Done for device id %x", 108 hw_p->parent_devp)); 109 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_init_niu_common")); 110 } 111 112 uint_t 113 hxge_intr(caddr_t arg1, caddr_t arg2) 114 { 115 p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1; 116 p_hxge_t hxgep = (p_hxge_t)arg2; 117 uint_t serviced = DDI_INTR_UNCLAIMED; 118 uint8_t ldv; 119 hpi_handle_t handle; 120 p_hxge_ldgv_t ldgvp; 121 p_hxge_ldg_t ldgp, t_ldgp; 122 p_hxge_ldv_t t_ldvp; 123 uint32_t vector0 = 0, vector1 = 0; 124 int i, j, nldvs, nintrs = 1; 125 hpi_status_t rs = HPI_SUCCESS; 126 127 /* 128 * DDI interface returns second arg as NULL 129 */ 130 if ((arg2 == NULL) || ((void *) ldvp->hxgep != arg2)) { 131 hxgep = ldvp->hxgep; 132 } 133 134 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr")); 135 136 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 137 HXGE_ERROR_MSG((hxgep, INT_CTL, 138 "<== hxge_intr: not initialized 0x%x", serviced)); 139 return (serviced); 140 } 141 142 ldgvp = hxgep->ldgvp; 143 144 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: ldgvp $%p", ldgvp)); 145 146 if (ldvp == NULL && ldgvp) { 147 t_ldvp = ldvp = ldgvp->ldvp; 148 } 149 150 if (ldvp) { 151 ldgp = t_ldgp = ldvp->ldgp; 152 } 153 154 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: " 155 "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp)); 156 157 if (ldgvp == NULL || ldvp == NULL || ldgp == NULL) { 158 HXGE_ERROR_MSG((hxgep, INT_CTL, "==> hxge_intr: " 159 "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp)); 160 HXGE_ERROR_MSG((hxgep, INT_CTL, "<== hxge_intr: not ready")); 161 return (DDI_INTR_UNCLAIMED); 162 } 163 164 /* 165 * This interrupt handler will have to go through 166 * all the logical devices to find out which 167 * logical device interrupts us and then call 168 * its handler to process the events. 169 */ 170 handle = HXGE_DEV_HPI_HANDLE(hxgep); 171 t_ldgp = ldgp; 172 t_ldvp = ldgp->ldvp; 173 174 nldvs = ldgp->nldvs; 175 176 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: #ldvs %d #intrs %d", 177 nldvs, ldgvp->ldg_intrs)); 178 179 serviced = DDI_INTR_CLAIMED; 180 for (i = 0; i < nintrs; i++, t_ldgp++) { 181 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr(%d): #ldvs %d " 182 " #intrs %d", i, nldvs, nintrs)); 183 184 /* Get this group's flag bits. */ 185 t_ldgp->interrupted = B_FALSE; 186 rs = hpi_ldsv_ldfs_get(handle, t_ldgp->ldg, &vector0, &vector1); 187 if (rs) { 188 continue; 189 } 190 191 if (!vector0 && !vector1) { 192 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: " 193 "no interrupts on group %d", t_ldgp->ldg)); 194 continue; 195 } 196 197 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: " 198 "vector0 0x%llx vector1 0x%llx", vector0, vector1)); 199 200 t_ldgp->interrupted = B_TRUE; 201 nldvs = t_ldgp->nldvs; 202 203 for (j = 0; j < nldvs; j++, t_ldvp++) { 204 /* 205 * Call device's handler if flag bits are on. 206 */ 207 ldv = t_ldvp->ldv; 208 if ((LDV_ON(ldv, vector0) | (LDV_ON(ldv, vector1)))) { 209 HXGE_DEBUG_MSG((hxgep, INT_CTL, 210 "==> hxge_intr: calling device %d" 211 " #ldvs %d #intrs %d", j, nldvs, nintrs)); 212 (void) (t_ldvp->ldv_intr_handler)( 213 (caddr_t)t_ldvp, arg2); 214 } 215 } 216 } 217 218 t_ldgp = ldgp; 219 for (i = 0; i < nintrs; i++, t_ldgp++) { 220 /* rearm group interrupts */ 221 if (t_ldgp->interrupted) { 222 HXGE_DEBUG_MSG((hxgep, INT_CTL, 223 "==> hxge_intr: arm group %d", t_ldgp->ldg)); 224 (void) hpi_intr_ldg_mgmt_set(handle, t_ldgp->ldg, 225 t_ldgp->arm, t_ldgp->ldg_timer); 226 } 227 } 228 229 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr: serviced 0x%x", 230 serviced)); 231 return (serviced); 232 } 233 234 hxge_status_t 235 hxge_peu_handle_sys_errors(p_hxge_t hxgep) 236 { 237 hpi_handle_t handle; 238 p_hxge_peu_sys_stats_t statsp; 239 peu_intr_stat_t stat; 240 241 handle = hxgep->hpi_handle; 242 statsp = (p_hxge_peu_sys_stats_t)&hxgep->statsp->peu_sys_stats; 243 244 HXGE_REG_RD64(handle, PEU_INTR_STAT, &stat.value); 245 246 /* 247 * The PCIE errors are unrecoverrable and cannot be cleared. 248 * The only thing we can do here is to mask them off to prevent 249 * continued interrupts. 250 */ 251 HXGE_REG_WR64(handle, PEU_INTR_MASK, 0xffffffff); 252 253 if (stat.bits.spc_acc_err) { 254 statsp->spc_acc_err++; 255 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 256 "==> hxge_peu_handle_sys_errors: spc_acc_err")); 257 } 258 259 if (stat.bits.tdc_pioacc_err) { 260 statsp->tdc_pioacc_err++; 261 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 262 "==> hxge_peu_handle_sys_errors: tdc_pioacc_err")); 263 } 264 265 if (stat.bits.rdc_pioacc_err) { 266 statsp->rdc_pioacc_err++; 267 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 268 "==> hxge_peu_handle_sys_errors: rdc_pioacc_err")); 269 } 270 271 if (stat.bits.pfc_pioacc_err) { 272 statsp->pfc_pioacc_err++; 273 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 274 "==> hxge_peu_handle_sys_errors: pfc_pioacc_err")); 275 } 276 277 if (stat.bits.vmac_pioacc_err) { 278 statsp->vmac_pioacc_err++; 279 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 280 "==> hxge_peu_handle_sys_errors: vmac_pioacc_err")); 281 } 282 283 if (stat.bits.cpl_hdrq_parerr) { 284 statsp->cpl_hdrq_parerr++; 285 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 286 "==> hxge_peu_handle_sys_errors: cpl_hdrq_parerr")); 287 } 288 289 if (stat.bits.cpl_dataq_parerr) { 290 statsp->cpl_dataq_parerr++; 291 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 292 "==> hxge_peu_handle_sys_errors: cpl_dataq_parerr")); 293 } 294 295 if (stat.bits.retryram_xdlh_parerr) { 296 statsp->retryram_xdlh_parerr++; 297 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 298 "==> hxge_peu_handle_sys_errors: retryram_xdlh_parerr")); 299 } 300 301 if (stat.bits.retrysotram_xdlh_parerr) { 302 statsp->retrysotram_xdlh_parerr++; 303 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 304 "==> hxge_peu_handle_sys_errors: retrysotram_xdlh_parerr")); 305 } 306 307 if (stat.bits.p_hdrq_parerr) { 308 statsp->p_hdrq_parerr++; 309 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 310 "==> hxge_peu_handle_sys_errors: p_hdrq_parerr")); 311 } 312 313 if (stat.bits.p_dataq_parerr) { 314 statsp->p_dataq_parerr++; 315 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 316 "==> hxge_peu_handle_sys_errors: p_dataq_parerr")); 317 } 318 319 if (stat.bits.np_hdrq_parerr) { 320 statsp->np_hdrq_parerr++; 321 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 322 "==> hxge_peu_handle_sys_errors: np_hdrq_parerr")); 323 } 324 325 if (stat.bits.np_dataq_parerr) { 326 statsp->np_dataq_parerr++; 327 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 328 "==> hxge_peu_handle_sys_errors: np_dataq_parerr")); 329 } 330 331 if (stat.bits.eic_msix_parerr) { 332 statsp->eic_msix_parerr++; 333 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 334 "==> hxge_peu_handle_sys_errors: eic_msix_parerr")); 335 } 336 337 if (stat.bits.hcr_parerr) { 338 statsp->hcr_parerr++; 339 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 340 "==> hxge_peu_handle_sys_errors: hcr_parerr")); 341 } 342 343 return (HXGE_OK); 344 } 345 346 /*ARGSUSED*/ 347 uint_t 348 hxge_syserr_intr(caddr_t arg1, caddr_t arg2) 349 { 350 p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1; 351 p_hxge_t hxgep = (p_hxge_t)arg2; 352 p_hxge_ldg_t ldgp = NULL; 353 hpi_handle_t handle; 354 dev_err_stat_t estat; 355 uint_t serviced = DDI_INTR_UNCLAIMED; 356 357 if ((arg1 == NULL) && (arg2 == NULL)) { 358 return (serviced); 359 } 360 361 if ((arg2 == NULL) || 362 ((ldvp != NULL) && ((void *)ldvp->hxgep != arg2))) { 363 if (ldvp != NULL) { 364 hxgep = ldvp->hxgep; 365 } 366 } 367 368 HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, 369 "==> hxge_syserr_intr: arg2 $%p arg1 $%p", hxgep, ldvp)); 370 371 if (ldvp != NULL && ldvp->use_timer == B_FALSE) { 372 ldgp = ldvp->ldgp; 373 if (ldgp == NULL) { 374 HXGE_ERROR_MSG((hxgep, SYSERR_CTL, 375 "<== hxge_syserrintr(no logical group): " 376 "arg2 $%p arg1 $%p", hxgep, ldvp)); 377 return (DDI_INTR_UNCLAIMED); 378 } 379 /* 380 * Get the logical device state if the function uses interrupt. 381 */ 382 } 383 384 /* This interrupt handler is for system error interrupts. */ 385 handle = HXGE_DEV_HPI_HANDLE(hxgep); 386 estat.value = 0; 387 (void) hpi_fzc_sys_err_stat_get(handle, &estat); 388 HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, 389 "==> hxge_syserr_intr: device error 0x%016llx", estat.value)); 390 391 if (estat.bits.tdc_err0 || estat.bits.tdc_err1) { 392 /* TDMC */ 393 (void) hxge_txdma_handle_sys_errors(hxgep); 394 } else if (estat.bits.rdc_err0 || estat.bits.rdc_err1) { 395 /* RDMC */ 396 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 397 "==> hxge_syserr_intr: device error - RDMC")); 398 (void) hxge_rxdma_handle_sys_errors(hxgep); 399 } else if (estat.bits.vnm_pio_err1 || estat.bits.peu_err1) { 400 /* PCI-E */ 401 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 402 "==> hxge_syserr_intr: device error - PCI-E")); 403 404 /* kstats are updated here */ 405 (void) hxge_peu_handle_sys_errors(hxgep); 406 407 if (estat.bits.peu_err1) 408 HXGE_FM_REPORT_ERROR(hxgep, NULL, 409 HXGE_FM_EREPORT_PEU_ERR); 410 411 if (estat.bits.vnm_pio_err1) 412 HXGE_FM_REPORT_ERROR(hxgep, NULL, 413 HXGE_FM_EREPORT_PEU_VNM_PIO_ERR); 414 } else if (estat.value != 0) { 415 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 416 "==> hxge_syserr_intr: device error - unknown")); 417 } 418 419 serviced = DDI_INTR_CLAIMED; 420 421 if ((ldgp != NULL) && (ldvp != NULL) && 422 (ldgp->nldvs == 1) && !ldvp->use_timer) { 423 (void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg, 424 B_TRUE, ldgp->ldg_timer); 425 } 426 427 HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "<== hxge_syserr_intr")); 428 return (serviced); 429 } 430 431 void 432 hxge_intr_hw_enable(p_hxge_t hxgep) 433 { 434 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_hw_enable")); 435 436 (void) hxge_intr_mask_mgmt_set(hxgep, B_TRUE); 437 438 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_hw_enable")); 439 } 440 441 void 442 hxge_intr_hw_disable(p_hxge_t hxgep) 443 { 444 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_hw_disable")); 445 446 (void) hxge_intr_mask_mgmt_set(hxgep, B_FALSE); 447 448 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_hw_disable")); 449 } 450 451 void 452 hxge_rx_hw_blank(void *arg, time_t ticks, uint_t count) 453 { 454 p_hxge_t hxgep = (p_hxge_t)arg; 455 456 HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_rx_hw_blank")); 457 458 /* 459 * Replace current ticks and counts for later 460 * processing by the receive packet interrupt routines. 461 */ 462 hxgep->intr_timeout = (uint16_t)ticks; 463 hxgep->intr_threshold = (uint16_t)count; 464 465 HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_rx_hw_blank")); 466 } 467 468 void 469 hxge_hw_stop(p_hxge_t hxgep) 470 { 471 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_stop")); 472 473 (void) hxge_tx_vmac_disable(hxgep); 474 (void) hxge_rx_vmac_disable(hxgep); 475 (void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP); 476 (void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP); 477 478 HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_stop")); 479 } 480 481 void 482 hxge_hw_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp) 483 { 484 int cmd; 485 486 HXGE_DEBUG_MSG((hxgep, IOC_CTL, "==> hxge_hw_ioctl")); 487 488 if (hxgep == NULL) { 489 miocnak(wq, mp, 0, EINVAL); 490 return; 491 } 492 493 iocp->ioc_error = 0; 494 cmd = iocp->ioc_cmd; 495 496 switch (cmd) { 497 default: 498 miocnak(wq, mp, 0, EINVAL); 499 return; 500 501 case HXGE_GET64: 502 hxge_get64(hxgep, mp->b_cont); 503 miocack(wq, mp, sizeof (uint32_t), 0); 504 break; 505 506 case HXGE_PUT64: 507 hxge_put64(hxgep, mp->b_cont); 508 miocack(wq, mp, 0, 0); 509 break; 510 511 case HXGE_PUT_TCAM: 512 hxge_put_tcam(hxgep, mp->b_cont); 513 miocack(wq, mp, 0, 0); 514 break; 515 516 case HXGE_GET_TCAM: 517 hxge_get_tcam(hxgep, mp->b_cont); 518 miocack(wq, mp, 0, 0); 519 break; 520 521 case HXGE_RTRACE: 522 hxge_rtrace_ioctl(hxgep, wq, mp, iocp); 523 break; 524 } 525 } 526 527 /* 528 * 10G is the only loopback mode for Hydra. 529 */ 530 void 531 hxge_loopback_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp, 532 struct iocblk *iocp) 533 { 534 p_lb_property_t lb_props; 535 size_t size; 536 int i; 537 538 if (mp->b_cont == NULL) { 539 miocnak(wq, mp, 0, EINVAL); 540 } 541 542 switch (iocp->ioc_cmd) { 543 case LB_GET_MODE: 544 HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_GET_LB_MODE command")); 545 if (hxgep != NULL) { 546 *(lb_info_sz_t *)mp->b_cont->b_rptr = 547 hxgep->statsp->port_stats.lb_mode; 548 miocack(wq, mp, sizeof (hxge_lb_t), 0); 549 } else 550 miocnak(wq, mp, 0, EINVAL); 551 break; 552 553 case LB_SET_MODE: 554 HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_SET_LB_MODE command")); 555 if (iocp->ioc_count != sizeof (uint32_t)) { 556 miocack(wq, mp, 0, 0); 557 break; 558 } 559 if ((hxgep != NULL) && hxge_set_lb(hxgep, wq, mp->b_cont)) { 560 miocack(wq, mp, 0, 0); 561 } else { 562 miocnak(wq, mp, 0, EPROTO); 563 } 564 break; 565 566 case LB_GET_INFO_SIZE: 567 HXGE_DEBUG_MSG((hxgep, IOC_CTL, "LB_GET_INFO_SIZE command")); 568 if (hxgep != NULL) { 569 size = sizeof (lb_normal) + sizeof (lb_mac10g); 570 571 *(lb_info_sz_t *)mp->b_cont->b_rptr = size; 572 573 HXGE_DEBUG_MSG((hxgep, IOC_CTL, 574 "HXGE_GET_LB_INFO command: size %d", size)); 575 miocack(wq, mp, sizeof (lb_info_sz_t), 0); 576 } else 577 miocnak(wq, mp, 0, EINVAL); 578 break; 579 580 case LB_GET_INFO: 581 HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_GET_LB_INFO command")); 582 if (hxgep != NULL) { 583 size = sizeof (lb_normal) + sizeof (lb_mac10g); 584 HXGE_DEBUG_MSG((hxgep, IOC_CTL, 585 "HXGE_GET_LB_INFO command: size %d", size)); 586 if (size == iocp->ioc_count) { 587 i = 0; 588 lb_props = (p_lb_property_t)mp->b_cont->b_rptr; 589 lb_props[i++] = lb_normal; 590 lb_props[i++] = lb_mac10g; 591 592 miocack(wq, mp, size, 0); 593 } else 594 miocnak(wq, mp, 0, EINVAL); 595 } else { 596 miocnak(wq, mp, 0, EINVAL); 597 cmn_err(CE_NOTE, "hxge_hw_ioctl: invalid command 0x%x", 598 iocp->ioc_cmd); 599 } 600 601 break; 602 } 603 } 604 605 /*ARGSUSED*/ 606 boolean_t 607 hxge_set_lb(p_hxge_t hxgep, queue_t *wq, p_mblk_t mp) 608 { 609 boolean_t status = B_TRUE; 610 uint32_t lb_mode; 611 lb_property_t *lb_info; 612 613 HXGE_DEBUG_MSG((hxgep, IOC_CTL, "<== hxge_set_lb")); 614 lb_mode = hxgep->statsp->port_stats.lb_mode; 615 if (lb_mode == *(uint32_t *)mp->b_rptr) { 616 cmn_err(CE_NOTE, 617 "hxge%d: Loopback mode already set (lb_mode %d).\n", 618 hxgep->instance, lb_mode); 619 status = B_FALSE; 620 goto hxge_set_lb_exit; 621 } 622 623 lb_mode = *(uint32_t *)mp->b_rptr; 624 lb_info = NULL; 625 626 /* 10G is the only loopback mode for Hydra */ 627 if (lb_mode == lb_normal.value) 628 lb_info = &lb_normal; 629 else if (lb_mode == lb_mac10g.value) 630 lb_info = &lb_mac10g; 631 else { 632 cmn_err(CE_NOTE, 633 "hxge%d: Loopback mode not supported(mode %d).\n", 634 hxgep->instance, lb_mode); 635 status = B_FALSE; 636 goto hxge_set_lb_exit; 637 } 638 639 if (lb_mode == hxge_lb_normal) { 640 if (hxge_lb_dbg) { 641 cmn_err(CE_NOTE, 642 "!hxge%d: Returning to normal operation", 643 hxgep->instance); 644 } 645 646 hxgep->statsp->port_stats.lb_mode = hxge_lb_normal; 647 hxge_global_reset(hxgep); 648 649 goto hxge_set_lb_exit; 650 } 651 652 hxgep->statsp->port_stats.lb_mode = lb_mode; 653 654 if (hxge_lb_dbg) 655 cmn_err(CE_NOTE, "!hxge%d: Adapter now in %s loopback mode", 656 hxgep->instance, lb_info->key); 657 658 if (lb_info->lb_type == internal) { 659 if ((hxgep->statsp->port_stats.lb_mode == hxge_lb_mac10g)) 660 hxgep->statsp->mac_stats.link_speed = 10000; 661 else { 662 cmn_err(CE_NOTE, 663 "hxge%d: Loopback mode not supported(mode %d).\n", 664 hxgep->instance, lb_mode); 665 status = B_FALSE; 666 goto hxge_set_lb_exit; 667 } 668 hxgep->statsp->mac_stats.link_duplex = 2; 669 hxgep->statsp->mac_stats.link_up = 1; 670 } 671 672 hxge_global_reset(hxgep); 673 674 hxge_set_lb_exit: 675 HXGE_DEBUG_MSG((hxgep, DDI_CTL, 676 "<== hxge_set_lb status = 0x%08x", status)); 677 678 return (status); 679 } 680 681 void 682 hxge_check_hw_state(p_hxge_t hxgep) 683 { 684 p_hxge_ldgv_t ldgvp; 685 p_hxge_ldv_t t_ldvp; 686 687 HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "==> hxge_check_hw_state")); 688 689 MUTEX_ENTER(hxgep->genlock); 690 691 hxgep->hxge_timerid = 0; 692 if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) { 693 goto hxge_check_hw_state_exit; 694 } 695 696 hxge_check_tx_hang(hxgep); 697 698 ldgvp = hxgep->ldgvp; 699 if (ldgvp == NULL || (ldgvp->ldvp_syserr == NULL)) { 700 HXGE_ERROR_MSG((hxgep, SYSERR_CTL, "<== hxge_check_hw_state: " 701 "NULL ldgvp (interrupt not ready).")); 702 goto hxge_check_hw_state_exit; 703 } 704 705 t_ldvp = ldgvp->ldvp_syserr; 706 if (!t_ldvp->use_timer) { 707 HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "<== hxge_check_hw_state: " 708 "ldgvp $%p t_ldvp $%p use_timer flag %d", 709 ldgvp, t_ldvp, t_ldvp->use_timer)); 710 goto hxge_check_hw_state_exit; 711 } 712 713 if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) { 714 HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL, 715 "Bad register acc handle")); 716 } 717 718 (void) hxge_syserr_intr((caddr_t)t_ldvp, (caddr_t)hxgep); 719 720 hxgep->hxge_timerid = hxge_start_timer(hxgep, hxge_check_hw_state, 721 HXGE_CHECK_TIMER); 722 723 hxge_check_hw_state_exit: 724 MUTEX_EXIT(hxgep->genlock); 725 726 HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "<== hxge_check_hw_state")); 727 } 728 729 /*ARGSUSED*/ 730 static void 731 hxge_rtrace_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp, 732 struct iocblk *iocp) 733 { 734 ssize_t size; 735 rtrace_t *rtp; 736 mblk_t *nmp; 737 uint32_t i, j; 738 uint32_t start_blk; 739 uint32_t base_entry; 740 uint32_t num_entries; 741 742 HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_rtrace_ioctl")); 743 744 size = 1024; 745 if (mp->b_cont == NULL || MBLKL(mp->b_cont) < size) { 746 HXGE_DEBUG_MSG((hxgep, STR_CTL, 747 "malformed M_IOCTL MBLKL = %d size = %d", 748 MBLKL(mp->b_cont), size)); 749 miocnak(wq, mp, 0, EINVAL); 750 return; 751 } 752 753 nmp = mp->b_cont; 754 rtp = (rtrace_t *)nmp->b_rptr; 755 start_blk = rtp->next_idx; 756 num_entries = rtp->last_idx; 757 base_entry = start_blk * MAX_RTRACE_IOC_ENTRIES; 758 759 HXGE_DEBUG_MSG((hxgep, STR_CTL, "start_blk = %d\n", start_blk)); 760 HXGE_DEBUG_MSG((hxgep, STR_CTL, "num_entries = %d\n", num_entries)); 761 HXGE_DEBUG_MSG((hxgep, STR_CTL, "base_entry = %d\n", base_entry)); 762 763 rtp->next_idx = hpi_rtracebuf.next_idx; 764 rtp->last_idx = hpi_rtracebuf.last_idx; 765 rtp->wrapped = hpi_rtracebuf.wrapped; 766 for (i = 0, j = base_entry; i < num_entries; i++, j++) { 767 rtp->buf[i].ctl_addr = hpi_rtracebuf.buf[j].ctl_addr; 768 rtp->buf[i].val_l32 = hpi_rtracebuf.buf[j].val_l32; 769 rtp->buf[i].val_h32 = hpi_rtracebuf.buf[j].val_h32; 770 } 771 772 nmp->b_wptr = nmp->b_rptr + size; 773 HXGE_DEBUG_MSG((hxgep, STR_CTL, "<== hxge_rtrace_ioctl")); 774 miocack(wq, mp, (int)size, 0); 775 } 776