/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <hxge_impl.h>

lb_property_t lb_normal = {normal, "normal", hxge_lb_normal};
lb_property_t lb_mac10g = {internal, "mac10g", hxge_lb_mac10g};

uint32_t hxge_lb_dbg = 1;

extern uint32_t hxge_jumbo_frame_size;

static void hxge_rtrace_ioctl(p_hxge_t, queue_t *, mblk_t *, struct iocblk *);

void
hxge_global_reset(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_global_reset"));

	(void) hxge_intr_hw_disable(hxgep);

	if (hxgep->suspended)
		(void) hxge_link_init(hxgep);

	(void) hxge_vmac_init(hxgep);

	(void) hxge_intr_hw_enable(hxgep);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_global_reset"));
}

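/*
 * hxge_hw_id_init:
 *	Set the VMAC minimum and maximum frame sizes. The maximum
 *	defaults to HXGE_DEFAULT_MTU plus frame overhead and is raised
 *	to hxge_jumbo_frame_size when jumbo frames are enabled (e.g.
 *	with "dladm set-linkprop -p mtu=9000 hxge0").
 */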
void
hxge_hw_id_init(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_id_init"));

	/*
	 * Initialize the frame size to either standard "1500 + 38" or
	 * jumbo. The user may tune the frame size through the "mtu"
	 * parameter using "dladm set-linkprop".
	 */
	hxgep->vmac.minframesize = MIN_FRAME_SIZE;
	hxgep->vmac.maxframesize = HXGE_DEFAULT_MTU + MTU_TO_FRAME_SIZE;
	if (hxgep->param_arr[param_accept_jumbo].value)
		hxgep->vmac.maxframesize = (uint16_t)hxge_jumbo_frame_size;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_id_init: maxframesize %d",
	    hxgep->vmac.maxframesize));
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_id_init"));
}

void
hxge_hw_init_niu_common(p_hxge_t hxgep)
{
	p_hxge_hw_list_t hw_p;

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_init_niu_common"));

	if ((hw_p = hxgep->hxge_hw_p) == NULL) {
		return;
	}

	MUTEX_ENTER(&hw_p->hxge_cfg_lock);
	if (hw_p->flags & COMMON_INIT_DONE) {
		HXGE_DEBUG_MSG((hxgep, MOD_CTL, "hxge_hw_init_niu_common"
		    " already done for dip $%p exiting", hw_p->parent_devp));
		MUTEX_EXIT(&hw_p->hxge_cfg_lock);
		return;
	}

	hw_p->flags = COMMON_INIT_START;
	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "hxge_hw_init_niu_common Started for device id %x",
	    hw_p->parent_devp));

	(void) hxge_pfc_hw_reset(hxgep);
	hw_p->flags = COMMON_INIT_DONE;
	MUTEX_EXIT(&hw_p->hxge_cfg_lock);

	HXGE_DEBUG_MSG((hxgep, MOD_CTL,
	    "hxge_hw_init_niu_common Done for device id %x",
	    hw_p->parent_devp));
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_init_niu_common"));
}

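/*
 * hxge_intr:
 *	Common interrupt service routine. Walks every logical device
 *	group (LDG) behind this vector, reads each group's flag
 *	registers, calls the per-device handler for every logical
 *	device whose flag bits are set, and finally re-arms the groups
 *	that were serviced.
 */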
uint_t
hxge_intr(caddr_t arg1, caddr_t arg2)
{
	p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1;
	p_hxge_t hxgep = (p_hxge_t)arg2;
	uint_t serviced = DDI_INTR_UNCLAIMED;
	uint8_t ldv;
	hpi_handle_t handle;
	p_hxge_ldgv_t ldgvp;
	p_hxge_ldg_t ldgp, t_ldgp;
	p_hxge_ldv_t t_ldvp;
	uint32_t vector0 = 0, vector1 = 0;
	int i, j, nldvs, nintrs = 1;
	hpi_status_t rs = HPI_SUCCESS;

	/*
	 * The DDI interface may pass NULL as the second argument.
	 */
	if ((arg2 == NULL) || ((void *)ldvp->hxgep != arg2)) {
		hxgep = ldvp->hxgep;
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr"));

	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		HXGE_ERROR_MSG((hxgep, INT_CTL,
		    "<== hxge_intr: not initialized 0x%x", serviced));
		return (serviced);
	}

	ldgvp = hxgep->ldgvp;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: ldgvp $%p", ldgvp));

	if (ldvp == NULL && ldgvp) {
		t_ldvp = ldvp = ldgvp->ldvp;
	}

	if (ldvp) {
		ldgp = t_ldgp = ldvp->ldgp;
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: "
	    "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));

	if (ldgvp == NULL || ldvp == NULL || ldgp == NULL) {
		HXGE_ERROR_MSG((hxgep, INT_CTL, "==> hxge_intr: "
		    "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
		HXGE_ERROR_MSG((hxgep, INT_CTL, "<== hxge_intr: not ready"));
		return (DDI_INTR_UNCLAIMED);
	}

	/*
	 * This interrupt handler must walk all the logical devices to
	 * find out which ones interrupted us, then call each device's
	 * handler to process its events.
	 */
	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	t_ldgp = ldgp;
	t_ldvp = ldgp->ldvp;

	nldvs = ldgp->nldvs;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: #ldvs %d #intrs %d",
	    nldvs, ldgvp->ldg_intrs));

	serviced = DDI_INTR_CLAIMED;
	for (i = 0; i < nintrs; i++, t_ldgp++) {
		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr(%d): #ldvs %d "
		    " #intrs %d", i, nldvs, nintrs));

		/* Get this group's flag bits. */
		t_ldgp->interrupted = B_FALSE;
		rs = hpi_ldsv_ldfs_get(handle, t_ldgp->ldg, &vector0, &vector1);
		if (rs) {
			continue;
		}

		if (!vector0 && !vector1) {
			HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: "
			    "no interrupts on group %d", t_ldgp->ldg));
			continue;
		}

		HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr: "
		    "vector0 0x%llx vector1 0x%llx", vector0, vector1));

		t_ldgp->interrupted = B_TRUE;
		nldvs = t_ldgp->nldvs;

		for (j = 0; j < nldvs; j++, t_ldvp++) {
			/*
			 * Call the device's handler if its flag bits are on.
			 */
			ldv = t_ldvp->ldv;
			if ((LDV_ON(ldv, vector0) | (LDV_ON(ldv, vector1)))) {
				HXGE_DEBUG_MSG((hxgep, INT_CTL,
				    "==> hxge_intr: calling device %d"
				    " #ldvs %d #intrs %d", j, nldvs, nintrs));
				(void) (t_ldvp->ldv_intr_handler)(
				    (caddr_t)t_ldvp, arg2);
			}
		}
	}

	t_ldgp = ldgp;
	for (i = 0; i < nintrs; i++, t_ldgp++) {
		/* Re-arm the group interrupts that were serviced. */
		if (t_ldgp->interrupted) {
			HXGE_DEBUG_MSG((hxgep, INT_CTL,
			    "==> hxge_intr: arm group %d", t_ldgp->ldg));
			(void) hpi_intr_ldg_mgmt_set(handle, t_ldgp->ldg,
			    t_ldgp->arm, t_ldgp->ldg_timer);
		}
	}

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr: serviced 0x%x",
	    serviced));
	return (serviced);
}

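/*
 * hxge_peu_handle_sys_errors:
 *	Decode the PEU (PCI Express Unit) interrupt status register,
 *	bump the matching kstat counter and log a message for each
 *	error bit that is set. These errors are unrecoverable, so all
 *	further PEU interrupts are masked before the bits are examined.
 */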
hxge_status_t
hxge_peu_handle_sys_errors(p_hxge_t hxgep)
{
	hpi_handle_t handle;
	p_hxge_peu_sys_stats_t statsp;
	peu_intr_stat_t stat;

	handle = hxgep->hpi_handle;
	statsp = (p_hxge_peu_sys_stats_t)&hxgep->statsp->peu_sys_stats;

	HXGE_REG_RD32(handle, PEU_INTR_STAT, &stat.value);

	/*
	 * The PCIE errors are unrecoverable and cannot be cleared. The
	 * only thing we can do here is to mask them off to prevent
	 * continued interrupts.
	 */
	HXGE_REG_WR32(handle, PEU_INTR_MASK, 0xffffffff);

	if (stat.bits.spc_acc_err) {
		statsp->spc_acc_err++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: spc_acc_err"));
	}

	if (stat.bits.tdc_pioacc_err) {
		statsp->tdc_pioacc_err++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: tdc_pioacc_err"));
	}

	if (stat.bits.rdc_pioacc_err) {
		statsp->rdc_pioacc_err++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: rdc_pioacc_err"));
	}

	if (stat.bits.pfc_pioacc_err) {
		statsp->pfc_pioacc_err++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: pfc_pioacc_err"));
	}

	if (stat.bits.vmac_pioacc_err) {
		statsp->vmac_pioacc_err++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: vmac_pioacc_err"));
	}

	if (stat.bits.cpl_hdrq_parerr) {
		statsp->cpl_hdrq_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: cpl_hdrq_parerr"));
	}

	if (stat.bits.cpl_dataq_parerr) {
		statsp->cpl_dataq_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: cpl_dataq_parerr"));
	}

	if (stat.bits.retryram_xdlh_parerr) {
		statsp->retryram_xdlh_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: retryram_xdlh_parerr"));
	}

	if (stat.bits.retrysotram_xdlh_parerr) {
		statsp->retrysotram_xdlh_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: retrysotram_xdlh_parerr"));
	}

	if (stat.bits.p_hdrq_parerr) {
		statsp->p_hdrq_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: p_hdrq_parerr"));
	}

	if (stat.bits.p_dataq_parerr) {
		statsp->p_dataq_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: p_dataq_parerr"));
	}

	if (stat.bits.np_hdrq_parerr) {
		statsp->np_hdrq_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: np_hdrq_parerr"));
	}

	if (stat.bits.np_dataq_parerr) {
		statsp->np_dataq_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: np_dataq_parerr"));
	}

	if (stat.bits.eic_msix_parerr) {
		statsp->eic_msix_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: eic_msix_parerr"));
	}

	if (stat.bits.hcr_parerr) {
		statsp->hcr_parerr++;
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_peu_handle_sys_errors: hcr_parerr"));
	}

	HXGE_FM_REPORT_ERROR(hxgep, NULL, HXGE_FM_EREPORT_PEU_ERR);

	return (HXGE_OK);
}

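/*
 * hxge_syserr_intr:
 *	System error interrupt handler. Reads the device error status
 *	register and dispatches to the TDC, RDC, or PEU/PCI-E error
 *	handler as appropriate. Invoked either directly as an interrupt
 *	or from the hxge_check_hw_state() timer when polling is used.
 */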
/*ARGSUSED*/
uint_t
hxge_syserr_intr(caddr_t arg1, caddr_t arg2)
{
	p_hxge_ldv_t ldvp = (p_hxge_ldv_t)arg1;
	p_hxge_t hxgep = (p_hxge_t)arg2;
	p_hxge_ldg_t ldgp = NULL;
	hpi_handle_t handle;
	dev_err_stat_t estat;
	uint_t serviced = DDI_INTR_UNCLAIMED;

	if ((arg1 == NULL) && (arg2 == NULL)) {
		return (serviced);
	}

	if ((arg2 == NULL) ||
	    ((ldvp != NULL) && ((void *)ldvp->hxgep != arg2))) {
		if (ldvp != NULL) {
			hxgep = ldvp->hxgep;
		}
	}

	HXGE_DEBUG_MSG((hxgep, SYSERR_CTL,
	    "==> hxge_syserr_intr: arg2 $%p arg1 $%p", hxgep, ldvp));

	if (ldvp != NULL && ldvp->use_timer == B_FALSE) {
		ldgp = ldvp->ldgp;
		if (ldgp == NULL) {
			HXGE_ERROR_MSG((hxgep, SYSERR_CTL,
			    "<== hxge_syserr_intr(no logical group): "
			    "arg2 $%p arg1 $%p", hxgep, ldvp));
			return (DDI_INTR_UNCLAIMED);
		}
		/*
		 * Get the logical device state if the function uses the
		 * interrupt rather than the timer.
		 */
	}

	/* This interrupt handler is for system error interrupts. */
	handle = HXGE_DEV_HPI_HANDLE(hxgep);
	estat.value = 0;
	(void) hpi_fzc_sys_err_stat_get(handle, &estat);
	HXGE_DEBUG_MSG((hxgep, SYSERR_CTL,
	    "==> hxge_syserr_intr: device error 0x%016llx", estat.value));

	if (estat.bits.tdc_err0 || estat.bits.tdc_err1) {
		/* TDMC */
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_syserr_intr: device error - TDMC"));
		(void) hxge_txdma_handle_sys_errors(hxgep);
	} else if (estat.bits.rdc_err0 || estat.bits.rdc_err1) {
		/* RDMC */
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_syserr_intr: device error - RDMC"));
		(void) hxge_rxdma_handle_sys_errors(hxgep);
	} else if (estat.bits.vnm_pio_err1 || estat.bits.peu_err1) {
		/* PCI-E */
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_syserr_intr: device error - PCI-E"));

		/* kstats are updated here */
		(void) hxge_peu_handle_sys_errors(hxgep);

		if (estat.bits.peu_err1)
			HXGE_FM_REPORT_ERROR(hxgep, NULL,
			    HXGE_FM_EREPORT_PEU_ERR);

		if (estat.bits.vnm_pio_err1)
			HXGE_FM_REPORT_ERROR(hxgep, NULL,
			    HXGE_FM_EREPORT_PEU_VNM_PIO_ERR);
	} else if (estat.value != 0) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "==> hxge_syserr_intr: device error - unknown"));
	}

	serviced = DDI_INTR_CLAIMED;

	if ((ldgp != NULL) && (ldvp != NULL) &&
	    (ldgp->nldvs == 1) && !ldvp->use_timer) {
		(void) hpi_intr_ldg_mgmt_set(handle, ldgp->ldg,
		    B_TRUE, ldgp->ldg_timer);
	}

	HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "<== hxge_syserr_intr"));
	return (serviced);
}

void
hxge_intr_hw_enable(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_hw_enable"));

	(void) hxge_intr_mask_mgmt_set(hxgep, B_TRUE);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_hw_enable"));
}

void
hxge_intr_hw_disable(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_intr_hw_disable"));

	(void) hxge_intr_mask_mgmt_set(hxgep, B_FALSE);

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_intr_hw_disable"));
}

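/*
 * hxge_rx_hw_blank:
 *	Receive interrupt-blanking callback. Saves the requested timer
 *	ticks and packet-count threshold; the receive interrupt code
 *	picks the new values up on its next pass.
 */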
void
hxge_rx_hw_blank(void *arg, time_t ticks, uint_t count)
{
	p_hxge_t hxgep = (p_hxge_t)arg;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "==> hxge_rx_hw_blank"));

	/*
	 * Replace the current ticks and counts for later processing by
	 * the receive packet interrupt routines.
	 */
	hxgep->intr_timeout = (uint16_t)ticks;
	hxgep->intr_threshold = (uint16_t)count;

	HXGE_DEBUG_MSG((hxgep, INT_CTL, "<== hxge_rx_hw_blank"));
}

void
hxge_hw_stop(p_hxge_t hxgep)
{
	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "==> hxge_hw_stop"));

	(void) hxge_tx_vmac_disable(hxgep);
	(void) hxge_rx_vmac_disable(hxgep);
	(void) hxge_txdma_hw_mode(hxgep, HXGE_DMA_STOP);
	(void) hxge_rxdma_hw_mode(hxgep, HXGE_DMA_STOP);

	HXGE_DEBUG_MSG((hxgep, DDI_CTL, "<== hxge_hw_stop"));
}

void
hxge_hw_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;

	HXGE_DEBUG_MSG((hxgep, IOC_CTL, "==> hxge_hw_ioctl"));

	if (hxgep == NULL) {
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	iocp->ioc_error = 0;
	cmd = iocp->ioc_cmd;

	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case HXGE_GET64:
		hxge_get64(hxgep, mp->b_cont);
		miocack(wq, mp, sizeof (uint32_t), 0);
		break;

	case HXGE_PUT64:
		hxge_put64(hxgep, mp->b_cont);
		miocack(wq, mp, 0, 0);
		break;

	case HXGE_PUT_TCAM:
		hxge_put_tcam(hxgep, mp->b_cont);
		miocack(wq, mp, 0, 0);
		break;

	case HXGE_GET_TCAM:
		hxge_get_tcam(hxgep, mp->b_cont);
		miocack(wq, mp, 0, 0);
		break;

	case HXGE_RTRACE:
		hxge_rtrace_ioctl(hxgep, wq, mp, iocp);
		break;
	}
}

/*
 * 10G is the only loopback mode for Hydra.
 */
void
hxge_loopback_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp,
    struct iocblk *iocp)
{
	p_lb_property_t lb_props;
	size_t size;
	int i;

	if (mp->b_cont == NULL) {
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	switch (iocp->ioc_cmd) {
	case LB_GET_MODE:
		HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_GET_LB_MODE command"));
		if (hxgep != NULL) {
			*(lb_info_sz_t *)mp->b_cont->b_rptr =
			    hxgep->statsp->port_stats.lb_mode;
			miocack(wq, mp, sizeof (hxge_lb_t), 0);
		} else
			miocnak(wq, mp, 0, EINVAL);
		break;

	case LB_SET_MODE:
		HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_SET_LB_MODE command"));
		if (iocp->ioc_count != sizeof (uint32_t)) {
			miocack(wq, mp, 0, 0);
			break;
		}
		if ((hxgep != NULL) && hxge_set_lb(hxgep, wq, mp->b_cont)) {
			miocack(wq, mp, 0, 0);
		} else {
			miocnak(wq, mp, 0, EPROTO);
		}
		break;

	case LB_GET_INFO_SIZE:
		HXGE_DEBUG_MSG((hxgep, IOC_CTL, "LB_GET_INFO_SIZE command"));
		if (hxgep != NULL) {
			size = sizeof (lb_normal) + sizeof (lb_mac10g);

			*(lb_info_sz_t *)mp->b_cont->b_rptr = size;

			HXGE_DEBUG_MSG((hxgep, IOC_CTL,
			    "HXGE_GET_LB_INFO command: size %d", size));
			miocack(wq, mp, sizeof (lb_info_sz_t), 0);
		} else
			miocnak(wq, mp, 0, EINVAL);
		break;

	case LB_GET_INFO:
		HXGE_DEBUG_MSG((hxgep, IOC_CTL, "HXGE_GET_LB_INFO command"));
		if (hxgep != NULL) {
			size = sizeof (lb_normal) + sizeof (lb_mac10g);
			HXGE_DEBUG_MSG((hxgep, IOC_CTL,
			    "HXGE_GET_LB_INFO command: size %d", size));
			if (size == iocp->ioc_count) {
				i = 0;
				lb_props = (p_lb_property_t)mp->b_cont->b_rptr;
				lb_props[i++] = lb_normal;
				lb_props[i++] = lb_mac10g;

				miocack(wq, mp, size, 0);
			} else
				miocnak(wq, mp, 0, EINVAL);
		} else {
			miocnak(wq, mp, 0, EINVAL);
			cmn_err(CE_NOTE, "hxge_hw_ioctl: invalid command 0x%x",
			    iocp->ioc_cmd);
		}

		break;
	}
}

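/*
 * hxge_set_lb:
 *	Validate and apply a loopback mode change. Hydra supports only
 *	"normal" and internal "mac10g" loopback; any accepted change
 *	triggers a global reset to reprogram the VMAC.
 */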
/*ARGSUSED*/
boolean_t
hxge_set_lb(p_hxge_t hxgep, queue_t *wq, p_mblk_t mp)
{
	boolean_t status = B_TRUE;
	uint32_t lb_mode;
	lb_property_t *lb_info;

	HXGE_DEBUG_MSG((hxgep, IOC_CTL, "==> hxge_set_lb"));
	lb_mode = hxgep->statsp->port_stats.lb_mode;
	if (lb_mode == *(uint32_t *)mp->b_rptr) {
		cmn_err(CE_NOTE,
		    "hxge%d: Loopback mode already set (lb_mode %d).\n",
		    hxgep->instance, lb_mode);
		status = B_FALSE;
		goto hxge_set_lb_exit;
	}

	lb_mode = *(uint32_t *)mp->b_rptr;
	lb_info = NULL;

	/* 10G is the only loopback mode for Hydra */
	if (lb_mode == lb_normal.value)
		lb_info = &lb_normal;
	else if (lb_mode == lb_mac10g.value)
		lb_info = &lb_mac10g;
	else {
		cmn_err(CE_NOTE,
		    "hxge%d: Loopback mode not supported (mode %d).\n",
		    hxgep->instance, lb_mode);
		status = B_FALSE;
		goto hxge_set_lb_exit;
	}

	if (lb_mode == hxge_lb_normal) {
		if (hxge_lb_dbg) {
			cmn_err(CE_NOTE,
			    "!hxge%d: Returning to normal operation",
			    hxgep->instance);
		}

		hxgep->statsp->port_stats.lb_mode = hxge_lb_normal;
		hxge_global_reset(hxgep);

		goto hxge_set_lb_exit;
	}

	hxgep->statsp->port_stats.lb_mode = lb_mode;

	if (hxge_lb_dbg)
		cmn_err(CE_NOTE, "!hxge%d: Adapter now in %s loopback mode",
		    hxgep->instance, lb_info->key);

	if (lb_info->lb_type == internal) {
		if (hxgep->statsp->port_stats.lb_mode == hxge_lb_mac10g)
			hxgep->statsp->mac_stats.link_speed = 10000;
		else {
			cmn_err(CE_NOTE,
			    "hxge%d: Loopback mode not supported (mode %d).\n",
			    hxgep->instance, lb_mode);
			status = B_FALSE;
			goto hxge_set_lb_exit;
		}
		hxgep->statsp->mac_stats.link_duplex = 2;
		hxgep->statsp->mac_stats.link_up = 1;
	}

	hxge_global_reset(hxgep);

hxge_set_lb_exit:
	HXGE_DEBUG_MSG((hxgep, DDI_CTL,
	    "<== hxge_set_lb status = 0x%08x", status));

	return (status);
}

/*
 * hxge_check_hw_state:
 *	Periodic timer routine that checks for transmit hangs and polls
 *	for system errors while the hardware is initialized.
 */
void
hxge_check_hw_state(p_hxge_t hxgep)
{
	p_hxge_ldgv_t ldgvp;
	p_hxge_ldv_t t_ldvp;

	HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "==> hxge_check_hw_state"));

	MUTEX_ENTER(hxgep->genlock);

	hxgep->hxge_timerid = 0;
	if (!(hxgep->drv_state & STATE_HW_INITIALIZED)) {
		goto hxge_check_hw_state_exit;
	}

	hxge_check_tx_hang(hxgep);

	ldgvp = hxgep->ldgvp;
	if (ldgvp == NULL || (ldgvp->ldvp_syserr == NULL)) {
		HXGE_ERROR_MSG((hxgep, SYSERR_CTL, "<== hxge_check_hw_state: "
		    "NULL ldgvp (interrupt not ready)."));
		goto hxge_check_hw_state_exit;
	}

	t_ldvp = ldgvp->ldvp_syserr;
	if (!t_ldvp->use_timer) {
		HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "<== hxge_check_hw_state: "
		    "ldgvp $%p t_ldvp $%p use_timer flag %d",
		    ldgvp, t_ldvp, t_ldvp->use_timer));
		goto hxge_check_hw_state_exit;
	}

	if (fm_check_acc_handle(hxgep->dev_regs->hxge_regh) != DDI_FM_OK) {
		HXGE_ERROR_MSG((hxgep, HXGE_ERR_CTL,
		    "Bad register acc handle"));
	}

	(void) hxge_syserr_intr((caddr_t)t_ldvp, (caddr_t)hxgep);

	hxgep->hxge_timerid = hxge_start_timer(hxgep, hxge_check_hw_state,
	    HXGE_CHECK_TIMER);

hxge_check_hw_state_exit:
	MUTEX_EXIT(hxgep->genlock);

	HXGE_DEBUG_MSG((hxgep, SYSERR_CTL, "<== hxge_check_hw_state"));
}

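/*
 * hxge_rtrace_ioctl:
 *	Copy a block of register-trace entries from the global
 *	hpi_rtracebuf ring to the caller. The caller passes the block
 *	index in next_idx and the entry count in last_idx, and receives
 *	the current ring indices back along with the entries.
 */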
/*ARGSUSED*/
static void
hxge_rtrace_ioctl(p_hxge_t hxgep, queue_t *wq, mblk_t *mp,
    struct iocblk *iocp)
{
	ssize_t size;
	rtrace_t *rtp;
	mblk_t *nmp;
	uint32_t i, j;
	uint32_t start_blk;
	uint32_t base_entry;
	uint32_t num_entries;

	HXGE_DEBUG_MSG((hxgep, STR_CTL, "==> hxge_rtrace_ioctl"));

	size = 1024;
	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < size) {
		HXGE_DEBUG_MSG((hxgep, STR_CTL,
		    "malformed M_IOCTL MBLKL = %d size = %d",
		    MBLKL(mp->b_cont), size));
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	nmp = mp->b_cont;
	rtp = (rtrace_t *)nmp->b_rptr;
	start_blk = rtp->next_idx;
	num_entries = rtp->last_idx;
	base_entry = start_blk * MAX_RTRACE_IOC_ENTRIES;

	HXGE_DEBUG_MSG((hxgep, STR_CTL, "start_blk = %d\n", start_blk));
	HXGE_DEBUG_MSG((hxgep, STR_CTL, "num_entries = %d\n", num_entries));
	HXGE_DEBUG_MSG((hxgep, STR_CTL, "base_entry = %d\n", base_entry));

	rtp->next_idx = hpi_rtracebuf.next_idx;
	rtp->last_idx = hpi_rtracebuf.last_idx;
	rtp->wrapped = hpi_rtracebuf.wrapped;
	for (i = 0, j = base_entry; i < num_entries; i++, j++) {
		rtp->buf[i].ctl_addr = hpi_rtracebuf.buf[j].ctl_addr;
		rtp->buf[i].val_l32 = hpi_rtracebuf.buf[j].val_l32;
		rtp->buf[i].val_h32 = hpi_rtracebuf.buf[j].val_h32;
	}

	nmp->b_wptr = nmp->b_rptr + size;
	HXGE_DEBUG_MSG((hxgep, STR_CTL, "<== hxge_rtrace_ioctl"));
	miocack(wq, mp, (int)size, 0);
}