/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/nxge/nxge_impl.h>

/*
 * Tunable Receive Completion Ring Configuration B parameters.
 */
uint16_t nxge_rx_pkt_thres;	/* 16 bits */
uint8_t nxge_rx_pkt_timeout;	/* 6 bits based on DMA clock divider */

lb_property_t lb_normal = {normal, "normal", nxge_lb_normal};
lb_property_t lb_external10g = {external, "external10g", nxge_lb_ext10g};
lb_property_t lb_external1000 = {external, "external1000", nxge_lb_ext1000};
lb_property_t lb_external100 = {external, "external100", nxge_lb_ext100};
lb_property_t lb_external10 = {external, "external10", nxge_lb_ext10};
lb_property_t lb_phy10g = {internal, "phy10g", nxge_lb_phy10g};
lb_property_t lb_phy1000 = {internal, "phy1000", nxge_lb_phy1000};
lb_property_t lb_phy = {internal, "phy", nxge_lb_phy};
lb_property_t lb_serdes10g = {internal, "serdes10g", nxge_lb_serdes10g};
lb_property_t lb_serdes1000 = {internal, "serdes", nxge_lb_serdes1000};
lb_property_t lb_mac10g = {internal, "mac10g", nxge_lb_mac10g};
lb_property_t lb_mac1000 = {internal, "mac1000", nxge_lb_mac1000};
lb_property_t lb_mac = {internal, "mac10/100", nxge_lb_mac};

uint32_t nxge_lb_dbg = 1;
void nxge_get_mii(p_nxge_t nxgep, p_mblk_t mp);
void nxge_put_mii(p_nxge_t nxgep, p_mblk_t mp);

extern uint32_t nxge_rx_mode;
extern uint32_t nxge_jumbo_mtu;
extern boolean_t nxge_jumbo_enable;

static void
nxge_rtrace_ioctl(p_nxge_t, queue_t *, mblk_t *, struct iocblk *);

/* ARGSUSED */
void
nxge_global_reset(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_global_reset"));

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_intr_hw_disable(nxgep);

	if ((nxgep->suspended) ||
	    ((nxgep->statsp->port_stats.lb_mode ==
	    nxge_lb_phy1000) ||
	    (nxgep->statsp->port_stats.lb_mode ==
	    nxge_lb_phy10g) ||
	    (nxgep->statsp->port_stats.lb_mode ==
	    nxge_lb_serdes1000) ||
	    (nxgep->statsp->port_stats.lb_mode ==
	    nxge_lb_serdes10g))) {
		(void) nxge_link_init(nxgep);
	}
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	(void) nxge_mac_init(nxgep);
	(void) nxge_intr_hw_enable(nxgep);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_global_reset"));
}

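/*
 * nxge_hw_id_init() seeds the per-port MAC parameters (currently the
 * maximum frame size) before the hardware is brought up.  Jumbo support
 * is enabled from either the accept-jumbo driver property or the
 * nxge_jumbo_enable tunable.
 */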
/* ARGSUSED */
void
nxge_hw_id_init(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_id_init"));
	/*
	 * Set up initial hardware parameters required such as mac mtu size.
	 */
	nxgep->mac.is_jumbo = B_FALSE;
	nxgep->mac.maxframesize = NXGE_MTU_DEFAULT_MAX;	/* 1522 */
	if (nxgep->param_arr[param_accept_jumbo].value || nxge_jumbo_enable) {
		nxgep->mac.maxframesize = (uint16_t)nxge_jumbo_mtu;
		nxgep->mac.is_jumbo = B_TRUE;
	}
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "==> nxge_hw_id_init: maxframesize %d",
	    nxgep->mac.maxframesize));

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_id_init"));
}

/* ARGSUSED */
void
nxge_hw_init_niu_common(p_nxge_t nxgep)
{
	p_nxge_hw_list_t hw_p;

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_init_niu_common"));

	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
		return;
	}
	MUTEX_ENTER(&hw_p->nxge_cfg_lock);
	if (hw_p->flags & COMMON_INIT_DONE) {
		NXGE_DEBUG_MSG((nxgep, MOD_CTL,
		    "nxge_hw_init_niu_common"
		    " already done for dip $%p function %d exiting",
		    hw_p->parent_devp, nxgep->function_num));
		MUTEX_EXIT(&hw_p->nxge_cfg_lock);
		return;
	}

	hw_p->flags = COMMON_INIT_START;
	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxge_hw_init_niu_common"
	    " Started for device id %x with function %d",
	    hw_p->parent_devp, nxgep->function_num));

	/* per neptune common block init */
	(void) nxge_fflp_hw_reset(nxgep);

	hw_p->flags = COMMON_INIT_DONE;
	MUTEX_EXIT(&hw_p->nxge_cfg_lock);

	NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxge_hw_init_niu_common"
	    " Done for device id %x with function %d",
	    hw_p->parent_devp, nxgep->function_num));
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_init_niu_common"));
}

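/*
 * nxge_intr() is the common interrupt service routine.  It reads the
 * flag registers (LDSV0/1/2) of the logical device group that raised
 * the interrupt, calls the handler of every logical device whose flag
 * is set, and then rearms the group.
 */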
/* ARGSUSED */
uint_t
nxge_intr(void *arg1, void *arg2)
{
	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t nxgep = (p_nxge_t)arg2;
	uint_t serviced = DDI_INTR_UNCLAIMED;
	uint8_t ldv;
	npi_handle_t handle;
	p_nxge_ldgv_t ldgvp;
	p_nxge_ldg_t ldgp, t_ldgp;
	p_nxge_ldv_t t_ldvp;
	uint64_t vector0 = 0, vector1 = 0, vector2 = 0;
	int i, j, nldvs, nintrs = 1;
	npi_status_t rs = NPI_SUCCESS;

	/* DDI interface returns second arg as NULL (n2 niumx driver) !!! */
	if (arg2 == NULL || (void *) ldvp->nxgep != arg2) {
		nxgep = ldvp->nxgep;
	}
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr"));

	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		NXGE_ERROR_MSG((nxgep, INT_CTL,
		    "<== nxge_intr: not initialized 0x%x", serviced));
		return (serviced);
	}

	ldgvp = nxgep->ldgvp;
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: ldgvp $%p", ldgvp));
	if (ldvp == NULL && ldgvp) {
		t_ldvp = ldvp = ldgvp->ldvp;
	}
	if (ldvp) {
		ldgp = t_ldgp = ldvp->ldgp;
	}
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
	    "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
	if (ldgvp == NULL || ldvp == NULL || ldgp == NULL) {
		NXGE_ERROR_MSG((nxgep, INT_CTL, "==> nxge_intr: "
		    "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
		NXGE_ERROR_MSG((nxgep, INT_CTL, "<== nxge_intr: not ready"));
		return (DDI_INTR_UNCLAIMED);
	}
	/*
	 * This interrupt handler will have to go through all the logical
	 * devices to find out which logical device interrupts us and then call
	 * its handler to process the events.
	 */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	t_ldgp = ldgp;
	t_ldvp = ldgp->ldvp;

	nldvs = ldgp->nldvs;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: #ldvs %d #intrs %d",
	    nldvs, ldgvp->ldg_intrs));

	serviced = DDI_INTR_CLAIMED;
	for (i = 0; i < nintrs; i++, t_ldgp++) {
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr(%d): #ldvs %d "
		    " #intrs %d", i, nldvs, nintrs));
		/* Get this group's flag bits. */
		t_ldgp->interrupted = B_FALSE;
		rs = npi_ldsv_ldfs_get(handle, t_ldgp->ldg,
		    &vector0, &vector1, &vector2);
		if (rs) {
			continue;
		}
		if (!vector0 && !vector1 && !vector2) {
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
			    "no interrupts on group %d", t_ldgp->ldg));
			continue;
		}
		NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
		    "vector0 0x%llx vector1 0x%llx vector2 0x%llx",
		    vector0, vector1, vector2));
		t_ldgp->interrupted = B_TRUE;
		nldvs = t_ldgp->nldvs;
		for (j = 0; j < nldvs; j++, t_ldvp++) {
			/*
			 * Call device's handler if flag bits are on.
			 */
			ldv = t_ldvp->ldv;
			if (((ldv < NXGE_MAC_LD_START) &&
			    (LDV_ON(ldv, vector0) |
			    (LDV_ON(ldv, vector1)))) ||
			    (ldv >= NXGE_MAC_LD_START &&
			    ((LDV2_ON_1(ldv, vector2)) ||
			    (LDV2_ON_2(ldv, vector2))))) {
				(void) (t_ldvp->ldv_intr_handler)(
				    (caddr_t)t_ldvp, arg2);
				NXGE_DEBUG_MSG((nxgep, INT_CTL,
				    "==> nxge_intr: "
				    "calling device %d #ldvs %d #intrs %d",
				    j, nldvs, nintrs));
			}
		}
	}

	t_ldgp = ldgp;
	for (i = 0; i < nintrs; i++, t_ldgp++) {
		/* rearm group interrupts */
		if (t_ldgp->interrupted) {
			NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: arm "
			    "group %d", t_ldgp->ldg));
			(void) npi_intr_ldg_mgmt_set(handle, t_ldgp->ldg,
			    t_ldgp->arm, t_ldgp->ldg_timer);
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr: serviced 0x%x",
	    serviced));
	return (serviced);
}

/* ARGSUSED */
uint_t
nxge_syserr_intr(void *arg1, void *arg2)
{
	p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
	p_nxge_t nxgep = (p_nxge_t)arg2;
	p_nxge_ldg_t ldgp = NULL;
	npi_handle_t handle;
	sys_err_stat_t estat;
	uint_t serviced = DDI_INTR_UNCLAIMED;

	if (arg1 == NULL && arg2 == NULL) {
		return (serviced);
	}
	if (arg2 == NULL || ((ldvp != NULL && (void *) ldvp->nxgep != arg2))) {
		if (ldvp != NULL) {
			nxgep = ldvp->nxgep;
		}
	}
	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL,
	    "==> nxge_syserr_intr: arg2 $%p arg1 $%p", nxgep, ldvp));
	if (ldvp != NULL && ldvp->use_timer == B_FALSE) {
		ldgp = ldvp->ldgp;
		if (ldgp == NULL) {
			NXGE_ERROR_MSG((nxgep, SYSERR_CTL,
			    "<== nxge_syserrintr(no logical group): "
			    "arg2 $%p arg1 $%p", nxgep, ldvp));
			return (DDI_INTR_UNCLAIMED);
		}
		/*
		 * Get the logical device state if the function uses interrupt.
		 */
	}

	/* This interrupt handler is for system error interrupts. */
	handle = NXGE_DEV_NPI_HANDLE(nxgep);
	estat.value = 0;
	(void) npi_fzc_sys_err_stat_get(handle, &estat);
	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL,
	    "==> nxge_syserr_intr: device error 0x%016llx", estat.value));

	if (estat.bits.ldw.smx) {
		/* SMX */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_syserr_intr: device error - SMX"));
	} else if (estat.bits.ldw.mac) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_syserr_intr: device error - MAC"));
		/*
		 * There is nothing to be done here.  All MAC errors go to the
		 * per-MAC-port interrupt.  The MIF interrupt is the only MAC
		 * sub-block that can report status here; that status is
		 * ignored here and is checked by the per-port timer instead.
		 */
	} else if (estat.bits.ldw.ipp) {
		NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_syserr_intr: device error - IPP"));
		(void) nxge_ipp_handle_sys_errors(nxgep);
	} else if (estat.bits.ldw.zcp) {
		/* ZCP */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_syserr_intr: device error - ZCP"));
		(void) nxge_zcp_handle_sys_errors(nxgep);
	} else if (estat.bits.ldw.tdmc) {
		/* TDMC */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_syserr_intr: device error - TDMC"));
		/*
		 * There are no TDMC system errors defined in the PRM.  All
		 * TDMC channel-specific errors are reported on a per-channel
		 * basis.
		 */
	} else if (estat.bits.ldw.rdmc) {
		/* RDMC */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_syserr_intr: device error - RDMC"));
		(void) nxge_rxdma_handle_sys_errors(nxgep);
	} else if (estat.bits.ldw.txc) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_syserr_intr: device error - TXC"));
		(void) nxge_txc_handle_sys_errors(nxgep);
	} else if ((nxgep->niu_type != N2_NIU) && estat.bits.ldw.peu) {
		/* PCI-E */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_syserr_intr: device error - PCI-E"));
	} else if (estat.bits.ldw.meta1) {
		/* META1 */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_syserr_intr: device error - META1"));
	} else if (estat.bits.ldw.meta2) {
		/* META2 */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_syserr_intr: device error - META2"));
	} else if (estat.bits.ldw.fflp) {
		/* FFLP */
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "==> nxge_syserr_intr: device error - FFLP"));
		(void) nxge_fflp_handle_sys_errors(nxgep);
	}
	serviced = DDI_INTR_CLAIMED;

	if (ldgp != NULL && ldvp != NULL && ldgp->nldvs == 1 &&
	    !ldvp->use_timer) {
		(void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
		    B_TRUE, ldgp->ldg_timer);
	}
	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_syserr_intr"));
	return (serviced);
}

/* ARGSUSED */
void
nxge_intr_hw_enable(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_hw_enable"));
	(void) nxge_intr_mask_mgmt_set(nxgep, B_TRUE);
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_hw_enable"));
}

/* ARGSUSED */
void
nxge_intr_hw_disable(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_hw_disable"));
	(void) nxge_intr_mask_mgmt_set(nxgep, B_FALSE);
	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_hw_disable"));
}

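/*
 * nxge_rx_hw_blank() applies interrupt blanking to every receive DMA
 * channel: the RCR threshold is set to "count" packets and the RCR
 * timeout to "ticks", so a channel only raises a receive completion
 * interrupt once either limit is reached.
 */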
/* ARGSUSED */
void
nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count)
{
	p_nxge_t nxgep = (p_nxge_t)arg;
	uint8_t channel;
	npi_handle_t handle;
	p_nxge_ldgv_t ldgvp;
	p_nxge_ldv_t ldvp;
	int i;

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_hw_blank"));
	handle = NXGE_DEV_NPI_HANDLE(nxgep);

	if ((ldgvp = nxgep->ldgvp) == NULL) {
		NXGE_ERROR_MSG((nxgep, INT_CTL,
		    "<== nxge_rx_hw_blank (not enabled)"));
		return;
	}
	ldvp = nxgep->ldgvp->ldvp;
	if (ldvp == NULL) {
		return;
	}
	for (i = 0; i < ldgvp->nldvs; i++, ldvp++) {
		if (ldvp->is_rxdma) {
			channel = ldvp->channel;
			(void) npi_rxdma_cfg_rdc_rcr_threshold(handle,
			    channel, count);
			(void) npi_rxdma_cfg_rdc_rcr_timeout(handle,
			    channel, ticks);
		}
	}

	NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_rx_hw_blank"));
}

/* ARGSUSED */
void
nxge_hw_stop(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_stop"));

	(void) nxge_tx_mac_disable(nxgep);
	(void) nxge_rx_mac_disable(nxgep);
	(void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
	(void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_stop"));
}

/* ARGSUSED */
void
nxge_hw_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
	int cmd;

	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_hw_ioctl"));

	if (nxgep == NULL) {
		miocnak(wq, mp, 0, EINVAL);
		return;
	}
	iocp->ioc_error = 0;
	cmd = iocp->ioc_cmd;

	switch (cmd) {
	default:
		miocnak(wq, mp, 0, EINVAL);
		return;

	case NXGE_GET_MII:
		nxge_get_mii(nxgep, mp->b_cont);
		miocack(wq, mp, sizeof (uint16_t), 0);
		break;

	case NXGE_PUT_MII:
		nxge_put_mii(nxgep, mp->b_cont);
		miocack(wq, mp, 0, 0);
		break;

	case NXGE_GET64:
		nxge_get64(nxgep, mp->b_cont);
		miocack(wq, mp, sizeof (uint32_t), 0);
		break;

	case NXGE_PUT64:
		nxge_put64(nxgep, mp->b_cont);
		miocack(wq, mp, 0, 0);
		break;

	case NXGE_PUT_TCAM:
		nxge_put_tcam(nxgep, mp->b_cont);
		miocack(wq, mp, 0, 0);
		break;

	case NXGE_GET_TCAM:
		nxge_get_tcam(nxgep, mp->b_cont);
		miocack(wq, mp, 0, 0);
		break;

	case NXGE_TX_REGS_DUMP:
		nxge_txdma_regs_dump_channels(nxgep);
		miocack(wq, mp, 0, 0);
		break;
	case NXGE_RX_REGS_DUMP:
		nxge_rxdma_regs_dump_channels(nxgep);
		miocack(wq, mp, 0, 0);
		break;
	case NXGE_VIR_INT_REGS_DUMP:
	case NXGE_INT_REGS_DUMP:
		nxge_virint_regs_dump(nxgep);
		miocack(wq, mp, 0, 0);
		break;
	case NXGE_RTRACE:
		nxge_rtrace_ioctl(nxgep, wq, mp, iocp);
		break;
	}
}

/* ARGSUSED */
void
nxge_loopback_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp,
    struct iocblk *iocp)
{
	p_lb_property_t lb_props;

	size_t size;
	int i;

	if (mp->b_cont == NULL) {
		miocnak(wq, mp, 0, EINVAL);
		return;
	}
	switch (iocp->ioc_cmd) {
	case LB_GET_MODE:
		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_GET_LB_MODE command"));
		if (nxgep != NULL) {
			*(lb_info_sz_t *)mp->b_cont->b_rptr =
			    nxgep->statsp->port_stats.lb_mode;
			miocack(wq, mp, sizeof (nxge_lb_t), 0);
		} else
			miocnak(wq, mp, 0, EINVAL);
		break;
	case LB_SET_MODE:
		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_SET_LB_MODE command"));
		if (iocp->ioc_count != sizeof (uint32_t)) {
			miocack(wq, mp, 0, 0);
			break;
		}
		if ((nxgep != NULL) && nxge_set_lb(nxgep, wq, mp->b_cont)) {
			miocack(wq, mp, 0, 0);
		} else {
			miocnak(wq, mp, 0, EPROTO);
		}
		break;
	case LB_GET_INFO_SIZE:
		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "LB_GET_INFO_SIZE command"));
		if (nxgep != NULL) {
			size = sizeof (lb_normal);
			if (nxgep->statsp->mac_stats.cap_10gfdx) {
				size += sizeof (lb_external10g);
				size += sizeof (lb_phy10g);
				size += sizeof (lb_serdes10g);
				size += sizeof (lb_mac10g);
			}
			if (nxgep->statsp->mac_stats.cap_1000fdx) {
				size += sizeof (lb_external1000);
				size += sizeof (lb_mac1000);
				if (nxgep->mac.portmode == PORT_1G_COPPER)
					size += sizeof (lb_phy1000);
			}
			if (nxgep->statsp->mac_stats.cap_100fdx)
				size += sizeof (lb_external100);
			if (nxgep->statsp->mac_stats.cap_10fdx)
				size += sizeof (lb_external10);
			else if ((nxgep->mac.portmode == PORT_1G_FIBER) ||
			    (nxgep->mac.portmode == PORT_1G_SERDES))
				size += sizeof (lb_serdes1000);

			*(lb_info_sz_t *)mp->b_cont->b_rptr = size;

			NXGE_DEBUG_MSG((nxgep, IOC_CTL,
			    "NXGE_GET_LB_INFO command: size %d", size));
			miocack(wq, mp, sizeof (lb_info_sz_t), 0);
		} else
			miocnak(wq, mp, 0, EINVAL);
		break;

	case LB_GET_INFO:
		NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_GET_LB_INFO command"));
		if (nxgep != NULL) {
			size = sizeof (lb_normal);
			if (nxgep->statsp->mac_stats.cap_10gfdx) {
				size += sizeof (lb_external10g);
				size += sizeof (lb_phy10g);
				size += sizeof (lb_serdes10g);
				size += sizeof (lb_mac10g);
			}
			if (nxgep->statsp->mac_stats.cap_1000fdx) {
				size += sizeof (lb_external1000);
				size += sizeof (lb_mac1000);
				if (nxgep->mac.portmode == PORT_1G_COPPER)
					size += sizeof (lb_phy1000);
			}
			if (nxgep->statsp->mac_stats.cap_100fdx)
				size += sizeof (lb_external100);
			if (nxgep->statsp->mac_stats.cap_10fdx)
				size += sizeof (lb_external10);
			else if ((nxgep->mac.portmode == PORT_1G_FIBER) ||
			    (nxgep->mac.portmode == PORT_1G_SERDES))
				size += sizeof (lb_serdes1000);

			NXGE_DEBUG_MSG((nxgep, IOC_CTL,
			    "NXGE_GET_LB_INFO command: size %d", size));
			if (size == iocp->ioc_count) {
				i = 0;
				lb_props = (p_lb_property_t)mp->b_cont->b_rptr;
				lb_props[i++] = lb_normal;
				if (nxgep->statsp->mac_stats.cap_10gfdx) {
					lb_props[i++] = lb_mac10g;
					lb_props[i++] = lb_serdes10g;
					lb_props[i++] = lb_phy10g;
					lb_props[i++] = lb_external10g;
				}
				if (nxgep->statsp->mac_stats.cap_1000fdx)
					lb_props[i++] = lb_external1000;
				if (nxgep->statsp->mac_stats.cap_100fdx)
					lb_props[i++] = lb_external100;
				if (nxgep->statsp->mac_stats.cap_10fdx)
					lb_props[i++] = lb_external10;
				if (nxgep->statsp->mac_stats.cap_1000fdx)
					lb_props[i++] = lb_mac1000;
				if (nxgep->mac.portmode == PORT_1G_COPPER) {
					if (nxgep->statsp->mac_stats.
					    cap_1000fdx)
						lb_props[i++] = lb_phy1000;
				} else if ((nxgep->mac.portmode ==
				    PORT_1G_FIBER) ||
				    (nxgep->mac.portmode == PORT_1G_SERDES)) {
					lb_props[i++] = lb_serdes1000;
				}
				miocack(wq, mp, size, 0);
			} else
				miocnak(wq, mp, 0, EINVAL);
		} else {
			miocnak(wq, mp, 0, EINVAL);
			cmn_err(CE_NOTE, "!nxge_hw_ioctl: invalid command 0x%x",
			    iocp->ioc_cmd);
		}
		break;
	}
}

/*
 * DMA channel interfaces to access various channel specific
 * hardware functions.
 */
/* ARGSUSED */
void
nxge_rxdma_channel_put64(nxge_os_acc_handle_t handle, void *reg_addrp,
    uint32_t reg_base, uint16_t channel, uint64_t reg_data)
{
	uint64_t reg_offset;

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_put64"));

	/*
	 * Channel is assumed to be from 0 to the maximum DMA channel #.  If
	 * we use the virtual DMA CSR address space from the config space (in
	 * the PCI case), then the following code needs to use a different
	 * offset computation macro.
	 */
	reg_offset = reg_base + DMC_OFFSET(channel);
	NXGE_PIO_WRITE64(handle, reg_addrp, reg_offset, reg_data);

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_put64"));
}

/* ARGSUSED */
uint64_t
nxge_rxdma_channel_get64(nxge_os_acc_handle_t handle, void *reg_addrp,
    uint32_t reg_base, uint16_t channel)
{
	uint64_t reg_offset;

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_get64"));

	/*
	 * Channel is assumed to be from 0 to the maximum DMA channel #.  If
	 * we use the virtual DMA CSR address space from the config space (in
	 * the PCI case), then the following code needs to use a different
	 * offset computation macro.
	 */
	reg_offset = reg_base + DMC_OFFSET(channel);

	NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_get64"));

	return (NXGE_PIO_READ64(handle, reg_addrp, reg_offset));
}

/* ARGSUSED */
void
nxge_get32(p_nxge_t nxgep, p_mblk_t mp)
{
	nxge_os_acc_handle_t nxge_regh;

	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_get32"));
	nxge_regh = nxgep->dev_regs->nxge_regh;

	*(uint32_t *)mp->b_rptr = NXGE_PIO_READ32(nxge_regh,
	    nxgep->dev_regs->nxge_regp, *(uint32_t *)mp->b_rptr);

	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "value = 0x%08X",
	    *(uint32_t *)mp->b_rptr));
	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_get32"));
}

/* ARGSUSED */
void
nxge_put32(p_nxge_t nxgep, p_mblk_t mp)
{
	nxge_os_acc_handle_t nxge_regh;
	uint32_t *buf;
	uint8_t *reg;

	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_put32"));
	nxge_regh = nxgep->dev_regs->nxge_regh;

	buf = (uint32_t *)mp->b_rptr;
	reg = (uint8_t *)(nxgep->dev_regs->nxge_regp) + buf[0];
	NXGE_DEBUG_MSG((nxgep, IOC_CTL,
	    "reg = 0x%016llX index = 0x%08X value = 0x%08X",
	    reg, buf[0], buf[1]));
	NXGE_PIO_WRITE32(nxge_regh, (uint32_t *)reg, 0, buf[1]);
	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_put32"));
}

/*ARGSUSED*/
boolean_t
nxge_set_lb(p_nxge_t nxgep, queue_t *wq, p_mblk_t mp)
{
	boolean_t status = B_TRUE;
	uint32_t lb_mode;
	lb_property_t *lb_info;

	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_set_lb"));
	lb_mode = nxgep->statsp->port_stats.lb_mode;
	if (lb_mode == *(uint32_t *)mp->b_rptr) {
		cmn_err(CE_NOTE,
		    "!nxge%d: Loopback mode already set (lb_mode %d).\n",
		    nxgep->instance, lb_mode);
		status = B_FALSE;
		goto nxge_set_lb_exit;
	}
	lb_mode = *(uint32_t *)mp->b_rptr;
	lb_info = NULL;
	if (lb_mode == lb_normal.value)
		lb_info = &lb_normal;
	else if ((lb_mode == lb_external10g.value) &&
	    (nxgep->statsp->mac_stats.cap_10gfdx))
		lb_info = &lb_external10g;
	else if ((lb_mode == lb_external1000.value) &&
	    (nxgep->statsp->mac_stats.cap_1000fdx))
		lb_info = &lb_external1000;
	else if ((lb_mode == lb_external100.value) &&
	    (nxgep->statsp->mac_stats.cap_100fdx))
		lb_info = &lb_external100;
	else if ((lb_mode == lb_external10.value) &&
	    (nxgep->statsp->mac_stats.cap_10fdx))
		lb_info = &lb_external10;
	else if ((lb_mode == lb_phy10g.value) &&
	    ((nxgep->mac.portmode == PORT_10G_COPPER) ||
	    (nxgep->mac.portmode == PORT_10G_FIBER)))
		lb_info = &lb_phy10g;
	else if ((lb_mode == lb_phy1000.value) &&
	    (nxgep->mac.portmode == PORT_1G_COPPER))
		lb_info = &lb_phy1000;
	else if ((lb_mode == lb_phy.value) &&
	    (nxgep->mac.portmode == PORT_1G_COPPER))
		lb_info = &lb_phy;
	else if ((lb_mode == lb_serdes10g.value) &&
	    ((nxgep->mac.portmode == PORT_10G_FIBER) ||
	    (nxgep->mac.portmode == PORT_10G_COPPER) ||
	    (nxgep->mac.portmode == PORT_10G_SERDES)))
		lb_info = &lb_serdes10g;
	else if ((lb_mode == lb_serdes1000.value) &&
	    (nxgep->mac.portmode == PORT_1G_FIBER ||
	    (nxgep->mac.portmode == PORT_1G_SERDES)))
		lb_info = &lb_serdes1000;
	else if (lb_mode == lb_mac10g.value)
		lb_info = &lb_mac10g;
	else if (lb_mode == lb_mac1000.value)
		lb_info = &lb_mac1000;
	else if (lb_mode == lb_mac.value)
		lb_info = &lb_mac;
	else {
		cmn_err(CE_NOTE,
		    "!nxge%d: Loopback mode not supported (mode %d).\n",
		    nxgep->instance, lb_mode);
		status = B_FALSE;
		goto nxge_set_lb_exit;
	}

	if (lb_mode == nxge_lb_normal) {
		if (nxge_lb_dbg) {
			cmn_err(CE_NOTE,
			    "!nxge%d: Returning to normal operation",
			    nxgep->instance);
		}
		nxge_set_lb_normal(nxgep);
		goto nxge_set_lb_exit;
	}
	nxgep->statsp->port_stats.lb_mode = lb_mode;

	if (nxge_lb_dbg)
		cmn_err(CE_NOTE,
		    "!nxge%d: Adapter now in %s loopback mode",
		    nxgep->instance, lb_info->key);
	nxgep->param_arr[param_autoneg].value = 0;
	nxgep->param_arr[param_anar_10gfdx].value =
	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10g) ||
	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_mac10g) ||
	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g) ||
	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes10g);
	nxgep->param_arr[param_anar_10ghdx].value = 0;
	nxgep->param_arr[param_anar_1000fdx].value =
	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_mac1000) ||
	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy1000) ||
	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes1000);
	nxgep->param_arr[param_anar_1000hdx].value = 0;
	nxgep->param_arr[param_anar_100fdx].value =
	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy) ||
	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_mac) ||
	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext100);
	nxgep->param_arr[param_anar_100hdx].value = 0;
	nxgep->param_arr[param_anar_10fdx].value =
	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_mac) ||
	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10);
	if (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) {
		nxgep->param_arr[param_master_cfg_enable].value = 1;
		nxgep->param_arr[param_master_cfg_value].value = 1;
	}
	if ((nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10g) ||
	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext100) ||
	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10) ||
	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g) ||
	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy1000) ||
	    (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy)) {

		(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
		(void) nxge_xcvr_find(nxgep);
		(void) nxge_link_init(nxgep);
		(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
	}
	if (lb_info->lb_type == internal) {
		if ((nxgep->statsp->port_stats.lb_mode == nxge_lb_mac10g) ||
		    (nxgep->statsp->port_stats.lb_mode ==
		    nxge_lb_phy10g) ||
		    (nxgep->statsp->port_stats.lb_mode ==
		    nxge_lb_serdes10g)) {
			nxgep->statsp->mac_stats.link_speed = 10000;
		} else if ((nxgep->statsp->port_stats.lb_mode
		    == nxge_lb_mac1000) ||
		    (nxgep->statsp->port_stats.lb_mode ==
		    nxge_lb_phy1000) ||
		    (nxgep->statsp->port_stats.lb_mode ==
		    nxge_lb_serdes1000)) {
			nxgep->statsp->mac_stats.link_speed = 1000;
		} else {
			nxgep->statsp->mac_stats.link_speed = 100;
		}
		nxgep->statsp->mac_stats.link_duplex = 2;
		nxgep->statsp->mac_stats.link_up = 1;
	}
	nxge_global_reset(nxgep);

nxge_set_lb_exit:
	NXGE_DEBUG_MSG((nxgep, DDI_CTL,
	    "<== nxge_set_lb status = 0x%08x", status));
	return (status);
}

/* ARGSUSED */
void
nxge_set_lb_normal(p_nxge_t nxgep)
{
	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_lb_normal"));
	nxgep->statsp->port_stats.lb_mode = nxge_lb_normal;
	nxgep->param_arr[param_autoneg].value =
	    nxgep->param_arr[param_autoneg].old_value;
	nxgep->param_arr[param_anar_1000fdx].value =
	    nxgep->param_arr[param_anar_1000fdx].old_value;
	nxgep->param_arr[param_anar_1000hdx].value =
	    nxgep->param_arr[param_anar_1000hdx].old_value;
	nxgep->param_arr[param_anar_100fdx].value =
	    nxgep->param_arr[param_anar_100fdx].old_value;
	nxgep->param_arr[param_anar_100hdx].value =
	    nxgep->param_arr[param_anar_100hdx].old_value;
	nxgep->param_arr[param_anar_10fdx].value =
	    nxgep->param_arr[param_anar_10fdx].old_value;
	nxgep->param_arr[param_master_cfg_enable].value =
	    nxgep->param_arr[param_master_cfg_enable].old_value;
	nxgep->param_arr[param_master_cfg_value].value =
	    nxgep->param_arr[param_master_cfg_value].old_value;

	nxge_global_reset(nxgep);

	(void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
	(void) nxge_xcvr_find(nxgep);
	(void) nxge_link_init(nxgep);
	(void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

	NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_lb_normal"));
}

/* ARGSUSED */
void
nxge_get_mii(p_nxge_t nxgep, p_mblk_t mp)
{
	uint16_t reg;

	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_get_mii"));

	reg = *(uint16_t *)mp->b_rptr;
	(void) nxge_mii_read(nxgep, nxgep->statsp->mac_stats.xcvr_portn, reg,
	    (uint16_t *)mp->b_rptr);
	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "reg = 0x%08X value = 0x%04X",
	    reg, *(uint16_t *)mp->b_rptr));
	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_get_mii"));
}

/* ARGSUSED */
void
nxge_put_mii(p_nxge_t nxgep, p_mblk_t mp)
{
	uint16_t *buf;
	uint8_t reg;

	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_put_mii"));
	buf = (uint16_t *)mp->b_rptr;
	reg = (uint8_t)buf[0];
	NXGE_DEBUG_MSG((nxgep, IOC_CTL,
	    "reg = 0x%08X index = 0x%08X value = 0x%08X",
	    reg, buf[0], buf[1]));
	(void) nxge_mii_write(nxgep, nxgep->statsp->mac_stats.xcvr_portn,
	    reg, buf[1]);
	NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_put_mii"));
}

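/*
 * nxge_check_hw_state() runs off the driver's periodic timer when the
 * system-error logical device is polled rather than interrupt driven:
 * it checks for transmit hangs, validates the register access handle,
 * invokes the system error handler, and then reschedules itself.
 */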
/* ARGSUSED */
void
nxge_check_hw_state(p_nxge_t nxgep)
{
	p_nxge_ldgv_t ldgvp;
	p_nxge_ldv_t t_ldvp;

	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "==> nxge_check_hw_state"));

	MUTEX_ENTER(nxgep->genlock);
	nxgep->nxge_timerid = 0;
	if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
		goto nxge_check_hw_state_exit;
	}
	nxge_check_tx_hang(nxgep);

	ldgvp = nxgep->ldgvp;
	if (ldgvp == NULL || (ldgvp->ldvp_syserr == NULL)) {
		NXGE_ERROR_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state: "
		    "NULL ldgvp (interrupt not ready)."));
		goto nxge_check_hw_state_exit;
	}
	t_ldvp = ldgvp->ldvp_syserr;
	if (!t_ldvp->use_timer) {
		NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state: "
		    "ldgvp $%p t_ldvp $%p use_timer flag %d",
		    ldgvp, t_ldvp, t_ldvp->use_timer));
		goto nxge_check_hw_state_exit;
	}
	if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
		    "port%d Bad register acc handle", nxgep->mac.portnum));
	}
	(void) nxge_syserr_intr((void *) t_ldvp, (void *) nxgep);

	nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
	    NXGE_CHECK_TIMER);

nxge_check_hw_state_exit:
	MUTEX_EXIT(nxgep->genlock);
	NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state"));
}

/*ARGSUSED*/
static void
nxge_rtrace_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp,
    struct iocblk *iocp)
{
	ssize_t size;
	rtrace_t *rtp;
	mblk_t *nmp;
	uint32_t i, j;
	uint32_t start_blk;
	uint32_t base_entry;
	uint32_t num_entries;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_rtrace_ioctl"));

	size = 1024;
	if (mp->b_cont == NULL || MBLKL(mp->b_cont) < size) {
		NXGE_DEBUG_MSG((nxgep, STR_CTL,
		    "malformed M_IOCTL MBLKL = %d size = %d",
		    MBLKL(mp->b_cont), size));
		miocnak(wq, mp, 0, EINVAL);
		return;
	}
	nmp = mp->b_cont;
	rtp = (rtrace_t *)nmp->b_rptr;
	start_blk = rtp->next_idx;
	num_entries = rtp->last_idx;
	base_entry = start_blk * MAX_RTRACE_IOC_ENTRIES;

	NXGE_DEBUG_MSG((nxgep, STR_CTL, "start_blk = %d\n", start_blk));
	NXGE_DEBUG_MSG((nxgep, STR_CTL, "num_entries = %d\n", num_entries));
	NXGE_DEBUG_MSG((nxgep, STR_CTL, "base_entry = %d\n", base_entry));

	rtp->next_idx = npi_rtracebuf.next_idx;
	rtp->last_idx = npi_rtracebuf.last_idx;
	rtp->wrapped = npi_rtracebuf.wrapped;
	for (i = 0, j = base_entry; i < num_entries; i++, j++) {
		rtp->buf[i].ctl_addr = npi_rtracebuf.buf[j].ctl_addr;
		rtp->buf[i].val_l32 = npi_rtracebuf.buf[j].val_l32;
		rtp->buf[i].val_h32 = npi_rtracebuf.buf[j].val_h32;
	}

	nmp->b_wptr = nmp->b_rptr + size;
	NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_rtrace_ioctl"));
	miocack(wq, mp, (int)size, 0);
}