/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/nxge/nxge_impl.h>

/*
 * Tunable Receive Completion Ring Configuration B parameters.
 */
uint16_t nxge_rx_pkt_thres;	/* 16 bits */
uint8_t nxge_rx_pkt_timeout;	/* 6 bits based on DMA clock divider */

lb_property_t lb_normal = {normal, "normal", nxge_lb_normal};
lb_property_t lb_external10g = {external, "external10g", nxge_lb_ext10g};
lb_property_t lb_external1000 = {external, "external1000", nxge_lb_ext1000};
lb_property_t lb_external100 = {external, "external100", nxge_lb_ext100};
lb_property_t lb_external10 = {external, "external10", nxge_lb_ext10};
lb_property_t lb_phy10g = {internal, "phy10g", nxge_lb_phy10g};
lb_property_t lb_phy1000 = {internal, "phy1000", nxge_lb_phy1000};
lb_property_t lb_phy = {internal, "phy", nxge_lb_phy};
lb_property_t lb_serdes10g = {internal, "serdes10g", nxge_lb_serdes10g};
lb_property_t lb_serdes1000 = {internal, "serdes", nxge_lb_serdes1000};
lb_property_t lb_mac10g = {internal, "mac10g", nxge_lb_mac10g};
lb_property_t lb_mac1000 = {internal, "mac1000", nxge_lb_mac1000};
lb_property_t lb_mac = {internal, "mac10/100", nxge_lb_mac};

uint32_t nxge_lb_dbg = 1;
void nxge_get_mii(p_nxge_t nxgep, p_mblk_t mp);
void nxge_put_mii(p_nxge_t nxgep, p_mblk_t mp);
static nxge_status_t nxge_check_xaui_xfp(p_nxge_t nxgep);

extern uint32_t nxge_rx_mode;
extern uint32_t nxge_jumbo_mtu;
extern boolean_t nxge_jumbo_enable;

static void
nxge_rtrace_ioctl(p_nxge_t, queue_t *, mblk_t *, struct iocblk *);

/* ARGSUSED */
void
nxge_global_reset(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_global_reset"));

    (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
    (void) nxge_intr_hw_disable(nxgep);

    if ((nxgep->suspended) ||
        ((nxgep->statsp->port_stats.lb_mode == nxge_lb_phy1000) ||
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g) ||
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes1000) ||
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes10g))) {
        (void) nxge_link_init(nxgep);
    }
    (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
    (void) nxge_mac_init(nxgep);
    (void) nxge_intr_hw_enable(nxgep);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_global_reset"));
}

/* ARGSUSED */
void
nxge_hw_id_init(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_id_init"));
    /*
     * Set up the initial hardware parameters required, such as the
     * MAC MTU size.
     */
    nxgep->mac.is_jumbo = B_FALSE;
    nxgep->mac.maxframesize = NXGE_MTU_DEFAULT_MAX;	/* 1522 */
    if (nxgep->param_arr[param_accept_jumbo].value || nxge_jumbo_enable) {
        nxgep->mac.maxframesize = (uint16_t)nxge_jumbo_mtu;
        nxgep->mac.is_jumbo = B_TRUE;
    }
    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "==> nxge_hw_id_init: maxframesize %d",
        nxgep->mac.maxframesize));

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_id_init"));
}

/* ARGSUSED */
void
nxge_hw_init_niu_common(p_nxge_t nxgep)
{
    p_nxge_hw_list_t hw_p;

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_init_niu_common"));

    if ((hw_p = nxgep->nxge_hw_p) == NULL) {
        return;
    }
    MUTEX_ENTER(&hw_p->nxge_cfg_lock);
    if (hw_p->flags & COMMON_INIT_DONE) {
        NXGE_DEBUG_MSG((nxgep, MOD_CTL,
            "nxge_hw_init_niu_common"
            " already done for dip $%p function %d exiting",
            hw_p->parent_devp, nxgep->function_num));
        MUTEX_EXIT(&hw_p->nxge_cfg_lock);
        return;
    }

    hw_p->flags = COMMON_INIT_START;
    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxge_hw_init_niu_common"
        " Started for device id %x with function %d",
        hw_p->parent_devp, nxgep->function_num));

    /* Per-Neptune common block initialization. */
    (void) nxge_fflp_hw_reset(nxgep);

    hw_p->flags = COMMON_INIT_DONE;
    MUTEX_EXIT(&hw_p->nxge_cfg_lock);

    NXGE_DEBUG_MSG((nxgep, MOD_CTL, "nxge_hw_init_niu_common"
        " Done for device id %x with function %d",
        hw_p->parent_devp, nxgep->function_num));
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_init_niu_common"));
}

/* ARGSUSED */
uint_t
nxge_intr(void *arg1, void *arg2)
{
    p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
    p_nxge_t nxgep = (p_nxge_t)arg2;
    uint_t serviced = DDI_INTR_UNCLAIMED;
    uint8_t ldv;
    npi_handle_t handle;
    p_nxge_ldgv_t ldgvp;
    p_nxge_ldg_t ldgp, t_ldgp;
    p_nxge_ldv_t t_ldvp;
    uint64_t vector0 = 0, vector1 = 0, vector2 = 0;
    int i, j, nldvs, nintrs = 1;
    npi_status_t rs = NPI_SUCCESS;

    /* The DDI interface may pass the second arg as NULL (n2 niumx driver). */
    if (arg2 == NULL || (void *)ldvp->nxgep != arg2) {
        nxgep = ldvp->nxgep;
    }
    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr"));

    if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
        NXGE_ERROR_MSG((nxgep, INT_CTL,
            "<== nxge_intr: not initialized 0x%x", serviced));
        return (serviced);
    }

    ldgvp = nxgep->ldgvp;
    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: ldgvp $%p", ldgvp));
    if (ldvp == NULL && ldgvp) {
        t_ldvp = ldvp = ldgvp->ldvp;
    }
    if (ldvp) {
        ldgp = t_ldgp = ldvp->ldgp;
    }
    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
        "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
    if (ldgvp == NULL || ldvp == NULL || ldgp == NULL) {
        NXGE_ERROR_MSG((nxgep, INT_CTL, "==> nxge_intr: "
            "ldgvp $%p ldvp $%p ldgp $%p", ldgvp, ldvp, ldgp));
        NXGE_ERROR_MSG((nxgep, INT_CTL, "<== nxge_intr: not ready"));
        return (DDI_INTR_UNCLAIMED);
    }
    /*
     * This interrupt handler has to walk all of the logical devices in
     * the group to find out which logical device interrupted us, and
     * then call that device's handler to process the events.
     */
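    /*
     * Two passes are made over the groups: the first pass (below) reads
     * each group's flag registers and dispatches the per-device handlers;
     * the second pass rearms only those groups that actually reported an
     * interrupt.
     */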
    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    t_ldgp = ldgp;
    t_ldvp = ldgp->ldvp;

    nldvs = ldgp->nldvs;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: #ldvs %d #intrs %d",
        nldvs, ldgvp->ldg_intrs));

    serviced = DDI_INTR_CLAIMED;
    for (i = 0; i < nintrs; i++, t_ldgp++) {
        NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr(%d): #ldvs %d "
            " #intrs %d", i, nldvs, nintrs));
        /* Get this group's flag bits. */
        t_ldgp->interrupted = B_FALSE;
        rs = npi_ldsv_ldfs_get(handle, t_ldgp->ldg,
            &vector0, &vector1, &vector2);
        if (rs) {
            continue;
        }
        if (!vector0 && !vector1 && !vector2) {
            NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
                "no interrupts on group %d", t_ldgp->ldg));
            continue;
        }
        NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: "
            "vector0 0x%llx vector1 0x%llx vector2 0x%llx",
            vector0, vector1, vector2));
        t_ldgp->interrupted = B_TRUE;
        nldvs = t_ldgp->nldvs;
        for (j = 0; j < nldvs; j++, t_ldvp++) {
            /*
             * Call the device's handler if its flag bits are on.
             */
            ldv = t_ldvp->ldv;
            if (((ldv < NXGE_MAC_LD_START) &&
                (LDV_ON(ldv, vector0) |
                (LDV_ON(ldv, vector1)))) ||
                (ldv >= NXGE_MAC_LD_START &&
                ((LDV2_ON_1(ldv, vector2)) ||
                (LDV2_ON_2(ldv, vector2))))) {
                (void) (t_ldvp->ldv_intr_handler)(
                    (caddr_t)t_ldvp, arg2);
                NXGE_DEBUG_MSG((nxgep, INT_CTL,
                    "==> nxge_intr: "
                    "calling device %d #ldvs %d #intrs %d",
                    j, nldvs, nintrs));
            }
        }
    }

    t_ldgp = ldgp;
    for (i = 0; i < nintrs; i++, t_ldgp++) {
        /* Rearm the group's interrupts. */
        if (t_ldgp->interrupted) {
            NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr: arm "
                "group %d", t_ldgp->ldg));
            (void) npi_intr_ldg_mgmt_set(handle, t_ldgp->ldg,
                t_ldgp->arm, t_ldgp->ldg_timer);
        }
    }

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr: serviced 0x%x",
        serviced));
    return (serviced);
}

/* ARGSUSED */
static nxge_status_t
nxge_check_xaui_xfp(p_nxge_t nxgep)
{
    nxge_status_t status = NXGE_OK;
    uint8_t phy_port_addr;
    uint16_t val;
    uint16_t val1;
    uint8_t portn;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_check_xaui_xfp"));

    portn = nxgep->mac.portnum;
    phy_port_addr = nxgep->statsp->mac_stats.xcvr_portn;

    if ((status = nxge_mdio_read(nxgep, phy_port_addr,
        BCM8704_USER_DEV3_ADDR,
        BCM8704_USER_ANALOG_STATUS0_REG, &val)) == NXGE_OK) {
        status = nxge_mdio_read(nxgep, phy_port_addr,
            BCM8704_USER_DEV3_ADDR,
            BCM8704_USER_TX_ALARM_STATUS_REG, &val1);
    }
    if (status != NXGE_OK) {
        NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
            NXGE_FM_EREPORT_XAUI_ERR);
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "XAUI is bad or absent on port<%d>\n", portn));
    } else if (nxgep->mac.portmode == PORT_10G_FIBER) {
        /*
         * 0x03FC = 0000 0011 1111 1100
         * 0x639C = 0110 0011 1001 1100
         * bit14 = 1: PMD loss-of-light indicator
         * bit13 = 1: PMD Rx loss-of-signal
         * bit6  = 0: Light is NOT ok
         * bit5  = 0: PMD Rx signal is NOT ok
         */
        if (val != 0x3FC && val == 0x639C) {
            NXGE_FM_REPORT_ERROR(nxgep, portn, NULL,
                NXGE_FM_EREPORT_XFP_ERR);
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "XFP is bad or absent on port<%d>\n", portn));
            status = NXGE_ERROR;
        }
    }
    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_check_xaui_xfp"));
    return (status);
}
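
/*
 * System error interrupt handler.  The FZC system error status register is
 * read once and the first asserted sub-block bit (SMX, MAC, IPP, ZCP, TDMC,
 * RDMC, TXC, PEU, META1/2, FFLP) is dispatched to the corresponding error
 * handler; on 10G ports the XAUI/XFP state is also checked.
 */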
/* ARGSUSED */
uint_t
nxge_syserr_intr(void *arg1, void *arg2)
{
    p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1;
    p_nxge_t nxgep = (p_nxge_t)arg2;
    p_nxge_ldg_t ldgp = NULL;
    npi_handle_t handle;
    sys_err_stat_t estat;
    uint_t serviced = DDI_INTR_UNCLAIMED;

    if (arg1 == NULL && arg2 == NULL) {
        return (serviced);
    }
    if (arg2 == NULL || ((ldvp != NULL && (void *)ldvp->nxgep != arg2))) {
        if (ldvp != NULL) {
            nxgep = ldvp->nxgep;
        }
    }
    NXGE_DEBUG_MSG((nxgep, SYSERR_CTL,
        "==> nxge_syserr_intr: arg2 $%p arg1 $%p", nxgep, ldvp));
    if (ldvp != NULL && ldvp->use_timer == B_FALSE) {
        ldgp = ldvp->ldgp;
        if (ldgp == NULL) {
            NXGE_ERROR_MSG((nxgep, SYSERR_CTL,
                "<== nxge_syserr_intr (no logical group): "
                "arg2 $%p arg1 $%p", nxgep, ldvp));
            return (DDI_INTR_UNCLAIMED);
        }
        /*
         * Get the logical device state if the function uses
         * interrupts.
         */
    }

    /* This interrupt handler is for system error interrupts. */
    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    estat.value = 0;
    (void) npi_fzc_sys_err_stat_get(handle, &estat);
    NXGE_DEBUG_MSG((nxgep, SYSERR_CTL,
        "==> nxge_syserr_intr: device error 0x%016llx", estat.value));

    if (estat.bits.ldw.smx) {
        /* SMX */
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_syserr_intr: device error - SMX"));
    } else if (estat.bits.ldw.mac) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_syserr_intr: device error - MAC"));
        /*
         * There is nothing to be done here.  All MAC errors are
         * reported through the per-port MAC interrupt.  MIF is the
         * only MAC sub-block that can post status here; its status
         * is ignored here and checked by the per-port timer instead.
         */
    } else if (estat.bits.ldw.ipp) {
        NXGE_DEBUG_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_syserr_intr: device error - IPP"));
        (void) nxge_ipp_handle_sys_errors(nxgep);
    } else if (estat.bits.ldw.zcp) {
        /* ZCP */
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_syserr_intr: device error - ZCP"));
        (void) nxge_zcp_handle_sys_errors(nxgep);
    } else if (estat.bits.ldw.tdmc) {
        /* TDMC */
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_syserr_intr: device error - TDMC"));
        /*
         * There are no TDMC system errors defined in the PRM.  All
         * TDMC channel-specific errors are reported on a per-channel
         * basis.
         */
    } else if (estat.bits.ldw.rdmc) {
        /* RDMC */
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_syserr_intr: device error - RDMC"));
        (void) nxge_rxdma_handle_sys_errors(nxgep);
    } else if (estat.bits.ldw.txc) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_syserr_intr: device error - TXC"));
        (void) nxge_txc_handle_sys_errors(nxgep);
    } else if ((nxgep->niu_type != N2_NIU) && estat.bits.ldw.peu) {
        /* PCI-E */
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_syserr_intr: device error - PCI-E"));
    } else if (estat.bits.ldw.meta1) {
        /* META1 */
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_syserr_intr: device error - META1"));
    } else if (estat.bits.ldw.meta2) {
        /* META2 */
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_syserr_intr: device error - META2"));
    } else if (estat.bits.ldw.fflp) {
        /* FFLP */
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "==> nxge_syserr_intr: device error - FFLP"));
        (void) nxge_fflp_handle_sys_errors(nxgep);
    }

    if (nxgep->mac.portmode == PORT_10G_FIBER ||
        nxgep->mac.portmode == PORT_10G_COPPER) {
        if (nxge_check_xaui_xfp(nxgep) != NXGE_OK) {
            NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
                "==> nxge_syserr_intr: device error - XAUI"));
        }
    }

    serviced = DDI_INTR_CLAIMED;

    if (ldgp != NULL && ldvp != NULL && ldgp->nldvs == 1 &&
        !ldvp->use_timer) {
        (void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg,
            B_TRUE, ldgp->ldg_timer);
    }
    NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_syserr_intr"));
    return (serviced);
}

/* ARGSUSED */
void
nxge_intr_hw_enable(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_hw_enable"));
    (void) nxge_intr_mask_mgmt_set(nxgep, B_TRUE);
    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_hw_enable"));
}

/* ARGSUSED */
void
nxge_intr_hw_disable(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_intr_hw_disable"));
    (void) nxge_intr_mask_mgmt_set(nxgep, B_FALSE);
    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_intr_hw_disable"));
}

/* ARGSUSED */
void
nxge_rx_hw_blank(void *arg, time_t ticks, uint_t count)
{
    p_nxge_t nxgep = (p_nxge_t)arg;
    uint8_t channel;
    npi_handle_t handle;
    p_nxge_ldgv_t ldgvp;
    p_nxge_ldv_t ldvp;
    int i;

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "==> nxge_rx_hw_blank"));
    handle = NXGE_DEV_NPI_HANDLE(nxgep);

    if ((ldgvp = nxgep->ldgvp) == NULL) {
        NXGE_ERROR_MSG((nxgep, INT_CTL,
            "<== nxge_rx_hw_blank (not enabled)"));
        return;
    }
    ldvp = nxgep->ldgvp->ldvp;
    if (ldvp == NULL) {
        return;
    }
    for (i = 0; i < ldgvp->nldvs; i++, ldvp++) {
        if (ldvp->is_rxdma) {
            channel = ldvp->channel;
            (void) npi_rxdma_cfg_rdc_rcr_threshold(handle,
                channel, count);
            (void) npi_rxdma_cfg_rdc_rcr_timeout(handle,
                channel, ticks);
        }
    }

    NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_rx_hw_blank"));
}

/* ARGSUSED */
void
nxge_hw_stop(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_hw_stop"));

    (void) nxge_tx_mac_disable(nxgep);
    (void) nxge_rx_mac_disable(nxgep);
    (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_STOP);
    (void) nxge_rxdma_hw_mode(nxgep, NXGE_DMA_STOP);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_hw_stop"));
}
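
/*
 * Driver-private M_IOCTL handler: dispatches on iocp->ioc_cmd and either
 * acks the message with the size of any data returned in mp->b_cont, or
 * naks it with EINVAL for unknown commands.
 */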
/* ARGSUSED */
void
nxge_hw_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
{
    int cmd;

    NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_hw_ioctl"));

    if (nxgep == NULL) {
        miocnak(wq, mp, 0, EINVAL);
        return;
    }
    iocp->ioc_error = 0;
    cmd = iocp->ioc_cmd;

    switch (cmd) {
    default:
        miocnak(wq, mp, 0, EINVAL);
        return;

    case NXGE_GET_MII:
        nxge_get_mii(nxgep, mp->b_cont);
        miocack(wq, mp, sizeof (uint16_t), 0);
        break;

    case NXGE_PUT_MII:
        nxge_put_mii(nxgep, mp->b_cont);
        miocack(wq, mp, 0, 0);
        break;

    case NXGE_GET64:
        nxge_get64(nxgep, mp->b_cont);
        miocack(wq, mp, sizeof (uint32_t), 0);
        break;

    case NXGE_PUT64:
        nxge_put64(nxgep, mp->b_cont);
        miocack(wq, mp, 0, 0);
        break;

    case NXGE_PUT_TCAM:
        nxge_put_tcam(nxgep, mp->b_cont);
        miocack(wq, mp, 0, 0);
        break;

    case NXGE_GET_TCAM:
        nxge_get_tcam(nxgep, mp->b_cont);
        miocack(wq, mp, 0, 0);
        break;

    case NXGE_TX_REGS_DUMP:
        nxge_txdma_regs_dump_channels(nxgep);
        miocack(wq, mp, 0, 0);
        break;
    case NXGE_RX_REGS_DUMP:
        nxge_rxdma_regs_dump_channels(nxgep);
        miocack(wq, mp, 0, 0);
        break;
    case NXGE_VIR_INT_REGS_DUMP:
    case NXGE_INT_REGS_DUMP:
        nxge_virint_regs_dump(nxgep);
        miocack(wq, mp, 0, 0);
        break;
    case NXGE_RTRACE:
        nxge_rtrace_ioctl(nxgep, wq, mp, iocp);
        break;
    }
}

/* ARGSUSED */
void
nxge_loopback_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp,
    struct iocblk *iocp)
{
    p_lb_property_t lb_props;

    size_t size;
    int i;

    if (mp->b_cont == NULL) {
        miocnak(wq, mp, 0, EINVAL);
        return;
    }
    switch (iocp->ioc_cmd) {
    case LB_GET_MODE:
        NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_GET_LB_MODE command"));
        if (nxgep != NULL) {
            *(lb_info_sz_t *)mp->b_cont->b_rptr =
                nxgep->statsp->port_stats.lb_mode;
            miocack(wq, mp, sizeof (nxge_lb_t), 0);
        } else {
            miocnak(wq, mp, 0, EINVAL);
        }
        break;
    case LB_SET_MODE:
        NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_SET_LB_MODE command"));
        if (iocp->ioc_count != sizeof (uint32_t)) {
            miocack(wq, mp, 0, 0);
            break;
        }
        if ((nxgep != NULL) && nxge_set_lb(nxgep, wq, mp->b_cont)) {
            miocack(wq, mp, 0, 0);
        } else {
            miocnak(wq, mp, 0, EPROTO);
        }
        break;
    case LB_GET_INFO_SIZE:
        NXGE_DEBUG_MSG((nxgep, IOC_CTL, "LB_GET_INFO_SIZE command"));
        if (nxgep != NULL) {
            size = sizeof (lb_normal);
            if (nxgep->statsp->mac_stats.cap_10gfdx) {
                size += sizeof (lb_external10g);
                size += sizeof (lb_phy10g);
                size += sizeof (lb_serdes10g);
                size += sizeof (lb_mac10g);
            }
            if (nxgep->statsp->mac_stats.cap_1000fdx) {
                size += sizeof (lb_external1000);
                size += sizeof (lb_mac1000);
                if (nxgep->mac.portmode == PORT_1G_COPPER)
                    size += sizeof (lb_phy1000);
            }
            if (nxgep->statsp->mac_stats.cap_100fdx)
                size += sizeof (lb_external100);
            if (nxgep->statsp->mac_stats.cap_10fdx)
                size += sizeof (lb_external10);
            else if ((nxgep->mac.portmode == PORT_1G_FIBER) ||
                (nxgep->mac.portmode == PORT_1G_SERDES))
                size += sizeof (lb_serdes1000);

            *(lb_info_sz_t *)mp->b_cont->b_rptr = size;

            NXGE_DEBUG_MSG((nxgep, IOC_CTL,
                "NXGE_GET_LB_INFO command: size %d", size));
            miocack(wq, mp, sizeof (lb_info_sz_t), 0);
        } else
            miocnak(wq, mp, 0, EINVAL);
        break;

    case LB_GET_INFO:
        NXGE_DEBUG_MSG((nxgep, IOC_CTL, "NXGE_GET_LB_INFO command"));
        if (nxgep != NULL) {
            size = sizeof (lb_normal);
            if (nxgep->statsp->mac_stats.cap_10gfdx) {
                size += sizeof (lb_external10g);
                size += sizeof (lb_phy10g);
                size += sizeof (lb_serdes10g);
                size += sizeof (lb_mac10g);
            }
            if (nxgep->statsp->mac_stats.cap_1000fdx) {
                size += sizeof (lb_external1000);
                size += sizeof (lb_mac1000);
                if (nxgep->mac.portmode == PORT_1G_COPPER)
                    size += sizeof (lb_phy1000);
            }
            if (nxgep->statsp->mac_stats.cap_100fdx)
                size += sizeof (lb_external100);
            if (nxgep->statsp->mac_stats.cap_10fdx)
                size += sizeof (lb_external10);
            else if ((nxgep->mac.portmode == PORT_1G_FIBER) ||
                (nxgep->mac.portmode == PORT_1G_SERDES))
                size += sizeof (lb_serdes1000);

            NXGE_DEBUG_MSG((nxgep, IOC_CTL,
                "NXGE_GET_LB_INFO command: size %d", size));
            if (size == iocp->ioc_count) {
                i = 0;
                lb_props = (p_lb_property_t)mp->b_cont->b_rptr;
                lb_props[i++] = lb_normal;
                if (nxgep->statsp->mac_stats.cap_10gfdx) {
                    lb_props[i++] = lb_mac10g;
                    lb_props[i++] = lb_serdes10g;
                    lb_props[i++] = lb_phy10g;
                    lb_props[i++] = lb_external10g;
                }
                if (nxgep->statsp->mac_stats.cap_1000fdx)
                    lb_props[i++] = lb_external1000;
                if (nxgep->statsp->mac_stats.cap_100fdx)
                    lb_props[i++] = lb_external100;
                if (nxgep->statsp->mac_stats.cap_10fdx)
                    lb_props[i++] = lb_external10;
                if (nxgep->statsp->mac_stats.cap_1000fdx)
                    lb_props[i++] = lb_mac1000;
                if (nxgep->mac.portmode == PORT_1G_COPPER) {
                    if (nxgep->statsp->mac_stats.cap_1000fdx)
                        lb_props[i++] = lb_phy1000;
                } else if ((nxgep->mac.portmode == PORT_1G_FIBER) ||
                    (nxgep->mac.portmode == PORT_1G_SERDES)) {
                    lb_props[i++] = lb_serdes1000;
                }
                miocack(wq, mp, size, 0);
            } else
                miocnak(wq, mp, 0, EINVAL);
        } else {
            miocnak(wq, mp, 0, EINVAL);
            cmn_err(CE_NOTE, "!nxge_hw_ioctl: invalid command 0x%x",
                iocp->ioc_cmd);
        }
        break;
    }
}

/*
 * DMA channel interfaces to access various channel-specific
 * hardware functions.
 */
/* ARGSUSED */
void
nxge_rxdma_channel_put64(nxge_os_acc_handle_t handle, void *reg_addrp,
    uint32_t reg_base, uint16_t channel, uint64_t reg_data)
{
    uint64_t reg_offset;

    NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_channel_put64"));

    /*
     * The channel is assumed to range from 0 to the maximum DMA channel
     * number.  If the virtual DMA CSR address space from the config
     * space is used instead (in the PCI case), the code below needs to
     * use a different offset computation macro.
     */
    reg_offset = reg_base + DMC_OFFSET(channel);
    NXGE_PIO_WRITE64(handle, reg_addrp, reg_offset, reg_data);

    NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_put64"));
}

/* ARGSUSED */
uint64_t
nxge_rxdma_channel_get64(nxge_os_acc_handle_t handle, void *reg_addrp,
    uint32_t reg_base, uint16_t channel)
{
    uint64_t reg_offset;

    NXGE_DEBUG_MSG((NULL, DMA_CTL, "==> nxge_rxdma_channel_get64"));

    /*
     * The channel is assumed to range from 0 to the maximum DMA channel
     * number.  If the virtual DMA CSR address space from the config
     * space is used instead (in the PCI case), the code below needs to
     * use a different offset computation macro.
     */
    reg_offset = reg_base + DMC_OFFSET(channel);

    NXGE_DEBUG_MSG((NULL, DMA_CTL, "<== nxge_rxdma_channel_get64"));

    return (NXGE_PIO_READ64(handle, reg_addrp, reg_offset));
}

/* ARGSUSED */
void
nxge_get32(p_nxge_t nxgep, p_mblk_t mp)
{
    nxge_os_acc_handle_t nxge_regh;

    NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_get32"));
    nxge_regh = nxgep->dev_regs->nxge_regh;

    *(uint32_t *)mp->b_rptr = NXGE_PIO_READ32(nxge_regh,
        nxgep->dev_regs->nxge_regp, *(uint32_t *)mp->b_rptr);

    NXGE_DEBUG_MSG((nxgep, IOC_CTL, "value = 0x%08X",
        *(uint32_t *)mp->b_rptr));
    NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_get32"));
}

/* ARGSUSED */
void
nxge_put32(p_nxge_t nxgep, p_mblk_t mp)
{
    nxge_os_acc_handle_t nxge_regh;
    uint32_t *buf;
    uint8_t *reg;

    NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_put32"));
    nxge_regh = nxgep->dev_regs->nxge_regh;

    buf = (uint32_t *)mp->b_rptr;
    reg = (uint8_t *)(nxgep->dev_regs->nxge_regp) + buf[0];
    NXGE_DEBUG_MSG((nxgep, IOC_CTL,
        "reg = 0x%016llX index = 0x%08X value = 0x%08X",
        reg, buf[0], buf[1]));
    NXGE_PIO_WRITE32(nxge_regh, (uint32_t *)reg, 0, buf[1]);
    NXGE_DEBUG_MSG((nxgep, IOC_CTL, "nxge_put32"));
}

/*ARGSUSED*/
boolean_t
nxge_set_lb(p_nxge_t nxgep, queue_t *wq, p_mblk_t mp)
{
    boolean_t status = B_TRUE;
    uint32_t lb_mode;
    lb_property_t *lb_info;

    NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_set_lb"));
    lb_mode = nxgep->statsp->port_stats.lb_mode;
    if (lb_mode == *(uint32_t *)mp->b_rptr) {
        cmn_err(CE_NOTE,
            "!nxge%d: Loopback mode already set (lb_mode %d).\n",
            nxgep->instance, lb_mode);
        status = B_FALSE;
        goto nxge_set_lb_exit;
    }
    lb_mode = *(uint32_t *)mp->b_rptr;
    lb_info = NULL;
    if (lb_mode == lb_normal.value)
        lb_info = &lb_normal;
    else if ((lb_mode == lb_external10g.value) &&
        (nxgep->statsp->mac_stats.cap_10gfdx))
        lb_info = &lb_external10g;
    else if ((lb_mode == lb_external1000.value) &&
        (nxgep->statsp->mac_stats.cap_1000fdx))
        lb_info = &lb_external1000;
    else if ((lb_mode == lb_external100.value) &&
        (nxgep->statsp->mac_stats.cap_100fdx))
        lb_info = &lb_external100;
    else if ((lb_mode == lb_external10.value) &&
        (nxgep->statsp->mac_stats.cap_10fdx))
        lb_info = &lb_external10;
    else if ((lb_mode == lb_phy10g.value) &&
        ((nxgep->mac.portmode == PORT_10G_COPPER) ||
        (nxgep->mac.portmode == PORT_10G_FIBER)))
        lb_info = &lb_phy10g;
    else if ((lb_mode == lb_phy1000.value) &&
        (nxgep->mac.portmode == PORT_1G_COPPER))
        lb_info = &lb_phy1000;
    else if ((lb_mode == lb_phy.value) &&
        (nxgep->mac.portmode == PORT_1G_COPPER))
        lb_info = &lb_phy;
    else if ((lb_mode == lb_serdes10g.value) &&
        ((nxgep->mac.portmode == PORT_10G_FIBER) ||
        (nxgep->mac.portmode == PORT_10G_COPPER) ||
        (nxgep->mac.portmode == PORT_10G_SERDES)))
        lb_info = &lb_serdes10g;
    else if ((lb_mode == lb_serdes1000.value) &&
        (nxgep->mac.portmode == PORT_1G_FIBER ||
        (nxgep->mac.portmode == PORT_1G_SERDES)))
        lb_info = &lb_serdes1000;
    else if (lb_mode == lb_mac10g.value)
        lb_info = &lb_mac10g;
    else if (lb_mode == lb_mac1000.value)
        lb_info = &lb_mac1000;
    else if (lb_mode == lb_mac.value)
        lb_info = &lb_mac;
    else {
        cmn_err(CE_NOTE,
            "!nxge%d: Loopback mode not supported (mode %d).\n",
            nxgep->instance, lb_mode);
        status = B_FALSE;
        goto nxge_set_lb_exit;
    }

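    /*
     * Returning to normal mode restores the saved link parameters; any
     * other mode disables autonegotiation and forces the advertised
     * speed/duplex settings that match the selected loopback mode
     * before the chip is reset.
     */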
    if (lb_mode == nxge_lb_normal) {
        if (nxge_lb_dbg) {
            cmn_err(CE_NOTE,
                "!nxge%d: Returning to normal operation",
                nxgep->instance);
        }
        nxge_set_lb_normal(nxgep);
        goto nxge_set_lb_exit;
    }
    nxgep->statsp->port_stats.lb_mode = lb_mode;

    if (nxge_lb_dbg)
        cmn_err(CE_NOTE,
            "!nxge%d: Adapter now in %s loopback mode",
            nxgep->instance, lb_info->key);
    nxgep->param_arr[param_autoneg].value = 0;
    nxgep->param_arr[param_anar_10gfdx].value =
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10g) ||
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_mac10g) ||
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g) ||
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes10g);
    nxgep->param_arr[param_anar_10ghdx].value = 0;
    nxgep->param_arr[param_anar_1000fdx].value =
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_mac1000) ||
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy1000) ||
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes1000);
    nxgep->param_arr[param_anar_1000hdx].value = 0;
    nxgep->param_arr[param_anar_100fdx].value =
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy) ||
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_mac) ||
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext100);
    nxgep->param_arr[param_anar_100hdx].value = 0;
    nxgep->param_arr[param_anar_10fdx].value =
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_mac) ||
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10);
    if (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) {
        nxgep->param_arr[param_master_cfg_enable].value = 1;
        nxgep->param_arr[param_master_cfg_value].value = 1;
    }
    if ((nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10g) ||
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext1000) ||
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext100) ||
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_ext10) ||
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g) ||
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy1000) ||
        (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy)) {

        (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
        (void) nxge_xcvr_find(nxgep);
        (void) nxge_link_init(nxgep);
        (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);
    }
    if (lb_info->lb_type == internal) {
        if ((nxgep->statsp->port_stats.lb_mode == nxge_lb_mac10g) ||
            (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy10g) ||
            (nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes10g)) {
            nxgep->statsp->mac_stats.link_speed = 10000;
        } else if ((nxgep->statsp->port_stats.lb_mode ==
            nxge_lb_mac1000) ||
            (nxgep->statsp->port_stats.lb_mode == nxge_lb_phy1000) ||
            (nxgep->statsp->port_stats.lb_mode == nxge_lb_serdes1000)) {
            nxgep->statsp->mac_stats.link_speed = 1000;
        } else {
            nxgep->statsp->mac_stats.link_speed = 100;
        }
        nxgep->statsp->mac_stats.link_duplex = 2;
        nxgep->statsp->mac_stats.link_up = 1;
    }
    nxge_global_reset(nxgep);

nxge_set_lb_exit:
    NXGE_DEBUG_MSG((nxgep, DDI_CTL,
        "<== nxge_set_lb status = 0x%08x", status));
    return (status);
}
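
/*
 * Leave loopback mode: restore the saved (old_value) link parameters,
 * reset the device, and rerun transceiver discovery and link bring-up
 * under the link monitor.
 */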
/* ARGSUSED */
void
nxge_set_lb_normal(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "==> nxge_set_lb_normal"));
    nxgep->statsp->port_stats.lb_mode = nxge_lb_normal;
    nxgep->param_arr[param_autoneg].value =
        nxgep->param_arr[param_autoneg].old_value;
    nxgep->param_arr[param_anar_1000fdx].value =
        nxgep->param_arr[param_anar_1000fdx].old_value;
    nxgep->param_arr[param_anar_1000hdx].value =
        nxgep->param_arr[param_anar_1000hdx].old_value;
    nxgep->param_arr[param_anar_100fdx].value =
        nxgep->param_arr[param_anar_100fdx].old_value;
    nxgep->param_arr[param_anar_100hdx].value =
        nxgep->param_arr[param_anar_100hdx].old_value;
    nxgep->param_arr[param_anar_10fdx].value =
        nxgep->param_arr[param_anar_10fdx].old_value;
    nxgep->param_arr[param_master_cfg_enable].value =
        nxgep->param_arr[param_master_cfg_enable].old_value;
    nxgep->param_arr[param_master_cfg_value].value =
        nxgep->param_arr[param_master_cfg_value].old_value;

    nxge_global_reset(nxgep);

    (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP);
    (void) nxge_xcvr_find(nxgep);
    (void) nxge_link_init(nxgep);
    (void) nxge_link_monitor(nxgep, LINK_MONITOR_START);

    NXGE_DEBUG_MSG((nxgep, DDI_CTL, "<== nxge_set_lb_normal"));
}

/* ARGSUSED */
void
nxge_get_mii(p_nxge_t nxgep, p_mblk_t mp)
{
    uint16_t reg;

    NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_get_mii"));

    reg = *(uint16_t *)mp->b_rptr;
    (void) nxge_mii_read(nxgep, nxgep->statsp->mac_stats.xcvr_portn, reg,
        (uint16_t *)mp->b_rptr);
    NXGE_DEBUG_MSG((nxgep, IOC_CTL, "reg = 0x%08X value = 0x%04X",
        reg, *(uint16_t *)mp->b_rptr));
    NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_get_mii"));
}

/* ARGSUSED */
void
nxge_put_mii(p_nxge_t nxgep, p_mblk_t mp)
{
    uint16_t *buf;
    uint8_t reg;

    NXGE_DEBUG_MSG((nxgep, IOC_CTL, "==> nxge_put_mii"));
    buf = (uint16_t *)mp->b_rptr;
    reg = (uint8_t)buf[0];
    NXGE_DEBUG_MSG((nxgep, IOC_CTL,
        "reg = 0x%08X index = 0x%08X value = 0x%08X",
        reg, buf[0], buf[1]));
    (void) nxge_mii_write(nxgep, nxgep->statsp->mac_stats.xcvr_portn,
        reg, buf[1]);
    NXGE_DEBUG_MSG((nxgep, IOC_CTL, "<== nxge_put_mii"));
}

/* ARGSUSED */
void
nxge_check_hw_state(p_nxge_t nxgep)
{
    p_nxge_ldgv_t ldgvp;
    p_nxge_ldv_t t_ldvp;

    NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "==> nxge_check_hw_state"));

    MUTEX_ENTER(nxgep->genlock);
    nxgep->nxge_timerid = 0;
    if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) {
        goto nxge_check_hw_state_exit;
    }
    nxge_check_tx_hang(nxgep);

    ldgvp = nxgep->ldgvp;
    if (ldgvp == NULL || (ldgvp->ldvp_syserr == NULL)) {
        NXGE_ERROR_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state: "
            "NULL ldgvp (interrupt not ready)."));
        goto nxge_check_hw_state_exit;
    }
    t_ldvp = ldgvp->ldvp_syserr;
    if (!t_ldvp->use_timer) {
        NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state: "
            "ldgvp $%p t_ldvp $%p use_timer flag %d",
            ldgvp, t_ldvp, t_ldvp->use_timer));
        goto nxge_check_hw_state_exit;
    }
    if (fm_check_acc_handle(nxgep->dev_regs->nxge_regh) != DDI_FM_OK) {
        NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
            "port%d Bad register acc handle", nxgep->mac.portnum));
    }
    (void) nxge_syserr_intr((void *)t_ldvp, (void *)nxgep);

    nxgep->nxge_timerid = nxge_start_timer(nxgep, nxge_check_hw_state,
        NXGE_CHECK_TIMER);

nxge_check_hw_state_exit:
    MUTEX_EXIT(nxgep->genlock);
    NXGE_DEBUG_MSG((nxgep, SYSERR_CTL, "<== nxge_check_hw_state"));
}
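
/*
 * NXGE_RTRACE ioctl support: copies one block of register trace entries
 * from the global npi_rtracebuf into the caller's buffer, using the
 * caller-supplied next_idx as the starting block number and last_idx as
 * the number of entries to return.
 */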
/*ARGSUSED*/
static void
nxge_rtrace_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp,
    struct iocblk *iocp)
{
    ssize_t size;
    rtrace_t *rtp;
    mblk_t *nmp;
    uint32_t i, j;
    uint32_t start_blk;
    uint32_t base_entry;
    uint32_t num_entries;

    NXGE_DEBUG_MSG((nxgep, STR_CTL, "==> nxge_rtrace_ioctl"));

    size = 1024;
    if (mp->b_cont == NULL || MBLKL(mp->b_cont) < size) {
        NXGE_DEBUG_MSG((nxgep, STR_CTL,
            "malformed M_IOCTL MBLKL = %d size = %d",
            MBLKL(mp->b_cont), size));
        miocnak(wq, mp, 0, EINVAL);
        return;
    }
    nmp = mp->b_cont;
    rtp = (rtrace_t *)nmp->b_rptr;
    start_blk = rtp->next_idx;
    num_entries = rtp->last_idx;
    base_entry = start_blk * MAX_RTRACE_IOC_ENTRIES;

    NXGE_DEBUG_MSG((nxgep, STR_CTL, "start_blk = %d\n", start_blk));
    NXGE_DEBUG_MSG((nxgep, STR_CTL, "num_entries = %d\n", num_entries));
    NXGE_DEBUG_MSG((nxgep, STR_CTL, "base_entry = %d\n", base_entry));

    rtp->next_idx = npi_rtracebuf.next_idx;
    rtp->last_idx = npi_rtracebuf.last_idx;
    rtp->wrapped = npi_rtracebuf.wrapped;
    for (i = 0, j = base_entry; i < num_entries; i++, j++) {
        rtp->buf[i].ctl_addr = npi_rtracebuf.buf[j].ctl_addr;
        rtp->buf[i].val_l32 = npi_rtracebuf.buf[j].val_l32;
        rtp->buf[i].val_h32 = npi_rtracebuf.buf[j].val_h32;
    }

    nmp->b_wptr = nmp->b_rptr + size;
    NXGE_DEBUG_MSG((nxgep, STR_CTL, "<== nxge_rtrace_ioctl"));
    miocack(wq, mp, (int)size, 0);
}