1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 #include <sys/nxge/nxge_impl.h> 29 #include <sys/nxge/nxge_txdma.h> 30 #include <sys/nxge/nxge_hio.h> 31 #include <npi_tx_rd64.h> 32 #include <npi_tx_wr64.h> 33 #include <sys/llc1.h> 34 35 uint32_t nxge_reclaim_pending = TXDMA_RECLAIM_PENDING_DEFAULT; 36 uint32_t nxge_tx_minfree = 32; 37 uint32_t nxge_tx_intr_thres = 0; 38 uint32_t nxge_tx_max_gathers = TX_MAX_GATHER_POINTERS; 39 uint32_t nxge_tx_tiny_pack = 1; 40 uint32_t nxge_tx_use_bcopy = 1; 41 42 extern uint32_t nxge_tx_ring_size; 43 extern uint32_t nxge_bcopy_thresh; 44 extern uint32_t nxge_dvma_thresh; 45 extern uint32_t nxge_dma_stream_thresh; 46 extern dma_method_t nxge_force_dma; 47 extern uint32_t nxge_cksum_offload; 48 49 /* Device register access attributes for PIO. */ 50 extern ddi_device_acc_attr_t nxge_dev_reg_acc_attr; 51 /* Device descriptor access attributes for DMA. */ 52 extern ddi_device_acc_attr_t nxge_dev_desc_dma_acc_attr; 53 /* Device buffer access attributes for DMA. 
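 * (These register/descriptor/buffer access-attribute templates and the
 * DMA attribute structures below are defined once, driver-wide, and are
 * shared by every TDC when descriptors and buffers are bound.)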
*/ 54 extern ddi_device_acc_attr_t nxge_dev_buf_dma_acc_attr; 55 extern ddi_dma_attr_t nxge_desc_dma_attr; 56 extern ddi_dma_attr_t nxge_tx_dma_attr; 57 58 extern int nxge_serial_tx(mblk_t *mp, void *arg); 59 60 static nxge_status_t nxge_map_txdma(p_nxge_t, int); 61 62 static nxge_status_t nxge_txdma_hw_start(p_nxge_t, int); 63 64 static nxge_status_t nxge_map_txdma_channel(p_nxge_t, uint16_t, 65 p_nxge_dma_common_t *, p_tx_ring_t *, 66 uint32_t, p_nxge_dma_common_t *, 67 p_tx_mbox_t *); 68 static void nxge_unmap_txdma_channel(p_nxge_t, uint16_t); 69 70 static nxge_status_t nxge_map_txdma_channel_buf_ring(p_nxge_t, uint16_t, 71 p_nxge_dma_common_t *, p_tx_ring_t *, uint32_t); 72 static void nxge_unmap_txdma_channel_buf_ring(p_nxge_t, p_tx_ring_t); 73 74 static void nxge_map_txdma_channel_cfg_ring(p_nxge_t, uint16_t, 75 p_nxge_dma_common_t *, p_tx_ring_t, 76 p_tx_mbox_t *); 77 static void nxge_unmap_txdma_channel_cfg_ring(p_nxge_t, 78 p_tx_ring_t, p_tx_mbox_t); 79 80 static nxge_status_t nxge_txdma_start_channel(p_nxge_t, uint16_t, 81 p_tx_ring_t, p_tx_mbox_t); 82 static nxge_status_t nxge_txdma_stop_channel(p_nxge_t, uint16_t); 83 84 static p_tx_ring_t nxge_txdma_get_ring(p_nxge_t, uint16_t); 85 static nxge_status_t nxge_tx_err_evnts(p_nxge_t, uint_t, 86 p_nxge_ldv_t, tx_cs_t); 87 static p_tx_mbox_t nxge_txdma_get_mbox(p_nxge_t, uint16_t); 88 static nxge_status_t nxge_txdma_fatal_err_recover(p_nxge_t, 89 uint16_t, p_tx_ring_t); 90 91 static void nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, 92 p_tx_ring_t ring_p, uint16_t channel); 93 94 nxge_status_t 95 nxge_init_txdma_channels(p_nxge_t nxgep) 96 { 97 nxge_grp_set_t *set = &nxgep->tx_set; 98 int i, count; 99 100 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_init_txdma_channels")); 101 102 for (i = 0, count = 0; i < NXGE_LOGICAL_GROUP_MAX; i++) { 103 if ((1 << i) & set->lg.map) { 104 int tdc; 105 nxge_grp_t *group = set->group[i]; 106 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 107 if ((1 << tdc) & group->map) { 108 if ((nxge_grp_dc_add(nxgep, 109 (vr_handle_t)group, 110 VP_BOUND_TX, tdc))) 111 return (NXGE_ERROR); 112 } 113 } 114 } 115 if (++count == set->lg.count) 116 break; 117 } 118 119 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_init_txdma_channels")); 120 121 return (NXGE_OK); 122 } 123 124 nxge_status_t 125 nxge_init_txdma_channel( 126 p_nxge_t nxge, 127 int channel) 128 { 129 nxge_status_t status; 130 131 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "==> nxge_init_txdma_channel")); 132 133 status = nxge_map_txdma(nxge, channel); 134 if (status != NXGE_OK) { 135 NXGE_ERROR_MSG((nxge, NXGE_ERR_CTL, 136 "<== nxge_init_txdma_channel: status 0x%x", status)); 137 (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel); 138 return (status); 139 } 140 141 status = nxge_txdma_hw_start(nxge, channel); 142 if (status != NXGE_OK) { 143 (void) nxge_unmap_txdma_channel(nxge, channel); 144 (void) npi_txdma_dump_tdc_regs(nxge->npi_handle, channel); 145 return (status); 146 } 147 148 if (!nxge->statsp->tdc_ksp[channel]) 149 nxge_setup_tdc_kstats(nxge, channel); 150 151 NXGE_DEBUG_MSG((nxge, MEM2_CTL, "<== nxge_init_txdma_channel")); 152 153 return (status); 154 } 155 156 void 157 nxge_uninit_txdma_channels(p_nxge_t nxgep) 158 { 159 nxge_grp_set_t *set = &nxgep->tx_set; 160 int tdc; 161 162 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "==> nxge_uninit_txdma_channels")); 163 164 if (set->owned.map == 0) { 165 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, 166 "nxge_uninit_txdma_channels: no channels")); 167 return; 168 } 169 170 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 171 if ((1 << tdc) 
& set->owned.map) { 172 nxge_grp_dc_remove(nxgep, VP_BOUND_TX, tdc); 173 } 174 } 175 176 NXGE_DEBUG_MSG((nxgep, MEM2_CTL, "<== nxge_uninit_txdma_channels")); 177 } 178 179 void 180 nxge_uninit_txdma_channel(p_nxge_t nxgep, int channel) 181 { 182 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_uninit_txdma_channel")); 183 184 if (nxgep->statsp->tdc_ksp[channel]) { 185 kstat_delete(nxgep->statsp->tdc_ksp[channel]); 186 nxgep->statsp->tdc_ksp[channel] = 0; 187 } 188 189 (void) nxge_txdma_stop_channel(nxgep, channel); 190 nxge_unmap_txdma_channel(nxgep, channel); 191 192 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 193 "<== nxge_uninit_txdma_channel")); 194 } 195 196 void 197 nxge_setup_dma_common(p_nxge_dma_common_t dest_p, p_nxge_dma_common_t src_p, 198 uint32_t entries, uint32_t size) 199 { 200 size_t tsize; 201 *dest_p = *src_p; 202 tsize = size * entries; 203 dest_p->alength = tsize; 204 dest_p->nblocks = entries; 205 dest_p->block_size = size; 206 dest_p->offset += tsize; 207 208 src_p->kaddrp = (caddr_t)dest_p->kaddrp + tsize; 209 src_p->alength -= tsize; 210 src_p->dma_cookie.dmac_laddress += tsize; 211 src_p->dma_cookie.dmac_size -= tsize; 212 } 213 214 /* 215 * nxge_reset_txdma_channel 216 * 217 * Reset a TDC. 218 * 219 * Arguments: 220 * nxgep 221 * channel The channel to reset. 222 * reg_data The current TX_CS. 223 * 224 * Notes: 225 * 226 * NPI/NXGE function calls: 227 * npi_txdma_channel_reset() 228 * npi_txdma_channel_control() 229 * 230 * Registers accessed: 231 * TX_CS DMC+0x40028 Transmit Control And Status 232 * TX_RING_KICK DMC+0x40018 Transmit Ring Kick 233 * 234 * Context: 235 * Any domain 236 */ 237 nxge_status_t 238 nxge_reset_txdma_channel(p_nxge_t nxgep, uint16_t channel, uint64_t reg_data) 239 { 240 npi_status_t rs = NPI_SUCCESS; 241 nxge_status_t status = NXGE_OK; 242 npi_handle_t handle; 243 244 NXGE_DEBUG_MSG((nxgep, TX_CTL, " ==> nxge_reset_txdma_channel")); 245 246 handle = NXGE_DEV_NPI_HANDLE(nxgep); 247 if ((reg_data & TX_CS_RST_MASK) == TX_CS_RST_MASK) { 248 rs = npi_txdma_channel_reset(handle, channel); 249 } else { 250 rs = npi_txdma_channel_control(handle, TXDMA_RESET, 251 channel); 252 } 253 254 if (rs != NPI_SUCCESS) { 255 status = NXGE_ERROR | rs; 256 } 257 258 /* 259 * Reset the tail (kick) register to 0. 260 * (Hardware will not reset it. Tx overflow fatal 261 * error if tail is not set to 0 after reset! 262 */ 263 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 264 265 NXGE_DEBUG_MSG((nxgep, TX_CTL, " <== nxge_reset_txdma_channel")); 266 return (status); 267 } 268 269 /* 270 * nxge_init_txdma_channel_event_mask 271 * 272 * Enable interrupts for a set of events. 273 * 274 * Arguments: 275 * nxgep 276 * channel The channel to map. 277 * mask_p The events to enable. 278 * 279 * Notes: 280 * 281 * NPI/NXGE function calls: 282 * npi_txdma_event_mask() 283 * 284 * Registers accessed: 285 * TX_ENT_MSK DMC+0x40020 Transmit Event Mask 286 * 287 * Context: 288 * Any domain 289 */ 290 nxge_status_t 291 nxge_init_txdma_channel_event_mask(p_nxge_t nxgep, uint16_t channel, 292 p_tx_dma_ent_msk_t mask_p) 293 { 294 npi_handle_t handle; 295 npi_status_t rs = NPI_SUCCESS; 296 nxge_status_t status = NXGE_OK; 297 298 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 299 "<== nxge_init_txdma_channel_event_mask")); 300 301 handle = NXGE_DEV_NPI_HANDLE(nxgep); 302 rs = npi_txdma_event_mask(handle, OP_SET, channel, mask_p); 303 if (rs != NPI_SUCCESS) { 304 status = NXGE_ERROR | rs; 305 } 306 307 return (status); 308 } 309 310 /* 311 * nxge_init_txdma_channel_cntl_stat 312 * 313 * Stop a TDC. 
 * Write the supplied value to the channel's control and status
 * (TX_CS) register.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to configure.
 * 	reg_data	The value to write to TX_CS.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_control_status()
 *
 * Registers accessed:
 *	TX_CS		DMC+0x40028 Transmit Control And Status
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_init_txdma_channel_cntl_stat(p_nxge_t nxgep, uint16_t channel,
    uint64_t reg_data)
{
    npi_handle_t	handle;
    npi_status_t	rs = NPI_SUCCESS;
    nxge_status_t	status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, MEM3_CTL,
        "<== nxge_init_txdma_channel_cntl_stat"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    rs = npi_txdma_control_status(handle, OP_SET, channel,
        (p_tx_cs_t)&reg_data);

    if (rs != NPI_SUCCESS) {
        status = NXGE_ERROR | rs;
    }

    return (status);
}

/*
 * nxge_enable_txdma_channel
 *
 * Enable a TDC.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to enable.
 * 	tx_desc_p	channel's transmit descriptor ring.
 * 	mbox_p		channel's mailbox.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	npi_txdma_ring_config()
 *	npi_txdma_mbox_config()
 *	npi_txdma_channel_init_enable()
 *
 * Registers accessed:
 *	TX_RNG_CFIG	DMC+0x40000	Transmit Ring Configuration
 *	TXDMA_MBH	DMC+0x40030	TXDMA Mailbox High
 *	TXDMA_MBL	DMC+0x40038	TXDMA Mailbox Low
 *	TX_CS		DMC+0x40028	Transmit Control And Status
 *
 * Context:
 *	Any domain
 */
nxge_status_t
nxge_enable_txdma_channel(p_nxge_t nxgep,
    uint16_t channel, p_tx_ring_t tx_desc_p, p_tx_mbox_t mbox_p)
{
    npi_handle_t	handle;
    npi_status_t	rs = NPI_SUCCESS;
    nxge_status_t	status = NXGE_OK;

    NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_enable_txdma_channel"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    /*
     * Use configuration data composed at init time.
     * Write to hardware the transmit ring configurations.
     */
    rs = npi_txdma_ring_config(handle, OP_SET, channel,
        (uint64_t *)&(tx_desc_p->tx_ring_cfig.value));

    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }

    if (isLDOMguest(nxgep)) {
        /* Add interrupt handler for this channel. */
        if (nxge_hio_intr_add(nxgep, VP_BOUND_TX, channel) != NXGE_OK)
            return (NXGE_ERROR);
    }

    /* Write to hardware the mailbox */
    rs = npi_txdma_mbox_config(handle, OP_SET, channel,
        (uint64_t *)&mbox_p->tx_mbox.dma_cookie.dmac_laddress);

    if (rs != NPI_SUCCESS) {
        return (NXGE_ERROR | rs);
    }

    /* Start the DMA engine.
*/ 416 rs = npi_txdma_channel_init_enable(handle, channel); 417 418 if (rs != NPI_SUCCESS) { 419 return (NXGE_ERROR | rs); 420 } 421 422 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_enable_txdma_channel")); 423 424 return (status); 425 } 426 427 void 428 nxge_fill_tx_hdr(p_mblk_t mp, boolean_t fill_len, 429 boolean_t l4_cksum, int pkt_len, uint8_t npads, 430 p_tx_pkt_hdr_all_t pkthdrp, 431 t_uscalar_t start_offset, 432 t_uscalar_t stuff_offset) 433 { 434 p_tx_pkt_header_t hdrp; 435 p_mblk_t nmp; 436 uint64_t tmp; 437 size_t mblk_len; 438 size_t iph_len; 439 size_t hdrs_size; 440 uint8_t hdrs_buf[sizeof (struct ether_header) + 441 64 + sizeof (uint32_t)]; 442 uint8_t *cursor; 443 uint8_t *ip_buf; 444 uint16_t eth_type; 445 uint8_t ipproto; 446 boolean_t is_vlan = B_FALSE; 447 size_t eth_hdr_size; 448 449 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: mp $%p", mp)); 450 451 /* 452 * Caller should zero out the headers first. 453 */ 454 hdrp = (p_tx_pkt_header_t)&pkthdrp->pkthdr; 455 456 if (fill_len) { 457 NXGE_DEBUG_MSG((NULL, TX_CTL, 458 "==> nxge_fill_tx_hdr: pkt_len %d " 459 "npads %d", pkt_len, npads)); 460 tmp = (uint64_t)pkt_len; 461 hdrp->value |= (tmp << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT); 462 goto fill_tx_header_done; 463 } 464 465 hdrp->value |= (((uint64_t)npads) << TX_PKT_HEADER_PAD_SHIFT); 466 467 /* 468 * mp is the original data packet (does not include the 469 * Neptune transmit header). 470 */ 471 nmp = mp; 472 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: " 473 "mp $%p b_rptr $%p len %d", 474 mp, nmp->b_rptr, MBLKL(nmp))); 475 /* copy ether_header from mblk to hdrs_buf */ 476 cursor = &hdrs_buf[0]; 477 tmp = sizeof (struct ether_vlan_header); 478 while ((nmp != NULL) && (tmp > 0)) { 479 size_t buflen; 480 mblk_len = MBLKL(nmp); 481 buflen = min((size_t)tmp, mblk_len); 482 bcopy(nmp->b_rptr, cursor, buflen); 483 cursor += buflen; 484 tmp -= buflen; 485 nmp = nmp->b_cont; 486 } 487 488 nmp = mp; 489 mblk_len = MBLKL(nmp); 490 ip_buf = NULL; 491 eth_type = ntohs(((p_ether_header_t)hdrs_buf)->ether_type); 492 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> : nxge_fill_tx_hdr: (value 0x%llx) " 493 "ether type 0x%x", eth_type, hdrp->value)); 494 495 if (eth_type < ETHERMTU) { 496 tmp = 1ull; 497 hdrp->value |= (tmp << TX_PKT_HEADER_LLC_SHIFT); 498 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: LLC " 499 "value 0x%llx", hdrp->value)); 500 if (*(hdrs_buf + sizeof (struct ether_header)) 501 == LLC_SNAP_SAP) { 502 eth_type = ntohs(*((uint16_t *)(hdrs_buf + 503 sizeof (struct ether_header) + 6))); 504 NXGE_DEBUG_MSG((NULL, TX_CTL, 505 "==> nxge_tx_pkt_hdr_init: LLC ether type 0x%x", 506 eth_type)); 507 } else { 508 goto fill_tx_header_done; 509 } 510 } else if (eth_type == VLAN_ETHERTYPE) { 511 tmp = 1ull; 512 hdrp->value |= (tmp << TX_PKT_HEADER_VLAN__SHIFT); 513 514 eth_type = ntohs(((struct ether_vlan_header *) 515 hdrs_buf)->ether_type); 516 is_vlan = B_TRUE; 517 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: VLAN " 518 "value 0x%llx", hdrp->value)); 519 } 520 521 if (!is_vlan) { 522 eth_hdr_size = sizeof (struct ether_header); 523 } else { 524 eth_hdr_size = sizeof (struct ether_vlan_header); 525 } 526 527 switch (eth_type) { 528 case ETHERTYPE_IP: 529 if (mblk_len > eth_hdr_size + sizeof (uint8_t)) { 530 ip_buf = nmp->b_rptr + eth_hdr_size; 531 mblk_len -= eth_hdr_size; 532 iph_len = ((*ip_buf) & 0x0f); 533 if (mblk_len > (iph_len + sizeof (uint32_t))) { 534 ip_buf = nmp->b_rptr; 535 ip_buf += eth_hdr_size; 536 } else { 537 ip_buf = NULL; 538 } 539 540 } 541 if 
(ip_buf == NULL) { 542 hdrs_size = 0; 543 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 544 while ((nmp) && (hdrs_size < 545 sizeof (hdrs_buf))) { 546 mblk_len = (size_t)nmp->b_wptr - 547 (size_t)nmp->b_rptr; 548 if (mblk_len >= 549 (sizeof (hdrs_buf) - hdrs_size)) 550 mblk_len = sizeof (hdrs_buf) - 551 hdrs_size; 552 bcopy(nmp->b_rptr, 553 &hdrs_buf[hdrs_size], mblk_len); 554 hdrs_size += mblk_len; 555 nmp = nmp->b_cont; 556 } 557 ip_buf = hdrs_buf; 558 ip_buf += eth_hdr_size; 559 iph_len = ((*ip_buf) & 0x0f); 560 } 561 562 ipproto = ip_buf[9]; 563 564 tmp = (uint64_t)iph_len; 565 hdrp->value |= (tmp << TX_PKT_HEADER_IHL_SHIFT); 566 tmp = (uint64_t)(eth_hdr_size >> 1); 567 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 568 569 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv4 " 570 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x" 571 "tmp 0x%x", 572 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 573 ipproto, tmp)); 574 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IP " 575 "value 0x%llx", hdrp->value)); 576 577 break; 578 579 case ETHERTYPE_IPV6: 580 hdrs_size = 0; 581 ((p_ether_header_t)hdrs_buf)->ether_type = 0; 582 while ((nmp) && (hdrs_size < 583 sizeof (hdrs_buf))) { 584 mblk_len = (size_t)nmp->b_wptr - (size_t)nmp->b_rptr; 585 if (mblk_len >= 586 (sizeof (hdrs_buf) - hdrs_size)) 587 mblk_len = sizeof (hdrs_buf) - 588 hdrs_size; 589 bcopy(nmp->b_rptr, 590 &hdrs_buf[hdrs_size], mblk_len); 591 hdrs_size += mblk_len; 592 nmp = nmp->b_cont; 593 } 594 ip_buf = hdrs_buf; 595 ip_buf += eth_hdr_size; 596 597 tmp = 1ull; 598 hdrp->value |= (tmp << TX_PKT_HEADER_IP_VER_SHIFT); 599 600 tmp = (eth_hdr_size >> 1); 601 hdrp->value |= (tmp << TX_PKT_HEADER_L3START_SHIFT); 602 603 /* byte 6 is the next header protocol */ 604 ipproto = ip_buf[6]; 605 606 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: IPv6 " 607 " iph_len %d l3start %d eth_hdr_size %d proto 0x%x", 608 iph_len, hdrp->bits.hdw.l3start, eth_hdr_size, 609 ipproto)); 610 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: IPv6 " 611 "value 0x%llx", hdrp->value)); 612 613 break; 614 615 default: 616 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: non-IP")); 617 goto fill_tx_header_done; 618 } 619 620 switch (ipproto) { 621 case IPPROTO_TCP: 622 NXGE_DEBUG_MSG((NULL, TX_CTL, 623 "==> nxge_fill_tx_hdr: TCP (cksum flag %d)", l4_cksum)); 624 if (l4_cksum) { 625 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_TCP; 626 hdrp->value |= 627 (((uint64_t)(start_offset >> 1)) << 628 TX_PKT_HEADER_L4START_SHIFT); 629 hdrp->value |= 630 (((uint64_t)(stuff_offset >> 1)) << 631 TX_PKT_HEADER_L4STUFF_SHIFT); 632 633 NXGE_DEBUG_MSG((NULL, TX_CTL, 634 "==> nxge_tx_pkt_hdr_init: TCP CKSUM " 635 "value 0x%llx", hdrp->value)); 636 } 637 638 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_hdr_init: TCP " 639 "value 0x%llx", hdrp->value)); 640 break; 641 642 case IPPROTO_UDP: 643 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_fill_tx_hdr: UDP")); 644 if (l4_cksum) { 645 if (!nxge_cksum_offload) { 646 uint16_t *up; 647 uint16_t cksum; 648 t_uscalar_t stuff_len; 649 650 /* 651 * The checksum field has the 652 * partial checksum. 653 * IP_CSUM() macro calls ip_cksum() which 654 * can add in the partial checksum. 
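 * A minimal sketch of the one's-complement folding that the IP_CSUM()
 * path performs (illustrative only, not the actual macro expansion;
 * sum16() is a hypothetical helper named only for this sketch):
 *
 *	uint32_t sum = partial;		-- pseudo-header sum left by the stack
 *	sum += sum16(payload);		-- 16-bit one's-complement sum
 *	while (sum >> 16)
 *		sum = (sum & 0xffff) + (sum >> 16);
 *	cksum = ~sum & 0xffff;		-- value written back at stuff_offset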
655 */ 656 cksum = IP_CSUM(mp, start_offset, 0); 657 stuff_len = stuff_offset; 658 nmp = mp; 659 mblk_len = MBLKL(nmp); 660 while ((nmp != NULL) && 661 (mblk_len < stuff_len)) { 662 stuff_len -= mblk_len; 663 nmp = nmp->b_cont; 664 } 665 ASSERT(nmp); 666 up = (uint16_t *)(nmp->b_rptr + stuff_len); 667 668 *up = cksum; 669 hdrp->value &= ~TX_CKSUM_EN_PKT_TYPE_UDP; 670 NXGE_DEBUG_MSG((NULL, TX_CTL, 671 "==> nxge_tx_pkt_hdr_init: UDP offset %d " 672 "use sw cksum " 673 "write to $%p cksum 0x%x content up 0x%x", 674 stuff_len, 675 up, 676 cksum, 677 *up)); 678 } else { 679 /* Hardware will compute the full checksum */ 680 hdrp->value |= TX_CKSUM_EN_PKT_TYPE_UDP; 681 hdrp->value |= 682 (((uint64_t)(start_offset >> 1)) << 683 TX_PKT_HEADER_L4START_SHIFT); 684 hdrp->value |= 685 (((uint64_t)(stuff_offset >> 1)) << 686 TX_PKT_HEADER_L4STUFF_SHIFT); 687 688 NXGE_DEBUG_MSG((NULL, TX_CTL, 689 "==> nxge_tx_pkt_hdr_init: UDP offset %d " 690 " use partial checksum " 691 "cksum 0x%x ", 692 "value 0x%llx", 693 stuff_offset, 694 IP_CSUM(mp, start_offset, 0), 695 hdrp->value)); 696 } 697 } 698 699 NXGE_DEBUG_MSG((NULL, TX_CTL, 700 "==> nxge_tx_pkt_hdr_init: UDP" 701 "value 0x%llx", hdrp->value)); 702 break; 703 704 default: 705 goto fill_tx_header_done; 706 } 707 708 fill_tx_header_done: 709 NXGE_DEBUG_MSG((NULL, TX_CTL, 710 "==> nxge_fill_tx_hdr: pkt_len %d " 711 "npads %d value 0x%llx", pkt_len, npads, hdrp->value)); 712 713 NXGE_DEBUG_MSG((NULL, TX_CTL, "<== nxge_fill_tx_hdr")); 714 } 715 716 /*ARGSUSED*/ 717 p_mblk_t 718 nxge_tx_pkt_header_reserve(p_mblk_t mp, uint8_t *npads) 719 { 720 p_mblk_t newmp = NULL; 721 722 if ((newmp = allocb(TX_PKT_HEADER_SIZE, BPRI_MED)) == NULL) { 723 NXGE_DEBUG_MSG((NULL, TX_CTL, 724 "<== nxge_tx_pkt_header_reserve: allocb failed")); 725 return (NULL); 726 } 727 728 NXGE_DEBUG_MSG((NULL, TX_CTL, 729 "==> nxge_tx_pkt_header_reserve: get new mp")); 730 DB_TYPE(newmp) = M_DATA; 731 newmp->b_rptr = newmp->b_wptr = DB_LIM(newmp); 732 linkb(newmp, mp); 733 newmp->b_rptr -= TX_PKT_HEADER_SIZE; 734 735 NXGE_DEBUG_MSG((NULL, TX_CTL, "==>nxge_tx_pkt_header_reserve: " 736 "b_rptr $%p b_wptr $%p", 737 newmp->b_rptr, newmp->b_wptr)); 738 739 NXGE_DEBUG_MSG((NULL, TX_CTL, 740 "<== nxge_tx_pkt_header_reserve: use new mp")); 741 742 return (newmp); 743 } 744 745 int 746 nxge_tx_pkt_nmblocks(p_mblk_t mp, int *tot_xfer_len_p) 747 { 748 uint_t nmblks; 749 ssize_t len; 750 uint_t pkt_len; 751 p_mblk_t nmp, bmp, tmp; 752 uint8_t *b_wptr; 753 754 NXGE_DEBUG_MSG((NULL, TX_CTL, 755 "==> nxge_tx_pkt_nmblocks: mp $%p rptr $%p wptr $%p " 756 "len %d", mp, mp->b_rptr, mp->b_wptr, MBLKL(mp))); 757 758 nmp = mp; 759 bmp = mp; 760 nmblks = 0; 761 pkt_len = 0; 762 *tot_xfer_len_p = 0; 763 764 while (nmp) { 765 len = MBLKL(nmp); 766 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 767 "len %d pkt_len %d nmblks %d tot_xfer_len %d", 768 len, pkt_len, nmblks, 769 *tot_xfer_len_p)); 770 771 if (len <= 0) { 772 bmp = nmp; 773 nmp = nmp->b_cont; 774 NXGE_DEBUG_MSG((NULL, TX_CTL, 775 "==> nxge_tx_pkt_nmblocks: " 776 "len (0) pkt_len %d nmblks %d", 777 pkt_len, nmblks)); 778 continue; 779 } 780 781 *tot_xfer_len_p += len; 782 NXGE_DEBUG_MSG((NULL, TX_CTL, "==> nxge_tx_pkt_nmblocks: " 783 "len %d pkt_len %d nmblks %d tot_xfer_len %d", 784 len, pkt_len, nmblks, 785 *tot_xfer_len_p)); 786 787 if (len < nxge_bcopy_thresh) { 788 NXGE_DEBUG_MSG((NULL, TX_CTL, 789 "==> nxge_tx_pkt_nmblocks: " 790 "len %d (< thresh) pkt_len %d nmblks %d", 791 len, pkt_len, nmblks)); 792 if (pkt_len == 0) 793 nmblks++; 794 
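			/*
			 * Fragments shorter than nxge_bcopy_thresh are
			 * expected to be bcopy'd into a single pre-mapped
			 * transmit buffer, so a run of small fragments
			 * consumes one gather pointer: nmblks is bumped
			 * only when a new bcopy run starts (pkt_len == 0).
			 */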
pkt_len += len; 795 if (pkt_len >= nxge_bcopy_thresh) { 796 pkt_len = 0; 797 len = 0; 798 nmp = bmp; 799 } 800 } else { 801 NXGE_DEBUG_MSG((NULL, TX_CTL, 802 "==> nxge_tx_pkt_nmblocks: " 803 "len %d (> thresh) pkt_len %d nmblks %d", 804 len, pkt_len, nmblks)); 805 pkt_len = 0; 806 nmblks++; 807 /* 808 * Hardware limits the transfer length to 4K. 809 * If len is more than 4K, we need to break 810 * it up to at most 2 more blocks. 811 */ 812 if (len > TX_MAX_TRANSFER_LENGTH) { 813 uint32_t nsegs; 814 815 nsegs = 1; 816 NXGE_DEBUG_MSG((NULL, TX_CTL, 817 "==> nxge_tx_pkt_nmblocks: " 818 "len %d pkt_len %d nmblks %d nsegs %d", 819 len, pkt_len, nmblks, nsegs)); 820 if (len % (TX_MAX_TRANSFER_LENGTH * 2)) { 821 ++nsegs; 822 } 823 do { 824 b_wptr = nmp->b_rptr + 825 TX_MAX_TRANSFER_LENGTH; 826 nmp->b_wptr = b_wptr; 827 if ((tmp = dupb(nmp)) == NULL) { 828 return (0); 829 } 830 tmp->b_rptr = b_wptr; 831 tmp->b_wptr = nmp->b_wptr; 832 tmp->b_cont = nmp->b_cont; 833 nmp->b_cont = tmp; 834 nmblks++; 835 if (--nsegs) { 836 nmp = tmp; 837 } 838 } while (nsegs); 839 nmp = tmp; 840 } 841 } 842 843 /* 844 * Hardware limits the transmit gather pointers to 15. 845 */ 846 if (nmp->b_cont && (nmblks + TX_GATHER_POINTERS_THRESHOLD) > 847 TX_MAX_GATHER_POINTERS) { 848 NXGE_DEBUG_MSG((NULL, TX_CTL, 849 "==> nxge_tx_pkt_nmblocks: pull msg - " 850 "len %d pkt_len %d nmblks %d", 851 len, pkt_len, nmblks)); 852 /* Pull all message blocks from b_cont */ 853 if ((tmp = msgpullup(nmp->b_cont, -1)) == NULL) { 854 return (0); 855 } 856 freemsg(nmp->b_cont); 857 nmp->b_cont = tmp; 858 pkt_len = 0; 859 } 860 bmp = nmp; 861 nmp = nmp->b_cont; 862 } 863 864 NXGE_DEBUG_MSG((NULL, TX_CTL, 865 "<== nxge_tx_pkt_nmblocks: rptr $%p wptr $%p " 866 "nmblks %d len %d tot_xfer_len %d", 867 mp->b_rptr, mp->b_wptr, nmblks, 868 MBLKL(mp), *tot_xfer_len_p)); 869 870 return (nmblks); 871 } 872 873 boolean_t 874 nxge_txdma_reclaim(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, int nmblks) 875 { 876 boolean_t status = B_TRUE; 877 p_nxge_dma_common_t tx_desc_dma_p; 878 nxge_dma_common_t desc_area; 879 p_tx_desc_t tx_desc_ring_vp; 880 p_tx_desc_t tx_desc_p; 881 p_tx_desc_t tx_desc_pp; 882 tx_desc_t r_tx_desc; 883 p_tx_msg_t tx_msg_ring; 884 p_tx_msg_t tx_msg_p; 885 npi_handle_t handle; 886 tx_ring_hdl_t tx_head; 887 uint32_t pkt_len; 888 uint_t tx_rd_index; 889 uint16_t head_index, tail_index; 890 uint8_t tdc; 891 boolean_t head_wrap, tail_wrap; 892 p_nxge_tx_ring_stats_t tdc_stats; 893 int rc; 894 895 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_reclaim")); 896 897 status = ((tx_ring_p->descs_pending < nxge_reclaim_pending) && 898 (nmblks != 0)); 899 NXGE_DEBUG_MSG((nxgep, TX_CTL, 900 "==> nxge_txdma_reclaim: pending %d reclaim %d nmblks %d", 901 tx_ring_p->descs_pending, nxge_reclaim_pending, 902 nmblks)); 903 if (!status) { 904 tx_desc_dma_p = &tx_ring_p->tdc_desc; 905 desc_area = tx_ring_p->tdc_desc; 906 handle = NXGE_DEV_NPI_HANDLE(nxgep); 907 tx_desc_ring_vp = tx_desc_dma_p->kaddrp; 908 tx_desc_ring_vp = 909 (p_tx_desc_t)DMA_COMMON_VPTR(desc_area); 910 tx_rd_index = tx_ring_p->rd_index; 911 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 912 tx_msg_ring = tx_ring_p->tx_msg_ring; 913 tx_msg_p = &tx_msg_ring[tx_rd_index]; 914 tdc = tx_ring_p->tdc; 915 tdc_stats = tx_ring_p->tdc_stats; 916 if (tx_ring_p->descs_pending > tdc_stats->tx_max_pend) { 917 tdc_stats->tx_max_pend = tx_ring_p->descs_pending; 918 } 919 920 tail_index = tx_ring_p->wr_index; 921 tail_wrap = tx_ring_p->wr_index_wrap; 922 923 NXGE_DEBUG_MSG((nxgep, TX_CTL, 924 "==> 
nxge_txdma_reclaim: tdc %d tx_rd_index %d " 925 "tail_index %d tail_wrap %d " 926 "tx_desc_p $%p ($%p) ", 927 tdc, tx_rd_index, tail_index, tail_wrap, 928 tx_desc_p, (*(uint64_t *)tx_desc_p))); 929 /* 930 * Read the hardware maintained transmit head 931 * and wrap around bit. 932 */ 933 TXDMA_REG_READ64(handle, TX_RING_HDL_REG, tdc, &tx_head.value); 934 head_index = tx_head.bits.ldw.head; 935 head_wrap = tx_head.bits.ldw.wrap; 936 NXGE_DEBUG_MSG((nxgep, TX_CTL, 937 "==> nxge_txdma_reclaim: " 938 "tx_rd_index %d tail %d tail_wrap %d " 939 "head %d wrap %d", 940 tx_rd_index, tail_index, tail_wrap, 941 head_index, head_wrap)); 942 943 if (head_index == tail_index) { 944 if (TXDMA_RING_EMPTY(head_index, head_wrap, 945 tail_index, tail_wrap) && 946 (head_index == tx_rd_index)) { 947 NXGE_DEBUG_MSG((nxgep, TX_CTL, 948 "==> nxge_txdma_reclaim: EMPTY")); 949 return (B_TRUE); 950 } 951 952 NXGE_DEBUG_MSG((nxgep, TX_CTL, 953 "==> nxge_txdma_reclaim: Checking " 954 "if ring full")); 955 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 956 tail_wrap)) { 957 NXGE_DEBUG_MSG((nxgep, TX_CTL, 958 "==> nxge_txdma_reclaim: full")); 959 return (B_FALSE); 960 } 961 } 962 963 NXGE_DEBUG_MSG((nxgep, TX_CTL, 964 "==> nxge_txdma_reclaim: tx_rd_index and head_index")); 965 966 tx_desc_pp = &r_tx_desc; 967 while ((tx_rd_index != head_index) && 968 (tx_ring_p->descs_pending != 0)) { 969 970 NXGE_DEBUG_MSG((nxgep, TX_CTL, 971 "==> nxge_txdma_reclaim: Checking if pending")); 972 973 NXGE_DEBUG_MSG((nxgep, TX_CTL, 974 "==> nxge_txdma_reclaim: " 975 "descs_pending %d ", 976 tx_ring_p->descs_pending)); 977 978 NXGE_DEBUG_MSG((nxgep, TX_CTL, 979 "==> nxge_txdma_reclaim: " 980 "(tx_rd_index %d head_index %d " 981 "(tx_desc_p $%p)", 982 tx_rd_index, head_index, 983 tx_desc_p)); 984 985 tx_desc_pp->value = tx_desc_p->value; 986 NXGE_DEBUG_MSG((nxgep, TX_CTL, 987 "==> nxge_txdma_reclaim: " 988 "(tx_rd_index %d head_index %d " 989 "tx_desc_p $%p (desc value 0x%llx) ", 990 tx_rd_index, head_index, 991 tx_desc_pp, (*(uint64_t *)tx_desc_pp))); 992 993 NXGE_DEBUG_MSG((nxgep, TX_CTL, 994 "==> nxge_txdma_reclaim: dump desc:")); 995 996 pkt_len = tx_desc_pp->bits.hdw.tr_len; 997 tdc_stats->obytes += pkt_len; 998 tdc_stats->opackets += tx_desc_pp->bits.hdw.sop; 999 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1000 "==> nxge_txdma_reclaim: pkt_len %d " 1001 "tdc channel %d opackets %d", 1002 pkt_len, 1003 tdc, 1004 tdc_stats->opackets)); 1005 1006 if (tx_msg_p->flags.dma_type == USE_DVMA) { 1007 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1008 "tx_desc_p = $%p " 1009 "tx_desc_pp = $%p " 1010 "index = %d", 1011 tx_desc_p, 1012 tx_desc_pp, 1013 tx_ring_p->rd_index)); 1014 (void) dvma_unload(tx_msg_p->dvma_handle, 1015 0, -1); 1016 tx_msg_p->dvma_handle = NULL; 1017 if (tx_ring_p->dvma_wr_index == 1018 tx_ring_p->dvma_wrap_mask) { 1019 tx_ring_p->dvma_wr_index = 0; 1020 } else { 1021 tx_ring_p->dvma_wr_index++; 1022 } 1023 tx_ring_p->dvma_pending--; 1024 } else if (tx_msg_p->flags.dma_type == 1025 USE_DMA) { 1026 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1027 "==> nxge_txdma_reclaim: " 1028 "USE DMA")); 1029 if (rc = ddi_dma_unbind_handle 1030 (tx_msg_p->dma_handle)) { 1031 cmn_err(CE_WARN, "!nxge_reclaim: " 1032 "ddi_dma_unbind_handle " 1033 "failed. status %d", rc); 1034 } 1035 } 1036 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1037 "==> nxge_txdma_reclaim: count packets")); 1038 /* 1039 * count a chained packet only once. 
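 * The mblk chain is saved on a single tx_msg entry when the packet is
 * queued, so freemsg() below is expected to run once per packet even
 * though several descriptors may be reclaimed for it.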
1040 */ 1041 if (tx_msg_p->tx_message != NULL) { 1042 freemsg(tx_msg_p->tx_message); 1043 tx_msg_p->tx_message = NULL; 1044 } 1045 1046 tx_msg_p->flags.dma_type = USE_NONE; 1047 tx_rd_index = tx_ring_p->rd_index; 1048 tx_rd_index = (tx_rd_index + 1) & 1049 tx_ring_p->tx_wrap_mask; 1050 tx_ring_p->rd_index = tx_rd_index; 1051 tx_ring_p->descs_pending--; 1052 tx_desc_p = &tx_desc_ring_vp[tx_rd_index]; 1053 tx_msg_p = &tx_msg_ring[tx_rd_index]; 1054 } 1055 1056 status = (nmblks <= (tx_ring_p->tx_ring_size - 1057 tx_ring_p->descs_pending - 1058 TX_FULL_MARK)); 1059 if (status) { 1060 cas32((uint32_t *)&tx_ring_p->queueing, 1, 0); 1061 } 1062 } else { 1063 status = (nmblks <= 1064 (tx_ring_p->tx_ring_size - 1065 tx_ring_p->descs_pending - 1066 TX_FULL_MARK)); 1067 } 1068 1069 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1070 "<== nxge_txdma_reclaim status = 0x%08x", status)); 1071 1072 return (status); 1073 } 1074 1075 /* 1076 * nxge_tx_intr 1077 * 1078 * Process a TDC interrupt 1079 * 1080 * Arguments: 1081 * arg1 A Logical Device state Vector (LSV) data structure. 1082 * arg2 nxge_t * 1083 * 1084 * Notes: 1085 * 1086 * NPI/NXGE function calls: 1087 * npi_txdma_control_status() 1088 * npi_intr_ldg_mgmt_set() 1089 * 1090 * nxge_tx_err_evnts() 1091 * nxge_txdma_reclaim() 1092 * 1093 * Registers accessed: 1094 * TX_CS DMC+0x40028 Transmit Control And Status 1095 * PIO_LDSV 1096 * 1097 * Context: 1098 * Any domain 1099 */ 1100 uint_t 1101 nxge_tx_intr(void *arg1, void *arg2) 1102 { 1103 p_nxge_ldv_t ldvp = (p_nxge_ldv_t)arg1; 1104 p_nxge_t nxgep = (p_nxge_t)arg2; 1105 p_nxge_ldg_t ldgp; 1106 uint8_t channel; 1107 uint32_t vindex; 1108 npi_handle_t handle; 1109 tx_cs_t cs; 1110 p_tx_ring_t *tx_rings; 1111 p_tx_ring_t tx_ring_p; 1112 npi_status_t rs = NPI_SUCCESS; 1113 uint_t serviced = DDI_INTR_UNCLAIMED; 1114 nxge_status_t status = NXGE_OK; 1115 1116 if (ldvp == NULL) { 1117 NXGE_DEBUG_MSG((NULL, INT_CTL, 1118 "<== nxge_tx_intr: nxgep $%p ldvp $%p", 1119 nxgep, ldvp)); 1120 return (DDI_INTR_UNCLAIMED); 1121 } 1122 1123 if (arg2 == NULL || (void *)ldvp->nxgep != arg2) { 1124 nxgep = ldvp->nxgep; 1125 } 1126 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1127 "==> nxge_tx_intr: nxgep(arg2) $%p ldvp(arg1) $%p", 1128 nxgep, ldvp)); 1129 /* 1130 * This interrupt handler is for a specific 1131 * transmit dma channel. 1132 */ 1133 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1134 /* Get the control and status for this channel. */ 1135 channel = ldvp->channel; 1136 ldgp = ldvp->ldgp; 1137 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1138 "==> nxge_tx_intr: nxgep $%p ldvp (ldvp) $%p " 1139 "channel %d", 1140 nxgep, ldvp, channel)); 1141 1142 rs = npi_txdma_control_status(handle, OP_GET, channel, &cs); 1143 vindex = ldvp->vdma_index; 1144 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1145 "==> nxge_tx_intr:channel %d ring index %d status 0x%08x", 1146 channel, vindex, rs)); 1147 if (!rs && cs.bits.ldw.mk) { 1148 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1149 "==> nxge_tx_intr:channel %d ring index %d " 1150 "status 0x%08x (mk bit set)", 1151 channel, vindex, rs)); 1152 tx_rings = nxgep->tx_rings->rings; 1153 tx_ring_p = tx_rings[vindex]; 1154 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1155 "==> nxge_tx_intr:channel %d ring index %d " 1156 "status 0x%08x (mk bit set, calling reclaim)", 1157 channel, vindex, rs)); 1158 1159 MUTEX_ENTER(&tx_ring_p->lock); 1160 (void) nxge_txdma_reclaim(nxgep, tx_rings[vindex], 0); 1161 MUTEX_EXIT(&tx_ring_p->lock); 1162 mac_tx_update(nxgep->mach); 1163 } 1164 1165 /* 1166 * Process other transmit control and status. 1167 * Check the ldv state. 
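 * nxge_tx_err_evnts() examines the error bits in the TX_CS value read
 * above and, for fatal errors, starts channel recovery.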
1168 */ 1169 status = nxge_tx_err_evnts(nxgep, ldvp->vdma_index, ldvp, cs); 1170 /* 1171 * Rearm this logical group if this is a single device 1172 * group. 1173 */ 1174 if (ldgp->nldvs == 1) { 1175 NXGE_DEBUG_MSG((nxgep, INT_CTL, 1176 "==> nxge_tx_intr: rearm")); 1177 if (status == NXGE_OK) { 1178 if (isLDOMguest(nxgep)) { 1179 nxge_hio_ldgimgn(nxgep, ldgp); 1180 } else { 1181 (void) npi_intr_ldg_mgmt_set(handle, ldgp->ldg, 1182 B_TRUE, ldgp->ldg_timer); 1183 } 1184 } 1185 } 1186 1187 NXGE_DEBUG_MSG((nxgep, INT_CTL, "<== nxge_tx_intr")); 1188 serviced = DDI_INTR_CLAIMED; 1189 return (serviced); 1190 } 1191 1192 void 1193 nxge_txdma_stop(p_nxge_t nxgep) /* Dead */ 1194 { 1195 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop")); 1196 1197 (void) nxge_link_monitor(nxgep, LINK_MONITOR_STOP); 1198 1199 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop")); 1200 } 1201 1202 void 1203 nxge_txdma_stop_start(p_nxge_t nxgep) /* Dead */ 1204 { 1205 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_start")); 1206 1207 (void) nxge_txdma_stop(nxgep); 1208 1209 (void) nxge_fixup_txdma_rings(nxgep); 1210 (void) nxge_txdma_hw_mode(nxgep, NXGE_DMA_START); 1211 (void) nxge_tx_mac_enable(nxgep); 1212 (void) nxge_txdma_hw_kick(nxgep); 1213 1214 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_start")); 1215 } 1216 1217 npi_status_t 1218 nxge_txdma_channel_disable( 1219 nxge_t *nxge, 1220 int channel) 1221 { 1222 npi_handle_t handle = NXGE_DEV_NPI_HANDLE(nxge); 1223 npi_status_t rs; 1224 tdmc_intr_dbg_t intr_dbg; 1225 1226 /* 1227 * Stop the dma channel and wait for the stop-done. 1228 * If the stop-done bit is not present, then force 1229 * an error so TXC will stop. 1230 * All channels bound to this port need to be stopped 1231 * and reset after injecting an interrupt error. 1232 */ 1233 rs = npi_txdma_channel_disable(handle, channel); 1234 NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1235 "==> nxge_txdma_channel_disable(%d) " 1236 "rs 0x%x", channel, rs)); 1237 if (rs != NPI_SUCCESS) { 1238 /* Inject any error */ 1239 intr_dbg.value = 0; 1240 intr_dbg.bits.ldw.nack_pref = 1; 1241 NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1242 "==> nxge_txdma_hw_mode: " 1243 "channel %d (stop failed 0x%x) " 1244 "(inject err)", rs, channel)); 1245 (void) npi_txdma_inj_int_error_set( 1246 handle, channel, &intr_dbg); 1247 rs = npi_txdma_channel_disable(handle, channel); 1248 NXGE_DEBUG_MSG((nxge, MEM3_CTL, 1249 "==> nxge_txdma_hw_mode: " 1250 "channel %d (stop again 0x%x) " 1251 "(after inject err)", 1252 rs, channel)); 1253 } 1254 1255 return (rs); 1256 } 1257 1258 /* 1259 * nxge_txdma_hw_mode 1260 * 1261 * Toggle all TDCs on (enable) or off (disable). 1262 * 1263 * Arguments: 1264 * nxgep 1265 * enable Enable or disable a TDC. 
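 *		(B_TRUE enables, and B_FALSE disables, every TDC in this
 *		instance's tx_set.owned.map.)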
1266 * 1267 * Notes: 1268 * 1269 * NPI/NXGE function calls: 1270 * npi_txdma_channel_enable(TX_CS) 1271 * npi_txdma_channel_disable(TX_CS) 1272 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG) 1273 * 1274 * Registers accessed: 1275 * TX_CS DMC+0x40028 Transmit Control And Status 1276 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 1277 * 1278 * Context: 1279 * Any domain 1280 */ 1281 nxge_status_t 1282 nxge_txdma_hw_mode(p_nxge_t nxgep, boolean_t enable) 1283 { 1284 nxge_grp_set_t *set = &nxgep->tx_set; 1285 1286 npi_handle_t handle; 1287 nxge_status_t status; 1288 npi_status_t rs; 1289 int tdc; 1290 1291 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1292 "==> nxge_txdma_hw_mode: enable mode %d", enable)); 1293 1294 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 1295 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1296 "<== nxge_txdma_mode: not initialized")); 1297 return (NXGE_ERROR); 1298 } 1299 1300 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1301 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1302 "<== nxge_txdma_hw_mode: NULL ring pointer(s)")); 1303 return (NXGE_ERROR); 1304 } 1305 1306 /* Enable or disable all of the TDCs owned by us. */ 1307 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1308 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1309 if ((1 << tdc) & set->owned.map) { 1310 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1311 if (ring) { 1312 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1313 "==> nxge_txdma_hw_mode: channel %d", tdc)); 1314 if (enable) { 1315 rs = npi_txdma_channel_enable 1316 (handle, tdc); 1317 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1318 "==> nxge_txdma_hw_mode: " 1319 "channel %d (enable) rs 0x%x", 1320 tdc, rs)); 1321 } else { 1322 rs = nxge_txdma_channel_disable 1323 (nxgep, tdc); 1324 } 1325 } 1326 } 1327 } 1328 1329 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1330 1331 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1332 "<== nxge_txdma_hw_mode: status 0x%x", status)); 1333 1334 return (status); 1335 } 1336 1337 void 1338 nxge_txdma_enable_channel(p_nxge_t nxgep, uint16_t channel) 1339 { 1340 npi_handle_t handle; 1341 1342 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1343 "==> nxge_txdma_enable_channel: channel %d", channel)); 1344 1345 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1346 /* enable the transmit dma channels */ 1347 (void) npi_txdma_channel_enable(handle, channel); 1348 1349 NXGE_DEBUG_MSG((nxgep, DMA_CTL, "<== nxge_txdma_enable_channel")); 1350 } 1351 1352 void 1353 nxge_txdma_disable_channel(p_nxge_t nxgep, uint16_t channel) 1354 { 1355 npi_handle_t handle; 1356 1357 NXGE_DEBUG_MSG((nxgep, DMA_CTL, 1358 "==> nxge_txdma_disable_channel: channel %d", channel)); 1359 1360 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1361 /* stop the transmit dma channels */ 1362 (void) npi_txdma_channel_disable(handle, channel); 1363 1364 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_disable_channel")); 1365 } 1366 1367 /* 1368 * nxge_txdma_stop_inj_err 1369 * 1370 * Stop a TDC. If at first we don't succeed, inject an error. 1371 * 1372 * Arguments: 1373 * nxgep 1374 * channel The channel to stop. 
1375 * 1376 * Notes: 1377 * 1378 * NPI/NXGE function calls: 1379 * npi_txdma_channel_disable() 1380 * npi_txdma_inj_int_error_set() 1381 * #if defined(NXGE_DEBUG) 1382 * nxge_txdma_regs_dump_channels(nxgep); 1383 * #endif 1384 * 1385 * Registers accessed: 1386 * TX_CS DMC+0x40028 Transmit Control And Status 1387 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 1388 * 1389 * Context: 1390 * Any domain 1391 */ 1392 int 1393 nxge_txdma_stop_inj_err(p_nxge_t nxgep, int channel) 1394 { 1395 npi_handle_t handle; 1396 tdmc_intr_dbg_t intr_dbg; 1397 int status; 1398 npi_status_t rs = NPI_SUCCESS; 1399 1400 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_stop_inj_err")); 1401 /* 1402 * Stop the dma channel waits for the stop done. 1403 * If the stop done bit is not set, then create 1404 * an error. 1405 */ 1406 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1407 rs = npi_txdma_channel_disable(handle, channel); 1408 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1409 if (status == NXGE_OK) { 1410 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1411 "<== nxge_txdma_stop_inj_err (channel %d): " 1412 "stopped OK", channel)); 1413 return (status); 1414 } 1415 1416 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1417 "==> nxge_txdma_stop_inj_err (channel %d): stop failed (0x%x) " 1418 "injecting error", channel, rs)); 1419 /* Inject any error */ 1420 intr_dbg.value = 0; 1421 intr_dbg.bits.ldw.nack_pref = 1; 1422 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 1423 1424 /* Stop done bit will be set as a result of error injection */ 1425 rs = npi_txdma_channel_disable(handle, channel); 1426 status = ((rs == NPI_SUCCESS) ? NXGE_OK : NXGE_ERROR | rs); 1427 if (!(rs & NPI_TXDMA_STOP_FAILED)) { 1428 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1429 "<== nxge_txdma_stop_inj_err (channel %d): " 1430 "stopped OK ", channel)); 1431 return (status); 1432 } 1433 1434 #if defined(NXGE_DEBUG) 1435 nxge_txdma_regs_dump_channels(nxgep); 1436 #endif 1437 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 1438 "==> nxge_txdma_stop_inj_err (channel): stop failed (0x%x) " 1439 " (injected error but still not stopped)", channel, rs)); 1440 1441 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_stop_inj_err")); 1442 return (status); 1443 } 1444 1445 /*ARGSUSED*/ 1446 void 1447 nxge_fixup_txdma_rings(p_nxge_t nxgep) 1448 { 1449 nxge_grp_set_t *set = &nxgep->tx_set; 1450 int tdc; 1451 1452 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_txdma_rings")); 1453 1454 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1455 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1456 "<== nxge_fixup_txdma_rings: NULL ring pointer(s)")); 1457 return; 1458 } 1459 1460 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1461 if ((1 << tdc) & set->owned.map) { 1462 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1463 if (ring) { 1464 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1465 "==> nxge_fixup_txdma_rings: channel %d", 1466 tdc)); 1467 nxge_txdma_fixup_channel(nxgep, ring, tdc); 1468 } 1469 } 1470 } 1471 1472 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_txdma_rings")); 1473 } 1474 1475 /*ARGSUSED*/ 1476 void 1477 nxge_txdma_fix_channel(p_nxge_t nxgep, uint16_t channel) 1478 { 1479 p_tx_ring_t ring_p; 1480 1481 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_channel")); 1482 ring_p = nxge_txdma_get_ring(nxgep, channel); 1483 if (ring_p == NULL) { 1484 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 1485 return; 1486 } 1487 1488 if (ring_p->tdc != channel) { 1489 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1490 "<== nxge_txdma_fix_channel: channel not matched " 1491 "ring tdc %d passed channel", 
1492 ring_p->tdc, channel)); 1493 return; 1494 } 1495 1496 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 1497 1498 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_channel")); 1499 } 1500 1501 /*ARGSUSED*/ 1502 void 1503 nxge_txdma_fixup_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 1504 { 1505 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_channel")); 1506 1507 if (ring_p == NULL) { 1508 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1509 "<== nxge_txdma_fixup_channel: NULL ring pointer")); 1510 return; 1511 } 1512 1513 if (ring_p->tdc != channel) { 1514 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1515 "<== nxge_txdma_fixup_channel: channel not matched " 1516 "ring tdc %d passed channel", 1517 ring_p->tdc, channel)); 1518 return; 1519 } 1520 1521 MUTEX_ENTER(&ring_p->lock); 1522 (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 1523 ring_p->rd_index = 0; 1524 ring_p->wr_index = 0; 1525 ring_p->ring_head.value = 0; 1526 ring_p->ring_kick_tail.value = 0; 1527 ring_p->descs_pending = 0; 1528 MUTEX_EXIT(&ring_p->lock); 1529 1530 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_channel")); 1531 } 1532 1533 /*ARGSUSED*/ 1534 void 1535 nxge_txdma_hw_kick(p_nxge_t nxgep) 1536 { 1537 nxge_grp_set_t *set = &nxgep->tx_set; 1538 int tdc; 1539 1540 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick")); 1541 1542 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1543 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1544 "<== nxge_txdma_hw_kick: NULL ring pointer(s)")); 1545 return; 1546 } 1547 1548 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1549 if ((1 << tdc) & set->owned.map) { 1550 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1551 if (ring) { 1552 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 1553 "==> nxge_txdma_hw_kick: channel %d", tdc)); 1554 nxge_txdma_hw_kick_channel(nxgep, ring, tdc); 1555 } 1556 } 1557 } 1558 1559 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick")); 1560 } 1561 1562 /*ARGSUSED*/ 1563 void 1564 nxge_txdma_kick_channel(p_nxge_t nxgep, uint16_t channel) 1565 { 1566 p_tx_ring_t ring_p; 1567 1568 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_kick_channel")); 1569 1570 ring_p = nxge_txdma_get_ring(nxgep, channel); 1571 if (ring_p == NULL) { 1572 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1573 " nxge_txdma_kick_channel")); 1574 return; 1575 } 1576 1577 if (ring_p->tdc != channel) { 1578 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1579 "<== nxge_txdma_kick_channel: channel not matched " 1580 "ring tdc %d passed channel", 1581 ring_p->tdc, channel)); 1582 return; 1583 } 1584 1585 nxge_txdma_hw_kick_channel(nxgep, ring_p, channel); 1586 1587 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_kick_channel")); 1588 } 1589 1590 /*ARGSUSED*/ 1591 void 1592 nxge_txdma_hw_kick_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, uint16_t channel) 1593 { 1594 1595 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hw_kick_channel")); 1596 1597 if (ring_p == NULL) { 1598 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1599 "<== nxge_txdma_hw_kick_channel: NULL ring pointer")); 1600 return; 1601 } 1602 1603 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hw_kick_channel")); 1604 } 1605 1606 /* 1607 * nxge_check_tx_hang 1608 * 1609 * Check the state of all TDCs belonging to nxgep. 1610 * 1611 * Arguments: 1612 * nxgep 1613 * 1614 * Notes: 1615 * Called by nxge_hw.c:nxge_check_hw_state(). 
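 *	If any owned TDC appears to be hung, nxge_fixup_hung_txdma_rings()
 *	is called to reclaim the owned rings and stop their channels.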
 *
 * NPI/NXGE function calls:
 *
 * Registers accessed:
 *
 * Context:
 *	Any domain
 */
/*ARGSUSED*/
void
nxge_check_tx_hang(p_nxge_t nxgep)
{
    NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_check_tx_hang"));

    /*
     * Needs inputs from hardware for regs:
     *	head index had not moved since last timeout.
     *	packets not transmitted or stuffed registers.
     */
    if (nxge_txdma_hung(nxgep)) {
        nxge_fixup_hung_txdma_rings(nxgep);
    }
    NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_check_tx_hang"));
}

/*
 * nxge_txdma_hung
 *
 * Scan all of the TDCs owned by this instance and report whether any
 * of them appears to be hung.
 *
 * Arguments:
 * 	nxgep
 *
 * Notes:
 *	Called by nxge_check_tx_hang()
 *
 * NPI/NXGE function calls:
 *	nxge_txdma_channel_hung()
 *
 * Registers accessed:
 *
 * Context:
 *	Any domain
 */
int
nxge_txdma_hung(p_nxge_t nxgep)
{
    nxge_grp_set_t *set = &nxgep->tx_set;
    int tdc;

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_hung"));

    if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) {
        NXGE_DEBUG_MSG((nxgep, TX_CTL,
            "<== nxge_txdma_hung: NULL ring pointer(s)"));
        return (B_FALSE);
    }

    for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) {
        if ((1 << tdc) & set->owned.map) {
            tx_ring_t *ring = nxgep->tx_rings->rings[tdc];
            if (ring) {
                if (nxge_txdma_channel_hung(nxgep, ring, tdc)) {
                    NXGE_DEBUG_MSG((nxgep, TX_CTL,
                        "==> nxge_txdma_hung: TDC %d hung",
                        tdc));
                    return (B_TRUE);
                }
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_hung"));

    return (B_FALSE);
}

/*
 * nxge_txdma_channel_hung
 *
 * Determine whether the specified TDC appears to be hung: that is,
 * whether its ring still looks full after completed descriptors have
 * been reclaimed.
 *
 * Arguments:
 * 	nxgep
 * 	ring		<channel>'s ring.
 * 	channel		The channel to check.
 *
 * Notes:
 *	Called by nxge_txdma.c:nxge_txdma_hung()
 *
 * NPI/NXGE function calls:
 *	npi_txdma_ring_head_get()
 *
 * Registers accessed:
 *	TX_RING_HDL	DMC+0x40010	Transmit Ring Head Low
 *
 * Context:
 *	Any domain
 */
int
nxge_txdma_channel_hung(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, uint16_t channel)
{
    uint16_t		head_index, tail_index;
    boolean_t		head_wrap, tail_wrap;
    npi_handle_t	handle;
    tx_ring_hdl_t	tx_head;
    uint_t		tx_rd_index;

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_channel_hung"));

    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    NXGE_DEBUG_MSG((nxgep, TX_CTL,
        "==> nxge_txdma_channel_hung: channel %d", channel));
    MUTEX_ENTER(&tx_ring_p->lock);
    (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0);

    tail_index = tx_ring_p->wr_index;
    tail_wrap = tx_ring_p->wr_index_wrap;
    tx_rd_index = tx_ring_p->rd_index;
    MUTEX_EXIT(&tx_ring_p->lock);

    NXGE_DEBUG_MSG((nxgep, TX_CTL,
        "==> nxge_txdma_channel_hung: tdc %d tx_rd_index %d "
        "tail_index %d tail_wrap %d ",
        channel, tx_rd_index, tail_index, tail_wrap));
    /*
     * Read the hardware maintained transmit head
     * and wrap around bit.
1746 */ 1747 (void) npi_txdma_ring_head_get(handle, channel, &tx_head); 1748 head_index = tx_head.bits.ldw.head; 1749 head_wrap = tx_head.bits.ldw.wrap; 1750 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1751 "==> nxge_txdma_channel_hung: " 1752 "tx_rd_index %d tail %d tail_wrap %d " 1753 "head %d wrap %d", 1754 tx_rd_index, tail_index, tail_wrap, 1755 head_index, head_wrap)); 1756 1757 if (TXDMA_RING_EMPTY(head_index, head_wrap, 1758 tail_index, tail_wrap) && 1759 (head_index == tx_rd_index)) { 1760 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1761 "==> nxge_txdma_channel_hung: EMPTY")); 1762 return (B_FALSE); 1763 } 1764 1765 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1766 "==> nxge_txdma_channel_hung: Checking if ring full")); 1767 if (TXDMA_RING_FULL(head_index, head_wrap, tail_index, 1768 tail_wrap)) { 1769 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1770 "==> nxge_txdma_channel_hung: full")); 1771 return (B_TRUE); 1772 } 1773 1774 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_channel_hung")); 1775 1776 return (B_FALSE); 1777 } 1778 1779 /* 1780 * nxge_fixup_hung_txdma_rings 1781 * 1782 * Disable a TDC. 1783 * 1784 * Arguments: 1785 * nxgep 1786 * channel The channel to reset. 1787 * reg_data The current TX_CS. 1788 * 1789 * Notes: 1790 * Called by nxge_check_tx_hang() 1791 * 1792 * NPI/NXGE function calls: 1793 * npi_txdma_ring_head_get() 1794 * 1795 * Registers accessed: 1796 * TX_RING_HDL DMC+0x40010 Transmit Ring Head Low 1797 * 1798 * Context: 1799 * Any domain 1800 */ 1801 /*ARGSUSED*/ 1802 void 1803 nxge_fixup_hung_txdma_rings(p_nxge_t nxgep) 1804 { 1805 nxge_grp_set_t *set = &nxgep->tx_set; 1806 int tdc; 1807 1808 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_fixup_hung_txdma_rings")); 1809 1810 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1811 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1812 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 1813 return; 1814 } 1815 1816 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1817 if ((1 << tdc) & set->owned.map) { 1818 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1819 if (ring) { 1820 nxge_txdma_fixup_hung_channel(nxgep, ring, tdc); 1821 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1822 "==> nxge_fixup_hung_txdma_rings: TDC %d", 1823 tdc)); 1824 } 1825 } 1826 } 1827 1828 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_fixup_hung_txdma_rings")); 1829 } 1830 1831 /* 1832 * nxge_txdma_fixup_hung_channel 1833 * 1834 * 'Fix' a hung TDC. 1835 * 1836 * Arguments: 1837 * nxgep 1838 * channel The channel to fix. 1839 * 1840 * Notes: 1841 * Called by nxge_fixup_hung_txdma_rings() 1842 * 1843 * 1. Reclaim the TDC. 1844 * 2. Disable the TDC. 
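 *	3. If the disable does not complete, inject a NACK-prefetch error
 *	   so that the stop-done bit gets set, then disable again.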
1845 * 1846 * NPI/NXGE function calls: 1847 * nxge_txdma_reclaim() 1848 * npi_txdma_channel_disable(TX_CS) 1849 * npi_txdma_inj_int_error_set(TDMC_INTR_DBG) 1850 * 1851 * Registers accessed: 1852 * TX_CS DMC+0x40028 Transmit Control And Status 1853 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 1854 * 1855 * Context: 1856 * Any domain 1857 */ 1858 /*ARGSUSED*/ 1859 void 1860 nxge_txdma_fix_hung_channel(p_nxge_t nxgep, uint16_t channel) 1861 { 1862 p_tx_ring_t ring_p; 1863 1864 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fix_hung_channel")); 1865 ring_p = nxge_txdma_get_ring(nxgep, channel); 1866 if (ring_p == NULL) { 1867 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1868 "<== nxge_txdma_fix_hung_channel")); 1869 return; 1870 } 1871 1872 if (ring_p->tdc != channel) { 1873 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1874 "<== nxge_txdma_fix_hung_channel: channel not matched " 1875 "ring tdc %d passed channel", 1876 ring_p->tdc, channel)); 1877 return; 1878 } 1879 1880 nxge_txdma_fixup_channel(nxgep, ring_p, channel); 1881 1882 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fix_hung_channel")); 1883 } 1884 1885 /*ARGSUSED*/ 1886 void 1887 nxge_txdma_fixup_hung_channel(p_nxge_t nxgep, p_tx_ring_t ring_p, 1888 uint16_t channel) 1889 { 1890 npi_handle_t handle; 1891 tdmc_intr_dbg_t intr_dbg; 1892 int status = NXGE_OK; 1893 1894 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fixup_hung_channel")); 1895 1896 if (ring_p == NULL) { 1897 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1898 "<== nxge_txdma_fixup_channel: NULL ring pointer")); 1899 return; 1900 } 1901 1902 if (ring_p->tdc != channel) { 1903 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1904 "<== nxge_txdma_fixup_hung_channel: channel " 1905 "not matched " 1906 "ring tdc %d passed channel", 1907 ring_p->tdc, channel)); 1908 return; 1909 } 1910 1911 /* Reclaim descriptors */ 1912 MUTEX_ENTER(&ring_p->lock); 1913 (void) nxge_txdma_reclaim(nxgep, ring_p, 0); 1914 MUTEX_EXIT(&ring_p->lock); 1915 1916 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1917 /* 1918 * Stop the dma channel waits for the stop done. 1919 * If the stop done bit is not set, then force 1920 * an error. 1921 */ 1922 status = npi_txdma_channel_disable(handle, channel); 1923 if (!(status & NPI_TXDMA_STOP_FAILED)) { 1924 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1925 "<== nxge_txdma_fixup_hung_channel: stopped OK " 1926 "ring tdc %d passed channel %d", 1927 ring_p->tdc, channel)); 1928 return; 1929 } 1930 1931 /* Inject any error */ 1932 intr_dbg.value = 0; 1933 intr_dbg.bits.ldw.nack_pref = 1; 1934 (void) npi_txdma_inj_int_error_set(handle, channel, &intr_dbg); 1935 1936 /* Stop done bit will be set as a result of error injection */ 1937 status = npi_txdma_channel_disable(handle, channel); 1938 if (!(status & NPI_TXDMA_STOP_FAILED)) { 1939 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1940 "<== nxge_txdma_fixup_hung_channel: stopped again" 1941 "ring tdc %d passed channel", 1942 ring_p->tdc, channel)); 1943 return; 1944 } 1945 1946 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1947 "<== nxge_txdma_fixup_hung_channel: stop done still not set!! 
" 1948 "ring tdc %d passed channel", 1949 ring_p->tdc, channel)); 1950 1951 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fixup_hung_channel")); 1952 } 1953 1954 /*ARGSUSED*/ 1955 void 1956 nxge_reclaim_rings(p_nxge_t nxgep) 1957 { 1958 nxge_grp_set_t *set = &nxgep->tx_set; 1959 int tdc; 1960 1961 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_reclaim_rings")); 1962 1963 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 1964 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1965 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 1966 return; 1967 } 1968 1969 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 1970 if ((1 << tdc) & set->owned.map) { 1971 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 1972 if (ring) { 1973 NXGE_DEBUG_MSG((nxgep, TX_CTL, 1974 "==> nxge_reclaim_rings: TDC %d", tdc)); 1975 MUTEX_ENTER(&ring->lock); 1976 (void) nxge_txdma_reclaim(nxgep, ring, tdc); 1977 MUTEX_EXIT(&ring->lock); 1978 } 1979 } 1980 } 1981 1982 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_reclaim_rings")); 1983 } 1984 1985 void 1986 nxge_txdma_regs_dump_channels(p_nxge_t nxgep) 1987 { 1988 nxge_grp_set_t *set = &nxgep->tx_set; 1989 npi_handle_t handle; 1990 int tdc; 1991 1992 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_regs_dump_channels")); 1993 1994 handle = NXGE_DEV_NPI_HANDLE(nxgep); 1995 1996 if (!isLDOMguest(nxgep)) { 1997 (void) npi_txdma_dump_fzc_regs(handle); 1998 1999 /* Dump TXC registers. */ 2000 (void) npi_txc_dump_fzc_regs(handle); 2001 (void) npi_txc_dump_port_fzc_regs(handle, nxgep->function_num); 2002 } 2003 2004 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 2005 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2006 "<== nxge_fixup_hung_txdma_rings: NULL ring pointer(s)")); 2007 return; 2008 } 2009 2010 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 2011 if ((1 << tdc) & set->owned.map) { 2012 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 2013 if (ring) { 2014 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2015 "==> nxge_txdma_regs_dump_channels: " 2016 "TDC %d", tdc)); 2017 (void) npi_txdma_dump_tdc_regs(handle, tdc); 2018 2019 /* Dump TXC registers, if able to. 
 */
                if (!isLDOMguest(nxgep)) {
                    NXGE_DEBUG_MSG((nxgep, TX_CTL,
                        "==> nxge_txdma_regs_dump_channels:"
                        " FZC TDC %d", tdc));
                    (void) npi_txc_dump_tdc_fzc_regs
                        (handle, tdc);
                }
                nxge_txdma_regs_dump(nxgep, tdc);
            }
        }
    }

    NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_regs_dump_channels"));
}

void
nxge_txdma_regs_dump(p_nxge_t nxgep, int channel)
{
    npi_handle_t	handle;
    tx_ring_hdl_t	hdl;
    tx_ring_kick_t	kick;
    tx_cs_t		cs;
    txc_control_t	control;
    uint32_t		bitmap = 0;
    uint32_t		burst = 0;
    uint32_t		bytes = 0;
    dma_log_page_t	cfg;

    printf("\n\tfunc # %d tdc %d ",
        nxgep->function_num, channel);
    cfg.page_num = 0;
    handle = NXGE_DEV_NPI_HANDLE(nxgep);
    (void) npi_txdma_log_page_get(handle, channel, &cfg);
    printf("\n\tlog page func %d valid page 0 %d",
        cfg.func_num, cfg.valid);
    cfg.page_num = 1;
    (void) npi_txdma_log_page_get(handle, channel, &cfg);
    printf("\n\tlog page func %d valid page 1 %d",
        cfg.func_num, cfg.valid);

    (void) npi_txdma_ring_head_get(handle, channel, &hdl);
    (void) npi_txdma_desc_kick_reg_get(handle, channel, &kick);
    printf("\n\thead value is 0x%0llx",
        (long long)hdl.value);
    printf("\n\thead index %d", hdl.bits.ldw.head);
    printf("\n\tkick value is 0x%0llx",
        (long long)kick.value);
    printf("\n\ttail index %d\n", kick.bits.ldw.tail);

    (void) npi_txdma_control_status(handle, OP_GET, channel, &cs);
    printf("\n\tControl status is 0x%0llx", (long long)cs.value);
    printf("\n\tControl status RST state %d", cs.bits.ldw.rst);

    (void) npi_txc_control(handle, OP_GET, &control);
    (void) npi_txc_port_dma_list_get(handle, nxgep->function_num, &bitmap);
    (void) npi_txc_dma_max_burst(handle, OP_GET, channel, &burst);
    (void) npi_txc_dma_bytes_transmitted(handle, channel, &bytes);

    printf("\n\tTXC port control 0x%0llx",
        (long long)control.value);
    printf("\n\tTXC port bitmap 0x%x", bitmap);
    printf("\n\tTXC max burst %d", burst);
    printf("\n\tTXC bytes xmt %d\n", bytes);

    {
        ipp_status_t status;

        (void) npi_ipp_get_status(handle, nxgep->function_num, &status);
#if defined(__i386)
        printf("\n\tIPP status 0x%llx\n", (uint64_t)status.value);
#else
        printf("\n\tIPP status 0x%lx\n", (uint64_t)status.value);
#endif
    }
}

/*
 * nxge_tdc_hvio_setup
 *
 * Record the hypervisor I/O (physical) base addresses and sizes of
 * <channel>'s transmit buffer area and its control (descriptor and
 * mailbox) area, for use when the NIU logical pages are configured
 * for this channel.
 *
 * Arguments:
 * 	nxgep
 * 	channel		The channel to map.
 *
 * Notes:
 *
 * NPI/NXGE function calls:
 *	na
 *
 * Context:
 *	Service domain
2112 */ 2113 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2114 static void 2115 nxge_tdc_hvio_setup( 2116 nxge_t *nxgep, int channel) 2117 { 2118 nxge_dma_common_t *data; 2119 nxge_dma_common_t *control; 2120 tx_ring_t *ring; 2121 2122 ring = nxgep->tx_rings->rings[channel]; 2123 data = nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2124 2125 ring->hv_set = B_FALSE; 2126 2127 ring->hv_tx_buf_base_ioaddr_pp = 2128 (uint64_t)data->orig_ioaddr_pp; 2129 ring->hv_tx_buf_ioaddr_size = 2130 (uint64_t)data->orig_alength; 2131 2132 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 2133 "hv data buf base io $%p size 0x%llx (%d) buf base io $%p " 2134 "orig vatopa base io $%p orig_len 0x%llx (%d)", 2135 ring->hv_tx_buf_base_ioaddr_pp, 2136 ring->hv_tx_buf_ioaddr_size, ring->hv_tx_buf_ioaddr_size, 2137 data->ioaddr_pp, data->orig_vatopa, 2138 data->orig_alength, data->orig_alength)); 2139 2140 control = nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2141 2142 ring->hv_tx_cntl_base_ioaddr_pp = 2143 (uint64_t)control->orig_ioaddr_pp; 2144 ring->hv_tx_cntl_ioaddr_size = 2145 (uint64_t)control->orig_alength; 2146 2147 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel: " 2148 "hv cntl base io $%p orig ioaddr_pp ($%p) " 2149 "orig vatopa ($%p) size 0x%llx (%d 0x%x)", 2150 ring->hv_tx_cntl_base_ioaddr_pp, 2151 control->orig_ioaddr_pp, control->orig_vatopa, 2152 ring->hv_tx_cntl_ioaddr_size, 2153 control->orig_alength, control->orig_alength)); 2154 } 2155 #endif 2156 2157 static nxge_status_t 2158 nxge_map_txdma(p_nxge_t nxgep, int channel) 2159 { 2160 nxge_dma_common_t **pData; 2161 nxge_dma_common_t **pControl; 2162 tx_ring_t **pRing, *ring; 2163 tx_mbox_t **mailbox; 2164 uint32_t num_chunks; 2165 2166 nxge_status_t status = NXGE_OK; 2167 2168 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma")); 2169 2170 if (!nxgep->tx_cntl_pool_p->buf_allocated) { 2171 if (nxge_alloc_tx_mem_pool(nxgep) != NXGE_OK) { 2172 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2173 "<== nxge_map_txdma: buf not allocated")); 2174 return (NXGE_ERROR); 2175 } 2176 } 2177 2178 if (nxge_alloc_txb(nxgep, channel) != NXGE_OK) 2179 return (NXGE_ERROR); 2180 2181 num_chunks = nxgep->tx_buf_pool_p->num_chunks[channel]; 2182 pData = &nxgep->tx_buf_pool_p->dma_buf_pool_p[channel]; 2183 pControl = &nxgep->tx_cntl_pool_p->dma_buf_pool_p[channel]; 2184 pRing = &nxgep->tx_rings->rings[channel]; 2185 mailbox = &nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2186 2187 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 2188 "tx_rings $%p tx_desc_rings $%p", 2189 nxgep->tx_rings, nxgep->tx_rings->rings)); 2190 2191 /* 2192 * Map descriptors from the buffer pools for <channel>. 2193 */ 2194 2195 /* 2196 * Set up and prepare buffer blocks, descriptors 2197 * and mailbox. 
2198 */ 2199 status = nxge_map_txdma_channel(nxgep, channel, 2200 pData, pRing, num_chunks, pControl, mailbox); 2201 if (status != NXGE_OK) { 2202 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2203 "==> nxge_map_txdma(%d): nxge_map_txdma_channel() " 2204 "returned 0x%x", 2205 nxgep, channel, status)); 2206 return (status); 2207 } 2208 2209 ring = *pRing; 2210 2211 ring->index = (uint16_t)channel; 2212 ring->tdc_stats = &nxgep->statsp->tdc_stats[channel]; 2213 2214 #if defined(sun4v) && defined(NIU_LP_WORKAROUND) 2215 if (isLDOMguest(nxgep)) { 2216 (void) nxge_tdc_lp_conf(nxgep, channel); 2217 } else { 2218 nxge_tdc_hvio_setup(nxgep, channel); 2219 } 2220 #endif 2221 2222 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma: " 2223 "(status 0x%x channel %d)", status, channel)); 2224 2225 return (status); 2226 } 2227 2228 static nxge_status_t 2229 nxge_map_txdma_channel(p_nxge_t nxgep, uint16_t channel, 2230 p_nxge_dma_common_t *dma_buf_p, 2231 p_tx_ring_t *tx_desc_p, 2232 uint32_t num_chunks, 2233 p_nxge_dma_common_t *dma_cntl_p, 2234 p_tx_mbox_t *tx_mbox_p) 2235 { 2236 int status = NXGE_OK; 2237 2238 /* 2239 * Set up and prepare buffer blocks, descriptors 2240 * and mailbox. 2241 */ 2242 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2243 "==> nxge_map_txdma_channel (channel %d)", channel)); 2244 /* 2245 * Transmit buffer blocks 2246 */ 2247 status = nxge_map_txdma_channel_buf_ring(nxgep, channel, 2248 dma_buf_p, tx_desc_p, num_chunks); 2249 if (status != NXGE_OK) { 2250 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2251 "==> nxge_map_txdma_channel (channel %d): " 2252 "map buffer failed 0x%x", channel, status)); 2253 goto nxge_map_txdma_channel_exit; 2254 } 2255 2256 /* 2257 * Transmit block ring, and mailbox. 2258 */ 2259 nxge_map_txdma_channel_cfg_ring(nxgep, channel, dma_cntl_p, *tx_desc_p, 2260 tx_mbox_p); 2261 2262 goto nxge_map_txdma_channel_exit; 2263 2264 nxge_map_txdma_channel_fail1: 2265 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2266 "==> nxge_map_txdma_channel: unmap buf" 2267 "(status 0x%x channel %d)", 2268 status, channel)); 2269 nxge_unmap_txdma_channel_buf_ring(nxgep, *tx_desc_p); 2270 2271 nxge_map_txdma_channel_exit: 2272 NXGE_ERROR_MSG((nxgep, MEM3_CTL, 2273 "<== nxge_map_txdma_channel: " 2274 "(status 0x%x channel %d)", 2275 status, channel)); 2276 2277 return (status); 2278 } 2279 2280 /*ARGSUSED*/ 2281 static void 2282 nxge_unmap_txdma_channel(p_nxge_t nxgep, uint16_t channel) 2283 { 2284 tx_ring_t *ring; 2285 tx_mbox_t *mailbox; 2286 2287 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2288 "==> nxge_unmap_txdma_channel (channel %d)", channel)); 2289 /* 2290 * unmap tx block ring, and mailbox. 2291 */ 2292 ring = nxgep->tx_rings->rings[channel]; 2293 mailbox = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2294 2295 (void) nxge_unmap_txdma_channel_cfg_ring(nxgep, ring, mailbox); 2296 2297 /* unmap buffer blocks */ 2298 (void) nxge_unmap_txdma_channel_buf_ring(nxgep, ring); 2299 2300 nxge_free_txb(nxgep, channel); 2301 2302 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_unmap_txdma_channel")); 2303 } 2304 2305 /* 2306 * nxge_map_txdma_channel_cfg_ring 2307 * 2308 * Map a TDC into our kernel space. 2309 * This function allocates all of the per-channel data structures. 2310 * 2311 * Arguments: 2312 * nxgep 2313 * dma_channel The channel to map. 2314 * dma_cntl_p 2315 * tx_ring_p dma_channel's transmit ring 2316 * tx_mbox_p dma_channel's mailbox 2317 * 2318 * Notes: 2319 * 2320 * NPI/NXGE function calls: 2321 * nxge_setup_dma_common() 2322 * 2323 * Registers accessed: 2324 * none. 
2325 * 2326 * Context: 2327 * Any domain 2328 */ 2329 /*ARGSUSED*/ 2330 static void 2331 nxge_map_txdma_channel_cfg_ring(p_nxge_t nxgep, uint16_t dma_channel, 2332 p_nxge_dma_common_t *dma_cntl_p, 2333 p_tx_ring_t tx_ring_p, 2334 p_tx_mbox_t *tx_mbox_p) 2335 { 2336 p_tx_mbox_t mboxp; 2337 p_nxge_dma_common_t cntl_dmap; 2338 p_nxge_dma_common_t dmap; 2339 p_tx_rng_cfig_t tx_ring_cfig_p; 2340 p_tx_ring_kick_t tx_ring_kick_p; 2341 p_tx_cs_t tx_cs_p; 2342 p_tx_dma_ent_msk_t tx_evmask_p; 2343 p_txdma_mbh_t mboxh_p; 2344 p_txdma_mbl_t mboxl_p; 2345 uint64_t tx_desc_len; 2346 2347 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2348 "==> nxge_map_txdma_channel_cfg_ring")); 2349 2350 cntl_dmap = *dma_cntl_p; 2351 2352 dmap = (p_nxge_dma_common_t)&tx_ring_p->tdc_desc; 2353 nxge_setup_dma_common(dmap, cntl_dmap, tx_ring_p->tx_ring_size, 2354 sizeof (tx_desc_t)); 2355 /* 2356 * Zero out transmit ring descriptors. 2357 */ 2358 bzero((caddr_t)dmap->kaddrp, dmap->alength); 2359 tx_ring_cfig_p = &(tx_ring_p->tx_ring_cfig); 2360 tx_ring_kick_p = &(tx_ring_p->tx_ring_kick); 2361 tx_cs_p = &(tx_ring_p->tx_cs); 2362 tx_evmask_p = &(tx_ring_p->tx_evmask); 2363 tx_ring_cfig_p->value = 0; 2364 tx_ring_kick_p->value = 0; 2365 tx_cs_p->value = 0; 2366 tx_evmask_p->value = 0; 2367 2368 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2369 "==> nxge_map_txdma_channel_cfg_ring: channel %d des $%p", 2370 dma_channel, 2371 dmap->dma_cookie.dmac_laddress)); 2372 2373 tx_ring_cfig_p->value = 0; 2374 tx_desc_len = (uint64_t)(tx_ring_p->tx_ring_size >> 3); 2375 tx_ring_cfig_p->value = 2376 (dmap->dma_cookie.dmac_laddress & TX_RNG_CFIG_ADDR_MASK) | 2377 (tx_desc_len << TX_RNG_CFIG_LEN_SHIFT); 2378 2379 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2380 "==> nxge_map_txdma_channel_cfg_ring: channel %d cfg 0x%llx", 2381 dma_channel, 2382 tx_ring_cfig_p->value)); 2383 2384 tx_cs_p->bits.ldw.rst = 1; 2385 2386 /* Map in mailbox */ 2387 mboxp = (p_tx_mbox_t) 2388 KMEM_ZALLOC(sizeof (tx_mbox_t), KM_SLEEP); 2389 dmap = (p_nxge_dma_common_t)&mboxp->tx_mbox; 2390 nxge_setup_dma_common(dmap, cntl_dmap, 1, sizeof (txdma_mailbox_t)); 2391 mboxh_p = (p_txdma_mbh_t)&tx_ring_p->tx_mbox_mbh; 2392 mboxl_p = (p_txdma_mbl_t)&tx_ring_p->tx_mbox_mbl; 2393 mboxh_p->value = mboxl_p->value = 0; 2394 2395 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2396 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2397 dmap->dma_cookie.dmac_laddress)); 2398 2399 mboxh_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress >> 2400 TXDMA_MBH_ADDR_SHIFT) & TXDMA_MBH_MASK); 2401 2402 mboxl_p->bits.ldw.mbaddr = ((dmap->dma_cookie.dmac_laddress & 2403 TXDMA_MBL_MASK) >> TXDMA_MBL_SHIFT); 2404 2405 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2406 "==> nxge_map_txdma_channel_cfg_ring: mbox 0x%lx", 2407 dmap->dma_cookie.dmac_laddress)); 2408 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2409 "==> nxge_map_txdma_channel_cfg_ring: hmbox $%p " 2410 "mbox $%p", 2411 mboxh_p->bits.ldw.mbaddr, mboxl_p->bits.ldw.mbaddr)); 2412 tx_ring_p->page_valid.value = 0; 2413 tx_ring_p->page_mask_1.value = tx_ring_p->page_mask_2.value = 0; 2414 tx_ring_p->page_value_1.value = tx_ring_p->page_value_2.value = 0; 2415 tx_ring_p->page_reloc_1.value = tx_ring_p->page_reloc_2.value = 0; 2416 tx_ring_p->page_hdl.value = 0; 2417 2418 tx_ring_p->page_valid.bits.ldw.page0 = 1; 2419 tx_ring_p->page_valid.bits.ldw.page1 = 1; 2420 2421 tx_ring_p->max_burst.value = 0; 2422 tx_ring_p->max_burst.bits.ldw.dma_max_burst = TXC_DMA_MAX_BURST_DEFAULT; 2423 2424 *tx_mbox_p = mboxp; 2425 2426 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2427 "<== nxge_map_txdma_channel_cfg_ring")); 2428 } 2429 2430 
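/*
 * Note on nxge_map_txdma_channel_cfg_ring() above: no device registers are
 * touched there; only the register images kept in the tx_ring_t and
 * tx_mbox_t are built. The ring configuration image packs the descriptor
 * ring's DMA address with a length field of tx_ring_size >> 3 (each
 * tx_desc_t is 8 bytes, so this presumably expresses the ring length in
 * 64-byte units); the mailbox DMA address is split across the MBH (upper
 * bits) and MBL (lower bits) images; logical pages 0 and 1 are marked
 * valid; and the TXC burst image defaults to TXC_DMA_MAX_BURST_DEFAULT.
 * The images are written to the chip later, when the channel is started
 * (see nxge_txdma_start_channel()).
 */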
/*ARGSUSED*/ 2431 static void 2432 nxge_unmap_txdma_channel_cfg_ring(p_nxge_t nxgep, 2433 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2434 { 2435 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2436 "==> nxge_unmap_txdma_channel_cfg_ring: channel %d", 2437 tx_ring_p->tdc)); 2438 2439 KMEM_FREE(tx_mbox_p, sizeof (tx_mbox_t)); 2440 2441 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2442 "<== nxge_unmap_txdma_channel_cfg_ring")); 2443 } 2444 2445 /* 2446 * nxge_map_txdma_channel_buf_ring 2447 * 2448 * 2449 * Arguments: 2450 * nxgep 2451 * channel The channel to map. 2452 * dma_buf_p 2453 * tx_desc_p channel's descriptor ring 2454 * num_chunks 2455 * 2456 * Notes: 2457 * 2458 * NPI/NXGE function calls: 2459 * nxge_setup_dma_common() 2460 * 2461 * Registers accessed: 2462 * none. 2463 * 2464 * Context: 2465 * Any domain 2466 */ 2467 static nxge_status_t 2468 nxge_map_txdma_channel_buf_ring(p_nxge_t nxgep, uint16_t channel, 2469 p_nxge_dma_common_t *dma_buf_p, 2470 p_tx_ring_t *tx_desc_p, uint32_t num_chunks) 2471 { 2472 p_nxge_dma_common_t dma_bufp, tmp_bufp; 2473 p_nxge_dma_common_t dmap; 2474 nxge_os_dma_handle_t tx_buf_dma_handle; 2475 p_tx_ring_t tx_ring_p; 2476 p_tx_msg_t tx_msg_ring; 2477 nxge_status_t status = NXGE_OK; 2478 int ddi_status = DDI_SUCCESS; 2479 int i, j, index; 2480 uint32_t size, bsize; 2481 uint32_t nblocks, nmsgs; 2482 2483 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2484 "==> nxge_map_txdma_channel_buf_ring")); 2485 2486 dma_bufp = tmp_bufp = *dma_buf_p; 2487 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2488 " nxge_map_txdma_channel_buf_ring: channel %d to map %d " 2489 "chunks bufp $%p", 2490 channel, num_chunks, dma_bufp)); 2491 2492 nmsgs = 0; 2493 for (i = 0; i < num_chunks; i++, tmp_bufp++) { 2494 nmsgs += tmp_bufp->nblocks; 2495 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2496 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2497 "bufp $%p nblocks %d nmsgs %d", 2498 channel, tmp_bufp, tmp_bufp->nblocks, nmsgs)); 2499 } 2500 if (!nmsgs) { 2501 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2502 "<== nxge_map_txdma_channel_buf_ring: channel %d " 2503 "no msg blocks", 2504 channel)); 2505 status = NXGE_ERROR; 2506 goto nxge_map_txdma_channel_buf_ring_exit; 2507 } 2508 2509 tx_ring_p = (p_tx_ring_t) 2510 KMEM_ZALLOC(sizeof (tx_ring_t), KM_SLEEP); 2511 MUTEX_INIT(&tx_ring_p->lock, NULL, MUTEX_DRIVER, 2512 (void *)nxgep->interrupt_cookie); 2513 2514 tx_ring_p->nxgep = nxgep; 2515 tx_ring_p->serial = nxge_serialize_create(nmsgs, 2516 nxge_serial_tx, tx_ring_p); 2517 /* 2518 * Allocate transmit message rings and handles for packets 2519 * not to be copied to premapped buffers. 
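 * (Presumably each of these handles is used to DMA-bind a packet that is
 * too large to bcopy into the premapped transmit buffers; cf.
 * nxge_bcopy_thresh.)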
2520 */ 2521 size = nmsgs * sizeof (tx_msg_t); 2522 tx_msg_ring = KMEM_ZALLOC(size, KM_SLEEP); 2523 for (i = 0; i < nmsgs; i++) { 2524 ddi_status = ddi_dma_alloc_handle(nxgep->dip, &nxge_tx_dma_attr, 2525 DDI_DMA_DONTWAIT, 0, 2526 &tx_msg_ring[i].dma_handle); 2527 if (ddi_status != DDI_SUCCESS) { 2528 status |= NXGE_DDI_FAILED; 2529 break; 2530 } 2531 } 2532 if (i < nmsgs) { 2533 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2534 "Allocate handles failed.")); 2535 goto nxge_map_txdma_channel_buf_ring_fail1; 2536 } 2537 2538 tx_ring_p->tdc = channel; 2539 tx_ring_p->tx_msg_ring = tx_msg_ring; 2540 tx_ring_p->tx_ring_size = nmsgs; 2541 tx_ring_p->num_chunks = num_chunks; 2542 if (!nxge_tx_intr_thres) { 2543 nxge_tx_intr_thres = tx_ring_p->tx_ring_size/4; 2544 } 2545 tx_ring_p->tx_wrap_mask = tx_ring_p->tx_ring_size - 1; 2546 tx_ring_p->rd_index = 0; 2547 tx_ring_p->wr_index = 0; 2548 tx_ring_p->ring_head.value = 0; 2549 tx_ring_p->ring_kick_tail.value = 0; 2550 tx_ring_p->descs_pending = 0; 2551 2552 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2553 "==> nxge_map_txdma_channel_buf_ring: channel %d " 2554 "actual tx desc max %d nmsgs %d " 2555 "(config nxge_tx_ring_size %d)", 2556 channel, tx_ring_p->tx_ring_size, nmsgs, 2557 nxge_tx_ring_size)); 2558 2559 /* 2560 * Map in buffers from the buffer pool. 2561 */ 2562 index = 0; 2563 bsize = dma_bufp->block_size; 2564 2565 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_map_txdma_channel_buf_ring: " 2566 "dma_bufp $%p tx_rng_p $%p " 2567 "tx_msg_rng_p $%p bsize %d", 2568 dma_bufp, tx_ring_p, tx_msg_ring, bsize)); 2569 2570 tx_buf_dma_handle = dma_bufp->dma_handle; 2571 for (i = 0; i < num_chunks; i++, dma_bufp++) { 2572 bsize = dma_bufp->block_size; 2573 nblocks = dma_bufp->nblocks; 2574 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2575 "==> nxge_map_txdma_channel_buf_ring: dma chunk %d " 2576 "size %d dma_bufp $%p", 2577 i, sizeof (nxge_dma_common_t), dma_bufp)); 2578 2579 for (j = 0; j < nblocks; j++) { 2580 tx_msg_ring[index].buf_dma_handle = tx_buf_dma_handle; 2581 dmap = &tx_msg_ring[index++].buf_dma; 2582 #ifdef TX_MEM_DEBUG 2583 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2584 "==> nxge_map_txdma_channel_buf_ring: j %d" 2585 "dmap $%p", i, dmap)); 2586 #endif 2587 nxge_setup_dma_common(dmap, dma_bufp, 1, 2588 bsize); 2589 } 2590 } 2591 2592 if (i < num_chunks) { 2593 status = NXGE_ERROR; 2594 goto nxge_map_txdma_channel_buf_ring_fail1; 2595 } 2596 2597 *tx_desc_p = tx_ring_p; 2598 2599 goto nxge_map_txdma_channel_buf_ring_exit; 2600 2601 nxge_map_txdma_channel_buf_ring_fail1: 2602 if (tx_ring_p->serial) { 2603 nxge_serialize_destroy(tx_ring_p->serial); 2604 tx_ring_p->serial = NULL; 2605 } 2606 2607 index--; 2608 for (; index >= 0; index--) { 2609 if (tx_msg_ring[index].dma_handle != NULL) { 2610 ddi_dma_free_handle(&tx_msg_ring[index].dma_handle); 2611 } 2612 } 2613 MUTEX_DESTROY(&tx_ring_p->lock); 2614 KMEM_FREE(tx_msg_ring, size); 2615 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2616 2617 status = NXGE_ERROR; 2618 2619 nxge_map_txdma_channel_buf_ring_exit: 2620 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2621 "<== nxge_map_txdma_channel_buf_ring status 0x%x", status)); 2622 2623 return (status); 2624 } 2625 2626 /*ARGSUSED*/ 2627 static void 2628 nxge_unmap_txdma_channel_buf_ring(p_nxge_t nxgep, p_tx_ring_t tx_ring_p) 2629 { 2630 p_tx_msg_t tx_msg_ring; 2631 p_tx_msg_t tx_msg_p; 2632 int i; 2633 2634 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2635 "==> nxge_unmap_txdma_channel_buf_ring")); 2636 if (tx_ring_p == NULL) { 2637 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2638 "<== nxge_unmap_txdma_channel_buf_ring: NULL 
ringp")); 2639 return; 2640 } 2641 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2642 "==> nxge_unmap_txdma_channel_buf_ring: channel %d", 2643 tx_ring_p->tdc)); 2644 2645 tx_msg_ring = tx_ring_p->tx_msg_ring; 2646 2647 /* 2648 * Since the serialization thread, timer thread and 2649 * interrupt thread can all call the transmit reclaim, 2650 * the unmapping function needs to acquire the lock 2651 * to free those buffers which were transmitted 2652 * by the hardware already. 2653 */ 2654 MUTEX_ENTER(&tx_ring_p->lock); 2655 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2656 "==> nxge_unmap_txdma_channel_buf_ring (reclaim): " 2657 "channel %d", 2658 tx_ring_p->tdc)); 2659 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 2660 2661 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2662 tx_msg_p = &tx_msg_ring[i]; 2663 if (tx_msg_p->tx_message != NULL) { 2664 freemsg(tx_msg_p->tx_message); 2665 tx_msg_p->tx_message = NULL; 2666 } 2667 } 2668 2669 for (i = 0; i < tx_ring_p->tx_ring_size; i++) { 2670 if (tx_msg_ring[i].dma_handle != NULL) { 2671 ddi_dma_free_handle(&tx_msg_ring[i].dma_handle); 2672 } 2673 tx_msg_ring[i].dma_handle = NULL; 2674 } 2675 2676 MUTEX_EXIT(&tx_ring_p->lock); 2677 2678 if (tx_ring_p->serial) { 2679 nxge_serialize_destroy(tx_ring_p->serial); 2680 tx_ring_p->serial = NULL; 2681 } 2682 2683 MUTEX_DESTROY(&tx_ring_p->lock); 2684 KMEM_FREE(tx_msg_ring, sizeof (tx_msg_t) * tx_ring_p->tx_ring_size); 2685 KMEM_FREE(tx_ring_p, sizeof (tx_ring_t)); 2686 2687 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2688 "<== nxge_unmap_txdma_channel_buf_ring")); 2689 } 2690 2691 static nxge_status_t 2692 nxge_txdma_hw_start(p_nxge_t nxgep, int channel) 2693 { 2694 p_tx_rings_t tx_rings; 2695 p_tx_ring_t *tx_desc_rings; 2696 p_tx_mbox_areas_t tx_mbox_areas_p; 2697 p_tx_mbox_t *tx_mbox_p; 2698 nxge_status_t status = NXGE_OK; 2699 2700 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start")); 2701 2702 tx_rings = nxgep->tx_rings; 2703 if (tx_rings == NULL) { 2704 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2705 "<== nxge_txdma_hw_start: NULL ring pointer")); 2706 return (NXGE_ERROR); 2707 } 2708 tx_desc_rings = tx_rings->rings; 2709 if (tx_desc_rings == NULL) { 2710 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2711 "<== nxge_txdma_hw_start: NULL ring pointers")); 2712 return (NXGE_ERROR); 2713 } 2714 2715 NXGE_ERROR_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2716 "tx_rings $%p tx_desc_rings $%p", tx_rings, tx_desc_rings)); 2717 2718 tx_mbox_areas_p = nxgep->tx_mbox_areas_p; 2719 tx_mbox_p = tx_mbox_areas_p->txmbox_areas_p; 2720 2721 status = nxge_txdma_start_channel(nxgep, channel, 2722 (p_tx_ring_t)tx_desc_rings[channel], 2723 (p_tx_mbox_t)tx_mbox_p[channel]); 2724 if (status != NXGE_OK) { 2725 goto nxge_txdma_hw_start_fail1; 2726 } 2727 2728 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2729 "tx_rings $%p rings $%p", 2730 nxgep->tx_rings, nxgep->tx_rings->rings)); 2731 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "==> nxge_txdma_hw_start: " 2732 "tx_rings $%p tx_desc_rings $%p", 2733 nxgep->tx_rings, tx_desc_rings)); 2734 2735 goto nxge_txdma_hw_start_exit; 2736 2737 nxge_txdma_hw_start_fail1: 2738 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2739 "==> nxge_txdma_hw_start: disable " 2740 "(status 0x%x channel %d)", status, channel)); 2741 2742 nxge_txdma_hw_start_exit: 2743 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2744 "==> nxge_txdma_hw_start: (status 0x%x)", status)); 2745 2746 return (status); 2747 } 2748 2749 /* 2750 * nxge_txdma_start_channel 2751 * 2752 * Start a TDC. 2753 * 2754 * Arguments: 2755 * nxgep 2756 * channel The channel to start. 
2757 * tx_ring_p channel's transmit descriptor ring. 2758 * tx_mbox_p channel' smailbox. 2759 * 2760 * Notes: 2761 * 2762 * NPI/NXGE function calls: 2763 * nxge_reset_txdma_channel() 2764 * nxge_init_txdma_channel_event_mask() 2765 * nxge_enable_txdma_channel() 2766 * 2767 * Registers accessed: 2768 * none directly (see functions above). 2769 * 2770 * Context: 2771 * Any domain 2772 */ 2773 static nxge_status_t 2774 nxge_txdma_start_channel(p_nxge_t nxgep, uint16_t channel, 2775 p_tx_ring_t tx_ring_p, p_tx_mbox_t tx_mbox_p) 2776 2777 { 2778 nxge_status_t status = NXGE_OK; 2779 2780 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2781 "==> nxge_txdma_start_channel (channel %d)", channel)); 2782 /* 2783 * TXDMA/TXC must be in stopped state. 2784 */ 2785 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2786 2787 /* 2788 * Reset TXDMA channel 2789 */ 2790 tx_ring_p->tx_cs.value = 0; 2791 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2792 status = nxge_reset_txdma_channel(nxgep, channel, 2793 tx_ring_p->tx_cs.value); 2794 if (status != NXGE_OK) { 2795 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 2796 "==> nxge_txdma_start_channel (channel %d)" 2797 " reset channel failed 0x%x", channel, status)); 2798 goto nxge_txdma_start_channel_exit; 2799 } 2800 2801 /* 2802 * Initialize the TXDMA channel specific FZC control 2803 * configurations. These FZC registers are pertaining 2804 * to each TX channel (i.e. logical pages). 2805 */ 2806 if (!isLDOMguest(nxgep)) { 2807 status = nxge_init_fzc_txdma_channel(nxgep, channel, 2808 tx_ring_p, tx_mbox_p); 2809 if (status != NXGE_OK) { 2810 goto nxge_txdma_start_channel_exit; 2811 } 2812 } 2813 2814 /* 2815 * Initialize the event masks. 2816 */ 2817 tx_ring_p->tx_evmask.value = 0; 2818 status = nxge_init_txdma_channel_event_mask(nxgep, 2819 channel, &tx_ring_p->tx_evmask); 2820 if (status != NXGE_OK) { 2821 goto nxge_txdma_start_channel_exit; 2822 } 2823 2824 /* 2825 * Load TXDMA descriptors, buffers, mailbox, 2826 * initialise the DMA channels and 2827 * enable each DMA channel. 2828 */ 2829 status = nxge_enable_txdma_channel(nxgep, channel, 2830 tx_ring_p, tx_mbox_p); 2831 if (status != NXGE_OK) { 2832 goto nxge_txdma_start_channel_exit; 2833 } 2834 2835 nxge_txdma_start_channel_exit: 2836 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_start_channel")); 2837 2838 return (status); 2839 } 2840 2841 /* 2842 * nxge_txdma_stop_channel 2843 * 2844 * Stop a TDC. 2845 * 2846 * Arguments: 2847 * nxgep 2848 * channel The channel to stop. 2849 * tx_ring_p channel's transmit descriptor ring. 2850 * tx_mbox_p channel' smailbox. 2851 * 2852 * Notes: 2853 * 2854 * NPI/NXGE function calls: 2855 * nxge_txdma_stop_inj_err() 2856 * nxge_reset_txdma_channel() 2857 * nxge_init_txdma_channel_event_mask() 2858 * nxge_init_txdma_channel_cntl_stat() 2859 * nxge_disable_txdma_channel() 2860 * 2861 * Registers accessed: 2862 * none directly (see functions above). 2863 * 2864 * Context: 2865 * Any domain 2866 */ 2867 /*ARGSUSED*/ 2868 static nxge_status_t 2869 nxge_txdma_stop_channel(p_nxge_t nxgep, uint16_t channel) 2870 { 2871 p_tx_ring_t tx_ring_p; 2872 int status = NXGE_OK; 2873 2874 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2875 "==> nxge_txdma_stop_channel: channel %d", channel)); 2876 2877 /* 2878 * Stop (disable) TXDMA and TXC (if stop bit is set 2879 * and STOP_N_GO bit not set, the TXDMA reset state will 2880 * not be set if reset TXDMA. 
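 * (That is, quiesce the channel via nxge_txdma_stop_inj_err() below before
 * issuing the reset.)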
2881 */ 2882 (void) nxge_txdma_stop_inj_err(nxgep, channel); 2883 2884 tx_ring_p = nxgep->tx_rings->rings[channel]; 2885 2886 /* 2887 * Reset TXDMA channel 2888 */ 2889 tx_ring_p->tx_cs.value = 0; 2890 tx_ring_p->tx_cs.bits.ldw.rst = 1; 2891 status = nxge_reset_txdma_channel(nxgep, channel, 2892 tx_ring_p->tx_cs.value); 2893 if (status != NXGE_OK) { 2894 goto nxge_txdma_stop_channel_exit; 2895 } 2896 2897 #ifdef HARDWARE_REQUIRED 2898 /* Set up the interrupt event masks. */ 2899 tx_ring_p->tx_evmask.value = 0; 2900 status = nxge_init_txdma_channel_event_mask(nxgep, 2901 channel, &tx_ring_p->tx_evmask); 2902 if (status != NXGE_OK) { 2903 goto nxge_txdma_stop_channel_exit; 2904 } 2905 2906 /* Initialize the DMA control and status register */ 2907 tx_ring_p->tx_cs.value = TX_ENT_MSK_MK_ALL; 2908 status = nxge_init_txdma_channel_cntl_stat(nxgep, channel, 2909 tx_ring_p->tx_cs.value); 2910 if (status != NXGE_OK) { 2911 goto nxge_txdma_stop_channel_exit; 2912 } 2913 2914 tx_mbox_p = nxgep->tx_mbox_areas_p->txmbox_areas_p[channel]; 2915 2916 /* Disable channel */ 2917 status = nxge_disable_txdma_channel(nxgep, channel, 2918 tx_ring_p, tx_mbox_p); 2919 if (status != NXGE_OK) { 2920 goto nxge_txdma_start_channel_exit; 2921 } 2922 2923 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, 2924 "==> nxge_txdma_stop_channel: event done")); 2925 2926 #endif 2927 2928 nxge_txdma_stop_channel_exit: 2929 NXGE_DEBUG_MSG((nxgep, MEM3_CTL, "<== nxge_txdma_stop_channel")); 2930 return (status); 2931 } 2932 2933 /* 2934 * nxge_txdma_get_ring 2935 * 2936 * Get the ring for a TDC. 2937 * 2938 * Arguments: 2939 * nxgep 2940 * channel 2941 * 2942 * Notes: 2943 * 2944 * NPI/NXGE function calls: 2945 * 2946 * Registers accessed: 2947 * 2948 * Context: 2949 * Any domain 2950 */ 2951 static p_tx_ring_t 2952 nxge_txdma_get_ring(p_nxge_t nxgep, uint16_t channel) 2953 { 2954 nxge_grp_set_t *set = &nxgep->tx_set; 2955 int tdc; 2956 2957 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_ring")); 2958 2959 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 2960 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2961 "<== nxge_txdma_get_ring: NULL ring pointer(s)")); 2962 goto return_null; 2963 } 2964 2965 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 2966 if ((1 << tdc) & set->owned.map) { 2967 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 2968 if (ring) { 2969 if (channel == ring->tdc) { 2970 NXGE_DEBUG_MSG((nxgep, TX_CTL, 2971 "<== nxge_txdma_get_ring: " 2972 "tdc %d ring $%p", tdc, ring)); 2973 return (ring); 2974 } 2975 } 2976 } 2977 } 2978 2979 return_null: 2980 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_ring: " 2981 "ring not found")); 2982 2983 return (NULL); 2984 } 2985 2986 /* 2987 * nxge_txdma_get_mbox 2988 * 2989 * Get the mailbox for a TDC. 
2990 * 2991 * Arguments: 2992 * nxgep 2993 * channel 2994 * 2995 * Notes: 2996 * 2997 * NPI/NXGE function calls: 2998 * 2999 * Registers accessed: 3000 * 3001 * Context: 3002 * Any domain 3003 */ 3004 static p_tx_mbox_t 3005 nxge_txdma_get_mbox(p_nxge_t nxgep, uint16_t channel) 3006 { 3007 nxge_grp_set_t *set = &nxgep->tx_set; 3008 int tdc; 3009 3010 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_get_mbox")); 3011 3012 if (nxgep->tx_mbox_areas_p == 0 || 3013 nxgep->tx_mbox_areas_p->txmbox_areas_p == 0) { 3014 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3015 "<== nxge_txdma_get_mbox: NULL mailbox pointer(s)")); 3016 goto return_null; 3017 } 3018 3019 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3020 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3021 "<== nxge_txdma_get_mbox: NULL ring pointer(s)")); 3022 goto return_null; 3023 } 3024 3025 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3026 if ((1 << tdc) & set->owned.map) { 3027 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3028 if (ring) { 3029 if (channel == ring->tdc) { 3030 tx_mbox_t *mailbox = nxgep-> 3031 tx_mbox_areas_p-> 3032 txmbox_areas_p[tdc]; 3033 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3034 "<== nxge_txdma_get_mbox: tdc %d " 3035 "ring $%p", tdc, mailbox)); 3036 return (mailbox); 3037 } 3038 } 3039 } 3040 } 3041 3042 return_null: 3043 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_get_mbox: " 3044 "mailbox not found")); 3045 3046 return (NULL); 3047 } 3048 3049 /* 3050 * nxge_tx_err_evnts 3051 * 3052 * Recover a TDC. 3053 * 3054 * Arguments: 3055 * nxgep 3056 * index The index to the TDC ring. 3057 * ldvp Used to get the channel number ONLY. 3058 * cs A copy of the bits from TX_CS. 3059 * 3060 * Notes: 3061 * Calling tree: 3062 * nxge_tx_intr() 3063 * 3064 * NPI/NXGE function calls: 3065 * npi_txdma_ring_error_get() 3066 * npi_txdma_inj_par_error_get() 3067 * nxge_txdma_fatal_err_recover() 3068 * 3069 * Registers accessed: 3070 * TX_RNG_ERR_LOGH DMC+0x40048 Transmit Ring Error Log High 3071 * TX_RNG_ERR_LOGL DMC+0x40050 Transmit Ring Error Log Low 3072 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 3073 * 3074 * Context: 3075 * Any domain XXX Remove code which accesses TDMC_INJ_PAR_ERR. 
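 * (Each fatal condition below bumps the per-TDC statistics, posts an FMA
 * ereport via NXGE_FM_REPORT_ERROR, and ends with
 * nxge_txdma_fatal_err_recover() being invoked for the channel.)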
3076 */ 3077 /*ARGSUSED*/ 3078 static nxge_status_t 3079 nxge_tx_err_evnts(p_nxge_t nxgep, uint_t index, p_nxge_ldv_t ldvp, tx_cs_t cs) 3080 { 3081 npi_handle_t handle; 3082 npi_status_t rs; 3083 uint8_t channel; 3084 p_tx_ring_t *tx_rings; 3085 p_tx_ring_t tx_ring_p; 3086 p_nxge_tx_ring_stats_t tdc_stats; 3087 boolean_t txchan_fatal = B_FALSE; 3088 nxge_status_t status = NXGE_OK; 3089 tdmc_inj_par_err_t par_err; 3090 uint32_t value; 3091 3092 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "==> nxge_tx_err_evnts")); 3093 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3094 channel = ldvp->channel; 3095 3096 tx_rings = nxgep->tx_rings->rings; 3097 tx_ring_p = tx_rings[index]; 3098 tdc_stats = tx_ring_p->tdc_stats; 3099 if ((cs.bits.ldw.pkt_size_err) || (cs.bits.ldw.pref_buf_par_err) || 3100 (cs.bits.ldw.nack_pref) || (cs.bits.ldw.nack_pkt_rd) || 3101 (cs.bits.ldw.conf_part_err) || (cs.bits.ldw.pkt_prt_err)) { 3102 if ((rs = npi_txdma_ring_error_get(handle, channel, 3103 &tdc_stats->errlog)) != NPI_SUCCESS) 3104 return (NXGE_ERROR | rs); 3105 } 3106 3107 if (cs.bits.ldw.mbox_err) { 3108 tdc_stats->mbox_err++; 3109 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3110 NXGE_FM_EREPORT_TDMC_MBOX_ERR); 3111 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3112 "==> nxge_tx_err_evnts(channel %d): " 3113 "fatal error: mailbox", channel)); 3114 txchan_fatal = B_TRUE; 3115 } 3116 if (cs.bits.ldw.pkt_size_err) { 3117 tdc_stats->pkt_size_err++; 3118 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3119 NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR); 3120 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3121 "==> nxge_tx_err_evnts(channel %d): " 3122 "fatal error: pkt_size_err", channel)); 3123 txchan_fatal = B_TRUE; 3124 } 3125 if (cs.bits.ldw.tx_ring_oflow) { 3126 tdc_stats->tx_ring_oflow++; 3127 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3128 NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW); 3129 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3130 "==> nxge_tx_err_evnts(channel %d): " 3131 "fatal error: tx_ring_oflow", channel)); 3132 txchan_fatal = B_TRUE; 3133 } 3134 if (cs.bits.ldw.pref_buf_par_err) { 3135 tdc_stats->pre_buf_par_err++; 3136 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3137 NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR); 3138 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3139 "==> nxge_tx_err_evnts(channel %d): " 3140 "fatal error: pre_buf_par_err", channel)); 3141 /* Clear error injection source for parity error */ 3142 (void) npi_txdma_inj_par_error_get(handle, &value); 3143 par_err.value = value; 3144 par_err.bits.ldw.inject_parity_error &= ~(1 << channel); 3145 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3146 txchan_fatal = B_TRUE; 3147 } 3148 if (cs.bits.ldw.nack_pref) { 3149 tdc_stats->nack_pref++; 3150 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3151 NXGE_FM_EREPORT_TDMC_NACK_PREF); 3152 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3153 "==> nxge_tx_err_evnts(channel %d): " 3154 "fatal error: nack_pref", channel)); 3155 txchan_fatal = B_TRUE; 3156 } 3157 if (cs.bits.ldw.nack_pkt_rd) { 3158 tdc_stats->nack_pkt_rd++; 3159 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3160 NXGE_FM_EREPORT_TDMC_NACK_PKT_RD); 3161 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3162 "==> nxge_tx_err_evnts(channel %d): " 3163 "fatal error: nack_pkt_rd", channel)); 3164 txchan_fatal = B_TRUE; 3165 } 3166 if (cs.bits.ldw.conf_part_err) { 3167 tdc_stats->conf_part_err++; 3168 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3169 NXGE_FM_EREPORT_TDMC_CONF_PART_ERR); 3170 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3171 "==> 
nxge_tx_err_evnts(channel %d): " 3172 "fatal error: config_partition_err", channel)); 3173 txchan_fatal = B_TRUE; 3174 } 3175 if (cs.bits.ldw.pkt_prt_err) { 3176 tdc_stats->pkt_part_err++; 3177 NXGE_FM_REPORT_ERROR(nxgep, nxgep->mac.portnum, channel, 3178 NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR); 3179 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3180 "==> nxge_tx_err_evnts(channel %d): " 3181 "fatal error: pkt_prt_err", channel)); 3182 txchan_fatal = B_TRUE; 3183 } 3184 3185 /* Clear error injection source in case this is an injected error */ 3186 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, channel, 0); 3187 3188 if (txchan_fatal) { 3189 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3190 " nxge_tx_err_evnts: " 3191 " fatal error on channel %d cs 0x%llx\n", 3192 channel, cs.value)); 3193 status = nxge_txdma_fatal_err_recover(nxgep, channel, 3194 tx_ring_p); 3195 if (status == NXGE_OK) { 3196 FM_SERVICE_RESTORED(nxgep); 3197 } 3198 } 3199 3200 NXGE_DEBUG_MSG((nxgep, TX2_CTL, "<== nxge_tx_err_evnts")); 3201 3202 return (status); 3203 } 3204 3205 static nxge_status_t 3206 nxge_txdma_fatal_err_recover( 3207 p_nxge_t nxgep, 3208 uint16_t channel, 3209 p_tx_ring_t tx_ring_p) 3210 { 3211 npi_handle_t handle; 3212 npi_status_t rs = NPI_SUCCESS; 3213 p_tx_mbox_t tx_mbox_p; 3214 nxge_status_t status = NXGE_OK; 3215 3216 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_txdma_fatal_err_recover")); 3217 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3218 "Recovering from TxDMAChannel#%d error...", channel)); 3219 3220 /* 3221 * Stop the dma channel waits for the stop done. 3222 * If the stop done bit is not set, then create 3223 * an error. 3224 */ 3225 3226 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3227 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel stop...")); 3228 MUTEX_ENTER(&tx_ring_p->lock); 3229 rs = npi_txdma_channel_control(handle, TXDMA_STOP, channel); 3230 if (rs != NPI_SUCCESS) { 3231 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3232 "==> nxge_txdma_fatal_err_recover (channel %d): " 3233 "stop failed ", channel)); 3234 goto fail; 3235 } 3236 3237 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reclaim...")); 3238 (void) nxge_txdma_reclaim(nxgep, tx_ring_p, 0); 3239 3240 /* 3241 * Reset TXDMA channel 3242 */ 3243 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel reset...")); 3244 if ((rs = npi_txdma_channel_control(handle, TXDMA_RESET, channel)) != 3245 NPI_SUCCESS) { 3246 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3247 "==> nxge_txdma_fatal_err_recover (channel %d)" 3248 " reset channel failed 0x%x", channel, rs)); 3249 goto fail; 3250 } 3251 3252 /* 3253 * Reset the tail (kick) register to 0. 3254 * (Hardware will not reset it. Tx overflow fatal 3255 * error if tail is not set to 0 after reset! 3256 */ 3257 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, channel, 0); 3258 3259 /* Restart TXDMA channel */ 3260 3261 if (!isLDOMguest(nxgep)) { 3262 tx_mbox_p = nxge_txdma_get_mbox(nxgep, channel); 3263 3264 // XXX This is a problem in HIO! 3265 /* 3266 * Initialize the TXDMA channel specific FZC control 3267 * configurations. These FZC registers are pertaining 3268 * to each TX channel (i.e. logical pages). 3269 */ 3270 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel restart...")); 3271 status = nxge_init_fzc_txdma_channel(nxgep, channel, 3272 tx_ring_p, tx_mbox_p); 3273 if (status != NXGE_OK) 3274 goto fail; 3275 } 3276 3277 /* 3278 * Initialize the event masks. 
3279 */ 3280 tx_ring_p->tx_evmask.value = 0; 3281 status = nxge_init_txdma_channel_event_mask(nxgep, channel, 3282 &tx_ring_p->tx_evmask); 3283 if (status != NXGE_OK) 3284 goto fail; 3285 3286 tx_ring_p->wr_index_wrap = B_FALSE; 3287 tx_ring_p->wr_index = 0; 3288 tx_ring_p->rd_index = 0; 3289 3290 /* 3291 * Load TXDMA descriptors, buffers, mailbox, 3292 * initialise the DMA channels and 3293 * enable each DMA channel. 3294 */ 3295 NXGE_DEBUG_MSG((nxgep, TX_CTL, "TxDMA channel enable...")); 3296 status = nxge_enable_txdma_channel(nxgep, channel, 3297 tx_ring_p, tx_mbox_p); 3298 MUTEX_EXIT(&tx_ring_p->lock); 3299 if (status != NXGE_OK) 3300 goto fail; 3301 3302 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3303 "Recovery Successful, TxDMAChannel#%d Restored", 3304 channel)); 3305 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_txdma_fatal_err_recover")); 3306 3307 return (NXGE_OK); 3308 3309 fail: 3310 MUTEX_EXIT(&tx_ring_p->lock); 3311 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3312 "nxge_txdma_fatal_err_recover (channel %d): " 3313 "failed to recover this txdma channel", channel)); 3314 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Recovery failed")); 3315 3316 return (status); 3317 } 3318 3319 /* 3320 * nxge_tx_port_fatal_err_recover 3321 * 3322 * Attempt to recover from a fatal port error. 3323 * 3324 * Arguments: 3325 * nxgep 3326 * 3327 * Notes: 3328 * How would a guest do this? 3329 * 3330 * NPI/NXGE function calls: 3331 * 3332 * Registers accessed: 3333 * 3334 * Context: 3335 * Service domain 3336 */ 3337 nxge_status_t 3338 nxge_tx_port_fatal_err_recover(p_nxge_t nxgep) 3339 { 3340 nxge_grp_set_t *set = &nxgep->tx_set; 3341 nxge_channel_t tdc; 3342 3343 tx_ring_t *ring; 3344 tx_mbox_t *mailbox; 3345 3346 npi_handle_t handle; 3347 nxge_status_t status; 3348 npi_status_t rs; 3349 3350 NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_tx_port_fatal_err_recover")); 3351 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3352 "Recovering from TxPort error...")); 3353 3354 if (isLDOMguest(nxgep)) { 3355 return (NXGE_OK); 3356 } 3357 3358 if (!(nxgep->drv_state & STATE_HW_INITIALIZED)) { 3359 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3360 "<== nxge_tx_port_fatal_err_recover: not initialized")); 3361 return (NXGE_ERROR); 3362 } 3363 3364 if (nxgep->tx_rings == 0 || nxgep->tx_rings->rings == 0) { 3365 NXGE_DEBUG_MSG((nxgep, TX_CTL, 3366 "<== nxge_tx_port_fatal_err_recover: " 3367 "NULL ring pointer(s)")); 3368 return (NXGE_ERROR); 3369 } 3370 3371 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3372 if ((1 << tdc) & set->owned.map) { 3373 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3374 if (ring) 3375 MUTEX_ENTER(&ring->lock); 3376 } 3377 } 3378 3379 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3380 3381 /* 3382 * Stop all the TDCs owned by us. 3383 * (The shared TDCs will have been stopped by their owners.) 3384 */ 3385 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3386 if ((1 << tdc) & set->owned.map) { 3387 ring = nxgep->tx_rings->rings[tdc]; 3388 if (ring) { 3389 rs = npi_txdma_channel_control 3390 (handle, TXDMA_STOP, tdc); 3391 if (rs != NPI_SUCCESS) { 3392 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3393 "nxge_tx_port_fatal_err_recover " 3394 "(channel %d): stop failed ", tdc)); 3395 goto fail; 3396 } 3397 } 3398 } 3399 } 3400 3401 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Reclaiming all TDCs...")); 3402 3403 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3404 if ((1 << tdc) & set->owned.map) { 3405 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3406 if (ring) 3407 (void) nxge_txdma_reclaim(nxgep, ring, 0); 3408 } 3409 } 3410 3411 /* 3412 * Reset all the TDCs. 
3413 */ 3414 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Resetting all TDCs...")); 3415 3416 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3417 if ((1 << tdc) & set->owned.map) { 3418 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3419 if (ring) { 3420 if ((rs = npi_txdma_channel_control 3421 (handle, TXDMA_RESET, tdc)) 3422 != NPI_SUCCESS) { 3423 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, 3424 "nxge_tx_port_fatal_err_recover " 3425 "(channel %d) reset channel " 3426 "failed 0x%x", tdc, rs)); 3427 goto fail; 3428 } 3429 } 3430 /* 3431 * Reset the tail (kick) register to 0. 3432 * (Hardware will not reset it. Tx overflow fatal 3433 * error if tail is not set to 0 after reset! 3434 */ 3435 TXDMA_REG_WRITE64(handle, TX_RING_KICK_REG, tdc, 0); 3436 } 3437 } 3438 3439 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Restarting all TDCs...")); 3440 3441 /* Restart all the TDCs */ 3442 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3443 if ((1 << tdc) & set->owned.map) { 3444 ring = nxgep->tx_rings->rings[tdc]; 3445 if (ring) { 3446 mailbox = nxge_txdma_get_mbox(nxgep, tdc); 3447 status = nxge_init_fzc_txdma_channel(nxgep, tdc, 3448 ring, mailbox); 3449 ring->tx_evmask.value = 0; 3450 /* 3451 * Initialize the event masks. 3452 */ 3453 status = nxge_init_txdma_channel_event_mask 3454 (nxgep, tdc, &ring->tx_evmask); 3455 3456 ring->wr_index_wrap = B_FALSE; 3457 ring->wr_index = 0; 3458 ring->rd_index = 0; 3459 3460 if (status != NXGE_OK) 3461 goto fail; 3462 if (status != NXGE_OK) 3463 goto fail; 3464 } 3465 } 3466 } 3467 3468 NXGE_DEBUG_MSG((nxgep, TX_CTL, "Re-enabling all TDCs...")); 3469 3470 /* Re-enable all the TDCs */ 3471 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3472 if ((1 << tdc) & set->owned.map) { 3473 ring = nxgep->tx_rings->rings[tdc]; 3474 if (ring) { 3475 mailbox = nxge_txdma_get_mbox(nxgep, tdc); 3476 status = nxge_enable_txdma_channel(nxgep, tdc, 3477 ring, mailbox); 3478 if (status != NXGE_OK) 3479 goto fail; 3480 } 3481 } 3482 } 3483 3484 /* 3485 * Unlock all the TDCs. 3486 */ 3487 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3488 if ((1 << tdc) & set->owned.map) { 3489 tx_ring_t *ring = nxgep->tx_rings->rings[tdc]; 3490 if (ring) 3491 MUTEX_EXIT(&ring->lock); 3492 } 3493 } 3494 3495 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery succeeded")); 3496 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 3497 3498 return (NXGE_OK); 3499 3500 fail: 3501 for (tdc = 0; tdc < NXGE_MAX_TDCS; tdc++) { 3502 if ((1 << tdc) & set->owned.map) { 3503 ring = nxgep->tx_rings->rings[tdc]; 3504 if (ring) 3505 MUTEX_EXIT(&ring->lock); 3506 } 3507 } 3508 3509 NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "Tx port recovery failed")); 3510 NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_tx_port_fatal_err_recover")); 3511 3512 return (status); 3513 } 3514 3515 /* 3516 * nxge_txdma_inject_err 3517 * 3518 * Inject an error into a TDC. 3519 * 3520 * Arguments: 3521 * nxgep 3522 * err_id The error to inject. 3523 * chan The channel to inject into. 3524 * 3525 * Notes: 3526 * This is called from nxge_main.c:nxge_err_inject() 3527 * Has this ioctl ever been used? 
3528 * 3529 * NPI/NXGE function calls: 3530 * npi_txdma_inj_par_error_get() 3531 * npi_txdma_inj_par_error_set() 3532 * 3533 * Registers accessed: 3534 * TDMC_INJ_PAR_ERR (FZC_DMC + 0x45040) TDMC Inject Parity Error 3535 * TDMC_INTR_DBG DMC + 0x40060 Transmit DMA Interrupt Debug 3537 * 3538 * Context: 3539 * Service domain 3540 */ 3541 void 3542 nxge_txdma_inject_err(p_nxge_t nxgep, uint32_t err_id, uint8_t chan) 3543 { 3544 tdmc_intr_dbg_t tdi; 3545 tdmc_inj_par_err_t par_err; 3546 uint32_t value; 3547 npi_handle_t handle; 3548 3549 switch (err_id) { 3550 3551 case NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR: 3552 handle = NXGE_DEV_NPI_HANDLE(nxgep); 3553 /* Clear error injection source for parity error */ 3554 (void) npi_txdma_inj_par_error_get(handle, &value); 3555 par_err.value = value; 3556 par_err.bits.ldw.inject_parity_error &= ~(1 << chan); 3557 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3558 3559 /* Set the injection source for this channel */ 3560 (void) npi_txdma_inj_par_error_get(handle, &value); 3561 par_err.value = value; 3562 par_err.bits.ldw.inject_parity_error |= (1 << chan); 3563 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INJ_PAR_ERR_REG\n", 3564 (unsigned long long)par_err.value); 3565 (void) npi_txdma_inj_par_error_set(handle, par_err.value); 3566 break; 3567 3568 case NXGE_FM_EREPORT_TDMC_MBOX_ERR: 3569 case NXGE_FM_EREPORT_TDMC_NACK_PREF: 3570 case NXGE_FM_EREPORT_TDMC_NACK_PKT_RD: 3571 case NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR: 3572 case NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW: 3573 case NXGE_FM_EREPORT_TDMC_CONF_PART_ERR: 3574 case NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR: 3575 TXDMA_REG_READ64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3576 chan, &tdi.value); 3577 if (err_id == NXGE_FM_EREPORT_TDMC_PREF_BUF_PAR_ERR) 3578 tdi.bits.ldw.pref_buf_par_err = 1; 3579 else if (err_id == NXGE_FM_EREPORT_TDMC_MBOX_ERR) 3580 tdi.bits.ldw.mbox_err = 1; 3581 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PREF) 3582 tdi.bits.ldw.nack_pref = 1; 3583 else if (err_id == NXGE_FM_EREPORT_TDMC_NACK_PKT_RD) 3584 tdi.bits.ldw.nack_pkt_rd = 1; 3585 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_SIZE_ERR) 3586 tdi.bits.ldw.pkt_size_err = 1; 3587 else if (err_id == NXGE_FM_EREPORT_TDMC_TX_RING_OFLOW) 3588 tdi.bits.ldw.tx_ring_oflow = 1; 3589 else if (err_id == NXGE_FM_EREPORT_TDMC_CONF_PART_ERR) 3590 tdi.bits.ldw.conf_part_err = 1; 3591 else if (err_id == NXGE_FM_EREPORT_TDMC_PKT_PRT_ERR) 3592 tdi.bits.ldw.pkt_part_err = 1; 3593 #if defined(__i386) 3594 cmn_err(CE_NOTE, "!Write 0x%llx to TDMC_INTR_DBG_REG\n", 3595 tdi.value); 3596 #else 3597 cmn_err(CE_NOTE, "!Write 0x%lx to TDMC_INTR_DBG_REG\n", 3598 tdi.value); 3599 #endif 3600 TXDMA_REG_WRITE64(nxgep->npi_handle, TDMC_INTR_DBG_REG, 3601 chan, tdi.value); 3602 3603 break; 3604 } 3605 } 3606
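/*
 * Note: the parity-error case above programs TDMC_INJ_PAR_ERR directly,
 * while the remaining cases set bits in TDMC_INTR_DBG. The matching
 * cleanup is in nxge_tx_err_evnts(), which zeroes TDMC_INTR_DBG and, for
 * a prefetch-buffer parity error, clears the channel's bit in
 * TDMC_INJ_PAR_ERR once the error has been handled.
 */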