1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #include <sys/mac_provider.h> 27 #include <sys/nxge/nxge_impl.h> 28 #include <sys/nxge/nxge_hio.h> 29 #include <npi_tx_wr64.h> 30 31 /* Software LSO required header files */ 32 #include <netinet/tcp.h> 33 #include <inet/ip_impl.h> 34 #include <inet/tcp.h> 35 36 extern uint64_t mac_pkt_hash(uint_t, mblk_t *mp, uint8_t policy, 37 boolean_t is_outbound); 38 39 static mblk_t *nxge_lso_eliminate(mblk_t *); 40 static mblk_t *nxge_do_softlso(mblk_t *mp, uint32_t mss); 41 static void nxge_lso_info_get(mblk_t *, uint32_t *, uint32_t *); 42 static void nxge_hcksum_retrieve(mblk_t *, 43 uint32_t *, uint32_t *, uint32_t *, 44 uint32_t *, uint32_t *); 45 static uint32_t nxge_csgen(uint16_t *, int); 46 47 extern uint32_t nxge_reclaim_pending; 48 extern uint32_t nxge_bcopy_thresh; 49 extern uint32_t nxge_dvma_thresh; 50 extern uint32_t nxge_dma_stream_thresh; 51 extern uint32_t nxge_tx_minfree; 52 extern uint32_t nxge_tx_intr_thres; 53 extern uint32_t nxge_tx_max_gathers; 54 extern uint32_t nxge_tx_tiny_pack; 55 extern 
uint32_t nxge_tx_use_bcopy;
extern nxge_tx_mode_t nxge_tx_scheme;

/*
 * Number of accumulated LSO gather descriptors at which an intermediate
 * doorbell kick is issued while streaming a soft-LSO chain (see nxge_start).
 */
uint32_t nxge_lso_kick_cnt = 2;


/*
 * nxge_tx_ring_task() -- taskq callback for a TX ring.
 *
 * Reclaims completed transmit descriptors under the ring lock and, if the
 * ring has not been offlined, notifies the MAC layer that the ring may have
 * room again so a flow-controlled mac client can resume transmitting.
 */
void
nxge_tx_ring_task(void *arg)
{
	p_tx_ring_t	ring = (p_tx_ring_t)arg;

	ASSERT(ring->tx_ring_handle != NULL);

	MUTEX_ENTER(&ring->lock);
	/* Reclaim all completed descriptors (0 => no minimum required). */
	(void) nxge_txdma_reclaim(ring->nxgep, ring, 0);
	MUTEX_EXIT(&ring->lock);

	if (!ring->tx_ring_offline) {
		mac_tx_ring_update(ring->nxgep->mach, ring->tx_ring_handle);
	}
}

/*
 * nxge_tx_ring_dispatch() -- schedule nxge_tx_ring_task() on the ring's
 * taskq.  Called from the send path when a transmit could not be queued,
 * so some buffers get reclaimed asynchronously.
 */
static void
nxge_tx_ring_dispatch(p_tx_ring_t ring)
{
	/*
	 * Kick the ring task to reclaim some buffers.
	 */
	(void) ddi_taskq_dispatch(ring->taskq,
	    nxge_tx_ring_task, (void *)ring, DDI_SLEEP);
}

/*
 * nxge_tx_ring_send() -- MAC ring transmit entry point.
 *
 * arg is the per-ring handle; mp is the packet (possibly a b_cont chain).
 * Maps the ring handle to the hardware TDC channel and hands the packet to
 * nxge_start().  Returns NULL when the packet has been consumed (sent or
 * freed); returns mp unchanged when the ring is currently full, after
 * dispatching a reclaim task -- presumably the MAC framework requeues and
 * retries in that case (TODO confirm against mac(9E) contract).
 */
mblk_t *
nxge_tx_ring_send(void *arg, mblk_t *mp)
{
	p_nxge_ring_handle_t	nrhp = (p_nxge_ring_handle_t)arg;
	p_nxge_t		nxgep;
	p_tx_ring_t		tx_ring_p;
	int			status, channel;

	ASSERT(nrhp != NULL);
	nxgep = nrhp->nxgep;
	channel = nxgep->pt_config.hw_config.tdc.start + nrhp->index;
	tx_ring_p = nxgep->tx_rings->rings[channel];

	/*
	 * We may be in a transition from offlined DMA to onlined
	 * DMA.
	 */
	if (tx_ring_p == NULL) {
		/* Should not happen outside that transition; drop packet. */
		ASSERT(tx_ring_p != NULL);
		freemsg(mp);
		return ((mblk_t *)NULL);
	}

	/*
	 * Valid DMA?
	 */
	ASSERT(nxgep == tx_ring_p->nxgep);

	/*
	 * Make sure DMA is not offlined.
	 */
	if (isLDOMservice(nxgep) && tx_ring_p->tx_ring_offline) {
		ASSERT(!tx_ring_p->tx_ring_offline);
		freemsg(mp);
		return ((mblk_t *)NULL);
	}

	/*
	 * Transmit the packet.
	 */
	status = nxge_start(nxgep, tx_ring_p, mp);
	if (status) {
		/* Ring full: trigger async reclaim and give mp back. */
		nxge_tx_ring_dispatch(tx_ring_p);
		return (mp);
	}

	return ((mblk_t *)NULL);
}

/*
 * nxge_start() -- build TX descriptors for one packet (or soft-LSO chain)
 * and kick the transmit DMA channel.
 *
 * Returns 0 when the packet has been consumed (queued to hardware or freed
 * on an unrecoverable error); returns 1 when no descriptors are available
 * (or DMA binding failed for a non-LSO packet) and the caller should retry.
 */
int
nxge_start(p_nxge_t nxgep, p_tx_ring_t tx_ring_p, p_mblk_t mp)
{
	int			dma_status, status = 0;
	p_tx_desc_t		tx_desc_ring_vp;
	npi_handle_t		npi_desc_handle;
	nxge_os_dma_handle_t	tx_desc_dma_handle;
	p_tx_desc_t		tx_desc_p;
	p_tx_msg_t		tx_msg_ring;
	p_tx_msg_t		tx_msg_p;
	tx_desc_t		tx_desc, *tmp_desc_p;
	tx_desc_t		sop_tx_desc, *sop_tx_desc_p;
	p_tx_pkt_header_t	hdrp;
	tx_pkt_hdr_all_t	tmp_hdrp;
	p_tx_pkt_hdr_all_t	pkthdrp;
	uint8_t			npads = 0;
	uint64_t		dma_ioaddr;
	uint32_t		dma_flags;
	int			last_bidx;
	uint8_t			*b_rptr;
	caddr_t			kaddr;
	uint32_t		nmblks;
	uint32_t		ngathers;
	uint32_t		clen;
	int			len;
	uint32_t		pkt_len, pack_len, min_len;
	uint32_t		bcopy_thresh;
	int			i, cur_index, sop_index;
	uint16_t		tail_index;
	boolean_t		tail_wrap = B_FALSE;
	nxge_dma_common_t	desc_area;
	nxge_os_dma_handle_t	dma_handle;
	ddi_dma_cookie_t	dma_cookie;
	npi_handle_t		npi_handle;
	p_mblk_t		nmp;
	p_mblk_t		t_mp;
	uint32_t		ncookies;
	boolean_t		good_packet;
	boolean_t		mark_mode = B_FALSE;
	p_nxge_stats_t		statsp;
	p_nxge_tx_ring_stats_t	tdc_stats;
	t_uscalar_t		start_offset = 0;
	t_uscalar_t		stuff_offset = 0;
	t_uscalar_t		end_offset = 0;
	t_uscalar_t		value = 0;
	t_uscalar_t		cksum_flags = 0;
	boolean_t		cksum_on = B_FALSE;
	uint32_t		boff = 0;
	uint64_t		tot_xfer_len = 0;
	boolean_t		header_set = B_FALSE;
#ifdef NXGE_DEBUG
	p_tx_desc_t		tx_desc_ring_pp;
	p_tx_desc_t		tx_desc_pp;
	tx_desc_t		*save_desc_p;
	int			dump_len;
	int			sad_len;
	uint64_t		sad;
	int			xfer_len;
	uint32_t		msgsize;
#endif
	p_mblk_t		mp_chain = NULL;
	boolean_t		is_lso = B_FALSE;
	boolean_t		lso_again;
	int			cur_index_lso;
	p_mblk_t		nmp_lso_save;
	uint32_t
	    lso_ngathers;
	boolean_t		lso_tail_wrap = B_FALSE;

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_start: tx dma channel %d", tx_ring_p->tdc));
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_start: Starting tdc %d desc pending %d",
	    tx_ring_p->tdc, tx_ring_p->descs_pending));

	statsp = nxgep->statsp;

	/*
	 * Drop the packet early if the link is known to be down (only
	 * meaningful for non-guest domains and non-loopback modes).
	 */
	if (!isLDOMguest(nxgep)) {
		switch (nxgep->mac.portmode) {
		default:
			if (nxgep->statsp->port_stats.lb_mode ==
			    nxge_lb_normal) {
				if (!statsp->mac_stats.link_up) {
					freemsg(mp);
					NXGE_DEBUG_MSG((nxgep, TX_CTL,
					    "==> nxge_start: "
					    "link not up"));
					goto nxge_start_fail1;
				}
			}
			break;
		case PORT_10G_FIBER:
			/*
			 * For the following modes, check the link status
			 * before sending the packet out:
			 * nxge_lb_normal,
			 * nxge_lb_ext10g,
			 * nxge_lb_ext1000,
			 * nxge_lb_ext100,
			 * nxge_lb_ext10.
			 */
			if (nxgep->statsp->port_stats.lb_mode <
			    nxge_lb_phy10g) {
				if (!statsp->mac_stats.link_up) {
					freemsg(mp);
					NXGE_DEBUG_MSG((nxgep, TX_CTL,
					    "==> nxge_start: "
					    "link not up"));
					goto nxge_start_fail1;
				}
			}
			break;
		}
	}

	if ((!(nxgep->drv_state & STATE_HW_INITIALIZED)) ||
	    (nxgep->nxge_mac_state != NXGE_MAC_STARTED)) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_start: hardware not initialized or stopped"));
		freemsg(mp);
		goto nxge_start_fail1;
	}

	/*
	 * Soft LSO: segment an oversized packet into a b_next chain of
	 * MSS-sized messages.  If segmentation occurred, transmit the chain
	 * one message at a time (mp_chain holds the not-yet-sent remainder).
	 */
	if (nxgep->soft_lso_enable) {
		mp_chain = nxge_lso_eliminate(mp);
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_start(0): LSO mp $%p mp_chain $%p",
		    mp, mp_chain));
		if (mp_chain == NULL) {
			NXGE_ERROR_MSG((nxgep, TX_CTL,
			    "==> nxge_send(0): NULL mp_chain $%p != mp $%p",
			    mp_chain, mp));
			goto nxge_start_fail1;
		}
		if (mp_chain != mp) {
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_send(1): IS LSO mp_chain $%p != mp $%p",
			    mp_chain, mp));
			is_lso = B_TRUE;
			mp = mp_chain;
			mp_chain = mp_chain->b_next;
			mp->b_next = NULL;
		}
	}

	/*
	 * Retrieve checksum offload metadata and convert the L3-relative
	 * offsets to frame-relative ones by adding the Ethernet header size.
	 */
	mac_hcksum_get(mp, &start_offset, &stuff_offset, &end_offset,
	    &value, &cksum_flags);
	if (!NXGE_IS_VLAN_PACKET(mp->b_rptr)) {
		start_offset += sizeof (ether_header_t);
		stuff_offset += sizeof (ether_header_t);
	} else {
		start_offset += sizeof (struct ether_vlan_header);
		stuff_offset += sizeof (struct ether_vlan_header);
	}

	if (cksum_flags & HCK_PARTIALCKSUM) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_start: mp $%p len %d "
		    "cksum_flags 0x%x (partial checksum) ",
		    mp, MBLKL(mp), cksum_flags));
		cksum_on = B_TRUE;
	}

	/* Build the internal transmit packet header in a local template. */
	pkthdrp = (p_tx_pkt_hdr_all_t)&tmp_hdrp;
	pkthdrp->reserved = 0;
	tmp_hdrp.pkthdr.value = 0;
	nxge_fill_tx_hdr(mp, B_FALSE, cksum_on,
	    0, 0, pkthdrp,
	    start_offset, stuff_offset);

	lso_again = B_FALSE;
	lso_ngathers = 0;

	MUTEX_ENTER(&tx_ring_p->lock);

	if (isLDOMservice(nxgep)) {
		tx_ring_p->tx_ring_busy = B_TRUE;
		if (tx_ring_p->tx_ring_offline) {
			freemsg(mp);
			tx_ring_p->tx_ring_busy = B_FALSE;
			(void) atomic_swap_32(&tx_ring_p->tx_ring_offline,
			    NXGE_TX_RING_OFFLINED);
			MUTEX_EXIT(&tx_ring_p->lock);
			return (status);
		}
	}

	/* Remember the ring position so a failed LSO chain can be rolled back. */
	cur_index_lso = tx_ring_p->wr_index;
	lso_tail_wrap = tx_ring_p->wr_index_wrap;
start_again:
	ngathers = 0;
	sop_index = tx_ring_p->wr_index;
#ifdef NXGE_DEBUG
	if (tx_ring_p->descs_pending) {
		NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: "
		    "desc pending %d ", tx_ring_p->descs_pending));
	}

	dump_len = (int)(MBLKL(mp));
	dump_len = (dump_len > 128) ? 128: dump_len;

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_start: tdc %d: dumping ...: b_rptr $%p "
	    "(Before header reserve: ORIGINAL LEN %d)",
	    tx_ring_p->tdc,
	    mp->b_rptr,
	    dump_len));

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: dump packets "
	    "(IP ORIGINAL b_rptr $%p): %s", mp->b_rptr,
	    nxge_dump_packet((char *)mp->b_rptr, dump_len)));
#endif

	tdc_stats = tx_ring_p->tdc_stats;
	/* mark_mode: ask hardware for a completion mark when the ring is low. */
	mark_mode = (tx_ring_p->descs_pending &&
	    (((int)tx_ring_p->tx_ring_size - (int)tx_ring_p->descs_pending) <
	    (int)nxge_tx_minfree));

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "TX Descriptor ring is channel %d mark mode %d",
	    tx_ring_p->tdc, mark_mode));

	/* Try a synchronous reclaim when descriptors are running out. */
	if ((tx_ring_p->descs_pending + lso_ngathers) >= nxge_reclaim_pending) {
		if (!nxge_txdma_reclaim(nxgep, tx_ring_p,
		    (nxge_tx_minfree + lso_ngathers))) {
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "TX Descriptor ring is full: channel %d",
			    tx_ring_p->tdc));
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "TX Descriptor ring is full: channel %d",
			    tx_ring_p->tdc));
			if (is_lso) {
				/*
				 * free the current mp and mp_chain if not FULL.
				 */
				tdc_stats->tx_no_desc++;
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "LSO packet: TX Descriptor ring is full: "
				    "channel %d",
				    tx_ring_p->tdc));
				goto nxge_start_fail_lso;
			} else {
				(void) atomic_cas_32(
				    (uint32_t *)&tx_ring_p->queueing, 0, 1);
				tdc_stats->tx_no_desc++;

				if (isLDOMservice(nxgep)) {
					tx_ring_p->tx_ring_busy = B_FALSE;
					if (tx_ring_p->tx_ring_offline) {
						(void) atomic_swap_32(
						    &tx_ring_p->tx_ring_offline,
						    NXGE_TX_RING_OFFLINED);
					}
				}

				MUTEX_EXIT(&tx_ring_p->lock);
				status = 1;
				goto nxge_start_fail1;
			}
		}
	}

	nmp = mp;
	i = sop_index = tx_ring_p->wr_index;
	nmblks = 0;
	ngathers = 0;
	pkt_len = 0;
	pack_len = 0;
	clen = 0;
	last_bidx = -1;
	good_packet = B_TRUE;

	desc_area = tx_ring_p->tdc_desc;
	npi_handle = desc_area.npi_handle;
	npi_desc_handle.regh = (nxge_os_acc_handle_t)
	    DMA_COMMON_ACC_HANDLE(desc_area);
	tx_desc_ring_vp = (p_tx_desc_t)DMA_COMMON_VPTR(desc_area);
	tx_desc_dma_handle = (nxge_os_dma_handle_t)
	    DMA_COMMON_HANDLE(desc_area);
	tx_msg_ring = tx_ring_p->tx_msg_ring;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: wr_index %d i %d",
	    sop_index, i));

#ifdef NXGE_DEBUG
	msgsize = msgdsize(nmp);
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_start(1): wr_index %d i %d msgdsize %d",
	    sop_index, i, msgsize));
#endif
	/*
	 * The first 16 bytes of the premapped buffer are reserved
	 * for header. No padding will be used.
	 */
	pkt_len = pack_len = boff = TX_PKT_HEADER_SIZE;
	if (nxge_tx_use_bcopy && (nxgep->niu_type != N2_NIU)) {
		bcopy_thresh = (nxge_bcopy_thresh - TX_PKT_HEADER_SIZE);
	} else {
		bcopy_thresh = (TX_BCOPY_SIZE - TX_PKT_HEADER_SIZE);
	}
	/*
	 * Walk the b_cont chain, emitting one gather descriptor per
	 * fragment.  Small fragments are bcopy'd into the premapped
	 * per-descriptor buffer; large ones are DMA-bound in place.
	 */
	while (nmp) {
		good_packet = B_TRUE;
		b_rptr = nmp->b_rptr;
		len = MBLKL(nmp);
		if (len <= 0) {
			nmp = nmp->b_cont;
			continue;
		}
		nmblks++;

		NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(1): nmblks %d "
		    "len %d pkt_len %d pack_len %d",
		    nmblks, len, pkt_len, pack_len));
		/*
		 * Hardware limits the transfer length to 4K for NIU and
		 * 4076 (TX_MAX_TRANSFER_LENGTH) for Neptune. But we just
		 * use TX_MAX_TRANSFER_LENGTH as the limit for both.
		 * If len is longer than the limit, then we break nmp into
		 * two chunks: Make the first chunk equal to the limit and
		 * the second chunk for the remaining data. If the second
		 * chunk is still larger than the limit, then it will be
		 * broken into two in the next pass.
		 */
		if (len > TX_MAX_TRANSFER_LENGTH - TX_PKT_HEADER_SIZE) {
			if ((t_mp = dupb(nmp)) != NULL) {
				nmp->b_wptr = nmp->b_rptr +
				    (TX_MAX_TRANSFER_LENGTH
				    - TX_PKT_HEADER_SIZE);
				t_mp->b_rptr = nmp->b_wptr;
				t_mp->b_cont = nmp->b_cont;
				nmp->b_cont = t_mp;
				len = MBLKL(nmp);
			} else {
				if (is_lso) {
					NXGE_DEBUG_MSG((nxgep, TX_CTL,
					    "LSO packet: dupb failed: "
					    "channel %d",
					    tx_ring_p->tdc));
					mp = nmp;
					goto nxge_start_fail_lso;
				} else {
					good_packet = B_FALSE;
					goto nxge_start_fail2;
				}
			}
		}
		tx_desc.value = 0;
		tx_desc_p = &tx_desc_ring_vp[i];
#ifdef NXGE_DEBUG
		tx_desc_pp = &tx_desc_ring_pp[i];
#endif
		tx_msg_p = &tx_msg_ring[i];
#if defined(__i386)
		npi_desc_handle.regp = (uint32_t)tx_desc_p;
#else
		npi_desc_handle.regp = (uint64_t)tx_desc_p;
#endif
		/*
		 * First fragment is too big for bcopy: emit a header-only
		 * descriptor from the premapped buffer (holding just the
		 * 16-byte TX packet header) and restart the bcopy state.
		 */
		if (!header_set &&
		    ((!nxge_tx_use_bcopy && (len > TX_BCOPY_SIZE)) ||
		    (len >= bcopy_thresh))) {
			header_set = B_TRUE;
			bcopy_thresh += TX_PKT_HEADER_SIZE;
			boff = 0;
			pack_len = 0;
			kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
			hdrp = (p_tx_pkt_header_t)kaddr;
			clen = pkt_len;
			dma_handle = tx_msg_p->buf_dma_handle;
			dma_ioaddr = DMA_COMMON_IOADDR(tx_msg_p->buf_dma);
			(void) ddi_dma_sync(dma_handle,
			    i * nxge_bcopy_thresh, nxge_bcopy_thresh,
			    DDI_DMA_SYNC_FORDEV);

			tx_msg_p->flags.dma_type = USE_BCOPY;
			goto nxge_start_control_header_only;
		}

		pkt_len += len;
		pack_len += len;

		NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(3): "
		    "desc entry %d "
		    "DESC IOADDR $%p "
		    "desc_vp $%p tx_desc_p $%p "
		    "desc_pp $%p tx_desc_pp $%p "
		    "len %d pkt_len %d pack_len %d",
		    i,
		    DMA_COMMON_IOADDR(desc_area),
		    tx_desc_ring_vp, tx_desc_p,
		    tx_desc_ring_pp, tx_desc_pp,
		    len, pkt_len, pack_len));

		if (len < bcopy_thresh) {
			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(4): "
			    "USE BCOPY: "));
			/*
			 * Tiny-packet packing: append this fragment to the
			 * previous descriptor's buffer when it still fits,
			 * reusing that descriptor (ngathers is decremented).
			 */
			if (nxge_tx_tiny_pack) {
				uint32_t blst =
				    TXDMA_DESC_NEXT_INDEX(i, -1,
				    tx_ring_p->tx_wrap_mask);
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_start(5): pack"));
				if ((pack_len <= bcopy_thresh) &&
				    (last_bidx == blst)) {
					NXGE_DEBUG_MSG((nxgep, TX_CTL,
					    "==> nxge_start: pack(6) "
					    "(pkt_len %d pack_len %d)",
					    pkt_len, pack_len));
					i = blst;
					tx_desc_p = &tx_desc_ring_vp[i];
#ifdef NXGE_DEBUG
					tx_desc_pp = &tx_desc_ring_pp[i];
#endif
					tx_msg_p = &tx_msg_ring[i];
					boff = pack_len - len;
					ngathers--;
				} else if (pack_len > bcopy_thresh &&
				    header_set) {
					pack_len = len;
					boff = 0;
					bcopy_thresh = nxge_bcopy_thresh;
					NXGE_DEBUG_MSG((nxgep, TX_CTL,
					    "==> nxge_start(7): > max NEW "
					    "bcopy thresh %d "
					    "pkt_len %d pack_len %d(next)",
					    bcopy_thresh,
					    pkt_len, pack_len));
				}
				last_bidx = i;
			}
			kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
			if ((boff == TX_PKT_HEADER_SIZE) && (nmblks == 1)) {
				hdrp = (p_tx_pkt_header_t)kaddr;
				header_set = B_TRUE;
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_start(7_x2): "
				    "pkt_len %d pack_len %d (new hdrp $%p)",
				    pkt_len, pack_len, hdrp));
			}
			tx_msg_p->flags.dma_type = USE_BCOPY;
			kaddr += boff;
			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(8): "
			    "USE BCOPY: before bcopy "
			    "DESC IOADDR $%p entry %d "
			    "bcopy packets %d "
			    "bcopy kaddr $%p "
			    "bcopy ioaddr (SAD) $%p "
			    "bcopy clen %d "
			    "bcopy boff %d",
			    DMA_COMMON_IOADDR(desc_area), i,
			    tdc_stats->tx_hdr_pkts,
			    kaddr,
			    dma_ioaddr,
			    clen,
			    boff));
			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: "
			    "1USE BCOPY: "));
			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: "
			    "2USE BCOPY: "));
			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: "
			    "last USE BCOPY: copy from b_rptr $%p "
			    "to KADDR $%p (len %d offset %d",
			    b_rptr, kaddr, len, boff));

			bcopy(b_rptr, kaddr, len);

#ifdef NXGE_DEBUG
			dump_len = (len > 128) ? 128: len;
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_start: dump packets "
			    "(After BCOPY len %d)"
			    "(b_rptr $%p): %s", len, nmp->b_rptr,
			    nxge_dump_packet((char *)nmp->b_rptr,
			    dump_len)));
#endif

			dma_handle = tx_msg_p->buf_dma_handle;
			dma_ioaddr = DMA_COMMON_IOADDR(tx_msg_p->buf_dma);
			(void) ddi_dma_sync(dma_handle,
			    i * nxge_bcopy_thresh, nxge_bcopy_thresh,
			    DDI_DMA_SYNC_FORDEV);
			clen = len + boff;
			tdc_stats->tx_hdr_pkts++;
			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(9): "
			    "USE BCOPY: "
			    "DESC IOADDR $%p entry %d "
			    "bcopy packets %d "
			    "bcopy kaddr $%p "
			    "bcopy ioaddr (SAD) $%p "
			    "bcopy clen %d "
			    "bcopy boff %d",
			    DMA_COMMON_IOADDR(desc_area),
			    i,
			    tdc_stats->tx_hdr_pkts,
			    kaddr,
			    dma_ioaddr,
			    clen,
			    boff));
		} else {
			/* Large fragment: bind it for DMA in place. */
			NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(12): "
			    "USE DVMA: len %d", len));
			tx_msg_p->flags.dma_type = USE_DMA;
			dma_flags = DDI_DMA_WRITE;
			if (len < nxge_dma_stream_thresh) {
				dma_flags |= DDI_DMA_CONSISTENT;
			} else {
				dma_flags |= DDI_DMA_STREAMING;
			}

			dma_handle = tx_msg_p->dma_handle;
			dma_status = ddi_dma_addr_bind_handle(dma_handle, NULL,
			    (caddr_t)b_rptr, len, dma_flags,
			    DDI_DMA_DONTWAIT, NULL,
			    &dma_cookie, &ncookies);
			if (dma_status == DDI_DMA_MAPPED) {
				dma_ioaddr = dma_cookie.dmac_laddress;
				len = (int)dma_cookie.dmac_size;
				clen = (uint32_t)dma_cookie.dmac_size;
				NXGE_DEBUG_MSG((nxgep, TX_CTL,
				    "==> nxge_start(12_1): "
				    "USE DVMA: len %d clen %d "
				    "ngathers %d",
				    len, clen,
				    ngathers));
#if defined(__i386)
				npi_desc_handle.regp = (uint32_t)tx_desc_p;
#else
				npi_desc_handle.regp = (uint64_t)tx_desc_p;
#endif
				/* One gather descriptor per extra DMA cookie. */
				while (ncookies > 1) {
					ngathers++;
					/*
					 * this is the fix for multiple
					 * cookies, which are basically
					 * a descriptor entry, we don't set
					 * SOP bit as well as related fields
					 */

					(void) npi_txdma_desc_gather_set(
					    npi_desc_handle,
					    &tx_desc,
					    (ngathers -1),
					    mark_mode,
					    ngathers,
					    dma_ioaddr,
					    clen);

					tx_msg_p->tx_msg_size = clen;
					NXGE_DEBUG_MSG((nxgep, TX_CTL,
					    "==> nxge_start: DMA "
					    "ncookie %d "
					    "ngathers %d "
					    "dma_ioaddr $%p len %d"
					    "desc $%p descp $%p (%d)",
					    ncookies,
					    ngathers,
					    dma_ioaddr, clen,
					    *tx_desc_p, tx_desc_p, i));

					ddi_dma_nextcookie(dma_handle,
					    &dma_cookie);
					dma_ioaddr =
					    dma_cookie.dmac_laddress;

					len = (int)dma_cookie.dmac_size;
					clen = (uint32_t)dma_cookie.dmac_size;
					NXGE_DEBUG_MSG((nxgep, TX_CTL,
					    "==> nxge_start(12_2): "
					    "USE DVMA: len %d clen %d ",
					    len, clen));

					i = TXDMA_DESC_NEXT_INDEX(i, 1,
					    tx_ring_p->tx_wrap_mask);
					tx_desc_p = &tx_desc_ring_vp[i];

#if defined(__i386)
					npi_desc_handle.regp =
					    (uint32_t)tx_desc_p;
#else
					npi_desc_handle.regp =
					    (uint64_t)tx_desc_p;
#endif
					tx_msg_p = &tx_msg_ring[i];
					tx_msg_p->flags.dma_type = USE_NONE;
					tx_desc.value = 0;

					ncookies--;
				}
				tdc_stats->tx_ddi_pkts++;
				NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start:"
				    "DMA: ddi packets %d",
				    tdc_stats->tx_ddi_pkts));
			} else {
				/*
				 * NOTE(review): this message prints "status"
				 * (still 0 at this point) rather than
				 * dma_status, which holds the actual bind
				 * error -- looks like it was meant to log
				 * dma_status; confirm before changing.
				 */
				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
				    "dma mapping failed for %d "
				    "bytes addr $%p flags %x (%d)",
				    len, b_rptr, status, status));
				good_packet = B_FALSE;
				tdc_stats->tx_dma_bind_fail++;
				tx_msg_p->flags.dma_type = USE_NONE;
				if (is_lso) {
					mp = nmp;
					goto nxge_start_fail_lso;
				} else {
					status = 1;
					goto nxge_start_fail2;
				}
			}
		} /* ddi dvma */

		if (is_lso) {
			nmp_lso_save = nmp;
		}
		nmp = nmp->b_cont;
nxge_start_control_header_only:
#if defined(__i386)
		npi_desc_handle.regp = (uint32_t)tx_desc_p;
#else
		npi_desc_handle.regp = (uint64_t)tx_desc_p;
#endif
		ngathers++;

		/*
		 * Build the descriptor: the first (SOP) descriptor is kept
		 * in a local copy and only written to the ring at the end,
		 * after mark/num_ptr/sop are known.
		 */
		if (ngathers == 1) {
#ifdef NXGE_DEBUG
			save_desc_p = &sop_tx_desc;
#endif
			sop_tx_desc_p = &sop_tx_desc;
			sop_tx_desc_p->value = 0;
			sop_tx_desc_p->bits.hdw.tr_len = clen;
			sop_tx_desc_p->bits.hdw.sad = dma_ioaddr >> 32;
			sop_tx_desc_p->bits.ldw.sad = dma_ioaddr & 0xffffffff;
		} else {
#ifdef NXGE_DEBUG
			save_desc_p = &tx_desc;
#endif
			tmp_desc_p = &tx_desc;
			tmp_desc_p->value = 0;
			tmp_desc_p->bits.hdw.tr_len = clen;
			tmp_desc_p->bits.hdw.sad = dma_ioaddr >> 32;
			tmp_desc_p->bits.ldw.sad = dma_ioaddr & 0xffffffff;

			tx_desc_p->value = tmp_desc_p->value;
		}

		NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(13): "
		    "Desc_entry %d ngathers %d "
		    "desc_vp $%p tx_desc_p $%p "
		    "len %d clen %d pkt_len %d pack_len %d nmblks %d "
		    "dma_ioaddr (SAD) $%p mark %d",
		    i, ngathers,
		    tx_desc_ring_vp, tx_desc_p,
		    len, clen, pkt_len, pack_len, nmblks,
		    dma_ioaddr, mark_mode));

#ifdef NXGE_DEBUG
		npi_desc_handle.nxgep = nxgep;
		npi_desc_handle.function.function = nxgep->function_num;
		npi_desc_handle.function.instance = nxgep->instance;
		sad = (save_desc_p->value & TX_PKT_DESC_SAD_MASK);
		xfer_len = ((save_desc_p->value & TX_PKT_DESC_TR_LEN_MASK) >>
		    TX_PKT_DESC_TR_LEN_SHIFT);


		NXGE_DEBUG_MSG((nxgep, TX_CTL, "\n\t: value 0x%llx\n"
		    "\t\tsad $%p\ttr_len %d len %d\tnptrs %d\t"
		    "mark %d sop %d\n",
		    save_desc_p->value,
		    sad,
		    save_desc_p->bits.hdw.tr_len,
		    xfer_len,
		    save_desc_p->bits.hdw.num_ptr,
		    save_desc_p->bits.hdw.mark,
		    save_desc_p->bits.hdw.sop));

		npi_txdma_dump_desc_one(npi_desc_handle, NULL, i);
#endif

		tx_msg_p->tx_msg_size = clen;
		i = TXDMA_DESC_NEXT_INDEX(i, 1, tx_ring_p->tx_wrap_mask);
		if (ngathers > nxge_tx_max_gathers) {
			good_packet = B_FALSE;
			mac_hcksum_get(mp, &start_offset,
			    &stuff_offset, &end_offset, &value,
			    &cksum_flags);

			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==> nxge_start(14): pull msg - "
			    "len %d pkt_len %d ngathers %d",
			    len, pkt_len, ngathers));

			/*
			 * Just give up on this packet.
			 */
			if (is_lso) {
				mp = nmp_lso_save;
				goto nxge_start_fail_lso;
			}
			status = 0;
			goto nxge_start_fail2;
		}
	} /* while (nmp) */

	/* Remember the message on the last descriptor so reclaim can free it. */
	tx_msg_p->tx_message = mp;
	tx_desc_p = &tx_desc_ring_vp[sop_index];
#if defined(__i386)
	npi_desc_handle.regp = (uint32_t)tx_desc_p;
#else
	npi_desc_handle.regp = (uint64_t)tx_desc_p;
#endif

	/* Copy the prebuilt TX packet header template into the SOP buffer. */
	pkthdrp = (p_tx_pkt_hdr_all_t)hdrp;
	pkthdrp->reserved = 0;
	hdrp->value = 0;
	bcopy(&tmp_hdrp, hdrp, sizeof (tx_pkt_header_t));

	if (pkt_len > NXGE_MTU_DEFAULT_MAX) {
		tdc_stats->tx_jumbo_pkts++;
	}

	/* Pad runt packets up to the minimum Ethernet frame size. */
	min_len = (ETHERMIN + TX_PKT_HEADER_SIZE + (npads * 2));
	if (pkt_len < min_len) {
		/* Assume we use bcopy to premapped buffers */
		kaddr = (caddr_t)DMA_COMMON_VPTR(tx_msg_p->buf_dma);
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_start(14-1): < (msg_min + 16)"
		    "len %d pkt_len %d min_len %d bzero %d ngathers %d",
		    len, pkt_len, min_len, (min_len - pkt_len), ngathers));
		bzero((kaddr + pkt_len), (min_len - pkt_len));
		pkt_len = tx_msg_p->tx_msg_size = min_len;

		sop_tx_desc_p->bits.hdw.tr_len = min_len;

		NXGE_MEM_PIO_WRITE64(npi_desc_handle, sop_tx_desc_p->value);
		tx_desc_p->value = sop_tx_desc_p->value;

		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_start(14-2): < msg_min - "
		    "len %d pkt_len %d min_len %d ngathers %d",
		    len, pkt_len, min_len, ngathers));
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: cksum_flags 0x%x ",
	    cksum_flags));
	{
		uint64_t	tmp_len;

		/* pkt_len already includes 16 + paddings!! */
		/* Update the control header length */
		tot_xfer_len = (pkt_len - TX_PKT_HEADER_SIZE);
		tmp_len = hdrp->value |
		    (tot_xfer_len << TX_PKT_HEADER_TOT_XFER_LEN_SHIFT);

		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_start(15_x1): setting SOP "
		    "tot_xfer_len 0x%llx (%d) pkt_len %d tmp_len "
		    "0x%llx hdrp->value 0x%llx",
		    tot_xfer_len, tot_xfer_len, pkt_len,
		    tmp_len, hdrp->value));
#if defined(_BIG_ENDIAN)
		hdrp->value = ddi_swap64(tmp_len);
#else
		hdrp->value = tmp_len;
#endif
		NXGE_DEBUG_MSG((nxgep,
		    TX_CTL, "==> nxge_start(15_x2): setting SOP "
		    "after SWAP: tot_xfer_len 0x%llx pkt_len %d "
		    "tmp_len 0x%llx hdrp->value 0x%llx",
		    tot_xfer_len, pkt_len,
		    tmp_len, hdrp->value));
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(15): setting SOP "
	    "wr_index %d "
	    "tot_xfer_len (%d) pkt_len %d npads %d",
	    sop_index,
	    tot_xfer_len, pkt_len,
	    npads));

	/* Finalize and publish the SOP descriptor last. */
	sop_tx_desc_p->bits.hdw.sop = 1;
	sop_tx_desc_p->bits.hdw.mark = mark_mode;
	sop_tx_desc_p->bits.hdw.num_ptr = ngathers;

	NXGE_MEM_PIO_WRITE64(npi_desc_handle, sop_tx_desc_p->value);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start(16): set SOP done"));

#ifdef NXGE_DEBUG
	npi_desc_handle.nxgep = nxgep;
	npi_desc_handle.function.function = nxgep->function_num;
	npi_desc_handle.function.instance = nxgep->instance;

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "\n\t: value 0x%llx\n"
	    "\t\tsad $%p\ttr_len %d len %d\tnptrs %d\tmark %d sop %d\n",
	    save_desc_p->value,
	    sad,
	    save_desc_p->bits.hdw.tr_len,
	    xfer_len,
	    save_desc_p->bits.hdw.num_ptr,
	    save_desc_p->bits.hdw.mark,
	    save_desc_p->bits.hdw.sop));
	(void) npi_txdma_dump_desc_one(npi_desc_handle, NULL, sop_index);

	dump_len = (pkt_len > 128) ? 128: pkt_len;
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_start: dump packets(17) (after sop set, len "
	    " (len/dump_len/pkt_len/tot_xfer_len) %d/%d/%d/%d):\n"
	    "ptr $%p: %s", len, dump_len, pkt_len, tot_xfer_len,
	    (char *)hdrp,
	    nxge_dump_packet((char *)hdrp, dump_len)));
	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_start(18): TX desc sync: sop_index %d",
	    sop_index));
#endif

	/*
	 * Flush the descriptors to the device; two syncs are needed when
	 * the packet's descriptors wrapped around the end of the ring.
	 */
	if ((ngathers == 1) || tx_ring_p->wr_index < i) {
		(void) ddi_dma_sync(tx_desc_dma_handle,
		    sop_index * sizeof (tx_desc_t),
		    ngathers * sizeof (tx_desc_t),
		    DDI_DMA_SYNC_FORDEV);

		NXGE_DEBUG_MSG((nxgep, TX_CTL, "nxge_start(19): sync 1 "
		    "cs_off = 0x%02X cs_s_off = 0x%02X "
		    "pkt_len %d ngathers %d sop_index %d\n",
		    stuff_offset, start_offset,
		    pkt_len, ngathers, sop_index));
	} else { /* more than one descriptor and wrap around */
		uint32_t nsdescs = tx_ring_p->tx_ring_size - sop_index;
		(void) ddi_dma_sync(tx_desc_dma_handle,
		    sop_index * sizeof (tx_desc_t),
		    nsdescs * sizeof (tx_desc_t),
		    DDI_DMA_SYNC_FORDEV);
		NXGE_DEBUG_MSG((nxgep, TX_CTL, "nxge_start(20): sync 1 "
		    "cs_off = 0x%02X cs_s_off = 0x%02X "
		    "pkt_len %d ngathers %d sop_index %d\n",
		    stuff_offset, start_offset,
		    pkt_len, ngathers, sop_index));

		(void) ddi_dma_sync(tx_desc_dma_handle,
		    0,
		    (ngathers - nsdescs) * sizeof (tx_desc_t),
		    DDI_DMA_SYNC_FORDEV);
		NXGE_DEBUG_MSG((nxgep, TX_CTL, "nxge_start(21): sync 2 "
		    "cs_off = 0x%02X cs_s_off = 0x%02X "
		    "pkt_len %d ngathers %d sop_index %d\n",
		    stuff_offset, start_offset,
		    pkt_len, ngathers, sop_index));
	}

	/* Advance the software write index, tracking ring wrap state. */
	tail_index = tx_ring_p->wr_index;
	tail_wrap = tx_ring_p->wr_index_wrap;

	tx_ring_p->wr_index = i;
	if (tx_ring_p->wr_index <= tail_index) {
		tx_ring_p->wr_index_wrap = ((tail_wrap == B_TRUE) ?
		    B_FALSE : B_TRUE);
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: TX kick: "
	    "channel %d wr_index %d wrap %d ngathers %d desc_pend %d",
	    tx_ring_p->tdc,
	    tx_ring_p->wr_index,
	    tx_ring_p->wr_index_wrap,
	    ngathers,
	    tx_ring_p->descs_pending));

	/*
	 * Soft LSO: loop back to transmit the next segment in the chain.
	 * Every nxge_lso_kick_cnt accumulated gathers, ring the doorbell
	 * so the hardware can start on the segments queued so far.
	 */
	if (is_lso) {
		lso_ngathers += ngathers;
		if (mp_chain != NULL) {
			mp = mp_chain;
			mp_chain = mp_chain->b_next;
			mp->b_next = NULL;
			if (nxge_lso_kick_cnt == lso_ngathers) {
				tx_ring_p->descs_pending += lso_ngathers;
				{
					tx_ring_kick_t		kick;

					kick.value = 0;
					kick.bits.ldw.wrap =
					    tx_ring_p->wr_index_wrap;
					kick.bits.ldw.tail =
					    (uint16_t)tx_ring_p->wr_index;

					/* Kick the Transmit kick register */
					TXDMA_REG_WRITE64(
					    NXGE_DEV_NPI_HANDLE(nxgep),
					    TX_RING_KICK_REG,
					    (uint8_t)tx_ring_p->tdc,
					    kick.value);
					tdc_stats->tx_starts++;

					NXGE_DEBUG_MSG((nxgep, TX_CTL,
					    "==> nxge_start: more LSO: "
					    "LSO_CNT %d",
					    lso_ngathers));
				}
				lso_ngathers = 0;
				ngathers = 0;
				cur_index_lso = sop_index = tx_ring_p->wr_index;
				lso_tail_wrap = tx_ring_p->wr_index_wrap;
			}
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_start: lso again: "
			    "lso_gathers %d ngathers %d cur_index_lso %d "
			    "wr_index %d sop_index %d",
			    lso_ngathers, ngathers, cur_index_lso,
			    tx_ring_p->wr_index, sop_index));

			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "==> nxge_start: next : count %d",
			    lso_ngathers));
			lso_again = B_TRUE;
			goto start_again;
		}
		ngathers = lso_ngathers;
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "==> nxge_start: TX KICKING: "));

	{
		tx_ring_kick_t		kick;

		kick.value = 0;
		kick.bits.ldw.wrap = tx_ring_p->wr_index_wrap;
		kick.bits.ldw.tail = (uint16_t)tx_ring_p->wr_index;

		/* Kick start the Transmit kick register */
		TXDMA_REG_WRITE64(NXGE_DEV_NPI_HANDLE(nxgep),
		    TX_RING_KICK_REG,
		    (uint8_t)tx_ring_p->tdc,
		    kick.value);
	}

	tx_ring_p->descs_pending += ngathers;
	tdc_stats->tx_starts++;

	if (isLDOMservice(nxgep)) {
		tx_ring_p->tx_ring_busy = B_FALSE;
		if (tx_ring_p->tx_ring_offline) {
			(void) atomic_swap_32(&tx_ring_p->tx_ring_offline,
			    NXGE_TX_RING_OFFLINED);
		}
	}

	MUTEX_EXIT(&tx_ring_p->lock);

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_start"));
	return (status);

nxge_start_fail_lso:
	/* LSO failure: free the current segment and the unsent remainder. */
	status = 0;
	good_packet = B_FALSE;
	if (mp != NULL)
		freemsg(mp);
	if (mp_chain != NULL)
		freemsgchain(mp_chain);

	if (!lso_again && !ngathers) {
		if (isLDOMservice(nxgep)) {
			tx_ring_p->tx_ring_busy = B_FALSE;
			if (tx_ring_p->tx_ring_offline) {
				(void) atomic_swap_32(
				    &tx_ring_p->tx_ring_offline,
				    NXGE_TX_RING_OFFLINED);
			}
		}

		MUTEX_EXIT(&tx_ring_p->lock);
		NXGE_DEBUG_MSG((nxgep, TX_CTL,
		    "==> nxge_start: lso exit (nothing changed)"));
		goto nxge_start_fail1;
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_start (channel %d): before lso "
	    "lso_gathers %d ngathers %d cur_index_lso %d "
	    "wr_index %d sop_index %d lso_again %d",
	    tx_ring_p->tdc,
	    lso_ngathers, ngathers, cur_index_lso,
	    tx_ring_p->wr_index, sop_index, lso_again));

	/* Roll the ring state back to the last kicked LSO position. */
	if (lso_again) {
		lso_ngathers += ngathers;
		ngathers = lso_ngathers;
		sop_index = cur_index_lso;
		tx_ring_p->wr_index = sop_index;
		tx_ring_p->wr_index_wrap = lso_tail_wrap;
	}

	NXGE_DEBUG_MSG((nxgep, TX_CTL,
	    "==> nxge_start (channel %d): after lso "
	    "lso_gathers %d ngathers %d cur_index_lso %d "
	    "wr_index %d sop_index %d lso_again %d",
	    tx_ring_p->tdc,
	    lso_ngathers, ngathers, cur_index_lso,
	    tx_ring_p->wr_index, sop_index, lso_again));

nxge_start_fail2:
	/* Undo any descriptors built for the failed packet. */
	if (good_packet == B_FALSE) {
		cur_index = sop_index;
		NXGE_DEBUG_MSG((nxgep,
TX_CTL, "==> nxge_start: clean up"));
	/*
	 * Failure path: walk forward over the ngathers descriptors that
	 * were built for the failed packet, zero each descriptor, and
	 * release whatever DMA resources were bound to it.
	 */
	for (i = 0; i < ngathers; i++) {
		tx_desc_p = &tx_desc_ring_vp[cur_index];
#if defined(__i386)
		npi_handle.regp = (uint32_t)tx_desc_p;
#else
		npi_handle.regp = (uint64_t)tx_desc_p;
#endif
		tx_msg_p = &tx_msg_ring[cur_index];
		(void) npi_txdma_desc_set_zero(npi_handle, 1);
		if (tx_msg_p->flags.dma_type == USE_DVMA) {
			/* Fast DVMA binding: unload and recycle the slot. */
			NXGE_DEBUG_MSG((nxgep, TX_CTL,
			    "tx_desc_p = %X index = %d",
			    tx_desc_p, tx_ring_p->rd_index));
			(void) dvma_unload(tx_msg_p->dvma_handle,
			    0, -1);
			tx_msg_p->dvma_handle = NULL;
			if (tx_ring_p->dvma_wr_index ==
			    tx_ring_p->dvma_wrap_mask)
				tx_ring_p->dvma_wr_index = 0;
			else
				tx_ring_p->dvma_wr_index++;
			tx_ring_p->dvma_pending--;
		} else if (tx_msg_p->flags.dma_type == USE_DMA) {
			/* Regular DDI DMA binding: just unbind the handle. */
			if (ddi_dma_unbind_handle(
			    tx_msg_p->dma_handle)) {
				cmn_err(CE_WARN, "!nxge_start: "
				    "ddi_dma_unbind_handle failed");
			}
		}
		tx_msg_p->flags.dma_type = USE_NONE;
		cur_index = TXDMA_DESC_NEXT_INDEX(cur_index, 1,
		    tx_ring_p->tx_wrap_mask);

	}
	}

	/*
	 * On a service domain, clear the busy marker and complete any
	 * pending offline transition before dropping the ring lock.
	 */
	if (isLDOMservice(nxgep)) {
		tx_ring_p->tx_ring_busy = B_FALSE;
		if (tx_ring_p->tx_ring_offline) {
			(void) atomic_swap_32(&tx_ring_p->tx_ring_offline,
			    NXGE_TX_RING_OFFLINED);
		}
	}

	MUTEX_EXIT(&tx_ring_p->lock);

nxge_start_fail1:
	/* Add FMA to check the access handle nxge_hregh */

	NXGE_DEBUG_MSG((nxgep, TX_CTL, "<== nxge_start"));
	return (status);
}

/* Software LSO starts here */

/*
 * nxge_hcksum_retrieve() -- pull the hardware checksum offload state out
 * of an M_DATA mblk's dblk fields: the offload flags and, when a partial
 * or full checksum is requested, the 16-bit checksum value; for partial
 * checksum also the start/stuff/end offsets.  Any output pointer may be
 * NULL if the caller does not need that item.  Non-M_DATA messages are
 * left untouched (no outputs are written).
 */
static void
nxge_hcksum_retrieve(mblk_t *mp,
    uint32_t *start, uint32_t *stuff, uint32_t *end,
    uint32_t *value, uint32_t *flags)
{
	if (mp->b_datap->db_type == M_DATA) {
		if (flags != NULL) {
			*flags = DB_CKSUMFLAGS(mp) & (HCK_IPV4_HDRCKSUM |
			    HCK_PARTIALCKSUM | HCK_FULLCKSUM |
			    HCK_FULLCKSUM_OK);
			if ((*flags & (HCK_PARTIALCKSUM |
			    HCK_FULLCKSUM)) != 0) {
				if (value != NULL)
					*value = (uint32_t)DB_CKSUM16(mp);
				if ((*flags & HCK_PARTIALCKSUM) != 0) {
					if (start != NULL)
						*start =
						    (uint32_t)DB_CKSUMSTART(mp);
					if (stuff != NULL)
						*stuff =
						    (uint32_t)DB_CKSUMSTUFF(mp);
					if (end != NULL)
						*end =
						    (uint32_t)DB_CKSUMEND(mp);
				}
			}
		}
	}
}

/*
 * nxge_lso_info_get() -- report whether hardware LSO was requested for
 * this message (*flags masked to HW_LSO) and, if so, the MSS to segment
 * with (*mss; zero when LSO is not requested).  Both output pointers are
 * required; the function is a no-op if either is NULL.
 */
static void
nxge_lso_info_get(mblk_t *mp, uint32_t *mss, uint32_t *flags)
{
	ASSERT(DB_TYPE(mp) == M_DATA);

	if (mss == NULL || flags == NULL)
		return;

	*mss = 0;
	/* NOTE(review): always true here -- flags was NULL-checked above. */
	if (flags != NULL) {
		*flags = DB_CKSUMFLAGS(mp) & HW_LSO;
		if (*flags != 0) {
			*mss = (uint32_t)DB_LSOMSS(mp);
		}
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_lso_info_get(flag !=NULL): mss %d *flags 0x%x",
		    *mss, *flags));
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== nxge_lso_info_get: mss %d", *mss));
}

/*
 * Do Soft LSO on the oversized packet.
 *
 * 1. Create a chain of message for headers.
 * 2. Fill up header messages with proper information.
 * 3. Copy Ethernet, IP, and TCP headers from the original message to
 *    each new message with necessary adjustments.
 *    * Unchange the ethernet header for DIX frames. (by default)
 *    * IP Total Length field is updated to MSS or less(only for the last one).
 *    * IP Identification value is incremented by one for each packet.
 *    * TCP sequence Number is recalculated according to the payload length.
 *    * Set FIN and/or PSH flags for the *last* packet if applied.
 *    * TCP partial Checksum
 * 4. Update LSO information in the first message header.
 * 5. Release the original message header.
 */
/*
 * mp:  the original oversized LSO message; it is consumed (freed on every
 *      return path, success or failure).
 * mss: TCP maximum segment size each new segment's payload is sliced to.
 *
 * Returns a b_next-linked chain of fully-formed segments, or NULL on
 * failure (bad headers or allocation failure).
 */
static mblk_t *
nxge_do_softlso(mblk_t *mp, uint32_t mss)
{
	uint32_t hckflags;
	int pktlen;
	int hdrlen;
	int segnum;
	int i;
	struct ether_vlan_header *evh;
	int ehlen, iphlen, tcphlen;
	struct ip *oiph, *niph;
	struct tcphdr *otcph, *ntcph;
	int available, len, left;
	uint16_t ip_id;
	uint32_t tcp_seq;
#ifdef __sparc
	uint32_t tcp_seq_tmp;
#endif
	mblk_t *datamp;
	uchar_t *rptr;
	mblk_t *nmp;
	mblk_t *cmp;
	mblk_t *mp_chain;
	boolean_t do_cleanup = B_FALSE;
	t_uscalar_t start_offset = 0;
	t_uscalar_t stuff_offset = 0;
	t_uscalar_t value = 0;
	uint16_t l4_len;
	ipaddr_t src, dst;
	uint32_t cksum, sum, l4cksum;

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_do_softlso"));
	/*
	 * check the length of LSO packet payload and calculate the number of
	 * segments to be generated.
	 */
	pktlen = msgsize(mp);
	evh = (struct ether_vlan_header *)mp->b_rptr;

	/* VLAN? Pick the Ethernet header length accordingly. */
	if (evh->ether_tpid == htons(ETHERTYPE_VLAN))
		ehlen = sizeof (struct ether_vlan_header);
	else
		ehlen = sizeof (struct ether_header);
	oiph = (struct ip *)(mp->b_rptr + ehlen);
	iphlen = oiph->ip_hl * 4;
	otcph = (struct tcphdr *)(mp->b_rptr + ehlen + iphlen);
	tcphlen = otcph->th_off * 4;

	l4_len = pktlen - ehlen - iphlen;

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_do_softlso: mss %d oiph $%p "
	    "original ip_sum oiph->ip_sum 0x%x "
	    "original tcp_sum otcph->th_sum 0x%x "
	    "oiph->ip_len %d pktlen %d ehlen %d "
	    "l4_len %d (0x%x) ip_len - iphlen %d ",
	    mss,
	    oiph,
	    oiph->ip_sum,
	    otcph->th_sum,
	    ntohs(oiph->ip_len), pktlen,
	    ehlen,
	    l4_len,
	    l4_len,
	    ntohs(oiph->ip_len) - iphlen));

	/* IPv4 + TCP only; anything else is dropped, message freed. */
	if (!(oiph->ip_v == IPV4_VERSION)) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "<== nxge_do_softlso: not IPV4 "
		    "oiph->ip_len %d pktlen %d ehlen %d tcphlen %d",
		    ntohs(oiph->ip_len), pktlen, ehlen,
		    tcphlen));
		freemsg(mp);
		return (NULL);
	}

	if (!(oiph->ip_p == IPPROTO_TCP)) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "<== nxge_do_softlso: not TCP "
		    "oiph->ip_len %d pktlen %d ehlen %d tcphlen %d",
		    ntohs(oiph->ip_len), pktlen, ehlen,
		    tcphlen));
		freemsg(mp);
		return (NULL);
	}

	/* The IP total length must account for every byte past Ethernet. */
	if (!(ntohs(oiph->ip_len) == pktlen - ehlen)) {
		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
		    "<== nxge_do_softlso: len not matched "
		    "oiph->ip_len %d pktlen %d ehlen %d tcphlen %d",
		    ntohs(oiph->ip_len), pktlen, ehlen,
		    tcphlen));
		freemsg(mp);
		return (NULL);
	}

	otcph = (struct tcphdr *)(mp->b_rptr + ehlen + iphlen);
	tcphlen = otcph->th_off * 4;

	/* TCP flags can not include URG, RST, or SYN */
	VERIFY((otcph->th_flags & (TH_SYN | TH_RST | TH_URG)) == 0);

	hdrlen = ehlen + iphlen + tcphlen;

	/* All headers must live in the first mblk. */
	VERIFY(MBLKL(mp) >= hdrlen);

	/*
	 * Locate the first payload byte: either right after the headers in
	 * this mblk, or at the start of the continuation mblk when the
	 * first block holds exactly the headers.
	 */
	if (MBLKL(mp) > hdrlen) {
		datamp = mp;
		rptr = mp->b_rptr + hdrlen;
	} else { /* = */
		datamp = mp->b_cont;
		rptr = datamp->b_rptr;
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "nxge_do_softlso: otcph $%p pktlen: %d, "
	    "hdrlen %d ehlen %d iphlen %d tcphlen %d "
	    "mblkl(mp): %d, mblkl(datamp): %d",
	    otcph,
	    pktlen, hdrlen, ehlen, iphlen, tcphlen,
	    (int)MBLKL(mp), (int)MBLKL(datamp)));

	hckflags = 0;
	nxge_hcksum_retrieve(mp,
	    &start_offset, &stuff_offset, &value, NULL, &hckflags);

	dst = oiph->ip_dst.s_addr;
	src = oiph->ip_src.s_addr;

	/*
	 * Partial one's-complement sum over the TCP pseudo-header:
	 * source/destination addresses plus IP_TCP_CSUM_COMP.  The
	 * per-segment length is folded in later, once per new header.
	 */
	cksum = (dst >> 16) + (dst & 0xFFFF) +
	    (src >> 16) + (src & 0xFFFF);
	l4cksum = cksum + IP_TCP_CSUM_COMP;

	sum = l4_len + l4cksum;
	sum = (sum & 0xFFFF) + (sum >> 16);

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_do_softlso: dst 0x%x src 0x%x sum 0x%x ~new 0x%x "
	    "hckflags 0x%x start_offset %d stuff_offset %d "
	    "value (original) 0x%x th_sum 0x%x "
	    "pktlen %d l4_len %d (0x%x) "
	    "MBLKL(mp): %d, MBLKL(datamp): %d dump header %s",
	    dst, src,
	    (sum & 0xffff), (~sum & 0xffff),
	    hckflags, start_offset, stuff_offset,
	    value, otcph->th_sum,
	    pktlen,
	    l4_len,
	    l4_len,
	    ntohs(oiph->ip_len) - (int)MBLKL(mp),
	    (int)MBLKL(datamp),
	    nxge_dump_packet((char *)evh, 12)));

	/*
	 * Start to process.
	 */
	available = pktlen - hdrlen;
	/* Number of segments: ceiling of available / mss. */
	segnum = (available - 1) / mss + 1;

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_do_softlso: pktlen %d "
	    "MBLKL(mp): %d, MBLKL(datamp): %d "
	    "available %d mss %d segnum %d",
	    pktlen, (int)MBLKL(mp), (int)MBLKL(datamp),
	    available,
	    mss,
	    segnum));

	VERIFY(segnum >= 2);

	/*
	 * Try to pre-allocate all header messages
	 */
	mp_chain = NULL;
	for (i = 0; i < segnum; i++) {
		if ((nmp = allocb(hdrlen, 0)) == NULL) {
			/* Clean up the mp_chain */
			while (mp_chain != NULL) {
				nmp = mp_chain;
				mp_chain = mp_chain->b_next;
				freemsg(nmp);
			}
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "<== nxge_do_softlso: "
			    "Could not allocate enough messages for headers!"));
			freemsg(mp);
			return (NULL);
		}
		/* Prepend: chain order does not matter for headers. */
		nmp->b_next = mp_chain;
		mp_chain = nmp;

		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_do_softlso: "
		    "mp $%p nmp $%p mp_chain $%p mp_chain->b_next $%p",
		    mp, nmp, mp_chain, mp_chain->b_next));
	}

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_do_softlso: mp $%p nmp $%p mp_chain $%p",
	    mp, nmp, mp_chain));

	/*
	 * Associate payload with new packets: each header message gets a
	 * b_cont chain of dupb()'d references into the original payload,
	 * trimmed so every segment carries at most mss bytes.
	 */
	cmp = mp_chain;
	left = available;
	while (cmp != NULL) {
		nmp = dupb(datamp);
		if (nmp == NULL) {
			do_cleanup = B_TRUE;
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "==>nxge_do_softlso: "
			    "Can not dupb(datamp), have to do clean up"));
			goto cleanup_allocated_msgs;
		}

		/*
		 * NOTE(review): 'len' is read by this debug message before
		 * its first assignment on the initial pass (debug builds
		 * only; harmless to production behavior).
		 */
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_do_softlso: (loop) before mp $%p cmp $%p "
		    "dupb nmp $%p len %d left %d msd %d ",
		    mp, cmp, nmp, len, left, mss));

		cmp->b_cont = nmp;
		nmp->b_rptr = rptr;
		/* This segment's payload: mss bytes, or whatever is left. */
		len = (left < mss) ? left : mss;
		left -= len;

		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_do_softlso: (loop) after mp $%p cmp $%p "
		    "dupb nmp $%p len %d left %d mss %d ",
		    mp, cmp, nmp, len, left, mss));
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "nxge_do_softlso: before available: %d, "
		    "left: %d, len: %d, segnum: %d MBLK(nmp): %d",
		    available, left, len, segnum, (int)MBLKL(nmp)));

		len -= MBLKL(nmp);
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "nxge_do_softlso: after available: %d, "
		    "left: %d, len: %d, segnum: %d MBLK(nmp): %d",
		    available, left, len, segnum, (int)MBLKL(nmp)));

		/* Pull additional payload mblks until len is satisfied. */
		while (len > 0) {
			mblk_t *mmp = NULL;

			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "nxge_do_softlso: (4) len > 0 available: %d, "
			    "left: %d, len: %d, segnum: %d MBLK(nmp): %d",
			    available, left, len, segnum, (int)MBLKL(nmp)));

			if (datamp->b_cont != NULL) {
				datamp = datamp->b_cont;
				rptr = datamp->b_rptr;
				mmp = dupb(datamp);
				if (mmp == NULL) {
					do_cleanup = B_TRUE;
					NXGE_DEBUG_MSG((NULL, TX_CTL,
					    "==> nxge_do_softlso: "
					    "Can not dupb(datamp) (1), :"
					    "have to do clean up"));
					NXGE_DEBUG_MSG((NULL, TX_CTL,
					    "==> nxge_do_softlso: "
					    "available: %d, left: %d, "
					    "len: %d, MBLKL(nmp): %d",
					    available, left, len,
					    (int)MBLKL(nmp)));
					goto cleanup_allocated_msgs;
				}
			} else {
				/* Ran out of payload before len bytes. */
				NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
				    "==> nxge_do_softlso: "
				    "(1)available: %d, left: %d, "
				    "len: %d, MBLKL(nmp): %d",
				    available, left, len,
				    (int)MBLKL(nmp)));
				cmn_err(CE_PANIC,
				    "==> nxge_do_softlso: "
				    "Pointers must have been corrupted!\n"
				    "datamp: $%p, nmp: $%p, rptr: $%p",
				    (void *)datamp,
				    (void *)nmp,
				    (void *)rptr);
			}
			nmp->b_cont = mmp;
			nmp = mmp;
			len -= MBLKL(nmp);
		}
		if (len < 0) {
			/*
			 * The last dupb overshot: trim the write pointer
			 * back and remember where the next segment starts.
			 */
			nmp->b_wptr += len;
			rptr = nmp->b_wptr;
			NXGE_DEBUG_MSG((NULL, TX_CTL,
			    "(5) len < 0 (less than 0)"
			    "available: %d, left: %d, len: %d, MBLKL(nmp): %d",
			    available, left, len, (int)MBLKL(nmp)));

		} else if (len == 0) {
			if (datamp->b_cont != NULL) {
				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "(5) len == 0"
				    "available: %d, left: %d, len: %d, "
				    "MBLKL(nmp): %d",
				    available, left, len, (int)MBLKL(nmp)));
				datamp = datamp->b_cont;
				rptr = datamp->b_rptr;
			} else {
				NXGE_DEBUG_MSG((NULL, TX_CTL,
				    "(6)available b_cont == NULL : %d, "
				    "left: %d, len: %d, MBLKL(nmp): %d",
				    available, left, len, (int)MBLKL(nmp)));

				VERIFY(cmp->b_next == NULL);
				VERIFY(left == 0);
				break; /* Done! */
			}
		}
		cmp = cmp->b_next;

		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "(7) do_softlso: "
		    "next mp in mp_chain available len != 0 : %d, "
		    "left: %d, len: %d, MBLKL(nmp): %d",
		    available, left, len, (int)MBLKL(nmp)));
	}

	/*
	 * From now, start to fill up all headers for the first message
	 * Hardware checksum flags need to be updated separately for FULLCKSUM
	 * and PARTIALCKSUM cases. For full checksum, copy the original flags
	 * into every new packet is enough. But for HCK_PARTIALCKSUM, all
	 * required fields need to be updated properly.
	 */
	nmp = mp_chain;
	bcopy(mp->b_rptr, nmp->b_rptr, hdrlen);
	nmp->b_wptr = nmp->b_rptr + hdrlen;
	niph = (struct ip *)(nmp->b_rptr + ehlen);
	niph->ip_len = htons(mss + iphlen + tcphlen);
	ip_id = ntohs(niph->ip_id);
	ntcph = (struct tcphdr *)(nmp->b_rptr + ehlen + iphlen);
#ifdef __sparc
	/* th_seq may be unaligned on sparc: go through bcopy. */
	bcopy((char *)&ntcph->th_seq, &tcp_seq_tmp, 4);
	tcp_seq = ntohl(tcp_seq_tmp);
#else
	tcp_seq = ntohl(ntcph->th_seq);
#endif

	/* Every segment but the last must not carry FIN/PSH/RST. */
	ntcph->th_flags &= ~(TH_FIN | TH_PUSH | TH_RST);

	DB_CKSUMFLAGS(nmp) = (uint16_t)hckflags;
	DB_CKSUMSTART(nmp) = start_offset;
	DB_CKSUMSTUFF(nmp) = stuff_offset;

	/* calculate IP checksum and TCP pseudo header checksum */
	niph->ip_sum = 0;
	niph->ip_sum = (uint16_t)nxge_csgen((uint16_t *)niph, iphlen);

	l4_len = mss + tcphlen;
	sum = htons(l4_len) + l4cksum;
	sum = (sum & 0xFFFF) + (sum >> 16);
	ntcph->th_sum = (sum & 0xffff);

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_do_softlso: first mp $%p (mp_chain $%p) "
	    "mss %d pktlen %d l4_len %d (0x%x) "
	    "MBLKL(mp): %d, MBLKL(datamp): %d "
	    "ip_sum 0x%x "
	    "th_sum 0x%x sum 0x%x ) "
	    "dump first ip->tcp %s",
	    nmp, mp_chain,
	    mss,
	    pktlen,
	    l4_len,
	    l4_len,
	    (int)MBLKL(mp), (int)MBLKL(datamp),
	    niph->ip_sum,
	    ntcph->th_sum,
	    sum,
	    nxge_dump_packet((char *)niph, 52)));

	/*
	 * Middle segments: cmp stays pinned at the first segment, whose
	 * (already adjusted) header is the bcopy source for each of them.
	 */
	cmp = nmp;
	while ((nmp = nmp->b_next)->b_next != NULL) {
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==>nxge_do_softlso: middle l4_len %d ", l4_len));
		bcopy(cmp->b_rptr, nmp->b_rptr, hdrlen);
		nmp->b_wptr = nmp->b_rptr + hdrlen;
		niph = (struct ip *)(nmp->b_rptr + ehlen);
		niph->ip_id = htons(++ip_id);
		niph->ip_len = htons(mss + iphlen + tcphlen);
		ntcph = (struct tcphdr *)(nmp->b_rptr + ehlen + iphlen);
		tcp_seq += mss;

		ntcph->th_flags &= ~(TH_FIN | TH_PUSH | TH_RST | TH_URG);

#ifdef __sparc
		tcp_seq_tmp = htonl(tcp_seq);
		bcopy(&tcp_seq_tmp, (char *)&ntcph->th_seq, 4);
#else
		ntcph->th_seq = htonl(tcp_seq);
#endif
		DB_CKSUMFLAGS(nmp) = (uint16_t)hckflags;
		DB_CKSUMSTART(nmp) = start_offset;
		DB_CKSUMSTUFF(nmp) = stuff_offset;

		/* calculate IP checksum and TCP pseudo header checksum */
		niph->ip_sum = 0;
		niph->ip_sum = (uint16_t)nxge_csgen((uint16_t *)niph, iphlen);
		/*
		 * Reuse the first segment's pseudo-header sum: middle
		 * segments carry exactly mss payload bytes as well.
		 */
		ntcph->th_sum = (sum & 0xffff);

		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_do_softlso: middle ip_sum 0x%x "
		    "th_sum 0x%x "
		    " mp $%p (mp_chain $%p) pktlen %d "
		    "MBLKL(mp): %d, MBLKL(datamp): %d ",
		    niph->ip_sum,
		    ntcph->th_sum,
		    nmp, mp_chain,
		    pktlen, (int)MBLKL(mp), (int)MBLKL(datamp)));
	}

	/* Last segment */
	/*
	 * Set FIN and/or PSH flags if present only in the last packet.
	 * The ip_len could be different from prior packets.
	 */
	bcopy(cmp->b_rptr, nmp->b_rptr, hdrlen);
	nmp->b_wptr = nmp->b_rptr + hdrlen;
	niph = (struct ip *)(nmp->b_rptr + ehlen);
	niph->ip_id = htons(++ip_id);
	niph->ip_len = htons(msgsize(nmp->b_cont) + iphlen + tcphlen);
	ntcph = (struct tcphdr *)(nmp->b_rptr + ehlen + iphlen);
	tcp_seq += mss;
#ifdef __sparc
	tcp_seq_tmp = htonl(tcp_seq);
	bcopy(&tcp_seq_tmp, (char *)&ntcph->th_seq, 4);
#else
	ntcph->th_seq = htonl(tcp_seq);
#endif
	/* otcph still points into the original mp (freed below). */
	ntcph->th_flags = (otcph->th_flags & ~TH_URG);

	DB_CKSUMFLAGS(nmp) = (uint16_t)hckflags;
	DB_CKSUMSTART(nmp) = start_offset;
	DB_CKSUMSTUFF(nmp) = stuff_offset;

	/* calculate IP checksum and TCP pseudo header checksum */
	niph->ip_sum = 0;
	niph->ip_sum = (uint16_t)nxge_csgen((uint16_t *)niph, iphlen);

	l4_len = ntohs(niph->ip_len) - iphlen;
	sum = htons(l4_len) + l4cksum;
	sum = (sum & 0xFFFF) + (sum >> 16);
	ntcph->th_sum = (sum & 0xffff);

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "==> nxge_do_softlso: last next "
	    "niph->ip_sum 0x%x "
	    "ntcph->th_sum 0x%x sum 0x%x "
	    "dump last ip->tcp %s "
	    "cmp $%p mp $%p (mp_chain $%p) pktlen %d (0x%x) "
	    "l4_len %d (0x%x) "
	    "MBLKL(mp): %d, MBLKL(datamp): %d ",
	    niph->ip_sum,
	    ntcph->th_sum, sum,
	    nxge_dump_packet((char *)niph, 52),
	    cmp, nmp, mp_chain,
	    pktlen, pktlen,
	    l4_len,
	    l4_len,
	    (int)MBLKL(mp), (int)MBLKL(datamp)));

cleanup_allocated_msgs:
	if (do_cleanup) {
		NXGE_DEBUG_MSG((NULL, TX_CTL,
		    "==> nxge_do_softlso: "
		    "Failed allocating messages, "
		    "have to clean up and fail!"));
		while (mp_chain != NULL) {
			nmp = mp_chain;
			mp_chain = mp_chain->b_next;
			freemsg(nmp);
		}
	}
	/*
	 * We're done here, so just free the original message and return the
	 * new message chain, that could be NULL if failed, back to the caller.
	 */
	freemsg(mp);

	NXGE_DEBUG_MSG((NULL, TX_CTL,
	    "<== nxge_do_softlso:mp_chain $%p", mp_chain));
	return (mp_chain);
}

/*
 * Will be called before NIC driver do further operation on the message.
 * The input message may include LSO information, if so, go to softlso logic
 * to eliminate the oversized LSO packet for the incapable underlying h/w.
 * The return could be the same non-LSO message or a message chain for LSO case.
 *
 * The driver needs to call this function per packet and process the whole chain
 * if applied.
1757 */ 1758 static mblk_t * 1759 nxge_lso_eliminate(mblk_t *mp) 1760 { 1761 uint32_t lsoflags; 1762 uint32_t mss; 1763 1764 NXGE_DEBUG_MSG((NULL, TX_CTL, 1765 "==>nxge_lso_eliminate:")); 1766 nxge_lso_info_get(mp, &mss, &lsoflags); 1767 1768 if (lsoflags & HW_LSO) { 1769 mblk_t *nmp; 1770 1771 NXGE_DEBUG_MSG((NULL, TX_CTL, 1772 "==>nxge_lso_eliminate:" 1773 "HW_LSO:mss %d mp $%p", 1774 mss, mp)); 1775 if ((nmp = nxge_do_softlso(mp, mss)) != NULL) { 1776 NXGE_DEBUG_MSG((NULL, TX_CTL, 1777 "<== nxge_lso_eliminate: " 1778 "LSO: nmp not NULL nmp $%p mss %d mp $%p", 1779 nmp, mss, mp)); 1780 return (nmp); 1781 } else { 1782 NXGE_DEBUG_MSG((NULL, TX_CTL, 1783 "<== nxge_lso_eliminate_ " 1784 "LSO: failed nmp NULL nmp $%p mss %d mp $%p", 1785 nmp, mss, mp)); 1786 return (NULL); 1787 } 1788 } 1789 1790 NXGE_DEBUG_MSG((NULL, TX_CTL, 1791 "<== nxge_lso_eliminate")); 1792 return (mp); 1793 } 1794 1795 static uint32_t 1796 nxge_csgen(uint16_t *adr, int len) 1797 { 1798 int i, odd; 1799 uint32_t sum = 0; 1800 uint32_t c = 0; 1801 1802 odd = len % 2; 1803 for (i = 0; i < (len / 2); i++) { 1804 sum += (adr[i] & 0xffff); 1805 } 1806 if (odd) { 1807 sum += adr[len / 2] & 0xff00; 1808 } 1809 while ((c = ((sum & 0xffff0000) >> 16)) != 0) { 1810 sum &= 0xffff; 1811 sum += c; 1812 } 1813 return (~sum & 0xffff); 1814 } 1815