/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "nge.h"

#define	TXD_OWN		0x80000000
#define	TXD_ERR		0x40000000
#define	TXD_END		0x20000000
#define	TXD_BCNT_MSK	0x00003FFF


#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_SEND

#define	NGE_TXSWD_RECYCLE(sd)	{ \
	(sd)->mp = NULL; \
	(sd)->frags = 0; \
	(sd)->mp_hndl.head = NULL; \
	(sd)->mp_hndl.tail = NULL; \
	(sd)->flags = HOST_OWN; \
}


static size_t nge_tx_dmah_pop(nge_dmah_list_t *, nge_dmah_list_t *, size_t);
static void nge_tx_dmah_push(nge_dmah_list_t *, nge_dmah_list_t *);


void nge_tx_recycle_all(nge_t *ngep);
#pragma	no_inline(nge_tx_recycle_all)

void
nge_tx_recycle_all(nge_t *ngep)
{
	send_ring_t *srp;
	sw_tx_sbd_t *ssbdp;
	nge_dmah_node_t *dmah;
	uint32_t slot;
	uint32_t nslots;

	srp = ngep->send;
	nslots = srp->desc.nslots;

	for (slot = 0; slot < nslots; ++slot) {

		ssbdp = srp->sw_sbds + slot;

		DMA_ZERO(ssbdp->desc);

		if (ssbdp->mp != NULL) {

			for (dmah = ssbdp->mp_hndl.head; dmah != NULL;
			    dmah = dmah->next)
				(void) ddi_dma_unbind_handle(dmah->hndl);

			freemsg(ssbdp->mp);
		}

		NGE_TXSWD_RECYCLE(ssbdp);
	}
}

static size_t
nge_tx_dmah_pop(nge_dmah_list_t *src, nge_dmah_list_t *dst, size_t num)
{
	nge_dmah_node_t *node;

	for (node = src->head; node != NULL && --num != 0; node = node->next)
		;

	if (num == 0) {

		dst->head = src->head;
		dst->tail = node;

		if ((src->head = node->next) == NULL)
			src->tail = NULL;

		node->next = NULL;
	}

	return (num);
}

static void
nge_tx_dmah_push(nge_dmah_list_t *src, nge_dmah_list_t *dst)
{
	if (dst->tail != NULL)
		dst->tail->next = src->head;
	else
		dst->head = src->head;

	dst->tail = src->tail;
}

static void
nge_tx_desc_sync(nge_t *ngep, uint32_t start_index, uint32_t bds, uint_t type)
{
	send_ring_t *srp = ngep->send;
	const size_t txd_size = ngep->desc_attr.txd_size;
	const uint64_t end = srp->desc.nslots * txd_size;
	uint64_t start;
	uint64_t num;

	start = start_index * txd_size;
	num = bds * txd_size;

	if (start + num <= end)
		(void) ddi_dma_sync(srp->desc.dma_hdl, start, num, type);
	else {

		(void) ddi_dma_sync(srp->desc.dma_hdl, start, 0, type);
		(void) ddi_dma_sync(srp->desc.dma_hdl, 0, start + num - end,
		    type);
	}
}
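
/*
 * Worked example of the wrap-around sync above (the numbers are
 * illustrative only): with nslots = 512 and txd_size = 16, the ring
 * mapping covers [0, 8192).  A request of start_index = 510, bds = 4
 * gives start = 8160, num = 64, so start + num = 8224 > 8192 and the
 * sync is split into two calls:
 *
 *	(void) ddi_dma_sync(hdl, 8160, 0, type);	sync [8160, 8192)
 *	(void) ddi_dma_sync(hdl, 0, 32, type);		sync [0, 32)
 *
 * (A length of zero asks ddi_dma_sync() to sync from the given offset
 * to the end of the mapping.)
 */
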
/*
 * Reclaim the resources in use once their transmission has completed.
 */
void
nge_tx_recycle(nge_t *ngep, boolean_t is_intr)
{
	int resched;
	uint32_t stflg;
	size_t len;
	uint32_t free;
	uint32_t slot;
	uint32_t used;
	uint32_t next;
	uint32_t nslots;
	mblk_t *mp;
	sw_tx_sbd_t *ssbdp;
	void *hw_sbd_p;
	send_ring_t *srp;
	nge_dmah_node_t *dme;
	nge_dmah_list_t dmah;

	srp = ngep->send;

	if (is_intr) {
		if (mutex_tryenter(srp->tc_lock) == 0)
			return;
	} else
		mutex_enter(srp->tc_lock);
	mutex_enter(srp->tx_lock);

	next = srp->tx_next;
	used = srp->tx_flow;
	free = srp->tx_free;

	mutex_exit(srp->tx_lock);

	slot = srp->tc_next;
	nslots = srp->desc.nslots;

	used = nslots - free - used;

	ASSERT(slot == NEXT_INDEX(next, free, nslots));

	if (used > srp->tx_hwmark)
		used = srp->tx_hwmark;

	nge_tx_desc_sync(ngep, slot, used, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * Look through the send ring's bd status fields to find all
	 * the bds which have been transmitted successfully, then
	 * reclaim all the resources associated with those bds.
	 */

	mp = NULL;
	dmah.head = NULL;
	dmah.tail = NULL;

	for (free = 0; used-- != 0; slot = NEXT(slot, nslots), ++free) {

		ssbdp = &srp->sw_sbds[slot];
		hw_sbd_p = DMA_VPTR(ssbdp->desc);

		if (ssbdp->flags == HOST_OWN)
			break;
		stflg = ngep->desc_attr.txd_check(hw_sbd_p, &len);
		if ((stflg & TXD_OWN) != 0)
			break;
		DMA_ZERO(ssbdp->desc);
		if (ssbdp->mp != NULL) {
			ssbdp->mp->b_next = mp;
			mp = ssbdp->mp;

			if (ssbdp->mp_hndl.head != NULL)
				nge_tx_dmah_push(&ssbdp->mp_hndl, &dmah);
		}

		NGE_TXSWD_RECYCLE(ssbdp);
	}

	/*
	 * We're about to release one or more places :-)
	 * These ASSERTions check that our invariants still hold:
	 *	there must always be at least one free place;
	 *	at this point, there must be at least one place NOT free;
	 *	we're not about to free more places than were claimed!
	 */

	mutex_enter(srp->tx_lock);

	srp->tx_free += free;
	ngep->watchdog = (srp->desc.nslots - srp->tx_free != 0);

	srp->tc_next = slot;

	ASSERT(srp->tx_free <= nslots);
	ASSERT(srp->tc_next == NEXT_INDEX(srp->tx_next, srp->tx_free, nslots));

	resched = (ngep->resched_needed != 0 && srp->tx_hwmark <= srp->tx_free);

	mutex_exit(srp->tx_lock);
	mutex_exit(srp->tc_lock);

	/* unbind/free mblks */

	for (dme = dmah.head; dme != NULL; dme = dme->next)
		(void) ddi_dma_unbind_handle(dme->hndl);

	mutex_enter(&srp->dmah_lock);
	nge_tx_dmah_push(&dmah, &srp->dmah_free);
	mutex_exit(&srp->dmah_lock);

	freemsgchain(mp);

	/*
	 * By this point we may have reclaimed some resources; if a
	 * reschedule was requested, report it to GLD via the soft
	 * interrupt.
	 */

	if (resched)
		(void) ddi_intr_trigger_softint(ngep->resched_hdl, NULL);
}
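
/*
 * nge_tx_alloc() below reserves <num> contiguous descriptor slots for
 * one send.  A minimal usage sketch (this is the pattern the send
 * paths in this file follow; all symbols are from this driver):
 *
 *	uint32_t start;
 *
 *	if ((start = nge_tx_alloc(ngep, bds)) == (uint32_t)-1)
 *		return (SEND_COPY_FAIL);	ring exhausted
 *	slots [start .. start+bds-1 (mod nslots)] are now ours
 *
 * When the ring runs low (tx_free < tx_lwmark) it first runs a
 * synchronous recycle pass before checking whether the reservation
 * can be satisfied.
 */
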
static uint32_t
nge_tx_alloc(nge_t *ngep, uint32_t num)
{
	uint32_t start;
	send_ring_t *srp;

	start = (uint32_t)-1;
	srp = ngep->send;

	mutex_enter(srp->tx_lock);

	if (srp->tx_free < srp->tx_lwmark) {

		mutex_exit(srp->tx_lock);
		nge_tx_recycle(ngep, B_FALSE);
		mutex_enter(srp->tx_lock);
	}

	if (srp->tx_free >= num) {

		start = srp->tx_next;

		srp->tx_next = NEXT_INDEX(start, num, srp->desc.nslots);
		srp->tx_free -= num;
		srp->tx_flow += num;
	}

	mutex_exit(srp->tx_lock);
	return (start);
}

static void
nge_tx_start(nge_t *ngep, uint32_t slotnum)
{
	nge_mode_cntl mode_cntl;
	send_ring_t *srp;

	srp = ngep->send;

	/*
	 * Because there can be multiple concurrent threads in
	 * transit through this code, we only want to notify the
	 * hardware once the last one is departing ...
	 */

	mutex_enter(srp->tx_lock);

	srp->tx_flow -= slotnum;
	if (srp->tx_flow == 0) {

		/*
		 * Bump the watchdog counter, thus guaranteeing that it's
		 * nonzero (watchdog activated).  Note that non-synchronised
		 * access here means we may race with the reclaim code
		 * above, but the outcome will be harmless.  At worst, the
		 * counter may not get reset on a partial reclaim; but the
		 * large trigger threshold makes false positives unlikely.
		 */
		ngep->watchdog++;

		mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
		mode_cntl.mode_bits.txdm = NGE_SET;
		mode_cntl.mode_bits.tx_rcom_en = NGE_SET;
		nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);
	}
	mutex_exit(srp->tx_lock);
}

static enum send_status
nge_send_copy(nge_t *ngep, mblk_t *mp, send_ring_t *srp);
#pragma	inline(nge_send_copy)

static enum send_status
nge_send_copy(nge_t *ngep, mblk_t *mp, send_ring_t *srp)
{
	size_t totlen;
	size_t mblen;
	uint32_t flags;
	uint32_t bds;
	uint32_t start_index;
	char *txb;
	mblk_t *bp;
	void *hw_sbd_p;
	sw_tx_sbd_t *ssbdp;

	hcksum_retrieve(mp, NULL, NULL, NULL, NULL,
	    NULL, NULL, &flags);
	bds = 0x1;

	if ((uint32_t)-1 == (start_index = nge_tx_alloc(ngep, bds)))
		return (SEND_COPY_FAIL);

	ASSERT(start_index < srp->desc.nslots);

	/*
	 * Up to this point, there's nothing that can fail,
	 * so we can go straight to claiming our
	 * already-reserved places on the train.
	 *
	 * This is the point of no return!
	 */

	bp = mp;
	ssbdp = &srp->sw_sbds[start_index];
	ASSERT(ssbdp->flags == HOST_OWN);

	txb = DMA_VPTR(ssbdp->pbuf);
	totlen = 0;
	for (; bp != NULL; bp = bp->b_cont) {
		if ((mblen = MBLKL(bp)) == 0)
			continue;
		if ((totlen += mblen) <= ngep->max_sdu) {
			bcopy(bp->b_rptr, txb, mblen);
			txb += mblen;
		}
	}

	DMA_SYNC(ssbdp->pbuf, DDI_DMA_SYNC_FORDEV);

	/* Fill & sync hw desc */

	hw_sbd_p = DMA_VPTR(ssbdp->desc);

	ngep->desc_attr.txd_fill(hw_sbd_p, &ssbdp->pbuf.cookie, totlen,
	    flags, B_TRUE);
	nge_tx_desc_sync(ngep, start_index, bds, DDI_DMA_SYNC_FORDEV);

	ssbdp->flags = CONTROLER_OWN;

	nge_tx_start(ngep, bds);

	/*
	 * The return status indicates that the message can be freed
	 * right away, as we've already copied the contents ...
	 */

	freemsg(mp);
	return (SEND_COPY_SUCESS);
}
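
/*
 * Summary of the copy path above, as a sketch (every step is taken
 * from the function itself):
 *
 *	start = nge_tx_alloc(ngep, 1);		reserve one slot
 *	bcopy(fragments, ssbdp->pbuf);		coalesce the message
 *	DMA_SYNC(pbuf, DDI_DMA_SYNC_FORDEV);	flush the bounce buffer
 *	txd_fill(...); nge_tx_desc_sync(...);	publish the descriptor
 *	ssbdp->flags = CONTROLER_OWN;		mark the slot for reclaim
 *	nge_tx_start(ngep, 1);			maybe kick the hardware
 *
 * The descriptor must be synced after it is filled and before the
 * hardware is kicked, in that order.
 */
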
/*
 * static enum send_status
 * nge_send_mapped(nge_t *ngep, mblk_t *mp, size_t fragno);
 * #pragma inline(nge_send_mapped)
 */

static enum send_status
nge_send_mapped(nge_t *ngep, mblk_t *mp, size_t fragno)
{
	int err;
	boolean_t end;
	uint32_t i;
	uint32_t j;
	uint32_t ncookies;
	uint32_t slot;
	uint32_t nslots;
	uint32_t mblen;
	uint32_t flags;
	uint32_t start_index;
	uint32_t end_index;
	mblk_t *bp;
	void *hw_sbd_p;
	send_ring_t *srp;
	nge_dmah_node_t *dmah;
	nge_dmah_node_t *dmer;
	nge_dmah_list_t dmah_list;
	ddi_dma_cookie_t cookie[NGE_MAX_COOKIES * NGE_MAP_FRAGS];

	srp = ngep->send;
	nslots = srp->desc.nslots;

	mutex_enter(&srp->dmah_lock);
	err = nge_tx_dmah_pop(&srp->dmah_free, &dmah_list, fragno);
	mutex_exit(&srp->dmah_lock);

	if (err != 0) {

		return (SEND_MAP_FAIL);
	}

	/*
	 * Pre-scan the message chain, binding each fragment for DMA
	 * and noting the total number of cookies (and hence bds)
	 * required.  This approach has two advantages:
	 *
	 * 1. We learn up front exactly what resources are needed
	 *    to send the message.
	 *
	 * 2. If we cannot acquire enough resources, we fail at once
	 *    and the driver falls back to the copy path to send
	 *    the message.
	 */

	slot = 0;
	dmah = dmah_list.head;

	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, NULL, &flags);

	for (bp = mp; bp != NULL; bp = bp->b_cont) {

		mblen = MBLKL(bp);
		if (mblen == 0)
			continue;

		err = ddi_dma_addr_bind_handle(dmah->hndl,
		    NULL, (caddr_t)bp->b_rptr, mblen,
		    DDI_DMA_STREAMING | DDI_DMA_WRITE,
		    DDI_DMA_DONTWAIT, NULL, cookie + slot, &ncookies);

		/*
		 * If the fragment cannot be mapped, there's no point
		 * in continuing with the mapped path; bail out so the
		 * message can be sent by the copy path instead.
		 *
		 * Per Intel's suggestion, the number of cookies per
		 * fragment should be kept below 4.
		 */
		if (err != DDI_DMA_MAPPED || ncookies > NGE_MAX_COOKIES) {
			NGE_DEBUG(("err(%x) map tx bulk fails"
			    " cookie(%x), ncookies(%x)",
			    err, cookie[slot].dmac_laddress, ncookies));
			goto map_fail;
		}

		/*
		 * Retrieve the remaining cookies of this bind;
		 * each cookie will consume one bd.
		 */
		for (end_index = slot + ncookies;
		    ++slot != end_index;
		    ddi_dma_nextcookie(dmah->hndl, cookie + slot))
			;

		dmah = dmah->next;
	}
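
	/*
	 * At this point cookie[0 .. slot-1] hold the DMA addresses of
	 * the whole message, in order.  For illustration only: a
	 * message of two fragments binding to 3 and 1 cookies
	 * respectively leaves slot = 4, and the send will consume
	 * 4 tx bds, one per cookie.
	 */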

	/*
	 * Now allocate the tx descriptors and fill them.
	 *
	 * IMPORTANT: once nge_tx_alloc() has claimed our
	 * already-reserved places on the train, nothing below
	 * can fail.
	 *
	 * This is the point of no return!
	 */

	if ((uint32_t)-1 == (start_index = nge_tx_alloc(ngep, slot)))
		goto map_fail;

	ASSERT(start_index < nslots);

	/* fill & sync hw desc, going in reverse order */

	end = B_TRUE;
	end_index = NEXT_INDEX(start_index, slot - 1, nslots);

	for (i = slot - 1, j = end_index; start_index - j != 0;
	    j = PREV(j, nslots), --i) {

		hw_sbd_p = DMA_VPTR(srp->sw_sbds[j].desc);
		ngep->desc_attr.txd_fill(hw_sbd_p, cookie + i,
		    cookie[i].dmac_size, 0, end);

		end = B_FALSE;
	}

	hw_sbd_p = DMA_VPTR(srp->sw_sbds[j].desc);
	ngep->desc_attr.txd_fill(hw_sbd_p, cookie + i, cookie[i].dmac_size,
	    flags, end);

	nge_tx_desc_sync(ngep, start_index, slot, DDI_DMA_SYNC_FORDEV);

	/* fill sw desc */

	for (j = start_index; end_index - j != 0; j = NEXT(j, nslots)) {

		srp->sw_sbds[j].flags = CONTROLER_OWN;
	}

	srp->sw_sbds[j].mp = mp;
	srp->sw_sbds[j].mp_hndl = dmah_list;
	srp->sw_sbds[j].frags = (uint32_t)fragno;
	srp->sw_sbds[j].flags = CONTROLER_OWN;

	nge_tx_start(ngep, slot);

	/*
	 * The return status indicates that the message must NOT be
	 * freed right away, but only once we know it has been
	 * transmitted successfully.
	 */
	return (SEND_MAP_SUCCESS);

map_fail:
	for (dmer = dmah_list.head; dmah - dmer != 0; dmer = dmer->next)
		(void) ddi_dma_unbind_handle(dmer->hndl);

	mutex_enter(&srp->dmah_lock);
	nge_tx_dmah_push(&dmah_list, &srp->dmah_free);
	mutex_exit(&srp->dmah_lock);

	return (SEND_MAP_FAIL);
}

static boolean_t
nge_send(nge_t *ngep, mblk_t *mp)
{
	mblk_t *bp;
	send_ring_t *srp;
	enum send_status status;
	uint32_t mblen = 0;
	uint32_t frags = 0;
	nge_statistics_t *nstp = &ngep->statistics;
	nge_sw_statistics_t *sw_stp = &nstp->sw_statistics;

	ASSERT(mp != NULL);
	ASSERT(ngep->nge_mac_state == NGE_MAC_STARTED);

	srp = ngep->send;
	/*
	 * Count the fragments and the total length of the message:
	 *
	 * 1. Zero-length or oversized messages are dropped outright.
	 *
	 * 2. Messages longer than the bcopy threshold are sent by
	 *    DMA binding when enough descriptors are free; anything
	 *    else (and any mapped send that fails) is sent by copying
	 *    into a pre-allocated buffer.
	 */
	for (frags = 0, bp = mp; bp != NULL; bp = bp->b_cont) {
		if (MBLKL(bp) == 0)
			continue;
		frags++;
		mblen += MBLKL(bp);
	}
	if (mblen > (ngep->max_sdu) || mblen == 0) {
		freemsg(mp);
		return (B_TRUE);
	}

	if ((mblen > ngep->param_txbcopy_threshold) &&
	    (srp->tx_free > frags * NGE_MAX_COOKIES)) {
		status = nge_send_mapped(ngep, mp, frags);
		if (status == SEND_MAP_FAIL)
			status = nge_send_copy(ngep, mp, srp);
	} else {
		status = nge_send_copy(ngep, mp, srp);
	}
	if (status == SEND_COPY_FAIL) {
		nge_tx_recycle(ngep, B_FALSE);
		status = nge_send_copy(ngep, mp, srp);
		if (status == SEND_COPY_FAIL) {
			ngep->resched_needed = 1;
			NGE_DEBUG(("nge_send: send fail!"));
			return (B_FALSE);
		}
	}
	/* Update the software statistics */
	sw_stp->obytes += mblen + ETHERFCSL;
	sw_stp->xmit_count++;

	return (B_TRUE);
}
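
/*
 * A numeric illustration of the dispatch policy above (the values are
 * assumptions for the sake of the example; the real ones come from
 * nge.h and the tunable param_txbcopy_threshold): taking
 * param_txbcopy_threshold = 256 and NGE_MAX_COOKIES = 3, a 1514-byte
 * message in 2 fragments goes through nge_send_mapped() whenever more
 * than 6 descriptors are free, while a 60-byte message always goes
 * through nge_send_copy().
 */
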
/*
 * nge_m_tx : Send a chain of packets.
 */
mblk_t *
nge_m_tx(void *arg, mblk_t *mp)
{
	nge_t *ngep = arg;
	mblk_t *next;

	rw_enter(ngep->rwlock, RW_READER);
	ASSERT(mp != NULL);
	if (ngep->nge_chip_state != NGE_CHIP_RUNNING) {
		freemsgchain(mp);
		mp = NULL;
	}
	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		if (!nge_send(ngep, mp)) {
			mp->b_next = next;
			break;
		}

		mp = next;
	}
	rw_exit(ngep->rwlock);

	return (mp);
}

/* ARGSUSED */
uint_t
nge_reschedule(caddr_t args1, caddr_t args2)
{
	nge_t *ngep;
	uint_t rslt;

	ngep = (nge_t *)args1;
	rslt = DDI_INTR_UNCLAIMED;

	/*
	 * When the soft interrupt is triggered, check whether
	 * it was raised by our own reschedule request.
	 */
	if (ngep->nge_mac_state == NGE_MAC_STARTED &&
	    ngep->resched_needed == 1) {
		ngep->resched_needed = 0;
		++ngep->statistics.sw_statistics.tx_resched;
		mac_tx_update(ngep->mh);
		rslt = DDI_INTR_CLAIMED;
	}
	return (rslt);
}

uint32_t
nge_hot_txd_check(const void *hwd, size_t *len)
{
	uint32_t err_flag;
	const hot_tx_bd *htbdp;

	htbdp = hwd;
	err_flag = htbdp->control_status.cntl_val & ~TXD_BCNT_MSK;

	*len = htbdp->control_status.status_bits.bcnt;
	return (err_flag);
}

uint32_t
nge_sum_txd_check(const void *hwd, size_t *len)
{
	uint32_t err_flag;
	const sum_tx_bd *htbdp;

	htbdp = hwd;
	err_flag = htbdp->control_status.cntl_val & ~TXD_BCNT_MSK;

	*len = htbdp->control_status.status_bits.bcnt;
	return (err_flag);
}


/*
 * Fill the contents of a tx data descriptor
 * before transmitting.
 */

void
nge_hot_txd_fill(void *hwdesc, const ddi_dma_cookie_t *cookie,
    size_t length, uint32_t sum_flag, boolean_t end)
{
	hot_tx_bd *hw_sbd_p = hwdesc;

	hw_sbd_p->host_buf_addr_hi = cookie->dmac_laddress >> 32;
	hw_sbd_p->host_buf_addr_lo = cookie->dmac_laddress;

	/*
	 * Set the length of the packet.
	 * Note: the value written to this field must be
	 * the original length minus 1.
	 */

	hw_sbd_p->control_status.control_sum_bits.bcnt = length - 1;

	/* set ip checksum */
	if (sum_flag & HCK_IPV4_HDRCKSUM)
		hw_sbd_p->control_status.control_sum_bits.ip_hsum
		    = NGE_SET;
	/* set tcp checksum */
	if (sum_flag & HCK_FULLCKSUM)
		hw_sbd_p->control_status.control_sum_bits.tcp_hsum
		    = NGE_SET;
	/*
	 * indicate the end of the BDs
	 */
	if (end)
		hw_sbd_p->control_status.control_sum_bits.end = NGE_SET;

	membar_producer();

	/* pass desc to HW */
	hw_sbd_p->control_status.control_sum_bits.own = NGE_SET;
}
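
/*
 * The store ordering in the fill routines is the important part:
 * every other descriptor field must be globally visible before the
 * OWN bit hands the descriptor to the NIC.  In sketch form (this is
 * exactly what the code above and below does):
 *
 *	fill address, length, checksum and END bits;
 *	membar_producer();	fields visible before OWN
 *	set the OWN bit;	hardware may now consume the bd
 */
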
void
nge_sum_txd_fill(void *hwdesc, const ddi_dma_cookie_t *cookie,
    size_t length, uint32_t sum_flag, boolean_t end)
{
	sum_tx_bd *hw_sbd_p = hwdesc;

	hw_sbd_p->host_buf_addr = cookie->dmac_address;

	/*
	 * Set the length of the packet.
	 * Note: the value written to this field must be
	 * the original length minus 1.
	 */

	hw_sbd_p->control_status.control_sum_bits.bcnt = length - 1;

	/* set ip checksum */
	if (sum_flag & HCK_IPV4_HDRCKSUM)
		hw_sbd_p->control_status.control_sum_bits.ip_hsum
		    = NGE_SET;
	/* set tcp checksum */
	if (sum_flag & HCK_FULLCKSUM)
		hw_sbd_p->control_status.control_sum_bits.tcp_hsum
		    = NGE_SET;
	/*
	 * indicate the end of the BDs
	 */
	if (end)
		hw_sbd_p->control_status.control_sum_bits.end = NGE_SET;

	membar_producer();

	/* pass desc to HW */
	hw_sbd_p->control_status.control_sum_bits.own = NGE_SET;
}