/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include "nge.h"

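/*
 * Tx descriptor control/status bits, as used in the descriptor's
 * control_status word: hardware-ownership flag, error summary,
 * end-of-packet marker, and the byte-count field mask.
 */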
#define TXD_OWN         0x80000000
#define TXD_ERR         0x40000000
#define TXD_END         0x20000000
#define TXD_BCNT_MSK    0x00003FFF


#undef  NGE_DBG
#define NGE_DBG         NGE_DBG_SEND

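/*
 * Return a software tx descriptor to its idle state: no mblk attached,
 * no fragments, an empty DMA-handle list, and ownership handed back to
 * the host.
 */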
#define NGE_TXSWD_RECYCLE(sd)   { \
        (sd)->mp = NULL; \
        (sd)->frags = 0; \
        (sd)->mp_hndl.head = NULL; \
        (sd)->mp_hndl.tail = NULL; \
        (sd)->flags = HOST_OWN; \
}


static size_t nge_tx_dmah_pop(nge_dmah_list_t *, nge_dmah_list_t *, size_t);
static void nge_tx_dmah_push(nge_dmah_list_t *, nge_dmah_list_t *);


void nge_tx_recycle_all(nge_t *ngep);

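/*
 * Walk the entire send ring, unbinding any DMA handles still attached,
 * freeing the pending mblks, and marking every software descriptor
 * host-owned again; then let GLD know if a tx reschedule was pending.
 */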
void
nge_tx_recycle_all(nge_t *ngep)
{
        send_ring_t *srp;
        sw_tx_sbd_t *ssbdp;
        nge_dmah_node_t *dmah;
        uint32_t slot;
        uint32_t nslots;

        srp = ngep->send;
        nslots = srp->desc.nslots;

        for (slot = 0; slot < nslots; ++slot) {

                ssbdp = srp->sw_sbds + slot;

                DMA_ZERO(ssbdp->desc);

                if (ssbdp->mp != NULL) {

                        for (dmah = ssbdp->mp_hndl.head; dmah != NULL;
                            dmah = dmah->next)
                                (void) ddi_dma_unbind_handle(dmah->hndl);

                        freemsg(ssbdp->mp);
                }

                NGE_TXSWD_RECYCLE(ssbdp);
        }
        if (ngep->nge_mac_state == NGE_MAC_STARTED &&
            ngep->resched_needed == 1) {
                ngep->resched_needed = 0;
                mac_tx_update(ngep->mh);
        }
}

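/*
 * Move the first `num' nodes of the DMA-handle list `src' onto `dst'.
 * Returns 0 on success; a nonzero value means `src' held fewer than
 * `num' nodes, and both lists are left unchanged.
 */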
static size_t
nge_tx_dmah_pop(nge_dmah_list_t *src, nge_dmah_list_t *dst, size_t num)
{
        nge_dmah_node_t *node;

        for (node = src->head; node != NULL && --num != 0; node = node->next)
                ;

        if (num == 0) {

                dst->head = src->head;
                dst->tail = node;

                if ((src->head = node->next) == NULL)
                        src->tail = NULL;

                node->next = NULL;
        }

        return (num);
}

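/*
 * Append the whole DMA-handle list `src' to the tail of `dst'.
 * `src' itself is not modified; callers discard it afterwards.
 */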
static void
nge_tx_dmah_push(nge_dmah_list_t *src, nge_dmah_list_t *dst)
{
        if (dst->tail != NULL)
                dst->tail->next = src->head;
        else
                dst->head = src->head;

        dst->tail = src->tail;
}

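/*
 * DMA-sync `bds' hardware descriptors beginning at `start_index'.
 * If the range wraps past the end of the descriptor ring, sync it in
 * two pieces (the zero length passed to the first ddi_dma_sync() call
 * extends the sync through the end of the mapping).
 */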
static void
nge_tx_desc_sync(nge_t *ngep, uint32_t start_index, uint32_t bds, uint_t type)
{
        send_ring_t *srp = ngep->send;
        const size_t txd_size = ngep->desc_attr.txd_size;
        const uint64_t end = srp->desc.nslots * txd_size;
        uint64_t start;
        uint64_t num;

        start = start_index * txd_size;
        num = bds * txd_size;

        if (start + num <= end)
                (void) ddi_dma_sync(srp->desc.dma_hdl, start, num, type);
        else {

                (void) ddi_dma_sync(srp->desc.dma_hdl, start, 0, type);
                (void) ddi_dma_sync(srp->desc.dma_hdl, 0, start + num - end,
                    type);
        }
}

/*
 * Reclaim the resources held by descriptors that the hardware
 * has finished transmitting.
 */
void
nge_tx_recycle(nge_t *ngep, boolean_t is_intr)
{
        int resched;
        uint32_t stflg;
        uint32_t free;
        uint32_t slot;
        uint32_t used;
        uint32_t next;
        uint32_t nslots;
        mblk_t *mp;
        sw_tx_sbd_t *ssbdp;
        void *hw_sbd_p;
        send_ring_t *srp;
        nge_dmah_node_t *dme;
        nge_dmah_list_t dmah;

        srp = ngep->send;

        if (is_intr) {
                if (mutex_tryenter(srp->tc_lock) == 0)
                        return;
        } else
                mutex_enter(srp->tc_lock);
        mutex_enter(srp->tx_lock);

        next = srp->tx_next;
        used = srp->tx_flow;
        free = srp->tx_free;

        mutex_exit(srp->tx_lock);

        slot = srp->tc_next;
        nslots = srp->desc.nslots;

        used = nslots - free - used;

        ASSERT(slot == NEXT_INDEX(next, free, nslots));
        if (used == 0) {
                ngep->watchdog = 0;
                mutex_exit(srp->tc_lock);
                return;
        }

        if (used > srp->tx_hwmark && ngep->resched_needed == 0)
                used = srp->tx_hwmark;

        nge_tx_desc_sync(ngep, slot, used, DDI_DMA_SYNC_FORKERNEL);

        /*
         * Look through the send ring, checking each bd's status word,
         * to find all the bds that have been transmitted successfully,
         * then reclaim all the resources associated with those bds.
         */

        mp = NULL;
        dmah.head = NULL;
        dmah.tail = NULL;

        for (free = 0; used-- != 0; slot = NEXT(slot, nslots), ++free) {

                ssbdp = &srp->sw_sbds[slot];
                hw_sbd_p = DMA_VPTR(ssbdp->desc);

                if (ssbdp->flags == HOST_OWN)
                        break;
                stflg = ngep->desc_attr.txd_check(hw_sbd_p);
                if ((stflg & TXD_OWN) != 0)
                        break;
                DMA_ZERO(ssbdp->desc);
                if (ssbdp->mp != NULL) {
                        ssbdp->mp->b_next = mp;
                        mp = ssbdp->mp;

                        if (ssbdp->mp_hndl.head != NULL)
                                nge_tx_dmah_push(&ssbdp->mp_hndl, &dmah);
                }

                NGE_TXSWD_RECYCLE(ssbdp);
        }

        /*
         * We're about to release one or more places :-)
         * These ASSERTions check that our invariants still hold:
         *      there must always be at least one free place
         *      at this point, there must be at least one place NOT free
         *      we're not about to free more places than were claimed!
         */

        if (free == 0) {
                mutex_exit(srp->tc_lock);
                return;
        }

        mutex_enter(srp->tx_lock);

        srp->tx_free += free;
        ngep->watchdog = (srp->desc.nslots - srp->tx_free != 0);

        srp->tc_next = slot;

        ASSERT(srp->tx_free <= nslots);
        ASSERT(srp->tc_next == NEXT_INDEX(srp->tx_next, srp->tx_free, nslots));

        resched = (ngep->resched_needed != 0 && srp->tx_hwmark <= srp->tx_free);

        mutex_exit(srp->tx_lock);
        mutex_exit(srp->tc_lock);

        /* unbind/free mblks */

        for (dme = dmah.head; dme != NULL; dme = dme->next)
                (void) ddi_dma_unbind_handle(dme->hndl);
        if (dmah.head != NULL) {
                mutex_enter(&srp->dmah_lock);
                nge_tx_dmah_push(&dmah, &srp->dmah_free);
                mutex_exit(&srp->dmah_lock);
        }
        freemsgchain(mp);

        /*
         * Up to this point we may have reclaimed some resources.
         * If GLD has asked to be told when ring space becomes
         * available again, report it now.
         */

        if (resched)
                (void) ddi_intr_trigger_softint(ngep->resched_hdl, NULL);
}

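/*
 * Reserve `num' consecutive descriptor slots for a new transmit.
 * If free slots have fallen below the low-water mark, attempt a
 * synchronous recycle first.  Returns the index of the first slot
 * claimed, or (uint32_t)-1 if the ring cannot supply `num' slots.
 */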
static uint32_t
nge_tx_alloc(nge_t *ngep, uint32_t num)
{
        uint32_t start;
        send_ring_t *srp;

        start = (uint32_t)-1;
        srp = ngep->send;

        mutex_enter(srp->tx_lock);

        if (srp->tx_free < srp->tx_lwmark) {

                mutex_exit(srp->tx_lock);
                nge_tx_recycle(ngep, B_FALSE);
                mutex_enter(srp->tx_lock);
        }

        if (srp->tx_free >= num) {

                start = srp->tx_next;

                srp->tx_next = NEXT_INDEX(start, num, srp->desc.nslots);
                srp->tx_free -= num;
                srp->tx_flow += num;
        }

        mutex_exit(srp->tx_lock);
        return (start);
}

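/*
 * Account for `slotnum' filled slots and, once the last concurrent
 * filler has passed through (tx_flow drops to zero), arm the watchdog
 * and kick the hardware transmit DMA engine.
 */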
static void
nge_tx_start(nge_t *ngep, uint32_t slotnum)
{
        nge_mode_cntl mode_cntl;
        send_ring_t *srp;

        srp = ngep->send;

        /*
         * Because there can be multiple concurrent threads in
         * transit through this code, we only want to notify the
         * hardware once the last one is departing ...
         */

        mutex_enter(srp->tx_lock);

        srp->tx_flow -= slotnum;
        if (srp->tx_flow == 0) {

                /*
                 * Bump the watchdog counter, thus guaranteeing that it's
                 * nonzero (watchdog activated). Note that non-synchronised
                 * access here means we may race with the reclaim() code
                 * above, but the outcome will be harmless. At worst, the
                 * counter may not get reset on a partial reclaim; but the
                 * large trigger threshold makes false positives unlikely.
                 */
                if (ngep->watchdog == 0)
                        ngep->watchdog = 1;

                mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
                mode_cntl.mode_bits.txdm = NGE_SET;
                mode_cntl.mode_bits.tx_rcom_en = NGE_SET;
                nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);
        }
        mutex_exit(srp->tx_lock);
}

static enum send_status
nge_send_copy(nge_t *ngep, mblk_t *mp, send_ring_t *srp);

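/*
 * Copy-based transmit: the whole message is copied into the slot's
 * preallocated DMA buffer, so exactly one descriptor is consumed and
 * the mblk can be freed as soon as the copy is done.
 */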
static enum send_status
nge_send_copy(nge_t *ngep, mblk_t *mp, send_ring_t *srp)
{
        size_t totlen;
        size_t mblen;
        uint32_t flags;
        uint32_t bds;
        uint32_t start_index;
        char *txb;
        mblk_t *bp;
        void *hw_sbd_p;
        sw_tx_sbd_t *ssbdp;
        boolean_t tfint;

        mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &flags);
        bds = 0x1;

        if ((uint32_t)-1 == (start_index = nge_tx_alloc(ngep, bds)))
                return (SEND_COPY_FAIL);

        ASSERT(start_index < srp->desc.nslots);

        /*
         * Up to this point, there's nothing that can fail,
         * so we can go straight to claiming our
         * already-reserved place on the train.
         *
         * This is the point of no return!
         */

        tfint = ((start_index % ngep->tfint_threshold) == 0);
        bp = mp;
        ssbdp = &srp->sw_sbds[start_index];
        ASSERT(ssbdp->flags == HOST_OWN);

        txb = DMA_VPTR(ssbdp->pbuf);
        totlen = 0;
        for (; bp != NULL; bp = bp->b_cont) {
                if ((mblen = MBLKL(bp)) == 0)
                        continue;
                if ((totlen += mblen) <= ngep->max_sdu) {
                        bcopy(bp->b_rptr, txb, mblen);
                        txb += mblen;
                }
        }

        DMA_SYNC(ssbdp->pbuf, DDI_DMA_SYNC_FORDEV);

        /* Fill & sync hw desc */

        hw_sbd_p = DMA_VPTR(ssbdp->desc);

        ngep->desc_attr.txd_fill(hw_sbd_p, &ssbdp->pbuf.cookie, totlen,
            flags, B_TRUE, tfint);
        nge_tx_desc_sync(ngep, start_index, bds, DDI_DMA_SYNC_FORDEV);

        ssbdp->flags = CONTROLER_OWN;

        nge_tx_start(ngep, bds);

        /*
         * The return status indicates that the message can be freed
         * right away, as we've already copied the contents ...
         */

        freemsg(mp);
        return (SEND_COPY_SUCESS);
}

/*
 * static enum send_status
 * nge_send_mapped(nge_t *ngep, mblk_t *mp, size_t fragno);
 */

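/*
 * Mapped (zero-copy) transmit: DMA-bind each fragment of the message
 * and consume one descriptor per DMA cookie.  The mblk and its handle
 * list are parked in the last slot's software descriptor so they can
 * be released when the hardware finishes with the chain.
 */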
static enum send_status
nge_send_mapped(nge_t *ngep, mblk_t *mp, size_t fragno)
{
        int err;
        boolean_t end;
        uint32_t i;
        uint32_t j;
        uint32_t ncookies;
        uint32_t slot;
        uint32_t nslots;
        uint32_t mblen;
        uint32_t flags;
        uint32_t start_index;
        uint32_t end_index;
        mblk_t *bp;
        void *hw_sbd_p;
        send_ring_t *srp;
        nge_dmah_node_t *dmah;
        nge_dmah_node_t *dmer;
        nge_dmah_list_t dmah_list;
        ddi_dma_cookie_t cookie[NGE_MAX_COOKIES * NGE_MAP_FRAGS];
        boolean_t tfint;

        srp = ngep->send;
        nslots = srp->desc.nslots;

        mutex_enter(&srp->dmah_lock);
        err = nge_tx_dmah_pop(&srp->dmah_free, &dmah_list, fragno);
        mutex_exit(&srp->dmah_lock);

        if (err != 0) {

                return (SEND_MAP_FAIL);
        }

        /*
         * Pre-scan the message chain, noting the total number of bytes
         * and the number of fragments, by pre-doing the DMA address
         * bind if the fragment is larger than NGE_COPY_SIZE.
         * This approach has two advantages:
         * 1. It acquires, up front, detailed knowledge of the
         *    resources needed to send the message.
         *
         * 2. If enough resources cannot be pre-allocated, it fails at
         *    once, and the driver falls back to the copy method to
         *    send out the message.
         */

        slot = 0;
        dmah = dmah_list.head;

        mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &flags);

        for (bp = mp; bp != NULL; bp = bp->b_cont) {

                mblen = MBLKL(bp);
                if (mblen == 0)
                        continue;

                err = ddi_dma_addr_bind_handle(dmah->hndl,
                    NULL, (caddr_t)bp->b_rptr, mblen,
                    DDI_DMA_STREAMING | DDI_DMA_WRITE,
                    DDI_DMA_DONTWAIT, NULL, cookie + slot, &ncookies);

                /*
                 * If the fragment cannot be mapped, there is no point
                 * in sending the message by the mapped method; send it
                 * by the copy method instead.
                 *
                 * Following Intel's suggestion, it is better to keep
                 * the number of cookies below four.
                 */
                if (err != DDI_DMA_MAPPED || ncookies > NGE_MAX_COOKIES) {
                        NGE_DEBUG(("err(%x) map tx bulk fails"
                            " cookie(%x), ncookies(%x)",
                            err, cookie[slot].dmac_laddress, ncookies));
                        goto map_fail;
                }

                /*
                 * Check how many bds a cookie will consume
                 */
                for (end_index = slot + ncookies;
                    ++slot != end_index;
                    ddi_dma_nextcookie(dmah->hndl, cookie + slot))
                        ;

                dmah = dmah->next;
        }

        /*
         * Now allocate tx descriptors and fill them.
         * IMPORTANT:
         * Up to the point where it claims a place, it is impossible
         * to fail.
         *
         * In this version, there's no setup to be done here, and there's
         * nothing that can fail, so we can go straight to claiming our
         * already-reserved places on the train.
         *
         * This is the point of no return!
         */

        if ((uint32_t)-1 == (start_index = nge_tx_alloc(ngep, slot)))
                goto map_fail;

        ASSERT(start_index < nslots);

        /* fill & sync hw desc, going in reverse order */

        end = B_TRUE;
        end_index = NEXT_INDEX(start_index, slot - 1, nslots);

        for (i = slot - 1, j = end_index; start_index - j != 0;
            j = PREV(j, nslots), --i) {

                tfint = ((j % ngep->tfint_threshold) == 0);
                hw_sbd_p = DMA_VPTR(srp->sw_sbds[j].desc);
                ngep->desc_attr.txd_fill(hw_sbd_p, cookie + i,
                    cookie[i].dmac_size, 0, end, tfint);

                end = B_FALSE;
        }

        hw_sbd_p = DMA_VPTR(srp->sw_sbds[j].desc);
        tfint = ((j % ngep->tfint_threshold) == 0);
        ngep->desc_attr.txd_fill(hw_sbd_p, cookie + i, cookie[i].dmac_size,
            flags, end, tfint);

        nge_tx_desc_sync(ngep, start_index, slot, DDI_DMA_SYNC_FORDEV);

        /* fill sw desc */

        for (j = start_index; end_index - j != 0; j = NEXT(j, nslots)) {

                srp->sw_sbds[j].flags = CONTROLER_OWN;
        }

        srp->sw_sbds[j].mp = mp;
        srp->sw_sbds[j].mp_hndl = dmah_list;
        srp->sw_sbds[j].frags = (uint32_t)fragno;
        srp->sw_sbds[j].flags = CONTROLER_OWN;

        nge_tx_start(ngep, slot);

        /*
         * The return status indicates that the message cannot be freed
         * right away: it must be kept until we can be sure it has been
         * transmitted successfully.
         */
        return (SEND_MAP_SUCCESS);

map_fail:
        for (dmer = dmah_list.head; dmah - dmer != 0; dmer = dmer->next)
                (void) ddi_dma_unbind_handle(dmer->hndl);

        mutex_enter(&srp->dmah_lock);
        nge_tx_dmah_push(&dmah_list, &srp->dmah_free);
        mutex_exit(&srp->dmah_lock);

        return (SEND_MAP_FAIL);
}

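/*
 * Transmit a single message: pick the mapped method for long messages
 * with few fragments (when enough descriptors are free), otherwise
 * fall back to copying.  Returns B_FALSE if the message could not be
 * queued and the caller should retry later.
 */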
static boolean_t
nge_send(nge_t *ngep, mblk_t *mp)
{
        mblk_t *bp;
        send_ring_t *srp;
        enum send_status status;
        uint32_t mblen = 0;
        uint32_t frags = 0;
        nge_statistics_t *nstp = &ngep->statistics;
        nge_sw_statistics_t *sw_stp = &nstp->sw_statistics;

        ASSERT(mp != NULL);
        ASSERT(ngep->nge_mac_state == NGE_MAC_STARTED);

        srp = ngep->send;
        /*
         * 1. Check the number of fragments in the message.
         *    If the total number is larger than 3, choose the
         *    copy method.
         *
         * 2. Check the length of the message: if it is larger than
         *    NGE_TX_COPY_SIZE, choose the mapped method.
         */
        for (frags = 0, bp = mp; bp != NULL; bp = bp->b_cont) {
                if (MBLKL(bp) == 0)
                        continue;
                frags++;
                mblen += MBLKL(bp);
        }
        if (mblen > (ngep->max_sdu) || mblen == 0) {
                freemsg(mp);
                return (B_TRUE);
        }
        if ((mblen > ngep->param_txbcopy_threshold) &&
            (frags <= NGE_MAP_FRAGS) &&
            (srp->tx_free > frags * NGE_MAX_COOKIES)) {
                status = nge_send_mapped(ngep, mp, frags);
                if (status == SEND_MAP_FAIL)
                        status = nge_send_copy(ngep, mp, srp);
        } else {
                status = nge_send_copy(ngep, mp, srp);
        }
        if (status == SEND_COPY_FAIL) {
                nge_tx_recycle(ngep, B_FALSE);
                status = nge_send_copy(ngep, mp, srp);
                if (status == SEND_COPY_FAIL) {
                        ngep->resched_needed = 1;
                        NGE_DEBUG(("nge_send: send fail!"));
                        return (B_FALSE);
                }
        }
        /* Update the software statistics */
        sw_stp->obytes += mblen + ETHERFCSL;
        sw_stp->xmit_count++;

        return (B_TRUE);
}

/*
 * nge_m_tx : Send a chain of packets.
 */
mblk_t *
nge_m_tx(void *arg, mblk_t *mp)
{
        nge_t *ngep = arg;
        mblk_t *next;

        rw_enter(ngep->rwlock, RW_READER);
        ASSERT(mp != NULL);
        if (ngep->nge_chip_state != NGE_CHIP_RUNNING) {
                freemsgchain(mp);
                mp = NULL;
        }
        while (mp != NULL) {
                next = mp->b_next;
                mp->b_next = NULL;

                if (!nge_send(ngep, mp)) {
                        mp->b_next = next;
                        break;
                }

                mp = next;
        }
        rw_exit(ngep->rwlock);

        return (mp);
}

/* ARGSUSED */
uint_t
nge_reschedule(caddr_t args1, caddr_t args2)
{
        nge_t *ngep;
        uint_t rslt;

        ngep = (nge_t *)args1;
        rslt = DDI_INTR_UNCLAIMED;

        /*
         * When the soft interrupt is triggered, check whether it
         * was raised for the reason we expect (a tx reschedule).
         */
        if (ngep->nge_mac_state == NGE_MAC_STARTED &&
            ngep->resched_needed == 1) {
                ngep->resched_needed = 0;
                ++ngep->statistics.sw_statistics.tx_resched;
                mac_tx_update(ngep->mh);
                rslt = DDI_INTR_CLAIMED;
        }
        return (rslt);
}

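/*
 * Return the raw control/status word of a hardware tx descriptor so
 * that callers can test the TXD_* bits.  The "hot" variant reads the
 * descriptor layout with 64-bit buffer addresses, the "sum" variant
 * the 32-bit layout (cf. the fill routines below).
 */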
uint32_t
nge_hot_txd_check(const void *hwd)
{
        uint32_t err_flag;
        const hot_tx_bd *htbdp;

        htbdp = hwd;
        err_flag = htbdp->control_status.cntl_val;
        return (err_flag);
}

uint32_t
nge_sum_txd_check(const void *hwd)
{
        uint32_t err_flag;
        const sum_tx_bd *htbdp;

        htbdp = hwd;
        err_flag = htbdp->control_status.cntl_val;
        return (err_flag);
}

/*
 * Fill in the contents of a Tx data descriptor
 * before transmitting.
 */

void
nge_hot_txd_fill(void *hwdesc, const ddi_dma_cookie_t *cookie,
    size_t length, uint32_t sum_flag, boolean_t end, boolean_t tfint)
{
        hot_tx_bd *hw_sbd_p = hwdesc;

        hw_sbd_p->host_buf_addr_hi = cookie->dmac_laddress >> 32;
        hw_sbd_p->host_buf_addr_lo = cookie->dmac_laddress;

        /*
         * Set the length of the packet.
         * Note: the value written to this field must be
         * the original length minus 1.
         */

        hw_sbd_p->control_status.control_sum_bits.bcnt = length - 1;

        /* set ip checksum */
        if (sum_flag & HCK_IPV4_HDRCKSUM)
                hw_sbd_p->control_status.control_sum_bits.ip_hsum
                    = NGE_SET;
        /* set tcp checksum */
        if (sum_flag & HCK_FULLCKSUM)
                hw_sbd_p->control_status.control_sum_bits.tcp_hsum
                    = NGE_SET;
        /*
         * indicate the end of the BDs
         */
        if (tfint)
                hw_sbd_p->control_status.control_sum_bits.inten = NGE_SET;
        if (end)
                hw_sbd_p->control_status.control_sum_bits.end = NGE_SET;

        membar_producer();

        /* pass desc to HW */
        hw_sbd_p->control_status.control_sum_bits.own = NGE_SET;
}

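/*
 * As nge_hot_txd_fill() above, but for the descriptor layout that
 * carries a single 32-bit buffer address.
 */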
void
nge_sum_txd_fill(void *hwdesc, const ddi_dma_cookie_t *cookie,
    size_t length, uint32_t sum_flag, boolean_t end, boolean_t tfint)
{
        sum_tx_bd *hw_sbd_p = hwdesc;

        hw_sbd_p->host_buf_addr = cookie->dmac_address;

        /*
         * Set the length of the packet.
         * Note: the value written to this field must be
         * the original length minus 1.
         */

        hw_sbd_p->control_status.control_sum_bits.bcnt = length - 1;

        /* set ip checksum */
        if (sum_flag & HCK_IPV4_HDRCKSUM)
                hw_sbd_p->control_status.control_sum_bits.ip_hsum
                    = NGE_SET;
        /* set tcp checksum */
        if (sum_flag & HCK_FULLCKSUM)
                hw_sbd_p->control_status.control_sum_bits.tcp_hsum
                    = NGE_SET;
        /*
         * indicate the end of the BDs
         */
        if (tfint)
                hw_sbd_p->control_status.control_sum_bits.inten = NGE_SET;
        if (end)
                hw_sbd_p->control_status.control_sum_bits.end = NGE_SET;

        membar_producer();

        /* pass desc to HW */
        hw_sbd_p->control_status.control_sum_bits.own = NGE_SET;
}