/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include "nge.h"

#define	TXD_OWN		0x80000000
#define	TXD_ERR		0x40000000
#define	TXD_END		0x20000000
#define	TXD_BCNT_MSK	0x00003FFF


#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_SEND

#define	NGE_TXSWD_RECYCLE(sd)	{\
	(sd)->mp = NULL; \
	(sd)->frags = 0; \
	(sd)->mp_hndl.head = NULL; \
	(sd)->mp_hndl.tail = NULL; \
	(sd)->flags = HOST_OWN; \
}


static size_t nge_tx_dmah_pop(nge_dmah_list_t *, nge_dmah_list_t *, size_t);
static void nge_tx_dmah_push(nge_dmah_list_t *, nge_dmah_list_t *);


void nge_tx_recycle_all(nge_t *ngep);
#pragma	no_inline(nge_tx_recycle_all)

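/*
 * Reclaim every slot in the send ring unconditionally: unbind any
 * outstanding DMA handles, free any pending mblk chains, and reset
 * each software descriptor to HOST_OWN.  If a reschedule had been
 * requested, notify the MAC layer that transmit may resume.
 */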
void
nge_tx_recycle_all(nge_t *ngep)
{
	send_ring_t *srp;
	sw_tx_sbd_t *ssbdp;
	nge_dmah_node_t *dmah;
	uint32_t slot;
	uint32_t nslots;

	srp = ngep->send;
	nslots = srp->desc.nslots;

	for (slot = 0; slot < nslots; ++slot) {

		ssbdp = srp->sw_sbds + slot;

		DMA_ZERO(ssbdp->desc);

		if (ssbdp->mp != NULL) {

			for (dmah = ssbdp->mp_hndl.head; dmah != NULL;
			    dmah = dmah->next)
				(void) ddi_dma_unbind_handle(dmah->hndl);

			freemsg(ssbdp->mp);
		}

		NGE_TXSWD_RECYCLE(ssbdp);
	}
	if (ngep->nge_mac_state == NGE_MAC_STARTED &&
	    ngep->resched_needed == 1) {
		ngep->resched_needed = 0;
		mac_tx_update(ngep->mh);
	}
}

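/*
 * Detach the first "num" nodes from the "src" handle list and hand
 * them to "dst" as a ready-made list.  Returns 0 on success; a
 * nonzero return means "src" held fewer than "num" nodes and is
 * left unchanged.
 */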
static size_t
nge_tx_dmah_pop(nge_dmah_list_t *src, nge_dmah_list_t *dst, size_t num)
{
	nge_dmah_node_t *node;

	for (node = src->head; node != NULL && --num != 0; node = node->next)
		;

	if (num == 0) {

		dst->head = src->head;
		dst->tail = node;

		if ((src->head = node->next) == NULL)
			src->tail = NULL;

		node->next = NULL;
	}

	return (num);
}

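/*
 * Append the entire "src" handle list to the tail of "dst".  "src"
 * itself is not cleared here; callers reinitialise it as needed.
 */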
static void
nge_tx_dmah_push(nge_dmah_list_t *src, nge_dmah_list_t *dst)
{
	if (dst->tail != NULL)
		dst->tail->next = src->head;
	else
		dst->head = src->head;

	dst->tail = src->tail;
}

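/*
 * Sync "bds" transmit descriptors starting at "start_index".  The
 * descriptor area is a circular ring, so a range that runs past the
 * last slot is synced in two pieces: from "start" to the end of the
 * area (a length of 0 tells ddi_dma_sync() to sync to the end), then
 * from the beginning of the area for the remainder.
 */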
static void
nge_tx_desc_sync(nge_t *ngep, uint32_t start_index, uint32_t bds, uint_t type)
{
	send_ring_t *srp = ngep->send;
	const size_t txd_size = ngep->desc_attr.txd_size;
	const uint64_t end = srp->desc.nslots * txd_size;
	uint64_t start;
	uint64_t num;

	start = start_index * txd_size;
	num = bds * txd_size;

	if (start + num <= end)
		(void) ddi_dma_sync(srp->desc.dma_hdl, start, num, type);
	else {

		(void) ddi_dma_sync(srp->desc.dma_hdl, start, 0, type);
		(void) ddi_dma_sync(srp->desc.dma_hdl, 0, start + num - end,
		    type);
	}
}

/*
 * Reclaim resources after transmit completion.
 */
void
nge_tx_recycle(nge_t *ngep, boolean_t is_intr)
{
	int resched;
	uint32_t stflg;
	uint32_t free;
	uint32_t slot;
	uint32_t used;
	uint32_t next;
	uint32_t nslots;
	mblk_t *mp;
	sw_tx_sbd_t *ssbdp;
	void *hw_sbd_p;
	send_ring_t *srp;
	nge_dmah_node_t *dme;
	nge_dmah_list_t dmah;

	srp = ngep->send;

	if (is_intr) {
		if (mutex_tryenter(srp->tc_lock) == 0)
			return;
	} else
		mutex_enter(srp->tc_lock);
	mutex_enter(srp->tx_lock);

	next = srp->tx_next;
	used = srp->tx_flow;
	free = srp->tx_free;

	mutex_exit(srp->tx_lock);

	slot = srp->tc_next;
	nslots = srp->desc.nslots;

	used = nslots - free - used;

	ASSERT(slot == NEXT_INDEX(next, free, nslots));
	if (used == 0) {
		ngep->watchdog = 0;
		mutex_exit(srp->tc_lock);
		return;
	}

	if (used > srp->tx_hwmark && ngep->resched_needed == 0)
		used = srp->tx_hwmark;

	nge_tx_desc_sync(ngep, slot, used, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * Walk the send ring, checking each BD's status word to find
	 * the descriptors the hardware has finished transmitting, then
	 * reclaim all resources associated with those BDs.
	 */

	mp = NULL;
	dmah.head = NULL;
	dmah.tail = NULL;

	for (free = 0; used-- != 0; slot = NEXT(slot, nslots), ++free) {

		ssbdp = &srp->sw_sbds[slot];
		hw_sbd_p = DMA_VPTR(ssbdp->desc);

		if (ssbdp->flags == HOST_OWN)
			break;
		stflg = ngep->desc_attr.txd_check(hw_sbd_p);
		if ((stflg & TXD_OWN) != 0)
			break;
		DMA_ZERO(ssbdp->desc);
		if (ssbdp->mp != NULL) {
			ssbdp->mp->b_next = mp;
			mp = ssbdp->mp;

			if (ssbdp->mp_hndl.head != NULL)
				nge_tx_dmah_push(&ssbdp->mp_hndl, &dmah);
		}

		NGE_TXSWD_RECYCLE(ssbdp);
	}

	/*
	 * We're about to release one or more places :-)
	 * These ASSERTions check that our invariants still hold:
	 *	there must always be at least one free place
	 *	at this point, there must be at least one place NOT free
	 *	we're not about to free more places than were claimed!
	 */

	if (free == 0) {
		mutex_exit(srp->tc_lock);
		return;
	}

	mutex_enter(srp->tx_lock);

	srp->tx_free += free;
	ngep->watchdog = (srp->desc.nslots - srp->tx_free != 0);

	srp->tc_next = slot;

	ASSERT(srp->tx_free <= nslots);
	ASSERT(srp->tc_next == NEXT_INDEX(srp->tx_next, srp->tx_free, nslots));

	resched = (ngep->resched_needed != 0 && srp->tx_hwmark <= srp->tx_free);

	mutex_exit(srp->tx_lock);
	mutex_exit(srp->tc_lock);

	/* unbind/free mblks */

	for (dme = dmah.head; dme != NULL; dme = dme->next)
		(void) ddi_dma_unbind_handle(dme->hndl);
	if (dmah.head != NULL) {
		mutex_enter(&srp->dmah_lock);
		nge_tx_dmah_push(&dmah, &srp->dmah_free);
		mutex_exit(&srp->dmah_lock);
	}
	freemsgchain(mp);

	/*
	 * Up to this point we may have reclaimed some resources;
	 * if a reschedule was requested, report it to GLD via the
	 * soft interrupt.
	 */

	if (resched)
		(void) ddi_intr_trigger_softint(ngep->resched_hdl, NULL);
}

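/*
 * Reserve "num" contiguous descriptors from the send ring.  If the
 * free count has fallen below the low-water mark, attempt a
 * synchronous recycle first.  Returns the starting slot index, or
 * (uint32_t)-1 if the ring cannot supply "num" slots.
 */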
static uint32_t
nge_tx_alloc(nge_t *ngep, uint32_t num)
{
	uint32_t start;
	send_ring_t *srp;

	start = (uint32_t)-1;
	srp = ngep->send;

	mutex_enter(srp->tx_lock);

	if (srp->tx_free < srp->tx_lwmark) {

		mutex_exit(srp->tx_lock);
		nge_tx_recycle(ngep, B_FALSE);
		mutex_enter(srp->tx_lock);
	}

	if (srp->tx_free >= num) {

		start = srp->tx_next;

		srp->tx_next = NEXT_INDEX(start, num, srp->desc.nslots);
		srp->tx_free -= num;
		srp->tx_flow += num;
	}

	mutex_exit(srp->tx_lock);
	return (start);
}

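/*
 * Account for "slotnum" descriptors just handed to the hardware;
 * once the last concurrent sender has passed through (tx_flow drops
 * to zero), kick the transmit DMA engine and arm the watchdog.
 */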
static void
nge_tx_start(nge_t *ngep, uint32_t slotnum)
{
	nge_mode_cntl mode_cntl;
	send_ring_t *srp;

	srp = ngep->send;

	/*
	 * Because there can be multiple concurrent threads in
	 * transit through this code, we only want to notify the
	 * hardware once the last one is departing ...
	 */

	mutex_enter(srp->tx_lock);

	srp->tx_flow -= slotnum;
	if (srp->tx_flow == 0) {

		/*
		 * Bump the watchdog counter, thus guaranteeing that it's
		 * nonzero (watchdog activated). Note that non-synchronised
		 * access here means we may race with the reclaim() code
		 * above, but the outcome will be harmless. At worst, the
		 * counter may not get reset on a partial reclaim; but the
		 * large trigger threshold makes false positives unlikely.
		 */
		if (ngep->watchdog == 0)
			ngep->watchdog = 1;

		mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
		mode_cntl.mode_bits.txdm = NGE_SET;
		mode_cntl.mode_bits.tx_rcom_en = NGE_SET;
		nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);
	}
	mutex_exit(srp->tx_lock);
}

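/*
 * Transmit a message by copying it into the pre-mapped bounce buffer
 * attached to a single descriptor.  The mblk chain can be freed as
 * soon as the copy completes, so this path suits small packets where
 * one bcopy() is cheaper than a DMA bind.
 */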
static enum send_status
nge_send_copy(nge_t *ngep, mblk_t *mp, send_ring_t *srp);
#pragma	inline(nge_send_copy)

static enum send_status
nge_send_copy(nge_t *ngep, mblk_t *mp, send_ring_t *srp)
{
	size_t totlen;
	size_t mblen;
	uint32_t flags;
	uint32_t bds;
	uint32_t start_index;
	char *txb;
	mblk_t *bp;
	void *hw_sbd_p;
	sw_tx_sbd_t *ssbdp;
	boolean_t tfint;

	mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &flags);
	bds = 0x1;

	if ((uint32_t)-1 == (start_index = nge_tx_alloc(ngep, bds)))
		return (SEND_COPY_FAIL);

	ASSERT(start_index < srp->desc.nslots);

	/*
	 * Up to this point, nothing can fail, so we can go straight
	 * to claiming our already-reserved place on the train.
	 *
	 * This is the point of no return!
	 */

	tfint = ((start_index % ngep->tfint_threshold) == 0);
	bp = mp;
	totlen = 0;
	ssbdp = &srp->sw_sbds[start_index];
	ASSERT(ssbdp->flags == HOST_OWN);

	txb = DMA_VPTR(ssbdp->pbuf);
	for (; bp != NULL; bp = bp->b_cont) {
		if ((mblen = MBLKL(bp)) == 0)
			continue;
		if ((totlen += mblen) <= ngep->max_sdu) {
			bcopy(bp->b_rptr, txb, mblen);
			txb += mblen;
		}
	}

	DMA_SYNC(ssbdp->pbuf, DDI_DMA_SYNC_FORDEV);

	/* Fill & sync hw desc */

	hw_sbd_p = DMA_VPTR(ssbdp->desc);

	ngep->desc_attr.txd_fill(hw_sbd_p, &ssbdp->pbuf.cookie, totlen,
	    flags, B_TRUE, tfint);
	nge_tx_desc_sync(ngep, start_index, bds, DDI_DMA_SYNC_FORDEV);

	ssbdp->flags = CONTROLER_OWN;

	nge_tx_start(ngep, bds);

	/*
	 * The return status indicates that the message can be freed
	 * right away, as we've already copied the contents ...
	 */

	freemsg(mp);
	return (SEND_COPY_SUCESS);
}

/*
 * static enum send_status
 * nge_send_mapped(nge_t *ngep, mblk_t *mp, size_t fragno);
 * #pragma inline(nge_send_mapped)
 */

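/*
 * Transmit a message by DMA-binding each fragment in place and
 * spreading the resulting cookies across consecutive descriptors;
 * no data copy is performed.  The mblk chain must remain intact
 * until the hardware signals completion, so it is parked in the
 * software descriptor of the last BD and freed by nge_tx_recycle().
 */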
static enum send_status
nge_send_mapped(nge_t *ngep, mblk_t *mp, size_t fragno)
{
	int err;
	boolean_t end;
	uint32_t i;
	uint32_t j;
	uint32_t ncookies;
	uint32_t slot;
	uint32_t nslots;
	uint32_t mblen;
	uint32_t flags;
	uint32_t start_index;
	uint32_t end_index;
	mblk_t *bp;
	void *hw_sbd_p;
	send_ring_t *srp;
	nge_dmah_node_t *dmah;
	nge_dmah_node_t *dmer;
	nge_dmah_list_t dmah_list;
	ddi_dma_cookie_t cookie[NGE_MAX_COOKIES * NGE_MAP_FRAGS];
	boolean_t tfint;

	srp = ngep->send;
	nslots = srp->desc.nslots;

	mutex_enter(&srp->dmah_lock);
	err = nge_tx_dmah_pop(&srp->dmah_free, &dmah_list, fragno);
	mutex_exit(&srp->dmah_lock);

	if (err != 0) {
		return (SEND_MAP_FAIL);
	}

	/*
	 * Pre-scan the message chain, binding each fragment for DMA
	 * as we go.  This approach has two advantages:
	 *
	 * 1. We learn exactly how many cookies (and hence BDs) the
	 *    message will consume before committing any descriptors.
	 *
	 * 2. If enough resources cannot be acquired up front, we fail
	 *    immediately and the caller falls back to sending the
	 *    message by the copy path.
	 */

	slot = 0;
	dmah = dmah_list.head;

	mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &flags);

	for (bp = mp; bp != NULL; bp = bp->b_cont) {

		mblen = MBLKL(bp);
		if (mblen == 0)
			continue;

		err = ddi_dma_addr_bind_handle(dmah->hndl,
		    NULL, (caddr_t)bp->b_rptr, mblen,
		    DDI_DMA_STREAMING | DDI_DMA_WRITE,
		    DDI_DMA_DONTWAIT, NULL, cookie + slot, &ncookies);

		/*
		 * If the bind fails there is no point in continuing
		 * with the mapped path; fall back to sending the
		 * message by copy.
		 *
		 * Per the chip vendor's recommendation, a fragment
		 * should not map to more than NGE_MAX_COOKIES cookies.
		 */
		if (err != DDI_DMA_MAPPED || ncookies > NGE_MAX_COOKIES) {
			NGE_DEBUG(("err(%x) map tx bulk fails"
			    " cookie(%x), ncookies(%x)",
			    err, cookie[slot].dmac_laddress, ncookies));
			goto map_fail;
		}

		/*
		 * Step through the remaining cookies of this fragment;
		 * each cookie will consume one BD.
		 */
		for (end_index = slot + ncookies;
		    ++slot != end_index;
		    ddi_dma_nextcookie(dmah->hndl, cookie + slot))
			;

		dmah = dmah->next;
	}

	/*
	 * Now allocate tx descriptors and fill them.
	 *
	 * IMPORTANT: once we claim our places on the ring, nothing
	 * below can fail, so we can go straight to filling our
	 * already-reserved descriptors.
	 *
	 * This is the point of no return!
	 */

	if ((uint32_t)-1 == (start_index = nge_tx_alloc(ngep, slot)))
		goto map_fail;

	ASSERT(start_index < nslots);

	/* fill & sync hw desc, going in reverse order */

	end = B_TRUE;
	end_index = NEXT_INDEX(start_index, slot - 1, nslots);

	for (i = slot - 1, j = end_index; start_index - j != 0;
	    j = PREV(j, nslots), --i) {

		tfint = ((j % ngep->tfint_threshold) == 0);
		hw_sbd_p = DMA_VPTR(srp->sw_sbds[j].desc);
		ngep->desc_attr.txd_fill(hw_sbd_p, cookie + i,
		    cookie[i].dmac_size, 0, end, tfint);

		end = B_FALSE;
	}

	hw_sbd_p = DMA_VPTR(srp->sw_sbds[j].desc);
	tfint = ((j % ngep->tfint_threshold) == 0);
	ngep->desc_attr.txd_fill(hw_sbd_p, cookie + i, cookie[i].dmac_size,
	    flags, end, tfint);

	nge_tx_desc_sync(ngep, start_index, slot, DDI_DMA_SYNC_FORDEV);

	/* fill sw desc */

	for (j = start_index; end_index - j != 0; j = NEXT(j, nslots)) {

		srp->sw_sbds[j].flags = CONTROLER_OWN;
	}

	srp->sw_sbds[j].mp = mp;
	srp->sw_sbds[j].mp_hndl = dmah_list;
	srp->sw_sbds[j].frags = (uint32_t)fragno;
	srp->sw_sbds[j].flags = CONTROLER_OWN;

	nge_tx_start(ngep, slot);

	/*
	 * The return status indicates that the message cannot be
	 * freed right away; it must be held until the hardware has
	 * confirmed that transmission completed successfully.
	 */
	return (SEND_MAP_SUCCESS);

map_fail:
	for (dmer = dmah_list.head; dmah - dmer != 0; dmer = dmer->next)
		(void) ddi_dma_unbind_handle(dmer->hndl);

	mutex_enter(&srp->dmah_lock);
	nge_tx_dmah_push(&dmah_list, &srp->dmah_free);
	mutex_exit(&srp->dmah_lock);

	return (SEND_MAP_FAIL);
}

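/*
 * Transmit a single message: choose the mapped (zero-copy) path when
 * the message is long, the fragments are few, and the ring has room
 * for the worst-case cookie count; otherwise copy.  A failed mapped
 * send falls back to copy, and a failed copy send triggers a recycle
 * and one retry before requesting a reschedule.
 */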
static boolean_t
nge_send(nge_t *ngep, mblk_t *mp)
{
	mblk_t *bp;
	send_ring_t *srp;
	enum send_status status;
	uint32_t mblen = 0;
	uint32_t frags = 0;
	nge_statistics_t *nstp = &ngep->statistics;
	nge_sw_statistics_t *sw_stp = &nstp->sw_statistics;

	ASSERT(mp != NULL);
	ASSERT(ngep->nge_mac_state == NGE_MAC_STARTED);

	srp = ngep->send;
	/*
	 * 1. Count the fragments of the message; if there are more
	 *    than NGE_MAP_FRAGS, choose the copy path.
	 *
	 * 2. If the message is longer than the bcopy threshold
	 *    (param_txbcopy_threshold), choose the mapped path.
	 */
	for (frags = 0, bp = mp; bp != NULL; bp = bp->b_cont) {
		if (MBLKL(bp) == 0)
			continue;
		frags++;
		mblen += MBLKL(bp);
	}
	if (mblen > (ngep->max_sdu) || mblen == 0) {
		freemsg(mp);
		return (B_TRUE);
	}
	if ((mblen > ngep->param_txbcopy_threshold) &&
	    (frags <= NGE_MAP_FRAGS) &&
	    (srp->tx_free > frags * NGE_MAX_COOKIES)) {
		status = nge_send_mapped(ngep, mp, frags);
		if (status == SEND_MAP_FAIL)
			status = nge_send_copy(ngep, mp, srp);
	} else {
		status = nge_send_copy(ngep, mp, srp);
	}
	if (status == SEND_COPY_FAIL) {
		nge_tx_recycle(ngep, B_FALSE);
		status = nge_send_copy(ngep, mp, srp);
		if (status == SEND_COPY_FAIL) {
			ngep->resched_needed = 1;
			NGE_DEBUG(("nge_send: send fail!"));
			return (B_FALSE);
		}
	}
	/* Update the software statistics */
	sw_stp->obytes += mblen + ETHERFCSL;
	sw_stp->xmit_count++;

	return (B_TRUE);
}

/*
 * nge_m_tx: send a chain of packets.
 */
mblk_t *
nge_m_tx(void *arg, mblk_t *mp)
{
	nge_t *ngep = arg;
	mblk_t *next;

	rw_enter(ngep->rwlock, RW_READER);
	ASSERT(mp != NULL);
	if (ngep->nge_chip_state != NGE_CHIP_RUNNING) {
		freemsgchain(mp);
		mp = NULL;
	}
	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		if (!nge_send(ngep, mp)) {
			mp->b_next = next;
			break;
		}

		mp = next;
	}
	rw_exit(ngep->rwlock);

	return (mp);
}

/* ARGSUSED */
uint_t
nge_reschedule(caddr_t args1, caddr_t args2)
{
	nge_t *ngep;
	uint_t rslt;

	ngep = (nge_t *)args1;
	rslt = DDI_INTR_UNCLAIMED;

	/*
	 * When the soft interrupt is triggered, check whether it was
	 * raised for the reschedule we requested.
	 */
	if (ngep->nge_mac_state == NGE_MAC_STARTED &&
	    ngep->resched_needed == 1) {
		ngep->resched_needed = 0;
		++ngep->statistics.sw_statistics.tx_resched;
		mac_tx_update(ngep->mh);
		rslt = DDI_INTR_CLAIMED;
	}
	return (rslt);
}

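/*
 * Read back a transmit descriptor's control/status word so that the
 * caller can test the ownership and error bits (TXD_OWN, TXD_ERR),
 * for the "hot" (64-bit address) and "sum" (32-bit address)
 * descriptor formats respectively.
 */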
uint32_t
nge_hot_txd_check(const void *hwd)
{
	uint32_t err_flag;
	const hot_tx_bd *htbdp;

	htbdp = hwd;
	err_flag = htbdp->control_status.cntl_val;
	return (err_flag);
}

uint32_t
nge_sum_txd_check(const void *hwd)
{
	uint32_t err_flag;
	const sum_tx_bd *htbdp;

	htbdp = hwd;
	err_flag = htbdp->control_status.cntl_val;
	return (err_flag);
}


/*
 * Fill in the contents of a tx data descriptor
 * before transmitting.
 */
void
nge_hot_txd_fill(void *hwdesc, const ddi_dma_cookie_t *cookie,
    size_t length, uint32_t sum_flag, boolean_t end, boolean_t tfint)
{
	hot_tx_bd *hw_sbd_p = hwdesc;

	hw_sbd_p->host_buf_addr_hi = cookie->dmac_laddress >> 32;
	hw_sbd_p->host_buf_addr_lo = cookie->dmac_laddress;

	/*
	 * Set the packet length.
	 * Note: the value programmed into this field must be the
	 * original length minus 1.
	 */

	hw_sbd_p->control_status.control_sum_bits.bcnt = length - 1;

	/* request IP header checksum offload */
	if (sum_flag & HCK_IPV4_HDRCKSUM)
		hw_sbd_p->control_status.control_sum_bits.ip_hsum
		    = NGE_SET;
	/* request TCP (full) checksum offload */
	if (sum_flag & HCK_FULLCKSUM)
		hw_sbd_p->control_status.control_sum_bits.tcp_hsum
		    = NGE_SET;
	/*
	 * Request a tx-done interrupt for this BD and mark the end
	 * of the BD chain.
	 */
	if (tfint)
		hw_sbd_p->control_status.control_sum_bits.inten = NGE_SET;
	if (end)
		hw_sbd_p->control_status.control_sum_bits.end = NGE_SET;

	membar_producer();

	/* pass desc to HW */
	hw_sbd_p->control_status.control_sum_bits.own = NGE_SET;
}

void
nge_sum_txd_fill(void *hwdesc, const ddi_dma_cookie_t *cookie,
    size_t length, uint32_t sum_flag, boolean_t end, boolean_t tfint)
{
	sum_tx_bd *hw_sbd_p = hwdesc;

	hw_sbd_p->host_buf_addr = cookie->dmac_address;

	/*
	 * Set the packet length.
	 * Note: the value programmed into this field must be the
	 * original length minus 1.
	 */

	hw_sbd_p->control_status.control_sum_bits.bcnt = length - 1;

	/* request IP header checksum offload */
	if (sum_flag & HCK_IPV4_HDRCKSUM)
		hw_sbd_p->control_status.control_sum_bits.ip_hsum
		    = NGE_SET;
	/* request TCP (full) checksum offload */
	if (sum_flag & HCK_FULLCKSUM)
		hw_sbd_p->control_status.control_sum_bits.tcp_hsum
		    = NGE_SET;
	/*
	 * Request a tx-done interrupt for this BD and mark the end
	 * of the BD chain.
	 */
	if (tfint)
		hw_sbd_p->control_status.control_sum_bits.inten = NGE_SET;
	if (end)
		hw_sbd_p->control_status.control_sum_bits.end = NGE_SET;

	membar_producer();

	/* pass desc to HW */
	hw_sbd_p->control_status.control_sum_bits.own = NGE_SET;
}