/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "nge.h"

#define	TXD_OWN		0x80000000
#define	TXD_ERR		0x40000000
#define	TXD_END		0x20000000
#define	TXD_BCNT_MSK	0x00003FFF


#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_SEND

#define	NGE_TXSWD_RECYCLE(sd)	{\
					(sd)->mp = NULL; \
					(sd)->frags = 0; \
					(sd)->mp_hndl.head = NULL; \
					(sd)->mp_hndl.tail = NULL; \
					(sd)->flags = HOST_OWN; \
				}


static size_t nge_tx_dmah_pop(nge_dmah_list_t *, nge_dmah_list_t *, size_t);
static void nge_tx_dmah_push(nge_dmah_list_t *, nge_dmah_list_t *);


void nge_tx_recycle_all(nge_t *ngep);
#pragma	no_inline(nge_tx_recycle_all)

void
nge_tx_recycle_all(nge_t *ngep)
{
	send_ring_t *srp;
	sw_tx_sbd_t *ssbdp;
	nge_dmah_node_t	*dmah;
	uint32_t slot;
	uint32_t nslots;

	srp = ngep->send;
	nslots = srp->desc.nslots;

	for (slot = 0; slot < nslots; ++slot) {

		ssbdp = srp->sw_sbds + slot;

		DMA_ZERO(ssbdp->desc);

		if (ssbdp->mp != NULL)	{

			for (dmah = ssbdp->mp_hndl.head; dmah != NULL;
			    dmah = dmah->next)
				(void) ddi_dma_unbind_handle(dmah->hndl);

			freemsg(ssbdp->mp);
		}

		NGE_TXSWD_RECYCLE(ssbdp);
	}
}

static size_t
nge_tx_dmah_pop(nge_dmah_list_t *src, nge_dmah_list_t *dst, size_t num)
{
	nge_dmah_node_t	*node;

	for (node = src->head; node != NULL && --num != 0; node = node->next)
		;

	if (num == 0)	{

		dst->head = src->head;
		dst->tail = node;

		if ((src->head = node->next) == NULL)
			src->tail = NULL;

		node->next = NULL;
	}

	return (num);
}

static void
nge_tx_dmah_push(nge_dmah_list_t *src, nge_dmah_list_t *dst)
{
	if (dst->tail != NULL)
		dst->tail->next = src->head;
	else
		dst->head = src->head;

	dst->tail = src->tail;
}
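
/*
 * Illustrative note (not part of the original source): nge_tx_dmah_pop()
 * detaches the first `num' nodes of `src' into `dst' and returns zero on
 * success, or a nonzero shortfall when `src' holds fewer than `num'
 * nodes, in which case both lists are left untouched.
 * nge_tx_dmah_push() splices the whole of `src' onto the tail of `dst'.
 * A minimal usage sketch, assuming the caller holds dmah_lock as the
 * driver does elsewhere in this file:
 *
 *	nge_dmah_list_t batch;
 *
 *	if (nge_tx_dmah_pop(&srp->dmah_free, &batch, 3) == 0) {
 *		(... bind DMA handles taken from `batch' ...)
 *		nge_tx_dmah_push(&batch, &srp->dmah_free);
 *	}
 */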

static void
nge_tx_desc_sync(nge_t *ngep, uint64_t start, uint64_t num, uint_t type)
{
	send_ring_t *srp = ngep->send;
	const size_t txd_size = ngep->desc_attr.txd_size;
	const uint64_t end = srp->desc.nslots * txd_size;

	start = start * txd_size;
	num = num * txd_size;

	if (start + num <= end)
		(void) ddi_dma_sync(srp->desc.dma_hdl, start, num, type);
	else	{

		(void) ddi_dma_sync(srp->desc.dma_hdl, start, 0, type);
		(void) ddi_dma_sync(srp->desc.dma_hdl, 0, start + num - end,
		    type);
	}
}
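
/*
 * Illustrative note (not part of the original source): the descriptor
 * ring is one DMA object, so a sync that would run past the last slot
 * must wrap.  A worked example with assumed values: 1024 slots of
 * 16-byte descriptors give end = 16384; syncing 8 descriptors from
 * slot 1020 gives start = 16320 and num = 128, and start + num = 16448
 * overshoots the end.  The code then issues two syncs: one at offset
 * 16320 (per ddi_dma_sync(9F), a zero length syncs the entire object,
 * which covers the tail of the ring) and one from offset 0 for the
 * 64 wrapped bytes.
 */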

/*
 * Reclaim transmit resources after tx completion.
 */
void
nge_tx_recycle(nge_t *ngep, boolean_t is_intr)
{
	int resched;
	uint32_t stflg;
	size_t len;
	uint64_t free;
	uint64_t slot;
	uint64_t used;
	uint64_t next;
	uint64_t nslots;
	mblk_t *mp;
	sw_tx_sbd_t *ssbdp;
	void *hw_sbd_p;
	send_ring_t *srp;
	nge_dmah_node_t *dme;
	nge_dmah_list_t dmah;

	srp = ngep->send;

	if (is_intr) {
		if (mutex_tryenter(srp->tc_lock) == 0)
			return;
	} else
		mutex_enter(srp->tc_lock);
	mutex_enter(srp->tx_lock);

	next = srp->tx_next;
	used = srp->tx_flow;
	free = srp->tx_free;

	mutex_exit(srp->tx_lock);

	slot = srp->tc_next;
	nslots = srp->desc.nslots;

	used = nslots - free - used;

	ASSERT(slot == NEXT_INDEX(next, free, nslots));

	if (used > srp->tx_hwmark)
		used = srp->tx_hwmark;

	nge_tx_desc_sync(ngep, slot, used, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * Walk the send ring, checking each bd's status word to find
	 * all the bds that have been transmitted successfully, then
	 * reclaim all resources associated with those bds.
	 */

	mp = NULL;
	dmah.head = NULL;
	dmah.tail = NULL;

	for (free = 0; used-- != 0; slot = NEXT(slot, nslots), ++free)	{

		ssbdp = &srp->sw_sbds[slot];
		hw_sbd_p = DMA_VPTR(ssbdp->desc);

		stflg = ngep->desc_attr.txd_check(hw_sbd_p, &len);

		if (ssbdp->flags == HOST_OWN || (TXD_OWN & stflg) != 0)
			break;

		DMA_ZERO(ssbdp->desc);

		if (ssbdp->mp != NULL)	{
			ssbdp->mp->b_next = mp;
			mp = ssbdp->mp;

			if (ssbdp->mp_hndl.head != NULL)
				nge_tx_dmah_push(&ssbdp->mp_hndl, &dmah);
		}

		NGE_TXSWD_RECYCLE(ssbdp);
	}

	/*
	 * We're about to release one or more places :-)
	 * These ASSERTions check that our invariants still hold:
	 *	there must always be at least one free place
	 *	at this point, there must be at least one place NOT free
	 *	we're not about to free more places than were claimed!
	 */

	mutex_enter(srp->tx_lock);

	srp->tx_free += free;
	ngep->watchdog = (srp->desc.nslots - srp->tx_free != 0);

	srp->tc_next = slot;

	ASSERT(srp->tx_free <= nslots);
	ASSERT(srp->tc_next == NEXT_INDEX(srp->tx_next, srp->tx_free, nslots));

	resched = (ngep->resched_needed != 0 && srp->tx_hwmark <= srp->tx_free);

	mutex_exit(srp->tx_lock);
	mutex_exit(srp->tc_lock);

	/* unbind/free mblks */

	for (dme = dmah.head; dme != NULL; dme = dme->next)
		(void) ddi_dma_unbind_handle(dme->hndl);

	mutex_enter(&srp->dmah_lock);
	nge_tx_dmah_push(&dmah, &srp->dmah_free);
	mutex_exit(&srp->dmah_lock);

	freemsgchain(mp);

	/*
	 * By this point we may have reclaimed some resources;
	 * if GLD asked to be notified, report it now.
	 */

	if (resched)
		(void) ddi_intr_trigger_softint(ngep->resched_hdl, NULL);
}

static uint64_t
nge_tx_alloc(nge_t *ngep, uint64_t num)
{
	uint64_t start;
	send_ring_t *srp;

	start = (uint64_t)-1;
	srp = ngep->send;

	mutex_enter(srp->tx_lock);

	if (srp->tx_free < srp->tx_lwmark)	{

		mutex_exit(srp->tx_lock);
		nge_tx_recycle(ngep, B_FALSE);
		mutex_enter(srp->tx_lock);
	}

	if (srp->tx_free >= num)	{

		start = srp->tx_next;

		srp->tx_next = NEXT_INDEX(start, num, srp->desc.nslots);
		srp->tx_free -= num;
		srp->tx_flow += num;
	}

	mutex_exit(srp->tx_lock);
	return (start);
}

static void
nge_tx_start(nge_t *ngep, uint64_t slotnum)
{
	nge_mode_cntl mode_cntl;
	send_ring_t *srp;

	srp = ngep->send;

	/*
	 * Because there can be multiple concurrent threads in
	 * transit through this code, we only want to notify the
	 * hardware once the last one is departing ...
	 */

	mutex_enter(srp->tx_lock);

	srp->tx_flow -= slotnum;
	if (srp->tx_flow == 0) {

		/*
		 * Bump the watchdog counter, thus guaranteeing that it's
		 * nonzero (watchdog activated).  Note that non-synchronised
		 * access here means we may race with the reclaim() code
		 * above, but the outcome will be harmless.  At worst, the
		 * counter may not get reset on a partial reclaim; but the
		 * large trigger threshold makes false positives unlikely.
		 */
		ngep->watchdog++;

		mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
		mode_cntl.mode_bits.txdm = NGE_SET;
		mode_cntl.mode_bits.tx_rcom_en = NGE_SET;
		nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);
	}
	mutex_exit(srp->tx_lock);
}
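
/*
 * Illustrative note (not part of the original source): tx_flow counts
 * the descriptors claimed by senders that have not yet kicked the
 * hardware.  nge_tx_alloc() adds to it and nge_tx_start() subtracts,
 * both under tx_lock, and only the thread that drains it to zero pokes
 * NGE_MODE_CNTL.  For example, two concurrent senders claiming 2 and 3
 * slots drive tx_flow through 0 -> 2 -> 5 -> 3 -> 0, so the hardware
 * is prodded exactly once, by whichever sender finishes last.
 */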

static enum send_status
nge_send_copy(nge_t *ngep, mblk_t *mp, send_ring_t *srp);
#pragma	inline(nge_send_copy)

static enum send_status
nge_send_copy(nge_t *ngep, mblk_t *mp, send_ring_t *srp)
{
	size_t totlen;
	size_t mblen;
	uint32_t flags;
	uint64_t bds;
	uint64_t start_index;
	char *txb;
	mblk_t *bp;
	void *hw_sbd_p;
	sw_tx_sbd_t *ssbdp;

	hcksum_retrieve(mp, NULL, NULL, NULL, NULL,
	    NULL, NULL, &flags);
	bds = 0x1;

	if ((uint64_t)-1 == (start_index = nge_tx_alloc(ngep, bds)))
		return (SEND_COPY_FAIL);

	ASSERT(start_index < srp->desc.nslots);

	/*
	 * Up to this point, nothing can fail, so we can go straight
	 * to claiming our already-reserved place on the train.
	 *
	 * This is the point of no return!
	 */

	bp = mp;
	ssbdp = &srp->sw_sbds[start_index];
	ASSERT(ssbdp->flags == HOST_OWN);

	txb = DMA_VPTR(ssbdp->pbuf);
	totlen = 0;
	for (; bp != NULL; bp = bp->b_cont) {
		if ((mblen = MBLKL(bp)) == 0)
			continue;
		if ((totlen += mblen) <= ngep->max_sdu) {
			bcopy(bp->b_rptr, txb, mblen);
			txb += mblen;
		}
	}

	DMA_SYNC(ssbdp->pbuf, DDI_DMA_SYNC_FORDEV);

	/* Fill & sync hw desc */

	hw_sbd_p = DMA_VPTR(ssbdp->desc);

	ngep->desc_attr.txd_fill(hw_sbd_p, &ssbdp->pbuf.cookie, totlen,
	    flags, B_TRUE);
	nge_tx_desc_sync(ngep, start_index, bds, DDI_DMA_SYNC_FORDEV);

	ssbdp->flags = CONTROLER_OWN;

	nge_tx_start(ngep, bds);

	/*
	 * The return status indicates that the message can be freed
	 * right away, as we've already copied the contents ...
	 */

	freemsg(mp);
	return (SEND_COPY_SUCESS);
}

/*
 * static enum send_status
 * nge_send_mapped(nge_t *ngep, mblk_t *mp, size_t fragno);
 * #pragma	inline(nge_send_mapped)
 */

static enum send_status
nge_send_mapped(nge_t *ngep, mblk_t *mp, size_t fragno)
{
	int err;
	boolean_t end;
	uint32_t i;
	uint32_t j;
	uint32_t ncookies;
	uint32_t slot;
	uint32_t nslots;
	uint32_t mblen;
	uint32_t flags;
	uint64_t start_index;
	uint64_t end_index;
	mblk_t *bp;
	void *hw_sbd_p;
	send_ring_t *srp;
	nge_dmah_node_t *dmah;
	nge_dmah_node_t	*dmer;
	nge_dmah_list_t dmah_list;
	ddi_dma_cookie_t cookie[NGE_MAX_COOKIES * NGE_MAP_FRAGS];

	srp = ngep->send;
	nslots = srp->desc.nslots;

	mutex_enter(&srp->dmah_lock);
	err = nge_tx_dmah_pop(&srp->dmah_free, &dmah_list, fragno);
	mutex_exit(&srp->dmah_lock);

	if (err != 0)	{

		return (SEND_MAP_FAIL);
	}

	/*
	 * Pre-scan the message chain, noting the total number of bytes
	 * and the number of fragments, and pre-binding the DMA address
	 * of each fragment larger than NGE_COPY_SIZE.
	 * This approach has the following advantages:
	 * 1. It acquires detailed information about the resources
	 *	needed to send the message.
	 *
	 * 2. If enough resources cannot be reserved up front, it fails
	 *	at once, and the driver will fall back to the copy method
	 *	to send out the message.
	 */

	slot = 0;
	dmah = dmah_list.head;

	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, NULL, &flags);

	for (bp = mp; bp != NULL; bp = bp->b_cont)	{

		mblen = MBLKL(bp);
		if (mblen == 0)
			continue;

		err = ddi_dma_addr_bind_handle(dmah->hndl,
		    NULL, (caddr_t)bp->b_rptr, mblen,
		    DDI_DMA_STREAMING | DDI_DMA_WRITE,
		    DDI_DMA_DONTWAIT, NULL, cookie + slot, &ncookies);

		/*
		 * If the fragment cannot be mapped successfully, there
		 * is no point in sending the message via the mapped
		 * method; send it via the copy method instead.
		 *
		 * Per Intel's suggestion, the number of cookies
		 * should be less than 4.
		 */
		if (err != DDI_DMA_MAPPED || ncookies > NGE_MAX_COOKIES) {
			NGE_DEBUG(("err(%x) map tx bulk fails"
			    " cookie(%x), ncookies(%x)",
			    err, cookie[slot].dmac_laddress, ncookies));
			goto map_fail;
		}

		/*
		 * Fetch the remaining cookies of this bind;
		 * each cookie will consume one bd.
		 */
		for (end_index = slot + ncookies;
		    ++slot != end_index;
		    ddi_dma_nextcookie(dmah->hndl, cookie + slot))
			;

		dmah = dmah->next;
	}

	/*
	 * Now allocate tx descriptors and fill them.
	 * IMPORTANT:
	 *	Once this claims a place, it is impossible to fail.
	 *
	 * In this version, there's no setup to be done here, and there's
	 * nothing that can fail, so we can go straight to claiming our
	 * already-reserved places on the train.
	 *
	 * This is the point of no return!
	 */

	if ((uint64_t)-1 == (start_index = nge_tx_alloc(ngep, slot)))
		goto map_fail;

	ASSERT(start_index < nslots);

	/* fill & sync hw desc, going in reverse order */

	end = B_TRUE;
	end_index = NEXT_INDEX(start_index, slot - 1, nslots);

	for (i = slot - 1, j = end_index; start_index - j != 0;
	    j = PREV(j, nslots), --i)	{

		hw_sbd_p = DMA_VPTR(srp->sw_sbds[j].desc);
		ngep->desc_attr.txd_fill(hw_sbd_p, cookie + i,
		    cookie[i].dmac_size, 0, end);

		end = B_FALSE;
	}

	hw_sbd_p = DMA_VPTR(srp->sw_sbds[j].desc);
	ngep->desc_attr.txd_fill(hw_sbd_p, cookie + i, cookie[i].dmac_size,
	    flags, end);

	nge_tx_desc_sync(ngep, start_index, slot, DDI_DMA_SYNC_FORDEV);

	/* fill sw desc */

	for (j = start_index; end_index - j != 0; j = NEXT(j, nslots))	{

		srp->sw_sbds[j].flags = CONTROLER_OWN;
	}

	srp->sw_sbds[j].mp = mp;
	srp->sw_sbds[j].mp_hndl = dmah_list;
	srp->sw_sbds[j].frags = fragno;
	srp->sw_sbds[j].flags = CONTROLER_OWN;

	nge_tx_start(ngep, slot);

	/*
	 * The return status indicates that the message cannot be freed
	 * right away; it must be held until we are sure it has been
	 * sent out successfully.
	 */
	return (SEND_MAP_SUCCESS);

map_fail:
	for (dmer = dmah_list.head; dmah - dmer != 0; dmer = dmer->next)
		(void) ddi_dma_unbind_handle(dmer->hndl);

	mutex_enter(&srp->dmah_lock);
	nge_tx_dmah_push(&dmah_list, &srp->dmah_free);
	mutex_exit(&srp->dmah_lock);

	return (SEND_MAP_FAIL);
}
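
/*
 * Illustrative note (not part of the original source): the hw
 * descriptors are filled back to front so that the head descriptor is
 * handed to the hardware only after the rest of the chain is ready.
 * For example, with start_index s and slot = 3 cookies, the loop above
 * fills descriptor s+2 (carrying the END bit) and then s+1, and the
 * head descriptor s is filled last, carrying the checksum flags.
 */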

static boolean_t
nge_send(nge_t *ngep, mblk_t *mp)
{
	mblk_t *bp;
	send_ring_t *srp;
	enum send_status status;
	uint32_t mblen = 0;
	uint32_t frags = 0;
	nge_statistics_t *nstp = &ngep->statistics;
	nge_sw_statistics_t *sw_stp = &nstp->sw_statistics;

	ASSERT(mp != NULL);
	ASSERT(ngep->nge_mac_state == NGE_MAC_STARTED);

	srp = ngep->send;
	/*
	 * Count the fragments and the total length of the message:
	 *
	 * 1. If the total length exceeds the bcopy threshold and enough
	 *	tx descriptors are free, choose the mapped method.
	 *
	 * 2. Otherwise, choose the copy method.
	 */
	for (frags = 0, bp = mp; bp != NULL; bp = bp->b_cont) {
		if (MBLKL(bp) == 0)
			continue;
		frags++;
		mblen += MBLKL(bp);
	}
	if (mblen > (ngep->max_sdu) || mblen == 0) {
		freemsg(mp);
		return (B_TRUE);
	}

	if ((mblen > ngep->param_txbcopy_threshold) &&
	    (srp->tx_free > frags * NGE_MAX_COOKIES)) {
		status = nge_send_mapped(ngep, mp, frags);
		if (status == SEND_MAP_FAIL)
			status = nge_send_copy(ngep, mp, srp);
	} else {
		status = nge_send_copy(ngep, mp, srp);
	}
	if (status == SEND_COPY_FAIL) {
		nge_tx_recycle(ngep, B_FALSE);
		status = nge_send_copy(ngep, mp, srp);
		if (status == SEND_COPY_FAIL) {
			ngep->resched_needed = 1;
			NGE_DEBUG(("nge_send: send fail!"));
			return (B_FALSE);
		}
	}
	/* Update the software statistics */
	sw_stp->obytes += mblen + ETHERFCSL;
	sw_stp->xmit_count++;

	return (B_TRUE);
}
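
/*
 * Illustrative note (not part of the original source): with a
 * hypothetical param_txbcopy_threshold of 256 bytes, a 1514-byte frame
 * spread over two fragments takes the mapped path, provided more than
 * 2 * NGE_MAX_COOKIES descriptors are free, while a 60-byte frame is
 * simply bcopy'd into the slot's pre-allocated DMA buffer, which is
 * cheaper than binding and later unbinding a DMA handle for so few
 * bytes.
 */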

/*
 * nge_m_tx : Send a chain of packets.
 */
mblk_t *
nge_m_tx(void *arg, mblk_t *mp)
{
	nge_t *ngep = arg;
	mblk_t *next;

	rw_enter(ngep->rwlock, RW_READER);
	ASSERT(mp != NULL);
	if (ngep->nge_chip_state != NGE_CHIP_RUNNING) {
		freemsgchain(mp);
		mp = NULL;
	}
	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		if (!nge_send(ngep, mp)) {
			mp->b_next = next;
			break;
		}

		mp = next;
	}
	rw_exit(ngep->rwlock);

	return (mp);
}

/* ARGSUSED */
uint_t
nge_reschedule(caddr_t args1, caddr_t args2)
{
	nge_t *ngep;
	uint_t rslt;

	ngep = (nge_t *)args1;
	rslt = DDI_INTR_UNCLAIMED;

	/*
	 * When the soft interrupt is triggered, check whether it
	 * was caused by our expected reschedule request.
	 */
	if (ngep->nge_mac_state == NGE_MAC_STARTED &&
	    ngep->resched_needed == 1) {
		ngep->resched_needed = 0;
		++ngep->statistics.sw_statistics.tx_resched;
		mac_tx_update(ngep->mh);
		rslt = DDI_INTR_CLAIMED;
	}
	return (rslt);
}

uint32_t
nge_hot_txd_check(const void *hwd, size_t *len)
{
	uint32_t err_flag;
	const hot_tx_bd *htbdp;

	htbdp = hwd;
	err_flag = htbdp->control_status.cntl_val & ~TXD_BCNT_MSK;

	*len = htbdp->control_status.status_bits.bcnt;
	return (err_flag);
}

uint32_t
nge_sum_txd_check(const void *hwd, size_t *len)
{
	uint32_t err_flag;
	const sum_tx_bd *htbdp;

	htbdp = hwd;
	err_flag = htbdp->control_status.cntl_val & ~TXD_BCNT_MSK;

	*len = htbdp->control_status.status_bits.bcnt;
	return (err_flag);
}


/*
 * Fill in the contents of a Tx data descriptor
 * before transmitting.
 */

void
nge_hot_txd_fill(void *hwdesc, const ddi_dma_cookie_t *cookie,
	size_t length, uint32_t sum_flag, boolean_t end)
{
	hot_tx_bd *hw_sbd_p = hwdesc;

	hw_sbd_p->host_buf_addr_hi = cookie->dmac_laddress >> 32;
	hw_sbd_p->host_buf_addr_lo = cookie->dmac_laddress;

	/*
	 * Set the length of the packet.
	 * Note: the value filled in here must be
	 * the original length minus 1.
	 */

	hw_sbd_p->control_status.control_sum_bits.bcnt = length - 1;

	/* setting ip checksum */
	if (sum_flag & HCK_IPV4_HDRCKSUM)
		hw_sbd_p->control_status.control_sum_bits.ip_hsum
		    = NGE_SET;
	/* setting tcp checksum */
	if (sum_flag & HCK_FULLCKSUM)
		hw_sbd_p->control_status.control_sum_bits.tcp_hsum
		    = NGE_SET;
	/*
	 * indicating the end of BDs
	 */
	if (end)
		hw_sbd_p->control_status.control_sum_bits.end = NGE_SET;

	membar_producer();

	/* pass desc to HW */
	hw_sbd_p->control_status.control_sum_bits.own = NGE_SET;
}

void
nge_sum_txd_fill(void *hwdesc, const ddi_dma_cookie_t *cookie,
	size_t length, uint32_t sum_flag, boolean_t end)
{
	sum_tx_bd *hw_sbd_p = hwdesc;

	hw_sbd_p->host_buf_addr = cookie->dmac_address;

	/*
	 * Set the length of the packet.
	 * Note: the value filled in here must be
	 * the original length minus 1.
	 */

	hw_sbd_p->control_status.control_sum_bits.bcnt = length - 1;

	/* setting ip checksum */
	if (sum_flag & HCK_IPV4_HDRCKSUM)
		hw_sbd_p->control_status.control_sum_bits.ip_hsum
		    = NGE_SET;
	/* setting tcp checksum */
	if (sum_flag & HCK_FULLCKSUM)
		hw_sbd_p->control_status.control_sum_bits.tcp_hsum
		    = NGE_SET;
	/*
	 * indicating the end of BDs
	 */
	if (end)
		hw_sbd_p->control_status.control_sum_bits.end = NGE_SET;

	membar_producer();

	/* pass desc to HW */
	hw_sbd_p->control_status.control_sum_bits.own = NGE_SET;
}