/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "nge.h"

#define	TXD_OWN		0x80000000
#define	TXD_ERR		0x40000000
#define	TXD_END		0x20000000
#define	TXD_BCNT_MSK	0x00003FFF


#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_SEND

#define	NGE_TXSWD_RECYCLE(sd)	{\
					(sd)->mp = NULL; \
					(sd)->frags = 0; \
					(sd)->mp_hndl.head = NULL; \
					(sd)->mp_hndl.tail = NULL; \
					(sd)->flags = HOST_OWN; \
				}


static size_t nge_tx_dmah_pop(nge_dmah_list_t *, nge_dmah_list_t *, size_t);
static void nge_tx_dmah_push(nge_dmah_list_t *, nge_dmah_list_t *);

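/*
 * nge_tx_recycle_all() -- free every pending tx buffer, unbind any
 * outstanding DMA handles and return all software send descriptors
 * to HOST_OWN state.  Intended for the reset/stop path, when the
 * hardware no longer owns any descriptor.
 */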
void nge_tx_recycle_all(nge_t *ngep);
#pragma	no_inline(nge_tx_recycle_all)

void
nge_tx_recycle_all(nge_t *ngep)
{
	send_ring_t *srp;
	sw_tx_sbd_t *ssbdp;
	nge_dmah_node_t	*dmah;
	uint32_t slot;
	uint32_t nslots;

	srp = ngep->send;
	nslots = srp->desc.nslots;

	for (slot = 0; slot < nslots; ++slot) {

		ssbdp = srp->sw_sbds + slot;

		DMA_ZERO(ssbdp->desc);

		if (ssbdp->mp != NULL)	{

			for (dmah = ssbdp->mp_hndl.head; dmah != NULL;
			    dmah = dmah->next)
				(void) ddi_dma_unbind_handle(dmah->hndl);

			freemsg(ssbdp->mp);
		}

		NGE_TXSWD_RECYCLE(ssbdp);
	}
	if (ngep->nge_mac_state == NGE_MAC_STARTED &&
	    ngep->resched_needed == 1) {
		ngep->resched_needed = 0;
		mac_tx_update(ngep->mh);
	}
}

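/*
 * Detach the first <num> handles from the head of list <src> and hand
 * them to <dst>.  Returns 0 on success; if <src> holds fewer than
 * <num> nodes, nothing is moved and the shortfall is returned.
 */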
static size_t
nge_tx_dmah_pop(nge_dmah_list_t *src, nge_dmah_list_t *dst, size_t num)
{
	nge_dmah_node_t	*node;

	for (node = src->head; node != NULL && --num != 0; node = node->next)
		;

	if (num == 0)	{

		dst->head = src->head;
		dst->tail = node;

		if ((src->head = node->next) == NULL)
			src->tail = NULL;

		node->next = NULL;
	}

	return (num);
}

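/*
 * Append the whole of list <src> to the tail of list <dst>; the caller
 * is expected to reinitialise <src> afterwards if it will be reused.
 */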
static void
nge_tx_dmah_push(nge_dmah_list_t *src, nge_dmah_list_t *dst)
{
	if (dst->tail != NULL)
		dst->tail->next = src->head;
	else
		dst->head = src->head;

	dst->tail = src->tail;
}

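/*
 * Sync <bds> hardware descriptors starting at <start_index>.  The send
 * ring is circular, so when the requested range wraps past the last
 * slot the sync is issued in two pieces: the tail of the ring first
 * (the zero length in the first ddi_dma_sync() call covers from
 * <start> onwards), then the wrapped remainder from slot 0.
 */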
static void
nge_tx_desc_sync(nge_t *ngep, uint32_t start_index, uint32_t bds, uint_t type)
{
	send_ring_t *srp = ngep->send;
	const size_t txd_size = ngep->desc_attr.txd_size;
	const uint64_t end = srp->desc.nslots * txd_size;
	uint64_t start;
	uint64_t num;

	start = start_index * txd_size;
	num = bds * txd_size;

	if (start + num <= end)
		(void) ddi_dma_sync(srp->desc.dma_hdl, start, num, type);
	else	{

		(void) ddi_dma_sync(srp->desc.dma_hdl, start, 0, type);
		(void) ddi_dma_sync(srp->desc.dma_hdl, 0, start + num - end,
		    type);
	}
}

/*
 * Reclaim the resources associated with bds the hardware has finished
 * transmitting.
 */
void
nge_tx_recycle(nge_t *ngep, boolean_t is_intr)
{
	int resched;
	uint32_t stflg;
	uint32_t free;
	uint32_t slot;
	uint32_t used;
	uint32_t next;
	uint32_t nslots;
	mblk_t *mp;
	sw_tx_sbd_t *ssbdp;
	void *hw_sbd_p;
	send_ring_t *srp;
	nge_dmah_node_t *dme;
	nge_dmah_list_t dmah;

	srp = ngep->send;

	if (is_intr) {
		if (mutex_tryenter(srp->tc_lock) == 0)
			return;
	} else
		mutex_enter(srp->tc_lock);
	mutex_enter(srp->tx_lock);

	next = srp->tx_next;
	used = srp->tx_flow;
	free = srp->tx_free;

	mutex_exit(srp->tx_lock);

	slot = srp->tc_next;
	nslots = srp->desc.nslots;

	used = nslots - free - used;

	ASSERT(slot == NEXT_INDEX(next, free, nslots));
	if (used == 0) {
		ngep->watchdog = 0;
		mutex_exit(srp->tc_lock);
		return;
	}

	if (used > srp->tx_hwmark && ngep->resched_needed == 0)
		used = srp->tx_hwmark;

	nge_tx_desc_sync(ngep, slot, used, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * Walk the send ring, checking each bd's status word, to find
	 * the bds that have been transmitted successfully, then reclaim
	 * the resources associated with them.
	 */

	mp = NULL;
	dmah.head = NULL;
	dmah.tail = NULL;

	for (free = 0; used-- != 0; slot = NEXT(slot, nslots), ++free)	{

		ssbdp = &srp->sw_sbds[slot];
		hw_sbd_p = DMA_VPTR(ssbdp->desc);

		if (ssbdp->flags == HOST_OWN)
			break;
		stflg = ngep->desc_attr.txd_check(hw_sbd_p);
		if ((stflg & TXD_OWN) != 0)
			break;
		DMA_ZERO(ssbdp->desc);
		if (ssbdp->mp != NULL)	{
			ssbdp->mp->b_next = mp;
			mp = ssbdp->mp;

			if (ssbdp->mp_hndl.head != NULL)
				nge_tx_dmah_push(&ssbdp->mp_hndl, &dmah);
		}

		NGE_TXSWD_RECYCLE(ssbdp);
	}

	/*
	 * We're about to release one or more places :-)
	 * These ASSERTions check that our invariants still hold:
	 *	there must always be at least one free place;
	 *	at this point, there must be at least one place NOT free;
	 *	we're not about to free more places than were claimed!
	 */

	if (free == 0) {
		mutex_exit(srp->tc_lock);
		return;
	}

	mutex_enter(srp->tx_lock);

	srp->tx_free += free;
	ngep->watchdog = (srp->desc.nslots - srp->tx_free != 0);

	srp->tc_next = slot;

	ASSERT(srp->tx_free <= nslots);
	ASSERT(srp->tc_next == NEXT_INDEX(srp->tx_next, srp->tx_free, nslots));

	resched = (ngep->resched_needed != 0 && srp->tx_hwmark <= srp->tx_free);

	mutex_exit(srp->tx_lock);
	mutex_exit(srp->tc_lock);

	/* unbind/free mblks */

	for (dme = dmah.head; dme != NULL; dme = dme->next)
		(void) ddi_dma_unbind_handle(dme->hndl);
	if (dmah.head != NULL) {
		mutex_enter(&srp->dmah_lock);
		nge_tx_dmah_push(&dmah, &srp->dmah_free);
		mutex_exit(&srp->dmah_lock);
	}
	freemsgchain(mp);

	/*
	 * Up to this point we may have reclaimed some resources;
	 * if there is a pending request to notify gld, report it now.
	 */

	if (resched)
		(void) ddi_intr_trigger_softint(ngep->resched_hdl, NULL);
}

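/*
 * Reserve <num> contiguous send descriptors.  When the ring is running
 * low a recycle pass is attempted first.  Returns the index of the
 * first reserved slot, or (uint32_t)-1 if <num> slots are not free.
 */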
static uint32_t
nge_tx_alloc(nge_t *ngep, uint32_t num)
{
	uint32_t start;
	send_ring_t *srp;

	start = (uint32_t)-1;
	srp = ngep->send;

	mutex_enter(srp->tx_lock);

	if (srp->tx_free < srp->tx_lwmark)	{

		mutex_exit(srp->tx_lock);
		nge_tx_recycle(ngep, B_FALSE);
		mutex_enter(srp->tx_lock);
	}

	if (srp->tx_free >= num)	{

		start = srp->tx_next;

		srp->tx_next = NEXT_INDEX(start, num, srp->desc.nslots);
		srp->tx_free -= num;
		srp->tx_flow += num;
	}

	mutex_exit(srp->tx_lock);
	return (start);
}

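/*
 * Hand <slotnum> filled descriptors over to the hardware.  tx_flow
 * counts descriptors claimed but not yet submitted, so the tx DMA
 * engine is only kicked once the last concurrent sender has finished
 * filling its slots.
 */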
static void
nge_tx_start(nge_t *ngep, uint32_t slotnum)
{
	nge_mode_cntl mode_cntl;
	send_ring_t *srp;

	srp = ngep->send;

	/*
	 * Because there can be multiple concurrent threads in
	 * transit through this code, we only want to notify the
	 * hardware once the last one is departing ...
	 */

	mutex_enter(srp->tx_lock);

	srp->tx_flow -= slotnum;
	if (srp->tx_flow == 0) {

		/*
		 * Bump the watchdog counter, thus guaranteeing that it's
		 * nonzero (watchdog activated).  Note that non-synchronised
		 * access here means we may race with the reclaim() code
		 * above, but the outcome will be harmless.  At worst, the
		 * counter may not get reset on a partial reclaim; but the
		 * large trigger threshold makes false positives unlikely.
		 */
		if (ngep->watchdog == 0)
			ngep->watchdog = 1;

		mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
		mode_cntl.mode_bits.txdm = NGE_SET;
		mode_cntl.mode_bits.tx_rcom_en = NGE_SET;
		nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);
	}
	mutex_exit(srp->tx_lock);
}

static enum send_status
nge_send_copy(nge_t *ngep, mblk_t *mp, send_ring_t *srp);
#pragma	inline(nge_send_copy)

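/*
 * Copy path: coalesce the whole message into the slot's pre-allocated
 * DMA buffer, so a single bd always suffices.  The mblk chain can be
 * freed as soon as the descriptor has been handed to the hardware.
 */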
static enum send_status
nge_send_copy(nge_t *ngep, mblk_t *mp, send_ring_t *srp)
{
	size_t totlen;
	size_t mblen;
	uint32_t flags;
	uint32_t bds;
	uint32_t start_index;
	char *txb;
	mblk_t *bp;
	void *hw_sbd_p;
	sw_tx_sbd_t *ssbdp;
	boolean_t tfint;

	hcksum_retrieve(mp, NULL, NULL, NULL, NULL,
	    NULL, NULL, &flags);
	bds = 0x1;

	if ((uint32_t)-1 == (start_index = nge_tx_alloc(ngep, bds)))
		return (SEND_COPY_FAIL);

	ASSERT(start_index < srp->desc.nslots);

	/*
	 * Up to this point, there's nothing that can fail,
	 * so we can go straight to claiming our
	 * already-reserved place on the train.
	 *
	 * This is the point of no return!
	 */

	tfint = ((start_index % ngep->tfint_threshold) == 0);
	bp = mp;
	ssbdp = &srp->sw_sbds[start_index];
	ASSERT(ssbdp->flags == HOST_OWN);

	txb = DMA_VPTR(ssbdp->pbuf);
	totlen = 0;
	for (; bp != NULL; bp = bp->b_cont) {
		if ((mblen = MBLKL(bp)) == 0)
			continue;
		if ((totlen += mblen) <= ngep->max_sdu) {
			bcopy(bp->b_rptr, txb, mblen);
			txb += mblen;
		}
	}

	DMA_SYNC(ssbdp->pbuf, DDI_DMA_SYNC_FORDEV);

	/* Fill & sync hw desc */

	hw_sbd_p = DMA_VPTR(ssbdp->desc);

	ngep->desc_attr.txd_fill(hw_sbd_p, &ssbdp->pbuf.cookie, totlen,
	    flags, B_TRUE, tfint);
	nge_tx_desc_sync(ngep, start_index, bds, DDI_DMA_SYNC_FORDEV);

	ssbdp->flags = CONTROLER_OWN;

	nge_tx_start(ngep, bds);

	/*
	 * The return status indicates that the message can be freed
	 * right away, as we've already copied the contents ...
	 */

	freemsg(mp);
	return (SEND_COPY_SUCESS);
}

/*
 * static enum send_status
 * nge_send_mapped(nge_t *ngep, mblk_t *mp, size_t fragno);
 * #pragma	inline(nge_send_mapped)
 */

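/*
 * Mapped path: bind each fragment of the message directly for DMA
 * instead of copying it, at the cost of one DMA handle per fragment
 * and up to NGE_MAX_COOKIES bds per handle.  On any failure the
 * caller falls back to nge_send_copy().
 */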
static enum send_status
nge_send_mapped(nge_t *ngep, mblk_t *mp, size_t fragno)
{
	int err;
	boolean_t end;
	uint32_t i;
	uint32_t j;
	uint32_t ncookies;
	uint32_t slot;
	uint32_t nslots;
	uint32_t mblen;
	uint32_t flags;
	uint32_t start_index;
	uint32_t end_index;
	mblk_t *bp;
	void *hw_sbd_p;
	send_ring_t *srp;
	nge_dmah_node_t *dmah;
	nge_dmah_node_t	*dmer;
	nge_dmah_list_t dmah_list;
	ddi_dma_cookie_t cookie[NGE_MAX_COOKIES * NGE_MAP_FRAGS];
	boolean_t tfint;

	srp = ngep->send;
	nslots = srp->desc.nslots;

	mutex_enter(&srp->dmah_lock);
	err = nge_tx_dmah_pop(&srp->dmah_free, &dmah_list, fragno);
	mutex_exit(&srp->dmah_lock);

	if (err != 0)	{

		return (SEND_MAP_FAIL);
	}

	/*
	 * Pre-scan the message chain, binding each fragment for DMA
	 * up front and noting how many bds each bind will consume.
	 * Doing the binds first has two advantages:
	 * 1. We learn exactly how many resources are needed to send
	 *	the message before committing any descriptors.
	 *
	 * 2. If we cannot acquire enough resources, we fail at once
	 *	and the driver falls back to the copy path to send
	 *	the message.
	 */

	slot = 0;
	dmah = dmah_list.head;

	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, NULL, &flags);

	for (bp = mp; bp != NULL; bp = bp->b_cont)	{

		mblen = MBLKL(bp);
		if (mblen == 0)
			continue;

		err = ddi_dma_addr_bind_handle(dmah->hndl,
		    NULL, (caddr_t)bp->b_rptr, mblen,
		    DDI_DMA_STREAMING | DDI_DMA_WRITE,
		    DDI_DMA_DONTWAIT, NULL, cookie + slot, &ncookies);

		/*
		 * If the fragment cannot be mapped successfully, there
		 * is no point sending the message via the mapped path;
		 * fall back to the copy path instead.
		 *
		 * Following Intel's suggestion, it is better to keep
		 * the number of cookies per bind below 4.
		 */
		if (err != DDI_DMA_MAPPED || ncookies > NGE_MAX_COOKIES) {
			NGE_DEBUG(("err(%x) map tx bulk fails"
			    " cookie(%x), ncookies(%x)",
			    err, cookie[slot].dmac_laddress, ncookies));
			goto map_fail;
		}

		/*
		 * Collect the remaining cookies of this bind; each
		 * cookie will consume one bd.
		 */
		for (end_index = slot + ncookies;
		    ++slot != end_index;
		    ddi_dma_nextcookie(dmah->hndl, cookie + slot))
			;

		dmah = dmah->next;
	}

	/*
	 * Now allocate tx descriptors and fill them.
	 * IMPORTANT:
	 *	Once it has claimed its places, it is impossible
	 *	to fail.
	 *
	 * In this version, there's no setup to be done here, and there's
	 * nothing that can fail, so we can go straight to claiming our
	 * already-reserved places on the train.
	 *
	 * This is the point of no return!
	 */


	if ((uint32_t)-1 == (start_index = nge_tx_alloc(ngep, slot)))
		goto map_fail;

	ASSERT(start_index < nslots);

	/* fill & sync hw desc, going in reverse order */

	end = B_TRUE;
	end_index = NEXT_INDEX(start_index, slot - 1, nslots);

	for (i = slot - 1, j = end_index; start_index - j != 0;
	    j = PREV(j, nslots), --i)	{

		tfint = ((j % ngep->tfint_threshold) == 0);
		hw_sbd_p = DMA_VPTR(srp->sw_sbds[j].desc);
		ngep->desc_attr.txd_fill(hw_sbd_p, cookie + i,
		    cookie[i].dmac_size, 0, end, tfint);

		end = B_FALSE;
	}

	hw_sbd_p = DMA_VPTR(srp->sw_sbds[j].desc);
	tfint = ((j % ngep->tfint_threshold) == 0);
	ngep->desc_attr.txd_fill(hw_sbd_p, cookie + i, cookie[i].dmac_size,
	    flags, end, tfint);

	nge_tx_desc_sync(ngep, start_index, slot, DDI_DMA_SYNC_FORDEV);

	/* fill sw desc */

	for (j = start_index; end_index - j != 0; j = NEXT(j, nslots)) {

		srp->sw_sbds[j].flags = CONTROLER_OWN;
	}

	srp->sw_sbds[j].mp = mp;
	srp->sw_sbds[j].mp_hndl = dmah_list;
	srp->sw_sbds[j].frags = (uint32_t)fragno;
	srp->sw_sbds[j].flags = CONTROLER_OWN;

	nge_tx_start(ngep, slot);

	/*
	 * The return status indicates that the message must not be
	 * freed right away: it cannot go until we know it has been
	 * transmitted successfully.
	 */
	return (SEND_MAP_SUCCESS);

map_fail:
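	/*
	 * Unbind only the handles that were successfully bound before
	 * the failure (dmah points at the first unbound node), then
	 * return the whole list to the free pool.
	 */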
	for (dmer = dmah_list.head; dmah - dmer != 0; dmer = dmer->next)
		(void) ddi_dma_unbind_handle(dmer->hndl);

	mutex_enter(&srp->dmah_lock);
	nge_tx_dmah_push(&dmah_list, &srp->dmah_free);
	mutex_exit(&srp->dmah_lock);

	return (SEND_MAP_FAIL);
}

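/*
 * Send a single message: choose between the mapped and the copy path,
 * update the software statistics, and return B_TRUE unless the
 * message must be retried later.
 */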
static boolean_t
nge_send(nge_t *ngep, mblk_t *mp)
{
	mblk_t *bp;
	send_ring_t *srp;
	enum send_status status;
	uint32_t mblen = 0;
	uint32_t frags = 0;
	nge_statistics_t *nstp = &ngep->statistics;
	nge_sw_statistics_t *sw_stp = &nstp->sw_statistics;

	ASSERT(mp != NULL);
	ASSERT(ngep->nge_mac_state == NGE_MAC_STARTED);

	srp = ngep->send;
	/*
	 * Choose the send strategy:
	 *
	 * 1. Use the mapped path only when the message is longer than
	 *	param_txbcopy_threshold, has no more than NGE_MAP_FRAGS
	 *	fragments, and enough descriptors are free.
	 *
	 * 2. Otherwise (or if mapping fails), copy the message into a
	 *	pre-allocated buffer and send it from there.
	 */
	for (frags = 0, bp = mp; bp != NULL; bp = bp->b_cont) {
		if (MBLKL(bp) == 0)
			continue;
		frags++;
		mblen += MBLKL(bp);
	}
	if (mblen > (ngep->max_sdu) || mblen == 0) {
		freemsg(mp);
		return (B_TRUE);
	}
	if ((mblen > ngep->param_txbcopy_threshold) &&
	    (frags <= NGE_MAP_FRAGS) &&
	    (srp->tx_free > frags * NGE_MAX_COOKIES)) {
		status = nge_send_mapped(ngep, mp, frags);
		if (status == SEND_MAP_FAIL)
			status = nge_send_copy(ngep, mp, srp);
	} else {
		status = nge_send_copy(ngep, mp, srp);
	}
	if (status == SEND_COPY_FAIL) {
		nge_tx_recycle(ngep, B_FALSE);
		status = nge_send_copy(ngep, mp, srp);
		if (status == SEND_COPY_FAIL) {
			ngep->resched_needed = 1;
			NGE_DEBUG(("nge_send: send fail!"));
			return (B_FALSE);
		}
	}
	/* Update the software statistics */
	sw_stp->obytes += mblen + ETHERFCSL;
	sw_stp->xmit_count++;

	return (B_TRUE);
}

649 
650 /*
651  * nge_m_tx : Send a chain of packets.
652  */
mblk_t *
nge_m_tx(void *arg, mblk_t *mp)
{
	nge_t *ngep = arg;
	mblk_t *next;

	rw_enter(ngep->rwlock, RW_READER);
	ASSERT(mp != NULL);
	if (ngep->nge_chip_state != NGE_CHIP_RUNNING) {
		freemsgchain(mp);
		mp = NULL;
	}
	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		if (!nge_send(ngep, mp)) {
			mp->b_next = next;
			break;
		}

		mp = next;
	}
	rw_exit(ngep->rwlock);

	return (mp);
}

/* ARGSUSED */
uint_t
nge_reschedule(caddr_t args1, caddr_t args2)
{
	nge_t *ngep;
	uint_t rslt;

	ngep = (nge_t *)args1;
	rslt = DDI_INTR_UNCLAIMED;

	/*
	 * When the softint is triggered, check whether it was raised
	 * by our own reschedule request.
	 */
	if (ngep->nge_mac_state == NGE_MAC_STARTED &&
	    ngep->resched_needed == 1) {
		ngep->resched_needed = 0;
		++ngep->statistics.sw_statistics.tx_resched;
		mac_tx_update(ngep->mh);
		rslt = DDI_INTR_CLAIMED;
	}
	return (rslt);
}

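/*
 * Return the control/status word of a tx bd so that the caller can
 * test the TXD_OWN/TXD_ERR bits; one variant for each hardware
 * descriptor format (the 64-bit "hot" and the 32-bit "sum" layout).
 */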
uint32_t
nge_hot_txd_check(const void *hwd)
{
	uint32_t err_flag;
	const hot_tx_bd *htbdp;

	htbdp = hwd;
	err_flag = htbdp->control_status.cntl_val;
	return (err_flag);
}

uint32_t
nge_sum_txd_check(const void *hwd)
{
	uint32_t err_flag;
	const sum_tx_bd *htbdp;

	htbdp = hwd;
	err_flag = htbdp->control_status.cntl_val;
	return (err_flag);
}


/*
 * Fill in the contents of a tx data descriptor before transmission.
 */

void
nge_hot_txd_fill(void *hwdesc, const ddi_dma_cookie_t *cookie,
	size_t length, uint32_t sum_flag, boolean_t end, boolean_t tfint)
{
	hot_tx_bd *hw_sbd_p = hwdesc;

	hw_sbd_p->host_buf_addr_hi = cookie->dmac_laddress >> 32;
	hw_sbd_p->host_buf_addr_lo = cookie->dmac_laddress;

	/*
	 * Set the length of the packet.
	 * Note: the value programmed into the bcnt field must be
	 * the original length minus 1.
	 */

	hw_sbd_p->control_status.control_sum_bits.bcnt = length - 1;

	/* set ip checksum offload */
	if (sum_flag & HCK_IPV4_HDRCKSUM)
		hw_sbd_p->control_status.control_sum_bits.ip_hsum
		    = NGE_SET;
	/* set tcp checksum offload */
	if (sum_flag & HCK_FULLCKSUM)
		hw_sbd_p->control_status.control_sum_bits.tcp_hsum
		    = NGE_SET;
	/* request a tx-done interrupt for this bd */
	if (tfint)
		hw_sbd_p->control_status.control_sum_bits.inten = NGE_SET;
	/* mark the last bd of the packet */
	if (end)
		hw_sbd_p->control_status.control_sum_bits.end = NGE_SET;

	membar_producer();

	/* pass desc to HW */
	hw_sbd_p->control_status.control_sum_bits.own = NGE_SET;
}

void
nge_sum_txd_fill(void *hwdesc, const ddi_dma_cookie_t *cookie,
	size_t length, uint32_t sum_flag, boolean_t end, boolean_t tfint)
{
	sum_tx_bd *hw_sbd_p = hwdesc;

	hw_sbd_p->host_buf_addr = cookie->dmac_address;

	/*
	 * Set the length of the packet.
	 * Note: the value programmed into the bcnt field must be
	 * the original length minus 1.
	 */

	hw_sbd_p->control_status.control_sum_bits.bcnt = length - 1;

	/* set ip checksum offload */
	if (sum_flag & HCK_IPV4_HDRCKSUM)
		hw_sbd_p->control_status.control_sum_bits.ip_hsum
		    = NGE_SET;
	/* set tcp checksum offload */
	if (sum_flag & HCK_FULLCKSUM)
		hw_sbd_p->control_status.control_sum_bits.tcp_hsum
		    = NGE_SET;
	/* request a tx-done interrupt for this bd */
	if (tfint)
		hw_sbd_p->control_status.control_sum_bits.inten = NGE_SET;
	/* mark the last bd of the packet */
	if (end)
		hw_sbd_p->control_status.control_sum_bits.end = NGE_SET;

	membar_producer();

	/* pass desc to HW */
	hw_sbd_p->control_status.control_sum_bits.own = NGE_SET;
}
809