/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "nge.h"

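/*
 * Bits of interest in a TX descriptor's control/status word, as
 * returned by desc_attr.txd_check(): hardware ownership, error
 * summary, end-of-packet, and the byte-count mask.
 */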
#define	TXD_OWN		0x80000000
#define	TXD_ERR		0x40000000
#define	TXD_END		0x20000000
#define	TXD_BCNT_MSK	0x00003FFF


#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_SEND

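/*
 * Reset a software send descriptor to its idle state: no mblk, no
 * fragments, an empty DMA handle list, and ownership back with the host.
 */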
#define	NGE_TXSWD_RECYCLE(sd)	{\
					(sd)->mp = NULL; \
					(sd)->frags = 0; \
					(sd)->mp_hndl.head = NULL; \
					(sd)->mp_hndl.tail = NULL; \
					(sd)->flags = HOST_OWN; \
				}


static size_t nge_tx_dmah_pop(nge_dmah_list_t *, nge_dmah_list_t *, size_t);
static void nge_tx_dmah_push(nge_dmah_list_t *, nge_dmah_list_t *);


void nge_tx_recycle_all(nge_t *ngep);
#pragma	no_inline(nge_tx_recycle_all)

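/*
 * Unbind and free every mblk still held on the send ring, regardless of
 * descriptor ownership, and return all slots to the host.  Intended for
 * teardown/reset paths, so no ring locks are taken here.
 */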
void
nge_tx_recycle_all(nge_t *ngep)
{
	send_ring_t *srp;
	sw_tx_sbd_t *ssbdp;
	nge_dmah_node_t	*dmah;
	uint32_t slot;
	uint32_t nslots;

	srp = ngep->send;
	nslots = srp->desc.nslots;

	for (slot = 0; slot < nslots; ++slot) {

		ssbdp = srp->sw_sbds + slot;

		DMA_ZERO(ssbdp->desc);

		if (ssbdp->mp != NULL)	{

			for (dmah = ssbdp->mp_hndl.head; dmah != NULL;
			    dmah = dmah->next)
				(void) ddi_dma_unbind_handle(dmah->hndl);

			freemsg(ssbdp->mp);
		}

		NGE_TXSWD_RECYCLE(ssbdp);
	}
}

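/*
 * Detach the first `num' handles from `src' onto `dst'.  Returns zero on
 * success; a non-zero return means `src' held fewer than `num' handles
 * and neither list has been modified.
 */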
static size_t
nge_tx_dmah_pop(nge_dmah_list_t *src, nge_dmah_list_t *dst, size_t num)
{
	nge_dmah_node_t	*node;

	for (node = src->head; node != NULL && --num != 0; node = node->next)
		;

	if (num == 0)	{

		dst->head = src->head;
		dst->tail = node;

		if ((src->head = node->next) == NULL)
			src->tail = NULL;

		node->next = NULL;
	}

	return (num);
}

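/*
 * Append the whole of `src' to the tail of `dst'.  `src' itself is not
 * cleared; the caller is expected to discard or reinitialize it.
 */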
static void
nge_tx_dmah_push(nge_dmah_list_t *src, nge_dmah_list_t *dst)
{
	if (dst->tail != NULL)
		dst->tail->next = src->head;
	else
		dst->head = src->head;

	dst->tail = src->tail;
}

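/*
 * Sync `bds' TX descriptors starting at `start_index' in the given DDI
 * direction, splitting the operation in two when the range wraps past
 * the end of the ring (a length of zero in the first ddi_dma_sync()
 * call means "sync from the offset to the end of the mapping").
 */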
static void
nge_tx_desc_sync(nge_t *ngep, uint32_t start_index, uint32_t bds, uint_t type)
{
	send_ring_t *srp = ngep->send;
	const size_t txd_size = ngep->desc_attr.txd_size;
	const uint64_t end = srp->desc.nslots * txd_size;
	uint64_t start;
	uint64_t num;

	start = start_index * txd_size;
	num = bds * txd_size;

	if (start + num <= end)
		(void) ddi_dma_sync(srp->desc.dma_hdl, start, num, type);
	else	{

		(void) ddi_dma_sync(srp->desc.dma_hdl, start, 0, type);
		(void) ddi_dma_sync(srp->desc.dma_hdl, 0, start + num - end,
		    type);
	}
}

/*
 * Reclaim the resources associated with descriptors that the hardware
 * has finished transmitting.
 */
void
nge_tx_recycle(nge_t *ngep, boolean_t is_intr)
{
	int resched;
	uint32_t stflg;
	uint32_t free;
	uint32_t slot;
	uint32_t used;
	uint32_t next;
	uint32_t nslots;
	mblk_t *mp;
	sw_tx_sbd_t *ssbdp;
	void *hw_sbd_p;
	send_ring_t *srp;
	nge_dmah_node_t *dme;
	nge_dmah_list_t dmah;

	srp = ngep->send;

	if (is_intr) {
		if (mutex_tryenter(srp->tc_lock) == 0)
			return;
	} else
		mutex_enter(srp->tc_lock);
	mutex_enter(srp->tx_lock);

	next = srp->tx_next;
	used = srp->tx_flow;
	free = srp->tx_free;

	mutex_exit(srp->tx_lock);

	slot = srp->tc_next;
	nslots = srp->desc.nslots;

	used = nslots - free - used;

	ASSERT(slot == NEXT_INDEX(next, free, nslots));

	if (used > srp->tx_hwmark)
		used = srp->tx_hwmark;

	nge_tx_desc_sync(ngep, slot, used, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * Walk the send ring, checking each BD's status word to find
	 * those that the hardware has finished transmitting, then
	 * reclaim all the resources associated with them.
	 */

	mp = NULL;
	dmah.head = NULL;
	dmah.tail = NULL;

	for (free = 0; used-- != 0; slot = NEXT(slot, nslots), ++free)	{

		ssbdp = &srp->sw_sbds[slot];
		hw_sbd_p = DMA_VPTR(ssbdp->desc);

		if (ssbdp->flags == HOST_OWN)
			break;
		stflg = ngep->desc_attr.txd_check(hw_sbd_p);
		if ((stflg & TXD_OWN) != 0)
			break;
		DMA_ZERO(ssbdp->desc);
		if (ssbdp->mp != NULL)	{
			ssbdp->mp->b_next = mp;
			mp = ssbdp->mp;

			if (ssbdp->mp_hndl.head != NULL)
				nge_tx_dmah_push(&ssbdp->mp_hndl, &dmah);
		}

		NGE_TXSWD_RECYCLE(ssbdp);
	}

	/*
	 * We're about to release one or more places :-)
	 * These ASSERTions check that our invariants still hold:
	 *	there must always be at least one free place;
	 *	at this point, there must be at least one place NOT free;
	 *	we're not about to free more places than were claimed!
	 */

	mutex_enter(srp->tx_lock);

	srp->tx_free += free;
	ngep->watchdog = (srp->desc.nslots - srp->tx_free != 0);

	srp->tc_next = slot;

	ASSERT(srp->tx_free <= nslots);
	ASSERT(srp->tc_next == NEXT_INDEX(srp->tx_next, srp->tx_free, nslots));

	resched = (ngep->resched_needed != 0 && srp->tx_hwmark <= srp->tx_free);

	mutex_exit(srp->tx_lock);
	mutex_exit(srp->tc_lock);

	/* unbind/free mblks */

	for (dme = dmah.head; dme != NULL; dme = dme->next)
		(void) ddi_dma_unbind_handle(dme->hndl);
	if (dmah.head != NULL) {
		mutex_enter(&srp->dmah_lock);
		nge_tx_dmah_push(&dmah, &srp->dmah_free);
		mutex_exit(&srp->dmah_lock);
	}
	freemsgchain(mp);

	/*
	 * Some resources may have been reclaimed above.  If a reschedule
	 * has been requested, report it to GLD now.
	 */

	if (resched)
		(void) ddi_intr_trigger_softint(ngep->resched_hdl, NULL);
}

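/*
 * Claim `num' contiguous descriptors from the send ring, trying a
 * reclaim first if the free count has dropped below the low-water mark.
 * Returns the index of the first claimed slot, or (uint32_t)-1 if the
 * ring cannot supply `num' slots.
 */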
static uint32_t
nge_tx_alloc(nge_t *ngep, uint32_t num)
{
	uint32_t start;
	send_ring_t *srp;

	start = (uint32_t)-1;
	srp = ngep->send;

	mutex_enter(srp->tx_lock);

	if (srp->tx_free < srp->tx_lwmark)	{

		mutex_exit(srp->tx_lock);
		nge_tx_recycle(ngep, B_FALSE);
		mutex_enter(srp->tx_lock);
	}

	if (srp->tx_free >= num)	{

		start = srp->tx_next;

		srp->tx_next = NEXT_INDEX(start, num, srp->desc.nslots);
		srp->tx_free -= num;
		srp->tx_flow += num;
	}

	mutex_exit(srp->tx_lock);
	return (start);
}

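/*
 * Account for `slotnum' descriptors handed to the hardware and, once
 * the last concurrent sender has passed through, kick the TX DMA
 * engine to start transmission.
 */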
static void
nge_tx_start(nge_t *ngep, uint32_t slotnum)
{
	nge_mode_cntl mode_cntl;
	send_ring_t *srp;

	srp = ngep->send;

	/*
	 * Because there can be multiple concurrent threads in
	 * transit through this code, we only want to notify the
	 * hardware once the last one is departing ...
	 */

	mutex_enter(srp->tx_lock);

	srp->tx_flow -= slotnum;
	if (srp->tx_flow == 0) {

		/*
		 * Bump the watchdog counter, thus guaranteeing that it's
		 * nonzero (watchdog activated).  Note that non-synchronised
		 * access here means we may race with the reclaim() code
		 * above, but the outcome will be harmless.  At worst, the
		 * counter may not get reset on a partial reclaim; but the
		 * large trigger threshold makes false positives unlikely.
		 */
		ngep->watchdog++;

		mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
		mode_cntl.mode_bits.txdm = NGE_SET;
		mode_cntl.mode_bits.tx_rcom_en = NGE_SET;
		nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);
	}
	mutex_exit(srp->tx_lock);
}

static enum send_status
nge_send_copy(nge_t *ngep, mblk_t *mp, send_ring_t *srp);
#pragma	inline(nge_send_copy)

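/*
 * Transmit by copying: claim a single descriptor, copy the whole
 * message chain into that slot's pre-allocated DMA buffer, and hand
 * the descriptor to the hardware.  The mblk is freed before returning.
 */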
static enum send_status
nge_send_copy(nge_t *ngep, mblk_t *mp, send_ring_t *srp)
{
	size_t totlen;
	size_t mblen;
	uint32_t flags;
	uint32_t bds;
	uint32_t start_index;
	char *txb;
	mblk_t *bp;
	void *hw_sbd_p;
	sw_tx_sbd_t *ssbdp;

	hcksum_retrieve(mp, NULL, NULL, NULL, NULL,
	    NULL, NULL, &flags);
	bds = 0x1;

	if ((uint32_t)-1 == (start_index = nge_tx_alloc(ngep, bds)))
		return (SEND_COPY_FAIL);

	ASSERT(start_index < srp->desc.nslots);

	/*
	 * Up to this point, there's nothing that can fail,
	 * so we can go straight to claiming our
	 * already-reserved place on the train.
	 *
	 * This is the point of no return!
	 */

	bp = mp;
	ssbdp = &srp->sw_sbds[start_index];
	ASSERT(ssbdp->flags == HOST_OWN);

	txb = DMA_VPTR(ssbdp->pbuf);
	totlen = 0;
	for (; bp != NULL; bp = bp->b_cont) {
		if ((mblen = MBLKL(bp)) == 0)
			continue;
		if ((totlen += mblen) <= ngep->max_sdu) {
			bcopy(bp->b_rptr, txb, mblen);
			txb += mblen;
		}
	}

	DMA_SYNC(ssbdp->pbuf, DDI_DMA_SYNC_FORDEV);

	/* Fill & sync the hardware descriptor */

	hw_sbd_p = DMA_VPTR(ssbdp->desc);

	ngep->desc_attr.txd_fill(hw_sbd_p, &ssbdp->pbuf.cookie, totlen,
	    flags, B_TRUE);
	nge_tx_desc_sync(ngep, start_index, bds, DDI_DMA_SYNC_FORDEV);

	ssbdp->flags = CONTROLER_OWN;

	nge_tx_start(ngep, bds);

	/*
	 * The return status indicates that the message can be freed
	 * right away, as we've already copied the contents ...
	 */

	freemsg(mp);
	return (SEND_COPY_SUCESS);
}

/*
 * static enum send_status
 * nge_send_mapped(nge_t *ngep, mblk_t *mp, size_t fragno);
 * #pragma	inline(nge_send_mapped)
 */

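/*
 * Transmit by DMA binding: bind each fragment of the message with its
 * own DMA handle, claim one descriptor per resulting cookie, and keep
 * the mblk bound until nge_tx_recycle() sees the hardware hand the
 * slots back.
 */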
static enum send_status
nge_send_mapped(nge_t *ngep, mblk_t *mp, size_t fragno)
{
	int err;
	boolean_t end;
	uint32_t i;
	uint32_t j;
	uint32_t ncookies;
	uint32_t slot;
	uint32_t nslots;
	uint32_t mblen;
	uint32_t flags;
	uint32_t start_index;
	uint32_t end_index;
	mblk_t *bp;
	void *hw_sbd_p;
	send_ring_t *srp;
	nge_dmah_node_t *dmah;
	nge_dmah_node_t	*dmer;
	nge_dmah_list_t dmah_list;
	ddi_dma_cookie_t cookie[NGE_MAX_COOKIES * NGE_MAP_FRAGS];

	srp = ngep->send;
	nslots = srp->desc.nslots;

	mutex_enter(&srp->dmah_lock);
	err = nge_tx_dmah_pop(&srp->dmah_free, &dmah_list, fragno);
	mutex_exit(&srp->dmah_lock);

	if (err != 0)	{

		return (SEND_MAP_FAIL);
	}

	/*
	 * Pre-scan the message chain, binding each fragment's DMA address
	 * up front and noting the total number of cookies (and hence BDs)
	 * needed.  Doing the binds first has two advantages:
	 *
	 * 1. We learn exactly what resources are needed to send
	 *	the message.
	 *
	 * 2. If enough resources cannot be reserved, we fail at once
	 *	and the driver falls back to the copy method to send
	 *	the message.
	 */

	slot = 0;
	dmah = dmah_list.head;

	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, NULL, &flags);

	for (bp = mp; bp != NULL; bp = bp->b_cont)	{

		mblen = MBLKL(bp);
		if (mblen == 0)
			continue;

		err = ddi_dma_addr_bind_handle(dmah->hndl,
		    NULL, (caddr_t)bp->b_rptr, mblen,
		    DDI_DMA_STREAMING | DDI_DMA_WRITE,
		    DDI_DMA_DONTWAIT, NULL, cookie + slot, &ncookies);

		/*
		 * If the bind fails, there is no point in sending the
		 * message by the mapped method; fall back to sending
		 * it by the copy method.
		 *
		 * Following Intel's suggestion, it is better for the
		 * number of cookies per fragment to be less than 4.
		 */
		if (err != DDI_DMA_MAPPED || ncookies > NGE_MAX_COOKIES) {
			NGE_DEBUG(("err(%x) map tx bulk fails"
			    " cookie(%x), ncookies(%x)",
			    err, (uint32_t)cookie[slot].dmac_laddress,
			    ncookies));
			goto map_fail;
		}

		/*
		 * Collect the remaining cookies of this bind; each
		 * cookie will consume one BD.
		 */
		for (end_index = slot + ncookies;
		    ++slot != end_index;
		    ddi_dma_nextcookie(dmah->hndl, cookie + slot))
			;

		dmah = dmah->next;
	}

	/*
	 * Now claim the TX descriptors and fill them.
	 *
	 * IMPORTANT: once nge_tx_alloc() has claimed our places, nothing
	 * below can fail, so we can go straight to filling our
	 * already-reserved places on the train.
	 *
	 * This is the point of no return!
	 */

	if ((uint32_t)-1 == (start_index = nge_tx_alloc(ngep, slot)))
		goto map_fail;

	ASSERT(start_index < nslots);

	/* Fill & sync the hardware descriptors, going in reverse order */

	end = B_TRUE;
	end_index = NEXT_INDEX(start_index, slot - 1, nslots);

	for (i = slot - 1, j = end_index; start_index - j != 0;
	    j = PREV(j, nslots), --i)	{

		hw_sbd_p = DMA_VPTR(srp->sw_sbds[j].desc);
		ngep->desc_attr.txd_fill(hw_sbd_p, cookie + i,
		    cookie[i].dmac_size, 0, end);

		end = B_FALSE;
	}

	hw_sbd_p = DMA_VPTR(srp->sw_sbds[j].desc);
	ngep->desc_attr.txd_fill(hw_sbd_p, cookie + i, cookie[i].dmac_size,
	    flags, end);

	nge_tx_desc_sync(ngep, start_index, slot, DDI_DMA_SYNC_FORDEV);

	/* Fill the software descriptors */

	for (j = start_index; end_index - j != 0; j = NEXT(j, nslots)) {

		srp->sw_sbds[j].flags = CONTROLER_OWN;
	}

	srp->sw_sbds[j].mp = mp;
	srp->sw_sbds[j].mp_hndl = dmah_list;
	srp->sw_sbds[j].frags = (uint32_t)fragno;
	srp->sw_sbds[j].flags = CONTROLER_OWN;

	nge_tx_start(ngep, slot);

	/*
	 * The return status indicates that the message cannot be freed
	 * right away: it must be kept until we are sure it has been
	 * sent out successfully.
	 */
	return (SEND_MAP_SUCCESS);

map_fail:
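	/* Unbind the handles bound so far (everything before `dmah'). */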
	for (dmer = dmah_list.head; dmah - dmer != 0; dmer = dmer->next)
		(void) ddi_dma_unbind_handle(dmer->hndl);

	mutex_enter(&srp->dmah_lock);
	nge_tx_dmah_push(&dmah_list, &srp->dmah_free);
	mutex_exit(&srp->dmah_lock);

	return (SEND_MAP_FAIL);
}

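/*
 * Transmit a single message, choosing between the copy and mapped
 * paths.  Returns B_FALSE when the send ring is exhausted, in which
 * case the caller should hold on to the packet and wait for a
 * reschedule.
 */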
static boolean_t
nge_send(nge_t *ngep, mblk_t *mp)
{
	mblk_t *bp;
	send_ring_t *srp;
	enum send_status status;
	uint32_t mblen = 0;
	uint32_t frags = 0;
	nge_statistics_t *nstp = &ngep->statistics;
	nge_sw_statistics_t *sw_stp = &nstp->sw_statistics;

	ASSERT(mp != NULL);
	ASSERT(ngep->nge_mac_state == NGE_MAC_STARTED);

	srp = ngep->send;
	/*
	 * Count the message's non-empty fragments and its total length,
	 * then choose the send method:
	 *
	 * 1. Messages that are empty or longer than the MTU are
	 *	dropped outright.
	 *
	 * 2. The mapped method is used only when the message is longer
	 *	than the bcopy threshold, has no more than NGE_MAP_FRAGS
	 *	fragments, and the ring can hold its worst-case number
	 *	of cookies; otherwise (or if mapping fails) the copy
	 *	method is used.
	 */
	for (frags = 0, bp = mp; bp != NULL; bp = bp->b_cont) {
		if (MBLKL(bp) == 0)
			continue;
		frags++;
		mblen += MBLKL(bp);
	}
	if (mblen > (ngep->max_sdu) || mblen == 0) {
		freemsg(mp);
		return (B_TRUE);
	}
	if ((mblen > ngep->param_txbcopy_threshold) &&
	    (frags <= NGE_MAP_FRAGS) &&
	    (srp->tx_free > frags * NGE_MAX_COOKIES)) {
		status = nge_send_mapped(ngep, mp, frags);
		if (status == SEND_MAP_FAIL)
			status = nge_send_copy(ngep, mp, srp);
	} else {
		status = nge_send_copy(ngep, mp, srp);
	}
	if (status == SEND_COPY_FAIL) {
		nge_tx_recycle(ngep, B_FALSE);
		status = nge_send_copy(ngep, mp, srp);
		if (status == SEND_COPY_FAIL) {
			ngep->resched_needed = 1;
			NGE_DEBUG(("nge_send: send fail!"));
			return (B_FALSE);
		}
	}
	/* Update the software statistics */
	sw_stp->obytes += mblen + ETHERFCSL;
	sw_stp->xmit_count++;

	return (B_TRUE);
}

/*
 * nge_m_tx : Send a chain of packets.
 */
mblk_t *
nge_m_tx(void *arg, mblk_t *mp)
{
	nge_t *ngep = arg;
	mblk_t *next;

	rw_enter(ngep->rwlock, RW_READER);
	ASSERT(mp != NULL);
	if (ngep->nge_chip_state != NGE_CHIP_RUNNING) {
		freemsgchain(mp);
		mp = NULL;
	}
	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		if (!nge_send(ngep, mp)) {
			mp->b_next = next;
			break;
		}

		mp = next;
	}
	rw_exit(ngep->rwlock);

	return (mp);
}

/* ARGSUSED */
uint_t
nge_reschedule(caddr_t args1, caddr_t args2)
{
	nge_t *ngep;
	uint_t rslt;

	ngep = (nge_t *)args1;
	rslt = DDI_INTR_UNCLAIMED;

	/*
	 * When the soft interrupt is triggered, check whether it is
	 * the reschedule we requested.
	 */
	if (ngep->nge_mac_state == NGE_MAC_STARTED &&
	    ngep->resched_needed == 1) {
		ngep->resched_needed = 0;
		++ngep->statistics.sw_statistics.tx_resched;
		mac_tx_update(ngep->mh);
		rslt = DDI_INTR_CLAIMED;
	}
	return (rslt);
}

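/*
 * Return the control/status word of a TX descriptor so the caller can
 * test TXD_OWN and friends: the "hot" variant is the 64-bit-address
 * descriptor, the "sum" variant the 32-bit-address one.
 */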
uint32_t
nge_hot_txd_check(const void *hwd)
{
	uint32_t err_flag;
	const hot_tx_bd *htbdp;

	htbdp = hwd;
	err_flag = htbdp->control_status.cntl_val;
	return (err_flag);
}

uint32_t
nge_sum_txd_check(const void *hwd)
{
	uint32_t err_flag;
	const sum_tx_bd *htbdp;

	htbdp = hwd;
	err_flag = htbdp->control_status.cntl_val;
	return (err_flag);
}


/*
 * Fill in the contents of a TX data descriptor
 * before transmission.
 */

void
nge_hot_txd_fill(void *hwdesc, const ddi_dma_cookie_t *cookie,
	size_t length, uint32_t sum_flag, boolean_t end)
{
	hot_tx_bd *hw_sbd_p = hwdesc;

	hw_sbd_p->host_buf_addr_hi = cookie->dmac_laddress >> 32;
	hw_sbd_p->host_buf_addr_lo = cookie->dmac_laddress;

	/*
	 * Set the length of the packet.
	 * Note: the value programmed into this field is the
	 * original length minus 1.
	 */

	hw_sbd_p->control_status.control_sum_bits.bcnt = length - 1;

	/* set IP header checksum offload */
	if (sum_flag & HCK_IPV4_HDRCKSUM)
		hw_sbd_p->control_status.control_sum_bits.ip_hsum
		    = NGE_SET;
	/* set TCP checksum offload */
	if (sum_flag & HCK_FULLCKSUM)
		hw_sbd_p->control_status.control_sum_bits.tcp_hsum
		    = NGE_SET;
	/*
	 * Indicate the end of the BD chain.
	 */
	if (end)
		hw_sbd_p->control_status.control_sum_bits.end = NGE_SET;

	membar_producer();

	/* pass the descriptor to the hardware */
	hw_sbd_p->control_status.control_sum_bits.own = NGE_SET;
}

void
nge_sum_txd_fill(void *hwdesc, const ddi_dma_cookie_t *cookie,
	size_t length, uint32_t sum_flag, boolean_t end)
{
	sum_tx_bd *hw_sbd_p = hwdesc;

	hw_sbd_p->host_buf_addr = cookie->dmac_address;

	/*
	 * Set the length of the packet.
	 * Note: the value programmed into this field is the
	 * original length minus 1.
	 */

	hw_sbd_p->control_status.control_sum_bits.bcnt = length - 1;

	/* set IP header checksum offload */
	if (sum_flag & HCK_IPV4_HDRCKSUM)
		hw_sbd_p->control_status.control_sum_bits.ip_hsum
		    = NGE_SET;
	/* set TCP checksum offload */
	if (sum_flag & HCK_FULLCKSUM)
		hw_sbd_p->control_status.control_sum_bits.tcp_hsum
		    = NGE_SET;
	/*
	 * Indicate the end of the BD chain.
	 */
	if (end)
		hw_sbd_p->control_status.control_sum_bits.end = NGE_SET;

	membar_producer();

	/* pass the descriptor to the hardware */
	hw_sbd_p->control_status.control_sum_bits.own = NGE_SET;
}