xref: /titanic_44/usr/src/uts/common/io/nge/nge_tx.c (revision 2a9459bdd821c1cf59590a7a9069ac9c591e8a6b)
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file may contain confidential information of Nvidia
 * and should not be distributed in source form without approval
 * from Sun Legal.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "nge.h"

#define	TXD_OWN		0x80000000
#define	TXD_ERR		0x40000000
#define	TXD_END		0x20000000
#define	TXD_BCNT_MSK	0x00003FFF


#undef	NGE_DBG
#define	NGE_DBG		NGE_DBG_SEND

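/*
 * Reset a software send descriptor to its idle state: no mblk, no DMA
 * handle chain, zero fragments, and ownership returned to the host.
 */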
#define	NGE_TXSWD_RECYCLE(sd)	{\
					(sd)->mp = NULL; \
					(sd)->frags = 0; \
					(sd)->mp_hndl.head = NULL; \
					(sd)->mp_hndl.tail = NULL; \
					(sd)->flags = HOST_OWN; \
				}


static size_t nge_tx_dmah_pop(nge_dmah_list_t *, nge_dmah_list_t *, size_t);
static void nge_tx_dmah_push(nge_dmah_list_t *, nge_dmah_list_t *);


void nge_tx_recycle_all(nge_t *ngep);
#pragma	no_inline(nge_tx_recycle_all)

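/*
 * Walk the entire send ring: unbind any DMA handles and free any mblk
 * still attached to a software descriptor, clear the matching hardware
 * descriptor, and mark the slot HOST_OWN again.
 */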
void
nge_tx_recycle_all(nge_t *ngep)
{
	send_ring_t *srp;
	sw_tx_sbd_t *ssbdp;
	nge_dmah_node_t	*dmah;
	uint32_t slot;
	uint32_t nslots;

	srp = ngep->send;
	nslots = srp->desc.nslots;

	for (slot = 0; slot < nslots; ++slot) {

		ssbdp = srp->sw_sbds + slot;

		DMA_ZERO(ssbdp->desc);

		if (ssbdp->mp != NULL)	{

			for (dmah = ssbdp->mp_hndl.head; dmah != NULL;
			    dmah = dmah->next)
				(void) ddi_dma_unbind_handle(dmah->hndl);

			freemsg(ssbdp->mp);
		}

		NGE_TXSWD_RECYCLE(ssbdp);
	}
}

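/*
 * Detach up to 'num' DMA handle nodes from the head of 'src' and hand
 * them to 'dst'.  Returns 0 on success; a nonzero return means 'src'
 * did not hold enough nodes and both lists are left unchanged.
 */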
static size_t
nge_tx_dmah_pop(nge_dmah_list_t *src, nge_dmah_list_t *dst, size_t num)
{
	nge_dmah_node_t	*node;

	for (node = src->head; node != NULL && --num != 0; node = node->next)
		;

	if (num == 0)	{

		dst->head = src->head;
		dst->tail = node;

		if ((src->head = node->next) == NULL)
			src->tail = NULL;

		node->next = NULL;
	}

	return (num);
}

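/*
 * Append the whole 'src' list to the tail of 'dst'; 'src' itself is
 * not modified.
 */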
static void
nge_tx_dmah_push(nge_dmah_list_t *src, nge_dmah_list_t *dst)
{
	if (dst->tail != NULL)
		dst->tail->next = src->head;
	else
		dst->head = src->head;

	dst->tail = src->tail;
}

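/*
 * DMA-sync 'num' descriptors starting at slot 'start', issuing a second
 * ddi_dma_sync() call when the range wraps past the end of the ring.
 */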
static void
nge_tx_desc_sync(nge_t *ngep, uint64_t start, uint64_t num, uint_t type)
{
	send_ring_t *srp = ngep->send;
	const size_t txd_size = ngep->desc_attr.txd_size;
	const uint64_t end = srp->desc.nslots * txd_size;

	start = start * txd_size;
	num = num * txd_size;

	if (start + num <= end)
		(void) ddi_dma_sync(srp->desc.dma_hdl, start, num, type);
	else	{

		(void) ddi_dma_sync(srp->desc.dma_hdl, start, 0, type);
		(void) ddi_dma_sync(srp->desc.dma_hdl, 0, start + num - end,
		    type);
	}
}

/*
 * Reclaim resources after transmit completion.
 */
void
nge_tx_recycle(nge_t *ngep, boolean_t is_intr)
{
	int resched;
	uint32_t stflg;
	size_t len;
	uint64_t free;
	uint64_t slot;
	uint64_t used;
	uint64_t next;
	uint64_t nslots;
	mblk_t *mp;
	sw_tx_sbd_t *ssbdp;
	void *hw_sbd_p;
	send_ring_t *srp;
	nge_dmah_node_t *dme;
	nge_dmah_list_t dmah;

	srp = ngep->send;

	if (is_intr) {
		if (mutex_tryenter(srp->tc_lock) == 0)
			return;
	} else
		mutex_enter(srp->tc_lock);
	mutex_enter(srp->tx_lock);

	next = srp->tx_next;
	used = srp->tx_flow;
	free = srp->tx_free;

	mutex_exit(srp->tx_lock);

	slot = srp->tc_next;
	nslots = srp->desc.nslots;

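	/*
	 * Descriptors outstanding in hardware: total slots, minus slots
	 * still free, minus slots claimed but not yet handed to the chip.
	 */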
	used = nslots - free - used;

	ASSERT(slot == NEXT_INDEX(next, free, nslots));

	if (used > srp->tx_hwmark)
		used = srp->tx_hwmark;

	nge_tx_desc_sync(ngep, slot, used, DDI_DMA_SYNC_FORKERNEL);

	/*
	 * Look through the send ring, checking each BD's status word, to
	 * find all the BDs that have been transmitted successfully, then
	 * reclaim all resources associated with those BDs.
	 */

	mp = NULL;
	dmah.head = NULL;
	dmah.tail = NULL;

	for (free = 0; used-- != 0; slot = NEXT(slot, nslots), ++free)	{

		ssbdp = &srp->sw_sbds[slot];
		hw_sbd_p = DMA_VPTR(ssbdp->desc);

		stflg = ngep->desc_attr.txd_check(hw_sbd_p, &len);

		if (ssbdp->flags == HOST_OWN || (TXD_OWN & stflg) != 0)
			break;

		DMA_ZERO(ssbdp->desc);

		if (ssbdp->mp != NULL)	{
			ssbdp->mp->b_next = mp;
			mp = ssbdp->mp;

			if (ssbdp->mp_hndl.head != NULL)
				nge_tx_dmah_push(&ssbdp->mp_hndl, &dmah);
		}

		NGE_TXSWD_RECYCLE(ssbdp);
	}

	/*
	 * We're about to release one or more places :-)
	 * These ASSERTions check that our invariants still hold:
	 *	there must always be at least one free place
	 *	at this point, there must be at least one place NOT free
	 *	we're not about to free more places than were claimed!
	 */

	mutex_enter(srp->tx_lock);

	srp->tx_free += free;
	ngep->watchdog = (srp->desc.nslots - srp->tx_free != 0);

	srp->tc_next = slot;

	ASSERT(srp->tx_free <= nslots);
	ASSERT(srp->tc_next == NEXT_INDEX(srp->tx_next, srp->tx_free, nslots));

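	/*
	 * Request a GLD reschedule only if one is pending and the ring now
	 * has at least tx_hwmark descriptors free.
	 */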
	resched = (ngep->resched_needed != 0 && srp->tx_hwmark <= srp->tx_free);

	mutex_exit(srp->tx_lock);
	mutex_exit(srp->tc_lock);

	/* unbind/free mblks */

	for (dme = dmah.head; dme != NULL; dme = dme->next)
		(void) ddi_dma_unbind_handle(dme->hndl);

	mutex_enter(&srp->dmah_lock);
	nge_tx_dmah_push(&dmah, &srp->dmah_free);
	mutex_exit(&srp->dmah_lock);

	freemsgchain(mp);

	/*
	 * By this point we may have reclaimed some resources.  If GLD has
	 * asked to be notified, report that via the reschedule soft interrupt.
	 */

	if (resched)
		(void) ddi_intr_trigger_softint(ngep->resched_hdl, NULL);
}

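/*
 * Claim 'num' contiguous descriptors from the send ring, running a
 * reclaim first if the ring is below its low-water mark.  Returns the
 * index of the first claimed slot, or (uint64_t)-1 if not enough
 * descriptors are free.
 */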
static uint64_t
nge_tx_alloc(nge_t *ngep, uint64_t num)
{
	uint64_t start;
	send_ring_t *srp;

	start = (uint64_t)-1;
	srp = ngep->send;

	mutex_enter(srp->tx_lock);

	if (srp->tx_free < srp->tx_lwmark)	{

		mutex_exit(srp->tx_lock);
		nge_tx_recycle(ngep, B_FALSE);
		mutex_enter(srp->tx_lock);
	}

	if (srp->tx_free >= num)	{

		start = srp->tx_next;

		srp->tx_next = NEXT_INDEX(start, num, srp->desc.nslots);
		srp->tx_free -= num;
		srp->tx_flow += num;
	}

	mutex_exit(srp->tx_lock);
	return (start);
}

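/*
 * Account for 'slotnum' descriptors just filled by this sender and,
 * once the last in-flight sender has passed through, kick the hardware
 * to start (or resume) transmit DMA.
 */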
static void
nge_tx_start(nge_t *ngep, uint64_t slotnum)
{
	nge_mode_cntl mode_cntl;
	send_ring_t *srp;

	srp = ngep->send;

	/*
	 * Because there can be multiple concurrent threads in
	 * transit through this code, we only want to notify the
	 * hardware once the last one is departing ...
	 */

	mutex_enter(srp->tx_lock);

	srp->tx_flow -= slotnum;
	if (srp->tx_flow == 0) {

		/*
		 * Bump the watchdog counter, thus guaranteeing that it's
		 * nonzero (watchdog activated).  Note that non-synchronised
		 * access here means we may race with the recycle code
		 * above, but the outcome will be harmless.  At worst, the
		 * counter may not get reset on a partial reclaim; but the
		 * large trigger threshold makes false positives unlikely.
		 */
		ngep->watchdog ++;

		mode_cntl.mode_val = nge_reg_get32(ngep, NGE_MODE_CNTL);
		mode_cntl.mode_bits.txdm = NGE_SET;
		mode_cntl.mode_bits.tx_rcom_en = NGE_SET;
		nge_reg_put32(ngep, NGE_MODE_CNTL, mode_cntl.mode_val);
	}
	mutex_exit(srp->tx_lock);
}

static enum send_status
nge_send_copy(nge_t *ngep, mblk_t *mp, send_ring_t *srp);
#pragma	inline(nge_send_copy)

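/*
 * Transmit a message by copying it into the pre-allocated DMA buffer of
 * a single descriptor.  The mblk chain is freed before returning, since
 * the hardware no longer needs it.
 */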
static enum send_status
nge_send_copy(nge_t *ngep, mblk_t *mp, send_ring_t *srp)
{
	size_t totlen;
	size_t mblen;
	uint32_t flags;
	uint64_t bds;
	uint64_t start_index;
	char *txb;
	mblk_t *bp;
	void *hw_sbd_p;
	sw_tx_sbd_t *ssbdp;

	hcksum_retrieve(mp, NULL, NULL, NULL, NULL,
	    NULL, NULL, &flags);
	bds = 0x1;

	if ((uint64_t)-1 == (start_index = nge_tx_alloc(ngep, bds)))
		return (SEND_COPY_FAIL);

	ASSERT(start_index < srp->desc.nslots);

	/*
	 * Up to this point, there's nothing that can fail,
	 * so we can go straight to claiming our
	 * already-reserved place on the train.
	 *
	 * This is the point of no return!
	 */

	bp = mp;
	totlen = 0;
	ssbdp = &srp->sw_sbds[start_index];
	ASSERT(ssbdp->flags == HOST_OWN);

	txb = DMA_VPTR(ssbdp->pbuf);
	totlen = 0;
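	/*
	 * Copy each fragment into the descriptor's bounce buffer; fragments
	 * that would push the total past max_sdu are not copied (oversized
	 * messages are already dropped in nge_send()).
	 */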
	for (; bp != NULL; bp = bp->b_cont) {
		if ((mblen = MBLKL(bp)) == 0)
			continue;
		if ((totlen += mblen) <= ngep->max_sdu) {
			bcopy(bp->b_rptr, txb, mblen);
			txb += mblen;
		}
	}

	DMA_SYNC(ssbdp->pbuf, DDI_DMA_SYNC_FORDEV);

	/* Fill & sync hw desc */

	hw_sbd_p = DMA_VPTR(ssbdp->desc);

	ngep->desc_attr.txd_fill(hw_sbd_p, &ssbdp->pbuf.cookie, totlen,
	    flags, B_TRUE);
	nge_tx_desc_sync(ngep, start_index, bds, DDI_DMA_SYNC_FORDEV);

	ssbdp->flags = CONTROLER_OWN;

	nge_tx_start(ngep, bds);

	/*
	 * The return status indicates that the message can be freed
	 * right away, as we've already copied the contents ...
	 */

	freemsg(mp);
	return (SEND_COPY_SUCESS);
}

/*
 * static enum send_status
 * nge_send_mapped(nge_t *ngep, mblk_t *mp, size_t fragno);
 * #pragma	inline(nge_send_mapped)
 */

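/*
 * Transmit a message by DMA-binding each fragment in place.  Every
 * cookie of every binding consumes one descriptor, and the mblk chain
 * is kept until nge_tx_recycle() sees the descriptors complete.
 */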
static enum send_status
nge_send_mapped(nge_t *ngep, mblk_t *mp, size_t fragno)
{
	int err;
	boolean_t end;
	uint32_t i;
	uint32_t j;
	uint32_t ncookies;
	uint32_t slot;
	uint32_t nslots;
	uint32_t mblen;
	uint32_t flags;
	uint64_t start_index;
	uint64_t end_index;
	mblk_t *bp;
	void *hw_sbd_p;
	send_ring_t *srp;
	nge_dmah_node_t *dmah;
	nge_dmah_node_t	*dmer;
	nge_dmah_list_t dmah_list;
	ddi_dma_cookie_t cookie[NGE_MAX_COOKIES * NGE_MAP_FRAGS];

	srp = ngep->send;
	nslots = srp->desc.nslots;

	mutex_enter(&srp->dmah_lock);
	err = nge_tx_dmah_pop(&srp->dmah_free, &dmah_list, fragno);
	mutex_exit(&srp->dmah_lock);

	if (err != 0)	{

		return (SEND_MAP_FAIL);
	}

	/*
	 * Pre-scan the message chain, noting the total number of bytes and
	 * the number of fragments, pre-binding the DMA address of each
	 * fragment larger than NGE_COPY_SIZE.  This approach has the
	 * following advantages:
	 * 1. It acquires detailed information about the resources
	 *	needed to send the message.
	 *
	 * 2. If enough resources cannot be reserved up front, it fails
	 *	at once and the driver falls back to the copy path to send
	 *	the message.
	 */

	slot = 0;
	dmah = dmah_list.head;

	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, NULL, &flags);

	for (bp = mp; bp != NULL; bp = bp->b_cont)	{

		mblen = MBLKL(bp);
		if (mblen == 0)
			continue;

		err = ddi_dma_addr_bind_handle(dmah->hndl,
		    NULL, (caddr_t)bp->b_rptr, mblen,
		    DDI_DMA_STREAMING | DDI_DMA_WRITE,
		    DDI_DMA_DONTWAIT, NULL, cookie + slot, &ncookies);

		/*
		 * If the fragment cannot be mapped successfully, there is
		 * no point in sending the message via the mapped path;
		 * send it by copy instead.
		 *
		 * Following Intel's suggestion, it is better to keep the
		 * number of cookies per fragment below four.
		 */
		if (err != DDI_DMA_MAPPED || ncookies > NGE_MAX_COOKIES) {
			NGE_DEBUG(("err(%x) map tx bulk fails"
			    " cookie(%x), ncookies(%x)",
			    err, cookie[slot].dmac_laddress, ncookies));
			goto map_fail;
		}

		/*
		 * Fetch the remaining cookies for this fragment; each
		 * cookie will consume one BD.
		 */
		for (end_index = slot + ncookies;
		    ++slot != end_index;
		    ddi_dma_nextcookie(dmah->hndl, cookie + slot))
			;

		dmah = dmah->next;
	}

	/*
	 * Now allocate tx descriptors and fill them.
	 * IMPORTANT:
	 *	Once it claims a place, it is impossible
	 *	to fail.
	 *
	 * In this version, there's no setup to be done here, and there's
	 * nothing that can fail, so we can go straight to claiming our
	 * already-reserved places on the train.
	 *
	 * This is the point of no return!
	 */

	if ((uint64_t)-1 == (start_index = nge_tx_alloc(ngep, slot)))
		goto map_fail;

	ASSERT(start_index < nslots);

	/* fill&sync hw desc, going in reverse order */
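	/*
	 * Filling back-to-front means the first descriptor, which carries
	 * the checksum flags, is the last one to have its OWN bit set.
	 */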

	end = B_TRUE;
	end_index = NEXT_INDEX(start_index, slot - 1, nslots);

	for (i = slot - 1, j = end_index; start_index - j != 0;
	    j = PREV(j, nslots), --i)	{

		hw_sbd_p = DMA_VPTR(srp->sw_sbds[j].desc);
		ngep->desc_attr.txd_fill(hw_sbd_p, cookie + i,
		    cookie[i].dmac_size, 0, end);

		end = B_FALSE;
	}

	hw_sbd_p = DMA_VPTR(srp->sw_sbds[j].desc);
	ngep->desc_attr.txd_fill(hw_sbd_p, cookie + i, cookie[i].dmac_size,
	    flags, end);

	nge_tx_desc_sync(ngep, start_index, slot, DDI_DMA_SYNC_FORDEV);

	/* fill sw desc */

	for (j = start_index; end_index - j != 0; j = NEXT(j, nslots))	{

		srp->sw_sbds[j].flags = CONTROLER_OWN;
	}

	srp->sw_sbds[j].mp = mp;
	srp->sw_sbds[j].mp_hndl = dmah_list;
	srp->sw_sbds[j].frags = fragno;
	srp->sw_sbds[j].flags = CONTROLER_OWN;

	nge_tx_start(ngep, slot);

	/*
	 * The return status indicates that the message cannot be freed
	 * right away; it must be kept until we can be sure it has been
	 * sent out successfully.
	 */
	return (SEND_MAP_SUCCESS);

map_fail:
	for (dmer = dmah_list.head; dmah - dmer != 0; dmer = dmer->next)
		(void) ddi_dma_unbind_handle(dmer->hndl);

	mutex_enter(&srp->dmah_lock);
	nge_tx_dmah_push(&dmah_list, &srp->dmah_free);
	mutex_exit(&srp->dmah_lock);

	return (SEND_MAP_FAIL);
}

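/*
 * Send one message, choosing between the copy and mapped paths based on
 * the message length and fragment count.  Returns B_TRUE if the mblk
 * has been consumed, or B_FALSE if the caller should retry it later.
 */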
static boolean_t
nge_send(nge_t *ngep, mblk_t *mp)
{
	mblk_t *bp;
	send_ring_t *srp;
	enum send_status status;
	uint32_t mblen = 0;
	uint32_t frags = 0;
	nge_statistics_t *nstp = &ngep->statistics;
	nge_sw_statistics_t *sw_stp = &nstp->sw_statistics;

	ASSERT(mp != NULL);
	ASSERT(ngep->nge_mac_state == NGE_MAC_STARTED);

	srp = ngep->send;
	/*
	 * 1. Check the number of fragments in the message.
	 *	If the total is larger than 3, choose the copy path.
	 *
	 * 2. Check whether the length of the message is larger than
	 *	NGE_TX_COPY_SIZE; if so, choose the mapped path.
	 */
	for (frags = 0, bp = mp; bp != NULL; bp = bp->b_cont) {
		if (MBLKL(bp) == 0)
			continue;
		frags++;
		mblen += MBLKL(bp);
	}
	if (mblen > (ngep->max_sdu) || mblen == 0) {
		freemsg(mp);
		return (B_TRUE);
	}

	if ((mblen > ngep->param_txbcopy_threshold) &&
	    (srp->tx_free > frags * NGE_MAX_COOKIES)) {
		status = nge_send_mapped(ngep, mp, frags);
		if (status == SEND_MAP_FAIL)
			status = nge_send_copy(ngep, mp, srp);
	} else {
		status = nge_send_copy(ngep, mp, srp);
	}
	if (status == SEND_COPY_FAIL) {
		nge_tx_recycle(ngep, B_FALSE);
		status = nge_send_copy(ngep, mp, srp);
		if (status == SEND_COPY_FAIL) {
			ngep->resched_needed = 1;
			NGE_DEBUG(("nge_send: send fail!"));
			return (B_FALSE);
		}
	}
	/* Update the software statistics */
	sw_stp->obytes += mblen + ETHERFCSL;
	sw_stp->xmit_count ++;

	return (B_TRUE);
}


/*
 * nge_m_tx : Send a chain of packets.  Any packets that cannot be sent
 * are returned to the caller to be retried later.
 */
mblk_t *
nge_m_tx(void *arg, mblk_t *mp)
{
	nge_t *ngep = arg;
	mblk_t *next;

	rw_enter(ngep->rwlock, RW_READER);
	ASSERT(mp != NULL);
	if (ngep->nge_chip_state != NGE_CHIP_RUNNING) {
		freemsgchain(mp);
		mp = NULL;
	}
	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		if (!nge_send(ngep, mp)) {
			mp->b_next = next;
			break;
		}

		mp = next;
	}
	rw_exit(ngep->rwlock);

	return (mp);
}

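/*
 * Soft interrupt handler for tx rescheduling: if a reschedule was
 * requested while the MAC is running, clear the flag and tell GLD that
 * it may resume calling nge_m_tx().
 */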
/* ARGSUSED */
uint_t
nge_reschedule(caddr_t args1, caddr_t args2)
{
	nge_t *ngep;
	uint_t rslt;

	ngep = (nge_t *)args1;
	rslt = DDI_INTR_UNCLAIMED;

	/*
	 * When the soft interrupt is triggered, check whether it was
	 * caused by our expected reschedule request.
	 */
	if (ngep->nge_mac_state == NGE_MAC_STARTED &&
	    ngep->resched_needed == 1) {
		ngep->resched_needed = 0;
		++ngep->statistics.sw_statistics.tx_resched;
		mac_tx_update(ngep->mh);
		rslt = DDI_INTR_CLAIMED;
	}
	return (rslt);
}

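/*
 * Return the status bits of a transmit descriptor (everything except
 * the byte-count field) and report the byte count through 'len'.
 */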
uint32_t
nge_hot_txd_check(const void *hwd, size_t *len)
{
	uint32_t err_flag;
	const hot_tx_bd * htbdp;

	htbdp = hwd;
	err_flag = htbdp->control_status.cntl_val & ~TXD_BCNT_MSK;

	*len = htbdp->control_status.status_bits.bcnt;
	return (err_flag);
}

uint32_t
nge_sum_txd_check(const void *hwd, size_t *len)
{
	uint32_t err_flag;
	const sum_tx_bd * htbdp;

	htbdp = hwd;
	err_flag = htbdp->control_status.cntl_val & ~TXD_BCNT_MSK;

	*len = htbdp->control_status.status_bits.bcnt;
	return (err_flag);
}


/*
 * Fill the contents of a Tx data descriptor
 * before transmitting.
 */

void
nge_hot_txd_fill(void *hwdesc, const ddi_dma_cookie_t *cookie,
	size_t length, uint32_t sum_flag, boolean_t end)
{
	hot_tx_bd * hw_sbd_p = hwdesc;

	hw_sbd_p->host_buf_addr_hi = cookie->dmac_laddress >> 32;
	hw_sbd_p->host_buf_addr_lo = cookie->dmac_laddress;

	/*
	 * Set the length of the packet.
	 * Note: the value written into this field must be
	 * the original length minus 1.
	 */

	hw_sbd_p->control_status.control_sum_bits.bcnt = length - 1;

	/* set ip checksum */
	if (sum_flag & HCK_IPV4_HDRCKSUM)
		hw_sbd_p->control_status.control_sum_bits.ip_hsum
		    = NGE_SET;
	/* set tcp checksum */
	if (sum_flag & HCK_FULLCKSUM)
		hw_sbd_p->control_status.control_sum_bits.tcp_hsum
		    = NGE_SET;
	/*
	 * indicate the end of the BD chain
	 */
	if (end)
		hw_sbd_p->control_status.control_sum_bits.end = NGE_SET;

	membar_producer();

	/* pass desc to HW */
	hw_sbd_p->control_status.control_sum_bits.own = NGE_SET;
}

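/*
 * Same as nge_hot_txd_fill(), but for the descriptor format that
 * carries only a 32-bit buffer address.
 */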
void
nge_sum_txd_fill(void *hwdesc, const ddi_dma_cookie_t *cookie,
	size_t length, uint32_t sum_flag, boolean_t end)
{
	sum_tx_bd * hw_sbd_p = hwdesc;

	hw_sbd_p->host_buf_addr = cookie->dmac_address;

	/*
	 * Set the length of the packet.
	 * Note: the value written into this field must be
	 * the original length minus 1.
	 */

	hw_sbd_p->control_status.control_sum_bits.bcnt = length - 1;

	/* set ip checksum */
	if (sum_flag & HCK_IPV4_HDRCKSUM)
		hw_sbd_p->control_status.control_sum_bits.ip_hsum
		    = NGE_SET;
	/* set tcp checksum */
	if (sum_flag & HCK_FULLCKSUM)
		hw_sbd_p->control_status.control_sum_bits.tcp_hsum
		    = NGE_SET;
	/*
	 * indicate the end of the BD chain
	 */
	if (end)
		hw_sbd_p->control_status.control_sum_bits.end = NGE_SET;

	membar_producer();

	/* pass desc to HW */
	hw_sbd_p->control_status.control_sum_bits.own = NGE_SET;
}