xref: /illumos-gate/usr/src/uts/common/io/rge/rge_rxtx.c (revision 4de2612967d06c4fdbf524a62556a1e8118a006f)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "rge.h"

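/*
 * Conversions between 32-bit values and pointers; only the low
 * 32 bits of a pointer are preserved.
 */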
#define	U32TOPTR(x)	((void *)(uintptr_t)(uint32_t)(x))
#define	PTRTOU32(x)	((uint32_t)(uintptr_t)(void *)(x))

/*
 * ========== RX side routines ==========
 */

#define	RGE_DBG		RGE_DBG_RECV	/* debug flag for this code	*/

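/*
 * Atomically subtract <n> from a counter, but refuse to take the last
 * <n> resources: the operation fails (returning 0, with the counter
 * unchanged) unless more than <n> resources remain.
 */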
static uint32_t rge_atomic_reserve(uint32_t *count_p, uint32_t n);
#pragma	inline(rge_atomic_reserve)

static uint32_t
rge_atomic_reserve(uint32_t *count_p, uint32_t n)
{
	uint32_t oldval;
	uint32_t newval;

	/* ATOMICALLY */
	do {
		oldval = *count_p;
		newval = oldval - n;
		if (oldval <= n)
			return (0);		/* no resources left	*/
	} while (cas32(count_p, oldval, newval) != oldval);

	return (newval);
}

/*
 * Atomically add <n> to a counter
 */
static void rge_atomic_renounce(uint32_t *count_p, uint32_t n);
#pragma	inline(rge_atomic_renounce)

static void
rge_atomic_renounce(uint32_t *count_p, uint32_t n)
{
	uint32_t oldval;
	uint32_t newval;

	/* ATOMICALLY */
	do {
		oldval = *count_p;
		newval = oldval + n;
	} while (cas32(count_p, oldval, newval) != oldval);
}

/*
 * Callback code invoked from STREAMS when the recv data buffer is free
 * for recycling.
 */

void
rge_rx_recycle(caddr_t arg)
{
	rge_t *rgep;
	dma_buf_t *rx_buf;
	sw_rbd_t *free_rbdp;
	uint32_t slot_recy;

	rx_buf = (dma_buf_t *)arg;
	rgep = (rge_t *)rx_buf->private;

	/*
	 * If rge_unattach() has been called, this callback is also invoked
	 * when we free the mp in rge_fini_rings().  In that case we must
	 * not call desballoc() below, or the buffer would be leaked.
	 */
	if (rgep->rge_mac_state == RGE_MAC_UNATTACH)
		return;

	/*
	 * Recycle the data buffer: attach a fresh mblk to it and
	 * return it to the free ring.
	 */
	rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
	    rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
	if (rx_buf->mp == NULL) {
		rge_problem(rgep, "rge_rx_recycle: desballoc() failed");
		return;
	}
	mutex_enter(rgep->rc_lock);
	slot_recy = rgep->rc_next;
	free_rbdp = &rgep->free_rbds[slot_recy];
	if (free_rbdp->rx_buf == NULL) {
		free_rbdp->rx_buf = rx_buf;
		rgep->rc_next = NEXT(slot_recy, RGE_BUF_SLOTS);
		rge_atomic_renounce(&rgep->rx_free, 1);
		if (rgep->rx_bcopy && rgep->rx_free == RGE_BUF_SLOTS)
			rgep->rx_bcopy = B_FALSE;
		ASSERT(rgep->rx_free <= RGE_BUF_SLOTS);
	} else {
		/*
		 * This situation shouldn't happen
		 */
		rge_problem(rgep, "rge_rx_recycle: buffer %d recycle error",
		    slot_recy);
		rgep->stats.recycle_err++;
	}
	mutex_exit(rgep->rc_lock);
}

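/*
 * Replace a buffer that has just been loaned upstream: take the next
 * buffer from the free ring, attach it to receive slot <slot> and point
 * the corresponding hardware BD at its DMA address.
 */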
static int rge_rx_refill(rge_t *rgep, uint32_t slot);
#pragma	inline(rge_rx_refill)

static int
rge_rx_refill(rge_t *rgep, uint32_t slot)
{
	dma_buf_t *free_buf;
	rge_bd_t *hw_rbd_p;
	sw_rbd_t *srbdp;
	uint32_t free_slot;

	srbdp = &rgep->sw_rbds[slot];
	hw_rbd_p = &rgep->rx_ring[slot];
	free_slot = rgep->rf_next;
	free_buf = rgep->free_rbds[free_slot].rx_buf;
	if (free_buf != NULL) {
		srbdp->rx_buf = free_buf;
		rgep->free_rbds[free_slot].rx_buf = NULL;
		hw_rbd_p->host_buf_addr = RGE_BSWAP_32(RGE_HEADROOM +
		    free_buf->pbuf.cookie.dmac_laddress);
		hw_rbd_p->host_buf_addr_hi =
		    RGE_BSWAP_32(free_buf->pbuf.cookie.dmac_laddress >> 32);
		rgep->rf_next = NEXT(free_slot, RGE_BUF_SLOTS);
		return (1);
	} else {
		/*
		 * This situation shouldn't happen
		 */
		rge_problem(rgep, "rge_rx_refill: free buffer %d is NULL",
		    free_slot);
		rgep->rx_bcopy = B_TRUE;
		return (0);
	}
}

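/*
 * Build an mblk for the packet in receive slot <slot>: validate it,
 * either bcopy it into a fresh mblk or loan the DMA buffer upstream,
 * then handle VLAN tag reinsertion and checksum-offload flags.
 */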
static mblk_t *rge_receive_packet(rge_t *rgep, uint32_t slot);
#pragma	inline(rge_receive_packet)

static mblk_t *
rge_receive_packet(rge_t *rgep, uint32_t slot)
{
	rge_bd_t *hw_rbd_p;
	sw_rbd_t *srbdp;
	uchar_t *dp;
	mblk_t *mp;
	uint8_t *rx_ptr;
	uint32_t rx_status;
	uint_t packet_len;
	uint_t minsize;
	uint_t maxsize;
	uint32_t proto;
	uint32_t pflags;
	struct ether_vlan_header *ehp;
	uint16_t vtag = 0;

	hw_rbd_p = &rgep->rx_ring[slot];
	srbdp = &rgep->sw_rbds[slot];
	packet_len = RGE_BSWAP_32(hw_rbd_p->flags_len) & RBD_LEN_MASK;

	/*
	 * Read receive status
	 */
	rx_status = RGE_BSWAP_32(hw_rbd_p->flags_len) & RBD_FLAGS_MASK;

	/*
	 * Handle error packets
	 */
	if (!(rx_status & BD_FLAG_PKT_END)) {
		RGE_DEBUG(("rge_receive_packet: not a complete packet"));
		return (NULL);
	}
	if (rx_status & RBD_FLAG_ERROR) {
		if (rx_status & RBD_FLAG_CRC_ERR)
			rgep->stats.crc_err++;
		if (rx_status & RBD_FLAG_RUNT)
			rgep->stats.in_short++;
		/*
		 * Set chip_error flag to reset chip:
		 * (suggested in Realtek programming guide.)
		 */
		RGE_DEBUG(("rge_receive_packet: error packet, status = %x",
		    rx_status));
		mutex_enter(rgep->genlock);
		rgep->rge_chip_state = RGE_CHIP_ERROR;
		mutex_exit(rgep->genlock);
		return (NULL);
	}

	/*
	 * Drop packets with an invalid length
	 */
	minsize = ETHERMIN - VLAN_TAGSZ + ETHERFCSL;
	maxsize = rgep->ethmax_size + ETHERFCSL;

	if (packet_len < minsize || packet_len > maxsize) {
		RGE_DEBUG(("rge_receive_packet: len err = %d", packet_len));
		return (NULL);
	}

	DMA_SYNC(srbdp->rx_buf->pbuf, DDI_DMA_SYNC_FORKERNEL);
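	/*
	 * Small packets, bcopy mode, or an exhausted free list all mean
	 * we copy the data into a freshly allocated mblk; otherwise we
	 * loan the receive buffer upstream and refill its slot from the
	 * free ring.
	 */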
	if (packet_len <= RGE_RECV_COPY_SIZE || rgep->rx_bcopy ||
	    !rge_atomic_reserve(&rgep->rx_free, 1)) {
		/*
		 * Allocate buffer to receive this good packet
		 */
		mp = allocb(packet_len + RGE_HEADROOM, 0);
		if (mp == NULL) {
			RGE_DEBUG(("rge_receive_packet: allocate buffer fail"));
			rgep->stats.no_rcvbuf++;
			return (NULL);
		}

		/*
		 * Copy the data found into the new cluster
		 */
		rx_ptr = DMA_VPTR(srbdp->rx_buf->pbuf);
		mp->b_rptr = dp = mp->b_rptr + RGE_HEADROOM;
		bcopy(rx_ptr + RGE_HEADROOM, dp, packet_len);
		mp->b_wptr = dp + packet_len - ETHERFCSL;
	} else {
		mp = srbdp->rx_buf->mp;
		mp->b_rptr += RGE_HEADROOM;
		mp->b_wptr = mp->b_rptr + packet_len - ETHERFCSL;
		mp->b_next = mp->b_cont = NULL;
		/*
		 * Refill the current receive BD's buffer; if the refill
		 * fails, just keep the mp in this slot and drop the packet.
		 */
		if (!rge_rx_refill(rgep, slot))
			return (NULL);
	}
	rgep->stats.rbytes += packet_len;

	/*
	 * VLAN packet ?
	 */
	pflags = RGE_BSWAP_32(hw_rbd_p->vlan_tag);
	if (pflags & RBD_VLAN_PKT)
		vtag = pflags & RBD_VLAN_TAG;
	if (vtag) {
		vtag = TCI_CHIP2OS(vtag);
		/*
		 * Since the h/w strips the VLAN tag from the incoming
		 * packet, we have to reinsert the tag here before sending
		 * the packet upstream.
		 */
		(void) memmove(mp->b_rptr - VLAN_TAGSZ, mp->b_rptr,
		    2 * ETHERADDRL);
		mp->b_rptr -= VLAN_TAGSZ;
		ehp = (struct ether_vlan_header *)mp->b_rptr;
		ehp->ether_tpid = htons(VLAN_TPID);
		ehp->ether_tci = htons(vtag);
	}

	/*
	 * Check h/w checksum offload status
	 */
	pflags = 0;
	proto = rx_status & RBD_FLAG_PROTOCOL;
	if ((proto == RBD_FLAG_TCP && !(rx_status & RBD_TCP_CKSUM_ERR)) ||
	    (proto == RBD_FLAG_UDP && !(rx_status & RBD_UDP_CKSUM_ERR)))
		pflags |= HCK_FULLCKSUM | HCK_FULLCKSUM_OK;
	if (proto != RBD_FLAG_NONE_IP && !(rx_status & RBD_IP_CKSUM_ERR))
		pflags |= HCK_IPV4_HDRCKSUM;
	if (pflags != 0) {
		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0, pflags, 0);
	}

	return (mp);
}

/*
 * Accept the packets received in the rx ring.
 *
 * Returns a chain of mblks containing the received data, to be
 * passed up to mac_rx().
 * The routine returns only when a complete scan has been performed
 * without finding any more packets to receive.
 * This function must SET the OWN bit of each BD it has accepted,
 * handing those descriptors back to the chip.
 */
static mblk_t *rge_receive_ring(rge_t *rgep);
#pragma	inline(rge_receive_ring)

static mblk_t *
rge_receive_ring(rge_t *rgep)
{
	rge_bd_t *hw_rbd_p;
	mblk_t *head;
	mblk_t **tail;
	mblk_t *mp;
	uint32_t slot;

	ASSERT(mutex_owned(rgep->rx_lock));

	/*
	 * Sync (all) the receive ring descriptors
	 * before accepting the packets they describe
	 */
	DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORKERNEL);
	slot = rgep->rx_next;
	hw_rbd_p = &rgep->rx_ring[slot];
	head = NULL;
	tail = &head;

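	/*
	 * Walk the ring, accepting packets until we reach a descriptor
	 * that the chip still owns.
	 */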
	while (!(hw_rbd_p->flags_len & RGE_BSWAP_32(BD_FLAG_HW_OWN))) {
		if ((mp = rge_receive_packet(rgep, slot)) != NULL) {
			*tail = mp;
			tail = &mp->b_next;
		}

		/*
		 * Clear RBD flags
		 */
		hw_rbd_p->flags_len =
		    RGE_BSWAP_32(rgep->rxbuf_size - RGE_HEADROOM);
		HW_RBD_INIT(hw_rbd_p, slot);
		slot = NEXT(slot, RGE_RECV_SLOTS);
		hw_rbd_p = &rgep->rx_ring[slot];
	}

	rgep->rx_next = slot;
	return (head);
}

/*
 * Receive all ready packets.
 */
void rge_receive(rge_t *rgep);
#pragma	no_inline(rge_receive)

void
rge_receive(rge_t *rgep)
{
	mblk_t *mp;

	mutex_enter(rgep->rx_lock);
	mp = rge_receive_ring(rgep);
	mutex_exit(rgep->rx_lock);

	if (mp != NULL)
		mac_rx(rgep->macp, rgep->handle, mp);
}


#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_SEND	/* debug flag for this code	*/


/*
 * ========== Send-side recycle routines ==========
 */
static uint32_t rge_send_claim(rge_t *rgep);
#pragma	inline(rge_send_claim)

static uint32_t
rge_send_claim(rge_t *rgep)
{
	uint32_t slot;
	uint32_t next;

	mutex_enter(rgep->tx_lock);
	slot = rgep->tx_next;
	next = NEXT(slot, RGE_SEND_SLOTS);
	rgep->tx_next = next;
	rgep->tx_flow++;
	mutex_exit(rgep->tx_lock);

	/*
	 * We check that our invariants still hold:
	 * +	the slot and next indexes are in range
	 * +	the slot must not be the last one (i.e. the *next*
	 *	index must not match the next-recycle index), 'cos
	 *	there must always be at least one free slot in a ring
	 */
	ASSERT(slot < RGE_SEND_SLOTS);
	ASSERT(next < RGE_SEND_SLOTS);
	ASSERT(next != rgep->tc_next);

	return (slot);
}

/*
 * We don't want to call this function after every successful h/w
 * transmit completion in the ISR.  Instead, we call it from rge_send()
 * when few or no free tx BDs remain.
 */
static void rge_send_recycle(rge_t *rgep);
#pragma	inline(rge_send_recycle)

static void
rge_send_recycle(rge_t *rgep)
{
	rge_bd_t *hw_sbd_p;
	uint32_t tc_tail;
	uint32_t tc_head;
	uint32_t n;

	if (rgep->tx_free == RGE_SEND_SLOTS)
		return;

	mutex_enter(rgep->tc_lock);
	tc_head = rgep->tc_next;
	tc_tail = rgep->tc_tail;

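	/*
	 * Walk backwards from the most recently queued descriptor until
	 * we find one the chip has finished with (HW_OWN clear); everything
	 * from the recycle head up to and including it can be reclaimed.
	 * If even the head descriptor is still owned by the chip, nothing
	 * has completed yet.
	 */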
	do {
		tc_tail = LAST(tc_tail, RGE_SEND_SLOTS);
		hw_sbd_p = &rgep->tx_ring[tc_tail];
		if (tc_tail == tc_head) {
			if (hw_sbd_p->flags_len &
			    RGE_BSWAP_32(BD_FLAG_HW_OWN)) {
				/*
				 * Bump the watchdog counter, thus guaranteeing
				 * that it's nonzero (watchdog activated).
				 */
				rgep->watchdog += 1;
				mutex_exit(rgep->tc_lock);
				return;
			}
			break;
		}
	} while (hw_sbd_p->flags_len & RGE_BSWAP_32(BD_FLAG_HW_OWN));

	rgep->tc_next = NEXT(tc_tail, RGE_SEND_SLOTS);
	n = rgep->tc_next - tc_head;
	if (rgep->tc_next < tc_head)
		n += RGE_SEND_SLOTS;
	rge_atomic_renounce(&rgep->tx_free, n);
	rgep->watchdog = 0;
	mutex_exit(rgep->tc_lock);

	if (rgep->resched_needed) {
		rgep->resched_needed = 0;
		ddi_trigger_softintr(rgep->resched_id);
	}
}

/*
 * Send a message by copying it into a preallocated (and premapped) buffer
 */
static void rge_send_copy(rge_t *rgep, mblk_t *mp, uint16_t tci, uchar_t proto);
#pragma	inline(rge_send_copy)

static void
rge_send_copy(rge_t *rgep, mblk_t *mp, uint16_t tci, uchar_t proto)
{
	rge_bd_t *hw_sbd_p;
	sw_sbd_t *ssbdp;
	mblk_t *bp;
	char *txb;
	uint32_t slot;
	size_t totlen;
	size_t mblen;
	uint32_t pflags;

	/*
	 * IMPORTANT:
	 *	Up to the point where it claims a place, a send_msg()
	 *	routine can indicate failure by returning B_FALSE.  Once it's
	 *	claimed a place, it mustn't fail.
	 *
	 * In this version, there's no setup to be done here, and there's
	 * nothing that can fail, so we can go straight to claiming our
	 * already-reserved place on the train.
	 *
	 * This is the point of no return!
	 */
	slot = rge_send_claim(rgep);
	ssbdp = &rgep->sw_sbds[slot];

	/*
	 * Copy the data into a pre-mapped buffer, which avoids the
	 * overhead (and complication) of mapping/unmapping STREAMS
	 * buffers and keeping hold of them until the DMA has completed.
	 *
	 * Because all buffers are the same size, and larger than the
	 * longest single valid message, we don't have to bother about
	 * splitting the message across multiple buffers either.
	 */
	txb = DMA_VPTR(ssbdp->pbuf);
	for (totlen = 0, bp = mp; bp != NULL; bp = bp->b_cont) {
		mblen = bp->b_wptr - bp->b_rptr;
		if ((totlen += mblen) <= rgep->ethmax_size) {
			bcopy(bp->b_rptr, txb, mblen);
			txb += mblen;
		}
	}
	rgep->stats.obytes += totlen + ETHERFCSL;

	/*
	 * We've reached the end of the chain, and we should have
	 * collected no more than ETHERMAX bytes into our buffer.
	 */
	ASSERT(bp == NULL);
	ASSERT(totlen <= rgep->ethmax_size);
	DMA_SYNC(ssbdp->pbuf, DDI_DMA_SYNC_FORDEV);

	/*
	 * Update the hardware send buffer descriptor; then we're done
	 * and return. The message can be freed right away in rge_send(),
	 * as we've already copied the contents ...
	 */
	hw_sbd_p = &rgep->tx_ring[slot];
	hw_sbd_p->flags_len = RGE_BSWAP_32(totlen & SBD_LEN_MASK);
	if (tci != 0) {
		tci = TCI_OS2CHIP(tci);
		hw_sbd_p->vlan_tag = RGE_BSWAP_32(tci);
		hw_sbd_p->vlan_tag |= RGE_BSWAP_32(SBD_VLAN_PKT);
	} else {
		hw_sbd_p->vlan_tag = 0;
	}

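	/*
	 * Retrieve the checksum-offload request from the mblk and set
	 * the matching TCP/UDP/IP checksum flags in the descriptor.
	 */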
	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, NULL, &pflags);
	if (pflags & HCK_FULLCKSUM) {
		switch (proto) {
		case IS_UDP_PKT:
			hw_sbd_p->flags_len |= RGE_BSWAP_32(SBD_FLAG_UDP_CKSUM);
			proto = IS_IPV4_PKT;
			break;
		case IS_TCP_PKT:
			hw_sbd_p->flags_len |= RGE_BSWAP_32(SBD_FLAG_TCP_CKSUM);
			proto = IS_IPV4_PKT;
			break;
		default:
			break;
		}
	}
	if ((pflags & HCK_IPV4_HDRCKSUM) && (proto == IS_IPV4_PKT))
		hw_sbd_p->flags_len |= RGE_BSWAP_32(SBD_FLAG_IP_CKSUM);

	HW_SBD_SET(hw_sbd_p, slot);
}

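/*
 * Transmit a single mblk chain: strip any VLAN tag, determine the
 * protocol type for checksum offload, copy the data into a send
 * buffer, and trigger the chip when the current burst is complete.
 */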
static boolean_t
rge_send(rge_t *rgep, mblk_t *mp)
{
	struct ether_vlan_header *ehp;
	boolean_t need_strip = B_FALSE;
	uint16_t tci = 0;
	uchar_t proto = UNKNOWN_PKT;
	struct ether_header *ethhdr;
	struct ip *ip_hdr;

	ASSERT(mp->b_next == NULL);

	/*
	 * Determine if the packet is VLAN tagged.
	 */
	ASSERT(MBLKL(mp) >= sizeof (struct ether_header));
	ehp = (struct ether_vlan_header *)mp->b_rptr;

	if (ehp->ether_tpid == htons(VLAN_TPID)) {
		if (MBLKL(mp) < sizeof (struct ether_vlan_header)) {
			uint32_t pflags;

			/*
			 * Need to preserve checksum flags across pullup.
			 */
			hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL,
			    NULL, &pflags);

			if (!pullupmsg(mp,
			    sizeof (struct ether_vlan_header))) {
				RGE_DEBUG(("rge_send: pullup failure"));
				rgep->resched_needed = B_TRUE;
				return (B_FALSE);
			}

			(void) hcksum_assoc(mp, NULL, NULL, NULL, NULL, NULL,
			    NULL, pflags, KM_NOSLEEP);
		}

		ehp = (struct ether_vlan_header *)mp->b_rptr;
		need_strip = B_TRUE;
	}

	/*
	 * Try to reserve a place in the transmit ring.
	 */
	if (!rge_atomic_reserve(&rgep->tx_free, 1)) {
		RGE_DEBUG(("rge_send: no free slots"));
		rgep->stats.defer++;
		rgep->resched_needed = B_TRUE;
		rge_send_recycle(rgep);
		return (B_FALSE);
	}

	/*
	 * We've reserved a place :-)
	 * These ASSERTions check that our invariants still hold:
	 *	there must still be at least one free place
	 *	there must be at least one place NOT free (ours!)
	 */
	ASSERT(rgep->tx_free < RGE_SEND_SLOTS);

	/*
	 * Now that we know there is space to transmit the packet,
	 * strip any VLAN tag that is present.
	 */
	if (need_strip) {
		tci = ntohs(ehp->ether_tci);
		(void) memmove(mp->b_rptr + VLAN_TAGSZ, mp->b_rptr,
		    2 * ETHERADDRL);
		mp->b_rptr += VLAN_TAGSZ;
	}

	/*
	 * Check the packet's protocol type to select the appropriate
	 * h/w checksum offload
	 */
	if (MBLKL(mp) >= sizeof (struct ether_header) +
	    sizeof (struct ip)) {
		ethhdr = (struct ether_header *)(mp->b_rptr);
		/*
		 * Is the packet an IP(v4) packet?
		 */
		if (ntohs(ethhdr->ether_type) == ETHERTYPE_IP) {
			proto = IS_IPV4_PKT;
			ip_hdr = (struct ip *)(mp->b_rptr +
			    sizeof (struct ether_header));
			if (ip_hdr->ip_p == IPPROTO_TCP)
				proto = IS_TCP_PKT;
			else if (ip_hdr->ip_p == IPPROTO_UDP)
				proto = IS_UDP_PKT;
		}
	}

	rge_send_copy(rgep, mp, tci, proto);

	/*
	 * Trigger chip h/w transmit ...
	 */
	mutex_enter(rgep->tx_lock);
	if (--rgep->tx_flow == 0) {
		DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
		rge_tx_trigger(rgep);
		if (rgep->tx_free < RGE_SEND_SLOTS/32)
			rge_send_recycle(rgep);
		rgep->tc_tail = rgep->tx_next;
	}
	mutex_exit(rgep->tx_lock);

	freemsg(mp);
	return (B_TRUE);
}

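/*
 * Soft interrupt handler: once transmit resources have been freed up
 * after a shortage, tell the MAC layer it may retry queued packets.
 */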
uint_t
rge_reschedule(caddr_t arg)
{
	rge_t *rgep;
	uint_t rslt;

	rgep = (rge_t *)arg;
	rslt = DDI_INTR_UNCLAIMED;

	if (rgep->rge_mac_state == RGE_MAC_STARTED && rgep->resched_needed) {
		mac_tx_update(rgep->macp);
		rgep->resched_needed = B_FALSE;
		rslt = DDI_INTR_CLAIMED;
	}

	return (rslt);
}

/*
 * rge_m_tx() - send a chain of packets
 */
mblk_t *
rge_m_tx(void *arg, mblk_t *mp)
{
	rge_t *rgep = arg;		/* private device info	*/
	mblk_t *next;

	ASSERT(mp != NULL);
	ASSERT(rgep->rge_mac_state == RGE_MAC_STARTED);

	if (rgep->rge_chip_state != RGE_CHIP_RUNNING) {
		RGE_DEBUG(("rge_m_tx: chip not running"));
		return (mp);
	}

	rw_enter(rgep->errlock, RW_READER);
	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		if (!rge_send(rgep, mp)) {
			mp->b_next = next;
			break;
		}

		mp = next;
	}
	rw_exit(rgep->errlock);

	return (mp);
}