/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "rge.h"

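/*
 * Helper macros to convert between 32-bit values and kernel pointers
 */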
#define	U32TOPTR(x)	((void *)(uintptr_t)(uint32_t)(x))
#define	PTRTOU32(x)	((uint32_t)(uintptr_t)(void *)(x))

/*
 * ========== RX side routines ==========
 */

#define	RGE_DBG		RGE_DBG_RECV	/* debug flag for this code	*/

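/*
 * Atomically decrement a counter by <n>; returns the new value,
 * or 0 (leaving the counter unchanged) if insufficient resources remain.
 */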
static uint32_t
rge_atomic_reserve(uint32_t *count_p, uint32_t n)
{
	uint32_t oldval;
	uint32_t newval;

	/* ATOMICALLY */
	do {
		oldval = *count_p;
		newval = oldval - n;
		if (oldval <= n)
			return (0);		/* no resources left	*/
	} while (atomic_cas_32(count_p, oldval, newval) != oldval);

	return (newval);
}

/*
 * Atomically increment a counter by <n>
 */
static void
rge_atomic_renounce(uint32_t *count_p, uint32_t n)
{
	uint32_t oldval;
	uint32_t newval;

	/* ATOMICALLY */
	do {
		oldval = *count_p;
		newval = oldval + n;
	} while (atomic_cas_32(count_p, oldval, newval) != oldval);
}

/*
 * Callback code invoked from STREAMS when the receive data buffer is
 * free for recycling.
 */
void
rge_rx_recycle(caddr_t arg)
{
	rge_t *rgep;
	dma_buf_t *rx_buf;
	sw_rbd_t *free_srbdp;
	uint32_t slot_recy;

	rx_buf = (dma_buf_t *)arg;
	rgep = (rge_t *)rx_buf->private;

	/*
	 * In rge_unattach() and rge_attach(), this callback is also
	 * invoked (via rge_fini_rings() and rge_init_rings()) to free
	 * the mp.  In that case we must not call desballoc() below,
	 * otherwise the buffer would be leaked.
	 */
	if (rgep->rge_mac_state == RGE_MAC_UNATTACH ||
	    rgep->rge_mac_state == RGE_MAC_ATTACH)
		return;

	/*
	 * Recycle the data buffer and return it to the free ring
	 */
	rx_buf->mp = desballoc(DMA_VPTR(rx_buf->pbuf),
	    rgep->rxbuf_size, 0, &rx_buf->rx_recycle);
	if (rx_buf->mp == NULL) {
		rge_problem(rgep, "rge_rx_recycle: desballoc() failed");
		return;
	}
	mutex_enter(rgep->rc_lock);
	slot_recy = rgep->rc_next;
	free_srbdp = &rgep->free_srbds[slot_recy];

	ASSERT(free_srbdp->rx_buf == NULL);
	free_srbdp->rx_buf = rx_buf;
	rgep->rc_next = NEXT(slot_recy, RGE_BUF_SLOTS);
	rge_atomic_renounce(&rgep->rx_free, 1);
	if (rgep->rx_bcopy && rgep->rx_free == RGE_BUF_SLOTS)
		rgep->rx_bcopy = B_FALSE;
	ASSERT(rgep->rx_free <= RGE_BUF_SLOTS);

	mutex_exit(rgep->rc_lock);
}

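/*
 * Replenish the h/w receive BD at <slot> with a buffer taken from
 * the free ring.  Returns 1 on success, 0 if no free buffer was
 * available (in which case the driver falls back to bcopy receive).
 */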
static int
rge_rx_refill(rge_t *rgep, uint32_t slot)
{
	dma_buf_t *free_buf;
	rge_bd_t *hw_rbd_p;
	sw_rbd_t *srbdp;
	uint32_t free_slot;

	srbdp = &rgep->sw_rbds[slot];
	hw_rbd_p = &rgep->rx_ring[slot];
	free_slot = rgep->rf_next;
	free_buf = rgep->free_srbds[free_slot].rx_buf;
	if (free_buf != NULL) {
		srbdp->rx_buf = free_buf;
		rgep->free_srbds[free_slot].rx_buf = NULL;
		hw_rbd_p->host_buf_addr = RGE_BSWAP_32(rgep->head_room +
		    free_buf->pbuf.cookie.dmac_laddress);
		hw_rbd_p->host_buf_addr_hi =
		    RGE_BSWAP_32(free_buf->pbuf.cookie.dmac_laddress >> 32);
		rgep->rf_next = NEXT(free_slot, RGE_BUF_SLOTS);
		return (1);
	} else {
		/*
		 * This situation shouldn't happen
		 */
		rge_problem(rgep, "rge_rx_refill: free buffer %d is NULL",
		    free_slot);
		rgep->rx_bcopy = B_TRUE;
		return (0);
	}
}

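/*
 * Accept one packet from receive BD <slot>: either copy it into a
 * freshly allocated mblk, or loan the DMA buffer upstream and refill
 * the slot from the free ring.  Returns NULL if the packet is bad or
 * no resources are available.
 */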
static mblk_t *
rge_receive_packet(rge_t *rgep, uint32_t slot)
{
	rge_bd_t *hw_rbd_p;
	sw_rbd_t *srbdp;
	uchar_t *dp;
	mblk_t *mp;
	uint8_t *rx_ptr;
	uint32_t rx_status;
	uint_t packet_len;
	uint_t minsize;
	uint_t maxsize;
	uint32_t proto;
	uint32_t pflags;
	struct ether_vlan_header *ehp;
	uint16_t vtag = 0;

	hw_rbd_p = &rgep->rx_ring[slot];
	srbdp = &rgep->sw_rbds[slot];

	/*
	 * Read receive status
	 */
	rx_status = RGE_BSWAP_32(hw_rbd_p->flags_len) & RBD_FLAGS_MASK;

	/*
	 * Handle error packet
	 */
	if (!(rx_status & BD_FLAG_PKT_END)) {
		RGE_DEBUG(("rge_receive_packet: not a complete packet"));
		return (NULL);
	}
	if (rx_status & RBD_FLAG_ERROR) {
		if (rx_status & RBD_FLAG_CRC_ERR)
			rgep->stats.crc_err++;
		if (rx_status & RBD_FLAG_RUNT)
			rgep->stats.in_short++;
		/*
		 * Set the chip_error flag so that the chip will be reset
		 * (as suggested in the Realtek programming guide).
		 */
		RGE_DEBUG(("rge_receive_packet: error packet, status = %x",
		    rx_status));
		mutex_enter(rgep->genlock);
		rgep->rge_chip_state = RGE_CHIP_ERROR;
		mutex_exit(rgep->genlock);
		return (NULL);
	}

	/*
	 * Handle size error packet
	 */
	packet_len = RGE_BSWAP_32(hw_rbd_p->flags_len) & RBD_LEN_MASK;
	packet_len -= ETHERFCSL;
	minsize = ETHERMIN;
	pflags = RGE_BSWAP_32(hw_rbd_p->vlan_tag);
	if (pflags & RBD_VLAN_PKT)
		minsize -= VLAN_TAGSZ;
	maxsize = rgep->ethmax_size;
	if (packet_len < minsize || packet_len > maxsize) {
		RGE_DEBUG(("rge_receive_packet: len err = %d", packet_len));
		return (NULL);
	}

	DMA_SYNC(srbdp->rx_buf->pbuf, DDI_DMA_SYNC_FORKERNEL);
	if (rgep->rx_bcopy || packet_len <= RGE_RECV_COPY_SIZE ||
	    !rge_atomic_reserve(&rgep->rx_free, 1)) {
		/*
		 * Allocate a buffer to receive this good packet
		 */
		mp = allocb(packet_len + RGE_HEADROOM, 0);
		if (mp == NULL) {
			RGE_DEBUG(("rge_receive_packet: allocate buffer fail"));
			rgep->stats.no_rcvbuf++;
			return (NULL);
		}

		/*
		 * Copy the data found into the new cluster
		 */
		rx_ptr = DMA_VPTR(srbdp->rx_buf->pbuf);
		mp->b_rptr = dp = mp->b_rptr + RGE_HEADROOM;
		bcopy(rx_ptr + rgep->head_room, dp, packet_len);
		mp->b_wptr = dp + packet_len;
	} else {
		mp = srbdp->rx_buf->mp;
		mp->b_rptr += rgep->head_room;
		mp->b_wptr = mp->b_rptr + packet_len;
		mp->b_next = mp->b_cont = NULL;
		/*
		 * Refill the current receive BD's buffer from the free ring;
		 * if this fails, just keep the mp (the packet is not sent up).
		 */
		if (!rge_rx_refill(rgep, slot))
			return (NULL);
	}
	rgep->stats.rbytes += packet_len;
	rgep->stats.rpackets++;

	/*
	 * VLAN packet?
	 */
	if (pflags & RBD_VLAN_PKT)
		vtag = pflags & RBD_VLAN_TAG;
	if (vtag) {
		vtag = TCI_CHIP2OS(vtag);
		/*
		 * As the h/w strips the VLAN tag from the incoming packet,
		 * we need to reinsert the tag before passing the packet up.
		 */
		(void) memmove(mp->b_rptr - VLAN_TAGSZ, mp->b_rptr,
		    2 * ETHERADDRL);
		mp->b_rptr -= VLAN_TAGSZ;
		ehp = (struct ether_vlan_header *)mp->b_rptr;
		ehp->ether_tpid = htons(ETHERTYPE_VLAN);
		ehp->ether_tci = htons(vtag);
		rgep->stats.rbytes += VLAN_TAGSZ;
	}

	/*
	 * Check h/w checksum offload status
	 */
	pflags = 0;
	proto = rx_status & RBD_FLAG_PROTOCOL;
	if ((proto == RBD_FLAG_TCP && !(rx_status & RBD_TCP_CKSUM_ERR)) ||
	    (proto == RBD_FLAG_UDP && !(rx_status & RBD_UDP_CKSUM_ERR)))
		pflags |= HCK_FULLCKSUM_OK;
	if (proto != RBD_FLAG_NONE_IP && !(rx_status & RBD_IP_CKSUM_ERR))
		pflags |= HCK_IPV4_HDRCKSUM_OK;
	if (pflags != 0) {
		mac_hcksum_set(mp, 0, 0, 0, 0, pflags);
	}

	return (mp);
}

/*
 * Accept the packets received in the rx ring.
 *
 * Returns a chain of mblks containing the received data, to be
 * passed up to mac_rx().
 * The routine returns only when a complete scan has been performed
 * without finding any more packets to receive.
 * This function must SET the OWN bit of each BD it has accepted
 * from the ring, returning the descriptor to the hardware.
 */
static mblk_t *
rge_receive_ring(rge_t *rgep)
{
	rge_bd_t *hw_rbd_p;
	mblk_t *head;
	mblk_t **tail;
	mblk_t *mp;
	uint32_t slot;

	ASSERT(mutex_owned(rgep->rx_lock));

	/*
	 * Sync (all) the receive ring descriptors
	 * before accepting the packets they describe
	 */
	DMA_SYNC(rgep->rx_desc, DDI_DMA_SYNC_FORKERNEL);
	slot = rgep->rx_next;
	hw_rbd_p = &rgep->rx_ring[slot];
	head = NULL;
	tail = &head;

	while (!(hw_rbd_p->flags_len & RGE_BSWAP_32(BD_FLAG_HW_OWN))) {
		if ((mp = rge_receive_packet(rgep, slot)) != NULL) {
			*tail = mp;
			tail = &mp->b_next;
		}

		/*
		 * Clear RBD flags
		 */
		hw_rbd_p->flags_len =
		    RGE_BSWAP_32(rgep->rxbuf_size - rgep->head_room);
		HW_RBD_INIT(hw_rbd_p, slot);
		slot = NEXT(slot, RGE_RECV_SLOTS);
		hw_rbd_p = &rgep->rx_ring[slot];
	}

	rgep->rx_next = slot;
	return (head);
}

/*
 * Receive all ready packets.
 */
void
rge_receive(rge_t *rgep)
{
	mblk_t *mp;

	mutex_enter(rgep->rx_lock);
	mp = rge_receive_ring(rgep);
	mutex_exit(rgep->rx_lock);

	if (mp != NULL)
		mac_rx(rgep->mh, NULL, mp);
}


#undef	RGE_DBG
#define	RGE_DBG		RGE_DBG_SEND	/* debug flag for this code	*/


/*
 * ========== Send-side recycle routines ==========
 */
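/*
 * Claim the next free slot in the send ring (under tx_lock) and
 * advance tx_next.  The caller must already have reserved a slot
 * via rge_atomic_reserve(&rgep->tx_free, 1).
 */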
static uint32_t
rge_send_claim(rge_t *rgep)
{
	uint32_t slot;
	uint32_t next;

	mutex_enter(rgep->tx_lock);
	slot = rgep->tx_next;
	next = NEXT(slot, RGE_SEND_SLOTS);
	rgep->tx_next = next;
	rgep->tx_flow++;
	mutex_exit(rgep->tx_lock);

	/*
	 * We check that our invariants still hold:
	 * +	the slot and next indexes are in range
	 * +	the slot must not be the last one (i.e. the *next*
	 *	index must not match the next-recycle index), 'cos
	 *	there must always be at least one free slot in a ring
	 */
	ASSERT(slot < RGE_SEND_SLOTS);
	ASSERT(next < RGE_SEND_SLOTS);
	ASSERT(next != rgep->tc_next);

	return (slot);
}

/*
 * We don't want to call this function after every successful h/w
 * transmit completion in the ISR.  Instead, we call it from rge_send()
 * when few or no free tx BDs remain.
 */
void
rge_send_recycle(rge_t *rgep)
{
	rge_bd_t *hw_sbd_p;
	uint32_t tc_tail;
	uint32_t tc_head;
	uint32_t n;

	mutex_enter(rgep->tc_lock);
	tc_head = rgep->tc_next;
	tc_tail = rgep->tc_tail;
	if (tc_head == tc_tail)
		goto resched;

	do {
		tc_tail = LAST(tc_tail, RGE_SEND_SLOTS);
		hw_sbd_p = &rgep->tx_ring[tc_tail];
		if (tc_tail == tc_head) {
			if (hw_sbd_p->flags_len &
			    RGE_BSWAP_32(BD_FLAG_HW_OWN)) {
				/*
				 * Recycled nothing: bump the watchdog counter,
				 * thus guaranteeing that it's nonzero
				 * (watchdog activated).
				 */
				if (rgep->watchdog == 0)
					rgep->watchdog = 1;
				mutex_exit(rgep->tc_lock);
				return;
			}
			break;
		}
	} while (hw_sbd_p->flags_len & RGE_BSWAP_32(BD_FLAG_HW_OWN));

	/*
	 * Recycled something :-)
	 */
	rgep->tc_next = NEXT(tc_tail, RGE_SEND_SLOTS);
	n = rgep->tc_next - tc_head;
	if (rgep->tc_next < tc_head)
		n += RGE_SEND_SLOTS;
	rge_atomic_renounce(&rgep->tx_free, n);
	rgep->watchdog = 0;
	ASSERT(rgep->tx_free <= RGE_SEND_SLOTS);

resched:
	mutex_exit(rgep->tc_lock);
	if (rgep->resched_needed &&
	    rgep->rge_mac_state == RGE_MAC_STARTED) {
		rgep->resched_needed = B_FALSE;
		mac_tx_update(rgep->mh);
	}
}

/*
 * Send a message by copying it into a preallocated (and premapped) buffer
 */
static void
rge_send_copy(rge_t *rgep, mblk_t *mp, uint16_t tci)
{
	rge_bd_t *hw_sbd_p;
	sw_sbd_t *ssbdp;
	mblk_t *bp;
	char *txb;
	uint32_t slot;
	size_t totlen;
	size_t mblen;
	uint32_t pflags;
	struct ether_header *ethhdr;
	struct ip *ip_hdr;

	/*
	 * IMPORTANT:
	 *	Up to the point where it claims a place, a send_msg()
	 *	routine can indicate failure by returning B_FALSE.  Once it's
	 *	claimed a place, it mustn't fail.
	 *
	 * In this version, there's no setup to be done here, and there's
	 * nothing that can fail, so we can go straight to claiming our
	 * already-reserved place on the train.
	 *
	 * This is the point of no return!
	 */
	slot = rge_send_claim(rgep);
	ssbdp = &rgep->sw_sbds[slot];

	/*
	 * Copy the data into a pre-mapped buffer, which avoids the
	 * overhead (and complication) of mapping/unmapping STREAMS
	 * buffers and keeping hold of them until the DMA has completed.
	 *
	 * Because all buffers are the same size, and larger than the
	 * longest single valid message, we don't have to bother about
	 * splitting the message across multiple buffers either.
	 */
	txb = DMA_VPTR(ssbdp->pbuf);
	totlen = 0;
	bp = mp;
	if (tci != 0) {
		/*
		 * Do not copy the VLAN tag
		 */
		bcopy(bp->b_rptr, txb, 2 * ETHERADDRL);
		txb += 2 * ETHERADDRL;
		totlen += 2 * ETHERADDRL;
		mblen = MBLKL(bp);
		ASSERT(mblen >= 2 * ETHERADDRL + VLAN_TAGSZ);
		mblen -= 2 * ETHERADDRL + VLAN_TAGSZ;
		if ((totlen += mblen) <= rgep->ethmax_size) {
			bcopy(bp->b_rptr + 2 * ETHERADDRL + VLAN_TAGSZ,
			    txb, mblen);
			txb += mblen;
		}
		bp = bp->b_cont;
		rgep->stats.obytes += VLAN_TAGSZ;
	}
	for (; bp != NULL; bp = bp->b_cont) {
		mblen = MBLKL(bp);
		if ((totlen += mblen) <= rgep->ethmax_size) {
			bcopy(bp->b_rptr, txb, mblen);
			txb += mblen;
		}
	}
	rgep->stats.obytes += totlen;
	rgep->stats.tx_pre_ismax = rgep->stats.tx_cur_ismax;
	if (totlen == rgep->ethmax_size)
		rgep->stats.tx_cur_ismax = B_TRUE;
	else
		rgep->stats.tx_cur_ismax = B_FALSE;

	/*
	 * We've reached the end of the chain, and we should have
	 * collected no more than ethmax_size bytes into our buffer.
	 */
	ASSERT(bp == NULL);
	ASSERT(totlen <= rgep->ethmax_size);
	DMA_SYNC(ssbdp->pbuf, DDI_DMA_SYNC_FORDEV);

	/*
	 * Update the hardware send buffer descriptor flags
	 */
	hw_sbd_p = &rgep->tx_ring[slot];
	ASSERT(hw_sbd_p == ssbdp->desc.mem_va);
	hw_sbd_p->flags_len = RGE_BSWAP_32(totlen & SBD_LEN_MASK);
	if (tci != 0) {
		tci = TCI_OS2CHIP(tci);
		hw_sbd_p->vlan_tag = RGE_BSWAP_32(tci);
		hw_sbd_p->vlan_tag |= RGE_BSWAP_32(SBD_VLAN_PKT);
	} else {
		hw_sbd_p->vlan_tag = 0;
	}

	/*
	 * h/w checksum offload flags
	 */
	mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &pflags);
	if (pflags & HCK_FULLCKSUM) {
		ASSERT(totlen >= sizeof (struct ether_header) +
		    sizeof (struct ip));
		ethhdr = (struct ether_header *)(DMA_VPTR(ssbdp->pbuf));
		/*
		 * Is the packet an IP(v4) packet?
		 */
		if (ntohs(ethhdr->ether_type) == ETHERTYPE_IP) {
			ip_hdr = (struct ip *)
			    ((uint8_t *)DMA_VPTR(ssbdp->pbuf) +
			    sizeof (struct ether_header));
			if (ip_hdr->ip_p == IPPROTO_TCP)
				hw_sbd_p->flags_len |=
				    RGE_BSWAP_32(SBD_FLAG_TCP_CKSUM);
			else if (ip_hdr->ip_p == IPPROTO_UDP)
				hw_sbd_p->flags_len |=
				    RGE_BSWAP_32(SBD_FLAG_UDP_CKSUM);
		}
	}
	if (pflags & HCK_IPV4_HDRCKSUM)
		hw_sbd_p->flags_len |= RGE_BSWAP_32(SBD_FLAG_IP_CKSUM);

	HW_SBD_SET(hw_sbd_p, slot);

	/*
	 * We're done.
	 * The message can be freed right away, as we've already
	 * copied the contents ...
	 */
	freemsg(mp);
}

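/*
 * Send a single message: reserve a send BD, extract any VLAN TCI,
 * copy the data into the pre-mapped tx buffer, and record the new
 * tail once the last claim of a burst has been copied.  Returns
 * B_FALSE (with resched_needed set) if no send BD is free.
 */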
static boolean_t
rge_send(rge_t *rgep, mblk_t *mp)
{
	struct ether_vlan_header *ehp;
	uint16_t tci;

	ASSERT(mp->b_next == NULL);

	/*
	 * Try to reserve a place in the transmit ring.
	 */
	if (!rge_atomic_reserve(&rgep->tx_free, 1)) {
		RGE_DEBUG(("rge_send: no free slots"));
		rgep->stats.defer++;
		rgep->resched_needed = B_TRUE;
		return (B_FALSE);
	}

	/*
	 * Determine if the packet is VLAN tagged.
	 */
	ASSERT(MBLKL(mp) >= sizeof (struct ether_header));
	tci = 0;
	ehp = (struct ether_vlan_header *)mp->b_rptr;
	if (ehp->ether_tpid == htons(ETHERTYPE_VLAN))
		tci = ntohs(ehp->ether_tci);

	/*
	 * We've reserved a place :-)
	 * These ASSERTions check that our invariants still hold:
	 *	there must still be at least one free place
	 *	there must be at least one place NOT free (ours!)
	 */
	ASSERT(rgep->tx_free < RGE_SEND_SLOTS);
	rge_send_copy(rgep, mp, tci);

	/*
	 * Trigger chip h/w transmit ...
	 */
	mutex_enter(rgep->tx_lock);
	if (--rgep->tx_flow == 0) {
		DMA_SYNC(rgep->tx_desc, DDI_DMA_SYNC_FORDEV);
		rgep->tc_tail = rgep->tx_next;
	}
	rgep->stats.opackets++;
	mutex_exit(rgep->tx_lock);

	return (B_TRUE);
}

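/*
 * Reschedule (soft interrupt) handler: recycle completed send BDs so
 * that any deferred transmission can be resumed via mac_tx_update().
 */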
uint_t
rge_reschedule(caddr_t arg1, caddr_t arg2)
{
	rge_t *rgep;

	rgep = (rge_t *)arg1;
	_NOTE(ARGUNUSED(arg2))

	rge_send_recycle(rgep);

	if (rgep->chipid.is_pcie && rgep->tx_free != RGE_SEND_SLOTS) {
		/*
		 * It has been observed that on current Realtek PCI-E chips,
		 * the tx request for the second fragment of an upper-layer
		 * packet will be ignored if a hardware transmission is in
		 * progress, and it will not be processed once the tx engine
		 * is idle.  So one solution is to re-issue the request if
		 * there are untransmitted packets after a tx interrupt
		 * occurs.
		 */
		rge_tx_trigger(rgep);
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * rge_m_tx() - send a chain of packets
 */
mblk_t *
rge_m_tx(void *arg, mblk_t *mp)
{
	rge_t *rgep = arg;		/* private device info	*/
	mblk_t *next;
	mblk_t *mp_org = mp;

	ASSERT(mp != NULL);

	rw_enter(rgep->errlock, RW_READER);
	if ((rgep->rge_mac_state != RGE_MAC_STARTED) ||
	    (rgep->rge_chip_state != RGE_CHIP_RUNNING) ||
	    (rgep->param_link_up != LINK_STATE_UP)) {
		rw_exit(rgep->errlock);
		RGE_DEBUG(("rge_m_tx: tx doesn't work"));
		freemsgchain(mp);
		return (NULL);
	}

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		if (!rge_send(rgep, mp)) {
			mp->b_next = next;
			break;
		}

		mp = next;
	}
	if (mp != mp_org) {
		rge_tx_trigger(rgep);
	}
	rw_exit(rgep->errlock);

	return (mp);
}
699