/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "bge_impl.h"

/*
 * The transmit-side code uses an allocation process which is similar
 * to some theme park roller-coaster rides, where riders sit in cars
 * that can go individually, but work better in a train.
 *
 * 1)	RESERVE a place - this doesn't refer to any specific car or
 *	seat, just that you will get a ride.  The attempt to RESERVE a
 *	place can fail if all spaces in all cars are already committed.
 *
 * 2)	Prepare yourself; this may take an arbitrary (but not unbounded)
 *	time, and you can back out at this stage, in which case you must
 *	give up (RENOUNCE) your place.
 *
 * 3)	CLAIM your space - a specific car (the next sequentially
 *	numbered one) is allocated at this stage, and is guaranteed
 *	to be part of the next train to depart.  Once you've done
 *	this, you can't back out, nor wait for any external event
 *	or resource.
 *
 * 4)	Occupy your car - when all CLAIMED cars are OCCUPIED, they
 *	all depart together as a single train!
 *
 * 5)	At the end of the ride, you climb out of the car and RENOUNCE
 *	your right to it, so that it can be recycled for another rider.
 *
 * For each rider, these have to occur in this order, but the riders
 * don't have to stay in the same order at each stage.  In particular,
 * they may overtake each other between RESERVING a place and CLAIMING
 * it, or between CLAIMING and OCCUPYING a space.
 *
 * Once a car is CLAIMED, the train currently being assembled can't go
 * without that car (this guarantees that the cars in a single train
 * make up a consecutively-numbered set).  Therefore, when any train
 * leaves, we know there can't be any riders in transit between CLAIMING
 * and OCCUPYING their cars.  There can be some who have RESERVED but
 * not yet CLAIMED their places.  That's OK, though, because they'll go
 * into the next train.
 */
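
/*
 * Roughly, the stages above map onto this file as follows: a place is
 * RESERVEd by bge_atomic_reserve(&srp->tx_free, 1) in bge_send_serial();
 * the next descriptor slot is CLAIMed and OCCUPIED in bge_send_fill_txbd()
 * (which advances srp->tx_next under tx_lock); the "train departs" when
 * bge_send_serial() writes the new producer index to the chip with
 * bge_mbx_put(); and descriptors are RENOUNCEd again in bge_recycle_ring()
 * via bge_atomic_renounce(&srp->tx_free, n).
 */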

#define	BGE_DBG		BGE_DBG_SEND	/* debug flag for this code	*/

/*
 * ========== Send-side recycle routines ==========
 */

/*
 * Recycle all the completed buffers in the specified send ring up to
 * (but not including) the consumer index in the status block.
 *
 * This function must advance (srp->tc_next) AND adjust (srp->tx_free)
 * to account for the packets it has recycled.
 *
 * This is a trivial version that just does that and nothing more, but
 * it suffices while there's only one method for sending messages (by
 * copying) and that method doesn't need any special per-buffer action
 * for recycling.
 */
static void bge_recycle_ring(bge_t *bgep, send_ring_t *srp);
#pragma	inline(bge_recycle_ring)

static void
bge_recycle_ring(bge_t *bgep, send_ring_t *srp)
{
	sw_sbd_t *ssbdp;
	bge_queue_item_t *buf_item;
	bge_queue_item_t *buf_item_head;
	bge_queue_item_t *buf_item_tail;
	bge_queue_t *txbuf_queue;
	uint64_t slot;
	uint64_t n;

	ASSERT(mutex_owned(srp->tc_lock));

	/*
	 * We're about to release one or more places :-)
	 * These ASSERTions check that our invariants still hold:
	 *	there must always be at least one free place
	 *	at this point, there must be at least one place NOT free
	 *	we're not about to free more places than were claimed!
	 */
	ASSERT(srp->tx_free <= srp->desc.nslots);

	buf_item_head = buf_item_tail = NULL;
	for (n = 0, slot = srp->tc_next; slot != *srp->cons_index_p;
	    slot = NEXT(slot, srp->desc.nslots)) {
		ssbdp = &srp->sw_sbds[slot];
		ASSERT(ssbdp->pbuf != NULL);
		buf_item = ssbdp->pbuf;
		if (buf_item_head == NULL)
			buf_item_head = buf_item_tail = buf_item;
		else {
			buf_item_tail->next = buf_item;
			buf_item_tail = buf_item;
		}
		ssbdp->pbuf = NULL;
		n++;
	}
	if (n == 0)
		return;

	/*
	 * Update recycle index and free tx BD number
	 */
	srp->tc_next = slot;
	ASSERT(srp->tx_free + n <= srp->desc.nslots);
	bge_atomic_renounce(&srp->tx_free, n);

	/*
	 * Reset the watchdog count: to 0 if all buffers are
	 * now free, or to 1 if some are still outstanding.
	 * Note: non-synchronised access here means we may get
	 * the "wrong" answer, but only in a harmless fashion
	 * (i.e. we deactivate the watchdog because all buffers
	 * are apparently free, even though another thread may
	 * have claimed one before we leave here; in this case
	 * the watchdog will restart on the next send() call).
	 */
	bgep->watchdog = srp->tx_free == srp->desc.nslots ? 0 : 1;

	/*
	 * Return tx buffers to buffer push queue
	 */
	txbuf_queue = srp->txbuf_push_queue;
	mutex_enter(txbuf_queue->lock);
	buf_item_tail->next = txbuf_queue->head;
	txbuf_queue->head = buf_item_head;
	txbuf_queue->count += n;
	mutex_exit(txbuf_queue->lock);

	/*
	 * Check if we need to exchange the tx buffer push and pop queues
	 */
	if ((srp->txbuf_pop_queue->count < srp->tx_buffers_low) &&
	    (srp->txbuf_pop_queue->count < txbuf_queue->count)) {
		srp->txbuf_push_queue = srp->txbuf_pop_queue;
		srp->txbuf_pop_queue = txbuf_queue;
	}

	if (srp->tx_flow != 0 || bgep->tx_resched_needed)
		ddi_trigger_softintr(bgep->drain_id);
}

/*
 * Recycle all returned slots in all rings.
 *
 * To give priority to low-numbered rings, whenever we have recycled any
 * slots in any ring except 0, we restart scanning again from ring 0.
 * Thus, for example, if rings 0, 3, and 10 are carrying traffic, the
 * pattern of recycles might go 0, 3, 10, 3, 0, 10, 0:
 *
 *	0	found some - recycle them
 *	1..2					none found
 *	3	found some - recycle them	and restart scan
 *	0..9					none found
 *	10	found some - recycle them	and restart scan
 *	0..2					none found
 *	3	found some more - recycle them	and restart scan
 *	0	found some more - recycle them
 *	1..9					none found
 *	10	found some more - recycle them	and restart scan
 *	0	found some more - recycle them
 *	1..15					none found
 *
 * The routine returns only when a complete scan has been performed
 * without finding any slots to recycle.
 *
 * Note: the expression (BGE_SEND_RINGS_USED > 1) yields a compile-time
 * constant and allows the compiler to optimise away the outer do-loop
 * if only one send ring is being used.
 */
void bge_recycle(bge_t *bgep, bge_status_t *bsp);
#pragma	no_inline(bge_recycle)

void
bge_recycle(bge_t *bgep, bge_status_t *bsp)
{
	send_ring_t *srp;
	uint64_t ring;
	uint64_t tx_rings = bgep->chipid.tx_rings;

restart:
	ring = 0;
	srp = &bgep->send[ring];
	do {
		/*
		 * For each ring, (srp->cons_index_p) points to the
		 * proper index within the status block (which has
		 * already been sync'd by the caller).
		 */
		ASSERT(srp->cons_index_p == SEND_INDEX_P(bsp, ring));

		if (*srp->cons_index_p == srp->tc_next)
			continue;		/* no slots to recycle	*/
		if (mutex_tryenter(srp->tc_lock) == 0)
			continue;		/* already in process	*/
		bge_recycle_ring(bgep, srp);
		mutex_exit(srp->tc_lock);

		/*
		 * Restart from ring 0, if we're not on ring 0 already.
		 * Since the H/W selects send BDs strictly by priority,
		 * always consuming BDs on the higher-priority ring first,
		 * the driver should stay consistent with the H/W and give
		 * lower-numbered rings higher priority.
		 */
		if (tx_rings > 1 && ring > 0)
			goto restart;

		/*
		 * Loop over all rings (if there *are* multiple rings)
		 */
	} while (++srp, ++ring < tx_rings);
}


/*
 * ========== Send-side transmit routines ==========
 */
#define	TCP_CKSUM_OFFSET	16
#define	UDP_CKSUM_OFFSET	6

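/*
 * bge_pseudo_cksum() computes the 16-bit one's-complement sum of the
 * TCP/UDP pseudo-header (source address, destination address, protocol,
 * and L4 length, where L4 length = IP total length - IP header length)
 * and stores it in the TCP or UDP checksum field, so that a chip with
 * only partial-checksum support (see CHIP_FLAG_PARTIAL_CSUM at the call
 * site) can complete the checksum over the L4 header and payload.
 * TCP_CKSUM_OFFSET/UDP_CKSUM_OFFSET are the byte offsets of the checksum
 * field within the respective L4 headers.
 */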
static void
bge_pseudo_cksum(uint8_t *buf)
{
	uint32_t cksum;
	uint16_t iphl;
	uint16_t proto;

	/*
	 * Point to the IP header.
	 */
	buf += sizeof (struct ether_header);

	/*
	 * Calculate the pseudo-header checksum.
	 */
	iphl = 4 * (buf[0] & 0xF);
	cksum = (((uint16_t)buf[2])<<8) + buf[3] - iphl;
	cksum += proto = buf[9];
	cksum += (((uint16_t)buf[12])<<8) + buf[13];
	cksum += (((uint16_t)buf[14])<<8) + buf[15];
	cksum += (((uint16_t)buf[16])<<8) + buf[17];
	cksum += (((uint16_t)buf[18])<<8) + buf[19];
	cksum = (cksum>>16) + (cksum & 0xFFFF);
	cksum = (cksum>>16) + (cksum & 0xFFFF);

	/*
	 * Point to the TCP/UDP header, and
	 * update the checksum field.
	 */
	buf += iphl + ((proto == IPPROTO_TCP) ?
	    TCP_CKSUM_OFFSET : UDP_CKSUM_OFFSET);

	/*
	 * There is a real possibility that the pointer cast below is a
	 * problem (alignment).  This should be fixed when we understand
	 * the code better.  E_BAD_PTR_CAST_ALIGN is added to keep it
	 * clean for now.
	 */
	*(uint16_t *)buf = htons((uint16_t)cksum);
}

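/*
 * Get a free tx buffer for the caller to copy a packet into.
 *
 * Buffers are taken from the ring's "pop" queue; when that is empty we
 * fall back to the "push" queue (which the recycle path refills), and
 * only if both are empty do we try to grow the buffer array (up to
 * tx_array_max).  With the two queues, senders and the recycle path
 * normally take different locks, which should keep contention low.
 * Returns NULL if no buffer can be obtained.
 */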
static bge_queue_item_t *
bge_get_txbuf(bge_t *bgep, send_ring_t *srp)
{
	bge_queue_item_t *txbuf_item;
	bge_queue_t *txbuf_queue;

	txbuf_queue = srp->txbuf_pop_queue;
	mutex_enter(txbuf_queue->lock);
	if (txbuf_queue->count == 0) {
		mutex_exit(txbuf_queue->lock);
		txbuf_queue = srp->txbuf_push_queue;
		mutex_enter(txbuf_queue->lock);
		if (txbuf_queue->count == 0) {
			mutex_exit(txbuf_queue->lock);
			/* Try to allocate more tx buffers */
			if (srp->tx_array < srp->tx_array_max) {
				mutex_enter(srp->tx_lock);
				txbuf_item = bge_alloc_txbuf_array(bgep, srp);
				mutex_exit(srp->tx_lock);
			} else
				txbuf_item = NULL;
			return (txbuf_item);
		}
	}
	txbuf_item = txbuf_queue->head;
	txbuf_queue->head = (bge_queue_item_t *)txbuf_item->next;
	txbuf_queue->count--;
	mutex_exit(txbuf_queue->lock);
	txbuf_item->next = NULL;

	return (txbuf_item);
}

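/*
 * Fill one hardware send BD with an already-prepared tx buffer.
 *
 * In terms of the allocation model above, this is the CLAIM + OCCUPY
 * step: the caller has already RESERVEd a descriptor (srp->tx_free),
 * and here, under tx_lock, we take the next sequential slot (tx_next)
 * and fill in the hardware descriptor for it.
 */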
static void bge_send_fill_txbd(send_ring_t *srp, send_pkt_t *pktp);
#pragma	inline(bge_send_fill_txbd)

static void
bge_send_fill_txbd(send_ring_t *srp, send_pkt_t *pktp)
{
	bge_sbd_t *hw_sbd_p;
	sw_sbd_t *ssbdp;
	bge_queue_item_t *txbuf_item;
	sw_txbuf_t *txbuf;
	uint64_t slot;

	ASSERT(mutex_owned(srp->tx_lock));

	/*
	 * Go straight to claiming our already-reserved places
	 * on the train!
	 */
	ASSERT(pktp->txbuf_item != NULL);
	txbuf_item = pktp->txbuf_item;
	txbuf = txbuf_item->item;
	slot = srp->tx_next;
	ssbdp = &srp->sw_sbds[slot];
	hw_sbd_p = DMA_VPTR(ssbdp->desc);
	hw_sbd_p->flags = 0;
	ASSERT(txbuf->copy_len != 0);
	(void) ddi_dma_sync(txbuf->buf.dma_hdl,  0,
	    txbuf->copy_len, DDI_DMA_SYNC_FORDEV);
	ASSERT(ssbdp->pbuf == NULL);
	ssbdp->pbuf = txbuf_item;
	srp->tx_next = NEXT(slot, srp->desc.nslots);
	pktp->txbuf_item = NULL;

	/*
	 * Set up the hardware send buffer descriptor
	 */
	hw_sbd_p->host_buf_addr = txbuf->buf.cookie.dmac_laddress;
	hw_sbd_p->len = txbuf->copy_len;
	if (pktp->vlan_tci != 0) {
		hw_sbd_p->vlan_tci = pktp->vlan_tci;
		hw_sbd_p->host_buf_addr += VLAN_TAGSZ;
		hw_sbd_p->flags |= SBD_FLAG_VLAN_TAG;
	}
	if (pktp->pflags & HCK_IPV4_HDRCKSUM)
		hw_sbd_p->flags |= SBD_FLAG_IP_CKSUM;
	if (pktp->pflags & HCK_FULLCKSUM)
		hw_sbd_p->flags |= SBD_FLAG_TCP_UDP_CKSUM;
	hw_sbd_p->flags |= SBD_FLAG_PACKET_END;
}

/*
 * Send a message by copying it into a preallocated (and premapped) buffer
 */
static void bge_send_copy(bge_t *bgep, sw_txbuf_t *txbuf, mblk_t *mp);
#pragma	inline(bge_send_copy)

static void
bge_send_copy(bge_t *bgep, sw_txbuf_t *txbuf, mblk_t *mp)
{
	mblk_t *bp;
	uint32_t mblen;
	char *pbuf;

	txbuf->copy_len = 0;
	pbuf = DMA_VPTR(txbuf->buf);
	for (bp = mp; bp != NULL; bp = bp->b_cont) {
		if ((mblen = MBLKL(bp)) == 0)
			continue;
		ASSERT(txbuf->copy_len + mblen <=
		    bgep->chipid.snd_buff_size);
		bcopy(bp->b_rptr, pbuf, mblen);
		pbuf += mblen;
		txbuf->copy_len += mblen;
	}
}

/*
 * Fill the Tx buffer descriptors and trigger the h/w transmission
 */
static void
bge_send_serial(bge_t *bgep, send_ring_t *srp)
{
	send_pkt_t *pktp;
	uint64_t txfill_next;
	uint32_t count;
	uint32_t tx_next;
	sw_sbd_t *ssbdp;
	bge_status_t *bsp;

	/*
	 * Try to hold the tx lock:
	 *	If we are in an interrupt context, use mutex_enter() to
	 *	ensure a quick response for tx in interrupt context;
	 *	otherwise, use mutex_tryenter() to serialize this h/w tx
	 *	BD filling and transmission-triggering task.
	 */
	if (servicing_interrupt() != 0)
		mutex_enter(srp->tx_lock);
	else if (mutex_tryenter(srp->tx_lock) == 0)
		return;		/* already in process	*/

	bsp = DMA_VPTR(bgep->status_block);
	txfill_next = srp->txfill_next;
start_tx:
	tx_next = srp->tx_next;
	ssbdp = &srp->sw_sbds[tx_next];
	for (count = 0; count < bgep->param_drain_max; ++count) {
		pktp = &srp->pktp[txfill_next];
		if (!pktp->tx_ready) {
			if (count == 0)
				srp->tx_block++;
			break;
		}

		/*
		 * If there are not enough BDs: try to recycle more
		 */
		if (srp->tx_free <= 1)
			bge_recycle(bgep, bsp);

		/*
		 * Reserve the required BDs: 1 is enough
		 */
		if (!bge_atomic_reserve(&srp->tx_free, 1)) {
			srp->tx_nobd++;
			break;
		}

		/*
		 * Fill the tx BD
		 */
		bge_send_fill_txbd(srp, pktp);
		txfill_next = NEXT(txfill_next, BGE_SEND_BUF_MAX);
		pktp->tx_ready = B_FALSE;
	}

	/*
	 * Trigger h/w to start transmission.
	 */
	if (count != 0) {
		bge_atomic_sub64(&srp->tx_flow, count);
		if (tx_next + count > srp->desc.nslots) {
			(void) ddi_dma_sync(ssbdp->desc.dma_hdl,  0,
			    (srp->desc.nslots - tx_next) * sizeof (bge_sbd_t),
			    DDI_DMA_SYNC_FORDEV);
			count -= srp->desc.nslots - tx_next;
			ssbdp = &srp->sw_sbds[0];
		}
		(void) ddi_dma_sync(ssbdp->desc.dma_hdl,  0,
		    count*sizeof (bge_sbd_t), DDI_DMA_SYNC_FORDEV);
		bge_mbx_put(bgep, srp->chip_mbx_reg, srp->tx_next);
		srp->txfill_next = txfill_next;
		bgep->watchdog++;
		if (srp->tx_flow != 0 && srp->tx_free > 1)
			goto start_tx;
	}

	mutex_exit(srp->tx_lock);
}

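/*
 * bge_ring_tx() -- send one message on the given ring.
 *
 * Grab a preallocated tx buffer, copy the whole mblk chain into it,
 * strip any VLAN tag, note the checksum-offload flags, claim a packet
 * slot, and then call bge_send_serial() to fill the hardware BDs and
 * kick off transmission.  Returns NULL on success (the message has
 * been copied and freed), or the original mblk if no tx buffer was
 * available, so that the caller can retry later.
 */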
mblk_t *
bge_ring_tx(void *arg, mblk_t *mp)
{
	send_ring_t *srp = arg;
	bge_t *bgep = srp->bgep;
	struct ether_vlan_header *ehp;
	bge_queue_item_t *txbuf_item;
	sw_txbuf_t *txbuf;
	send_pkt_t *pktp;
	uint64_t pkt_slot;
	uint16_t vlan_tci;
	uint32_t pflags;
	char *pbuf;

	ASSERT(mp->b_next == NULL);

	/*
	 * Get a s/w tx buffer first
	 */
	txbuf_item = bge_get_txbuf(bgep, srp);
	if (txbuf_item == NULL) {
		/* no tx buffer available */
		srp->tx_nobuf++;
		bgep->tx_resched_needed = B_TRUE;
		bge_send_serial(bgep, srp);
		return (mp);
	}

	/*
	 * Copy all mp fragments to the pkt buffer
	 */
	txbuf = txbuf_item->item;
	bge_send_copy(bgep, txbuf, mp);

	/*
	 * Determine if the packet is VLAN tagged.
	 */
	ASSERT(txbuf->copy_len >= sizeof (struct ether_header));
	pbuf = DMA_VPTR(txbuf->buf);

	ehp = (void *)pbuf;
	if (ehp->ether_tpid == htons(ETHERTYPE_VLAN)) {
		/* Strip the vlan tag */
		vlan_tci = ntohs(ehp->ether_tci);
		pbuf = memmove(pbuf + VLAN_TAGSZ, pbuf, 2 * ETHERADDRL);
		txbuf->copy_len -= VLAN_TAGSZ;
	} else
		vlan_tci = 0;

	/*
	 * Retrieve checksum offloading info.
	 */
	hcksum_retrieve(mp, NULL, NULL, NULL, NULL, NULL, NULL, &pflags);

	/*
	 * Calculate pseudo checksum if needed.
	 */
	if ((pflags & HCK_FULLCKSUM) &&
	    (bgep->chipid.flags & CHIP_FLAG_PARTIAL_CSUM))
		bge_pseudo_cksum((uint8_t *)pbuf);

	/*
	 * Packet buffer is ready to send: get and fill pkt info
	 */
	pkt_slot = bge_atomic_next(&srp->txpkt_next, BGE_SEND_BUF_MAX);
	pktp = &srp->pktp[pkt_slot];
	ASSERT(pktp->txbuf_item == NULL);
	pktp->txbuf_item = txbuf_item;
	pktp->vlan_tci = vlan_tci;
	pktp->pflags = pflags;
	atomic_inc_64(&srp->tx_flow);
	ASSERT(pktp->tx_ready == B_FALSE);
	pktp->tx_ready = B_TRUE;

	/*
	 * Fill the h/w BDs and trigger the h/w to start transmission
	 */
	bge_send_serial(bgep, srp);

	srp->pushed_bytes += MBLKL(mp);

	/*
	 * We've copied the contents, the message can be freed right away
	 */
	freemsg(mp);
	return (NULL);
}

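/*
 * Send a single message on the default ring (ring 0)
 */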
static mblk_t *
bge_send(bge_t *bgep, mblk_t *mp)
{
	send_ring_t *ring;

	ring = &bgep->send[0];	/* ring 0 */

	return (bge_ring_tx(ring, mp));
}

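/*
 * Soft interrupt handler: drain any packets queued on ring 0 and,
 * once enough tx resources are free again, let the MAC layer know
 * (via mac_tx_update()) that it may resume sending.
 */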
uint_t
bge_send_drain(caddr_t arg)
{
	uint_t ring = 0;	/* use ring 0 */
	bge_t *bgep;
	send_ring_t *srp;

	bgep = (void *)arg;
	BGE_TRACE(("bge_send_drain($%p)", (void *)bgep));

	srp = &bgep->send[ring];
	bge_send_serial(bgep, srp);

	if (bgep->tx_resched_needed &&
	    (srp->tx_flow < srp->tx_buffers_low) &&
	    (bgep->bge_mac_state == BGE_MAC_STARTED)) {
		mac_tx_update(bgep->mh);
		bgep->tx_resched_needed = B_FALSE;
		bgep->tx_resched++;
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * bge_m_tx() - send a chain of packets
 */
mblk_t *
bge_m_tx(void *arg, mblk_t *mp)
{
	bge_t *bgep = arg;		/* private device info	*/
	mblk_t *next;

	BGE_TRACE(("bge_m_tx($%p, $%p)", arg, (void *)mp));

	ASSERT(mp != NULL);
	ASSERT(bgep->bge_mac_state == BGE_MAC_STARTED);

	rw_enter(bgep->errlock, RW_READER);
	if (bgep->bge_chip_state != BGE_CHIP_RUNNING) {
		BGE_DEBUG(("bge_m_tx: chip not running"));
		freemsgchain(mp);
		mp = NULL;
	}

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;

		if ((mp = bge_send(bgep, mp)) != NULL) {
			mp->b_next = next;
			break;
		}

		mp = next;
	}
	rw_exit(bgep->errlock);

	return (mp);
}