/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "bge_impl.h"

#define	U32TOPTR(x)	((void *)(uintptr_t)(uint32_t)(x))
#define	PTRTOU32(x)	((uint32_t)(uintptr_t)(void *)(x))

/*
 * ========== RX side routines ==========
 */

#define	BGE_DBG		BGE_DBG_RECV	/* debug flag for this code	*/

static void bge_refill(bge_t *bgep, buff_ring_t *brp, sw_rbd_t *srbdp);
#pragma	inline(bge_refill)

/*
 * Return the specified buffer (srbdp) to the ring it came from (brp).
 *
 * Note:
 *	If the driver is compiled with only one buffer ring *and* one
 *	return ring, then the buffers must be returned in sequence.
 *	In this case, we don't have to consider anything about the
 *	buffer at all; we can simply advance the cyclic counter.  And
 *	we don't even need the refill mutex <rf_lock>, as the caller
 *	will already be holding the (one-and-only) <rx_lock>.
 *
 *	If the driver supports multiple buffer rings, but only one
 *	return ring, the same still applies (to each buffer ring
 *	separately).
 */
static void
bge_refill(bge_t *bgep, buff_ring_t *brp, sw_rbd_t *srbdp)
{
	uint64_t slot;

	_NOTE(ARGUNUSED(srbdp))

	slot = brp->rf_next;
	brp->rf_next = NEXT(slot, brp->desc.nslots);
	bge_mbx_put(bgep, brp->chip_mbx_reg, slot);
}
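
/*
 * A concrete sketch of the sequencing described above, assuming NEXT()
 * is the usual wrap-around successor macro from the bge headers (an
 * assumption; see bge_impl.h for the real definition), e.g.
 *
 *	#define	NEXT(index, limit)	((index) + 1 == (limit) ? \
 *						0 : (index) + 1)
 *
 * With nslots == 512, say, <rf_next> simply cycles 0, 1, ..., 511,
 * 0, ..., and each bge_mbx_put() write hands <slot> back to the chip
 * so the hardware may refill it.
 */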

static mblk_t *bge_receive_packet(bge_t *bgep, bge_rbd_t *hw_rbd_p,
    recv_ring_t *rrp);
#pragma	inline(bge_receive_packet)

static mblk_t *
bge_receive_packet(bge_t *bgep, bge_rbd_t *hw_rbd_p, recv_ring_t *rrp)
{
	bge_rbd_t hw_rbd;
	buff_ring_t *brp;
	sw_rbd_t *srbdp;
	uchar_t *dp;
	mblk_t *mp;
	uint_t len;
	uint_t minsize;
	uint_t maxsize;
	uint32_t pflags;

	mp = NULL;
	hw_rbd = *hw_rbd_p;

	switch (hw_rbd.flags & (RBD_FLAG_MINI_RING|RBD_FLAG_JUMBO_RING)) {
	case RBD_FLAG_MINI_RING|RBD_FLAG_JUMBO_RING:
	default:
		/* error, this shouldn't happen */
		BGE_PKTDUMP((bgep, &hw_rbd, NULL, "bad ring flags!"));
		goto error;

	case RBD_FLAG_JUMBO_RING:
		brp = &bgep->buff[BGE_JUMBO_BUFF_RING];
		break;

#if	(BGE_BUFF_RINGS_USED > 2)
	case RBD_FLAG_MINI_RING:
		brp = &bgep->buff[BGE_MINI_BUFF_RING];
		break;
#endif	/* BGE_BUFF_RINGS_USED > 2 */

	case 0:
		brp = &bgep->buff[BGE_STD_BUFF_RING];
		break;
	}

	if (hw_rbd.index >= brp->desc.nslots) {
		/* error, this shouldn't happen */
		BGE_PKTDUMP((bgep, &hw_rbd, NULL, "bad ring index!"));
		goto error;
	}

	srbdp = &brp->sw_rbds[hw_rbd.index];
	if (hw_rbd.opaque != srbdp->pbuf.token) {
		/* bogus, drop the packet */
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "bad ring token"));
		goto refill;
	}

	if ((hw_rbd.flags & RBD_FLAG_PACKET_END) == 0) {
		/* bogus, drop the packet */
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "unterminated packet"));
		goto refill;
	}

	if (hw_rbd.flags & RBD_FLAG_FRAME_HAS_ERROR) {
		/* bogus, drop the packet */
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "errored packet"));
		goto refill;
	}

	len = hw_rbd.len;

#ifdef BGE_IPMI_ASF
	/*
	 * When IPMI/ASF is enabled, the VLAN tag must be stripped.
	 */
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG))
		maxsize = bgep->chipid.ethmax_size + ETHERFCSL;
	else
#endif
		/*
		 * The h/w will not strip the VLAN tag from the incoming
		 * packet, as the RECEIVE_MODE_KEEP_VLAN_TAG bit is set
		 * in the RECEIVE_MAC_MODE_REG register.
		 */
		maxsize = bgep->chipid.ethmax_size + VLAN_TAGSZ + ETHERFCSL;
	if (len > maxsize) {
		/* bogus, drop the packet */
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "oversize packet"));
		goto refill;
	}

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG))
		minsize = ETHERMIN + ETHERFCSL - VLAN_TAGSZ;
	else
#endif
		minsize = ETHERMIN + ETHERFCSL;
	if (len < minsize) {
		/* bogus, drop the packet */
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "undersize packet"));
		goto refill;
	}

	/*
	 * Packet looks good; get a buffer to copy it into.
	 * We want to leave some space at the front of the allocated
	 * buffer in case any upstream modules want to prepend some
	 * sort of header.  This also has the side-effect of making
	 * the packet *contents* 4-byte aligned, as required by NCA!
	 */
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
		mp = allocb(BGE_HEADROOM + len + VLAN_TAGSZ, 0);
	} else {
#endif

		mp = allocb(BGE_HEADROOM + len, 0);
#ifdef BGE_IPMI_ASF
	}
#endif
	if (mp == NULL) {
		/* Nothing to do but drop the packet */
		goto refill;
	}
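
	/*
	 * Sketch of the mblk layout produced below (illustrative only;
	 * the offsets follow directly from the code):
	 *
	 *	base			   b_rptr	      b_wptr
	 *	|<----- BGE_HEADROOM ----->|<-- frame data -->|<-FCS->|
	 *
	 * In the non-ASF case b_rptr = base + BGE_HEADROOM; in the
	 * ASF/VLAN case b_rptr backs up by VLAN_TAGSZ so the stripped
	 * tag can be reinserted.  The trailing ETHERFCSL bytes of FCS
	 * are copied but excluded from the message by the b_wptr
	 * setting.
	 */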

	/*
	 * Sync the data and copy it to the STREAMS buffer.
	 */
	DMA_SYNC(srbdp->pbuf, DDI_DMA_SYNC_FORKERNEL);
	if (bge_check_dma_handle(bgep, srbdp->pbuf.dma_hdl) != DDI_FM_OK) {
		bgep->bge_dma_error = B_TRUE;
		bgep->bge_chip_state = BGE_CHIP_ERROR;
		return (NULL);
	}
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
		/*
		 * The VLAN tag was stripped from the incoming packet
		 * in the ASF case, so we reinsert it here.
		 */
		struct ether_vlan_header *ehp;
		mp->b_rptr = dp = mp->b_rptr + BGE_HEADROOM - VLAN_TAGSZ;
		bcopy(DMA_VPTR(srbdp->pbuf), dp, 2 * ETHERADDRL);
		ehp = (void *)dp;
		ehp->ether_tpid = htons(ETHERTYPE_VLAN);
		ehp->ether_tci = htons(hw_rbd.vlan_tci);
		bcopy(((uchar_t *)(DMA_VPTR(srbdp->pbuf))) + 2 * ETHERADDRL,
		    dp + 2 * ETHERADDRL + VLAN_TAGSZ,
		    len - 2 * ETHERADDRL);
	} else {
#endif
		mp->b_rptr = dp = mp->b_rptr + BGE_HEADROOM;
		bcopy(DMA_VPTR(srbdp->pbuf), dp, len);
#ifdef BGE_IPMI_ASF
	}

	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
		mp->b_wptr = dp + len + VLAN_TAGSZ - ETHERFCSL;
	} else
#endif
		mp->b_wptr = dp + len - ETHERFCSL;

	/*
	 * Special check for one specific type of data corruption;
	 * in a good packet, the first 8 bytes are *very* unlikely
	 * to be the same as the second 8 bytes ... but we let the
	 * packet through just in case.
	 */
	if (bcmp(dp, dp+8, 8) == 0)
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "stuttered packet?"));

	pflags = 0;
	if (hw_rbd.flags & RBD_FLAG_TCP_UDP_CHECKSUM)
		pflags |= HCK_FULLCKSUM;
	if (hw_rbd.flags & RBD_FLAG_IP_CHECKSUM)
		pflags |= HCK_IPV4_HDRCKSUM_OK;
	if (pflags != 0)
		mac_hcksum_set(mp, 0, 0, 0, hw_rbd.tcp_udp_cksum, pflags);

	/* Update per-ring rx statistics */
	rrp->rx_pkts++;
	rrp->rx_bytes += len;
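
	/*
	 * Consumer-side note (based on the generic mac(9E) checksum
	 * accessors, not on anything in this file): upstream code reads
	 * these results back with the matching getter, e.g.
	 *
	 *	uint32_t value, flags;
	 *	mac_hcksum_get(mp, NULL, NULL, NULL, &value, &flags);
	 *
	 * HCK_FULLCKSUM passes the chip's full checksum up in <value>
	 * for the stack to verify, while HCK_IPV4_HDRCKSUM_OK asserts
	 * that the IPv4 header checksum was already verified in
	 * hardware.
	 */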

refill:
	/*
	 * Replace the buffer in the ring it came from ...
	 */
	bge_refill(bgep, brp, srbdp);
	return (mp);

error:
	/*
	 * We come here if the integrity of the ring descriptors
	 * (rather than merely packet data) appears corrupted.
	 * The factotum will attempt to reset-and-recover.
	 */
	bgep->bge_chip_state = BGE_CHIP_ERROR;
	bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
	return (NULL);
}

/*
 * Accept the packets received in the specified ring up to
 * (but not including) the producer index in the status block.
 *
 * Returns a chain of mblks containing the received data, to be
 * passed up to mac_rx_ring() (we can't call mac_rx_ring() from
 * here, 'cos we're holding the per-ring receive lock at this point).
 *
 * This function must advance (rrp->rx_next) and write it back to
 * the chip to indicate the packets it has accepted from the ring.
 */
static mblk_t *bge_receive_ring(bge_t *bgep, recv_ring_t *rrp);
#ifndef	DEBUG
#pragma	inline(bge_receive_ring)
#endif

static mblk_t *
bge_receive_ring(bge_t *bgep, recv_ring_t *rrp)
{
	bge_rbd_t *hw_rbd_p;
	uint64_t slot;
	mblk_t *head;
	mblk_t **tail;
	mblk_t *mp;
	int recv_cnt = 0;

	ASSERT(mutex_owned(rrp->rx_lock));

	/*
	 * Sync (all) the receive ring descriptors
	 * before accepting the packets they describe
	 */
	DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORKERNEL);
	if (*rrp->prod_index_p >= rrp->desc.nslots) {
		bgep->bge_chip_state = BGE_CHIP_ERROR;
		bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
		return (NULL);
	}
	if (bge_check_dma_handle(bgep, rrp->desc.dma_hdl) != DDI_FM_OK) {
		rrp->rx_next = *rrp->prod_index_p;
		bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
		bgep->bge_dma_error = B_TRUE;
		bgep->bge_chip_state = BGE_CHIP_ERROR;
		return (NULL);
	}

	hw_rbd_p = DMA_VPTR(rrp->desc);
	head = NULL;
	tail = &head;
	slot = rrp->rx_next;

	while ((slot != *rrp->prod_index_p) && /* Note: volatile	*/
	    (recv_cnt < BGE_MAXPKT_RCVED)) {
		if ((mp = bge_receive_packet(bgep, &hw_rbd_p[slot], rrp))
		    != NULL) {
			*tail = mp;
			tail = &mp->b_next;
			recv_cnt++;
		}
		rrp->rx_next = slot = NEXT(slot, rrp->desc.nslots);
	}

	bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		bgep->bge_chip_state = BGE_CHIP_ERROR;
	return (head);
}
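
/*
 * A sketch of the producer/consumer protocol implemented above:
 *
 *	*rrp->prod_index_p	producer index, advanced by the chip as
 *				it DMAs completed descriptors into the
 *				status block
 *	rrp->rx_next		consumer index, advanced by the loop
 *				above one slot at a time
 *	bge_mbx_put(...)	writes rx_next back to the chip's
 *				mailbox register, releasing the
 *				consumed slots
 *
 * The loop therefore drains the half-open interval
 * [rx_next, *prod_index_p), capped at BGE_MAXPKT_RCVED packets to
 * bound the work done per invocation.
 */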

/*
 * XXX: Poll a particular ring. The implementation is incomplete.
 * Once the ring interrupts are disabled, we also need to do
 * bge_recycle() for the ring and automatically re-enable the ring
 * interrupt if the poll finds no packets in the ring. We need MSI-X
 * interrupt support for this.
 *
 * The basic poll policy is that rings dealing with explicit flows
 * (like TCP or some service) and marked as such should each have
 * their own MSI-X interrupt. bge_intr() should leave that interrupt
 * disabled after an upcall; the ring is then in poll mode. When a
 * poll thread comes down and finds nothing, the MSI-X interrupt is
 * automatically re-enabled. Squeue needs to deal with the race of a
 * new interrupt firing and arriving before the poll thread returns.
 */
mblk_t *
bge_poll_ring(void *arg, int bytes_to_pickup)
{
	recv_ring_t *rrp = arg;
	bge_t *bgep = rrp->bgep;
	bge_rbd_t *hw_rbd_p;
	uint64_t slot;
	mblk_t *head;
	mblk_t **tail;
	mblk_t *mp;
	size_t sz = 0;

	mutex_enter(rrp->rx_lock);

	/*
	 * Sync (all) the receive ring descriptors
	 * before accepting the packets they describe
	 */
	DMA_SYNC(rrp->desc, DDI_DMA_SYNC_FORKERNEL);
	if (*rrp->prod_index_p >= rrp->desc.nslots) {
		bgep->bge_chip_state = BGE_CHIP_ERROR;
		bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
		mutex_exit(rrp->rx_lock);
		return (NULL);
	}
	if (bge_check_dma_handle(bgep, rrp->desc.dma_hdl) != DDI_FM_OK) {
		rrp->rx_next = *rrp->prod_index_p;
		bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
		bgep->bge_dma_error = B_TRUE;
		bgep->bge_chip_state = BGE_CHIP_ERROR;
		mutex_exit(rrp->rx_lock);
		return (NULL);
	}

	hw_rbd_p = DMA_VPTR(rrp->desc);
	head = NULL;
	tail = &head;
	slot = rrp->rx_next;

	/* Note: volatile */
	while ((slot != *rrp->prod_index_p) && (sz <= bytes_to_pickup)) {
		if ((mp = bge_receive_packet(bgep, &hw_rbd_p[slot], rrp))
		    != NULL) {
			*tail = mp;
			sz += msgdsize(mp);
			tail = &mp->b_next;
		}
		rrp->rx_next = slot = NEXT(slot, rrp->desc.nslots);
	}

	bge_mbx_put(bgep, rrp->chip_mbx_reg, rrp->rx_next);
	if (bge_check_acc_handle(bgep, bgep->io_handle) != DDI_FM_OK)
		bgep->bge_chip_state = BGE_CHIP_ERROR;
	mutex_exit(rrp->rx_lock);
	return (head);
}
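
/*
 * Usage sketch (an assumption about the GLDv3 mac layer, not shown in
 * this file): bge_poll_ring() is exported as the ring's mri_poll(9E)
 * entry point, and the mac layer invokes it on a ring in poll mode,
 * roughly as
 *
 *	mblk_t *chain = bge_poll_ring(rrp, bytes_budget);
 *
 * where <bytes_budget> bounds the total msgdsize() of the returned
 * chain (the loop above may overshoot by at most one packet).  Per
 * the XXX comment above, the ring's interrupt is expected to remain
 * disabled while the ring is being polled.
 */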

/*
 * Receive all packets in all rings.
 */
void bge_receive(bge_t *bgep, bge_status_t *bsp);
#pragma	no_inline(bge_receive)

void
bge_receive(bge_t *bgep, bge_status_t *bsp)
{
	recv_ring_t *rrp;
	uint64_t index;
	mblk_t *mp;

	for (index = 0; index < bgep->chipid.rx_rings; index++) {
		/*
		 * Start from the first ring.
		 */
		rrp = &bgep->recv[index];

		/*
		 * For each ring, (rrp->prod_index_p) points to the
		 * proper index within the status block (which has
		 * already been sync'd by the caller)
		 */
		ASSERT(rrp->prod_index_p == RECV_INDEX_P(bsp, index));

		if (*rrp->prod_index_p == rrp->rx_next || rrp->poll_flag)
			continue;	/* no packets, or being polled	*/
		if (mutex_tryenter(rrp->rx_lock) == 0)
			continue;		/* already in process	*/
		mp = bge_receive_ring(bgep, rrp);
		mutex_exit(rrp->rx_lock);

		if (mp != NULL)
			mac_rx_ring(bgep->mh, rrp->ring_handle, mp,
			    rrp->ring_gen_num);
	}
}
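
/*
 * Caller's-eye sketch (an assumption about bge_intr(), which lives
 * elsewhere in the driver): bge_receive() is invoked from the
 * interrupt handler after the status block has been synced, roughly
 *
 *	DMA_SYNC(bgep->status_block, DDI_DMA_SYNC_FORKERNEL);
 *	bsp = DMA_VPTR(bgep->status_block);
 *	bge_receive(bgep, bsp);
 *
 * which is why the loop above can read the per-ring producer indices
 * out of <bsp> without syncing again.
 */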