xref: /titanic_41/usr/src/uts/common/io/sfe/sfe_util.c (revision 0dc2366f7b9f9f36e10909b1e95edbf2a261c2ac)
1f8919bdaSduboff /*
2f8919bdaSduboff  * sfe_util.c: general ethernet mac driver framework version 2.6
3f8919bdaSduboff  *
423d366e3Sduboff  * Copyright (c) 2002-2008 Masayuki Murayama.  All rights reserved.
5f8919bdaSduboff  *
6f8919bdaSduboff  * Redistribution and use in source and binary forms, with or without
7f8919bdaSduboff  * modification, are permitted provided that the following conditions are met:
8f8919bdaSduboff  *
9f8919bdaSduboff  * 1. Redistributions of source code must retain the above copyright notice,
10f8919bdaSduboff  *    this list of conditions and the following disclaimer.
11f8919bdaSduboff  *
12f8919bdaSduboff  * 2. Redistributions in binary form must reproduce the above copyright notice,
13f8919bdaSduboff  *    this list of conditions and the following disclaimer in the documentation
14f8919bdaSduboff  *    and/or other materials provided with the distribution.
15f8919bdaSduboff  *
16f8919bdaSduboff  * 3. Neither the name of the author nor the names of its contributors may be
17f8919bdaSduboff  *    used to endorse or promote products derived from this software without
18f8919bdaSduboff  *    specific prior written permission.
19f8919bdaSduboff  *
20f8919bdaSduboff  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21f8919bdaSduboff  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22f8919bdaSduboff  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23f8919bdaSduboff  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24f8919bdaSduboff  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25f8919bdaSduboff  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26f8919bdaSduboff  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27f8919bdaSduboff  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28f8919bdaSduboff  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29f8919bdaSduboff  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30f8919bdaSduboff  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31f8919bdaSduboff  * DAMAGE.
32f8919bdaSduboff  */
33f8919bdaSduboff 
34f8919bdaSduboff /*
35*0dc2366fSVenugopal Iyer  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
36da14cebeSEric Cheng  * Use is subject to license terms.
37da14cebeSEric Cheng  */
38da14cebeSEric Cheng 
39da14cebeSEric Cheng /*
40f8919bdaSduboff  * System Header files.
41f8919bdaSduboff  */
42f8919bdaSduboff #include <sys/types.h>
43f8919bdaSduboff #include <sys/conf.h>
44f8919bdaSduboff #include <sys/debug.h>
45f8919bdaSduboff #include <sys/kmem.h>
46f8919bdaSduboff #include <sys/vtrace.h>
47f8919bdaSduboff #include <sys/ethernet.h>
48f8919bdaSduboff #include <sys/modctl.h>
49f8919bdaSduboff #include <sys/errno.h>
50f8919bdaSduboff #include <sys/ddi.h>
51f8919bdaSduboff #include <sys/sunddi.h>
52f8919bdaSduboff #include <sys/stream.h>		/* required for MBLK* */
53f8919bdaSduboff #include <sys/strsun.h>		/* required for mionack() */
54f8919bdaSduboff #include <sys/byteorder.h>
55f8919bdaSduboff #include <sys/pci.h>
56f8919bdaSduboff #include <inet/common.h>
57f8919bdaSduboff #include <inet/led.h>
58f8919bdaSduboff #include <inet/mi.h>
59f8919bdaSduboff #include <inet/nd.h>
60f8919bdaSduboff #include <sys/crc32.h>
61f8919bdaSduboff 
62f8919bdaSduboff #include <sys/note.h>
63f8919bdaSduboff 
64f8919bdaSduboff #include "sfe_mii.h"
65f8919bdaSduboff #include "sfe_util.h"
66f8919bdaSduboff 
67f8919bdaSduboff 
68f8919bdaSduboff 
69f8919bdaSduboff extern char ident[];
70f8919bdaSduboff 
71f8919bdaSduboff /* Debugging support */
72f8919bdaSduboff #ifdef GEM_DEBUG_LEVEL
73f8919bdaSduboff static int gem_debug = GEM_DEBUG_LEVEL;
74f8919bdaSduboff #define	DPRINTF(n, args)	if (gem_debug > (n)) cmn_err args
75f8919bdaSduboff #else
76f8919bdaSduboff #define	DPRINTF(n, args)
77f8919bdaSduboff #undef ASSERT
78f8919bdaSduboff #define	ASSERT(x)
79f8919bdaSduboff #endif
80f8919bdaSduboff 
81f8919bdaSduboff #define	IOC_LINESIZE	0x40	/* Is it right for amd64? */
82f8919bdaSduboff 
83f8919bdaSduboff /*
84f8919bdaSduboff  * Useful macros and typedefs
85f8919bdaSduboff  */
86f8919bdaSduboff #define	ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
87f8919bdaSduboff 
88f8919bdaSduboff #define	GET_NET16(p)	((((uint8_t *)(p))[0] << 8)| ((uint8_t *)(p))[1])
89f8919bdaSduboff #define	GET_ETHERTYPE(p)	GET_NET16(((uint8_t *)(p)) + ETHERADDRL*2)
90f8919bdaSduboff 
91f8919bdaSduboff #define	GET_IPTYPEv4(p)	(((uint8_t *)(p))[sizeof (struct ether_header) + 9])
92f8919bdaSduboff #define	GET_IPTYPEv6(p)	(((uint8_t *)(p))[sizeof (struct ether_header) + 6])
93f8919bdaSduboff 
94f8919bdaSduboff 
95f8919bdaSduboff #ifndef INT32_MAX
96f8919bdaSduboff #define	INT32_MAX	0x7fffffff
97f8919bdaSduboff #endif
98f8919bdaSduboff 
99f8919bdaSduboff #define	VTAG_OFF	(ETHERADDRL*2)
100f8919bdaSduboff #ifndef VTAG_SIZE
101f8919bdaSduboff #define	VTAG_SIZE	4
102f8919bdaSduboff #endif
103f8919bdaSduboff #ifndef VTAG_TPID
104f8919bdaSduboff #define	VTAG_TPID	0x8100U
105f8919bdaSduboff #endif
106f8919bdaSduboff 
107f8919bdaSduboff #define	GET_TXBUF(dp, sn)	\
108f8919bdaSduboff 	&(dp)->tx_buf[SLOT((dp)->tx_slots_base + (sn), (dp)->gc.gc_tx_buf_size)]
109f8919bdaSduboff 
110f8919bdaSduboff #ifndef offsetof
111f8919bdaSduboff #define	offsetof(t, m)	((long)&(((t *) 0)->m))
112f8919bdaSduboff #endif
113f8919bdaSduboff #define	TXFLAG_VTAG(flag)	\
114f8919bdaSduboff 	(((flag) & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT)
115f8919bdaSduboff 
116f8919bdaSduboff #define	MAXPKTBUF(dp)	\
117f8919bdaSduboff 	((dp)->mtu + sizeof (struct ether_header) + VTAG_SIZE + ETHERFCSL)
118f8919bdaSduboff 
119f8919bdaSduboff #define	WATCH_INTERVAL_FAST	drv_usectohz(100*1000)	/* 100mS */
12023d366e3Sduboff #define	BOOLEAN(x)	((x) != 0)
121f8919bdaSduboff 
122f8919bdaSduboff /*
 * Macros to distinguish chip generations.
124f8919bdaSduboff  */
125f8919bdaSduboff 
126f8919bdaSduboff /*
127f8919bdaSduboff  * Private functions
128f8919bdaSduboff  */
129f8919bdaSduboff static void gem_mii_start(struct gem_dev *);
130f8919bdaSduboff static void gem_mii_stop(struct gem_dev *);
131f8919bdaSduboff 
132f8919bdaSduboff /* local buffer management */
133f8919bdaSduboff static void gem_nd_setup(struct gem_dev *dp);
134f8919bdaSduboff static void gem_nd_cleanup(struct gem_dev *dp);
135f8919bdaSduboff static int gem_alloc_memory(struct gem_dev *);
136f8919bdaSduboff static void gem_free_memory(struct gem_dev *);
137f8919bdaSduboff static void gem_init_rx_ring(struct gem_dev *);
138f8919bdaSduboff static void gem_init_tx_ring(struct gem_dev *);
139f8919bdaSduboff __INLINE__ static void gem_append_rxbuf(struct gem_dev *, struct rxbuf *);
140f8919bdaSduboff 
141f8919bdaSduboff static void gem_tx_timeout(struct gem_dev *);
142f8919bdaSduboff static void gem_mii_link_watcher(struct gem_dev *dp);
143f8919bdaSduboff static int gem_mac_init(struct gem_dev *dp);
144f8919bdaSduboff static int gem_mac_start(struct gem_dev *dp);
145f8919bdaSduboff static int gem_mac_stop(struct gem_dev *dp, uint_t flags);
146f8919bdaSduboff static void gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp);
147f8919bdaSduboff 
148f8919bdaSduboff static	struct ether_addr	gem_etherbroadcastaddr = {
149f8919bdaSduboff 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
150f8919bdaSduboff };
151f8919bdaSduboff 
152f8919bdaSduboff int gem_speed_value[] = {10, 100, 1000};
153f8919bdaSduboff 
154f8919bdaSduboff /* ============================================================== */
155f8919bdaSduboff /*
156f8919bdaSduboff  * Misc runtime routines
157f8919bdaSduboff  */
158f8919bdaSduboff /* ============================================================== */
159f8919bdaSduboff /*
160f8919bdaSduboff  * Ether CRC calculation according to 21143 data sheet
161f8919bdaSduboff  */
162f8919bdaSduboff uint32_t
gem_ether_crc_le(const uint8_t * addr,int len)163f8919bdaSduboff gem_ether_crc_le(const uint8_t *addr, int len)
164f8919bdaSduboff {
165f8919bdaSduboff 	uint32_t	crc;
166f8919bdaSduboff 
167f8919bdaSduboff 	CRC32(crc, addr, ETHERADDRL, 0xffffffffU, crc32_table);
168f8919bdaSduboff 	return (crc);
169f8919bdaSduboff }
170f8919bdaSduboff 
/*
 * gem_ether_crc_be: compute the big-endian (non-reflected) ethernet CRC
 * over the first `len' octets at `addr'.  Bit-serial implementation:
 * data bits are consumed LSB first and folded into an MSB-first shift
 * register using the standard ethernet polynomial.
 */
uint32_t
gem_ether_crc_be(const uint8_t *addr, int len)
{
#define	CRC32_POLY_BE	0x04c11db7
	uint32_t	reg = 0xffffffff;
	int		pos;

	for (pos = 0; pos < len; pos++) {
		unsigned int	octet = addr[pos];
		int		nbits;

		for (nbits = 8; nbits > 0; nbits--) {
			uint32_t	feedback;

			/* feedback = top register bit XOR next data bit */
			feedback = ((reg >> 31) ^ octet) & 1;
			reg = (reg << 1) ^ (feedback ? CRC32_POLY_BE : 0);
			octet >>= 1;
		}
	}
	return (reg);
#undef	CRC32_POLY_BE
}
190f8919bdaSduboff 
191f8919bdaSduboff int
gem_prop_get_int(struct gem_dev * dp,char * prop_template,int def_val)192f8919bdaSduboff gem_prop_get_int(struct gem_dev *dp, char *prop_template, int def_val)
193f8919bdaSduboff {
194f8919bdaSduboff 	char	propname[32];
195f8919bdaSduboff 
196f8919bdaSduboff 	(void) sprintf(propname, prop_template, dp->name);
197f8919bdaSduboff 
198f8919bdaSduboff 	return (ddi_prop_get_int(DDI_DEV_T_ANY, dp->dip,
199f8919bdaSduboff 	    DDI_PROP_DONTPASS, propname, def_val));
200f8919bdaSduboff }
201f8919bdaSduboff 
/*
 * gem_population: return the number of set bits in x (population count).
 */
static int
gem_population(uint32_t x)
{
	int	i;
	int	cnt;

	cnt = 0;
	for (i = 0; i < 32; i++) {
		/*
		 * Use an unsigned constant for the mask: (1 << 31) on a
		 * signed int is undefined behavior in C.
		 */
		if (x & (1U << i)) {
			cnt++;
		}
	}
	return (cnt);
}
216f8919bdaSduboff 
21723d366e3Sduboff #ifdef GEM_DEBUG_LEVEL
21823d366e3Sduboff #ifdef GEM_DEBUG_VLAN
/*
 * gem_dump_packet: debug-only helper; format a one-line summary of the
 * ethernet frame held in mblk chain `mp' (addresses, optional VLAN tag,
 * ethertype, per-mblk lengths, and IP/TCP/UDP header fields) and emit
 * it via cmn_err().  When check_cksum is true, the TCP/UDP checksum is
 * verified with ip_cksum() and the result ("ok"/"ng") is appended.
 * Compiled only when both GEM_DEBUG_LEVEL and GEM_DEBUG_VLAN are set.
 */
static void
gem_dump_packet(struct gem_dev *dp, char *title, mblk_t *mp,
    boolean_t check_cksum)
{
	char	msg[180];
	uint8_t	buf[18+20+20];	/* ether + vtag, IP, and TCP headers */
	uint8_t	*p;
	size_t	offset;
	uint_t	ethertype;
	uint_t	proto;
	uint_t	ipproto = 0;
	uint_t	iplen;
	uint_t	iphlen;
	uint_t	tcplen;
	uint_t	udplen;
	uint_t	cksum;
	int	rest;
	int	len;
	char	*bp;
	mblk_t	*tp;
	extern uint_t	ip_cksum(mblk_t *, int, uint32_t);

	msg[0] = 0;
	bp = msg;

	/* linearize the leading headers of the mblk chain into buf[] */
	rest = sizeof (buf);
	offset = 0;
	for (tp = mp; tp; tp = tp->b_cont) {
		len = tp->b_wptr - tp->b_rptr;
		len = min(rest, len);
		bcopy(tp->b_rptr, &buf[offset], len);
		rest -= len;
		offset += len;
		if (rest == 0) {
			break;
		}
	}

	offset = 0;
	p = &buf[offset];

	/* ethernet source and destination addresses */
	sprintf(bp,
	    "ether: %02x:%02x:%02x:%02x:%02x:%02x"
	    " -> %02x:%02x:%02x:%02x:%02x:%02x",
	    p[6], p[7], p[8], p[9], p[10], p[11],
	    p[0], p[1], p[2], p[3], p[4], p[5]);
	bp = &msg[strlen(msg)];

	/* vlan tag and ethertype */
	ethertype = GET_ETHERTYPE(p);
	if (ethertype == VTAG_TPID) {
		sprintf(bp, " vtag:0x%04x", GET_NET16(&p[14]));
		bp = &msg[strlen(msg)];

		/* skip over the tag to reach the real ethertype */
		offset += VTAG_SIZE;
		p = &buf[offset];
		ethertype = GET_ETHERTYPE(p);
	}
	sprintf(bp, " type:%04x", ethertype);
	bp = &msg[strlen(msg)];

	/* ethernet packet length, with per-mblk breakdown if chained */
	sprintf(bp, " mblklen:%d", msgdsize(mp));
	bp = &msg[strlen(msg)];
	if (mp->b_cont) {
		sprintf(bp, "(");
		bp = &msg[strlen(msg)];
		for (tp = mp; tp; tp = tp->b_cont) {
			if (tp == mp) {
				sprintf(bp, "%d", tp->b_wptr - tp->b_rptr);
			} else {
				sprintf(bp, "+%d", tp->b_wptr - tp->b_rptr);
			}
			bp = &msg[strlen(msg)];
		}
		sprintf(bp, ")");
		bp = &msg[strlen(msg)];
	}

	if (ethertype != ETHERTYPE_IP) {
		goto x;
	}

	/* ip addresses, protocol and total length */
	offset += sizeof (struct ether_header);
	p = &buf[offset];
	ipproto = p[9];
	iplen = GET_NET16(&p[2]);
	sprintf(bp, ", ip: %d.%d.%d.%d -> %d.%d.%d.%d proto:%d iplen:%d",
	    p[12], p[13], p[14], p[15],
	    p[16], p[17], p[18], p[19],
	    ipproto, iplen);
	bp = (void *)&msg[strlen(msg)];

	iphlen = (p[0] & 0xf) * 4;

	/* partial cksum for the pseudo header (src, dst, protocol) */
	cksum = *(uint16_t *)&p[12];
	cksum += *(uint16_t *)&p[14];
	cksum += *(uint16_t *)&p[16];
	cksum += *(uint16_t *)&p[18];
	cksum += BE_16(ipproto);

	/* tcp or udp protocol header */
	offset += iphlen;
	p = &buf[offset];
	if (ipproto == IPPROTO_TCP) {
		tcplen = iplen - iphlen;
		sprintf(bp, ", tcp: len:%d cksum:%x",
		    tcplen, GET_NET16(&p[16]));
		bp = (void *)&msg[strlen(msg)];

		if (check_cksum) {
			/* finish the pseudo header with the tcp length */
			cksum += BE_16(tcplen);
			cksum = (uint16_t)ip_cksum(mp, offset, cksum);
			sprintf(bp, " (%s)",
			    (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
			bp = (void *)&msg[strlen(msg)];
		}
	} else if (ipproto == IPPROTO_UDP) {
		udplen = GET_NET16(&p[4]);
		sprintf(bp, ", udp: len:%d cksum:%x",
		    udplen, GET_NET16(&p[6]));
		bp = (void *)&msg[strlen(msg)];

		/* a zero udp checksum means "not computed"; skip the check */
		if (GET_NET16(&p[6]) && check_cksum) {
			cksum += *(uint16_t *)&p[4];
			cksum = (uint16_t)ip_cksum(mp, offset, cksum);
			sprintf(bp, " (%s)",
			    (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
			bp = (void *)&msg[strlen(msg)];
		}
	}
x:
	cmn_err(CE_CONT, "!%s: %s: %s", dp->name, title, msg);
}
35623d366e3Sduboff #endif /* GEM_DEBUG_VLAN */
35723d366e3Sduboff #endif /* GEM_DEBUG_LEVEL */
35823d366e3Sduboff 
359f8919bdaSduboff /* ============================================================== */
360f8919bdaSduboff /*
361f8919bdaSduboff  * IO cache flush
362f8919bdaSduboff  */
363f8919bdaSduboff /* ============================================================== */
364f8919bdaSduboff __INLINE__ void
gem_rx_desc_dma_sync(struct gem_dev * dp,int head,int nslot,int how)365f8919bdaSduboff gem_rx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
366f8919bdaSduboff {
367f8919bdaSduboff 	int	n;
368f8919bdaSduboff 	int	m;
369f8919bdaSduboff 	int	rx_desc_unit_shift = dp->gc.gc_rx_desc_unit_shift;
370f8919bdaSduboff 
371f8919bdaSduboff 	/* sync active descriptors */
372f8919bdaSduboff 	if (rx_desc_unit_shift < 0 || nslot == 0) {
373f8919bdaSduboff 		/* no rx descriptor ring */
374f8919bdaSduboff 		return;
375f8919bdaSduboff 	}
376f8919bdaSduboff 
377f8919bdaSduboff 	n = dp->gc.gc_rx_ring_size - head;
378f8919bdaSduboff 	if ((m = nslot - n) > 0) {
379f8919bdaSduboff 		(void) ddi_dma_sync(dp->desc_dma_handle,
380f8919bdaSduboff 		    (off_t)0,
381f8919bdaSduboff 		    (size_t)(m << rx_desc_unit_shift),
382f8919bdaSduboff 		    how);
383f8919bdaSduboff 		nslot = n;
384f8919bdaSduboff 	}
385f8919bdaSduboff 
386f8919bdaSduboff 	(void) ddi_dma_sync(dp->desc_dma_handle,
387f8919bdaSduboff 	    (off_t)(head << rx_desc_unit_shift),
388f8919bdaSduboff 	    (size_t)(nslot << rx_desc_unit_shift),
389f8919bdaSduboff 	    how);
390f8919bdaSduboff }
391f8919bdaSduboff 
392f8919bdaSduboff __INLINE__ void
gem_tx_desc_dma_sync(struct gem_dev * dp,int head,int nslot,int how)393f8919bdaSduboff gem_tx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
394f8919bdaSduboff {
395f8919bdaSduboff 	int	n;
396f8919bdaSduboff 	int	m;
397f8919bdaSduboff 	int	tx_desc_unit_shift = dp->gc.gc_tx_desc_unit_shift;
398f8919bdaSduboff 
399f8919bdaSduboff 	/* sync active descriptors */
400f8919bdaSduboff 	if (tx_desc_unit_shift < 0 || nslot == 0) {
401f8919bdaSduboff 		/* no tx descriptor ring */
402f8919bdaSduboff 		return;
403f8919bdaSduboff 	}
404f8919bdaSduboff 
405f8919bdaSduboff 	n = dp->gc.gc_tx_ring_size - head;
406f8919bdaSduboff 	if ((m = nslot - n) > 0) {
407f8919bdaSduboff 		(void) ddi_dma_sync(dp->desc_dma_handle,
408f8919bdaSduboff 		    (off_t)(dp->tx_ring_dma - dp->rx_ring_dma),
409f8919bdaSduboff 		    (size_t)(m << tx_desc_unit_shift),
410f8919bdaSduboff 		    how);
411f8919bdaSduboff 		nslot = n;
412f8919bdaSduboff 	}
413f8919bdaSduboff 
414f8919bdaSduboff 	(void) ddi_dma_sync(dp->desc_dma_handle,
415f8919bdaSduboff 	    (off_t)((head << tx_desc_unit_shift)
416f8919bdaSduboff 	    + (dp->tx_ring_dma - dp->rx_ring_dma)),
417f8919bdaSduboff 	    (size_t)(nslot << tx_desc_unit_shift),
418f8919bdaSduboff 	    how);
419f8919bdaSduboff }
420f8919bdaSduboff 
421f8919bdaSduboff static void
gem_rx_start_default(struct gem_dev * dp,int head,int nslot)422f8919bdaSduboff gem_rx_start_default(struct gem_dev *dp, int head, int nslot)
423f8919bdaSduboff {
424f8919bdaSduboff 	gem_rx_desc_dma_sync(dp,
425f8919bdaSduboff 	    SLOT(head, dp->gc.gc_rx_ring_size), nslot,
426f8919bdaSduboff 	    DDI_DMA_SYNC_FORDEV);
427f8919bdaSduboff }
428f8919bdaSduboff 
429f8919bdaSduboff /* ============================================================== */
430f8919bdaSduboff /*
431f8919bdaSduboff  * Buffer management
432f8919bdaSduboff  */
433f8919bdaSduboff /* ============================================================== */
/*
 * gem_dump_txbuf: log the current state of the tx bookkeeping pointers
 * (active, softq, free, descriptor and interrupt positions) to help
 * diagnose tx stalls.  Each position is reported as "index[slot]" with
 * the queue occupancy in parentheses.  `level' is the cmn_err() level.
 */
static void
gem_dump_txbuf(struct gem_dev *dp, int level, const char *title)
{
	cmn_err(level,
	    "!%s: %s: tx_active: %d[%d] %d[%d] (+%d), "
	    "tx_softq: %d[%d] %d[%d] (+%d), "
	    "tx_free: %d[%d] %d[%d] (+%d), "
	    "tx_desc: %d[%d] %d[%d] (+%d), "
	    "intr: %d[%d] (+%d), ",
	    dp->name, title,
	    dp->tx_active_head,
	    SLOT(dp->tx_active_head, dp->gc.gc_tx_buf_size),
	    dp->tx_active_tail,
	    SLOT(dp->tx_active_tail, dp->gc.gc_tx_buf_size),
	    dp->tx_active_tail - dp->tx_active_head,
	    dp->tx_softq_head,
	    SLOT(dp->tx_softq_head, dp->gc.gc_tx_buf_size),
	    dp->tx_softq_tail,
	    SLOT(dp->tx_softq_tail, dp->gc.gc_tx_buf_size),
	    dp->tx_softq_tail - dp->tx_softq_head,
	    dp->tx_free_head,
	    SLOT(dp->tx_free_head, dp->gc.gc_tx_buf_size),
	    dp->tx_free_tail,
	    SLOT(dp->tx_free_tail, dp->gc.gc_tx_buf_size),
	    dp->tx_free_tail - dp->tx_free_head,
	    dp->tx_desc_head,
	    SLOT(dp->tx_desc_head, dp->gc.gc_tx_ring_size),
	    dp->tx_desc_tail,
	    SLOT(dp->tx_desc_tail, dp->gc.gc_tx_ring_size),
	    dp->tx_desc_tail - dp->tx_desc_head,
	    dp->tx_desc_intr,
	    SLOT(dp->tx_desc_intr, dp->gc.gc_tx_ring_size),
	    dp->tx_desc_intr - dp->tx_desc_head);
}
468f8919bdaSduboff 
469f8919bdaSduboff static void
gem_free_rxbuf(struct rxbuf * rbp)470f8919bdaSduboff gem_free_rxbuf(struct rxbuf *rbp)
471f8919bdaSduboff {
472f8919bdaSduboff 	struct gem_dev	*dp;
473f8919bdaSduboff 
474f8919bdaSduboff 	dp = rbp->rxb_devp;
475f8919bdaSduboff 	ASSERT(mutex_owned(&dp->intrlock));
476f8919bdaSduboff 	rbp->rxb_next = dp->rx_buf_freelist;
477f8919bdaSduboff 	dp->rx_buf_freelist = rbp;
478f8919bdaSduboff 	dp->rx_buf_freecnt++;
479f8919bdaSduboff }
480f8919bdaSduboff 
481f8919bdaSduboff /*
 * gem_get_rxbuf: supply a receive buffer which has been mapped into
483f8919bdaSduboff  * DMA space.
484f8919bdaSduboff  */
485f8919bdaSduboff struct rxbuf *
gem_get_rxbuf(struct gem_dev * dp,int cansleep)486f8919bdaSduboff gem_get_rxbuf(struct gem_dev *dp, int cansleep)
487f8919bdaSduboff {
488f8919bdaSduboff 	struct rxbuf		*rbp;
489f8919bdaSduboff 	uint_t			count = 0;
490f8919bdaSduboff 	int			i;
491f8919bdaSduboff 	int			err;
492f8919bdaSduboff 
493f8919bdaSduboff 	ASSERT(mutex_owned(&dp->intrlock));
494f8919bdaSduboff 
495f8919bdaSduboff 	DPRINTF(3, (CE_CONT, "!gem_get_rxbuf: called freecnt:%d",
496f8919bdaSduboff 	    dp->rx_buf_freecnt));
497f8919bdaSduboff 	/*
498f8919bdaSduboff 	 * Get rx buffer management structure
499f8919bdaSduboff 	 */
500f8919bdaSduboff 	rbp = dp->rx_buf_freelist;
501f8919bdaSduboff 	if (rbp) {
502f8919bdaSduboff 		/* get one from the recycle list */
503f8919bdaSduboff 		ASSERT(dp->rx_buf_freecnt > 0);
504f8919bdaSduboff 
505f8919bdaSduboff 		dp->rx_buf_freelist = rbp->rxb_next;
506f8919bdaSduboff 		dp->rx_buf_freecnt--;
507f8919bdaSduboff 		rbp->rxb_next = NULL;
508f8919bdaSduboff 		return (rbp);
509f8919bdaSduboff 	}
510f8919bdaSduboff 
511f8919bdaSduboff 	/*
512f8919bdaSduboff 	 * Allocate a rx buffer management structure
513f8919bdaSduboff 	 */
514f8919bdaSduboff 	rbp = kmem_zalloc(sizeof (*rbp), cansleep ? KM_SLEEP : KM_NOSLEEP);
515f8919bdaSduboff 	if (rbp == NULL) {
516f8919bdaSduboff 		/* no memory */
517f8919bdaSduboff 		return (NULL);
518f8919bdaSduboff 	}
519f8919bdaSduboff 
520f8919bdaSduboff 	/*
521f8919bdaSduboff 	 * Prepare a back pointer to the device structure which will be
522f8919bdaSduboff 	 * refered on freeing the buffer later.
523f8919bdaSduboff 	 */
524f8919bdaSduboff 	rbp->rxb_devp = dp;
525f8919bdaSduboff 
526f8919bdaSduboff 	/* allocate a dma handle for rx data buffer */
527f8919bdaSduboff 	if ((err = ddi_dma_alloc_handle(dp->dip,
528f8919bdaSduboff 	    &dp->gc.gc_dma_attr_rxbuf,
529f8919bdaSduboff 	    (cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT),
530f8919bdaSduboff 	    NULL, &rbp->rxb_dh)) != DDI_SUCCESS) {
531f8919bdaSduboff 
532f8919bdaSduboff 		cmn_err(CE_WARN,
533f8919bdaSduboff 		    "!%s: %s: ddi_dma_alloc_handle:1 failed, err=%d",
534f8919bdaSduboff 		    dp->name, __func__, err);
535f8919bdaSduboff 
536f8919bdaSduboff 		kmem_free(rbp, sizeof (struct rxbuf));
537f8919bdaSduboff 		return (NULL);
538f8919bdaSduboff 	}
539f8919bdaSduboff 
540f8919bdaSduboff 	/* allocate a bounce buffer for rx */
541f8919bdaSduboff 	if ((err = ddi_dma_mem_alloc(rbp->rxb_dh,
542f8919bdaSduboff 	    ROUNDUP(dp->rx_buf_len, IOC_LINESIZE),
543f8919bdaSduboff 	    &dp->gc.gc_buf_attr,
544f8919bdaSduboff 		/*
545f8919bdaSduboff 		 * if the nic requires a header at the top of receive buffers,
546f8919bdaSduboff 		 * it may access the rx buffer randomly.
547f8919bdaSduboff 		 */
548f8919bdaSduboff 	    (dp->gc.gc_rx_header_len > 0)
549f8919bdaSduboff 	    ? DDI_DMA_CONSISTENT : DDI_DMA_STREAMING,
550f8919bdaSduboff 	    cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
551f8919bdaSduboff 	    NULL,
552f8919bdaSduboff 	    &rbp->rxb_buf, &rbp->rxb_buf_len,
553f8919bdaSduboff 	    &rbp->rxb_bah)) != DDI_SUCCESS) {
554f8919bdaSduboff 
555f8919bdaSduboff 		cmn_err(CE_WARN,
556f8919bdaSduboff 		    "!%s: %s: ddi_dma_mem_alloc: failed, err=%d",
557f8919bdaSduboff 		    dp->name, __func__, err);
558f8919bdaSduboff 
559f8919bdaSduboff 		ddi_dma_free_handle(&rbp->rxb_dh);
560f8919bdaSduboff 		kmem_free(rbp, sizeof (struct rxbuf));
561f8919bdaSduboff 		return (NULL);
562f8919bdaSduboff 	}
563f8919bdaSduboff 
564f8919bdaSduboff 	/* Mapin the bounce buffer into the DMA space */
565f8919bdaSduboff 	if ((err = ddi_dma_addr_bind_handle(rbp->rxb_dh,
566f8919bdaSduboff 	    NULL, rbp->rxb_buf, dp->rx_buf_len,
567f8919bdaSduboff 	    ((dp->gc.gc_rx_header_len > 0)
568f8919bdaSduboff 	    ?(DDI_DMA_RDWR | DDI_DMA_CONSISTENT)
569f8919bdaSduboff 	    :(DDI_DMA_READ | DDI_DMA_STREAMING)),
570f8919bdaSduboff 	    cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
571f8919bdaSduboff 	    NULL,
572f8919bdaSduboff 	    rbp->rxb_dmacookie,
573f8919bdaSduboff 	    &count)) != DDI_DMA_MAPPED) {
574f8919bdaSduboff 
575f8919bdaSduboff 		ASSERT(err != DDI_DMA_INUSE);
576f8919bdaSduboff 		DPRINTF(0, (CE_WARN,
577f8919bdaSduboff 		    "!%s: ddi_dma_addr_bind_handle: failed, err=%d",
578f8919bdaSduboff 		    dp->name, __func__, err));
579f8919bdaSduboff 
580f8919bdaSduboff 		/*
581f8919bdaSduboff 		 * we failed to allocate a dma resource
582f8919bdaSduboff 		 * for the rx bounce buffer.
583f8919bdaSduboff 		 */
584f8919bdaSduboff 		ddi_dma_mem_free(&rbp->rxb_bah);
585f8919bdaSduboff 		ddi_dma_free_handle(&rbp->rxb_dh);
586f8919bdaSduboff 		kmem_free(rbp, sizeof (struct rxbuf));
587f8919bdaSduboff 		return (NULL);
588f8919bdaSduboff 	}
589f8919bdaSduboff 
590f8919bdaSduboff 	/* correct the rest of the DMA mapping */
591f8919bdaSduboff 	for (i = 1; i < count; i++) {
592f8919bdaSduboff 		ddi_dma_nextcookie(rbp->rxb_dh, &rbp->rxb_dmacookie[i]);
593f8919bdaSduboff 	}
594f8919bdaSduboff 	rbp->rxb_nfrags = count;
595f8919bdaSduboff 
596f8919bdaSduboff 	/* Now we successfully prepared an rx buffer */
597f8919bdaSduboff 	dp->rx_buf_allocated++;
598f8919bdaSduboff 
599f8919bdaSduboff 	return (rbp);
600f8919bdaSduboff }
601f8919bdaSduboff 
602f8919bdaSduboff /* ============================================================== */
603f8919bdaSduboff /*
604f8919bdaSduboff  * memory resource management
605f8919bdaSduboff  */
606f8919bdaSduboff /* ============================================================== */
607f8919bdaSduboff static int
gem_alloc_memory(struct gem_dev * dp)608f8919bdaSduboff gem_alloc_memory(struct gem_dev *dp)
609f8919bdaSduboff {
610f8919bdaSduboff 	caddr_t			ring;
611f8919bdaSduboff 	caddr_t			buf;
612f8919bdaSduboff 	size_t			req_size;
613f8919bdaSduboff 	size_t			ring_len;
614f8919bdaSduboff 	size_t			buf_len;
615f8919bdaSduboff 	ddi_dma_cookie_t	ring_cookie;
616f8919bdaSduboff 	ddi_dma_cookie_t	buf_cookie;
617f8919bdaSduboff 	uint_t			count;
618f8919bdaSduboff 	int			i;
619f8919bdaSduboff 	int			err;
620f8919bdaSduboff 	struct txbuf		*tbp;
621f8919bdaSduboff 	int			tx_buf_len;
622f8919bdaSduboff 	ddi_dma_attr_t		dma_attr_txbounce;
623f8919bdaSduboff 
624f8919bdaSduboff 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
625f8919bdaSduboff 
626f8919bdaSduboff 	dp->desc_dma_handle = NULL;
627f8919bdaSduboff 	req_size = dp->rx_desc_size + dp->tx_desc_size + dp->gc.gc_io_area_size;
628f8919bdaSduboff 
629f8919bdaSduboff 	if (req_size > 0) {
630f8919bdaSduboff 		/*
631f8919bdaSduboff 		 * Alloc RX/TX descriptors and a io area.
632f8919bdaSduboff 		 */
633f8919bdaSduboff 		if ((err = ddi_dma_alloc_handle(dp->dip,
634f8919bdaSduboff 		    &dp->gc.gc_dma_attr_desc,
635f8919bdaSduboff 		    DDI_DMA_SLEEP, NULL,
636f8919bdaSduboff 		    &dp->desc_dma_handle)) != DDI_SUCCESS) {
637f8919bdaSduboff 			cmn_err(CE_WARN,
638f8919bdaSduboff 			    "!%s: %s: ddi_dma_alloc_handle failed: %d",
639f8919bdaSduboff 			    dp->name, __func__, err);
640f8919bdaSduboff 			return (ENOMEM);
641f8919bdaSduboff 		}
642f8919bdaSduboff 
643f8919bdaSduboff 		if ((err = ddi_dma_mem_alloc(dp->desc_dma_handle,
644f8919bdaSduboff 		    req_size, &dp->gc.gc_desc_attr,
645f8919bdaSduboff 		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
646f8919bdaSduboff 		    &ring, &ring_len,
647f8919bdaSduboff 		    &dp->desc_acc_handle)) != DDI_SUCCESS) {
648f8919bdaSduboff 			cmn_err(CE_WARN,
649f8919bdaSduboff 			    "!%s: %s: ddi_dma_mem_alloc failed: "
650f8919bdaSduboff 			    "ret %d, request size: %d",
651f8919bdaSduboff 			    dp->name, __func__, err, (int)req_size);
652f8919bdaSduboff 			ddi_dma_free_handle(&dp->desc_dma_handle);
653f8919bdaSduboff 			return (ENOMEM);
654f8919bdaSduboff 		}
655f8919bdaSduboff 
656f8919bdaSduboff 		if ((err = ddi_dma_addr_bind_handle(dp->desc_dma_handle,
657f8919bdaSduboff 		    NULL, ring, ring_len,
658f8919bdaSduboff 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
659f8919bdaSduboff 		    DDI_DMA_SLEEP, NULL,
660f8919bdaSduboff 		    &ring_cookie, &count)) != DDI_SUCCESS) {
661f8919bdaSduboff 			ASSERT(err != DDI_DMA_INUSE);
662f8919bdaSduboff 			cmn_err(CE_WARN,
663f8919bdaSduboff 			    "!%s: %s: ddi_dma_addr_bind_handle failed: %d",
664f8919bdaSduboff 			    dp->name, __func__, err);
665f8919bdaSduboff 			ddi_dma_mem_free(&dp->desc_acc_handle);
666f8919bdaSduboff 			ddi_dma_free_handle(&dp->desc_dma_handle);
667f8919bdaSduboff 			return (ENOMEM);
668f8919bdaSduboff 		}
669f8919bdaSduboff 		ASSERT(count == 1);
670f8919bdaSduboff 
671f8919bdaSduboff 		/* set base of rx descriptor ring */
672f8919bdaSduboff 		dp->rx_ring = ring;
673f8919bdaSduboff 		dp->rx_ring_dma = ring_cookie.dmac_laddress;
674f8919bdaSduboff 
675f8919bdaSduboff 		/* set base of tx descriptor ring */
676f8919bdaSduboff 		dp->tx_ring = dp->rx_ring + dp->rx_desc_size;
677f8919bdaSduboff 		dp->tx_ring_dma = dp->rx_ring_dma + dp->rx_desc_size;
678f8919bdaSduboff 
679f8919bdaSduboff 		/* set base of io area */
680f8919bdaSduboff 		dp->io_area = dp->tx_ring + dp->tx_desc_size;
681f8919bdaSduboff 		dp->io_area_dma = dp->tx_ring_dma + dp->tx_desc_size;
682f8919bdaSduboff 	}
683f8919bdaSduboff 
684f8919bdaSduboff 	/*
685f8919bdaSduboff 	 * Prepare DMA resources for tx packets
686f8919bdaSduboff 	 */
687f8919bdaSduboff 	ASSERT(dp->gc.gc_tx_buf_size > 0);
688f8919bdaSduboff 
689f8919bdaSduboff 	/* Special dma attribute for tx bounce buffers */
690f8919bdaSduboff 	dma_attr_txbounce = dp->gc.gc_dma_attr_txbuf;
691f8919bdaSduboff 	dma_attr_txbounce.dma_attr_sgllen = 1;
692f8919bdaSduboff 	dma_attr_txbounce.dma_attr_align =
693f8919bdaSduboff 	    max(dma_attr_txbounce.dma_attr_align, IOC_LINESIZE);
694f8919bdaSduboff 
695f8919bdaSduboff 	/* Size for tx bounce buffers must be max tx packet size. */
696f8919bdaSduboff 	tx_buf_len = MAXPKTBUF(dp);
697f8919bdaSduboff 	tx_buf_len = ROUNDUP(tx_buf_len, IOC_LINESIZE);
698f8919bdaSduboff 
699f8919bdaSduboff 	ASSERT(tx_buf_len >= ETHERMAX+ETHERFCSL);
700f8919bdaSduboff 
701f8919bdaSduboff 	for (i = 0, tbp = dp->tx_buf;
702f8919bdaSduboff 	    i < dp->gc.gc_tx_buf_size; i++, tbp++) {
703f8919bdaSduboff 
704f8919bdaSduboff 		/* setup bounce buffers for tx packets */
705f8919bdaSduboff 		if ((err = ddi_dma_alloc_handle(dp->dip,
706f8919bdaSduboff 		    &dma_attr_txbounce,
707f8919bdaSduboff 		    DDI_DMA_SLEEP, NULL,
708f8919bdaSduboff 		    &tbp->txb_bdh)) != DDI_SUCCESS) {
709f8919bdaSduboff 
710f8919bdaSduboff 			cmn_err(CE_WARN,
711f8919bdaSduboff 		    "!%s: %s ddi_dma_alloc_handle for bounce buffer failed:"
712f8919bdaSduboff 			    " err=%d, i=%d",
713f8919bdaSduboff 			    dp->name, __func__, err, i);
714f8919bdaSduboff 			goto err_alloc_dh;
715f8919bdaSduboff 		}
716f8919bdaSduboff 
717f8919bdaSduboff 		if ((err = ddi_dma_mem_alloc(tbp->txb_bdh,
718f8919bdaSduboff 		    tx_buf_len,
719f8919bdaSduboff 		    &dp->gc.gc_buf_attr,
720f8919bdaSduboff 		    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
721f8919bdaSduboff 		    &buf, &buf_len,
722f8919bdaSduboff 		    &tbp->txb_bah)) != DDI_SUCCESS) {
723f8919bdaSduboff 			cmn_err(CE_WARN,
724f8919bdaSduboff 		    "!%s: %s: ddi_dma_mem_alloc for bounce buffer failed"
725f8919bdaSduboff 			    "ret %d, request size %d",
726f8919bdaSduboff 			    dp->name, __func__, err, tx_buf_len);
727f8919bdaSduboff 			ddi_dma_free_handle(&tbp->txb_bdh);
728f8919bdaSduboff 			goto err_alloc_dh;
729f8919bdaSduboff 		}
730f8919bdaSduboff 
731f8919bdaSduboff 		if ((err = ddi_dma_addr_bind_handle(tbp->txb_bdh,
732f8919bdaSduboff 		    NULL, buf, buf_len,
733f8919bdaSduboff 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
734f8919bdaSduboff 		    DDI_DMA_SLEEP, NULL,
735f8919bdaSduboff 		    &buf_cookie, &count)) != DDI_SUCCESS) {
736f8919bdaSduboff 				ASSERT(err != DDI_DMA_INUSE);
737f8919bdaSduboff 				cmn_err(CE_WARN,
738f8919bdaSduboff 	"!%s: %s: ddi_dma_addr_bind_handle for bounce buffer failed: %d",
739f8919bdaSduboff 				    dp->name, __func__, err);
740f8919bdaSduboff 				ddi_dma_mem_free(&tbp->txb_bah);
741f8919bdaSduboff 				ddi_dma_free_handle(&tbp->txb_bdh);
742f8919bdaSduboff 				goto err_alloc_dh;
743f8919bdaSduboff 		}
744f8919bdaSduboff 		ASSERT(count == 1);
745f8919bdaSduboff 		tbp->txb_buf = buf;
746f8919bdaSduboff 		tbp->txb_buf_dma = buf_cookie.dmac_laddress;
747f8919bdaSduboff 	}
748f8919bdaSduboff 
749f8919bdaSduboff 	return (0);
750f8919bdaSduboff 
751f8919bdaSduboff err_alloc_dh:
752f8919bdaSduboff 	if (dp->gc.gc_tx_buf_size > 0) {
753f8919bdaSduboff 		while (i-- > 0) {
754f8919bdaSduboff 			(void) ddi_dma_unbind_handle(dp->tx_buf[i].txb_bdh);
755f8919bdaSduboff 			ddi_dma_mem_free(&dp->tx_buf[i].txb_bah);
756f8919bdaSduboff 			ddi_dma_free_handle(&dp->tx_buf[i].txb_bdh);
757f8919bdaSduboff 		}
758f8919bdaSduboff 	}
759f8919bdaSduboff 
760f8919bdaSduboff 	if (dp->desc_dma_handle) {
761f8919bdaSduboff 		(void) ddi_dma_unbind_handle(dp->desc_dma_handle);
762f8919bdaSduboff 		ddi_dma_mem_free(&dp->desc_acc_handle);
763f8919bdaSduboff 		ddi_dma_free_handle(&dp->desc_dma_handle);
764f8919bdaSduboff 		dp->desc_dma_handle = NULL;
765f8919bdaSduboff 	}
766f8919bdaSduboff 
767f8919bdaSduboff 	return (ENOMEM);
768f8919bdaSduboff }
769f8919bdaSduboff 
770f8919bdaSduboff static void
gem_free_memory(struct gem_dev * dp)771f8919bdaSduboff gem_free_memory(struct gem_dev *dp)
772f8919bdaSduboff {
773f8919bdaSduboff 	int		i;
774f8919bdaSduboff 	struct rxbuf	*rbp;
775f8919bdaSduboff 	struct txbuf	*tbp;
776f8919bdaSduboff 
777f8919bdaSduboff 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
778f8919bdaSduboff 
779f8919bdaSduboff 	/* Free TX/RX descriptors and tx padding buffer */
780f8919bdaSduboff 	if (dp->desc_dma_handle) {
781f8919bdaSduboff 		(void) ddi_dma_unbind_handle(dp->desc_dma_handle);
782f8919bdaSduboff 		ddi_dma_mem_free(&dp->desc_acc_handle);
783f8919bdaSduboff 		ddi_dma_free_handle(&dp->desc_dma_handle);
784f8919bdaSduboff 		dp->desc_dma_handle = NULL;
785f8919bdaSduboff 	}
786f8919bdaSduboff 
787f8919bdaSduboff 	/* Free dma handles for Tx */
788f8919bdaSduboff 	for (i = dp->gc.gc_tx_buf_size, tbp = dp->tx_buf; i--; tbp++) {
789f8919bdaSduboff 		/* Free bounce buffer associated to each txbuf */
790f8919bdaSduboff 		(void) ddi_dma_unbind_handle(tbp->txb_bdh);
791f8919bdaSduboff 		ddi_dma_mem_free(&tbp->txb_bah);
792f8919bdaSduboff 		ddi_dma_free_handle(&tbp->txb_bdh);
793f8919bdaSduboff 	}
794f8919bdaSduboff 
795f8919bdaSduboff 	/* Free rx buffer */
796f8919bdaSduboff 	while ((rbp = dp->rx_buf_freelist) != NULL) {
797f8919bdaSduboff 
798f8919bdaSduboff 		ASSERT(dp->rx_buf_freecnt > 0);
799f8919bdaSduboff 
800f8919bdaSduboff 		dp->rx_buf_freelist = rbp->rxb_next;
801f8919bdaSduboff 		dp->rx_buf_freecnt--;
802f8919bdaSduboff 
803f8919bdaSduboff 		/* release DMA mapping */
804f8919bdaSduboff 		ASSERT(rbp->rxb_dh != NULL);
805f8919bdaSduboff 
806f8919bdaSduboff 		/* free dma handles for rx bbuf */
807f8919bdaSduboff 		/* it has dma mapping always */
808f8919bdaSduboff 		ASSERT(rbp->rxb_nfrags > 0);
809f8919bdaSduboff 		(void) ddi_dma_unbind_handle(rbp->rxb_dh);
810f8919bdaSduboff 
811f8919bdaSduboff 		/* free the associated bounce buffer and dma handle */
812f8919bdaSduboff 		ASSERT(rbp->rxb_bah != NULL);
813f8919bdaSduboff 		ddi_dma_mem_free(&rbp->rxb_bah);
814f8919bdaSduboff 		/* free the associated dma handle */
815f8919bdaSduboff 		ddi_dma_free_handle(&rbp->rxb_dh);
816f8919bdaSduboff 
817f8919bdaSduboff 		/* free the base memory of rx buffer management */
818f8919bdaSduboff 		kmem_free(rbp, sizeof (struct rxbuf));
819f8919bdaSduboff 	}
820f8919bdaSduboff }
821f8919bdaSduboff 
822f8919bdaSduboff /* ============================================================== */
823f8919bdaSduboff /*
824f8919bdaSduboff  * Rx/Tx descriptor slot management
825f8919bdaSduboff  */
826f8919bdaSduboff /* ============================================================== */
827f8919bdaSduboff /*
828f8919bdaSduboff  * Initialize an empty rx ring.
829f8919bdaSduboff  */
830f8919bdaSduboff static void
gem_init_rx_ring(struct gem_dev * dp)831f8919bdaSduboff gem_init_rx_ring(struct gem_dev *dp)
832f8919bdaSduboff {
833f8919bdaSduboff 	int		i;
834f8919bdaSduboff 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
835f8919bdaSduboff 
836f8919bdaSduboff 	DPRINTF(1, (CE_CONT, "!%s: %s ring_size:%d, buf_max:%d",
837f8919bdaSduboff 	    dp->name, __func__,
838f8919bdaSduboff 	    rx_ring_size, dp->gc.gc_rx_buf_max));
839f8919bdaSduboff 
840f8919bdaSduboff 	/* make a physical chain of rx descriptors */
841f8919bdaSduboff 	for (i = 0; i < rx_ring_size; i++) {
842f8919bdaSduboff 		(*dp->gc.gc_rx_desc_init)(dp, i);
843f8919bdaSduboff 	}
844f8919bdaSduboff 	gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
845f8919bdaSduboff 
846f8919bdaSduboff 	dp->rx_active_head = (seqnum_t)0;
847f8919bdaSduboff 	dp->rx_active_tail = (seqnum_t)0;
848f8919bdaSduboff 
849f8919bdaSduboff 	ASSERT(dp->rx_buf_head == (struct rxbuf *)NULL);
850f8919bdaSduboff 	ASSERT(dp->rx_buf_tail == (struct rxbuf *)NULL);
851f8919bdaSduboff }
852f8919bdaSduboff 
853f8919bdaSduboff /*
854f8919bdaSduboff  * Prepare rx buffers and put them into the rx buffer/descriptor ring.
855f8919bdaSduboff  */
856f8919bdaSduboff static void
gem_prepare_rx_buf(struct gem_dev * dp)857f8919bdaSduboff gem_prepare_rx_buf(struct gem_dev *dp)
858f8919bdaSduboff {
859f8919bdaSduboff 	int		i;
860f8919bdaSduboff 	int		nrbuf;
861f8919bdaSduboff 	struct rxbuf	*rbp;
862f8919bdaSduboff 
863f8919bdaSduboff 	ASSERT(mutex_owned(&dp->intrlock));
864f8919bdaSduboff 
865f8919bdaSduboff 	/* Now we have no active buffers in rx ring */
866f8919bdaSduboff 
867f8919bdaSduboff 	nrbuf = min(dp->gc.gc_rx_ring_size, dp->gc.gc_rx_buf_max);
868f8919bdaSduboff 	for (i = 0; i < nrbuf; i++) {
869f8919bdaSduboff 		if ((rbp = gem_get_rxbuf(dp, B_TRUE)) == NULL) {
870f8919bdaSduboff 			break;
871f8919bdaSduboff 		}
872f8919bdaSduboff 		gem_append_rxbuf(dp, rbp);
873f8919bdaSduboff 	}
874f8919bdaSduboff 
875f8919bdaSduboff 	gem_rx_desc_dma_sync(dp,
876f8919bdaSduboff 	    0, dp->gc.gc_rx_ring_size, DDI_DMA_SYNC_FORDEV);
877f8919bdaSduboff }
878f8919bdaSduboff 
879f8919bdaSduboff /*
880f8919bdaSduboff  * Reclaim active rx buffers in rx buffer ring.
881f8919bdaSduboff  */
882f8919bdaSduboff static void
gem_clean_rx_buf(struct gem_dev * dp)883f8919bdaSduboff gem_clean_rx_buf(struct gem_dev *dp)
884f8919bdaSduboff {
885f8919bdaSduboff 	int		i;
886f8919bdaSduboff 	struct rxbuf	*rbp;
887f8919bdaSduboff 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
888f8919bdaSduboff #ifdef GEM_DEBUG_LEVEL
889f8919bdaSduboff 	int		total;
890f8919bdaSduboff #endif
891f8919bdaSduboff 	ASSERT(mutex_owned(&dp->intrlock));
892f8919bdaSduboff 
893f8919bdaSduboff 	DPRINTF(2, (CE_CONT, "!%s: %s: %d buffers are free",
894f8919bdaSduboff 	    dp->name, __func__, dp->rx_buf_freecnt));
895f8919bdaSduboff 	/*
896f8919bdaSduboff 	 * clean up HW descriptors
897f8919bdaSduboff 	 */
898f8919bdaSduboff 	for (i = 0; i < rx_ring_size; i++) {
899f8919bdaSduboff 		(*dp->gc.gc_rx_desc_clean)(dp, i);
900f8919bdaSduboff 	}
901f8919bdaSduboff 	gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
902f8919bdaSduboff 
903f8919bdaSduboff #ifdef GEM_DEBUG_LEVEL
904f8919bdaSduboff 	total = 0;
905f8919bdaSduboff #endif
906f8919bdaSduboff 	/*
907f8919bdaSduboff 	 * Reclaim allocated rx buffers
908f8919bdaSduboff 	 */
909f8919bdaSduboff 	while ((rbp = dp->rx_buf_head) != NULL) {
910f8919bdaSduboff #ifdef GEM_DEBUG_LEVEL
911f8919bdaSduboff 		total++;
912f8919bdaSduboff #endif
913f8919bdaSduboff 		/* remove the first one from rx buffer list */
914f8919bdaSduboff 		dp->rx_buf_head = rbp->rxb_next;
915f8919bdaSduboff 
916f8919bdaSduboff 		/* recycle the rxbuf */
917f8919bdaSduboff 		gem_free_rxbuf(rbp);
918f8919bdaSduboff 	}
919f8919bdaSduboff 	dp->rx_buf_tail = (struct rxbuf *)NULL;
920f8919bdaSduboff 
921f8919bdaSduboff 	DPRINTF(2, (CE_CONT,
922f8919bdaSduboff 	    "!%s: %s: %d buffers freeed, total: %d free",
923f8919bdaSduboff 	    dp->name, __func__, total, dp->rx_buf_freecnt));
924f8919bdaSduboff }
925f8919bdaSduboff 
926f8919bdaSduboff /*
927f8919bdaSduboff  * Initialize an empty transmit buffer/descriptor ring
928f8919bdaSduboff  */
929f8919bdaSduboff static void
gem_init_tx_ring(struct gem_dev * dp)930f8919bdaSduboff gem_init_tx_ring(struct gem_dev *dp)
931f8919bdaSduboff {
932f8919bdaSduboff 	int		i;
933f8919bdaSduboff 	int		tx_buf_size = dp->gc.gc_tx_buf_size;
934f8919bdaSduboff 	int		tx_ring_size = dp->gc.gc_tx_ring_size;
935f8919bdaSduboff 
936f8919bdaSduboff 	DPRINTF(2, (CE_CONT, "!%s: %s: ring_size:%d, buf_size:%d",
937f8919bdaSduboff 	    dp->name, __func__,
938f8919bdaSduboff 	    dp->gc.gc_tx_ring_size, dp->gc.gc_tx_buf_size));
939f8919bdaSduboff 
940f8919bdaSduboff 	ASSERT(!dp->mac_active);
941f8919bdaSduboff 
942f8919bdaSduboff 	/* initialize active list and free list */
943f8919bdaSduboff 	dp->tx_slots_base =
944f8919bdaSduboff 	    SLOT(dp->tx_slots_base + dp->tx_softq_head, tx_buf_size);
945f8919bdaSduboff 	dp->tx_softq_tail -= dp->tx_softq_head;
946f8919bdaSduboff 	dp->tx_softq_head = (seqnum_t)0;
947f8919bdaSduboff 
948f8919bdaSduboff 	dp->tx_active_head = dp->tx_softq_head;
949f8919bdaSduboff 	dp->tx_active_tail = dp->tx_softq_head;
950f8919bdaSduboff 
951f8919bdaSduboff 	dp->tx_free_head   = dp->tx_softq_tail;
952f8919bdaSduboff 	dp->tx_free_tail   = dp->gc.gc_tx_buf_limit;
953f8919bdaSduboff 
954f8919bdaSduboff 	dp->tx_desc_head = (seqnum_t)0;
955f8919bdaSduboff 	dp->tx_desc_tail = (seqnum_t)0;
956f8919bdaSduboff 	dp->tx_desc_intr = (seqnum_t)0;
957f8919bdaSduboff 
958f8919bdaSduboff 	for (i = 0; i < tx_ring_size; i++) {
959f8919bdaSduboff 		(*dp->gc.gc_tx_desc_init)(dp, i);
960f8919bdaSduboff 	}
961f8919bdaSduboff 	gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
962f8919bdaSduboff }
963f8919bdaSduboff 
964f8919bdaSduboff __INLINE__
965f8919bdaSduboff static void
gem_txbuf_free_dma_resources(struct txbuf * tbp)966f8919bdaSduboff gem_txbuf_free_dma_resources(struct txbuf *tbp)
967f8919bdaSduboff {
968f8919bdaSduboff 	if (tbp->txb_mp) {
969f8919bdaSduboff 		freemsg(tbp->txb_mp);
970f8919bdaSduboff 		tbp->txb_mp = NULL;
971f8919bdaSduboff 	}
972f8919bdaSduboff 	tbp->txb_nfrags = 0;
97323d366e3Sduboff 	tbp->txb_flag = 0;
974f8919bdaSduboff }
975f8919bdaSduboff #pragma inline(gem_txbuf_free_dma_resources)
976f8919bdaSduboff 
977f8919bdaSduboff /*
978f8919bdaSduboff  * reclaim active tx buffers and reset positions in tx rings.
979f8919bdaSduboff  */
980f8919bdaSduboff static void
gem_clean_tx_buf(struct gem_dev * dp)981f8919bdaSduboff gem_clean_tx_buf(struct gem_dev *dp)
982f8919bdaSduboff {
983f8919bdaSduboff 	int		i;
984f8919bdaSduboff 	seqnum_t	head;
985f8919bdaSduboff 	seqnum_t	tail;
986f8919bdaSduboff 	seqnum_t	sn;
987f8919bdaSduboff 	struct txbuf	*tbp;
988f8919bdaSduboff 	int		tx_ring_size = dp->gc.gc_tx_ring_size;
989f8919bdaSduboff #ifdef GEM_DEBUG_LEVEL
990f8919bdaSduboff 	int		err;
991f8919bdaSduboff #endif
992f8919bdaSduboff 
993f8919bdaSduboff 	ASSERT(!dp->mac_active);
994f8919bdaSduboff 	ASSERT(dp->tx_busy == 0);
995f8919bdaSduboff 	ASSERT(dp->tx_softq_tail == dp->tx_free_head);
996f8919bdaSduboff 
997f8919bdaSduboff 	/*
998f8919bdaSduboff 	 * clean up all HW descriptors
999f8919bdaSduboff 	 */
1000f8919bdaSduboff 	for (i = 0; i < tx_ring_size; i++) {
1001f8919bdaSduboff 		(*dp->gc.gc_tx_desc_clean)(dp, i);
1002f8919bdaSduboff 	}
1003f8919bdaSduboff 	gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
1004f8919bdaSduboff 
1005f8919bdaSduboff 	/* dequeue all active and loaded buffers */
1006f8919bdaSduboff 	head = dp->tx_active_head;
1007f8919bdaSduboff 	tail = dp->tx_softq_tail;
1008f8919bdaSduboff 
1009f8919bdaSduboff 	ASSERT(dp->tx_free_head - head >= 0);
1010f8919bdaSduboff 	tbp = GET_TXBUF(dp, head);
1011f8919bdaSduboff 	for (sn = head; sn != tail; sn++) {
1012f8919bdaSduboff 		gem_txbuf_free_dma_resources(tbp);
1013f8919bdaSduboff 		ASSERT(tbp->txb_mp == NULL);
1014f8919bdaSduboff 		dp->stats.errxmt++;
1015f8919bdaSduboff 		tbp = tbp->txb_next;
1016f8919bdaSduboff 	}
1017f8919bdaSduboff 
1018f8919bdaSduboff #ifdef GEM_DEBUG_LEVEL
1019f8919bdaSduboff 	/* ensure no dma resources for tx are not in use now */
1020f8919bdaSduboff 	err = 0;
1021f8919bdaSduboff 	while (sn != head + dp->gc.gc_tx_buf_size) {
1022f8919bdaSduboff 		if (tbp->txb_mp || tbp->txb_nfrags) {
1023f8919bdaSduboff 			DPRINTF(0, (CE_CONT,
1024f8919bdaSduboff 			    "%s: %s: sn:%d[%d] mp:%p nfrags:%d",
1025f8919bdaSduboff 			    dp->name, __func__,
1026f8919bdaSduboff 			    sn, SLOT(sn, dp->gc.gc_tx_buf_size),
1027f8919bdaSduboff 			    tbp->txb_mp, tbp->txb_nfrags));
1028f8919bdaSduboff 			err = 1;
1029f8919bdaSduboff 		}
1030f8919bdaSduboff 		sn++;
1031f8919bdaSduboff 		tbp = tbp->txb_next;
1032f8919bdaSduboff 	}
1033f8919bdaSduboff 
1034f8919bdaSduboff 	if (err) {
1035f8919bdaSduboff 		gem_dump_txbuf(dp, CE_WARN,
1036f8919bdaSduboff 		    "gem_clean_tx_buf: tbp->txb_mp != NULL");
1037f8919bdaSduboff 	}
1038f8919bdaSduboff #endif
1039f8919bdaSduboff 	/* recycle buffers, now no active tx buffers in the ring */
1040f8919bdaSduboff 	dp->tx_free_tail += tail - head;
1041f8919bdaSduboff 	ASSERT(dp->tx_free_tail == dp->tx_free_head + dp->gc.gc_tx_buf_limit);
1042f8919bdaSduboff 
1043f8919bdaSduboff 	/* fix positions in tx buffer rings */
1044f8919bdaSduboff 	dp->tx_active_head = dp->tx_free_head;
1045f8919bdaSduboff 	dp->tx_active_tail = dp->tx_free_head;
1046f8919bdaSduboff 	dp->tx_softq_head  = dp->tx_free_head;
1047f8919bdaSduboff 	dp->tx_softq_tail  = dp->tx_free_head;
1048f8919bdaSduboff }
1049f8919bdaSduboff 
1050f8919bdaSduboff /*
1051f8919bdaSduboff  * Reclaim transmitted buffers from tx buffer/descriptor ring.
1052f8919bdaSduboff  */
1053f8919bdaSduboff __INLINE__ int
gem_reclaim_txbuf(struct gem_dev * dp)1054f8919bdaSduboff gem_reclaim_txbuf(struct gem_dev *dp)
1055f8919bdaSduboff {
1056f8919bdaSduboff 	struct txbuf	*tbp;
1057f8919bdaSduboff 	uint_t		txstat;
1058f8919bdaSduboff 	int		err = GEM_SUCCESS;
1059f8919bdaSduboff 	seqnum_t	head;
1060f8919bdaSduboff 	seqnum_t	tail;
1061f8919bdaSduboff 	seqnum_t	sn;
1062f8919bdaSduboff 	seqnum_t	desc_head;
1063f8919bdaSduboff 	int		tx_ring_size = dp->gc.gc_tx_ring_size;
1064f8919bdaSduboff 	uint_t (*tx_desc_stat)(struct gem_dev *dp,
1065f8919bdaSduboff 	    int slot, int ndesc) = dp->gc.gc_tx_desc_stat;
106623d366e3Sduboff 	clock_t		now;
106723d366e3Sduboff 
106823d366e3Sduboff 	now = ddi_get_lbolt();
106923d366e3Sduboff 	if (now == (clock_t)0) {
107023d366e3Sduboff 		/* make non-zero timestamp */
107123d366e3Sduboff 		now--;
107223d366e3Sduboff 	}
1073f8919bdaSduboff 
1074f8919bdaSduboff 	mutex_enter(&dp->xmitlock);
1075f8919bdaSduboff 
1076f8919bdaSduboff 	head = dp->tx_active_head;
1077f8919bdaSduboff 	tail = dp->tx_active_tail;
1078f8919bdaSduboff 
1079f8919bdaSduboff #if GEM_DEBUG_LEVEL > 2
1080f8919bdaSduboff 	if (head != tail) {
1081f8919bdaSduboff 		cmn_err(CE_CONT, "!%s: %s: "
1082f8919bdaSduboff 		    "testing active_head:%d[%d], active_tail:%d[%d]",
1083f8919bdaSduboff 		    dp->name, __func__,
1084f8919bdaSduboff 		    head, SLOT(head, dp->gc.gc_tx_buf_size),
1085f8919bdaSduboff 		    tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1086f8919bdaSduboff 	}
1087f8919bdaSduboff #endif
1088f8919bdaSduboff #ifdef DEBUG
1089f8919bdaSduboff 	if (dp->tx_reclaim_busy == 0) {
1090f8919bdaSduboff 		/* check tx buffer management consistency */
1091f8919bdaSduboff 		ASSERT(dp->tx_free_tail - dp->tx_active_head
1092f8919bdaSduboff 		    == dp->gc.gc_tx_buf_limit);
1093f8919bdaSduboff 		/* EMPTY */
1094f8919bdaSduboff 	}
1095f8919bdaSduboff #endif
1096f8919bdaSduboff 	dp->tx_reclaim_busy++;
1097f8919bdaSduboff 
1098f8919bdaSduboff 	/* sync all active HW descriptors */
1099f8919bdaSduboff 	gem_tx_desc_dma_sync(dp,
1100f8919bdaSduboff 	    SLOT(dp->tx_desc_head, tx_ring_size),
1101f8919bdaSduboff 	    dp->tx_desc_tail - dp->tx_desc_head,
1102f8919bdaSduboff 	    DDI_DMA_SYNC_FORKERNEL);
1103f8919bdaSduboff 
1104f8919bdaSduboff 	tbp = GET_TXBUF(dp, head);
1105f8919bdaSduboff 	desc_head = dp->tx_desc_head;
1106f8919bdaSduboff 	for (sn = head; sn != tail;
1107f8919bdaSduboff 	    dp->tx_active_head = (++sn), tbp = tbp->txb_next) {
1108f8919bdaSduboff 		int	ndescs;
1109f8919bdaSduboff 
1110f8919bdaSduboff 		ASSERT(tbp->txb_desc == desc_head);
1111f8919bdaSduboff 
1112f8919bdaSduboff 		ndescs = tbp->txb_ndescs;
111323d366e3Sduboff 		if (ndescs == 0) {
111423d366e3Sduboff 			/* skip errored descriptors */
111523d366e3Sduboff 			continue;
111623d366e3Sduboff 		}
1117f8919bdaSduboff 		txstat = (*tx_desc_stat)(dp,
1118f8919bdaSduboff 		    SLOT(tbp->txb_desc, tx_ring_size), ndescs);
1119f8919bdaSduboff 
1120f8919bdaSduboff 		if (txstat == 0) {
1121f8919bdaSduboff 			/* not transmitted yet */
1122f8919bdaSduboff 			break;
1123f8919bdaSduboff 		}
1124f8919bdaSduboff 
112523d366e3Sduboff 		if (!dp->tx_blocked && (tbp->txb_flag & GEM_TXFLAG_INTR)) {
112623d366e3Sduboff 			dp->tx_blocked = now;
112723d366e3Sduboff 		}
112823d366e3Sduboff 
1129f8919bdaSduboff 		ASSERT(txstat & (GEM_TX_DONE | GEM_TX_ERR));
1130f8919bdaSduboff 
1131f8919bdaSduboff 		if (txstat & GEM_TX_ERR) {
1132f8919bdaSduboff 			err = GEM_FAILURE;
1133f8919bdaSduboff 			cmn_err(CE_WARN, "!%s: tx error at desc %d[%d]",
1134f8919bdaSduboff 			    dp->name, sn, SLOT(sn, tx_ring_size));
1135f8919bdaSduboff 		}
1136f8919bdaSduboff #if GEM_DEBUG_LEVEL > 4
1137f8919bdaSduboff 		if (now - tbp->txb_stime >= 50) {
1138f8919bdaSduboff 			cmn_err(CE_WARN, "!%s: tx delay while %d mS",
1139f8919bdaSduboff 			    dp->name, (now - tbp->txb_stime)*10);
1140f8919bdaSduboff 		}
1141f8919bdaSduboff #endif
1142f8919bdaSduboff 		/* free transmitted descriptors */
1143f8919bdaSduboff 		desc_head += ndescs;
1144f8919bdaSduboff 	}
1145f8919bdaSduboff 
1146f8919bdaSduboff 	if (dp->tx_desc_head != desc_head) {
1147f8919bdaSduboff 		/* we have reclaimed one or more tx buffers */
1148f8919bdaSduboff 		dp->tx_desc_head = desc_head;
1149f8919bdaSduboff 
1150f8919bdaSduboff 		/* If we passed the next interrupt position, update it */
115123d366e3Sduboff 		if (desc_head - dp->tx_desc_intr > 0) {
1152f8919bdaSduboff 			dp->tx_desc_intr = desc_head;
1153f8919bdaSduboff 		}
1154f8919bdaSduboff 	}
1155f8919bdaSduboff 	mutex_exit(&dp->xmitlock);
1156f8919bdaSduboff 
1157f8919bdaSduboff 	/* free dma mapping resources associated with transmitted tx buffers */
1158f8919bdaSduboff 	tbp = GET_TXBUF(dp, head);
1159f8919bdaSduboff 	tail = sn;
1160f8919bdaSduboff #if GEM_DEBUG_LEVEL > 2
1161f8919bdaSduboff 	if (head != tail) {
1162f8919bdaSduboff 		cmn_err(CE_CONT, "%s: freeing head:%d[%d], tail:%d[%d]",
1163f8919bdaSduboff 		    __func__,
1164f8919bdaSduboff 		    head, SLOT(head, dp->gc.gc_tx_buf_size),
1165f8919bdaSduboff 		    tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1166f8919bdaSduboff 	}
1167f8919bdaSduboff #endif
1168f8919bdaSduboff 	for (sn = head; sn != tail; sn++, tbp = tbp->txb_next) {
1169f8919bdaSduboff 		gem_txbuf_free_dma_resources(tbp);
1170f8919bdaSduboff 	}
1171f8919bdaSduboff 
1172f8919bdaSduboff 	/* recycle the tx buffers */
1173f8919bdaSduboff 	mutex_enter(&dp->xmitlock);
1174f8919bdaSduboff 	if (--dp->tx_reclaim_busy == 0) {
1175f8919bdaSduboff 		/* we are the last thread who can update free tail */
1176f8919bdaSduboff #if GEM_DEBUG_LEVEL > 4
1177f8919bdaSduboff 		/* check all resouces have been deallocated */
1178f8919bdaSduboff 		sn = dp->tx_free_tail;
1179f8919bdaSduboff 		tbp = GET_TXBUF(dp, new_tail);
1180f8919bdaSduboff 		while (sn != dp->tx_active_head + dp->gc.gc_tx_buf_limit) {
1181f8919bdaSduboff 			if (tbp->txb_nfrags) {
1182f8919bdaSduboff 				/* in use */
1183f8919bdaSduboff 				break;
1184f8919bdaSduboff 			}
1185f8919bdaSduboff 			ASSERT(tbp->txb_mp == NULL);
1186f8919bdaSduboff 			tbp = tbp->txb_next;
1187f8919bdaSduboff 			sn++;
1188f8919bdaSduboff 		}
1189f8919bdaSduboff 		ASSERT(dp->tx_active_head + dp->gc.gc_tx_buf_limit == sn);
1190f8919bdaSduboff #endif
1191f8919bdaSduboff 		dp->tx_free_tail =
1192f8919bdaSduboff 		    dp->tx_active_head + dp->gc.gc_tx_buf_limit;
1193f8919bdaSduboff 	}
1194f8919bdaSduboff 	if (!dp->mac_active) {
1195f8919bdaSduboff 		/* someone may be waiting for me. */
1196f8919bdaSduboff 		cv_broadcast(&dp->tx_drain_cv);
1197f8919bdaSduboff 	}
1198f8919bdaSduboff #if GEM_DEBUG_LEVEL > 2
1199f8919bdaSduboff 	cmn_err(CE_CONT, "!%s: %s: called, "
1200f8919bdaSduboff 	    "free_head:%d free_tail:%d(+%d) added:%d",
1201f8919bdaSduboff 	    dp->name, __func__,
1202f8919bdaSduboff 	    dp->tx_free_head, dp->tx_free_tail,
1203f8919bdaSduboff 	    dp->tx_free_tail - dp->tx_free_head, tail - head);
1204f8919bdaSduboff #endif
1205f8919bdaSduboff 	mutex_exit(&dp->xmitlock);
1206f8919bdaSduboff 
1207f8919bdaSduboff 	return (err);
1208f8919bdaSduboff }
1209f8919bdaSduboff #pragma inline(gem_reclaim_txbuf)
1210f8919bdaSduboff 
1211f8919bdaSduboff 
1212f8919bdaSduboff /*
1213f8919bdaSduboff  * Make tx descriptors in out-of-order manner
1214f8919bdaSduboff  */
1215f8919bdaSduboff static void
gem_tx_load_descs_oo(struct gem_dev * dp,seqnum_t start_slot,seqnum_t end_slot,uint64_t flags)1216f8919bdaSduboff gem_tx_load_descs_oo(struct gem_dev *dp,
121723d366e3Sduboff 	seqnum_t start_slot, seqnum_t end_slot, uint64_t flags)
1218f8919bdaSduboff {
1219f8919bdaSduboff 	seqnum_t	sn;
1220f8919bdaSduboff 	struct txbuf	*tbp;
1221f8919bdaSduboff 	int	tx_ring_size = dp->gc.gc_tx_ring_size;
1222f8919bdaSduboff 	int	(*tx_desc_write)
1223f8919bdaSduboff 	    (struct gem_dev *dp, int slot,
1224f8919bdaSduboff 	    ddi_dma_cookie_t *dmacookie,
1225f8919bdaSduboff 	    int frags, uint64_t flag) = dp->gc.gc_tx_desc_write;
1226f8919bdaSduboff 	clock_t	now = ddi_get_lbolt();
1227f8919bdaSduboff 
1228f8919bdaSduboff 	sn = start_slot;
1229f8919bdaSduboff 	tbp = GET_TXBUF(dp, sn);
1230f8919bdaSduboff 	do {
1231f8919bdaSduboff #if GEM_DEBUG_LEVEL > 1
1232f8919bdaSduboff 		if (dp->tx_cnt < 100) {
1233f8919bdaSduboff 			dp->tx_cnt++;
1234f8919bdaSduboff 			flags |= GEM_TXFLAG_INTR;
1235f8919bdaSduboff 		}
1236f8919bdaSduboff #endif
1237f8919bdaSduboff 		/* write a tx descriptor */
1238f8919bdaSduboff 		tbp->txb_desc = sn;
1239f8919bdaSduboff 		tbp->txb_ndescs = (*tx_desc_write)(dp,
1240f8919bdaSduboff 		    SLOT(sn, tx_ring_size),
1241f8919bdaSduboff 		    tbp->txb_dmacookie,
1242f8919bdaSduboff 		    tbp->txb_nfrags, flags | tbp->txb_flag);
1243f8919bdaSduboff 		tbp->txb_stime = now;
1244f8919bdaSduboff 		ASSERT(tbp->txb_ndescs == 1);
1245f8919bdaSduboff 
1246f8919bdaSduboff 		flags = 0;
1247f8919bdaSduboff 		sn++;
1248f8919bdaSduboff 		tbp = tbp->txb_next;
1249f8919bdaSduboff 	} while (sn != end_slot);
1250f8919bdaSduboff }
1251f8919bdaSduboff 
1252f8919bdaSduboff __INLINE__
125323d366e3Sduboff static size_t
gem_setup_txbuf_copy(struct gem_dev * dp,mblk_t * mp,struct txbuf * tbp)1254f8919bdaSduboff gem_setup_txbuf_copy(struct gem_dev *dp, mblk_t *mp, struct txbuf *tbp)
1255f8919bdaSduboff {
1256f8919bdaSduboff 	size_t			min_pkt;
1257f8919bdaSduboff 	caddr_t			bp;
1258f8919bdaSduboff 	size_t			off;
1259f8919bdaSduboff 	mblk_t			*tp;
1260f8919bdaSduboff 	size_t			len;
1261f8919bdaSduboff 	uint64_t		flag;
1262f8919bdaSduboff 
1263f8919bdaSduboff 	ASSERT(tbp->txb_mp == NULL);
1264f8919bdaSduboff 
1265f8919bdaSduboff 	/* we use bounce buffer for the packet */
1266f8919bdaSduboff 	min_pkt = ETHERMIN;
1267f8919bdaSduboff 	bp = tbp->txb_buf;
1268f8919bdaSduboff 	off = 0;
1269f8919bdaSduboff 	tp = mp;
1270f8919bdaSduboff 
1271f8919bdaSduboff 	flag = tbp->txb_flag;
1272f8919bdaSduboff 	if (flag & GEM_TXFLAG_SWVTAG) {
1273f8919bdaSduboff 		/* need to increase min packet size */
1274f8919bdaSduboff 		min_pkt += VTAG_SIZE;
1275f8919bdaSduboff 		ASSERT((flag & GEM_TXFLAG_VTAG) == 0);
1276f8919bdaSduboff 	}
1277f8919bdaSduboff 
1278f8919bdaSduboff 	/* copy the rest */
1279f8919bdaSduboff 	for (; tp; tp = tp->b_cont) {
1280f8919bdaSduboff 		if ((len = (long)tp->b_wptr - (long)tp->b_rptr) > 0) {
1281f8919bdaSduboff 			bcopy(tp->b_rptr, &bp[off], len);
1282f8919bdaSduboff 			off += len;
1283f8919bdaSduboff 		}
1284f8919bdaSduboff 	}
1285f8919bdaSduboff 
1286f8919bdaSduboff 	if (off < min_pkt &&
1287f8919bdaSduboff 	    (min_pkt > ETHERMIN || !dp->gc.gc_tx_auto_pad)) {
1288f8919bdaSduboff 		/*
128923d366e3Sduboff 		 * Extend the packet to minimum packet size explicitly.
1290f8919bdaSduboff 		 * For software vlan packets, we shouldn't use tx autopad
129123d366e3Sduboff 		 * function because nics may not be aware of vlan.
1292f8919bdaSduboff 		 * we must keep 46 octet of payload even if we use vlan.
1293f8919bdaSduboff 		 */
1294f8919bdaSduboff 		bzero(&bp[off], min_pkt - off);
1295f8919bdaSduboff 		off = min_pkt;
1296f8919bdaSduboff 	}
1297f8919bdaSduboff 
1298f8919bdaSduboff 	(void) ddi_dma_sync(tbp->txb_bdh, (off_t)0, off, DDI_DMA_SYNC_FORDEV);
1299f8919bdaSduboff 
1300f8919bdaSduboff 	tbp->txb_dmacookie[0].dmac_laddress = tbp->txb_buf_dma;
1301f8919bdaSduboff 	tbp->txb_dmacookie[0].dmac_size = off;
1302f8919bdaSduboff 
1303f8919bdaSduboff 	DPRINTF(2, (CE_CONT,
1304f8919bdaSduboff 	    "!%s: %s: copy: addr:0x%llx len:0x%x, vtag:0x%04x, min_pkt:%d",
1305f8919bdaSduboff 	    dp->name, __func__,
1306f8919bdaSduboff 	    tbp->txb_dmacookie[0].dmac_laddress,
1307f8919bdaSduboff 	    tbp->txb_dmacookie[0].dmac_size,
1308f8919bdaSduboff 	    (flag & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT,
1309f8919bdaSduboff 	    min_pkt));
1310f8919bdaSduboff 
1311f8919bdaSduboff 	/* save misc info */
1312f8919bdaSduboff 	tbp->txb_mp = mp;
1313f8919bdaSduboff 	tbp->txb_nfrags = 1;
1314f8919bdaSduboff #ifdef DEBUG_MULTIFRAGS
1315f8919bdaSduboff 	if (dp->gc.gc_tx_max_frags >= 3 &&
1316f8919bdaSduboff 	    tbp->txb_dmacookie[0].dmac_size > 16*3) {
1317f8919bdaSduboff 		tbp->txb_dmacookie[1].dmac_laddress =
1318f8919bdaSduboff 		    tbp->txb_dmacookie[0].dmac_laddress + 16;
1319f8919bdaSduboff 		tbp->txb_dmacookie[2].dmac_laddress =
1320f8919bdaSduboff 		    tbp->txb_dmacookie[1].dmac_laddress + 16;
1321f8919bdaSduboff 
1322f8919bdaSduboff 		tbp->txb_dmacookie[2].dmac_size =
1323f8919bdaSduboff 		    tbp->txb_dmacookie[0].dmac_size - 16*2;
1324f8919bdaSduboff 		tbp->txb_dmacookie[1].dmac_size = 16;
1325f8919bdaSduboff 		tbp->txb_dmacookie[0].dmac_size = 16;
1326f8919bdaSduboff 		tbp->txb_nfrags  = 3;
1327f8919bdaSduboff 	}
1328f8919bdaSduboff #endif
132923d366e3Sduboff 	return (off);
1330f8919bdaSduboff }
1331f8919bdaSduboff #pragma inline(gem_setup_txbuf_copy)
1332f8919bdaSduboff 
1333f8919bdaSduboff __INLINE__
1334f8919bdaSduboff static void
gem_tx_start_unit(struct gem_dev * dp)1335f8919bdaSduboff gem_tx_start_unit(struct gem_dev *dp)
1336f8919bdaSduboff {
1337f8919bdaSduboff 	seqnum_t	head;
1338f8919bdaSduboff 	seqnum_t	tail;
1339f8919bdaSduboff 	struct txbuf	*tbp_head;
1340f8919bdaSduboff 	struct txbuf	*tbp_tail;
1341f8919bdaSduboff 
1342f8919bdaSduboff 	/* update HW descriptors from soft queue */
1343f8919bdaSduboff 	ASSERT(mutex_owned(&dp->xmitlock));
1344f8919bdaSduboff 	ASSERT(dp->tx_softq_head == dp->tx_active_tail);
1345f8919bdaSduboff 
1346f8919bdaSduboff 	head = dp->tx_softq_head;
1347f8919bdaSduboff 	tail = dp->tx_softq_tail;
1348f8919bdaSduboff 
1349f8919bdaSduboff 	DPRINTF(1, (CE_CONT,
1350f8919bdaSduboff 	    "%s: %s: called, softq %d %d[+%d], desc %d %d[+%d]",
1351f8919bdaSduboff 	    dp->name, __func__, head, tail, tail - head,
1352f8919bdaSduboff 	    dp->tx_desc_head, dp->tx_desc_tail,
1353f8919bdaSduboff 	    dp->tx_desc_tail - dp->tx_desc_head));
1354f8919bdaSduboff 
1355f8919bdaSduboff 	ASSERT(tail - head > 0);
1356f8919bdaSduboff 
1357f8919bdaSduboff 	dp->tx_desc_tail = tail;
1358f8919bdaSduboff 
1359f8919bdaSduboff 	tbp_head = GET_TXBUF(dp, head);
1360f8919bdaSduboff 	tbp_tail = GET_TXBUF(dp, tail - 1);
1361f8919bdaSduboff 
1362f8919bdaSduboff 	ASSERT(tbp_tail->txb_desc + tbp_tail->txb_ndescs == dp->tx_desc_tail);
1363f8919bdaSduboff 
1364f8919bdaSduboff 	dp->gc.gc_tx_start(dp,
1365f8919bdaSduboff 	    SLOT(tbp_head->txb_desc, dp->gc.gc_tx_ring_size),
1366f8919bdaSduboff 	    tbp_tail->txb_desc + tbp_tail->txb_ndescs - tbp_head->txb_desc);
1367f8919bdaSduboff 
1368f8919bdaSduboff 	/* advance softq head and active tail */
1369f8919bdaSduboff 	dp->tx_softq_head = dp->tx_active_tail = tail;
1370f8919bdaSduboff }
1371f8919bdaSduboff #pragma inline(gem_tx_start_unit)
1372f8919bdaSduboff 
1373f8919bdaSduboff #ifdef GEM_DEBUG_LEVEL
1374f8919bdaSduboff static int gem_send_cnt[10];
1375f8919bdaSduboff #endif
137623d366e3Sduboff #define	PKT_MIN_SIZE	(sizeof (struct ether_header) + 10 + VTAG_SIZE)
137723d366e3Sduboff #define	EHLEN	(sizeof (struct ether_header))
137823d366e3Sduboff /*
137923d366e3Sduboff  * check ether packet type and ip protocol
138023d366e3Sduboff  */
138123d366e3Sduboff static uint64_t
gem_txbuf_options(struct gem_dev * dp,mblk_t * mp,uint8_t * bp)138223d366e3Sduboff gem_txbuf_options(struct gem_dev *dp, mblk_t *mp, uint8_t *bp)
138323d366e3Sduboff {
138423d366e3Sduboff 	mblk_t		*tp;
138523d366e3Sduboff 	ssize_t		len;
138623d366e3Sduboff 	uint_t		vtag;
138723d366e3Sduboff 	int		off;
138823d366e3Sduboff 	uint64_t	flag;
1389f8919bdaSduboff 
139023d366e3Sduboff 	flag = 0ULL;
139123d366e3Sduboff 
139223d366e3Sduboff 	/*
139323d366e3Sduboff 	 * prepare continuous header of the packet for protocol analysis
139423d366e3Sduboff 	 */
139523d366e3Sduboff 	if ((long)mp->b_wptr - (long)mp->b_rptr < PKT_MIN_SIZE) {
139623d366e3Sduboff 		/* we use work buffer to copy mblk */
139723d366e3Sduboff 		for (tp = mp, off = 0;
139823d366e3Sduboff 		    tp && (off < PKT_MIN_SIZE);
139923d366e3Sduboff 		    tp = tp->b_cont, off += len) {
140023d366e3Sduboff 			len = (long)tp->b_wptr - (long)tp->b_rptr;
140123d366e3Sduboff 			len = min(len, PKT_MIN_SIZE - off);
140223d366e3Sduboff 			bcopy(tp->b_rptr, &bp[off], len);
140323d366e3Sduboff 		}
140423d366e3Sduboff 	} else {
140523d366e3Sduboff 		/* we can use mblk without copy */
140623d366e3Sduboff 		bp = mp->b_rptr;
140723d366e3Sduboff 	}
140823d366e3Sduboff 
140923d366e3Sduboff 	/* process vlan tag for GLD v3 */
141023d366e3Sduboff 	if (GET_NET16(&bp[VTAG_OFF]) == VTAG_TPID) {
141123d366e3Sduboff 		if (dp->misc_flag & GEM_VLAN_HARD) {
141223d366e3Sduboff 			vtag = GET_NET16(&bp[VTAG_OFF + 2]);
141323d366e3Sduboff 			ASSERT(vtag);
141423d366e3Sduboff 			flag |= vtag << GEM_TXFLAG_VTAG_SHIFT;
141523d366e3Sduboff 		} else {
141623d366e3Sduboff 			flag |= GEM_TXFLAG_SWVTAG;
141723d366e3Sduboff 		}
141823d366e3Sduboff 	}
141923d366e3Sduboff 	return (flag);
142023d366e3Sduboff }
142123d366e3Sduboff #undef EHLEN
142223d366e3Sduboff #undef PKT_MIN_SIZE
/*
 * gem_send_common is an exported function because hw depend routines may
 * use it for sending control frames like setup frames for 2114x chipset.
 *
 * Accepts a b_next-linked chain of packets. Returns NULL when every
 * packet was consumed (or freed because the mac is suspended), or the
 * remaining unsent chain when tx resources ran out or the mac is not
 * active — callers treat a non-NULL return as "retry later".
 */
mblk_t *
gem_send_common(struct gem_dev *dp, mblk_t *mp_head, uint32_t flags)
{
	int			nmblk;
	int			avail;
	mblk_t			*tp;
	mblk_t			*mp;
	int			i;
	struct txbuf		*tbp;
	seqnum_t		head;
	uint64_t		load_flags;
	uint64_t		len_total = 0;
	uint32_t		bcast = 0;
	uint32_t		mcast = 0;

	ASSERT(mp_head != NULL);

	/* count the packets in the chain */
	mp = mp_head;
	nmblk = 1;
	while ((mp = mp->b_next) != NULL) {
		nmblk++;
	}
#ifdef GEM_DEBUG_LEVEL
	gem_send_cnt[0]++;
	gem_send_cnt[min(nmblk, 9)]++;
#endif
	/*
	 * Acquire resources
	 */
	mutex_enter(&dp->xmitlock);
	if (dp->mac_suspended) {
		/* the device is gone; drop the whole chain */
		mutex_exit(&dp->xmitlock);
		mp = mp_head;
		while (mp) {
			tp = mp->b_next;
			freemsg(mp);
			mp = tp;
		}
		return (NULL);
	}

	if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
		/* don't send data packets while mac isn't active */
		/* XXX - should we discard packets? */
		mutex_exit(&dp->xmitlock);
		return (mp_head);
	}

	/* allocate free slots */
	head = dp->tx_free_head;
	avail = dp->tx_free_tail - head;

	DPRINTF(2, (CE_CONT,
	    "!%s: %s: called, free_head:%d free_tail:%d(+%d) req:%d",
	    dp->name, __func__,
	    dp->tx_free_head, dp->tx_free_tail, avail, nmblk));

	/* tx_max_packets is a dynamic cap used for tx flow control */
	avail = min(avail, dp->tx_max_packets);

	if (nmblk > avail) {
		if (avail == 0) {
			/* no resources; short cut */
			DPRINTF(2, (CE_CONT, "!%s: no resources", __func__));
			/* shrink the cap so we block earlier next time */
			dp->tx_max_packets = max(dp->tx_max_packets - 1, 1);
			goto done;
		}
		/* send only as many packets as we have buffers for */
		nmblk = avail;
	}

	/*
	 * Claim the slots.  tx_busy counts threads concurrently filling
	 * tx buffers; only the first one tags GEM_TXFLAG_HEAD, and only
	 * the last one to finish (below) kicks the hardware.
	 */
	dp->tx_free_head = head + nmblk;
	load_flags = ((dp->tx_busy++) == 0) ? GEM_TXFLAG_HEAD : 0;

	/* update last interrupt position if tx buffers exhaust.  */
	if (nmblk == avail) {
		tbp = GET_TXBUF(dp, head + avail - 1);
		tbp->txb_flag = GEM_TXFLAG_INTR;
		dp->tx_desc_intr = head + avail;
	}
	/* drop the lock while copying packet data; the slots are ours */
	mutex_exit(&dp->xmitlock);

	tbp = GET_TXBUF(dp, head);

	for (i = nmblk; i > 0; i--, tbp = tbp->txb_next) {
		uint8_t		*bp;
		uint64_t	txflag;

		/* remove one from the mblk list */
		ASSERT(mp_head != NULL);
		mp = mp_head;
		mp_head = mp_head->b_next;
		mp->b_next = NULL;

		/* statistics for non-unicast packets */
		bp = mp->b_rptr;
		if ((bp[0] & 1) && (flags & GEM_SEND_CTRL) == 0) {
			if (bcmp(bp, gem_etherbroadcastaddr.ether_addr_octet,
			    ETHERADDRL) == 0) {
				bcast++;
			} else {
				mcast++;
			}
		}

		/* save misc info */
		txflag = tbp->txb_flag;
		txflag |= (flags & GEM_SEND_CTRL) << GEM_TXFLAG_PRIVATE_SHIFT;
		txflag |= gem_txbuf_options(dp, mp, (uint8_t *)tbp->txb_buf);
		tbp->txb_flag = txflag;

		/* copy the packet into the tx buffer; frees mp */
		len_total += gem_setup_txbuf_copy(dp, mp, tbp);
	}

	(void) gem_tx_load_descs_oo(dp, head, head + nmblk, load_flags);

	/* Append the tbp at the tail of the active tx buffer list */
	mutex_enter(&dp->xmitlock);

	if ((--dp->tx_busy) == 0) {
		/* extend the tail of softq, as new packets have been ready. */
		dp->tx_softq_tail = dp->tx_free_head;

		if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
			/*
			 * The device status has changed while we are
			 * preparing tx buf.
			 * As we are the last one that make tx non-busy.
			 * wake up someone who may wait for us.
			 */
			cv_broadcast(&dp->tx_drain_cv);
		} else {
			ASSERT(dp->tx_softq_tail - dp->tx_softq_head > 0);
			gem_tx_start_unit(dp);
		}
	}
	dp->stats.obytes += len_total;
	dp->stats.opackets += nmblk;
	dp->stats.obcast += bcast;
	dp->stats.omcast += mcast;
done:
	mutex_exit(&dp->xmitlock);

	/* NULL if everything was sent; otherwise the unsent remainder */
	return (mp_head);
}
1570f8919bdaSduboff 
1571f8919bdaSduboff /* ========================================================== */
1572f8919bdaSduboff /*
1573f8919bdaSduboff  * error detection and restart routines
1574f8919bdaSduboff  */
1575f8919bdaSduboff /* ========================================================== */
/*
 * Stop, reset and reinitialize the nic after a fatal error or tx
 * timeout.  Must be called with intrlock held.  With
 * GEM_RESTART_KEEP_BUF in `flags', rx is stopped gracefully first so
 * that pending buffers survive the restart.  Returns GEM_SUCCESS or
 * GEM_FAILURE.
 */
int
gem_restart_nic(struct gem_dev *dp, uint_t flags)
{
	ASSERT(mutex_owned(&dp->intrlock));

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
#ifdef GEM_DEBUG_LEVEL
#if GEM_DEBUG_LEVEL > 1
	gem_dump_txbuf(dp, CE_CONT, "gem_restart_nic");
#endif
#endif

	if (dp->mac_suspended) {
		/* should we return GEM_FAILURE ? */
		return (GEM_FAILURE);
	}

	/*
	 * We should avoid calling any routines except xxx_chip_reset
	 * when we are resuming the system.
	 */
	if (dp->mac_active) {
		if (flags & GEM_RESTART_KEEP_BUF) {
			/* stop rx gracefully */
			dp->rxmode &= ~RXMODE_ENABLE;
			(void) (*dp->gc.gc_set_rx_filter)(dp);
		}
		(void) gem_mac_stop(dp, flags);
	}

	/* reset the chip. */
	if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s: failed to reset chip",
		    dp->name, __func__);
		goto err;
	}

	if (gem_mac_init(dp) != GEM_SUCCESS) {
		goto err;
	}

	/* setup media mode if the link have been up */
	if (dp->mii_state == MII_STATE_LINKUP) {
		if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
			goto err;
		}
	}

	/* setup mac address and enable rx filter */
	dp->rxmode |= RXMODE_ENABLE;
	if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
		goto err;
	}

	/*
	 * XXX - a panic happened because of linkdown.
	 * We must check mii_state here, because the link can be down just
	 * before the restart event happen. If the link is down now,
	 * gem_mac_start() will be called from gem_mii_link_check() when
	 * the link become up later.
	 */
	if (dp->mii_state == MII_STATE_LINKUP) {
		/* restart the nic */
		ASSERT(!dp->mac_active);
		(void) gem_mac_start(dp);
	}
	return (GEM_SUCCESS);
err:
	return (GEM_FAILURE);
}
1646f8919bdaSduboff 
1647f8919bdaSduboff 
/*
 * Periodic tx watchdog, self-rescheduled via timeout(9F) every
 * gc_tx_timeout_interval ticks.  Detects a hung transmitter (oldest
 * active tx buffer older than gc_tx_timeout) or a stream blocked too
 * long with no tx in flight, restarts the nic, and kicks the
 * downstream with mac_tx_update() when needed.
 */
static void
gem_tx_timeout(struct gem_dev *dp)
{
	clock_t		now;
	boolean_t	tx_sched;
	struct txbuf	*tbp;

	mutex_enter(&dp->intrlock);

	tx_sched = B_FALSE;
	now = ddi_get_lbolt();

	mutex_enter(&dp->xmitlock);
	if (!dp->mac_active || dp->mii_state != MII_STATE_LINKUP) {
		/* nothing to watch while the mac is down or link is down */
		mutex_exit(&dp->xmitlock);
		goto schedule_next;
	}
	mutex_exit(&dp->xmitlock);

	/* reclaim transmitted buffers to check the trasmitter hangs or not. */
	if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
		/* tx error happened, reset transmitter in the chip */
		(void) gem_restart_nic(dp, 0);
		tx_sched = B_TRUE;
		dp->tx_blocked = (clock_t)0;

		goto schedule_next;
	}

	mutex_enter(&dp->xmitlock);
	/* check if the transmitter thread is stuck */
	if (dp->tx_active_head == dp->tx_active_tail) {
		/* no tx buffer is loaded to the nic */
		if (dp->tx_blocked &&
		    now - dp->tx_blocked > dp->gc.gc_tx_timeout_interval) {
			/* blocked for a whole interval with nothing queued */
			gem_dump_txbuf(dp, CE_WARN,
			    "gem_tx_timeout: tx blocked");
			tx_sched = B_TRUE;
			dp->tx_blocked = (clock_t)0;
		}
		mutex_exit(&dp->xmitlock);
		goto schedule_next;
	}

	/* the oldest in-flight buffer decides whether tx timed out */
	tbp = GET_TXBUF(dp, dp->tx_active_head);
	if (now - tbp->txb_stime < dp->gc.gc_tx_timeout) {
		mutex_exit(&dp->xmitlock);
		goto schedule_next;
	}
	mutex_exit(&dp->xmitlock);

	gem_dump_txbuf(dp, CE_WARN, "gem_tx_timeout: tx timeout");

	/* discard untransmitted packet and restart tx.  */
	(void) gem_restart_nic(dp, GEM_RESTART_NOWAIT);
	tx_sched = B_TRUE;
	dp->tx_blocked = (clock_t)0;

schedule_next:
	mutex_exit(&dp->intrlock);

	/* restart the downstream if needed */
	if (tx_sched) {
		mac_tx_update(dp->mh);
	}

	DPRINTF(4, (CE_CONT,
	    "!%s: blocked:%d active_head:%d active_tail:%d desc_intr:%d",
	    dp->name, BOOLEAN(dp->tx_blocked),
	    dp->tx_active_head, dp->tx_active_tail, dp->tx_desc_intr));
	/* re-arm the watchdog */
	dp->timeout_id =
	    timeout((void (*)(void *))gem_tx_timeout,
	    (void *)dp, dp->gc.gc_tx_timeout_interval);
}
1722f8919bdaSduboff 
1723f8919bdaSduboff /* ================================================================== */
1724f8919bdaSduboff /*
1725f8919bdaSduboff  * Interrupt handler
1726f8919bdaSduboff  */
1727f8919bdaSduboff /* ================================================================== */
1728f8919bdaSduboff __INLINE__
1729f8919bdaSduboff static void
gem_append_rxbuf(struct gem_dev * dp,struct rxbuf * rbp_head)1730f8919bdaSduboff gem_append_rxbuf(struct gem_dev *dp, struct rxbuf *rbp_head)
1731f8919bdaSduboff {
1732f8919bdaSduboff 	struct rxbuf	*rbp;
1733f8919bdaSduboff 	seqnum_t	tail;
1734f8919bdaSduboff 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
1735f8919bdaSduboff 
1736f8919bdaSduboff 	ASSERT(rbp_head != NULL);
1737f8919bdaSduboff 	ASSERT(mutex_owned(&dp->intrlock));
1738f8919bdaSduboff 
1739f8919bdaSduboff 	DPRINTF(3, (CE_CONT, "!%s: %s: slot_head:%d, slot_tail:%d",
1740f8919bdaSduboff 	    dp->name, __func__, dp->rx_active_head, dp->rx_active_tail));
1741f8919bdaSduboff 
1742f8919bdaSduboff 	/*
1743f8919bdaSduboff 	 * Add new buffers into active rx buffer list
1744f8919bdaSduboff 	 */
1745f8919bdaSduboff 	if (dp->rx_buf_head == NULL) {
1746f8919bdaSduboff 		dp->rx_buf_head = rbp_head;
1747f8919bdaSduboff 		ASSERT(dp->rx_buf_tail == NULL);
1748f8919bdaSduboff 	} else {
1749f8919bdaSduboff 		dp->rx_buf_tail->rxb_next = rbp_head;
1750f8919bdaSduboff 	}
1751f8919bdaSduboff 
1752f8919bdaSduboff 	tail = dp->rx_active_tail;
1753f8919bdaSduboff 	for (rbp = rbp_head; rbp; rbp = rbp->rxb_next) {
1754f8919bdaSduboff 		/* need to notify the tail for the lower layer */
1755f8919bdaSduboff 		dp->rx_buf_tail = rbp;
1756f8919bdaSduboff 
1757f8919bdaSduboff 		dp->gc.gc_rx_desc_write(dp,
1758f8919bdaSduboff 		    SLOT(tail, rx_ring_size),
1759f8919bdaSduboff 		    rbp->rxb_dmacookie,
1760f8919bdaSduboff 		    rbp->rxb_nfrags);
1761f8919bdaSduboff 
1762f8919bdaSduboff 		dp->rx_active_tail = tail = tail + 1;
1763f8919bdaSduboff 	}
1764f8919bdaSduboff }
1765f8919bdaSduboff #pragma inline(gem_append_rxbuf)
1766f8919bdaSduboff 
1767f8919bdaSduboff mblk_t *
gem_get_packet_default(struct gem_dev * dp,struct rxbuf * rbp,size_t len)1768f8919bdaSduboff gem_get_packet_default(struct gem_dev *dp, struct rxbuf *rbp, size_t len)
1769f8919bdaSduboff {
1770f8919bdaSduboff 	int		rx_header_len = dp->gc.gc_rx_header_len;
1771f8919bdaSduboff 	uint8_t		*bp;
1772f8919bdaSduboff 	mblk_t		*mp;
1773f8919bdaSduboff 
1774f8919bdaSduboff 	/* allocate a new mblk */
1775f8919bdaSduboff 	if (mp = allocb(len + VTAG_SIZE, BPRI_MED)) {
1776f8919bdaSduboff 		ASSERT(mp->b_next == NULL);
1777f8919bdaSduboff 		ASSERT(mp->b_cont == NULL);
1778f8919bdaSduboff 
1779f8919bdaSduboff 		mp->b_rptr += VTAG_SIZE;
1780f8919bdaSduboff 		bp = mp->b_rptr;
1781f8919bdaSduboff 		mp->b_wptr = bp + len;
1782f8919bdaSduboff 
178323d366e3Sduboff 		/*
178423d366e3Sduboff 		 * flush the range of the entire buffer to invalidate
178523d366e3Sduboff 		 * all of corresponding dirty entries in iocache.
178623d366e3Sduboff 		 */
1787f8919bdaSduboff 		(void) ddi_dma_sync(rbp->rxb_dh, rx_header_len,
178823d366e3Sduboff 		    0, DDI_DMA_SYNC_FORKERNEL);
1789f8919bdaSduboff 
1790f8919bdaSduboff 		bcopy(rbp->rxb_buf + rx_header_len, bp, len);
1791f8919bdaSduboff 	}
1792f8919bdaSduboff 	return (mp);
1793f8919bdaSduboff }
1794f8919bdaSduboff 
#ifdef GEM_DEBUG_LEVEL
/* debug histogram: packets processed per gem_receive() call (16 = "more") */
uint_t	gem_rx_pkts[17];
#endif
1798f8919bdaSduboff 
1799f8919bdaSduboff 
/*
 * Harvest completed rx descriptors, send received packets up the
 * stream, and recycle the rx buffers back to the hardware.  Must be
 * called with intrlock held; the lock is dropped only around mac_rx().
 * Returns the number of descriptors processed.
 */
int
gem_receive(struct gem_dev *dp)
{
	uint64_t	len_total = 0;
	struct rxbuf	*rbp;
	mblk_t		*mp;
	int		cnt = 0;
	uint64_t	rxstat;
	struct rxbuf	*newbufs;		/* recycled buffers */
	struct rxbuf	**newbufs_tailp;
	mblk_t		*rx_head;		/* packets to send up */
	mblk_t		**rx_tailp;
	int		rx_ring_size = dp->gc.gc_rx_ring_size;
	seqnum_t	active_head;
	uint64_t	(*rx_desc_stat)(struct gem_dev *dp,
	    int slot, int ndesc);
	int		ethermin = ETHERMIN;
	int		ethermax = dp->mtu + sizeof (struct ether_header);
	int		rx_header_len = dp->gc.gc_rx_header_len;

	ASSERT(mutex_owned(&dp->intrlock));

	DPRINTF(3, (CE_CONT, "!%s: gem_receive: rx_buf_head:%p",
	    dp->name, dp->rx_buf_head));

	rx_desc_stat  = dp->gc.gc_rx_desc_stat;
	newbufs_tailp = &newbufs;
	rx_tailp = &rx_head;
	for (active_head = dp->rx_active_head;
	    (rbp = dp->rx_buf_head) != NULL; active_head++) {
		int		len;
		if (cnt == 0) {
			/*
			 * Sync a batch of descriptors at once; batch size
			 * presumably tuned from the polling delay — TODO
			 * confirm against gem.h.
			 */
			cnt = max(dp->poll_pkt_delay*2, 10);
			cnt = min(cnt,
			    dp->rx_active_tail - active_head);
			gem_rx_desc_dma_sync(dp,
			    SLOT(active_head, rx_ring_size),
			    cnt,
			    DDI_DMA_SYNC_FORKERNEL);
		}

		/* sync the hw-written rx header area, if the chip has one */
		if (rx_header_len > 0) {
			(void) ddi_dma_sync(rbp->rxb_dh, 0,
			    rx_header_len, DDI_DMA_SYNC_FORKERNEL);
		}

		if (((rxstat = (*rx_desc_stat)(dp,
		    SLOT(active_head, rx_ring_size),
		    rbp->rxb_nfrags))
		    & (GEM_RX_DONE | GEM_RX_ERR)) == 0) {
			/* not received yet */
			break;
		}

		/* Remove the head of the rx buffer list */
		dp->rx_buf_head = rbp->rxb_next;
		cnt--;


		if (rxstat & GEM_RX_ERR) {
			goto next;
		}

		len = rxstat & GEM_RX_LEN;
		DPRINTF(3, (CE_CONT, "!%s: %s: rxstat:0x%llx, len:0x%x",
		    dp->name, __func__, rxstat, len));

		/*
		 * Copy the packet
		 */
		if ((mp = dp->gc.gc_get_packet(dp, rbp, len)) == NULL) {
			/* no memory, discard the packet */
			dp->stats.norcvbuf++;
			goto next;
		}

		/*
		 * Process VLAN tag: a tagged frame may legally be
		 * VTAG_SIZE bytes longer than an untagged one.
		 */
		ethermin = ETHERMIN;
		ethermax = dp->mtu + sizeof (struct ether_header);
		if (GET_NET16(mp->b_rptr + VTAG_OFF) == VTAG_TPID) {
			ethermax += VTAG_SIZE;
		}

		/* check packet size */
		if (len < ethermin) {
			dp->stats.errrcv++;
			dp->stats.runt++;
			freemsg(mp);
			goto next;
		}

		if (len > ethermax) {
			dp->stats.errrcv++;
			dp->stats.frame_too_long++;
			freemsg(mp);
			goto next;
		}

		len_total += len;

#ifdef GEM_DEBUG_VLAN
		if (GET_ETHERTYPE(mp->b_rptr) == VTAG_TPID) {
			gem_dump_packet(dp, (char *)__func__, mp, B_TRUE);
		}
#endif
		/* append received packet to temporaly rx buffer list */
		*rx_tailp = mp;
		rx_tailp  = &mp->b_next;

		/* count broadcast/multicast (group-address bit set) */
		if (mp->b_rptr[0] & 1) {
			if (bcmp(mp->b_rptr,
			    gem_etherbroadcastaddr.ether_addr_octet,
			    ETHERADDRL) == 0) {
				dp->stats.rbcast++;
			} else {
				dp->stats.rmcast++;
			}
		}
next:
		ASSERT(rbp != NULL);

		/* append new one to temporal new buffer list */
		*newbufs_tailp = rbp;
		newbufs_tailp  = &rbp->rxb_next;
	}

	/* advance rx_active_head */
	if ((cnt = active_head - dp->rx_active_head) > 0) {
		dp->stats.rbytes += len_total;
		dp->stats.rpackets += cnt;
	}
	dp->rx_active_head = active_head;

	/* terminate the working list */
	*newbufs_tailp = NULL;
	*rx_tailp = NULL;

	if (dp->rx_buf_head == NULL) {
		dp->rx_buf_tail = NULL;
	}

	DPRINTF(4, (CE_CONT, "%s: %s: cnt:%d, rx_head:%p",
	    dp->name, __func__, cnt, rx_head));

	if (newbufs) {
		/*
		 * fillfull rx list with new buffers
		 */
		seqnum_t	head;

		/* save current tail */
		head = dp->rx_active_tail;
		gem_append_rxbuf(dp, newbufs);

		/* call hw depend start routine if we have. */
		dp->gc.gc_rx_start(dp,
		    SLOT(head, rx_ring_size), dp->rx_active_tail - head);
	}

	if (rx_head) {
		/*
		 * send up received packets; drop intrlock across the
		 * upcall to avoid lock-order problems with the mac layer
		 */
		mutex_exit(&dp->intrlock);
		mac_rx(dp->mh, NULL, rx_head);
		mutex_enter(&dp->intrlock);
	}

#ifdef GEM_DEBUG_LEVEL
	gem_rx_pkts[min(cnt, sizeof (gem_rx_pkts)/sizeof (uint_t)-1)]++;
#endif
	return (cnt);
}
1975f8919bdaSduboff 
/*
 * Handle a tx-done event: reclaim completed tx buffers (restarting the
 * nic on reclaim failure) and decide whether the blocked downstream
 * must be rescheduled.  Returns B_TRUE when the caller should invoke
 * mac_tx_update().
 */
boolean_t
gem_tx_done(struct gem_dev *dp)
{
	boolean_t	tx_sched = B_FALSE;

	if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
		/* tx hardware error; restart but keep buffered packets */
		(void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
		DPRINTF(2, (CE_CONT, "!%s: gem_tx_done: tx_desc: %d %d",
		    dp->name, dp->tx_active_head, dp->tx_active_tail));
		tx_sched = B_TRUE;
		goto x;
	}

	mutex_enter(&dp->xmitlock);

	/* XXX - we must not have any packets in soft queue */
	ASSERT(dp->tx_softq_head == dp->tx_softq_tail);
	/*
	 * If we won't have chance to get more free tx buffers, and blocked,
	 * it is worth to reschedule the downstream i.e. tx side.
	 */
	ASSERT(dp->tx_desc_intr - dp->tx_desc_head >= 0);
	if (dp->tx_blocked && dp->tx_desc_intr == dp->tx_desc_head) {
		/*
		 * As no further tx-done interrupts are scheduled, this
		 * is the last chance to kick tx side, which may be
		 * blocked now, otherwise the tx side never works again.
		 */
		tx_sched = B_TRUE;
		dp->tx_blocked = (clock_t)0;
		/* relax the tx flow-control cap, up to the hard limit */
		dp->tx_max_packets =
		    min(dp->tx_max_packets + 2, dp->gc.gc_tx_buf_limit);
	}

	mutex_exit(&dp->xmitlock);

	DPRINTF(3, (CE_CONT, "!%s: %s: ret: blocked:%d",
	    dp->name, __func__, BOOLEAN(dp->tx_blocked)));
x:
	return (tx_sched);
}
2017f8919bdaSduboff 
/*
 * Common interrupt handler wrapper: serializes with intrlock, marks
 * the handler busy while the chip-specific gc_interrupt routine runs,
 * and performs the mac_tx_update() upcall outside the lock when the
 * chip handler requests it via INTR_RESTART_TX.  Returns the
 * DDI_INTR_* claim status.
 */
static uint_t
gem_intr(struct gem_dev	*dp)
{
	uint_t		ret;

	mutex_enter(&dp->intrlock);
	if (dp->mac_suspended) {
		mutex_exit(&dp->intrlock);
		return (DDI_INTR_UNCLAIMED);
	}
	/* intr_busy lets other threads see a handler is in progress */
	dp->intr_busy = B_TRUE;

	ret = (*dp->gc.gc_interrupt)(dp);

	if (ret == DDI_INTR_UNCLAIMED) {
		dp->intr_busy = B_FALSE;
		mutex_exit(&dp->intrlock);
		return (ret);
	}

	if (!dp->mac_active) {
		/* wake up threads draining tx while the mac is stopping */
		cv_broadcast(&dp->tx_drain_cv);
	}


	dp->stats.intr++;
	dp->intr_busy = B_FALSE;

	mutex_exit(&dp->intrlock);

	/* do the upcall without holding intrlock */
	if (ret & INTR_RESTART_TX) {
		DPRINTF(4, (CE_CONT, "!%s: calling mac_tx_update", dp->name));
		mac_tx_update(dp->mh);
		ret &= ~INTR_RESTART_TX;
	}
	return (ret);
}
2055f8919bdaSduboff 
/*
 * Polling fallback used when hardware interrupts are unavailable:
 * invoke the interrupt handler from a timer every tick.
 */
static void
gem_intr_watcher(struct gem_dev *dp)
{
	(void) gem_intr(dp);

	/* schedule next call of gem_intr_watcher */
	dp->intr_watcher_id =
	    timeout((void (*)(void *))gem_intr_watcher, (void *)dp, 1);
}
2065f8919bdaSduboff 
2066f8919bdaSduboff /* ======================================================================== */
2067f8919bdaSduboff /*
2068f8919bdaSduboff  * MII support routines
2069f8919bdaSduboff  */
2070f8919bdaSduboff /* ======================================================================== */
2071f8919bdaSduboff static void
gem_choose_forcedmode(struct gem_dev * dp)2072f8919bdaSduboff gem_choose_forcedmode(struct gem_dev *dp)
2073f8919bdaSduboff {
2074f8919bdaSduboff 	/* choose media mode */
2075f8919bdaSduboff 	if (dp->anadv_1000fdx || dp->anadv_1000hdx) {
2076f8919bdaSduboff 		dp->speed = GEM_SPD_1000;
2077f8919bdaSduboff 		dp->full_duplex = dp->anadv_1000fdx;
2078f8919bdaSduboff 	} else if (dp->anadv_100fdx || dp->anadv_100t4) {
2079f8919bdaSduboff 		dp->speed = GEM_SPD_100;
2080f8919bdaSduboff 		dp->full_duplex = B_TRUE;
2081f8919bdaSduboff 	} else if (dp->anadv_100hdx) {
2082f8919bdaSduboff 		dp->speed = GEM_SPD_100;
2083f8919bdaSduboff 		dp->full_duplex = B_FALSE;
2084f8919bdaSduboff 	} else {
2085f8919bdaSduboff 		dp->speed = GEM_SPD_10;
2086f8919bdaSduboff 		dp->full_duplex = dp->anadv_10fdx;
2087f8919bdaSduboff 	}
2088f8919bdaSduboff }
2089f8919bdaSduboff 
2090f8919bdaSduboff uint16_t
gem_mii_read(struct gem_dev * dp,uint_t reg)2091f8919bdaSduboff gem_mii_read(struct gem_dev *dp, uint_t reg)
2092f8919bdaSduboff {
2093f8919bdaSduboff 	if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2094f8919bdaSduboff 		(*dp->gc.gc_mii_sync)(dp);
2095f8919bdaSduboff 	}
2096f8919bdaSduboff 	return ((*dp->gc.gc_mii_read)(dp, reg));
2097f8919bdaSduboff }
2098f8919bdaSduboff 
2099f8919bdaSduboff void
gem_mii_write(struct gem_dev * dp,uint_t reg,uint16_t val)2100f8919bdaSduboff gem_mii_write(struct gem_dev *dp, uint_t reg, uint16_t val)
2101f8919bdaSduboff {
2102f8919bdaSduboff 	if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2103f8919bdaSduboff 		(*dp->gc.gc_mii_sync)(dp);
2104f8919bdaSduboff 	}
2105f8919bdaSduboff 	(*dp->gc.gc_mii_write)(dp, reg, val);
2106f8919bdaSduboff }
2107f8919bdaSduboff 
/*
 * Decode MII pause-ability bits into the driver's 2-bit flow control
 * encoding: bit0 = symmetric pause, bit1 = asymmetric pause.
 */
#define	fc_cap_decode(x)	\
	((((x) & MII_ABILITY_PAUSE) ? 1 : 0) |	\
	(((x) & MII_ABILITY_ASMPAUSE) ? 2 : 0))
2111f8919bdaSduboff 
2112f8919bdaSduboff int
gem_mii_config_default(struct gem_dev * dp)2113f8919bdaSduboff gem_mii_config_default(struct gem_dev *dp)
2114f8919bdaSduboff {
2115f8919bdaSduboff 	uint16_t	mii_stat;
2116f8919bdaSduboff 	uint16_t	val;
2117f8919bdaSduboff 	static uint16_t fc_cap_encode[4] = {
2118bdb9230aSGarrett D'Amore 		0, /* none */
2119bdb9230aSGarrett D'Amore 		MII_ABILITY_PAUSE, /* symmetric */
2120bdb9230aSGarrett D'Amore 		MII_ABILITY_ASMPAUSE, /* tx */
2121bdb9230aSGarrett D'Amore 		MII_ABILITY_PAUSE | MII_ABILITY_ASMPAUSE, /* rx-symmetric */
2122f8919bdaSduboff 	};
2123f8919bdaSduboff 
2124f8919bdaSduboff 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2125f8919bdaSduboff 
2126f8919bdaSduboff 	/*
2127f8919bdaSduboff 	 * Configure bits in advertisement register
2128f8919bdaSduboff 	 */
2129f8919bdaSduboff 	mii_stat = dp->mii_status;
2130f8919bdaSduboff 
2131f8919bdaSduboff 	DPRINTF(1, (CE_CONT, "!%s: %s: MII_STATUS reg:%b",
2132f8919bdaSduboff 	    dp->name, __func__, mii_stat, MII_STATUS_BITS));
2133f8919bdaSduboff 
2134f8919bdaSduboff 	if ((mii_stat & MII_STATUS_ABILITY_TECH) == 0) {
2135f8919bdaSduboff 		/* it's funny */
2136f8919bdaSduboff 		cmn_err(CE_WARN, "!%s: wrong ability bits: mii_status:%b",
2137f8919bdaSduboff 		    dp->name, mii_stat, MII_STATUS_BITS);
2138f8919bdaSduboff 		return (GEM_FAILURE);
2139f8919bdaSduboff 	}
2140f8919bdaSduboff 
2141f8919bdaSduboff 	/* Do not change the rest of the ability bits in the advert reg */
2142f8919bdaSduboff 	val = gem_mii_read(dp, MII_AN_ADVERT) & ~MII_ABILITY_ALL;
2143f8919bdaSduboff 
2144f8919bdaSduboff 	DPRINTF(0, (CE_CONT,
2145f8919bdaSduboff 	    "!%s: %s: 100T4:%d 100F:%d 100H:%d 10F:%d 10H:%d",
2146f8919bdaSduboff 	    dp->name, __func__,
2147f8919bdaSduboff 	    dp->anadv_100t4, dp->anadv_100fdx, dp->anadv_100hdx,
2148f8919bdaSduboff 	    dp->anadv_10fdx, dp->anadv_10hdx));
2149f8919bdaSduboff 
2150f8919bdaSduboff 	if (dp->anadv_100t4) {
2151f8919bdaSduboff 		val |= MII_ABILITY_100BASE_T4;
2152f8919bdaSduboff 	}
2153f8919bdaSduboff 	if (dp->anadv_100fdx) {
2154f8919bdaSduboff 		val |= MII_ABILITY_100BASE_TX_FD;
2155f8919bdaSduboff 	}
2156f8919bdaSduboff 	if (dp->anadv_100hdx) {
2157f8919bdaSduboff 		val |= MII_ABILITY_100BASE_TX;
2158f8919bdaSduboff 	}
2159f8919bdaSduboff 	if (dp->anadv_10fdx) {
2160f8919bdaSduboff 		val |= MII_ABILITY_10BASE_T_FD;
2161f8919bdaSduboff 	}
2162f8919bdaSduboff 	if (dp->anadv_10hdx) {
2163f8919bdaSduboff 		val |= MII_ABILITY_10BASE_T;
2164f8919bdaSduboff 	}
2165f8919bdaSduboff 
2166f8919bdaSduboff 	/* set flow control capability */
2167f8919bdaSduboff 	val |= fc_cap_encode[dp->anadv_flow_control];
2168f8919bdaSduboff 
2169f8919bdaSduboff 	DPRINTF(0, (CE_CONT,
2170f8919bdaSduboff 	    "!%s: %s: setting MII_AN_ADVERT reg:%b, mii_mode:%d, fc:%d",
2171f8919bdaSduboff 	    dp->name, __func__, val, MII_ABILITY_BITS, dp->gc.gc_mii_mode,
2172f8919bdaSduboff 	    dp->anadv_flow_control));
2173f8919bdaSduboff 
2174f8919bdaSduboff 	gem_mii_write(dp, MII_AN_ADVERT, val);
2175f8919bdaSduboff 
2176f8919bdaSduboff 	if (mii_stat & MII_STATUS_XSTATUS) {
2177f8919bdaSduboff 		/*
2178f8919bdaSduboff 		 * 1000Base-T GMII support
2179f8919bdaSduboff 		 */
2180f8919bdaSduboff 		if (!dp->anadv_autoneg) {
2181f8919bdaSduboff 			/* enable manual configuration */
2182f8919bdaSduboff 			val = MII_1000TC_CFG_EN;
2183f8919bdaSduboff 		} else {
2184f8919bdaSduboff 			val = 0;
2185f8919bdaSduboff 			if (dp->anadv_1000fdx) {
2186f8919bdaSduboff 				val |= MII_1000TC_ADV_FULL;
2187f8919bdaSduboff 			}
2188f8919bdaSduboff 			if (dp->anadv_1000hdx) {
2189f8919bdaSduboff 				val |= MII_1000TC_ADV_HALF;
2190f8919bdaSduboff 			}
2191f8919bdaSduboff 		}
2192f8919bdaSduboff 		DPRINTF(0, (CE_CONT,
2193f8919bdaSduboff 		    "!%s: %s: setting MII_1000TC reg:%b",
2194f8919bdaSduboff 		    dp->name, __func__, val, MII_1000TC_BITS));
2195f8919bdaSduboff 
2196f8919bdaSduboff 		gem_mii_write(dp, MII_1000TC, val);
2197f8919bdaSduboff 	}
2198f8919bdaSduboff 
2199f8919bdaSduboff 	return (GEM_SUCCESS);
2200f8919bdaSduboff }
2201f8919bdaSduboff 
/* report a link state transition to the MAC layer */
#define	GEM_LINKUP(dp)		mac_link_update((dp)->mh, LINK_STATE_UP)
#define	GEM_LINKDOWN(dp)	mac_link_update((dp)->mh, LINK_STATE_DOWN)
2204f8919bdaSduboff 
2205f8919bdaSduboff static uint8_t gem_fc_result[4 /* my cap */ ][4 /* lp cap */] = {
2206f8919bdaSduboff /*	 none	symm	tx	rx/symm */
2207f8919bdaSduboff /* none */
2208f8919bdaSduboff 	{FLOW_CONTROL_NONE,
2209f8919bdaSduboff 		FLOW_CONTROL_NONE,
2210f8919bdaSduboff 			FLOW_CONTROL_NONE,
2211f8919bdaSduboff 				FLOW_CONTROL_NONE},
2212f8919bdaSduboff /* sym */
2213f8919bdaSduboff 	{FLOW_CONTROL_NONE,
2214f8919bdaSduboff 		FLOW_CONTROL_SYMMETRIC,
2215f8919bdaSduboff 			FLOW_CONTROL_NONE,
2216f8919bdaSduboff 				FLOW_CONTROL_SYMMETRIC},
2217f8919bdaSduboff /* tx */
2218f8919bdaSduboff 	{FLOW_CONTROL_NONE,
2219f8919bdaSduboff 		FLOW_CONTROL_NONE,
2220f8919bdaSduboff 			FLOW_CONTROL_NONE,
2221f8919bdaSduboff 				FLOW_CONTROL_TX_PAUSE},
2222f8919bdaSduboff /* rx/symm */
2223f8919bdaSduboff 	{FLOW_CONTROL_NONE,
2224f8919bdaSduboff 		FLOW_CONTROL_SYMMETRIC,
2225f8919bdaSduboff 			FLOW_CONTROL_RX_PAUSE,
2226f8919bdaSduboff 				FLOW_CONTROL_SYMMETRIC},
2227f8919bdaSduboff };
2228f8919bdaSduboff 
/* link-up message fragments, indexed by the resolved dp->flow_control */
static char *gem_fc_type[] = {
	"without",
	"with symmetric",
	"with tx",
	"with rx",
};
2235f8919bdaSduboff 
/*
 * gem_mii_link_check: one pass of the MII link watcher state machine.
 *
 * Advances dp->mii_state through
 *   UNKNOWN -> RESETTING -> AUTONEGOTIATING -> AN_DONE ->
 *   MEDIA_SETUP -> LINKDOWN <-> LINKUP
 * by reading and writing PHY registers via gem_mii_read/gem_mii_write,
 * and schedules the next pass with timeout(9F) when further polling is
 * needed (dp->mii_interval != 0).
 *
 * Returns B_TRUE when the caller should kick the transmit side via
 * mac_tx_update().  The caller (gem_mii_link_watcher) holds
 * dp->intrlock across this call.
 */
boolean_t
gem_mii_link_check(struct gem_dev *dp)
{
	uint16_t	old_mii_state;
	boolean_t	tx_sched = B_FALSE;
	uint16_t	status;
	uint16_t	advert;
	uint16_t	lpable;
	uint16_t	exp;
	uint16_t	ctl1000;
	uint16_t	stat1000;
	uint16_t	val;
	clock_t		now;
	clock_t		diff;
	int		linkdown_action;
	boolean_t	fix_phy = B_FALSE;

	now = ddi_get_lbolt();
	old_mii_state = dp->mii_state;

	DPRINTF(3, (CE_CONT, "!%s: %s: time:%d state:%d",
	    dp->name, __func__, now, dp->mii_state));

	/* ticks elapsed since the previous pass; drives all timers below */
	diff = now - dp->mii_last_check;
	dp->mii_last_check = now;

	/*
	 * For NWAM, don't show linkdown state right
	 * after the system boots
	 */
	if (dp->linkup_delay > 0) {
		if (dp->linkup_delay > diff) {
			dp->linkup_delay -= diff;
		} else {
			/* link up timeout */
			dp->linkup_delay = -1;
		}
	}

next_nowait:
	/* re-evaluate the state machine without waiting for the next tick */
	switch (dp->mii_state) {
	case MII_STATE_UNKNOWN:
		/* power-up, DP83840 requires 32 sync bits */
		(*dp->gc.gc_mii_sync)(dp);
		goto reset_phy;

	case MII_STATE_RESETTING:
		dp->mii_timer -= diff;
		if (dp->mii_timer > 0) {
			/* don't read phy registers in resetting */
			dp->mii_interval = WATCH_INTERVAL_FAST;
			goto next;
		}

		/* Timer expired, ensure reset bit is not set */

		if (dp->mii_status & MII_STATUS_MFPRMBLSUPR) {
			/* some phys need sync bits after reset */
			(*dp->gc.gc_mii_sync)(dp);
		}
		val = gem_mii_read(dp, MII_CONTROL);
		if (val & MII_CONTROL_RESET) {
			cmn_err(CE_NOTE,
			    "!%s: time:%ld resetting phy not complete."
			    " mii_control:0x%b",
			    dp->name, ddi_get_lbolt(),
			    val, MII_CONTROL_BITS);
		}

		/* ensure neither isolated nor pwrdown nor auto-nego mode */
		/* XXX -- this operation is required for NS DP83840A. */
		gem_mii_write(dp, MII_CONTROL, 0);

		/* As resetting PHY has completed, configure PHY registers */
		if ((*dp->gc.gc_mii_config)(dp) != GEM_SUCCESS) {
			/* we failed to configure PHY. */
			goto reset_phy;
		}

		/* mii_config may disable auto-negotiation */
		gem_choose_forcedmode(dp);

		/* forget the results of any previous negotiation */
		dp->mii_lpable = 0;
		dp->mii_advert = 0;
		dp->mii_exp = 0;
		dp->mii_ctl1000 = 0;
		dp->mii_stat1000 = 0;
		dp->flow_control = FLOW_CONTROL_NONE;

		if (!dp->anadv_autoneg) {
			/* skip auto-negotiation phase */
			dp->mii_state = MII_STATE_MEDIA_SETUP;
			dp->mii_timer = 0;
			dp->mii_interval = 0;
			goto next_nowait;
		}

		/* Issue auto-negotiation command */
		goto autonego;

	case MII_STATE_AUTONEGOTIATING:
		/*
		 * Autonegotiation is in progress
		 */
		dp->mii_timer -= diff;
		if (dp->mii_timer -
		    (dp->gc.gc_mii_an_timeout
		    - dp->gc.gc_mii_an_wait) > 0) {
			/*
			 * wait for a while, typically autonegotiation
			 * completes in 2.3 - 2.5 sec.
			 */
			dp->mii_interval = WATCH_INTERVAL_FAST;
			goto next;
		}

		/* read PHY status */
		status = gem_mii_read(dp, MII_STATUS);
		DPRINTF(4, (CE_CONT,
		    "!%s: %s: called: mii_state:%d MII_STATUS reg:%b",
		    dp->name, __func__, dp->mii_state,
		    status, MII_STATUS_BITS));

		if (status & MII_STATUS_REMFAULT) {
			/*
			 * The link partner told us something wrong happened.
			 * What do we do ?  Just restart auto-negotiation.
			 */
			cmn_err(CE_CONT,
			    "!%s: auto-negotiation failed: remote fault",
			    dp->name);
			goto autonego;
		}

		if ((status & MII_STATUS_ANDONE) == 0) {
			if (dp->mii_timer <= 0) {
				/*
				 * Auto-negotiation was timed out,
				 * try again w/o resetting phy.
				 */
				if (!dp->mii_supress_msg) {
					cmn_err(CE_WARN,
				    "!%s: auto-negotiation failed: timeout",
					    dp->name);
					dp->mii_supress_msg = B_TRUE;
				}
				goto autonego;
			}
			/*
			 * Auto-negotiation is in progress. Wait.
			 */
			dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
			goto next;
		}

		/*
		 * Auto-negotiation has completed.
		 * Assume linkdown and fall through.
		 */
		dp->mii_supress_msg = B_FALSE;
		dp->mii_state = MII_STATE_AN_DONE;
		DPRINTF(0, (CE_CONT,
		    "!%s: auto-negotiation completed, MII_STATUS:%b",
		    dp->name, status, MII_STATUS_BITS));

		if (dp->gc.gc_mii_an_delay > 0) {
			/* chip requires an extra settle delay after AN */
			dp->mii_timer = dp->gc.gc_mii_an_delay;
			dp->mii_interval = drv_usectohz(20*1000);
			goto next;
		}

		dp->mii_timer = 0;
		/* don't double-count elapsed time in the next state */
		diff = 0;
		goto next_nowait;

	case MII_STATE_AN_DONE:
		/*
		 * Auto-negotiation is done. Now we can set up media.
		 */
		dp->mii_timer -= diff;
		if (dp->mii_timer > 0) {
			/* wait for a while */
			dp->mii_interval = WATCH_INTERVAL_FAST;
			goto next;
		}

		/*
		 * set up the result of auto negotiation
		 */

		/*
		 * Read registers required to determine current
		 * duplex mode and media speed.
		 */
		if (dp->gc.gc_mii_an_delay > 0) {
			/*
			 * As the link watcher context has been suspended,
			 * 'status' is invalid. We must read the status
			 * register here.
			 */
			status = gem_mii_read(dp, MII_STATUS);
		}
		advert = gem_mii_read(dp, MII_AN_ADVERT);
		lpable = gem_mii_read(dp, MII_AN_LPABLE);
		exp = gem_mii_read(dp, MII_AN_EXPANSION);
		if (exp == 0xffff) {
			/* some phys don't have exp register */
			exp = 0;
		}
		ctl1000  = 0;
		stat1000 = 0;
		if (dp->mii_status & MII_STATUS_XSTATUS) {
			ctl1000  = gem_mii_read(dp, MII_1000TC);
			stat1000 = gem_mii_read(dp, MII_1000TS);
		}
		dp->mii_lpable = lpable;
		dp->mii_advert = advert;
		dp->mii_exp = exp;
		dp->mii_ctl1000  = ctl1000;
		dp->mii_stat1000 = stat1000;

		cmn_err(CE_CONT,
		"!%s: auto-negotiation done, advert:%b, lpable:%b, exp:%b",
		    dp->name,
		    advert, MII_ABILITY_BITS,
		    lpable, MII_ABILITY_BITS,
		    exp, MII_AN_EXP_BITS);

		if (dp->mii_status & MII_STATUS_XSTATUS) {
			cmn_err(CE_CONT,
			    "! MII_1000TC:%b, MII_1000TS:%b",
			    ctl1000, MII_1000TC_BITS,
			    stat1000, MII_1000TS_BITS);
		}

		if (gem_population(lpable) <= 1 &&
		    (exp & MII_AN_EXP_LPCANAN) == 0) {
			if ((advert & MII_ABILITY_TECH) != lpable) {
				cmn_err(CE_WARN,
				    "!%s: but the link partnar doesn't seem"
				    " to have auto-negotiation capability."
				    " please check the link configuration.",
				    dp->name);
			}
			/*
			 * it should be the result of parallel detection,
			 * which cannot detect duplex mode.
			 */
			if (lpable & MII_ABILITY_100BASE_TX) {
				/*
				 * we prefer full duplex mode for 100Mbps
				 * connection, if we can.
				 */
				lpable |= advert & MII_ABILITY_100BASE_TX_FD;
			}

			if ((advert & lpable) == 0 &&
			    lpable & MII_ABILITY_10BASE_T) {
				lpable |= advert & MII_ABILITY_10BASE_T_FD;
			}
			/*
			 * as the link partner can't auto-negotiate, use
			 * fixed mode temporarily.
			 */
			fix_phy = B_TRUE;
		} else if (lpable == 0) {
			cmn_err(CE_WARN, "!%s: wrong lpable.", dp->name);
			goto reset_phy;
		}
		/*
		 * configure current link mode according to AN priority.
		 */
		val = advert & lpable;
		if ((ctl1000 & MII_1000TC_ADV_FULL) &&
		    (stat1000 & MII_1000TS_LP_FULL)) {
			/* 1000BaseT & full duplex */
			dp->speed	 = GEM_SPD_1000;
			dp->full_duplex  = B_TRUE;
		} else if ((ctl1000 & MII_1000TC_ADV_HALF) &&
		    (stat1000 & MII_1000TS_LP_HALF)) {
			/* 1000BaseT & half duplex */
			dp->speed = GEM_SPD_1000;
			dp->full_duplex = B_FALSE;
		} else if (val & MII_ABILITY_100BASE_TX_FD) {
			/* 100BaseTx & full duplex */
			dp->speed = GEM_SPD_100;
			dp->full_duplex = B_TRUE;
		} else if (val & MII_ABILITY_100BASE_T4) {
			/* 100BaseT4 & full duplex */
			dp->speed = GEM_SPD_100;
			dp->full_duplex = B_TRUE;
		} else if (val & MII_ABILITY_100BASE_TX) {
			/* 100BaseTx & half duplex */
			dp->speed	 = GEM_SPD_100;
			dp->full_duplex  = B_FALSE;
		} else if (val & MII_ABILITY_10BASE_T_FD) {
			/* 10BaseT & full duplex */
			dp->speed	 = GEM_SPD_10;
			dp->full_duplex  = B_TRUE;
		} else if (val & MII_ABILITY_10BASE_T) {
			/* 10BaseT & half duplex */
			dp->speed	 = GEM_SPD_10;
			dp->full_duplex  = B_FALSE;
		} else {
			/*
			 * It seems that the link partner doesn't have
			 * auto-negotiation capability and our PHY
			 * could not report the correct current mode.
			 * We guess current mode by mii_control register.
			 */
			val = gem_mii_read(dp, MII_CONTROL);

			/* select 100m full or 10m half */
			dp->speed = (val & MII_CONTROL_100MB) ?
			    GEM_SPD_100 : GEM_SPD_10;
			dp->full_duplex = dp->speed != GEM_SPD_10;
			fix_phy = B_TRUE;

			cmn_err(CE_NOTE,
			    "!%s: auto-negotiation done but "
			    "common ability not found.\n"
			    "PHY state: control:%b advert:%b lpable:%b\n"
			    "guessing %d Mbps %s duplex mode",
			    dp->name,
			    val, MII_CONTROL_BITS,
			    advert, MII_ABILITY_BITS,
			    lpable, MII_ABILITY_BITS,
			    gem_speed_value[dp->speed],
			    dp->full_duplex ? "full" : "half");
		}

		/* pause is only meaningful on a full duplex link */
		if (dp->full_duplex) {
			dp->flow_control =
			    gem_fc_result[fc_cap_decode(advert)]
			    [fc_cap_decode(lpable)];
		} else {
			dp->flow_control = FLOW_CONTROL_NONE;
		}
		dp->mii_state = MII_STATE_MEDIA_SETUP;
		/* FALLTHROUGH */

	case MII_STATE_MEDIA_SETUP:
		dp->mii_state = MII_STATE_LINKDOWN;
		dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
		DPRINTF(2, (CE_CONT, "!%s: setup midia mode done", dp->name));
		dp->mii_supress_msg = B_FALSE;

		/* use short interval */
		dp->mii_interval = WATCH_INTERVAL_FAST;

		if ((!dp->anadv_autoneg) ||
		    dp->gc.gc_mii_an_oneshot || fix_phy) {

			/*
			 * write specified mode to phy.
			 */
			val = gem_mii_read(dp, MII_CONTROL);
			val &= ~(MII_CONTROL_SPEED | MII_CONTROL_FDUPLEX |
			    MII_CONTROL_ANE | MII_CONTROL_RSAN);

			if (dp->full_duplex) {
				val |= MII_CONTROL_FDUPLEX;
			}

			switch (dp->speed) {
			case GEM_SPD_1000:
				val |= MII_CONTROL_1000MB;
				break;

			case GEM_SPD_100:
				val |= MII_CONTROL_100MB;
				break;

			default:
				cmn_err(CE_WARN, "%s: unknown speed:%d",
				    dp->name, dp->speed);
				/* FALLTHROUGH */
			case GEM_SPD_10:
				/* for GEM_SPD_10, do nothing */
				break;
			}

			if (dp->mii_status & MII_STATUS_XSTATUS) {
				gem_mii_write(dp,
				    MII_1000TC, MII_1000TC_CFG_EN);
			}
			gem_mii_write(dp, MII_CONTROL, val);
		}

		if (dp->nic_state >= NIC_STATE_INITIALIZED) {
			/* notify the result of auto-negotiation to mac */
			(*dp->gc.gc_set_media)(dp);
		}

		if ((void *)dp->gc.gc_mii_tune_phy) {
			/* for built-in sis900 */
			/* XXX - this code should be removed.  */
			(*dp->gc.gc_mii_tune_phy)(dp);
		}

		goto next_nowait;

	case MII_STATE_LINKDOWN:
		status = gem_mii_read(dp, MII_STATUS);
		if (status & MII_STATUS_LINKUP) {
			/*
			 * Link going up
			 */
			dp->mii_state = MII_STATE_LINKUP;
			dp->mii_supress_msg = B_FALSE;

			DPRINTF(0, (CE_CONT,
			    "!%s: link up detected: mii_stat:%b",
			    dp->name, status, MII_STATUS_BITS));

			/*
			 * MII_CONTROL_100MB and  MII_CONTROL_FDUPLEX are
			 * ignored when MII_CONTROL_ANE is set.
			 */
			cmn_err(CE_CONT,
			    "!%s: Link up: %d Mbps %s duplex %s flow control",
			    dp->name,
			    gem_speed_value[dp->speed],
			    dp->full_duplex ? "full" : "half",
			    gem_fc_type[dp->flow_control]);

			dp->mii_interval = dp->gc.gc_mii_link_watch_interval;

			/* XXX - we need other timer to watch statistics */
			if (dp->gc.gc_mii_hw_link_detection &&
			    dp->nic_state == NIC_STATE_ONLINE) {
				/* hw will interrupt us; stop polling */
				dp->mii_interval = 0;
			}

			if (dp->nic_state == NIC_STATE_ONLINE) {
				if (!dp->mac_active) {
					(void) gem_mac_start(dp);
				}
				tx_sched = B_TRUE;
			}
			goto next;
		}

		dp->mii_supress_msg = B_TRUE;
		if (dp->anadv_autoneg) {
			dp->mii_timer -= diff;
			if (dp->mii_timer <= 0) {
				/*
				 * link down timer expired.
				 * need to restart auto-negotiation.
				 */
				linkdown_action =
				    dp->gc.gc_mii_linkdown_timeout_action;
				goto restart_autonego;
			}
		}
		/* don't change mii_state */
		break;

	case MII_STATE_LINKUP:
		status = gem_mii_read(dp, MII_STATUS);
		if ((status & MII_STATUS_LINKUP) == 0) {
			/*
			 * Link going down
			 */
			cmn_err(CE_NOTE,
			    "!%s: link down detected: mii_stat:%b",
			    dp->name, status, MII_STATUS_BITS);

			if (dp->nic_state == NIC_STATE_ONLINE &&
			    dp->mac_active &&
			    dp->gc.gc_mii_stop_mac_on_linkdown) {
				(void) gem_mac_stop(dp, 0);

				if (dp->tx_blocked) {
					/* drain tx */
					tx_sched = B_TRUE;
				}
			}

			if (dp->anadv_autoneg) {
				/* need to restart auto-negotiation */
				linkdown_action = dp->gc.gc_mii_linkdown_action;
				goto restart_autonego;
			}

			dp->mii_state = MII_STATE_LINKDOWN;
			dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;

			if ((void *)dp->gc.gc_mii_tune_phy) {
				/* for built-in sis900 */
				(*dp->gc.gc_mii_tune_phy)(dp);
			}
			dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
			goto next;
		}

		/* don't change mii_state */
		if (dp->gc.gc_mii_hw_link_detection &&
		    dp->nic_state == NIC_STATE_ONLINE) {
			/* hw link-change interrupts make polling redundant */
			dp->mii_interval = 0;
			goto next;
		}
		break;
	}
	dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
	goto next;

	/* Actions on the end of state routine */

restart_autonego:
	/* dispatch on the configured linkdown recovery policy */
	switch (linkdown_action) {
	case MII_ACTION_RESET:
		if (!dp->mii_supress_msg) {
			cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
		}
		dp->mii_supress_msg = B_TRUE;
		goto reset_phy;

	case MII_ACTION_NONE:
		dp->mii_supress_msg = B_TRUE;
		if (dp->gc.gc_mii_an_oneshot) {
			goto autonego;
		}
		/* PHY will restart autonego automatically */
		dp->mii_state = MII_STATE_AUTONEGOTIATING;
		dp->mii_timer = dp->gc.gc_mii_an_timeout;
		dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
		goto next;

	case MII_ACTION_RSA:
		if (!dp->mii_supress_msg) {
			cmn_err(CE_CONT, "!%s: restarting auto-negotiation",
			    dp->name);
		}
		dp->mii_supress_msg = B_TRUE;
		goto autonego;

	default:
		cmn_err(CE_WARN, "!%s: unknowm linkdown action: %d",
		    dp->name, dp->gc.gc_mii_linkdown_action);
		dp->mii_supress_msg = B_TRUE;
	}
	/* NOTREACHED */

reset_phy:
	if (!dp->mii_supress_msg) {
		cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
	}
	dp->mii_state = MII_STATE_RESETTING;
	dp->mii_timer = dp->gc.gc_mii_reset_timeout;
	if (!dp->gc.gc_mii_dont_reset) {
		gem_mii_write(dp, MII_CONTROL, MII_CONTROL_RESET);
	}
	dp->mii_interval = WATCH_INTERVAL_FAST;
	goto next;

autonego:
	if (!dp->mii_supress_msg) {
		cmn_err(CE_CONT, "!%s: auto-negotiation started", dp->name);
	}
	dp->mii_state = MII_STATE_AUTONEGOTIATING;
	dp->mii_timer = dp->gc.gc_mii_an_timeout;

	/* start/restart auto nego */
	val = gem_mii_read(dp, MII_CONTROL) &
	    ~(MII_CONTROL_ISOLATE | MII_CONTROL_PWRDN | MII_CONTROL_RESET);

	gem_mii_write(dp, MII_CONTROL,
	    val | MII_CONTROL_RSAN | MII_CONTROL_ANE);

	dp->mii_interval = dp->gc.gc_mii_an_watch_interval;

next:
	if (dp->link_watcher_id == 0 && dp->mii_interval) {
		/* we must schedule next mii_watcher */
		dp->link_watcher_id =
		    timeout((void (*)(void *))&gem_mii_link_watcher,
		    (void *)dp, dp->mii_interval);
	}

	if (old_mii_state != dp->mii_state) {
		/* notify new mii link state */
		if (dp->mii_state == MII_STATE_LINKUP) {
			dp->linkup_delay = 0;
			GEM_LINKUP(dp);
		} else if (dp->linkup_delay <= 0) {
			GEM_LINKDOWN(dp);
		}
	} else if (dp->linkup_delay < 0) {
		/* first linkup timeout */
		dp->linkup_delay = 0;
		GEM_LINKDOWN(dp);
	}

	return (tx_sched);
}
2832f8919bdaSduboff 
static void
gem_mii_link_watcher(struct gem_dev *dp)
{
	boolean_t	tx_sched;

	/*
	 * Periodic link-state poll, run as a timeout(9F) callback (see the
	 * scheduling in gem_mii_link_check) and also called directly from
	 * gem_mii_start for the initial check.
	 */
	mutex_enter(&dp->intrlock);

	/*
	 * Clear our own timeout id before the check; gem_mii_link_check
	 * re-arms the timer (setting link_watcher_id again) when further
	 * polling is needed.
	 */
	dp->link_watcher_id = 0;
	tx_sched = gem_mii_link_check(dp);
#if GEM_DEBUG_LEVEL > 2
	if (dp->link_watcher_id == 0) {
		cmn_err(CE_CONT, "%s: link watcher stopped", dp->name);
	}
#endif
	mutex_exit(&dp->intrlock);

	if (tx_sched) {
		/*
		 * kick potentially stopped downstream; done after dropping
		 * intrlock so the MAC layer is never called with it held
		 */
		mac_tx_update(dp->mh);
	}
}
2854f8919bdaSduboff 
/*
 * gem_mii_probe_default: locate the MII PHY to use and record its identity
 * and capabilities in the soft state.
 *
 * Probe order: the preconfigured address in dp->mii_phy_addr first (a
 * negative value appears to denote an internal/non-MII PHY -- see the
 * messages below; TODO confirm against gem.h), then every address from
 * gc_mii_addr_min up to 31, first by reading MII_STATUS alone, then with a
 * MII_CONTROL write beforehand (presumably to wake PHYs that need it --
 * NOTE(review): confirm).
 *
 * Returns GEM_SUCCESS when a PHY is found (mii_status, mii_phy_id,
 * mii_xstatus updated, unsupported pause bits cleared from
 * gc_flow_control); GEM_FAILURE otherwise (mii_phy_addr set to -1 when the
 * full scan fails).
 */
int
gem_mii_probe_default(struct gem_dev *dp)
{
	int8_t		phy;
	uint16_t	status;
	uint16_t	adv;
	uint16_t	adv_org;

	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/*
	 * Scan PHY
	 */
	/* ensure to send sync bits */
	dp->mii_status = 0;

	/* Try default phy first */
	if (dp->mii_phy_addr) {
		status = gem_mii_read(dp, MII_STATUS);
		/* 0x0000 and 0xffff both mean "no PHY responding here" */
		if (status != 0xffff && status != 0) {
			gem_mii_write(dp, MII_CONTROL, 0);
			goto PHY_found;
		}

		if (dp->mii_phy_addr < 0) {
			cmn_err(CE_NOTE,
	    "!%s: failed to probe default internal and/or non-MII PHY",
			    dp->name);
			return (GEM_FAILURE);
		}

		cmn_err(CE_NOTE,
		    "!%s: failed to probe default MII PHY at %d",
		    dp->name, dp->mii_phy_addr);
	}

	/* Try all possible address */
	for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
		dp->mii_phy_addr = phy;
		status = gem_mii_read(dp, MII_STATUS);

		if (status != 0xffff && status != 0) {
			gem_mii_write(dp, MII_CONTROL, 0);
			goto PHY_found;
		}
	}

	/* second pass: write MII_CONTROL first, then re-read the status */
	for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
		dp->mii_phy_addr = phy;
		gem_mii_write(dp, MII_CONTROL, 0);
		status = gem_mii_read(dp, MII_STATUS);

		if (status != 0xffff && status != 0) {
			goto PHY_found;
		}
	}

	cmn_err(CE_NOTE, "!%s: no MII PHY found", dp->name);
	dp->mii_phy_addr = -1;

	return (GEM_FAILURE);

PHY_found:
	dp->mii_status = status;
	/* assemble the 32-bit PHY id from the two id registers */
	dp->mii_phy_id  = (gem_mii_read(dp, MII_PHYIDH) << 16) |
	    gem_mii_read(dp, MII_PHYIDL);

	if (dp->mii_phy_addr < 0) {
		cmn_err(CE_CONT, "!%s: using internal/non-MII PHY(0x%08x)",
		    dp->name, dp->mii_phy_id);
	} else {
		cmn_err(CE_CONT, "!%s: MII PHY (0x%08x) found at %d",
		    dp->name, dp->mii_phy_id, dp->mii_phy_addr);
	}

	cmn_err(CE_CONT, "!%s: PHY control:%b, status:%b, advert:%b, lpar:%b",
	    dp->name,
	    gem_mii_read(dp, MII_CONTROL), MII_CONTROL_BITS,
	    status, MII_STATUS_BITS,
	    gem_mii_read(dp, MII_AN_ADVERT), MII_ABILITY_BITS,
	    gem_mii_read(dp, MII_AN_LPABLE), MII_ABILITY_BITS);

	dp->mii_xstatus = 0;
	if (status & MII_STATUS_XSTATUS) {
		dp->mii_xstatus = gem_mii_read(dp, MII_XSTATUS);

		cmn_err(CE_CONT, "!%s: xstatus:%b",
		    dp->name, dp->mii_xstatus, MII_XSTATUS_BITS);
	}

	/* check if the phy can advertize pause abilities */
	adv_org = gem_mii_read(dp, MII_AN_ADVERT);

	/*
	 * Write both pause bits and read them back; bits the PHY cannot
	 * latch read back as zero, and the corresponding flow-control
	 * capability is removed.
	 */
	gem_mii_write(dp, MII_AN_ADVERT,
	    MII_ABILITY_PAUSE | MII_ABILITY_ASMPAUSE);

	adv = gem_mii_read(dp, MII_AN_ADVERT);

	if ((adv & MII_ABILITY_PAUSE) == 0) {
		dp->gc.gc_flow_control &= ~1;
	}

	if ((adv & MII_ABILITY_ASMPAUSE) == 0) {
		dp->gc.gc_flow_control &= ~2;
	}

	/* restore the original advertisement register */
	gem_mii_write(dp, MII_AN_ADVERT, adv_org);

	return (GEM_SUCCESS);
}
2965f8919bdaSduboff 
2966f8919bdaSduboff static void
gem_mii_start(struct gem_dev * dp)2967f8919bdaSduboff gem_mii_start(struct gem_dev *dp)
2968f8919bdaSduboff {
2969f8919bdaSduboff 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2970f8919bdaSduboff 
2971f8919bdaSduboff 	/* make a first call of check link */
2972f8919bdaSduboff 	dp->mii_state = MII_STATE_UNKNOWN;
2973f8919bdaSduboff 	dp->mii_last_check = ddi_get_lbolt();
297423d366e3Sduboff 	dp->linkup_delay = dp->gc.gc_mii_linkdown_timeout;
2975f8919bdaSduboff 	(void) gem_mii_link_watcher(dp);
2976f8919bdaSduboff }
2977f8919bdaSduboff 
2978f8919bdaSduboff static void
gem_mii_stop(struct gem_dev * dp)2979f8919bdaSduboff gem_mii_stop(struct gem_dev *dp)
2980f8919bdaSduboff {
2981f8919bdaSduboff 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2982f8919bdaSduboff 
2983f8919bdaSduboff 	/* Ensure timer routine stopped */
2984f8919bdaSduboff 	mutex_enter(&dp->intrlock);
2985f8919bdaSduboff 	if (dp->link_watcher_id) {
2986f8919bdaSduboff 		while (untimeout(dp->link_watcher_id) == -1)
2987f8919bdaSduboff 			;
2988f8919bdaSduboff 		dp->link_watcher_id = 0;
2989f8919bdaSduboff 	}
2990f8919bdaSduboff 	mutex_exit(&dp->intrlock);
2991f8919bdaSduboff }
2992f8919bdaSduboff 
2993f8919bdaSduboff boolean_t
gem_get_mac_addr_conf(struct gem_dev * dp)2994f8919bdaSduboff gem_get_mac_addr_conf(struct gem_dev *dp)
2995f8919bdaSduboff {
2996f8919bdaSduboff 	char		propname[32];
2997f8919bdaSduboff 	char		*valstr;
2998f8919bdaSduboff 	uint8_t		mac[ETHERADDRL];
2999f8919bdaSduboff 	char		*cp;
3000f8919bdaSduboff 	int		c;
3001f8919bdaSduboff 	int		i;
3002f8919bdaSduboff 	int		j;
3003f8919bdaSduboff 	uint8_t		v;
3004f8919bdaSduboff 	uint8_t		d;
3005f8919bdaSduboff 	uint8_t		ored;
3006f8919bdaSduboff 
3007f8919bdaSduboff 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3008f8919bdaSduboff 	/*
3009f8919bdaSduboff 	 * Get ethernet address from .conf file
3010f8919bdaSduboff 	 */
3011f8919bdaSduboff 	(void) sprintf(propname, "mac-addr");
3012f8919bdaSduboff 	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dp->dip,
3013f8919bdaSduboff 	    DDI_PROP_DONTPASS, propname, &valstr)) !=
3014f8919bdaSduboff 	    DDI_PROP_SUCCESS) {
3015f8919bdaSduboff 		return (B_FALSE);
3016f8919bdaSduboff 	}
3017f8919bdaSduboff 
3018f8919bdaSduboff 	if (strlen(valstr) != ETHERADDRL*3-1) {
3019f8919bdaSduboff 		goto syntax_err;
3020f8919bdaSduboff 	}
3021f8919bdaSduboff 
3022f8919bdaSduboff 	cp = valstr;
3023f8919bdaSduboff 	j  = 0;
3024f8919bdaSduboff 	ored = 0;
3025f8919bdaSduboff 	for (;;) {
3026f8919bdaSduboff 		v = 0;
3027f8919bdaSduboff 		for (i = 0; i < 2; i++) {
3028f8919bdaSduboff 			c = *cp++;
3029f8919bdaSduboff 
3030f8919bdaSduboff 			if (c >= 'a' && c <= 'f') {
3031f8919bdaSduboff 				d = c - 'a' + 10;
3032f8919bdaSduboff 			} else if (c >= 'A' && c <= 'F') {
3033f8919bdaSduboff 				d = c - 'A' + 10;
3034f8919bdaSduboff 			} else if (c >= '0' && c <= '9') {
3035f8919bdaSduboff 				d = c - '0';
3036f8919bdaSduboff 			} else {
3037f8919bdaSduboff 				goto syntax_err;
3038f8919bdaSduboff 			}
3039f8919bdaSduboff 			v = (v << 4) | d;
3040f8919bdaSduboff 		}
3041f8919bdaSduboff 
3042f8919bdaSduboff 		mac[j++] = v;
3043f8919bdaSduboff 		ored |= v;
3044f8919bdaSduboff 		if (j == ETHERADDRL) {
3045f8919bdaSduboff 			/* done */
3046f8919bdaSduboff 			break;
3047f8919bdaSduboff 		}
3048f8919bdaSduboff 
3049f8919bdaSduboff 		c = *cp++;
3050f8919bdaSduboff 		if (c != ':') {
3051f8919bdaSduboff 			goto syntax_err;
3052f8919bdaSduboff 		}
3053f8919bdaSduboff 	}
3054f8919bdaSduboff 
3055f8919bdaSduboff 	if (ored == 0) {
3056f8919bdaSduboff 		goto err;
3057f8919bdaSduboff 	}
3058f8919bdaSduboff 	for (i = 0; i < ETHERADDRL; i++) {
3059f8919bdaSduboff 		dp->dev_addr.ether_addr_octet[i] = mac[i];
3060f8919bdaSduboff 	}
3061f8919bdaSduboff 	ddi_prop_free(valstr);
3062f8919bdaSduboff 	return (B_TRUE);
3063f8919bdaSduboff 
3064f8919bdaSduboff syntax_err:
3065f8919bdaSduboff 	cmn_err(CE_CONT,
3066f8919bdaSduboff 	    "!%s: read mac addr: trying .conf: syntax err %s",
3067f8919bdaSduboff 	    dp->name, valstr);
3068f8919bdaSduboff err:
3069f8919bdaSduboff 	ddi_prop_free(valstr);
3070f8919bdaSduboff 
3071f8919bdaSduboff 	return (B_FALSE);
3072f8919bdaSduboff }
3073f8919bdaSduboff 
3074f8919bdaSduboff 
3075f8919bdaSduboff /* ============================================================== */
3076f8919bdaSduboff /*
3077f8919bdaSduboff  * internal start/stop interface
3078f8919bdaSduboff  */
3079f8919bdaSduboff /* ============================================================== */
3080f8919bdaSduboff static int
gem_mac_set_rx_filter(struct gem_dev * dp)3081f8919bdaSduboff gem_mac_set_rx_filter(struct gem_dev *dp)
3082f8919bdaSduboff {
3083f8919bdaSduboff 	return ((*dp->gc.gc_set_rx_filter)(dp));
3084f8919bdaSduboff }
3085f8919bdaSduboff 
3086f8919bdaSduboff /*
3087f8919bdaSduboff  * gem_mac_init: cold start
3088f8919bdaSduboff  */
static int
gem_mac_init(struct gem_dev *dp)
{
	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/* refuse to touch suspended hardware */
	if (dp->mac_suspended) {
		return (GEM_FAILURE);
	}

	/* transmit/receive stay disabled until gem_mac_start */
	dp->mac_active = B_FALSE;

	gem_init_rx_ring(dp);
	gem_init_tx_ring(dp);

	/* reset transmitter state */
	dp->tx_blocked = (clock_t)0;
	dp->tx_busy = 0;
	dp->tx_reclaim_busy = 0;
	dp->tx_max_packets = dp->gc.gc_tx_buf_limit;

	/* chip-specific hardware initialization */
	if ((*dp->gc.gc_init_chip)(dp) != GEM_SUCCESS) {
		return (GEM_FAILURE);
	}

	/* populate the rx ring with receive buffers */
	gem_prepare_rx_buf(dp);

	return (GEM_SUCCESS);
}
3117f8919bdaSduboff /*
3118f8919bdaSduboff  * gem_mac_start: warm start
3119f8919bdaSduboff  */
static int
gem_mac_start(struct gem_dev *dp)
{
	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/* caller holds intrlock; only started when online with link up */
	ASSERT(mutex_owned(&dp->intrlock));
	ASSERT(dp->nic_state == NIC_STATE_ONLINE);
	ASSERT(dp->mii_state ==  MII_STATE_LINKUP);

	/* enable tx and rx */
	mutex_enter(&dp->xmitlock);
	if (dp->mac_suspended) {
		mutex_exit(&dp->xmitlock);
		return (GEM_FAILURE);
	}
	dp->mac_active = B_TRUE;
	mutex_exit(&dp->xmitlock);

	/* setup rx buffers (before the chip is told to run) */
	(*dp->gc.gc_rx_start)(dp,
	    SLOT(dp->rx_active_head, dp->gc.gc_rx_ring_size),
	    dp->rx_active_tail - dp->rx_active_head);

	if ((*dp->gc.gc_start_chip)(dp) != GEM_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s: start_chip: failed",
		    dp->name, __func__);
		return (GEM_FAILURE);
	}

	mutex_enter(&dp->xmitlock);

	/*
	 * load untransmitted packets to the nic -- packets queued in the
	 * soft queue while the chip was stopped
	 */
	ASSERT(dp->tx_softq_tail - dp->tx_softq_head >= 0);
	if (dp->tx_softq_tail - dp->tx_softq_head > 0) {
		gem_tx_load_descs_oo(dp,
		    dp->tx_softq_head, dp->tx_softq_tail,
		    GEM_TXFLAG_HEAD);
		/* issue preloaded tx buffers */
		gem_tx_start_unit(dp);
	}

	mutex_exit(&dp->xmitlock);

	return (GEM_SUCCESS);
}
3165f8919bdaSduboff 
/*
 * gem_mac_stop: quiesce and stop the nic.
 *
 * flags: GEM_RESTART_NOWAIT skips the tx drain wait;
 * GEM_RESTART_KEEP_BUF preserves in-flight rx/tx buffers so a
 * subsequent restart can resume where it left off.
 * Caller must hold intrlock and must not hold xmitlock.
 */
static int
gem_mac_stop(struct gem_dev *dp, uint_t flags)
{
	int		i;
	int		wait_time; /* in uS */
#ifdef GEM_DEBUG_LEVEL
	clock_t		now;
#endif
	int		ret = GEM_SUCCESS;

	DPRINTF(1, (CE_CONT, "!%s: %s: called, rx_buf_free:%d",
	    dp->name, __func__, dp->rx_buf_freecnt));

	ASSERT(mutex_owned(&dp->intrlock));
	ASSERT(!mutex_owned(&dp->xmitlock));

	/*
	 * Block transmits
	 */
	mutex_enter(&dp->xmitlock);
	if (dp->mac_suspended) {
		mutex_exit(&dp->xmitlock);
		return (GEM_SUCCESS);
	}
	dp->mac_active = B_FALSE;

	/* wait for in-progress transmit calls to drain out */
	while (dp->tx_busy > 0) {
		cv_wait(&dp->tx_drain_cv, &dp->xmitlock);
	}
	mutex_exit(&dp->xmitlock);

	if ((flags & GEM_RESTART_NOWAIT) == 0) {
		/*
		 * Wait for all tx buffers sent.
		 * Budget: twice the wire time of the outstanding packets
		 * (8 bits/byte, worst-case packet size, current speed).
		 */
		wait_time =
		    2 * (8 * MAXPKTBUF(dp) / gem_speed_value[dp->speed]) *
		    (dp->tx_active_tail - dp->tx_active_head);

		DPRINTF(0, (CE_CONT, "%s: %s: max drain time: %d uS",
		    dp->name, __func__, wait_time));
		i = 0;
#ifdef GEM_DEBUG_LEVEL
		now = ddi_get_lbolt();
#endif
		/* poll every 100us, reclaiming completed tx buffers */
		while (dp->tx_active_tail != dp->tx_active_head) {
			if (i > wait_time) {
				/* timeout */
				cmn_err(CE_NOTE, "%s: %s timeout: tx drain",
				    dp->name, __func__);
				break;
			}
			(void) gem_reclaim_txbuf(dp);
			drv_usecwait(100);
			i += 100;
		}
		DPRINTF(0, (CE_NOTE,
		    "!%s: %s: the nic have drained in %d uS, real %d mS",
		    dp->name, __func__, i,
		    10*((int)(ddi_get_lbolt() - now))));
	}

	/*
	 * Now we can stop the nic safely.
	 */
	if ((*dp->gc.gc_stop_chip)(dp) != GEM_SUCCESS) {
		/* stop failed: fall back to a full chip reset */
		cmn_err(CE_NOTE, "%s: %s: resetting the chip to stop it",
		    dp->name, __func__);
		if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
			cmn_err(CE_WARN, "%s: %s: failed to reset chip",
			    dp->name, __func__);
		}
	}

	/*
	 * Clear all rx buffers
	 * (harvest already-received packets first when buffers are kept)
	 */
	if (flags & GEM_RESTART_KEEP_BUF) {
		(void) gem_receive(dp);
	}
	gem_clean_rx_buf(dp);

	/*
	 * Update final statistics
	 */
	(*dp->gc.gc_get_stats)(dp);

	/*
	 * Clear all pended tx packets
	 */
	ASSERT(dp->tx_active_tail == dp->tx_softq_head);
	ASSERT(dp->tx_softq_tail == dp->tx_free_head);
	if (flags & GEM_RESTART_KEEP_BUF) {
		/* restore active tx buffers (requeued for the restart) */
		dp->tx_active_tail = dp->tx_active_head;
		dp->tx_softq_head  = dp->tx_active_head;
	} else {
		gem_clean_tx_buf(dp);
	}

	return (ret);
}
3268f8919bdaSduboff 
3269f8919bdaSduboff static int
gem_add_multicast(struct gem_dev * dp,const uint8_t * ep)3270f8919bdaSduboff gem_add_multicast(struct gem_dev *dp, const uint8_t *ep)
3271f8919bdaSduboff {
3272f8919bdaSduboff 	int		cnt;
3273f8919bdaSduboff 	int		err;
3274f8919bdaSduboff 
3275f8919bdaSduboff 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3276f8919bdaSduboff 
3277f8919bdaSduboff 	mutex_enter(&dp->intrlock);
3278f8919bdaSduboff 	if (dp->mac_suspended) {
3279f8919bdaSduboff 		mutex_exit(&dp->intrlock);
3280f8919bdaSduboff 		return (GEM_FAILURE);
3281f8919bdaSduboff 	}
3282f8919bdaSduboff 
3283f8919bdaSduboff 	if (dp->mc_count_req++ < GEM_MAXMC) {
3284f8919bdaSduboff 		/* append the new address at the end of the mclist */
3285f8919bdaSduboff 		cnt = dp->mc_count;
3286f8919bdaSduboff 		bcopy(ep, dp->mc_list[cnt].addr.ether_addr_octet,
3287f8919bdaSduboff 		    ETHERADDRL);
3288f8919bdaSduboff 		if (dp->gc.gc_multicast_hash) {
3289f8919bdaSduboff 			dp->mc_list[cnt].hash =
3290f8919bdaSduboff 			    (*dp->gc.gc_multicast_hash)(dp, (uint8_t *)ep);
3291f8919bdaSduboff 		}
3292f8919bdaSduboff 		dp->mc_count = cnt + 1;
3293f8919bdaSduboff 	}
3294f8919bdaSduboff 
3295f8919bdaSduboff 	if (dp->mc_count_req != dp->mc_count) {
3296f8919bdaSduboff 		/* multicast address list overflow */
3297f8919bdaSduboff 		dp->rxmode |= RXMODE_MULTI_OVF;
3298f8919bdaSduboff 	} else {
3299f8919bdaSduboff 		dp->rxmode &= ~RXMODE_MULTI_OVF;
3300f8919bdaSduboff 	}
3301f8919bdaSduboff 
330223d366e3Sduboff 	/* tell new multicast list to the hardware */
3303f8919bdaSduboff 	err = gem_mac_set_rx_filter(dp);
3304f8919bdaSduboff 
3305f8919bdaSduboff 	mutex_exit(&dp->intrlock);
3306f8919bdaSduboff 
3307f8919bdaSduboff 	return (err);
3308f8919bdaSduboff }
3309f8919bdaSduboff 
/*
 * gem_remove_multicast: drop one multicast address from the filter list
 * and reprogram the hardware rx filter.  If the address is not in the
 * stored list (possible after an earlier overflow), only the request
 * count is decremented.
 */
static int
gem_remove_multicast(struct gem_dev *dp, const uint8_t *ep)
{
	size_t		len;
	int		i;
	int		cnt;
	int		err;

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	mutex_enter(&dp->intrlock);
	if (dp->mac_suspended) {
		mutex_exit(&dp->intrlock);
		return (GEM_FAILURE);
	}

	dp->mc_count_req--;
	cnt = dp->mc_count;
	/* find the first matching entry and remove it */
	for (i = 0; i < cnt; i++) {
		if (bcmp(ep, &dp->mc_list[i].addr, ETHERADDRL)) {
			continue;
		}
		/* shrink the mclist by copying forward */
		len = (cnt - (i + 1)) * sizeof (*dp->mc_list);
		if (len > 0) {
			bcopy(&dp->mc_list[i+1], &dp->mc_list[i], len);
		}
		dp->mc_count--;
		break;
	}

	if (dp->mc_count_req != dp->mc_count) {
		/* multicast address list overflow */
		dp->rxmode |= RXMODE_MULTI_OVF;
	} else {
		dp->rxmode &= ~RXMODE_MULTI_OVF;
	}
	/* In gem v2, don't hold xmitlock on calling set_rx_filter */
	err = gem_mac_set_rx_filter(dp);

	mutex_exit(&dp->intrlock);

	return (err);
}
3354f8919bdaSduboff 
3355f8919bdaSduboff /* ============================================================== */
3356f8919bdaSduboff /*
3357f8919bdaSduboff  * ND interface
3358f8919bdaSduboff  */
3359f8919bdaSduboff /* ============================================================== */
/* indices of the ndd parameters served by gem_param_get/gem_param_set */
enum {
	/* local device/PHY capabilities (from mii_status/mii_xstatus) */
	PARAM_AUTONEG_CAP,
	PARAM_PAUSE_CAP,
	PARAM_ASYM_PAUSE_CAP,
	PARAM_1000FDX_CAP,
	PARAM_1000HDX_CAP,
	PARAM_100T4_CAP,
	PARAM_100FDX_CAP,
	PARAM_100HDX_CAP,
	PARAM_10FDX_CAP,
	PARAM_10HDX_CAP,

	/* modes we advertise (anadv_*); writable via gem_param_set */
	PARAM_ADV_AUTONEG_CAP,
	PARAM_ADV_PAUSE_CAP,
	PARAM_ADV_ASYM_PAUSE_CAP,
	PARAM_ADV_1000FDX_CAP,
	PARAM_ADV_1000HDX_CAP,
	PARAM_ADV_100T4_CAP,
	PARAM_ADV_100FDX_CAP,
	PARAM_ADV_100HDX_CAP,
	PARAM_ADV_10FDX_CAP,
	PARAM_ADV_10HDX_CAP,

	/* link partner abilities (from mii_lpable/mii_stat1000/mii_exp) */
	PARAM_LP_AUTONEG_CAP,
	PARAM_LP_PAUSE_CAP,
	PARAM_LP_ASYM_PAUSE_CAP,
	PARAM_LP_1000FDX_CAP,
	PARAM_LP_1000HDX_CAP,
	PARAM_LP_100T4_CAP,
	PARAM_LP_100FDX_CAP,
	PARAM_LP_100HDX_CAP,
	PARAM_LP_10FDX_CAP,
	PARAM_LP_10HDX_CAP,

	/* current resolved link state */
	PARAM_LINK_STATUS,
	PARAM_LINK_SPEED,
	PARAM_LINK_DUPLEX,

	PARAM_LINK_AUTONEG,
	PARAM_LINK_RX_PAUSE,
	PARAM_LINK_TX_PAUSE,

	PARAM_LOOP_MODE,
	PARAM_MSI_CNT,

#ifdef DEBUG_RESUME
	PARAM_RESUME_TEST,
#endif
	/* must remain last: total number of parameters */
	PARAM_COUNT
};
3410f8919bdaSduboff 
/*
 * Disposition codes returned by ioctl handlers, presumably consumed by an
 * ioctl dispatcher outside this chunk -- NOTE(review): confirm consumers.
 */
enum ioc_reply {
	IOC_INVAL = -1,				/* bad, NAK with EINVAL	*/
	IOC_DONE,				/* OK, reply sent	*/
	IOC_ACK,				/* OK, just send ACK	*/
	IOC_REPLY,				/* OK, just send reply	*/
	IOC_RESTART_ACK,			/* OK, restart & ACK	*/
	IOC_RESTART_REPLY			/* OK, restart & reply	*/
};
3419f8919bdaSduboff 
/*
 * Cookie passed as the caddr_t arg to gem_param_get/gem_param_set: binds
 * a device instance to the specific PARAM_* item the callback services.
 */
struct gem_nd_arg {
	struct gem_dev	*dp;	/* owning device instance */
	int		item;	/* PARAM_* index for this nd entry */
};
3424f8919bdaSduboff 
3425f8919bdaSduboff static int
gem_param_get(queue_t * q,mblk_t * mp,caddr_t arg,cred_t * credp)3426f8919bdaSduboff gem_param_get(queue_t *q, mblk_t *mp, caddr_t arg, cred_t *credp)
3427f8919bdaSduboff {
3428f8919bdaSduboff 	struct gem_dev	*dp = ((struct gem_nd_arg *)(void *)arg)->dp;
3429f8919bdaSduboff 	int		item = ((struct gem_nd_arg *)(void *)arg)->item;
3430f8919bdaSduboff 	long		val;
3431f8919bdaSduboff 
3432f8919bdaSduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called, item:%d",
3433f8919bdaSduboff 	    dp->name, __func__, item));
3434f8919bdaSduboff 
3435f8919bdaSduboff 	switch (item) {
3436f8919bdaSduboff 	case PARAM_AUTONEG_CAP:
3437f8919bdaSduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
3438f8919bdaSduboff 		DPRINTF(0, (CE_CONT, "autoneg_cap:%d", val));
3439f8919bdaSduboff 		break;
3440f8919bdaSduboff 
3441f8919bdaSduboff 	case PARAM_PAUSE_CAP:
3442f8919bdaSduboff 		val = BOOLEAN(dp->gc.gc_flow_control & 1);
3443f8919bdaSduboff 		break;
3444f8919bdaSduboff 
3445f8919bdaSduboff 	case PARAM_ASYM_PAUSE_CAP:
3446f8919bdaSduboff 		val = BOOLEAN(dp->gc.gc_flow_control & 2);
3447f8919bdaSduboff 		break;
3448f8919bdaSduboff 
3449f8919bdaSduboff 	case PARAM_1000FDX_CAP:
3450f8919bdaSduboff 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
3451f8919bdaSduboff 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
3452f8919bdaSduboff 		break;
3453f8919bdaSduboff 
3454f8919bdaSduboff 	case PARAM_1000HDX_CAP:
3455f8919bdaSduboff 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
3456f8919bdaSduboff 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
3457f8919bdaSduboff 		break;
3458f8919bdaSduboff 
3459f8919bdaSduboff 	case PARAM_100T4_CAP:
3460f8919bdaSduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
3461f8919bdaSduboff 		break;
3462f8919bdaSduboff 
3463f8919bdaSduboff 	case PARAM_100FDX_CAP:
3464f8919bdaSduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
3465f8919bdaSduboff 		break;
3466f8919bdaSduboff 
3467f8919bdaSduboff 	case PARAM_100HDX_CAP:
3468f8919bdaSduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
3469f8919bdaSduboff 		break;
3470f8919bdaSduboff 
3471f8919bdaSduboff 	case PARAM_10FDX_CAP:
3472f8919bdaSduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
3473f8919bdaSduboff 		break;
3474f8919bdaSduboff 
3475f8919bdaSduboff 	case PARAM_10HDX_CAP:
3476f8919bdaSduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_10);
3477f8919bdaSduboff 		break;
3478f8919bdaSduboff 
3479f8919bdaSduboff 	case PARAM_ADV_AUTONEG_CAP:
3480f8919bdaSduboff 		val = dp->anadv_autoneg;
3481f8919bdaSduboff 		break;
3482f8919bdaSduboff 
3483f8919bdaSduboff 	case PARAM_ADV_PAUSE_CAP:
3484f8919bdaSduboff 		val = BOOLEAN(dp->anadv_flow_control & 1);
3485f8919bdaSduboff 		break;
3486f8919bdaSduboff 
3487f8919bdaSduboff 	case PARAM_ADV_ASYM_PAUSE_CAP:
3488f8919bdaSduboff 		val = BOOLEAN(dp->anadv_flow_control & 2);
3489f8919bdaSduboff 		break;
3490f8919bdaSduboff 
3491f8919bdaSduboff 	case PARAM_ADV_1000FDX_CAP:
3492f8919bdaSduboff 		val = dp->anadv_1000fdx;
3493f8919bdaSduboff 		break;
3494f8919bdaSduboff 
3495f8919bdaSduboff 	case PARAM_ADV_1000HDX_CAP:
3496f8919bdaSduboff 		val = dp->anadv_1000hdx;
3497f8919bdaSduboff 		break;
3498f8919bdaSduboff 
3499f8919bdaSduboff 	case PARAM_ADV_100T4_CAP:
3500f8919bdaSduboff 		val = dp->anadv_100t4;
3501f8919bdaSduboff 		break;
3502f8919bdaSduboff 
3503f8919bdaSduboff 	case PARAM_ADV_100FDX_CAP:
3504f8919bdaSduboff 		val = dp->anadv_100fdx;
3505f8919bdaSduboff 		break;
3506f8919bdaSduboff 
3507f8919bdaSduboff 	case PARAM_ADV_100HDX_CAP:
3508f8919bdaSduboff 		val = dp->anadv_100hdx;
3509f8919bdaSduboff 		break;
3510f8919bdaSduboff 
3511f8919bdaSduboff 	case PARAM_ADV_10FDX_CAP:
3512f8919bdaSduboff 		val = dp->anadv_10fdx;
3513f8919bdaSduboff 		break;
3514f8919bdaSduboff 
3515f8919bdaSduboff 	case PARAM_ADV_10HDX_CAP:
3516f8919bdaSduboff 		val = dp->anadv_10hdx;
3517f8919bdaSduboff 		break;
3518f8919bdaSduboff 
3519f8919bdaSduboff 	case PARAM_LP_AUTONEG_CAP:
3520f8919bdaSduboff 		val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3521f8919bdaSduboff 		break;
3522f8919bdaSduboff 
3523f8919bdaSduboff 	case PARAM_LP_PAUSE_CAP:
3524f8919bdaSduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
3525f8919bdaSduboff 		break;
3526f8919bdaSduboff 
3527f8919bdaSduboff 	case PARAM_LP_ASYM_PAUSE_CAP:
3528bdb9230aSGarrett D'Amore 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASMPAUSE);
3529f8919bdaSduboff 		break;
3530f8919bdaSduboff 
3531f8919bdaSduboff 	case PARAM_LP_1000FDX_CAP:
3532f8919bdaSduboff 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
3533f8919bdaSduboff 		break;
3534f8919bdaSduboff 
3535f8919bdaSduboff 	case PARAM_LP_1000HDX_CAP:
3536f8919bdaSduboff 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
3537f8919bdaSduboff 		break;
3538f8919bdaSduboff 
3539f8919bdaSduboff 	case PARAM_LP_100T4_CAP:
3540f8919bdaSduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
3541f8919bdaSduboff 		break;
3542f8919bdaSduboff 
3543f8919bdaSduboff 	case PARAM_LP_100FDX_CAP:
3544f8919bdaSduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
3545f8919bdaSduboff 		break;
3546f8919bdaSduboff 
3547f8919bdaSduboff 	case PARAM_LP_100HDX_CAP:
3548f8919bdaSduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
3549f8919bdaSduboff 		break;
3550f8919bdaSduboff 
3551f8919bdaSduboff 	case PARAM_LP_10FDX_CAP:
3552f8919bdaSduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
3553f8919bdaSduboff 		break;
3554f8919bdaSduboff 
3555f8919bdaSduboff 	case PARAM_LP_10HDX_CAP:
3556f8919bdaSduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
3557f8919bdaSduboff 		break;
3558f8919bdaSduboff 
3559f8919bdaSduboff 	case PARAM_LINK_STATUS:
3560f8919bdaSduboff 		val = (dp->mii_state == MII_STATE_LINKUP);
3561f8919bdaSduboff 		break;
3562f8919bdaSduboff 
3563f8919bdaSduboff 	case PARAM_LINK_SPEED:
3564f8919bdaSduboff 		val = gem_speed_value[dp->speed];
3565f8919bdaSduboff 		break;
3566f8919bdaSduboff 
3567f8919bdaSduboff 	case PARAM_LINK_DUPLEX:
3568f8919bdaSduboff 		val = 0;
3569f8919bdaSduboff 		if (dp->mii_state == MII_STATE_LINKUP) {
3570f8919bdaSduboff 			val = dp->full_duplex ? 2 : 1;
3571f8919bdaSduboff 		}
3572f8919bdaSduboff 		break;
3573f8919bdaSduboff 
3574f8919bdaSduboff 	case PARAM_LINK_AUTONEG:
3575f8919bdaSduboff 		val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3576f8919bdaSduboff 		break;
3577f8919bdaSduboff 
3578f8919bdaSduboff 	case PARAM_LINK_RX_PAUSE:
3579f8919bdaSduboff 		val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3580f8919bdaSduboff 		    (dp->flow_control == FLOW_CONTROL_RX_PAUSE);
3581f8919bdaSduboff 		break;
3582f8919bdaSduboff 
3583f8919bdaSduboff 	case PARAM_LINK_TX_PAUSE:
3584f8919bdaSduboff 		val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3585f8919bdaSduboff 		    (dp->flow_control == FLOW_CONTROL_TX_PAUSE);
3586f8919bdaSduboff 		break;
3587f8919bdaSduboff 
3588f8919bdaSduboff #ifdef DEBUG_RESUME
3589f8919bdaSduboff 	case PARAM_RESUME_TEST:
3590f8919bdaSduboff 		val = 0;
3591f8919bdaSduboff 		break;
3592f8919bdaSduboff #endif
3593f8919bdaSduboff 	default:
3594f8919bdaSduboff 		cmn_err(CE_WARN, "%s: unimplemented ndd control (%d)",
3595f8919bdaSduboff 		    dp->name, item);
3596f8919bdaSduboff 		break;
3597f8919bdaSduboff 	}
3598f8919bdaSduboff 
3599f8919bdaSduboff 	(void) mi_mpprintf(mp, "%ld", val);
3600f8919bdaSduboff 
3601f8919bdaSduboff 	return (0);
3602f8919bdaSduboff }
3603f8919bdaSduboff 
/*
 * gem_param_set: ndd set-side handler for the writable "adv_*" link
 * parameters.
 *
 * arg is a struct gem_nd_arg which identifies both the device (dp) and
 * the parameter index (item) being written.  The textual value is parsed
 * as a decimal integer; every writable parameter accepts only 0 or 1,
 * and enabling a capability is refused when the PHY does not report
 * support for it.  After a successful update, the forced-media choice is
 * recomputed and the link state machine is kicked so the new
 * advertisement takes effect.
 *
 * q, mp and credp are required by the ndd callback signature but are not
 * used here.
 *
 * Returns 0 on success, EINVAL for a malformed or unacceptable value.
 */
static int
gem_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t arg, cred_t *credp)
{
	struct gem_dev	*dp = ((struct gem_nd_arg *)(void *)arg)->dp;
	int		item = ((struct gem_nd_arg *)(void *)arg)->item;
	long		val;
	char		*end;

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
	if (ddi_strtol(value, &end, 10, &val)) {
		return (EINVAL);
	}
	if (end == value) {
		/* no digits were consumed at all */
		return (EINVAL);
	}

	switch (item) {
	case PARAM_ADV_AUTONEG_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		/* refuse to enable autonegotiation if the PHY can't do it */
		if (val && (dp->mii_status & MII_STATUS_CANAUTONEG) == 0) {
			goto err;
		}
		dp->anadv_autoneg = (int)val;
		break;

	case PARAM_ADV_PAUSE_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		/* bit 0 of anadv_flow_control: symmetric pause */
		if (val) {
			dp->anadv_flow_control |= 1;
		} else {
			dp->anadv_flow_control &= ~1;
		}
		break;

	case PARAM_ADV_ASYM_PAUSE_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		/* bit 1 of anadv_flow_control: asymmetric pause */
		if (val) {
			dp->anadv_flow_control |= 2;
		} else {
			dp->anadv_flow_control &= ~2;
		}
		break;

	case PARAM_ADV_1000FDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		/* need 1000BASE-T or 1000BASE-X full duplex capability */
		if (val && (dp->mii_xstatus &
		    (MII_XSTATUS_1000BASET_FD |
		    MII_XSTATUS_1000BASEX_FD)) == 0) {
			goto err;
		}
		dp->anadv_1000fdx = (int)val;
		break;

	case PARAM_ADV_1000HDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		/* need 1000BASE-T or 1000BASE-X half duplex capability */
		if (val && (dp->mii_xstatus &
		    (MII_XSTATUS_1000BASET | MII_XSTATUS_1000BASEX)) == 0) {
			goto err;
		}
		dp->anadv_1000hdx = (int)val;
		break;

	case PARAM_ADV_100T4_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_100_BASE_T4) == 0) {
			goto err;
		}
		dp->anadv_100t4 = (int)val;
		break;

	case PARAM_ADV_100FDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_100_BASEX_FD) == 0) {
			goto err;
		}
		dp->anadv_100fdx = (int)val;
		break;

	case PARAM_ADV_100HDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_100_BASEX) == 0) {
			goto err;
		}
		dp->anadv_100hdx = (int)val;
		break;

	case PARAM_ADV_10FDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_10_FD) == 0) {
			goto err;
		}
		dp->anadv_10fdx = (int)val;
		break;

	case PARAM_ADV_10HDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_10) == 0) {
			goto err;
		}
		dp->anadv_10hdx = (int)val;
		break;
	}

	/* sync with PHY */
	gem_choose_forcedmode(dp);

	/* invalidate the link state so the watcher renegotiates */
	dp->mii_state = MII_STATE_UNKNOWN;
	if (dp->gc.gc_mii_hw_link_detection && dp->link_watcher_id == 0) {
		/* XXX - Can we ignore the return code ? */
		(void) gem_mii_link_check(dp);
	}

	return (0);
err:
	return (EINVAL);
}
3740f8919bdaSduboff 
3741f8919bdaSduboff static void
gem_nd_load(struct gem_dev * dp,char * name,ndgetf_t gf,ndsetf_t sf,int item)3742f8919bdaSduboff gem_nd_load(struct gem_dev *dp, char *name, ndgetf_t gf, ndsetf_t sf, int item)
3743f8919bdaSduboff {
3744f8919bdaSduboff 	struct gem_nd_arg	*arg;
3745f8919bdaSduboff 
3746f8919bdaSduboff 	ASSERT(item >= 0);
3747f8919bdaSduboff 	ASSERT(item < PARAM_COUNT);
3748f8919bdaSduboff 
3749f8919bdaSduboff 	arg = &((struct gem_nd_arg *)(void *)dp->nd_arg_p)[item];
3750f8919bdaSduboff 	arg->dp = dp;
3751f8919bdaSduboff 	arg->item = item;
3752f8919bdaSduboff 
3753f8919bdaSduboff 	DPRINTF(2, (CE_CONT, "!%s: %s: name:%s, item:%d",
3754f8919bdaSduboff 	    dp->name, __func__, name, item));
375523d366e3Sduboff 	(void) nd_load(&dp->nd_data_p, name, gf, sf, (caddr_t)arg);
3756f8919bdaSduboff }
3757f8919bdaSduboff 
3758f8919bdaSduboff static void
gem_nd_setup(struct gem_dev * dp)3759f8919bdaSduboff gem_nd_setup(struct gem_dev *dp)
3760f8919bdaSduboff {
3761f8919bdaSduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called, mii_status:0x%b",
3762f8919bdaSduboff 	    dp->name, __func__, dp->mii_status, MII_STATUS_BITS));
3763f8919bdaSduboff 
3764f8919bdaSduboff 	ASSERT(dp->nd_arg_p == NULL);
3765f8919bdaSduboff 
3766f8919bdaSduboff 	dp->nd_arg_p =
3767f8919bdaSduboff 	    kmem_zalloc(sizeof (struct gem_nd_arg) * PARAM_COUNT, KM_SLEEP);
3768f8919bdaSduboff 
3769f8919bdaSduboff #define	SETFUNC(x)	((x) ? gem_param_set : NULL)
3770f8919bdaSduboff 
3771f8919bdaSduboff 	gem_nd_load(dp, "autoneg_cap",
3772f8919bdaSduboff 	    gem_param_get, NULL, PARAM_AUTONEG_CAP);
3773f8919bdaSduboff 	gem_nd_load(dp, "pause_cap",
3774f8919bdaSduboff 	    gem_param_get, NULL, PARAM_PAUSE_CAP);
3775f8919bdaSduboff 	gem_nd_load(dp, "asym_pause_cap",
3776f8919bdaSduboff 	    gem_param_get, NULL, PARAM_ASYM_PAUSE_CAP);
3777f8919bdaSduboff 	gem_nd_load(dp, "1000fdx_cap",
3778f8919bdaSduboff 	    gem_param_get, NULL, PARAM_1000FDX_CAP);
3779f8919bdaSduboff 	gem_nd_load(dp, "1000hdx_cap",
3780f8919bdaSduboff 	    gem_param_get, NULL, PARAM_1000HDX_CAP);
3781f8919bdaSduboff 	gem_nd_load(dp, "100T4_cap",
3782f8919bdaSduboff 	    gem_param_get, NULL, PARAM_100T4_CAP);
3783f8919bdaSduboff 	gem_nd_load(dp, "100fdx_cap",
3784f8919bdaSduboff 	    gem_param_get, NULL, PARAM_100FDX_CAP);
3785f8919bdaSduboff 	gem_nd_load(dp, "100hdx_cap",
3786f8919bdaSduboff 	    gem_param_get, NULL, PARAM_100HDX_CAP);
3787f8919bdaSduboff 	gem_nd_load(dp, "10fdx_cap",
3788f8919bdaSduboff 	    gem_param_get, NULL, PARAM_10FDX_CAP);
3789f8919bdaSduboff 	gem_nd_load(dp, "10hdx_cap",
3790f8919bdaSduboff 	    gem_param_get, NULL, PARAM_10HDX_CAP);
3791f8919bdaSduboff 
3792f8919bdaSduboff 	/* Our advertised capabilities */
3793f8919bdaSduboff 	gem_nd_load(dp, "adv_autoneg_cap", gem_param_get,
3794f8919bdaSduboff 	    SETFUNC(dp->mii_status & MII_STATUS_CANAUTONEG),
3795f8919bdaSduboff 	    PARAM_ADV_AUTONEG_CAP);
3796f8919bdaSduboff 	gem_nd_load(dp, "adv_pause_cap", gem_param_get,
3797f8919bdaSduboff 	    SETFUNC(dp->gc.gc_flow_control & 1),
3798f8919bdaSduboff 	    PARAM_ADV_PAUSE_CAP);
3799f8919bdaSduboff 	gem_nd_load(dp, "adv_asym_pause_cap", gem_param_get,
3800f8919bdaSduboff 	    SETFUNC(dp->gc.gc_flow_control & 2),
3801f8919bdaSduboff 	    PARAM_ADV_ASYM_PAUSE_CAP);
3802f8919bdaSduboff 	gem_nd_load(dp, "adv_1000fdx_cap", gem_param_get,
3803f8919bdaSduboff 	    SETFUNC(dp->mii_xstatus &
3804f8919bdaSduboff 	    (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD)),
3805f8919bdaSduboff 	    PARAM_ADV_1000FDX_CAP);
3806f8919bdaSduboff 	gem_nd_load(dp, "adv_1000hdx_cap", gem_param_get,
3807f8919bdaSduboff 	    SETFUNC(dp->mii_xstatus &
3808f8919bdaSduboff 	    (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET)),
3809f8919bdaSduboff 	    PARAM_ADV_1000HDX_CAP);
3810f8919bdaSduboff 	gem_nd_load(dp, "adv_100T4_cap", gem_param_get,
3811f8919bdaSduboff 	    SETFUNC((dp->mii_status & MII_STATUS_100_BASE_T4) &&
3812f8919bdaSduboff 	    !dp->mii_advert_ro),
3813f8919bdaSduboff 	    PARAM_ADV_100T4_CAP);
3814f8919bdaSduboff 	gem_nd_load(dp, "adv_100fdx_cap", gem_param_get,
3815f8919bdaSduboff 	    SETFUNC((dp->mii_status & MII_STATUS_100_BASEX_FD) &&
3816f8919bdaSduboff 	    !dp->mii_advert_ro),
3817f8919bdaSduboff 	    PARAM_ADV_100FDX_CAP);
3818f8919bdaSduboff 	gem_nd_load(dp, "adv_100hdx_cap", gem_param_get,
3819f8919bdaSduboff 	    SETFUNC((dp->mii_status & MII_STATUS_100_BASEX) &&
3820f8919bdaSduboff 	    !dp->mii_advert_ro),
3821f8919bdaSduboff 	    PARAM_ADV_100HDX_CAP);
3822f8919bdaSduboff 	gem_nd_load(dp, "adv_10fdx_cap", gem_param_get,
3823f8919bdaSduboff 	    SETFUNC((dp->mii_status & MII_STATUS_10_FD) &&
3824f8919bdaSduboff 	    !dp->mii_advert_ro),
3825f8919bdaSduboff 	    PARAM_ADV_10FDX_CAP);
3826f8919bdaSduboff 	gem_nd_load(dp, "adv_10hdx_cap", gem_param_get,
3827f8919bdaSduboff 	    SETFUNC((dp->mii_status & MII_STATUS_10) &&
3828f8919bdaSduboff 	    !dp->mii_advert_ro),
3829f8919bdaSduboff 	    PARAM_ADV_10HDX_CAP);
3830f8919bdaSduboff 
3831f8919bdaSduboff 	/* Partner's advertised capabilities */
3832f8919bdaSduboff 	gem_nd_load(dp, "lp_autoneg_cap",
3833f8919bdaSduboff 	    gem_param_get, NULL, PARAM_LP_AUTONEG_CAP);
3834f8919bdaSduboff 	gem_nd_load(dp, "lp_pause_cap",
3835f8919bdaSduboff 	    gem_param_get, NULL, PARAM_LP_PAUSE_CAP);
3836f8919bdaSduboff 	gem_nd_load(dp, "lp_asym_pause_cap",
3837f8919bdaSduboff 	    gem_param_get, NULL, PARAM_LP_ASYM_PAUSE_CAP);
3838f8919bdaSduboff 	gem_nd_load(dp, "lp_1000fdx_cap",
3839f8919bdaSduboff 	    gem_param_get, NULL, PARAM_LP_1000FDX_CAP);
3840f8919bdaSduboff 	gem_nd_load(dp, "lp_1000hdx_cap",
3841f8919bdaSduboff 	    gem_param_get, NULL, PARAM_LP_1000HDX_CAP);
3842f8919bdaSduboff 	gem_nd_load(dp, "lp_100T4_cap",
3843f8919bdaSduboff 	    gem_param_get, NULL, PARAM_LP_100T4_CAP);
3844f8919bdaSduboff 	gem_nd_load(dp, "lp_100fdx_cap",
3845f8919bdaSduboff 	    gem_param_get, NULL, PARAM_LP_100FDX_CAP);
3846f8919bdaSduboff 	gem_nd_load(dp, "lp_100hdx_cap",
3847f8919bdaSduboff 	    gem_param_get, NULL, PARAM_LP_100HDX_CAP);
3848f8919bdaSduboff 	gem_nd_load(dp, "lp_10fdx_cap",
3849f8919bdaSduboff 	    gem_param_get, NULL, PARAM_LP_10FDX_CAP);
3850f8919bdaSduboff 	gem_nd_load(dp, "lp_10hdx_cap",
3851f8919bdaSduboff 	    gem_param_get, NULL, PARAM_LP_10HDX_CAP);
3852f8919bdaSduboff 
3853f8919bdaSduboff 	/* Current operating modes */
3854f8919bdaSduboff 	gem_nd_load(dp, "link_status",
3855f8919bdaSduboff 	    gem_param_get, NULL, PARAM_LINK_STATUS);
3856f8919bdaSduboff 	gem_nd_load(dp, "link_speed",
3857f8919bdaSduboff 	    gem_param_get, NULL, PARAM_LINK_SPEED);
3858f8919bdaSduboff 	gem_nd_load(dp, "link_duplex",
3859f8919bdaSduboff 	    gem_param_get, NULL, PARAM_LINK_DUPLEX);
3860f8919bdaSduboff 	gem_nd_load(dp, "link_autoneg",
3861f8919bdaSduboff 	    gem_param_get, NULL, PARAM_LINK_AUTONEG);
3862f8919bdaSduboff 	gem_nd_load(dp, "link_rx_pause",
3863f8919bdaSduboff 	    gem_param_get, NULL, PARAM_LINK_RX_PAUSE);
3864f8919bdaSduboff 	gem_nd_load(dp, "link_tx_pause",
3865f8919bdaSduboff 	    gem_param_get, NULL, PARAM_LINK_TX_PAUSE);
3866f8919bdaSduboff #ifdef DEBUG_RESUME
3867f8919bdaSduboff 	gem_nd_load(dp, "resume_test",
3868f8919bdaSduboff 	    gem_param_get, NULL, PARAM_RESUME_TEST);
3869f8919bdaSduboff #endif
3870f8919bdaSduboff #undef	SETFUNC
3871f8919bdaSduboff }
3872f8919bdaSduboff 
3873f8919bdaSduboff static
3874f8919bdaSduboff enum ioc_reply
gem_nd_ioctl(struct gem_dev * dp,queue_t * wq,mblk_t * mp,struct iocblk * iocp)3875f8919bdaSduboff gem_nd_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
3876f8919bdaSduboff {
3877f8919bdaSduboff 	boolean_t	ok;
3878f8919bdaSduboff 
3879f8919bdaSduboff 	ASSERT(mutex_owned(&dp->intrlock));
3880f8919bdaSduboff 
3881f8919bdaSduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3882f8919bdaSduboff 
3883f8919bdaSduboff 	switch (iocp->ioc_cmd) {
3884f8919bdaSduboff 	case ND_GET:
3885f8919bdaSduboff 		ok = nd_getset(wq, dp->nd_data_p, mp);
3886f8919bdaSduboff 		DPRINTF(0, (CE_CONT,
3887f8919bdaSduboff 		    "%s: get %s", dp->name, ok ? "OK" : "FAIL"));
3888f8919bdaSduboff 		return (ok ? IOC_REPLY : IOC_INVAL);
3889f8919bdaSduboff 
3890f8919bdaSduboff 	case ND_SET:
3891f8919bdaSduboff 		ok = nd_getset(wq, dp->nd_data_p, mp);
3892f8919bdaSduboff 
3893f8919bdaSduboff 		DPRINTF(0, (CE_CONT, "%s: set %s err %d",
3894f8919bdaSduboff 		    dp->name, ok ? "OK" : "FAIL", iocp->ioc_error));
3895f8919bdaSduboff 
3896f8919bdaSduboff 		if (!ok) {
3897f8919bdaSduboff 			return (IOC_INVAL);
3898f8919bdaSduboff 		}
3899f8919bdaSduboff 
3900f8919bdaSduboff 		if (iocp->ioc_error) {
3901f8919bdaSduboff 			return (IOC_REPLY);
3902f8919bdaSduboff 		}
3903f8919bdaSduboff 
3904f8919bdaSduboff 		return (IOC_RESTART_REPLY);
3905f8919bdaSduboff 	}
3906f8919bdaSduboff 
3907f8919bdaSduboff 	cmn_err(CE_WARN, "%s: invalid cmd 0x%x", dp->name, iocp->ioc_cmd);
3908f8919bdaSduboff 
3909f8919bdaSduboff 	return (IOC_INVAL);
3910f8919bdaSduboff }
3911f8919bdaSduboff 
3912f8919bdaSduboff static void
gem_nd_cleanup(struct gem_dev * dp)3913f8919bdaSduboff gem_nd_cleanup(struct gem_dev *dp)
3914f8919bdaSduboff {
3915f8919bdaSduboff 	ASSERT(dp->nd_data_p != NULL);
3916f8919bdaSduboff 	ASSERT(dp->nd_arg_p != NULL);
3917f8919bdaSduboff 
3918f8919bdaSduboff 	nd_free(&dp->nd_data_p);
3919f8919bdaSduboff 
3920f8919bdaSduboff 	kmem_free(dp->nd_arg_p, sizeof (struct gem_nd_arg) * PARAM_COUNT);
3921f8919bdaSduboff 	dp->nd_arg_p = NULL;
3922f8919bdaSduboff }
3923f8919bdaSduboff 
3924f8919bdaSduboff static void
gem_mac_ioctl(struct gem_dev * dp,queue_t * wq,mblk_t * mp)3925f8919bdaSduboff gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp)
3926f8919bdaSduboff {
3927f8919bdaSduboff 	struct iocblk	*iocp;
3928f8919bdaSduboff 	enum ioc_reply	status;
3929f8919bdaSduboff 	int		cmd;
3930f8919bdaSduboff 
3931f8919bdaSduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3932f8919bdaSduboff 
3933f8919bdaSduboff 	/*
3934f8919bdaSduboff 	 * Validate the command before bothering with the mutex ...
3935f8919bdaSduboff 	 */
3936f8919bdaSduboff 	iocp = (void *)mp->b_rptr;
3937f8919bdaSduboff 	iocp->ioc_error = 0;
3938f8919bdaSduboff 	cmd = iocp->ioc_cmd;
3939f8919bdaSduboff 
3940f8919bdaSduboff 	DPRINTF(0, (CE_CONT, "%s: %s cmd:0x%x", dp->name, __func__, cmd));
3941f8919bdaSduboff 
3942f8919bdaSduboff 	mutex_enter(&dp->intrlock);
3943f8919bdaSduboff 	mutex_enter(&dp->xmitlock);
3944f8919bdaSduboff 
3945f8919bdaSduboff 	switch (cmd) {
3946f8919bdaSduboff 	default:
3947f8919bdaSduboff 		_NOTE(NOTREACHED)
3948f8919bdaSduboff 		status = IOC_INVAL;
3949f8919bdaSduboff 		break;
3950f8919bdaSduboff 
3951f8919bdaSduboff 	case ND_GET:
3952f8919bdaSduboff 	case ND_SET:
3953f8919bdaSduboff 		status = gem_nd_ioctl(dp, wq, mp, iocp);
3954f8919bdaSduboff 		break;
3955f8919bdaSduboff 	}
3956f8919bdaSduboff 
3957f8919bdaSduboff 	mutex_exit(&dp->xmitlock);
3958f8919bdaSduboff 	mutex_exit(&dp->intrlock);
3959f8919bdaSduboff 
3960f8919bdaSduboff #ifdef DEBUG_RESUME
3961f8919bdaSduboff 	if (cmd == ND_GET)  {
3962f8919bdaSduboff 		gem_suspend(dp->dip);
3963f8919bdaSduboff 		gem_resume(dp->dip);
3964f8919bdaSduboff 	}
3965f8919bdaSduboff #endif
3966f8919bdaSduboff 	/*
3967f8919bdaSduboff 	 * Finally, decide how to reply
3968f8919bdaSduboff 	 */
3969f8919bdaSduboff 	switch (status) {
3970f8919bdaSduboff 	default:
3971f8919bdaSduboff 	case IOC_INVAL:
3972f8919bdaSduboff 		/*
3973f8919bdaSduboff 		 * Error, reply with a NAK and EINVAL or the specified error
3974f8919bdaSduboff 		 */
3975f8919bdaSduboff 		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
3976f8919bdaSduboff 		    EINVAL : iocp->ioc_error);
3977f8919bdaSduboff 		break;
3978f8919bdaSduboff 
3979f8919bdaSduboff 	case IOC_DONE:
3980f8919bdaSduboff 		/*
3981f8919bdaSduboff 		 * OK, reply already sent
3982f8919bdaSduboff 		 */
3983f8919bdaSduboff 		break;
3984f8919bdaSduboff 
3985f8919bdaSduboff 	case IOC_RESTART_ACK:
3986f8919bdaSduboff 	case IOC_ACK:
3987f8919bdaSduboff 		/*
3988f8919bdaSduboff 		 * OK, reply with an ACK
3989f8919bdaSduboff 		 */
3990f8919bdaSduboff 		miocack(wq, mp, 0, 0);
3991f8919bdaSduboff 		break;
3992f8919bdaSduboff 
3993f8919bdaSduboff 	case IOC_RESTART_REPLY:
3994f8919bdaSduboff 	case IOC_REPLY:
3995f8919bdaSduboff 		/*
3996f8919bdaSduboff 		 * OK, send prepared reply as ACK or NAK
3997f8919bdaSduboff 		 */
3998f8919bdaSduboff 		mp->b_datap->db_type =
3999f8919bdaSduboff 		    iocp->ioc_error == 0 ? M_IOCACK : M_IOCNAK;
4000f8919bdaSduboff 		qreply(wq, mp);
4001f8919bdaSduboff 		break;
4002f8919bdaSduboff 	}
4003f8919bdaSduboff }
4004f8919bdaSduboff 
4005f8919bdaSduboff #ifndef SYS_MAC_H
4006f8919bdaSduboff #define	XCVR_UNDEFINED	0
4007f8919bdaSduboff #define	XCVR_NONE	1
4008f8919bdaSduboff #define	XCVR_10		2
4009f8919bdaSduboff #define	XCVR_100T4	3
4010f8919bdaSduboff #define	XCVR_100X	4
4011f8919bdaSduboff #define	XCVR_100T2	5
4012f8919bdaSduboff #define	XCVR_1000X	6
4013f8919bdaSduboff #define	XCVR_1000T	7
4014f8919bdaSduboff #endif
4015f8919bdaSduboff static int
gem_mac_xcvr_inuse(struct gem_dev * dp)4016f8919bdaSduboff gem_mac_xcvr_inuse(struct gem_dev *dp)
4017f8919bdaSduboff {
4018f8919bdaSduboff 	int	val = XCVR_UNDEFINED;
4019f8919bdaSduboff 
4020f8919bdaSduboff 	if ((dp->mii_status & MII_STATUS_XSTATUS) == 0) {
4021f8919bdaSduboff 		if (dp->mii_status & MII_STATUS_100_BASE_T4) {
4022f8919bdaSduboff 			val = XCVR_100T4;
4023f8919bdaSduboff 		} else if (dp->mii_status &
4024f8919bdaSduboff 		    (MII_STATUS_100_BASEX_FD |
4025f8919bdaSduboff 		    MII_STATUS_100_BASEX)) {
4026f8919bdaSduboff 			val = XCVR_100X;
4027f8919bdaSduboff 		} else if (dp->mii_status &
4028f8919bdaSduboff 		    (MII_STATUS_100_BASE_T2_FD |
4029f8919bdaSduboff 		    MII_STATUS_100_BASE_T2)) {
4030f8919bdaSduboff 			val = XCVR_100T2;
4031f8919bdaSduboff 		} else if (dp->mii_status &
4032f8919bdaSduboff 		    (MII_STATUS_10_FD | MII_STATUS_10)) {
4033f8919bdaSduboff 			val = XCVR_10;
4034f8919bdaSduboff 		}
4035f8919bdaSduboff 	} else if (dp->mii_xstatus &
4036f8919bdaSduboff 	    (MII_XSTATUS_1000BASET_FD | MII_XSTATUS_1000BASET)) {
4037f8919bdaSduboff 		val = XCVR_1000T;
4038f8919bdaSduboff 	} else if (dp->mii_xstatus &
4039f8919bdaSduboff 	    (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASEX)) {
4040f8919bdaSduboff 		val = XCVR_1000X;
4041f8919bdaSduboff 	}
4042f8919bdaSduboff 
4043f8919bdaSduboff 	return (val);
4044f8919bdaSduboff }
4045f8919bdaSduboff 
4046f8919bdaSduboff /* ============================================================== */
4047f8919bdaSduboff /*
4048f8919bdaSduboff  * GLDv3 interface
4049f8919bdaSduboff  */
4050f8919bdaSduboff /* ============================================================== */
4051f8919bdaSduboff static int		gem_m_getstat(void *, uint_t, uint64_t *);
4052f8919bdaSduboff static int		gem_m_start(void *);
4053f8919bdaSduboff static void		gem_m_stop(void *);
4054f8919bdaSduboff static int		gem_m_setpromisc(void *, boolean_t);
4055f8919bdaSduboff static int		gem_m_multicst(void *, boolean_t, const uint8_t *);
4056f8919bdaSduboff static int		gem_m_unicst(void *, const uint8_t *);
4057f8919bdaSduboff static mblk_t		*gem_m_tx(void *, mblk_t *);
4058f8919bdaSduboff static void		gem_m_ioctl(void *, queue_t *, mblk_t *);
4059f8919bdaSduboff static boolean_t	gem_m_getcapab(void *, mac_capab_t, void *);
4060f8919bdaSduboff 
/* only the ioctl and getcapab optional entry points are provided */
#define	GEM_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)

/*
 * GLDv3 entry point vector for this driver.  Entries are positional;
 * NOTE(review): the NULL slot follows the tx entry — confirm against
 * the mac_callbacks_t definition for this gate before reordering.
 */
static mac_callbacks_t gem_m_callbacks = {
	GEM_M_CALLBACK_FLAGS,
	gem_m_getstat,		/* statistics */
	gem_m_start,		/* start */
	gem_m_stop,		/* stop */
	gem_m_setpromisc,	/* promiscuous mode */
	gem_m_multicst,		/* multicast filter */
	gem_m_unicst,		/* unicast address */
	gem_m_tx,		/* transmit */
	NULL,			/* unused optional entry */
	gem_m_ioctl,		/* ioctl (enabled via MC_IOCTL) */
	gem_m_getcapab,		/* capabilities (enabled via MC_GETCAPAB) */
};
4076f8919bdaSduboff 
/*
 * gem_m_start: GLDv3 mc_start entry point.
 *
 * Brings the nic from STOPPED to ONLINE under intrlock: initializes
 * the hardware, resets the multicast filter bookkeeping, programs the
 * station address and rx filter, starts the mac if the link is already
 * up, and arms the tx timeout watchdog.
 *
 * Returns 0 on success, or EIO when the device is suspended or a
 * hardware step fails; on any failure the nic state is rolled back to
 * NIC_STATE_STOPPED.
 */
static int
gem_m_start(void *arg)
{
	int		err = 0;
	struct gem_dev *dp = arg;

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	mutex_enter(&dp->intrlock);
	if (dp->mac_suspended) {
		err = EIO;
		goto x;
	}
	if (gem_mac_init(dp) != GEM_SUCCESS) {
		err = EIO;
		goto x;
	}
	dp->nic_state = NIC_STATE_INITIALIZED;

	/* reset rx filter state */
	dp->mc_count = 0;
	dp->mc_count_req = 0;

	/* setup media mode if the link have been up */
	if (dp->mii_state == MII_STATE_LINKUP) {
		(dp->gc.gc_set_media)(dp);
	}

	/* setup initial rx filter: our station address, reception enabled */
	bcopy(dp->dev_addr.ether_addr_octet,
	    dp->cur_addr.ether_addr_octet, ETHERADDRL);
	dp->rxmode |= RXMODE_ENABLE;

	if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
		err = EIO;
		goto x;
	}

	dp->nic_state = NIC_STATE_ONLINE;
	if (dp->mii_state == MII_STATE_LINKUP) {
		if (gem_mac_start(dp) != GEM_SUCCESS) {
			err = EIO;
			goto x;
		}
	}

	/* arm the periodic tx timeout watchdog */
	dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
	    (void *)dp, dp->gc.gc_tx_timeout_interval);
	mutex_exit(&dp->intrlock);

	return (0);
x:
	/* failed: leave the nic stopped and report the error */
	dp->nic_state = NIC_STATE_STOPPED;
	mutex_exit(&dp->intrlock);
	return (err);
}
4133f8919bdaSduboff 
/*
 * gem_m_stop: GLDv3 mc_stop entry point.
 *
 * Shuts the nic down in stages: disable reception, cancel the tx
 * timeout watchdog, mark the device stopped/inactive, wait for any
 * in-flight interrupt handler to drain, and finally stop the mac.
 *
 * intrlock is deliberately dropped before the untimeout() loop —
 * NOTE(review): presumably because the timeout handler may need the
 * lock and untimeout() can block on a running callback; confirm
 * against gem_tx_timeout.  The suspended flag is re-checked after
 * each reacquisition.
 */
static void
gem_m_stop(void *arg)
{
	struct gem_dev	*dp = arg;

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/* stop rx */
	mutex_enter(&dp->intrlock);
	if (dp->mac_suspended) {
		mutex_exit(&dp->intrlock);
		return;
	}
	dp->rxmode &= ~RXMODE_ENABLE;
	(void) gem_mac_set_rx_filter(dp);
	mutex_exit(&dp->intrlock);

	/* stop tx timeout watcher */
	if (dp->timeout_id) {
		/* retry until the pending timeout is cancelled */
		while (untimeout(dp->timeout_id) == -1)
			;
		dp->timeout_id = 0;
	}

	/* make the nic state inactive */
	mutex_enter(&dp->intrlock);
	if (dp->mac_suspended) {
		mutex_exit(&dp->intrlock);
		return;
	}
	dp->nic_state = NIC_STATE_STOPPED;

	/* we need deassert mac_active due to block interrupt handler */
	mutex_enter(&dp->xmitlock);
	dp->mac_active = B_FALSE;
	mutex_exit(&dp->xmitlock);

	/* block interrupts: wait until no interrupt handler is running */
	while (dp->intr_busy) {
		cv_wait(&dp->tx_drain_cv, &dp->intrlock);
	}
	(void) gem_mac_stop(dp, 0);
	mutex_exit(&dp->intrlock);
}
4178f8919bdaSduboff 
4179f8919bdaSduboff static int
gem_m_multicst(void * arg,boolean_t add,const uint8_t * ep)4180f8919bdaSduboff gem_m_multicst(void *arg, boolean_t add, const uint8_t *ep)
4181f8919bdaSduboff {
4182f8919bdaSduboff 	int		err;
4183f8919bdaSduboff 	int		ret;
4184f8919bdaSduboff 	struct gem_dev	*dp = arg;
4185f8919bdaSduboff 
4186f8919bdaSduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4187f8919bdaSduboff 
4188f8919bdaSduboff 	if (add) {
4189f8919bdaSduboff 		ret = gem_add_multicast(dp, ep);
4190f8919bdaSduboff 	} else {
4191f8919bdaSduboff 		ret = gem_remove_multicast(dp, ep);
4192f8919bdaSduboff 	}
4193f8919bdaSduboff 
4194f8919bdaSduboff 	err = 0;
4195f8919bdaSduboff 	if (ret != GEM_SUCCESS) {
4196f8919bdaSduboff 		err = EIO;
4197f8919bdaSduboff 	}
4198f8919bdaSduboff 
4199f8919bdaSduboff 	return (err);
4200f8919bdaSduboff }
4201f8919bdaSduboff 
4202f8919bdaSduboff static int
gem_m_setpromisc(void * arg,boolean_t on)4203f8919bdaSduboff gem_m_setpromisc(void *arg, boolean_t on)
4204f8919bdaSduboff {
4205f8919bdaSduboff 	int		err = 0;	/* no error */
4206f8919bdaSduboff 	struct gem_dev	*dp = arg;
4207f8919bdaSduboff 
4208f8919bdaSduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4209f8919bdaSduboff 
4210f8919bdaSduboff 	mutex_enter(&dp->intrlock);
4211f8919bdaSduboff 	if (dp->mac_suspended) {
4212f8919bdaSduboff 		mutex_exit(&dp->intrlock);
4213f8919bdaSduboff 		return (EIO);
4214f8919bdaSduboff 	}
4215f8919bdaSduboff 	if (on) {
4216f8919bdaSduboff 		dp->rxmode |= RXMODE_PROMISC;
4217f8919bdaSduboff 	} else {
4218f8919bdaSduboff 		dp->rxmode &= ~RXMODE_PROMISC;
4219f8919bdaSduboff 	}
4220f8919bdaSduboff 
4221f8919bdaSduboff 	if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4222f8919bdaSduboff 		err = EIO;
4223f8919bdaSduboff 	}
4224f8919bdaSduboff 	mutex_exit(&dp->intrlock);
4225f8919bdaSduboff 
4226f8919bdaSduboff 	return (err);
4227f8919bdaSduboff }
4228f8919bdaSduboff 
4229f8919bdaSduboff int
gem_m_getstat(void * arg,uint_t stat,uint64_t * valp)4230f8919bdaSduboff gem_m_getstat(void *arg, uint_t stat, uint64_t *valp)
4231f8919bdaSduboff {
4232f8919bdaSduboff 	struct gem_dev		*dp = arg;
4233f8919bdaSduboff 	struct gem_stats	*gstp = &dp->stats;
4234f8919bdaSduboff 	uint64_t		val = 0;
4235f8919bdaSduboff 
4236f8919bdaSduboff 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4237f8919bdaSduboff 
423823d366e3Sduboff 	if (mutex_owned(&dp->intrlock)) {
423923d366e3Sduboff 		if (dp->mac_suspended) {
424023d366e3Sduboff 			return (EIO);
424123d366e3Sduboff 		}
424223d366e3Sduboff 	} else {
4243f8919bdaSduboff 		mutex_enter(&dp->intrlock);
4244f8919bdaSduboff 		if (dp->mac_suspended) {
4245f8919bdaSduboff 			mutex_exit(&dp->intrlock);
4246f8919bdaSduboff 			return (EIO);
4247f8919bdaSduboff 		}
4248f8919bdaSduboff 		mutex_exit(&dp->intrlock);
424923d366e3Sduboff 	}
4250f8919bdaSduboff 
4251f8919bdaSduboff 	if ((*dp->gc.gc_get_stats)(dp) != GEM_SUCCESS) {
4252f8919bdaSduboff 		return (EIO);
4253f8919bdaSduboff 	}
4254f8919bdaSduboff 
4255f8919bdaSduboff 	switch (stat) {
4256f8919bdaSduboff 	case MAC_STAT_IFSPEED:
4257f8919bdaSduboff 		val = gem_speed_value[dp->speed] *1000000ull;
4258f8919bdaSduboff 		break;
4259f8919bdaSduboff 
4260f8919bdaSduboff 	case MAC_STAT_MULTIRCV:
4261f8919bdaSduboff 		val = gstp->rmcast;
4262f8919bdaSduboff 		break;
4263f8919bdaSduboff 
4264f8919bdaSduboff 	case MAC_STAT_BRDCSTRCV:
4265f8919bdaSduboff 		val = gstp->rbcast;
4266f8919bdaSduboff 		break;
4267f8919bdaSduboff 
4268f8919bdaSduboff 	case MAC_STAT_MULTIXMT:
4269f8919bdaSduboff 		val = gstp->omcast;
4270f8919bdaSduboff 		break;
4271f8919bdaSduboff 
4272f8919bdaSduboff 	case MAC_STAT_BRDCSTXMT:
4273f8919bdaSduboff 		val = gstp->obcast;
4274f8919bdaSduboff 		break;
4275f8919bdaSduboff 
4276f8919bdaSduboff 	case MAC_STAT_NORCVBUF:
4277f8919bdaSduboff 		val = gstp->norcvbuf + gstp->missed;
4278f8919bdaSduboff 		break;
4279f8919bdaSduboff 
4280f8919bdaSduboff 	case MAC_STAT_IERRORS:
4281f8919bdaSduboff 		val = gstp->errrcv;
4282f8919bdaSduboff 		break;
4283f8919bdaSduboff 
4284f8919bdaSduboff 	case MAC_STAT_NOXMTBUF:
4285f8919bdaSduboff 		val = gstp->noxmtbuf;
4286f8919bdaSduboff 		break;
4287f8919bdaSduboff 
4288f8919bdaSduboff 	case MAC_STAT_OERRORS:
4289f8919bdaSduboff 		val = gstp->errxmt;
4290f8919bdaSduboff 		break;
4291f8919bdaSduboff 
4292f8919bdaSduboff 	case MAC_STAT_COLLISIONS:
4293f8919bdaSduboff 		val = gstp->collisions;
4294f8919bdaSduboff 		break;
4295f8919bdaSduboff 
4296f8919bdaSduboff 	case MAC_STAT_RBYTES:
4297f8919bdaSduboff 		val = gstp->rbytes;
4298f8919bdaSduboff 		break;
4299f8919bdaSduboff 
4300f8919bdaSduboff 	case MAC_STAT_IPACKETS:
4301f8919bdaSduboff 		val = gstp->rpackets;
4302f8919bdaSduboff 		break;
4303f8919bdaSduboff 
4304f8919bdaSduboff 	case MAC_STAT_OBYTES:
4305f8919bdaSduboff 		val = gstp->obytes;
4306f8919bdaSduboff 		break;
4307f8919bdaSduboff 
4308f8919bdaSduboff 	case MAC_STAT_OPACKETS:
4309f8919bdaSduboff 		val = gstp->opackets;
4310f8919bdaSduboff 		break;
4311f8919bdaSduboff 
4312f8919bdaSduboff 	case MAC_STAT_UNDERFLOWS:
4313f8919bdaSduboff 		val = gstp->underflow;
4314f8919bdaSduboff 		break;
4315f8919bdaSduboff 
4316f8919bdaSduboff 	case MAC_STAT_OVERFLOWS:
4317f8919bdaSduboff 		val = gstp->overflow;
4318f8919bdaSduboff 		break;
4319f8919bdaSduboff 
4320f8919bdaSduboff 	case ETHER_STAT_ALIGN_ERRORS:
4321f8919bdaSduboff 		val = gstp->frame;
4322f8919bdaSduboff 		break;
4323f8919bdaSduboff 
4324f8919bdaSduboff 	case ETHER_STAT_FCS_ERRORS:
4325f8919bdaSduboff 		val = gstp->crc;
4326f8919bdaSduboff 		break;
4327f8919bdaSduboff 
4328f8919bdaSduboff 	case ETHER_STAT_FIRST_COLLISIONS:
4329f8919bdaSduboff 		val = gstp->first_coll;
4330f8919bdaSduboff 		break;
4331f8919bdaSduboff 
4332f8919bdaSduboff 	case ETHER_STAT_MULTI_COLLISIONS:
4333f8919bdaSduboff 		val = gstp->multi_coll;
4334f8919bdaSduboff 		break;
4335f8919bdaSduboff 
4336f8919bdaSduboff 	case ETHER_STAT_SQE_ERRORS:
4337f8919bdaSduboff 		val = gstp->sqe;
4338f8919bdaSduboff 		break;
4339f8919bdaSduboff 
4340f8919bdaSduboff 	case ETHER_STAT_DEFER_XMTS:
4341f8919bdaSduboff 		val = gstp->defer;
4342f8919bdaSduboff 		break;
4343f8919bdaSduboff 
4344f8919bdaSduboff 	case ETHER_STAT_TX_LATE_COLLISIONS:
4345f8919bdaSduboff 		val = gstp->xmtlatecoll;
4346f8919bdaSduboff 		break;
4347f8919bdaSduboff 
4348f8919bdaSduboff 	case ETHER_STAT_EX_COLLISIONS:
4349f8919bdaSduboff 		val = gstp->excoll;
4350f8919bdaSduboff 		break;
4351f8919bdaSduboff 
4352f8919bdaSduboff 	case ETHER_STAT_MACXMT_ERRORS:
4353f8919bdaSduboff 		val = gstp->xmit_internal_err;
4354f8919bdaSduboff 		break;
4355f8919bdaSduboff 
4356f8919bdaSduboff 	case ETHER_STAT_CARRIER_ERRORS:
4357f8919bdaSduboff 		val = gstp->nocarrier;
4358f8919bdaSduboff 		break;
4359f8919bdaSduboff 
4360f8919bdaSduboff 	case ETHER_STAT_TOOLONG_ERRORS:
4361f8919bdaSduboff 		val = gstp->frame_too_long;
4362f8919bdaSduboff 		break;
4363f8919bdaSduboff 
4364f8919bdaSduboff 	case ETHER_STAT_MACRCV_ERRORS:
4365f8919bdaSduboff 		val = gstp->rcv_internal_err;
4366f8919bdaSduboff 		break;
4367f8919bdaSduboff 
4368f8919bdaSduboff 	case ETHER_STAT_XCVR_ADDR:
4369f8919bdaSduboff 		val = dp->mii_phy_addr;
4370f8919bdaSduboff 		break;
4371f8919bdaSduboff 
4372f8919bdaSduboff 	case ETHER_STAT_XCVR_ID:
4373f8919bdaSduboff 		val = dp->mii_phy_id;
4374f8919bdaSduboff 		break;
4375f8919bdaSduboff 
4376f8919bdaSduboff 	case ETHER_STAT_XCVR_INUSE:
4377f8919bdaSduboff 		val = gem_mac_xcvr_inuse(dp);
4378f8919bdaSduboff 		break;
4379f8919bdaSduboff 
4380f8919bdaSduboff 	case ETHER_STAT_CAP_1000FDX:
4381f8919bdaSduboff 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
4382f8919bdaSduboff 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
4383f8919bdaSduboff 		break;
4384f8919bdaSduboff 
4385f8919bdaSduboff 	case ETHER_STAT_CAP_1000HDX:
4386f8919bdaSduboff 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
4387f8919bdaSduboff 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
4388f8919bdaSduboff 		break;
4389f8919bdaSduboff 
4390f8919bdaSduboff 	case ETHER_STAT_CAP_100FDX:
4391f8919bdaSduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4392f8919bdaSduboff 		break;
4393f8919bdaSduboff 
4394f8919bdaSduboff 	case ETHER_STAT_CAP_100HDX:
4395f8919bdaSduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4396f8919bdaSduboff 		break;
4397f8919bdaSduboff 
4398f8919bdaSduboff 	case ETHER_STAT_CAP_10FDX:
4399f8919bdaSduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4400f8919bdaSduboff 		break;
4401f8919bdaSduboff 
4402f8919bdaSduboff 	case ETHER_STAT_CAP_10HDX:
4403f8919bdaSduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_10);
4404f8919bdaSduboff 		break;
4405f8919bdaSduboff 
4406f8919bdaSduboff 	case ETHER_STAT_CAP_ASMPAUSE:
4407f8919bdaSduboff 		val = BOOLEAN(dp->gc.gc_flow_control & 2);
4408f8919bdaSduboff 		break;
4409f8919bdaSduboff 
4410f8919bdaSduboff 	case ETHER_STAT_CAP_PAUSE:
4411f8919bdaSduboff 		val = BOOLEAN(dp->gc.gc_flow_control & 1);
4412f8919bdaSduboff 		break;
4413f8919bdaSduboff 
4414f8919bdaSduboff 	case ETHER_STAT_CAP_AUTONEG:
441523d366e3Sduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4416f8919bdaSduboff 		break;
4417f8919bdaSduboff 
4418f8919bdaSduboff 	case ETHER_STAT_ADV_CAP_1000FDX:
4419f8919bdaSduboff 		val = dp->anadv_1000fdx;
4420f8919bdaSduboff 		break;
4421f8919bdaSduboff 
4422f8919bdaSduboff 	case ETHER_STAT_ADV_CAP_1000HDX:
4423f8919bdaSduboff 		val = dp->anadv_1000hdx;
4424f8919bdaSduboff 		break;
4425f8919bdaSduboff 
4426f8919bdaSduboff 	case ETHER_STAT_ADV_CAP_100FDX:
4427f8919bdaSduboff 		val = dp->anadv_100fdx;
4428f8919bdaSduboff 		break;
4429f8919bdaSduboff 
4430f8919bdaSduboff 	case ETHER_STAT_ADV_CAP_100HDX:
4431f8919bdaSduboff 		val = dp->anadv_100hdx;
4432f8919bdaSduboff 		break;
4433f8919bdaSduboff 
4434f8919bdaSduboff 	case ETHER_STAT_ADV_CAP_10FDX:
4435f8919bdaSduboff 		val = dp->anadv_10fdx;
4436f8919bdaSduboff 		break;
4437f8919bdaSduboff 
4438f8919bdaSduboff 	case ETHER_STAT_ADV_CAP_10HDX:
4439f8919bdaSduboff 		val = dp->anadv_10hdx;
4440f8919bdaSduboff 		break;
4441f8919bdaSduboff 
4442f8919bdaSduboff 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
4443f8919bdaSduboff 		val = BOOLEAN(dp->anadv_flow_control & 2);
4444f8919bdaSduboff 		break;
4445f8919bdaSduboff 
4446f8919bdaSduboff 	case ETHER_STAT_ADV_CAP_PAUSE:
4447f8919bdaSduboff 		val = BOOLEAN(dp->anadv_flow_control & 1);
4448f8919bdaSduboff 		break;
4449f8919bdaSduboff 
4450f8919bdaSduboff 	case ETHER_STAT_ADV_CAP_AUTONEG:
4451f8919bdaSduboff 		val = dp->anadv_autoneg;
4452f8919bdaSduboff 		break;
4453f8919bdaSduboff 
4454f8919bdaSduboff 	case ETHER_STAT_LP_CAP_1000FDX:
4455f8919bdaSduboff 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
4456f8919bdaSduboff 		break;
4457f8919bdaSduboff 
4458f8919bdaSduboff 	case ETHER_STAT_LP_CAP_1000HDX:
4459f8919bdaSduboff 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
4460f8919bdaSduboff 		break;
4461f8919bdaSduboff 
4462f8919bdaSduboff 	case ETHER_STAT_LP_CAP_100FDX:
4463f8919bdaSduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
4464f8919bdaSduboff 		break;
4465f8919bdaSduboff 
4466f8919bdaSduboff 	case ETHER_STAT_LP_CAP_100HDX:
4467f8919bdaSduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
4468f8919bdaSduboff 		break;
4469f8919bdaSduboff 
4470f8919bdaSduboff 	case ETHER_STAT_LP_CAP_10FDX:
4471f8919bdaSduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
4472f8919bdaSduboff 		break;
4473f8919bdaSduboff 
4474f8919bdaSduboff 	case ETHER_STAT_LP_CAP_10HDX:
4475f8919bdaSduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
4476f8919bdaSduboff 		break;
4477f8919bdaSduboff 
4478f8919bdaSduboff 	case ETHER_STAT_LP_CAP_ASMPAUSE:
4479bdb9230aSGarrett D'Amore 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASMPAUSE);
4480f8919bdaSduboff 		break;
4481f8919bdaSduboff 
4482f8919bdaSduboff 	case ETHER_STAT_LP_CAP_PAUSE:
4483f8919bdaSduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
4484f8919bdaSduboff 		break;
4485f8919bdaSduboff 
4486f8919bdaSduboff 	case ETHER_STAT_LP_CAP_AUTONEG:
4487f8919bdaSduboff 		val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4488f8919bdaSduboff 		break;
4489f8919bdaSduboff 
4490f8919bdaSduboff 	case ETHER_STAT_LINK_ASMPAUSE:
4491f8919bdaSduboff 		val = BOOLEAN(dp->flow_control & 2);
4492f8919bdaSduboff 		break;
4493f8919bdaSduboff 
4494f8919bdaSduboff 	case ETHER_STAT_LINK_PAUSE:
4495f8919bdaSduboff 		val = BOOLEAN(dp->flow_control & 1);
4496f8919bdaSduboff 		break;
4497f8919bdaSduboff 
4498f8919bdaSduboff 	case ETHER_STAT_LINK_AUTONEG:
4499f8919bdaSduboff 		val = dp->anadv_autoneg &&
4500f8919bdaSduboff 		    BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4501f8919bdaSduboff 		break;
4502f8919bdaSduboff 
4503f8919bdaSduboff 	case ETHER_STAT_LINK_DUPLEX:
4504f8919bdaSduboff 		val = (dp->mii_state == MII_STATE_LINKUP) ?
4505f8919bdaSduboff 		    (dp->full_duplex ? 2 : 1) : 0;
4506f8919bdaSduboff 		break;
4507f8919bdaSduboff 
4508f8919bdaSduboff 	case ETHER_STAT_TOOSHORT_ERRORS:
4509f8919bdaSduboff 		val = gstp->runt;
4510f8919bdaSduboff 		break;
4511f8919bdaSduboff 	case ETHER_STAT_LP_REMFAULT:
4512f8919bdaSduboff 		val = BOOLEAN(dp->mii_lpable & MII_AN_ADVERT_REMFAULT);
4513f8919bdaSduboff 		break;
4514f8919bdaSduboff 
4515f8919bdaSduboff 	case ETHER_STAT_JABBER_ERRORS:
4516f8919bdaSduboff 		val = gstp->jabber;
4517f8919bdaSduboff 		break;
4518f8919bdaSduboff 
4519f8919bdaSduboff 	case ETHER_STAT_CAP_100T4:
4520f8919bdaSduboff 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4521f8919bdaSduboff 		break;
4522f8919bdaSduboff 
4523f8919bdaSduboff 	case ETHER_STAT_ADV_CAP_100T4:
4524f8919bdaSduboff 		val = dp->anadv_100t4;
4525f8919bdaSduboff 		break;
4526f8919bdaSduboff 
4527f8919bdaSduboff 	case ETHER_STAT_LP_CAP_100T4:
4528f8919bdaSduboff 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
4529f8919bdaSduboff 		break;
4530f8919bdaSduboff 
4531f8919bdaSduboff 	default:
4532f8919bdaSduboff #if GEM_DEBUG_LEVEL > 2
4533f8919bdaSduboff 		cmn_err(CE_WARN,
4534f8919bdaSduboff 		    "%s: unrecognized parameter value = %d",
4535f8919bdaSduboff 		    __func__, stat);
4536f8919bdaSduboff #endif
4537f8919bdaSduboff 		return (ENOTSUP);
4538f8919bdaSduboff 	}
4539f8919bdaSduboff 
4540f8919bdaSduboff 	*valp = val;
4541f8919bdaSduboff 
4542f8919bdaSduboff 	return (0);
4543f8919bdaSduboff }
4544f8919bdaSduboff 
4545f8919bdaSduboff static int
gem_m_unicst(void * arg,const uint8_t * mac)4546f8919bdaSduboff gem_m_unicst(void *arg, const uint8_t *mac)
4547f8919bdaSduboff {
4548f8919bdaSduboff 	int		err = 0;
4549f8919bdaSduboff 	struct gem_dev	*dp = arg;
4550f8919bdaSduboff 
4551f8919bdaSduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4552f8919bdaSduboff 
4553f8919bdaSduboff 	mutex_enter(&dp->intrlock);
4554f8919bdaSduboff 	if (dp->mac_suspended) {
4555f8919bdaSduboff 		mutex_exit(&dp->intrlock);
4556f8919bdaSduboff 		return (EIO);
4557f8919bdaSduboff 	}
4558f8919bdaSduboff 	bcopy(mac, dp->cur_addr.ether_addr_octet, ETHERADDRL);
4559f8919bdaSduboff 	dp->rxmode |= RXMODE_ENABLE;
4560f8919bdaSduboff 
4561f8919bdaSduboff 	if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4562f8919bdaSduboff 		err = EIO;
4563f8919bdaSduboff 	}
4564f8919bdaSduboff 	mutex_exit(&dp->intrlock);
4565f8919bdaSduboff 
4566f8919bdaSduboff 	return (err);
4567f8919bdaSduboff }
4568f8919bdaSduboff 
4569f8919bdaSduboff /*
4570f8919bdaSduboff  * gem_m_tx is used only for sending data packets into ethernet wire.
4571f8919bdaSduboff  */
4572f8919bdaSduboff static mblk_t *
gem_m_tx(void * arg,mblk_t * mp)4573f8919bdaSduboff gem_m_tx(void *arg, mblk_t *mp)
4574f8919bdaSduboff {
4575f8919bdaSduboff 	uint32_t	flags = 0;
4576f8919bdaSduboff 	struct gem_dev	*dp = arg;
4577f8919bdaSduboff 	mblk_t		*tp;
4578f8919bdaSduboff 
4579f8919bdaSduboff 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4580f8919bdaSduboff 
4581f8919bdaSduboff 	ASSERT(dp->nic_state == NIC_STATE_ONLINE);
4582f8919bdaSduboff 	if (dp->mii_state != MII_STATE_LINKUP) {
4583f8919bdaSduboff 		/* Some nics hate to send packets when the link is down. */
4584f8919bdaSduboff 		while (mp) {
4585f8919bdaSduboff 			tp = mp->b_next;
4586f8919bdaSduboff 			mp->b_next = NULL;
4587f8919bdaSduboff 			freemsg(mp);
4588f8919bdaSduboff 			mp = tp;
4589f8919bdaSduboff 		}
4590f8919bdaSduboff 		return (NULL);
4591f8919bdaSduboff 	}
4592f8919bdaSduboff 
4593f8919bdaSduboff 	return (gem_send_common(dp, mp, flags));
4594f8919bdaSduboff }
4595f8919bdaSduboff 
4596f8919bdaSduboff static void
gem_m_ioctl(void * arg,queue_t * wq,mblk_t * mp)4597f8919bdaSduboff gem_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
4598f8919bdaSduboff {
4599f8919bdaSduboff 	DPRINTF(0, (CE_CONT, "!%s: %s: called",
4600f8919bdaSduboff 	    ((struct gem_dev *)arg)->name, __func__));
4601f8919bdaSduboff 
4602f8919bdaSduboff 	gem_mac_ioctl((struct gem_dev *)arg, wq, mp);
4603f8919bdaSduboff }
4604f8919bdaSduboff 
4605da14cebeSEric Cheng /* ARGSUSED */
4606f8919bdaSduboff static boolean_t
gem_m_getcapab(void * arg,mac_capab_t cap,void * cap_data)4607f8919bdaSduboff gem_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
4608f8919bdaSduboff {
4609da14cebeSEric Cheng 	return (B_FALSE);
4610f8919bdaSduboff }
4611f8919bdaSduboff 
4612f8919bdaSduboff static void
gem_gld3_init(struct gem_dev * dp,mac_register_t * macp)4613f8919bdaSduboff gem_gld3_init(struct gem_dev *dp, mac_register_t *macp)
4614f8919bdaSduboff {
4615f8919bdaSduboff 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4616f8919bdaSduboff 	macp->m_driver = dp;
4617f8919bdaSduboff 	macp->m_dip = dp->dip;
4618f8919bdaSduboff 	macp->m_src_addr = dp->dev_addr.ether_addr_octet;
4619f8919bdaSduboff 	macp->m_callbacks = &gem_m_callbacks;
4620f8919bdaSduboff 	macp->m_min_sdu = 0;
4621f8919bdaSduboff 	macp->m_max_sdu = dp->mtu;
462223d366e3Sduboff 
462323d366e3Sduboff 	if (dp->misc_flag & GEM_VLAN) {
4624d62bc4baSyz147064 		macp->m_margin = VTAG_SIZE;
4625f8919bdaSduboff 	}
462623d366e3Sduboff }
4627f8919bdaSduboff 
4628f8919bdaSduboff /* ======================================================================== */
4629f8919bdaSduboff /*
4630f8919bdaSduboff  * attach/detatch support
4631f8919bdaSduboff  */
4632f8919bdaSduboff /* ======================================================================== */
4633f8919bdaSduboff static void
gem_read_conf(struct gem_dev * dp)4634f8919bdaSduboff gem_read_conf(struct gem_dev *dp)
4635f8919bdaSduboff {
4636f8919bdaSduboff 	int	val;
4637f8919bdaSduboff 
4638f8919bdaSduboff 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4639f8919bdaSduboff 
4640f8919bdaSduboff 	/*
4641f8919bdaSduboff 	 * Get media mode infomation from .conf file
4642f8919bdaSduboff 	 */
4643f8919bdaSduboff 	dp->anadv_autoneg = gem_prop_get_int(dp, "adv_autoneg_cap", 1) != 0;
4644f8919bdaSduboff 	dp->anadv_1000fdx = gem_prop_get_int(dp, "adv_1000fdx_cap", 1) != 0;
4645f8919bdaSduboff 	dp->anadv_1000hdx = gem_prop_get_int(dp, "adv_1000hdx_cap", 1) != 0;
4646f8919bdaSduboff 	dp->anadv_100t4   = gem_prop_get_int(dp, "adv_100T4_cap", 1) != 0;
4647f8919bdaSduboff 	dp->anadv_100fdx  = gem_prop_get_int(dp, "adv_100fdx_cap", 1) != 0;
4648f8919bdaSduboff 	dp->anadv_100hdx  = gem_prop_get_int(dp, "adv_100hdx_cap", 1) != 0;
4649f8919bdaSduboff 	dp->anadv_10fdx   = gem_prop_get_int(dp, "adv_10fdx_cap", 1) != 0;
4650f8919bdaSduboff 	dp->anadv_10hdx   = gem_prop_get_int(dp, "adv_10hdx_cap", 1) != 0;
4651f8919bdaSduboff 
4652f8919bdaSduboff 	if ((ddi_prop_exists(DDI_DEV_T_ANY, dp->dip,
4653f8919bdaSduboff 	    DDI_PROP_DONTPASS, "full-duplex"))) {
4654f8919bdaSduboff 		dp->full_duplex = gem_prop_get_int(dp, "full-duplex", 1) != 0;
4655f8919bdaSduboff 		dp->anadv_autoneg = B_FALSE;
465623d366e3Sduboff 		if (dp->full_duplex) {
4657f8919bdaSduboff 			dp->anadv_1000hdx = B_FALSE;
4658f8919bdaSduboff 			dp->anadv_100hdx = B_FALSE;
4659f8919bdaSduboff 			dp->anadv_10hdx = B_FALSE;
466023d366e3Sduboff 		} else {
466123d366e3Sduboff 			dp->anadv_1000fdx = B_FALSE;
466223d366e3Sduboff 			dp->anadv_100fdx = B_FALSE;
466323d366e3Sduboff 			dp->anadv_10fdx = B_FALSE;
466423d366e3Sduboff 		}
4665f8919bdaSduboff 	}
4666f8919bdaSduboff 
4667f8919bdaSduboff 	if ((val = gem_prop_get_int(dp, "speed", 0)) > 0) {
4668f8919bdaSduboff 		dp->anadv_autoneg = B_FALSE;
4669f8919bdaSduboff 		switch (val) {
4670f8919bdaSduboff 		case 1000:
4671f8919bdaSduboff 			dp->speed = GEM_SPD_1000;
4672f8919bdaSduboff 			dp->anadv_100t4   = B_FALSE;
4673f8919bdaSduboff 			dp->anadv_100fdx  = B_FALSE;
4674f8919bdaSduboff 			dp->anadv_100hdx  = B_FALSE;
4675f8919bdaSduboff 			dp->anadv_10fdx   = B_FALSE;
4676f8919bdaSduboff 			dp->anadv_10hdx   = B_FALSE;
4677f8919bdaSduboff 			break;
4678f8919bdaSduboff 		case 100:
4679f8919bdaSduboff 			dp->speed = GEM_SPD_100;
4680f8919bdaSduboff 			dp->anadv_1000fdx = B_FALSE;
4681f8919bdaSduboff 			dp->anadv_1000hdx = B_FALSE;
4682f8919bdaSduboff 			dp->anadv_10fdx   = B_FALSE;
4683f8919bdaSduboff 			dp->anadv_10hdx   = B_FALSE;
4684f8919bdaSduboff 			break;
4685f8919bdaSduboff 		case 10:
4686f8919bdaSduboff 			dp->speed = GEM_SPD_10;
4687f8919bdaSduboff 			dp->anadv_1000fdx = B_FALSE;
4688f8919bdaSduboff 			dp->anadv_1000hdx = B_FALSE;
4689f8919bdaSduboff 			dp->anadv_100t4   = B_FALSE;
4690f8919bdaSduboff 			dp->anadv_100fdx  = B_FALSE;
4691f8919bdaSduboff 			dp->anadv_100hdx  = B_FALSE;
4692f8919bdaSduboff 			break;
4693f8919bdaSduboff 		default:
4694f8919bdaSduboff 			cmn_err(CE_WARN,
4695f8919bdaSduboff 			    "!%s: property %s: illegal value:%d",
469623d366e3Sduboff 			    dp->name, "speed", val);
4697f8919bdaSduboff 			dp->anadv_autoneg = B_TRUE;
4698f8919bdaSduboff 			break;
4699f8919bdaSduboff 		}
4700f8919bdaSduboff 	}
4701f8919bdaSduboff 
4702f8919bdaSduboff 	val = gem_prop_get_int(dp, "flow-control", dp->gc.gc_flow_control);
4703f8919bdaSduboff 	if (val > FLOW_CONTROL_RX_PAUSE || val < FLOW_CONTROL_NONE) {
4704f8919bdaSduboff 		cmn_err(CE_WARN,
4705f8919bdaSduboff 		    "!%s: property %s: illegal value:%d",
470623d366e3Sduboff 		    dp->name, "flow-control", val);
4707f8919bdaSduboff 	} else {
4708f8919bdaSduboff 		val = min(val, dp->gc.gc_flow_control);
4709f8919bdaSduboff 	}
4710f8919bdaSduboff 	dp->anadv_flow_control = val;
4711f8919bdaSduboff 
4712f8919bdaSduboff 	if (gem_prop_get_int(dp, "nointr", 0)) {
4713f8919bdaSduboff 		dp->misc_flag |= GEM_NOINTR;
4714f8919bdaSduboff 		cmn_err(CE_NOTE, "!%s: polling mode enabled", dp->name);
4715f8919bdaSduboff 	}
4716f8919bdaSduboff 
4717f8919bdaSduboff 	dp->mtu = gem_prop_get_int(dp, "mtu", dp->mtu);
4718f8919bdaSduboff 	dp->txthr = gem_prop_get_int(dp, "txthr", dp->txthr);
4719f8919bdaSduboff 	dp->rxthr = gem_prop_get_int(dp, "rxthr", dp->rxthr);
4720f8919bdaSduboff 	dp->txmaxdma = gem_prop_get_int(dp, "txmaxdma", dp->txmaxdma);
4721f8919bdaSduboff 	dp->rxmaxdma = gem_prop_get_int(dp, "rxmaxdma", dp->rxmaxdma);
4722f8919bdaSduboff }
4723f8919bdaSduboff 
4724f8919bdaSduboff 
4725f8919bdaSduboff /*
4726f8919bdaSduboff  * Gem kstat support
4727f8919bdaSduboff  */
4728f8919bdaSduboff 
/*
 * Size of the single contiguous allocation backing one gem_dev port:
 * the soft state itself, the multicast address table (GEM_MAXMC
 * entries), the tx buffer array, and a slot array of tx buffer
 * pointers.  gem_do_attach() carves these regions out of one
 * kmem_zalloc()'ed chunk; gem_do_detach() frees it with the same size.
 */
#define	GEM_LOCAL_DATA_SIZE(gc)	\
	(sizeof (struct gem_dev) + \
	sizeof (struct mcast_addr) * GEM_MAXMC + \
	sizeof (struct txbuf) * ((gc)->gc_tx_buf_size) + \
	sizeof (void *) * ((gc)->gc_tx_buf_size))
4734f8919bdaSduboff 
4735f8919bdaSduboff struct gem_dev *
gem_do_attach(dev_info_t * dip,int port,struct gem_conf * gc,void * base,ddi_acc_handle_t * regs_handlep,void * lp,int lmsize)4736f8919bdaSduboff gem_do_attach(dev_info_t *dip, int port,
4737f8919bdaSduboff 	struct gem_conf *gc, void *base, ddi_acc_handle_t *regs_handlep,
4738f8919bdaSduboff 	void *lp, int lmsize)
4739f8919bdaSduboff {
4740f8919bdaSduboff 	struct gem_dev		*dp;
4741f8919bdaSduboff 	int			i;
4742f8919bdaSduboff 	ddi_iblock_cookie_t	c;
4743f8919bdaSduboff 	mac_register_t		*macp = NULL;
4744f8919bdaSduboff 	int			ret;
4745f8919bdaSduboff 	int			unit;
4746f8919bdaSduboff 	int			nports;
4747f8919bdaSduboff 
4748f8919bdaSduboff 	unit = ddi_get_instance(dip);
4749f8919bdaSduboff 	if ((nports = gc->gc_nports) == 0) {
4750f8919bdaSduboff 		nports = 1;
4751f8919bdaSduboff 	}
4752f8919bdaSduboff 	if (nports == 1) {
4753f8919bdaSduboff 		ddi_set_driver_private(dip, NULL);
4754f8919bdaSduboff 	}
4755f8919bdaSduboff 
4756f8919bdaSduboff 	DPRINTF(2, (CE_CONT, "!gem%d: gem_do_attach: called cmd:ATTACH",
4757f8919bdaSduboff 	    unit));
4758f8919bdaSduboff 
4759f8919bdaSduboff 	/*
4760f8919bdaSduboff 	 * Allocate soft data structure
4761f8919bdaSduboff 	 */
4762f8919bdaSduboff 	dp = kmem_zalloc(GEM_LOCAL_DATA_SIZE(gc), KM_SLEEP);
4763f8919bdaSduboff 
4764f8919bdaSduboff 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
4765f8919bdaSduboff 		cmn_err(CE_WARN, "!gem%d: %s: mac_alloc failed",
4766f8919bdaSduboff 		    unit, __func__);
4767f8919bdaSduboff 		return (NULL);
4768f8919bdaSduboff 	}
4769f8919bdaSduboff 	/* ddi_set_driver_private(dip, dp); */
4770f8919bdaSduboff 
4771f8919bdaSduboff 	/* link to private area */
4772f8919bdaSduboff 	dp->private = lp;
4773f8919bdaSduboff 	dp->priv_size = lmsize;
4774f8919bdaSduboff 	dp->mc_list = (struct mcast_addr *)&dp[1];
4775f8919bdaSduboff 
4776f8919bdaSduboff 	dp->dip = dip;
4777f8919bdaSduboff 	(void) sprintf(dp->name, gc->gc_name, nports * unit + port);
4778f8919bdaSduboff 
4779f8919bdaSduboff 	/*
4780f8919bdaSduboff 	 * Get iblock cookie
4781f8919bdaSduboff 	 */
4782f8919bdaSduboff 	if (ddi_get_iblock_cookie(dip, 0, &c) != DDI_SUCCESS) {
4783f8919bdaSduboff 		cmn_err(CE_CONT,
4784f8919bdaSduboff 		    "!%s: gem_do_attach: ddi_get_iblock_cookie: failed",
4785f8919bdaSduboff 		    dp->name);
4786f8919bdaSduboff 		goto err_free_private;
4787f8919bdaSduboff 	}
4788f8919bdaSduboff 	dp->iblock_cookie = c;
4789f8919bdaSduboff 
4790f8919bdaSduboff 	/*
4791f8919bdaSduboff 	 * Initialize mutex's for this device.
4792f8919bdaSduboff 	 */
4793f8919bdaSduboff 	mutex_init(&dp->intrlock, NULL, MUTEX_DRIVER, (void *)c);
4794f8919bdaSduboff 	mutex_init(&dp->xmitlock, NULL, MUTEX_DRIVER, (void *)c);
4795f8919bdaSduboff 	cv_init(&dp->tx_drain_cv, NULL, CV_DRIVER, NULL);
4796f8919bdaSduboff 
4797f8919bdaSduboff 	/*
4798f8919bdaSduboff 	 * configure gem parameter
4799f8919bdaSduboff 	 */
4800f8919bdaSduboff 	dp->base_addr = base;
4801f8919bdaSduboff 	dp->regs_handle = *regs_handlep;
4802f8919bdaSduboff 	dp->gc = *gc;
4803f8919bdaSduboff 	gc = &dp->gc;
4804f8919bdaSduboff 	/* patch for simplify dma resource management */
4805f8919bdaSduboff 	gc->gc_tx_max_frags = 1;
4806f8919bdaSduboff 	gc->gc_tx_max_descs_per_pkt = 1;
4807f8919bdaSduboff 	gc->gc_tx_ring_size = gc->gc_tx_buf_size;
4808f8919bdaSduboff 	gc->gc_tx_ring_limit = gc->gc_tx_buf_limit;
4809f8919bdaSduboff 	gc->gc_tx_desc_write_oo = B_TRUE;
4810f8919bdaSduboff 
4811f8919bdaSduboff 	gc->gc_nports = nports;	/* fix nports */
4812f8919bdaSduboff 
4813f8919bdaSduboff 	/* fix copy threadsholds */
4814f8919bdaSduboff 	gc->gc_tx_copy_thresh = max(ETHERMIN, gc->gc_tx_copy_thresh);
4815f8919bdaSduboff 	gc->gc_rx_copy_thresh = max(ETHERMIN, gc->gc_rx_copy_thresh);
4816f8919bdaSduboff 
4817f8919bdaSduboff 	/* fix rx buffer boundary for iocache line size */
4818f8919bdaSduboff 	ASSERT(gc->gc_dma_attr_txbuf.dma_attr_align-1 == gc->gc_tx_buf_align);
4819f8919bdaSduboff 	ASSERT(gc->gc_dma_attr_rxbuf.dma_attr_align-1 == gc->gc_rx_buf_align);
4820f8919bdaSduboff 	gc->gc_rx_buf_align = max(gc->gc_rx_buf_align, IOC_LINESIZE - 1);
4821f8919bdaSduboff 	gc->gc_dma_attr_rxbuf.dma_attr_align = gc->gc_rx_buf_align + 1;
4822f8919bdaSduboff 
482323d366e3Sduboff 	/* fix descriptor boundary for cache line size */
482423d366e3Sduboff 	gc->gc_dma_attr_desc.dma_attr_align =
482523d366e3Sduboff 	    max(gc->gc_dma_attr_desc.dma_attr_align, IOC_LINESIZE);
482623d366e3Sduboff 
4827f8919bdaSduboff 	/* patch get_packet method */
4828f8919bdaSduboff 	if (gc->gc_get_packet == NULL) {
4829f8919bdaSduboff 		gc->gc_get_packet = &gem_get_packet_default;
4830f8919bdaSduboff 	}
4831f8919bdaSduboff 
4832f8919bdaSduboff 	/* patch get_rx_start method */
4833f8919bdaSduboff 	if (gc->gc_rx_start == NULL) {
4834f8919bdaSduboff 		gc->gc_rx_start = &gem_rx_start_default;
4835f8919bdaSduboff 	}
4836f8919bdaSduboff 
4837f8919bdaSduboff 	/* calculate descriptor area */
4838f8919bdaSduboff 	if (gc->gc_rx_desc_unit_shift >= 0) {
4839f8919bdaSduboff 		dp->rx_desc_size =
4840f8919bdaSduboff 		    ROUNDUP(gc->gc_rx_ring_size << gc->gc_rx_desc_unit_shift,
4841f8919bdaSduboff 		    gc->gc_dma_attr_desc.dma_attr_align);
4842f8919bdaSduboff 	}
4843f8919bdaSduboff 	if (gc->gc_tx_desc_unit_shift >= 0) {
4844f8919bdaSduboff 		dp->tx_desc_size =
4845f8919bdaSduboff 		    ROUNDUP(gc->gc_tx_ring_size << gc->gc_tx_desc_unit_shift,
4846f8919bdaSduboff 		    gc->gc_dma_attr_desc.dma_attr_align);
4847f8919bdaSduboff 	}
4848f8919bdaSduboff 
4849f8919bdaSduboff 	dp->mtu = ETHERMTU;
4850f8919bdaSduboff 	dp->tx_buf = (void *)&dp->mc_list[GEM_MAXMC];
4851f8919bdaSduboff 	/* link tx buffers */
4852f8919bdaSduboff 	for (i = 0; i < dp->gc.gc_tx_buf_size; i++) {
4853f8919bdaSduboff 		dp->tx_buf[i].txb_next =
4854f8919bdaSduboff 		    &dp->tx_buf[SLOT(i + 1, dp->gc.gc_tx_buf_size)];
4855f8919bdaSduboff 	}
4856f8919bdaSduboff 
4857f8919bdaSduboff 	dp->rxmode	   = 0;
4858f8919bdaSduboff 	dp->speed	   = GEM_SPD_10;	/* default is 10Mbps */
4859f8919bdaSduboff 	dp->full_duplex    = B_FALSE;		/* default is half */
4860f8919bdaSduboff 	dp->flow_control   = FLOW_CONTROL_NONE;
486123d366e3Sduboff 	dp->poll_pkt_delay = 8;		/* typical coalease for rx packets */
4862f8919bdaSduboff 
4863f8919bdaSduboff 	/* performance tuning parameters */
4864f8919bdaSduboff 	dp->txthr    = ETHERMAX;	/* tx fifo threshold */
4865f8919bdaSduboff 	dp->txmaxdma = 16*4;		/* tx max dma burst size */
4866f8919bdaSduboff 	dp->rxthr    = 128;		/* rx fifo threshold */
4867f8919bdaSduboff 	dp->rxmaxdma = 16*4;		/* rx max dma burst size */
4868f8919bdaSduboff 
4869f8919bdaSduboff 	/*
4870f8919bdaSduboff 	 * Get media mode information from .conf file
4871f8919bdaSduboff 	 */
4872f8919bdaSduboff 	gem_read_conf(dp);
4873f8919bdaSduboff 
4874f8919bdaSduboff 	/* rx_buf_len is required buffer length without padding for alignment */
4875f8919bdaSduboff 	dp->rx_buf_len = MAXPKTBUF(dp) + dp->gc.gc_rx_header_len;
4876f8919bdaSduboff 
4877f8919bdaSduboff 	/*
4878f8919bdaSduboff 	 * Reset the chip
4879f8919bdaSduboff 	 */
4880f8919bdaSduboff 	mutex_enter(&dp->intrlock);
4881f8919bdaSduboff 	dp->nic_state = NIC_STATE_STOPPED;
4882f8919bdaSduboff 	ret = (*dp->gc.gc_reset_chip)(dp);
4883f8919bdaSduboff 	mutex_exit(&dp->intrlock);
4884f8919bdaSduboff 	if (ret != GEM_SUCCESS) {
4885f8919bdaSduboff 		goto err_free_regs;
4886f8919bdaSduboff 	}
4887f8919bdaSduboff 
4888f8919bdaSduboff 	/*
4889f8919bdaSduboff 	 * HW dependant paremeter initialization
4890f8919bdaSduboff 	 */
4891f8919bdaSduboff 	mutex_enter(&dp->intrlock);
4892f8919bdaSduboff 	ret = (*dp->gc.gc_attach_chip)(dp);
4893f8919bdaSduboff 	mutex_exit(&dp->intrlock);
4894f8919bdaSduboff 	if (ret != GEM_SUCCESS) {
4895f8919bdaSduboff 		goto err_free_regs;
4896f8919bdaSduboff 	}
4897f8919bdaSduboff 
4898f8919bdaSduboff #ifdef DEBUG_MULTIFRAGS
4899f8919bdaSduboff 	dp->gc.gc_tx_copy_thresh = dp->mtu;
4900f8919bdaSduboff #endif
4901f8919bdaSduboff 	/* allocate tx and rx resources */
4902f8919bdaSduboff 	if (gem_alloc_memory(dp)) {
4903f8919bdaSduboff 		goto err_free_regs;
4904f8919bdaSduboff 	}
4905f8919bdaSduboff 
4906f8919bdaSduboff 	DPRINTF(0, (CE_CONT,
4907f8919bdaSduboff 	    "!%s: at 0x%x, %02x:%02x:%02x:%02x:%02x:%02x",
4908f8919bdaSduboff 	    dp->name, (long)dp->base_addr,
4909f8919bdaSduboff 	    dp->dev_addr.ether_addr_octet[0],
4910f8919bdaSduboff 	    dp->dev_addr.ether_addr_octet[1],
4911f8919bdaSduboff 	    dp->dev_addr.ether_addr_octet[2],
4912f8919bdaSduboff 	    dp->dev_addr.ether_addr_octet[3],
4913f8919bdaSduboff 	    dp->dev_addr.ether_addr_octet[4],
4914f8919bdaSduboff 	    dp->dev_addr.ether_addr_octet[5]));
4915f8919bdaSduboff 
4916f8919bdaSduboff 	/* copy mac address */
4917f8919bdaSduboff 	dp->cur_addr = dp->dev_addr;
4918f8919bdaSduboff 
4919f8919bdaSduboff 	gem_gld3_init(dp, macp);
4920f8919bdaSduboff 
4921f8919bdaSduboff 	/* Probe MII phy (scan phy) */
4922f8919bdaSduboff 	dp->mii_lpable = 0;
4923f8919bdaSduboff 	dp->mii_advert = 0;
4924f8919bdaSduboff 	dp->mii_exp = 0;
4925f8919bdaSduboff 	dp->mii_ctl1000 = 0;
4926f8919bdaSduboff 	dp->mii_stat1000 = 0;
4927f8919bdaSduboff 	if ((*dp->gc.gc_mii_probe)(dp) != GEM_SUCCESS) {
4928f8919bdaSduboff 		goto err_free_ring;
4929f8919bdaSduboff 	}
4930f8919bdaSduboff 
4931f8919bdaSduboff 	/* mask unsupported abilities */
493223d366e3Sduboff 	dp->anadv_autoneg &= BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4933f8919bdaSduboff 	dp->anadv_1000fdx &=
4934f8919bdaSduboff 	    BOOLEAN(dp->mii_xstatus &
4935f8919bdaSduboff 	    (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD));
4936f8919bdaSduboff 	dp->anadv_1000hdx &=
4937f8919bdaSduboff 	    BOOLEAN(dp->mii_xstatus &
4938f8919bdaSduboff 	    (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET));
4939f8919bdaSduboff 	dp->anadv_100t4  &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4940f8919bdaSduboff 	dp->anadv_100fdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4941f8919bdaSduboff 	dp->anadv_100hdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4942f8919bdaSduboff 	dp->anadv_10fdx  &= BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4943f8919bdaSduboff 	dp->anadv_10hdx  &= BOOLEAN(dp->mii_status & MII_STATUS_10);
4944f8919bdaSduboff 
4945f8919bdaSduboff 	gem_choose_forcedmode(dp);
4946f8919bdaSduboff 
4947f8919bdaSduboff 	/* initialize MII phy if required */
4948f8919bdaSduboff 	if (dp->gc.gc_mii_init) {
4949f8919bdaSduboff 		if ((*dp->gc.gc_mii_init)(dp) != GEM_SUCCESS) {
4950f8919bdaSduboff 			goto err_free_ring;
4951f8919bdaSduboff 		}
4952f8919bdaSduboff 	}
4953f8919bdaSduboff 
4954f8919bdaSduboff 	/*
4955f8919bdaSduboff 	 * initialize kstats including mii statistics
4956f8919bdaSduboff 	 */
4957f8919bdaSduboff 	gem_nd_setup(dp);
4958f8919bdaSduboff 
4959f8919bdaSduboff 	/*
4960f8919bdaSduboff 	 * Add interrupt to system.
4961f8919bdaSduboff 	 */
4962f8919bdaSduboff 	if (ret = mac_register(macp, &dp->mh)) {
4963f8919bdaSduboff 		cmn_err(CE_WARN, "!%s: mac_register failed, error:%d",
4964f8919bdaSduboff 		    dp->name, ret);
4965f8919bdaSduboff 		goto err_release_stats;
4966f8919bdaSduboff 	}
4967f8919bdaSduboff 	mac_free(macp);
4968f8919bdaSduboff 	macp = NULL;
4969f8919bdaSduboff 
4970f8919bdaSduboff 	if (dp->misc_flag & GEM_SOFTINTR) {
4971f8919bdaSduboff 		if (ddi_add_softintr(dip,
4972f8919bdaSduboff 		    DDI_SOFTINT_LOW, &dp->soft_id,
4973f8919bdaSduboff 		    NULL, NULL,
4974f8919bdaSduboff 		    (uint_t (*)(caddr_t))gem_intr,
4975f8919bdaSduboff 		    (caddr_t)dp) != DDI_SUCCESS) {
4976f8919bdaSduboff 			cmn_err(CE_WARN, "!%s: ddi_add_softintr failed",
4977f8919bdaSduboff 			    dp->name);
4978f8919bdaSduboff 			goto err_unregister;
4979f8919bdaSduboff 		}
4980f8919bdaSduboff 	} else if ((dp->misc_flag & GEM_NOINTR) == 0) {
4981f8919bdaSduboff 		if (ddi_add_intr(dip, 0, NULL, NULL,
4982f8919bdaSduboff 		    (uint_t (*)(caddr_t))gem_intr,
4983f8919bdaSduboff 		    (caddr_t)dp) != DDI_SUCCESS) {
4984f8919bdaSduboff 			cmn_err(CE_WARN, "!%s: ddi_add_intr failed", dp->name);
4985f8919bdaSduboff 			goto err_unregister;
4986f8919bdaSduboff 		}
4987f8919bdaSduboff 	} else {
4988f8919bdaSduboff 		/*
4989f8919bdaSduboff 		 * Dont use interrupt.
4990f8919bdaSduboff 		 * schedule first call of gem_intr_watcher
4991f8919bdaSduboff 		 */
4992f8919bdaSduboff 		dp->intr_watcher_id =
4993f8919bdaSduboff 		    timeout((void (*)(void *))gem_intr_watcher,
4994f8919bdaSduboff 		    (void *)dp, drv_usectohz(3*1000000));
4995f8919bdaSduboff 	}
4996f8919bdaSduboff 
4997f8919bdaSduboff 	/* link this device to dev_info */
4998f8919bdaSduboff 	dp->next = (struct gem_dev *)ddi_get_driver_private(dip);
499923d366e3Sduboff 	dp->port = port;
5000f8919bdaSduboff 	ddi_set_driver_private(dip, (caddr_t)dp);
5001f8919bdaSduboff 
500223d366e3Sduboff 	/* reset mii phy and start mii link watcher */
5003f8919bdaSduboff 	gem_mii_start(dp);
5004f8919bdaSduboff 
5005f8919bdaSduboff 	DPRINTF(2, (CE_CONT, "!gem_do_attach: return: success"));
5006f8919bdaSduboff 	return (dp);
5007f8919bdaSduboff 
5008f8919bdaSduboff err_unregister:
5009f8919bdaSduboff 	(void) mac_unregister(dp->mh);
5010f8919bdaSduboff err_release_stats:
5011f8919bdaSduboff 	/* release NDD resources */
5012f8919bdaSduboff 	gem_nd_cleanup(dp);
5013f8919bdaSduboff 
5014f8919bdaSduboff err_free_ring:
5015f8919bdaSduboff 	gem_free_memory(dp);
5016f8919bdaSduboff err_free_regs:
5017f8919bdaSduboff 	ddi_regs_map_free(&dp->regs_handle);
5018f8919bdaSduboff err_free_locks:
5019f8919bdaSduboff 	mutex_destroy(&dp->xmitlock);
5020f8919bdaSduboff 	mutex_destroy(&dp->intrlock);
5021f8919bdaSduboff 	cv_destroy(&dp->tx_drain_cv);
5022f8919bdaSduboff err_free_private:
5023f8919bdaSduboff 	if (macp) {
5024f8919bdaSduboff 		mac_free(macp);
5025f8919bdaSduboff 	}
5026f8919bdaSduboff 	kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(gc));
5027f8919bdaSduboff 
5028f8919bdaSduboff 	return (NULL);
5029f8919bdaSduboff }
5030f8919bdaSduboff 
/*
 * gem_do_detach: detach every port hanging off this dev_info and
 * release the resources acquired in gem_do_attach().  Returns
 * DDI_SUCCESS, or DDI_FAILURE when the GLDv3 framework refuses to
 * unregister a port (in which case teardown stops there).
 */
int
gem_do_detach(dev_info_t *dip)
{
	struct gem_dev	*dp;
	struct gem_dev	*tmp;
	caddr_t		private;
	int		priv_size;
	ddi_acc_handle_t	rh;

	dp = GEM_GET_DEV(dip);
	if (dp == NULL) {
		/* nothing was ever attached */
		return (DDI_SUCCESS);
	}

	/*
	 * The register mapping and the private area are shared by all
	 * ports; save them before the per-port list is freed below.
	 */
	rh = dp->regs_handle;
	private = dp->private;
	priv_size = dp->priv_size;

	while (dp) {
		/* unregister with gld v3 */
		if (mac_unregister(dp->mh) != 0) {
			return (DDI_FAILURE);
		}

		/* ensure any rx buffers are not used */
		if (dp->rx_buf_allocated != dp->rx_buf_freecnt) {
			/* resource is busy */
			cmn_err(CE_PANIC,
			    "!%s: %s: rxbuf is busy: allocated:%d, freecnt:%d",
			    dp->name, __func__,
			    dp->rx_buf_allocated, dp->rx_buf_freecnt);
			/* NOT REACHED */
		}

		/* stop mii link watcher */
		gem_mii_stop(dp);

		/* unregister interrupt handler */
		if (dp->misc_flag & GEM_SOFTINTR) {
			ddi_remove_softintr(dp->soft_id);
		} else if ((dp->misc_flag & GEM_NOINTR) == 0) {
			ddi_remove_intr(dip, 0, dp->iblock_cookie);
		} else {
			/* stop interrupt watcher */
			if (dp->intr_watcher_id) {
				/* spin until the timeout is cancelled */
				while (untimeout(dp->intr_watcher_id) == -1)
					;
				dp->intr_watcher_id = 0;
			}
		}

		/* release NDD resources */
		gem_nd_cleanup(dp);
		/* release buffers, descriptors and dma resources */
		gem_free_memory(dp);

		/* release locks and condition variables */
		mutex_destroy(&dp->xmitlock);
		mutex_destroy(&dp->intrlock);
		cv_destroy(&dp->tx_drain_cv);

		/* release basic memory resources */
		tmp = dp->next;
		kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(&dp->gc));
		dp = tmp;
	}

	/* release common private memory for the nic */
	kmem_free(private, priv_size);

	/* release register mapping resources */
	ddi_regs_map_free(&rh);

	DPRINTF(2, (CE_CONT, "!%s%d: gem_do_detach: return: success",
	    ddi_driver_name(dip), ddi_get_instance(dip)));

	return (DDI_SUCCESS);
}
5109f8919bdaSduboff 
/*
 * gem_suspend: quiesce every port on this dev_info node for a
 * DDI_SUSPEND.  Watchers are cancelled outside the locks, then the
 * mac is stopped and marked suspended under intrlock so no further
 * register access happens.  Always returns DDI_SUCCESS.
 */
int
gem_suspend(dev_info_t *dip)
{
	struct gem_dev	*dp;

	/*
	 * stop the device
	 */
	dp = GEM_GET_DEV(dip);
	ASSERT(dp);

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	for (; dp; dp = dp->next) {

		/* stop mii link watcher */
		gem_mii_stop(dp);

		/* stop interrupt watcher for no-intr mode */
		if (dp->misc_flag & GEM_NOINTR) {
			if (dp->intr_watcher_id) {
				/* -1 means the handler is still running */
				while (untimeout(dp->intr_watcher_id) == -1)
					;
			}
			dp->intr_watcher_id = 0;
		}

		/* stop tx timeout watcher */
		if (dp->timeout_id) {
			while (untimeout(dp->timeout_id) == -1)
				;
			dp->timeout_id = 0;
		}

		/* make the nic state inactive */
		mutex_enter(&dp->intrlock);
		(void) gem_mac_stop(dp, 0);
		ASSERT(!dp->mac_active);

		/* no further register access */
		dp->mac_suspended = B_TRUE;
		mutex_exit(&dp->intrlock);
	}

	/* XXX - power down the nic */

	return (DDI_SUCCESS);
}
5158f8919bdaSduboff 
/*
 * gem_resume: bring every port back up after a DDI_RESUME.  Reverses
 * gem_suspend(): resets the chip, re-initializes the phy, restarts the
 * watchers and the mac, and restores the rx filter.
 * Returns DDI_SUCCESS, or DDI_FAILURE if any step fails.
 */
int
gem_resume(dev_info_t *dip)
{
	struct gem_dev	*dp;

	/*
	 * restart the device
	 */
	dp = GEM_GET_DEV(dip);
	ASSERT(dp);

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	for (; dp; dp = dp->next) {

		/*
		 * Bring up the nic after power up
		 */

		/* gem_xxx.c layer to setup power management state. */
		ASSERT(!dp->mac_active);

		/* reset the chip, because we are just after power up. */
		mutex_enter(&dp->intrlock);

		dp->mac_suspended = B_FALSE;
		dp->nic_state = NIC_STATE_STOPPED;

		if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
			cmn_err(CE_WARN, "%s: %s: failed to reset chip",
			    dp->name, __func__);
			mutex_exit(&dp->intrlock);
			goto err;
		}
		mutex_exit(&dp->intrlock);

		/* initialize mii phy because we are just after power up */
		if (dp->gc.gc_mii_init) {
			(void) (*dp->gc.gc_mii_init)(dp);
		}

		if (dp->misc_flag & GEM_NOINTR) {
			/*
			 * schedule first call of gem_intr_watcher
			 * instead of interrupts.
			 */
			dp->intr_watcher_id =
			    timeout((void (*)(void *))gem_intr_watcher,
			    (void *)dp, drv_usectohz(3*1000000));
		}

		/* restart mii link watcher */
		gem_mii_start(dp);

		/* restart mac */
		mutex_enter(&dp->intrlock);

		if (gem_mac_init(dp) != GEM_SUCCESS) {
			mutex_exit(&dp->intrlock);
			goto err_reset;
		}
		dp->nic_state = NIC_STATE_INITIALIZED;

		/* setup media mode if the link have been up */
		if (dp->mii_state == MII_STATE_LINKUP) {
			if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
				mutex_exit(&dp->intrlock);
				goto err_reset;
			}
		}

		/* enable mac address and rx filter */
		dp->rxmode |= RXMODE_ENABLE;
		if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
			mutex_exit(&dp->intrlock);
			goto err_reset;
		}
		dp->nic_state = NIC_STATE_ONLINE;

		/* restart tx timeout watcher */
		dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
		    (void *)dp,
		    dp->gc.gc_tx_timeout_interval);

		/* now the nic is fully functional */
		if (dp->mii_state == MII_STATE_LINKUP) {
			if (gem_mac_start(dp) != GEM_SUCCESS) {
				mutex_exit(&dp->intrlock);
				goto err_reset;
			}
		}
		mutex_exit(&dp->intrlock);
	}

	return (DDI_SUCCESS);

err_reset:
	/*
	 * NOTE(review): this path resets only the port that failed;
	 * ports already resumed in earlier iterations are left running.
	 * Verify that is the intended recovery policy.
	 */
	if (dp->intr_watcher_id) {
		while (untimeout(dp->intr_watcher_id) == -1)
			;
		dp->intr_watcher_id = 0;
	}
	mutex_enter(&dp->intrlock);
	(*dp->gc.gc_reset_chip)(dp);
	dp->nic_state = NIC_STATE_STOPPED;
	mutex_exit(&dp->intrlock);

err:
	return (DDI_FAILURE);
}
5269f8919bdaSduboff 
5270f8919bdaSduboff /*
5271f8919bdaSduboff  * misc routines for PCI
5272f8919bdaSduboff  */
5273f8919bdaSduboff uint8_t
gem_search_pci_cap(dev_info_t * dip,ddi_acc_handle_t conf_handle,uint8_t target)5274f8919bdaSduboff gem_search_pci_cap(dev_info_t *dip,
5275f8919bdaSduboff 		ddi_acc_handle_t conf_handle, uint8_t target)
5276f8919bdaSduboff {
5277f8919bdaSduboff 	uint8_t		pci_cap_ptr;
5278f8919bdaSduboff 	uint32_t	pci_cap;
5279f8919bdaSduboff 
5280f8919bdaSduboff 	/* search power management capablities */
5281f8919bdaSduboff 	pci_cap_ptr = pci_config_get8(conf_handle, PCI_CONF_CAP_PTR);
5282f8919bdaSduboff 	while (pci_cap_ptr) {
5283f8919bdaSduboff 		/* read pci capability header */
5284f8919bdaSduboff 		pci_cap = pci_config_get32(conf_handle, pci_cap_ptr);
5285f8919bdaSduboff 		if ((pci_cap & 0xff) == target) {
5286f8919bdaSduboff 			/* found */
5287f8919bdaSduboff 			break;
5288f8919bdaSduboff 		}
5289f8919bdaSduboff 		/* get next_ptr */
5290f8919bdaSduboff 		pci_cap_ptr = (pci_cap >> 8) & 0xff;
5291f8919bdaSduboff 	}
5292f8919bdaSduboff 	return (pci_cap_ptr);
5293f8919bdaSduboff }
5294f8919bdaSduboff 
5295f8919bdaSduboff int
gem_pci_set_power_state(dev_info_t * dip,ddi_acc_handle_t conf_handle,uint_t new_mode)5296f8919bdaSduboff gem_pci_set_power_state(dev_info_t *dip,
5297f8919bdaSduboff 		ddi_acc_handle_t conf_handle, uint_t new_mode)
5298f8919bdaSduboff {
5299f8919bdaSduboff 	uint8_t		pci_cap_ptr;
5300f8919bdaSduboff 	uint32_t	pmcsr;
5301f8919bdaSduboff 	uint_t		unit;
5302f8919bdaSduboff 	const char	*drv_name;
5303f8919bdaSduboff 
5304f8919bdaSduboff 	ASSERT(new_mode < 4);
5305f8919bdaSduboff 
5306f8919bdaSduboff 	unit = ddi_get_instance(dip);
5307f8919bdaSduboff 	drv_name = ddi_driver_name(dip);
5308f8919bdaSduboff 
5309f8919bdaSduboff 	/* search power management capablities */
5310f8919bdaSduboff 	pci_cap_ptr = gem_search_pci_cap(dip, conf_handle, PCI_CAP_ID_PM);
5311f8919bdaSduboff 
5312f8919bdaSduboff 	if (pci_cap_ptr == 0) {
5313f8919bdaSduboff 		cmn_err(CE_CONT,
5314f8919bdaSduboff 		    "!%s%d: doesn't have pci power management capability",
5315f8919bdaSduboff 		    drv_name, unit);
5316f8919bdaSduboff 		return (DDI_FAILURE);
5317f8919bdaSduboff 	}
5318f8919bdaSduboff 
5319f8919bdaSduboff 	/* read power management capabilities */
5320f8919bdaSduboff 	pmcsr = pci_config_get32(conf_handle, pci_cap_ptr + PCI_PMCSR);
5321f8919bdaSduboff 
5322f8919bdaSduboff 	DPRINTF(0, (CE_CONT,
5323f8919bdaSduboff 	    "!%s%d: pmc found at 0x%x: pmcsr: 0x%08x",
5324f8919bdaSduboff 	    drv_name, unit, pci_cap_ptr, pmcsr));
5325f8919bdaSduboff 
5326f8919bdaSduboff 	/*
5327f8919bdaSduboff 	 * Is the resuested power mode supported?
5328f8919bdaSduboff 	 */
5329f8919bdaSduboff 	/* not yet */
5330f8919bdaSduboff 
5331f8919bdaSduboff 	/*
5332f8919bdaSduboff 	 * move to new mode
5333f8919bdaSduboff 	 */
5334f8919bdaSduboff 	pmcsr = (pmcsr & ~PCI_PMCSR_STATE_MASK) | new_mode;
5335f8919bdaSduboff 	pci_config_put32(conf_handle, pci_cap_ptr + PCI_PMCSR, pmcsr);
5336f8919bdaSduboff 
5337f8919bdaSduboff 	return (DDI_SUCCESS);
5338f8919bdaSduboff }
5339f8919bdaSduboff 
5340f8919bdaSduboff /*
5341f8919bdaSduboff  * select suitable register for by specified address space or register
5342f8919bdaSduboff  * offset in PCI config space
5343f8919bdaSduboff  */
5344f8919bdaSduboff int
gem_pci_regs_map_setup(dev_info_t * dip,uint32_t which,uint32_t mask,struct ddi_device_acc_attr * attrp,caddr_t * basep,ddi_acc_handle_t * hp)5345f8919bdaSduboff gem_pci_regs_map_setup(dev_info_t *dip, uint32_t which, uint32_t mask,
5346f8919bdaSduboff 	struct ddi_device_acc_attr *attrp,
5347f8919bdaSduboff 	caddr_t *basep, ddi_acc_handle_t *hp)
5348f8919bdaSduboff {
5349f8919bdaSduboff 	struct pci_phys_spec	*regs;
5350f8919bdaSduboff 	uint_t		len;
5351f8919bdaSduboff 	uint_t		unit;
5352f8919bdaSduboff 	uint_t		n;
5353f8919bdaSduboff 	uint_t		i;
5354f8919bdaSduboff 	int		ret;
5355f8919bdaSduboff 	const char	*drv_name;
5356f8919bdaSduboff 
5357f8919bdaSduboff 	unit = ddi_get_instance(dip);
5358f8919bdaSduboff 	drv_name = ddi_driver_name(dip);
5359f8919bdaSduboff 
5360f8919bdaSduboff 	/* Search IO-range or memory-range to be mapped */
5361f8919bdaSduboff 	regs = NULL;
5362f8919bdaSduboff 	len  = 0;
5363f8919bdaSduboff 
5364f8919bdaSduboff 	if ((ret = ddi_prop_lookup_int_array(
5365f8919bdaSduboff 	    DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
5366f8919bdaSduboff 	    "reg", (void *)&regs, &len)) != DDI_PROP_SUCCESS) {
5367f8919bdaSduboff 		cmn_err(CE_WARN,
5368f8919bdaSduboff 		    "!%s%d: failed to get reg property (ret:%d)",
5369f8919bdaSduboff 		    drv_name, unit, ret);
5370f8919bdaSduboff 		return (DDI_FAILURE);
5371f8919bdaSduboff 	}
5372f8919bdaSduboff 	n = len / (sizeof (struct pci_phys_spec) / sizeof (int));
5373f8919bdaSduboff 
5374f8919bdaSduboff 	ASSERT(regs != NULL && len > 0);
5375f8919bdaSduboff 
5376f8919bdaSduboff #if GEM_DEBUG_LEVEL > 0
5377f8919bdaSduboff 	for (i = 0; i < n; i++) {
5378f8919bdaSduboff 		cmn_err(CE_CONT,
5379f8919bdaSduboff 		    "!%s%d: regs[%d]: %08x.%08x.%08x.%08x.%08x",
5380f8919bdaSduboff 		    drv_name, unit, i,
5381f8919bdaSduboff 		    regs[i].pci_phys_hi,
5382f8919bdaSduboff 		    regs[i].pci_phys_mid,
5383f8919bdaSduboff 		    regs[i].pci_phys_low,
5384f8919bdaSduboff 		    regs[i].pci_size_hi,
5385f8919bdaSduboff 		    regs[i].pci_size_low);
5386f8919bdaSduboff 	}
5387f8919bdaSduboff #endif
5388f8919bdaSduboff 	for (i = 0; i < n; i++) {
5389f8919bdaSduboff 		if ((regs[i].pci_phys_hi & mask) == which) {
5390f8919bdaSduboff 			/* it's the requested space */
5391f8919bdaSduboff 			ddi_prop_free(regs);
5392f8919bdaSduboff 			goto address_range_found;
5393f8919bdaSduboff 		}
5394f8919bdaSduboff 	}
5395f8919bdaSduboff 	ddi_prop_free(regs);
5396f8919bdaSduboff 	return (DDI_FAILURE);
5397f8919bdaSduboff 
5398f8919bdaSduboff address_range_found:
5399f8919bdaSduboff 	if ((ret = ddi_regs_map_setup(dip, i, basep, 0, 0, attrp, hp))
5400f8919bdaSduboff 	    != DDI_SUCCESS) {
5401f8919bdaSduboff 		cmn_err(CE_CONT,
5402f8919bdaSduboff 		    "!%s%d: ddi_regs_map_setup failed (ret:%d)",
5403f8919bdaSduboff 		    drv_name, unit, ret);
5404f8919bdaSduboff 	}
5405f8919bdaSduboff 
5406f8919bdaSduboff 	return (ret);
5407f8919bdaSduboff }
5408f8919bdaSduboff 
/*
 * gem_mod_init: module-load helper; registers the driver's dev_ops
 * with the mac framework via mac_init_ops(9F).
 */
void
gem_mod_init(struct dev_ops *dop, char *name)
{
	mac_init_ops(dop, name);
}
5414f8919bdaSduboff 
/*
 * gem_mod_fini: module-unload helper; undoes gem_mod_init() via
 * mac_fini_ops(9F).
 */
void
gem_mod_fini(struct dev_ops *dop)
{
	mac_fini_ops(dop);
}
5420