1f8919bdaSduboff /*
2f8919bdaSduboff * sfe_util.c: general ethernet mac driver framework version 2.6
3f8919bdaSduboff *
423d366e3Sduboff * Copyright (c) 2002-2008 Masayuki Murayama. All rights reserved.
5f8919bdaSduboff *
6f8919bdaSduboff * Redistribution and use in source and binary forms, with or without
7f8919bdaSduboff * modification, are permitted provided that the following conditions are met:
8f8919bdaSduboff *
9f8919bdaSduboff * 1. Redistributions of source code must retain the above copyright notice,
10f8919bdaSduboff * this list of conditions and the following disclaimer.
11f8919bdaSduboff *
12f8919bdaSduboff * 2. Redistributions in binary form must reproduce the above copyright notice,
13f8919bdaSduboff * this list of conditions and the following disclaimer in the documentation
14f8919bdaSduboff * and/or other materials provided with the distribution.
15f8919bdaSduboff *
16f8919bdaSduboff * 3. Neither the name of the author nor the names of its contributors may be
17f8919bdaSduboff * used to endorse or promote products derived from this software without
18f8919bdaSduboff * specific prior written permission.
19f8919bdaSduboff *
20f8919bdaSduboff * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21f8919bdaSduboff * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22f8919bdaSduboff * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23f8919bdaSduboff * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24f8919bdaSduboff * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25f8919bdaSduboff * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26f8919bdaSduboff * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27f8919bdaSduboff * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28f8919bdaSduboff * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29f8919bdaSduboff * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30f8919bdaSduboff * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31f8919bdaSduboff * DAMAGE.
32f8919bdaSduboff */
33f8919bdaSduboff
34f8919bdaSduboff /*
350dc2366fSVenugopal Iyer * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
36da14cebeSEric Cheng * Use is subject to license terms.
37da14cebeSEric Cheng */
38da14cebeSEric Cheng
39da14cebeSEric Cheng /*
40f8919bdaSduboff * System Header files.
41f8919bdaSduboff */
42f8919bdaSduboff #include <sys/types.h>
43f8919bdaSduboff #include <sys/conf.h>
44f8919bdaSduboff #include <sys/debug.h>
45f8919bdaSduboff #include <sys/kmem.h>
46f8919bdaSduboff #include <sys/vtrace.h>
47f8919bdaSduboff #include <sys/ethernet.h>
48f8919bdaSduboff #include <sys/modctl.h>
49f8919bdaSduboff #include <sys/errno.h>
50f8919bdaSduboff #include <sys/ddi.h>
51f8919bdaSduboff #include <sys/sunddi.h>
52f8919bdaSduboff #include <sys/stream.h> /* required for MBLK* */
53f8919bdaSduboff #include <sys/strsun.h> /* required for mionack() */
54f8919bdaSduboff #include <sys/byteorder.h>
55*c5d54b67SRichard Lowe #include <sys/sysmacros.h>
56f8919bdaSduboff #include <sys/pci.h>
57f8919bdaSduboff #include <inet/common.h>
58f8919bdaSduboff #include <inet/led.h>
59f8919bdaSduboff #include <inet/mi.h>
60f8919bdaSduboff #include <inet/nd.h>
61f8919bdaSduboff #include <sys/crc32.h>
62f8919bdaSduboff
63f8919bdaSduboff #include <sys/note.h>
64f8919bdaSduboff
65f8919bdaSduboff #include "sfe_mii.h"
66f8919bdaSduboff #include "sfe_util.h"
67f8919bdaSduboff
68f8919bdaSduboff
69f8919bdaSduboff
70f8919bdaSduboff extern char ident[];
71f8919bdaSduboff
72f8919bdaSduboff /* Debugging support */
73f8919bdaSduboff #ifdef GEM_DEBUG_LEVEL
74f8919bdaSduboff static int gem_debug = GEM_DEBUG_LEVEL;
75f8919bdaSduboff #define DPRINTF(n, args) if (gem_debug > (n)) cmn_err args
76f8919bdaSduboff #else
77f8919bdaSduboff #define DPRINTF(n, args)
78f8919bdaSduboff #undef ASSERT
79f8919bdaSduboff #define ASSERT(x)
80f8919bdaSduboff #endif
81f8919bdaSduboff
82f8919bdaSduboff #define IOC_LINESIZE 0x40 /* Is it right for amd64? */
83f8919bdaSduboff
84f8919bdaSduboff /*
85f8919bdaSduboff * Useful macros and typedefs
86f8919bdaSduboff */
87f8919bdaSduboff #define ROUNDUP(x, a) (((x) + (a) - 1) & ~((a) - 1))
88f8919bdaSduboff
89f8919bdaSduboff #define GET_NET16(p) ((((uint8_t *)(p))[0] << 8)| ((uint8_t *)(p))[1])
90f8919bdaSduboff #define GET_ETHERTYPE(p) GET_NET16(((uint8_t *)(p)) + ETHERADDRL*2)
91f8919bdaSduboff
92f8919bdaSduboff #define GET_IPTYPEv4(p) (((uint8_t *)(p))[sizeof (struct ether_header) + 9])
93f8919bdaSduboff #define GET_IPTYPEv6(p) (((uint8_t *)(p))[sizeof (struct ether_header) + 6])
94f8919bdaSduboff
95f8919bdaSduboff
96f8919bdaSduboff #ifndef INT32_MAX
97f8919bdaSduboff #define INT32_MAX 0x7fffffff
98f8919bdaSduboff #endif
99f8919bdaSduboff
100f8919bdaSduboff #define VTAG_OFF (ETHERADDRL*2)
101f8919bdaSduboff #ifndef VTAG_SIZE
102f8919bdaSduboff #define VTAG_SIZE 4
103f8919bdaSduboff #endif
104f8919bdaSduboff #ifndef VTAG_TPID
105f8919bdaSduboff #define VTAG_TPID 0x8100U
106f8919bdaSduboff #endif
107f8919bdaSduboff
108f8919bdaSduboff #define GET_TXBUF(dp, sn) \
109f8919bdaSduboff &(dp)->tx_buf[SLOT((dp)->tx_slots_base + (sn), (dp)->gc.gc_tx_buf_size)]
110f8919bdaSduboff
111f8919bdaSduboff #define TXFLAG_VTAG(flag) \
112f8919bdaSduboff (((flag) & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT)
113f8919bdaSduboff
114f8919bdaSduboff #define MAXPKTBUF(dp) \
115f8919bdaSduboff ((dp)->mtu + sizeof (struct ether_header) + VTAG_SIZE + ETHERFCSL)
116f8919bdaSduboff
117f8919bdaSduboff #define WATCH_INTERVAL_FAST drv_usectohz(100*1000) /* 100mS */
11823d366e3Sduboff #define BOOLEAN(x) ((x) != 0)
119f8919bdaSduboff
120f8919bdaSduboff /*
121f8919bdaSduboff * Macros to distinct chip generation.
122f8919bdaSduboff */
123f8919bdaSduboff
124f8919bdaSduboff /*
125f8919bdaSduboff * Private functions
126f8919bdaSduboff */
127f8919bdaSduboff static void gem_mii_start(struct gem_dev *);
128f8919bdaSduboff static void gem_mii_stop(struct gem_dev *);
129f8919bdaSduboff
130f8919bdaSduboff /* local buffer management */
131f8919bdaSduboff static void gem_nd_setup(struct gem_dev *dp);
132f8919bdaSduboff static void gem_nd_cleanup(struct gem_dev *dp);
133f8919bdaSduboff static int gem_alloc_memory(struct gem_dev *);
134f8919bdaSduboff static void gem_free_memory(struct gem_dev *);
135f8919bdaSduboff static void gem_init_rx_ring(struct gem_dev *);
136f8919bdaSduboff static void gem_init_tx_ring(struct gem_dev *);
137f8919bdaSduboff __INLINE__ static void gem_append_rxbuf(struct gem_dev *, struct rxbuf *);
138f8919bdaSduboff
139f8919bdaSduboff static void gem_tx_timeout(struct gem_dev *);
140f8919bdaSduboff static void gem_mii_link_watcher(struct gem_dev *dp);
141f8919bdaSduboff static int gem_mac_init(struct gem_dev *dp);
142f8919bdaSduboff static int gem_mac_start(struct gem_dev *dp);
143f8919bdaSduboff static int gem_mac_stop(struct gem_dev *dp, uint_t flags);
144f8919bdaSduboff static void gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp);
145f8919bdaSduboff
146f8919bdaSduboff static struct ether_addr gem_etherbroadcastaddr = {
147f8919bdaSduboff 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
148f8919bdaSduboff };
149f8919bdaSduboff
150f8919bdaSduboff int gem_speed_value[] = {10, 100, 1000};
151f8919bdaSduboff
152f8919bdaSduboff /* ============================================================== */
153f8919bdaSduboff /*
154f8919bdaSduboff * Misc runtime routines
155f8919bdaSduboff */
156f8919bdaSduboff /* ============================================================== */
157f8919bdaSduboff /*
158f8919bdaSduboff * Ether CRC calculation according to 21143 data sheet
159f8919bdaSduboff */
160f8919bdaSduboff uint32_t
gem_ether_crc_le(const uint8_t * addr,int len)161f8919bdaSduboff gem_ether_crc_le(const uint8_t *addr, int len)
162f8919bdaSduboff {
163f8919bdaSduboff uint32_t crc;
164f8919bdaSduboff
165f8919bdaSduboff CRC32(crc, addr, ETHERADDRL, 0xffffffffU, crc32_table);
166f8919bdaSduboff return (crc);
167f8919bdaSduboff }
168f8919bdaSduboff
169f8919bdaSduboff uint32_t
gem_ether_crc_be(const uint8_t * addr,int len)170f8919bdaSduboff gem_ether_crc_be(const uint8_t *addr, int len)
171f8919bdaSduboff {
172f8919bdaSduboff int idx;
173f8919bdaSduboff int bit;
174f8919bdaSduboff uint_t data;
175f8919bdaSduboff uint32_t crc;
176f8919bdaSduboff #define CRC32_POLY_BE 0x04c11db7
177f8919bdaSduboff
178f8919bdaSduboff crc = 0xffffffff;
179f8919bdaSduboff for (idx = 0; idx < len; idx++) {
180f8919bdaSduboff for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
181f8919bdaSduboff crc = (crc << 1)
182f8919bdaSduboff ^ ((((crc >> 31) ^ data) & 1) ? CRC32_POLY_BE : 0);
183f8919bdaSduboff }
184f8919bdaSduboff }
185f8919bdaSduboff return (crc);
186f8919bdaSduboff #undef CRC32_POLY_BE
187f8919bdaSduboff }
188f8919bdaSduboff
189f8919bdaSduboff int
gem_prop_get_int(struct gem_dev * dp,char * prop_template,int def_val)190f8919bdaSduboff gem_prop_get_int(struct gem_dev *dp, char *prop_template, int def_val)
191f8919bdaSduboff {
192f8919bdaSduboff char propname[32];
193f8919bdaSduboff
194f8919bdaSduboff (void) sprintf(propname, prop_template, dp->name);
195f8919bdaSduboff
196f8919bdaSduboff return (ddi_prop_get_int(DDI_DEV_T_ANY, dp->dip,
197f8919bdaSduboff DDI_PROP_DONTPASS, propname, def_val));
198f8919bdaSduboff }
199f8919bdaSduboff
200f8919bdaSduboff static int
gem_population(uint32_t x)201f8919bdaSduboff gem_population(uint32_t x)
202f8919bdaSduboff {
203f8919bdaSduboff int i;
204f8919bdaSduboff int cnt;
205f8919bdaSduboff
206f8919bdaSduboff cnt = 0;
207f8919bdaSduboff for (i = 0; i < 32; i++) {
208f8919bdaSduboff if (x & (1 << i)) {
209f8919bdaSduboff cnt++;
210f8919bdaSduboff }
211f8919bdaSduboff }
212f8919bdaSduboff return (cnt);
213f8919bdaSduboff }
214f8919bdaSduboff
21523d366e3Sduboff #ifdef GEM_DEBUG_LEVEL
21623d366e3Sduboff #ifdef GEM_DEBUG_VLAN
217f8919bdaSduboff static void
gem_dump_packet(struct gem_dev * dp,char * title,mblk_t * mp,boolean_t check_cksum)21823d366e3Sduboff gem_dump_packet(struct gem_dev *dp, char *title, mblk_t *mp,
21923d366e3Sduboff boolean_t check_cksum)
220f8919bdaSduboff {
22123d366e3Sduboff char msg[180];
22223d366e3Sduboff uint8_t buf[18+20+20];
22323d366e3Sduboff uint8_t *p;
22423d366e3Sduboff size_t offset;
22523d366e3Sduboff uint_t ethertype;
22623d366e3Sduboff uint_t proto;
22723d366e3Sduboff uint_t ipproto = 0;
22823d366e3Sduboff uint_t iplen;
22923d366e3Sduboff uint_t iphlen;
23023d366e3Sduboff uint_t tcplen;
23123d366e3Sduboff uint_t udplen;
23223d366e3Sduboff uint_t cksum;
23323d366e3Sduboff int rest;
23423d366e3Sduboff int len;
23523d366e3Sduboff char *bp;
23623d366e3Sduboff mblk_t *tp;
23723d366e3Sduboff extern uint_t ip_cksum(mblk_t *, int, uint32_t);
238f8919bdaSduboff
23923d366e3Sduboff msg[0] = 0;
24023d366e3Sduboff bp = msg;
241f8919bdaSduboff
24223d366e3Sduboff rest = sizeof (buf);
24323d366e3Sduboff offset = 0;
24423d366e3Sduboff for (tp = mp; tp; tp = tp->b_cont) {
24523d366e3Sduboff len = tp->b_wptr - tp->b_rptr;
24623d366e3Sduboff len = min(rest, len);
24723d366e3Sduboff bcopy(tp->b_rptr, &buf[offset], len);
24823d366e3Sduboff rest -= len;
24923d366e3Sduboff offset += len;
25023d366e3Sduboff if (rest == 0) {
251f8919bdaSduboff break;
252f8919bdaSduboff }
253f8919bdaSduboff }
25423d366e3Sduboff
25523d366e3Sduboff offset = 0;
25623d366e3Sduboff p = &buf[offset];
25723d366e3Sduboff
25823d366e3Sduboff /* ethernet address */
25923d366e3Sduboff sprintf(bp,
26023d366e3Sduboff "ether: %02x:%02x:%02x:%02x:%02x:%02x"
26123d366e3Sduboff " -> %02x:%02x:%02x:%02x:%02x:%02x",
26223d366e3Sduboff p[6], p[7], p[8], p[9], p[10], p[11],
26323d366e3Sduboff p[0], p[1], p[2], p[3], p[4], p[5]);
26423d366e3Sduboff bp = &msg[strlen(msg)];
26523d366e3Sduboff
26623d366e3Sduboff /* vlag tag and etherrtype */
26723d366e3Sduboff ethertype = GET_ETHERTYPE(p);
26823d366e3Sduboff if (ethertype == VTAG_TPID) {
26923d366e3Sduboff sprintf(bp, " vtag:0x%04x", GET_NET16(&p[14]));
27023d366e3Sduboff bp = &msg[strlen(msg)];
27123d366e3Sduboff
27223d366e3Sduboff offset += VTAG_SIZE;
27323d366e3Sduboff p = &buf[offset];
27423d366e3Sduboff ethertype = GET_ETHERTYPE(p);
27523d366e3Sduboff }
27623d366e3Sduboff sprintf(bp, " type:%04x", ethertype);
27723d366e3Sduboff bp = &msg[strlen(msg)];
27823d366e3Sduboff
27923d366e3Sduboff /* ethernet packet length */
28023d366e3Sduboff sprintf(bp, " mblklen:%d", msgdsize(mp));
28123d366e3Sduboff bp = &msg[strlen(msg)];
28223d366e3Sduboff if (mp->b_cont) {
28323d366e3Sduboff sprintf(bp, "(");
28423d366e3Sduboff bp = &msg[strlen(msg)];
28523d366e3Sduboff for (tp = mp; tp; tp = tp->b_cont) {
28623d366e3Sduboff if (tp == mp) {
28723d366e3Sduboff sprintf(bp, "%d", tp->b_wptr - tp->b_rptr);
28823d366e3Sduboff } else {
28923d366e3Sduboff sprintf(bp, "+%d", tp->b_wptr - tp->b_rptr);
29023d366e3Sduboff }
29123d366e3Sduboff bp = &msg[strlen(msg)];
29223d366e3Sduboff }
29323d366e3Sduboff sprintf(bp, ")");
29423d366e3Sduboff bp = &msg[strlen(msg)];
29523d366e3Sduboff }
29623d366e3Sduboff
29723d366e3Sduboff if (ethertype != ETHERTYPE_IP) {
29823d366e3Sduboff goto x;
29923d366e3Sduboff }
30023d366e3Sduboff
30123d366e3Sduboff /* ip address */
30223d366e3Sduboff offset += sizeof (struct ether_header);
30323d366e3Sduboff p = &buf[offset];
30423d366e3Sduboff ipproto = p[9];
30523d366e3Sduboff iplen = GET_NET16(&p[2]);
30623d366e3Sduboff sprintf(bp, ", ip: %d.%d.%d.%d -> %d.%d.%d.%d proto:%d iplen:%d",
30723d366e3Sduboff p[12], p[13], p[14], p[15],
30823d366e3Sduboff p[16], p[17], p[18], p[19],
30923d366e3Sduboff ipproto, iplen);
31023d366e3Sduboff bp = (void *)&msg[strlen(msg)];
31123d366e3Sduboff
31223d366e3Sduboff iphlen = (p[0] & 0xf) * 4;
31323d366e3Sduboff
31423d366e3Sduboff /* cksum for psuedo header */
31523d366e3Sduboff cksum = *(uint16_t *)&p[12];
31623d366e3Sduboff cksum += *(uint16_t *)&p[14];
31723d366e3Sduboff cksum += *(uint16_t *)&p[16];
31823d366e3Sduboff cksum += *(uint16_t *)&p[18];
31923d366e3Sduboff cksum += BE_16(ipproto);
32023d366e3Sduboff
32123d366e3Sduboff /* tcp or udp protocol header */
32223d366e3Sduboff offset += iphlen;
32323d366e3Sduboff p = &buf[offset];
32423d366e3Sduboff if (ipproto == IPPROTO_TCP) {
32523d366e3Sduboff tcplen = iplen - iphlen;
32623d366e3Sduboff sprintf(bp, ", tcp: len:%d cksum:%x",
32723d366e3Sduboff tcplen, GET_NET16(&p[16]));
32823d366e3Sduboff bp = (void *)&msg[strlen(msg)];
32923d366e3Sduboff
33023d366e3Sduboff if (check_cksum) {
33123d366e3Sduboff cksum += BE_16(tcplen);
33223d366e3Sduboff cksum = (uint16_t)ip_cksum(mp, offset, cksum);
33323d366e3Sduboff sprintf(bp, " (%s)",
33423d366e3Sduboff (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
33523d366e3Sduboff bp = (void *)&msg[strlen(msg)];
33623d366e3Sduboff }
33723d366e3Sduboff } else if (ipproto == IPPROTO_UDP) {
33823d366e3Sduboff udplen = GET_NET16(&p[4]);
33923d366e3Sduboff sprintf(bp, ", udp: len:%d cksum:%x",
34023d366e3Sduboff udplen, GET_NET16(&p[6]));
34123d366e3Sduboff bp = (void *)&msg[strlen(msg)];
34223d366e3Sduboff
34323d366e3Sduboff if (GET_NET16(&p[6]) && check_cksum) {
34423d366e3Sduboff cksum += *(uint16_t *)&p[4];
34523d366e3Sduboff cksum = (uint16_t)ip_cksum(mp, offset, cksum);
34623d366e3Sduboff sprintf(bp, " (%s)",
34723d366e3Sduboff (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
34823d366e3Sduboff bp = (void *)&msg[strlen(msg)];
34923d366e3Sduboff }
35023d366e3Sduboff }
35123d366e3Sduboff x:
35223d366e3Sduboff cmn_err(CE_CONT, "!%s: %s: %s", dp->name, title, msg);
35323d366e3Sduboff }
35423d366e3Sduboff #endif /* GEM_DEBUG_VLAN */
35523d366e3Sduboff #endif /* GEM_DEBUG_LEVEL */
35623d366e3Sduboff
357f8919bdaSduboff /* ============================================================== */
358f8919bdaSduboff /*
359f8919bdaSduboff * IO cache flush
360f8919bdaSduboff */
361f8919bdaSduboff /* ============================================================== */
362f8919bdaSduboff __INLINE__ void
gem_rx_desc_dma_sync(struct gem_dev * dp,int head,int nslot,int how)363f8919bdaSduboff gem_rx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
364f8919bdaSduboff {
365f8919bdaSduboff int n;
366f8919bdaSduboff int m;
367f8919bdaSduboff int rx_desc_unit_shift = dp->gc.gc_rx_desc_unit_shift;
368f8919bdaSduboff
369f8919bdaSduboff /* sync active descriptors */
370f8919bdaSduboff if (rx_desc_unit_shift < 0 || nslot == 0) {
371f8919bdaSduboff /* no rx descriptor ring */
372f8919bdaSduboff return;
373f8919bdaSduboff }
374f8919bdaSduboff
375f8919bdaSduboff n = dp->gc.gc_rx_ring_size - head;
376f8919bdaSduboff if ((m = nslot - n) > 0) {
377f8919bdaSduboff (void) ddi_dma_sync(dp->desc_dma_handle,
378f8919bdaSduboff (off_t)0,
379f8919bdaSduboff (size_t)(m << rx_desc_unit_shift),
380f8919bdaSduboff how);
381f8919bdaSduboff nslot = n;
382f8919bdaSduboff }
383f8919bdaSduboff
384f8919bdaSduboff (void) ddi_dma_sync(dp->desc_dma_handle,
385f8919bdaSduboff (off_t)(head << rx_desc_unit_shift),
386f8919bdaSduboff (size_t)(nslot << rx_desc_unit_shift),
387f8919bdaSduboff how);
388f8919bdaSduboff }
389f8919bdaSduboff
390f8919bdaSduboff __INLINE__ void
gem_tx_desc_dma_sync(struct gem_dev * dp,int head,int nslot,int how)391f8919bdaSduboff gem_tx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
392f8919bdaSduboff {
393f8919bdaSduboff int n;
394f8919bdaSduboff int m;
395f8919bdaSduboff int tx_desc_unit_shift = dp->gc.gc_tx_desc_unit_shift;
396f8919bdaSduboff
397f8919bdaSduboff /* sync active descriptors */
398f8919bdaSduboff if (tx_desc_unit_shift < 0 || nslot == 0) {
399f8919bdaSduboff /* no tx descriptor ring */
400f8919bdaSduboff return;
401f8919bdaSduboff }
402f8919bdaSduboff
403f8919bdaSduboff n = dp->gc.gc_tx_ring_size - head;
404f8919bdaSduboff if ((m = nslot - n) > 0) {
405f8919bdaSduboff (void) ddi_dma_sync(dp->desc_dma_handle,
406f8919bdaSduboff (off_t)(dp->tx_ring_dma - dp->rx_ring_dma),
407f8919bdaSduboff (size_t)(m << tx_desc_unit_shift),
408f8919bdaSduboff how);
409f8919bdaSduboff nslot = n;
410f8919bdaSduboff }
411f8919bdaSduboff
412f8919bdaSduboff (void) ddi_dma_sync(dp->desc_dma_handle,
413f8919bdaSduboff (off_t)((head << tx_desc_unit_shift)
414f8919bdaSduboff + (dp->tx_ring_dma - dp->rx_ring_dma)),
415f8919bdaSduboff (size_t)(nslot << tx_desc_unit_shift),
416f8919bdaSduboff how);
417f8919bdaSduboff }
418f8919bdaSduboff
419f8919bdaSduboff static void
gem_rx_start_default(struct gem_dev * dp,int head,int nslot)420f8919bdaSduboff gem_rx_start_default(struct gem_dev *dp, int head, int nslot)
421f8919bdaSduboff {
422f8919bdaSduboff gem_rx_desc_dma_sync(dp,
423f8919bdaSduboff SLOT(head, dp->gc.gc_rx_ring_size), nslot,
424f8919bdaSduboff DDI_DMA_SYNC_FORDEV);
425f8919bdaSduboff }
426f8919bdaSduboff
427f8919bdaSduboff /* ============================================================== */
428f8919bdaSduboff /*
429f8919bdaSduboff * Buffer management
430f8919bdaSduboff */
431f8919bdaSduboff /* ============================================================== */
432f8919bdaSduboff static void
gem_dump_txbuf(struct gem_dev * dp,int level,const char * title)433f8919bdaSduboff gem_dump_txbuf(struct gem_dev *dp, int level, const char *title)
434f8919bdaSduboff {
435f8919bdaSduboff cmn_err(level,
436f8919bdaSduboff "!%s: %s: tx_active: %d[%d] %d[%d] (+%d), "
437f8919bdaSduboff "tx_softq: %d[%d] %d[%d] (+%d), "
438f8919bdaSduboff "tx_free: %d[%d] %d[%d] (+%d), "
439f8919bdaSduboff "tx_desc: %d[%d] %d[%d] (+%d), "
44023d366e3Sduboff "intr: %d[%d] (+%d), ",
441f8919bdaSduboff dp->name, title,
442f8919bdaSduboff dp->tx_active_head,
443f8919bdaSduboff SLOT(dp->tx_active_head, dp->gc.gc_tx_buf_size),
444f8919bdaSduboff dp->tx_active_tail,
445f8919bdaSduboff SLOT(dp->tx_active_tail, dp->gc.gc_tx_buf_size),
446f8919bdaSduboff dp->tx_active_tail - dp->tx_active_head,
447f8919bdaSduboff dp->tx_softq_head,
448f8919bdaSduboff SLOT(dp->tx_softq_head, dp->gc.gc_tx_buf_size),
449f8919bdaSduboff dp->tx_softq_tail,
450f8919bdaSduboff SLOT(dp->tx_softq_tail, dp->gc.gc_tx_buf_size),
451f8919bdaSduboff dp->tx_softq_tail - dp->tx_softq_head,
452f8919bdaSduboff dp->tx_free_head,
453f8919bdaSduboff SLOT(dp->tx_free_head, dp->gc.gc_tx_buf_size),
454f8919bdaSduboff dp->tx_free_tail,
455f8919bdaSduboff SLOT(dp->tx_free_tail, dp->gc.gc_tx_buf_size),
456f8919bdaSduboff dp->tx_free_tail - dp->tx_free_head,
457f8919bdaSduboff dp->tx_desc_head,
458f8919bdaSduboff SLOT(dp->tx_desc_head, dp->gc.gc_tx_ring_size),
459f8919bdaSduboff dp->tx_desc_tail,
460f8919bdaSduboff SLOT(dp->tx_desc_tail, dp->gc.gc_tx_ring_size),
461f8919bdaSduboff dp->tx_desc_tail - dp->tx_desc_head,
462f8919bdaSduboff dp->tx_desc_intr,
463f8919bdaSduboff SLOT(dp->tx_desc_intr, dp->gc.gc_tx_ring_size),
464f8919bdaSduboff dp->tx_desc_intr - dp->tx_desc_head);
465f8919bdaSduboff }
466f8919bdaSduboff
467f8919bdaSduboff static void
gem_free_rxbuf(struct rxbuf * rbp)468f8919bdaSduboff gem_free_rxbuf(struct rxbuf *rbp)
469f8919bdaSduboff {
470f8919bdaSduboff struct gem_dev *dp;
471f8919bdaSduboff
472f8919bdaSduboff dp = rbp->rxb_devp;
473f8919bdaSduboff ASSERT(mutex_owned(&dp->intrlock));
474f8919bdaSduboff rbp->rxb_next = dp->rx_buf_freelist;
475f8919bdaSduboff dp->rx_buf_freelist = rbp;
476f8919bdaSduboff dp->rx_buf_freecnt++;
477f8919bdaSduboff }
478f8919bdaSduboff
479f8919bdaSduboff /*
480f8919bdaSduboff * gem_get_rxbuf: supply a receive buffer which have been mapped into
481f8919bdaSduboff * DMA space.
482f8919bdaSduboff */
483f8919bdaSduboff struct rxbuf *
gem_get_rxbuf(struct gem_dev * dp,int cansleep)484f8919bdaSduboff gem_get_rxbuf(struct gem_dev *dp, int cansleep)
485f8919bdaSduboff {
486f8919bdaSduboff struct rxbuf *rbp;
487f8919bdaSduboff uint_t count = 0;
488f8919bdaSduboff int i;
489f8919bdaSduboff int err;
490f8919bdaSduboff
491f8919bdaSduboff ASSERT(mutex_owned(&dp->intrlock));
492f8919bdaSduboff
493f8919bdaSduboff DPRINTF(3, (CE_CONT, "!gem_get_rxbuf: called freecnt:%d",
494f8919bdaSduboff dp->rx_buf_freecnt));
495f8919bdaSduboff /*
496f8919bdaSduboff * Get rx buffer management structure
497f8919bdaSduboff */
498f8919bdaSduboff rbp = dp->rx_buf_freelist;
499f8919bdaSduboff if (rbp) {
500f8919bdaSduboff /* get one from the recycle list */
501f8919bdaSduboff ASSERT(dp->rx_buf_freecnt > 0);
502f8919bdaSduboff
503f8919bdaSduboff dp->rx_buf_freelist = rbp->rxb_next;
504f8919bdaSduboff dp->rx_buf_freecnt--;
505f8919bdaSduboff rbp->rxb_next = NULL;
506f8919bdaSduboff return (rbp);
507f8919bdaSduboff }
508f8919bdaSduboff
509f8919bdaSduboff /*
510f8919bdaSduboff * Allocate a rx buffer management structure
511f8919bdaSduboff */
512f8919bdaSduboff rbp = kmem_zalloc(sizeof (*rbp), cansleep ? KM_SLEEP : KM_NOSLEEP);
513f8919bdaSduboff if (rbp == NULL) {
514f8919bdaSduboff /* no memory */
515f8919bdaSduboff return (NULL);
516f8919bdaSduboff }
517f8919bdaSduboff
518f8919bdaSduboff /*
519f8919bdaSduboff * Prepare a back pointer to the device structure which will be
520f8919bdaSduboff * refered on freeing the buffer later.
521f8919bdaSduboff */
522f8919bdaSduboff rbp->rxb_devp = dp;
523f8919bdaSduboff
524f8919bdaSduboff /* allocate a dma handle for rx data buffer */
525f8919bdaSduboff if ((err = ddi_dma_alloc_handle(dp->dip,
526f8919bdaSduboff &dp->gc.gc_dma_attr_rxbuf,
527f8919bdaSduboff (cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT),
528f8919bdaSduboff NULL, &rbp->rxb_dh)) != DDI_SUCCESS) {
529f8919bdaSduboff
530f8919bdaSduboff cmn_err(CE_WARN,
531f8919bdaSduboff "!%s: %s: ddi_dma_alloc_handle:1 failed, err=%d",
532f8919bdaSduboff dp->name, __func__, err);
533f8919bdaSduboff
534f8919bdaSduboff kmem_free(rbp, sizeof (struct rxbuf));
535f8919bdaSduboff return (NULL);
536f8919bdaSduboff }
537f8919bdaSduboff
538f8919bdaSduboff /* allocate a bounce buffer for rx */
539f8919bdaSduboff if ((err = ddi_dma_mem_alloc(rbp->rxb_dh,
540f8919bdaSduboff ROUNDUP(dp->rx_buf_len, IOC_LINESIZE),
541f8919bdaSduboff &dp->gc.gc_buf_attr,
542f8919bdaSduboff /*
543f8919bdaSduboff * if the nic requires a header at the top of receive buffers,
544f8919bdaSduboff * it may access the rx buffer randomly.
545f8919bdaSduboff */
546f8919bdaSduboff (dp->gc.gc_rx_header_len > 0)
547f8919bdaSduboff ? DDI_DMA_CONSISTENT : DDI_DMA_STREAMING,
548f8919bdaSduboff cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
549f8919bdaSduboff NULL,
550f8919bdaSduboff &rbp->rxb_buf, &rbp->rxb_buf_len,
551f8919bdaSduboff &rbp->rxb_bah)) != DDI_SUCCESS) {
552f8919bdaSduboff
553f8919bdaSduboff cmn_err(CE_WARN,
554f8919bdaSduboff "!%s: %s: ddi_dma_mem_alloc: failed, err=%d",
555f8919bdaSduboff dp->name, __func__, err);
556f8919bdaSduboff
557f8919bdaSduboff ddi_dma_free_handle(&rbp->rxb_dh);
558f8919bdaSduboff kmem_free(rbp, sizeof (struct rxbuf));
559f8919bdaSduboff return (NULL);
560f8919bdaSduboff }
561f8919bdaSduboff
562f8919bdaSduboff /* Mapin the bounce buffer into the DMA space */
563f8919bdaSduboff if ((err = ddi_dma_addr_bind_handle(rbp->rxb_dh,
564f8919bdaSduboff NULL, rbp->rxb_buf, dp->rx_buf_len,
565f8919bdaSduboff ((dp->gc.gc_rx_header_len > 0)
566f8919bdaSduboff ?(DDI_DMA_RDWR | DDI_DMA_CONSISTENT)
567f8919bdaSduboff :(DDI_DMA_READ | DDI_DMA_STREAMING)),
568f8919bdaSduboff cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
569f8919bdaSduboff NULL,
570f8919bdaSduboff rbp->rxb_dmacookie,
571f8919bdaSduboff &count)) != DDI_DMA_MAPPED) {
572f8919bdaSduboff
573f8919bdaSduboff ASSERT(err != DDI_DMA_INUSE);
574f8919bdaSduboff DPRINTF(0, (CE_WARN,
575f8919bdaSduboff "!%s: ddi_dma_addr_bind_handle: failed, err=%d",
576f8919bdaSduboff dp->name, __func__, err));
577f8919bdaSduboff
578f8919bdaSduboff /*
579f8919bdaSduboff * we failed to allocate a dma resource
580f8919bdaSduboff * for the rx bounce buffer.
581f8919bdaSduboff */
582f8919bdaSduboff ddi_dma_mem_free(&rbp->rxb_bah);
583f8919bdaSduboff ddi_dma_free_handle(&rbp->rxb_dh);
584f8919bdaSduboff kmem_free(rbp, sizeof (struct rxbuf));
585f8919bdaSduboff return (NULL);
586f8919bdaSduboff }
587f8919bdaSduboff
588f8919bdaSduboff /* correct the rest of the DMA mapping */
589f8919bdaSduboff for (i = 1; i < count; i++) {
590f8919bdaSduboff ddi_dma_nextcookie(rbp->rxb_dh, &rbp->rxb_dmacookie[i]);
591f8919bdaSduboff }
592f8919bdaSduboff rbp->rxb_nfrags = count;
593f8919bdaSduboff
594f8919bdaSduboff /* Now we successfully prepared an rx buffer */
595f8919bdaSduboff dp->rx_buf_allocated++;
596f8919bdaSduboff
597f8919bdaSduboff return (rbp);
598f8919bdaSduboff }
599f8919bdaSduboff
600f8919bdaSduboff /* ============================================================== */
601f8919bdaSduboff /*
602f8919bdaSduboff * memory resource management
603f8919bdaSduboff */
604f8919bdaSduboff /* ============================================================== */
605f8919bdaSduboff static int
gem_alloc_memory(struct gem_dev * dp)606f8919bdaSduboff gem_alloc_memory(struct gem_dev *dp)
607f8919bdaSduboff {
608f8919bdaSduboff caddr_t ring;
609f8919bdaSduboff caddr_t buf;
610f8919bdaSduboff size_t req_size;
611f8919bdaSduboff size_t ring_len;
612f8919bdaSduboff size_t buf_len;
613f8919bdaSduboff ddi_dma_cookie_t ring_cookie;
614f8919bdaSduboff ddi_dma_cookie_t buf_cookie;
615f8919bdaSduboff uint_t count;
616f8919bdaSduboff int i;
617f8919bdaSduboff int err;
618f8919bdaSduboff struct txbuf *tbp;
619f8919bdaSduboff int tx_buf_len;
620f8919bdaSduboff ddi_dma_attr_t dma_attr_txbounce;
621f8919bdaSduboff
622f8919bdaSduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
623f8919bdaSduboff
624f8919bdaSduboff dp->desc_dma_handle = NULL;
625f8919bdaSduboff req_size = dp->rx_desc_size + dp->tx_desc_size + dp->gc.gc_io_area_size;
626f8919bdaSduboff
627f8919bdaSduboff if (req_size > 0) {
628f8919bdaSduboff /*
629f8919bdaSduboff * Alloc RX/TX descriptors and a io area.
630f8919bdaSduboff */
631f8919bdaSduboff if ((err = ddi_dma_alloc_handle(dp->dip,
632f8919bdaSduboff &dp->gc.gc_dma_attr_desc,
633f8919bdaSduboff DDI_DMA_SLEEP, NULL,
634f8919bdaSduboff &dp->desc_dma_handle)) != DDI_SUCCESS) {
635f8919bdaSduboff cmn_err(CE_WARN,
636f8919bdaSduboff "!%s: %s: ddi_dma_alloc_handle failed: %d",
637f8919bdaSduboff dp->name, __func__, err);
638f8919bdaSduboff return (ENOMEM);
639f8919bdaSduboff }
640f8919bdaSduboff
641f8919bdaSduboff if ((err = ddi_dma_mem_alloc(dp->desc_dma_handle,
642f8919bdaSduboff req_size, &dp->gc.gc_desc_attr,
643f8919bdaSduboff DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
644f8919bdaSduboff &ring, &ring_len,
645f8919bdaSduboff &dp->desc_acc_handle)) != DDI_SUCCESS) {
646f8919bdaSduboff cmn_err(CE_WARN,
647f8919bdaSduboff "!%s: %s: ddi_dma_mem_alloc failed: "
648f8919bdaSduboff "ret %d, request size: %d",
649f8919bdaSduboff dp->name, __func__, err, (int)req_size);
650f8919bdaSduboff ddi_dma_free_handle(&dp->desc_dma_handle);
651f8919bdaSduboff return (ENOMEM);
652f8919bdaSduboff }
653f8919bdaSduboff
654f8919bdaSduboff if ((err = ddi_dma_addr_bind_handle(dp->desc_dma_handle,
655f8919bdaSduboff NULL, ring, ring_len,
656f8919bdaSduboff DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
657f8919bdaSduboff DDI_DMA_SLEEP, NULL,
658f8919bdaSduboff &ring_cookie, &count)) != DDI_SUCCESS) {
659f8919bdaSduboff ASSERT(err != DDI_DMA_INUSE);
660f8919bdaSduboff cmn_err(CE_WARN,
661f8919bdaSduboff "!%s: %s: ddi_dma_addr_bind_handle failed: %d",
662f8919bdaSduboff dp->name, __func__, err);
663f8919bdaSduboff ddi_dma_mem_free(&dp->desc_acc_handle);
664f8919bdaSduboff ddi_dma_free_handle(&dp->desc_dma_handle);
665f8919bdaSduboff return (ENOMEM);
666f8919bdaSduboff }
667f8919bdaSduboff ASSERT(count == 1);
668f8919bdaSduboff
669f8919bdaSduboff /* set base of rx descriptor ring */
670f8919bdaSduboff dp->rx_ring = ring;
671f8919bdaSduboff dp->rx_ring_dma = ring_cookie.dmac_laddress;
672f8919bdaSduboff
673f8919bdaSduboff /* set base of tx descriptor ring */
674f8919bdaSduboff dp->tx_ring = dp->rx_ring + dp->rx_desc_size;
675f8919bdaSduboff dp->tx_ring_dma = dp->rx_ring_dma + dp->rx_desc_size;
676f8919bdaSduboff
677f8919bdaSduboff /* set base of io area */
678f8919bdaSduboff dp->io_area = dp->tx_ring + dp->tx_desc_size;
679f8919bdaSduboff dp->io_area_dma = dp->tx_ring_dma + dp->tx_desc_size;
680f8919bdaSduboff }
681f8919bdaSduboff
682f8919bdaSduboff /*
683f8919bdaSduboff * Prepare DMA resources for tx packets
684f8919bdaSduboff */
685f8919bdaSduboff ASSERT(dp->gc.gc_tx_buf_size > 0);
686f8919bdaSduboff
687f8919bdaSduboff /* Special dma attribute for tx bounce buffers */
688f8919bdaSduboff dma_attr_txbounce = dp->gc.gc_dma_attr_txbuf;
689f8919bdaSduboff dma_attr_txbounce.dma_attr_sgllen = 1;
690f8919bdaSduboff dma_attr_txbounce.dma_attr_align =
691f8919bdaSduboff max(dma_attr_txbounce.dma_attr_align, IOC_LINESIZE);
692f8919bdaSduboff
693f8919bdaSduboff /* Size for tx bounce buffers must be max tx packet size. */
694f8919bdaSduboff tx_buf_len = MAXPKTBUF(dp);
695f8919bdaSduboff tx_buf_len = ROUNDUP(tx_buf_len, IOC_LINESIZE);
696f8919bdaSduboff
697f8919bdaSduboff ASSERT(tx_buf_len >= ETHERMAX+ETHERFCSL);
698f8919bdaSduboff
699f8919bdaSduboff for (i = 0, tbp = dp->tx_buf;
700f8919bdaSduboff i < dp->gc.gc_tx_buf_size; i++, tbp++) {
701f8919bdaSduboff
702f8919bdaSduboff /* setup bounce buffers for tx packets */
703f8919bdaSduboff if ((err = ddi_dma_alloc_handle(dp->dip,
704f8919bdaSduboff &dma_attr_txbounce,
705f8919bdaSduboff DDI_DMA_SLEEP, NULL,
706f8919bdaSduboff &tbp->txb_bdh)) != DDI_SUCCESS) {
707f8919bdaSduboff
708f8919bdaSduboff cmn_err(CE_WARN,
709f8919bdaSduboff "!%s: %s ddi_dma_alloc_handle for bounce buffer failed:"
710f8919bdaSduboff " err=%d, i=%d",
711f8919bdaSduboff dp->name, __func__, err, i);
712f8919bdaSduboff goto err_alloc_dh;
713f8919bdaSduboff }
714f8919bdaSduboff
715f8919bdaSduboff if ((err = ddi_dma_mem_alloc(tbp->txb_bdh,
716f8919bdaSduboff tx_buf_len,
717f8919bdaSduboff &dp->gc.gc_buf_attr,
718f8919bdaSduboff DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
719f8919bdaSduboff &buf, &buf_len,
720f8919bdaSduboff &tbp->txb_bah)) != DDI_SUCCESS) {
721f8919bdaSduboff cmn_err(CE_WARN,
722f8919bdaSduboff "!%s: %s: ddi_dma_mem_alloc for bounce buffer failed"
723f8919bdaSduboff "ret %d, request size %d",
724f8919bdaSduboff dp->name, __func__, err, tx_buf_len);
725f8919bdaSduboff ddi_dma_free_handle(&tbp->txb_bdh);
726f8919bdaSduboff goto err_alloc_dh;
727f8919bdaSduboff }
728f8919bdaSduboff
729f8919bdaSduboff if ((err = ddi_dma_addr_bind_handle(tbp->txb_bdh,
730f8919bdaSduboff NULL, buf, buf_len,
731f8919bdaSduboff DDI_DMA_WRITE | DDI_DMA_STREAMING,
732f8919bdaSduboff DDI_DMA_SLEEP, NULL,
733f8919bdaSduboff &buf_cookie, &count)) != DDI_SUCCESS) {
734f8919bdaSduboff ASSERT(err != DDI_DMA_INUSE);
735f8919bdaSduboff cmn_err(CE_WARN,
736f8919bdaSduboff "!%s: %s: ddi_dma_addr_bind_handle for bounce buffer failed: %d",
737f8919bdaSduboff dp->name, __func__, err);
738f8919bdaSduboff ddi_dma_mem_free(&tbp->txb_bah);
739f8919bdaSduboff ddi_dma_free_handle(&tbp->txb_bdh);
740f8919bdaSduboff goto err_alloc_dh;
741f8919bdaSduboff }
742f8919bdaSduboff ASSERT(count == 1);
743f8919bdaSduboff tbp->txb_buf = buf;
744f8919bdaSduboff tbp->txb_buf_dma = buf_cookie.dmac_laddress;
745f8919bdaSduboff }
746f8919bdaSduboff
747f8919bdaSduboff return (0);
748f8919bdaSduboff
749f8919bdaSduboff err_alloc_dh:
750f8919bdaSduboff if (dp->gc.gc_tx_buf_size > 0) {
751f8919bdaSduboff while (i-- > 0) {
752f8919bdaSduboff (void) ddi_dma_unbind_handle(dp->tx_buf[i].txb_bdh);
753f8919bdaSduboff ddi_dma_mem_free(&dp->tx_buf[i].txb_bah);
754f8919bdaSduboff ddi_dma_free_handle(&dp->tx_buf[i].txb_bdh);
755f8919bdaSduboff }
756f8919bdaSduboff }
757f8919bdaSduboff
758f8919bdaSduboff if (dp->desc_dma_handle) {
759f8919bdaSduboff (void) ddi_dma_unbind_handle(dp->desc_dma_handle);
760f8919bdaSduboff ddi_dma_mem_free(&dp->desc_acc_handle);
761f8919bdaSduboff ddi_dma_free_handle(&dp->desc_dma_handle);
762f8919bdaSduboff dp->desc_dma_handle = NULL;
763f8919bdaSduboff }
764f8919bdaSduboff
765f8919bdaSduboff return (ENOMEM);
766f8919bdaSduboff }
767f8919bdaSduboff
768f8919bdaSduboff static void
gem_free_memory(struct gem_dev * dp)769f8919bdaSduboff gem_free_memory(struct gem_dev *dp)
770f8919bdaSduboff {
771f8919bdaSduboff int i;
772f8919bdaSduboff struct rxbuf *rbp;
773f8919bdaSduboff struct txbuf *tbp;
774f8919bdaSduboff
775f8919bdaSduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
776f8919bdaSduboff
777f8919bdaSduboff /* Free TX/RX descriptors and tx padding buffer */
778f8919bdaSduboff if (dp->desc_dma_handle) {
779f8919bdaSduboff (void) ddi_dma_unbind_handle(dp->desc_dma_handle);
780f8919bdaSduboff ddi_dma_mem_free(&dp->desc_acc_handle);
781f8919bdaSduboff ddi_dma_free_handle(&dp->desc_dma_handle);
782f8919bdaSduboff dp->desc_dma_handle = NULL;
783f8919bdaSduboff }
784f8919bdaSduboff
785f8919bdaSduboff /* Free dma handles for Tx */
786f8919bdaSduboff for (i = dp->gc.gc_tx_buf_size, tbp = dp->tx_buf; i--; tbp++) {
787f8919bdaSduboff /* Free bounce buffer associated to each txbuf */
788f8919bdaSduboff (void) ddi_dma_unbind_handle(tbp->txb_bdh);
789f8919bdaSduboff ddi_dma_mem_free(&tbp->txb_bah);
790f8919bdaSduboff ddi_dma_free_handle(&tbp->txb_bdh);
791f8919bdaSduboff }
792f8919bdaSduboff
793f8919bdaSduboff /* Free rx buffer */
794f8919bdaSduboff while ((rbp = dp->rx_buf_freelist) != NULL) {
795f8919bdaSduboff
796f8919bdaSduboff ASSERT(dp->rx_buf_freecnt > 0);
797f8919bdaSduboff
798f8919bdaSduboff dp->rx_buf_freelist = rbp->rxb_next;
799f8919bdaSduboff dp->rx_buf_freecnt--;
800f8919bdaSduboff
801f8919bdaSduboff /* release DMA mapping */
802f8919bdaSduboff ASSERT(rbp->rxb_dh != NULL);
803f8919bdaSduboff
804f8919bdaSduboff /* free dma handles for rx bbuf */
805f8919bdaSduboff /* it has dma mapping always */
806f8919bdaSduboff ASSERT(rbp->rxb_nfrags > 0);
807f8919bdaSduboff (void) ddi_dma_unbind_handle(rbp->rxb_dh);
808f8919bdaSduboff
809f8919bdaSduboff /* free the associated bounce buffer and dma handle */
810f8919bdaSduboff ASSERT(rbp->rxb_bah != NULL);
811f8919bdaSduboff ddi_dma_mem_free(&rbp->rxb_bah);
812f8919bdaSduboff /* free the associated dma handle */
813f8919bdaSduboff ddi_dma_free_handle(&rbp->rxb_dh);
814f8919bdaSduboff
815f8919bdaSduboff /* free the base memory of rx buffer management */
816f8919bdaSduboff kmem_free(rbp, sizeof (struct rxbuf));
817f8919bdaSduboff }
818f8919bdaSduboff }
819f8919bdaSduboff
820f8919bdaSduboff /* ============================================================== */
821f8919bdaSduboff /*
822f8919bdaSduboff * Rx/Tx descriptor slot management
823f8919bdaSduboff */
824f8919bdaSduboff /* ============================================================== */
825f8919bdaSduboff /*
826f8919bdaSduboff * Initialize an empty rx ring.
827f8919bdaSduboff */
828f8919bdaSduboff static void
gem_init_rx_ring(struct gem_dev * dp)829f8919bdaSduboff gem_init_rx_ring(struct gem_dev *dp)
830f8919bdaSduboff {
831f8919bdaSduboff int i;
832f8919bdaSduboff int rx_ring_size = dp->gc.gc_rx_ring_size;
833f8919bdaSduboff
834f8919bdaSduboff DPRINTF(1, (CE_CONT, "!%s: %s ring_size:%d, buf_max:%d",
835f8919bdaSduboff dp->name, __func__,
836f8919bdaSduboff rx_ring_size, dp->gc.gc_rx_buf_max));
837f8919bdaSduboff
838f8919bdaSduboff /* make a physical chain of rx descriptors */
839f8919bdaSduboff for (i = 0; i < rx_ring_size; i++) {
840f8919bdaSduboff (*dp->gc.gc_rx_desc_init)(dp, i);
841f8919bdaSduboff }
842f8919bdaSduboff gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
843f8919bdaSduboff
844f8919bdaSduboff dp->rx_active_head = (seqnum_t)0;
845f8919bdaSduboff dp->rx_active_tail = (seqnum_t)0;
846f8919bdaSduboff
847f8919bdaSduboff ASSERT(dp->rx_buf_head == (struct rxbuf *)NULL);
848f8919bdaSduboff ASSERT(dp->rx_buf_tail == (struct rxbuf *)NULL);
849f8919bdaSduboff }
850f8919bdaSduboff
851f8919bdaSduboff /*
852f8919bdaSduboff * Prepare rx buffers and put them into the rx buffer/descriptor ring.
853f8919bdaSduboff */
854f8919bdaSduboff static void
gem_prepare_rx_buf(struct gem_dev * dp)855f8919bdaSduboff gem_prepare_rx_buf(struct gem_dev *dp)
856f8919bdaSduboff {
857f8919bdaSduboff int i;
858f8919bdaSduboff int nrbuf;
859f8919bdaSduboff struct rxbuf *rbp;
860f8919bdaSduboff
861f8919bdaSduboff ASSERT(mutex_owned(&dp->intrlock));
862f8919bdaSduboff
863f8919bdaSduboff /* Now we have no active buffers in rx ring */
864f8919bdaSduboff
865f8919bdaSduboff nrbuf = min(dp->gc.gc_rx_ring_size, dp->gc.gc_rx_buf_max);
866f8919bdaSduboff for (i = 0; i < nrbuf; i++) {
867f8919bdaSduboff if ((rbp = gem_get_rxbuf(dp, B_TRUE)) == NULL) {
868f8919bdaSduboff break;
869f8919bdaSduboff }
870f8919bdaSduboff gem_append_rxbuf(dp, rbp);
871f8919bdaSduboff }
872f8919bdaSduboff
873f8919bdaSduboff gem_rx_desc_dma_sync(dp,
874f8919bdaSduboff 0, dp->gc.gc_rx_ring_size, DDI_DMA_SYNC_FORDEV);
875f8919bdaSduboff }
876f8919bdaSduboff
877f8919bdaSduboff /*
878f8919bdaSduboff * Reclaim active rx buffers in rx buffer ring.
879f8919bdaSduboff */
880f8919bdaSduboff static void
gem_clean_rx_buf(struct gem_dev * dp)881f8919bdaSduboff gem_clean_rx_buf(struct gem_dev *dp)
882f8919bdaSduboff {
883f8919bdaSduboff int i;
884f8919bdaSduboff struct rxbuf *rbp;
885f8919bdaSduboff int rx_ring_size = dp->gc.gc_rx_ring_size;
886f8919bdaSduboff #ifdef GEM_DEBUG_LEVEL
887f8919bdaSduboff int total;
888f8919bdaSduboff #endif
889f8919bdaSduboff ASSERT(mutex_owned(&dp->intrlock));
890f8919bdaSduboff
891f8919bdaSduboff DPRINTF(2, (CE_CONT, "!%s: %s: %d buffers are free",
892f8919bdaSduboff dp->name, __func__, dp->rx_buf_freecnt));
893f8919bdaSduboff /*
894f8919bdaSduboff * clean up HW descriptors
895f8919bdaSduboff */
896f8919bdaSduboff for (i = 0; i < rx_ring_size; i++) {
897f8919bdaSduboff (*dp->gc.gc_rx_desc_clean)(dp, i);
898f8919bdaSduboff }
899f8919bdaSduboff gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
900f8919bdaSduboff
901f8919bdaSduboff #ifdef GEM_DEBUG_LEVEL
902f8919bdaSduboff total = 0;
903f8919bdaSduboff #endif
904f8919bdaSduboff /*
905f8919bdaSduboff * Reclaim allocated rx buffers
906f8919bdaSduboff */
907f8919bdaSduboff while ((rbp = dp->rx_buf_head) != NULL) {
908f8919bdaSduboff #ifdef GEM_DEBUG_LEVEL
909f8919bdaSduboff total++;
910f8919bdaSduboff #endif
911f8919bdaSduboff /* remove the first one from rx buffer list */
912f8919bdaSduboff dp->rx_buf_head = rbp->rxb_next;
913f8919bdaSduboff
914f8919bdaSduboff /* recycle the rxbuf */
915f8919bdaSduboff gem_free_rxbuf(rbp);
916f8919bdaSduboff }
917f8919bdaSduboff dp->rx_buf_tail = (struct rxbuf *)NULL;
918f8919bdaSduboff
919f8919bdaSduboff DPRINTF(2, (CE_CONT,
920f8919bdaSduboff "!%s: %s: %d buffers freeed, total: %d free",
921f8919bdaSduboff dp->name, __func__, total, dp->rx_buf_freecnt));
922f8919bdaSduboff }
923f8919bdaSduboff
924f8919bdaSduboff /*
925f8919bdaSduboff * Initialize an empty transmit buffer/descriptor ring
926f8919bdaSduboff */
static void
gem_init_tx_ring(struct gem_dev *dp)
{
	int	i;
	int	tx_buf_size = dp->gc.gc_tx_buf_size;
	int	tx_ring_size = dp->gc.gc_tx_ring_size;

	DPRINTF(2, (CE_CONT, "!%s: %s: ring_size:%d, buf_size:%d",
	    dp->name, __func__,
	    dp->gc.gc_tx_ring_size, dp->gc.gc_tx_buf_size));

	/* only valid while the mac is stopped */
	ASSERT(!dp->mac_active);

	/* initialize active list and free list */
	/*
	 * Rebase the slot origin so that the current softq head becomes
	 * sequence number 0, then shift the softq tail by the same amount.
	 * NOTE: the order of these three statements matters; tx_softq_head
	 * is consumed before it is reset.
	 */
	dp->tx_slots_base =
	    SLOT(dp->tx_slots_base + dp->tx_softq_head, tx_buf_size);
	dp->tx_softq_tail -= dp->tx_softq_head;
	dp->tx_softq_head = (seqnum_t)0;

	/* the active list is empty: head == tail */
	dp->tx_active_head = dp->tx_softq_head;
	dp->tx_active_tail = dp->tx_softq_head;

	/* free slots run from the softq tail up to the configured limit */
	dp->tx_free_head = dp->tx_softq_tail;
	dp->tx_free_tail = dp->gc.gc_tx_buf_limit;

	/* hardware descriptor positions restart from zero */
	dp->tx_desc_head = (seqnum_t)0;
	dp->tx_desc_tail = (seqnum_t)0;
	dp->tx_desc_intr = (seqnum_t)0;

	/* initialize every hw tx descriptor and flush them to the device */
	for (i = 0; i < tx_ring_size; i++) {
		(*dp->gc.gc_tx_desc_init)(dp, i);
	}
	gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
}
961f8919bdaSduboff
962f8919bdaSduboff __INLINE__
963f8919bdaSduboff static void
gem_txbuf_free_dma_resources(struct txbuf * tbp)964f8919bdaSduboff gem_txbuf_free_dma_resources(struct txbuf *tbp)
965f8919bdaSduboff {
966f8919bdaSduboff if (tbp->txb_mp) {
967f8919bdaSduboff freemsg(tbp->txb_mp);
968f8919bdaSduboff tbp->txb_mp = NULL;
969f8919bdaSduboff }
970f8919bdaSduboff tbp->txb_nfrags = 0;
97123d366e3Sduboff tbp->txb_flag = 0;
972f8919bdaSduboff }
973f8919bdaSduboff #pragma inline(gem_txbuf_free_dma_resources)
974f8919bdaSduboff
975f8919bdaSduboff /*
976f8919bdaSduboff * reclaim active tx buffers and reset positions in tx rings.
977f8919bdaSduboff */
static void
gem_clean_tx_buf(struct gem_dev *dp)
{
	int		i;
	seqnum_t	head;
	seqnum_t	tail;
	seqnum_t	sn;
	struct txbuf	*tbp;
	int		tx_ring_size = dp->gc.gc_tx_ring_size;
#ifdef GEM_DEBUG_LEVEL
	int		err;
#endif

	/* only valid while the mac is stopped and no sender is running */
	ASSERT(!dp->mac_active);
	ASSERT(dp->tx_busy == 0);
	ASSERT(dp->tx_softq_tail == dp->tx_free_head);

	/*
	 * clean up all HW descriptors
	 */
	for (i = 0; i < tx_ring_size; i++) {
		(*dp->gc.gc_tx_desc_clean)(dp, i);
	}
	gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);

	/* dequeue all active and loaded buffers */
	head = dp->tx_active_head;
	tail = dp->tx_softq_tail;

	ASSERT(dp->tx_free_head - head >= 0);
	tbp = GET_TXBUF(dp, head);
	for (sn = head; sn != tail; sn++) {
		/* these packets were queued but never sent: count as errors */
		gem_txbuf_free_dma_resources(tbp);
		ASSERT(tbp->txb_mp == NULL);
		dp->stats.errxmt++;
		tbp = tbp->txb_next;
	}

#ifdef GEM_DEBUG_LEVEL
	/* ensure that no dma resources for tx remain in use now */
	err = 0;
	while (sn != head + dp->gc.gc_tx_buf_size) {
		if (tbp->txb_mp || tbp->txb_nfrags) {
			DPRINTF(0, (CE_CONT,
			    "%s: %s: sn:%d[%d] mp:%p nfrags:%d",
			    dp->name, __func__,
			    sn, SLOT(sn, dp->gc.gc_tx_buf_size),
			    tbp->txb_mp, tbp->txb_nfrags));
			err = 1;
		}
		sn++;
		tbp = tbp->txb_next;
	}

	if (err) {
		gem_dump_txbuf(dp, CE_WARN,
		    "gem_clean_tx_buf: tbp->txb_mp != NULL");
	}
#endif
	/* recycle buffers, now no active tx buffers in the ring */
	dp->tx_free_tail += tail - head;
	ASSERT(dp->tx_free_tail == dp->tx_free_head + dp->gc.gc_tx_buf_limit);

	/* fix positions in tx buffer rings: everything collapses to one point */
	dp->tx_active_head = dp->tx_free_head;
	dp->tx_active_tail = dp->tx_free_head;
	dp->tx_softq_head = dp->tx_free_head;
	dp->tx_softq_tail = dp->tx_free_head;
}
1047f8919bdaSduboff
1048f8919bdaSduboff /*
1049f8919bdaSduboff * Reclaim transmitted buffers from tx buffer/descriptor ring.
1050f8919bdaSduboff */
1051f8919bdaSduboff __INLINE__ int
gem_reclaim_txbuf(struct gem_dev * dp)1052f8919bdaSduboff gem_reclaim_txbuf(struct gem_dev *dp)
1053f8919bdaSduboff {
1054f8919bdaSduboff struct txbuf *tbp;
1055f8919bdaSduboff uint_t txstat;
1056f8919bdaSduboff int err = GEM_SUCCESS;
1057f8919bdaSduboff seqnum_t head;
1058f8919bdaSduboff seqnum_t tail;
1059f8919bdaSduboff seqnum_t sn;
1060f8919bdaSduboff seqnum_t desc_head;
1061f8919bdaSduboff int tx_ring_size = dp->gc.gc_tx_ring_size;
1062f8919bdaSduboff uint_t (*tx_desc_stat)(struct gem_dev *dp,
1063f8919bdaSduboff int slot, int ndesc) = dp->gc.gc_tx_desc_stat;
106423d366e3Sduboff clock_t now;
106523d366e3Sduboff
106623d366e3Sduboff now = ddi_get_lbolt();
106723d366e3Sduboff if (now == (clock_t)0) {
106823d366e3Sduboff /* make non-zero timestamp */
106923d366e3Sduboff now--;
107023d366e3Sduboff }
1071f8919bdaSduboff
1072f8919bdaSduboff mutex_enter(&dp->xmitlock);
1073f8919bdaSduboff
1074f8919bdaSduboff head = dp->tx_active_head;
1075f8919bdaSduboff tail = dp->tx_active_tail;
1076f8919bdaSduboff
1077f8919bdaSduboff #if GEM_DEBUG_LEVEL > 2
1078f8919bdaSduboff if (head != tail) {
1079f8919bdaSduboff cmn_err(CE_CONT, "!%s: %s: "
1080f8919bdaSduboff "testing active_head:%d[%d], active_tail:%d[%d]",
1081f8919bdaSduboff dp->name, __func__,
1082f8919bdaSduboff head, SLOT(head, dp->gc.gc_tx_buf_size),
1083f8919bdaSduboff tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1084f8919bdaSduboff }
1085f8919bdaSduboff #endif
1086f8919bdaSduboff #ifdef DEBUG
1087f8919bdaSduboff if (dp->tx_reclaim_busy == 0) {
1088f8919bdaSduboff /* check tx buffer management consistency */
1089f8919bdaSduboff ASSERT(dp->tx_free_tail - dp->tx_active_head
1090f8919bdaSduboff == dp->gc.gc_tx_buf_limit);
1091f8919bdaSduboff /* EMPTY */
1092f8919bdaSduboff }
1093f8919bdaSduboff #endif
1094f8919bdaSduboff dp->tx_reclaim_busy++;
1095f8919bdaSduboff
1096f8919bdaSduboff /* sync all active HW descriptors */
1097f8919bdaSduboff gem_tx_desc_dma_sync(dp,
1098f8919bdaSduboff SLOT(dp->tx_desc_head, tx_ring_size),
1099f8919bdaSduboff dp->tx_desc_tail - dp->tx_desc_head,
1100f8919bdaSduboff DDI_DMA_SYNC_FORKERNEL);
1101f8919bdaSduboff
1102f8919bdaSduboff tbp = GET_TXBUF(dp, head);
1103f8919bdaSduboff desc_head = dp->tx_desc_head;
1104f8919bdaSduboff for (sn = head; sn != tail;
1105f8919bdaSduboff dp->tx_active_head = (++sn), tbp = tbp->txb_next) {
1106f8919bdaSduboff int ndescs;
1107f8919bdaSduboff
1108f8919bdaSduboff ASSERT(tbp->txb_desc == desc_head);
1109f8919bdaSduboff
1110f8919bdaSduboff ndescs = tbp->txb_ndescs;
111123d366e3Sduboff if (ndescs == 0) {
111223d366e3Sduboff /* skip errored descriptors */
111323d366e3Sduboff continue;
111423d366e3Sduboff }
1115f8919bdaSduboff txstat = (*tx_desc_stat)(dp,
1116f8919bdaSduboff SLOT(tbp->txb_desc, tx_ring_size), ndescs);
1117f8919bdaSduboff
1118f8919bdaSduboff if (txstat == 0) {
1119f8919bdaSduboff /* not transmitted yet */
1120f8919bdaSduboff break;
1121f8919bdaSduboff }
1122f8919bdaSduboff
112323d366e3Sduboff if (!dp->tx_blocked && (tbp->txb_flag & GEM_TXFLAG_INTR)) {
112423d366e3Sduboff dp->tx_blocked = now;
112523d366e3Sduboff }
112623d366e3Sduboff
1127f8919bdaSduboff ASSERT(txstat & (GEM_TX_DONE | GEM_TX_ERR));
1128f8919bdaSduboff
1129f8919bdaSduboff if (txstat & GEM_TX_ERR) {
1130f8919bdaSduboff err = GEM_FAILURE;
1131f8919bdaSduboff cmn_err(CE_WARN, "!%s: tx error at desc %d[%d]",
1132f8919bdaSduboff dp->name, sn, SLOT(sn, tx_ring_size));
1133f8919bdaSduboff }
1134f8919bdaSduboff #if GEM_DEBUG_LEVEL > 4
1135f8919bdaSduboff if (now - tbp->txb_stime >= 50) {
1136f8919bdaSduboff cmn_err(CE_WARN, "!%s: tx delay while %d mS",
1137f8919bdaSduboff dp->name, (now - tbp->txb_stime)*10);
1138f8919bdaSduboff }
1139f8919bdaSduboff #endif
1140f8919bdaSduboff /* free transmitted descriptors */
1141f8919bdaSduboff desc_head += ndescs;
1142f8919bdaSduboff }
1143f8919bdaSduboff
1144f8919bdaSduboff if (dp->tx_desc_head != desc_head) {
1145f8919bdaSduboff /* we have reclaimed one or more tx buffers */
1146f8919bdaSduboff dp->tx_desc_head = desc_head;
1147f8919bdaSduboff
1148f8919bdaSduboff /* If we passed the next interrupt position, update it */
114923d366e3Sduboff if (desc_head - dp->tx_desc_intr > 0) {
1150f8919bdaSduboff dp->tx_desc_intr = desc_head;
1151f8919bdaSduboff }
1152f8919bdaSduboff }
1153f8919bdaSduboff mutex_exit(&dp->xmitlock);
1154f8919bdaSduboff
1155f8919bdaSduboff /* free dma mapping resources associated with transmitted tx buffers */
1156f8919bdaSduboff tbp = GET_TXBUF(dp, head);
1157f8919bdaSduboff tail = sn;
1158f8919bdaSduboff #if GEM_DEBUG_LEVEL > 2
1159f8919bdaSduboff if (head != tail) {
1160f8919bdaSduboff cmn_err(CE_CONT, "%s: freeing head:%d[%d], tail:%d[%d]",
1161f8919bdaSduboff __func__,
1162f8919bdaSduboff head, SLOT(head, dp->gc.gc_tx_buf_size),
1163f8919bdaSduboff tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1164f8919bdaSduboff }
1165f8919bdaSduboff #endif
1166f8919bdaSduboff for (sn = head; sn != tail; sn++, tbp = tbp->txb_next) {
1167f8919bdaSduboff gem_txbuf_free_dma_resources(tbp);
1168f8919bdaSduboff }
1169f8919bdaSduboff
1170f8919bdaSduboff /* recycle the tx buffers */
1171f8919bdaSduboff mutex_enter(&dp->xmitlock);
1172f8919bdaSduboff if (--dp->tx_reclaim_busy == 0) {
1173f8919bdaSduboff /* we are the last thread who can update free tail */
1174f8919bdaSduboff #if GEM_DEBUG_LEVEL > 4
1175f8919bdaSduboff /* check all resouces have been deallocated */
1176f8919bdaSduboff sn = dp->tx_free_tail;
1177f8919bdaSduboff tbp = GET_TXBUF(dp, new_tail);
1178f8919bdaSduboff while (sn != dp->tx_active_head + dp->gc.gc_tx_buf_limit) {
1179f8919bdaSduboff if (tbp->txb_nfrags) {
1180f8919bdaSduboff /* in use */
1181f8919bdaSduboff break;
1182f8919bdaSduboff }
1183f8919bdaSduboff ASSERT(tbp->txb_mp == NULL);
1184f8919bdaSduboff tbp = tbp->txb_next;
1185f8919bdaSduboff sn++;
1186f8919bdaSduboff }
1187f8919bdaSduboff ASSERT(dp->tx_active_head + dp->gc.gc_tx_buf_limit == sn);
1188f8919bdaSduboff #endif
1189f8919bdaSduboff dp->tx_free_tail =
1190f8919bdaSduboff dp->tx_active_head + dp->gc.gc_tx_buf_limit;
1191f8919bdaSduboff }
1192f8919bdaSduboff if (!dp->mac_active) {
1193f8919bdaSduboff /* someone may be waiting for me. */
1194f8919bdaSduboff cv_broadcast(&dp->tx_drain_cv);
1195f8919bdaSduboff }
1196f8919bdaSduboff #if GEM_DEBUG_LEVEL > 2
1197f8919bdaSduboff cmn_err(CE_CONT, "!%s: %s: called, "
1198f8919bdaSduboff "free_head:%d free_tail:%d(+%d) added:%d",
1199f8919bdaSduboff dp->name, __func__,
1200f8919bdaSduboff dp->tx_free_head, dp->tx_free_tail,
1201f8919bdaSduboff dp->tx_free_tail - dp->tx_free_head, tail - head);
1202f8919bdaSduboff #endif
1203f8919bdaSduboff mutex_exit(&dp->xmitlock);
1204f8919bdaSduboff
1205f8919bdaSduboff return (err);
1206f8919bdaSduboff }
1207f8919bdaSduboff #pragma inline(gem_reclaim_txbuf)
1208f8919bdaSduboff
1209f8919bdaSduboff
1210f8919bdaSduboff /*
1211f8919bdaSduboff * Make tx descriptors in out-of-order manner
1212f8919bdaSduboff */
1213f8919bdaSduboff static void
gem_tx_load_descs_oo(struct gem_dev * dp,seqnum_t start_slot,seqnum_t end_slot,uint64_t flags)1214f8919bdaSduboff gem_tx_load_descs_oo(struct gem_dev *dp,
121523d366e3Sduboff seqnum_t start_slot, seqnum_t end_slot, uint64_t flags)
1216f8919bdaSduboff {
1217f8919bdaSduboff seqnum_t sn;
1218f8919bdaSduboff struct txbuf *tbp;
1219f8919bdaSduboff int tx_ring_size = dp->gc.gc_tx_ring_size;
1220f8919bdaSduboff int (*tx_desc_write)
1221f8919bdaSduboff (struct gem_dev *dp, int slot,
1222f8919bdaSduboff ddi_dma_cookie_t *dmacookie,
1223f8919bdaSduboff int frags, uint64_t flag) = dp->gc.gc_tx_desc_write;
1224f8919bdaSduboff clock_t now = ddi_get_lbolt();
1225f8919bdaSduboff
1226f8919bdaSduboff sn = start_slot;
1227f8919bdaSduboff tbp = GET_TXBUF(dp, sn);
1228f8919bdaSduboff do {
1229f8919bdaSduboff #if GEM_DEBUG_LEVEL > 1
1230f8919bdaSduboff if (dp->tx_cnt < 100) {
1231f8919bdaSduboff dp->tx_cnt++;
1232f8919bdaSduboff flags |= GEM_TXFLAG_INTR;
1233f8919bdaSduboff }
1234f8919bdaSduboff #endif
1235f8919bdaSduboff /* write a tx descriptor */
1236f8919bdaSduboff tbp->txb_desc = sn;
1237f8919bdaSduboff tbp->txb_ndescs = (*tx_desc_write)(dp,
1238f8919bdaSduboff SLOT(sn, tx_ring_size),
1239f8919bdaSduboff tbp->txb_dmacookie,
1240f8919bdaSduboff tbp->txb_nfrags, flags | tbp->txb_flag);
1241f8919bdaSduboff tbp->txb_stime = now;
1242f8919bdaSduboff ASSERT(tbp->txb_ndescs == 1);
1243f8919bdaSduboff
1244f8919bdaSduboff flags = 0;
1245f8919bdaSduboff sn++;
1246f8919bdaSduboff tbp = tbp->txb_next;
1247f8919bdaSduboff } while (sn != end_slot);
1248f8919bdaSduboff }
1249f8919bdaSduboff
1250f8919bdaSduboff __INLINE__
125123d366e3Sduboff static size_t
gem_setup_txbuf_copy(struct gem_dev * dp,mblk_t * mp,struct txbuf * tbp)1252f8919bdaSduboff gem_setup_txbuf_copy(struct gem_dev *dp, mblk_t *mp, struct txbuf *tbp)
1253f8919bdaSduboff {
1254f8919bdaSduboff size_t min_pkt;
1255f8919bdaSduboff caddr_t bp;
1256f8919bdaSduboff size_t off;
1257f8919bdaSduboff mblk_t *tp;
1258f8919bdaSduboff size_t len;
1259f8919bdaSduboff uint64_t flag;
1260f8919bdaSduboff
1261f8919bdaSduboff ASSERT(tbp->txb_mp == NULL);
1262f8919bdaSduboff
1263f8919bdaSduboff /* we use bounce buffer for the packet */
1264f8919bdaSduboff min_pkt = ETHERMIN;
1265f8919bdaSduboff bp = tbp->txb_buf;
1266f8919bdaSduboff off = 0;
1267f8919bdaSduboff tp = mp;
1268f8919bdaSduboff
1269f8919bdaSduboff flag = tbp->txb_flag;
1270f8919bdaSduboff if (flag & GEM_TXFLAG_SWVTAG) {
1271f8919bdaSduboff /* need to increase min packet size */
1272f8919bdaSduboff min_pkt += VTAG_SIZE;
1273f8919bdaSduboff ASSERT((flag & GEM_TXFLAG_VTAG) == 0);
1274f8919bdaSduboff }
1275f8919bdaSduboff
1276f8919bdaSduboff /* copy the rest */
1277f8919bdaSduboff for (; tp; tp = tp->b_cont) {
1278f8919bdaSduboff if ((len = (long)tp->b_wptr - (long)tp->b_rptr) > 0) {
1279f8919bdaSduboff bcopy(tp->b_rptr, &bp[off], len);
1280f8919bdaSduboff off += len;
1281f8919bdaSduboff }
1282f8919bdaSduboff }
1283f8919bdaSduboff
1284f8919bdaSduboff if (off < min_pkt &&
1285f8919bdaSduboff (min_pkt > ETHERMIN || !dp->gc.gc_tx_auto_pad)) {
1286f8919bdaSduboff /*
128723d366e3Sduboff * Extend the packet to minimum packet size explicitly.
1288f8919bdaSduboff * For software vlan packets, we shouldn't use tx autopad
128923d366e3Sduboff * function because nics may not be aware of vlan.
1290f8919bdaSduboff * we must keep 46 octet of payload even if we use vlan.
1291f8919bdaSduboff */
1292f8919bdaSduboff bzero(&bp[off], min_pkt - off);
1293f8919bdaSduboff off = min_pkt;
1294f8919bdaSduboff }
1295f8919bdaSduboff
1296f8919bdaSduboff (void) ddi_dma_sync(tbp->txb_bdh, (off_t)0, off, DDI_DMA_SYNC_FORDEV);
1297f8919bdaSduboff
1298f8919bdaSduboff tbp->txb_dmacookie[0].dmac_laddress = tbp->txb_buf_dma;
1299f8919bdaSduboff tbp->txb_dmacookie[0].dmac_size = off;
1300f8919bdaSduboff
1301f8919bdaSduboff DPRINTF(2, (CE_CONT,
1302f8919bdaSduboff "!%s: %s: copy: addr:0x%llx len:0x%x, vtag:0x%04x, min_pkt:%d",
1303f8919bdaSduboff dp->name, __func__,
1304f8919bdaSduboff tbp->txb_dmacookie[0].dmac_laddress,
1305f8919bdaSduboff tbp->txb_dmacookie[0].dmac_size,
1306f8919bdaSduboff (flag & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT,
1307f8919bdaSduboff min_pkt));
1308f8919bdaSduboff
1309f8919bdaSduboff /* save misc info */
1310f8919bdaSduboff tbp->txb_mp = mp;
1311f8919bdaSduboff tbp->txb_nfrags = 1;
1312f8919bdaSduboff #ifdef DEBUG_MULTIFRAGS
1313f8919bdaSduboff if (dp->gc.gc_tx_max_frags >= 3 &&
1314f8919bdaSduboff tbp->txb_dmacookie[0].dmac_size > 16*3) {
1315f8919bdaSduboff tbp->txb_dmacookie[1].dmac_laddress =
1316f8919bdaSduboff tbp->txb_dmacookie[0].dmac_laddress + 16;
1317f8919bdaSduboff tbp->txb_dmacookie[2].dmac_laddress =
1318f8919bdaSduboff tbp->txb_dmacookie[1].dmac_laddress + 16;
1319f8919bdaSduboff
1320f8919bdaSduboff tbp->txb_dmacookie[2].dmac_size =
1321f8919bdaSduboff tbp->txb_dmacookie[0].dmac_size - 16*2;
1322f8919bdaSduboff tbp->txb_dmacookie[1].dmac_size = 16;
1323f8919bdaSduboff tbp->txb_dmacookie[0].dmac_size = 16;
1324f8919bdaSduboff tbp->txb_nfrags = 3;
1325f8919bdaSduboff }
1326f8919bdaSduboff #endif
132723d366e3Sduboff return (off);
1328f8919bdaSduboff }
1329f8919bdaSduboff #pragma inline(gem_setup_txbuf_copy)
1330f8919bdaSduboff
1331f8919bdaSduboff __INLINE__
1332f8919bdaSduboff static void
gem_tx_start_unit(struct gem_dev * dp)1333f8919bdaSduboff gem_tx_start_unit(struct gem_dev *dp)
1334f8919bdaSduboff {
1335f8919bdaSduboff seqnum_t head;
1336f8919bdaSduboff seqnum_t tail;
1337f8919bdaSduboff struct txbuf *tbp_head;
1338f8919bdaSduboff struct txbuf *tbp_tail;
1339f8919bdaSduboff
1340f8919bdaSduboff /* update HW descriptors from soft queue */
1341f8919bdaSduboff ASSERT(mutex_owned(&dp->xmitlock));
1342f8919bdaSduboff ASSERT(dp->tx_softq_head == dp->tx_active_tail);
1343f8919bdaSduboff
1344f8919bdaSduboff head = dp->tx_softq_head;
1345f8919bdaSduboff tail = dp->tx_softq_tail;
1346f8919bdaSduboff
1347f8919bdaSduboff DPRINTF(1, (CE_CONT,
1348f8919bdaSduboff "%s: %s: called, softq %d %d[+%d], desc %d %d[+%d]",
1349f8919bdaSduboff dp->name, __func__, head, tail, tail - head,
1350f8919bdaSduboff dp->tx_desc_head, dp->tx_desc_tail,
1351f8919bdaSduboff dp->tx_desc_tail - dp->tx_desc_head));
1352f8919bdaSduboff
1353f8919bdaSduboff ASSERT(tail - head > 0);
1354f8919bdaSduboff
1355f8919bdaSduboff dp->tx_desc_tail = tail;
1356f8919bdaSduboff
1357f8919bdaSduboff tbp_head = GET_TXBUF(dp, head);
1358f8919bdaSduboff tbp_tail = GET_TXBUF(dp, tail - 1);
1359f8919bdaSduboff
1360f8919bdaSduboff ASSERT(tbp_tail->txb_desc + tbp_tail->txb_ndescs == dp->tx_desc_tail);
1361f8919bdaSduboff
1362f8919bdaSduboff dp->gc.gc_tx_start(dp,
1363f8919bdaSduboff SLOT(tbp_head->txb_desc, dp->gc.gc_tx_ring_size),
1364f8919bdaSduboff tbp_tail->txb_desc + tbp_tail->txb_ndescs - tbp_head->txb_desc);
1365f8919bdaSduboff
1366f8919bdaSduboff /* advance softq head and active tail */
1367f8919bdaSduboff dp->tx_softq_head = dp->tx_active_tail = tail;
1368f8919bdaSduboff }
1369f8919bdaSduboff #pragma inline(gem_tx_start_unit)
1370f8919bdaSduboff
1371f8919bdaSduboff #ifdef GEM_DEBUG_LEVEL
1372f8919bdaSduboff static int gem_send_cnt[10];
1373f8919bdaSduboff #endif
137423d366e3Sduboff #define PKT_MIN_SIZE (sizeof (struct ether_header) + 10 + VTAG_SIZE)
137523d366e3Sduboff #define EHLEN (sizeof (struct ether_header))
137623d366e3Sduboff /*
137723d366e3Sduboff * check ether packet type and ip protocol
137823d366e3Sduboff */
137923d366e3Sduboff static uint64_t
gem_txbuf_options(struct gem_dev * dp,mblk_t * mp,uint8_t * bp)138023d366e3Sduboff gem_txbuf_options(struct gem_dev *dp, mblk_t *mp, uint8_t *bp)
138123d366e3Sduboff {
138223d366e3Sduboff mblk_t *tp;
138323d366e3Sduboff ssize_t len;
138423d366e3Sduboff uint_t vtag;
138523d366e3Sduboff int off;
138623d366e3Sduboff uint64_t flag;
1387f8919bdaSduboff
138823d366e3Sduboff flag = 0ULL;
138923d366e3Sduboff
139023d366e3Sduboff /*
139123d366e3Sduboff * prepare continuous header of the packet for protocol analysis
139223d366e3Sduboff */
139323d366e3Sduboff if ((long)mp->b_wptr - (long)mp->b_rptr < PKT_MIN_SIZE) {
139423d366e3Sduboff /* we use work buffer to copy mblk */
139523d366e3Sduboff for (tp = mp, off = 0;
139623d366e3Sduboff tp && (off < PKT_MIN_SIZE);
139723d366e3Sduboff tp = tp->b_cont, off += len) {
139823d366e3Sduboff len = (long)tp->b_wptr - (long)tp->b_rptr;
139923d366e3Sduboff len = min(len, PKT_MIN_SIZE - off);
140023d366e3Sduboff bcopy(tp->b_rptr, &bp[off], len);
140123d366e3Sduboff }
140223d366e3Sduboff } else {
140323d366e3Sduboff /* we can use mblk without copy */
140423d366e3Sduboff bp = mp->b_rptr;
140523d366e3Sduboff }
140623d366e3Sduboff
140723d366e3Sduboff /* process vlan tag for GLD v3 */
140823d366e3Sduboff if (GET_NET16(&bp[VTAG_OFF]) == VTAG_TPID) {
140923d366e3Sduboff if (dp->misc_flag & GEM_VLAN_HARD) {
141023d366e3Sduboff vtag = GET_NET16(&bp[VTAG_OFF + 2]);
141123d366e3Sduboff ASSERT(vtag);
141223d366e3Sduboff flag |= vtag << GEM_TXFLAG_VTAG_SHIFT;
141323d366e3Sduboff } else {
141423d366e3Sduboff flag |= GEM_TXFLAG_SWVTAG;
141523d366e3Sduboff }
141623d366e3Sduboff }
141723d366e3Sduboff return (flag);
141823d366e3Sduboff }
141923d366e3Sduboff #undef EHLEN
142023d366e3Sduboff #undef PKT_MIN_SIZE
1421f8919bdaSduboff /*
1422f8919bdaSduboff * gem_send_common is an exported function because hw depend routines may
1423f8919bdaSduboff * use it for sending control frames like setup frames for 2114x chipset.
1424f8919bdaSduboff */
1425f8919bdaSduboff mblk_t *
gem_send_common(struct gem_dev * dp,mblk_t * mp_head,uint32_t flags)1426f8919bdaSduboff gem_send_common(struct gem_dev *dp, mblk_t *mp_head, uint32_t flags)
1427f8919bdaSduboff {
1428f8919bdaSduboff int nmblk;
1429f8919bdaSduboff int avail;
1430f8919bdaSduboff mblk_t *tp;
1431f8919bdaSduboff mblk_t *mp;
143223d366e3Sduboff int i;
1433f8919bdaSduboff struct txbuf *tbp;
1434f8919bdaSduboff seqnum_t head;
1435f8919bdaSduboff uint64_t load_flags;
1436f8919bdaSduboff uint64_t len_total = 0;
143723d366e3Sduboff uint32_t bcast = 0;
143823d366e3Sduboff uint32_t mcast = 0;
1439f8919bdaSduboff
1440f8919bdaSduboff ASSERT(mp_head != NULL);
1441f8919bdaSduboff
1442f8919bdaSduboff mp = mp_head;
1443f8919bdaSduboff nmblk = 1;
1444f8919bdaSduboff while ((mp = mp->b_next) != NULL) {
1445f8919bdaSduboff nmblk++;
1446f8919bdaSduboff }
1447f8919bdaSduboff #ifdef GEM_DEBUG_LEVEL
1448f8919bdaSduboff gem_send_cnt[0]++;
1449f8919bdaSduboff gem_send_cnt[min(nmblk, 9)]++;
1450f8919bdaSduboff #endif
1451f8919bdaSduboff /*
1452f8919bdaSduboff 	 * Acquire resources
1453f8919bdaSduboff */
1454f8919bdaSduboff mutex_enter(&dp->xmitlock);
1455f8919bdaSduboff if (dp->mac_suspended) {
1456f8919bdaSduboff mutex_exit(&dp->xmitlock);
1457f8919bdaSduboff mp = mp_head;
1458f8919bdaSduboff while (mp) {
1459f8919bdaSduboff tp = mp->b_next;
1460f8919bdaSduboff freemsg(mp);
1461f8919bdaSduboff mp = tp;
1462f8919bdaSduboff }
1463f8919bdaSduboff return (NULL);
1464f8919bdaSduboff }
1465f8919bdaSduboff
1466f8919bdaSduboff if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
1467f8919bdaSduboff /* don't send data packets while mac isn't active */
146823d366e3Sduboff /* XXX - should we discard packets? */
1469f8919bdaSduboff mutex_exit(&dp->xmitlock);
1470f8919bdaSduboff return (mp_head);
1471f8919bdaSduboff }
1472f8919bdaSduboff
1473f8919bdaSduboff /* allocate free slots */
1474f8919bdaSduboff head = dp->tx_free_head;
1475f8919bdaSduboff avail = dp->tx_free_tail - head;
1476f8919bdaSduboff
1477f8919bdaSduboff DPRINTF(2, (CE_CONT,
1478f8919bdaSduboff "!%s: %s: called, free_head:%d free_tail:%d(+%d) req:%d",
1479f8919bdaSduboff dp->name, __func__,
1480f8919bdaSduboff dp->tx_free_head, dp->tx_free_tail, avail, nmblk));
1481f8919bdaSduboff
148223d366e3Sduboff avail = min(avail, dp->tx_max_packets);
1483f8919bdaSduboff
1484f8919bdaSduboff if (nmblk > avail) {
1485f8919bdaSduboff if (avail == 0) {
1486f8919bdaSduboff /* no resources; short cut */
1487f8919bdaSduboff DPRINTF(2, (CE_CONT, "!%s: no resources", __func__));
148823d366e3Sduboff dp->tx_max_packets = max(dp->tx_max_packets - 1, 1);
1489f8919bdaSduboff goto done;
1490f8919bdaSduboff }
1491f8919bdaSduboff nmblk = avail;
1492f8919bdaSduboff }
1493f8919bdaSduboff
1494f8919bdaSduboff dp->tx_free_head = head + nmblk;
1495f8919bdaSduboff load_flags = ((dp->tx_busy++) == 0) ? GEM_TXFLAG_HEAD : 0;
1496f8919bdaSduboff
149723d366e3Sduboff /* update last interrupt position if tx buffers exhaust. */
149823d366e3Sduboff if (nmblk == avail) {
149923d366e3Sduboff tbp = GET_TXBUF(dp, head + avail - 1);
150023d366e3Sduboff tbp->txb_flag = GEM_TXFLAG_INTR;
150123d366e3Sduboff dp->tx_desc_intr = head + avail;
1502f8919bdaSduboff }
1503f8919bdaSduboff mutex_exit(&dp->xmitlock);
1504f8919bdaSduboff
1505f8919bdaSduboff tbp = GET_TXBUF(dp, head);
1506f8919bdaSduboff
150723d366e3Sduboff for (i = nmblk; i > 0; i--, tbp = tbp->txb_next) {
1508f8919bdaSduboff uint8_t *bp;
150923d366e3Sduboff uint64_t txflag;
1510f8919bdaSduboff
1511f8919bdaSduboff /* remove one from the mblk list */
1512f8919bdaSduboff ASSERT(mp_head != NULL);
1513f8919bdaSduboff mp = mp_head;
1514f8919bdaSduboff mp_head = mp_head->b_next;
1515f8919bdaSduboff mp->b_next = NULL;
1516f8919bdaSduboff
1517f8919bdaSduboff /* statistics for non-unicast packets */
151823d366e3Sduboff bp = mp->b_rptr;
151923d366e3Sduboff if ((bp[0] & 1) && (flags & GEM_SEND_CTRL) == 0) {
1520f8919bdaSduboff if (bcmp(bp, gem_etherbroadcastaddr.ether_addr_octet,
1521f8919bdaSduboff ETHERADDRL) == 0) {
152223d366e3Sduboff bcast++;
1523f8919bdaSduboff } else {
152423d366e3Sduboff mcast++;
1525f8919bdaSduboff }
1526f8919bdaSduboff }
1527f8919bdaSduboff
152823d366e3Sduboff /* save misc info */
152923d366e3Sduboff txflag = tbp->txb_flag;
153023d366e3Sduboff txflag |= (flags & GEM_SEND_CTRL) << GEM_TXFLAG_PRIVATE_SHIFT;
153123d366e3Sduboff txflag |= gem_txbuf_options(dp, mp, (uint8_t *)tbp->txb_buf);
153223d366e3Sduboff tbp->txb_flag = txflag;
153323d366e3Sduboff
153423d366e3Sduboff len_total += gem_setup_txbuf_copy(dp, mp, tbp);
1535f8919bdaSduboff }
1536f8919bdaSduboff
153723d366e3Sduboff (void) gem_tx_load_descs_oo(dp, head, head + nmblk, load_flags);
1538f8919bdaSduboff
1539f8919bdaSduboff /* Append the tbp at the tail of the active tx buffer list */
1540f8919bdaSduboff mutex_enter(&dp->xmitlock);
1541f8919bdaSduboff
1542f8919bdaSduboff if ((--dp->tx_busy) == 0) {
1543f8919bdaSduboff /* extend the tail of softq, as new packets have been ready. */
1544f8919bdaSduboff dp->tx_softq_tail = dp->tx_free_head;
1545f8919bdaSduboff
1546f8919bdaSduboff if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
1547f8919bdaSduboff /*
1548f8919bdaSduboff * The device status has changed while we are
1549f8919bdaSduboff * preparing tx buf.
1550f8919bdaSduboff * As we are the last one that make tx non-busy.
1551f8919bdaSduboff * wake up someone who may wait for us.
1552f8919bdaSduboff */
1553f8919bdaSduboff cv_broadcast(&dp->tx_drain_cv);
1554f8919bdaSduboff } else {
1555f8919bdaSduboff ASSERT(dp->tx_softq_tail - dp->tx_softq_head > 0);
1556f8919bdaSduboff gem_tx_start_unit(dp);
1557f8919bdaSduboff }
1558f8919bdaSduboff }
1559f8919bdaSduboff dp->stats.obytes += len_total;
156023d366e3Sduboff dp->stats.opackets += nmblk;
156123d366e3Sduboff dp->stats.obcast += bcast;
156223d366e3Sduboff dp->stats.omcast += mcast;
1563f8919bdaSduboff done:
1564f8919bdaSduboff mutex_exit(&dp->xmitlock);
1565f8919bdaSduboff
1566f8919bdaSduboff return (mp_head);
1567f8919bdaSduboff }
1568f8919bdaSduboff
1569f8919bdaSduboff /* ========================================================== */
1570f8919bdaSduboff /*
1571f8919bdaSduboff * error detection and restart routines
1572f8919bdaSduboff */
1573f8919bdaSduboff /* ========================================================== */
/*
 * Reset and reinitialize the chip after a fatal error or tx timeout.
 * Caller must hold intrlock.  With GEM_RESTART_KEEP_BUF, rx is stopped
 * gracefully before the mac is brought down so pending buffers survive.
 * Returns GEM_SUCCESS or GEM_FAILURE.
 */
int
gem_restart_nic(struct gem_dev *dp, uint_t flags)
{
	ASSERT(mutex_owned(&dp->intrlock));

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
#ifdef GEM_DEBUG_LEVEL
#if GEM_DEBUG_LEVEL > 1
	gem_dump_txbuf(dp, CE_CONT, "gem_restart_nic");
#endif
#endif

	if (dp->mac_suspended) {
		/* should we return GEM_FAILURE ? */
		return (GEM_FAILURE);
	}

	/*
	 * We should avoid calling any routines except xxx_chip_reset
	 * when we are resuming the system.
	 */
	if (dp->mac_active) {
		if (flags & GEM_RESTART_KEEP_BUF) {
			/* stop rx gracefully */
			dp->rxmode &= ~RXMODE_ENABLE;
			(void) (*dp->gc.gc_set_rx_filter)(dp);
		}
		(void) gem_mac_stop(dp, flags);
	}

	/* reset the chip. */
	if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s: failed to reset chip",
		    dp->name, __func__);
		goto err;
	}

	if (gem_mac_init(dp) != GEM_SUCCESS) {
		goto err;
	}

	/* setup media mode if the link have been up */
	if (dp->mii_state == MII_STATE_LINKUP) {
		if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
			goto err;
		}
	}

	/* setup mac address and enable rx filter */
	dp->rxmode |= RXMODE_ENABLE;
	if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
		goto err;
	}

	/*
	 * XXX - a panic happened because of linkdown.
	 * We must check mii_state here, because the link can be down just
	 * before the restart event happen. If the link is down now,
	 * gem_mac_start() will be called from gem_mii_link_check() when
	 * the link become up later.
	 */
	if (dp->mii_state == MII_STATE_LINKUP) {
		/* restart the nic */
		ASSERT(!dp->mac_active);
		(void) gem_mac_start(dp);
	}
	return (GEM_SUCCESS);
err:
	return (GEM_FAILURE);
}
1644f8919bdaSduboff
1645f8919bdaSduboff
/*
 * Tx watchdog, run from a timeout(9F) callback: detects a hung
 * transmitter or a tx side blocked for too long, restarts the nic if
 * necessary, and always reschedules itself.
 */
static void
gem_tx_timeout(struct gem_dev *dp)
{
	clock_t		now;
	boolean_t	tx_sched;
	struct txbuf	*tbp;

	mutex_enter(&dp->intrlock);

	tx_sched = B_FALSE;
	now = ddi_get_lbolt();

	mutex_enter(&dp->xmitlock);
	if (!dp->mac_active || dp->mii_state != MII_STATE_LINKUP) {
		/* nothing to watch while mac is down or link is down */
		mutex_exit(&dp->xmitlock);
		goto schedule_next;
	}
	mutex_exit(&dp->xmitlock);

	/* reclaim transmitted buffers to check the transmitter hangs or not. */
	if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
		/* tx error happened, reset transmitter in the chip */
		(void) gem_restart_nic(dp, 0);
		tx_sched = B_TRUE;
		dp->tx_blocked = (clock_t)0;

		goto schedule_next;
	}

	mutex_enter(&dp->xmitlock);
	/* check if the transmitter thread is stuck */
	if (dp->tx_active_head == dp->tx_active_tail) {
		/* no tx buffer is loaded to the nic */
		if (dp->tx_blocked &&
		    now - dp->tx_blocked > dp->gc.gc_tx_timeout_interval) {
			gem_dump_txbuf(dp, CE_WARN,
			    "gem_tx_timeout: tx blocked");
			tx_sched = B_TRUE;
			dp->tx_blocked = (clock_t)0;
		}
		mutex_exit(&dp->xmitlock);
		goto schedule_next;
	}

	/* has the oldest loaded buffer been pending longer than the limit? */
	tbp = GET_TXBUF(dp, dp->tx_active_head);
	if (now - tbp->txb_stime < dp->gc.gc_tx_timeout) {
		mutex_exit(&dp->xmitlock);
		goto schedule_next;
	}
	mutex_exit(&dp->xmitlock);

	gem_dump_txbuf(dp, CE_WARN, "gem_tx_timeout: tx timeout");

	/* discard untransmitted packet and restart tx. */
	(void) gem_restart_nic(dp, GEM_RESTART_NOWAIT);
	tx_sched = B_TRUE;
	dp->tx_blocked = (clock_t)0;

schedule_next:
	mutex_exit(&dp->intrlock);

	/* restart the downstream if needed */
	if (tx_sched) {
		mac_tx_update(dp->mh);
	}

	DPRINTF(4, (CE_CONT,
	    "!%s: blocked:%d active_head:%d active_tail:%d desc_intr:%d",
	    dp->name, BOOLEAN(dp->tx_blocked),
	    dp->tx_active_head, dp->tx_active_tail, dp->tx_desc_intr));
	dp->timeout_id =
	    timeout((void (*)(void *))gem_tx_timeout,
	    (void *)dp, dp->gc.gc_tx_timeout_interval);
}
1720f8919bdaSduboff
1721f8919bdaSduboff /* ================================================================== */
1722f8919bdaSduboff /*
1723f8919bdaSduboff * Interrupt handler
1724f8919bdaSduboff */
1725f8919bdaSduboff /* ================================================================== */
/*
 * Append a chain of prepared rx buffers to the active rx buffer list
 * and hand each one to the hardware by writing its rx descriptor.
 * Caller must hold intrlock.
 */
__INLINE__
static void
gem_append_rxbuf(struct gem_dev *dp, struct rxbuf *rbp_head)
{
	struct rxbuf	*rbp;
	seqnum_t	tail;
	int		rx_ring_size = dp->gc.gc_rx_ring_size;

	ASSERT(rbp_head != NULL);
	ASSERT(mutex_owned(&dp->intrlock));

	DPRINTF(3, (CE_CONT, "!%s: %s: slot_head:%d, slot_tail:%d",
	    dp->name, __func__, dp->rx_active_head, dp->rx_active_tail));

	/*
	 * Add new buffers into active rx buffer list
	 */
	if (dp->rx_buf_head == NULL) {
		dp->rx_buf_head = rbp_head;
		ASSERT(dp->rx_buf_tail == NULL);
	} else {
		dp->rx_buf_tail->rxb_next = rbp_head;
	}

	tail = dp->rx_active_tail;
	for (rbp = rbp_head; rbp; rbp = rbp->rxb_next) {
		/* need to notify the tail for the lower layer */
		dp->rx_buf_tail = rbp;

		/* publish the buffer's DMA cookie to the rx ring */
		dp->gc.gc_rx_desc_write(dp,
		    SLOT(tail, rx_ring_size),
		    rbp->rxb_dmacookie,
		    rbp->rxb_nfrags);

		dp->rx_active_tail = tail = tail + 1;
	}
}
#pragma inline(gem_append_rxbuf)
1764f8919bdaSduboff
1765f8919bdaSduboff mblk_t *
gem_get_packet_default(struct gem_dev * dp,struct rxbuf * rbp,size_t len)1766f8919bdaSduboff gem_get_packet_default(struct gem_dev *dp, struct rxbuf *rbp, size_t len)
1767f8919bdaSduboff {
1768f8919bdaSduboff int rx_header_len = dp->gc.gc_rx_header_len;
1769f8919bdaSduboff uint8_t *bp;
1770f8919bdaSduboff mblk_t *mp;
1771f8919bdaSduboff
1772f8919bdaSduboff /* allocate a new mblk */
1773f8919bdaSduboff if (mp = allocb(len + VTAG_SIZE, BPRI_MED)) {
1774f8919bdaSduboff ASSERT(mp->b_next == NULL);
1775f8919bdaSduboff ASSERT(mp->b_cont == NULL);
1776f8919bdaSduboff
1777f8919bdaSduboff mp->b_rptr += VTAG_SIZE;
1778f8919bdaSduboff bp = mp->b_rptr;
1779f8919bdaSduboff mp->b_wptr = bp + len;
1780f8919bdaSduboff
178123d366e3Sduboff /*
178223d366e3Sduboff * flush the range of the entire buffer to invalidate
178323d366e3Sduboff * all of corresponding dirty entries in iocache.
178423d366e3Sduboff */
1785f8919bdaSduboff (void) ddi_dma_sync(rbp->rxb_dh, rx_header_len,
178623d366e3Sduboff 0, DDI_DMA_SYNC_FORKERNEL);
1787f8919bdaSduboff
1788f8919bdaSduboff bcopy(rbp->rxb_buf + rx_header_len, bp, len);
1789f8919bdaSduboff }
1790f8919bdaSduboff return (mp);
1791f8919bdaSduboff }
1792f8919bdaSduboff
#ifdef GEM_DEBUG_LEVEL
/* debug histogram: packets harvested per gem_receive() invocation */
uint_t gem_rx_pkts[17];
#endif
1796f8919bdaSduboff
1797f8919bdaSduboff
/*
 * Harvest completed rx descriptors: pass good frames up the mac layer
 * with mac_rx(), recycle the rx buffers back into the ring, and update
 * rx statistics.  Caller must hold intrlock (it is dropped around
 * mac_rx()).  Returns the number of descriptors processed.
 */
int
gem_receive(struct gem_dev *dp)
{
	uint64_t	len_total = 0;
	struct rxbuf	*rbp;
	mblk_t		*mp;
	int		cnt = 0;
	uint64_t	rxstat;
	struct rxbuf	*newbufs;
	struct rxbuf	**newbufs_tailp;
	mblk_t		*rx_head;
	mblk_t		**rx_tailp;
	int		rx_ring_size = dp->gc.gc_rx_ring_size;
	seqnum_t	active_head;
	uint64_t	(*rx_desc_stat)(struct gem_dev *dp,
	    int slot, int ndesc);
	int		ethermin = ETHERMIN;
	int		ethermax = dp->mtu + sizeof (struct ether_header);
	int		rx_header_len = dp->gc.gc_rx_header_len;

	ASSERT(mutex_owned(&dp->intrlock));

	DPRINTF(3, (CE_CONT, "!%s: gem_receive: rx_buf_head:%p",
	    dp->name, dp->rx_buf_head));

	rx_desc_stat = dp->gc.gc_rx_desc_stat;
	newbufs_tailp = &newbufs;
	rx_tailp = &rx_head;
	for (active_head = dp->rx_active_head;
	    (rbp = dp->rx_buf_head) != NULL; active_head++) {
		int		len;
		if (cnt == 0) {
			/* sync a batch of descriptors at a time */
			cnt = max(dp->poll_pkt_delay*2, 10);
			cnt = min(cnt,
			    dp->rx_active_tail - active_head);
			gem_rx_desc_dma_sync(dp,
			    SLOT(active_head, rx_ring_size),
			    cnt,
			    DDI_DMA_SYNC_FORKERNEL);
		}

		if (rx_header_len > 0) {
			/* also sync the per-buffer rx header area */
			(void) ddi_dma_sync(rbp->rxb_dh, 0,
			    rx_header_len, DDI_DMA_SYNC_FORKERNEL);
		}

		if (((rxstat = (*rx_desc_stat)(dp,
		    SLOT(active_head, rx_ring_size),
		    rbp->rxb_nfrags))
		    & (GEM_RX_DONE | GEM_RX_ERR)) == 0) {
			/* not received yet */
			break;
		}

		/* Remove the head of the rx buffer list */
		dp->rx_buf_head = rbp->rxb_next;
		cnt--;


		if (rxstat & GEM_RX_ERR) {
			/* bad frame: just recycle the buffer */
			goto next;
		}

		len = rxstat & GEM_RX_LEN;
		DPRINTF(3, (CE_CONT, "!%s: %s: rxstat:0x%llx, len:0x%x",
		    dp->name, __func__, rxstat, len));

		/*
		 * Copy the packet
		 */
		if ((mp = dp->gc.gc_get_packet(dp, rbp, len)) == NULL) {
			/* no memory, discard the packet */
			dp->stats.norcvbuf++;
			goto next;
		}

		/*
		 * Process VLAN tag
		 */
		ethermin = ETHERMIN;
		ethermax = dp->mtu + sizeof (struct ether_header);
		if (GET_NET16(mp->b_rptr + VTAG_OFF) == VTAG_TPID) {
			/* a tagged frame may legally be VTAG_SIZE longer */
			ethermax += VTAG_SIZE;
		}

		/* check packet size */
		if (len < ethermin) {
			dp->stats.errrcv++;
			dp->stats.runt++;
			freemsg(mp);
			goto next;
		}

		if (len > ethermax) {
			dp->stats.errrcv++;
			dp->stats.frame_too_long++;
			freemsg(mp);
			goto next;
		}

		len_total += len;

#ifdef GEM_DEBUG_VLAN
		if (GET_ETHERTYPE(mp->b_rptr) == VTAG_TPID) {
			gem_dump_packet(dp, (char *)__func__, mp, B_TRUE);
		}
#endif
		/* append received packet to temporary rx buffer list */
		*rx_tailp = mp;
		rx_tailp = &mp->b_next;

		/* count broadcast/multicast by destination address */
		if (mp->b_rptr[0] & 1) {
			if (bcmp(mp->b_rptr,
			    gem_etherbroadcastaddr.ether_addr_octet,
			    ETHERADDRL) == 0) {
				dp->stats.rbcast++;
			} else {
				dp->stats.rmcast++;
			}
		}
next:
		ASSERT(rbp != NULL);

		/* append new one to temporal new buffer list */
		*newbufs_tailp = rbp;
		newbufs_tailp = &rbp->rxb_next;
	}

	/* advance rx_active_head */
	if ((cnt = active_head - dp->rx_active_head) > 0) {
		dp->stats.rbytes += len_total;
		dp->stats.rpackets += cnt;
	}
	dp->rx_active_head = active_head;

	/* terminate the working list */
	*newbufs_tailp = NULL;
	*rx_tailp = NULL;

	if (dp->rx_buf_head == NULL) {
		dp->rx_buf_tail = NULL;
	}

	DPRINTF(4, (CE_CONT, "%s: %s: cnt:%d, rx_head:%p",
	    dp->name, __func__, cnt, rx_head));

	if (newbufs) {
		/*
		 * refill the rx ring with the recycled buffers
		 */
		seqnum_t	head;

		/* save current tail */
		head = dp->rx_active_tail;
		gem_append_rxbuf(dp, newbufs);

		/* call hw depend start routine if we have. */
		dp->gc.gc_rx_start(dp,
		    SLOT(head, rx_ring_size), dp->rx_active_tail - head);
	}

	if (rx_head) {
		/*
		 * send up received packets
		 */
		mutex_exit(&dp->intrlock);
		mac_rx(dp->mh, NULL, rx_head);
		mutex_enter(&dp->intrlock);
	}

#ifdef GEM_DEBUG_LEVEL
	gem_rx_pkts[min(cnt, sizeof (gem_rx_pkts)/sizeof (uint_t)-1)]++;
#endif
	return (cnt);
}
1973f8919bdaSduboff
/*
 * Handle a tx-done interrupt: reclaim transmitted buffers and decide
 * whether the (possibly blocked) tx side must be rescheduled.
 * Returns B_TRUE when the caller should invoke mac_tx_update().
 */
boolean_t
gem_tx_done(struct gem_dev *dp)
{
	boolean_t	tx_sched = B_FALSE;

	if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
		/* tx error; restart the nic, keeping untransmitted buffers */
		(void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
		DPRINTF(2, (CE_CONT, "!%s: gem_tx_done: tx_desc: %d %d",
		    dp->name, dp->tx_active_head, dp->tx_active_tail));
		tx_sched = B_TRUE;
		goto x;
	}

	mutex_enter(&dp->xmitlock);

	/* XXX - we must not have any packets in soft queue */
	ASSERT(dp->tx_softq_head == dp->tx_softq_tail);
	/*
	 * If we won't have chance to get more free tx buffers, and blocked,
	 * it is worth to reschedule the downstream i.e. tx side.
	 */
	ASSERT(dp->tx_desc_intr - dp->tx_desc_head >= 0);
	if (dp->tx_blocked && dp->tx_desc_intr == dp->tx_desc_head) {
		/*
		 * As no further tx-done interrupts are scheduled, this
		 * is the last chance to kick tx side, which may be
		 * blocked now, otherwise the tx side never works again.
		 */
		tx_sched = B_TRUE;
		dp->tx_blocked = (clock_t)0;
		/* gradually raise the per-call packet limit again */
		dp->tx_max_packets =
		    min(dp->tx_max_packets + 2, dp->gc.gc_tx_buf_limit);
	}

	mutex_exit(&dp->xmitlock);

	DPRINTF(3, (CE_CONT, "!%s: %s: ret: blocked:%d",
	    dp->name, __func__, BOOLEAN(dp->tx_blocked)));
x:
	return (tx_sched);
}
2015f8919bdaSduboff
/*
 * Common interrupt dispatcher: invokes the hardware-dependent
 * gc_interrupt handler under intrlock and performs the follow-up
 * actions it requests (tx restart).  Returns the DDI_INTR_* status.
 */
static uint_t
gem_intr(struct gem_dev *dp)
{
	uint_t		ret;

	mutex_enter(&dp->intrlock);
	if (dp->mac_suspended) {
		/* the interrupt cannot be ours while suspended */
		mutex_exit(&dp->intrlock);
		return (DDI_INTR_UNCLAIMED);
	}
	dp->intr_busy = B_TRUE;

	ret = (*dp->gc.gc_interrupt)(dp);

	if (ret == DDI_INTR_UNCLAIMED) {
		dp->intr_busy = B_FALSE;
		mutex_exit(&dp->intrlock);
		return (ret);
	}

	if (!dp->mac_active) {
		/* wake up threads waiting for tx to drain */
		cv_broadcast(&dp->tx_drain_cv);
	}


	dp->stats.intr++;
	dp->intr_busy = B_FALSE;

	mutex_exit(&dp->intrlock);

	if (ret & INTR_RESTART_TX) {
		/* the handler asked us to kick the blocked tx side */
		DPRINTF(4, (CE_CONT, "!%s: calling mac_tx_update", dp->name));
		mac_tx_update(dp->mh);
		ret &= ~INTR_RESTART_TX;
	}
	return (ret);
}
2053f8919bdaSduboff
/*
 * Polling fallback for broken/absent interrupt lines: calls gem_intr()
 * from a timeout(9F) callback and reschedules itself every tick.
 */
static void
gem_intr_watcher(struct gem_dev *dp)
{
	(void) gem_intr(dp);

	/* schedule next call of gem_intr_watcher */
	dp->intr_watcher_id =
	    timeout((void (*)(void *))gem_intr_watcher, (void *)dp, 1);
}
2063f8919bdaSduboff
2064f8919bdaSduboff /* ======================================================================== */
2065f8919bdaSduboff /*
2066f8919bdaSduboff * MII support routines
2067f8919bdaSduboff */
2068f8919bdaSduboff /* ======================================================================== */
2069f8919bdaSduboff static void
gem_choose_forcedmode(struct gem_dev * dp)2070f8919bdaSduboff gem_choose_forcedmode(struct gem_dev *dp)
2071f8919bdaSduboff {
2072f8919bdaSduboff /* choose media mode */
2073f8919bdaSduboff if (dp->anadv_1000fdx || dp->anadv_1000hdx) {
2074f8919bdaSduboff dp->speed = GEM_SPD_1000;
2075f8919bdaSduboff dp->full_duplex = dp->anadv_1000fdx;
2076f8919bdaSduboff } else if (dp->anadv_100fdx || dp->anadv_100t4) {
2077f8919bdaSduboff dp->speed = GEM_SPD_100;
2078f8919bdaSduboff dp->full_duplex = B_TRUE;
2079f8919bdaSduboff } else if (dp->anadv_100hdx) {
2080f8919bdaSduboff dp->speed = GEM_SPD_100;
2081f8919bdaSduboff dp->full_duplex = B_FALSE;
2082f8919bdaSduboff } else {
2083f8919bdaSduboff dp->speed = GEM_SPD_10;
2084f8919bdaSduboff dp->full_duplex = dp->anadv_10fdx;
2085f8919bdaSduboff }
2086f8919bdaSduboff }
2087f8919bdaSduboff
2088f8919bdaSduboff uint16_t
gem_mii_read(struct gem_dev * dp,uint_t reg)2089f8919bdaSduboff gem_mii_read(struct gem_dev *dp, uint_t reg)
2090f8919bdaSduboff {
2091f8919bdaSduboff if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2092f8919bdaSduboff (*dp->gc.gc_mii_sync)(dp);
2093f8919bdaSduboff }
2094f8919bdaSduboff return ((*dp->gc.gc_mii_read)(dp, reg));
2095f8919bdaSduboff }
2096f8919bdaSduboff
2097f8919bdaSduboff void
gem_mii_write(struct gem_dev * dp,uint_t reg,uint16_t val)2098f8919bdaSduboff gem_mii_write(struct gem_dev *dp, uint_t reg, uint16_t val)
2099f8919bdaSduboff {
2100f8919bdaSduboff if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2101f8919bdaSduboff (*dp->gc.gc_mii_sync)(dp);
2102f8919bdaSduboff }
2103f8919bdaSduboff (*dp->gc.gc_mii_write)(dp, reg, val);
2104f8919bdaSduboff }
2105f8919bdaSduboff
/*
 * Decode MII pause-ability bits into a 2-bit flow control capability
 * index: bit0 = symmetric pause, bit1 = asymmetric pause.  Used to
 * index the gem_fc_result[][] resolution table.
 */
#define	fc_cap_decode(x)	\
	((((x) & MII_ABILITY_PAUSE) ? 1 : 0) |	\
	(((x) & MII_ABILITY_ASMPAUSE) ? 2 : 0))
2109f8919bdaSduboff
/*
 * Default gc_mii_config method: program the PHY autonegotiation
 * advertisement registers (MII_AN_ADVERT and, on PHYs with extended
 * status, MII_1000TC) from the anadv_* fields of the soft state.
 * Returns GEM_SUCCESS, or GEM_FAILURE if the PHY reports no abilities.
 */
int
gem_mii_config_default(struct gem_dev *dp)
{
	uint16_t	mii_stat;
	uint16_t	val;
	/* indexed by dp->anadv_flow_control (0..3) */
	static uint16_t fc_cap_encode[4] = {
		0,						/* none */
		MII_ABILITY_PAUSE,				/* symmetric */
		MII_ABILITY_ASMPAUSE,				/* tx */
		MII_ABILITY_PAUSE | MII_ABILITY_ASMPAUSE,	/* rx-symmetric */
	};

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/*
	 * Configure bits in advertisement register
	 */
	mii_stat = dp->mii_status;

	DPRINTF(1, (CE_CONT, "!%s: %s: MII_STATUS reg:%b",
	    dp->name, __func__, mii_stat, MII_STATUS_BITS));

	if ((mii_stat & MII_STATUS_ABILITY_TECH) == 0) {
		/* it's funny */
		cmn_err(CE_WARN, "!%s: wrong ability bits: mii_status:%b",
		    dp->name, mii_stat, MII_STATUS_BITS);
		return (GEM_FAILURE);
	}

	/* Do not change the rest of the ability bits in the advert reg */
	val = gem_mii_read(dp, MII_AN_ADVERT) & ~MII_ABILITY_ALL;

	DPRINTF(0, (CE_CONT,
	    "!%s: %s: 100T4:%d 100F:%d 100H:%d 10F:%d 10H:%d",
	    dp->name, __func__,
	    dp->anadv_100t4, dp->anadv_100fdx, dp->anadv_100hdx,
	    dp->anadv_10fdx, dp->anadv_10hdx));

	/* translate each advertised mode into its ability bit */
	if (dp->anadv_100t4) {
		val |= MII_ABILITY_100BASE_T4;
	}
	if (dp->anadv_100fdx) {
		val |= MII_ABILITY_100BASE_TX_FD;
	}
	if (dp->anadv_100hdx) {
		val |= MII_ABILITY_100BASE_TX;
	}
	if (dp->anadv_10fdx) {
		val |= MII_ABILITY_10BASE_T_FD;
	}
	if (dp->anadv_10hdx) {
		val |= MII_ABILITY_10BASE_T;
	}

	/* set flow control capability */
	val |= fc_cap_encode[dp->anadv_flow_control];

	DPRINTF(0, (CE_CONT,
	    "!%s: %s: setting MII_AN_ADVERT reg:%b, mii_mode:%d, fc:%d",
	    dp->name, __func__, val, MII_ABILITY_BITS, dp->gc.gc_mii_mode,
	    dp->anadv_flow_control));

	gem_mii_write(dp, MII_AN_ADVERT, val);

	if (mii_stat & MII_STATUS_XSTATUS) {
		/*
		 * 1000Base-T GMII support
		 */
		if (!dp->anadv_autoneg) {
			/* enable manual configuration */
			val = MII_1000TC_CFG_EN;
		} else {
			val = 0;
			if (dp->anadv_1000fdx) {
				val |= MII_1000TC_ADV_FULL;
			}
			if (dp->anadv_1000hdx) {
				val |= MII_1000TC_ADV_HALF;
			}
		}
		DPRINTF(0, (CE_CONT,
		    "!%s: %s: setting MII_1000TC reg:%b",
		    dp->name, __func__, val, MII_1000TC_BITS));

		gem_mii_write(dp, MII_1000TC, val);
	}

	return (GEM_SUCCESS);
}
2199f8919bdaSduboff
2200f8919bdaSduboff #define GEM_LINKUP(dp) mac_link_update((dp)->mh, LINK_STATE_UP)
2201f8919bdaSduboff #define GEM_LINKDOWN(dp) mac_link_update((dp)->mh, LINK_STATE_DOWN)
2202f8919bdaSduboff
2203f8919bdaSduboff static uint8_t gem_fc_result[4 /* my cap */ ][4 /* lp cap */] = {
2204f8919bdaSduboff /* none symm tx rx/symm */
2205f8919bdaSduboff /* none */
2206f8919bdaSduboff {FLOW_CONTROL_NONE,
2207f8919bdaSduboff FLOW_CONTROL_NONE,
2208f8919bdaSduboff FLOW_CONTROL_NONE,
2209f8919bdaSduboff FLOW_CONTROL_NONE},
2210f8919bdaSduboff /* sym */
2211f8919bdaSduboff {FLOW_CONTROL_NONE,
2212f8919bdaSduboff FLOW_CONTROL_SYMMETRIC,
2213f8919bdaSduboff FLOW_CONTROL_NONE,
2214f8919bdaSduboff FLOW_CONTROL_SYMMETRIC},
2215f8919bdaSduboff /* tx */
2216f8919bdaSduboff {FLOW_CONTROL_NONE,
2217f8919bdaSduboff FLOW_CONTROL_NONE,
2218f8919bdaSduboff FLOW_CONTROL_NONE,
2219f8919bdaSduboff FLOW_CONTROL_TX_PAUSE},
2220f8919bdaSduboff /* rx/symm */
2221f8919bdaSduboff {FLOW_CONTROL_NONE,
2222f8919bdaSduboff FLOW_CONTROL_SYMMETRIC,
2223f8919bdaSduboff FLOW_CONTROL_RX_PAUSE,
2224f8919bdaSduboff FLOW_CONTROL_SYMMETRIC},
2225f8919bdaSduboff };
2226f8919bdaSduboff
2227f8919bdaSduboff static char *gem_fc_type[] = {
2228f8919bdaSduboff "without",
2229f8919bdaSduboff "with symmetric",
2230f8919bdaSduboff "with tx",
2231f8919bdaSduboff "with rx",
2232f8919bdaSduboff };
2233f8919bdaSduboff
2234f8919bdaSduboff boolean_t
gem_mii_link_check(struct gem_dev * dp)2235f8919bdaSduboff gem_mii_link_check(struct gem_dev *dp)
2236f8919bdaSduboff {
2237f8919bdaSduboff uint16_t old_mii_state;
2238f8919bdaSduboff boolean_t tx_sched = B_FALSE;
2239f8919bdaSduboff uint16_t status;
2240f8919bdaSduboff uint16_t advert;
2241f8919bdaSduboff uint16_t lpable;
2242f8919bdaSduboff uint16_t exp;
2243f8919bdaSduboff uint16_t ctl1000;
2244f8919bdaSduboff uint16_t stat1000;
2245f8919bdaSduboff uint16_t val;
2246f8919bdaSduboff clock_t now;
2247f8919bdaSduboff clock_t diff;
2248f8919bdaSduboff int linkdown_action;
2249f8919bdaSduboff boolean_t fix_phy = B_FALSE;
2250f8919bdaSduboff
2251f8919bdaSduboff now = ddi_get_lbolt();
2252f8919bdaSduboff old_mii_state = dp->mii_state;
2253f8919bdaSduboff
2254f8919bdaSduboff DPRINTF(3, (CE_CONT, "!%s: %s: time:%d state:%d",
2255f8919bdaSduboff dp->name, __func__, now, dp->mii_state));
2256f8919bdaSduboff
2257f8919bdaSduboff diff = now - dp->mii_last_check;
2258f8919bdaSduboff dp->mii_last_check = now;
2259f8919bdaSduboff
226023d366e3Sduboff /*
226123d366e3Sduboff * For NWAM, don't show linkdown state right
226223d366e3Sduboff * after the system boots
226323d366e3Sduboff */
226423d366e3Sduboff if (dp->linkup_delay > 0) {
226523d366e3Sduboff if (dp->linkup_delay > diff) {
226623d366e3Sduboff dp->linkup_delay -= diff;
226723d366e3Sduboff } else {
226823d366e3Sduboff /* link up timeout */
226923d366e3Sduboff dp->linkup_delay = -1;
227023d366e3Sduboff }
227123d366e3Sduboff }
227223d366e3Sduboff
2273f8919bdaSduboff next_nowait:
2274f8919bdaSduboff switch (dp->mii_state) {
2275f8919bdaSduboff case MII_STATE_UNKNOWN:
2276f8919bdaSduboff /* power-up, DP83840 requires 32 sync bits */
2277f8919bdaSduboff (*dp->gc.gc_mii_sync)(dp);
2278f8919bdaSduboff goto reset_phy;
2279f8919bdaSduboff
2280f8919bdaSduboff case MII_STATE_RESETTING:
2281f8919bdaSduboff dp->mii_timer -= diff;
2282f8919bdaSduboff if (dp->mii_timer > 0) {
2283f8919bdaSduboff /* don't read phy registers in resetting */
2284f8919bdaSduboff dp->mii_interval = WATCH_INTERVAL_FAST;
2285f8919bdaSduboff goto next;
2286f8919bdaSduboff }
2287f8919bdaSduboff
2288f8919bdaSduboff /* Timer expired, ensure reset bit is not set */
2289f8919bdaSduboff
2290f8919bdaSduboff if (dp->mii_status & MII_STATUS_MFPRMBLSUPR) {
2291f8919bdaSduboff /* some phys need sync bits after reset */
2292f8919bdaSduboff (*dp->gc.gc_mii_sync)(dp);
2293f8919bdaSduboff }
2294f8919bdaSduboff val = gem_mii_read(dp, MII_CONTROL);
2295f8919bdaSduboff if (val & MII_CONTROL_RESET) {
2296f8919bdaSduboff cmn_err(CE_NOTE,
2297f8919bdaSduboff "!%s: time:%ld resetting phy not complete."
2298f8919bdaSduboff " mii_control:0x%b",
2299f8919bdaSduboff dp->name, ddi_get_lbolt(),
2300f8919bdaSduboff val, MII_CONTROL_BITS);
2301f8919bdaSduboff }
2302f8919bdaSduboff
2303f8919bdaSduboff /* ensure neither isolated nor pwrdown nor auto-nego mode */
2304f8919bdaSduboff /* XXX -- this operation is required for NS DP83840A. */
2305f8919bdaSduboff gem_mii_write(dp, MII_CONTROL, 0);
2306f8919bdaSduboff
2307f8919bdaSduboff /* As resetting PHY has completed, configure PHY registers */
2308f8919bdaSduboff if ((*dp->gc.gc_mii_config)(dp) != GEM_SUCCESS) {
2309f8919bdaSduboff /* we failed to configure PHY. */
2310f8919bdaSduboff goto reset_phy;
2311f8919bdaSduboff }
2312f8919bdaSduboff
2313f8919bdaSduboff /* mii_config may disable autonegatiation */
2314f8919bdaSduboff gem_choose_forcedmode(dp);
2315f8919bdaSduboff
2316f8919bdaSduboff dp->mii_lpable = 0;
2317f8919bdaSduboff dp->mii_advert = 0;
2318f8919bdaSduboff dp->mii_exp = 0;
2319f8919bdaSduboff dp->mii_ctl1000 = 0;
2320f8919bdaSduboff dp->mii_stat1000 = 0;
2321f8919bdaSduboff dp->flow_control = FLOW_CONTROL_NONE;
2322f8919bdaSduboff
2323f8919bdaSduboff if (!dp->anadv_autoneg) {
2324f8919bdaSduboff /* skip auto-negotiation phase */
2325f8919bdaSduboff dp->mii_state = MII_STATE_MEDIA_SETUP;
2326f8919bdaSduboff dp->mii_timer = 0;
2327f8919bdaSduboff dp->mii_interval = 0;
2328f8919bdaSduboff goto next_nowait;
2329f8919bdaSduboff }
2330f8919bdaSduboff
2331f8919bdaSduboff /* Issue auto-negotiation command */
2332f8919bdaSduboff goto autonego;
2333f8919bdaSduboff
2334f8919bdaSduboff case MII_STATE_AUTONEGOTIATING:
2335f8919bdaSduboff /*
2336f8919bdaSduboff * Autonegotiation is in progress
2337f8919bdaSduboff */
2338f8919bdaSduboff dp->mii_timer -= diff;
2339f8919bdaSduboff if (dp->mii_timer -
2340f8919bdaSduboff (dp->gc.gc_mii_an_timeout
2341f8919bdaSduboff - dp->gc.gc_mii_an_wait) > 0) {
2342f8919bdaSduboff /*
2343f8919bdaSduboff * wait for a while, typically autonegotiation
2344f8919bdaSduboff * completes in 2.3 - 2.5 sec.
2345f8919bdaSduboff */
2346f8919bdaSduboff dp->mii_interval = WATCH_INTERVAL_FAST;
2347f8919bdaSduboff goto next;
2348f8919bdaSduboff }
2349f8919bdaSduboff
2350f8919bdaSduboff /* read PHY status */
2351f8919bdaSduboff status = gem_mii_read(dp, MII_STATUS);
2352f8919bdaSduboff DPRINTF(4, (CE_CONT,
2353f8919bdaSduboff "!%s: %s: called: mii_state:%d MII_STATUS reg:%b",
2354f8919bdaSduboff dp->name, __func__, dp->mii_state,
2355f8919bdaSduboff status, MII_STATUS_BITS));
2356f8919bdaSduboff
2357f8919bdaSduboff if (status & MII_STATUS_REMFAULT) {
2358f8919bdaSduboff /*
2359f8919bdaSduboff * The link parnert told me something wrong happend.
2360f8919bdaSduboff * What do we do ?
2361f8919bdaSduboff */
2362f8919bdaSduboff cmn_err(CE_CONT,
2363f8919bdaSduboff "!%s: auto-negotiation failed: remote fault",
2364f8919bdaSduboff dp->name);
2365f8919bdaSduboff goto autonego;
2366f8919bdaSduboff }
2367f8919bdaSduboff
2368f8919bdaSduboff if ((status & MII_STATUS_ANDONE) == 0) {
2369f8919bdaSduboff if (dp->mii_timer <= 0) {
2370f8919bdaSduboff /*
2371f8919bdaSduboff * Auto-negotiation was timed out,
2372f8919bdaSduboff * try again w/o resetting phy.
2373f8919bdaSduboff */
2374f8919bdaSduboff if (!dp->mii_supress_msg) {
2375f8919bdaSduboff cmn_err(CE_WARN,
2376f8919bdaSduboff "!%s: auto-negotiation failed: timeout",
2377f8919bdaSduboff dp->name);
2378f8919bdaSduboff dp->mii_supress_msg = B_TRUE;
2379f8919bdaSduboff }
2380f8919bdaSduboff goto autonego;
2381f8919bdaSduboff }
2382f8919bdaSduboff /*
2383f8919bdaSduboff * Auto-negotiation is in progress. Wait.
2384f8919bdaSduboff */
2385f8919bdaSduboff dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2386f8919bdaSduboff goto next;
2387f8919bdaSduboff }
2388f8919bdaSduboff
2389f8919bdaSduboff /*
2390f8919bdaSduboff * Auto-negotiation have completed.
2391f8919bdaSduboff * Assume linkdown and fall through.
2392f8919bdaSduboff */
2393f8919bdaSduboff dp->mii_supress_msg = B_FALSE;
2394f8919bdaSduboff dp->mii_state = MII_STATE_AN_DONE;
2395f8919bdaSduboff DPRINTF(0, (CE_CONT,
2396f8919bdaSduboff "!%s: auto-negotiation completed, MII_STATUS:%b",
2397f8919bdaSduboff dp->name, status, MII_STATUS_BITS));
2398f8919bdaSduboff
2399f8919bdaSduboff if (dp->gc.gc_mii_an_delay > 0) {
2400f8919bdaSduboff dp->mii_timer = dp->gc.gc_mii_an_delay;
2401f8919bdaSduboff dp->mii_interval = drv_usectohz(20*1000);
2402f8919bdaSduboff goto next;
2403f8919bdaSduboff }
2404f8919bdaSduboff
2405f8919bdaSduboff dp->mii_timer = 0;
2406f8919bdaSduboff diff = 0;
2407f8919bdaSduboff goto next_nowait;
2408f8919bdaSduboff
2409f8919bdaSduboff case MII_STATE_AN_DONE:
2410f8919bdaSduboff /*
2411f8919bdaSduboff * Auto-negotiation have done. Now we can set up media.
2412f8919bdaSduboff */
2413f8919bdaSduboff dp->mii_timer -= diff;
2414f8919bdaSduboff if (dp->mii_timer > 0) {
2415f8919bdaSduboff /* wait for a while */
2416f8919bdaSduboff dp->mii_interval = WATCH_INTERVAL_FAST;
2417f8919bdaSduboff goto next;
2418f8919bdaSduboff }
2419f8919bdaSduboff
2420f8919bdaSduboff /*
2421f8919bdaSduboff * set up the result of auto negotiation
2422f8919bdaSduboff */
2423f8919bdaSduboff
2424f8919bdaSduboff /*
2425f8919bdaSduboff * Read registers required to determin current
2426f8919bdaSduboff * duplex mode and media speed.
2427f8919bdaSduboff */
2428f8919bdaSduboff if (dp->gc.gc_mii_an_delay > 0) {
2429f8919bdaSduboff /*
2430f8919bdaSduboff * As the link watcher context has been suspended,
2431f8919bdaSduboff * 'status' is invalid. We must status register here
2432f8919bdaSduboff */
2433f8919bdaSduboff status = gem_mii_read(dp, MII_STATUS);
2434f8919bdaSduboff }
2435f8919bdaSduboff advert = gem_mii_read(dp, MII_AN_ADVERT);
2436f8919bdaSduboff lpable = gem_mii_read(dp, MII_AN_LPABLE);
2437f8919bdaSduboff exp = gem_mii_read(dp, MII_AN_EXPANSION);
2438f8919bdaSduboff if (exp == 0xffff) {
2439f8919bdaSduboff /* some phys don't have exp register */
2440f8919bdaSduboff exp = 0;
2441f8919bdaSduboff }
2442f8919bdaSduboff ctl1000 = 0;
2443f8919bdaSduboff stat1000 = 0;
2444f8919bdaSduboff if (dp->mii_status & MII_STATUS_XSTATUS) {
2445f8919bdaSduboff ctl1000 = gem_mii_read(dp, MII_1000TC);
2446f8919bdaSduboff stat1000 = gem_mii_read(dp, MII_1000TS);
2447f8919bdaSduboff }
2448f8919bdaSduboff dp->mii_lpable = lpable;
2449f8919bdaSduboff dp->mii_advert = advert;
2450f8919bdaSduboff dp->mii_exp = exp;
2451f8919bdaSduboff dp->mii_ctl1000 = ctl1000;
2452f8919bdaSduboff dp->mii_stat1000 = stat1000;
2453f8919bdaSduboff
2454f8919bdaSduboff cmn_err(CE_CONT,
2455f8919bdaSduboff "!%s: auto-negotiation done, advert:%b, lpable:%b, exp:%b",
2456f8919bdaSduboff dp->name,
2457f8919bdaSduboff advert, MII_ABILITY_BITS,
2458f8919bdaSduboff lpable, MII_ABILITY_BITS,
2459f8919bdaSduboff exp, MII_AN_EXP_BITS);
2460f8919bdaSduboff
2461f8919bdaSduboff if (dp->mii_status & MII_STATUS_XSTATUS) {
2462f8919bdaSduboff cmn_err(CE_CONT,
2463f8919bdaSduboff "! MII_1000TC:%b, MII_1000TS:%b",
2464f8919bdaSduboff ctl1000, MII_1000TC_BITS,
2465f8919bdaSduboff stat1000, MII_1000TS_BITS);
2466f8919bdaSduboff }
2467f8919bdaSduboff
2468f8919bdaSduboff if (gem_population(lpable) <= 1 &&
2469f8919bdaSduboff (exp & MII_AN_EXP_LPCANAN) == 0) {
2470f8919bdaSduboff if ((advert & MII_ABILITY_TECH) != lpable) {
2471f8919bdaSduboff cmn_err(CE_WARN,
2472f8919bdaSduboff "!%s: but the link partnar doesn't seem"
2473f8919bdaSduboff " to have auto-negotiation capability."
2474f8919bdaSduboff " please check the link configuration.",
2475f8919bdaSduboff dp->name);
2476f8919bdaSduboff }
2477f8919bdaSduboff /*
2478bdb9230aSGarrett D'Amore * it should be result of parallel detection, which
2479f8919bdaSduboff * cannot detect duplex mode.
2480f8919bdaSduboff */
2481f8919bdaSduboff if (lpable & MII_ABILITY_100BASE_TX) {
2482f8919bdaSduboff /*
2483f8919bdaSduboff * we prefer full duplex mode for 100Mbps
2484f8919bdaSduboff * connection, if we can.
2485f8919bdaSduboff */
2486f8919bdaSduboff lpable |= advert & MII_ABILITY_100BASE_TX_FD;
2487f8919bdaSduboff }
2488f8919bdaSduboff
2489f8919bdaSduboff if ((advert & lpable) == 0 &&
2490f8919bdaSduboff lpable & MII_ABILITY_10BASE_T) {
2491f8919bdaSduboff lpable |= advert & MII_ABILITY_10BASE_T_FD;
2492f8919bdaSduboff }
2493f8919bdaSduboff /*
2494f8919bdaSduboff * as the link partnar isn't auto-negotiatable, use
2495f8919bdaSduboff * fixed mode temporally.
2496f8919bdaSduboff */
2497f8919bdaSduboff fix_phy = B_TRUE;
2498f8919bdaSduboff } else if (lpable == 0) {
2499f8919bdaSduboff cmn_err(CE_WARN, "!%s: wrong lpable.", dp->name);
2500f8919bdaSduboff goto reset_phy;
2501f8919bdaSduboff }
2502f8919bdaSduboff /*
2503f8919bdaSduboff * configure current link mode according to AN priority.
2504f8919bdaSduboff */
2505f8919bdaSduboff val = advert & lpable;
2506f8919bdaSduboff if ((ctl1000 & MII_1000TC_ADV_FULL) &&
2507f8919bdaSduboff (stat1000 & MII_1000TS_LP_FULL)) {
2508f8919bdaSduboff /* 1000BaseT & full duplex */
2509f8919bdaSduboff dp->speed = GEM_SPD_1000;
2510f8919bdaSduboff dp->full_duplex = B_TRUE;
2511f8919bdaSduboff } else if ((ctl1000 & MII_1000TC_ADV_HALF) &&
2512f8919bdaSduboff (stat1000 & MII_1000TS_LP_HALF)) {
2513f8919bdaSduboff /* 1000BaseT & half duplex */
2514f8919bdaSduboff dp->speed = GEM_SPD_1000;
2515f8919bdaSduboff dp->full_duplex = B_FALSE;
2516f8919bdaSduboff } else if (val & MII_ABILITY_100BASE_TX_FD) {
2517f8919bdaSduboff /* 100BaseTx & full duplex */
2518f8919bdaSduboff dp->speed = GEM_SPD_100;
2519f8919bdaSduboff dp->full_duplex = B_TRUE;
2520f8919bdaSduboff } else if (val & MII_ABILITY_100BASE_T4) {
2521f8919bdaSduboff /* 100BaseT4 & full duplex */
2522f8919bdaSduboff dp->speed = GEM_SPD_100;
2523f8919bdaSduboff dp->full_duplex = B_TRUE;
2524f8919bdaSduboff } else if (val & MII_ABILITY_100BASE_TX) {
2525f8919bdaSduboff /* 100BaseTx & half duplex */
2526f8919bdaSduboff dp->speed = GEM_SPD_100;
2527f8919bdaSduboff dp->full_duplex = B_FALSE;
2528f8919bdaSduboff } else if (val & MII_ABILITY_10BASE_T_FD) {
2529f8919bdaSduboff /* 10BaseT & full duplex */
2530f8919bdaSduboff dp->speed = GEM_SPD_10;
2531f8919bdaSduboff dp->full_duplex = B_TRUE;
2532f8919bdaSduboff } else if (val & MII_ABILITY_10BASE_T) {
2533f8919bdaSduboff /* 10BaseT & half duplex */
2534f8919bdaSduboff dp->speed = GEM_SPD_10;
2535f8919bdaSduboff dp->full_duplex = B_FALSE;
2536f8919bdaSduboff } else {
2537f8919bdaSduboff /*
2538f8919bdaSduboff * It seems that the link partnar doesn't have
2539f8919bdaSduboff * auto-negotiation capability and our PHY
2540f8919bdaSduboff * could not report the correct current mode.
2541f8919bdaSduboff * We guess current mode by mii_control register.
2542f8919bdaSduboff */
2543f8919bdaSduboff val = gem_mii_read(dp, MII_CONTROL);
2544f8919bdaSduboff
2545f8919bdaSduboff /* select 100m full or 10m half */
2546f8919bdaSduboff dp->speed = (val & MII_CONTROL_100MB) ?
2547f8919bdaSduboff GEM_SPD_100 : GEM_SPD_10;
2548f8919bdaSduboff dp->full_duplex = dp->speed != GEM_SPD_10;
2549f8919bdaSduboff fix_phy = B_TRUE;
2550f8919bdaSduboff
2551f8919bdaSduboff cmn_err(CE_NOTE,
2552f8919bdaSduboff "!%s: auto-negotiation done but "
2553f8919bdaSduboff "common ability not found.\n"
2554f8919bdaSduboff "PHY state: control:%b advert:%b lpable:%b\n"
2555f8919bdaSduboff "guessing %d Mbps %s duplex mode",
2556f8919bdaSduboff dp->name,
2557f8919bdaSduboff val, MII_CONTROL_BITS,
2558f8919bdaSduboff advert, MII_ABILITY_BITS,
2559f8919bdaSduboff lpable, MII_ABILITY_BITS,
2560f8919bdaSduboff gem_speed_value[dp->speed],
2561f8919bdaSduboff dp->full_duplex ? "full" : "half");
2562f8919bdaSduboff }
2563f8919bdaSduboff
2564f8919bdaSduboff if (dp->full_duplex) {
2565f8919bdaSduboff dp->flow_control =
2566f8919bdaSduboff gem_fc_result[fc_cap_decode(advert)]
2567f8919bdaSduboff [fc_cap_decode(lpable)];
2568f8919bdaSduboff } else {
2569f8919bdaSduboff dp->flow_control = FLOW_CONTROL_NONE;
2570f8919bdaSduboff }
2571f8919bdaSduboff dp->mii_state = MII_STATE_MEDIA_SETUP;
2572f8919bdaSduboff /* FALLTHROUGH */
2573f8919bdaSduboff
2574f8919bdaSduboff case MII_STATE_MEDIA_SETUP:
2575f8919bdaSduboff dp->mii_state = MII_STATE_LINKDOWN;
2576f8919bdaSduboff dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
2577f8919bdaSduboff DPRINTF(2, (CE_CONT, "!%s: setup midia mode done", dp->name));
2578f8919bdaSduboff dp->mii_supress_msg = B_FALSE;
2579f8919bdaSduboff
2580f8919bdaSduboff /* use short interval */
2581f8919bdaSduboff dp->mii_interval = WATCH_INTERVAL_FAST;
2582f8919bdaSduboff
2583f8919bdaSduboff if ((!dp->anadv_autoneg) ||
2584f8919bdaSduboff dp->gc.gc_mii_an_oneshot || fix_phy) {
2585f8919bdaSduboff
2586f8919bdaSduboff /*
2587f8919bdaSduboff * write specified mode to phy.
2588f8919bdaSduboff */
2589f8919bdaSduboff val = gem_mii_read(dp, MII_CONTROL);
2590f8919bdaSduboff val &= ~(MII_CONTROL_SPEED | MII_CONTROL_FDUPLEX |
2591f8919bdaSduboff MII_CONTROL_ANE | MII_CONTROL_RSAN);
2592f8919bdaSduboff
2593f8919bdaSduboff if (dp->full_duplex) {
2594f8919bdaSduboff val |= MII_CONTROL_FDUPLEX;
2595f8919bdaSduboff }
2596f8919bdaSduboff
2597f8919bdaSduboff switch (dp->speed) {
2598f8919bdaSduboff case GEM_SPD_1000:
2599f8919bdaSduboff val |= MII_CONTROL_1000MB;
2600f8919bdaSduboff break;
2601f8919bdaSduboff
2602f8919bdaSduboff case GEM_SPD_100:
2603f8919bdaSduboff val |= MII_CONTROL_100MB;
2604f8919bdaSduboff break;
2605f8919bdaSduboff
2606f8919bdaSduboff default:
2607f8919bdaSduboff cmn_err(CE_WARN, "%s: unknown speed:%d",
2608f8919bdaSduboff dp->name, dp->speed);
2609f8919bdaSduboff /* FALLTHROUGH */
2610f8919bdaSduboff case GEM_SPD_10:
2611f8919bdaSduboff /* for GEM_SPD_10, do nothing */
2612f8919bdaSduboff break;
2613f8919bdaSduboff }
2614f8919bdaSduboff
2615f8919bdaSduboff if (dp->mii_status & MII_STATUS_XSTATUS) {
2616f8919bdaSduboff gem_mii_write(dp,
2617f8919bdaSduboff MII_1000TC, MII_1000TC_CFG_EN);
2618f8919bdaSduboff }
2619f8919bdaSduboff gem_mii_write(dp, MII_CONTROL, val);
2620f8919bdaSduboff }
2621f8919bdaSduboff
2622f8919bdaSduboff if (dp->nic_state >= NIC_STATE_INITIALIZED) {
2623f8919bdaSduboff /* notify the result of auto-negotiation to mac */
2624f8919bdaSduboff (*dp->gc.gc_set_media)(dp);
2625f8919bdaSduboff }
2626f8919bdaSduboff
2627f8919bdaSduboff if ((void *)dp->gc.gc_mii_tune_phy) {
2628f8919bdaSduboff /* for built-in sis900 */
2629f8919bdaSduboff /* XXX - this code should be removed. */
2630f8919bdaSduboff (*dp->gc.gc_mii_tune_phy)(dp);
2631f8919bdaSduboff }
2632f8919bdaSduboff
2633f8919bdaSduboff goto next_nowait;
2634f8919bdaSduboff
2635f8919bdaSduboff case MII_STATE_LINKDOWN:
2636f8919bdaSduboff status = gem_mii_read(dp, MII_STATUS);
2637f8919bdaSduboff if (status & MII_STATUS_LINKUP) {
2638f8919bdaSduboff /*
2639f8919bdaSduboff * Link going up
2640f8919bdaSduboff */
2641f8919bdaSduboff dp->mii_state = MII_STATE_LINKUP;
2642f8919bdaSduboff dp->mii_supress_msg = B_FALSE;
2643f8919bdaSduboff
2644f8919bdaSduboff DPRINTF(0, (CE_CONT,
2645f8919bdaSduboff "!%s: link up detected: mii_stat:%b",
2646f8919bdaSduboff dp->name, status, MII_STATUS_BITS));
2647f8919bdaSduboff
2648f8919bdaSduboff /*
2649f8919bdaSduboff * MII_CONTROL_100MB and MII_CONTROL_FDUPLEX are
2650f8919bdaSduboff * ignored when MII_CONTROL_ANE is set.
2651f8919bdaSduboff */
2652f8919bdaSduboff cmn_err(CE_CONT,
2653f8919bdaSduboff "!%s: Link up: %d Mbps %s duplex %s flow control",
2654f8919bdaSduboff dp->name,
2655f8919bdaSduboff gem_speed_value[dp->speed],
2656f8919bdaSduboff dp->full_duplex ? "full" : "half",
2657f8919bdaSduboff gem_fc_type[dp->flow_control]);
2658f8919bdaSduboff
2659f8919bdaSduboff dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2660f8919bdaSduboff
2661f8919bdaSduboff /* XXX - we need other timer to watch statictics */
2662f8919bdaSduboff if (dp->gc.gc_mii_hw_link_detection &&
2663f8919bdaSduboff dp->nic_state == NIC_STATE_ONLINE) {
2664f8919bdaSduboff dp->mii_interval = 0;
2665f8919bdaSduboff }
2666f8919bdaSduboff
2667f8919bdaSduboff if (dp->nic_state == NIC_STATE_ONLINE) {
2668f8919bdaSduboff if (!dp->mac_active) {
2669f8919bdaSduboff (void) gem_mac_start(dp);
2670f8919bdaSduboff }
2671f8919bdaSduboff tx_sched = B_TRUE;
2672f8919bdaSduboff }
2673f8919bdaSduboff goto next;
2674f8919bdaSduboff }
2675f8919bdaSduboff
2676f8919bdaSduboff dp->mii_supress_msg = B_TRUE;
2677f8919bdaSduboff if (dp->anadv_autoneg) {
2678f8919bdaSduboff dp->mii_timer -= diff;
2679f8919bdaSduboff if (dp->mii_timer <= 0) {
2680f8919bdaSduboff /*
2681f8919bdaSduboff * link down timer expired.
2682f8919bdaSduboff * need to restart auto-negotiation.
2683f8919bdaSduboff */
2684f8919bdaSduboff linkdown_action =
2685f8919bdaSduboff dp->gc.gc_mii_linkdown_timeout_action;
2686f8919bdaSduboff goto restart_autonego;
2687f8919bdaSduboff }
2688f8919bdaSduboff }
2689f8919bdaSduboff /* don't change mii_state */
2690f8919bdaSduboff break;
2691f8919bdaSduboff
2692f8919bdaSduboff case MII_STATE_LINKUP:
2693f8919bdaSduboff status = gem_mii_read(dp, MII_STATUS);
2694f8919bdaSduboff if ((status & MII_STATUS_LINKUP) == 0) {
2695f8919bdaSduboff /*
2696f8919bdaSduboff * Link going down
2697f8919bdaSduboff */
2698f8919bdaSduboff cmn_err(CE_NOTE,
2699f8919bdaSduboff "!%s: link down detected: mii_stat:%b",
2700f8919bdaSduboff dp->name, status, MII_STATUS_BITS);
2701f8919bdaSduboff
2702f8919bdaSduboff if (dp->nic_state == NIC_STATE_ONLINE &&
2703f8919bdaSduboff dp->mac_active &&
2704f8919bdaSduboff dp->gc.gc_mii_stop_mac_on_linkdown) {
2705f8919bdaSduboff (void) gem_mac_stop(dp, 0);
270623d366e3Sduboff
270723d366e3Sduboff if (dp->tx_blocked) {
270823d366e3Sduboff /* drain tx */
270923d366e3Sduboff tx_sched = B_TRUE;
271023d366e3Sduboff }
2711f8919bdaSduboff }
2712f8919bdaSduboff
2713f8919bdaSduboff if (dp->anadv_autoneg) {
2714f8919bdaSduboff /* need to restart auto-negotiation */
2715f8919bdaSduboff linkdown_action = dp->gc.gc_mii_linkdown_action;
2716f8919bdaSduboff goto restart_autonego;
2717f8919bdaSduboff }
2718f8919bdaSduboff
2719f8919bdaSduboff dp->mii_state = MII_STATE_LINKDOWN;
2720f8919bdaSduboff dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
2721f8919bdaSduboff
2722f8919bdaSduboff if ((void *)dp->gc.gc_mii_tune_phy) {
2723f8919bdaSduboff /* for built-in sis900 */
2724f8919bdaSduboff (*dp->gc.gc_mii_tune_phy)(dp);
2725f8919bdaSduboff }
2726f8919bdaSduboff dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2727f8919bdaSduboff goto next;
2728f8919bdaSduboff }
2729f8919bdaSduboff
2730f8919bdaSduboff /* don't change mii_state */
2731f8919bdaSduboff if (dp->gc.gc_mii_hw_link_detection &&
2732f8919bdaSduboff dp->nic_state == NIC_STATE_ONLINE) {
2733f8919bdaSduboff dp->mii_interval = 0;
2734f8919bdaSduboff goto next;
2735f8919bdaSduboff }
2736f8919bdaSduboff break;
2737f8919bdaSduboff }
2738f8919bdaSduboff dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2739f8919bdaSduboff goto next;
2740f8919bdaSduboff
2741f8919bdaSduboff /* Actions on the end of state routine */
2742f8919bdaSduboff
2743f8919bdaSduboff restart_autonego:
2744f8919bdaSduboff switch (linkdown_action) {
2745f8919bdaSduboff case MII_ACTION_RESET:
2746f8919bdaSduboff if (!dp->mii_supress_msg) {
2747f8919bdaSduboff cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
2748f8919bdaSduboff }
2749f8919bdaSduboff dp->mii_supress_msg = B_TRUE;
2750f8919bdaSduboff goto reset_phy;
2751f8919bdaSduboff
2752f8919bdaSduboff case MII_ACTION_NONE:
2753f8919bdaSduboff dp->mii_supress_msg = B_TRUE;
2754f8919bdaSduboff if (dp->gc.gc_mii_an_oneshot) {
2755f8919bdaSduboff goto autonego;
2756f8919bdaSduboff }
2757f8919bdaSduboff /* PHY will restart autonego automatically */
2758f8919bdaSduboff dp->mii_state = MII_STATE_AUTONEGOTIATING;
2759f8919bdaSduboff dp->mii_timer = dp->gc.gc_mii_an_timeout;
2760f8919bdaSduboff dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2761f8919bdaSduboff goto next;
2762f8919bdaSduboff
2763f8919bdaSduboff case MII_ACTION_RSA:
2764f8919bdaSduboff if (!dp->mii_supress_msg) {
2765f8919bdaSduboff cmn_err(CE_CONT, "!%s: restarting auto-negotiation",
2766f8919bdaSduboff dp->name);
2767f8919bdaSduboff }
2768f8919bdaSduboff dp->mii_supress_msg = B_TRUE;
2769f8919bdaSduboff goto autonego;
2770f8919bdaSduboff
2771f8919bdaSduboff default:
2772f8919bdaSduboff cmn_err(CE_WARN, "!%s: unknowm linkdown action: %d",
2773f8919bdaSduboff dp->name, dp->gc.gc_mii_linkdown_action);
2774f8919bdaSduboff dp->mii_supress_msg = B_TRUE;
2775f8919bdaSduboff }
2776f8919bdaSduboff /* NOTREACHED */
2777f8919bdaSduboff
2778f8919bdaSduboff reset_phy:
2779f8919bdaSduboff if (!dp->mii_supress_msg) {
2780f8919bdaSduboff cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
2781f8919bdaSduboff }
2782f8919bdaSduboff dp->mii_state = MII_STATE_RESETTING;
2783f8919bdaSduboff dp->mii_timer = dp->gc.gc_mii_reset_timeout;
2784f8919bdaSduboff if (!dp->gc.gc_mii_dont_reset) {
2785f8919bdaSduboff gem_mii_write(dp, MII_CONTROL, MII_CONTROL_RESET);
2786f8919bdaSduboff }
2787f8919bdaSduboff dp->mii_interval = WATCH_INTERVAL_FAST;
2788f8919bdaSduboff goto next;
2789f8919bdaSduboff
2790f8919bdaSduboff autonego:
2791f8919bdaSduboff if (!dp->mii_supress_msg) {
2792f8919bdaSduboff cmn_err(CE_CONT, "!%s: auto-negotiation started", dp->name);
2793f8919bdaSduboff }
2794f8919bdaSduboff dp->mii_state = MII_STATE_AUTONEGOTIATING;
2795f8919bdaSduboff dp->mii_timer = dp->gc.gc_mii_an_timeout;
2796f8919bdaSduboff
2797f8919bdaSduboff /* start/restart auto nego */
2798f8919bdaSduboff val = gem_mii_read(dp, MII_CONTROL) &
2799f8919bdaSduboff ~(MII_CONTROL_ISOLATE | MII_CONTROL_PWRDN | MII_CONTROL_RESET);
2800f8919bdaSduboff
280123d366e3Sduboff gem_mii_write(dp, MII_CONTROL,
280223d366e3Sduboff val | MII_CONTROL_RSAN | MII_CONTROL_ANE);
2803f8919bdaSduboff
2804f8919bdaSduboff dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2805f8919bdaSduboff
2806f8919bdaSduboff next:
2807f8919bdaSduboff if (dp->link_watcher_id == 0 && dp->mii_interval) {
2808f8919bdaSduboff /* we must schedule next mii_watcher */
2809f8919bdaSduboff dp->link_watcher_id =
2810f8919bdaSduboff timeout((void (*)(void *))&gem_mii_link_watcher,
2811f8919bdaSduboff (void *)dp, dp->mii_interval);
2812f8919bdaSduboff }
2813f8919bdaSduboff
281423d366e3Sduboff if (old_mii_state != dp->mii_state) {
2815f8919bdaSduboff /* notify new mii link state */
2816f8919bdaSduboff if (dp->mii_state == MII_STATE_LINKUP) {
281723d366e3Sduboff dp->linkup_delay = 0;
2818f8919bdaSduboff GEM_LINKUP(dp);
281923d366e3Sduboff } else if (dp->linkup_delay <= 0) {
2820f8919bdaSduboff GEM_LINKDOWN(dp);
2821f8919bdaSduboff }
282223d366e3Sduboff } else if (dp->linkup_delay < 0) {
282323d366e3Sduboff /* first linkup timeout */
282423d366e3Sduboff dp->linkup_delay = 0;
282523d366e3Sduboff GEM_LINKDOWN(dp);
2826f8919bdaSduboff }
282723d366e3Sduboff
2828f8919bdaSduboff return (tx_sched);
2829f8919bdaSduboff }
2830f8919bdaSduboff
2831f8919bdaSduboff static void
gem_mii_link_watcher(struct gem_dev * dp)2832f8919bdaSduboff gem_mii_link_watcher(struct gem_dev *dp)
2833f8919bdaSduboff {
2834f8919bdaSduboff boolean_t tx_sched;
2835f8919bdaSduboff
2836f8919bdaSduboff mutex_enter(&dp->intrlock);
2837f8919bdaSduboff
2838f8919bdaSduboff dp->link_watcher_id = 0;
2839f8919bdaSduboff tx_sched = gem_mii_link_check(dp);
2840f8919bdaSduboff #if GEM_DEBUG_LEVEL > 2
2841f8919bdaSduboff if (dp->link_watcher_id == 0) {
2842f8919bdaSduboff cmn_err(CE_CONT, "%s: link watcher stopped", dp->name);
2843f8919bdaSduboff }
2844f8919bdaSduboff #endif
2845f8919bdaSduboff mutex_exit(&dp->intrlock);
2846f8919bdaSduboff
2847f8919bdaSduboff if (tx_sched) {
2848f8919bdaSduboff /* kick potentially stopped downstream */
2849f8919bdaSduboff mac_tx_update(dp->mh);
2850f8919bdaSduboff }
2851f8919bdaSduboff }
2852f8919bdaSduboff
2853f8919bdaSduboff int
gem_mii_probe_default(struct gem_dev * dp)2854f8919bdaSduboff gem_mii_probe_default(struct gem_dev *dp)
2855f8919bdaSduboff {
2856f8919bdaSduboff int8_t phy;
2857f8919bdaSduboff uint16_t status;
2858f8919bdaSduboff uint16_t adv;
2859f8919bdaSduboff uint16_t adv_org;
2860f8919bdaSduboff
2861f8919bdaSduboff DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2862f8919bdaSduboff
2863f8919bdaSduboff /*
2864f8919bdaSduboff * Scan PHY
2865f8919bdaSduboff */
2866f8919bdaSduboff /* ensure to send sync bits */
2867f8919bdaSduboff dp->mii_status = 0;
2868f8919bdaSduboff
2869f8919bdaSduboff /* Try default phy first */
2870f8919bdaSduboff if (dp->mii_phy_addr) {
2871f8919bdaSduboff status = gem_mii_read(dp, MII_STATUS);
2872f8919bdaSduboff if (status != 0xffff && status != 0) {
2873f8919bdaSduboff gem_mii_write(dp, MII_CONTROL, 0);
2874f8919bdaSduboff goto PHY_found;
2875f8919bdaSduboff }
2876f8919bdaSduboff
2877f8919bdaSduboff if (dp->mii_phy_addr < 0) {
2878f8919bdaSduboff cmn_err(CE_NOTE,
2879f8919bdaSduboff "!%s: failed to probe default internal and/or non-MII PHY",
2880f8919bdaSduboff dp->name);
2881f8919bdaSduboff return (GEM_FAILURE);
2882f8919bdaSduboff }
2883f8919bdaSduboff
2884f8919bdaSduboff cmn_err(CE_NOTE,
2885f8919bdaSduboff "!%s: failed to probe default MII PHY at %d",
2886f8919bdaSduboff dp->name, dp->mii_phy_addr);
2887f8919bdaSduboff }
2888f8919bdaSduboff
2889f8919bdaSduboff /* Try all possible address */
2890f8919bdaSduboff for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
2891f8919bdaSduboff dp->mii_phy_addr = phy;
2892f8919bdaSduboff status = gem_mii_read(dp, MII_STATUS);
2893f8919bdaSduboff
2894f8919bdaSduboff if (status != 0xffff && status != 0) {
2895f8919bdaSduboff gem_mii_write(dp, MII_CONTROL, 0);
2896f8919bdaSduboff goto PHY_found;
2897f8919bdaSduboff }
2898f8919bdaSduboff }
2899f8919bdaSduboff
2900f8919bdaSduboff for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
2901f8919bdaSduboff dp->mii_phy_addr = phy;
2902f8919bdaSduboff gem_mii_write(dp, MII_CONTROL, 0);
2903f8919bdaSduboff status = gem_mii_read(dp, MII_STATUS);
2904f8919bdaSduboff
2905f8919bdaSduboff if (status != 0xffff && status != 0) {
2906f8919bdaSduboff goto PHY_found;
2907f8919bdaSduboff }
2908f8919bdaSduboff }
2909f8919bdaSduboff
2910f8919bdaSduboff cmn_err(CE_NOTE, "!%s: no MII PHY found", dp->name);
2911f8919bdaSduboff dp->mii_phy_addr = -1;
2912f8919bdaSduboff
2913f8919bdaSduboff return (GEM_FAILURE);
2914f8919bdaSduboff
2915f8919bdaSduboff PHY_found:
2916f8919bdaSduboff dp->mii_status = status;
2917f8919bdaSduboff dp->mii_phy_id = (gem_mii_read(dp, MII_PHYIDH) << 16) |
2918f8919bdaSduboff gem_mii_read(dp, MII_PHYIDL);
2919f8919bdaSduboff
2920f8919bdaSduboff if (dp->mii_phy_addr < 0) {
2921f8919bdaSduboff cmn_err(CE_CONT, "!%s: using internal/non-MII PHY(0x%08x)",
2922f8919bdaSduboff dp->name, dp->mii_phy_id);
2923f8919bdaSduboff } else {
2924f8919bdaSduboff cmn_err(CE_CONT, "!%s: MII PHY (0x%08x) found at %d",
2925f8919bdaSduboff dp->name, dp->mii_phy_id, dp->mii_phy_addr);
2926f8919bdaSduboff }
2927f8919bdaSduboff
2928f8919bdaSduboff cmn_err(CE_CONT, "!%s: PHY control:%b, status:%b, advert:%b, lpar:%b",
2929f8919bdaSduboff dp->name,
2930f8919bdaSduboff gem_mii_read(dp, MII_CONTROL), MII_CONTROL_BITS,
2931f8919bdaSduboff status, MII_STATUS_BITS,
2932f8919bdaSduboff gem_mii_read(dp, MII_AN_ADVERT), MII_ABILITY_BITS,
2933f8919bdaSduboff gem_mii_read(dp, MII_AN_LPABLE), MII_ABILITY_BITS);
2934f8919bdaSduboff
2935f8919bdaSduboff dp->mii_xstatus = 0;
2936f8919bdaSduboff if (status & MII_STATUS_XSTATUS) {
2937f8919bdaSduboff dp->mii_xstatus = gem_mii_read(dp, MII_XSTATUS);
2938f8919bdaSduboff
2939f8919bdaSduboff cmn_err(CE_CONT, "!%s: xstatus:%b",
2940f8919bdaSduboff dp->name, dp->mii_xstatus, MII_XSTATUS_BITS);
2941f8919bdaSduboff }
2942f8919bdaSduboff
2943f8919bdaSduboff /* check if the phy can advertize pause abilities */
2944f8919bdaSduboff adv_org = gem_mii_read(dp, MII_AN_ADVERT);
2945f8919bdaSduboff
2946f8919bdaSduboff gem_mii_write(dp, MII_AN_ADVERT,
2947bdb9230aSGarrett D'Amore MII_ABILITY_PAUSE | MII_ABILITY_ASMPAUSE);
2948f8919bdaSduboff
2949f8919bdaSduboff adv = gem_mii_read(dp, MII_AN_ADVERT);
2950f8919bdaSduboff
2951f8919bdaSduboff if ((adv & MII_ABILITY_PAUSE) == 0) {
2952f8919bdaSduboff dp->gc.gc_flow_control &= ~1;
2953f8919bdaSduboff }
2954f8919bdaSduboff
2955bdb9230aSGarrett D'Amore if ((adv & MII_ABILITY_ASMPAUSE) == 0) {
2956f8919bdaSduboff dp->gc.gc_flow_control &= ~2;
2957f8919bdaSduboff }
2958f8919bdaSduboff
2959f8919bdaSduboff gem_mii_write(dp, MII_AN_ADVERT, adv_org);
2960f8919bdaSduboff
2961f8919bdaSduboff return (GEM_SUCCESS);
2962f8919bdaSduboff }
2963f8919bdaSduboff
2964f8919bdaSduboff static void
gem_mii_start(struct gem_dev * dp)2965f8919bdaSduboff gem_mii_start(struct gem_dev *dp)
2966f8919bdaSduboff {
2967f8919bdaSduboff DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2968f8919bdaSduboff
2969f8919bdaSduboff /* make a first call of check link */
2970f8919bdaSduboff dp->mii_state = MII_STATE_UNKNOWN;
2971f8919bdaSduboff dp->mii_last_check = ddi_get_lbolt();
297223d366e3Sduboff dp->linkup_delay = dp->gc.gc_mii_linkdown_timeout;
2973f8919bdaSduboff (void) gem_mii_link_watcher(dp);
2974f8919bdaSduboff }
2975f8919bdaSduboff
/*
 * gem_mii_stop: stop the MII link monitoring machinery.
 *
 * Cancels the link watcher timeout under intrlock.  untimeout() returns
 * -1 while the callout is currently executing; in that case we retry,
 * re-reading dp->link_watcher_id each iteration so that a callout which
 * re-arms itself is also cancelled.
 */
static void
gem_mii_stop(struct gem_dev *dp)
{
	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/* Ensure timer routine stopped */
	mutex_enter(&dp->intrlock);
	if (dp->link_watcher_id) {
		/* spin until the pending/running callout is truly gone */
		while (untimeout(dp->link_watcher_id) == -1)
			;
		dp->link_watcher_id = 0;
	}
	mutex_exit(&dp->intrlock);
}
2990f8919bdaSduboff
2991f8919bdaSduboff boolean_t
gem_get_mac_addr_conf(struct gem_dev * dp)2992f8919bdaSduboff gem_get_mac_addr_conf(struct gem_dev *dp)
2993f8919bdaSduboff {
2994f8919bdaSduboff char propname[32];
2995f8919bdaSduboff char *valstr;
2996f8919bdaSduboff uint8_t mac[ETHERADDRL];
2997f8919bdaSduboff char *cp;
2998f8919bdaSduboff int c;
2999f8919bdaSduboff int i;
3000f8919bdaSduboff int j;
3001f8919bdaSduboff uint8_t v;
3002f8919bdaSduboff uint8_t d;
3003f8919bdaSduboff uint8_t ored;
3004f8919bdaSduboff
3005f8919bdaSduboff DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3006f8919bdaSduboff /*
3007f8919bdaSduboff * Get ethernet address from .conf file
3008f8919bdaSduboff */
3009f8919bdaSduboff (void) sprintf(propname, "mac-addr");
3010f8919bdaSduboff if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dp->dip,
3011f8919bdaSduboff DDI_PROP_DONTPASS, propname, &valstr)) !=
3012f8919bdaSduboff DDI_PROP_SUCCESS) {
3013f8919bdaSduboff return (B_FALSE);
3014f8919bdaSduboff }
3015f8919bdaSduboff
3016f8919bdaSduboff if (strlen(valstr) != ETHERADDRL*3-1) {
3017f8919bdaSduboff goto syntax_err;
3018f8919bdaSduboff }
3019f8919bdaSduboff
3020f8919bdaSduboff cp = valstr;
3021f8919bdaSduboff j = 0;
3022f8919bdaSduboff ored = 0;
3023f8919bdaSduboff for (;;) {
3024f8919bdaSduboff v = 0;
3025f8919bdaSduboff for (i = 0; i < 2; i++) {
3026f8919bdaSduboff c = *cp++;
3027f8919bdaSduboff
3028f8919bdaSduboff if (c >= 'a' && c <= 'f') {
3029f8919bdaSduboff d = c - 'a' + 10;
3030f8919bdaSduboff } else if (c >= 'A' && c <= 'F') {
3031f8919bdaSduboff d = c - 'A' + 10;
3032f8919bdaSduboff } else if (c >= '0' && c <= '9') {
3033f8919bdaSduboff d = c - '0';
3034f8919bdaSduboff } else {
3035f8919bdaSduboff goto syntax_err;
3036f8919bdaSduboff }
3037f8919bdaSduboff v = (v << 4) | d;
3038f8919bdaSduboff }
3039f8919bdaSduboff
3040f8919bdaSduboff mac[j++] = v;
3041f8919bdaSduboff ored |= v;
3042f8919bdaSduboff if (j == ETHERADDRL) {
3043f8919bdaSduboff /* done */
3044f8919bdaSduboff break;
3045f8919bdaSduboff }
3046f8919bdaSduboff
3047f8919bdaSduboff c = *cp++;
3048f8919bdaSduboff if (c != ':') {
3049f8919bdaSduboff goto syntax_err;
3050f8919bdaSduboff }
3051f8919bdaSduboff }
3052f8919bdaSduboff
3053f8919bdaSduboff if (ored == 0) {
3054f8919bdaSduboff goto err;
3055f8919bdaSduboff }
3056f8919bdaSduboff for (i = 0; i < ETHERADDRL; i++) {
3057f8919bdaSduboff dp->dev_addr.ether_addr_octet[i] = mac[i];
3058f8919bdaSduboff }
3059f8919bdaSduboff ddi_prop_free(valstr);
3060f8919bdaSduboff return (B_TRUE);
3061f8919bdaSduboff
3062f8919bdaSduboff syntax_err:
3063f8919bdaSduboff cmn_err(CE_CONT,
3064f8919bdaSduboff "!%s: read mac addr: trying .conf: syntax err %s",
3065f8919bdaSduboff dp->name, valstr);
3066f8919bdaSduboff err:
3067f8919bdaSduboff ddi_prop_free(valstr);
3068f8919bdaSduboff
3069f8919bdaSduboff return (B_FALSE);
3070f8919bdaSduboff }
3071f8919bdaSduboff
3072f8919bdaSduboff
3073f8919bdaSduboff /* ============================================================== */
3074f8919bdaSduboff /*
3075f8919bdaSduboff * internal start/stop interface
3076f8919bdaSduboff */
3077f8919bdaSduboff /* ============================================================== */
/*
 * gem_mac_set_rx_filter: program the hardware rx filter.
 *
 * Thin wrapper around the chip-specific gc_set_rx_filter method; the
 * chip driver picks up dp->rxmode / dp->mc_list.  Returns the chip
 * driver's result (GEM_SUCCESS/GEM_FAILURE).
 */
static int
gem_mac_set_rx_filter(struct gem_dev *dp)
{
	return ((*dp->gc.gc_set_rx_filter)(dp));
}
3083f8919bdaSduboff
/*
 * gem_mac_init: cold start
 *
 * Initializes the software tx/rx rings and transmitter bookkeeping,
 * then initializes the hardware through the chip-specific gc_init_chip
 * method and posts the prepared rx buffers.  The mac is left inactive;
 * gem_mac_start() actually enables tx/rx.  Returns GEM_FAILURE if the
 * device is suspended or chip initialization fails.
 */
static int
gem_mac_init(struct gem_dev *dp)
{
	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/* never touch the hardware while suspended */
	if (dp->mac_suspended) {
		return (GEM_FAILURE);
	}

	dp->mac_active = B_FALSE;

	gem_init_rx_ring(dp);
	gem_init_tx_ring(dp);

	/* reset transmitter state */
	dp->tx_blocked = (clock_t)0;
	dp->tx_busy = 0;
	dp->tx_reclaim_busy = 0;
	dp->tx_max_packets = dp->gc.gc_tx_buf_limit;

	if ((*dp->gc.gc_init_chip)(dp) != GEM_SUCCESS) {
		return (GEM_FAILURE);
	}

	gem_prepare_rx_buf(dp);

	return (GEM_SUCCESS);
}
/*
 * gem_mac_start: warm start
 *
 * Marks the mac active, hands the currently queued rx buffers to the
 * nic, starts the chip, and (re)loads any untransmitted tx buffers
 * left in the soft queue.  Must be called with intrlock held, with the
 * nic online and the link up.  Returns GEM_FAILURE if the device is
 * suspended or the chip refuses to start.
 */
static int
gem_mac_start(struct gem_dev *dp)
{
	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	ASSERT(mutex_owned(&dp->intrlock));
	ASSERT(dp->nic_state == NIC_STATE_ONLINE);
	ASSERT(dp->mii_state == MII_STATE_LINKUP);

	/* enable tx and rx */
	mutex_enter(&dp->xmitlock);
	if (dp->mac_suspended) {
		mutex_exit(&dp->xmitlock);
		return (GEM_FAILURE);
	}
	dp->mac_active = B_TRUE;
	mutex_exit(&dp->xmitlock);

	/* setup rx buffers */
	(*dp->gc.gc_rx_start)(dp,
	    SLOT(dp->rx_active_head, dp->gc.gc_rx_ring_size),
	    dp->rx_active_tail - dp->rx_active_head);

	if ((*dp->gc.gc_start_chip)(dp) != GEM_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s: start_chip: failed",
		    dp->name, __func__);
		return (GEM_FAILURE);
	}

	mutex_enter(&dp->xmitlock);

	/* load untransmitted packets to the nic */
	ASSERT(dp->tx_softq_tail - dp->tx_softq_head >= 0);
	if (dp->tx_softq_tail - dp->tx_softq_head > 0) {
		/* GEM_TXFLAG_HEAD: these descriptors go in ahead of new ones */
		gem_tx_load_descs_oo(dp,
		    dp->tx_softq_head, dp->tx_softq_tail,
		    GEM_TXFLAG_HEAD);
		/* issue preloaded tx buffers */
		gem_tx_start_unit(dp);
	}

	mutex_exit(&dp->xmitlock);

	return (GEM_SUCCESS);
}
3163f8919bdaSduboff
/*
 * gem_mac_stop: stop tx/rx activity and quiesce the nic.
 *
 * flags:
 *   GEM_RESTART_NOWAIT   - don't wait for in-flight tx buffers to drain
 *   GEM_RESTART_KEEP_BUF - keep active tx/rx buffers for a later
 *                          restart instead of discarding them
 *
 * Must be called with intrlock held and xmitlock NOT held.  Blocks
 * transmits, optionally drains the tx ring (bounded by an estimate of
 * the wire time for the outstanding bytes), stops the chip (resetting
 * it as a fallback if the stop fails), flushes rx buffers and pended
 * tx packets, and snapshots the final hardware statistics.
 * Returns GEM_SUCCESS (also when the device is already suspended).
 */
static int
gem_mac_stop(struct gem_dev *dp, uint_t flags)
{
	int		i;
	int		wait_time;	/* in uS */
#ifdef GEM_DEBUG_LEVEL
	clock_t		now;
#endif
	int		ret = GEM_SUCCESS;

	DPRINTF(1, (CE_CONT, "!%s: %s: called, rx_buf_free:%d",
	    dp->name, __func__, dp->rx_buf_freecnt));

	ASSERT(mutex_owned(&dp->intrlock));
	ASSERT(!mutex_owned(&dp->xmitlock));

	/*
	 * Block transmits
	 */
	mutex_enter(&dp->xmitlock);
	if (dp->mac_suspended) {
		mutex_exit(&dp->xmitlock);
		return (GEM_SUCCESS);
	}
	dp->mac_active = B_FALSE;

	/* wait until senders currently inside the tx path have left */
	while (dp->tx_busy > 0) {
		cv_wait(&dp->tx_drain_cv, &dp->xmitlock);
	}
	mutex_exit(&dp->xmitlock);

	if ((flags & GEM_RESTART_NOWAIT) == 0) {
		/*
		 * Wait for all tx buffers sent.
		 */
		/*
		 * Upper bound on drain time: 2x the wire time of the
		 * outstanding packets at the current link speed.
		 */
		wait_time =
		    2 * (8 * MAXPKTBUF(dp) / gem_speed_value[dp->speed]) *
		    (dp->tx_active_tail - dp->tx_active_head);

		DPRINTF(0, (CE_CONT, "%s: %s: max drain time: %d uS",
		    dp->name, __func__, wait_time));
		i = 0;
#ifdef GEM_DEBUG_LEVEL
		now = ddi_get_lbolt();
#endif
		/* poll in 100uS steps until drained or timed out */
		while (dp->tx_active_tail != dp->tx_active_head) {
			if (i > wait_time) {
				/* timeout */
				cmn_err(CE_NOTE, "%s: %s timeout: tx drain",
				    dp->name, __func__);
				break;
			}
			(void) gem_reclaim_txbuf(dp);
			drv_usecwait(100);
			i += 100;
		}
		DPRINTF(0, (CE_NOTE,
		    "!%s: %s: the nic have drained in %d uS, real %d mS",
		    dp->name, __func__, i,
		    10*((int)(ddi_get_lbolt() - now))));
	}

	/*
	 * Now we can stop the nic safely.
	 */
	if ((*dp->gc.gc_stop_chip)(dp) != GEM_SUCCESS) {
		cmn_err(CE_NOTE, "%s: %s: resetting the chip to stop it",
		    dp->name, __func__);
		if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
			cmn_err(CE_WARN, "%s: %s: failed to reset chip",
			    dp->name, __func__);
		}
	}

	/*
	 * Clear all rx buffers
	 */
	if (flags & GEM_RESTART_KEEP_BUF) {
		/* pick up frames already received before discarding */
		(void) gem_receive(dp);
	}
	gem_clean_rx_buf(dp);

	/*
	 * Update final statistics
	 */
	(*dp->gc.gc_get_stats)(dp);

	/*
	 * Clear all pended tx packets
	 */
	ASSERT(dp->tx_active_tail == dp->tx_softq_head);
	ASSERT(dp->tx_softq_tail == dp->tx_free_head);
	if (flags & GEM_RESTART_KEEP_BUF) {
		/* restore active tx buffers */
		dp->tx_active_tail = dp->tx_active_head;
		dp->tx_softq_head = dp->tx_active_head;
	} else {
		gem_clean_tx_buf(dp);
	}

	return (ret);
}
3266f8919bdaSduboff
/*
 * gem_add_multicast: add a multicast address to the rx filter.
 *
 * Appends the address to the software mclist (capped at GEM_MAXMC
 * entries).  mc_count_req counts every request, while mc_count only
 * counts stored entries; when they differ the list has overflowed and
 * RXMODE_MULTI_OVF is set so the chip driver can fall back to
 * accept-all-multicast.  Finally reprograms the hardware filter.
 * Returns the chip driver's result, or GEM_FAILURE when suspended.
 */
static int
gem_add_multicast(struct gem_dev *dp, const uint8_t *ep)
{
	int		cnt;
	int		err;

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	mutex_enter(&dp->intrlock);
	if (dp->mac_suspended) {
		mutex_exit(&dp->intrlock);
		return (GEM_FAILURE);
	}

	if (dp->mc_count_req++ < GEM_MAXMC) {
		/* append the new address at the end of the mclist */
		cnt = dp->mc_count;
		bcopy(ep, dp->mc_list[cnt].addr.ether_addr_octet,
		    ETHERADDRL);
		if (dp->gc.gc_multicast_hash) {
			/* pre-compute the chip-specific hash for the filter */
			dp->mc_list[cnt].hash =
			    (*dp->gc.gc_multicast_hash)(dp, (uint8_t *)ep);
		}
		dp->mc_count = cnt + 1;
	}

	if (dp->mc_count_req != dp->mc_count) {
		/* multicast address list overflow */
		dp->rxmode |= RXMODE_MULTI_OVF;
	} else {
		dp->rxmode &= ~RXMODE_MULTI_OVF;
	}

	/* tell new multicast list to the hardware */
	err = gem_mac_set_rx_filter(dp);

	mutex_exit(&dp->intrlock);

	return (err);
}
3307f8919bdaSduboff
3308f8919bdaSduboff static int
gem_remove_multicast(struct gem_dev * dp,const uint8_t * ep)3309f8919bdaSduboff gem_remove_multicast(struct gem_dev *dp, const uint8_t *ep)
3310f8919bdaSduboff {
3311f8919bdaSduboff size_t len;
3312f8919bdaSduboff int i;
3313f8919bdaSduboff int cnt;
3314f8919bdaSduboff int err;
3315f8919bdaSduboff
3316f8919bdaSduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3317f8919bdaSduboff
3318f8919bdaSduboff mutex_enter(&dp->intrlock);
3319f8919bdaSduboff if (dp->mac_suspended) {
3320f8919bdaSduboff mutex_exit(&dp->intrlock);
3321f8919bdaSduboff return (GEM_FAILURE);
3322f8919bdaSduboff }
3323f8919bdaSduboff
3324f8919bdaSduboff dp->mc_count_req--;
3325f8919bdaSduboff cnt = dp->mc_count;
3326f8919bdaSduboff for (i = 0; i < cnt; i++) {
3327f8919bdaSduboff if (bcmp(ep, &dp->mc_list[i].addr, ETHERADDRL)) {
3328f8919bdaSduboff continue;
3329f8919bdaSduboff }
3330f8919bdaSduboff /* shrink the mclist by copying forward */
3331f8919bdaSduboff len = (cnt - (i + 1)) * sizeof (*dp->mc_list);
3332f8919bdaSduboff if (len > 0) {
3333f8919bdaSduboff bcopy(&dp->mc_list[i+1], &dp->mc_list[i], len);
3334f8919bdaSduboff }
3335f8919bdaSduboff dp->mc_count--;
3336f8919bdaSduboff break;
3337f8919bdaSduboff }
3338f8919bdaSduboff
3339f8919bdaSduboff if (dp->mc_count_req != dp->mc_count) {
3340f8919bdaSduboff /* multicast address list overflow */
3341f8919bdaSduboff dp->rxmode |= RXMODE_MULTI_OVF;
3342f8919bdaSduboff } else {
3343f8919bdaSduboff dp->rxmode &= ~RXMODE_MULTI_OVF;
3344f8919bdaSduboff }
3345f8919bdaSduboff /* In gem v2, don't hold xmitlock on calling set_rx_filter */
3346f8919bdaSduboff err = gem_mac_set_rx_filter(dp);
3347f8919bdaSduboff
3348f8919bdaSduboff mutex_exit(&dp->intrlock);
3349f8919bdaSduboff
3350f8919bdaSduboff return (err);
3351f8919bdaSduboff }
3352f8919bdaSduboff
3353f8919bdaSduboff /* ============================================================== */
3354f8919bdaSduboff /*
3355f8919bdaSduboff * ND interface
3356f8919bdaSduboff */
3357f8919bdaSduboff /* ============================================================== */
/*
 * ndd parameter indices.  The order defines the layout of the
 * gem_nd_arg array allocated in gem_nd_setup().
 */
enum {
	/* local hardware capabilities (read-only) */
	PARAM_AUTONEG_CAP,
	PARAM_PAUSE_CAP,
	PARAM_ASYM_PAUSE_CAP,
	PARAM_1000FDX_CAP,
	PARAM_1000HDX_CAP,
	PARAM_100T4_CAP,
	PARAM_100FDX_CAP,
	PARAM_100HDX_CAP,
	PARAM_10FDX_CAP,
	PARAM_10HDX_CAP,

	/* capabilities we advertise (writable where the hw supports them) */
	PARAM_ADV_AUTONEG_CAP,
	PARAM_ADV_PAUSE_CAP,
	PARAM_ADV_ASYM_PAUSE_CAP,
	PARAM_ADV_1000FDX_CAP,
	PARAM_ADV_1000HDX_CAP,
	PARAM_ADV_100T4_CAP,
	PARAM_ADV_100FDX_CAP,
	PARAM_ADV_100HDX_CAP,
	PARAM_ADV_10FDX_CAP,
	PARAM_ADV_10HDX_CAP,

	/* capabilities the link partner advertised (read-only) */
	PARAM_LP_AUTONEG_CAP,
	PARAM_LP_PAUSE_CAP,
	PARAM_LP_ASYM_PAUSE_CAP,
	PARAM_LP_1000FDX_CAP,
	PARAM_LP_1000HDX_CAP,
	PARAM_LP_100T4_CAP,
	PARAM_LP_100FDX_CAP,
	PARAM_LP_100HDX_CAP,
	PARAM_LP_10FDX_CAP,
	PARAM_LP_10HDX_CAP,

	/* current resolved link state (read-only) */
	PARAM_LINK_STATUS,
	PARAM_LINK_SPEED,
	PARAM_LINK_DUPLEX,

	PARAM_LINK_AUTONEG,
	PARAM_LINK_RX_PAUSE,
	PARAM_LINK_TX_PAUSE,

	PARAM_LOOP_MODE,
	PARAM_MSI_CNT,

#ifdef DEBUG_RESUME
	PARAM_RESUME_TEST,
#endif
	PARAM_COUNT	/* number of parameters; must be last */
};
3408f8919bdaSduboff
/*
 * Return codes of ioctl subroutines, telling the caller how to
 * complete the STREAMS ioctl message.
 */
enum ioc_reply {
	IOC_INVAL = -1,	/* bad, NAK with EINVAL */
	IOC_DONE,	/* OK, reply sent */
	IOC_ACK,	/* OK, just send ACK */
	IOC_REPLY,	/* OK, just send reply */
	IOC_RESTART_ACK,	/* OK, restart & ACK */
	IOC_RESTART_REPLY	/* OK, restart & reply */
};
3417f8919bdaSduboff
/*
 * Cookie handed to gem_param_get()/gem_param_set() through nd_load();
 * identifies the device instance and which PARAM_* item the ndd
 * callback operates on.
 */
struct gem_nd_arg {
	struct gem_dev	*dp;	/* device instance */
	int		item;	/* PARAM_* index */
};
3422f8919bdaSduboff
3423f8919bdaSduboff static int
gem_param_get(queue_t * q,mblk_t * mp,caddr_t arg,cred_t * credp)3424f8919bdaSduboff gem_param_get(queue_t *q, mblk_t *mp, caddr_t arg, cred_t *credp)
3425f8919bdaSduboff {
3426f8919bdaSduboff struct gem_dev *dp = ((struct gem_nd_arg *)(void *)arg)->dp;
3427f8919bdaSduboff int item = ((struct gem_nd_arg *)(void *)arg)->item;
3428f8919bdaSduboff long val;
3429f8919bdaSduboff
3430f8919bdaSduboff DPRINTF(0, (CE_CONT, "!%s: %s: called, item:%d",
3431f8919bdaSduboff dp->name, __func__, item));
3432f8919bdaSduboff
3433f8919bdaSduboff switch (item) {
3434f8919bdaSduboff case PARAM_AUTONEG_CAP:
3435f8919bdaSduboff val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
3436f8919bdaSduboff DPRINTF(0, (CE_CONT, "autoneg_cap:%d", val));
3437f8919bdaSduboff break;
3438f8919bdaSduboff
3439f8919bdaSduboff case PARAM_PAUSE_CAP:
3440f8919bdaSduboff val = BOOLEAN(dp->gc.gc_flow_control & 1);
3441f8919bdaSduboff break;
3442f8919bdaSduboff
3443f8919bdaSduboff case PARAM_ASYM_PAUSE_CAP:
3444f8919bdaSduboff val = BOOLEAN(dp->gc.gc_flow_control & 2);
3445f8919bdaSduboff break;
3446f8919bdaSduboff
3447f8919bdaSduboff case PARAM_1000FDX_CAP:
3448f8919bdaSduboff val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
3449f8919bdaSduboff (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
3450f8919bdaSduboff break;
3451f8919bdaSduboff
3452f8919bdaSduboff case PARAM_1000HDX_CAP:
3453f8919bdaSduboff val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
3454f8919bdaSduboff (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
3455f8919bdaSduboff break;
3456f8919bdaSduboff
3457f8919bdaSduboff case PARAM_100T4_CAP:
3458f8919bdaSduboff val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
3459f8919bdaSduboff break;
3460f8919bdaSduboff
3461f8919bdaSduboff case PARAM_100FDX_CAP:
3462f8919bdaSduboff val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
3463f8919bdaSduboff break;
3464f8919bdaSduboff
3465f8919bdaSduboff case PARAM_100HDX_CAP:
3466f8919bdaSduboff val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
3467f8919bdaSduboff break;
3468f8919bdaSduboff
3469f8919bdaSduboff case PARAM_10FDX_CAP:
3470f8919bdaSduboff val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
3471f8919bdaSduboff break;
3472f8919bdaSduboff
3473f8919bdaSduboff case PARAM_10HDX_CAP:
3474f8919bdaSduboff val = BOOLEAN(dp->mii_status & MII_STATUS_10);
3475f8919bdaSduboff break;
3476f8919bdaSduboff
3477f8919bdaSduboff case PARAM_ADV_AUTONEG_CAP:
3478f8919bdaSduboff val = dp->anadv_autoneg;
3479f8919bdaSduboff break;
3480f8919bdaSduboff
3481f8919bdaSduboff case PARAM_ADV_PAUSE_CAP:
3482f8919bdaSduboff val = BOOLEAN(dp->anadv_flow_control & 1);
3483f8919bdaSduboff break;
3484f8919bdaSduboff
3485f8919bdaSduboff case PARAM_ADV_ASYM_PAUSE_CAP:
3486f8919bdaSduboff val = BOOLEAN(dp->anadv_flow_control & 2);
3487f8919bdaSduboff break;
3488f8919bdaSduboff
3489f8919bdaSduboff case PARAM_ADV_1000FDX_CAP:
3490f8919bdaSduboff val = dp->anadv_1000fdx;
3491f8919bdaSduboff break;
3492f8919bdaSduboff
3493f8919bdaSduboff case PARAM_ADV_1000HDX_CAP:
3494f8919bdaSduboff val = dp->anadv_1000hdx;
3495f8919bdaSduboff break;
3496f8919bdaSduboff
3497f8919bdaSduboff case PARAM_ADV_100T4_CAP:
3498f8919bdaSduboff val = dp->anadv_100t4;
3499f8919bdaSduboff break;
3500f8919bdaSduboff
3501f8919bdaSduboff case PARAM_ADV_100FDX_CAP:
3502f8919bdaSduboff val = dp->anadv_100fdx;
3503f8919bdaSduboff break;
3504f8919bdaSduboff
3505f8919bdaSduboff case PARAM_ADV_100HDX_CAP:
3506f8919bdaSduboff val = dp->anadv_100hdx;
3507f8919bdaSduboff break;
3508f8919bdaSduboff
3509f8919bdaSduboff case PARAM_ADV_10FDX_CAP:
3510f8919bdaSduboff val = dp->anadv_10fdx;
3511f8919bdaSduboff break;
3512f8919bdaSduboff
3513f8919bdaSduboff case PARAM_ADV_10HDX_CAP:
3514f8919bdaSduboff val = dp->anadv_10hdx;
3515f8919bdaSduboff break;
3516f8919bdaSduboff
3517f8919bdaSduboff case PARAM_LP_AUTONEG_CAP:
3518f8919bdaSduboff val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3519f8919bdaSduboff break;
3520f8919bdaSduboff
3521f8919bdaSduboff case PARAM_LP_PAUSE_CAP:
3522f8919bdaSduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
3523f8919bdaSduboff break;
3524f8919bdaSduboff
3525f8919bdaSduboff case PARAM_LP_ASYM_PAUSE_CAP:
3526bdb9230aSGarrett D'Amore val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASMPAUSE);
3527f8919bdaSduboff break;
3528f8919bdaSduboff
3529f8919bdaSduboff case PARAM_LP_1000FDX_CAP:
3530f8919bdaSduboff val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
3531f8919bdaSduboff break;
3532f8919bdaSduboff
3533f8919bdaSduboff case PARAM_LP_1000HDX_CAP:
3534f8919bdaSduboff val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
3535f8919bdaSduboff break;
3536f8919bdaSduboff
3537f8919bdaSduboff case PARAM_LP_100T4_CAP:
3538f8919bdaSduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
3539f8919bdaSduboff break;
3540f8919bdaSduboff
3541f8919bdaSduboff case PARAM_LP_100FDX_CAP:
3542f8919bdaSduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
3543f8919bdaSduboff break;
3544f8919bdaSduboff
3545f8919bdaSduboff case PARAM_LP_100HDX_CAP:
3546f8919bdaSduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
3547f8919bdaSduboff break;
3548f8919bdaSduboff
3549f8919bdaSduboff case PARAM_LP_10FDX_CAP:
3550f8919bdaSduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
3551f8919bdaSduboff break;
3552f8919bdaSduboff
3553f8919bdaSduboff case PARAM_LP_10HDX_CAP:
3554f8919bdaSduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
3555f8919bdaSduboff break;
3556f8919bdaSduboff
3557f8919bdaSduboff case PARAM_LINK_STATUS:
3558f8919bdaSduboff val = (dp->mii_state == MII_STATE_LINKUP);
3559f8919bdaSduboff break;
3560f8919bdaSduboff
3561f8919bdaSduboff case PARAM_LINK_SPEED:
3562f8919bdaSduboff val = gem_speed_value[dp->speed];
3563f8919bdaSduboff break;
3564f8919bdaSduboff
3565f8919bdaSduboff case PARAM_LINK_DUPLEX:
3566f8919bdaSduboff val = 0;
3567f8919bdaSduboff if (dp->mii_state == MII_STATE_LINKUP) {
3568f8919bdaSduboff val = dp->full_duplex ? 2 : 1;
3569f8919bdaSduboff }
3570f8919bdaSduboff break;
3571f8919bdaSduboff
3572f8919bdaSduboff case PARAM_LINK_AUTONEG:
3573f8919bdaSduboff val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3574f8919bdaSduboff break;
3575f8919bdaSduboff
3576f8919bdaSduboff case PARAM_LINK_RX_PAUSE:
3577f8919bdaSduboff val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3578f8919bdaSduboff (dp->flow_control == FLOW_CONTROL_RX_PAUSE);
3579f8919bdaSduboff break;
3580f8919bdaSduboff
3581f8919bdaSduboff case PARAM_LINK_TX_PAUSE:
3582f8919bdaSduboff val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3583f8919bdaSduboff (dp->flow_control == FLOW_CONTROL_TX_PAUSE);
3584f8919bdaSduboff break;
3585f8919bdaSduboff
3586f8919bdaSduboff #ifdef DEBUG_RESUME
3587f8919bdaSduboff case PARAM_RESUME_TEST:
3588f8919bdaSduboff val = 0;
3589f8919bdaSduboff break;
3590f8919bdaSduboff #endif
3591f8919bdaSduboff default:
3592f8919bdaSduboff cmn_err(CE_WARN, "%s: unimplemented ndd control (%d)",
3593f8919bdaSduboff dp->name, item);
3594f8919bdaSduboff break;
3595f8919bdaSduboff }
3596f8919bdaSduboff
3597f8919bdaSduboff (void) mi_mpprintf(mp, "%ld", val);
3598f8919bdaSduboff
3599f8919bdaSduboff return (0);
3600f8919bdaSduboff }
3601f8919bdaSduboff
/*
 * gem_param_set: ndd "set" callback for the writable PARAM_ADV_* items.
 *
 * Parses the decimal value, validates it against the corresponding
 * hardware capability bit (only 0/1 are accepted, and 1 only when the
 * PHY reports the capability), stores it in the anadv_* soft state,
 * and resyncs the PHY by restarting the link state machine.
 * Returns EINVAL on a malformed or unsupported value, 0 on success.
 * NOTE(review): items other than ADV_* fall through the switch
 * unmodified — presumably harmless because gem_nd_setup() only
 * registers this callback for writable ADV_* parameters.
 */
static int
gem_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t arg, cred_t *credp)
{
	struct gem_dev	*dp = ((struct gem_nd_arg *)(void *)arg)->dp;
	int		item = ((struct gem_nd_arg *)(void *)arg)->item;
	long		val;
	char		*end;

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
	if (ddi_strtol(value, &end, 10, &val)) {
		return (EINVAL);
	}
	if (end == value) {
		/* no digits were consumed */
		return (EINVAL);
	}

	switch (item) {
	case PARAM_ADV_AUTONEG_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_CANAUTONEG) == 0) {
			/* PHY cannot autonegotiate */
			goto err;
		}
		dp->anadv_autoneg = (int)val;
		break;

	case PARAM_ADV_PAUSE_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val) {
			dp->anadv_flow_control |= 1;
		} else {
			dp->anadv_flow_control &= ~1;
		}
		break;

	case PARAM_ADV_ASYM_PAUSE_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val) {
			dp->anadv_flow_control |= 2;
		} else {
			dp->anadv_flow_control &= ~2;
		}
		break;

	case PARAM_ADV_1000FDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_xstatus &
		    (MII_XSTATUS_1000BASET_FD |
		    MII_XSTATUS_1000BASEX_FD)) == 0) {
			goto err;
		}
		dp->anadv_1000fdx = (int)val;
		break;

	case PARAM_ADV_1000HDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_xstatus &
		    (MII_XSTATUS_1000BASET | MII_XSTATUS_1000BASEX)) == 0) {
			goto err;
		}
		dp->anadv_1000hdx = (int)val;
		break;

	case PARAM_ADV_100T4_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_100_BASE_T4) == 0) {
			goto err;
		}
		dp->anadv_100t4 = (int)val;
		break;

	case PARAM_ADV_100FDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_100_BASEX_FD) == 0) {
			goto err;
		}
		dp->anadv_100fdx = (int)val;
		break;

	case PARAM_ADV_100HDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_100_BASEX) == 0) {
			goto err;
		}
		dp->anadv_100hdx = (int)val;
		break;

	case PARAM_ADV_10FDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_10_FD) == 0) {
			goto err;
		}
		dp->anadv_10fdx = (int)val;
		break;

	case PARAM_ADV_10HDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_10) == 0) {
			goto err;
		}
		dp->anadv_10hdx = (int)val;
		break;
	}

	/* sync with PHY */
	gem_choose_forcedmode(dp);

	dp->mii_state = MII_STATE_UNKNOWN;
	if (dp->gc.gc_mii_hw_link_detection && dp->link_watcher_id == 0) {
		/* XXX - Can we ignore the return code ? */
		(void) gem_mii_link_check(dp);
	}

	return (0);
err:
	return (EINVAL);
}
3738f8919bdaSduboff
3739f8919bdaSduboff static void
gem_nd_load(struct gem_dev * dp,char * name,ndgetf_t gf,ndsetf_t sf,int item)3740f8919bdaSduboff gem_nd_load(struct gem_dev *dp, char *name, ndgetf_t gf, ndsetf_t sf, int item)
3741f8919bdaSduboff {
3742f8919bdaSduboff struct gem_nd_arg *arg;
3743f8919bdaSduboff
3744f8919bdaSduboff ASSERT(item >= 0);
3745f8919bdaSduboff ASSERT(item < PARAM_COUNT);
3746f8919bdaSduboff
3747f8919bdaSduboff arg = &((struct gem_nd_arg *)(void *)dp->nd_arg_p)[item];
3748f8919bdaSduboff arg->dp = dp;
3749f8919bdaSduboff arg->item = item;
3750f8919bdaSduboff
3751f8919bdaSduboff DPRINTF(2, (CE_CONT, "!%s: %s: name:%s, item:%d",
3752f8919bdaSduboff dp->name, __func__, name, item));
375323d366e3Sduboff (void) nd_load(&dp->nd_data_p, name, gf, sf, (caddr_t)arg);
3754f8919bdaSduboff }
3755f8919bdaSduboff
/*
 * Register the full set of link parameters with the ndd framework:
 * hardware capabilities (read-only), our advertised capabilities
 * (writable only where the PHY supports the mode and advertising is
 * not read-only), link partner capabilities (read-only), and current
 * link state (read-only).
 */
static void
gem_nd_setup(struct gem_dev *dp)
{
	DPRINTF(0, (CE_CONT, "!%s: %s: called, mii_status:0x%b",
	    dp->name, __func__, dp->mii_status, MII_STATUS_BITS));

	ASSERT(dp->nd_arg_p == NULL);

	/* one gem_nd_arg slot per exported parameter; freed in cleanup */
	dp->nd_arg_p =
	    kmem_zalloc(sizeof (struct gem_nd_arg) * PARAM_COUNT, KM_SLEEP);

	/* writable only when the corresponding capability bit is set */
#define	SETFUNC(x)	((x) ? gem_param_set : NULL)

	/* Hardware capabilities (no set function: read-only) */
	gem_nd_load(dp, "autoneg_cap",
	    gem_param_get, NULL, PARAM_AUTONEG_CAP);
	gem_nd_load(dp, "pause_cap",
	    gem_param_get, NULL, PARAM_PAUSE_CAP);
	gem_nd_load(dp, "asym_pause_cap",
	    gem_param_get, NULL, PARAM_ASYM_PAUSE_CAP);
	gem_nd_load(dp, "1000fdx_cap",
	    gem_param_get, NULL, PARAM_1000FDX_CAP);
	gem_nd_load(dp, "1000hdx_cap",
	    gem_param_get, NULL, PARAM_1000HDX_CAP);
	gem_nd_load(dp, "100T4_cap",
	    gem_param_get, NULL, PARAM_100T4_CAP);
	gem_nd_load(dp, "100fdx_cap",
	    gem_param_get, NULL, PARAM_100FDX_CAP);
	gem_nd_load(dp, "100hdx_cap",
	    gem_param_get, NULL, PARAM_100HDX_CAP);
	gem_nd_load(dp, "10fdx_cap",
	    gem_param_get, NULL, PARAM_10FDX_CAP);
	gem_nd_load(dp, "10hdx_cap",
	    gem_param_get, NULL, PARAM_10HDX_CAP);

	/* Our advertised capabilities */
	gem_nd_load(dp, "adv_autoneg_cap", gem_param_get,
	    SETFUNC(dp->mii_status & MII_STATUS_CANAUTONEG),
	    PARAM_ADV_AUTONEG_CAP);
	gem_nd_load(dp, "adv_pause_cap", gem_param_get,
	    SETFUNC(dp->gc.gc_flow_control & 1),
	    PARAM_ADV_PAUSE_CAP);
	gem_nd_load(dp, "adv_asym_pause_cap", gem_param_get,
	    SETFUNC(dp->gc.gc_flow_control & 2),
	    PARAM_ADV_ASYM_PAUSE_CAP);
	gem_nd_load(dp, "adv_1000fdx_cap", gem_param_get,
	    SETFUNC(dp->mii_xstatus &
	    (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD)),
	    PARAM_ADV_1000FDX_CAP);
	gem_nd_load(dp, "adv_1000hdx_cap", gem_param_get,
	    SETFUNC(dp->mii_xstatus &
	    (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET)),
	    PARAM_ADV_1000HDX_CAP);
	/* the 10/100M modes are also gated on mii_advert_ro */
	gem_nd_load(dp, "adv_100T4_cap", gem_param_get,
	    SETFUNC((dp->mii_status & MII_STATUS_100_BASE_T4) &&
	    !dp->mii_advert_ro),
	    PARAM_ADV_100T4_CAP);
	gem_nd_load(dp, "adv_100fdx_cap", gem_param_get,
	    SETFUNC((dp->mii_status & MII_STATUS_100_BASEX_FD) &&
	    !dp->mii_advert_ro),
	    PARAM_ADV_100FDX_CAP);
	gem_nd_load(dp, "adv_100hdx_cap", gem_param_get,
	    SETFUNC((dp->mii_status & MII_STATUS_100_BASEX) &&
	    !dp->mii_advert_ro),
	    PARAM_ADV_100HDX_CAP);
	gem_nd_load(dp, "adv_10fdx_cap", gem_param_get,
	    SETFUNC((dp->mii_status & MII_STATUS_10_FD) &&
	    !dp->mii_advert_ro),
	    PARAM_ADV_10FDX_CAP);
	gem_nd_load(dp, "adv_10hdx_cap", gem_param_get,
	    SETFUNC((dp->mii_status & MII_STATUS_10) &&
	    !dp->mii_advert_ro),
	    PARAM_ADV_10HDX_CAP);

	/* Partner's advertised capabilities */
	gem_nd_load(dp, "lp_autoneg_cap",
	    gem_param_get, NULL, PARAM_LP_AUTONEG_CAP);
	gem_nd_load(dp, "lp_pause_cap",
	    gem_param_get, NULL, PARAM_LP_PAUSE_CAP);
	gem_nd_load(dp, "lp_asym_pause_cap",
	    gem_param_get, NULL, PARAM_LP_ASYM_PAUSE_CAP);
	gem_nd_load(dp, "lp_1000fdx_cap",
	    gem_param_get, NULL, PARAM_LP_1000FDX_CAP);
	gem_nd_load(dp, "lp_1000hdx_cap",
	    gem_param_get, NULL, PARAM_LP_1000HDX_CAP);
	gem_nd_load(dp, "lp_100T4_cap",
	    gem_param_get, NULL, PARAM_LP_100T4_CAP);
	gem_nd_load(dp, "lp_100fdx_cap",
	    gem_param_get, NULL, PARAM_LP_100FDX_CAP);
	gem_nd_load(dp, "lp_100hdx_cap",
	    gem_param_get, NULL, PARAM_LP_100HDX_CAP);
	gem_nd_load(dp, "lp_10fdx_cap",
	    gem_param_get, NULL, PARAM_LP_10FDX_CAP);
	gem_nd_load(dp, "lp_10hdx_cap",
	    gem_param_get, NULL, PARAM_LP_10HDX_CAP);

	/* Current operating modes */
	gem_nd_load(dp, "link_status",
	    gem_param_get, NULL, PARAM_LINK_STATUS);
	gem_nd_load(dp, "link_speed",
	    gem_param_get, NULL, PARAM_LINK_SPEED);
	gem_nd_load(dp, "link_duplex",
	    gem_param_get, NULL, PARAM_LINK_DUPLEX);
	gem_nd_load(dp, "link_autoneg",
	    gem_param_get, NULL, PARAM_LINK_AUTONEG);
	gem_nd_load(dp, "link_rx_pause",
	    gem_param_get, NULL, PARAM_LINK_RX_PAUSE);
	gem_nd_load(dp, "link_tx_pause",
	    gem_param_get, NULL, PARAM_LINK_TX_PAUSE);
#ifdef DEBUG_RESUME
	gem_nd_load(dp, "resume_test",
	    gem_param_get, NULL, PARAM_RESUME_TEST);
#endif
#undef	SETFUNC
}
3870f8919bdaSduboff
3871f8919bdaSduboff static
3872f8919bdaSduboff enum ioc_reply
gem_nd_ioctl(struct gem_dev * dp,queue_t * wq,mblk_t * mp,struct iocblk * iocp)3873f8919bdaSduboff gem_nd_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
3874f8919bdaSduboff {
3875f8919bdaSduboff boolean_t ok;
3876f8919bdaSduboff
3877f8919bdaSduboff ASSERT(mutex_owned(&dp->intrlock));
3878f8919bdaSduboff
3879f8919bdaSduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3880f8919bdaSduboff
3881f8919bdaSduboff switch (iocp->ioc_cmd) {
3882f8919bdaSduboff case ND_GET:
3883f8919bdaSduboff ok = nd_getset(wq, dp->nd_data_p, mp);
3884f8919bdaSduboff DPRINTF(0, (CE_CONT,
3885f8919bdaSduboff "%s: get %s", dp->name, ok ? "OK" : "FAIL"));
3886f8919bdaSduboff return (ok ? IOC_REPLY : IOC_INVAL);
3887f8919bdaSduboff
3888f8919bdaSduboff case ND_SET:
3889f8919bdaSduboff ok = nd_getset(wq, dp->nd_data_p, mp);
3890f8919bdaSduboff
3891f8919bdaSduboff DPRINTF(0, (CE_CONT, "%s: set %s err %d",
3892f8919bdaSduboff dp->name, ok ? "OK" : "FAIL", iocp->ioc_error));
3893f8919bdaSduboff
3894f8919bdaSduboff if (!ok) {
3895f8919bdaSduboff return (IOC_INVAL);
3896f8919bdaSduboff }
3897f8919bdaSduboff
3898f8919bdaSduboff if (iocp->ioc_error) {
3899f8919bdaSduboff return (IOC_REPLY);
3900f8919bdaSduboff }
3901f8919bdaSduboff
3902f8919bdaSduboff return (IOC_RESTART_REPLY);
3903f8919bdaSduboff }
3904f8919bdaSduboff
3905f8919bdaSduboff cmn_err(CE_WARN, "%s: invalid cmd 0x%x", dp->name, iocp->ioc_cmd);
3906f8919bdaSduboff
3907f8919bdaSduboff return (IOC_INVAL);
3908f8919bdaSduboff }
3909f8919bdaSduboff
3910f8919bdaSduboff static void
gem_nd_cleanup(struct gem_dev * dp)3911f8919bdaSduboff gem_nd_cleanup(struct gem_dev *dp)
3912f8919bdaSduboff {
3913f8919bdaSduboff ASSERT(dp->nd_data_p != NULL);
3914f8919bdaSduboff ASSERT(dp->nd_arg_p != NULL);
3915f8919bdaSduboff
3916f8919bdaSduboff nd_free(&dp->nd_data_p);
3917f8919bdaSduboff
3918f8919bdaSduboff kmem_free(dp->nd_arg_p, sizeof (struct gem_nd_arg) * PARAM_COUNT);
3919f8919bdaSduboff dp->nd_arg_p = NULL;
3920f8919bdaSduboff }
3921f8919bdaSduboff
3922f8919bdaSduboff static void
gem_mac_ioctl(struct gem_dev * dp,queue_t * wq,mblk_t * mp)3923f8919bdaSduboff gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp)
3924f8919bdaSduboff {
3925f8919bdaSduboff struct iocblk *iocp;
3926f8919bdaSduboff enum ioc_reply status;
3927f8919bdaSduboff int cmd;
3928f8919bdaSduboff
3929f8919bdaSduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3930f8919bdaSduboff
3931f8919bdaSduboff /*
3932f8919bdaSduboff * Validate the command before bothering with the mutex ...
3933f8919bdaSduboff */
3934f8919bdaSduboff iocp = (void *)mp->b_rptr;
3935f8919bdaSduboff iocp->ioc_error = 0;
3936f8919bdaSduboff cmd = iocp->ioc_cmd;
3937f8919bdaSduboff
3938f8919bdaSduboff DPRINTF(0, (CE_CONT, "%s: %s cmd:0x%x", dp->name, __func__, cmd));
3939f8919bdaSduboff
3940f8919bdaSduboff mutex_enter(&dp->intrlock);
3941f8919bdaSduboff mutex_enter(&dp->xmitlock);
3942f8919bdaSduboff
3943f8919bdaSduboff switch (cmd) {
3944f8919bdaSduboff default:
3945f8919bdaSduboff _NOTE(NOTREACHED)
3946f8919bdaSduboff status = IOC_INVAL;
3947f8919bdaSduboff break;
3948f8919bdaSduboff
3949f8919bdaSduboff case ND_GET:
3950f8919bdaSduboff case ND_SET:
3951f8919bdaSduboff status = gem_nd_ioctl(dp, wq, mp, iocp);
3952f8919bdaSduboff break;
3953f8919bdaSduboff }
3954f8919bdaSduboff
3955f8919bdaSduboff mutex_exit(&dp->xmitlock);
3956f8919bdaSduboff mutex_exit(&dp->intrlock);
3957f8919bdaSduboff
3958f8919bdaSduboff #ifdef DEBUG_RESUME
3959f8919bdaSduboff if (cmd == ND_GET) {
3960f8919bdaSduboff gem_suspend(dp->dip);
3961f8919bdaSduboff gem_resume(dp->dip);
3962f8919bdaSduboff }
3963f8919bdaSduboff #endif
3964f8919bdaSduboff /*
3965f8919bdaSduboff * Finally, decide how to reply
3966f8919bdaSduboff */
3967f8919bdaSduboff switch (status) {
3968f8919bdaSduboff default:
3969f8919bdaSduboff case IOC_INVAL:
3970f8919bdaSduboff /*
3971f8919bdaSduboff * Error, reply with a NAK and EINVAL or the specified error
3972f8919bdaSduboff */
3973f8919bdaSduboff miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
3974f8919bdaSduboff EINVAL : iocp->ioc_error);
3975f8919bdaSduboff break;
3976f8919bdaSduboff
3977f8919bdaSduboff case IOC_DONE:
3978f8919bdaSduboff /*
3979f8919bdaSduboff * OK, reply already sent
3980f8919bdaSduboff */
3981f8919bdaSduboff break;
3982f8919bdaSduboff
3983f8919bdaSduboff case IOC_RESTART_ACK:
3984f8919bdaSduboff case IOC_ACK:
3985f8919bdaSduboff /*
3986f8919bdaSduboff * OK, reply with an ACK
3987f8919bdaSduboff */
3988f8919bdaSduboff miocack(wq, mp, 0, 0);
3989f8919bdaSduboff break;
3990f8919bdaSduboff
3991f8919bdaSduboff case IOC_RESTART_REPLY:
3992f8919bdaSduboff case IOC_REPLY:
3993f8919bdaSduboff /*
3994f8919bdaSduboff * OK, send prepared reply as ACK or NAK
3995f8919bdaSduboff */
3996f8919bdaSduboff mp->b_datap->db_type =
3997f8919bdaSduboff iocp->ioc_error == 0 ? M_IOCACK : M_IOCNAK;
3998f8919bdaSduboff qreply(wq, mp);
3999f8919bdaSduboff break;
4000f8919bdaSduboff }
4001f8919bdaSduboff }
4002f8919bdaSduboff
4003f8919bdaSduboff #ifndef SYS_MAC_H
4004f8919bdaSduboff #define XCVR_UNDEFINED 0
4005f8919bdaSduboff #define XCVR_NONE 1
4006f8919bdaSduboff #define XCVR_10 2
4007f8919bdaSduboff #define XCVR_100T4 3
4008f8919bdaSduboff #define XCVR_100X 4
4009f8919bdaSduboff #define XCVR_100T2 5
4010f8919bdaSduboff #define XCVR_1000X 6
4011f8919bdaSduboff #define XCVR_1000T 7
4012f8919bdaSduboff #endif
4013f8919bdaSduboff static int
gem_mac_xcvr_inuse(struct gem_dev * dp)4014f8919bdaSduboff gem_mac_xcvr_inuse(struct gem_dev *dp)
4015f8919bdaSduboff {
4016f8919bdaSduboff int val = XCVR_UNDEFINED;
4017f8919bdaSduboff
4018f8919bdaSduboff if ((dp->mii_status & MII_STATUS_XSTATUS) == 0) {
4019f8919bdaSduboff if (dp->mii_status & MII_STATUS_100_BASE_T4) {
4020f8919bdaSduboff val = XCVR_100T4;
4021f8919bdaSduboff } else if (dp->mii_status &
4022f8919bdaSduboff (MII_STATUS_100_BASEX_FD |
4023f8919bdaSduboff MII_STATUS_100_BASEX)) {
4024f8919bdaSduboff val = XCVR_100X;
4025f8919bdaSduboff } else if (dp->mii_status &
4026f8919bdaSduboff (MII_STATUS_100_BASE_T2_FD |
4027f8919bdaSduboff MII_STATUS_100_BASE_T2)) {
4028f8919bdaSduboff val = XCVR_100T2;
4029f8919bdaSduboff } else if (dp->mii_status &
4030f8919bdaSduboff (MII_STATUS_10_FD | MII_STATUS_10)) {
4031f8919bdaSduboff val = XCVR_10;
4032f8919bdaSduboff }
4033f8919bdaSduboff } else if (dp->mii_xstatus &
4034f8919bdaSduboff (MII_XSTATUS_1000BASET_FD | MII_XSTATUS_1000BASET)) {
4035f8919bdaSduboff val = XCVR_1000T;
4036f8919bdaSduboff } else if (dp->mii_xstatus &
4037f8919bdaSduboff (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASEX)) {
4038f8919bdaSduboff val = XCVR_1000X;
4039f8919bdaSduboff }
4040f8919bdaSduboff
4041f8919bdaSduboff return (val);
4042f8919bdaSduboff }
4043f8919bdaSduboff
4044f8919bdaSduboff /* ============================================================== */
4045f8919bdaSduboff /*
4046f8919bdaSduboff * GLDv3 interface
4047f8919bdaSduboff */
4048f8919bdaSduboff /* ============================================================== */
4049f8919bdaSduboff static int gem_m_getstat(void *, uint_t, uint64_t *);
4050f8919bdaSduboff static int gem_m_start(void *);
4051f8919bdaSduboff static void gem_m_stop(void *);
4052f8919bdaSduboff static int gem_m_setpromisc(void *, boolean_t);
4053f8919bdaSduboff static int gem_m_multicst(void *, boolean_t, const uint8_t *);
4054f8919bdaSduboff static int gem_m_unicst(void *, const uint8_t *);
4055f8919bdaSduboff static mblk_t *gem_m_tx(void *, mblk_t *);
4056f8919bdaSduboff static void gem_m_ioctl(void *, queue_t *, mblk_t *);
4057f8919bdaSduboff static boolean_t gem_m_getcapab(void *, mac_capab_t, void *);
4058f8919bdaSduboff
4059da14cebeSEric Cheng #define GEM_M_CALLBACK_FLAGS (MC_IOCTL | MC_GETCAPAB)
4060f8919bdaSduboff
/*
 * GLDv3 callback vector; the flags declare which optional entry
 * points (mc_ioctl, mc_getcapab) are present.
 */
static mac_callbacks_t gem_m_callbacks = {
	GEM_M_CALLBACK_FLAGS,	/* mc_callbacks: MC_IOCTL | MC_GETCAPAB */
	gem_m_getstat,
	gem_m_start,
	gem_m_stop,
	gem_m_setpromisc,
	gem_m_multicst,
	gem_m_unicst,
	gem_m_tx,
	NULL,			/* reserved slot, unused */
	gem_m_ioctl,
	gem_m_getcapab,
};
4074f8919bdaSduboff
/*
 * GLDv3 mc_start entry point: bring the NIC online.
 *
 * Initializes the hardware, programs the rx filter with the factory
 * address, starts the mac if the link is already up, and arms the tx
 * timeout watchdog.  Returns 0 on success or EIO; on failure the nic
 * state is rolled back to NIC_STATE_STOPPED.
 *
 * NOTE(review): the sequence below is order-critical (init -> filter
 * -> start -> watchdog), all under intrlock.
 */
static int
gem_m_start(void *arg)
{
	int		err = 0;
	struct gem_dev *dp = arg;

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	mutex_enter(&dp->intrlock);
	/* refuse to start while suspended */
	if (dp->mac_suspended) {
		err = EIO;
		goto x;
	}
	if (gem_mac_init(dp) != GEM_SUCCESS) {
		err = EIO;
		goto x;
	}
	dp->nic_state = NIC_STATE_INITIALIZED;

	/* reset rx filter state */
	dp->mc_count = 0;
	dp->mc_count_req = 0;

	/* setup media mode if the link have been up */
	if (dp->mii_state == MII_STATE_LINKUP) {
		(dp->gc.gc_set_media)(dp);
	}

	/* setup initial rx filter: use the factory mac address */
	bcopy(dp->dev_addr.ether_addr_octet,
	    dp->cur_addr.ether_addr_octet, ETHERADDRL);
	dp->rxmode |= RXMODE_ENABLE;

	if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
		err = EIO;
		goto x;
	}

	dp->nic_state = NIC_STATE_ONLINE;
	/* start tx/rx now only if the link is already up */
	if (dp->mii_state == MII_STATE_LINKUP) {
		if (gem_mac_start(dp) != GEM_SUCCESS) {
			err = EIO;
			goto x;
		}
	}

	/* arm the tx timeout watchdog */
	dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
	    (void *)dp, dp->gc.gc_tx_timeout_interval);
	mutex_exit(&dp->intrlock);

	return (0);
x:
	/* failure: leave the nic marked stopped */
	dp->nic_state = NIC_STATE_STOPPED;
	mutex_exit(&dp->intrlock);
	return (err);
}
4131f8919bdaSduboff
/*
 * GLDv3 mc_stop entry point: take the NIC offline.
 *
 * Disables reception, cancels the tx watchdog (with intrlock dropped,
 * since the watchdog itself takes intrlock), then marks the nic
 * stopped, deasserts mac_active, waits for any in-flight interrupt
 * handler to drain, and finally stops the mac.  Each re-acquisition
 * of intrlock re-checks mac_suspended, as a suspend may have slipped
 * in while the lock was dropped.
 */
static void
gem_m_stop(void *arg)
{
	struct gem_dev	*dp = arg;

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/* stop rx */
	mutex_enter(&dp->intrlock);
	if (dp->mac_suspended) {
		mutex_exit(&dp->intrlock);
		return;
	}
	dp->rxmode &= ~RXMODE_ENABLE;
	(void) gem_mac_set_rx_filter(dp);
	mutex_exit(&dp->intrlock);

	/* stop tx timeout watcher; retry until the callout is gone */
	if (dp->timeout_id) {
		while (untimeout(dp->timeout_id) == -1)
			;
		dp->timeout_id = 0;
	}

	/* make the nic state inactive */
	mutex_enter(&dp->intrlock);
	if (dp->mac_suspended) {
		mutex_exit(&dp->intrlock);
		return;
	}
	dp->nic_state = NIC_STATE_STOPPED;

	/* deassert mac_active so the interrupt handler stops working */
	mutex_enter(&dp->xmitlock);
	dp->mac_active = B_FALSE;
	mutex_exit(&dp->xmitlock);

	/* wait for a currently-running interrupt handler to finish */
	while (dp->intr_busy) {
		cv_wait(&dp->tx_drain_cv, &dp->intrlock);
	}
	(void) gem_mac_stop(dp, 0);
	mutex_exit(&dp->intrlock);
}
4176f8919bdaSduboff
4177f8919bdaSduboff static int
gem_m_multicst(void * arg,boolean_t add,const uint8_t * ep)4178f8919bdaSduboff gem_m_multicst(void *arg, boolean_t add, const uint8_t *ep)
4179f8919bdaSduboff {
4180f8919bdaSduboff int err;
4181f8919bdaSduboff int ret;
4182f8919bdaSduboff struct gem_dev *dp = arg;
4183f8919bdaSduboff
4184f8919bdaSduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4185f8919bdaSduboff
4186f8919bdaSduboff if (add) {
4187f8919bdaSduboff ret = gem_add_multicast(dp, ep);
4188f8919bdaSduboff } else {
4189f8919bdaSduboff ret = gem_remove_multicast(dp, ep);
4190f8919bdaSduboff }
4191f8919bdaSduboff
4192f8919bdaSduboff err = 0;
4193f8919bdaSduboff if (ret != GEM_SUCCESS) {
4194f8919bdaSduboff err = EIO;
4195f8919bdaSduboff }
4196f8919bdaSduboff
4197f8919bdaSduboff return (err);
4198f8919bdaSduboff }
4199f8919bdaSduboff
4200f8919bdaSduboff static int
gem_m_setpromisc(void * arg,boolean_t on)4201f8919bdaSduboff gem_m_setpromisc(void *arg, boolean_t on)
4202f8919bdaSduboff {
4203f8919bdaSduboff int err = 0; /* no error */
4204f8919bdaSduboff struct gem_dev *dp = arg;
4205f8919bdaSduboff
4206f8919bdaSduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4207f8919bdaSduboff
4208f8919bdaSduboff mutex_enter(&dp->intrlock);
4209f8919bdaSduboff if (dp->mac_suspended) {
4210f8919bdaSduboff mutex_exit(&dp->intrlock);
4211f8919bdaSduboff return (EIO);
4212f8919bdaSduboff }
4213f8919bdaSduboff if (on) {
4214f8919bdaSduboff dp->rxmode |= RXMODE_PROMISC;
4215f8919bdaSduboff } else {
4216f8919bdaSduboff dp->rxmode &= ~RXMODE_PROMISC;
4217f8919bdaSduboff }
4218f8919bdaSduboff
4219f8919bdaSduboff if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4220f8919bdaSduboff err = EIO;
4221f8919bdaSduboff }
4222f8919bdaSduboff mutex_exit(&dp->intrlock);
4223f8919bdaSduboff
4224f8919bdaSduboff return (err);
4225f8919bdaSduboff }
4226f8919bdaSduboff
4227f8919bdaSduboff int
gem_m_getstat(void * arg,uint_t stat,uint64_t * valp)4228f8919bdaSduboff gem_m_getstat(void *arg, uint_t stat, uint64_t *valp)
4229f8919bdaSduboff {
4230f8919bdaSduboff struct gem_dev *dp = arg;
4231f8919bdaSduboff struct gem_stats *gstp = &dp->stats;
4232f8919bdaSduboff uint64_t val = 0;
4233f8919bdaSduboff
4234f8919bdaSduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4235f8919bdaSduboff
423623d366e3Sduboff if (mutex_owned(&dp->intrlock)) {
423723d366e3Sduboff if (dp->mac_suspended) {
423823d366e3Sduboff return (EIO);
423923d366e3Sduboff }
424023d366e3Sduboff } else {
4241f8919bdaSduboff mutex_enter(&dp->intrlock);
4242f8919bdaSduboff if (dp->mac_suspended) {
4243f8919bdaSduboff mutex_exit(&dp->intrlock);
4244f8919bdaSduboff return (EIO);
4245f8919bdaSduboff }
4246f8919bdaSduboff mutex_exit(&dp->intrlock);
424723d366e3Sduboff }
4248f8919bdaSduboff
4249f8919bdaSduboff if ((*dp->gc.gc_get_stats)(dp) != GEM_SUCCESS) {
4250f8919bdaSduboff return (EIO);
4251f8919bdaSduboff }
4252f8919bdaSduboff
4253f8919bdaSduboff switch (stat) {
4254f8919bdaSduboff case MAC_STAT_IFSPEED:
4255f8919bdaSduboff val = gem_speed_value[dp->speed] *1000000ull;
4256f8919bdaSduboff break;
4257f8919bdaSduboff
4258f8919bdaSduboff case MAC_STAT_MULTIRCV:
4259f8919bdaSduboff val = gstp->rmcast;
4260f8919bdaSduboff break;
4261f8919bdaSduboff
4262f8919bdaSduboff case MAC_STAT_BRDCSTRCV:
4263f8919bdaSduboff val = gstp->rbcast;
4264f8919bdaSduboff break;
4265f8919bdaSduboff
4266f8919bdaSduboff case MAC_STAT_MULTIXMT:
4267f8919bdaSduboff val = gstp->omcast;
4268f8919bdaSduboff break;
4269f8919bdaSduboff
4270f8919bdaSduboff case MAC_STAT_BRDCSTXMT:
4271f8919bdaSduboff val = gstp->obcast;
4272f8919bdaSduboff break;
4273f8919bdaSduboff
4274f8919bdaSduboff case MAC_STAT_NORCVBUF:
4275f8919bdaSduboff val = gstp->norcvbuf + gstp->missed;
4276f8919bdaSduboff break;
4277f8919bdaSduboff
4278f8919bdaSduboff case MAC_STAT_IERRORS:
4279f8919bdaSduboff val = gstp->errrcv;
4280f8919bdaSduboff break;
4281f8919bdaSduboff
4282f8919bdaSduboff case MAC_STAT_NOXMTBUF:
4283f8919bdaSduboff val = gstp->noxmtbuf;
4284f8919bdaSduboff break;
4285f8919bdaSduboff
4286f8919bdaSduboff case MAC_STAT_OERRORS:
4287f8919bdaSduboff val = gstp->errxmt;
4288f8919bdaSduboff break;
4289f8919bdaSduboff
4290f8919bdaSduboff case MAC_STAT_COLLISIONS:
4291f8919bdaSduboff val = gstp->collisions;
4292f8919bdaSduboff break;
4293f8919bdaSduboff
4294f8919bdaSduboff case MAC_STAT_RBYTES:
4295f8919bdaSduboff val = gstp->rbytes;
4296f8919bdaSduboff break;
4297f8919bdaSduboff
4298f8919bdaSduboff case MAC_STAT_IPACKETS:
4299f8919bdaSduboff val = gstp->rpackets;
4300f8919bdaSduboff break;
4301f8919bdaSduboff
4302f8919bdaSduboff case MAC_STAT_OBYTES:
4303f8919bdaSduboff val = gstp->obytes;
4304f8919bdaSduboff break;
4305f8919bdaSduboff
4306f8919bdaSduboff case MAC_STAT_OPACKETS:
4307f8919bdaSduboff val = gstp->opackets;
4308f8919bdaSduboff break;
4309f8919bdaSduboff
4310f8919bdaSduboff case MAC_STAT_UNDERFLOWS:
4311f8919bdaSduboff val = gstp->underflow;
4312f8919bdaSduboff break;
4313f8919bdaSduboff
4314f8919bdaSduboff case MAC_STAT_OVERFLOWS:
4315f8919bdaSduboff val = gstp->overflow;
4316f8919bdaSduboff break;
4317f8919bdaSduboff
4318f8919bdaSduboff case ETHER_STAT_ALIGN_ERRORS:
4319f8919bdaSduboff val = gstp->frame;
4320f8919bdaSduboff break;
4321f8919bdaSduboff
4322f8919bdaSduboff case ETHER_STAT_FCS_ERRORS:
4323f8919bdaSduboff val = gstp->crc;
4324f8919bdaSduboff break;
4325f8919bdaSduboff
4326f8919bdaSduboff case ETHER_STAT_FIRST_COLLISIONS:
4327f8919bdaSduboff val = gstp->first_coll;
4328f8919bdaSduboff break;
4329f8919bdaSduboff
4330f8919bdaSduboff case ETHER_STAT_MULTI_COLLISIONS:
4331f8919bdaSduboff val = gstp->multi_coll;
4332f8919bdaSduboff break;
4333f8919bdaSduboff
4334f8919bdaSduboff case ETHER_STAT_SQE_ERRORS:
4335f8919bdaSduboff val = gstp->sqe;
4336f8919bdaSduboff break;
4337f8919bdaSduboff
4338f8919bdaSduboff case ETHER_STAT_DEFER_XMTS:
4339f8919bdaSduboff val = gstp->defer;
4340f8919bdaSduboff break;
4341f8919bdaSduboff
4342f8919bdaSduboff case ETHER_STAT_TX_LATE_COLLISIONS:
4343f8919bdaSduboff val = gstp->xmtlatecoll;
4344f8919bdaSduboff break;
4345f8919bdaSduboff
4346f8919bdaSduboff case ETHER_STAT_EX_COLLISIONS:
4347f8919bdaSduboff val = gstp->excoll;
4348f8919bdaSduboff break;
4349f8919bdaSduboff
4350f8919bdaSduboff case ETHER_STAT_MACXMT_ERRORS:
4351f8919bdaSduboff val = gstp->xmit_internal_err;
4352f8919bdaSduboff break;
4353f8919bdaSduboff
4354f8919bdaSduboff case ETHER_STAT_CARRIER_ERRORS:
4355f8919bdaSduboff val = gstp->nocarrier;
4356f8919bdaSduboff break;
4357f8919bdaSduboff
4358f8919bdaSduboff case ETHER_STAT_TOOLONG_ERRORS:
4359f8919bdaSduboff val = gstp->frame_too_long;
4360f8919bdaSduboff break;
4361f8919bdaSduboff
4362f8919bdaSduboff case ETHER_STAT_MACRCV_ERRORS:
4363f8919bdaSduboff val = gstp->rcv_internal_err;
4364f8919bdaSduboff break;
4365f8919bdaSduboff
4366f8919bdaSduboff case ETHER_STAT_XCVR_ADDR:
4367f8919bdaSduboff val = dp->mii_phy_addr;
4368f8919bdaSduboff break;
4369f8919bdaSduboff
4370f8919bdaSduboff case ETHER_STAT_XCVR_ID:
4371f8919bdaSduboff val = dp->mii_phy_id;
4372f8919bdaSduboff break;
4373f8919bdaSduboff
4374f8919bdaSduboff case ETHER_STAT_XCVR_INUSE:
4375f8919bdaSduboff val = gem_mac_xcvr_inuse(dp);
4376f8919bdaSduboff break;
4377f8919bdaSduboff
4378f8919bdaSduboff case ETHER_STAT_CAP_1000FDX:
4379f8919bdaSduboff val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
4380f8919bdaSduboff (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
4381f8919bdaSduboff break;
4382f8919bdaSduboff
4383f8919bdaSduboff case ETHER_STAT_CAP_1000HDX:
4384f8919bdaSduboff val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
4385f8919bdaSduboff (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
4386f8919bdaSduboff break;
4387f8919bdaSduboff
4388f8919bdaSduboff case ETHER_STAT_CAP_100FDX:
4389f8919bdaSduboff val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4390f8919bdaSduboff break;
4391f8919bdaSduboff
4392f8919bdaSduboff case ETHER_STAT_CAP_100HDX:
4393f8919bdaSduboff val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4394f8919bdaSduboff break;
4395f8919bdaSduboff
4396f8919bdaSduboff case ETHER_STAT_CAP_10FDX:
4397f8919bdaSduboff val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4398f8919bdaSduboff break;
4399f8919bdaSduboff
4400f8919bdaSduboff case ETHER_STAT_CAP_10HDX:
4401f8919bdaSduboff val = BOOLEAN(dp->mii_status & MII_STATUS_10);
4402f8919bdaSduboff break;
4403f8919bdaSduboff
4404f8919bdaSduboff case ETHER_STAT_CAP_ASMPAUSE:
4405f8919bdaSduboff val = BOOLEAN(dp->gc.gc_flow_control & 2);
4406f8919bdaSduboff break;
4407f8919bdaSduboff
4408f8919bdaSduboff case ETHER_STAT_CAP_PAUSE:
4409f8919bdaSduboff val = BOOLEAN(dp->gc.gc_flow_control & 1);
4410f8919bdaSduboff break;
4411f8919bdaSduboff
4412f8919bdaSduboff case ETHER_STAT_CAP_AUTONEG:
441323d366e3Sduboff val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4414f8919bdaSduboff break;
4415f8919bdaSduboff
4416f8919bdaSduboff case ETHER_STAT_ADV_CAP_1000FDX:
4417f8919bdaSduboff val = dp->anadv_1000fdx;
4418f8919bdaSduboff break;
4419f8919bdaSduboff
4420f8919bdaSduboff case ETHER_STAT_ADV_CAP_1000HDX:
4421f8919bdaSduboff val = dp->anadv_1000hdx;
4422f8919bdaSduboff break;
4423f8919bdaSduboff
4424f8919bdaSduboff case ETHER_STAT_ADV_CAP_100FDX:
4425f8919bdaSduboff val = dp->anadv_100fdx;
4426f8919bdaSduboff break;
4427f8919bdaSduboff
4428f8919bdaSduboff case ETHER_STAT_ADV_CAP_100HDX:
4429f8919bdaSduboff val = dp->anadv_100hdx;
4430f8919bdaSduboff break;
4431f8919bdaSduboff
4432f8919bdaSduboff case ETHER_STAT_ADV_CAP_10FDX:
4433f8919bdaSduboff val = dp->anadv_10fdx;
4434f8919bdaSduboff break;
4435f8919bdaSduboff
4436f8919bdaSduboff case ETHER_STAT_ADV_CAP_10HDX:
4437f8919bdaSduboff val = dp->anadv_10hdx;
4438f8919bdaSduboff break;
4439f8919bdaSduboff
4440f8919bdaSduboff case ETHER_STAT_ADV_CAP_ASMPAUSE:
4441f8919bdaSduboff val = BOOLEAN(dp->anadv_flow_control & 2);
4442f8919bdaSduboff break;
4443f8919bdaSduboff
4444f8919bdaSduboff case ETHER_STAT_ADV_CAP_PAUSE:
4445f8919bdaSduboff val = BOOLEAN(dp->anadv_flow_control & 1);
4446f8919bdaSduboff break;
4447f8919bdaSduboff
4448f8919bdaSduboff case ETHER_STAT_ADV_CAP_AUTONEG:
4449f8919bdaSduboff val = dp->anadv_autoneg;
4450f8919bdaSduboff break;
4451f8919bdaSduboff
4452f8919bdaSduboff case ETHER_STAT_LP_CAP_1000FDX:
4453f8919bdaSduboff val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
4454f8919bdaSduboff break;
4455f8919bdaSduboff
4456f8919bdaSduboff case ETHER_STAT_LP_CAP_1000HDX:
4457f8919bdaSduboff val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
4458f8919bdaSduboff break;
4459f8919bdaSduboff
4460f8919bdaSduboff case ETHER_STAT_LP_CAP_100FDX:
4461f8919bdaSduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
4462f8919bdaSduboff break;
4463f8919bdaSduboff
4464f8919bdaSduboff case ETHER_STAT_LP_CAP_100HDX:
4465f8919bdaSduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
4466f8919bdaSduboff break;
4467f8919bdaSduboff
4468f8919bdaSduboff case ETHER_STAT_LP_CAP_10FDX:
4469f8919bdaSduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
4470f8919bdaSduboff break;
4471f8919bdaSduboff
4472f8919bdaSduboff case ETHER_STAT_LP_CAP_10HDX:
4473f8919bdaSduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
4474f8919bdaSduboff break;
4475f8919bdaSduboff
4476f8919bdaSduboff case ETHER_STAT_LP_CAP_ASMPAUSE:
4477bdb9230aSGarrett D'Amore val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASMPAUSE);
4478f8919bdaSduboff break;
4479f8919bdaSduboff
4480f8919bdaSduboff case ETHER_STAT_LP_CAP_PAUSE:
4481f8919bdaSduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
4482f8919bdaSduboff break;
4483f8919bdaSduboff
4484f8919bdaSduboff case ETHER_STAT_LP_CAP_AUTONEG:
4485f8919bdaSduboff val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4486f8919bdaSduboff break;
4487f8919bdaSduboff
4488f8919bdaSduboff case ETHER_STAT_LINK_ASMPAUSE:
4489f8919bdaSduboff val = BOOLEAN(dp->flow_control & 2);
4490f8919bdaSduboff break;
4491f8919bdaSduboff
4492f8919bdaSduboff case ETHER_STAT_LINK_PAUSE:
4493f8919bdaSduboff val = BOOLEAN(dp->flow_control & 1);
4494f8919bdaSduboff break;
4495f8919bdaSduboff
4496f8919bdaSduboff case ETHER_STAT_LINK_AUTONEG:
4497f8919bdaSduboff val = dp->anadv_autoneg &&
4498f8919bdaSduboff BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4499f8919bdaSduboff break;
4500f8919bdaSduboff
4501f8919bdaSduboff case ETHER_STAT_LINK_DUPLEX:
4502f8919bdaSduboff val = (dp->mii_state == MII_STATE_LINKUP) ?
4503f8919bdaSduboff (dp->full_duplex ? 2 : 1) : 0;
4504f8919bdaSduboff break;
4505f8919bdaSduboff
4506f8919bdaSduboff case ETHER_STAT_TOOSHORT_ERRORS:
4507f8919bdaSduboff val = gstp->runt;
4508f8919bdaSduboff break;
4509f8919bdaSduboff case ETHER_STAT_LP_REMFAULT:
4510f8919bdaSduboff val = BOOLEAN(dp->mii_lpable & MII_AN_ADVERT_REMFAULT);
4511f8919bdaSduboff break;
4512f8919bdaSduboff
4513f8919bdaSduboff case ETHER_STAT_JABBER_ERRORS:
4514f8919bdaSduboff val = gstp->jabber;
4515f8919bdaSduboff break;
4516f8919bdaSduboff
4517f8919bdaSduboff case ETHER_STAT_CAP_100T4:
4518f8919bdaSduboff val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4519f8919bdaSduboff break;
4520f8919bdaSduboff
4521f8919bdaSduboff case ETHER_STAT_ADV_CAP_100T4:
4522f8919bdaSduboff val = dp->anadv_100t4;
4523f8919bdaSduboff break;
4524f8919bdaSduboff
4525f8919bdaSduboff case ETHER_STAT_LP_CAP_100T4:
4526f8919bdaSduboff val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
4527f8919bdaSduboff break;
4528f8919bdaSduboff
4529f8919bdaSduboff default:
4530f8919bdaSduboff #if GEM_DEBUG_LEVEL > 2
4531f8919bdaSduboff cmn_err(CE_WARN,
4532f8919bdaSduboff "%s: unrecognized parameter value = %d",
4533f8919bdaSduboff __func__, stat);
4534f8919bdaSduboff #endif
4535f8919bdaSduboff return (ENOTSUP);
4536f8919bdaSduboff }
4537f8919bdaSduboff
4538f8919bdaSduboff *valp = val;
4539f8919bdaSduboff
4540f8919bdaSduboff return (0);
4541f8919bdaSduboff }
4542f8919bdaSduboff
4543f8919bdaSduboff static int
gem_m_unicst(void * arg,const uint8_t * mac)4544f8919bdaSduboff gem_m_unicst(void *arg, const uint8_t *mac)
4545f8919bdaSduboff {
4546f8919bdaSduboff int err = 0;
4547f8919bdaSduboff struct gem_dev *dp = arg;
4548f8919bdaSduboff
4549f8919bdaSduboff DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4550f8919bdaSduboff
4551f8919bdaSduboff mutex_enter(&dp->intrlock);
4552f8919bdaSduboff if (dp->mac_suspended) {
4553f8919bdaSduboff mutex_exit(&dp->intrlock);
4554f8919bdaSduboff return (EIO);
4555f8919bdaSduboff }
4556f8919bdaSduboff bcopy(mac, dp->cur_addr.ether_addr_octet, ETHERADDRL);
4557f8919bdaSduboff dp->rxmode |= RXMODE_ENABLE;
4558f8919bdaSduboff
4559f8919bdaSduboff if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4560f8919bdaSduboff err = EIO;
4561f8919bdaSduboff }
4562f8919bdaSduboff mutex_exit(&dp->intrlock);
4563f8919bdaSduboff
4564f8919bdaSduboff return (err);
4565f8919bdaSduboff }
4566f8919bdaSduboff
4567f8919bdaSduboff /*
4568f8919bdaSduboff * gem_m_tx is used only for sending data packets into ethernet wire.
4569f8919bdaSduboff */
4570f8919bdaSduboff static mblk_t *
gem_m_tx(void * arg,mblk_t * mp)4571f8919bdaSduboff gem_m_tx(void *arg, mblk_t *mp)
4572f8919bdaSduboff {
4573f8919bdaSduboff uint32_t flags = 0;
4574f8919bdaSduboff struct gem_dev *dp = arg;
4575f8919bdaSduboff mblk_t *tp;
4576f8919bdaSduboff
4577f8919bdaSduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4578f8919bdaSduboff
4579f8919bdaSduboff ASSERT(dp->nic_state == NIC_STATE_ONLINE);
4580f8919bdaSduboff if (dp->mii_state != MII_STATE_LINKUP) {
4581f8919bdaSduboff /* Some nics hate to send packets when the link is down. */
4582f8919bdaSduboff while (mp) {
4583f8919bdaSduboff tp = mp->b_next;
4584f8919bdaSduboff mp->b_next = NULL;
4585f8919bdaSduboff freemsg(mp);
4586f8919bdaSduboff mp = tp;
4587f8919bdaSduboff }
4588f8919bdaSduboff return (NULL);
4589f8919bdaSduboff }
4590f8919bdaSduboff
4591f8919bdaSduboff return (gem_send_common(dp, mp, flags));
4592f8919bdaSduboff }
4593f8919bdaSduboff
4594f8919bdaSduboff static void
gem_m_ioctl(void * arg,queue_t * wq,mblk_t * mp)4595f8919bdaSduboff gem_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
4596f8919bdaSduboff {
4597f8919bdaSduboff DPRINTF(0, (CE_CONT, "!%s: %s: called",
4598f8919bdaSduboff ((struct gem_dev *)arg)->name, __func__));
4599f8919bdaSduboff
4600f8919bdaSduboff gem_mac_ioctl((struct gem_dev *)arg, wq, mp);
4601f8919bdaSduboff }
4602f8919bdaSduboff
/* ARGSUSED */
/*
 * gem_m_getcapab: GLDv3 capability query entry point.
 * This framework advertises no optional MAC capabilities
 * (hw checksum, LSO, etc.), so every query is declined.
 */
static boolean_t
gem_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	return (B_FALSE);
}
4609f8919bdaSduboff
4610f8919bdaSduboff static void
gem_gld3_init(struct gem_dev * dp,mac_register_t * macp)4611f8919bdaSduboff gem_gld3_init(struct gem_dev *dp, mac_register_t *macp)
4612f8919bdaSduboff {
4613f8919bdaSduboff macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4614f8919bdaSduboff macp->m_driver = dp;
4615f8919bdaSduboff macp->m_dip = dp->dip;
4616f8919bdaSduboff macp->m_src_addr = dp->dev_addr.ether_addr_octet;
4617f8919bdaSduboff macp->m_callbacks = &gem_m_callbacks;
4618f8919bdaSduboff macp->m_min_sdu = 0;
4619f8919bdaSduboff macp->m_max_sdu = dp->mtu;
462023d366e3Sduboff
462123d366e3Sduboff if (dp->misc_flag & GEM_VLAN) {
4622d62bc4baSyz147064 macp->m_margin = VTAG_SIZE;
4623f8919bdaSduboff }
462423d366e3Sduboff }
4625f8919bdaSduboff
4626f8919bdaSduboff /* ======================================================================== */
4627f8919bdaSduboff /*
4628f8919bdaSduboff * attach/detatch support
4629f8919bdaSduboff */
4630f8919bdaSduboff /* ======================================================================== */
4631f8919bdaSduboff static void
gem_read_conf(struct gem_dev * dp)4632f8919bdaSduboff gem_read_conf(struct gem_dev *dp)
4633f8919bdaSduboff {
4634f8919bdaSduboff int val;
4635f8919bdaSduboff
4636f8919bdaSduboff DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4637f8919bdaSduboff
4638f8919bdaSduboff /*
4639f8919bdaSduboff * Get media mode infomation from .conf file
4640f8919bdaSduboff */
4641f8919bdaSduboff dp->anadv_autoneg = gem_prop_get_int(dp, "adv_autoneg_cap", 1) != 0;
4642f8919bdaSduboff dp->anadv_1000fdx = gem_prop_get_int(dp, "adv_1000fdx_cap", 1) != 0;
4643f8919bdaSduboff dp->anadv_1000hdx = gem_prop_get_int(dp, "adv_1000hdx_cap", 1) != 0;
4644f8919bdaSduboff dp->anadv_100t4 = gem_prop_get_int(dp, "adv_100T4_cap", 1) != 0;
4645f8919bdaSduboff dp->anadv_100fdx = gem_prop_get_int(dp, "adv_100fdx_cap", 1) != 0;
4646f8919bdaSduboff dp->anadv_100hdx = gem_prop_get_int(dp, "adv_100hdx_cap", 1) != 0;
4647f8919bdaSduboff dp->anadv_10fdx = gem_prop_get_int(dp, "adv_10fdx_cap", 1) != 0;
4648f8919bdaSduboff dp->anadv_10hdx = gem_prop_get_int(dp, "adv_10hdx_cap", 1) != 0;
4649f8919bdaSduboff
4650f8919bdaSduboff if ((ddi_prop_exists(DDI_DEV_T_ANY, dp->dip,
4651f8919bdaSduboff DDI_PROP_DONTPASS, "full-duplex"))) {
4652f8919bdaSduboff dp->full_duplex = gem_prop_get_int(dp, "full-duplex", 1) != 0;
4653f8919bdaSduboff dp->anadv_autoneg = B_FALSE;
465423d366e3Sduboff if (dp->full_duplex) {
4655f8919bdaSduboff dp->anadv_1000hdx = B_FALSE;
4656f8919bdaSduboff dp->anadv_100hdx = B_FALSE;
4657f8919bdaSduboff dp->anadv_10hdx = B_FALSE;
465823d366e3Sduboff } else {
465923d366e3Sduboff dp->anadv_1000fdx = B_FALSE;
466023d366e3Sduboff dp->anadv_100fdx = B_FALSE;
466123d366e3Sduboff dp->anadv_10fdx = B_FALSE;
466223d366e3Sduboff }
4663f8919bdaSduboff }
4664f8919bdaSduboff
4665f8919bdaSduboff if ((val = gem_prop_get_int(dp, "speed", 0)) > 0) {
4666f8919bdaSduboff dp->anadv_autoneg = B_FALSE;
4667f8919bdaSduboff switch (val) {
4668f8919bdaSduboff case 1000:
4669f8919bdaSduboff dp->speed = GEM_SPD_1000;
4670f8919bdaSduboff dp->anadv_100t4 = B_FALSE;
4671f8919bdaSduboff dp->anadv_100fdx = B_FALSE;
4672f8919bdaSduboff dp->anadv_100hdx = B_FALSE;
4673f8919bdaSduboff dp->anadv_10fdx = B_FALSE;
4674f8919bdaSduboff dp->anadv_10hdx = B_FALSE;
4675f8919bdaSduboff break;
4676f8919bdaSduboff case 100:
4677f8919bdaSduboff dp->speed = GEM_SPD_100;
4678f8919bdaSduboff dp->anadv_1000fdx = B_FALSE;
4679f8919bdaSduboff dp->anadv_1000hdx = B_FALSE;
4680f8919bdaSduboff dp->anadv_10fdx = B_FALSE;
4681f8919bdaSduboff dp->anadv_10hdx = B_FALSE;
4682f8919bdaSduboff break;
4683f8919bdaSduboff case 10:
4684f8919bdaSduboff dp->speed = GEM_SPD_10;
4685f8919bdaSduboff dp->anadv_1000fdx = B_FALSE;
4686f8919bdaSduboff dp->anadv_1000hdx = B_FALSE;
4687f8919bdaSduboff dp->anadv_100t4 = B_FALSE;
4688f8919bdaSduboff dp->anadv_100fdx = B_FALSE;
4689f8919bdaSduboff dp->anadv_100hdx = B_FALSE;
4690f8919bdaSduboff break;
4691f8919bdaSduboff default:
4692f8919bdaSduboff cmn_err(CE_WARN,
4693f8919bdaSduboff "!%s: property %s: illegal value:%d",
469423d366e3Sduboff dp->name, "speed", val);
4695f8919bdaSduboff dp->anadv_autoneg = B_TRUE;
4696f8919bdaSduboff break;
4697f8919bdaSduboff }
4698f8919bdaSduboff }
4699f8919bdaSduboff
4700f8919bdaSduboff val = gem_prop_get_int(dp, "flow-control", dp->gc.gc_flow_control);
4701f8919bdaSduboff if (val > FLOW_CONTROL_RX_PAUSE || val < FLOW_CONTROL_NONE) {
4702f8919bdaSduboff cmn_err(CE_WARN,
4703f8919bdaSduboff "!%s: property %s: illegal value:%d",
470423d366e3Sduboff dp->name, "flow-control", val);
4705f8919bdaSduboff } else {
4706f8919bdaSduboff val = min(val, dp->gc.gc_flow_control);
4707f8919bdaSduboff }
4708f8919bdaSduboff dp->anadv_flow_control = val;
4709f8919bdaSduboff
4710f8919bdaSduboff if (gem_prop_get_int(dp, "nointr", 0)) {
4711f8919bdaSduboff dp->misc_flag |= GEM_NOINTR;
4712f8919bdaSduboff cmn_err(CE_NOTE, "!%s: polling mode enabled", dp->name);
4713f8919bdaSduboff }
4714f8919bdaSduboff
4715f8919bdaSduboff dp->mtu = gem_prop_get_int(dp, "mtu", dp->mtu);
4716f8919bdaSduboff dp->txthr = gem_prop_get_int(dp, "txthr", dp->txthr);
4717f8919bdaSduboff dp->rxthr = gem_prop_get_int(dp, "rxthr", dp->rxthr);
4718f8919bdaSduboff dp->txmaxdma = gem_prop_get_int(dp, "txmaxdma", dp->txmaxdma);
4719f8919bdaSduboff dp->rxmaxdma = gem_prop_get_int(dp, "rxmaxdma", dp->rxmaxdma);
4720f8919bdaSduboff }
4721f8919bdaSduboff
4722f8919bdaSduboff
4723f8919bdaSduboff /*
4724f8919bdaSduboff * Gem kstat support
4725f8919bdaSduboff */
4726f8919bdaSduboff
/*
 * GEM_LOCAL_DATA_SIZE: size of the single softstate allocation made in
 * gem_do_attach(): the gem_dev itself, immediately followed by the
 * multicast address table (GEM_MAXMC entries), the tx buffer array
 * (gc_tx_buf_size entries), and a pointer array of the same length.
 * gem_do_attach() carves dp->mc_list and dp->tx_buf out of this block.
 */
#define	GEM_LOCAL_DATA_SIZE(gc)	\
	(sizeof (struct gem_dev) + \
	sizeof (struct mcast_addr) * GEM_MAXMC +	\
	sizeof (struct txbuf) * ((gc)->gc_tx_buf_size) +	\
	sizeof (void *) * ((gc)->gc_tx_buf_size))
4732f8919bdaSduboff
4733f8919bdaSduboff struct gem_dev *
gem_do_attach(dev_info_t * dip,int port,struct gem_conf * gc,void * base,ddi_acc_handle_t * regs_handlep,void * lp,int lmsize)4734f8919bdaSduboff gem_do_attach(dev_info_t *dip, int port,
4735f8919bdaSduboff struct gem_conf *gc, void *base, ddi_acc_handle_t *regs_handlep,
4736f8919bdaSduboff void *lp, int lmsize)
4737f8919bdaSduboff {
4738f8919bdaSduboff struct gem_dev *dp;
4739f8919bdaSduboff int i;
4740f8919bdaSduboff ddi_iblock_cookie_t c;
4741f8919bdaSduboff mac_register_t *macp = NULL;
4742f8919bdaSduboff int ret;
4743f8919bdaSduboff int unit;
4744f8919bdaSduboff int nports;
4745f8919bdaSduboff
4746f8919bdaSduboff unit = ddi_get_instance(dip);
4747f8919bdaSduboff if ((nports = gc->gc_nports) == 0) {
4748f8919bdaSduboff nports = 1;
4749f8919bdaSduboff }
4750f8919bdaSduboff if (nports == 1) {
4751f8919bdaSduboff ddi_set_driver_private(dip, NULL);
4752f8919bdaSduboff }
4753f8919bdaSduboff
4754f8919bdaSduboff DPRINTF(2, (CE_CONT, "!gem%d: gem_do_attach: called cmd:ATTACH",
4755f8919bdaSduboff unit));
4756f8919bdaSduboff
4757f8919bdaSduboff /*
4758f8919bdaSduboff * Allocate soft data structure
4759f8919bdaSduboff */
4760f8919bdaSduboff dp = kmem_zalloc(GEM_LOCAL_DATA_SIZE(gc), KM_SLEEP);
4761f8919bdaSduboff
4762f8919bdaSduboff if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
4763f8919bdaSduboff cmn_err(CE_WARN, "!gem%d: %s: mac_alloc failed",
4764f8919bdaSduboff unit, __func__);
4765f8919bdaSduboff return (NULL);
4766f8919bdaSduboff }
4767f8919bdaSduboff /* ddi_set_driver_private(dip, dp); */
4768f8919bdaSduboff
4769f8919bdaSduboff /* link to private area */
4770f8919bdaSduboff dp->private = lp;
4771f8919bdaSduboff dp->priv_size = lmsize;
4772f8919bdaSduboff dp->mc_list = (struct mcast_addr *)&dp[1];
4773f8919bdaSduboff
4774f8919bdaSduboff dp->dip = dip;
4775f8919bdaSduboff (void) sprintf(dp->name, gc->gc_name, nports * unit + port);
4776f8919bdaSduboff
4777f8919bdaSduboff /*
4778f8919bdaSduboff * Get iblock cookie
4779f8919bdaSduboff */
4780f8919bdaSduboff if (ddi_get_iblock_cookie(dip, 0, &c) != DDI_SUCCESS) {
4781f8919bdaSduboff cmn_err(CE_CONT,
4782f8919bdaSduboff "!%s: gem_do_attach: ddi_get_iblock_cookie: failed",
4783f8919bdaSduboff dp->name);
4784f8919bdaSduboff goto err_free_private;
4785f8919bdaSduboff }
4786f8919bdaSduboff dp->iblock_cookie = c;
4787f8919bdaSduboff
4788f8919bdaSduboff /*
4789f8919bdaSduboff * Initialize mutex's for this device.
4790f8919bdaSduboff */
4791f8919bdaSduboff mutex_init(&dp->intrlock, NULL, MUTEX_DRIVER, (void *)c);
4792f8919bdaSduboff mutex_init(&dp->xmitlock, NULL, MUTEX_DRIVER, (void *)c);
4793f8919bdaSduboff cv_init(&dp->tx_drain_cv, NULL, CV_DRIVER, NULL);
4794f8919bdaSduboff
4795f8919bdaSduboff /*
4796f8919bdaSduboff * configure gem parameter
4797f8919bdaSduboff */
4798f8919bdaSduboff dp->base_addr = base;
4799f8919bdaSduboff dp->regs_handle = *regs_handlep;
4800f8919bdaSduboff dp->gc = *gc;
4801f8919bdaSduboff gc = &dp->gc;
4802f8919bdaSduboff /* patch for simplify dma resource management */
4803f8919bdaSduboff gc->gc_tx_max_frags = 1;
4804f8919bdaSduboff gc->gc_tx_max_descs_per_pkt = 1;
4805f8919bdaSduboff gc->gc_tx_ring_size = gc->gc_tx_buf_size;
4806f8919bdaSduboff gc->gc_tx_ring_limit = gc->gc_tx_buf_limit;
4807f8919bdaSduboff gc->gc_tx_desc_write_oo = B_TRUE;
4808f8919bdaSduboff
4809f8919bdaSduboff gc->gc_nports = nports; /* fix nports */
4810f8919bdaSduboff
4811f8919bdaSduboff /* fix copy threadsholds */
4812f8919bdaSduboff gc->gc_tx_copy_thresh = max(ETHERMIN, gc->gc_tx_copy_thresh);
4813f8919bdaSduboff gc->gc_rx_copy_thresh = max(ETHERMIN, gc->gc_rx_copy_thresh);
4814f8919bdaSduboff
4815f8919bdaSduboff /* fix rx buffer boundary for iocache line size */
4816f8919bdaSduboff ASSERT(gc->gc_dma_attr_txbuf.dma_attr_align-1 == gc->gc_tx_buf_align);
4817f8919bdaSduboff ASSERT(gc->gc_dma_attr_rxbuf.dma_attr_align-1 == gc->gc_rx_buf_align);
4818f8919bdaSduboff gc->gc_rx_buf_align = max(gc->gc_rx_buf_align, IOC_LINESIZE - 1);
4819f8919bdaSduboff gc->gc_dma_attr_rxbuf.dma_attr_align = gc->gc_rx_buf_align + 1;
4820f8919bdaSduboff
482123d366e3Sduboff /* fix descriptor boundary for cache line size */
482223d366e3Sduboff gc->gc_dma_attr_desc.dma_attr_align =
482323d366e3Sduboff max(gc->gc_dma_attr_desc.dma_attr_align, IOC_LINESIZE);
482423d366e3Sduboff
4825f8919bdaSduboff /* patch get_packet method */
4826f8919bdaSduboff if (gc->gc_get_packet == NULL) {
4827f8919bdaSduboff gc->gc_get_packet = &gem_get_packet_default;
4828f8919bdaSduboff }
4829f8919bdaSduboff
4830f8919bdaSduboff /* patch get_rx_start method */
4831f8919bdaSduboff if (gc->gc_rx_start == NULL) {
4832f8919bdaSduboff gc->gc_rx_start = &gem_rx_start_default;
4833f8919bdaSduboff }
4834f8919bdaSduboff
4835f8919bdaSduboff /* calculate descriptor area */
4836f8919bdaSduboff if (gc->gc_rx_desc_unit_shift >= 0) {
4837f8919bdaSduboff dp->rx_desc_size =
4838f8919bdaSduboff ROUNDUP(gc->gc_rx_ring_size << gc->gc_rx_desc_unit_shift,
4839f8919bdaSduboff gc->gc_dma_attr_desc.dma_attr_align);
4840f8919bdaSduboff }
4841f8919bdaSduboff if (gc->gc_tx_desc_unit_shift >= 0) {
4842f8919bdaSduboff dp->tx_desc_size =
4843f8919bdaSduboff ROUNDUP(gc->gc_tx_ring_size << gc->gc_tx_desc_unit_shift,
4844f8919bdaSduboff gc->gc_dma_attr_desc.dma_attr_align);
4845f8919bdaSduboff }
4846f8919bdaSduboff
4847f8919bdaSduboff dp->mtu = ETHERMTU;
4848f8919bdaSduboff dp->tx_buf = (void *)&dp->mc_list[GEM_MAXMC];
4849f8919bdaSduboff /* link tx buffers */
4850f8919bdaSduboff for (i = 0; i < dp->gc.gc_tx_buf_size; i++) {
4851f8919bdaSduboff dp->tx_buf[i].txb_next =
4852f8919bdaSduboff &dp->tx_buf[SLOT(i + 1, dp->gc.gc_tx_buf_size)];
4853f8919bdaSduboff }
4854f8919bdaSduboff
4855f8919bdaSduboff dp->rxmode = 0;
4856f8919bdaSduboff dp->speed = GEM_SPD_10; /* default is 10Mbps */
4857f8919bdaSduboff dp->full_duplex = B_FALSE; /* default is half */
4858f8919bdaSduboff dp->flow_control = FLOW_CONTROL_NONE;
485923d366e3Sduboff dp->poll_pkt_delay = 8; /* typical coalease for rx packets */
4860f8919bdaSduboff
4861f8919bdaSduboff /* performance tuning parameters */
4862f8919bdaSduboff dp->txthr = ETHERMAX; /* tx fifo threshold */
4863f8919bdaSduboff dp->txmaxdma = 16*4; /* tx max dma burst size */
4864f8919bdaSduboff dp->rxthr = 128; /* rx fifo threshold */
4865f8919bdaSduboff dp->rxmaxdma = 16*4; /* rx max dma burst size */
4866f8919bdaSduboff
4867f8919bdaSduboff /*
4868f8919bdaSduboff * Get media mode information from .conf file
4869f8919bdaSduboff */
4870f8919bdaSduboff gem_read_conf(dp);
4871f8919bdaSduboff
4872f8919bdaSduboff /* rx_buf_len is required buffer length without padding for alignment */
4873f8919bdaSduboff dp->rx_buf_len = MAXPKTBUF(dp) + dp->gc.gc_rx_header_len;
4874f8919bdaSduboff
4875f8919bdaSduboff /*
4876f8919bdaSduboff * Reset the chip
4877f8919bdaSduboff */
4878f8919bdaSduboff mutex_enter(&dp->intrlock);
4879f8919bdaSduboff dp->nic_state = NIC_STATE_STOPPED;
4880f8919bdaSduboff ret = (*dp->gc.gc_reset_chip)(dp);
4881f8919bdaSduboff mutex_exit(&dp->intrlock);
4882f8919bdaSduboff if (ret != GEM_SUCCESS) {
4883f8919bdaSduboff goto err_free_regs;
4884f8919bdaSduboff }
4885f8919bdaSduboff
4886f8919bdaSduboff /*
4887f8919bdaSduboff * HW dependant paremeter initialization
4888f8919bdaSduboff */
4889f8919bdaSduboff mutex_enter(&dp->intrlock);
4890f8919bdaSduboff ret = (*dp->gc.gc_attach_chip)(dp);
4891f8919bdaSduboff mutex_exit(&dp->intrlock);
4892f8919bdaSduboff if (ret != GEM_SUCCESS) {
4893f8919bdaSduboff goto err_free_regs;
4894f8919bdaSduboff }
4895f8919bdaSduboff
4896f8919bdaSduboff #ifdef DEBUG_MULTIFRAGS
4897f8919bdaSduboff dp->gc.gc_tx_copy_thresh = dp->mtu;
4898f8919bdaSduboff #endif
4899f8919bdaSduboff /* allocate tx and rx resources */
4900f8919bdaSduboff if (gem_alloc_memory(dp)) {
4901f8919bdaSduboff goto err_free_regs;
4902f8919bdaSduboff }
4903f8919bdaSduboff
4904f8919bdaSduboff DPRINTF(0, (CE_CONT,
4905f8919bdaSduboff "!%s: at 0x%x, %02x:%02x:%02x:%02x:%02x:%02x",
4906f8919bdaSduboff dp->name, (long)dp->base_addr,
4907f8919bdaSduboff dp->dev_addr.ether_addr_octet[0],
4908f8919bdaSduboff dp->dev_addr.ether_addr_octet[1],
4909f8919bdaSduboff dp->dev_addr.ether_addr_octet[2],
4910f8919bdaSduboff dp->dev_addr.ether_addr_octet[3],
4911f8919bdaSduboff dp->dev_addr.ether_addr_octet[4],
4912f8919bdaSduboff dp->dev_addr.ether_addr_octet[5]));
4913f8919bdaSduboff
4914f8919bdaSduboff /* copy mac address */
4915f8919bdaSduboff dp->cur_addr = dp->dev_addr;
4916f8919bdaSduboff
4917f8919bdaSduboff gem_gld3_init(dp, macp);
4918f8919bdaSduboff
4919f8919bdaSduboff /* Probe MII phy (scan phy) */
4920f8919bdaSduboff dp->mii_lpable = 0;
4921f8919bdaSduboff dp->mii_advert = 0;
4922f8919bdaSduboff dp->mii_exp = 0;
4923f8919bdaSduboff dp->mii_ctl1000 = 0;
4924f8919bdaSduboff dp->mii_stat1000 = 0;
4925f8919bdaSduboff if ((*dp->gc.gc_mii_probe)(dp) != GEM_SUCCESS) {
4926f8919bdaSduboff goto err_free_ring;
4927f8919bdaSduboff }
4928f8919bdaSduboff
4929f8919bdaSduboff /* mask unsupported abilities */
493023d366e3Sduboff dp->anadv_autoneg &= BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4931f8919bdaSduboff dp->anadv_1000fdx &=
4932f8919bdaSduboff BOOLEAN(dp->mii_xstatus &
4933f8919bdaSduboff (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD));
4934f8919bdaSduboff dp->anadv_1000hdx &=
4935f8919bdaSduboff BOOLEAN(dp->mii_xstatus &
4936f8919bdaSduboff (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET));
4937f8919bdaSduboff dp->anadv_100t4 &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4938f8919bdaSduboff dp->anadv_100fdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4939f8919bdaSduboff dp->anadv_100hdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4940f8919bdaSduboff dp->anadv_10fdx &= BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4941f8919bdaSduboff dp->anadv_10hdx &= BOOLEAN(dp->mii_status & MII_STATUS_10);
4942f8919bdaSduboff
4943f8919bdaSduboff gem_choose_forcedmode(dp);
4944f8919bdaSduboff
4945f8919bdaSduboff /* initialize MII phy if required */
4946f8919bdaSduboff if (dp->gc.gc_mii_init) {
4947f8919bdaSduboff if ((*dp->gc.gc_mii_init)(dp) != GEM_SUCCESS) {
4948f8919bdaSduboff goto err_free_ring;
4949f8919bdaSduboff }
4950f8919bdaSduboff }
4951f8919bdaSduboff
4952f8919bdaSduboff /*
4953f8919bdaSduboff * initialize kstats including mii statistics
4954f8919bdaSduboff */
4955f8919bdaSduboff gem_nd_setup(dp);
4956f8919bdaSduboff
4957f8919bdaSduboff /*
4958f8919bdaSduboff * Add interrupt to system.
4959f8919bdaSduboff */
4960f8919bdaSduboff if (ret = mac_register(macp, &dp->mh)) {
4961f8919bdaSduboff cmn_err(CE_WARN, "!%s: mac_register failed, error:%d",
4962f8919bdaSduboff dp->name, ret);
4963f8919bdaSduboff goto err_release_stats;
4964f8919bdaSduboff }
4965f8919bdaSduboff mac_free(macp);
4966f8919bdaSduboff macp = NULL;
4967f8919bdaSduboff
4968f8919bdaSduboff if (dp->misc_flag & GEM_SOFTINTR) {
4969f8919bdaSduboff if (ddi_add_softintr(dip,
4970f8919bdaSduboff DDI_SOFTINT_LOW, &dp->soft_id,
4971f8919bdaSduboff NULL, NULL,
4972f8919bdaSduboff (uint_t (*)(caddr_t))gem_intr,
4973f8919bdaSduboff (caddr_t)dp) != DDI_SUCCESS) {
4974f8919bdaSduboff cmn_err(CE_WARN, "!%s: ddi_add_softintr failed",
4975f8919bdaSduboff dp->name);
4976f8919bdaSduboff goto err_unregister;
4977f8919bdaSduboff }
4978f8919bdaSduboff } else if ((dp->misc_flag & GEM_NOINTR) == 0) {
4979f8919bdaSduboff if (ddi_add_intr(dip, 0, NULL, NULL,
4980f8919bdaSduboff (uint_t (*)(caddr_t))gem_intr,
4981f8919bdaSduboff (caddr_t)dp) != DDI_SUCCESS) {
4982f8919bdaSduboff cmn_err(CE_WARN, "!%s: ddi_add_intr failed", dp->name);
4983f8919bdaSduboff goto err_unregister;
4984f8919bdaSduboff }
4985f8919bdaSduboff } else {
4986f8919bdaSduboff /*
4987f8919bdaSduboff * Dont use interrupt.
4988f8919bdaSduboff * schedule first call of gem_intr_watcher
4989f8919bdaSduboff */
4990f8919bdaSduboff dp->intr_watcher_id =
4991f8919bdaSduboff timeout((void (*)(void *))gem_intr_watcher,
4992f8919bdaSduboff (void *)dp, drv_usectohz(3*1000000));
4993f8919bdaSduboff }
4994f8919bdaSduboff
4995f8919bdaSduboff /* link this device to dev_info */
4996f8919bdaSduboff dp->next = (struct gem_dev *)ddi_get_driver_private(dip);
499723d366e3Sduboff dp->port = port;
4998f8919bdaSduboff ddi_set_driver_private(dip, (caddr_t)dp);
4999f8919bdaSduboff
500023d366e3Sduboff /* reset mii phy and start mii link watcher */
5001f8919bdaSduboff gem_mii_start(dp);
5002f8919bdaSduboff
5003f8919bdaSduboff DPRINTF(2, (CE_CONT, "!gem_do_attach: return: success"));
5004f8919bdaSduboff return (dp);
5005f8919bdaSduboff
5006f8919bdaSduboff err_unregister:
5007f8919bdaSduboff (void) mac_unregister(dp->mh);
5008f8919bdaSduboff err_release_stats:
5009f8919bdaSduboff /* release NDD resources */
5010f8919bdaSduboff gem_nd_cleanup(dp);
5011f8919bdaSduboff
5012f8919bdaSduboff err_free_ring:
5013f8919bdaSduboff gem_free_memory(dp);
5014f8919bdaSduboff err_free_regs:
5015f8919bdaSduboff ddi_regs_map_free(&dp->regs_handle);
5016f8919bdaSduboff err_free_locks:
5017f8919bdaSduboff mutex_destroy(&dp->xmitlock);
5018f8919bdaSduboff mutex_destroy(&dp->intrlock);
5019f8919bdaSduboff cv_destroy(&dp->tx_drain_cv);
5020f8919bdaSduboff err_free_private:
5021f8919bdaSduboff if (macp) {
5022f8919bdaSduboff mac_free(macp);
5023f8919bdaSduboff }
5024f8919bdaSduboff kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(gc));
5025f8919bdaSduboff
5026f8919bdaSduboff return (NULL);
5027f8919bdaSduboff }
5028f8919bdaSduboff
/*
 * gem_do_detach: common detach work for GEM-based drivers.
 *
 * Walks every port linked off this dev_info and, for each one:
 * unregisters from the GLDv3 mac layer, verifies all rx buffers have
 * been returned (panics otherwise - a loaned-up buffer would be a
 * use-after-free once the soft state is gone), stops the mii watcher,
 * removes the interrupt handler (or the polling timer), and releases
 * NDD, dma, lock and memory resources.
 *
 * The shared private area and register mapping are released once,
 * after all ports are torn down.  Returns DDI_SUCCESS, or DDI_FAILURE
 * if mac_unregister() refuses (e.g. the mac is still held).
 * NOTE(review): if mac_unregister() fails after some ports were
 * already freed, the earlier ports are not re-registered - presumably
 * callers only retry detach; confirm against the framework's users.
 */
int
gem_do_detach(dev_info_t *dip)
{
	struct gem_dev	*dp;
	struct gem_dev	*tmp;
	caddr_t	private;
	int	priv_size;
	ddi_acc_handle_t	rh;

	dp = GEM_GET_DEV(dip);
	if (dp == NULL) {
		/* nothing attached; trivially successful */
		return (DDI_SUCCESS);
	}

	/* save shared resources; dp itself is freed inside the loop */
	rh = dp->regs_handle;
	private = dp->private;
	priv_size = dp->priv_size;

	while (dp) {
		/* unregister with gld v3 */
		if (mac_unregister(dp->mh) != 0) {
			return (DDI_FAILURE);
		}

		/* ensure any rx buffers are not used */
		if (dp->rx_buf_allocated != dp->rx_buf_freecnt) {
			/* resource is busy */
			cmn_err(CE_PANIC,
			    "!%s: %s: rxbuf is busy: allocated:%d, freecnt:%d",
			    dp->name, __func__,
			    dp->rx_buf_allocated, dp->rx_buf_freecnt);
			/* NOT REACHED */
		}

		/* stop mii link watcher */
		gem_mii_stop(dp);

		/* unregister interrupt handler */
		if (dp->misc_flag & GEM_SOFTINTR) {
			ddi_remove_softintr(dp->soft_id);
		} else if ((dp->misc_flag & GEM_NOINTR) == 0) {
			ddi_remove_intr(dip, 0, dp->iblock_cookie);
		} else {
			/* stop interrupt watcher */
			if (dp->intr_watcher_id) {
				/* -1 means the handler is running; spin */
				while (untimeout(dp->intr_watcher_id) == -1)
					;
				dp->intr_watcher_id = 0;
			}
		}

		/* release NDD resources */
		gem_nd_cleanup(dp);
		/* release buffers, descriptors and dma resources */
		gem_free_memory(dp);

		/* release locks and condition variables */
		mutex_destroy(&dp->xmitlock);
		mutex_destroy(&dp->intrlock);
		cv_destroy(&dp->tx_drain_cv);

		/* release basic memory resources */
		tmp = dp->next;
		kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(&dp->gc));
		dp = tmp;
	}

	/* release common private memory for the nic */
	kmem_free(private, priv_size);

	/* release register mapping resources */
	ddi_regs_map_free(&rh);

	DPRINTF(2, (CE_CONT, "!%s%d: gem_do_detach: return: success",
	    ddi_driver_name(dip), ddi_get_instance(dip)));

	return (DDI_SUCCESS);
}
5107f8919bdaSduboff
/*
 * gem_suspend: DDI_SUSPEND support.
 *
 * Quiesces every port on this dev_info: stops the mii link watcher,
 * the no-interrupt polling timer and the tx timeout timer, stops the
 * mac, and finally sets mac_suspended under intrlock so no further
 * register access happens until gem_resume().  Always returns
 * DDI_SUCCESS.
 */
int
gem_suspend(dev_info_t *dip)
{
	struct gem_dev	*dp;

	/*
	 * stop the device
	 */
	dp = GEM_GET_DEV(dip);
	ASSERT(dp);

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	for (; dp; dp = dp->next) {

		/* stop mii link watcher */
		gem_mii_stop(dp);

		/* stop interrupt watcher for no-intr mode */
		if (dp->misc_flag & GEM_NOINTR) {
			if (dp->intr_watcher_id) {
				/* -1 means the handler is running; spin */
				while (untimeout(dp->intr_watcher_id) == -1)
					;
			}
			dp->intr_watcher_id = 0;
		}

		/* stop tx timeout watcher */
		if (dp->timeout_id) {
			while (untimeout(dp->timeout_id) == -1)
				;
			dp->timeout_id = 0;
		}

		/* make the nic state inactive */
		mutex_enter(&dp->intrlock);
		(void) gem_mac_stop(dp, 0);
		ASSERT(!dp->mac_active);

		/* no further register access */
		dp->mac_suspended = B_TRUE;
		mutex_exit(&dp->intrlock);
	}

	/* XXX - power down the nic */

	return (DDI_SUCCESS);
}
5156f8919bdaSduboff
/*
 * gem_resume: DDI_RESUME support; inverse of gem_suspend().
 *
 * For every port on this dev_info: clears the suspended flag, resets
 * and re-initializes the chip and its MII phy, restarts the polling
 * timer (GEM_NOINTR mode), the mii watcher and the tx timeout timer,
 * reprograms media mode and rx filter, and restarts the mac if the
 * link was already up.  Returns DDI_SUCCESS, or DDI_FAILURE after
 * resetting the failing port.
 * NOTE(review): on failure only the current port is reset/cleaned;
 * ports resumed earlier in the loop are left running - confirm this
 * is the intended partial-resume behavior.
 */
int
gem_resume(dev_info_t *dip)
{
	struct gem_dev	*dp;

	/*
	 * restart the device
	 */
	dp = GEM_GET_DEV(dip);
	ASSERT(dp);

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	for (; dp; dp = dp->next) {

		/*
		 * Bring up the nic after power up
		 */

		/* gem_xxx.c layer to setup power management state. */
		ASSERT(!dp->mac_active);

		/* reset the chip, because we are just after power up. */
		mutex_enter(&dp->intrlock);

		dp->mac_suspended = B_FALSE;
		dp->nic_state = NIC_STATE_STOPPED;

		if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
			cmn_err(CE_WARN, "%s: %s: failed to reset chip",
			    dp->name, __func__);
			mutex_exit(&dp->intrlock);
			goto err;
		}
		mutex_exit(&dp->intrlock);

		/* initialize mii phy because we are just after power up */
		if (dp->gc.gc_mii_init) {
			(void) (*dp->gc.gc_mii_init)(dp);
		}

		if (dp->misc_flag & GEM_NOINTR) {
			/*
			 * schedule first call of gem_intr_watcher
			 * instead of interrupts.
			 */
			dp->intr_watcher_id =
			    timeout((void (*)(void *))gem_intr_watcher,
			    (void *)dp, drv_usectohz(3*1000000));
		}

		/* restart mii link watcher */
		gem_mii_start(dp);

		/* restart mac */
		mutex_enter(&dp->intrlock);

		if (gem_mac_init(dp) != GEM_SUCCESS) {
			mutex_exit(&dp->intrlock);
			goto err_reset;
		}
		dp->nic_state = NIC_STATE_INITIALIZED;

		/* setup media mode if the link have been up */
		if (dp->mii_state == MII_STATE_LINKUP) {
			if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
				mutex_exit(&dp->intrlock);
				goto err_reset;
			}
		}

		/* enable mac address and rx filter */
		dp->rxmode |= RXMODE_ENABLE;
		if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
			mutex_exit(&dp->intrlock);
			goto err_reset;
		}
		dp->nic_state = NIC_STATE_ONLINE;

		/* restart tx timeout watcher */
		dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
		    (void *)dp,
		    dp->gc.gc_tx_timeout_interval);

		/* now the nic is fully functional */
		if (dp->mii_state == MII_STATE_LINKUP) {
			if (gem_mac_start(dp) != GEM_SUCCESS) {
				mutex_exit(&dp->intrlock);
				goto err_reset;
			}
		}
		mutex_exit(&dp->intrlock);
	}

	return (DDI_SUCCESS);

err_reset:
	/* cancel the polling timer if it was (re)started for this port */
	if (dp->intr_watcher_id) {
		while (untimeout(dp->intr_watcher_id) == -1)
			;
		dp->intr_watcher_id = 0;
	}
	mutex_enter(&dp->intrlock);
	(*dp->gc.gc_reset_chip)(dp);
	dp->nic_state = NIC_STATE_STOPPED;
	mutex_exit(&dp->intrlock);

err:
	return (DDI_FAILURE);
}
5267f8919bdaSduboff
5268f8919bdaSduboff /*
5269f8919bdaSduboff * misc routines for PCI
5270f8919bdaSduboff */
5271f8919bdaSduboff uint8_t
gem_search_pci_cap(dev_info_t * dip,ddi_acc_handle_t conf_handle,uint8_t target)5272f8919bdaSduboff gem_search_pci_cap(dev_info_t *dip,
5273f8919bdaSduboff ddi_acc_handle_t conf_handle, uint8_t target)
5274f8919bdaSduboff {
5275f8919bdaSduboff uint8_t pci_cap_ptr;
5276f8919bdaSduboff uint32_t pci_cap;
5277f8919bdaSduboff
5278f8919bdaSduboff /* search power management capablities */
5279f8919bdaSduboff pci_cap_ptr = pci_config_get8(conf_handle, PCI_CONF_CAP_PTR);
5280f8919bdaSduboff while (pci_cap_ptr) {
5281f8919bdaSduboff /* read pci capability header */
5282f8919bdaSduboff pci_cap = pci_config_get32(conf_handle, pci_cap_ptr);
5283f8919bdaSduboff if ((pci_cap & 0xff) == target) {
5284f8919bdaSduboff /* found */
5285f8919bdaSduboff break;
5286f8919bdaSduboff }
5287f8919bdaSduboff /* get next_ptr */
5288f8919bdaSduboff pci_cap_ptr = (pci_cap >> 8) & 0xff;
5289f8919bdaSduboff }
5290f8919bdaSduboff return (pci_cap_ptr);
5291f8919bdaSduboff }
5292f8919bdaSduboff
5293f8919bdaSduboff int
gem_pci_set_power_state(dev_info_t * dip,ddi_acc_handle_t conf_handle,uint_t new_mode)5294f8919bdaSduboff gem_pci_set_power_state(dev_info_t *dip,
5295f8919bdaSduboff ddi_acc_handle_t conf_handle, uint_t new_mode)
5296f8919bdaSduboff {
5297f8919bdaSduboff uint8_t pci_cap_ptr;
5298f8919bdaSduboff uint32_t pmcsr;
5299f8919bdaSduboff uint_t unit;
5300f8919bdaSduboff const char *drv_name;
5301f8919bdaSduboff
5302f8919bdaSduboff ASSERT(new_mode < 4);
5303f8919bdaSduboff
5304f8919bdaSduboff unit = ddi_get_instance(dip);
5305f8919bdaSduboff drv_name = ddi_driver_name(dip);
5306f8919bdaSduboff
5307f8919bdaSduboff /* search power management capablities */
5308f8919bdaSduboff pci_cap_ptr = gem_search_pci_cap(dip, conf_handle, PCI_CAP_ID_PM);
5309f8919bdaSduboff
5310f8919bdaSduboff if (pci_cap_ptr == 0) {
5311f8919bdaSduboff cmn_err(CE_CONT,
5312f8919bdaSduboff "!%s%d: doesn't have pci power management capability",
5313f8919bdaSduboff drv_name, unit);
5314f8919bdaSduboff return (DDI_FAILURE);
5315f8919bdaSduboff }
5316f8919bdaSduboff
5317f8919bdaSduboff /* read power management capabilities */
5318f8919bdaSduboff pmcsr = pci_config_get32(conf_handle, pci_cap_ptr + PCI_PMCSR);
5319f8919bdaSduboff
5320f8919bdaSduboff DPRINTF(0, (CE_CONT,
5321f8919bdaSduboff "!%s%d: pmc found at 0x%x: pmcsr: 0x%08x",
5322f8919bdaSduboff drv_name, unit, pci_cap_ptr, pmcsr));
5323f8919bdaSduboff
5324f8919bdaSduboff /*
5325f8919bdaSduboff * Is the resuested power mode supported?
5326f8919bdaSduboff */
5327f8919bdaSduboff /* not yet */
5328f8919bdaSduboff
5329f8919bdaSduboff /*
5330f8919bdaSduboff * move to new mode
5331f8919bdaSduboff */
5332f8919bdaSduboff pmcsr = (pmcsr & ~PCI_PMCSR_STATE_MASK) | new_mode;
5333f8919bdaSduboff pci_config_put32(conf_handle, pci_cap_ptr + PCI_PMCSR, pmcsr);
5334f8919bdaSduboff
5335f8919bdaSduboff return (DDI_SUCCESS);
5336f8919bdaSduboff }
5337f8919bdaSduboff
/*
 * Select and map the register set identified by the given address space
 * indicator or register offset within PCI config space.
 */
5342f8919bdaSduboff int
gem_pci_regs_map_setup(dev_info_t * dip,uint32_t which,uint32_t mask,struct ddi_device_acc_attr * attrp,caddr_t * basep,ddi_acc_handle_t * hp)5343f8919bdaSduboff gem_pci_regs_map_setup(dev_info_t *dip, uint32_t which, uint32_t mask,
5344f8919bdaSduboff struct ddi_device_acc_attr *attrp,
5345f8919bdaSduboff caddr_t *basep, ddi_acc_handle_t *hp)
5346f8919bdaSduboff {
5347f8919bdaSduboff struct pci_phys_spec *regs;
5348f8919bdaSduboff uint_t len;
5349f8919bdaSduboff uint_t unit;
5350f8919bdaSduboff uint_t n;
5351f8919bdaSduboff uint_t i;
5352f8919bdaSduboff int ret;
5353f8919bdaSduboff const char *drv_name;
5354f8919bdaSduboff
5355f8919bdaSduboff unit = ddi_get_instance(dip);
5356f8919bdaSduboff drv_name = ddi_driver_name(dip);
5357f8919bdaSduboff
5358f8919bdaSduboff /* Search IO-range or memory-range to be mapped */
5359f8919bdaSduboff regs = NULL;
5360f8919bdaSduboff len = 0;
5361f8919bdaSduboff
5362f8919bdaSduboff if ((ret = ddi_prop_lookup_int_array(
5363f8919bdaSduboff DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
5364f8919bdaSduboff "reg", (void *)®s, &len)) != DDI_PROP_SUCCESS) {
5365f8919bdaSduboff cmn_err(CE_WARN,
5366f8919bdaSduboff "!%s%d: failed to get reg property (ret:%d)",
5367f8919bdaSduboff drv_name, unit, ret);
5368f8919bdaSduboff return (DDI_FAILURE);
5369f8919bdaSduboff }
5370f8919bdaSduboff n = len / (sizeof (struct pci_phys_spec) / sizeof (int));
5371f8919bdaSduboff
5372f8919bdaSduboff ASSERT(regs != NULL && len > 0);
5373f8919bdaSduboff
5374f8919bdaSduboff #if GEM_DEBUG_LEVEL > 0
5375f8919bdaSduboff for (i = 0; i < n; i++) {
5376f8919bdaSduboff cmn_err(CE_CONT,
5377f8919bdaSduboff "!%s%d: regs[%d]: %08x.%08x.%08x.%08x.%08x",
5378f8919bdaSduboff drv_name, unit, i,
5379f8919bdaSduboff regs[i].pci_phys_hi,
5380f8919bdaSduboff regs[i].pci_phys_mid,
5381f8919bdaSduboff regs[i].pci_phys_low,
5382f8919bdaSduboff regs[i].pci_size_hi,
5383f8919bdaSduboff regs[i].pci_size_low);
5384f8919bdaSduboff }
5385f8919bdaSduboff #endif
5386f8919bdaSduboff for (i = 0; i < n; i++) {
5387f8919bdaSduboff if ((regs[i].pci_phys_hi & mask) == which) {
5388f8919bdaSduboff /* it's the requested space */
5389f8919bdaSduboff ddi_prop_free(regs);
5390f8919bdaSduboff goto address_range_found;
5391f8919bdaSduboff }
5392f8919bdaSduboff }
5393f8919bdaSduboff ddi_prop_free(regs);
5394f8919bdaSduboff return (DDI_FAILURE);
5395f8919bdaSduboff
5396f8919bdaSduboff address_range_found:
5397f8919bdaSduboff if ((ret = ddi_regs_map_setup(dip, i, basep, 0, 0, attrp, hp))
5398f8919bdaSduboff != DDI_SUCCESS) {
5399f8919bdaSduboff cmn_err(CE_CONT,
5400f8919bdaSduboff "!%s%d: ddi_regs_map_setup failed (ret:%d)",
5401f8919bdaSduboff drv_name, unit, ret);
5402f8919bdaSduboff }
5403f8919bdaSduboff
5404f8919bdaSduboff return (ret);
5405f8919bdaSduboff }
5406f8919bdaSduboff
/*
 * Module-load hook for GEM-based drivers: registers the driver's
 * dev_ops with the MAC framework.  Call from the driver's _init(9E)
 * before mod_install().
 */
void
gem_mod_init(struct dev_ops *dop, char *name)
{
	mac_init_ops(dop, name);
}
5412f8919bdaSduboff
/*
 * Module-unload hook for GEM-based drivers: undoes gem_mod_init() by
 * unregistering the dev_ops from the MAC framework.  Call from the
 * driver's _fini(9E).
 */
void
gem_mod_fini(struct dev_ops *dop)
{
	mac_fini_ops(dop);
}
5418