xref: /illumos-gate/usr/src/uts/common/io/sfe/sfe_util.c (revision fcdb3229a31dd4ff700c69238814e326aad49098)
1 /*
2  * sfe_util.c: general ethernet mac driver framework version 2.6
3  *
4  * Copyright (c) 2002-2008 Masayuki Murayama.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  *    this list of conditions and the following disclaimer in the documentation
14  *    and/or other materials provided with the distribution.
15  *
16  * 3. Neither the name of the author nor the names of its contributors may be
17  *    used to endorse or promote products derived from this software without
18  *    specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31  * DAMAGE.
32  */
33 
34 /*
35  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
36  * Use is subject to license terms.
37  */
38 
39 /*
40  * System Header files.
41  */
42 #include <sys/types.h>
43 #include <sys/conf.h>
44 #include <sys/debug.h>
45 #include <sys/kmem.h>
46 #include <sys/vtrace.h>
47 #include <sys/ethernet.h>
48 #include <sys/modctl.h>
49 #include <sys/errno.h>
50 #include <sys/ddi.h>
51 #include <sys/sunddi.h>
52 #include <sys/stream.h>		/* required for MBLK* */
53 #include <sys/strsun.h>		/* required for mionack() */
54 #include <sys/byteorder.h>
55 #include <sys/sysmacros.h>
56 #include <sys/pci.h>
57 #include <inet/common.h>
58 #include <inet/led.h>
59 #include <inet/mi.h>
60 #include <inet/nd.h>
61 #include <sys/crc32.h>
62 
63 #include <sys/note.h>
64 
65 #include "sfe_mii.h"
66 #include "sfe_util.h"
67 
68 
69 
70 extern char ident[];
71 
72 /* Debugging support */
73 #ifdef GEM_DEBUG_LEVEL
74 static int gem_debug = GEM_DEBUG_LEVEL;
75 #define	DPRINTF(n, args)	if (gem_debug > (n)) cmn_err args
76 #else
77 #define	DPRINTF(n, args)
78 #undef ASSERT
79 #define	ASSERT(x)
80 #endif
81 
82 #define	IOC_LINESIZE	0x40	/* Is it right for amd64? */
83 
84 /*
85  * Useful macros and typedefs
86  */
87 #define	ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
88 
89 #define	GET_NET16(p)	((((uint8_t *)(p))[0] << 8)| ((uint8_t *)(p))[1])
90 #define	GET_ETHERTYPE(p)	GET_NET16(((uint8_t *)(p)) + ETHERADDRL*2)
91 
92 #define	GET_IPTYPEv4(p)	(((uint8_t *)(p))[sizeof (struct ether_header) + 9])
93 #define	GET_IPTYPEv6(p)	(((uint8_t *)(p))[sizeof (struct ether_header) + 6])
94 
95 
96 #ifndef INT32_MAX
97 #define	INT32_MAX	0x7fffffff
98 #endif
99 
100 #define	VTAG_OFF	(ETHERADDRL*2)
101 #ifndef VTAG_SIZE
102 #define	VTAG_SIZE	4
103 #endif
104 #ifndef VTAG_TPID
105 #define	VTAG_TPID	0x8100U
106 #endif
107 
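/*
 * GET_TXBUF() maps a free-running tx sequence number onto the
 * corresponding slot in the circular tx buffer array; tx_slots_base
 * anchors sequence number zero to a physical slot.
 */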
108 #define	GET_TXBUF(dp, sn)	\
109 	&(dp)->tx_buf[SLOT((dp)->tx_slots_base + (sn), (dp)->gc.gc_tx_buf_size)]
110 
111 #define	TXFLAG_VTAG(flag)	\
112 	(((flag) & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT)
113 
114 #define	MAXPKTBUF(dp)	\
115 	((dp)->mtu + sizeof (struct ether_header) + VTAG_SIZE + ETHERFCSL)
116 
117 #define	WATCH_INTERVAL_FAST	drv_usectohz(100*1000)	/* 100mS */
118 #define	BOOLEAN(x)	((x) != 0)
119 
120 /*
121  * Macros to distinguish chip generations.
122  */
123 
124 /*
125  * Private functions
126  */
127 static void gem_mii_start(struct gem_dev *);
128 static void gem_mii_stop(struct gem_dev *);
129 
130 /* local buffer management */
131 static void gem_nd_setup(struct gem_dev *dp);
132 static void gem_nd_cleanup(struct gem_dev *dp);
133 static int gem_alloc_memory(struct gem_dev *);
134 static void gem_free_memory(struct gem_dev *);
135 static void gem_init_rx_ring(struct gem_dev *);
136 static void gem_init_tx_ring(struct gem_dev *);
137 __INLINE__ static void gem_append_rxbuf(struct gem_dev *, struct rxbuf *);
138 
139 static void gem_tx_timeout(struct gem_dev *);
140 static void gem_mii_link_watcher(struct gem_dev *dp);
141 static int gem_mac_init(struct gem_dev *dp);
142 static int gem_mac_start(struct gem_dev *dp);
143 static int gem_mac_stop(struct gem_dev *dp, uint_t flags);
144 static void gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp);
145 
146 static	struct ether_addr	gem_etherbroadcastaddr = {
147 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
148 };
149 
150 int gem_speed_value[] = {10, 100, 1000};
151 
152 /* ============================================================== */
153 /*
154  * Misc runtime routines
155  */
156 /* ============================================================== */
157 /*
158  * Ether CRC calculation according to 21143 data sheet
159  */
160 uint32_t
161 gem_ether_crc_le(const uint8_t *addr, int len)
162 {
163 	uint32_t	crc;
164 
165 	CRC32(crc, addr, ETHERADDRL, 0xffffffffU, crc32_table);
166 	return (crc);
167 }
168 
169 uint32_t
170 gem_ether_crc_be(const uint8_t *addr, int len)
171 {
172 	int		idx;
173 	int		bit;
174 	uint_t		data;
175 	uint32_t	crc;
176 #define	CRC32_POLY_BE	0x04c11db7
177 
178 	crc = 0xffffffff;
179 	for (idx = 0; idx < len; idx++) {
180 		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
181 			crc = (crc << 1)
182 			    ^ ((((crc >> 31) ^ data) & 1) ? CRC32_POLY_BE : 0);
183 		}
184 	}
185 	return (crc);
186 #undef	CRC32_POLY_BE
187 }
188 
189 int
190 gem_prop_get_int(struct gem_dev *dp, char *prop_template, int def_val)
191 {
192 	char	propname[32];
193 
194 	(void) sprintf(propname, prop_template, dp->name);
195 
196 	return (ddi_prop_get_int(DDI_DEV_T_ANY, dp->dip,
197 	    DDI_PROP_DONTPASS, propname, def_val));
198 }
199 
200 static int
201 gem_population(uint32_t x)
202 {
203 	int	i;
204 	int	cnt;
205 
206 	cnt = 0;
207 	for (i = 0; i < 32; i++) {
208 		if (x & (1 << i)) {
209 			cnt++;
210 		}
211 	}
212 	return (cnt);
213 }
214 
215 #ifdef GEM_DEBUG_LEVEL
216 #ifdef GEM_DEBUG_VLAN
217 static void
218 gem_dump_packet(struct gem_dev *dp, char *title, mblk_t *mp,
219     boolean_t check_cksum)
220 {
221 	char	msg[180];
222 	uint8_t	buf[18+20+20];
223 	uint8_t	*p;
224 	size_t	offset;
225 	uint_t	ethertype;
226 	uint_t	proto;
227 	uint_t	ipproto = 0;
228 	uint_t	iplen;
229 	uint_t	iphlen;
230 	uint_t	tcplen;
231 	uint_t	udplen;
232 	uint_t	cksum;
233 	int	rest;
234 	int	len;
235 	char	*bp;
236 	mblk_t	*tp;
237 	extern uint_t	ip_cksum(mblk_t *, int, uint32_t);
238 
239 	msg[0] = 0;
240 	bp = msg;
241 
242 	rest = sizeof (buf);
243 	offset = 0;
244 	for (tp = mp; tp; tp = tp->b_cont) {
245 		len = tp->b_wptr - tp->b_rptr;
246 		len = min(rest, len);
247 		bcopy(tp->b_rptr, &buf[offset], len);
248 		rest -= len;
249 		offset += len;
250 		if (rest == 0) {
251 			break;
252 		}
253 	}
254 
255 	offset = 0;
256 	p = &buf[offset];
257 
258 	/* ethernet address */
259 	sprintf(bp,
260 	    "ether: %02x:%02x:%02x:%02x:%02x:%02x"
261 	    " -> %02x:%02x:%02x:%02x:%02x:%02x",
262 	    p[6], p[7], p[8], p[9], p[10], p[11],
263 	    p[0], p[1], p[2], p[3], p[4], p[5]);
264 	bp = &msg[strlen(msg)];
265 
266 	/* vlan tag and ethertype */
267 	ethertype = GET_ETHERTYPE(p);
268 	if (ethertype == VTAG_TPID) {
269 		sprintf(bp, " vtag:0x%04x", GET_NET16(&p[14]));
270 		bp = &msg[strlen(msg)];
271 
272 		offset += VTAG_SIZE;
273 		p = &buf[offset];
274 		ethertype = GET_ETHERTYPE(p);
275 	}
276 	sprintf(bp, " type:%04x", ethertype);
277 	bp = &msg[strlen(msg)];
278 
279 	/* ethernet packet length */
280 	sprintf(bp, " mblklen:%d", msgdsize(mp));
281 	bp = &msg[strlen(msg)];
282 	if (mp->b_cont) {
283 		sprintf(bp, "(");
284 		bp = &msg[strlen(msg)];
285 		for (tp = mp; tp; tp = tp->b_cont) {
286 			if (tp == mp) {
287 				sprintf(bp, "%d", tp->b_wptr - tp->b_rptr);
288 			} else {
289 				sprintf(bp, "+%d", tp->b_wptr - tp->b_rptr);
290 			}
291 			bp = &msg[strlen(msg)];
292 		}
293 		sprintf(bp, ")");
294 		bp = &msg[strlen(msg)];
295 	}
296 
297 	if (ethertype != ETHERTYPE_IP) {
298 		goto x;
299 	}
300 
301 	/* ip address */
302 	offset += sizeof (struct ether_header);
303 	p = &buf[offset];
304 	ipproto = p[9];
305 	iplen = GET_NET16(&p[2]);
306 	sprintf(bp, ", ip: %d.%d.%d.%d -> %d.%d.%d.%d proto:%d iplen:%d",
307 	    p[12], p[13], p[14], p[15],
308 	    p[16], p[17], p[18], p[19],
309 	    ipproto, iplen);
310 	bp = (void *)&msg[strlen(msg)];
311 
312 	iphlen = (p[0] & 0xf) * 4;
313 
314 	/* cksum for pseudo header */
315 	cksum = *(uint16_t *)&p[12];
316 	cksum += *(uint16_t *)&p[14];
317 	cksum += *(uint16_t *)&p[16];
318 	cksum += *(uint16_t *)&p[18];
319 	cksum += BE_16(ipproto);
320 
321 	/* tcp or udp protocol header */
322 	offset += iphlen;
323 	p = &buf[offset];
324 	if (ipproto == IPPROTO_TCP) {
325 		tcplen = iplen - iphlen;
326 		sprintf(bp, ", tcp: len:%d cksum:%x",
327 		    tcplen, GET_NET16(&p[16]));
328 		bp = (void *)&msg[strlen(msg)];
329 
330 		if (check_cksum) {
331 			cksum += BE_16(tcplen);
332 			cksum = (uint16_t)ip_cksum(mp, offset, cksum);
333 			sprintf(bp, " (%s)",
334 			    (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
335 			bp = (void *)&msg[strlen(msg)];
336 		}
337 	} else if (ipproto == IPPROTO_UDP) {
338 		udplen = GET_NET16(&p[4]);
339 		sprintf(bp, ", udp: len:%d cksum:%x",
340 		    udplen, GET_NET16(&p[6]));
341 		bp = (void *)&msg[strlen(msg)];
342 
343 		if (GET_NET16(&p[6]) && check_cksum) {
344 			cksum += *(uint16_t *)&p[4];
345 			cksum = (uint16_t)ip_cksum(mp, offset, cksum);
346 			sprintf(bp, " (%s)",
347 			    (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
348 			bp = (void *)&msg[strlen(msg)];
349 		}
350 	}
351 x:
352 	cmn_err(CE_CONT, "!%s: %s: %s", dp->name, title, msg);
353 }
354 #endif /* GEM_DEBUG_VLAN */
355 #endif /* GEM_DEBUG_LEVEL */
356 
357 /* ============================================================== */
358 /*
359  * IO cache flush
360  */
361 /* ============================================================== */
362 __INLINE__ void
363 gem_rx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
364 {
365 	int	n;
366 	int	m;
367 	int	rx_desc_unit_shift = dp->gc.gc_rx_desc_unit_shift;
368 
369 	/* sync active descriptors */
370 	if (rx_desc_unit_shift < 0 || nslot == 0) {
371 		/* no rx descriptor ring */
372 		return;
373 	}
374 
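	/*
	 * The requested descriptor range may wrap past the end of the rx
	 * ring; if so, sync the wrapped portion (from offset 0) first and
	 * then the portion up to the end of the ring.
	 */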
375 	n = dp->gc.gc_rx_ring_size - head;
376 	if ((m = nslot - n) > 0) {
377 		(void) ddi_dma_sync(dp->desc_dma_handle,
378 		    (off_t)0,
379 		    (size_t)(m << rx_desc_unit_shift),
380 		    how);
381 		nslot = n;
382 	}
383 
384 	(void) ddi_dma_sync(dp->desc_dma_handle,
385 	    (off_t)(head << rx_desc_unit_shift),
386 	    (size_t)(nslot << rx_desc_unit_shift),
387 	    how);
388 }
389 
390 __INLINE__ void
391 gem_tx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
392 {
393 	int	n;
394 	int	m;
395 	int	tx_desc_unit_shift = dp->gc.gc_tx_desc_unit_shift;
396 
397 	/* sync active descriptors */
398 	if (tx_desc_unit_shift < 0 || nslot == 0) {
399 		/* no tx descriptor ring */
400 		return;
401 	}
402 
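	/*
	 * Same wrap-around handling as for the rx ring above; tx
	 * descriptors follow the rx descriptors in the shared DMA area,
	 * so offsets are biased by (tx_ring_dma - rx_ring_dma).
	 */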
403 	n = dp->gc.gc_tx_ring_size - head;
404 	if ((m = nslot - n) > 0) {
405 		(void) ddi_dma_sync(dp->desc_dma_handle,
406 		    (off_t)(dp->tx_ring_dma - dp->rx_ring_dma),
407 		    (size_t)(m << tx_desc_unit_shift),
408 		    how);
409 		nslot = n;
410 	}
411 
412 	(void) ddi_dma_sync(dp->desc_dma_handle,
413 	    (off_t)((head << tx_desc_unit_shift)
414 	    + (dp->tx_ring_dma - dp->rx_ring_dma)),
415 	    (size_t)(nslot << tx_desc_unit_shift),
416 	    how);
417 }
418 
419 static void
420 gem_rx_start_default(struct gem_dev *dp, int head, int nslot)
421 {
422 	gem_rx_desc_dma_sync(dp,
423 	    SLOT(head, dp->gc.gc_rx_ring_size), nslot,
424 	    DDI_DMA_SYNC_FORDEV);
425 }
426 
427 /* ============================================================== */
428 /*
429  * Buffer management
430  */
431 /* ============================================================== */
432 static void
433 gem_dump_txbuf(struct gem_dev *dp, int level, const char *title)
434 {
435 	cmn_err(level,
436 	    "!%s: %s: tx_active: %d[%d] %d[%d] (+%d), "
437 	    "tx_softq: %d[%d] %d[%d] (+%d), "
438 	    "tx_free: %d[%d] %d[%d] (+%d), "
439 	    "tx_desc: %d[%d] %d[%d] (+%d), "
440 	    "intr: %d[%d] (+%d), ",
441 	    dp->name, title,
442 	    dp->tx_active_head,
443 	    SLOT(dp->tx_active_head, dp->gc.gc_tx_buf_size),
444 	    dp->tx_active_tail,
445 	    SLOT(dp->tx_active_tail, dp->gc.gc_tx_buf_size),
446 	    dp->tx_active_tail - dp->tx_active_head,
447 	    dp->tx_softq_head,
448 	    SLOT(dp->tx_softq_head, dp->gc.gc_tx_buf_size),
449 	    dp->tx_softq_tail,
450 	    SLOT(dp->tx_softq_tail, dp->gc.gc_tx_buf_size),
451 	    dp->tx_softq_tail - dp->tx_softq_head,
452 	    dp->tx_free_head,
453 	    SLOT(dp->tx_free_head, dp->gc.gc_tx_buf_size),
454 	    dp->tx_free_tail,
455 	    SLOT(dp->tx_free_tail, dp->gc.gc_tx_buf_size),
456 	    dp->tx_free_tail - dp->tx_free_head,
457 	    dp->tx_desc_head,
458 	    SLOT(dp->tx_desc_head, dp->gc.gc_tx_ring_size),
459 	    dp->tx_desc_tail,
460 	    SLOT(dp->tx_desc_tail, dp->gc.gc_tx_ring_size),
461 	    dp->tx_desc_tail - dp->tx_desc_head,
462 	    dp->tx_desc_intr,
463 	    SLOT(dp->tx_desc_intr, dp->gc.gc_tx_ring_size),
464 	    dp->tx_desc_intr - dp->tx_desc_head);
465 }
466 
467 static void
468 gem_free_rxbuf(struct rxbuf *rbp)
469 {
470 	struct gem_dev	*dp;
471 
472 	dp = rbp->rxb_devp;
473 	ASSERT(mutex_owned(&dp->intrlock));
474 	rbp->rxb_next = dp->rx_buf_freelist;
475 	dp->rx_buf_freelist = rbp;
476 	dp->rx_buf_freecnt++;
477 }
478 
479 /*
480  * gem_get_rxbuf: supply a receive buffer which has been mapped into
481  * DMA space.
482  */
483 struct rxbuf *
484 gem_get_rxbuf(struct gem_dev *dp, int cansleep)
485 {
486 	struct rxbuf		*rbp;
487 	uint_t			count = 0;
488 	int			i;
489 	int			err;
490 
491 	ASSERT(mutex_owned(&dp->intrlock));
492 
493 	DPRINTF(3, (CE_CONT, "!gem_get_rxbuf: called freecnt:%d",
494 	    dp->rx_buf_freecnt));
495 	/*
496 	 * Get rx buffer management structure
497 	 */
498 	rbp = dp->rx_buf_freelist;
499 	if (rbp) {
500 		/* get one from the recycle list */
501 		ASSERT(dp->rx_buf_freecnt > 0);
502 
503 		dp->rx_buf_freelist = rbp->rxb_next;
504 		dp->rx_buf_freecnt--;
505 		rbp->rxb_next = NULL;
506 		return (rbp);
507 	}
508 
509 	/*
510 	 * Allocate a rx buffer management structure
511 	 */
512 	rbp = kmem_zalloc(sizeof (*rbp), cansleep ? KM_SLEEP : KM_NOSLEEP);
513 	if (rbp == NULL) {
514 		/* no memory */
515 		return (NULL);
516 	}
517 
518 	/*
519 	 * Prepare a back pointer to the device structure which will be
520 	 * referred to when freeing the buffer later.
521 	 */
522 	rbp->rxb_devp = dp;
523 
524 	/* allocate a dma handle for rx data buffer */
525 	if ((err = ddi_dma_alloc_handle(dp->dip,
526 	    &dp->gc.gc_dma_attr_rxbuf,
527 	    (cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT),
528 	    NULL, &rbp->rxb_dh)) != DDI_SUCCESS) {
529 
530 		cmn_err(CE_WARN,
531 		    "!%s: %s: ddi_dma_alloc_handle:1 failed, err=%d",
532 		    dp->name, __func__, err);
533 
534 		kmem_free(rbp, sizeof (struct rxbuf));
535 		return (NULL);
536 	}
537 
538 	/* allocate a bounce buffer for rx */
539 	if ((err = ddi_dma_mem_alloc(rbp->rxb_dh,
540 	    ROUNDUP(dp->rx_buf_len, IOC_LINESIZE),
541 	    &dp->gc.gc_buf_attr,
542 		/*
543 		 * if the nic requires a header at the top of receive buffers,
544 		 * it may access the rx buffer randomly.
545 		 */
546 	    (dp->gc.gc_rx_header_len > 0)
547 	    ? DDI_DMA_CONSISTENT : DDI_DMA_STREAMING,
548 	    cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
549 	    NULL,
550 	    &rbp->rxb_buf, &rbp->rxb_buf_len,
551 	    &rbp->rxb_bah)) != DDI_SUCCESS) {
552 
553 		cmn_err(CE_WARN,
554 		    "!%s: %s: ddi_dma_mem_alloc: failed, err=%d",
555 		    dp->name, __func__, err);
556 
557 		ddi_dma_free_handle(&rbp->rxb_dh);
558 		kmem_free(rbp, sizeof (struct rxbuf));
559 		return (NULL);
560 	}
561 
562 	/* Map the bounce buffer into the DMA space */
563 	if ((err = ddi_dma_addr_bind_handle(rbp->rxb_dh,
564 	    NULL, rbp->rxb_buf, dp->rx_buf_len,
565 	    ((dp->gc.gc_rx_header_len > 0)
566 	    ?(DDI_DMA_RDWR | DDI_DMA_CONSISTENT)
567 	    :(DDI_DMA_READ | DDI_DMA_STREAMING)),
568 	    cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
569 	    NULL,
570 	    rbp->rxb_dmacookie,
571 	    &count)) != DDI_DMA_MAPPED) {
572 
573 		ASSERT(err != DDI_DMA_INUSE);
574 		DPRINTF(0, (CE_WARN,
575 		    "!%s: %s: ddi_dma_addr_bind_handle: failed, err=%d",
576 		    dp->name, __func__, err));
577 
578 		/*
579 		 * we failed to allocate a dma resource
580 		 * for the rx bounce buffer.
581 		 */
582 		ddi_dma_mem_free(&rbp->rxb_bah);
583 		ddi_dma_free_handle(&rbp->rxb_dh);
584 		kmem_free(rbp, sizeof (struct rxbuf));
585 		return (NULL);
586 	}
587 
588 	/* collect the rest of the DMA cookies for the mapping */
589 	for (i = 1; i < count; i++) {
590 		ddi_dma_nextcookie(rbp->rxb_dh, &rbp->rxb_dmacookie[i]);
591 	}
592 	rbp->rxb_nfrags = count;
593 
594 	/* Now we successfully prepared an rx buffer */
595 	dp->rx_buf_allocated++;
596 
597 	return (rbp);
598 }
599 
600 /* ============================================================== */
601 /*
602  * memory resource management
603  */
604 /* ============================================================== */
605 static int
606 gem_alloc_memory(struct gem_dev *dp)
607 {
608 	caddr_t			ring;
609 	caddr_t			buf;
610 	size_t			req_size;
611 	size_t			ring_len;
612 	size_t			buf_len;
613 	ddi_dma_cookie_t	ring_cookie;
614 	ddi_dma_cookie_t	buf_cookie;
615 	uint_t			count;
616 	int			i;
617 	int			err;
618 	struct txbuf		*tbp;
619 	int			tx_buf_len;
620 	ddi_dma_attr_t		dma_attr_txbounce;
621 
622 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
623 
624 	dp->desc_dma_handle = NULL;
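	/*
	 * The rx descriptor ring, the tx descriptor ring and the optional
	 * chip-private io area are carved out of a single contiguous
	 * DMA-consistent allocation, in that order.
	 */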
625 	req_size = dp->rx_desc_size + dp->tx_desc_size + dp->gc.gc_io_area_size;
626 
627 	if (req_size > 0) {
628 		/*
629 		 * Alloc RX/TX descriptors and an io area.
630 		 */
631 		if ((err = ddi_dma_alloc_handle(dp->dip,
632 		    &dp->gc.gc_dma_attr_desc,
633 		    DDI_DMA_SLEEP, NULL,
634 		    &dp->desc_dma_handle)) != DDI_SUCCESS) {
635 			cmn_err(CE_WARN,
636 			    "!%s: %s: ddi_dma_alloc_handle failed: %d",
637 			    dp->name, __func__, err);
638 			return (ENOMEM);
639 		}
640 
641 		if ((err = ddi_dma_mem_alloc(dp->desc_dma_handle,
642 		    req_size, &dp->gc.gc_desc_attr,
643 		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
644 		    &ring, &ring_len,
645 		    &dp->desc_acc_handle)) != DDI_SUCCESS) {
646 			cmn_err(CE_WARN,
647 			    "!%s: %s: ddi_dma_mem_alloc failed: "
648 			    "ret %d, request size: %d",
649 			    dp->name, __func__, err, (int)req_size);
650 			ddi_dma_free_handle(&dp->desc_dma_handle);
651 			return (ENOMEM);
652 		}
653 
654 		if ((err = ddi_dma_addr_bind_handle(dp->desc_dma_handle,
655 		    NULL, ring, ring_len,
656 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
657 		    DDI_DMA_SLEEP, NULL,
658 		    &ring_cookie, &count)) != DDI_SUCCESS) {
659 			ASSERT(err != DDI_DMA_INUSE);
660 			cmn_err(CE_WARN,
661 			    "!%s: %s: ddi_dma_addr_bind_handle failed: %d",
662 			    dp->name, __func__, err);
663 			ddi_dma_mem_free(&dp->desc_acc_handle);
664 			ddi_dma_free_handle(&dp->desc_dma_handle);
665 			return (ENOMEM);
666 		}
667 		ASSERT(count == 1);
668 
669 		/* set base of rx descriptor ring */
670 		dp->rx_ring = ring;
671 		dp->rx_ring_dma = ring_cookie.dmac_laddress;
672 
673 		/* set base of tx descriptor ring */
674 		dp->tx_ring = dp->rx_ring + dp->rx_desc_size;
675 		dp->tx_ring_dma = dp->rx_ring_dma + dp->rx_desc_size;
676 
677 		/* set base of io area */
678 		dp->io_area = dp->tx_ring + dp->tx_desc_size;
679 		dp->io_area_dma = dp->tx_ring_dma + dp->tx_desc_size;
680 	}
681 
682 	/*
683 	 * Prepare DMA resources for tx packets
684 	 */
685 	ASSERT(dp->gc.gc_tx_buf_size > 0);
686 
687 	/* Special dma attribute for tx bounce buffers */
688 	dma_attr_txbounce = dp->gc.gc_dma_attr_txbuf;
689 	dma_attr_txbounce.dma_attr_sgllen = 1;
690 	dma_attr_txbounce.dma_attr_align =
691 	    max(dma_attr_txbounce.dma_attr_align, IOC_LINESIZE);
692 
693 	/* The size of tx bounce buffers must be the max tx packet size. */
694 	tx_buf_len = MAXPKTBUF(dp);
695 	tx_buf_len = ROUNDUP(tx_buf_len, IOC_LINESIZE);
696 
697 	ASSERT(tx_buf_len >= ETHERMAX+ETHERFCSL);
698 
699 	for (i = 0, tbp = dp->tx_buf;
700 	    i < dp->gc.gc_tx_buf_size; i++, tbp++) {
701 
702 		/* setup bounce buffers for tx packets */
703 		if ((err = ddi_dma_alloc_handle(dp->dip,
704 		    &dma_attr_txbounce,
705 		    DDI_DMA_SLEEP, NULL,
706 		    &tbp->txb_bdh)) != DDI_SUCCESS) {
707 
708 			cmn_err(CE_WARN,
709 		    "!%s: %s: ddi_dma_alloc_handle for bounce buffer failed:"
710 			    " err=%d, i=%d",
711 			    dp->name, __func__, err, i);
712 			goto err_alloc_dh;
713 		}
714 
715 		if ((err = ddi_dma_mem_alloc(tbp->txb_bdh,
716 		    tx_buf_len,
717 		    &dp->gc.gc_buf_attr,
718 		    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
719 		    &buf, &buf_len,
720 		    &tbp->txb_bah)) != DDI_SUCCESS) {
721 			cmn_err(CE_WARN,
722 		    "!%s: %s: ddi_dma_mem_alloc for bounce buffer failed: "
723 			    "ret %d, request size %d",
724 			    dp->name, __func__, err, tx_buf_len);
725 			ddi_dma_free_handle(&tbp->txb_bdh);
726 			goto err_alloc_dh;
727 		}
728 
729 		if ((err = ddi_dma_addr_bind_handle(tbp->txb_bdh,
730 		    NULL, buf, buf_len,
731 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
732 		    DDI_DMA_SLEEP, NULL,
733 		    &buf_cookie, &count)) != DDI_SUCCESS) {
734 				ASSERT(err != DDI_DMA_INUSE);
735 				cmn_err(CE_WARN,
736 	"!%s: %s: ddi_dma_addr_bind_handle for bounce buffer failed: %d",
737 				    dp->name, __func__, err);
738 				ddi_dma_mem_free(&tbp->txb_bah);
739 				ddi_dma_free_handle(&tbp->txb_bdh);
740 				goto err_alloc_dh;
741 		}
742 		ASSERT(count == 1);
743 		tbp->txb_buf = buf;
744 		tbp->txb_buf_dma = buf_cookie.dmac_laddress;
745 	}
746 
747 	return (0);
748 
749 err_alloc_dh:
750 	if (dp->gc.gc_tx_buf_size > 0) {
751 		while (i-- > 0) {
752 			(void) ddi_dma_unbind_handle(dp->tx_buf[i].txb_bdh);
753 			ddi_dma_mem_free(&dp->tx_buf[i].txb_bah);
754 			ddi_dma_free_handle(&dp->tx_buf[i].txb_bdh);
755 		}
756 	}
757 
758 	if (dp->desc_dma_handle) {
759 		(void) ddi_dma_unbind_handle(dp->desc_dma_handle);
760 		ddi_dma_mem_free(&dp->desc_acc_handle);
761 		ddi_dma_free_handle(&dp->desc_dma_handle);
762 		dp->desc_dma_handle = NULL;
763 	}
764 
765 	return (ENOMEM);
766 }
767 
768 static void
769 gem_free_memory(struct gem_dev *dp)
770 {
771 	int		i;
772 	struct rxbuf	*rbp;
773 	struct txbuf	*tbp;
774 
775 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
776 
777 	/* Free TX/RX descriptors and tx padding buffer */
778 	if (dp->desc_dma_handle) {
779 		(void) ddi_dma_unbind_handle(dp->desc_dma_handle);
780 		ddi_dma_mem_free(&dp->desc_acc_handle);
781 		ddi_dma_free_handle(&dp->desc_dma_handle);
782 		dp->desc_dma_handle = NULL;
783 	}
784 
785 	/* Free dma handles for Tx */
786 	for (i = dp->gc.gc_tx_buf_size, tbp = dp->tx_buf; i--; tbp++) {
787 		/* Free the bounce buffer associated with each txbuf */
788 		(void) ddi_dma_unbind_handle(tbp->txb_bdh);
789 		ddi_dma_mem_free(&tbp->txb_bah);
790 		ddi_dma_free_handle(&tbp->txb_bdh);
791 	}
792 
793 	/* Free rx buffer */
794 	while ((rbp = dp->rx_buf_freelist) != NULL) {
795 
796 		ASSERT(dp->rx_buf_freecnt > 0);
797 
798 		dp->rx_buf_freelist = rbp->rxb_next;
799 		dp->rx_buf_freecnt--;
800 
801 		/* release DMA mapping */
802 		ASSERT(rbp->rxb_dh != NULL);
803 
804 		/* free dma handles for the rx bounce buffer */
805 		/* it always has a dma mapping */
806 		ASSERT(rbp->rxb_nfrags > 0);
807 		(void) ddi_dma_unbind_handle(rbp->rxb_dh);
808 
809 		/* free the associated bounce buffer and dma handle */
810 		ASSERT(rbp->rxb_bah != NULL);
811 		ddi_dma_mem_free(&rbp->rxb_bah);
812 		/* free the associated dma handle */
813 		ddi_dma_free_handle(&rbp->rxb_dh);
814 
815 		/* free the base memory of rx buffer management */
816 		kmem_free(rbp, sizeof (struct rxbuf));
817 	}
818 }
819 
820 /* ============================================================== */
821 /*
822  * Rx/Tx descriptor slot management
823  */
824 /* ============================================================== */
825 /*
826  * Initialize an empty rx ring.
827  */
828 static void
829 gem_init_rx_ring(struct gem_dev *dp)
830 {
831 	int		i;
832 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
833 
834 	DPRINTF(1, (CE_CONT, "!%s: %s ring_size:%d, buf_max:%d",
835 	    dp->name, __func__,
836 	    rx_ring_size, dp->gc.gc_rx_buf_max));
837 
838 	/* make a physical chain of rx descriptors */
839 	for (i = 0; i < rx_ring_size; i++) {
840 		(*dp->gc.gc_rx_desc_init)(dp, i);
841 	}
842 	gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
843 
844 	dp->rx_active_head = (seqnum_t)0;
845 	dp->rx_active_tail = (seqnum_t)0;
846 
847 	ASSERT(dp->rx_buf_head == (struct rxbuf *)NULL);
848 	ASSERT(dp->rx_buf_tail == (struct rxbuf *)NULL);
849 }
850 
851 /*
852  * Prepare rx buffers and put them into the rx buffer/descriptor ring.
853  */
854 static void
855 gem_prepare_rx_buf(struct gem_dev *dp)
856 {
857 	int		i;
858 	int		nrbuf;
859 	struct rxbuf	*rbp;
860 
861 	ASSERT(mutex_owned(&dp->intrlock));
862 
863 	/* Now we have no active buffers in rx ring */
864 
865 	nrbuf = min(dp->gc.gc_rx_ring_size, dp->gc.gc_rx_buf_max);
866 	for (i = 0; i < nrbuf; i++) {
867 		if ((rbp = gem_get_rxbuf(dp, B_TRUE)) == NULL) {
868 			break;
869 		}
870 		gem_append_rxbuf(dp, rbp);
871 	}
872 
873 	gem_rx_desc_dma_sync(dp,
874 	    0, dp->gc.gc_rx_ring_size, DDI_DMA_SYNC_FORDEV);
875 }
876 
877 /*
878  * Reclaim active rx buffers in rx buffer ring.
879  */
880 static void
881 gem_clean_rx_buf(struct gem_dev *dp)
882 {
883 	int		i;
884 	struct rxbuf	*rbp;
885 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
886 #ifdef GEM_DEBUG_LEVEL
887 	int		total;
888 #endif
889 	ASSERT(mutex_owned(&dp->intrlock));
890 
891 	DPRINTF(2, (CE_CONT, "!%s: %s: %d buffers are free",
892 	    dp->name, __func__, dp->rx_buf_freecnt));
893 	/*
894 	 * clean up HW descriptors
895 	 */
896 	for (i = 0; i < rx_ring_size; i++) {
897 		(*dp->gc.gc_rx_desc_clean)(dp, i);
898 	}
899 	gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
900 
901 #ifdef GEM_DEBUG_LEVEL
902 	total = 0;
903 #endif
904 	/*
905 	 * Reclaim allocated rx buffers
906 	 */
907 	while ((rbp = dp->rx_buf_head) != NULL) {
908 #ifdef GEM_DEBUG_LEVEL
909 		total++;
910 #endif
911 		/* remove the first one from rx buffer list */
912 		dp->rx_buf_head = rbp->rxb_next;
913 
914 		/* recycle the rxbuf */
915 		gem_free_rxbuf(rbp);
916 	}
917 	dp->rx_buf_tail = (struct rxbuf *)NULL;
918 
919 	DPRINTF(2, (CE_CONT,
920 	    "!%s: %s: %d buffers freed, total: %d free",
921 	    dp->name, __func__, total, dp->rx_buf_freecnt));
922 }
923 
924 /*
925  * Initialize an empty transmit buffer/descriptor ring
926  */
927 static void
928 gem_init_tx_ring(struct gem_dev *dp)
929 {
930 	int		i;
931 	int		tx_buf_size = dp->gc.gc_tx_buf_size;
932 	int		tx_ring_size = dp->gc.gc_tx_ring_size;
933 
934 	DPRINTF(2, (CE_CONT, "!%s: %s: ring_size:%d, buf_size:%d",
935 	    dp->name, __func__,
936 	    dp->gc.gc_tx_ring_size, dp->gc.gc_tx_buf_size));
937 
938 	ASSERT(!dp->mac_active);
939 
940 	/* initialize active list and free list */
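	/*
	 * Rebase the free-running sequence numbers so that the soft queue
	 * starts at zero; tx_slots_base is advanced by the same amount,
	 * keeping GET_TXBUF() mapped to the same physical slots.
	 */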
941 	dp->tx_slots_base =
942 	    SLOT(dp->tx_slots_base + dp->tx_softq_head, tx_buf_size);
943 	dp->tx_softq_tail -= dp->tx_softq_head;
944 	dp->tx_softq_head = (seqnum_t)0;
945 
946 	dp->tx_active_head = dp->tx_softq_head;
947 	dp->tx_active_tail = dp->tx_softq_head;
948 
949 	dp->tx_free_head   = dp->tx_softq_tail;
950 	dp->tx_free_tail   = dp->gc.gc_tx_buf_limit;
951 
952 	dp->tx_desc_head = (seqnum_t)0;
953 	dp->tx_desc_tail = (seqnum_t)0;
954 	dp->tx_desc_intr = (seqnum_t)0;
955 
956 	for (i = 0; i < tx_ring_size; i++) {
957 		(*dp->gc.gc_tx_desc_init)(dp, i);
958 	}
959 	gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
960 }
961 
962 __INLINE__
963 static void
964 gem_txbuf_free_dma_resources(struct txbuf *tbp)
965 {
966 	if (tbp->txb_mp) {
967 		freemsg(tbp->txb_mp);
968 		tbp->txb_mp = NULL;
969 	}
970 	tbp->txb_nfrags = 0;
971 	tbp->txb_flag = 0;
972 }
973 
974 /*
975  * reclaim active tx buffers and reset positions in tx rings.
976  */
977 static void
978 gem_clean_tx_buf(struct gem_dev *dp)
979 {
980 	int		i;
981 	seqnum_t	head;
982 	seqnum_t	tail;
983 	seqnum_t	sn;
984 	struct txbuf	*tbp;
985 	int		tx_ring_size = dp->gc.gc_tx_ring_size;
986 #ifdef GEM_DEBUG_LEVEL
987 	int		err;
988 #endif
989 
990 	ASSERT(!dp->mac_active);
991 	ASSERT(dp->tx_busy == 0);
992 	ASSERT(dp->tx_softq_tail == dp->tx_free_head);
993 
994 	/*
995 	 * clean up all HW descriptors
996 	 */
997 	for (i = 0; i < tx_ring_size; i++) {
998 		(*dp->gc.gc_tx_desc_clean)(dp, i);
999 	}
1000 	gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
1001 
1002 	/* dequeue all active and loaded buffers */
1003 	head = dp->tx_active_head;
1004 	tail = dp->tx_softq_tail;
1005 
1006 	ASSERT(dp->tx_free_head - head >= 0);
1007 	tbp = GET_TXBUF(dp, head);
1008 	for (sn = head; sn != tail; sn++) {
1009 		gem_txbuf_free_dma_resources(tbp);
1010 		ASSERT(tbp->txb_mp == NULL);
1011 		dp->stats.errxmt++;
1012 		tbp = tbp->txb_next;
1013 	}
1014 
1015 #ifdef GEM_DEBUG_LEVEL
1016 	/* ensure no dma resources for tx are in use now */
1017 	err = 0;
1018 	while (sn != head + dp->gc.gc_tx_buf_size) {
1019 		if (tbp->txb_mp || tbp->txb_nfrags) {
1020 			DPRINTF(0, (CE_CONT,
1021 			    "%s: %s: sn:%d[%d] mp:%p nfrags:%d",
1022 			    dp->name, __func__,
1023 			    sn, SLOT(sn, dp->gc.gc_tx_buf_size),
1024 			    tbp->txb_mp, tbp->txb_nfrags));
1025 			err = 1;
1026 		}
1027 		sn++;
1028 		tbp = tbp->txb_next;
1029 	}
1030 
1031 	if (err) {
1032 		gem_dump_txbuf(dp, CE_WARN,
1033 		    "gem_clean_tx_buf: tbp->txb_mp != NULL");
1034 	}
1035 #endif
1036 	/* recycle buffers, now no active tx buffers in the ring */
1037 	dp->tx_free_tail += tail - head;
1038 	ASSERT(dp->tx_free_tail == dp->tx_free_head + dp->gc.gc_tx_buf_limit);
1039 
1040 	/* fix positions in tx buffer rings */
1041 	dp->tx_active_head = dp->tx_free_head;
1042 	dp->tx_active_tail = dp->tx_free_head;
1043 	dp->tx_softq_head  = dp->tx_free_head;
1044 	dp->tx_softq_tail  = dp->tx_free_head;
1045 }
1046 
1047 /*
1048  * Reclaim transmitted buffers from tx buffer/descriptor ring.
1049  */
1050 __INLINE__ int
1051 gem_reclaim_txbuf(struct gem_dev *dp)
1052 {
1053 	struct txbuf	*tbp;
1054 	uint_t		txstat;
1055 	int		err = GEM_SUCCESS;
1056 	seqnum_t	head;
1057 	seqnum_t	tail;
1058 	seqnum_t	sn;
1059 	seqnum_t	desc_head;
1060 	int		tx_ring_size = dp->gc.gc_tx_ring_size;
1061 	uint_t (*tx_desc_stat)(struct gem_dev *dp,
1062 	    int slot, int ndesc) = dp->gc.gc_tx_desc_stat;
1063 	clock_t		now;
1064 
1065 	now = ddi_get_lbolt();
1066 	if (now == (clock_t)0) {
1067 		/* make non-zero timestamp */
1068 		now--;
1069 	}
1070 
1071 	mutex_enter(&dp->xmitlock);
1072 
1073 	head = dp->tx_active_head;
1074 	tail = dp->tx_active_tail;
1075 
1076 #if GEM_DEBUG_LEVEL > 2
1077 	if (head != tail) {
1078 		cmn_err(CE_CONT, "!%s: %s: "
1079 		    "testing active_head:%d[%d], active_tail:%d[%d]",
1080 		    dp->name, __func__,
1081 		    head, SLOT(head, dp->gc.gc_tx_buf_size),
1082 		    tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1083 	}
1084 #endif
1085 #ifdef DEBUG
1086 	if (dp->tx_reclaim_busy == 0) {
1087 		/* check tx buffer management consistency */
1088 		ASSERT(dp->tx_free_tail - dp->tx_active_head
1089 		    == dp->gc.gc_tx_buf_limit);
1090 		/* EMPTY */
1091 	}
1092 #endif
1093 	dp->tx_reclaim_busy++;
1094 
1095 	/* sync all active HW descriptors */
1096 	gem_tx_desc_dma_sync(dp,
1097 	    SLOT(dp->tx_desc_head, tx_ring_size),
1098 	    dp->tx_desc_tail - dp->tx_desc_head,
1099 	    DDI_DMA_SYNC_FORKERNEL);
1100 
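	/*
	 * Walk the active tx buffer list and ask the hardware-dependent
	 * tx_desc_stat routine whether each buffer's descriptors have been
	 * processed; stop at the first buffer still owned by the nic.
	 */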
1101 	tbp = GET_TXBUF(dp, head);
1102 	desc_head = dp->tx_desc_head;
1103 	for (sn = head; sn != tail;
1104 	    dp->tx_active_head = (++sn), tbp = tbp->txb_next) {
1105 		int	ndescs;
1106 
1107 		ASSERT(tbp->txb_desc == desc_head);
1108 
1109 		ndescs = tbp->txb_ndescs;
1110 		if (ndescs == 0) {
1111 			/* skip errored descriptors */
1112 			continue;
1113 		}
1114 		txstat = (*tx_desc_stat)(dp,
1115 		    SLOT(tbp->txb_desc, tx_ring_size), ndescs);
1116 
1117 		if (txstat == 0) {
1118 			/* not transmitted yet */
1119 			break;
1120 		}
1121 
1122 		if (!dp->tx_blocked && (tbp->txb_flag & GEM_TXFLAG_INTR)) {
1123 			dp->tx_blocked = now;
1124 		}
1125 
1126 		ASSERT(txstat & (GEM_TX_DONE | GEM_TX_ERR));
1127 
1128 		if (txstat & GEM_TX_ERR) {
1129 			err = GEM_FAILURE;
1130 			cmn_err(CE_WARN, "!%s: tx error at desc %d[%d]",
1131 			    dp->name, sn, SLOT(sn, tx_ring_size));
1132 		}
1133 #if GEM_DEBUG_LEVEL > 4
1134 		if (now - tbp->txb_stime >= 50) {
1135 			cmn_err(CE_WARN, "!%s: tx delay while %d mS",
1136 			    dp->name, (now - tbp->txb_stime)*10);
1137 		}
1138 #endif
1139 		/* free transmitted descriptors */
1140 		desc_head += ndescs;
1141 	}
1142 
1143 	if (dp->tx_desc_head != desc_head) {
1144 		/* we have reclaimed one or more tx buffers */
1145 		dp->tx_desc_head = desc_head;
1146 
1147 		/* If we passed the next interrupt position, update it */
1148 		if (desc_head - dp->tx_desc_intr > 0) {
1149 			dp->tx_desc_intr = desc_head;
1150 		}
1151 	}
1152 	mutex_exit(&dp->xmitlock);
1153 
1154 	/* free dma mapping resources associated with transmitted tx buffers */
1155 	tbp = GET_TXBUF(dp, head);
1156 	tail = sn;
1157 #if GEM_DEBUG_LEVEL > 2
1158 	if (head != tail) {
1159 		cmn_err(CE_CONT, "%s: freeing head:%d[%d], tail:%d[%d]",
1160 		    __func__,
1161 		    head, SLOT(head, dp->gc.gc_tx_buf_size),
1162 		    tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1163 	}
1164 #endif
1165 	for (sn = head; sn != tail; sn++, tbp = tbp->txb_next) {
1166 		gem_txbuf_free_dma_resources(tbp);
1167 	}
1168 
1169 	/* recycle the tx buffers */
1170 	mutex_enter(&dp->xmitlock);
1171 	if (--dp->tx_reclaim_busy == 0) {
1172 		/* we are the last thread who can update free tail */
1173 #if GEM_DEBUG_LEVEL > 4
1174 		/* check all resources have been deallocated */
1175 		sn = dp->tx_free_tail;
1176 		tbp = GET_TXBUF(dp, sn);
1177 		while (sn != dp->tx_active_head + dp->gc.gc_tx_buf_limit) {
1178 			if (tbp->txb_nfrags) {
1179 				/* in use */
1180 				break;
1181 			}
1182 			ASSERT(tbp->txb_mp == NULL);
1183 			tbp = tbp->txb_next;
1184 			sn++;
1185 		}
1186 		ASSERT(dp->tx_active_head + dp->gc.gc_tx_buf_limit == sn);
1187 #endif
1188 		dp->tx_free_tail =
1189 		    dp->tx_active_head + dp->gc.gc_tx_buf_limit;
1190 	}
1191 	if (!dp->mac_active) {
1192 		/* someone may be waiting for me. */
1193 		cv_broadcast(&dp->tx_drain_cv);
1194 	}
1195 #if GEM_DEBUG_LEVEL > 2
1196 	cmn_err(CE_CONT, "!%s: %s: called, "
1197 	    "free_head:%d free_tail:%d(+%d) added:%d",
1198 	    dp->name, __func__,
1199 	    dp->tx_free_head, dp->tx_free_tail,
1200 	    dp->tx_free_tail - dp->tx_free_head, tail - head);
1201 #endif
1202 	mutex_exit(&dp->xmitlock);
1203 
1204 	return (err);
1205 }
1206 
1207 /*
1208  * Make tx descriptors in an out-of-order manner
1209  */
1210 static void
1211 gem_tx_load_descs_oo(struct gem_dev *dp,
1212     seqnum_t start_slot, seqnum_t end_slot, uint64_t flags)
1213 {
1214 	seqnum_t	sn;
1215 	struct txbuf	*tbp;
1216 	int	tx_ring_size = dp->gc.gc_tx_ring_size;
1217 	int	(*tx_desc_write)
1218 	    (struct gem_dev *dp, int slot,
1219 	    ddi_dma_cookie_t *dmacookie,
1220 	    int frags, uint64_t flag) = dp->gc.gc_tx_desc_write;
1221 	clock_t	now = ddi_get_lbolt();
1222 
1223 	sn = start_slot;
1224 	tbp = GET_TXBUF(dp, sn);
1225 	do {
1226 #if GEM_DEBUG_LEVEL > 1
1227 		if (dp->tx_cnt < 100) {
1228 			dp->tx_cnt++;
1229 			flags |= GEM_TXFLAG_INTR;
1230 		}
1231 #endif
1232 		/* write a tx descriptor */
1233 		tbp->txb_desc = sn;
1234 		tbp->txb_ndescs = (*tx_desc_write)(dp,
1235 		    SLOT(sn, tx_ring_size),
1236 		    tbp->txb_dmacookie,
1237 		    tbp->txb_nfrags, flags | tbp->txb_flag);
1238 		tbp->txb_stime = now;
1239 		ASSERT(tbp->txb_ndescs == 1);
1240 
1241 		flags = 0;
1242 		sn++;
1243 		tbp = tbp->txb_next;
1244 	} while (sn != end_slot);
1245 }
1246 
1247 __INLINE__
1248 static size_t
1249 gem_setup_txbuf_copy(struct gem_dev *dp, mblk_t *mp, struct txbuf *tbp)
1250 {
1251 	size_t			min_pkt;
1252 	caddr_t			bp;
1253 	size_t			off;
1254 	mblk_t			*tp;
1255 	size_t			len;
1256 	uint64_t		flag;
1257 
1258 	ASSERT(tbp->txb_mp == NULL);
1259 
1260 	/* we use bounce buffer for the packet */
1261 	min_pkt = ETHERMIN;
1262 	bp = tbp->txb_buf;
1263 	off = 0;
1264 	tp = mp;
1265 
1266 	flag = tbp->txb_flag;
1267 	if (flag & GEM_TXFLAG_SWVTAG) {
1268 		/* need to increase min packet size */
1269 		min_pkt += VTAG_SIZE;
1270 		ASSERT((flag & GEM_TXFLAG_VTAG) == 0);
1271 	}
1272 
1273 	/* copy the whole packet into the bounce buffer */
1274 	for (; tp; tp = tp->b_cont) {
1275 		if ((len = (long)tp->b_wptr - (long)tp->b_rptr) > 0) {
1276 			bcopy(tp->b_rptr, &bp[off], len);
1277 			off += len;
1278 		}
1279 	}
1280 
1281 	if (off < min_pkt &&
1282 	    (min_pkt > ETHERMIN || !dp->gc.gc_tx_auto_pad)) {
1283 		/*
1284 		 * Extend the packet to minimum packet size explicitly.
1285 		 * For software vlan packets, we shouldn't use the tx autopad
1286 		 * function because nics may not be aware of vlan;
1287 		 * we must keep 46 octets of payload even if we use vlan.
1288 		 */
1289 		bzero(&bp[off], min_pkt - off);
1290 		off = min_pkt;
1291 	}
1292 
1293 	(void) ddi_dma_sync(tbp->txb_bdh, (off_t)0, off, DDI_DMA_SYNC_FORDEV);
1294 
1295 	tbp->txb_dmacookie[0].dmac_laddress = tbp->txb_buf_dma;
1296 	tbp->txb_dmacookie[0].dmac_size = off;
1297 
1298 	DPRINTF(2, (CE_CONT,
1299 	    "!%s: %s: copy: addr:0x%llx len:0x%x, vtag:0x%04x, min_pkt:%d",
1300 	    dp->name, __func__,
1301 	    tbp->txb_dmacookie[0].dmac_laddress,
1302 	    tbp->txb_dmacookie[0].dmac_size,
1303 	    (flag & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT,
1304 	    min_pkt));
1305 
1306 	/* save misc info */
1307 	tbp->txb_mp = mp;
1308 	tbp->txb_nfrags = 1;
1309 #ifdef DEBUG_MULTIFRAGS
1310 	if (dp->gc.gc_tx_max_frags >= 3 &&
1311 	    tbp->txb_dmacookie[0].dmac_size > 16*3) {
1312 		tbp->txb_dmacookie[1].dmac_laddress =
1313 		    tbp->txb_dmacookie[0].dmac_laddress + 16;
1314 		tbp->txb_dmacookie[2].dmac_laddress =
1315 		    tbp->txb_dmacookie[1].dmac_laddress + 16;
1316 
1317 		tbp->txb_dmacookie[2].dmac_size =
1318 		    tbp->txb_dmacookie[0].dmac_size - 16*2;
1319 		tbp->txb_dmacookie[1].dmac_size = 16;
1320 		tbp->txb_dmacookie[0].dmac_size = 16;
1321 		tbp->txb_nfrags  = 3;
1322 	}
1323 #endif
1324 	return (off);
1325 }
1326 
1327 __INLINE__
1328 static void
1329 gem_tx_start_unit(struct gem_dev *dp)
1330 {
1331 	seqnum_t	head;
1332 	seqnum_t	tail;
1333 	struct txbuf	*tbp_head;
1334 	struct txbuf	*tbp_tail;
1335 
1336 	/* update HW descriptors from soft queue */
1337 	ASSERT(mutex_owned(&dp->xmitlock));
1338 	ASSERT(dp->tx_softq_head == dp->tx_active_tail);
1339 
1340 	head = dp->tx_softq_head;
1341 	tail = dp->tx_softq_tail;
1342 
1343 	DPRINTF(1, (CE_CONT,
1344 	    "%s: %s: called, softq %d %d[+%d], desc %d %d[+%d]",
1345 	    dp->name, __func__, head, tail, tail - head,
1346 	    dp->tx_desc_head, dp->tx_desc_tail,
1347 	    dp->tx_desc_tail - dp->tx_desc_head));
1348 
1349 	ASSERT(tail - head > 0);
1350 
1351 	dp->tx_desc_tail = tail;
1352 
1353 	tbp_head = GET_TXBUF(dp, head);
1354 	tbp_tail = GET_TXBUF(dp, tail - 1);
1355 
1356 	ASSERT(tbp_tail->txb_desc + tbp_tail->txb_ndescs == dp->tx_desc_tail);
1357 
1358 	dp->gc.gc_tx_start(dp,
1359 	    SLOT(tbp_head->txb_desc, dp->gc.gc_tx_ring_size),
1360 	    tbp_tail->txb_desc + tbp_tail->txb_ndescs - tbp_head->txb_desc);
1361 
1362 	/* advance softq head and active tail */
1363 	dp->tx_softq_head = dp->tx_active_tail = tail;
1364 }
1365 
1366 #ifdef GEM_DEBUG_LEVEL
1367 static int gem_send_cnt[10];
1368 #endif
1369 #define	PKT_MIN_SIZE	(sizeof (struct ether_header) + 10 + VTAG_SIZE)
1370 #define	EHLEN	(sizeof (struct ether_header))
1371 /*
1372  * check ether packet type and ip protocol
1373  */
1374 static uint64_t
1375 gem_txbuf_options(struct gem_dev *dp, mblk_t *mp, uint8_t *bp)
1376 {
1377 	mblk_t		*tp;
1378 	ssize_t		len;
1379 	uint_t		vtag;
1380 	int		off;
1381 	uint64_t	flag;
1382 
1383 	flag = 0ULL;
1384 
1385 	/*
1386 	 * prepare a contiguous header of the packet for protocol analysis
1387 	 */
1388 	if ((long)mp->b_wptr - (long)mp->b_rptr < PKT_MIN_SIZE) {
1389 		/* we use work buffer to copy mblk */
1390 		for (tp = mp, off = 0;
1391 		    tp && (off < PKT_MIN_SIZE);
1392 		    tp = tp->b_cont, off += len) {
1393 			len = (long)tp->b_wptr - (long)tp->b_rptr;
1394 			len = min(len, PKT_MIN_SIZE - off);
1395 			bcopy(tp->b_rptr, &bp[off], len);
1396 		}
1397 	} else {
1398 		/* we can use mblk without copy */
1399 		bp = mp->b_rptr;
1400 	}
1401 
1402 	/* process vlan tag for GLD v3 */
1403 	if (GET_NET16(&bp[VTAG_OFF]) == VTAG_TPID) {
1404 		if (dp->misc_flag & GEM_VLAN_HARD) {
1405 			vtag = GET_NET16(&bp[VTAG_OFF + 2]);
1406 			ASSERT(vtag);
1407 			flag |= vtag << GEM_TXFLAG_VTAG_SHIFT;
1408 		} else {
1409 			flag |= GEM_TXFLAG_SWVTAG;
1410 		}
1411 	}
1412 	return (flag);
1413 }
1414 #undef EHLEN
1415 #undef PKT_MIN_SIZE
1416 /*
1417  * gem_send_common is an exported function because hardware-dependent routines
1418  * may use it for sending control frames like setup frames for the 2114x chipset.
1419  */
1420 mblk_t *
1421 gem_send_common(struct gem_dev *dp, mblk_t *mp_head, uint32_t flags)
1422 {
1423 	int			nmblk;
1424 	int			avail;
1425 	mblk_t			*tp;
1426 	mblk_t			*mp;
1427 	int			i;
1428 	struct txbuf		*tbp;
1429 	seqnum_t		head;
1430 	uint64_t		load_flags;
1431 	uint64_t		len_total = 0;
1432 	uint32_t		bcast = 0;
1433 	uint32_t		mcast = 0;
1434 
1435 	ASSERT(mp_head != NULL);
1436 
1437 	mp = mp_head;
1438 	nmblk = 1;
1439 	while ((mp = mp->b_next) != NULL) {
1440 		nmblk++;
1441 	}
1442 #ifdef GEM_DEBUG_LEVEL
1443 	gem_send_cnt[0]++;
1444 	gem_send_cnt[min(nmblk, 9)]++;
1445 #endif
1446 	/*
1447 	 * Acquire resources
1448 	 */
1449 	mutex_enter(&dp->xmitlock);
1450 	if (dp->mac_suspended) {
1451 		mutex_exit(&dp->xmitlock);
1452 		mp = mp_head;
1453 		while (mp) {
1454 			tp = mp->b_next;
1455 			freemsg(mp);
1456 			mp = tp;
1457 		}
1458 		return (NULL);
1459 	}
1460 
1461 	if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
1462 		/* don't send data packets while mac isn't active */
1463 		/* XXX - should we discard packets? */
1464 		mutex_exit(&dp->xmitlock);
1465 		return (mp_head);
1466 	}
1467 
1468 	/* allocate free slots */
1469 	head = dp->tx_free_head;
1470 	avail = dp->tx_free_tail - head;
1471 
1472 	DPRINTF(2, (CE_CONT,
1473 	    "!%s: %s: called, free_head:%d free_tail:%d(+%d) req:%d",
1474 	    dp->name, __func__,
1475 	    dp->tx_free_head, dp->tx_free_tail, avail, nmblk));
1476 
1477 	avail = min(avail, dp->tx_max_packets);
1478 
1479 	if (nmblk > avail) {
1480 		if (avail == 0) {
1481 			/* no resources; short cut */
1482 			DPRINTF(2, (CE_CONT, "!%s: no resources", __func__));
1483 			dp->tx_max_packets = max(dp->tx_max_packets - 1, 1);
1484 			goto done;
1485 		}
1486 		nmblk = avail;
1487 	}
1488 
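	/*
	 * Reserve the slots by advancing tx_free_head while still holding
	 * xmitlock; the buffers are filled in afterwards without the lock.
	 * tx_busy counts senders in this window, and GEM_TXFLAG_HEAD is
	 * passed only by the first concurrent sender.
	 */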
1489 	dp->tx_free_head = head + nmblk;
1490 	load_flags = ((dp->tx_busy++) == 0) ? GEM_TXFLAG_HEAD : 0;
1491 
1492 	/* update last interrupt position if tx buffers are exhausted.  */
1493 	if (nmblk == avail) {
1494 		tbp = GET_TXBUF(dp, head + avail - 1);
1495 		tbp->txb_flag = GEM_TXFLAG_INTR;
1496 		dp->tx_desc_intr = head + avail;
1497 	}
1498 	mutex_exit(&dp->xmitlock);
1499 
1500 	tbp = GET_TXBUF(dp, head);
1501 
1502 	for (i = nmblk; i > 0; i--, tbp = tbp->txb_next) {
1503 		uint8_t		*bp;
1504 		uint64_t	txflag;
1505 
1506 		/* remove one from the mblk list */
1507 		ASSERT(mp_head != NULL);
1508 		mp = mp_head;
1509 		mp_head = mp_head->b_next;
1510 		mp->b_next = NULL;
1511 
1512 		/* statistics for non-unicast packets */
1513 		bp = mp->b_rptr;
1514 		if ((bp[0] & 1) && (flags & GEM_SEND_CTRL) == 0) {
1515 			if (bcmp(bp, gem_etherbroadcastaddr.ether_addr_octet,
1516 			    ETHERADDRL) == 0) {
1517 				bcast++;
1518 			} else {
1519 				mcast++;
1520 			}
1521 		}
1522 
1523 		/* save misc info */
1524 		txflag = tbp->txb_flag;
1525 		txflag |= (flags & GEM_SEND_CTRL) << GEM_TXFLAG_PRIVATE_SHIFT;
1526 		txflag |= gem_txbuf_options(dp, mp, (uint8_t *)tbp->txb_buf);
1527 		tbp->txb_flag = txflag;
1528 
1529 		len_total += gem_setup_txbuf_copy(dp, mp, tbp);
1530 	}
1531 
1532 	(void) gem_tx_load_descs_oo(dp, head, head + nmblk, load_flags);
1533 
1534 	/* Append the tbp at the tail of the active tx buffer list */
1535 	mutex_enter(&dp->xmitlock);
1536 
1537 	if ((--dp->tx_busy) == 0) {
1538 		/* extend the tail of softq, as new packets are ready. */
1539 		dp->tx_softq_tail = dp->tx_free_head;
1540 
1541 		if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
1542 			/*
1543 			 * The device status has changed while we were
1544 			 * preparing the tx buffers.
1545 			 * As we are the last one to make tx non-busy,
1546 			 * wake up anyone who may be waiting for us.
1547 			 */
1548 			cv_broadcast(&dp->tx_drain_cv);
1549 		} else {
1550 			ASSERT(dp->tx_softq_tail - dp->tx_softq_head > 0);
1551 			gem_tx_start_unit(dp);
1552 		}
1553 	}
1554 	dp->stats.obytes += len_total;
1555 	dp->stats.opackets += nmblk;
1556 	dp->stats.obcast += bcast;
1557 	dp->stats.omcast += mcast;
1558 done:
1559 	mutex_exit(&dp->xmitlock);
1560 
1561 	return (mp_head);
1562 }
1563 
1564 /* ========================================================== */
1565 /*
1566  * error detection and restart routines
1567  */
1568 /* ========================================================== */
1569 int
1570 gem_restart_nic(struct gem_dev *dp, uint_t flags)
1571 {
1572 	ASSERT(mutex_owned(&dp->intrlock));
1573 
1574 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
1575 #ifdef GEM_DEBUG_LEVEL
1576 #if GEM_DEBUG_LEVEL > 1
1577 	gem_dump_txbuf(dp, CE_CONT, "gem_restart_nic");
1578 #endif
1579 #endif
1580 
1581 	if (dp->mac_suspended) {
1582 		/* should we return GEM_FAILURE ? */
1583 		return (GEM_FAILURE);
1584 	}
1585 
1586 	/*
1587 	 * We should avoid calling any routines except xxx_chip_reset
1588 	 * when we are resuming the system.
1589 	 */
1590 	if (dp->mac_active) {
1591 		if (flags & GEM_RESTART_KEEP_BUF) {
1592 			/* stop rx gracefully */
1593 			dp->rxmode &= ~RXMODE_ENABLE;
1594 			(void) (*dp->gc.gc_set_rx_filter)(dp);
1595 		}
1596 		(void) gem_mac_stop(dp, flags);
1597 	}
1598 
1599 	/* reset the chip. */
1600 	if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
1601 		cmn_err(CE_WARN, "%s: %s: failed to reset chip",
1602 		    dp->name, __func__);
1603 		goto err;
1604 	}
1605 
1606 	if (gem_mac_init(dp) != GEM_SUCCESS) {
1607 		goto err;
1608 	}
1609 
1610 	/* setup media mode if the link has been up */
1611 	if (dp->mii_state == MII_STATE_LINKUP) {
1612 		if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
1613 			goto err;
1614 		}
1615 	}
1616 
1617 	/* setup mac address and enable rx filter */
1618 	dp->rxmode |= RXMODE_ENABLE;
1619 	if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
1620 		goto err;
1621 	}
1622 
1623 	/*
1624 	 * XXX - a panic happened because of linkdown.
1625 	 * We must check mii_state here, because the link can be down just
1626 	 * before the restart event happens. If the link is down now,
1627 	 * gem_mac_start() will be called from gem_mii_link_check() when
1628 	 * the link comes up later.
1629 	 */
1630 	if (dp->mii_state == MII_STATE_LINKUP) {
1631 		/* restart the nic */
1632 		ASSERT(!dp->mac_active);
1633 		(void) gem_mac_start(dp);
1634 	}
1635 	return (GEM_SUCCESS);
1636 err:
1637 	return (GEM_FAILURE);
1638 }
1639 
1640 
1641 static void
1642 gem_tx_timeout(struct gem_dev *dp)
1643 {
1644 	clock_t		now;
1645 	boolean_t	tx_sched;
1646 	struct txbuf	*tbp;
1647 
1648 	mutex_enter(&dp->intrlock);
1649 
1650 	tx_sched = B_FALSE;
1651 	now = ddi_get_lbolt();
1652 
1653 	mutex_enter(&dp->xmitlock);
1654 	if (!dp->mac_active || dp->mii_state != MII_STATE_LINKUP) {
1655 		mutex_exit(&dp->xmitlock);
1656 		goto schedule_next;
1657 	}
1658 	mutex_exit(&dp->xmitlock);
1659 
1660 	/* reclaim transmitted buffers to check whether the transmitter hangs. */
1661 	if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
1662 		/* tx error happened, reset transmitter in the chip */
1663 		(void) gem_restart_nic(dp, 0);
1664 		tx_sched = B_TRUE;
1665 		dp->tx_blocked = (clock_t)0;
1666 
1667 		goto schedule_next;
1668 	}
1669 
1670 	mutex_enter(&dp->xmitlock);
1671 	/* check if the transmitter thread is stuck */
1672 	if (dp->tx_active_head == dp->tx_active_tail) {
1673 		/* no tx buffer is loaded to the nic */
1674 		if (dp->tx_blocked &&
1675 		    now - dp->tx_blocked > dp->gc.gc_tx_timeout_interval) {
1676 			gem_dump_txbuf(dp, CE_WARN,
1677 			    "gem_tx_timeout: tx blocked");
1678 			tx_sched = B_TRUE;
1679 			dp->tx_blocked = (clock_t)0;
1680 		}
1681 		mutex_exit(&dp->xmitlock);
1682 		goto schedule_next;
1683 	}
1684 
1685 	tbp = GET_TXBUF(dp, dp->tx_active_head);
1686 	if (now - tbp->txb_stime < dp->gc.gc_tx_timeout) {
1687 		mutex_exit(&dp->xmitlock);
1688 		goto schedule_next;
1689 	}
1690 	mutex_exit(&dp->xmitlock);
1691 
1692 	gem_dump_txbuf(dp, CE_WARN, "gem_tx_timeout: tx timeout");
1693 
1694 	/* discard untransmitted packet and restart tx.  */
1695 	(void) gem_restart_nic(dp, GEM_RESTART_NOWAIT);
1696 	tx_sched = B_TRUE;
1697 	dp->tx_blocked = (clock_t)0;
1698 
1699 schedule_next:
1700 	mutex_exit(&dp->intrlock);
1701 
1702 	/* restart the downstream if needed */
1703 	if (tx_sched) {
1704 		mac_tx_update(dp->mh);
1705 	}
1706 
1707 	DPRINTF(4, (CE_CONT,
1708 	    "!%s: blocked:%d active_head:%d active_tail:%d desc_intr:%d",
1709 	    dp->name, BOOLEAN(dp->tx_blocked),
1710 	    dp->tx_active_head, dp->tx_active_tail, dp->tx_desc_intr));
1711 	dp->timeout_id =
1712 	    timeout((void (*)(void *))gem_tx_timeout,
1713 	    (void *)dp, dp->gc.gc_tx_timeout_interval);
1714 }
1715 
1716 /* ================================================================== */
1717 /*
1718  * Interrupt handler
1719  */
1720 /* ================================================================== */
1721 __INLINE__
1722 static void
1723 gem_append_rxbuf(struct gem_dev *dp, struct rxbuf *rbp_head)
1724 {
1725 	struct rxbuf	*rbp;
1726 	seqnum_t	tail;
1727 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
1728 
1729 	ASSERT(rbp_head != NULL);
1730 	ASSERT(mutex_owned(&dp->intrlock));
1731 
1732 	DPRINTF(3, (CE_CONT, "!%s: %s: slot_head:%d, slot_tail:%d",
1733 	    dp->name, __func__, dp->rx_active_head, dp->rx_active_tail));
1734 
1735 	/*
1736 	 * Add new buffers into active rx buffer list
1737 	 */
1738 	if (dp->rx_buf_head == NULL) {
1739 		dp->rx_buf_head = rbp_head;
1740 		ASSERT(dp->rx_buf_tail == NULL);
1741 	} else {
1742 		dp->rx_buf_tail->rxb_next = rbp_head;
1743 	}
1744 
1745 	tail = dp->rx_active_tail;
1746 	for (rbp = rbp_head; rbp; rbp = rbp->rxb_next) {
1747 		/* keep track of the tail for the lower layer */
1748 		dp->rx_buf_tail = rbp;
1749 
1750 		dp->gc.gc_rx_desc_write(dp,
1751 		    SLOT(tail, rx_ring_size),
1752 		    rbp->rxb_dmacookie,
1753 		    rbp->rxb_nfrags);
1754 
1755 		dp->rx_active_tail = tail = tail + 1;
1756 	}
1757 }
1758 
1759 mblk_t *
1760 gem_get_packet_default(struct gem_dev *dp, struct rxbuf *rbp, size_t len)
1761 {
1762 	int		rx_header_len = dp->gc.gc_rx_header_len;
1763 	uint8_t		*bp;
1764 	mblk_t		*mp;
1765 
1766 	/* allocate a new mblk */
1767 	if (mp = allocb(len + VTAG_SIZE, BPRI_MED)) {
1768 		ASSERT(mp->b_next == NULL);
1769 		ASSERT(mp->b_cont == NULL);
1770 
1771 		mp->b_rptr += VTAG_SIZE;
1772 		bp = mp->b_rptr;
1773 		mp->b_wptr = bp + len;
1774 
1775 		/*
1776 		 * flush the range of the entire buffer to invalidate
1777 		 * all corresponding dirty entries in the iocache.
1778 		 */
1779 		(void) ddi_dma_sync(rbp->rxb_dh, rx_header_len,
1780 		    0, DDI_DMA_SYNC_FORKERNEL);
1781 
1782 		bcopy(rbp->rxb_buf + rx_header_len, bp, len);
1783 	}
1784 	return (mp);
1785 }
1786 
1787 #ifdef GEM_DEBUG_LEVEL
1788 uint_t	gem_rx_pkts[17];
1789 #endif
1790 
1791 
1792 int
1793 gem_receive(struct gem_dev *dp)
1794 {
1795 	uint64_t	len_total = 0;
1796 	struct rxbuf	*rbp;
1797 	mblk_t		*mp;
1798 	int		cnt = 0;
1799 	uint64_t	rxstat;
1800 	struct rxbuf	*newbufs;
1801 	struct rxbuf	**newbufs_tailp;
1802 	mblk_t		*rx_head;
1803 	mblk_t		**rx_tailp;
1804 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
1805 	seqnum_t	active_head;
1806 	uint64_t	(*rx_desc_stat)(struct gem_dev *dp,
1807 	    int slot, int ndesc);
1808 	int		ethermin = ETHERMIN;
1809 	int		ethermax = dp->mtu + sizeof (struct ether_header);
1810 	int		rx_header_len = dp->gc.gc_rx_header_len;
1811 
1812 	ASSERT(mutex_owned(&dp->intrlock));
1813 
1814 	DPRINTF(3, (CE_CONT, "!%s: gem_receive: rx_buf_head:%p",
1815 	    dp->name, dp->rx_buf_head));
1816 
1817 	rx_desc_stat  = dp->gc.gc_rx_desc_stat;
1818 	newbufs_tailp = &newbufs;
1819 	rx_tailp = &rx_head;
1820 	for (active_head = dp->rx_active_head;
1821 	    (rbp = dp->rx_buf_head) != NULL; active_head++) {
1822 		int		len;
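		/*
		 * Sync rx descriptor status in batches (poll_pkt_delay*2,
		 * at least 10, capped at the number of active slots) to
		 * reduce ddi_dma_sync overhead.
		 */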
1823 		if (cnt == 0) {
1824 			cnt = max(dp->poll_pkt_delay*2, 10);
1825 			cnt = min(cnt,
1826 			    dp->rx_active_tail - active_head);
1827 			gem_rx_desc_dma_sync(dp,
1828 			    SLOT(active_head, rx_ring_size),
1829 			    cnt,
1830 			    DDI_DMA_SYNC_FORKERNEL);
1831 		}
1832 
1833 		if (rx_header_len > 0) {
1834 			(void) ddi_dma_sync(rbp->rxb_dh, 0,
1835 			    rx_header_len, DDI_DMA_SYNC_FORKERNEL);
1836 		}
1837 
1838 		if (((rxstat = (*rx_desc_stat)(dp,
1839 		    SLOT(active_head, rx_ring_size),
1840 		    rbp->rxb_nfrags))
1841 		    & (GEM_RX_DONE | GEM_RX_ERR)) == 0) {
1842 			/* not received yet */
1843 			break;
1844 		}
1845 
1846 		/* Remove the head of the rx buffer list */
1847 		dp->rx_buf_head = rbp->rxb_next;
1848 		cnt--;
1849 
1850 
1851 		if (rxstat & GEM_RX_ERR) {
1852 			goto next;
1853 		}
1854 
1855 		len = rxstat & GEM_RX_LEN;
1856 		DPRINTF(3, (CE_CONT, "!%s: %s: rxstat:0x%llx, len:0x%x",
1857 		    dp->name, __func__, rxstat, len));
1858 
1859 		/*
1860 		 * Copy the packet
1861 		 */
1862 		if ((mp = dp->gc.gc_get_packet(dp, rbp, len)) == NULL) {
1863 			/* no memory, discard the packet */
1864 			dp->stats.norcvbuf++;
1865 			goto next;
1866 		}
1867 
1868 		/*
1869 		 * Process VLAN tag
1870 		 */
1871 		ethermin = ETHERMIN;
1872 		ethermax = dp->mtu + sizeof (struct ether_header);
1873 		if (GET_NET16(mp->b_rptr + VTAG_OFF) == VTAG_TPID) {
1874 			ethermax += VTAG_SIZE;
1875 		}
1876 
1877 		/* check packet size */
1878 		if (len < ethermin) {
1879 			dp->stats.errrcv++;
1880 			dp->stats.runt++;
1881 			freemsg(mp);
1882 			goto next;
1883 		}
1884 
1885 		if (len > ethermax) {
1886 			dp->stats.errrcv++;
1887 			dp->stats.frame_too_long++;
1888 			freemsg(mp);
1889 			goto next;
1890 		}
1891 
1892 		len_total += len;
1893 
1894 #ifdef GEM_DEBUG_VLAN
1895 		if (GET_ETHERTYPE(mp->b_rptr) == VTAG_TPID) {
1896 			gem_dump_packet(dp, (char *)__func__, mp, B_TRUE);
1897 		}
1898 #endif
1899 		/* append the received packet to the temporary rx buffer list */
1900 		*rx_tailp = mp;
1901 		rx_tailp  = &mp->b_next;
1902 
1903 		if (mp->b_rptr[0] & 1) {
1904 			if (bcmp(mp->b_rptr,
1905 			    gem_etherbroadcastaddr.ether_addr_octet,
1906 			    ETHERADDRL) == 0) {
1907 				dp->stats.rbcast++;
1908 			} else {
1909 				dp->stats.rmcast++;
1910 			}
1911 		}
1912 next:
1913 		ASSERT(rbp != NULL);
1914 
1915 		/* append the buffer to the temporary new buffer list */
1916 		*newbufs_tailp = rbp;
1917 		newbufs_tailp  = &rbp->rxb_next;
1918 	}
1919 
1920 	/* advance rx_active_head */
1921 	if ((cnt = active_head - dp->rx_active_head) > 0) {
1922 		dp->stats.rbytes += len_total;
1923 		dp->stats.rpackets += cnt;
1924 	}
1925 	dp->rx_active_head = active_head;
1926 
1927 	/* terminate the working list */
1928 	*newbufs_tailp = NULL;
1929 	*rx_tailp = NULL;
1930 
1931 	if (dp->rx_buf_head == NULL) {
1932 		dp->rx_buf_tail = NULL;
1933 	}
1934 
1935 	DPRINTF(4, (CE_CONT, "%s: %s: cnt:%d, rx_head:%p",
1936 	    dp->name, __func__, cnt, rx_head));
1937 
1938 	if (newbufs) {
1939 		/*
1940 		 * refill the rx list with new buffers
1941 		 */
1942 		seqnum_t	head;
1943 
1944 		/* save current tail */
1945 		head = dp->rx_active_tail;
1946 		gem_append_rxbuf(dp, newbufs);
1947 
1948 		/* call the hw-dependent rx start routine if we have one */
1949 		dp->gc.gc_rx_start(dp,
1950 		    SLOT(head, rx_ring_size), dp->rx_active_tail - head);
1951 	}
1952 
1953 	if (rx_head) {
1954 		/*
1955 		 * send up received packets
1956 		 */
1957 		mutex_exit(&dp->intrlock);
1958 		mac_rx(dp->mh, NULL, rx_head);
1959 		mutex_enter(&dp->intrlock);
1960 	}
1961 
1962 #ifdef GEM_DEBUG_LEVEL
1963 	gem_rx_pkts[min(cnt, sizeof (gem_rx_pkts)/sizeof (uint_t)-1)]++;
1964 #endif
1965 	return (cnt);
1966 }
1967 
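/*
 * gem_tx_done: post-process tx completions.
 *
 * Reclaim transmitted buffers; if reclaiming fails, the nic is
 * restarted while keeping the queued buffers.  When tx has been
 * blocked and no further tx-done interrupts are expected, return
 * B_TRUE so that the caller kicks the stalled downstream with
 * mac_tx_update().
 */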
1968 boolean_t
1969 gem_tx_done(struct gem_dev *dp)
1970 {
1971 	boolean_t	tx_sched = B_FALSE;
1972 
1973 	if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
1974 		(void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
1975 		DPRINTF(2, (CE_CONT, "!%s: gem_tx_done: tx_desc: %d %d",
1976 		    dp->name, dp->tx_active_head, dp->tx_active_tail));
1977 		tx_sched = B_TRUE;
1978 		goto x;
1979 	}
1980 
1981 	mutex_enter(&dp->xmitlock);
1982 
1983 	/* XXX - we must not have any packets in soft queue */
1984 	ASSERT(dp->tx_softq_head == dp->tx_softq_tail);
1985 	/*
1986 	 * If we have no chance to get more free tx buffers and tx is blocked,
1987 	 * it is worth rescheduling the downstream, i.e. the tx side.
1988 	 */
1989 	ASSERT(dp->tx_desc_intr - dp->tx_desc_head >= 0);
1990 	if (dp->tx_blocked && dp->tx_desc_intr == dp->tx_desc_head) {
1991 		/*
1992 		 * As no further tx-done interrupts are scheduled, this
1993 		 * is the last chance to kick tx side, which may be
1994 		 * blocked now, otherwise the tx side never works again.
1995 		 */
1996 		tx_sched = B_TRUE;
1997 		dp->tx_blocked = (clock_t)0;
1998 		dp->tx_max_packets =
1999 		    min(dp->tx_max_packets + 2, dp->gc.gc_tx_buf_limit);
2000 	}
2001 
2002 	mutex_exit(&dp->xmitlock);
2003 
2004 	DPRINTF(3, (CE_CONT, "!%s: %s: ret: blocked:%d",
2005 	    dp->name, __func__, BOOLEAN(dp->tx_blocked)));
2006 x:
2007 	return (tx_sched);
2008 }
2009 
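/*
 * gem_intr: common interrupt dispatcher.
 *
 * Calls the chip-specific gc_interrupt handler under intrlock.  If the
 * handler sets INTR_RESTART_TX in its return value, the flag is
 * consumed here and mac_tx_update() is called after the lock has been
 * dropped.
 */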
2010 static uint_t
2011 gem_intr(struct gem_dev	*dp)
2012 {
2013 	uint_t		ret;
2014 
2015 	mutex_enter(&dp->intrlock);
2016 	if (dp->mac_suspended) {
2017 		mutex_exit(&dp->intrlock);
2018 		return (DDI_INTR_UNCLAIMED);
2019 	}
2020 	dp->intr_busy = B_TRUE;
2021 
2022 	ret = (*dp->gc.gc_interrupt)(dp);
2023 
2024 	if (ret == DDI_INTR_UNCLAIMED) {
2025 		dp->intr_busy = B_FALSE;
2026 		mutex_exit(&dp->intrlock);
2027 		return (ret);
2028 	}
2029 
2030 	if (!dp->mac_active) {
2031 		cv_broadcast(&dp->tx_drain_cv);
2032 	}
2033 
2034 
2035 	dp->stats.intr++;
2036 	dp->intr_busy = B_FALSE;
2037 
2038 	mutex_exit(&dp->intrlock);
2039 
2040 	if (ret & INTR_RESTART_TX) {
2041 		DPRINTF(4, (CE_CONT, "!%s: calling mac_tx_update", dp->name));
2042 		mac_tx_update(dp->mh);
2043 		ret &= ~INTR_RESTART_TX;
2044 	}
2045 	return (ret);
2046 }
2047 
2048 static void
2049 gem_intr_watcher(struct gem_dev *dp)
2050 {
2051 	(void) gem_intr(dp);
2052 
2053 	/* schedule the next call of gem_intr_watcher */
2054 	dp->intr_watcher_id =
2055 	    timeout((void (*)(void *))gem_intr_watcher, (void *)dp, 1);
2056 }
2057 
2058 /* ======================================================================== */
2059 /*
2060  * MII support routines
2061  */
2062 /* ======================================================================== */
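/*
 * gem_choose_forcedmode: pick a fixed speed/duplex from the advertised
 * capabilities, used when auto-negotiation is disabled or unusable.
 * The fastest enabled speed wins; duplex follows the corresponding
 * full-duplex flag (100Mbps defaults to full duplex when 100fdx or
 * 100BaseT4 is enabled).
 */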
2063 static void
2064 gem_choose_forcedmode(struct gem_dev *dp)
2065 {
2066 	/* choose media mode */
2067 	if (dp->anadv_1000fdx || dp->anadv_1000hdx) {
2068 		dp->speed = GEM_SPD_1000;
2069 		dp->full_duplex = dp->anadv_1000fdx;
2070 	} else if (dp->anadv_100fdx || dp->anadv_100t4) {
2071 		dp->speed = GEM_SPD_100;
2072 		dp->full_duplex = B_TRUE;
2073 	} else if (dp->anadv_100hdx) {
2074 		dp->speed = GEM_SPD_100;
2075 		dp->full_duplex = B_FALSE;
2076 	} else {
2077 		dp->speed = GEM_SPD_10;
2078 		dp->full_duplex = dp->anadv_10fdx;
2079 	}
2080 }
2081 
2082 uint16_t
2083 gem_mii_read(struct gem_dev *dp, uint_t reg)
2084 {
2085 	if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2086 		(*dp->gc.gc_mii_sync)(dp);
2087 	}
2088 	return ((*dp->gc.gc_mii_read)(dp, reg));
2089 }
2090 
2091 void
2092 gem_mii_write(struct gem_dev *dp, uint_t reg, uint16_t val)
2093 {
2094 	if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2095 		(*dp->gc.gc_mii_sync)(dp);
2096 	}
2097 	(*dp->gc.gc_mii_write)(dp, reg, val);
2098 }
2099 
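/*
 * fc_cap_decode() below compresses the two pause bits of an MII
 * ability register into the 2-bit flow control index used throughout
 * this file:
 *	0: no pause, 1: symmetric pause, 2: asymmetric (tx) pause,
 *	3: both bits set (rx/symmetric pause).
 * fc_cap_encode[] in gem_mii_config_default() is the inverse mapping,
 * and gem_fc_result[][] resolves the negotiated mode.
 */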
2100 #define	fc_cap_decode(x)	\
2101 	((((x) & MII_ABILITY_PAUSE) ? 1 : 0) |	\
2102 	(((x) & MII_ABILITY_ASMPAUSE) ? 2 : 0))
2103 
2104 int
2105 gem_mii_config_default(struct gem_dev *dp)
2106 {
2107 	uint16_t	mii_stat;
2108 	uint16_t	val;
2109 	static uint16_t fc_cap_encode[4] = {
2110 		0, /* none */
2111 		MII_ABILITY_PAUSE, /* symmetric */
2112 		MII_ABILITY_ASMPAUSE, /* tx */
2113 		MII_ABILITY_PAUSE | MII_ABILITY_ASMPAUSE, /* rx-symmetric */
2114 	};
2115 
2116 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2117 
2118 	/*
2119 	 * Configure bits in advertisement register
2120 	 */
2121 	mii_stat = dp->mii_status;
2122 
2123 	DPRINTF(1, (CE_CONT, "!%s: %s: MII_STATUS reg:%b",
2124 	    dp->name, __func__, mii_stat, MII_STATUS_BITS));
2125 
2126 	if ((mii_stat & MII_STATUS_ABILITY_TECH) == 0) {
2127 		/* it's funny */
2128 		cmn_err(CE_WARN, "!%s: wrong ability bits: mii_status:%b",
2129 		    dp->name, mii_stat, MII_STATUS_BITS);
2130 		return (GEM_FAILURE);
2131 	}
2132 
2133 	/* Do not change the rest of the ability bits in the advert reg */
2134 	val = gem_mii_read(dp, MII_AN_ADVERT) & ~MII_ABILITY_ALL;
2135 
2136 	DPRINTF(0, (CE_CONT,
2137 	    "!%s: %s: 100T4:%d 100F:%d 100H:%d 10F:%d 10H:%d",
2138 	    dp->name, __func__,
2139 	    dp->anadv_100t4, dp->anadv_100fdx, dp->anadv_100hdx,
2140 	    dp->anadv_10fdx, dp->anadv_10hdx));
2141 
2142 	if (dp->anadv_100t4) {
2143 		val |= MII_ABILITY_100BASE_T4;
2144 	}
2145 	if (dp->anadv_100fdx) {
2146 		val |= MII_ABILITY_100BASE_TX_FD;
2147 	}
2148 	if (dp->anadv_100hdx) {
2149 		val |= MII_ABILITY_100BASE_TX;
2150 	}
2151 	if (dp->anadv_10fdx) {
2152 		val |= MII_ABILITY_10BASE_T_FD;
2153 	}
2154 	if (dp->anadv_10hdx) {
2155 		val |= MII_ABILITY_10BASE_T;
2156 	}
2157 
2158 	/* set flow control capability */
2159 	val |= fc_cap_encode[dp->anadv_flow_control];
2160 
2161 	DPRINTF(0, (CE_CONT,
2162 	    "!%s: %s: setting MII_AN_ADVERT reg:%b, mii_mode:%d, fc:%d",
2163 	    dp->name, __func__, val, MII_ABILITY_BITS, dp->gc.gc_mii_mode,
2164 	    dp->anadv_flow_control));
2165 
2166 	gem_mii_write(dp, MII_AN_ADVERT, val);
2167 
2168 	if (mii_stat & MII_STATUS_XSTATUS) {
2169 		/*
2170 		 * 1000Base-T GMII support
2171 		 */
2172 		if (!dp->anadv_autoneg) {
2173 			/* enable manual configuration */
2174 			val = MII_1000TC_CFG_EN;
2175 		} else {
2176 			val = 0;
2177 			if (dp->anadv_1000fdx) {
2178 				val |= MII_1000TC_ADV_FULL;
2179 			}
2180 			if (dp->anadv_1000hdx) {
2181 				val |= MII_1000TC_ADV_HALF;
2182 			}
2183 		}
2184 		DPRINTF(0, (CE_CONT,
2185 		    "!%s: %s: setting MII_1000TC reg:%b",
2186 		    dp->name, __func__, val, MII_1000TC_BITS));
2187 
2188 		gem_mii_write(dp, MII_1000TC, val);
2189 	}
2190 
2191 	return (GEM_SUCCESS);
2192 }
2193 
2194 #define	GEM_LINKUP(dp)		mac_link_update((dp)->mh, LINK_STATE_UP)
2195 #define	GEM_LINKDOWN(dp)	mac_link_update((dp)->mh, LINK_STATE_DOWN)
2196 
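/*
 * gem_fc_result[][] implements the usual 802.3-style pause resolution:
 * it is indexed by fc_cap_decode() of our advertised abilities and of
 * the link partner's abilities.  For example, if we advertise
 * rx/symmetric (index 3) and the partner advertises tx only (index 2),
 * the negotiated mode is FLOW_CONTROL_RX_PAUSE.
 */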
2197 static uint8_t gem_fc_result[4 /* my cap */ ][4 /* lp cap */] = {
2198 /*	 none	symm	tx	rx/symm */
2199 /* none */
2200 	{FLOW_CONTROL_NONE,
2201 		FLOW_CONTROL_NONE,
2202 			FLOW_CONTROL_NONE,
2203 				FLOW_CONTROL_NONE},
2204 /* sym */
2205 	{FLOW_CONTROL_NONE,
2206 		FLOW_CONTROL_SYMMETRIC,
2207 			FLOW_CONTROL_NONE,
2208 				FLOW_CONTROL_SYMMETRIC},
2209 /* tx */
2210 	{FLOW_CONTROL_NONE,
2211 		FLOW_CONTROL_NONE,
2212 			FLOW_CONTROL_NONE,
2213 				FLOW_CONTROL_TX_PAUSE},
2214 /* rx/symm */
2215 	{FLOW_CONTROL_NONE,
2216 		FLOW_CONTROL_SYMMETRIC,
2217 			FLOW_CONTROL_RX_PAUSE,
2218 				FLOW_CONTROL_SYMMETRIC},
2219 };
2220 
2221 static char *gem_fc_type[] = {
2222 	"without",
2223 	"with symmetric",
2224 	"with tx",
2225 	"with rx",
2226 };
2227 
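/*
 * gem_mii_link_check: one step of the MII link state machine.
 *
 * The state advances roughly as
 *	UNKNOWN -> RESETTING -> AUTONEGOTIATING -> AN_DONE
 *	        -> MEDIA_SETUP -> LINKDOWN <-> LINKUP
 * with reset_phy and autonego as the common recovery paths.  It is
 * normally invoked from the link watcher timeout with intrlock held,
 * arranges the next watcher invocation via dp->mii_interval, and
 * returns B_TRUE when the caller should wake the tx side with
 * mac_tx_update().
 */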
2228 boolean_t
2229 gem_mii_link_check(struct gem_dev *dp)
2230 {
2231 	uint16_t	old_mii_state;
2232 	boolean_t	tx_sched = B_FALSE;
2233 	uint16_t	status;
2234 	uint16_t	advert;
2235 	uint16_t	lpable;
2236 	uint16_t	exp;
2237 	uint16_t	ctl1000;
2238 	uint16_t	stat1000;
2239 	uint16_t	val;
2240 	clock_t		now;
2241 	clock_t		diff;
2242 	int		linkdown_action;
2243 	boolean_t	fix_phy = B_FALSE;
2244 
2245 	now = ddi_get_lbolt();
2246 	old_mii_state = dp->mii_state;
2247 
2248 	DPRINTF(3, (CE_CONT, "!%s: %s: time:%d state:%d",
2249 	    dp->name, __func__, now, dp->mii_state));
2250 
2251 	diff = now - dp->mii_last_check;
2252 	dp->mii_last_check = now;
2253 
2254 	/*
2255 	 * For NWAM, don't show linkdown state right
2256 	 * after the system boots
2257 	 */
2258 	if (dp->linkup_delay > 0) {
2259 		if (dp->linkup_delay > diff) {
2260 			dp->linkup_delay -= diff;
2261 		} else {
2262 			/* link up timeout */
2263 			dp->linkup_delay = -1;
2264 		}
2265 	}
2266 
2267 next_nowait:
2268 	switch (dp->mii_state) {
2269 	case MII_STATE_UNKNOWN:
2270 		/* power-up, DP83840 requires 32 sync bits */
2271 		(*dp->gc.gc_mii_sync)(dp);
2272 		goto reset_phy;
2273 
2274 	case MII_STATE_RESETTING:
2275 		dp->mii_timer -= diff;
2276 		if (dp->mii_timer > 0) {
2277 			/* don't read phy registers in resetting */
2278 			dp->mii_interval = WATCH_INTERVAL_FAST;
2279 			goto next;
2280 		}
2281 
2282 		/* Timer expired, ensure reset bit is not set */
2283 
2284 		if (dp->mii_status & MII_STATUS_MFPRMBLSUPR) {
2285 			/* some phys need sync bits after reset */
2286 			(*dp->gc.gc_mii_sync)(dp);
2287 		}
2288 		val = gem_mii_read(dp, MII_CONTROL);
2289 		if (val & MII_CONTROL_RESET) {
2290 			cmn_err(CE_NOTE,
2291 			    "!%s: time:%ld resetting phy not complete."
2292 			    " mii_control:0x%b",
2293 			    dp->name, ddi_get_lbolt(),
2294 			    val, MII_CONTROL_BITS);
2295 		}
2296 
2297 		/* ensure neither isolated nor pwrdown nor auto-nego mode */
2298 		/* XXX -- this operation is required for NS DP83840A. */
2299 		gem_mii_write(dp, MII_CONTROL, 0);
2300 
2301 		/* As resetting PHY has completed, configure PHY registers */
2302 		if ((*dp->gc.gc_mii_config)(dp) != GEM_SUCCESS) {
2303 			/* we failed to configure PHY. */
2304 			goto reset_phy;
2305 		}
2306 
2307 		/* mii_config may disable auto-negotiation */
2308 		gem_choose_forcedmode(dp);
2309 
2310 		dp->mii_lpable = 0;
2311 		dp->mii_advert = 0;
2312 		dp->mii_exp = 0;
2313 		dp->mii_ctl1000 = 0;
2314 		dp->mii_stat1000 = 0;
2315 		dp->flow_control = FLOW_CONTROL_NONE;
2316 
2317 		if (!dp->anadv_autoneg) {
2318 			/* skip auto-negotiation phase */
2319 			dp->mii_state = MII_STATE_MEDIA_SETUP;
2320 			dp->mii_timer = 0;
2321 			dp->mii_interval = 0;
2322 			goto next_nowait;
2323 		}
2324 
2325 		/* Issue auto-negotiation command */
2326 		goto autonego;
2327 
2328 	case MII_STATE_AUTONEGOTIATING:
2329 		/*
2330 		 * Autonegotiation is in progress
2331 		 */
2332 		dp->mii_timer -= diff;
2333 		if (dp->mii_timer -
2334 		    (dp->gc.gc_mii_an_timeout
2335 		    - dp->gc.gc_mii_an_wait) > 0) {
2336 			/*
2337 			 * wait for a while, typically autonegotiation
2338 			 * completes in 2.3 - 2.5 sec.
2339 			 */
2340 			dp->mii_interval = WATCH_INTERVAL_FAST;
2341 			goto next;
2342 		}
2343 
2344 		/* read PHY status */
2345 		status = gem_mii_read(dp, MII_STATUS);
2346 		DPRINTF(4, (CE_CONT,
2347 		    "!%s: %s: called: mii_state:%d MII_STATUS reg:%b",
2348 		    dp->name, __func__, dp->mii_state,
2349 		    status, MII_STATUS_BITS));
2350 
2351 		if (status & MII_STATUS_REMFAULT) {
2352 			/*
2353 			 * The link partner told us that something wrong happened.
2354 			 * What should we do?
2355 			 */
2356 			cmn_err(CE_CONT,
2357 			    "!%s: auto-negotiation failed: remote fault",
2358 			    dp->name);
2359 			goto autonego;
2360 		}
2361 
2362 		if ((status & MII_STATUS_ANDONE) == 0) {
2363 			if (dp->mii_timer <= 0) {
2364 				/*
2365 				 * Auto-negotiation timed out;
2366 				 * try again without resetting the phy.
2367 				 */
2368 				if (!dp->mii_supress_msg) {
2369 					cmn_err(CE_WARN,
2370 				    "!%s: auto-negotiation failed: timeout",
2371 					    dp->name);
2372 					dp->mii_supress_msg = B_TRUE;
2373 				}
2374 				goto autonego;
2375 			}
2376 			/*
2377 			 * Auto-negotiation is in progress. Wait.
2378 			 */
2379 			dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2380 			goto next;
2381 		}
2382 
2383 		/*
2384 		 * Auto-negotiation has completed.
2385 		 * Assume linkdown and fall through.
2386 		 */
2387 		dp->mii_supress_msg = B_FALSE;
2388 		dp->mii_state = MII_STATE_AN_DONE;
2389 		DPRINTF(0, (CE_CONT,
2390 		    "!%s: auto-negotiation completed, MII_STATUS:%b",
2391 		    dp->name, status, MII_STATUS_BITS));
2392 
2393 		if (dp->gc.gc_mii_an_delay > 0) {
2394 			dp->mii_timer = dp->gc.gc_mii_an_delay;
2395 			dp->mii_interval = drv_usectohz(20*1000);
2396 			goto next;
2397 		}
2398 
2399 		dp->mii_timer = 0;
2400 		diff = 0;
2401 		goto next_nowait;
2402 
2403 	case MII_STATE_AN_DONE:
2404 		/*
2405 		 * Auto-negotiation is done. Now we can set up the media.
2406 		 */
2407 		dp->mii_timer -= diff;
2408 		if (dp->mii_timer > 0) {
2409 			/* wait for a while */
2410 			dp->mii_interval = WATCH_INTERVAL_FAST;
2411 			goto next;
2412 		}
2413 
2414 		/*
2415 		 * set up the result of auto negotiation
2416 		 */
2417 
2418 		/*
2419 		 * Read the registers required to determine the current
2420 		 * duplex mode and media speed.
2421 		 */
2422 		if (dp->gc.gc_mii_an_delay > 0) {
2423 			/*
2424 			 * As the link watcher context has been suspended,
2425 			 * 'status' is stale. We must re-read the status register here.
2426 			 */
2427 			status = gem_mii_read(dp, MII_STATUS);
2428 		}
2429 		advert = gem_mii_read(dp, MII_AN_ADVERT);
2430 		lpable = gem_mii_read(dp, MII_AN_LPABLE);
2431 		exp = gem_mii_read(dp, MII_AN_EXPANSION);
2432 		if (exp == 0xffff) {
2433 			/* some phys don't have exp register */
2434 			exp = 0;
2435 		}
2436 		ctl1000  = 0;
2437 		stat1000 = 0;
2438 		if (dp->mii_status & MII_STATUS_XSTATUS) {
2439 			ctl1000  = gem_mii_read(dp, MII_1000TC);
2440 			stat1000 = gem_mii_read(dp, MII_1000TS);
2441 		}
2442 		dp->mii_lpable = lpable;
2443 		dp->mii_advert = advert;
2444 		dp->mii_exp = exp;
2445 		dp->mii_ctl1000  = ctl1000;
2446 		dp->mii_stat1000 = stat1000;
2447 
2448 		cmn_err(CE_CONT,
2449 		"!%s: auto-negotiation done, advert:%b, lpable:%b, exp:%b",
2450 		    dp->name,
2451 		    advert, MII_ABILITY_BITS,
2452 		    lpable, MII_ABILITY_BITS,
2453 		    exp, MII_AN_EXP_BITS);
2454 
2455 		if (dp->mii_status & MII_STATUS_XSTATUS) {
2456 			cmn_err(CE_CONT,
2457 			    "! MII_1000TC:%b, MII_1000TS:%b",
2458 			    ctl1000, MII_1000TC_BITS,
2459 			    stat1000, MII_1000TS_BITS);
2460 		}
2461 
2462 		if (gem_population(lpable) <= 1 &&
2463 		    (exp & MII_AN_EXP_LPCANAN) == 0) {
2464 			if ((advert & MII_ABILITY_TECH) != lpable) {
2465 				cmn_err(CE_WARN,
2466 				    "!%s: but the link partner doesn't seem"
2467 				    " to have auto-negotiation capability."
2468 				    " please check the link configuration.",
2469 				    dp->name);
2470 			}
2471 			/*
2472 			 * it should be the result of parallel detection, which
2473 			 * cannot detect duplex mode.
2474 			 */
2475 			if (lpable & MII_ABILITY_100BASE_TX) {
2476 				/*
2477 				 * we prefer full duplex mode for 100Mbps
2478 				 * connection, if we can.
2479 				 */
2480 				lpable |= advert & MII_ABILITY_100BASE_TX_FD;
2481 			}
2482 
2483 			if ((advert & lpable) == 0 &&
2484 			    lpable & MII_ABILITY_10BASE_T) {
2485 				lpable |= advert & MII_ABILITY_10BASE_T_FD;
2486 			}
2487 			/*
2488 			 * as the link partner isn't capable of auto-negotiation,
2489 			 * use a fixed mode temporarily.
2490 			 */
2491 			fix_phy = B_TRUE;
2492 		} else if (lpable == 0) {
2493 			cmn_err(CE_WARN, "!%s: wrong lpable.", dp->name);
2494 			goto reset_phy;
2495 		}
2496 		/*
2497 		 * configure current link mode according to AN priority.
2498 		 */
2499 		val = advert & lpable;
2500 		if ((ctl1000 & MII_1000TC_ADV_FULL) &&
2501 		    (stat1000 & MII_1000TS_LP_FULL)) {
2502 			/* 1000BaseT & full duplex */
2503 			dp->speed	 = GEM_SPD_1000;
2504 			dp->full_duplex  = B_TRUE;
2505 		} else if ((ctl1000 & MII_1000TC_ADV_HALF) &&
2506 		    (stat1000 & MII_1000TS_LP_HALF)) {
2507 			/* 1000BaseT & half duplex */
2508 			dp->speed = GEM_SPD_1000;
2509 			dp->full_duplex = B_FALSE;
2510 		} else if (val & MII_ABILITY_100BASE_TX_FD) {
2511 			/* 100BaseTx & full duplex */
2512 			dp->speed = GEM_SPD_100;
2513 			dp->full_duplex = B_TRUE;
2514 		} else if (val & MII_ABILITY_100BASE_T4) {
2515 			/* 100BaseT4 & full duplex */
2516 			dp->speed = GEM_SPD_100;
2517 			dp->full_duplex = B_TRUE;
2518 		} else if (val & MII_ABILITY_100BASE_TX) {
2519 			/* 100BaseTx & half duplex */
2520 			dp->speed	 = GEM_SPD_100;
2521 			dp->full_duplex  = B_FALSE;
2522 		} else if (val & MII_ABILITY_10BASE_T_FD) {
2523 			/* 10BaseT & full duplex */
2524 			dp->speed	 = GEM_SPD_10;
2525 			dp->full_duplex  = B_TRUE;
2526 		} else if (val & MII_ABILITY_10BASE_T) {
2527 			/* 10BaseT & half duplex */
2528 			dp->speed	 = GEM_SPD_10;
2529 			dp->full_duplex  = B_FALSE;
2530 		} else {
2531 			/*
2532 			 * It seems that the link partner doesn't have
2533 			 * auto-negotiation capability and our PHY
2534 			 * could not report the correct current mode.
2535 			 * We guess the current mode from the mii_control register.
2536 			 */
2537 			val = gem_mii_read(dp, MII_CONTROL);
2538 
2539 			/* select 100m full or 10m half */
2540 			dp->speed = (val & MII_CONTROL_100MB) ?
2541 			    GEM_SPD_100 : GEM_SPD_10;
2542 			dp->full_duplex = dp->speed != GEM_SPD_10;
2543 			fix_phy = B_TRUE;
2544 
2545 			cmn_err(CE_NOTE,
2546 			    "!%s: auto-negotiation done but "
2547 			    "common ability not found.\n"
2548 			    "PHY state: control:%b advert:%b lpable:%b\n"
2549 			    "guessing %d Mbps %s duplex mode",
2550 			    dp->name,
2551 			    val, MII_CONTROL_BITS,
2552 			    advert, MII_ABILITY_BITS,
2553 			    lpable, MII_ABILITY_BITS,
2554 			    gem_speed_value[dp->speed],
2555 			    dp->full_duplex ? "full" : "half");
2556 		}
2557 
2558 		if (dp->full_duplex) {
2559 			dp->flow_control =
2560 			    gem_fc_result[fc_cap_decode(advert)]
2561 			    [fc_cap_decode(lpable)];
2562 		} else {
2563 			dp->flow_control = FLOW_CONTROL_NONE;
2564 		}
2565 		dp->mii_state = MII_STATE_MEDIA_SETUP;
2566 		/* FALLTHROUGH */
2567 
2568 	case MII_STATE_MEDIA_SETUP:
2569 		dp->mii_state = MII_STATE_LINKDOWN;
2570 		dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
2571 		DPRINTF(2, (CE_CONT, "!%s: media mode setup done", dp->name));
2572 		dp->mii_supress_msg = B_FALSE;
2573 
2574 		/* use short interval */
2575 		dp->mii_interval = WATCH_INTERVAL_FAST;
2576 
2577 		if ((!dp->anadv_autoneg) ||
2578 		    dp->gc.gc_mii_an_oneshot || fix_phy) {
2579 
2580 			/*
2581 			 * write specified mode to phy.
2582 			 */
2583 			val = gem_mii_read(dp, MII_CONTROL);
2584 			val &= ~(MII_CONTROL_SPEED | MII_CONTROL_FDUPLEX |
2585 			    MII_CONTROL_ANE | MII_CONTROL_RSAN);
2586 
2587 			if (dp->full_duplex) {
2588 				val |= MII_CONTROL_FDUPLEX;
2589 			}
2590 
2591 			switch (dp->speed) {
2592 			case GEM_SPD_1000:
2593 				val |= MII_CONTROL_1000MB;
2594 				break;
2595 
2596 			case GEM_SPD_100:
2597 				val |= MII_CONTROL_100MB;
2598 				break;
2599 
2600 			default:
2601 				cmn_err(CE_WARN, "%s: unknown speed:%d",
2602 				    dp->name, dp->speed);
2603 				/* FALLTHROUGH */
2604 			case GEM_SPD_10:
2605 				/* for GEM_SPD_10, do nothing */
2606 				break;
2607 			}
2608 
2609 			if (dp->mii_status & MII_STATUS_XSTATUS) {
2610 				gem_mii_write(dp,
2611 				    MII_1000TC, MII_1000TC_CFG_EN);
2612 			}
2613 			gem_mii_write(dp, MII_CONTROL, val);
2614 		}
2615 
2616 		if (dp->nic_state >= NIC_STATE_INITIALIZED) {
2617 			/* notify the result of auto-negotiation to mac */
2618 			(*dp->gc.gc_set_media)(dp);
2619 		}
2620 
2621 		if ((void *)dp->gc.gc_mii_tune_phy) {
2622 			/* for built-in sis900 */
2623 			/* XXX - this code should be removed.  */
2624 			(*dp->gc.gc_mii_tune_phy)(dp);
2625 		}
2626 
2627 		goto next_nowait;
2628 
2629 	case MII_STATE_LINKDOWN:
2630 		status = gem_mii_read(dp, MII_STATUS);
2631 		if (status & MII_STATUS_LINKUP) {
2632 			/*
2633 			 * Link going up
2634 			 */
2635 			dp->mii_state = MII_STATE_LINKUP;
2636 			dp->mii_supress_msg = B_FALSE;
2637 
2638 			DPRINTF(0, (CE_CONT,
2639 			    "!%s: link up detected: mii_stat:%b",
2640 			    dp->name, status, MII_STATUS_BITS));
2641 
2642 			/*
2643 			 * MII_CONTROL_100MB and MII_CONTROL_FDUPLEX are
2644 			 * ignored when MII_CONTROL_ANE is set.
2645 			 */
2646 			cmn_err(CE_CONT,
2647 			    "!%s: Link up: %d Mbps %s duplex %s flow control",
2648 			    dp->name,
2649 			    gem_speed_value[dp->speed],
2650 			    dp->full_duplex ? "full" : "half",
2651 			    gem_fc_type[dp->flow_control]);
2652 
2653 			dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2654 
2655 			/* XXX - we need another timer to watch statistics */
2656 			if (dp->gc.gc_mii_hw_link_detection &&
2657 			    dp->nic_state == NIC_STATE_ONLINE) {
2658 				dp->mii_interval = 0;
2659 			}
2660 
2661 			if (dp->nic_state == NIC_STATE_ONLINE) {
2662 				if (!dp->mac_active) {
2663 					(void) gem_mac_start(dp);
2664 				}
2665 				tx_sched = B_TRUE;
2666 			}
2667 			goto next;
2668 		}
2669 
2670 		dp->mii_supress_msg = B_TRUE;
2671 		if (dp->anadv_autoneg) {
2672 			dp->mii_timer -= diff;
2673 			if (dp->mii_timer <= 0) {
2674 				/*
2675 				 * link down timer expired.
2676 				 * need to restart auto-negotiation.
2677 				 */
2678 				linkdown_action =
2679 				    dp->gc.gc_mii_linkdown_timeout_action;
2680 				goto restart_autonego;
2681 			}
2682 		}
2683 		/* don't change mii_state */
2684 		break;
2685 
2686 	case MII_STATE_LINKUP:
2687 		status = gem_mii_read(dp, MII_STATUS);
2688 		if ((status & MII_STATUS_LINKUP) == 0) {
2689 			/*
2690 			 * Link going down
2691 			 */
2692 			cmn_err(CE_NOTE,
2693 			    "!%s: link down detected: mii_stat:%b",
2694 			    dp->name, status, MII_STATUS_BITS);
2695 
2696 			if (dp->nic_state == NIC_STATE_ONLINE &&
2697 			    dp->mac_active &&
2698 			    dp->gc.gc_mii_stop_mac_on_linkdown) {
2699 				(void) gem_mac_stop(dp, 0);
2700 
2701 				if (dp->tx_blocked) {
2702 					/* drain tx */
2703 					tx_sched = B_TRUE;
2704 				}
2705 			}
2706 
2707 			if (dp->anadv_autoneg) {
2708 				/* need to restart auto-negotiation */
2709 				linkdown_action = dp->gc.gc_mii_linkdown_action;
2710 				goto restart_autonego;
2711 			}
2712 
2713 			dp->mii_state = MII_STATE_LINKDOWN;
2714 			dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
2715 
2716 			if ((void *)dp->gc.gc_mii_tune_phy) {
2717 				/* for built-in sis900 */
2718 				(*dp->gc.gc_mii_tune_phy)(dp);
2719 			}
2720 			dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2721 			goto next;
2722 		}
2723 
2724 		/* don't change mii_state */
2725 		if (dp->gc.gc_mii_hw_link_detection &&
2726 		    dp->nic_state == NIC_STATE_ONLINE) {
2727 			dp->mii_interval = 0;
2728 			goto next;
2729 		}
2730 		break;
2731 	}
2732 	dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2733 	goto next;
2734 
2735 	/* Actions on the end of state routine */
2736 
2737 restart_autonego:
2738 	switch (linkdown_action) {
2739 	case MII_ACTION_RESET:
2740 		if (!dp->mii_supress_msg) {
2741 			cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
2742 		}
2743 		dp->mii_supress_msg = B_TRUE;
2744 		goto reset_phy;
2745 
2746 	case MII_ACTION_NONE:
2747 		dp->mii_supress_msg = B_TRUE;
2748 		if (dp->gc.gc_mii_an_oneshot) {
2749 			goto autonego;
2750 		}
2751 		/* PHY will restart autonego automatically */
2752 		dp->mii_state = MII_STATE_AUTONEGOTIATING;
2753 		dp->mii_timer = dp->gc.gc_mii_an_timeout;
2754 		dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2755 		goto next;
2756 
2757 	case MII_ACTION_RSA:
2758 		if (!dp->mii_supress_msg) {
2759 			cmn_err(CE_CONT, "!%s: restarting auto-negotiation",
2760 			    dp->name);
2761 		}
2762 		dp->mii_supress_msg = B_TRUE;
2763 		goto autonego;
2764 
2765 	default:
2766 		cmn_err(CE_WARN, "!%s: unknown linkdown action: %d",
2767 		    dp->name, dp->gc.gc_mii_linkdown_action);
2768 		dp->mii_supress_msg = B_TRUE;
2769 	}
2770 	/* NOTREACHED */
2771 
2772 reset_phy:
2773 	if (!dp->mii_supress_msg) {
2774 		cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
2775 	}
2776 	dp->mii_state = MII_STATE_RESETTING;
2777 	dp->mii_timer = dp->gc.gc_mii_reset_timeout;
2778 	if (!dp->gc.gc_mii_dont_reset) {
2779 		gem_mii_write(dp, MII_CONTROL, MII_CONTROL_RESET);
2780 	}
2781 	dp->mii_interval = WATCH_INTERVAL_FAST;
2782 	goto next;
2783 
2784 autonego:
2785 	if (!dp->mii_supress_msg) {
2786 		cmn_err(CE_CONT, "!%s: auto-negotiation started", dp->name);
2787 	}
2788 	dp->mii_state = MII_STATE_AUTONEGOTIATING;
2789 	dp->mii_timer = dp->gc.gc_mii_an_timeout;
2790 
2791 	/* start/restart auto nego */
2792 	val = gem_mii_read(dp, MII_CONTROL) &
2793 	    ~(MII_CONTROL_ISOLATE | MII_CONTROL_PWRDN | MII_CONTROL_RESET);
2794 
2795 	gem_mii_write(dp, MII_CONTROL,
2796 	    val | MII_CONTROL_RSAN | MII_CONTROL_ANE);
2797 
2798 	dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2799 
2800 next:
2801 	if (dp->link_watcher_id == 0 && dp->mii_interval) {
2802 		/* we must schedule next mii_watcher */
2803 		dp->link_watcher_id =
2804 		    timeout((void (*)(void *))&gem_mii_link_watcher,
2805 		    (void *)dp, dp->mii_interval);
2806 	}
2807 
2808 	if (old_mii_state != dp->mii_state) {
2809 		/* notify new mii link state */
2810 		if (dp->mii_state == MII_STATE_LINKUP) {
2811 			dp->linkup_delay = 0;
2812 			GEM_LINKUP(dp);
2813 		} else if (dp->linkup_delay <= 0) {
2814 			GEM_LINKDOWN(dp);
2815 		}
2816 	} else if (dp->linkup_delay < 0) {
2817 		/* first linkup timeout */
2818 		dp->linkup_delay = 0;
2819 		GEM_LINKDOWN(dp);
2820 	}
2821 
2822 	return (tx_sched);
2823 }
2824 
2825 static void
2826 gem_mii_link_watcher(struct gem_dev *dp)
2827 {
2828 	boolean_t	tx_sched;
2829 
2830 	mutex_enter(&dp->intrlock);
2831 
2832 	dp->link_watcher_id = 0;
2833 	tx_sched = gem_mii_link_check(dp);
2834 #if GEM_DEBUG_LEVEL > 2
2835 	if (dp->link_watcher_id == 0) {
2836 		cmn_err(CE_CONT, "%s: link watcher stopped", dp->name);
2837 	}
2838 #endif
2839 	mutex_exit(&dp->intrlock);
2840 
2841 	if (tx_sched) {
2842 		/* kick potentially stopped downstream */
2843 		mac_tx_update(dp->mh);
2844 	}
2845 }
2846 
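/*
 * gem_mii_probe_default: locate the PHY to use.
 *
 * The preconfigured address in dp->mii_phy_addr is tried first; if that
 * fails, every address from gc_mii_addr_min to 31 is scanned, first by
 * reading the status register and then once more after clearing
 * MII_CONTROL.  The id registers of the PHY found are recorded, and the
 * pause/asym-pause bits are probed by temporarily writing them to the
 * advertisement register to see which ones stick.
 */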
2847 int
2848 gem_mii_probe_default(struct gem_dev *dp)
2849 {
2850 	int8_t		phy;
2851 	uint16_t	status;
2852 	uint16_t	adv;
2853 	uint16_t	adv_org;
2854 
2855 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2856 
2857 	/*
2858 	 * Scan PHY
2859 	 */
2860 	/* ensure that sync bits are sent */
2861 	dp->mii_status = 0;
2862 
2863 	/* Try default phy first */
2864 	if (dp->mii_phy_addr) {
2865 		status = gem_mii_read(dp, MII_STATUS);
2866 		if (status != 0xffff && status != 0) {
2867 			gem_mii_write(dp, MII_CONTROL, 0);
2868 			goto PHY_found;
2869 		}
2870 
2871 		if (dp->mii_phy_addr < 0) {
2872 			cmn_err(CE_NOTE,
2873 	    "!%s: failed to probe default internal and/or non-MII PHY",
2874 			    dp->name);
2875 			return (GEM_FAILURE);
2876 		}
2877 
2878 		cmn_err(CE_NOTE,
2879 		    "!%s: failed to probe default MII PHY at %d",
2880 		    dp->name, dp->mii_phy_addr);
2881 	}
2882 
2883 	/* Try all possible addresses */
2884 	for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
2885 		dp->mii_phy_addr = phy;
2886 		status = gem_mii_read(dp, MII_STATUS);
2887 
2888 		if (status != 0xffff && status != 0) {
2889 			gem_mii_write(dp, MII_CONTROL, 0);
2890 			goto PHY_found;
2891 		}
2892 	}
2893 
2894 	for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
2895 		dp->mii_phy_addr = phy;
2896 		gem_mii_write(dp, MII_CONTROL, 0);
2897 		status = gem_mii_read(dp, MII_STATUS);
2898 
2899 		if (status != 0xffff && status != 0) {
2900 			goto PHY_found;
2901 		}
2902 	}
2903 
2904 	cmn_err(CE_NOTE, "!%s: no MII PHY found", dp->name);
2905 	dp->mii_phy_addr = -1;
2906 
2907 	return (GEM_FAILURE);
2908 
2909 PHY_found:
2910 	dp->mii_status = status;
2911 	dp->mii_phy_id  = (gem_mii_read(dp, MII_PHYIDH) << 16) |
2912 	    gem_mii_read(dp, MII_PHYIDL);
2913 
2914 	if (dp->mii_phy_addr < 0) {
2915 		cmn_err(CE_CONT, "!%s: using internal/non-MII PHY(0x%08x)",
2916 		    dp->name, dp->mii_phy_id);
2917 	} else {
2918 		cmn_err(CE_CONT, "!%s: MII PHY (0x%08x) found at %d",
2919 		    dp->name, dp->mii_phy_id, dp->mii_phy_addr);
2920 	}
2921 
2922 	cmn_err(CE_CONT, "!%s: PHY control:%b, status:%b, advert:%b, lpar:%b",
2923 	    dp->name,
2924 	    gem_mii_read(dp, MII_CONTROL), MII_CONTROL_BITS,
2925 	    status, MII_STATUS_BITS,
2926 	    gem_mii_read(dp, MII_AN_ADVERT), MII_ABILITY_BITS,
2927 	    gem_mii_read(dp, MII_AN_LPABLE), MII_ABILITY_BITS);
2928 
2929 	dp->mii_xstatus = 0;
2930 	if (status & MII_STATUS_XSTATUS) {
2931 		dp->mii_xstatus = gem_mii_read(dp, MII_XSTATUS);
2932 
2933 		cmn_err(CE_CONT, "!%s: xstatus:%b",
2934 		    dp->name, dp->mii_xstatus, MII_XSTATUS_BITS);
2935 	}
2936 
2937 	/* check if the phy can advertise pause abilities */
2938 	adv_org = gem_mii_read(dp, MII_AN_ADVERT);
2939 
2940 	gem_mii_write(dp, MII_AN_ADVERT,
2941 	    MII_ABILITY_PAUSE | MII_ABILITY_ASMPAUSE);
2942 
2943 	adv = gem_mii_read(dp, MII_AN_ADVERT);
2944 
2945 	if ((adv & MII_ABILITY_PAUSE) == 0) {
2946 		dp->gc.gc_flow_control &= ~1;
2947 	}
2948 
2949 	if ((adv & MII_ABILITY_ASMPAUSE) == 0) {
2950 		dp->gc.gc_flow_control &= ~2;
2951 	}
2952 
2953 	gem_mii_write(dp, MII_AN_ADVERT, adv_org);
2954 
2955 	return (GEM_SUCCESS);
2956 }
2957 
2958 static void
2959 gem_mii_start(struct gem_dev *dp)
2960 {
2961 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2962 
2963 	/* make the first call of the link check */
2964 	dp->mii_state = MII_STATE_UNKNOWN;
2965 	dp->mii_last_check = ddi_get_lbolt();
2966 	dp->linkup_delay = dp->gc.gc_mii_linkdown_timeout;
2967 	(void) gem_mii_link_watcher(dp);
2968 }
2969 
2970 static void
2971 gem_mii_stop(struct gem_dev *dp)
2972 {
2973 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2974 
2975 	/* Ensure timer routine stopped */
2976 	mutex_enter(&dp->intrlock);
2977 	if (dp->link_watcher_id) {
2978 		while (untimeout(dp->link_watcher_id) == -1)
2979 			;
2980 		dp->link_watcher_id = 0;
2981 	}
2982 	mutex_exit(&dp->intrlock);
2983 }
2984 
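/*
 * gem_get_mac_addr_conf: read the mac address from the driver's .conf
 * file, if one is configured there.  The "mac-addr" property must be a
 * colon-separated string of six hex octets; for example (the address
 * below is only an illustration):
 *
 *	mac-addr="00:11:22:33:44:55";
 *
 * An all-zero address or any other syntax is rejected.
 */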
2985 boolean_t
2986 gem_get_mac_addr_conf(struct gem_dev *dp)
2987 {
2988 	char		propname[32];
2989 	char		*valstr;
2990 	uint8_t		mac[ETHERADDRL];
2991 	char		*cp;
2992 	int		c;
2993 	int		i;
2994 	int		j;
2995 	uint8_t		v;
2996 	uint8_t		d;
2997 	uint8_t		ored;
2998 
2999 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3000 	/*
3001 	 * Get ethernet address from .conf file
3002 	 */
3003 	(void) sprintf(propname, "mac-addr");
3004 	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dp->dip,
3005 	    DDI_PROP_DONTPASS, propname, &valstr)) !=
3006 	    DDI_PROP_SUCCESS) {
3007 		return (B_FALSE);
3008 	}
3009 
3010 	if (strlen(valstr) != ETHERADDRL*3-1) {
3011 		goto syntax_err;
3012 	}
3013 
3014 	cp = valstr;
3015 	j  = 0;
3016 	ored = 0;
3017 	for (;;) {
3018 		v = 0;
3019 		for (i = 0; i < 2; i++) {
3020 			c = *cp++;
3021 
3022 			if (c >= 'a' && c <= 'f') {
3023 				d = c - 'a' + 10;
3024 			} else if (c >= 'A' && c <= 'F') {
3025 				d = c - 'A' + 10;
3026 			} else if (c >= '0' && c <= '9') {
3027 				d = c - '0';
3028 			} else {
3029 				goto syntax_err;
3030 			}
3031 			v = (v << 4) | d;
3032 		}
3033 
3034 		mac[j++] = v;
3035 		ored |= v;
3036 		if (j == ETHERADDRL) {
3037 			/* done */
3038 			break;
3039 		}
3040 
3041 		c = *cp++;
3042 		if (c != ':') {
3043 			goto syntax_err;
3044 		}
3045 	}
3046 
3047 	if (ored == 0) {
3048 		goto err;
3049 	}
3050 	for (i = 0; i < ETHERADDRL; i++) {
3051 		dp->dev_addr.ether_addr_octet[i] = mac[i];
3052 	}
3053 	ddi_prop_free(valstr);
3054 	return (B_TRUE);
3055 
3056 syntax_err:
3057 	cmn_err(CE_CONT,
3058 	    "!%s: read mac addr: trying .conf: syntax err %s",
3059 	    dp->name, valstr);
3060 err:
3061 	ddi_prop_free(valstr);
3062 
3063 	return (B_FALSE);
3064 }
3065 
3066 
3067 /* ============================================================== */
3068 /*
3069  * internal start/stop interface
3070  */
3071 /* ============================================================== */
3072 static int
3073 gem_mac_set_rx_filter(struct gem_dev *dp)
3074 {
3075 	return ((*dp->gc.gc_set_rx_filter)(dp));
3076 }
3077 
3078 /*
3079  * gem_mac_init: cold start
3080  */
3081 static int
3082 gem_mac_init(struct gem_dev *dp)
3083 {
3084 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3085 
3086 	if (dp->mac_suspended) {
3087 		return (GEM_FAILURE);
3088 	}
3089 
3090 	dp->mac_active = B_FALSE;
3091 
3092 	gem_init_rx_ring(dp);
3093 	gem_init_tx_ring(dp);
3094 
3095 	/* reset transmitter state */
3096 	dp->tx_blocked = (clock_t)0;
3097 	dp->tx_busy = 0;
3098 	dp->tx_reclaim_busy = 0;
3099 	dp->tx_max_packets = dp->gc.gc_tx_buf_limit;
3100 
3101 	if ((*dp->gc.gc_init_chip)(dp) != GEM_SUCCESS) {
3102 		return (GEM_FAILURE);
3103 	}
3104 
3105 	gem_prepare_rx_buf(dp);
3106 
3107 	return (GEM_SUCCESS);
3108 }
3109 /*
3110  * gem_mac_start: warm start
3111  */
3112 static int
3113 gem_mac_start(struct gem_dev *dp)
3114 {
3115 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3116 
3117 	ASSERT(mutex_owned(&dp->intrlock));
3118 	ASSERT(dp->nic_state == NIC_STATE_ONLINE);
3119 	ASSERT(dp->mii_state ==  MII_STATE_LINKUP);
3120 
3121 	/* enable tx and rx */
3122 	mutex_enter(&dp->xmitlock);
3123 	if (dp->mac_suspended) {
3124 		mutex_exit(&dp->xmitlock);
3125 		return (GEM_FAILURE);
3126 	}
3127 	dp->mac_active = B_TRUE;
3128 	mutex_exit(&dp->xmitlock);
3129 
3130 	/* setup rx buffers */
3131 	(*dp->gc.gc_rx_start)(dp,
3132 	    SLOT(dp->rx_active_head, dp->gc.gc_rx_ring_size),
3133 	    dp->rx_active_tail - dp->rx_active_head);
3134 
3135 	if ((*dp->gc.gc_start_chip)(dp) != GEM_SUCCESS) {
3136 		cmn_err(CE_WARN, "%s: %s: start_chip: failed",
3137 		    dp->name, __func__);
3138 		return (GEM_FAILURE);
3139 	}
3140 
3141 	mutex_enter(&dp->xmitlock);
3142 
3143 	/* load untransmitted packets into the nic */
3144 	ASSERT(dp->tx_softq_tail - dp->tx_softq_head >= 0);
3145 	if (dp->tx_softq_tail - dp->tx_softq_head > 0) {
3146 		gem_tx_load_descs_oo(dp,
3147 		    dp->tx_softq_head, dp->tx_softq_tail,
3148 		    GEM_TXFLAG_HEAD);
3149 		/* issue preloaded tx buffers */
3150 		gem_tx_start_unit(dp);
3151 	}
3152 
3153 	mutex_exit(&dp->xmitlock);
3154 
3155 	return (GEM_SUCCESS);
3156 }
3157 
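/*
 * gem_mac_stop: stop the nic, optionally draining pending tx first.
 *
 * Unless GEM_RESTART_NOWAIT is set, the wait below is bounded by an
 * estimated wire time of
 *	2 * (8 * MAXPKTBUF / speed[Mbps]) * (outstanding tx buffers)
 * in microseconds; e.g. with a roughly 1.5KB maximum frame at 100Mbps
 * this allows about 240uS per outstanding buffer.
 */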
3158 static int
3159 gem_mac_stop(struct gem_dev *dp, uint_t flags)
3160 {
3161 	int		i;
3162 	int		wait_time; /* in uS */
3163 #ifdef GEM_DEBUG_LEVEL
3164 	clock_t		now;
3165 #endif
3166 	int		ret = GEM_SUCCESS;
3167 
3168 	DPRINTF(1, (CE_CONT, "!%s: %s: called, rx_buf_free:%d",
3169 	    dp->name, __func__, dp->rx_buf_freecnt));
3170 
3171 	ASSERT(mutex_owned(&dp->intrlock));
3172 	ASSERT(!mutex_owned(&dp->xmitlock));
3173 
3174 	/*
3175 	 * Block transmits
3176 	 */
3177 	mutex_enter(&dp->xmitlock);
3178 	if (dp->mac_suspended) {
3179 		mutex_exit(&dp->xmitlock);
3180 		return (GEM_SUCCESS);
3181 	}
3182 	dp->mac_active = B_FALSE;
3183 
3184 	while (dp->tx_busy > 0) {
3185 		cv_wait(&dp->tx_drain_cv, &dp->xmitlock);
3186 	}
3187 	mutex_exit(&dp->xmitlock);
3188 
3189 	if ((flags & GEM_RESTART_NOWAIT) == 0) {
3190 		/*
3191 		 * Wait for all tx buffers to be sent.
3192 		 */
3193 		wait_time =
3194 		    2 * (8 * MAXPKTBUF(dp) / gem_speed_value[dp->speed]) *
3195 		    (dp->tx_active_tail - dp->tx_active_head);
3196 
3197 		DPRINTF(0, (CE_CONT, "%s: %s: max drain time: %d uS",
3198 		    dp->name, __func__, wait_time));
3199 		i = 0;
3200 #ifdef GEM_DEBUG_LEVEL
3201 		now = ddi_get_lbolt();
3202 #endif
3203 		while (dp->tx_active_tail != dp->tx_active_head) {
3204 			if (i > wait_time) {
3205 				/* timeout */
3206 				cmn_err(CE_NOTE, "%s: %s timeout: tx drain",
3207 				    dp->name, __func__);
3208 				break;
3209 			}
3210 			(void) gem_reclaim_txbuf(dp);
3211 			drv_usecwait(100);
3212 			i += 100;
3213 		}
3214 		DPRINTF(0, (CE_NOTE,
3215 		    "!%s: %s: the nic has drained in %d uS, real %d mS",
3216 		    dp->name, __func__, i,
3217 		    10*((int)(ddi_get_lbolt() - now))));
3218 	}
3219 
3220 	/*
3221 	 * Now we can stop the nic safely.
3222 	 */
3223 	if ((*dp->gc.gc_stop_chip)(dp) != GEM_SUCCESS) {
3224 		cmn_err(CE_NOTE, "%s: %s: resetting the chip to stop it",
3225 		    dp->name, __func__);
3226 		if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
3227 			cmn_err(CE_WARN, "%s: %s: failed to reset chip",
3228 			    dp->name, __func__);
3229 		}
3230 	}
3231 
3232 	/*
3233 	 * Clear all rx buffers
3234 	 */
3235 	if (flags & GEM_RESTART_KEEP_BUF) {
3236 		(void) gem_receive(dp);
3237 	}
3238 	gem_clean_rx_buf(dp);
3239 
3240 	/*
3241 	 * Update final statistics
3242 	 */
3243 	(*dp->gc.gc_get_stats)(dp);
3244 
3245 	/*
3246 	 * Clear all pending tx packets
3247 	 */
3248 	ASSERT(dp->tx_active_tail == dp->tx_softq_head);
3249 	ASSERT(dp->tx_softq_tail == dp->tx_free_head);
3250 	if (flags & GEM_RESTART_KEEP_BUF) {
3251 		/* restore active tx buffers */
3252 		dp->tx_active_tail = dp->tx_active_head;
3253 		dp->tx_softq_head  = dp->tx_active_head;
3254 	} else {
3255 		gem_clean_tx_buf(dp);
3256 	}
3257 
3258 	return (ret);
3259 }
3260 
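/*
 * gem_add_multicast/gem_remove_multicast maintain dp->mc_list and
 * dp->mc_count.  dp->mc_count_req counts what the mac layer asked for;
 * when it exceeds GEM_MAXMC the list has overflowed and
 * RXMODE_MULTI_OVF is set, so that the chip-specific set_rx_filter
 * routine can fall back to a wider filter (typically all-multicast).
 */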
3261 static int
3262 gem_add_multicast(struct gem_dev *dp, const uint8_t *ep)
3263 {
3264 	int		cnt;
3265 	int		err;
3266 
3267 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3268 
3269 	mutex_enter(&dp->intrlock);
3270 	if (dp->mac_suspended) {
3271 		mutex_exit(&dp->intrlock);
3272 		return (GEM_FAILURE);
3273 	}
3274 
3275 	if (dp->mc_count_req++ < GEM_MAXMC) {
3276 		/* append the new address at the end of the mclist */
3277 		cnt = dp->mc_count;
3278 		bcopy(ep, dp->mc_list[cnt].addr.ether_addr_octet,
3279 		    ETHERADDRL);
3280 		if (dp->gc.gc_multicast_hash) {
3281 			dp->mc_list[cnt].hash =
3282 			    (*dp->gc.gc_multicast_hash)(dp, (uint8_t *)ep);
3283 		}
3284 		dp->mc_count = cnt + 1;
3285 	}
3286 
3287 	if (dp->mc_count_req != dp->mc_count) {
3288 		/* multicast address list overflow */
3289 		dp->rxmode |= RXMODE_MULTI_OVF;
3290 	} else {
3291 		dp->rxmode &= ~RXMODE_MULTI_OVF;
3292 	}
3293 
3294 	/* tell new multicast list to the hardware */
3295 	err = gem_mac_set_rx_filter(dp);
3296 
3297 	mutex_exit(&dp->intrlock);
3298 
3299 	return (err);
3300 }
3301 
3302 static int
3303 gem_remove_multicast(struct gem_dev *dp, const uint8_t *ep)
3304 {
3305 	size_t		len;
3306 	int		i;
3307 	int		cnt;
3308 	int		err;
3309 
3310 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3311 
3312 	mutex_enter(&dp->intrlock);
3313 	if (dp->mac_suspended) {
3314 		mutex_exit(&dp->intrlock);
3315 		return (GEM_FAILURE);
3316 	}
3317 
3318 	dp->mc_count_req--;
3319 	cnt = dp->mc_count;
3320 	for (i = 0; i < cnt; i++) {
3321 		if (bcmp(ep, &dp->mc_list[i].addr, ETHERADDRL)) {
3322 			continue;
3323 		}
3324 		/* shrink the mclist by copying forward */
3325 		len = (cnt - (i + 1)) * sizeof (*dp->mc_list);
3326 		if (len > 0) {
3327 			bcopy(&dp->mc_list[i+1], &dp->mc_list[i], len);
3328 		}
3329 		dp->mc_count--;
3330 		break;
3331 	}
3332 
3333 	if (dp->mc_count_req != dp->mc_count) {
3334 		/* multicast address list overflow */
3335 		dp->rxmode |= RXMODE_MULTI_OVF;
3336 	} else {
3337 		dp->rxmode &= ~RXMODE_MULTI_OVF;
3338 	}
3339 	/* In gem v2, don't hold xmitlock on calling set_rx_filter */
3340 	err = gem_mac_set_rx_filter(dp);
3341 
3342 	mutex_exit(&dp->intrlock);
3343 
3344 	return (err);
3345 }
3346 
3347 /* ============================================================== */
3348 /*
3349  * ND interface
3350  */
3351 /* ============================================================== */
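/*
 * The parameters below are exported through the classic ndd interface
 * (ND_GET/ND_SET ioctls).  For example, on an instance named sfe0 (the
 * device path is only an illustration):
 *
 *	ndd -get /dev/sfe0 link_speed
 *	ndd -set /dev/sfe0 adv_autoneg_cap 0
 *
 * Setter entry points are registered only for the adv_* parameters
 * that the PHY actually supports; see gem_nd_setup() below.
 */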
3352 enum {
3353 	PARAM_AUTONEG_CAP,
3354 	PARAM_PAUSE_CAP,
3355 	PARAM_ASYM_PAUSE_CAP,
3356 	PARAM_1000FDX_CAP,
3357 	PARAM_1000HDX_CAP,
3358 	PARAM_100T4_CAP,
3359 	PARAM_100FDX_CAP,
3360 	PARAM_100HDX_CAP,
3361 	PARAM_10FDX_CAP,
3362 	PARAM_10HDX_CAP,
3363 
3364 	PARAM_ADV_AUTONEG_CAP,
3365 	PARAM_ADV_PAUSE_CAP,
3366 	PARAM_ADV_ASYM_PAUSE_CAP,
3367 	PARAM_ADV_1000FDX_CAP,
3368 	PARAM_ADV_1000HDX_CAP,
3369 	PARAM_ADV_100T4_CAP,
3370 	PARAM_ADV_100FDX_CAP,
3371 	PARAM_ADV_100HDX_CAP,
3372 	PARAM_ADV_10FDX_CAP,
3373 	PARAM_ADV_10HDX_CAP,
3374 
3375 	PARAM_LP_AUTONEG_CAP,
3376 	PARAM_LP_PAUSE_CAP,
3377 	PARAM_LP_ASYM_PAUSE_CAP,
3378 	PARAM_LP_1000FDX_CAP,
3379 	PARAM_LP_1000HDX_CAP,
3380 	PARAM_LP_100T4_CAP,
3381 	PARAM_LP_100FDX_CAP,
3382 	PARAM_LP_100HDX_CAP,
3383 	PARAM_LP_10FDX_CAP,
3384 	PARAM_LP_10HDX_CAP,
3385 
3386 	PARAM_LINK_STATUS,
3387 	PARAM_LINK_SPEED,
3388 	PARAM_LINK_DUPLEX,
3389 
3390 	PARAM_LINK_AUTONEG,
3391 	PARAM_LINK_RX_PAUSE,
3392 	PARAM_LINK_TX_PAUSE,
3393 
3394 	PARAM_LOOP_MODE,
3395 	PARAM_MSI_CNT,
3396 
3397 #ifdef DEBUG_RESUME
3398 	PARAM_RESUME_TEST,
3399 #endif
3400 	PARAM_COUNT
3401 };
3402 
3403 enum ioc_reply {
3404 	IOC_INVAL = -1,				/* bad, NAK with EINVAL	*/
3405 	IOC_DONE,				/* OK, reply sent	*/
3406 	IOC_ACK,				/* OK, just send ACK	*/
3407 	IOC_REPLY,				/* OK, just send reply	*/
3408 	IOC_RESTART_ACK,			/* OK, restart & ACK	*/
3409 	IOC_RESTART_REPLY			/* OK, restart & reply	*/
3410 };
3411 
3412 struct gem_nd_arg {
3413 	struct gem_dev	*dp;
3414 	int		item;
3415 };
3416 
3417 static int
3418 gem_param_get(queue_t *q, mblk_t *mp, caddr_t arg, cred_t *credp)
3419 {
3420 	struct gem_dev	*dp = ((struct gem_nd_arg *)(void *)arg)->dp;
3421 	int		item = ((struct gem_nd_arg *)(void *)arg)->item;
3422 	long		val;
3423 
3424 	DPRINTF(0, (CE_CONT, "!%s: %s: called, item:%d",
3425 	    dp->name, __func__, item));
3426 
3427 	switch (item) {
3428 	case PARAM_AUTONEG_CAP:
3429 		val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
3430 		DPRINTF(0, (CE_CONT, "autoneg_cap:%d", val));
3431 		break;
3432 
3433 	case PARAM_PAUSE_CAP:
3434 		val = BOOLEAN(dp->gc.gc_flow_control & 1);
3435 		break;
3436 
3437 	case PARAM_ASYM_PAUSE_CAP:
3438 		val = BOOLEAN(dp->gc.gc_flow_control & 2);
3439 		break;
3440 
3441 	case PARAM_1000FDX_CAP:
3442 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
3443 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
3444 		break;
3445 
3446 	case PARAM_1000HDX_CAP:
3447 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
3448 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
3449 		break;
3450 
3451 	case PARAM_100T4_CAP:
3452 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
3453 		break;
3454 
3455 	case PARAM_100FDX_CAP:
3456 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
3457 		break;
3458 
3459 	case PARAM_100HDX_CAP:
3460 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
3461 		break;
3462 
3463 	case PARAM_10FDX_CAP:
3464 		val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
3465 		break;
3466 
3467 	case PARAM_10HDX_CAP:
3468 		val = BOOLEAN(dp->mii_status & MII_STATUS_10);
3469 		break;
3470 
3471 	case PARAM_ADV_AUTONEG_CAP:
3472 		val = dp->anadv_autoneg;
3473 		break;
3474 
3475 	case PARAM_ADV_PAUSE_CAP:
3476 		val = BOOLEAN(dp->anadv_flow_control & 1);
3477 		break;
3478 
3479 	case PARAM_ADV_ASYM_PAUSE_CAP:
3480 		val = BOOLEAN(dp->anadv_flow_control & 2);
3481 		break;
3482 
3483 	case PARAM_ADV_1000FDX_CAP:
3484 		val = dp->anadv_1000fdx;
3485 		break;
3486 
3487 	case PARAM_ADV_1000HDX_CAP:
3488 		val = dp->anadv_1000hdx;
3489 		break;
3490 
3491 	case PARAM_ADV_100T4_CAP:
3492 		val = dp->anadv_100t4;
3493 		break;
3494 
3495 	case PARAM_ADV_100FDX_CAP:
3496 		val = dp->anadv_100fdx;
3497 		break;
3498 
3499 	case PARAM_ADV_100HDX_CAP:
3500 		val = dp->anadv_100hdx;
3501 		break;
3502 
3503 	case PARAM_ADV_10FDX_CAP:
3504 		val = dp->anadv_10fdx;
3505 		break;
3506 
3507 	case PARAM_ADV_10HDX_CAP:
3508 		val = dp->anadv_10hdx;
3509 		break;
3510 
3511 	case PARAM_LP_AUTONEG_CAP:
3512 		val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3513 		break;
3514 
3515 	case PARAM_LP_PAUSE_CAP:
3516 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
3517 		break;
3518 
3519 	case PARAM_LP_ASYM_PAUSE_CAP:
3520 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASMPAUSE);
3521 		break;
3522 
3523 	case PARAM_LP_1000FDX_CAP:
3524 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
3525 		break;
3526 
3527 	case PARAM_LP_1000HDX_CAP:
3528 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
3529 		break;
3530 
3531 	case PARAM_LP_100T4_CAP:
3532 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
3533 		break;
3534 
3535 	case PARAM_LP_100FDX_CAP:
3536 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
3537 		break;
3538 
3539 	case PARAM_LP_100HDX_CAP:
3540 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
3541 		break;
3542 
3543 	case PARAM_LP_10FDX_CAP:
3544 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
3545 		break;
3546 
3547 	case PARAM_LP_10HDX_CAP:
3548 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
3549 		break;
3550 
3551 	case PARAM_LINK_STATUS:
3552 		val = (dp->mii_state == MII_STATE_LINKUP);
3553 		break;
3554 
3555 	case PARAM_LINK_SPEED:
3556 		val = gem_speed_value[dp->speed];
3557 		break;
3558 
3559 	case PARAM_LINK_DUPLEX:
3560 		val = 0;
3561 		if (dp->mii_state == MII_STATE_LINKUP) {
3562 			val = dp->full_duplex ? 2 : 1;
3563 		}
3564 		break;
3565 
3566 	case PARAM_LINK_AUTONEG:
3567 		val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3568 		break;
3569 
3570 	case PARAM_LINK_RX_PAUSE:
3571 		val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3572 		    (dp->flow_control == FLOW_CONTROL_RX_PAUSE);
3573 		break;
3574 
3575 	case PARAM_LINK_TX_PAUSE:
3576 		val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3577 		    (dp->flow_control == FLOW_CONTROL_TX_PAUSE);
3578 		break;
3579 
3580 #ifdef DEBUG_RESUME
3581 	case PARAM_RESUME_TEST:
3582 		val = 0;
3583 		break;
3584 #endif
3585 	default:
3586 		cmn_err(CE_WARN, "%s: unimplemented ndd control (%d)",
3587 		    dp->name, item);
3588 		break;
3589 	}
3590 
3591 	(void) mi_mpprintf(mp, "%ld", val);
3592 
3593 	return (0);
3594 }
3595 
3596 static int
3597 gem_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t arg, cred_t *credp)
3598 {
3599 	struct gem_dev	*dp = ((struct gem_nd_arg *)(void *)arg)->dp;
3600 	int		item = ((struct gem_nd_arg *)(void *)arg)->item;
3601 	long		val;
3602 	char		*end;
3603 
3604 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3605 	if (ddi_strtol(value, &end, 10, &val)) {
3606 		return (EINVAL);
3607 	}
3608 	if (end == value) {
3609 		return (EINVAL);
3610 	}
3611 
3612 	switch (item) {
3613 	case PARAM_ADV_AUTONEG_CAP:
3614 		if (val != 0 && val != 1) {
3615 			goto err;
3616 		}
3617 		if (val && (dp->mii_status & MII_STATUS_CANAUTONEG) == 0) {
3618 			goto err;
3619 		}
3620 		dp->anadv_autoneg = (int)val;
3621 		break;
3622 
3623 	case PARAM_ADV_PAUSE_CAP:
3624 		if (val != 0 && val != 1) {
3625 			goto err;
3626 		}
3627 		if (val) {
3628 			dp->anadv_flow_control |= 1;
3629 		} else {
3630 			dp->anadv_flow_control &= ~1;
3631 		}
3632 		break;
3633 
3634 	case PARAM_ADV_ASYM_PAUSE_CAP:
3635 		if (val != 0 && val != 1) {
3636 			goto err;
3637 		}
3638 		if (val) {
3639 			dp->anadv_flow_control |= 2;
3640 		} else {
3641 			dp->anadv_flow_control &= ~2;
3642 		}
3643 		break;
3644 
3645 	case PARAM_ADV_1000FDX_CAP:
3646 		if (val != 0 && val != 1) {
3647 			goto err;
3648 		}
3649 		if (val && (dp->mii_xstatus &
3650 		    (MII_XSTATUS_1000BASET_FD |
3651 		    MII_XSTATUS_1000BASEX_FD)) == 0) {
3652 			goto err;
3653 		}
3654 		dp->anadv_1000fdx = (int)val;
3655 		break;
3656 
3657 	case PARAM_ADV_1000HDX_CAP:
3658 		if (val != 0 && val != 1) {
3659 			goto err;
3660 		}
3661 		if (val && (dp->mii_xstatus &
3662 		    (MII_XSTATUS_1000BASET | MII_XSTATUS_1000BASEX)) == 0) {
3663 			goto err;
3664 		}
3665 		dp->anadv_1000hdx = (int)val;
3666 		break;
3667 
3668 	case PARAM_ADV_100T4_CAP:
3669 		if (val != 0 && val != 1) {
3670 			goto err;
3671 		}
3672 		if (val && (dp->mii_status & MII_STATUS_100_BASE_T4) == 0) {
3673 			goto err;
3674 		}
3675 		dp->anadv_100t4 = (int)val;
3676 		break;
3677 
3678 	case PARAM_ADV_100FDX_CAP:
3679 		if (val != 0 && val != 1) {
3680 			goto err;
3681 		}
3682 		if (val && (dp->mii_status & MII_STATUS_100_BASEX_FD) == 0) {
3683 			goto err;
3684 		}
3685 		dp->anadv_100fdx = (int)val;
3686 		break;
3687 
3688 	case PARAM_ADV_100HDX_CAP:
3689 		if (val != 0 && val != 1) {
3690 			goto err;
3691 		}
3692 		if (val && (dp->mii_status & MII_STATUS_100_BASEX) == 0) {
3693 			goto err;
3694 		}
3695 		dp->anadv_100hdx = (int)val;
3696 		break;
3697 
3698 	case PARAM_ADV_10FDX_CAP:
3699 		if (val != 0 && val != 1) {
3700 			goto err;
3701 		}
3702 		if (val && (dp->mii_status & MII_STATUS_10_FD) == 0) {
3703 			goto err;
3704 		}
3705 		dp->anadv_10fdx = (int)val;
3706 		break;
3707 
3708 	case PARAM_ADV_10HDX_CAP:
3709 		if (val != 0 && val != 1) {
3710 			goto err;
3711 		}
3712 		if (val && (dp->mii_status & MII_STATUS_10) == 0) {
3713 			goto err;
3714 		}
3715 		dp->anadv_10hdx = (int)val;
3716 		break;
3717 	}
3718 
3719 	/* sync with PHY */
3720 	gem_choose_forcedmode(dp);
3721 
3722 	dp->mii_state = MII_STATE_UNKNOWN;
3723 	if (dp->gc.gc_mii_hw_link_detection && dp->link_watcher_id == 0) {
3724 		/* XXX - Can we ignore the return code ? */
3725 		(void) gem_mii_link_check(dp);
3726 	}
3727 
3728 	return (0);
3729 err:
3730 	return (EINVAL);
3731 }
3732 
3733 static void
3734 gem_nd_load(struct gem_dev *dp, char *name, ndgetf_t gf, ndsetf_t sf, int item)
3735 {
3736 	struct gem_nd_arg	*arg;
3737 
3738 	ASSERT(item >= 0);
3739 	ASSERT(item < PARAM_COUNT);
3740 
3741 	arg = &((struct gem_nd_arg *)(void *)dp->nd_arg_p)[item];
3742 	arg->dp = dp;
3743 	arg->item = item;
3744 
3745 	DPRINTF(2, (CE_CONT, "!%s: %s: name:%s, item:%d",
3746 	    dp->name, __func__, name, item));
3747 	(void) nd_load(&dp->nd_data_p, name, gf, sf, (caddr_t)arg);
3748 }
3749 
3750 static void
3751 gem_nd_setup(struct gem_dev *dp)
3752 {
3753 	DPRINTF(0, (CE_CONT, "!%s: %s: called, mii_status:0x%b",
3754 	    dp->name, __func__, dp->mii_status, MII_STATUS_BITS));
3755 
3756 	ASSERT(dp->nd_arg_p == NULL);
3757 
3758 	dp->nd_arg_p =
3759 	    kmem_zalloc(sizeof (struct gem_nd_arg) * PARAM_COUNT, KM_SLEEP);
3760 
3761 #define	SETFUNC(x)	((x) ? gem_param_set : NULL)
3762 
3763 	gem_nd_load(dp, "autoneg_cap",
3764 	    gem_param_get, NULL, PARAM_AUTONEG_CAP);
3765 	gem_nd_load(dp, "pause_cap",
3766 	    gem_param_get, NULL, PARAM_PAUSE_CAP);
3767 	gem_nd_load(dp, "asym_pause_cap",
3768 	    gem_param_get, NULL, PARAM_ASYM_PAUSE_CAP);
3769 	gem_nd_load(dp, "1000fdx_cap",
3770 	    gem_param_get, NULL, PARAM_1000FDX_CAP);
3771 	gem_nd_load(dp, "1000hdx_cap",
3772 	    gem_param_get, NULL, PARAM_1000HDX_CAP);
3773 	gem_nd_load(dp, "100T4_cap",
3774 	    gem_param_get, NULL, PARAM_100T4_CAP);
3775 	gem_nd_load(dp, "100fdx_cap",
3776 	    gem_param_get, NULL, PARAM_100FDX_CAP);
3777 	gem_nd_load(dp, "100hdx_cap",
3778 	    gem_param_get, NULL, PARAM_100HDX_CAP);
3779 	gem_nd_load(dp, "10fdx_cap",
3780 	    gem_param_get, NULL, PARAM_10FDX_CAP);
3781 	gem_nd_load(dp, "10hdx_cap",
3782 	    gem_param_get, NULL, PARAM_10HDX_CAP);
3783 
3784 	/* Our advertised capabilities */
3785 	gem_nd_load(dp, "adv_autoneg_cap", gem_param_get,
3786 	    SETFUNC(dp->mii_status & MII_STATUS_CANAUTONEG),
3787 	    PARAM_ADV_AUTONEG_CAP);
3788 	gem_nd_load(dp, "adv_pause_cap", gem_param_get,
3789 	    SETFUNC(dp->gc.gc_flow_control & 1),
3790 	    PARAM_ADV_PAUSE_CAP);
3791 	gem_nd_load(dp, "adv_asym_pause_cap", gem_param_get,
3792 	    SETFUNC(dp->gc.gc_flow_control & 2),
3793 	    PARAM_ADV_ASYM_PAUSE_CAP);
3794 	gem_nd_load(dp, "adv_1000fdx_cap", gem_param_get,
3795 	    SETFUNC(dp->mii_xstatus &
3796 	    (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD)),
3797 	    PARAM_ADV_1000FDX_CAP);
3798 	gem_nd_load(dp, "adv_1000hdx_cap", gem_param_get,
3799 	    SETFUNC(dp->mii_xstatus &
3800 	    (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET)),
3801 	    PARAM_ADV_1000HDX_CAP);
3802 	gem_nd_load(dp, "adv_100T4_cap", gem_param_get,
3803 	    SETFUNC((dp->mii_status & MII_STATUS_100_BASE_T4) &&
3804 	    !dp->mii_advert_ro),
3805 	    PARAM_ADV_100T4_CAP);
3806 	gem_nd_load(dp, "adv_100fdx_cap", gem_param_get,
3807 	    SETFUNC((dp->mii_status & MII_STATUS_100_BASEX_FD) &&
3808 	    !dp->mii_advert_ro),
3809 	    PARAM_ADV_100FDX_CAP);
3810 	gem_nd_load(dp, "adv_100hdx_cap", gem_param_get,
3811 	    SETFUNC((dp->mii_status & MII_STATUS_100_BASEX) &&
3812 	    !dp->mii_advert_ro),
3813 	    PARAM_ADV_100HDX_CAP);
3814 	gem_nd_load(dp, "adv_10fdx_cap", gem_param_get,
3815 	    SETFUNC((dp->mii_status & MII_STATUS_10_FD) &&
3816 	    !dp->mii_advert_ro),
3817 	    PARAM_ADV_10FDX_CAP);
3818 	gem_nd_load(dp, "adv_10hdx_cap", gem_param_get,
3819 	    SETFUNC((dp->mii_status & MII_STATUS_10) &&
3820 	    !dp->mii_advert_ro),
3821 	    PARAM_ADV_10HDX_CAP);
3822 
3823 	/* Partner's advertised capabilities */
3824 	gem_nd_load(dp, "lp_autoneg_cap",
3825 	    gem_param_get, NULL, PARAM_LP_AUTONEG_CAP);
3826 	gem_nd_load(dp, "lp_pause_cap",
3827 	    gem_param_get, NULL, PARAM_LP_PAUSE_CAP);
3828 	gem_nd_load(dp, "lp_asym_pause_cap",
3829 	    gem_param_get, NULL, PARAM_LP_ASYM_PAUSE_CAP);
3830 	gem_nd_load(dp, "lp_1000fdx_cap",
3831 	    gem_param_get, NULL, PARAM_LP_1000FDX_CAP);
3832 	gem_nd_load(dp, "lp_1000hdx_cap",
3833 	    gem_param_get, NULL, PARAM_LP_1000HDX_CAP);
3834 	gem_nd_load(dp, "lp_100T4_cap",
3835 	    gem_param_get, NULL, PARAM_LP_100T4_CAP);
3836 	gem_nd_load(dp, "lp_100fdx_cap",
3837 	    gem_param_get, NULL, PARAM_LP_100FDX_CAP);
3838 	gem_nd_load(dp, "lp_100hdx_cap",
3839 	    gem_param_get, NULL, PARAM_LP_100HDX_CAP);
3840 	gem_nd_load(dp, "lp_10fdx_cap",
3841 	    gem_param_get, NULL, PARAM_LP_10FDX_CAP);
3842 	gem_nd_load(dp, "lp_10hdx_cap",
3843 	    gem_param_get, NULL, PARAM_LP_10HDX_CAP);
3844 
3845 	/* Current operating modes */
3846 	gem_nd_load(dp, "link_status",
3847 	    gem_param_get, NULL, PARAM_LINK_STATUS);
3848 	gem_nd_load(dp, "link_speed",
3849 	    gem_param_get, NULL, PARAM_LINK_SPEED);
3850 	gem_nd_load(dp, "link_duplex",
3851 	    gem_param_get, NULL, PARAM_LINK_DUPLEX);
3852 	gem_nd_load(dp, "link_autoneg",
3853 	    gem_param_get, NULL, PARAM_LINK_AUTONEG);
3854 	gem_nd_load(dp, "link_rx_pause",
3855 	    gem_param_get, NULL, PARAM_LINK_RX_PAUSE);
3856 	gem_nd_load(dp, "link_tx_pause",
3857 	    gem_param_get, NULL, PARAM_LINK_TX_PAUSE);
3858 #ifdef DEBUG_RESUME
3859 	gem_nd_load(dp, "resume_test",
3860 	    gem_param_get, NULL, PARAM_RESUME_TEST);
3861 #endif
3862 #undef	SETFUNC
3863 }
3864 
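/*
 * gem_nd_ioctl: dispatch ND_GET/ND_SET requests against the ndd parameter
 * table built by gem_nd_setup() above.  A successful ND_SET returns
 * IOC_RESTART_REPLY; gem_mac_ioctl() below currently sends the prepared
 * reply for it exactly as it does for IOC_REPLY.
 */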
3865 static
3866 enum ioc_reply
3867 gem_nd_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
3868 {
3869 	boolean_t	ok;
3870 
3871 	ASSERT(mutex_owned(&dp->intrlock));
3872 
3873 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3874 
3875 	switch (iocp->ioc_cmd) {
3876 	case ND_GET:
3877 		ok = nd_getset(wq, dp->nd_data_p, mp);
3878 		DPRINTF(0, (CE_CONT,
3879 		    "%s: get %s", dp->name, ok ? "OK" : "FAIL"));
3880 		return (ok ? IOC_REPLY : IOC_INVAL);
3881 
3882 	case ND_SET:
3883 		ok = nd_getset(wq, dp->nd_data_p, mp);
3884 
3885 		DPRINTF(0, (CE_CONT, "%s: set %s err %d",
3886 		    dp->name, ok ? "OK" : "FAIL", iocp->ioc_error));
3887 
3888 		if (!ok) {
3889 			return (IOC_INVAL);
3890 		}
3891 
3892 		if (iocp->ioc_error) {
3893 			return (IOC_REPLY);
3894 		}
3895 
3896 		return (IOC_RESTART_REPLY);
3897 	}
3898 
3899 	cmn_err(CE_WARN, "%s: invalid cmd 0x%x", dp->name, iocp->ioc_cmd);
3900 
3901 	return (IOC_INVAL);
3902 }
3903 
3904 static void
3905 gem_nd_cleanup(struct gem_dev *dp)
3906 {
3907 	ASSERT(dp->nd_data_p != NULL);
3908 	ASSERT(dp->nd_arg_p != NULL);
3909 
3910 	nd_free(&dp->nd_data_p);
3911 
3912 	kmem_free(dp->nd_arg_p, sizeof (struct gem_nd_arg) * PARAM_COUNT);
3913 	dp->nd_arg_p = NULL;
3914 }
3915 
3916 static void
3917 gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp)
3918 {
3919 	struct iocblk	*iocp;
3920 	enum ioc_reply	status;
3921 	int		cmd;
3922 
3923 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3924 
3925 	/*
3926 	 * Validate the command before bothering with the mutex ...
3927 	 */
3928 	iocp = (void *)mp->b_rptr;
3929 	iocp->ioc_error = 0;
3930 	cmd = iocp->ioc_cmd;
3931 
3932 	DPRINTF(0, (CE_CONT, "%s: %s cmd:0x%x", dp->name, __func__, cmd));
3933 
3934 	mutex_enter(&dp->intrlock);
3935 	mutex_enter(&dp->xmitlock);
3936 
3937 	switch (cmd) {
3938 	default:
3939 		_NOTE(NOTREACHED)
3940 		status = IOC_INVAL;
3941 		break;
3942 
3943 	case ND_GET:
3944 	case ND_SET:
3945 		status = gem_nd_ioctl(dp, wq, mp, iocp);
3946 		break;
3947 	}
3948 
3949 	mutex_exit(&dp->xmitlock);
3950 	mutex_exit(&dp->intrlock);
3951 
3952 #ifdef DEBUG_RESUME
3953 	if (cmd == ND_GET)  {
3954 		gem_suspend(dp->dip);
3955 		gem_resume(dp->dip);
3956 	}
3957 #endif
3958 	/*
3959 	 * Finally, decide how to reply
3960 	 */
3961 	switch (status) {
3962 	default:
3963 	case IOC_INVAL:
3964 		/*
3965 		 * Error, reply with a NAK and EINVAL or the specified error
3966 		 */
3967 		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
3968 		    EINVAL : iocp->ioc_error);
3969 		break;
3970 
3971 	case IOC_DONE:
3972 		/*
3973 		 * OK, reply already sent
3974 		 */
3975 		break;
3976 
3977 	case IOC_RESTART_ACK:
3978 	case IOC_ACK:
3979 		/*
3980 		 * OK, reply with an ACK
3981 		 */
3982 		miocack(wq, mp, 0, 0);
3983 		break;
3984 
3985 	case IOC_RESTART_REPLY:
3986 	case IOC_REPLY:
3987 		/*
3988 		 * OK, send prepared reply as ACK or NAK
3989 		 */
3990 		mp->b_datap->db_type =
3991 		    iocp->ioc_error == 0 ? M_IOCACK : M_IOCNAK;
3992 		qreply(wq, mp);
3993 		break;
3994 	}
3995 }
3996 
3997 #ifndef SYS_MAC_H
3998 #define	XCVR_UNDEFINED	0
3999 #define	XCVR_NONE	1
4000 #define	XCVR_10		2
4001 #define	XCVR_100T4	3
4002 #define	XCVR_100X	4
4003 #define	XCVR_100T2	5
4004 #define	XCVR_1000X	6
4005 #define	XCVR_1000T	7
4006 #endif
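/*
 * Map the PHY's MII status/extended-status bits to the XCVR_* code
 * reported via ETHER_STAT_XCVR_INUSE: gigabit modes are derived from the
 * extended status register when MII_STATUS_XSTATUS is set, otherwise the
 * fastest 10/100 ability in the basic status register is reported.
 */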
4007 static int
4008 gem_mac_xcvr_inuse(struct gem_dev *dp)
4009 {
4010 	int	val = XCVR_UNDEFINED;
4011 
4012 	if ((dp->mii_status & MII_STATUS_XSTATUS) == 0) {
4013 		if (dp->mii_status & MII_STATUS_100_BASE_T4) {
4014 			val = XCVR_100T4;
4015 		} else if (dp->mii_status &
4016 		    (MII_STATUS_100_BASEX_FD |
4017 		    MII_STATUS_100_BASEX)) {
4018 			val = XCVR_100X;
4019 		} else if (dp->mii_status &
4020 		    (MII_STATUS_100_BASE_T2_FD |
4021 		    MII_STATUS_100_BASE_T2)) {
4022 			val = XCVR_100T2;
4023 		} else if (dp->mii_status &
4024 		    (MII_STATUS_10_FD | MII_STATUS_10)) {
4025 			val = XCVR_10;
4026 		}
4027 	} else if (dp->mii_xstatus &
4028 	    (MII_XSTATUS_1000BASET_FD | MII_XSTATUS_1000BASET)) {
4029 		val = XCVR_1000T;
4030 	} else if (dp->mii_xstatus &
4031 	    (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASEX)) {
4032 		val = XCVR_1000X;
4033 	}
4034 
4035 	return (val);
4036 }
4037 
4038 /* ============================================================== */
4039 /*
4040  * GLDv3 interface
4041  */
4042 /* ============================================================== */
4043 static int		gem_m_getstat(void *, uint_t, uint64_t *);
4044 static int		gem_m_start(void *);
4045 static void		gem_m_stop(void *);
4046 static int		gem_m_setpromisc(void *, boolean_t);
4047 static int		gem_m_multicst(void *, boolean_t, const uint8_t *);
4048 static int		gem_m_unicst(void *, const uint8_t *);
4049 static mblk_t		*gem_m_tx(void *, mblk_t *);
4050 static void		gem_m_ioctl(void *, queue_t *, mblk_t *);
4051 static boolean_t	gem_m_getcapab(void *, mac_capab_t, void *);
4052 
4053 #define	GEM_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)
4054 
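/*
 * GLDv3 callback vector.  The positional initializer follows the
 * mac_callbacks_t layout this framework was written against; the NULL
 * entry fills the slot between mc_tx and mc_ioctl, which gem does not use.
 */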
4055 static mac_callbacks_t gem_m_callbacks = {
4056 	GEM_M_CALLBACK_FLAGS,
4057 	gem_m_getstat,
4058 	gem_m_start,
4059 	gem_m_stop,
4060 	gem_m_setpromisc,
4061 	gem_m_multicst,
4062 	gem_m_unicst,
4063 	gem_m_tx,
4064 	NULL,
4065 	gem_m_ioctl,
4066 	gem_m_getcapab,
4067 };
4068 
4069 static int
4070 gem_m_start(void *arg)
4071 {
4072 	int		err = 0;
4073 	struct gem_dev *dp = arg;
4074 
4075 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4076 
4077 	mutex_enter(&dp->intrlock);
4078 	if (dp->mac_suspended) {
4079 		err = EIO;
4080 		goto x;
4081 	}
4082 	if (gem_mac_init(dp) != GEM_SUCCESS) {
4083 		err = EIO;
4084 		goto x;
4085 	}
4086 	dp->nic_state = NIC_STATE_INITIALIZED;
4087 
4088 	/* reset rx filter state */
4089 	dp->mc_count = 0;
4090 	dp->mc_count_req = 0;
4091 
4092 	/* set up media mode if the link is already up */
4093 	if (dp->mii_state == MII_STATE_LINKUP) {
4094 		(dp->gc.gc_set_media)(dp);
4095 	}
4096 
4097 	/* setup initial rx filter */
4098 	bcopy(dp->dev_addr.ether_addr_octet,
4099 	    dp->cur_addr.ether_addr_octet, ETHERADDRL);
4100 	dp->rxmode |= RXMODE_ENABLE;
4101 
4102 	if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4103 		err = EIO;
4104 		goto x;
4105 	}
4106 
4107 	dp->nic_state = NIC_STATE_ONLINE;
4108 	if (dp->mii_state == MII_STATE_LINKUP) {
4109 		if (gem_mac_start(dp) != GEM_SUCCESS) {
4110 			err = EIO;
4111 			goto x;
4112 		}
4113 	}
4114 
4115 	dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
4116 	    (void *)dp, dp->gc.gc_tx_timeout_interval);
4117 	mutex_exit(&dp->intrlock);
4118 
4119 	return (0);
4120 x:
4121 	dp->nic_state = NIC_STATE_STOPPED;
4122 	mutex_exit(&dp->intrlock);
4123 	return (err);
4124 }
4125 
4126 static void
4127 gem_m_stop(void *arg)
4128 {
4129 	struct gem_dev	*dp = arg;
4130 
4131 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4132 
4133 	/* stop rx */
4134 	mutex_enter(&dp->intrlock);
4135 	if (dp->mac_suspended) {
4136 		mutex_exit(&dp->intrlock);
4137 		return;
4138 	}
4139 	dp->rxmode &= ~RXMODE_ENABLE;
4140 	(void) gem_mac_set_rx_filter(dp);
4141 	mutex_exit(&dp->intrlock);
4142 
4143 	/* stop tx timeout watcher */
4144 	if (dp->timeout_id) {
4145 		while (untimeout(dp->timeout_id) == -1)
4146 			;
4147 		dp->timeout_id = 0;
4148 	}
4149 
4150 	/* make the nic state inactive */
4151 	mutex_enter(&dp->intrlock);
4152 	if (dp->mac_suspended) {
4153 		mutex_exit(&dp->intrlock);
4154 		return;
4155 	}
4156 	dp->nic_state = NIC_STATE_STOPPED;
4157 
4158 	/* deassert mac_active to block the interrupt handler */
4159 	mutex_enter(&dp->xmitlock);
4160 	dp->mac_active = B_FALSE;
4161 	mutex_exit(&dp->xmitlock);
4162 
4163 	/* block interrupts */
4164 	while (dp->intr_busy) {
4165 		cv_wait(&dp->tx_drain_cv, &dp->intrlock);
4166 	}
4167 	(void) gem_mac_stop(dp, 0);
4168 	mutex_exit(&dp->intrlock);
4169 }
4170 
4171 static int
4172 gem_m_multicst(void *arg, boolean_t add, const uint8_t *ep)
4173 {
4174 	int		err;
4175 	int		ret;
4176 	struct gem_dev	*dp = arg;
4177 
4178 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4179 
4180 	if (add) {
4181 		ret = gem_add_multicast(dp, ep);
4182 	} else {
4183 		ret = gem_remove_multicast(dp, ep);
4184 	}
4185 
4186 	err = 0;
4187 	if (ret != GEM_SUCCESS) {
4188 		err = EIO;
4189 	}
4190 
4191 	return (err);
4192 }
4193 
4194 static int
4195 gem_m_setpromisc(void *arg, boolean_t on)
4196 {
4197 	int		err = 0;	/* no error */
4198 	struct gem_dev	*dp = arg;
4199 
4200 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4201 
4202 	mutex_enter(&dp->intrlock);
4203 	if (dp->mac_suspended) {
4204 		mutex_exit(&dp->intrlock);
4205 		return (EIO);
4206 	}
4207 	if (on) {
4208 		dp->rxmode |= RXMODE_PROMISC;
4209 	} else {
4210 		dp->rxmode &= ~RXMODE_PROMISC;
4211 	}
4212 
4213 	if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4214 		err = EIO;
4215 	}
4216 	mutex_exit(&dp->intrlock);
4217 
4218 	return (err);
4219 }
4220 
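/*
 * gem_m_getstat: the mutex_owned() check below avoids re-acquiring
 * intrlock when the caller already holds it; the hardware counters are
 * then refreshed via gc_get_stats before the requested MAC/ETHER
 * statistic is returned.
 */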
4221 int
4222 gem_m_getstat(void *arg, uint_t stat, uint64_t *valp)
4223 {
4224 	struct gem_dev		*dp = arg;
4225 	struct gem_stats	*gstp = &dp->stats;
4226 	uint64_t		val = 0;
4227 
4228 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4229 
4230 	if (mutex_owned(&dp->intrlock)) {
4231 		if (dp->mac_suspended) {
4232 			return (EIO);
4233 		}
4234 	} else {
4235 		mutex_enter(&dp->intrlock);
4236 		if (dp->mac_suspended) {
4237 			mutex_exit(&dp->intrlock);
4238 			return (EIO);
4239 		}
4240 		mutex_exit(&dp->intrlock);
4241 	}
4242 
4243 	if ((*dp->gc.gc_get_stats)(dp) != GEM_SUCCESS) {
4244 		return (EIO);
4245 	}
4246 
4247 	switch (stat) {
4248 	case MAC_STAT_IFSPEED:
4249 		val = gem_speed_value[dp->speed] *1000000ull;
4250 		break;
4251 
4252 	case MAC_STAT_MULTIRCV:
4253 		val = gstp->rmcast;
4254 		break;
4255 
4256 	case MAC_STAT_BRDCSTRCV:
4257 		val = gstp->rbcast;
4258 		break;
4259 
4260 	case MAC_STAT_MULTIXMT:
4261 		val = gstp->omcast;
4262 		break;
4263 
4264 	case MAC_STAT_BRDCSTXMT:
4265 		val = gstp->obcast;
4266 		break;
4267 
4268 	case MAC_STAT_NORCVBUF:
4269 		val = gstp->norcvbuf + gstp->missed;
4270 		break;
4271 
4272 	case MAC_STAT_IERRORS:
4273 		val = gstp->errrcv;
4274 		break;
4275 
4276 	case MAC_STAT_NOXMTBUF:
4277 		val = gstp->noxmtbuf;
4278 		break;
4279 
4280 	case MAC_STAT_OERRORS:
4281 		val = gstp->errxmt;
4282 		break;
4283 
4284 	case MAC_STAT_COLLISIONS:
4285 		val = gstp->collisions;
4286 		break;
4287 
4288 	case MAC_STAT_RBYTES:
4289 		val = gstp->rbytes;
4290 		break;
4291 
4292 	case MAC_STAT_IPACKETS:
4293 		val = gstp->rpackets;
4294 		break;
4295 
4296 	case MAC_STAT_OBYTES:
4297 		val = gstp->obytes;
4298 		break;
4299 
4300 	case MAC_STAT_OPACKETS:
4301 		val = gstp->opackets;
4302 		break;
4303 
4304 	case MAC_STAT_UNDERFLOWS:
4305 		val = gstp->underflow;
4306 		break;
4307 
4308 	case MAC_STAT_OVERFLOWS:
4309 		val = gstp->overflow;
4310 		break;
4311 
4312 	case ETHER_STAT_ALIGN_ERRORS:
4313 		val = gstp->frame;
4314 		break;
4315 
4316 	case ETHER_STAT_FCS_ERRORS:
4317 		val = gstp->crc;
4318 		break;
4319 
4320 	case ETHER_STAT_FIRST_COLLISIONS:
4321 		val = gstp->first_coll;
4322 		break;
4323 
4324 	case ETHER_STAT_MULTI_COLLISIONS:
4325 		val = gstp->multi_coll;
4326 		break;
4327 
4328 	case ETHER_STAT_SQE_ERRORS:
4329 		val = gstp->sqe;
4330 		break;
4331 
4332 	case ETHER_STAT_DEFER_XMTS:
4333 		val = gstp->defer;
4334 		break;
4335 
4336 	case ETHER_STAT_TX_LATE_COLLISIONS:
4337 		val = gstp->xmtlatecoll;
4338 		break;
4339 
4340 	case ETHER_STAT_EX_COLLISIONS:
4341 		val = gstp->excoll;
4342 		break;
4343 
4344 	case ETHER_STAT_MACXMT_ERRORS:
4345 		val = gstp->xmit_internal_err;
4346 		break;
4347 
4348 	case ETHER_STAT_CARRIER_ERRORS:
4349 		val = gstp->nocarrier;
4350 		break;
4351 
4352 	case ETHER_STAT_TOOLONG_ERRORS:
4353 		val = gstp->frame_too_long;
4354 		break;
4355 
4356 	case ETHER_STAT_MACRCV_ERRORS:
4357 		val = gstp->rcv_internal_err;
4358 		break;
4359 
4360 	case ETHER_STAT_XCVR_ADDR:
4361 		val = dp->mii_phy_addr;
4362 		break;
4363 
4364 	case ETHER_STAT_XCVR_ID:
4365 		val = dp->mii_phy_id;
4366 		break;
4367 
4368 	case ETHER_STAT_XCVR_INUSE:
4369 		val = gem_mac_xcvr_inuse(dp);
4370 		break;
4371 
4372 	case ETHER_STAT_CAP_1000FDX:
4373 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
4374 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
4375 		break;
4376 
4377 	case ETHER_STAT_CAP_1000HDX:
4378 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
4379 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
4380 		break;
4381 
4382 	case ETHER_STAT_CAP_100FDX:
4383 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4384 		break;
4385 
4386 	case ETHER_STAT_CAP_100HDX:
4387 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4388 		break;
4389 
4390 	case ETHER_STAT_CAP_10FDX:
4391 		val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4392 		break;
4393 
4394 	case ETHER_STAT_CAP_10HDX:
4395 		val = BOOLEAN(dp->mii_status & MII_STATUS_10);
4396 		break;
4397 
4398 	case ETHER_STAT_CAP_ASMPAUSE:
4399 		val = BOOLEAN(dp->gc.gc_flow_control & 2);
4400 		break;
4401 
4402 	case ETHER_STAT_CAP_PAUSE:
4403 		val = BOOLEAN(dp->gc.gc_flow_control & 1);
4404 		break;
4405 
4406 	case ETHER_STAT_CAP_AUTONEG:
4407 		val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4408 		break;
4409 
4410 	case ETHER_STAT_ADV_CAP_1000FDX:
4411 		val = dp->anadv_1000fdx;
4412 		break;
4413 
4414 	case ETHER_STAT_ADV_CAP_1000HDX:
4415 		val = dp->anadv_1000hdx;
4416 		break;
4417 
4418 	case ETHER_STAT_ADV_CAP_100FDX:
4419 		val = dp->anadv_100fdx;
4420 		break;
4421 
4422 	case ETHER_STAT_ADV_CAP_100HDX:
4423 		val = dp->anadv_100hdx;
4424 		break;
4425 
4426 	case ETHER_STAT_ADV_CAP_10FDX:
4427 		val = dp->anadv_10fdx;
4428 		break;
4429 
4430 	case ETHER_STAT_ADV_CAP_10HDX:
4431 		val = dp->anadv_10hdx;
4432 		break;
4433 
4434 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
4435 		val = BOOLEAN(dp->anadv_flow_control & 2);
4436 		break;
4437 
4438 	case ETHER_STAT_ADV_CAP_PAUSE:
4439 		val = BOOLEAN(dp->anadv_flow_control & 1);
4440 		break;
4441 
4442 	case ETHER_STAT_ADV_CAP_AUTONEG:
4443 		val = dp->anadv_autoneg;
4444 		break;
4445 
4446 	case ETHER_STAT_LP_CAP_1000FDX:
4447 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
4448 		break;
4449 
4450 	case ETHER_STAT_LP_CAP_1000HDX:
4451 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
4452 		break;
4453 
4454 	case ETHER_STAT_LP_CAP_100FDX:
4455 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
4456 		break;
4457 
4458 	case ETHER_STAT_LP_CAP_100HDX:
4459 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
4460 		break;
4461 
4462 	case ETHER_STAT_LP_CAP_10FDX:
4463 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
4464 		break;
4465 
4466 	case ETHER_STAT_LP_CAP_10HDX:
4467 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
4468 		break;
4469 
4470 	case ETHER_STAT_LP_CAP_ASMPAUSE:
4471 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASMPAUSE);
4472 		break;
4473 
4474 	case ETHER_STAT_LP_CAP_PAUSE:
4475 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
4476 		break;
4477 
4478 	case ETHER_STAT_LP_CAP_AUTONEG:
4479 		val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4480 		break;
4481 
4482 	case ETHER_STAT_LINK_ASMPAUSE:
4483 		val = BOOLEAN(dp->flow_control & 2);
4484 		break;
4485 
4486 	case ETHER_STAT_LINK_PAUSE:
4487 		val = BOOLEAN(dp->flow_control & 1);
4488 		break;
4489 
4490 	case ETHER_STAT_LINK_AUTONEG:
4491 		val = dp->anadv_autoneg &&
4492 		    BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4493 		break;
4494 
4495 	case ETHER_STAT_LINK_DUPLEX:
4496 		val = (dp->mii_state == MII_STATE_LINKUP) ?
4497 		    (dp->full_duplex ? 2 : 1) : 0;
4498 		break;
4499 
4500 	case ETHER_STAT_TOOSHORT_ERRORS:
4501 		val = gstp->runt;
4502 		break;
4503 	case ETHER_STAT_LP_REMFAULT:
4504 		val = BOOLEAN(dp->mii_lpable & MII_AN_ADVERT_REMFAULT);
4505 		break;
4506 
4507 	case ETHER_STAT_JABBER_ERRORS:
4508 		val = gstp->jabber;
4509 		break;
4510 
4511 	case ETHER_STAT_CAP_100T4:
4512 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4513 		break;
4514 
4515 	case ETHER_STAT_ADV_CAP_100T4:
4516 		val = dp->anadv_100t4;
4517 		break;
4518 
4519 	case ETHER_STAT_LP_CAP_100T4:
4520 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
4521 		break;
4522 
4523 	default:
4524 #if GEM_DEBUG_LEVEL > 2
4525 		cmn_err(CE_WARN,
4526 		    "%s: unrecognized parameter value = %d",
4527 		    __func__, stat);
4528 #endif
4529 		return (ENOTSUP);
4530 	}
4531 
4532 	*valp = val;
4533 
4534 	return (0);
4535 }
4536 
4537 static int
4538 gem_m_unicst(void *arg, const uint8_t *mac)
4539 {
4540 	int		err = 0;
4541 	struct gem_dev	*dp = arg;
4542 
4543 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4544 
4545 	mutex_enter(&dp->intrlock);
4546 	if (dp->mac_suspended) {
4547 		mutex_exit(&dp->intrlock);
4548 		return (EIO);
4549 	}
4550 	bcopy(mac, dp->cur_addr.ether_addr_octet, ETHERADDRL);
4551 	dp->rxmode |= RXMODE_ENABLE;
4552 
4553 	if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4554 		err = EIO;
4555 	}
4556 	mutex_exit(&dp->intrlock);
4557 
4558 	return (err);
4559 }
4560 
4561 /*
4562  * gem_m_tx is used only for sending data packets onto the ethernet wire.
4563  */
4564 static mblk_t *
4565 gem_m_tx(void *arg, mblk_t *mp)
4566 {
4567 	uint32_t	flags = 0;
4568 	struct gem_dev	*dp = arg;
4569 	mblk_t		*tp;
4570 
4571 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4572 
4573 	ASSERT(dp->nic_state == NIC_STATE_ONLINE);
4574 	if (dp->mii_state != MII_STATE_LINKUP) {
4575 		/* Some nics hate to send packets when the link is down. */
4576 		while (mp) {
4577 			tp = mp->b_next;
4578 			mp->b_next = NULL;
4579 			freemsg(mp);
4580 			mp = tp;
4581 		}
4582 		return (NULL);
4583 	}
4584 
4585 	return (gem_send_common(dp, mp, flags));
4586 }
4587 
4588 static void
4589 gem_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
4590 {
4591 	DPRINTF(0, (CE_CONT, "!%s: %s: called",
4592 	    ((struct gem_dev *)arg)->name, __func__));
4593 
4594 	gem_mac_ioctl((struct gem_dev *)arg, wq, mp);
4595 }
4596 
4597 /* ARGSUSED */
4598 static boolean_t
4599 gem_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
4600 {
4601 	return (B_FALSE);
4602 }
4603 
4604 static void
4605 gem_gld3_init(struct gem_dev *dp, mac_register_t *macp)
4606 {
4607 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4608 	macp->m_driver = dp;
4609 	macp->m_dip = dp->dip;
4610 	macp->m_src_addr = dp->dev_addr.ether_addr_octet;
4611 	macp->m_callbacks = &gem_m_callbacks;
4612 	macp->m_min_sdu = 0;
4613 	macp->m_max_sdu = dp->mtu;
4614 
4615 	if (dp->misc_flag & GEM_VLAN) {
4616 		macp->m_margin = VTAG_SIZE;
4617 	}
4618 }
4619 
4620 /* ======================================================================== */
4621 /*
4622  * attach/detach support
4623  */
4624 /* ======================================================================== */
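/*
 * gem_read_conf: read link and tuning properties from the driver's .conf
 * file (adv_*_cap, full-duplex, speed, flow-control, nointr, mtu,
 * txthr/rxthr, txmaxdma/rxmaxdma) and fold them into the per-device
 * defaults established by gem_do_attach().
 */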
4625 static void
4626 gem_read_conf(struct gem_dev *dp)
4627 {
4628 	int	val;
4629 
4630 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4631 
4632 	/*
4633 	 * Get media mode information from the .conf file
4634 	 */
4635 	dp->anadv_autoneg = gem_prop_get_int(dp, "adv_autoneg_cap", 1) != 0;
4636 	dp->anadv_1000fdx = gem_prop_get_int(dp, "adv_1000fdx_cap", 1) != 0;
4637 	dp->anadv_1000hdx = gem_prop_get_int(dp, "adv_1000hdx_cap", 1) != 0;
4638 	dp->anadv_100t4   = gem_prop_get_int(dp, "adv_100T4_cap", 1) != 0;
4639 	dp->anadv_100fdx  = gem_prop_get_int(dp, "adv_100fdx_cap", 1) != 0;
4640 	dp->anadv_100hdx  = gem_prop_get_int(dp, "adv_100hdx_cap", 1) != 0;
4641 	dp->anadv_10fdx   = gem_prop_get_int(dp, "adv_10fdx_cap", 1) != 0;
4642 	dp->anadv_10hdx   = gem_prop_get_int(dp, "adv_10hdx_cap", 1) != 0;
4643 
4644 	if ((ddi_prop_exists(DDI_DEV_T_ANY, dp->dip,
4645 	    DDI_PROP_DONTPASS, "full-duplex"))) {
4646 		dp->full_duplex = gem_prop_get_int(dp, "full-duplex", 1) != 0;
4647 		dp->anadv_autoneg = B_FALSE;
4648 		if (dp->full_duplex) {
4649 			dp->anadv_1000hdx = B_FALSE;
4650 			dp->anadv_100hdx = B_FALSE;
4651 			dp->anadv_10hdx = B_FALSE;
4652 		} else {
4653 			dp->anadv_1000fdx = B_FALSE;
4654 			dp->anadv_100fdx = B_FALSE;
4655 			dp->anadv_10fdx = B_FALSE;
4656 		}
4657 	}
4658 
4659 	if ((val = gem_prop_get_int(dp, "speed", 0)) > 0) {
4660 		dp->anadv_autoneg = B_FALSE;
4661 		switch (val) {
4662 		case 1000:
4663 			dp->speed = GEM_SPD_1000;
4664 			dp->anadv_100t4   = B_FALSE;
4665 			dp->anadv_100fdx  = B_FALSE;
4666 			dp->anadv_100hdx  = B_FALSE;
4667 			dp->anadv_10fdx   = B_FALSE;
4668 			dp->anadv_10hdx   = B_FALSE;
4669 			break;
4670 		case 100:
4671 			dp->speed = GEM_SPD_100;
4672 			dp->anadv_1000fdx = B_FALSE;
4673 			dp->anadv_1000hdx = B_FALSE;
4674 			dp->anadv_10fdx   = B_FALSE;
4675 			dp->anadv_10hdx   = B_FALSE;
4676 			break;
4677 		case 10:
4678 			dp->speed = GEM_SPD_10;
4679 			dp->anadv_1000fdx = B_FALSE;
4680 			dp->anadv_1000hdx = B_FALSE;
4681 			dp->anadv_100t4   = B_FALSE;
4682 			dp->anadv_100fdx  = B_FALSE;
4683 			dp->anadv_100hdx  = B_FALSE;
4684 			break;
4685 		default:
4686 			cmn_err(CE_WARN,
4687 			    "!%s: property %s: illegal value:%d",
4688 			    dp->name, "speed", val);
4689 			dp->anadv_autoneg = B_TRUE;
4690 			break;
4691 		}
4692 	}
4693 
4694 	val = gem_prop_get_int(dp, "flow-control", dp->gc.gc_flow_control);
4695 	if (val > FLOW_CONTROL_RX_PAUSE || val < FLOW_CONTROL_NONE) {
4696 		cmn_err(CE_WARN,
4697 		    "!%s: property %s: illegal value:%d",
4698 		    dp->name, "flow-control", val);
4699 	} else {
4700 		val = min(val, dp->gc.gc_flow_control);
4701 	}
4702 	dp->anadv_flow_control = val;
4703 
4704 	if (gem_prop_get_int(dp, "nointr", 0)) {
4705 		dp->misc_flag |= GEM_NOINTR;
4706 		cmn_err(CE_NOTE, "!%s: polling mode enabled", dp->name);
4707 	}
4708 
4709 	dp->mtu = gem_prop_get_int(dp, "mtu", dp->mtu);
4710 	dp->txthr = gem_prop_get_int(dp, "txthr", dp->txthr);
4711 	dp->rxthr = gem_prop_get_int(dp, "rxthr", dp->rxthr);
4712 	dp->txmaxdma = gem_prop_get_int(dp, "txmaxdma", dp->txmaxdma);
4713 	dp->rxmaxdma = gem_prop_get_int(dp, "rxmaxdma", dp->rxmaxdma);
4714 }
4715 
4716 
4717 /*
4718  * Gem kstat support
4719  */
4720 
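/*
 * GEM_LOCAL_DATA_SIZE: a single kmem allocation holds the gem_dev
 * softstate followed by the multicast address table (dp->mc_list), the
 * txbuf array (dp->tx_buf) and a pointer array sized to the tx buffers;
 * gem_do_attach() carves these regions out of the one block below.
 */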
4721 #define	GEM_LOCAL_DATA_SIZE(gc)	\
4722 	(sizeof (struct gem_dev) + \
4723 	sizeof (struct mcast_addr) * GEM_MAXMC + \
4724 	sizeof (struct txbuf) * ((gc)->gc_tx_buf_size) + \
4725 	sizeof (void *) * ((gc)->gc_tx_buf_size))
4726 
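/*
 * gem_do_attach: common back-end for a chip driver's attach(9E) routine.
 * A minimal, illustrative sketch of a caller follows; the names
 * mychip_conf, mychip_dev_attr and mychip_private are hypothetical, and
 * real drivers such as sfe differ in detail:
 *
 *	caddr_t			base;
 *	ddi_acc_handle_t	regs_ha;
 *	struct mychip_private	*lp;
 *
 *	if (gem_pci_regs_map_setup(dip, PCI_ADDR_IO, PCI_ADDR_MASK,
 *	    &mychip_dev_attr, &base, &regs_ha) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	lp = kmem_zalloc(sizeof (*lp), KM_SLEEP);
 *	... fill mychip_conf with ring geometry and gc_* callbacks ...
 *	if (gem_do_attach(dip, 0, &mychip_conf, base, &regs_ha,
 *	    lp, sizeof (*lp)) == NULL) {
 *		kmem_free(lp, sizeof (*lp));
 *		return (DDI_FAILURE);
 *	}
 *	return (DDI_SUCCESS);
 */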
4727 struct gem_dev *
4728 gem_do_attach(dev_info_t *dip, int port,
4729     struct gem_conf *gc, void *base, ddi_acc_handle_t *regs_handlep,
4730     void *lp, int lmsize)
4731 {
4732 	struct gem_dev		*dp;
4733 	int			i;
4734 	ddi_iblock_cookie_t	c;
4735 	mac_register_t		*macp = NULL;
4736 	int			ret;
4737 	int			unit;
4738 	int			nports;
4739 
4740 	unit = ddi_get_instance(dip);
4741 	if ((nports = gc->gc_nports) == 0) {
4742 		nports = 1;
4743 	}
4744 	if (nports == 1) {
4745 		ddi_set_driver_private(dip, NULL);
4746 	}
4747 
4748 	DPRINTF(2, (CE_CONT, "!gem%d: gem_do_attach: called cmd:ATTACH",
4749 	    unit));
4750 
4751 	/*
4752 	 * Allocate soft data structure
4753 	 */
4754 	dp = kmem_zalloc(GEM_LOCAL_DATA_SIZE(gc), KM_SLEEP);
4755 
4756 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
4757 		cmn_err(CE_WARN, "!gem%d: %s: mac_alloc failed",
4758 		    unit, __func__);
4759 		kmem_free(dp, GEM_LOCAL_DATA_SIZE(gc));
4760 		return (NULL);
4761 	}
4762 	/* ddi_set_driver_private(dip, dp); */
4763 
4764 	/* link to private area */
4765 	dp->private = lp;
4766 	dp->priv_size = lmsize;
4767 	dp->mc_list = (struct mcast_addr *)&dp[1];
4768 
4769 	dp->dip = dip;
4770 	(void) sprintf(dp->name, gc->gc_name, nports * unit + port);
4771 
4772 	/*
4773 	 * Get iblock cookie
4774 	 */
4775 	if (ddi_get_iblock_cookie(dip, 0, &c) != DDI_SUCCESS) {
4776 		cmn_err(CE_CONT,
4777 		    "!%s: gem_do_attach: ddi_get_iblock_cookie: failed",
4778 		    dp->name);
4779 		goto err_free_private;
4780 	}
4781 	dp->iblock_cookie = c;
4782 
4783 	/*
4784 	 * Initialize mutex's for this device.
4785 	 */
4786 	mutex_init(&dp->intrlock, NULL, MUTEX_DRIVER, (void *)c);
4787 	mutex_init(&dp->xmitlock, NULL, MUTEX_DRIVER, (void *)c);
4788 	cv_init(&dp->tx_drain_cv, NULL, CV_DRIVER, NULL);
4789 
4790 	/*
4791 	 * configure gem parameter
4792 	 */
4793 	dp->base_addr = base;
4794 	dp->regs_handle = *regs_handlep;
4795 	dp->gc = *gc;
4796 	gc = &dp->gc;
4797 	/* patch to simplify dma resource management */
4798 	gc->gc_tx_max_frags = 1;
4799 	gc->gc_tx_max_descs_per_pkt = 1;
4800 	gc->gc_tx_ring_size = gc->gc_tx_buf_size;
4801 	gc->gc_tx_ring_limit = gc->gc_tx_buf_limit;
4802 	gc->gc_tx_desc_write_oo = B_TRUE;
4803 
4804 	gc->gc_nports = nports;	/* fix nports */
4805 
4806 	/* fix copy thresholds */
4807 	gc->gc_tx_copy_thresh = max(ETHERMIN, gc->gc_tx_copy_thresh);
4808 	gc->gc_rx_copy_thresh = max(ETHERMIN, gc->gc_rx_copy_thresh);
4809 
4810 	/* fix rx buffer boundary for iocache line size */
4811 	ASSERT(gc->gc_dma_attr_txbuf.dma_attr_align-1 == gc->gc_tx_buf_align);
4812 	ASSERT(gc->gc_dma_attr_rxbuf.dma_attr_align-1 == gc->gc_rx_buf_align);
4813 	gc->gc_rx_buf_align = max(gc->gc_rx_buf_align, IOC_LINESIZE - 1);
4814 	gc->gc_dma_attr_rxbuf.dma_attr_align = gc->gc_rx_buf_align + 1;
4815 
4816 	/* fix descriptor boundary for cache line size */
4817 	gc->gc_dma_attr_desc.dma_attr_align =
4818 	    max(gc->gc_dma_attr_desc.dma_attr_align, IOC_LINESIZE);
4819 
4820 	/* patch get_packet method */
4821 	if (gc->gc_get_packet == NULL) {
4822 		gc->gc_get_packet = &gem_get_packet_default;
4823 	}
4824 
4825 	/* patch get_rx_start method */
4826 	if (gc->gc_rx_start == NULL) {
4827 		gc->gc_rx_start = &gem_rx_start_default;
4828 	}
4829 
4830 	/* calculate descriptor area */
4831 	if (gc->gc_rx_desc_unit_shift >= 0) {
4832 		dp->rx_desc_size =
4833 		    ROUNDUP(gc->gc_rx_ring_size << gc->gc_rx_desc_unit_shift,
4834 		    gc->gc_dma_attr_desc.dma_attr_align);
4835 	}
4836 	if (gc->gc_tx_desc_unit_shift >= 0) {
4837 		dp->tx_desc_size =
4838 		    ROUNDUP(gc->gc_tx_ring_size << gc->gc_tx_desc_unit_shift,
4839 		    gc->gc_dma_attr_desc.dma_attr_align);
4840 	}
4841 
4842 	dp->mtu = ETHERMTU;
4843 	dp->tx_buf = (void *)&dp->mc_list[GEM_MAXMC];
4844 	/* link tx buffers */
4845 	for (i = 0; i < dp->gc.gc_tx_buf_size; i++) {
4846 		dp->tx_buf[i].txb_next =
4847 		    &dp->tx_buf[SLOT(i + 1, dp->gc.gc_tx_buf_size)];
4848 	}
4849 
4850 	dp->rxmode	   = 0;
4851 	dp->speed	   = GEM_SPD_10;	/* default is 10Mbps */
4852 	dp->full_duplex    = B_FALSE;		/* default is half */
4853 	dp->flow_control   = FLOW_CONTROL_NONE;
4854 	dp->poll_pkt_delay = 8;		/* typical rx packet coalescing delay */
4855 
4856 	/* performance tuning parameters */
4857 	dp->txthr    = ETHERMAX;	/* tx fifo threshold */
4858 	dp->txmaxdma = 16*4;		/* tx max dma burst size */
4859 	dp->rxthr    = 128;		/* rx fifo threshold */
4860 	dp->rxmaxdma = 16*4;		/* rx max dma burst size */
4861 
4862 	/*
4863 	 * Get media mode information from .conf file
4864 	 */
4865 	gem_read_conf(dp);
4866 
4867 	/* rx_buf_len is the required buffer length, excluding alignment padding */
4868 	dp->rx_buf_len = MAXPKTBUF(dp) + dp->gc.gc_rx_header_len;
4869 
4870 	/*
4871 	 * Reset the chip
4872 	 */
4873 	mutex_enter(&dp->intrlock);
4874 	dp->nic_state = NIC_STATE_STOPPED;
4875 	ret = (*dp->gc.gc_reset_chip)(dp);
4876 	mutex_exit(&dp->intrlock);
4877 	if (ret != GEM_SUCCESS) {
4878 		goto err_free_regs;
4879 	}
4880 
4881 	/*
4882 	 * HW dependent parameter initialization
4883 	 */
4884 	mutex_enter(&dp->intrlock);
4885 	ret = (*dp->gc.gc_attach_chip)(dp);
4886 	mutex_exit(&dp->intrlock);
4887 	if (ret != GEM_SUCCESS) {
4888 		goto err_free_regs;
4889 	}
4890 
4891 #ifdef DEBUG_MULTIFRAGS
4892 	dp->gc.gc_tx_copy_thresh = dp->mtu;
4893 #endif
4894 	/* allocate tx and rx resources */
4895 	if (gem_alloc_memory(dp)) {
4896 		goto err_free_regs;
4897 	}
4898 
4899 	DPRINTF(0, (CE_CONT,
4900 	    "!%s: at 0x%x, %02x:%02x:%02x:%02x:%02x:%02x",
4901 	    dp->name, (long)dp->base_addr,
4902 	    dp->dev_addr.ether_addr_octet[0],
4903 	    dp->dev_addr.ether_addr_octet[1],
4904 	    dp->dev_addr.ether_addr_octet[2],
4905 	    dp->dev_addr.ether_addr_octet[3],
4906 	    dp->dev_addr.ether_addr_octet[4],
4907 	    dp->dev_addr.ether_addr_octet[5]));
4908 
4909 	/* copy mac address */
4910 	dp->cur_addr = dp->dev_addr;
4911 
4912 	gem_gld3_init(dp, macp);
4913 
4914 	/* Probe MII phy (scan phy) */
4915 	dp->mii_lpable = 0;
4916 	dp->mii_advert = 0;
4917 	dp->mii_exp = 0;
4918 	dp->mii_ctl1000 = 0;
4919 	dp->mii_stat1000 = 0;
4920 	if ((*dp->gc.gc_mii_probe)(dp) != GEM_SUCCESS) {
4921 		goto err_free_ring;
4922 	}
4923 
4924 	/* mask unsupported abilities */
4925 	dp->anadv_autoneg &= BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4926 	dp->anadv_1000fdx &=
4927 	    BOOLEAN(dp->mii_xstatus &
4928 	    (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD));
4929 	dp->anadv_1000hdx &=
4930 	    BOOLEAN(dp->mii_xstatus &
4931 	    (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET));
4932 	dp->anadv_100t4  &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4933 	dp->anadv_100fdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4934 	dp->anadv_100hdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4935 	dp->anadv_10fdx  &= BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4936 	dp->anadv_10hdx  &= BOOLEAN(dp->mii_status & MII_STATUS_10);
4937 
4938 	gem_choose_forcedmode(dp);
4939 
4940 	/* initialize MII phy if required */
4941 	if (dp->gc.gc_mii_init) {
4942 		if ((*dp->gc.gc_mii_init)(dp) != GEM_SUCCESS) {
4943 			goto err_free_ring;
4944 		}
4945 	}
4946 
4947 	/*
4948 	 * initialize ndd parameters, including mii statistics
4949 	 */
4950 	gem_nd_setup(dp);
4951 
4952 	/*
4953 	 * Register with the GLDv3 framework and add the interrupt handler.
4954 	 */
4955 	if (ret = mac_register(macp, &dp->mh)) {
4956 		cmn_err(CE_WARN, "!%s: mac_register failed, error:%d",
4957 		    dp->name, ret);
4958 		goto err_release_stats;
4959 	}
4960 	mac_free(macp);
4961 	macp = NULL;
4962 
4963 	if (dp->misc_flag & GEM_SOFTINTR) {
4964 		if (ddi_add_softintr(dip,
4965 		    DDI_SOFTINT_LOW, &dp->soft_id,
4966 		    NULL, NULL,
4967 		    (uint_t (*)(caddr_t))gem_intr,
4968 		    (caddr_t)dp) != DDI_SUCCESS) {
4969 			cmn_err(CE_WARN, "!%s: ddi_add_softintr failed",
4970 			    dp->name);
4971 			goto err_unregister;
4972 		}
4973 	} else if ((dp->misc_flag & GEM_NOINTR) == 0) {
4974 		if (ddi_add_intr(dip, 0, NULL, NULL,
4975 		    (uint_t (*)(caddr_t))gem_intr,
4976 		    (caddr_t)dp) != DDI_SUCCESS) {
4977 			cmn_err(CE_WARN, "!%s: ddi_add_intr failed", dp->name);
4978 			goto err_unregister;
4979 		}
4980 	} else {
4981 		/*
4982 		 * Don't use interrupts;
4983 		 * schedule the first call of gem_intr_watcher instead.
4984 		 */
4985 		dp->intr_watcher_id =
4986 		    timeout((void (*)(void *))gem_intr_watcher,
4987 		    (void *)dp, drv_usectohz(3*1000000));
4988 	}
4989 
4990 	/* link this device to dev_info */
4991 	dp->next = (struct gem_dev *)ddi_get_driver_private(dip);
4992 	dp->port = port;
4993 	ddi_set_driver_private(dip, (caddr_t)dp);
4994 
4995 	/* reset mii phy and start mii link watcher */
4996 	gem_mii_start(dp);
4997 
4998 	DPRINTF(2, (CE_CONT, "!gem_do_attach: return: success"));
4999 	return (dp);
5000 
5001 err_unregister:
5002 	(void) mac_unregister(dp->mh);
5003 err_release_stats:
5004 	/* release NDD resources */
5005 	gem_nd_cleanup(dp);
5006 
5007 err_free_ring:
5008 	gem_free_memory(dp);
5009 err_free_regs:
5010 	ddi_regs_map_free(&dp->regs_handle);
5011 err_free_locks:
5012 	mutex_destroy(&dp->xmitlock);
5013 	mutex_destroy(&dp->intrlock);
5014 	cv_destroy(&dp->tx_drain_cv);
5015 err_free_private:
5016 	if (macp) {
5017 		mac_free(macp);
5018 	}
5019 	kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(gc));
5020 
5021 	return (NULL);
5022 }
5023 
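/*
 * gem_do_detach: walk the per-port gem_dev list hung off the dev_info
 * private pointer, unregister each port from the MAC layer, tear down its
 * interrupt, ndd and DMA resources, and finally release the shared
 * private area and the register mapping.
 */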
5024 int
5025 gem_do_detach(dev_info_t *dip)
5026 {
5027 	struct gem_dev	*dp;
5028 	struct gem_dev	*tmp;
5029 	caddr_t		private;
5030 	int		priv_size;
5031 	ddi_acc_handle_t	rh;
5032 
5033 	dp = GEM_GET_DEV(dip);
5034 	if (dp == NULL) {
5035 		return (DDI_SUCCESS);
5036 	}
5037 
5038 	rh = dp->regs_handle;
5039 	private = dp->private;
5040 	priv_size = dp->priv_size;
5041 
5042 	while (dp) {
5043 		/* unregister with gld v3 */
5044 		if (mac_unregister(dp->mh) != 0) {
5045 			return (DDI_FAILURE);
5046 		}
5047 
5048 		/* ensure no rx buffers are still in use */
5049 		if (dp->rx_buf_allocated != dp->rx_buf_freecnt) {
5050 			/* resource is busy */
5051 			cmn_err(CE_PANIC,
5052 			    "!%s: %s: rxbuf is busy: allocated:%d, freecnt:%d",
5053 			    dp->name, __func__,
5054 			    dp->rx_buf_allocated, dp->rx_buf_freecnt);
5055 			/* NOT REACHED */
5056 		}
5057 
5058 		/* stop mii link watcher */
5059 		gem_mii_stop(dp);
5060 
5061 		/* unregister interrupt handler */
5062 		if (dp->misc_flag & GEM_SOFTINTR) {
5063 			ddi_remove_softintr(dp->soft_id);
5064 		} else if ((dp->misc_flag & GEM_NOINTR) == 0) {
5065 			ddi_remove_intr(dip, 0, dp->iblock_cookie);
5066 		} else {
5067 			/* stop interrupt watcher */
5068 			if (dp->intr_watcher_id) {
5069 				while (untimeout(dp->intr_watcher_id) == -1)
5070 					;
5071 				dp->intr_watcher_id = 0;
5072 			}
5073 		}
5074 
5075 		/* release NDD resources */
5076 		gem_nd_cleanup(dp);
5077 		/* release buffers, descriptors and dma resources */
5078 		gem_free_memory(dp);
5079 
5080 		/* release locks and condition variables */
5081 		mutex_destroy(&dp->xmitlock);
5082 		mutex_destroy(&dp->intrlock);
5083 		cv_destroy(&dp->tx_drain_cv);
5084 
5085 		/* release basic memory resources */
5086 		tmp = dp->next;
5087 		kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(&dp->gc));
5088 		dp = tmp;
5089 	}
5090 
5091 	/* release common private memory for the nic */
5092 	kmem_free(private, priv_size);
5093 
5094 	/* release register mapping resources */
5095 	ddi_regs_map_free(&rh);
5096 
5097 	DPRINTF(2, (CE_CONT, "!%s%d: gem_do_detach: return: success",
5098 	    ddi_driver_name(dip), ddi_get_instance(dip)));
5099 
5100 	return (DDI_SUCCESS);
5101 }
5102 
5103 int
5104 gem_suspend(dev_info_t *dip)
5105 {
5106 	struct gem_dev	*dp;
5107 
5108 	/*
5109 	 * stop the device
5110 	 */
5111 	dp = GEM_GET_DEV(dip);
5112 	ASSERT(dp);
5113 
5114 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
5115 
5116 	for (; dp; dp = dp->next) {
5117 
5118 		/* stop mii link watcher */
5119 		gem_mii_stop(dp);
5120 
5121 		/* stop interrupt watcher for no-intr mode */
5122 		if (dp->misc_flag & GEM_NOINTR) {
5123 			if (dp->intr_watcher_id) {
5124 				while (untimeout(dp->intr_watcher_id) == -1)
5125 					;
5126 			}
5127 			dp->intr_watcher_id = 0;
5128 		}
5129 
5130 		/* stop tx timeout watcher */
5131 		if (dp->timeout_id) {
5132 			while (untimeout(dp->timeout_id) == -1)
5133 				;
5134 			dp->timeout_id = 0;
5135 		}
5136 
5137 		/* make the nic state inactive */
5138 		mutex_enter(&dp->intrlock);
5139 		(void) gem_mac_stop(dp, 0);
5140 		ASSERT(!dp->mac_active);
5141 
5142 		/* no further register access */
5143 		dp->mac_suspended = B_TRUE;
5144 		mutex_exit(&dp->intrlock);
5145 	}
5146 
5147 	/* XXX - power down the nic */
5148 
5149 	return (DDI_SUCCESS);
5150 }
5151 
5152 int
5153 gem_resume(dev_info_t *dip)
5154 {
5155 	struct gem_dev	*dp;
5156 
5157 	/*
5158 	 * restart the device
5159 	 */
5160 	dp = GEM_GET_DEV(dip);
5161 	ASSERT(dp);
5162 
5163 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
5164 
5165 	for (; dp; dp = dp->next) {
5166 
5167 		/*
5168 		 * Bring up the nic after power up
5169 		 */
5170 
5171 		/* the chip-specific (gem_xxx.c) layer sets up the power management state. */
5172 		ASSERT(!dp->mac_active);
5173 
5174 		/* reset the chip, because we are just after power up. */
5175 		mutex_enter(&dp->intrlock);
5176 
5177 		dp->mac_suspended = B_FALSE;
5178 		dp->nic_state = NIC_STATE_STOPPED;
5179 
5180 		if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
5181 			cmn_err(CE_WARN, "%s: %s: failed to reset chip",
5182 			    dp->name, __func__);
5183 			mutex_exit(&dp->intrlock);
5184 			goto err;
5185 		}
5186 		mutex_exit(&dp->intrlock);
5187 
5188 		/* initialize mii phy because we are just after power up */
5189 		if (dp->gc.gc_mii_init) {
5190 			(void) (*dp->gc.gc_mii_init)(dp);
5191 		}
5192 
5193 		if (dp->misc_flag & GEM_NOINTR) {
5194 			/*
5195 			 * schedule first call of gem_intr_watcher
5196 			 * instead of interrupts.
5197 			 */
5198 			dp->intr_watcher_id =
5199 			    timeout((void (*)(void *))gem_intr_watcher,
5200 			    (void *)dp, drv_usectohz(3*1000000));
5201 		}
5202 
5203 		/* restart mii link watcher */
5204 		gem_mii_start(dp);
5205 
5206 		/* restart mac */
5207 		mutex_enter(&dp->intrlock);
5208 
5209 		if (gem_mac_init(dp) != GEM_SUCCESS) {
5210 			mutex_exit(&dp->intrlock);
5211 			goto err_reset;
5212 		}
5213 		dp->nic_state = NIC_STATE_INITIALIZED;
5214 
5215 		/* set up media mode if the link is already up */
5216 		if (dp->mii_state == MII_STATE_LINKUP) {
5217 			if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
5218 				mutex_exit(&dp->intrlock);
5219 				goto err_reset;
5220 			}
5221 		}
5222 
5223 		/* enable mac address and rx filter */
5224 		dp->rxmode |= RXMODE_ENABLE;
5225 		if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
5226 			mutex_exit(&dp->intrlock);
5227 			goto err_reset;
5228 		}
5229 		dp->nic_state = NIC_STATE_ONLINE;
5230 
5231 		/* restart tx timeout watcher */
5232 		dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
5233 		    (void *)dp,
5234 		    dp->gc.gc_tx_timeout_interval);
5235 
5236 		/* now the nic is fully functional */
5237 		if (dp->mii_state == MII_STATE_LINKUP) {
5238 			if (gem_mac_start(dp) != GEM_SUCCESS) {
5239 				mutex_exit(&dp->intrlock);
5240 				goto err_reset;
5241 			}
5242 		}
5243 		mutex_exit(&dp->intrlock);
5244 	}
5245 
5246 	return (DDI_SUCCESS);
5247 
5248 err_reset:
5249 	if (dp->intr_watcher_id) {
5250 		while (untimeout(dp->intr_watcher_id) == -1)
5251 			;
5252 		dp->intr_watcher_id = 0;
5253 	}
5254 	mutex_enter(&dp->intrlock);
5255 	(*dp->gc.gc_reset_chip)(dp);
5256 	dp->nic_state = NIC_STATE_STOPPED;
5257 	mutex_exit(&dp->intrlock);
5258 
5259 err:
5260 	return (DDI_FAILURE);
5261 }
5262 
5263 /*
5264  * misc routines for PCI
5265  */
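/*
 * gem_search_pci_cap: walk the PCI capability list starting at
 * PCI_CONF_CAP_PTR and return the config space offset of the capability
 * whose ID matches 'target', or 0 if it is not present.
 */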
5266 uint8_t
5267 gem_search_pci_cap(dev_info_t *dip,
5268     ddi_acc_handle_t conf_handle, uint8_t target)
5269 {
5270 	uint8_t		pci_cap_ptr;
5271 	uint32_t	pci_cap;
5272 
5273 	/* walk the PCI capability list looking for the target capability */
5274 	pci_cap_ptr = pci_config_get8(conf_handle, PCI_CONF_CAP_PTR);
5275 	while (pci_cap_ptr) {
5276 		/* read pci capability header */
5277 		pci_cap = pci_config_get32(conf_handle, pci_cap_ptr);
5278 		if ((pci_cap & 0xff) == target) {
5279 			/* found */
5280 			break;
5281 		}
5282 		/* get next_ptr */
5283 		pci_cap_ptr = (pci_cap >> 8) & 0xff;
5284 	}
5285 	return (pci_cap_ptr);
5286 }
5287 
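/*
 * gem_pci_set_power_state: locate the power management capability and
 * write the requested power state (0 through 3, i.e. D0-D3) into PMCSR.
 * Whether the device supports the requested state is not checked yet
 * (see the note in the body).
 */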
5288 int
5289 gem_pci_set_power_state(dev_info_t *dip,
5290     ddi_acc_handle_t conf_handle, uint_t new_mode)
5291 {
5292 	uint8_t		pci_cap_ptr;
5293 	uint32_t	pmcsr;
5294 	uint_t		unit;
5295 	const char	*drv_name;
5296 
5297 	ASSERT(new_mode < 4);
5298 
5299 	unit = ddi_get_instance(dip);
5300 	drv_name = ddi_driver_name(dip);
5301 
5302 	/* search for the power management capability */
5303 	pci_cap_ptr = gem_search_pci_cap(dip, conf_handle, PCI_CAP_ID_PM);
5304 
5305 	if (pci_cap_ptr == 0) {
5306 		cmn_err(CE_CONT,
5307 		    "!%s%d: doesn't have pci power management capability",
5308 		    drv_name, unit);
5309 		return (DDI_FAILURE);
5310 	}
5311 
5312 	/* read power management capabilities */
5313 	pmcsr = pci_config_get32(conf_handle, pci_cap_ptr + PCI_PMCSR);
5314 
5315 	DPRINTF(0, (CE_CONT,
5316 	    "!%s%d: pmc found at 0x%x: pmcsr: 0x%08x",
5317 	    drv_name, unit, pci_cap_ptr, pmcsr));
5318 
5319 	/*
5320 	 * Is the requested power mode supported?
5321 	 */
5322 	/* not yet */
5323 
5324 	/*
5325 	 * move to new mode
5326 	 */
5327 	pmcsr = (pmcsr & ~PCI_PMCSR_STATE_MASK) | new_mode;
5328 	pci_config_put32(conf_handle, pci_cap_ptr + PCI_PMCSR, pmcsr);
5329 
5330 	return (DDI_SUCCESS);
5331 }
5332 
5333 /*
5334  * select a suitable register set matching the specified address space or
5335  * register offset in PCI config space
5336  */
5337 int
5338 gem_pci_regs_map_setup(dev_info_t *dip, uint32_t which, uint32_t mask,
5339     struct ddi_device_acc_attr *attrp,
5340     caddr_t *basep, ddi_acc_handle_t *hp)
5341 {
5342 	struct pci_phys_spec	*regs;
5343 	uint_t		len;
5344 	uint_t		unit;
5345 	uint_t		n;
5346 	uint_t		i;
5347 	int		ret;
5348 	const char	*drv_name;
5349 
5350 	unit = ddi_get_instance(dip);
5351 	drv_name = ddi_driver_name(dip);
5352 
5353 	/* Search IO-range or memory-range to be mapped */
5354 	regs = NULL;
5355 	len  = 0;
5356 
5357 	if ((ret = ddi_prop_lookup_int_array(
5358 	    DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
5359 	    "reg", (void *)&regs, &len)) != DDI_PROP_SUCCESS) {
5360 		cmn_err(CE_WARN,
5361 		    "!%s%d: failed to get reg property (ret:%d)",
5362 		    drv_name, unit, ret);
5363 		return (DDI_FAILURE);
5364 	}
5365 	n = len / (sizeof (struct pci_phys_spec) / sizeof (int));
5366 
5367 	ASSERT(regs != NULL && len > 0);
5368 
5369 #if GEM_DEBUG_LEVEL > 0
5370 	for (i = 0; i < n; i++) {
5371 		cmn_err(CE_CONT,
5372 		    "!%s%d: regs[%d]: %08x.%08x.%08x.%08x.%08x",
5373 		    drv_name, unit, i,
5374 		    regs[i].pci_phys_hi,
5375 		    regs[i].pci_phys_mid,
5376 		    regs[i].pci_phys_low,
5377 		    regs[i].pci_size_hi,
5378 		    regs[i].pci_size_low);
5379 	}
5380 #endif
5381 	for (i = 0; i < n; i++) {
5382 		if ((regs[i].pci_phys_hi & mask) == which) {
5383 			/* it's the requested space */
5384 			ddi_prop_free(regs);
5385 			goto address_range_found;
5386 		}
5387 	}
5388 	ddi_prop_free(regs);
5389 	return (DDI_FAILURE);
5390 
5391 address_range_found:
5392 	if ((ret = ddi_regs_map_setup(dip, i, basep, 0, 0, attrp, hp))
5393 	    != DDI_SUCCESS) {
5394 		cmn_err(CE_CONT,
5395 		    "!%s%d: ddi_regs_map_setup failed (ret:%d)",
5396 		    drv_name, unit, ret);
5397 	}
5398 
5399 	return (ret);
5400 }
5401 
5402 void
5403 gem_mod_init(struct dev_ops *dop, char *name)
5404 {
5405 	mac_init_ops(dop, name);
5406 }
5407 
5408 void
5409 gem_mod_fini(struct dev_ops *dop)
5410 {
5411 	mac_fini_ops(dop);
5412 }
5413