xref: /titanic_50/usr/src/uts/common/io/sfe/sfe_util.c (revision 5c51f1241dbbdf2656d0e10011981411ed0c9673)
1 /*
2  * sfe_util.c: general ethernet mac driver framework version 2.6
3  *
4  * Copyright (c) 2002-2008 Masayuki Murayama.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  *    this list of conditions and the following disclaimer in the documentation
14  *    and/or other materials provided with the distribution.
15  *
16  * 3. Neither the name of the author nor the names of its contributors may be
17  *    used to endorse or promote products derived from this software without
18  *    specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31  * DAMAGE.
32  */
33 
34 /*
35  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
36  * Use is subject to license terms.
37  */
38 
39 /*
40  * System Header files.
41  */
42 #include <sys/types.h>
43 #include <sys/conf.h>
44 #include <sys/debug.h>
45 #include <sys/kmem.h>
46 #include <sys/vtrace.h>
47 #include <sys/ethernet.h>
48 #include <sys/modctl.h>
49 #include <sys/errno.h>
50 #include <sys/ddi.h>
51 #include <sys/sunddi.h>
52 #include <sys/stream.h>		/* required for MBLK* */
53 #include <sys/strsun.h>		/* required for mionack() */
54 #include <sys/byteorder.h>
55 #include <sys/pci.h>
56 #include <inet/common.h>
57 #include <inet/led.h>
58 #include <inet/mi.h>
59 #include <inet/nd.h>
60 #include <sys/crc32.h>
61 
62 #include <sys/note.h>
63 
64 #include "sfe_mii.h"
65 #include "sfe_util.h"
66 
67 
68 
69 extern char ident[];
70 
71 /* Debugging support */
72 #ifdef GEM_DEBUG_LEVEL
73 static int gem_debug = GEM_DEBUG_LEVEL;
74 #define	DPRINTF(n, args)	if (gem_debug > (n)) cmn_err args
75 #else
76 #define	DPRINTF(n, args)
77 #undef ASSERT
78 #define	ASSERT(x)
79 #endif
80 
81 #define	IOC_LINESIZE	0x40	/* Is it right for amd64? */
82 
83 /*
84  * Useful macros and typedefs
85  */
86 #define	ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
87 
88 #define	GET_NET16(p)	((((uint8_t *)(p))[0] << 8)| ((uint8_t *)(p))[1])
89 #define	GET_ETHERTYPE(p)	GET_NET16(((uint8_t *)(p)) + ETHERADDRL*2)
90 
91 #define	GET_IPTYPEv4(p)	(((uint8_t *)(p))[sizeof (struct ether_header) + 9])
92 #define	GET_IPTYPEv6(p)	(((uint8_t *)(p))[sizeof (struct ether_header) + 6])
93 
94 
95 #ifndef INT32_MAX
96 #define	INT32_MAX	0x7fffffff
97 #endif
98 
99 #define	VTAG_OFF	(ETHERADDRL*2)
100 #ifndef VTAG_SIZE
101 #define	VTAG_SIZE	4
102 #endif
103 #ifndef VTAG_TPID
104 #define	VTAG_TPID	0x8100U
105 #endif
106 
107 #define	GET_TXBUF(dp, sn)	\
108 	&(dp)->tx_buf[SLOT((dp)->tx_slots_base + (sn), (dp)->gc.gc_tx_buf_size)]
109 
110 #ifndef offsetof
111 #define	offsetof(t, m)	((long)&(((t *) 0)->m))
112 #endif
113 #define	TXFLAG_VTAG(flag)	\
114 	(((flag) & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT)
115 
116 #define	MAXPKTBUF(dp)	\
117 	((dp)->mtu + sizeof (struct ether_header) + VTAG_SIZE + ETHERFCSL)
118 
119 #define	WATCH_INTERVAL_FAST	drv_usectohz(100*1000)	/* 100mS */
120 #define	BOOLEAN(x)	((x) != 0)
121 
122 /*
 * Macros to distinguish chip generations.
124  */
125 
126 /*
127  * Private functions
128  */
129 static void gem_mii_start(struct gem_dev *);
130 static void gem_mii_stop(struct gem_dev *);
131 
132 /* local buffer management */
133 static void gem_nd_setup(struct gem_dev *dp);
134 static void gem_nd_cleanup(struct gem_dev *dp);
135 static int gem_alloc_memory(struct gem_dev *);
136 static void gem_free_memory(struct gem_dev *);
137 static void gem_init_rx_ring(struct gem_dev *);
138 static void gem_init_tx_ring(struct gem_dev *);
139 __INLINE__ static void gem_append_rxbuf(struct gem_dev *, struct rxbuf *);
140 
141 static void gem_tx_timeout(struct gem_dev *);
142 static void gem_mii_link_watcher(struct gem_dev *dp);
143 static int gem_mac_init(struct gem_dev *dp);
144 static int gem_mac_start(struct gem_dev *dp);
145 static int gem_mac_stop(struct gem_dev *dp, uint_t flags);
146 static void gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp);
147 
148 static	struct ether_addr	gem_etherbroadcastaddr = {
149 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
150 };
151 
152 int gem_speed_value[] = {10, 100, 1000};
153 
154 /* ============================================================== */
155 /*
156  * Misc runtime routines
157  */
158 /* ============================================================== */
159 /*
160  * Ether CRC calculation according to 21143 data sheet
161  */
162 uint32_t
163 gem_ether_crc_le(const uint8_t *addr, int len)
164 {
165 	uint32_t	crc;
166 
167 	CRC32(crc, addr, ETHERADDRL, 0xffffffffU, crc32_table);
168 	return (crc);
169 }
170 
/*
 * gem_ether_crc_be: compute the big-endian (non-reflected) CRC32 over
 * the first `len` bytes of `addr`, one bit at a time.
 */
uint32_t
gem_ether_crc_be(const uint8_t *addr, int len)
{
#define	CRC32_POLY_BE	0x04c11db7
	uint32_t	crc = 0xffffffff;
	int		i;

	for (i = 0; i < len; i++) {
		unsigned	byte = addr[i];
		int		b;

		/* feed each bit of the byte, LSB first */
		for (b = 0; b < 8; b++) {
			uint32_t	feedback;

			feedback = (((crc >> 31) ^ byte) & 1)
			    ? CRC32_POLY_BE : 0;
			crc = (crc << 1) ^ feedback;
			byte >>= 1;
		}
	}
	return (crc);
#undef	CRC32_POLY_BE
}
190 
191 int
192 gem_prop_get_int(struct gem_dev *dp, char *prop_template, int def_val)
193 {
194 	char	propname[32];
195 
196 	(void) sprintf(propname, prop_template, dp->name);
197 
198 	return (ddi_prop_get_int(DDI_DEV_T_ANY, dp->dip,
199 	    DDI_PROP_DONTPASS, propname, def_val));
200 }
201 
/*
 * gem_population: return the number of 1 bits in x (population count).
 */
static int
gem_population(uint32_t x)
{
	int	cnt;

	/*
	 * Shift the unsigned operand instead of testing (1 << i):
	 * (1 << 31) overflows a signed int, which is undefined behavior.
	 */
	cnt = 0;
	while (x) {
		cnt += x & 1;
		x >>= 1;
	}
	return (cnt);
}
216 
217 #ifdef GEM_DEBUG_LEVEL
218 #ifdef GEM_DEBUG_VLAN
/*
 * gem_dump_packet: debug helper which formats a one-line summary of an
 * ethernet frame held in the mblk chain `mp` (addresses, optional vlan
 * tag, ethertype, fragment lengths, and IPv4/TCP/UDP details) and logs
 * it via cmn_err().  When check_cksum is true, the L4 checksum is
 * verified with ip_cksum() and "(ok)"/"(ng)" is appended.
 */
static void
gem_dump_packet(struct gem_dev *dp, char *title, mblk_t *mp,
    boolean_t check_cksum)
{
	char	msg[180];
	uint8_t	buf[18+20+20];	/* ether + IPv4 + TCP headers we decode */
	uint8_t	*p;
	size_t	offset;
	uint_t	ethertype;
	uint_t	proto;
	uint_t	ipproto = 0;
	uint_t	iplen;
	uint_t	iphlen;
	uint_t	tcplen;
	uint_t	udplen;
	uint_t	cksum;
	int	rest;
	int	len;
	char	*bp;
	mblk_t	*tp;
	extern uint_t	ip_cksum(mblk_t *, int, uint32_t);

	msg[0] = 0;
	bp = msg;

	/* linearize the packet headers from the mblk chain into buf[] */
	rest = sizeof (buf);
	offset = 0;
	for (tp = mp; tp; tp = tp->b_cont) {
		len = tp->b_wptr - tp->b_rptr;
		len = min(rest, len);
		bcopy(tp->b_rptr, &buf[offset], len);
		rest -= len;
		offset += len;
		if (rest == 0) {
			break;
		}
	}

	offset = 0;
	p = &buf[offset];

	/* ethernet addresses (source -> destination) */
	sprintf(bp,
	    "ether: %02x:%02x:%02x:%02x:%02x:%02x"
	    " -> %02x:%02x:%02x:%02x:%02x:%02x",
	    p[6], p[7], p[8], p[9], p[10], p[11],
	    p[0], p[1], p[2], p[3], p[4], p[5]);
	bp = &msg[strlen(msg)];

	/* vlan tag and ethertype */
	ethertype = GET_ETHERTYPE(p);
	if (ethertype == VTAG_TPID) {
		sprintf(bp, " vtag:0x%04x", GET_NET16(&p[14]));
		bp = &msg[strlen(msg)];

		/* skip the vlan tag to reach the encapsulated ethertype */
		offset += VTAG_SIZE;
		p = &buf[offset];
		ethertype = GET_ETHERTYPE(p);
	}
	sprintf(bp, " type:%04x", ethertype);
	bp = &msg[strlen(msg)];

	/* ethernet packet length, plus per-fragment lengths if chained */
	sprintf(bp, " mblklen:%d", msgdsize(mp));
	bp = &msg[strlen(msg)];
	if (mp->b_cont) {
		sprintf(bp, "(");
		bp = &msg[strlen(msg)];
		for (tp = mp; tp; tp = tp->b_cont) {
			if (tp == mp) {
				sprintf(bp, "%d", tp->b_wptr - tp->b_rptr);
			} else {
				sprintf(bp, "+%d", tp->b_wptr - tp->b_rptr);
			}
			bp = &msg[strlen(msg)];
		}
		sprintf(bp, ")");
		bp = &msg[strlen(msg)];
	}

	if (ethertype != ETHERTYPE_IP) {
		/* only IPv4 payloads are decoded any further */
		goto x;
	}

	/* ip addresses, protocol and total length */
	offset += sizeof (struct ether_header);
	p = &buf[offset];
	ipproto = p[9];
	iplen = GET_NET16(&p[2]);
	sprintf(bp, ", ip: %d.%d.%d.%d -> %d.%d.%d.%d proto:%d iplen:%d",
	    p[12], p[13], p[14], p[15],
	    p[16], p[17], p[18], p[19],
	    ipproto, iplen);
	bp = (void *)&msg[strlen(msg)];

	iphlen = (p[0] & 0xf) * 4;

	/* partial cksum for the pseudo header (addresses + protocol) */
	cksum = *(uint16_t *)&p[12];
	cksum += *(uint16_t *)&p[14];
	cksum += *(uint16_t *)&p[16];
	cksum += *(uint16_t *)&p[18];
	cksum += BE_16(ipproto);

	/* tcp or udp protocol header */
	offset += iphlen;
	p = &buf[offset];
	if (ipproto == IPPROTO_TCP) {
		tcplen = iplen - iphlen;
		sprintf(bp, ", tcp: len:%d cksum:%x",
		    tcplen, GET_NET16(&p[16]));
		bp = (void *)&msg[strlen(msg)];

		if (check_cksum) {
			cksum += BE_16(tcplen);
			cksum = (uint16_t)ip_cksum(mp, offset, cksum);
			sprintf(bp, " (%s)",
			    (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
			bp = (void *)&msg[strlen(msg)];
		}
	} else if (ipproto == IPPROTO_UDP) {
		udplen = GET_NET16(&p[4]);
		sprintf(bp, ", udp: len:%d cksum:%x",
		    udplen, GET_NET16(&p[6]));
		bp = (void *)&msg[strlen(msg)];

		/* a zero udp checksum means "not computed"; skip check */
		if (GET_NET16(&p[6]) && check_cksum) {
			cksum += *(uint16_t *)&p[4];
			cksum = (uint16_t)ip_cksum(mp, offset, cksum);
			sprintf(bp, " (%s)",
			    (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
			bp = (void *)&msg[strlen(msg)];
		}
	}
x:
	cmn_err(CE_CONT, "!%s: %s: %s", dp->name, title, msg);
}
356 #endif /* GEM_DEBUG_VLAN */
357 #endif /* GEM_DEBUG_LEVEL */
358 
359 /* ============================================================== */
360 /*
361  * IO cache flush
362  */
363 /* ============================================================== */
364 __INLINE__ void
365 gem_rx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
366 {
367 	int	n;
368 	int	m;
369 	int	rx_desc_unit_shift = dp->gc.gc_rx_desc_unit_shift;
370 
371 	/* sync active descriptors */
372 	if (rx_desc_unit_shift < 0 || nslot == 0) {
373 		/* no rx descriptor ring */
374 		return;
375 	}
376 
377 	n = dp->gc.gc_rx_ring_size - head;
378 	if ((m = nslot - n) > 0) {
379 		(void) ddi_dma_sync(dp->desc_dma_handle,
380 		    (off_t)0,
381 		    (size_t)(m << rx_desc_unit_shift),
382 		    how);
383 		nslot = n;
384 	}
385 
386 	(void) ddi_dma_sync(dp->desc_dma_handle,
387 	    (off_t)(head << rx_desc_unit_shift),
388 	    (size_t)(nslot << rx_desc_unit_shift),
389 	    how);
390 }
391 
392 __INLINE__ void
393 gem_tx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
394 {
395 	int	n;
396 	int	m;
397 	int	tx_desc_unit_shift = dp->gc.gc_tx_desc_unit_shift;
398 
399 	/* sync active descriptors */
400 	if (tx_desc_unit_shift < 0 || nslot == 0) {
401 		/* no tx descriptor ring */
402 		return;
403 	}
404 
405 	n = dp->gc.gc_tx_ring_size - head;
406 	if ((m = nslot - n) > 0) {
407 		(void) ddi_dma_sync(dp->desc_dma_handle,
408 		    (off_t)(dp->tx_ring_dma - dp->rx_ring_dma),
409 		    (size_t)(m << tx_desc_unit_shift),
410 		    how);
411 		nslot = n;
412 	}
413 
414 	(void) ddi_dma_sync(dp->desc_dma_handle,
415 	    (off_t)((head << tx_desc_unit_shift)
416 	    + (dp->tx_ring_dma - dp->rx_ring_dma)),
417 	    (size_t)(nslot << tx_desc_unit_shift),
418 	    how);
419 }
420 
421 static void
422 gem_rx_start_default(struct gem_dev *dp, int head, int nslot)
423 {
424 	gem_rx_desc_dma_sync(dp,
425 	    SLOT(head, dp->gc.gc_rx_ring_size), nslot,
426 	    DDI_DMA_SYNC_FORDEV);
427 }
428 
429 /* ============================================================== */
430 /*
431  * Buffer management
432  */
433 /* ============================================================== */
/*
 * gem_dump_txbuf: log a snapshot of the tx bookkeeping state (the
 * active, softq and free buffer queues, the descriptor queue and the
 * next interrupt position) at the given cmn_err level, showing each
 * sequence number both raw and as its ring slot.
 */
static void
gem_dump_txbuf(struct gem_dev *dp, int level, const char *title)
{
	cmn_err(level,
	    "!%s: %s: tx_active: %d[%d] %d[%d] (+%d), "
	    "tx_softq: %d[%d] %d[%d] (+%d), "
	    "tx_free: %d[%d] %d[%d] (+%d), "
	    "tx_desc: %d[%d] %d[%d] (+%d), "
	    "intr: %d[%d] (+%d), ",
	    dp->name, title,
	    dp->tx_active_head,
	    SLOT(dp->tx_active_head, dp->gc.gc_tx_buf_size),
	    dp->tx_active_tail,
	    SLOT(dp->tx_active_tail, dp->gc.gc_tx_buf_size),
	    dp->tx_active_tail - dp->tx_active_head,
	    dp->tx_softq_head,
	    SLOT(dp->tx_softq_head, dp->gc.gc_tx_buf_size),
	    dp->tx_softq_tail,
	    SLOT(dp->tx_softq_tail, dp->gc.gc_tx_buf_size),
	    dp->tx_softq_tail - dp->tx_softq_head,
	    dp->tx_free_head,
	    SLOT(dp->tx_free_head, dp->gc.gc_tx_buf_size),
	    dp->tx_free_tail,
	    SLOT(dp->tx_free_tail, dp->gc.gc_tx_buf_size),
	    dp->tx_free_tail - dp->tx_free_head,
	    dp->tx_desc_head,
	    SLOT(dp->tx_desc_head, dp->gc.gc_tx_ring_size),
	    dp->tx_desc_tail,
	    SLOT(dp->tx_desc_tail, dp->gc.gc_tx_ring_size),
	    dp->tx_desc_tail - dp->tx_desc_head,
	    dp->tx_desc_intr,
	    SLOT(dp->tx_desc_intr, dp->gc.gc_tx_ring_size),
	    dp->tx_desc_intr - dp->tx_desc_head);
}
468 
469 static void
470 gem_free_rxbuf(struct rxbuf *rbp)
471 {
472 	struct gem_dev	*dp;
473 
474 	dp = rbp->rxb_devp;
475 	ASSERT(mutex_owned(&dp->intrlock));
476 	rbp->rxb_next = dp->rx_buf_freelist;
477 	dp->rx_buf_freelist = rbp;
478 	dp->rx_buf_freecnt++;
479 }
480 
481 /*
482  * gem_get_rxbuf: supply a receive buffer which have been mapped into
483  * DMA space.
484  */
485 struct rxbuf *
486 gem_get_rxbuf(struct gem_dev *dp, int cansleep)
487 {
488 	struct rxbuf		*rbp;
489 	uint_t			count = 0;
490 	int			i;
491 	int			err;
492 
493 	ASSERT(mutex_owned(&dp->intrlock));
494 
495 	DPRINTF(3, (CE_CONT, "!gem_get_rxbuf: called freecnt:%d",
496 	    dp->rx_buf_freecnt));
497 	/*
498 	 * Get rx buffer management structure
499 	 */
500 	rbp = dp->rx_buf_freelist;
501 	if (rbp) {
502 		/* get one from the recycle list */
503 		ASSERT(dp->rx_buf_freecnt > 0);
504 
505 		dp->rx_buf_freelist = rbp->rxb_next;
506 		dp->rx_buf_freecnt--;
507 		rbp->rxb_next = NULL;
508 		return (rbp);
509 	}
510 
511 	/*
512 	 * Allocate a rx buffer management structure
513 	 */
514 	rbp = kmem_zalloc(sizeof (*rbp), cansleep ? KM_SLEEP : KM_NOSLEEP);
515 	if (rbp == NULL) {
516 		/* no memory */
517 		return (NULL);
518 	}
519 
520 	/*
521 	 * Prepare a back pointer to the device structure which will be
522 	 * refered on freeing the buffer later.
523 	 */
524 	rbp->rxb_devp = dp;
525 
526 	/* allocate a dma handle for rx data buffer */
527 	if ((err = ddi_dma_alloc_handle(dp->dip,
528 	    &dp->gc.gc_dma_attr_rxbuf,
529 	    (cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT),
530 	    NULL, &rbp->rxb_dh)) != DDI_SUCCESS) {
531 
532 		cmn_err(CE_WARN,
533 		    "!%s: %s: ddi_dma_alloc_handle:1 failed, err=%d",
534 		    dp->name, __func__, err);
535 
536 		kmem_free(rbp, sizeof (struct rxbuf));
537 		return (NULL);
538 	}
539 
540 	/* allocate a bounce buffer for rx */
541 	if ((err = ddi_dma_mem_alloc(rbp->rxb_dh,
542 	    ROUNDUP(dp->rx_buf_len, IOC_LINESIZE),
543 	    &dp->gc.gc_buf_attr,
544 		/*
545 		 * if the nic requires a header at the top of receive buffers,
546 		 * it may access the rx buffer randomly.
547 		 */
548 	    (dp->gc.gc_rx_header_len > 0)
549 	    ? DDI_DMA_CONSISTENT : DDI_DMA_STREAMING,
550 	    cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
551 	    NULL,
552 	    &rbp->rxb_buf, &rbp->rxb_buf_len,
553 	    &rbp->rxb_bah)) != DDI_SUCCESS) {
554 
555 		cmn_err(CE_WARN,
556 		    "!%s: %s: ddi_dma_mem_alloc: failed, err=%d",
557 		    dp->name, __func__, err);
558 
559 		ddi_dma_free_handle(&rbp->rxb_dh);
560 		kmem_free(rbp, sizeof (struct rxbuf));
561 		return (NULL);
562 	}
563 
564 	/* Mapin the bounce buffer into the DMA space */
565 	if ((err = ddi_dma_addr_bind_handle(rbp->rxb_dh,
566 	    NULL, rbp->rxb_buf, dp->rx_buf_len,
567 	    ((dp->gc.gc_rx_header_len > 0)
568 	    ?(DDI_DMA_RDWR | DDI_DMA_CONSISTENT)
569 	    :(DDI_DMA_READ | DDI_DMA_STREAMING)),
570 	    cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
571 	    NULL,
572 	    rbp->rxb_dmacookie,
573 	    &count)) != DDI_DMA_MAPPED) {
574 
575 		ASSERT(err != DDI_DMA_INUSE);
576 		DPRINTF(0, (CE_WARN,
577 		    "!%s: ddi_dma_addr_bind_handle: failed, err=%d",
578 		    dp->name, __func__, err));
579 
580 		/*
581 		 * we failed to allocate a dma resource
582 		 * for the rx bounce buffer.
583 		 */
584 		ddi_dma_mem_free(&rbp->rxb_bah);
585 		ddi_dma_free_handle(&rbp->rxb_dh);
586 		kmem_free(rbp, sizeof (struct rxbuf));
587 		return (NULL);
588 	}
589 
590 	/* correct the rest of the DMA mapping */
591 	for (i = 1; i < count; i++) {
592 		ddi_dma_nextcookie(rbp->rxb_dh, &rbp->rxb_dmacookie[i]);
593 	}
594 	rbp->rxb_nfrags = count;
595 
596 	/* Now we successfully prepared an rx buffer */
597 	dp->rx_buf_allocated++;
598 
599 	return (rbp);
600 }
601 
602 /* ============================================================== */
603 /*
604  * memory resource management
605  */
606 /* ============================================================== */
/*
 * gem_alloc_memory: allocate and DMA-map all memory the driver needs:
 * one contiguous, consistent area holding the rx descriptor ring, the
 * tx descriptor ring and the chip io area (in that order), plus one
 * streaming bounce buffer per tx buffer.  Returns 0 on success or
 * ENOMEM after releasing any partially allocated resources.
 */
static int
gem_alloc_memory(struct gem_dev *dp)
{
	caddr_t			ring;
	caddr_t			buf;
	size_t			req_size;
	size_t			ring_len;
	size_t			buf_len;
	ddi_dma_cookie_t	ring_cookie;
	ddi_dma_cookie_t	buf_cookie;
	uint_t			count;
	int			i;
	int			err;
	struct txbuf		*tbp;
	int			tx_buf_len;
	ddi_dma_attr_t		dma_attr_txbounce;

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	dp->desc_dma_handle = NULL;
	req_size = dp->rx_desc_size + dp->tx_desc_size + dp->gc.gc_io_area_size;

	if (req_size > 0) {
		/*
		 * Alloc RX/TX descriptors and a io area.
		 */
		if ((err = ddi_dma_alloc_handle(dp->dip,
		    &dp->gc.gc_dma_attr_desc,
		    DDI_DMA_SLEEP, NULL,
		    &dp->desc_dma_handle)) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "!%s: %s: ddi_dma_alloc_handle failed: %d",
			    dp->name, __func__, err);
			return (ENOMEM);
		}

		if ((err = ddi_dma_mem_alloc(dp->desc_dma_handle,
		    req_size, &dp->gc.gc_desc_attr,
		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
		    &ring, &ring_len,
		    &dp->desc_acc_handle)) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "!%s: %s: ddi_dma_mem_alloc failed: "
			    "ret %d, request size: %d",
			    dp->name, __func__, err, (int)req_size);
			ddi_dma_free_handle(&dp->desc_dma_handle);
			return (ENOMEM);
		}

		if ((err = ddi_dma_addr_bind_handle(dp->desc_dma_handle,
		    NULL, ring, ring_len,
		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
		    DDI_DMA_SLEEP, NULL,
		    &ring_cookie, &count)) != DDI_SUCCESS) {
			ASSERT(err != DDI_DMA_INUSE);
			cmn_err(CE_WARN,
			    "!%s: %s: ddi_dma_addr_bind_handle failed: %d",
			    dp->name, __func__, err);
			ddi_dma_mem_free(&dp->desc_acc_handle);
			ddi_dma_free_handle(&dp->desc_dma_handle);
			return (ENOMEM);
		}
		/* gc_dma_attr_desc demands a single cookie */
		ASSERT(count == 1);

		/* set base of rx descriptor ring */
		dp->rx_ring = ring;
		dp->rx_ring_dma = ring_cookie.dmac_laddress;

		/* set base of tx descriptor ring */
		dp->tx_ring = dp->rx_ring + dp->rx_desc_size;
		dp->tx_ring_dma = dp->rx_ring_dma + dp->rx_desc_size;

		/* set base of io area */
		dp->io_area = dp->tx_ring + dp->tx_desc_size;
		dp->io_area_dma = dp->tx_ring_dma + dp->tx_desc_size;
	}

	/*
	 * Prepare DMA resources for tx packets
	 */
	ASSERT(dp->gc.gc_tx_buf_size > 0);

	/* Special dma attribute for tx bounce buffers */
	dma_attr_txbounce = dp->gc.gc_dma_attr_txbuf;
	dma_attr_txbounce.dma_attr_sgllen = 1;
	dma_attr_txbounce.dma_attr_align =
	    max(dma_attr_txbounce.dma_attr_align, IOC_LINESIZE);

	/* Size for tx bounce buffers must be max tx packet size. */
	tx_buf_len = MAXPKTBUF(dp);
	tx_buf_len = ROUNDUP(tx_buf_len, IOC_LINESIZE);

	ASSERT(tx_buf_len >= ETHERMAX+ETHERFCSL);

	for (i = 0, tbp = dp->tx_buf;
	    i < dp->gc.gc_tx_buf_size; i++, tbp++) {

		/* setup bounce buffers for tx packets */
		if ((err = ddi_dma_alloc_handle(dp->dip,
		    &dma_attr_txbounce,
		    DDI_DMA_SLEEP, NULL,
		    &tbp->txb_bdh)) != DDI_SUCCESS) {

			cmn_err(CE_WARN,
		    "!%s: %s ddi_dma_alloc_handle for bounce buffer failed:"
			    " err=%d, i=%d",
			    dp->name, __func__, err, i);
			goto err_alloc_dh;
		}

		if ((err = ddi_dma_mem_alloc(tbp->txb_bdh,
		    tx_buf_len,
		    &dp->gc.gc_buf_attr,
		    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
		    &buf, &buf_len,
		    &tbp->txb_bah)) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
		    "!%s: %s: ddi_dma_mem_alloc for bounce buffer failed"
			    "ret %d, request size %d",
			    dp->name, __func__, err, tx_buf_len);
			ddi_dma_free_handle(&tbp->txb_bdh);
			goto err_alloc_dh;
		}

		if ((err = ddi_dma_addr_bind_handle(tbp->txb_bdh,
		    NULL, buf, buf_len,
		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
		    DDI_DMA_SLEEP, NULL,
		    &buf_cookie, &count)) != DDI_SUCCESS) {
				ASSERT(err != DDI_DMA_INUSE);
				cmn_err(CE_WARN,
	"!%s: %s: ddi_dma_addr_bind_handle for bounce buffer failed: %d",
				    dp->name, __func__, err);
				ddi_dma_mem_free(&tbp->txb_bah);
				ddi_dma_free_handle(&tbp->txb_bdh);
				goto err_alloc_dh;
		}
		/* dma_attr_sgllen == 1 guarantees a single cookie */
		ASSERT(count == 1);
		tbp->txb_buf = buf;
		tbp->txb_buf_dma = buf_cookie.dmac_laddress;
	}

	return (0);

err_alloc_dh:
	/* unwind the i tx bounce buffers allocated so far */
	if (dp->gc.gc_tx_buf_size > 0) {
		while (i-- > 0) {
			(void) ddi_dma_unbind_handle(dp->tx_buf[i].txb_bdh);
			ddi_dma_mem_free(&dp->tx_buf[i].txb_bah);
			ddi_dma_free_handle(&dp->tx_buf[i].txb_bdh);
		}
	}

	/* release the descriptor/io area, if it was allocated */
	if (dp->desc_dma_handle) {
		(void) ddi_dma_unbind_handle(dp->desc_dma_handle);
		ddi_dma_mem_free(&dp->desc_acc_handle);
		ddi_dma_free_handle(&dp->desc_dma_handle);
		dp->desc_dma_handle = NULL;
	}

	return (ENOMEM);
}
769 
770 static void
771 gem_free_memory(struct gem_dev *dp)
772 {
773 	int		i;
774 	struct rxbuf	*rbp;
775 	struct txbuf	*tbp;
776 
777 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
778 
779 	/* Free TX/RX descriptors and tx padding buffer */
780 	if (dp->desc_dma_handle) {
781 		(void) ddi_dma_unbind_handle(dp->desc_dma_handle);
782 		ddi_dma_mem_free(&dp->desc_acc_handle);
783 		ddi_dma_free_handle(&dp->desc_dma_handle);
784 		dp->desc_dma_handle = NULL;
785 	}
786 
787 	/* Free dma handles for Tx */
788 	for (i = dp->gc.gc_tx_buf_size, tbp = dp->tx_buf; i--; tbp++) {
789 		/* Free bounce buffer associated to each txbuf */
790 		(void) ddi_dma_unbind_handle(tbp->txb_bdh);
791 		ddi_dma_mem_free(&tbp->txb_bah);
792 		ddi_dma_free_handle(&tbp->txb_bdh);
793 	}
794 
795 	/* Free rx buffer */
796 	while ((rbp = dp->rx_buf_freelist) != NULL) {
797 
798 		ASSERT(dp->rx_buf_freecnt > 0);
799 
800 		dp->rx_buf_freelist = rbp->rxb_next;
801 		dp->rx_buf_freecnt--;
802 
803 		/* release DMA mapping */
804 		ASSERT(rbp->rxb_dh != NULL);
805 
806 		/* free dma handles for rx bbuf */
807 		/* it has dma mapping always */
808 		ASSERT(rbp->rxb_nfrags > 0);
809 		(void) ddi_dma_unbind_handle(rbp->rxb_dh);
810 
811 		/* free the associated bounce buffer and dma handle */
812 		ASSERT(rbp->rxb_bah != NULL);
813 		ddi_dma_mem_free(&rbp->rxb_bah);
814 		/* free the associated dma handle */
815 		ddi_dma_free_handle(&rbp->rxb_dh);
816 
817 		/* free the base memory of rx buffer management */
818 		kmem_free(rbp, sizeof (struct rxbuf));
819 	}
820 }
821 
822 /* ============================================================== */
823 /*
824  * Rx/Tx descriptor slot management
825  */
826 /* ============================================================== */
827 /*
828  * Initialize an empty rx ring.
829  */
830 static void
831 gem_init_rx_ring(struct gem_dev *dp)
832 {
833 	int		i;
834 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
835 
836 	DPRINTF(1, (CE_CONT, "!%s: %s ring_size:%d, buf_max:%d",
837 	    dp->name, __func__,
838 	    rx_ring_size, dp->gc.gc_rx_buf_max));
839 
840 	/* make a physical chain of rx descriptors */
841 	for (i = 0; i < rx_ring_size; i++) {
842 		(*dp->gc.gc_rx_desc_init)(dp, i);
843 	}
844 	gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
845 
846 	dp->rx_active_head = (seqnum_t)0;
847 	dp->rx_active_tail = (seqnum_t)0;
848 
849 	ASSERT(dp->rx_buf_head == (struct rxbuf *)NULL);
850 	ASSERT(dp->rx_buf_tail == (struct rxbuf *)NULL);
851 }
852 
853 /*
854  * Prepare rx buffers and put them into the rx buffer/descriptor ring.
855  */
856 static void
857 gem_prepare_rx_buf(struct gem_dev *dp)
858 {
859 	int		i;
860 	int		nrbuf;
861 	struct rxbuf	*rbp;
862 
863 	ASSERT(mutex_owned(&dp->intrlock));
864 
865 	/* Now we have no active buffers in rx ring */
866 
867 	nrbuf = min(dp->gc.gc_rx_ring_size, dp->gc.gc_rx_buf_max);
868 	for (i = 0; i < nrbuf; i++) {
869 		if ((rbp = gem_get_rxbuf(dp, B_TRUE)) == NULL) {
870 			break;
871 		}
872 		gem_append_rxbuf(dp, rbp);
873 	}
874 
875 	gem_rx_desc_dma_sync(dp,
876 	    0, dp->gc.gc_rx_ring_size, DDI_DMA_SYNC_FORDEV);
877 }
878 
879 /*
880  * Reclaim active rx buffers in rx buffer ring.
881  */
882 static void
883 gem_clean_rx_buf(struct gem_dev *dp)
884 {
885 	int		i;
886 	struct rxbuf	*rbp;
887 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
888 #ifdef GEM_DEBUG_LEVEL
889 	int		total;
890 #endif
891 	ASSERT(mutex_owned(&dp->intrlock));
892 
893 	DPRINTF(2, (CE_CONT, "!%s: %s: %d buffers are free",
894 	    dp->name, __func__, dp->rx_buf_freecnt));
895 	/*
896 	 * clean up HW descriptors
897 	 */
898 	for (i = 0; i < rx_ring_size; i++) {
899 		(*dp->gc.gc_rx_desc_clean)(dp, i);
900 	}
901 	gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
902 
903 #ifdef GEM_DEBUG_LEVEL
904 	total = 0;
905 #endif
906 	/*
907 	 * Reclaim allocated rx buffers
908 	 */
909 	while ((rbp = dp->rx_buf_head) != NULL) {
910 #ifdef GEM_DEBUG_LEVEL
911 		total++;
912 #endif
913 		/* remove the first one from rx buffer list */
914 		dp->rx_buf_head = rbp->rxb_next;
915 
916 		/* recycle the rxbuf */
917 		gem_free_rxbuf(rbp);
918 	}
919 	dp->rx_buf_tail = (struct rxbuf *)NULL;
920 
921 	DPRINTF(2, (CE_CONT,
922 	    "!%s: %s: %d buffers freeed, total: %d free",
923 	    dp->name, __func__, total, dp->rx_buf_freecnt));
924 }
925 
/*
 * Initialize an empty transmit buffer/descriptor ring.
 * Must be called while the mac is stopped.  The tx buffer sequence
 * numbers are rebased so that the software queue restarts at zero,
 * and all hardware tx descriptors are reinitialized.
 */
static void
gem_init_tx_ring(struct gem_dev *dp)
{
	int		i;
	int		tx_buf_size = dp->gc.gc_tx_buf_size;
	int		tx_ring_size = dp->gc.gc_tx_ring_size;

	DPRINTF(2, (CE_CONT, "!%s: %s: ring_size:%d, buf_size:%d",
	    dp->name, __func__,
	    dp->gc.gc_tx_ring_size, dp->gc.gc_tx_buf_size));

	ASSERT(!dp->mac_active);

	/* initialize active list and free list */
	/* fold the current softq head into the slot base, so that */
	/* sequence number 0 maps to the same physical slot as before */
	dp->tx_slots_base =
	    SLOT(dp->tx_slots_base + dp->tx_softq_head, tx_buf_size);
	dp->tx_softq_tail -= dp->tx_softq_head;
	dp->tx_softq_head = (seqnum_t)0;

	/* no buffers are in flight: active queue is empty */
	dp->tx_active_head = dp->tx_softq_head;
	dp->tx_active_tail = dp->tx_softq_head;

	dp->tx_free_head   = dp->tx_softq_tail;
	dp->tx_free_tail   = dp->gc.gc_tx_buf_limit;

	dp->tx_desc_head = (seqnum_t)0;
	dp->tx_desc_tail = (seqnum_t)0;
	dp->tx_desc_intr = (seqnum_t)0;

	/* reinitialize the hardware descriptors */
	for (i = 0; i < tx_ring_size; i++) {
		(*dp->gc.gc_tx_desc_init)(dp, i);
	}
	gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
}
963 
964 __INLINE__
965 static void
966 gem_txbuf_free_dma_resources(struct txbuf *tbp)
967 {
968 	if (tbp->txb_mp) {
969 		freemsg(tbp->txb_mp);
970 		tbp->txb_mp = NULL;
971 	}
972 	tbp->txb_nfrags = 0;
973 	tbp->txb_flag = 0;
974 }
975 #pragma inline(gem_txbuf_free_dma_resources)
976 
/*
 * reclaim active tx buffers and reset positions in tx rings.
 * Called with the mac stopped; every queued-but-unsent packet is
 * dropped (counted as errxmt) and the active/softq/free queues are
 * collapsed onto tx_free_head.
 */
static void
gem_clean_tx_buf(struct gem_dev *dp)
{
	int		i;
	seqnum_t	head;
	seqnum_t	tail;
	seqnum_t	sn;
	struct txbuf	*tbp;
	int		tx_ring_size = dp->gc.gc_tx_ring_size;
#ifdef GEM_DEBUG_LEVEL
	int		err;
#endif

	ASSERT(!dp->mac_active);
	ASSERT(dp->tx_busy == 0);
	ASSERT(dp->tx_softq_tail == dp->tx_free_head);

	/*
	 * clean up all HW descriptors
	 */
	for (i = 0; i < tx_ring_size; i++) {
		(*dp->gc.gc_tx_desc_clean)(dp, i);
	}
	gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);

	/* dequeue all active and loaded buffers */
	head = dp->tx_active_head;
	tail = dp->tx_softq_tail;

	ASSERT(dp->tx_free_head - head >= 0);
	tbp = GET_TXBUF(dp, head);
	for (sn = head; sn != tail; sn++) {
		/* drop the packet and count it as a tx error */
		gem_txbuf_free_dma_resources(tbp);
		ASSERT(tbp->txb_mp == NULL);
		dp->stats.errxmt++;
		tbp = tbp->txb_next;
	}

#ifdef GEM_DEBUG_LEVEL
	/* verify that no tx dma resources remain in use anywhere else */
	err = 0;
	while (sn != head + dp->gc.gc_tx_buf_size) {
		if (tbp->txb_mp || tbp->txb_nfrags) {
			DPRINTF(0, (CE_CONT,
			    "%s: %s: sn:%d[%d] mp:%p nfrags:%d",
			    dp->name, __func__,
			    sn, SLOT(sn, dp->gc.gc_tx_buf_size),
			    tbp->txb_mp, tbp->txb_nfrags));
			err = 1;
		}
		sn++;
		tbp = tbp->txb_next;
	}

	if (err) {
		gem_dump_txbuf(dp, CE_WARN,
		    "gem_clean_tx_buf: tbp->txb_mp != NULL");
	}
#endif
	/* recycle buffers, now no active tx buffers in the ring */
	dp->tx_free_tail += tail - head;
	ASSERT(dp->tx_free_tail == dp->tx_free_head + dp->gc.gc_tx_buf_limit);

	/* fix positions in tx buffer rings */
	dp->tx_active_head = dp->tx_free_head;
	dp->tx_active_tail = dp->tx_free_head;
	dp->tx_softq_head  = dp->tx_free_head;
	dp->tx_softq_tail  = dp->tx_free_head;
}
1049 
1050 /*
1051  * Reclaim transmitted buffers from tx buffer/descriptor ring.
1052  */
1053 __INLINE__ int
1054 gem_reclaim_txbuf(struct gem_dev *dp)
1055 {
1056 	struct txbuf	*tbp;
1057 	uint_t		txstat;
1058 	int		err = GEM_SUCCESS;
1059 	seqnum_t	head;
1060 	seqnum_t	tail;
1061 	seqnum_t	sn;
1062 	seqnum_t	desc_head;
1063 	int		tx_ring_size = dp->gc.gc_tx_ring_size;
1064 	uint_t (*tx_desc_stat)(struct gem_dev *dp,
1065 	    int slot, int ndesc) = dp->gc.gc_tx_desc_stat;
1066 	clock_t		now;
1067 
1068 	now = ddi_get_lbolt();
1069 	if (now == (clock_t)0) {
1070 		/* make non-zero timestamp */
1071 		now--;
1072 	}
1073 
1074 	mutex_enter(&dp->xmitlock);
1075 
1076 	head = dp->tx_active_head;
1077 	tail = dp->tx_active_tail;
1078 
1079 #if GEM_DEBUG_LEVEL > 2
1080 	if (head != tail) {
1081 		cmn_err(CE_CONT, "!%s: %s: "
1082 		    "testing active_head:%d[%d], active_tail:%d[%d]",
1083 		    dp->name, __func__,
1084 		    head, SLOT(head, dp->gc.gc_tx_buf_size),
1085 		    tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1086 	}
1087 #endif
1088 #ifdef DEBUG
1089 	if (dp->tx_reclaim_busy == 0) {
1090 		/* check tx buffer management consistency */
1091 		ASSERT(dp->tx_free_tail - dp->tx_active_head
1092 		    == dp->gc.gc_tx_buf_limit);
1093 		/* EMPTY */
1094 	}
1095 #endif
1096 	dp->tx_reclaim_busy++;
1097 
1098 	/* sync all active HW descriptors */
1099 	gem_tx_desc_dma_sync(dp,
1100 	    SLOT(dp->tx_desc_head, tx_ring_size),
1101 	    dp->tx_desc_tail - dp->tx_desc_head,
1102 	    DDI_DMA_SYNC_FORKERNEL);
1103 
1104 	tbp = GET_TXBUF(dp, head);
1105 	desc_head = dp->tx_desc_head;
1106 	for (sn = head; sn != tail;
1107 	    dp->tx_active_head = (++sn), tbp = tbp->txb_next) {
1108 		int	ndescs;
1109 
1110 		ASSERT(tbp->txb_desc == desc_head);
1111 
1112 		ndescs = tbp->txb_ndescs;
1113 		if (ndescs == 0) {
1114 			/* skip errored descriptors */
1115 			continue;
1116 		}
1117 		txstat = (*tx_desc_stat)(dp,
1118 		    SLOT(tbp->txb_desc, tx_ring_size), ndescs);
1119 
1120 		if (txstat == 0) {
1121 			/* not transmitted yet */
1122 			break;
1123 		}
1124 
1125 		if (!dp->tx_blocked && (tbp->txb_flag & GEM_TXFLAG_INTR)) {
1126 			dp->tx_blocked = now;
1127 		}
1128 
1129 		ASSERT(txstat & (GEM_TX_DONE | GEM_TX_ERR));
1130 
1131 		if (txstat & GEM_TX_ERR) {
1132 			err = GEM_FAILURE;
1133 			cmn_err(CE_WARN, "!%s: tx error at desc %d[%d]",
1134 			    dp->name, sn, SLOT(sn, tx_ring_size));
1135 		}
1136 #if GEM_DEBUG_LEVEL > 4
1137 		if (now - tbp->txb_stime >= 50) {
1138 			cmn_err(CE_WARN, "!%s: tx delay while %d mS",
1139 			    dp->name, (now - tbp->txb_stime)*10);
1140 		}
1141 #endif
1142 		/* free transmitted descriptors */
1143 		desc_head += ndescs;
1144 	}
1145 
1146 	if (dp->tx_desc_head != desc_head) {
1147 		/* we have reclaimed one or more tx buffers */
1148 		dp->tx_desc_head = desc_head;
1149 
1150 		/* If we passed the next interrupt position, update it */
1151 		if (desc_head - dp->tx_desc_intr > 0) {
1152 			dp->tx_desc_intr = desc_head;
1153 		}
1154 	}
1155 	mutex_exit(&dp->xmitlock);
1156 
1157 	/* free dma mapping resources associated with transmitted tx buffers */
1158 	tbp = GET_TXBUF(dp, head);
1159 	tail = sn;
1160 #if GEM_DEBUG_LEVEL > 2
1161 	if (head != tail) {
1162 		cmn_err(CE_CONT, "%s: freeing head:%d[%d], tail:%d[%d]",
1163 		    __func__,
1164 		    head, SLOT(head, dp->gc.gc_tx_buf_size),
1165 		    tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1166 	}
1167 #endif
1168 	for (sn = head; sn != tail; sn++, tbp = tbp->txb_next) {
1169 		gem_txbuf_free_dma_resources(tbp);
1170 	}
1171 
1172 	/* recycle the tx buffers */
1173 	mutex_enter(&dp->xmitlock);
1174 	if (--dp->tx_reclaim_busy == 0) {
1175 		/* we are the last thread who can update free tail */
1176 #if GEM_DEBUG_LEVEL > 4
1177 		/* check all resouces have been deallocated */
1178 		sn = dp->tx_free_tail;
1179 		tbp = GET_TXBUF(dp, new_tail);
1180 		while (sn != dp->tx_active_head + dp->gc.gc_tx_buf_limit) {
1181 			if (tbp->txb_nfrags) {
1182 				/* in use */
1183 				break;
1184 			}
1185 			ASSERT(tbp->txb_mp == NULL);
1186 			tbp = tbp->txb_next;
1187 			sn++;
1188 		}
1189 		ASSERT(dp->tx_active_head + dp->gc.gc_tx_buf_limit == sn);
1190 #endif
1191 		dp->tx_free_tail =
1192 		    dp->tx_active_head + dp->gc.gc_tx_buf_limit;
1193 	}
1194 	if (!dp->mac_active) {
1195 		/* someone may be waiting for me. */
1196 		cv_broadcast(&dp->tx_drain_cv);
1197 	}
1198 #if GEM_DEBUG_LEVEL > 2
1199 	cmn_err(CE_CONT, "!%s: %s: called, "
1200 	    "free_head:%d free_tail:%d(+%d) added:%d",
1201 	    dp->name, __func__,
1202 	    dp->tx_free_head, dp->tx_free_tail,
1203 	    dp->tx_free_tail - dp->tx_free_head, tail - head);
1204 #endif
1205 	mutex_exit(&dp->xmitlock);
1206 
1207 	return (err);
1208 }
1209 #pragma inline(gem_reclaim_txbuf)
1210 
1211 
1212 /*
1213  * Make tx descriptors in out-of-order manner
1214  */
/*
 * Write HW tx descriptors for the tx buffers in [start_slot, end_slot).
 * The caller-supplied flags (e.g. GEM_TXFLAG_HEAD) apply to the first
 * descriptor only; each buffer's own txb_flag is always merged in.
 * NOTE(review): the do-while body runs at least once, so this assumes
 * start_slot != end_slot — callers must not pass an empty range.
 */
static void
gem_tx_load_descs_oo(struct gem_dev *dp,
	seqnum_t start_slot, seqnum_t end_slot, uint64_t flags)
{
	seqnum_t	sn;
	struct txbuf	*tbp;
	int	tx_ring_size = dp->gc.gc_tx_ring_size;
	int	(*tx_desc_write)
	    (struct gem_dev *dp, int slot,
	    ddi_dma_cookie_t *dmacookie,
	    int frags, uint64_t flag) = dp->gc.gc_tx_desc_write;
	clock_t	now = ddi_get_lbolt();	/* timestamp for tx timeout checks */

	sn = start_slot;
	tbp = GET_TXBUF(dp, sn);
	do {
#if GEM_DEBUG_LEVEL > 1
		if (dp->tx_cnt < 100) {
			dp->tx_cnt++;
			flags |= GEM_TXFLAG_INTR;
		}
#endif
		/* write a tx descriptor */
		tbp->txb_desc = sn;
		tbp->txb_ndescs = (*tx_desc_write)(dp,
		    SLOT(sn, tx_ring_size),
		    tbp->txb_dmacookie,
		    tbp->txb_nfrags, flags | tbp->txb_flag);
		tbp->txb_stime = now;
		ASSERT(tbp->txb_ndescs == 1);

		/* the extra flags were for the first descriptor only */
		flags = 0;
		sn++;
		tbp = tbp->txb_next;
	} while (sn != end_slot);
}
1251 
/*
 * Copy the whole mblk chain `mp' into the tx bounce buffer of `tbp',
 * pad it to the minimum ethernet frame size when required, sync it for
 * the device, and fill in txb_dmacookie[0] to describe the result.
 * The mblk is saved in txb_mp so it can be freed after transmission.
 * Returns the number of bytes loaded into the buffer.
 */
__INLINE__
static size_t
gem_setup_txbuf_copy(struct gem_dev *dp, mblk_t *mp, struct txbuf *tbp)
{
	size_t			min_pkt;
	caddr_t			bp;
	size_t			off;
	mblk_t			*tp;
	size_t			len;
	uint64_t		flag;

	ASSERT(tbp->txb_mp == NULL);

	/* we use bounce buffer for the packet */
	min_pkt = ETHERMIN;
	bp = tbp->txb_buf;
	off = 0;
	tp = mp;

	flag = tbp->txb_flag;
	if (flag & GEM_TXFLAG_SWVTAG) {
		/* need to increase min packet size */
		min_pkt += VTAG_SIZE;
		ASSERT((flag & GEM_TXFLAG_VTAG) == 0);
	}

	/* copy the rest */
	for (; tp; tp = tp->b_cont) {
		if ((len = (long)tp->b_wptr - (long)tp->b_rptr) > 0) {
			bcopy(tp->b_rptr, &bp[off], len);
			off += len;
		}
	}

	if (off < min_pkt &&
	    (min_pkt > ETHERMIN || !dp->gc.gc_tx_auto_pad)) {
		/*
		 * Extend the packet to minimum packet size explicitly.
		 * For software vlan packets, we shouldn't use tx autopad
		 * function because nics may not be aware of vlan.
		 * we must keep 46 octet of payload even if we use vlan.
		 */
		bzero(&bp[off], min_pkt - off);
		off = min_pkt;
	}

	/* make the bounce buffer visible to the device before tx */
	(void) ddi_dma_sync(tbp->txb_bdh, (off_t)0, off, DDI_DMA_SYNC_FORDEV);

	tbp->txb_dmacookie[0].dmac_laddress = tbp->txb_buf_dma;
	tbp->txb_dmacookie[0].dmac_size = off;

	DPRINTF(2, (CE_CONT,
	    "!%s: %s: copy: addr:0x%llx len:0x%x, vtag:0x%04x, min_pkt:%d",
	    dp->name, __func__,
	    tbp->txb_dmacookie[0].dmac_laddress,
	    tbp->txb_dmacookie[0].dmac_size,
	    (flag & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT,
	    min_pkt));

	/* save misc info */
	tbp->txb_mp = mp;
	tbp->txb_nfrags = 1;
#ifdef DEBUG_MULTIFRAGS
	/* debug aid: split the single cookie into three to exercise
	 * multi-fragment descriptor paths in chip drivers */
	if (dp->gc.gc_tx_max_frags >= 3 &&
	    tbp->txb_dmacookie[0].dmac_size > 16*3) {
		tbp->txb_dmacookie[1].dmac_laddress =
		    tbp->txb_dmacookie[0].dmac_laddress + 16;
		tbp->txb_dmacookie[2].dmac_laddress =
		    tbp->txb_dmacookie[1].dmac_laddress + 16;

		tbp->txb_dmacookie[2].dmac_size =
		    tbp->txb_dmacookie[0].dmac_size - 16*2;
		tbp->txb_dmacookie[1].dmac_size = 16;
		tbp->txb_dmacookie[0].dmac_size = 16;
		tbp->txb_nfrags  = 3;
	}
#endif
	return (off);
}
#pragma inline(gem_setup_txbuf_copy)
1332 
/*
 * Push the tx buffers queued in the soft queue down to the hardware by
 * calling the chip driver's gc_tx_start with the first descriptor slot
 * and the number of descriptors to kick.  Advances tx_desc_tail,
 * tx_softq_head and tx_active_tail.  Caller must hold xmitlock.
 */
__INLINE__
static void
gem_tx_start_unit(struct gem_dev *dp)
{
	seqnum_t	head;
	seqnum_t	tail;
	struct txbuf	*tbp_head;
	struct txbuf	*tbp_tail;

	/* update HW descriptors from soft queue */
	ASSERT(mutex_owned(&dp->xmitlock));
	ASSERT(dp->tx_softq_head == dp->tx_active_tail);

	head = dp->tx_softq_head;
	tail = dp->tx_softq_tail;

	DPRINTF(1, (CE_CONT,
	    "%s: %s: called, softq %d %d[+%d], desc %d %d[+%d]",
	    dp->name, __func__, head, tail, tail - head,
	    dp->tx_desc_head, dp->tx_desc_tail,
	    dp->tx_desc_tail - dp->tx_desc_head));

	/* the soft queue must not be empty */
	ASSERT(tail - head > 0);

	dp->tx_desc_tail = tail;

	tbp_head = GET_TXBUF(dp, head);
	tbp_tail = GET_TXBUF(dp, tail - 1);

	ASSERT(tbp_tail->txb_desc + tbp_tail->txb_ndescs == dp->tx_desc_tail);

	dp->gc.gc_tx_start(dp,
	    SLOT(tbp_head->txb_desc, dp->gc.gc_tx_ring_size),
	    tbp_tail->txb_desc + tbp_tail->txb_ndescs - tbp_head->txb_desc);

	/* advance softq head and active tail */
	dp->tx_softq_head = dp->tx_active_tail = tail;
}
#pragma inline(gem_tx_start_unit)
1372 
1373 #ifdef GEM_DEBUG_LEVEL
1374 static int gem_send_cnt[10];
1375 #endif
1376 #define	PKT_MIN_SIZE	(sizeof (struct ether_header) + 10 + VTAG_SIZE)
1377 #define	EHLEN	(sizeof (struct ether_header))
1378 /*
1379  * check ether packet type and ip protocol
1380  */
/*
 * Examine the ethernet header of `mp' and return the tx flags needed to
 * transmit it (hardware vlan tag insertion or software vlan handling).
 * If the first mblk is shorter than PKT_MIN_SIZE, the leading bytes of
 * the chain are gathered into the caller-supplied work buffer `bp' so
 * the header can be read contiguously; otherwise the mblk's own data
 * is inspected in place.
 */
static uint64_t
gem_txbuf_options(struct gem_dev *dp, mblk_t *mp, uint8_t *bp)
{
	mblk_t		*tp;
	ssize_t		len;
	uint_t		vtag;
	int		off;
	uint64_t	flag;

	flag = 0ULL;

	/*
	 * prepare continuous header of the packet for protocol analysis
	 */
	if ((long)mp->b_wptr - (long)mp->b_rptr < PKT_MIN_SIZE) {
		/* we use work buffer to copy mblk */
		for (tp = mp, off = 0;
		    tp && (off < PKT_MIN_SIZE);
		    tp = tp->b_cont, off += len) {
			len = (long)tp->b_wptr - (long)tp->b_rptr;
			len = min(len, PKT_MIN_SIZE - off);
			bcopy(tp->b_rptr, &bp[off], len);
		}
	} else {
		/* we can use mblk without copy */
		bp = mp->b_rptr;
	}

	/* process vlan tag for GLD v3 */
	if (GET_NET16(&bp[VTAG_OFF]) == VTAG_TPID) {
		if (dp->misc_flag & GEM_VLAN_HARD) {
			/* let the nic insert/strip the tag in hardware */
			vtag = GET_NET16(&bp[VTAG_OFF + 2]);
			ASSERT(vtag);
			flag |= vtag << GEM_TXFLAG_VTAG_SHIFT;
		} else {
			/* the tag stays in the frame; handle in software */
			flag |= GEM_TXFLAG_SWVTAG;
		}
	}
	return (flag);
}
1421 #undef EHLEN
1422 #undef PKT_MIN_SIZE
1423 /*
1424  * gem_send_common is an exported function because hw depend routines may
1425  * use it for sending control frames like setup frames for 2114x chipset.
1426  */
mblk_t *
gem_send_common(struct gem_dev *dp, mblk_t *mp_head, uint32_t flags)
{
	/*
	 * Queue a chain of packets (linked via b_next) for transmission.
	 * Returns NULL when all packets were consumed, or the remaining
	 * sub-chain when tx resources ran out (the caller retries later).
	 * GEM_SEND_CTRL in `flags' marks driver-internal control frames
	 * that may be sent while the mac is not active.
	 */
	int			nmblk;
	int			avail;
	mblk_t			*tp;
	mblk_t			*mp;
	int			i;
	struct txbuf		*tbp;
	seqnum_t		head;
	uint64_t		load_flags;
	uint64_t		len_total = 0;
	uint32_t		bcast = 0;
	uint32_t		mcast = 0;

	ASSERT(mp_head != NULL);

	/* count the packets in the chain */
	mp = mp_head;
	nmblk = 1;
	while ((mp = mp->b_next) != NULL) {
		nmblk++;
	}
#ifdef GEM_DEBUG_LEVEL
	gem_send_cnt[0]++;
	gem_send_cnt[min(nmblk, 9)]++;
#endif
	/*
	 * Aquire resources
	 */
	mutex_enter(&dp->xmitlock);
	if (dp->mac_suspended) {
		/* the device is suspended; drop the whole chain */
		mutex_exit(&dp->xmitlock);
		mp = mp_head;
		while (mp) {
			tp = mp->b_next;
			freemsg(mp);
			mp = tp;
		}
		return (NULL);
	}

	if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
		/* don't send data packets while mac isn't active */
		/* XXX - should we discard packets? */
		mutex_exit(&dp->xmitlock);
		return (mp_head);
	}

	/* allocate free slots */
	head = dp->tx_free_head;
	avail = dp->tx_free_tail - head;

	DPRINTF(2, (CE_CONT,
	    "!%s: %s: called, free_head:%d free_tail:%d(+%d) req:%d",
	    dp->name, __func__,
	    dp->tx_free_head, dp->tx_free_tail, avail, nmblk));

	/* tx_max_packets throttles per-call usage of the tx ring */
	avail = min(avail, dp->tx_max_packets);

	if (nmblk > avail) {
		if (avail == 0) {
			/* no resources; short cut */
			DPRINTF(2, (CE_CONT, "!%s: no resources", __func__));
			dp->tx_max_packets = max(dp->tx_max_packets - 1, 1);
			goto done;
		}
		nmblk = avail;
	}

	/* claim the slots; tx_busy lets us prepare them without the lock */
	dp->tx_free_head = head + nmblk;
	load_flags = ((dp->tx_busy++) == 0) ? GEM_TXFLAG_HEAD : 0;

	/* update last interrupt position if tx buffers exhaust.  */
	if (nmblk == avail) {
		tbp = GET_TXBUF(dp, head + avail - 1);
		tbp->txb_flag = GEM_TXFLAG_INTR;
		dp->tx_desc_intr = head + avail;
	}
	mutex_exit(&dp->xmitlock);

	tbp = GET_TXBUF(dp, head);

	/* copy each packet into its claimed tx buffer (lock dropped) */
	for (i = nmblk; i > 0; i--, tbp = tbp->txb_next) {
		uint8_t		*bp;
		uint64_t	txflag;

		/* remove one from the mblk list */
		ASSERT(mp_head != NULL);
		mp = mp_head;
		mp_head = mp_head->b_next;
		mp->b_next = NULL;

		/* statistics for non-unicast packets */
		bp = mp->b_rptr;
		if ((bp[0] & 1) && (flags & GEM_SEND_CTRL) == 0) {
			if (bcmp(bp, gem_etherbroadcastaddr.ether_addr_octet,
			    ETHERADDRL) == 0) {
				bcast++;
			} else {
				mcast++;
			}
		}

		/* save misc info */
		txflag = tbp->txb_flag;
		txflag |= (flags & GEM_SEND_CTRL) << GEM_TXFLAG_PRIVATE_SHIFT;
		txflag |= gem_txbuf_options(dp, mp, (uint8_t *)tbp->txb_buf);
		tbp->txb_flag = txflag;

		len_total += gem_setup_txbuf_copy(dp, mp, tbp);
	}

	(void) gem_tx_load_descs_oo(dp, head, head + nmblk, load_flags);

	/* Append the tbp at the tail of the active tx buffer list */
	mutex_enter(&dp->xmitlock);

	if ((--dp->tx_busy) == 0) {
		/* extend the tail of softq, as new packets have been ready. */
		dp->tx_softq_tail = dp->tx_free_head;

		if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
			/*
			 * The device status has changed while we are
			 * preparing tx buf.
			 * As we are the last one that make tx non-busy.
			 * wake up someone who may wait for us.
			 */
			cv_broadcast(&dp->tx_drain_cv);
		} else {
			ASSERT(dp->tx_softq_tail - dp->tx_softq_head > 0);
			gem_tx_start_unit(dp);
		}
	}
	dp->stats.obytes += len_total;
	dp->stats.opackets += nmblk;
	dp->stats.obcast += bcast;
	dp->stats.omcast += mcast;
done:
	mutex_exit(&dp->xmitlock);

	/* NULL if the chain was fully consumed; otherwise the remainder */
	return (mp_head);
}
1570 
1571 /* ========================================================== */
1572 /*
1573  * error detection and restart routines
1574  */
1575 /* ========================================================== */
/*
 * Stop, reset and re-initialize the nic after a fatal error.
 * `flags' is passed to gem_mac_stop (e.g. GEM_RESTART_KEEP_BUF to keep
 * pending buffers, GEM_RESTART_NOWAIT to skip tx drain).  The mac is
 * restarted only when the link is currently up; otherwise the restart
 * is deferred to gem_mii_link_check.  Caller must hold intrlock.
 * Returns GEM_SUCCESS or GEM_FAILURE.
 */
int
gem_restart_nic(struct gem_dev *dp, uint_t flags)
{
	ASSERT(mutex_owned(&dp->intrlock));

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
#ifdef GEM_DEBUG_LEVEL
#if GEM_DEBUG_LEVEL > 1
	gem_dump_txbuf(dp, CE_CONT, "gem_restart_nic");
#endif
#endif

	if (dp->mac_suspended) {
		/* should we return GEM_FAILURE ? */
		return (GEM_FAILURE);
	}

	/*
	 * We should avoid calling any routines except xxx_chip_reset
	 * when we are resuming the system.
	 */
	if (dp->mac_active) {
		if (flags & GEM_RESTART_KEEP_BUF) {
			/* stop rx gracefully */
			dp->rxmode &= ~RXMODE_ENABLE;
			(void) (*dp->gc.gc_set_rx_filter)(dp);
		}
		(void) gem_mac_stop(dp, flags);
	}

	/* reset the chip. */
	if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
		cmn_err(CE_WARN, "%s: %s: failed to reset chip",
		    dp->name, __func__);
		goto err;
	}

	if (gem_mac_init(dp) != GEM_SUCCESS) {
		goto err;
	}

	/* setup media mode if the link have been up */
	if (dp->mii_state == MII_STATE_LINKUP) {
		if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
			goto err;
		}
	}

	/* setup mac address and enable rx filter */
	dp->rxmode |= RXMODE_ENABLE;
	if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
		goto err;
	}

	/*
	 * XXX - a panic happened because of linkdown.
	 * We must check mii_state here, because the link can be down just
	 * before the restart event happen. If the link is down now,
	 * gem_mac_start() will be called from gem_mii_link_check() when
	 * the link become up later.
	 */
	if (dp->mii_state == MII_STATE_LINKUP) {
		/* restart the nic */
		ASSERT(!dp->mac_active);
		(void) gem_mac_start(dp);
	}
	return (GEM_SUCCESS);
err:
	return (GEM_FAILURE);
}
1646 
1647 
/*
 * Periodic tx watchdog.  Reclaims finished tx buffers, detects a stuck
 * transmitter or a blocked tx stream, restarts the nic when a timeout
 * is found, and reschedules itself via timeout(9F).
 */
static void
gem_tx_timeout(struct gem_dev *dp)
{
	clock_t		now;
	boolean_t	tx_sched;
	struct txbuf	*tbp;

	mutex_enter(&dp->intrlock);

	tx_sched = B_FALSE;
	now = ddi_get_lbolt();

	mutex_enter(&dp->xmitlock);
	if (!dp->mac_active || dp->mii_state != MII_STATE_LINKUP) {
		/* nothing to watch while tx is down or link is down */
		mutex_exit(&dp->xmitlock);
		goto schedule_next;
	}
	mutex_exit(&dp->xmitlock);

	/* reclaim transmitted buffers to check the trasmitter hangs or not. */
	if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
		/* tx error happened, reset transmitter in the chip */
		(void) gem_restart_nic(dp, 0);
		tx_sched = B_TRUE;
		dp->tx_blocked = (clock_t)0;

		goto schedule_next;
	}

	mutex_enter(&dp->xmitlock);
	/* check if the transmitter thread is stuck */
	if (dp->tx_active_head == dp->tx_active_tail) {
		/* no tx buffer is loaded to the nic */
		if (dp->tx_blocked &&
		    now - dp->tx_blocked > dp->gc.gc_tx_timeout_interval) {
			/* blocked too long with nothing in flight */
			gem_dump_txbuf(dp, CE_WARN,
			    "gem_tx_timeout: tx blocked");
			tx_sched = B_TRUE;
			dp->tx_blocked = (clock_t)0;
		}
		mutex_exit(&dp->xmitlock);
		goto schedule_next;
	}

	/* the oldest in-flight buffer decides whether tx has timed out */
	tbp = GET_TXBUF(dp, dp->tx_active_head);
	if (now - tbp->txb_stime < dp->gc.gc_tx_timeout) {
		mutex_exit(&dp->xmitlock);
		goto schedule_next;
	}
	mutex_exit(&dp->xmitlock);

	gem_dump_txbuf(dp, CE_WARN, "gem_tx_timeout: tx timeout");

	/* discard untransmitted packet and restart tx.  */
	(void) gem_restart_nic(dp, GEM_RESTART_NOWAIT);
	tx_sched = B_TRUE;
	dp->tx_blocked = (clock_t)0;

schedule_next:
	mutex_exit(&dp->intrlock);

	/* restart the downstream if needed */
	if (tx_sched) {
		mac_tx_update(dp->mh);
	}

	DPRINTF(4, (CE_CONT,
	    "!%s: blocked:%d active_head:%d active_tail:%d desc_intr:%d",
	    dp->name, BOOLEAN(dp->tx_blocked),
	    dp->tx_active_head, dp->tx_active_tail, dp->tx_desc_intr));
	dp->timeout_id =
	    timeout((void (*)(void *))gem_tx_timeout,
	    (void *)dp, dp->gc.gc_tx_timeout_interval);
}
1722 
1723 /* ================================================================== */
1724 /*
1725  * Interrupt handler
1726  */
1727 /* ================================================================== */
/*
 * Append the list of rx buffers `rbp_head' to the active rx buffer
 * list and write a HW rx descriptor for each one, advancing
 * rx_active_tail.  Caller must hold intrlock.
 */
__INLINE__
static void
gem_append_rxbuf(struct gem_dev *dp, struct rxbuf *rbp_head)
{
	struct rxbuf	*rbp;
	seqnum_t	tail;
	int		rx_ring_size = dp->gc.gc_rx_ring_size;

	ASSERT(rbp_head != NULL);
	ASSERT(mutex_owned(&dp->intrlock));

	DPRINTF(3, (CE_CONT, "!%s: %s: slot_head:%d, slot_tail:%d",
	    dp->name, __func__, dp->rx_active_head, dp->rx_active_tail));

	/*
	 * Add new buffers into active rx buffer list
	 */
	if (dp->rx_buf_head == NULL) {
		dp->rx_buf_head = rbp_head;
		ASSERT(dp->rx_buf_tail == NULL);
	} else {
		dp->rx_buf_tail->rxb_next = rbp_head;
	}

	tail = dp->rx_active_tail;
	for (rbp = rbp_head; rbp; rbp = rbp->rxb_next) {
		/* need to notify the tail for the lower layer */
		dp->rx_buf_tail = rbp;

		dp->gc.gc_rx_desc_write(dp,
		    SLOT(tail, rx_ring_size),
		    rbp->rxb_dmacookie,
		    rbp->rxb_nfrags);

		dp->rx_active_tail = tail = tail + 1;
	}
}
#pragma inline(gem_append_rxbuf)
1766 
1767 mblk_t *
1768 gem_get_packet_default(struct gem_dev *dp, struct rxbuf *rbp, size_t len)
1769 {
1770 	int		rx_header_len = dp->gc.gc_rx_header_len;
1771 	uint8_t		*bp;
1772 	mblk_t		*mp;
1773 
1774 	/* allocate a new mblk */
1775 	if (mp = allocb(len + VTAG_SIZE, BPRI_MED)) {
1776 		ASSERT(mp->b_next == NULL);
1777 		ASSERT(mp->b_cont == NULL);
1778 
1779 		mp->b_rptr += VTAG_SIZE;
1780 		bp = mp->b_rptr;
1781 		mp->b_wptr = bp + len;
1782 
1783 		/*
1784 		 * flush the range of the entire buffer to invalidate
1785 		 * all of corresponding dirty entries in iocache.
1786 		 */
1787 		(void) ddi_dma_sync(rbp->rxb_dh, rx_header_len,
1788 		    0, DDI_DMA_SYNC_FORKERNEL);
1789 
1790 		bcopy(rbp->rxb_buf + rx_header_len, bp, len);
1791 	}
1792 	return (mp);
1793 }
1794 
1795 #ifdef GEM_DEBUG_LEVEL
1796 uint_t	gem_rx_pkts[17];
1797 #endif
1798 
1799 
/*
 * Service received frames: walk the active rx buffer list, copy each
 * completed frame into an mblk, refill the ring with the recycled
 * buffers, and pass the received chain up to the mac layer.
 * Caller must hold intrlock (dropped temporarily around mac_rx).
 * Returns the number of descriptors processed.
 */
int
gem_receive(struct gem_dev *dp)
{
	uint64_t	len_total = 0;
	struct rxbuf	*rbp;
	mblk_t		*mp;
	int		cnt = 0;
	uint64_t	rxstat;
	struct rxbuf	*newbufs;
	struct rxbuf	**newbufs_tailp;
	mblk_t		*rx_head;
	mblk_t 		**rx_tailp;
	int		rx_ring_size = dp->gc.gc_rx_ring_size;
	seqnum_t	active_head;
	uint64_t	(*rx_desc_stat)(struct gem_dev *dp,
	    int slot, int ndesc);
	int		ethermin = ETHERMIN;
	int		ethermax = dp->mtu + sizeof (struct ether_header);
	int		rx_header_len = dp->gc.gc_rx_header_len;

	ASSERT(mutex_owned(&dp->intrlock));

	DPRINTF(3, (CE_CONT, "!%s: gem_receive: rx_buf_head:%p",
	    dp->name, dp->rx_buf_head));

	rx_desc_stat  = dp->gc.gc_rx_desc_stat;
	newbufs_tailp = &newbufs;
	rx_tailp = &rx_head;
	for (active_head = dp->rx_active_head;
	    (rbp = dp->rx_buf_head) != NULL; active_head++) {
		int		len;
		if (cnt == 0) {
			/* sync a batch of descriptors at a time */
			cnt = max(dp->poll_pkt_delay*2, 10);
			cnt = min(cnt,
			    dp->rx_active_tail - active_head);
			gem_rx_desc_dma_sync(dp,
			    SLOT(active_head, rx_ring_size),
			    cnt,
			    DDI_DMA_SYNC_FORKERNEL);
		}

		if (rx_header_len > 0) {
			(void) ddi_dma_sync(rbp->rxb_dh, 0,
			    rx_header_len, DDI_DMA_SYNC_FORKERNEL);
		}

		if (((rxstat = (*rx_desc_stat)(dp,
		    SLOT(active_head, rx_ring_size),
		    rbp->rxb_nfrags))
		    & (GEM_RX_DONE | GEM_RX_ERR)) == 0) {
			/* not received yet */
			break;
		}

		/* Remove the head of the rx buffer list */
		dp->rx_buf_head = rbp->rxb_next;
		cnt--;


		if (rxstat & GEM_RX_ERR) {
			/* errored frame; recycle the buffer without copy */
			goto next;
		}

		len = rxstat & GEM_RX_LEN;
		DPRINTF(3, (CE_CONT, "!%s: %s: rxstat:0x%llx, len:0x%x",
		    dp->name, __func__, rxstat, len));

		/*
		 * Copy the packet
		 */
		if ((mp = dp->gc.gc_get_packet(dp, rbp, len)) == NULL) {
			/* no memory, discard the packet */
			dp->stats.norcvbuf++;
			goto next;
		}

		/*
		 * Process VLAN tag
		 */
		ethermin = ETHERMIN;
		ethermax = dp->mtu + sizeof (struct ether_header);
		if (GET_NET16(mp->b_rptr + VTAG_OFF) == VTAG_TPID) {
			/* a tagged frame may be VTAG_SIZE bytes longer */
			ethermax += VTAG_SIZE;
		}

		/* check packet size */
		if (len < ethermin) {
			dp->stats.errrcv++;
			dp->stats.runt++;
			freemsg(mp);
			goto next;
		}

		if (len > ethermax) {
			dp->stats.errrcv++;
			dp->stats.frame_too_long++;
			freemsg(mp);
			goto next;
		}

		len_total += len;

#ifdef GEM_DEBUG_VLAN
		if (GET_ETHERTYPE(mp->b_rptr) == VTAG_TPID) {
			gem_dump_packet(dp, (char *)__func__, mp, B_TRUE);
		}
#endif
		/* append received packet to temporaly rx buffer list */
		*rx_tailp = mp;
		rx_tailp  = &mp->b_next;

		/* count broadcast/multicast frames for statistics */
		if (mp->b_rptr[0] & 1) {
			if (bcmp(mp->b_rptr,
			    gem_etherbroadcastaddr.ether_addr_octet,
			    ETHERADDRL) == 0) {
				dp->stats.rbcast++;
			} else {
				dp->stats.rmcast++;
			}
		}
next:
		ASSERT(rbp != NULL);

		/* append new one to temporal new buffer list */
		*newbufs_tailp = rbp;
		newbufs_tailp  = &rbp->rxb_next;
	}

	/* advance rx_active_head */
	if ((cnt = active_head - dp->rx_active_head) > 0) {
		dp->stats.rbytes += len_total;
		dp->stats.rpackets += cnt;
	}
	dp->rx_active_head = active_head;

	/* terminate the working list */
	*newbufs_tailp = NULL;
	*rx_tailp = NULL;

	if (dp->rx_buf_head == NULL) {
		dp->rx_buf_tail = NULL;
	}

	DPRINTF(4, (CE_CONT, "%s: %s: cnt:%d, rx_head:%p",
	    dp->name, __func__, cnt, rx_head));

	if (newbufs) {
		/*
		 * fillfull rx list with new buffers
		 */
		seqnum_t	head;

		/* save current tail */
		head = dp->rx_active_tail;
		gem_append_rxbuf(dp, newbufs);

		/* call hw depend start routine if we have. */
		dp->gc.gc_rx_start(dp,
		    SLOT(head, rx_ring_size), dp->rx_active_tail - head);
	}

	if (rx_head) {
		/*
		 * send up received packets
		 */
		mutex_exit(&dp->intrlock);
		mac_rx(dp->mh, NULL, rx_head);
		mutex_enter(&dp->intrlock);
	}

#ifdef GEM_DEBUG_LEVEL
	gem_rx_pkts[min(cnt, sizeof (gem_rx_pkts)/sizeof (uint_t)-1)]++;
#endif
	return (cnt);
}
1975 
/*
 * Handle a tx-done interrupt: reclaim finished tx buffers, restart the
 * nic on a tx error, and decide whether the blocked tx stream should be
 * rescheduled.  Returns B_TRUE when the caller must call mac_tx_update.
 */
boolean_t
gem_tx_done(struct gem_dev *dp)
{
	boolean_t	tx_sched = B_FALSE;

	if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
		/* a tx error was detected; reset and keep pending buffers */
		(void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
		DPRINTF(2, (CE_CONT, "!%s: gem_tx_done: tx_desc: %d %d",
		    dp->name, dp->tx_active_head, dp->tx_active_tail));
		tx_sched = B_TRUE;
		goto x;
	}

	mutex_enter(&dp->xmitlock);

	/* XXX - we must not have any packets in soft queue */
	ASSERT(dp->tx_softq_head == dp->tx_softq_tail);
	/*
	 * If we won't have chance to get more free tx buffers, and blocked,
	 * it is worth to reschedule the downstream i.e. tx side.
	 */
	ASSERT(dp->tx_desc_intr - dp->tx_desc_head >= 0);
	if (dp->tx_blocked && dp->tx_desc_intr == dp->tx_desc_head) {
		/*
		 * As no further tx-done interrupts are scheduled, this
		 * is the last chance to kick tx side, which may be
		 * blocked now, otherwise the tx side never works again.
		 */
		tx_sched = B_TRUE;
		dp->tx_blocked = (clock_t)0;
		/* relax the per-call tx throttle again */
		dp->tx_max_packets =
		    min(dp->tx_max_packets + 2, dp->gc.gc_tx_buf_limit);
	}

	mutex_exit(&dp->xmitlock);

	DPRINTF(3, (CE_CONT, "!%s: %s: ret: blocked:%d",
	    dp->name, __func__, BOOLEAN(dp->tx_blocked)));
x:
	return (tx_sched);
}
2017 
2018 static uint_t
2019 gem_intr(struct gem_dev	*dp)
2020 {
2021 	uint_t		ret;
2022 
2023 	mutex_enter(&dp->intrlock);
2024 	if (dp->mac_suspended) {
2025 		mutex_exit(&dp->intrlock);
2026 		return (DDI_INTR_UNCLAIMED);
2027 	}
2028 	dp->intr_busy = B_TRUE;
2029 
2030 	ret = (*dp->gc.gc_interrupt)(dp);
2031 
2032 	if (ret == DDI_INTR_UNCLAIMED) {
2033 		dp->intr_busy = B_FALSE;
2034 		mutex_exit(&dp->intrlock);
2035 		return (ret);
2036 	}
2037 
2038 	if (!dp->mac_active) {
2039 		cv_broadcast(&dp->tx_drain_cv);
2040 	}
2041 
2042 
2043 	dp->stats.intr++;
2044 	dp->intr_busy = B_FALSE;
2045 
2046 	mutex_exit(&dp->intrlock);
2047 
2048 	if (ret & INTR_RESTART_TX) {
2049 		DPRINTF(4, (CE_CONT, "!%s: calling mac_tx_update", dp->name));
2050 		mac_tx_update(dp->mh);
2051 		ret &= ~INTR_RESTART_TX;
2052 	}
2053 	return (ret);
2054 }
2055 
/*
 * Polling fallback used when no hardware interrupt is available:
 * invoke the interrupt handler once per tick via timeout(9F).
 */
static void
gem_intr_watcher(struct gem_dev *dp)
{
	(void) gem_intr(dp);

	/* schedule next call of gem_intr_watcher */
	dp->intr_watcher_id =
	    timeout((void (*)(void *))gem_intr_watcher, (void *)dp, 1);
}
2065 
2066 /* ======================================================================== */
2067 /*
2068  * MII support routines
2069  */
2070 /* ======================================================================== */
2071 static void
2072 gem_choose_forcedmode(struct gem_dev *dp)
2073 {
2074 	/* choose media mode */
2075 	if (dp->anadv_1000fdx || dp->anadv_1000hdx) {
2076 		dp->speed = GEM_SPD_1000;
2077 		dp->full_duplex = dp->anadv_1000fdx;
2078 	} else if (dp->anadv_100fdx || dp->anadv_100t4) {
2079 		dp->speed = GEM_SPD_100;
2080 		dp->full_duplex = B_TRUE;
2081 	} else if (dp->anadv_100hdx) {
2082 		dp->speed = GEM_SPD_100;
2083 		dp->full_duplex = B_FALSE;
2084 	} else {
2085 		dp->speed = GEM_SPD_10;
2086 		dp->full_duplex = dp->anadv_10fdx;
2087 	}
2088 }
2089 
2090 uint16_t
2091 gem_mii_read(struct gem_dev *dp, uint_t reg)
2092 {
2093 	if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2094 		(*dp->gc.gc_mii_sync)(dp);
2095 	}
2096 	return ((*dp->gc.gc_mii_read)(dp, reg));
2097 }
2098 
2099 void
2100 gem_mii_write(struct gem_dev *dp, uint_t reg, uint16_t val)
2101 {
2102 	if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2103 		(*dp->gc.gc_mii_sync)(dp);
2104 	}
2105 	(*dp->gc.gc_mii_write)(dp, reg, val);
2106 }
2107 
2108 #define	fc_cap_decode(x)	\
2109 	((((x) & MII_ABILITY_PAUSE) ? 1 : 0) |	\
2110 	(((x) & MII_ABILITY_ASM_DIR) ? 2 : 0))
2111 
/*
 * Default gc_mii_config implementation: program the PHY's advertisement
 * registers (MII_AN_ADVERT and, for gigabit PHYs, MII_1000TC) from the
 * anadv_* settings and the requested flow control mode.
 * Returns GEM_SUCCESS or GEM_FAILURE.
 */
int
gem_mii_config_default(struct gem_dev *dp)
{
	uint16_t	mii_stat;
	uint16_t	val;
	/* maps anadv_flow_control (0..3) to ANAR pause bits */
	static uint16_t fc_cap_encode[4] = {
		/* none */		0,
		/* symmetric */		MII_ABILITY_PAUSE,
		/* tx */		MII_ABILITY_ASM_DIR,
		/* rx-symmetric */	MII_ABILITY_PAUSE | MII_ABILITY_ASM_DIR,
	};

	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/*
	 * Configure bits in advertisement register
	 */
	mii_stat = dp->mii_status;

	DPRINTF(1, (CE_CONT, "!%s: %s: MII_STATUS reg:%b",
	    dp->name, __func__, mii_stat, MII_STATUS_BITS));

	if ((mii_stat & MII_STATUS_ABILITY_TECH) == 0) {
		/* it's funny */
		cmn_err(CE_WARN, "!%s: wrong ability bits: mii_status:%b",
		    dp->name, mii_stat, MII_STATUS_BITS);
		return (GEM_FAILURE);
	}

	/* Do not change the rest of the ability bits in the advert reg */
	val = gem_mii_read(dp, MII_AN_ADVERT) & ~MII_ABILITY_ALL;

	DPRINTF(0, (CE_CONT,
	    "!%s: %s: 100T4:%d 100F:%d 100H:%d 10F:%d 10H:%d",
	    dp->name, __func__,
	    dp->anadv_100t4, dp->anadv_100fdx, dp->anadv_100hdx,
	    dp->anadv_10fdx, dp->anadv_10hdx));

	if (dp->anadv_100t4) {
		val |= MII_ABILITY_100BASE_T4;
	}
	if (dp->anadv_100fdx) {
		val |= MII_ABILITY_100BASE_TX_FD;
	}
	if (dp->anadv_100hdx) {
		val |= MII_ABILITY_100BASE_TX;
	}
	if (dp->anadv_10fdx) {
		val |= MII_ABILITY_10BASE_T_FD;
	}
	if (dp->anadv_10hdx) {
		val |= MII_ABILITY_10BASE_T;
	}

	/* set flow control capability */
	val |= fc_cap_encode[dp->anadv_flow_control];

	DPRINTF(0, (CE_CONT,
	    "!%s: %s: setting MII_AN_ADVERT reg:%b, mii_mode:%d, fc:%d",
	    dp->name, __func__, val, MII_ABILITY_BITS, dp->gc.gc_mii_mode,
	    dp->anadv_flow_control));

	gem_mii_write(dp, MII_AN_ADVERT, val);

	if (mii_stat & MII_STATUS_XSTATUS) {
		/*
		 * 1000Base-T GMII support
		 */
		if (!dp->anadv_autoneg) {
			/* enable manual configuration */
			val = MII_1000TC_CFG_EN;
		} else {
			val = 0;
			if (dp->anadv_1000fdx) {
				val |= MII_1000TC_ADV_FULL;
			}
			if (dp->anadv_1000hdx) {
				val |= MII_1000TC_ADV_HALF;
			}
		}
		DPRINTF(0, (CE_CONT,
		    "!%s: %s: setting MII_1000TC reg:%b",
		    dp->name, __func__, val, MII_1000TC_BITS));

		gem_mii_write(dp, MII_1000TC, val);
	}

	return (GEM_SUCCESS);
}
2201 
/* Report a link state transition to the MAC layer. */
#define	GEM_LINKUP(dp)		mac_link_update((dp)->mh, LINK_STATE_UP)
#define	GEM_LINKDOWN(dp)	mac_link_update((dp)->mh, LINK_STATE_DOWN)
2204 
2205 static uint8_t gem_fc_result[4 /* my cap */ ][4 /* lp cap */] = {
2206 /*	 none	symm	tx	rx/symm */
2207 /* none */
2208 	{FLOW_CONTROL_NONE,
2209 		FLOW_CONTROL_NONE,
2210 			FLOW_CONTROL_NONE,
2211 				FLOW_CONTROL_NONE},
2212 /* sym */
2213 	{FLOW_CONTROL_NONE,
2214 		FLOW_CONTROL_SYMMETRIC,
2215 			FLOW_CONTROL_NONE,
2216 				FLOW_CONTROL_SYMMETRIC},
2217 /* tx */
2218 	{FLOW_CONTROL_NONE,
2219 		FLOW_CONTROL_NONE,
2220 			FLOW_CONTROL_NONE,
2221 				FLOW_CONTROL_TX_PAUSE},
2222 /* rx/symm */
2223 	{FLOW_CONTROL_NONE,
2224 		FLOW_CONTROL_SYMMETRIC,
2225 			FLOW_CONTROL_RX_PAUSE,
2226 				FLOW_CONTROL_SYMMETRIC},
2227 };
2228 
/*
 * Human readable flow control names for the link-up console message,
 * indexed by dp->flow_control (FLOW_CONTROL_NONE .. FLOW_CONTROL_RX_PAUSE).
 */
static char *gem_fc_type[] = {
	"without",
	"with symmetric",
	"with tx",
	"with rx",
};
2235 
/*
 * gem_mii_link_check: one step of the MII link management state machine.
 *
 * Called with dp->intrlock held, from the link watcher timeout context
 * (gem_mii_link_watcher).  It advances dp->mii_state through
 * UNKNOWN -> RESETTING -> AUTONEGOTIATING -> AN_DONE -> MEDIA_SETUP ->
 * LINKDOWN/LINKUP by reading/writing the PHY via gem_mii_read/write,
 * schedules the next invocation through dp->mii_interval, and reports
 * link state transitions to the MAC layer via GEM_LINKUP/GEM_LINKDOWN.
 *
 * Returns B_TRUE when the caller should kick the tx side
 * (mac_tx_update) after dropping the lock.
 */
boolean_t
gem_mii_link_check(struct gem_dev *dp)
{
	uint16_t	old_mii_state;
	boolean_t	tx_sched = B_FALSE;
	uint16_t	status;
	uint16_t	advert;
	uint16_t	lpable;
	uint16_t	exp;
	uint16_t	ctl1000;
	uint16_t	stat1000;
	uint16_t	val;
	clock_t		now;
	clock_t		diff;
	int		linkdown_action;
	boolean_t	fix_phy = B_FALSE;

	now = ddi_get_lbolt();
	old_mii_state = dp->mii_state;

	DPRINTF(3, (CE_CONT, "!%s: %s: time:%d state:%d",
	    dp->name, __func__, now, dp->mii_state));

	/* ticks elapsed since the previous invocation; drives all timers */
	diff = now - dp->mii_last_check;
	dp->mii_last_check = now;

	/*
	 * For NWAM, don't show linkdown state right
	 * after the system boots
	 */
	if (dp->linkup_delay > 0) {
		if (dp->linkup_delay > diff) {
			dp->linkup_delay -= diff;
		} else {
			/* link up timeout */
			dp->linkup_delay = -1;
		}
	}

next_nowait:
	switch (dp->mii_state) {
	case MII_STATE_UNKNOWN:
		/* power-up, DP83840 requires 32 sync bits */
		(*dp->gc.gc_mii_sync)(dp);
		goto reset_phy;

	case MII_STATE_RESETTING:
		dp->mii_timer -= diff;
		if (dp->mii_timer > 0) {
			/* don't read phy registers in resetting */
			dp->mii_interval = WATCH_INTERVAL_FAST;
			goto next;
		}

		/* Timer expired, ensure reset bit is not set */

		if (dp->mii_status & MII_STATUS_MFPRMBLSUPR) {
			/* some phys need sync bits after reset */
			(*dp->gc.gc_mii_sync)(dp);
		}
		val = gem_mii_read(dp, MII_CONTROL);
		if (val & MII_CONTROL_RESET) {
			cmn_err(CE_NOTE,
			    "!%s: time:%ld resetting phy not complete."
			    " mii_control:0x%b",
			    dp->name, ddi_get_lbolt(),
			    val, MII_CONTROL_BITS);
		}

		/* ensure neither isolated nor pwrdown nor auto-nego mode */
		/* XXX -- this operation is required for NS DP83840A. */
		gem_mii_write(dp, MII_CONTROL, 0);

		/* As resetting PHY has completed, configure PHY registers */
		if ((*dp->gc.gc_mii_config)(dp) != GEM_SUCCESS) {
			/* we failed to configure PHY. */
			goto reset_phy;
		}

		/* mii_config may disable auto-negotiation */
		gem_choose_forcedmode(dp);

		/* forget the results of any previous negotiation */
		dp->mii_lpable = 0;
		dp->mii_advert = 0;
		dp->mii_exp = 0;
		dp->mii_ctl1000 = 0;
		dp->mii_stat1000 = 0;
		dp->flow_control = FLOW_CONTROL_NONE;

		if (!dp->anadv_autoneg) {
			/* skip auto-negotiation phase */
			dp->mii_state = MII_STATE_MEDIA_SETUP;
			dp->mii_timer = 0;
			dp->mii_interval = 0;
			goto next_nowait;
		}

		/* Issue auto-negotiation command */
		goto autonego;

	case MII_STATE_AUTONEGOTIATING:
		/*
		 * Autonegotiation is in progress
		 */
		dp->mii_timer -= diff;
		if (dp->mii_timer -
		    (dp->gc.gc_mii_an_timeout
		    - dp->gc.gc_mii_an_wait) > 0) {
			/*
			 * wait for a while, typically autonegotiation
			 * completes in 2.3 - 2.5 sec.
			 */
			dp->mii_interval = WATCH_INTERVAL_FAST;
			goto next;
		}

		/* read PHY status */
		status = gem_mii_read(dp, MII_STATUS);
		DPRINTF(4, (CE_CONT,
		    "!%s: %s: called: mii_state:%d MII_STATUS reg:%b",
		    dp->name, __func__, dp->mii_state,
		    status, MII_STATUS_BITS));

		if (status & MII_STATUS_REMFAULT) {
			/*
			 * The link partner told me something wrong happened.
			 * What do we do ?
			 */
			cmn_err(CE_CONT,
			    "!%s: auto-negotiation failed: remote fault",
			    dp->name);
			goto autonego;
		}

		if ((status & MII_STATUS_ANDONE) == 0) {
			if (dp->mii_timer <= 0) {
				/*
				 * Auto-negotiation was timed out,
				 * try again w/o resetting phy.
				 */
				if (!dp->mii_supress_msg) {
					cmn_err(CE_WARN,
				    "!%s: auto-negotiation failed: timeout",
					    dp->name);
					dp->mii_supress_msg = B_TRUE;
				}
				goto autonego;
			}
			/*
			 * Auto-negotiation is in progress. Wait.
			 */
			dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
			goto next;
		}

		/*
		 * Auto-negotiation has completed.
		 * Assume linkdown and fall through.
		 */
		dp->mii_supress_msg = B_FALSE;
		dp->mii_state = MII_STATE_AN_DONE;
		DPRINTF(0, (CE_CONT,
		    "!%s: auto-negotiation completed, MII_STATUS:%b",
		    dp->name, status, MII_STATUS_BITS));

		if (dp->gc.gc_mii_an_delay > 0) {
			/* chip requires a settle time before reading results */
			dp->mii_timer = dp->gc.gc_mii_an_delay;
			dp->mii_interval = drv_usectohz(20*1000);
			goto next;
		}

		dp->mii_timer = 0;
		diff = 0;
		goto next_nowait;

	case MII_STATE_AN_DONE:
		/*
		 * Auto-negotiation has done. Now we can set up media.
		 */
		dp->mii_timer -= diff;
		if (dp->mii_timer > 0) {
			/* wait for a while */
			dp->mii_interval = WATCH_INTERVAL_FAST;
			goto next;
		}

		/*
		 * set up the result of auto negotiation
		 */

		/*
		 * Read registers required to determine current
		 * duplex mode and media speed.
		 */
		if (dp->gc.gc_mii_an_delay > 0) {
			/*
			 * As the link watcher context has been suspended,
			 * 'status' is invalid. We must read the status
			 * register here
			 */
			status = gem_mii_read(dp, MII_STATUS);
		}
		advert = gem_mii_read(dp, MII_AN_ADVERT);
		lpable = gem_mii_read(dp, MII_AN_LPABLE);
		exp = gem_mii_read(dp, MII_AN_EXPANSION);
		if (exp == 0xffff) {
			/* some phys don't have exp register */
			exp = 0;
		}
		ctl1000  = 0;
		stat1000 = 0;
		if (dp->mii_status & MII_STATUS_XSTATUS) {
			/* gigabit capable PHY: read 1000Base-T registers too */
			ctl1000  = gem_mii_read(dp, MII_1000TC);
			stat1000 = gem_mii_read(dp, MII_1000TS);
		}
		dp->mii_lpable = lpable;
		dp->mii_advert = advert;
		dp->mii_exp = exp;
		dp->mii_ctl1000  = ctl1000;
		dp->mii_stat1000 = stat1000;

		cmn_err(CE_CONT,
		"!%s: auto-negotiation done, advert:%b, lpable:%b, exp:%b",
		    dp->name,
		    advert, MII_ABILITY_BITS,
		    lpable, MII_ABILITY_BITS,
		    exp, MII_AN_EXP_BITS);

		if (dp->mii_status & MII_STATUS_XSTATUS) {
			cmn_err(CE_CONT,
			    "! MII_1000TC:%b, MII_1000TS:%b",
			    ctl1000, MII_1000TC_BITS,
			    stat1000, MII_1000TS_BITS);
		}

		if (gem_population(lpable) <= 1 &&
		    (exp & MII_AN_EXP_LPCANAN) == 0) {
			if ((advert & MII_ABILITY_TECH) != lpable) {
				cmn_err(CE_WARN,
				    "!%s: but the link partnar doesn't seem"
				    " to have auto-negotiation capability."
				    " please check the link configuration.",
				    dp->name);
			}
			/*
			 * it should be result of parallel detection, which
			 * cannot detect duplex mode.
			 */
			if (lpable & MII_ABILITY_100BASE_TX) {
				/*
				 * we prefer full duplex mode for 100Mbps
				 * connection, if we can.
				 */
				lpable |= advert & MII_ABILITY_100BASE_TX_FD;
			}

			if ((advert & lpable) == 0 &&
			    lpable & MII_ABILITY_10BASE_T) {
				lpable |= advert & MII_ABILITY_10BASE_T_FD;
			}
			/*
			 * as the link partner isn't auto-negotiatable, use
			 * fixed mode temporally.
			 */
			fix_phy = B_TRUE;
		} else if (lpable == 0) {
			cmn_err(CE_WARN, "!%s: wrong lpable.", dp->name);
			goto reset_phy;
		}
		/*
		 * configure current link mode according to AN priority.
		 */
		val = advert & lpable;
		if ((ctl1000 & MII_1000TC_ADV_FULL) &&
		    (stat1000 & MII_1000TS_LP_FULL)) {
			/* 1000BaseT & full duplex */
			dp->speed	 = GEM_SPD_1000;
			dp->full_duplex  = B_TRUE;
		} else if ((ctl1000 & MII_1000TC_ADV_HALF) &&
		    (stat1000 & MII_1000TS_LP_HALF)) {
			/* 1000BaseT & half duplex */
			dp->speed = GEM_SPD_1000;
			dp->full_duplex = B_FALSE;
		} else if (val & MII_ABILITY_100BASE_TX_FD) {
			/* 100BaseTx & full duplex */
			dp->speed = GEM_SPD_100;
			dp->full_duplex = B_TRUE;
		} else if (val & MII_ABILITY_100BASE_T4) {
			/*
			 * 100BaseT4 & full duplex
			 * NOTE(review): 100BASE-T4 is a half-duplex-only
			 * medium per IEEE 802.3; full_duplex = B_TRUE here
			 * looks suspicious -- confirm before relying on it.
			 */
			dp->speed = GEM_SPD_100;
			dp->full_duplex = B_TRUE;
		} else if (val & MII_ABILITY_100BASE_TX) {
			/* 100BaseTx & half duplex */
			dp->speed	 = GEM_SPD_100;
			dp->full_duplex  = B_FALSE;
		} else if (val & MII_ABILITY_10BASE_T_FD) {
			/* 10BaseT & full duplex */
			dp->speed	 = GEM_SPD_10;
			dp->full_duplex  = B_TRUE;
		} else if (val & MII_ABILITY_10BASE_T) {
			/* 10BaseT & half duplex */
			dp->speed	 = GEM_SPD_10;
			dp->full_duplex  = B_FALSE;
		} else {
			/*
			 * It seems that the link partner doesn't have
			 * auto-negotiation capability and our PHY
			 * could not report the correct current mode.
			 * We guess current mode by mii_control register.
			 */
			val = gem_mii_read(dp, MII_CONTROL);

			/* select 100m full or 10m half */
			dp->speed = (val & MII_CONTROL_100MB) ?
			    GEM_SPD_100 : GEM_SPD_10;
			dp->full_duplex = dp->speed != GEM_SPD_10;
			fix_phy = B_TRUE;

			cmn_err(CE_NOTE,
			    "!%s: auto-negotiation done but "
			    "common ability not found.\n"
			    "PHY state: control:%b advert:%b lpable:%b\n"
			    "guessing %d Mbps %s duplex mode",
			    dp->name,
			    val, MII_CONTROL_BITS,
			    advert, MII_ABILITY_BITS,
			    lpable, MII_ABILITY_BITS,
			    gem_speed_value[dp->speed],
			    dp->full_duplex ? "full" : "half");
		}

		/* flow control is only meaningful on a full duplex link */
		if (dp->full_duplex) {
			dp->flow_control =
			    gem_fc_result[fc_cap_decode(advert)]
			    [fc_cap_decode(lpable)];
		} else {
			dp->flow_control = FLOW_CONTROL_NONE;
		}
		dp->mii_state = MII_STATE_MEDIA_SETUP;
		/* FALLTHROUGH */

	case MII_STATE_MEDIA_SETUP:
		dp->mii_state = MII_STATE_LINKDOWN;
		dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
		DPRINTF(2, (CE_CONT, "!%s: setup midia mode done", dp->name));
		dp->mii_supress_msg = B_FALSE;

		/* use short interval */
		dp->mii_interval = WATCH_INTERVAL_FAST;

		if ((!dp->anadv_autoneg) ||
		    dp->gc.gc_mii_an_oneshot || fix_phy) {

			/*
			 * write specified mode to phy.
			 */
			val = gem_mii_read(dp, MII_CONTROL);
			val &= ~(MII_CONTROL_SPEED | MII_CONTROL_FDUPLEX |
			    MII_CONTROL_ANE | MII_CONTROL_RSAN);

			if (dp->full_duplex) {
				val |= MII_CONTROL_FDUPLEX;
			}

			switch (dp->speed) {
			case GEM_SPD_1000:
				val |= MII_CONTROL_1000MB;
				break;

			case GEM_SPD_100:
				val |= MII_CONTROL_100MB;
				break;

			default:
				cmn_err(CE_WARN, "%s: unknown speed:%d",
				    dp->name, dp->speed);
				/* FALLTHROUGH */
			case GEM_SPD_10:
				/* for GEM_SPD_10, do nothing */
				break;
			}

			if (dp->mii_status & MII_STATUS_XSTATUS) {
				gem_mii_write(dp,
				    MII_1000TC, MII_1000TC_CFG_EN);
			}
			gem_mii_write(dp, MII_CONTROL, val);
		}

		if (dp->nic_state >= NIC_STATE_INITIALIZED) {
			/* notify the result of auto-negotiation to mac */
			(*dp->gc.gc_set_media)(dp);
		}

		if ((void *)dp->gc.gc_mii_tune_phy) {
			/* for built-in sis900 */
			/* XXX - this code should be removed.  */
			(*dp->gc.gc_mii_tune_phy)(dp);
		}

		goto next_nowait;

	case MII_STATE_LINKDOWN:
		status = gem_mii_read(dp, MII_STATUS);
		if (status & MII_STATUS_LINKUP) {
			/*
			 * Link going up
			 */
			dp->mii_state = MII_STATE_LINKUP;
			dp->mii_supress_msg = B_FALSE;

			DPRINTF(0, (CE_CONT,
			    "!%s: link up detected: mii_stat:%b",
			    dp->name, status, MII_STATUS_BITS));

			/*
			 * MII_CONTROL_100MB and  MII_CONTROL_FDUPLEX are
			 * ignored when MII_CONTROL_ANE is set.
			 */
			cmn_err(CE_CONT,
			    "!%s: Link up: %d Mbps %s duplex %s flow control",
			    dp->name,
			    gem_speed_value[dp->speed],
			    dp->full_duplex ? "full" : "half",
			    gem_fc_type[dp->flow_control]);

			dp->mii_interval = dp->gc.gc_mii_link_watch_interval;

			/* XXX - we need other timer to watch statistics */
			if (dp->gc.gc_mii_hw_link_detection &&
			    dp->nic_state == NIC_STATE_ONLINE) {
				/* hw interrupt will tell us of link changes */
				dp->mii_interval = 0;
			}

			if (dp->nic_state == NIC_STATE_ONLINE) {
				if (!dp->mac_active) {
					(void) gem_mac_start(dp);
				}
				tx_sched = B_TRUE;
			}
			goto next;
		}

		dp->mii_supress_msg = B_TRUE;
		if (dp->anadv_autoneg) {
			dp->mii_timer -= diff;
			if (dp->mii_timer <= 0) {
				/*
				 * link down timer expired.
				 * need to restart auto-negotiation.
				 */
				linkdown_action =
				    dp->gc.gc_mii_linkdown_timeout_action;
				goto restart_autonego;
			}
		}
		/* don't change mii_state */
		break;

	case MII_STATE_LINKUP:
		status = gem_mii_read(dp, MII_STATUS);
		if ((status & MII_STATUS_LINKUP) == 0) {
			/*
			 * Link going down
			 */
			cmn_err(CE_NOTE,
			    "!%s: link down detected: mii_stat:%b",
			    dp->name, status, MII_STATUS_BITS);

			if (dp->nic_state == NIC_STATE_ONLINE &&
			    dp->mac_active &&
			    dp->gc.gc_mii_stop_mac_on_linkdown) {
				(void) gem_mac_stop(dp, 0);

				if (dp->tx_blocked) {
					/* drain tx */
					tx_sched = B_TRUE;
				}
			}

			if (dp->anadv_autoneg) {
				/* need to restart auto-negotiation */
				linkdown_action = dp->gc.gc_mii_linkdown_action;
				goto restart_autonego;
			}

			dp->mii_state = MII_STATE_LINKDOWN;
			dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;

			if ((void *)dp->gc.gc_mii_tune_phy) {
				/* for built-in sis900 */
				(*dp->gc.gc_mii_tune_phy)(dp);
			}
			dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
			goto next;
		}

		/* don't change mii_state */
		if (dp->gc.gc_mii_hw_link_detection &&
		    dp->nic_state == NIC_STATE_ONLINE) {
			dp->mii_interval = 0;
			goto next;
		}
		break;
	}
	dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
	goto next;

	/* Actions on the end of state routine */

restart_autonego:
	switch (linkdown_action) {
	case MII_ACTION_RESET:
		if (!dp->mii_supress_msg) {
			cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
		}
		dp->mii_supress_msg = B_TRUE;
		goto reset_phy;

	case MII_ACTION_NONE:
		dp->mii_supress_msg = B_TRUE;
		if (dp->gc.gc_mii_an_oneshot) {
			goto autonego;
		}
		/* PHY will restart autonego automatically */
		dp->mii_state = MII_STATE_AUTONEGOTIATING;
		dp->mii_timer = dp->gc.gc_mii_an_timeout;
		dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
		goto next;

	case MII_ACTION_RSA:
		if (!dp->mii_supress_msg) {
			cmn_err(CE_CONT, "!%s: restarting auto-negotiation",
			    dp->name);
		}
		dp->mii_supress_msg = B_TRUE;
		goto autonego;

	default:
		cmn_err(CE_WARN, "!%s: unknowm linkdown action: %d",
		    dp->name, dp->gc.gc_mii_linkdown_action);
		dp->mii_supress_msg = B_TRUE;
	}
	/* NOTREACHED */

reset_phy:
	if (!dp->mii_supress_msg) {
		cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
	}
	dp->mii_state = MII_STATE_RESETTING;
	dp->mii_timer = dp->gc.gc_mii_reset_timeout;
	if (!dp->gc.gc_mii_dont_reset) {
		gem_mii_write(dp, MII_CONTROL, MII_CONTROL_RESET);
	}
	dp->mii_interval = WATCH_INTERVAL_FAST;
	goto next;

autonego:
	if (!dp->mii_supress_msg) {
		cmn_err(CE_CONT, "!%s: auto-negotiation started", dp->name);
	}
	dp->mii_state = MII_STATE_AUTONEGOTIATING;
	dp->mii_timer = dp->gc.gc_mii_an_timeout;

	/* start/restart auto nego */
	val = gem_mii_read(dp, MII_CONTROL) &
	    ~(MII_CONTROL_ISOLATE | MII_CONTROL_PWRDN | MII_CONTROL_RESET);

	gem_mii_write(dp, MII_CONTROL,
	    val | MII_CONTROL_RSAN | MII_CONTROL_ANE);

	dp->mii_interval = dp->gc.gc_mii_an_watch_interval;

next:
	if (dp->link_watcher_id == 0 && dp->mii_interval) {
		/* we must schedule next mii_watcher */
		dp->link_watcher_id =
		    timeout((void (*)(void *))&gem_mii_link_watcher,
		    (void *)dp, dp->mii_interval);
	}

	if (old_mii_state != dp->mii_state) {
		/* notify new mii link state */
		if (dp->mii_state == MII_STATE_LINKUP) {
			dp->linkup_delay = 0;
			GEM_LINKUP(dp);
		} else if (dp->linkup_delay <= 0) {
			GEM_LINKDOWN(dp);
		}
	} else if (dp->linkup_delay < 0) {
		/* first linkup timeout */
		dp->linkup_delay = 0;
		GEM_LINKDOWN(dp);
	}

	return (tx_sched);
}
2832 
2833 static void
2834 gem_mii_link_watcher(struct gem_dev *dp)
2835 {
2836 	boolean_t	tx_sched;
2837 
2838 	mutex_enter(&dp->intrlock);
2839 
2840 	dp->link_watcher_id = 0;
2841 	tx_sched = gem_mii_link_check(dp);
2842 #if GEM_DEBUG_LEVEL > 2
2843 	if (dp->link_watcher_id == 0) {
2844 		cmn_err(CE_CONT, "%s: link watcher stopped", dp->name);
2845 	}
2846 #endif
2847 	mutex_exit(&dp->intrlock);
2848 
2849 	if (tx_sched) {
2850 		/* kick potentially stopped downstream */
2851 		mac_tx_update(dp->mh);
2852 	}
2853 }
2854 
/*
 * gem_mii_probe_default: locate the MII PHY attached to this device.
 *
 * Tries the pre-configured address in dp->mii_phy_addr first, then
 * scans all 32 possible PHY addresses (twice -- the second pass writes
 * MII_CONTROL before reading, for PHYs that need waking first).  On
 * success, records mii_status/mii_phy_id/mii_xstatus, probes whether
 * the PHY can latch pause bits in the advertisement register, and
 * trims dp->gc.gc_flow_control accordingly.
 *
 * Returns GEM_SUCCESS, or GEM_FAILURE with mii_phy_addr set to -1
 * when no PHY responds.
 */
int
gem_mii_probe_default(struct gem_dev *dp)
{
	int8_t		phy;
	uint16_t	status;
	uint16_t	adv;
	uint16_t	adv_org;

	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/*
	 * Scan PHY
	 */
	/* ensure to send sync bits */
	dp->mii_status = 0;

	/* Try default phy first */
	if (dp->mii_phy_addr) {
		status = gem_mii_read(dp, MII_STATUS);
		if (status != 0xffff && status != 0) {
			/* PHY responded; clear control reg and accept it */
			gem_mii_write(dp, MII_CONTROL, 0);
			goto PHY_found;
		}

		if (dp->mii_phy_addr < 0) {
			/* negative address means internal/non-MII PHY */
			cmn_err(CE_NOTE,
	    "!%s: failed to probe default internal and/or non-MII PHY",
			    dp->name);
			return (GEM_FAILURE);
		}

		cmn_err(CE_NOTE,
		    "!%s: failed to probe default MII PHY at %d",
		    dp->name, dp->mii_phy_addr);
	}

	/* Try all possible address */
	for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
		dp->mii_phy_addr = phy;
		status = gem_mii_read(dp, MII_STATUS);

		if (status != 0xffff && status != 0) {
			gem_mii_write(dp, MII_CONTROL, 0);
			goto PHY_found;
		}
	}

	/* second pass: write control first, for PHYs that need a wake-up */
	for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
		dp->mii_phy_addr = phy;
		gem_mii_write(dp, MII_CONTROL, 0);
		status = gem_mii_read(dp, MII_STATUS);

		if (status != 0xffff && status != 0) {
			goto PHY_found;
		}
	}

	cmn_err(CE_NOTE, "!%s: no MII PHY found", dp->name);
	dp->mii_phy_addr = -1;

	return (GEM_FAILURE);

PHY_found:
	dp->mii_status = status;
	/* 32-bit PHY identifier: PHYIDH in upper half, PHYIDL in lower */
	dp->mii_phy_id  = (gem_mii_read(dp, MII_PHYIDH) << 16) |
	    gem_mii_read(dp, MII_PHYIDL);

	if (dp->mii_phy_addr < 0) {
		cmn_err(CE_CONT, "!%s: using internal/non-MII PHY(0x%08x)",
		    dp->name, dp->mii_phy_id);
	} else {
		cmn_err(CE_CONT, "!%s: MII PHY (0x%08x) found at %d",
		    dp->name, dp->mii_phy_id, dp->mii_phy_addr);
	}

	cmn_err(CE_CONT, "!%s: PHY control:%b, status:%b, advert:%b, lpar:%b",
	    dp->name,
	    gem_mii_read(dp, MII_CONTROL), MII_CONTROL_BITS,
	    status, MII_STATUS_BITS,
	    gem_mii_read(dp, MII_AN_ADVERT), MII_ABILITY_BITS,
	    gem_mii_read(dp, MII_AN_LPABLE), MII_ABILITY_BITS);

	dp->mii_xstatus = 0;
	if (status & MII_STATUS_XSTATUS) {
		/* gigabit capable PHY: also record the extended status */
		dp->mii_xstatus = gem_mii_read(dp, MII_XSTATUS);

		cmn_err(CE_CONT, "!%s: xstatus:%b",
		    dp->name, dp->mii_xstatus, MII_XSTATUS_BITS);
	}

	/* check if the phy can advertize pause abilities */
	adv_org = gem_mii_read(dp, MII_AN_ADVERT);

	gem_mii_write(dp, MII_AN_ADVERT,
	    MII_ABILITY_PAUSE | MII_ABILITY_ASM_DIR);

	/* read back: bits the PHY cannot latch come back as zero */
	adv = gem_mii_read(dp, MII_AN_ADVERT);

	if ((adv & MII_ABILITY_PAUSE) == 0) {
		dp->gc.gc_flow_control &= ~1;
	}

	if ((adv & MII_ABILITY_ASM_DIR) == 0) {
		dp->gc.gc_flow_control &= ~2;
	}

	/* restore the original advertisement register */
	gem_mii_write(dp, MII_AN_ADVERT, adv_org);

	return (GEM_SUCCESS);
}
2965 
2966 static void
2967 gem_mii_start(struct gem_dev *dp)
2968 {
2969 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2970 
2971 	/* make a first call of check link */
2972 	dp->mii_state = MII_STATE_UNKNOWN;
2973 	dp->mii_last_check = ddi_get_lbolt();
2974 	dp->linkup_delay = dp->gc.gc_mii_linkdown_timeout;
2975 	(void) gem_mii_link_watcher(dp);
2976 }
2977 
2978 static void
2979 gem_mii_stop(struct gem_dev *dp)
2980 {
2981 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2982 
2983 	/* Ensure timer routine stopped */
2984 	mutex_enter(&dp->intrlock);
2985 	if (dp->link_watcher_id) {
2986 		while (untimeout(dp->link_watcher_id) == -1)
2987 			;
2988 		dp->link_watcher_id = 0;
2989 	}
2990 	mutex_exit(&dp->intrlock);
2991 }
2992 
2993 boolean_t
2994 gem_get_mac_addr_conf(struct gem_dev *dp)
2995 {
2996 	char		propname[32];
2997 	char		*valstr;
2998 	uint8_t		mac[ETHERADDRL];
2999 	char		*cp;
3000 	int		c;
3001 	int		i;
3002 	int		j;
3003 	uint8_t		v;
3004 	uint8_t		d;
3005 	uint8_t		ored;
3006 
3007 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3008 	/*
3009 	 * Get ethernet address from .conf file
3010 	 */
3011 	(void) sprintf(propname, "mac-addr");
3012 	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dp->dip,
3013 	    DDI_PROP_DONTPASS, propname, &valstr)) !=
3014 	    DDI_PROP_SUCCESS) {
3015 		return (B_FALSE);
3016 	}
3017 
3018 	if (strlen(valstr) != ETHERADDRL*3-1) {
3019 		goto syntax_err;
3020 	}
3021 
3022 	cp = valstr;
3023 	j  = 0;
3024 	ored = 0;
3025 	for (;;) {
3026 		v = 0;
3027 		for (i = 0; i < 2; i++) {
3028 			c = *cp++;
3029 
3030 			if (c >= 'a' && c <= 'f') {
3031 				d = c - 'a' + 10;
3032 			} else if (c >= 'A' && c <= 'F') {
3033 				d = c - 'A' + 10;
3034 			} else if (c >= '0' && c <= '9') {
3035 				d = c - '0';
3036 			} else {
3037 				goto syntax_err;
3038 			}
3039 			v = (v << 4) | d;
3040 		}
3041 
3042 		mac[j++] = v;
3043 		ored |= v;
3044 		if (j == ETHERADDRL) {
3045 			/* done */
3046 			break;
3047 		}
3048 
3049 		c = *cp++;
3050 		if (c != ':') {
3051 			goto syntax_err;
3052 		}
3053 	}
3054 
3055 	if (ored == 0) {
3056 		goto err;
3057 	}
3058 	for (i = 0; i < ETHERADDRL; i++) {
3059 		dp->dev_addr.ether_addr_octet[i] = mac[i];
3060 	}
3061 	ddi_prop_free(valstr);
3062 	return (B_TRUE);
3063 
3064 syntax_err:
3065 	cmn_err(CE_CONT,
3066 	    "!%s: read mac addr: trying .conf: syntax err %s",
3067 	    dp->name, valstr);
3068 err:
3069 	ddi_prop_free(valstr);
3070 
3071 	return (B_FALSE);
3072 }
3073 
3074 
3075 /* ============================================================== */
3076 /*
3077  * internal start/stop interface
3078  */
3079 /* ============================================================== */
/*
 * gem_mac_set_rx_filter: push the current rx filter state (unicast
 * address, multicast list, rxmode flags) to the hardware through the
 * chip-specific callback.  Caller holds intrlock (see callers).
 */
static int
gem_mac_set_rx_filter(struct gem_dev *dp)
{
	return ((*dp->gc.gc_set_rx_filter)(dp));
}
3085 
3086 /*
3087  * gem_mac_init: cold start
3088  */
3089 static int
3090 gem_mac_init(struct gem_dev *dp)
3091 {
3092 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3093 
3094 	if (dp->mac_suspended) {
3095 		return (GEM_FAILURE);
3096 	}
3097 
3098 	dp->mac_active = B_FALSE;
3099 
3100 	gem_init_rx_ring(dp);
3101 	gem_init_tx_ring(dp);
3102 
3103 	/* reset transmitter state */
3104 	dp->tx_blocked = (clock_t)0;
3105 	dp->tx_busy = 0;
3106 	dp->tx_reclaim_busy = 0;
3107 	dp->tx_max_packets = dp->gc.gc_tx_buf_limit;
3108 
3109 	if ((*dp->gc.gc_init_chip)(dp) != GEM_SUCCESS) {
3110 		return (GEM_FAILURE);
3111 	}
3112 
3113 	gem_prepare_rx_buf(dp);
3114 
3115 	return (GEM_SUCCESS);
3116 }
3117 /*
3118  * gem_mac_start: warm start
3119  */
3120 static int
3121 gem_mac_start(struct gem_dev *dp)
3122 {
3123 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3124 
3125 	ASSERT(mutex_owned(&dp->intrlock));
3126 	ASSERT(dp->nic_state == NIC_STATE_ONLINE);
3127 	ASSERT(dp->mii_state ==  MII_STATE_LINKUP);
3128 
3129 	/* enable tx and rx */
3130 	mutex_enter(&dp->xmitlock);
3131 	if (dp->mac_suspended) {
3132 		mutex_exit(&dp->xmitlock);
3133 		return (GEM_FAILURE);
3134 	}
3135 	dp->mac_active = B_TRUE;
3136 	mutex_exit(&dp->xmitlock);
3137 
3138 	/* setup rx buffers */
3139 	(*dp->gc.gc_rx_start)(dp,
3140 	    SLOT(dp->rx_active_head, dp->gc.gc_rx_ring_size),
3141 	    dp->rx_active_tail - dp->rx_active_head);
3142 
3143 	if ((*dp->gc.gc_start_chip)(dp) != GEM_SUCCESS) {
3144 		cmn_err(CE_WARN, "%s: %s: start_chip: failed",
3145 		    dp->name, __func__);
3146 		return (GEM_FAILURE);
3147 	}
3148 
3149 	mutex_enter(&dp->xmitlock);
3150 
3151 	/* load untranmitted packets to the nic */
3152 	ASSERT(dp->tx_softq_tail - dp->tx_softq_head >= 0);
3153 	if (dp->tx_softq_tail - dp->tx_softq_head > 0) {
3154 		gem_tx_load_descs_oo(dp,
3155 		    dp->tx_softq_head, dp->tx_softq_tail,
3156 		    GEM_TXFLAG_HEAD);
3157 		/* issue preloaded tx buffers */
3158 		gem_tx_start_unit(dp);
3159 	}
3160 
3161 	mutex_exit(&dp->xmitlock);
3162 
3163 	return (GEM_SUCCESS);
3164 }
3165 
/*
 * gem_mac_stop: stop tx/rx activity and quiesce the chip.
 *
 * Blocks new transmits, waits for in-flight gem_send threads to drain,
 * optionally (unless GEM_RESTART_NOWAIT) waits for queued tx buffers
 * to leave the nic, then stops (or, failing that, resets) the chip and
 * cleans up the rings.  With GEM_RESTART_KEEP_BUF, active tx buffers
 * are preserved for retransmission after restart.
 *
 * Called with intrlock held and xmitlock NOT held; returns GEM_SUCCESS
 * (also when the device is already suspended).
 */
static int
gem_mac_stop(struct gem_dev *dp, uint_t flags)
{
	int		i;
	int		wait_time; /* in uS */
#ifdef GEM_DEBUG_LEVEL
	clock_t		now;
#endif
	int		ret = GEM_SUCCESS;

	DPRINTF(1, (CE_CONT, "!%s: %s: called, rx_buf_free:%d",
	    dp->name, __func__, dp->rx_buf_freecnt));

	ASSERT(mutex_owned(&dp->intrlock));
	ASSERT(!mutex_owned(&dp->xmitlock));

	/*
	 * Block transmits
	 */
	mutex_enter(&dp->xmitlock);
	if (dp->mac_suspended) {
		mutex_exit(&dp->xmitlock);
		return (GEM_SUCCESS);
	}
	dp->mac_active = B_FALSE;

	/* wait for in-flight transmit threads to finish */
	while (dp->tx_busy > 0) {
		cv_wait(&dp->tx_drain_cv, &dp->xmitlock);
	}
	mutex_exit(&dp->xmitlock);

	if ((flags & GEM_RESTART_NOWAIT) == 0) {
		/*
		 * Wait for all tx buffers sent.
		 */
		/*
		 * worst-case wire time for the pending buffers:
		 * 2 * (bits per max packet / Mbps) uS per buffer
		 */
		wait_time =
		    2 * (8 * MAXPKTBUF(dp) / gem_speed_value[dp->speed]) *
		    (dp->tx_active_tail - dp->tx_active_head);

		DPRINTF(0, (CE_CONT, "%s: %s: max drain time: %d uS",
		    dp->name, __func__, wait_time));
		i = 0;
#ifdef GEM_DEBUG_LEVEL
		now = ddi_get_lbolt();
#endif
		/* poll in 100uS steps until drained or timed out */
		while (dp->tx_active_tail != dp->tx_active_head) {
			if (i > wait_time) {
				/* timeout */
				cmn_err(CE_NOTE, "%s: %s timeout: tx drain",
				    dp->name, __func__);
				break;
			}
			(void) gem_reclaim_txbuf(dp);
			drv_usecwait(100);
			i += 100;
		}
		DPRINTF(0, (CE_NOTE,
		    "!%s: %s: the nic have drained in %d uS, real %d mS",
		    dp->name, __func__, i,
		    10*((int)(ddi_get_lbolt() - now))));
	}

	/*
	 * Now we can stop the nic safely.
	 */
	if ((*dp->gc.gc_stop_chip)(dp) != GEM_SUCCESS) {
		cmn_err(CE_NOTE, "%s: %s: resetting the chip to stop it",
		    dp->name, __func__);
		if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
			cmn_err(CE_WARN, "%s: %s: failed to reset chip",
			    dp->name, __func__);
		}
	}

	/*
	 * Clear all rx buffers
	 */
	if (flags & GEM_RESTART_KEEP_BUF) {
		/* pick up any frames already received before discarding */
		(void) gem_receive(dp);
	}
	gem_clean_rx_buf(dp);

	/*
	 * Update final statistics
	 */
	(*dp->gc.gc_get_stats)(dp);

	/*
	 * Clear all pended tx packets
	 */
	ASSERT(dp->tx_active_tail == dp->tx_softq_head);
	ASSERT(dp->tx_softq_tail == dp->tx_free_head);
	if (flags & GEM_RESTART_KEEP_BUF) {
		/* restore active tx buffers */
		dp->tx_active_tail = dp->tx_active_head;
		dp->tx_softq_head  = dp->tx_active_head;
	} else {
		gem_clean_tx_buf(dp);
	}

	return (ret);
}
3268 
3269 static int
3270 gem_add_multicast(struct gem_dev *dp, const uint8_t *ep)
3271 {
3272 	int		cnt;
3273 	int		err;
3274 
3275 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3276 
3277 	mutex_enter(&dp->intrlock);
3278 	if (dp->mac_suspended) {
3279 		mutex_exit(&dp->intrlock);
3280 		return (GEM_FAILURE);
3281 	}
3282 
3283 	if (dp->mc_count_req++ < GEM_MAXMC) {
3284 		/* append the new address at the end of the mclist */
3285 		cnt = dp->mc_count;
3286 		bcopy(ep, dp->mc_list[cnt].addr.ether_addr_octet,
3287 		    ETHERADDRL);
3288 		if (dp->gc.gc_multicast_hash) {
3289 			dp->mc_list[cnt].hash =
3290 			    (*dp->gc.gc_multicast_hash)(dp, (uint8_t *)ep);
3291 		}
3292 		dp->mc_count = cnt + 1;
3293 	}
3294 
3295 	if (dp->mc_count_req != dp->mc_count) {
3296 		/* multicast address list overflow */
3297 		dp->rxmode |= RXMODE_MULTI_OVF;
3298 	} else {
3299 		dp->rxmode &= ~RXMODE_MULTI_OVF;
3300 	}
3301 
3302 	/* tell new multicast list to the hardware */
3303 	err = gem_mac_set_rx_filter(dp);
3304 
3305 	mutex_exit(&dp->intrlock);
3306 
3307 	return (err);
3308 }
3309 
3310 static int
3311 gem_remove_multicast(struct gem_dev *dp, const uint8_t *ep)
3312 {
3313 	size_t		len;
3314 	int		i;
3315 	int		cnt;
3316 	int		err;
3317 
3318 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3319 
3320 	mutex_enter(&dp->intrlock);
3321 	if (dp->mac_suspended) {
3322 		mutex_exit(&dp->intrlock);
3323 		return (GEM_FAILURE);
3324 	}
3325 
3326 	dp->mc_count_req--;
3327 	cnt = dp->mc_count;
3328 	for (i = 0; i < cnt; i++) {
3329 		if (bcmp(ep, &dp->mc_list[i].addr, ETHERADDRL)) {
3330 			continue;
3331 		}
3332 		/* shrink the mclist by copying forward */
3333 		len = (cnt - (i + 1)) * sizeof (*dp->mc_list);
3334 		if (len > 0) {
3335 			bcopy(&dp->mc_list[i+1], &dp->mc_list[i], len);
3336 		}
3337 		dp->mc_count--;
3338 		break;
3339 	}
3340 
3341 	if (dp->mc_count_req != dp->mc_count) {
3342 		/* multicast address list overflow */
3343 		dp->rxmode |= RXMODE_MULTI_OVF;
3344 	} else {
3345 		dp->rxmode &= ~RXMODE_MULTI_OVF;
3346 	}
3347 	/* In gem v2, don't hold xmitlock on calling set_rx_filter */
3348 	err = gem_mac_set_rx_filter(dp);
3349 
3350 	mutex_exit(&dp->intrlock);
3351 
3352 	return (err);
3353 }
3354 
3355 /* ============================================================== */
3356 /*
3357  * ND interface
3358  */
3359 /* ============================================================== */
/*
 * Indices of the ndd parameters exported by gem_nd_setup().  Each value
 * selects one slot of the gem_nd_arg array allocated there, and is
 * dispatched on by gem_param_get() and gem_param_set().  PARAM_COUNT
 * sizes the array and must stay last.
 */
enum {
	/* static PHY/MAC capabilities */
	PARAM_AUTONEG_CAP,
	PARAM_PAUSE_CAP,
	PARAM_ASYM_PAUSE_CAP,
	PARAM_1000FDX_CAP,
	PARAM_1000HDX_CAP,
	PARAM_100T4_CAP,
	PARAM_100FDX_CAP,
	PARAM_100HDX_CAP,
	PARAM_10FDX_CAP,
	PARAM_10HDX_CAP,

	/* locally advertised capabilities (writable via ndd) */
	PARAM_ADV_AUTONEG_CAP,
	PARAM_ADV_PAUSE_CAP,
	PARAM_ADV_ASYM_PAUSE_CAP,
	PARAM_ADV_1000FDX_CAP,
	PARAM_ADV_1000HDX_CAP,
	PARAM_ADV_100T4_CAP,
	PARAM_ADV_100FDX_CAP,
	PARAM_ADV_100HDX_CAP,
	PARAM_ADV_10FDX_CAP,
	PARAM_ADV_10HDX_CAP,

	/* link partner's advertised capabilities (read-only) */
	PARAM_LP_AUTONEG_CAP,
	PARAM_LP_PAUSE_CAP,
	PARAM_LP_ASYM_PAUSE_CAP,
	PARAM_LP_1000FDX_CAP,
	PARAM_LP_1000HDX_CAP,
	PARAM_LP_100T4_CAP,
	PARAM_LP_100FDX_CAP,
	PARAM_LP_100HDX_CAP,
	PARAM_LP_10FDX_CAP,
	PARAM_LP_10HDX_CAP,

	/* current operating state (read-only) */
	PARAM_LINK_STATUS,
	PARAM_LINK_SPEED,
	PARAM_LINK_DUPLEX,

	PARAM_LINK_AUTONEG,
	PARAM_LINK_RX_PAUSE,
	PARAM_LINK_TX_PAUSE,

	PARAM_LOOP_MODE,
	PARAM_MSI_CNT,

#ifdef DEBUG_RESUME
	PARAM_RESUME_TEST,
#endif
	PARAM_COUNT	/* number of parameters; must be last */
};
3410 
/*
 * Result codes returned by the ioctl helpers; gem_mac_ioctl() maps
 * them onto the STREAMS reply it sends (ACK, NAK, or prepared reply).
 */
enum ioc_reply {
	IOC_INVAL = -1,				/* bad, NAK with EINVAL	*/
	IOC_DONE,				/* OK, reply sent	*/
	IOC_ACK,				/* OK, just send ACK	*/
	IOC_REPLY,				/* OK, just send reply	*/
	IOC_RESTART_ACK,			/* OK, restart & ACK	*/
	IOC_RESTART_REPLY			/* OK, restart & reply	*/
};

/*
 * Cookie passed to gem_param_get()/gem_param_set() through nd_load();
 * identifies the device and which parameter the callback serves.
 */
struct gem_nd_arg {
	struct gem_dev	*dp;	/* owning device */
	int		item;	/* PARAM_* index */
};
3424 
3425 static int
3426 gem_param_get(queue_t *q, mblk_t *mp, caddr_t arg, cred_t *credp)
3427 {
3428 	struct gem_dev	*dp = ((struct gem_nd_arg *)(void *)arg)->dp;
3429 	int		item = ((struct gem_nd_arg *)(void *)arg)->item;
3430 	long		val;
3431 
3432 	DPRINTF(0, (CE_CONT, "!%s: %s: called, item:%d",
3433 	    dp->name, __func__, item));
3434 
3435 	switch (item) {
3436 	case PARAM_AUTONEG_CAP:
3437 		val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
3438 		DPRINTF(0, (CE_CONT, "autoneg_cap:%d", val));
3439 		break;
3440 
3441 	case PARAM_PAUSE_CAP:
3442 		val = BOOLEAN(dp->gc.gc_flow_control & 1);
3443 		break;
3444 
3445 	case PARAM_ASYM_PAUSE_CAP:
3446 		val = BOOLEAN(dp->gc.gc_flow_control & 2);
3447 		break;
3448 
3449 	case PARAM_1000FDX_CAP:
3450 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
3451 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
3452 		break;
3453 
3454 	case PARAM_1000HDX_CAP:
3455 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
3456 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
3457 		break;
3458 
3459 	case PARAM_100T4_CAP:
3460 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
3461 		break;
3462 
3463 	case PARAM_100FDX_CAP:
3464 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
3465 		break;
3466 
3467 	case PARAM_100HDX_CAP:
3468 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
3469 		break;
3470 
3471 	case PARAM_10FDX_CAP:
3472 		val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
3473 		break;
3474 
3475 	case PARAM_10HDX_CAP:
3476 		val = BOOLEAN(dp->mii_status & MII_STATUS_10);
3477 		break;
3478 
3479 	case PARAM_ADV_AUTONEG_CAP:
3480 		val = dp->anadv_autoneg;
3481 		break;
3482 
3483 	case PARAM_ADV_PAUSE_CAP:
3484 		val = BOOLEAN(dp->anadv_flow_control & 1);
3485 		break;
3486 
3487 	case PARAM_ADV_ASYM_PAUSE_CAP:
3488 		val = BOOLEAN(dp->anadv_flow_control & 2);
3489 		break;
3490 
3491 	case PARAM_ADV_1000FDX_CAP:
3492 		val = dp->anadv_1000fdx;
3493 		break;
3494 
3495 	case PARAM_ADV_1000HDX_CAP:
3496 		val = dp->anadv_1000hdx;
3497 		break;
3498 
3499 	case PARAM_ADV_100T4_CAP:
3500 		val = dp->anadv_100t4;
3501 		break;
3502 
3503 	case PARAM_ADV_100FDX_CAP:
3504 		val = dp->anadv_100fdx;
3505 		break;
3506 
3507 	case PARAM_ADV_100HDX_CAP:
3508 		val = dp->anadv_100hdx;
3509 		break;
3510 
3511 	case PARAM_ADV_10FDX_CAP:
3512 		val = dp->anadv_10fdx;
3513 		break;
3514 
3515 	case PARAM_ADV_10HDX_CAP:
3516 		val = dp->anadv_10hdx;
3517 		break;
3518 
3519 	case PARAM_LP_AUTONEG_CAP:
3520 		val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3521 		break;
3522 
3523 	case PARAM_LP_PAUSE_CAP:
3524 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
3525 		break;
3526 
3527 	case PARAM_LP_ASYM_PAUSE_CAP:
3528 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASM_DIR);
3529 		break;
3530 
3531 	case PARAM_LP_1000FDX_CAP:
3532 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
3533 		break;
3534 
3535 	case PARAM_LP_1000HDX_CAP:
3536 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
3537 		break;
3538 
3539 	case PARAM_LP_100T4_CAP:
3540 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
3541 		break;
3542 
3543 	case PARAM_LP_100FDX_CAP:
3544 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
3545 		break;
3546 
3547 	case PARAM_LP_100HDX_CAP:
3548 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
3549 		break;
3550 
3551 	case PARAM_LP_10FDX_CAP:
3552 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
3553 		break;
3554 
3555 	case PARAM_LP_10HDX_CAP:
3556 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
3557 		break;
3558 
3559 	case PARAM_LINK_STATUS:
3560 		val = (dp->mii_state == MII_STATE_LINKUP);
3561 		break;
3562 
3563 	case PARAM_LINK_SPEED:
3564 		val = gem_speed_value[dp->speed];
3565 		break;
3566 
3567 	case PARAM_LINK_DUPLEX:
3568 		val = 0;
3569 		if (dp->mii_state == MII_STATE_LINKUP) {
3570 			val = dp->full_duplex ? 2 : 1;
3571 		}
3572 		break;
3573 
3574 	case PARAM_LINK_AUTONEG:
3575 		val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3576 		break;
3577 
3578 	case PARAM_LINK_RX_PAUSE:
3579 		val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3580 		    (dp->flow_control == FLOW_CONTROL_RX_PAUSE);
3581 		break;
3582 
3583 	case PARAM_LINK_TX_PAUSE:
3584 		val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3585 		    (dp->flow_control == FLOW_CONTROL_TX_PAUSE);
3586 		break;
3587 
3588 #ifdef DEBUG_RESUME
3589 	case PARAM_RESUME_TEST:
3590 		val = 0;
3591 		break;
3592 #endif
3593 	default:
3594 		cmn_err(CE_WARN, "%s: unimplemented ndd control (%d)",
3595 		    dp->name, item);
3596 		break;
3597 	}
3598 
3599 	(void) mi_mpprintf(mp, "%ld", val);
3600 
3601 	return (0);
3602 }
3603 
/*
 * ndd "set" handler for the writable link parameters registered in
 * gem_nd_setup().  Parses the decimal value string, validates it as a
 * boolean and against the PHY capability bits, stores the new
 * advertised setting, then restarts link negotiation so that the
 * change takes effect.  Returns 0 on success or EINVAL on a malformed
 * or unsupported value.
 */
static int
gem_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t arg, cred_t *credp)
{
	struct gem_dev	*dp = ((struct gem_nd_arg *)(void *)arg)->dp;
	int		item = ((struct gem_nd_arg *)(void *)arg)->item;
	long		val;
	char		*end;

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
	if (ddi_strtol(value, &end, 10, &val)) {
		return (EINVAL);
	}
	if (end == value) {
		/* no digits were consumed at all */
		return (EINVAL);
	}

	/*
	 * Only the parameters registered with a non-NULL set handler in
	 * gem_nd_setup() can reach here, hence no default case below.
	 * Every writable parameter is a 0/1 flag; setting 1 is refused
	 * when the PHY does not report the matching capability.
	 */
	switch (item) {
	case PARAM_ADV_AUTONEG_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_CANAUTONEG) == 0) {
			goto err;
		}
		dp->anadv_autoneg = (int)val;
		break;

	case PARAM_ADV_PAUSE_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val) {
			dp->anadv_flow_control |= 1;
		} else {
			dp->anadv_flow_control &= ~1;
		}
		break;

	case PARAM_ADV_ASYM_PAUSE_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val) {
			dp->anadv_flow_control |= 2;
		} else {
			dp->anadv_flow_control &= ~2;
		}
		break;

	case PARAM_ADV_1000FDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_xstatus &
		    (MII_XSTATUS_1000BASET_FD |
		    MII_XSTATUS_1000BASEX_FD)) == 0) {
			goto err;
		}
		dp->anadv_1000fdx = (int)val;
		break;

	case PARAM_ADV_1000HDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_xstatus &
		    (MII_XSTATUS_1000BASET | MII_XSTATUS_1000BASEX)) == 0) {
			goto err;
		}
		dp->anadv_1000hdx = (int)val;
		break;

	case PARAM_ADV_100T4_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_100_BASE_T4) == 0) {
			goto err;
		}
		dp->anadv_100t4 = (int)val;
		break;

	case PARAM_ADV_100FDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_100_BASEX_FD) == 0) {
			goto err;
		}
		dp->anadv_100fdx = (int)val;
		break;

	case PARAM_ADV_100HDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_100_BASEX) == 0) {
			goto err;
		}
		dp->anadv_100hdx = (int)val;
		break;

	case PARAM_ADV_10FDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_10_FD) == 0) {
			goto err;
		}
		dp->anadv_10fdx = (int)val;
		break;

	case PARAM_ADV_10HDX_CAP:
		if (val != 0 && val != 1) {
			goto err;
		}
		if (val && (dp->mii_status & MII_STATUS_10) == 0) {
			goto err;
		}
		dp->anadv_10hdx = (int)val;
		break;
	}

	/* sync with PHY */
	gem_choose_forcedmode(dp);

	/* force the MII state machine to renegotiate with the new settings */
	dp->mii_state = MII_STATE_UNKNOWN;
	if (dp->gc.gc_mii_hw_link_detection && dp->link_watcher_id == 0) {
		/* XXX - Can we ignore the return code ? */
		(void) gem_mii_link_check(dp);
	}

	return (0);
err:
	return (EINVAL);
}
3740 
3741 static void
3742 gem_nd_load(struct gem_dev *dp, char *name, ndgetf_t gf, ndsetf_t sf, int item)
3743 {
3744 	struct gem_nd_arg	*arg;
3745 
3746 	ASSERT(item >= 0);
3747 	ASSERT(item < PARAM_COUNT);
3748 
3749 	arg = &((struct gem_nd_arg *)(void *)dp->nd_arg_p)[item];
3750 	arg->dp = dp;
3751 	arg->item = item;
3752 
3753 	DPRINTF(2, (CE_CONT, "!%s: %s: name:%s, item:%d",
3754 	    dp->name, __func__, name, item));
3755 	(void) nd_load(&dp->nd_data_p, name, gf, sf, (caddr_t)arg);
3756 }
3757 
/*
 * Register the full set of ndd parameters for this device.  One
 * gem_nd_arg cookie is allocated per PARAM_* item; read-only
 * parameters get a NULL set handler, while the writable "adv_*"
 * parameters get gem_param_set only when the PHY reports the matching
 * capability (see SETFUNC below).  Undone by gem_nd_cleanup().
 */
static void
gem_nd_setup(struct gem_dev *dp)
{
	DPRINTF(0, (CE_CONT, "!%s: %s: called, mii_status:0x%b",
	    dp->name, __func__, dp->mii_status, MII_STATUS_BITS));

	ASSERT(dp->nd_arg_p == NULL);

	dp->nd_arg_p =
	    kmem_zalloc(sizeof (struct gem_nd_arg) * PARAM_COUNT, KM_SLEEP);

	/* yields gem_param_set when the capability test holds, else NULL */
#define	SETFUNC(x)	((x) ? gem_param_set : NULL)

	gem_nd_load(dp, "autoneg_cap",
	    gem_param_get, NULL, PARAM_AUTONEG_CAP);
	gem_nd_load(dp, "pause_cap",
	    gem_param_get, NULL, PARAM_PAUSE_CAP);
	gem_nd_load(dp, "asym_pause_cap",
	    gem_param_get, NULL, PARAM_ASYM_PAUSE_CAP);
	gem_nd_load(dp, "1000fdx_cap",
	    gem_param_get, NULL, PARAM_1000FDX_CAP);
	gem_nd_load(dp, "1000hdx_cap",
	    gem_param_get, NULL, PARAM_1000HDX_CAP);
	gem_nd_load(dp, "100T4_cap",
	    gem_param_get, NULL, PARAM_100T4_CAP);
	gem_nd_load(dp, "100fdx_cap",
	    gem_param_get, NULL, PARAM_100FDX_CAP);
	gem_nd_load(dp, "100hdx_cap",
	    gem_param_get, NULL, PARAM_100HDX_CAP);
	gem_nd_load(dp, "10fdx_cap",
	    gem_param_get, NULL, PARAM_10FDX_CAP);
	gem_nd_load(dp, "10hdx_cap",
	    gem_param_get, NULL, PARAM_10HDX_CAP);

	/* Our advertised capabilities */
	gem_nd_load(dp, "adv_autoneg_cap", gem_param_get,
	    SETFUNC(dp->mii_status & MII_STATUS_CANAUTONEG),
	    PARAM_ADV_AUTONEG_CAP);
	gem_nd_load(dp, "adv_pause_cap", gem_param_get,
	    SETFUNC(dp->gc.gc_flow_control & 1),
	    PARAM_ADV_PAUSE_CAP);
	gem_nd_load(dp, "adv_asym_pause_cap", gem_param_get,
	    SETFUNC(dp->gc.gc_flow_control & 2),
	    PARAM_ADV_ASYM_PAUSE_CAP);
	gem_nd_load(dp, "adv_1000fdx_cap", gem_param_get,
	    SETFUNC(dp->mii_xstatus &
	    (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD)),
	    PARAM_ADV_1000FDX_CAP);
	gem_nd_load(dp, "adv_1000hdx_cap", gem_param_get,
	    SETFUNC(dp->mii_xstatus &
	    (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET)),
	    PARAM_ADV_1000HDX_CAP);
	gem_nd_load(dp, "adv_100T4_cap", gem_param_get,
	    SETFUNC((dp->mii_status & MII_STATUS_100_BASE_T4) &&
	    !dp->mii_advert_ro),
	    PARAM_ADV_100T4_CAP);
	gem_nd_load(dp, "adv_100fdx_cap", gem_param_get,
	    SETFUNC((dp->mii_status & MII_STATUS_100_BASEX_FD) &&
	    !dp->mii_advert_ro),
	    PARAM_ADV_100FDX_CAP);
	gem_nd_load(dp, "adv_100hdx_cap", gem_param_get,
	    SETFUNC((dp->mii_status & MII_STATUS_100_BASEX) &&
	    !dp->mii_advert_ro),
	    PARAM_ADV_100HDX_CAP);
	gem_nd_load(dp, "adv_10fdx_cap", gem_param_get,
	    SETFUNC((dp->mii_status & MII_STATUS_10_FD) &&
	    !dp->mii_advert_ro),
	    PARAM_ADV_10FDX_CAP);
	gem_nd_load(dp, "adv_10hdx_cap", gem_param_get,
	    SETFUNC((dp->mii_status & MII_STATUS_10) &&
	    !dp->mii_advert_ro),
	    PARAM_ADV_10HDX_CAP);

	/* Partner's advertised capabilities */
	gem_nd_load(dp, "lp_autoneg_cap",
	    gem_param_get, NULL, PARAM_LP_AUTONEG_CAP);
	gem_nd_load(dp, "lp_pause_cap",
	    gem_param_get, NULL, PARAM_LP_PAUSE_CAP);
	gem_nd_load(dp, "lp_asym_pause_cap",
	    gem_param_get, NULL, PARAM_LP_ASYM_PAUSE_CAP);
	gem_nd_load(dp, "lp_1000fdx_cap",
	    gem_param_get, NULL, PARAM_LP_1000FDX_CAP);
	gem_nd_load(dp, "lp_1000hdx_cap",
	    gem_param_get, NULL, PARAM_LP_1000HDX_CAP);
	gem_nd_load(dp, "lp_100T4_cap",
	    gem_param_get, NULL, PARAM_LP_100T4_CAP);
	gem_nd_load(dp, "lp_100fdx_cap",
	    gem_param_get, NULL, PARAM_LP_100FDX_CAP);
	gem_nd_load(dp, "lp_100hdx_cap",
	    gem_param_get, NULL, PARAM_LP_100HDX_CAP);
	gem_nd_load(dp, "lp_10fdx_cap",
	    gem_param_get, NULL, PARAM_LP_10FDX_CAP);
	gem_nd_load(dp, "lp_10hdx_cap",
	    gem_param_get, NULL, PARAM_LP_10HDX_CAP);

	/* Current operating modes */
	gem_nd_load(dp, "link_status",
	    gem_param_get, NULL, PARAM_LINK_STATUS);
	gem_nd_load(dp, "link_speed",
	    gem_param_get, NULL, PARAM_LINK_SPEED);
	gem_nd_load(dp, "link_duplex",
	    gem_param_get, NULL, PARAM_LINK_DUPLEX);
	gem_nd_load(dp, "link_autoneg",
	    gem_param_get, NULL, PARAM_LINK_AUTONEG);
	gem_nd_load(dp, "link_rx_pause",
	    gem_param_get, NULL, PARAM_LINK_RX_PAUSE);
	gem_nd_load(dp, "link_tx_pause",
	    gem_param_get, NULL, PARAM_LINK_TX_PAUSE);
#ifdef DEBUG_RESUME
	gem_nd_load(dp, "resume_test",
	    gem_param_get, NULL, PARAM_RESUME_TEST);
#endif
#undef	SETFUNC
}
3872 
3873 static
3874 enum ioc_reply
3875 gem_nd_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
3876 {
3877 	boolean_t	ok;
3878 
3879 	ASSERT(mutex_owned(&dp->intrlock));
3880 
3881 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3882 
3883 	switch (iocp->ioc_cmd) {
3884 	case ND_GET:
3885 		ok = nd_getset(wq, dp->nd_data_p, mp);
3886 		DPRINTF(0, (CE_CONT,
3887 		    "%s: get %s", dp->name, ok ? "OK" : "FAIL"));
3888 		return (ok ? IOC_REPLY : IOC_INVAL);
3889 
3890 	case ND_SET:
3891 		ok = nd_getset(wq, dp->nd_data_p, mp);
3892 
3893 		DPRINTF(0, (CE_CONT, "%s: set %s err %d",
3894 		    dp->name, ok ? "OK" : "FAIL", iocp->ioc_error));
3895 
3896 		if (!ok) {
3897 			return (IOC_INVAL);
3898 		}
3899 
3900 		if (iocp->ioc_error) {
3901 			return (IOC_REPLY);
3902 		}
3903 
3904 		return (IOC_RESTART_REPLY);
3905 	}
3906 
3907 	cmn_err(CE_WARN, "%s: invalid cmd 0x%x", dp->name, iocp->ioc_cmd);
3908 
3909 	return (IOC_INVAL);
3910 }
3911 
/*
 * Release the ndd resources allocated by gem_nd_setup(): the nd_load
 * table and the per-parameter gem_nd_arg array.
 */
static void
gem_nd_cleanup(struct gem_dev *dp)
{
	ASSERT(dp->nd_data_p != NULL);
	ASSERT(dp->nd_arg_p != NULL);

	nd_free(&dp->nd_data_p);

	kmem_free(dp->nd_arg_p, sizeof (struct gem_nd_arg) * PARAM_COUNT);
	dp->nd_arg_p = NULL;
}
3923 
/*
 * Handle an M_IOCTL message from the MAC layer: dispatch ND_GET/ND_SET
 * to gem_nd_ioctl() under intrlock/xmitlock, then send the STREAMS
 * reply (ACK, NAK, or prepared reply) chosen by the ioc_reply code.
 */
static void
gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp)
{
	struct iocblk	*iocp;
	enum ioc_reply	status;
	int		cmd;

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/*
	 * Validate the command before bothering with the mutex ...
	 */
	iocp = (void *)mp->b_rptr;
	iocp->ioc_error = 0;
	cmd = iocp->ioc_cmd;

	DPRINTF(0, (CE_CONT, "%s: %s cmd:0x%x", dp->name, __func__, cmd));

	mutex_enter(&dp->intrlock);
	mutex_enter(&dp->xmitlock);

	switch (cmd) {
	default:
		/* only ND_GET/ND_SET are routed here by the framework */
		_NOTE(NOTREACHED)
		status = IOC_INVAL;
		break;

	case ND_GET:
	case ND_SET:
		status = gem_nd_ioctl(dp, wq, mp, iocp);
		break;
	}

	mutex_exit(&dp->xmitlock);
	mutex_exit(&dp->intrlock);

#ifdef DEBUG_RESUME
	if (cmd == ND_GET)  {
		gem_suspend(dp->dip);
		gem_resume(dp->dip);
	}
#endif
	/*
	 * Finally, decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_RESTART_ACK:
	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(wq, mp, 0, 0);
		break;

	case IOC_RESTART_REPLY:
	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type =
		    iocp->ioc_error == 0 ? M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	}
}
4004 
#ifndef SYS_MAC_H
/*
 * Transceiver type codes for ETHER_STAT_XCVR_INUSE, defined locally
 * when <sys/mac.h> has not already supplied them.
 */
#define	XCVR_UNDEFINED	0
#define	XCVR_NONE	1
#define	XCVR_10		2
#define	XCVR_100T4	3
#define	XCVR_100X	4
#define	XCVR_100T2	5
#define	XCVR_1000X	6
#define	XCVR_1000T	7
#endif
4015 static int
4016 gem_mac_xcvr_inuse(struct gem_dev *dp)
4017 {
4018 	int	val = XCVR_UNDEFINED;
4019 
4020 	if ((dp->mii_status & MII_STATUS_XSTATUS) == 0) {
4021 		if (dp->mii_status & MII_STATUS_100_BASE_T4) {
4022 			val = XCVR_100T4;
4023 		} else if (dp->mii_status &
4024 		    (MII_STATUS_100_BASEX_FD |
4025 		    MII_STATUS_100_BASEX)) {
4026 			val = XCVR_100X;
4027 		} else if (dp->mii_status &
4028 		    (MII_STATUS_100_BASE_T2_FD |
4029 		    MII_STATUS_100_BASE_T2)) {
4030 			val = XCVR_100T2;
4031 		} else if (dp->mii_status &
4032 		    (MII_STATUS_10_FD | MII_STATUS_10)) {
4033 			val = XCVR_10;
4034 		}
4035 	} else if (dp->mii_xstatus &
4036 	    (MII_XSTATUS_1000BASET_FD | MII_XSTATUS_1000BASET)) {
4037 		val = XCVR_1000T;
4038 	} else if (dp->mii_xstatus &
4039 	    (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASEX)) {
4040 		val = XCVR_1000X;
4041 	}
4042 
4043 	return (val);
4044 }
4045 
4046 /* ============================================================== */
4047 /*
4048  * GLDv3 interface
4049  */
4050 /* ============================================================== */
static int		gem_m_getstat(void *, uint_t, uint64_t *);
static int		gem_m_start(void *);
static void		gem_m_stop(void *);
static int		gem_m_setpromisc(void *, boolean_t);
static int		gem_m_multicst(void *, boolean_t, const uint8_t *);
static int		gem_m_unicst(void *, const uint8_t *);
static mblk_t		*gem_m_tx(void *, mblk_t *);
static void		gem_m_ioctl(void *, queue_t *, mblk_t *);
static boolean_t	gem_m_getcapab(void *, mac_capab_t, void *);

/* flags declaring which optional callbacks below are implemented */
#define	GEM_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)

/* GLDv3 entry point vector for this driver framework */
static mac_callbacks_t gem_m_callbacks = {
	GEM_M_CALLBACK_FLAGS,
	gem_m_getstat,
	gem_m_start,
	gem_m_stop,
	gem_m_setpromisc,
	gem_m_multicst,
	gem_m_unicst,
	gem_m_tx,
	gem_m_ioctl,
	gem_m_getcapab,
};
4075 
/*
 * GLDv3 mc_start entry point: bring the controller online.
 * Initializes the hardware, resets the rx filter state, programs the
 * station address, starts tx/rx if the link is already up, and arms
 * the tx timeout watcher.  Returns 0 on success or EIO on failure
 * (including when the device is suspended).
 */
static int
gem_m_start(void *arg)
{
	int		err = 0;
	struct gem_dev *dp = arg;

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	mutex_enter(&dp->intrlock);
	if (dp->mac_suspended) {
		err = EIO;
		goto x;
	}
	if (gem_mac_init(dp) != GEM_SUCCESS) {
		err = EIO;
		goto x;
	}
	dp->nic_state = NIC_STATE_INITIALIZED;

	/* reset rx filter state */
	dp->mc_count = 0;
	dp->mc_count_req = 0;

	/* setup media mode if the link have been up */
	if (dp->mii_state == MII_STATE_LINKUP) {
		(dp->gc.gc_set_media)(dp);
	}

	/* setup initial rx filter */
	bcopy(dp->dev_addr.ether_addr_octet,
	    dp->cur_addr.ether_addr_octet, ETHERADDRL);
	dp->rxmode |= RXMODE_ENABLE;

	if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
		err = EIO;
		goto x;
	}

	dp->nic_state = NIC_STATE_ONLINE;
	if (dp->mii_state == MII_STATE_LINKUP) {
		if (gem_mac_start(dp) != GEM_SUCCESS) {
			err = EIO;
			goto x;
		}
	}

	/* arm the periodic tx timeout watcher */
	dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
	    (void *)dp, dp->gc.gc_tx_timeout_interval);
	mutex_exit(&dp->intrlock);

	return (0);
x:
	/* error exit: leave the nic marked stopped */
	dp->nic_state = NIC_STATE_STOPPED;
	mutex_exit(&dp->intrlock);
	return (err);
}
4132 
/*
 * GLDv3 mc_stop entry point: take the controller offline.
 * Disables rx, cancels the tx timeout watcher (with intrlock dropped,
 * since the timeout handler may need it), deasserts mac_active, waits
 * for any in-flight interrupt handler to drain, then stops the
 * hardware.  Returns silently if the device has been suspended.
 */
static void
gem_m_stop(void *arg)
{
	struct gem_dev	*dp = arg;

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	/* stop rx */
	mutex_enter(&dp->intrlock);
	if (dp->mac_suspended) {
		mutex_exit(&dp->intrlock);
		return;
	}
	dp->rxmode &= ~RXMODE_ENABLE;
	(void) gem_mac_set_rx_filter(dp);
	mutex_exit(&dp->intrlock);

	/* stop tx timeout watcher */
	if (dp->timeout_id) {
		while (untimeout(dp->timeout_id) == -1)
			;
		dp->timeout_id = 0;
	}

	/* make the nic state inactive */
	mutex_enter(&dp->intrlock);
	/* re-check: the device may have been suspended while unlocked */
	if (dp->mac_suspended) {
		mutex_exit(&dp->intrlock);
		return;
	}
	dp->nic_state = NIC_STATE_STOPPED;

	/* we need deassert mac_active due to block interrupt handler */
	mutex_enter(&dp->xmitlock);
	dp->mac_active = B_FALSE;
	mutex_exit(&dp->xmitlock);

	/* block interrupts */
	while (dp->intr_busy) {
		cv_wait(&dp->tx_drain_cv, &dp->intrlock);
	}
	(void) gem_mac_stop(dp, 0);
	mutex_exit(&dp->intrlock);
}
4177 
4178 static int
4179 gem_m_multicst(void *arg, boolean_t add, const uint8_t *ep)
4180 {
4181 	int		err;
4182 	int		ret;
4183 	struct gem_dev	*dp = arg;
4184 
4185 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4186 
4187 	if (add) {
4188 		ret = gem_add_multicast(dp, ep);
4189 	} else {
4190 		ret = gem_remove_multicast(dp, ep);
4191 	}
4192 
4193 	err = 0;
4194 	if (ret != GEM_SUCCESS) {
4195 		err = EIO;
4196 	}
4197 
4198 	return (err);
4199 }
4200 
4201 static int
4202 gem_m_setpromisc(void *arg, boolean_t on)
4203 {
4204 	int		err = 0;	/* no error */
4205 	struct gem_dev	*dp = arg;
4206 
4207 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4208 
4209 	mutex_enter(&dp->intrlock);
4210 	if (dp->mac_suspended) {
4211 		mutex_exit(&dp->intrlock);
4212 		return (EIO);
4213 	}
4214 	if (on) {
4215 		dp->rxmode |= RXMODE_PROMISC;
4216 	} else {
4217 		dp->rxmode &= ~RXMODE_PROMISC;
4218 	}
4219 
4220 	if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4221 		err = EIO;
4222 	}
4223 	mutex_exit(&dp->intrlock);
4224 
4225 	return (err);
4226 }
4227 
4228 int
4229 gem_m_getstat(void *arg, uint_t stat, uint64_t *valp)
4230 {
4231 	struct gem_dev		*dp = arg;
4232 	struct gem_stats	*gstp = &dp->stats;
4233 	uint64_t		val = 0;
4234 
4235 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4236 
4237 	if (mutex_owned(&dp->intrlock)) {
4238 		if (dp->mac_suspended) {
4239 			return (EIO);
4240 		}
4241 	} else {
4242 		mutex_enter(&dp->intrlock);
4243 		if (dp->mac_suspended) {
4244 			mutex_exit(&dp->intrlock);
4245 			return (EIO);
4246 		}
4247 		mutex_exit(&dp->intrlock);
4248 	}
4249 
4250 	if ((*dp->gc.gc_get_stats)(dp) != GEM_SUCCESS) {
4251 		return (EIO);
4252 	}
4253 
4254 	switch (stat) {
4255 	case MAC_STAT_IFSPEED:
4256 		val = gem_speed_value[dp->speed] *1000000ull;
4257 		break;
4258 
4259 	case MAC_STAT_MULTIRCV:
4260 		val = gstp->rmcast;
4261 		break;
4262 
4263 	case MAC_STAT_BRDCSTRCV:
4264 		val = gstp->rbcast;
4265 		break;
4266 
4267 	case MAC_STAT_MULTIXMT:
4268 		val = gstp->omcast;
4269 		break;
4270 
4271 	case MAC_STAT_BRDCSTXMT:
4272 		val = gstp->obcast;
4273 		break;
4274 
4275 	case MAC_STAT_NORCVBUF:
4276 		val = gstp->norcvbuf + gstp->missed;
4277 		break;
4278 
4279 	case MAC_STAT_IERRORS:
4280 		val = gstp->errrcv;
4281 		break;
4282 
4283 	case MAC_STAT_NOXMTBUF:
4284 		val = gstp->noxmtbuf;
4285 		break;
4286 
4287 	case MAC_STAT_OERRORS:
4288 		val = gstp->errxmt;
4289 		break;
4290 
4291 	case MAC_STAT_COLLISIONS:
4292 		val = gstp->collisions;
4293 		break;
4294 
4295 	case MAC_STAT_RBYTES:
4296 		val = gstp->rbytes;
4297 		break;
4298 
4299 	case MAC_STAT_IPACKETS:
4300 		val = gstp->rpackets;
4301 		break;
4302 
4303 	case MAC_STAT_OBYTES:
4304 		val = gstp->obytes;
4305 		break;
4306 
4307 	case MAC_STAT_OPACKETS:
4308 		val = gstp->opackets;
4309 		break;
4310 
4311 	case MAC_STAT_UNDERFLOWS:
4312 		val = gstp->underflow;
4313 		break;
4314 
4315 	case MAC_STAT_OVERFLOWS:
4316 		val = gstp->overflow;
4317 		break;
4318 
4319 	case ETHER_STAT_ALIGN_ERRORS:
4320 		val = gstp->frame;
4321 		break;
4322 
4323 	case ETHER_STAT_FCS_ERRORS:
4324 		val = gstp->crc;
4325 		break;
4326 
4327 	case ETHER_STAT_FIRST_COLLISIONS:
4328 		val = gstp->first_coll;
4329 		break;
4330 
4331 	case ETHER_STAT_MULTI_COLLISIONS:
4332 		val = gstp->multi_coll;
4333 		break;
4334 
4335 	case ETHER_STAT_SQE_ERRORS:
4336 		val = gstp->sqe;
4337 		break;
4338 
4339 	case ETHER_STAT_DEFER_XMTS:
4340 		val = gstp->defer;
4341 		break;
4342 
4343 	case ETHER_STAT_TX_LATE_COLLISIONS:
4344 		val = gstp->xmtlatecoll;
4345 		break;
4346 
4347 	case ETHER_STAT_EX_COLLISIONS:
4348 		val = gstp->excoll;
4349 		break;
4350 
4351 	case ETHER_STAT_MACXMT_ERRORS:
4352 		val = gstp->xmit_internal_err;
4353 		break;
4354 
4355 	case ETHER_STAT_CARRIER_ERRORS:
4356 		val = gstp->nocarrier;
4357 		break;
4358 
4359 	case ETHER_STAT_TOOLONG_ERRORS:
4360 		val = gstp->frame_too_long;
4361 		break;
4362 
4363 	case ETHER_STAT_MACRCV_ERRORS:
4364 		val = gstp->rcv_internal_err;
4365 		break;
4366 
4367 	case ETHER_STAT_XCVR_ADDR:
4368 		val = dp->mii_phy_addr;
4369 		break;
4370 
4371 	case ETHER_STAT_XCVR_ID:
4372 		val = dp->mii_phy_id;
4373 		break;
4374 
4375 	case ETHER_STAT_XCVR_INUSE:
4376 		val = gem_mac_xcvr_inuse(dp);
4377 		break;
4378 
4379 	case ETHER_STAT_CAP_1000FDX:
4380 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
4381 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
4382 		break;
4383 
4384 	case ETHER_STAT_CAP_1000HDX:
4385 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
4386 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
4387 		break;
4388 
4389 	case ETHER_STAT_CAP_100FDX:
4390 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4391 		break;
4392 
4393 	case ETHER_STAT_CAP_100HDX:
4394 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4395 		break;
4396 
4397 	case ETHER_STAT_CAP_10FDX:
4398 		val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4399 		break;
4400 
4401 	case ETHER_STAT_CAP_10HDX:
4402 		val = BOOLEAN(dp->mii_status & MII_STATUS_10);
4403 		break;
4404 
4405 	case ETHER_STAT_CAP_ASMPAUSE:
4406 		val = BOOLEAN(dp->gc.gc_flow_control & 2);
4407 		break;
4408 
4409 	case ETHER_STAT_CAP_PAUSE:
4410 		val = BOOLEAN(dp->gc.gc_flow_control & 1);
4411 		break;
4412 
4413 	case ETHER_STAT_CAP_AUTONEG:
4414 		val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4415 		break;
4416 
4417 	case ETHER_STAT_ADV_CAP_1000FDX:
4418 		val = dp->anadv_1000fdx;
4419 		break;
4420 
4421 	case ETHER_STAT_ADV_CAP_1000HDX:
4422 		val = dp->anadv_1000hdx;
4423 		break;
4424 
4425 	case ETHER_STAT_ADV_CAP_100FDX:
4426 		val = dp->anadv_100fdx;
4427 		break;
4428 
4429 	case ETHER_STAT_ADV_CAP_100HDX:
4430 		val = dp->anadv_100hdx;
4431 		break;
4432 
4433 	case ETHER_STAT_ADV_CAP_10FDX:
4434 		val = dp->anadv_10fdx;
4435 		break;
4436 
4437 	case ETHER_STAT_ADV_CAP_10HDX:
4438 		val = dp->anadv_10hdx;
4439 		break;
4440 
4441 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
4442 		val = BOOLEAN(dp->anadv_flow_control & 2);
4443 		break;
4444 
4445 	case ETHER_STAT_ADV_CAP_PAUSE:
4446 		val = BOOLEAN(dp->anadv_flow_control & 1);
4447 		break;
4448 
4449 	case ETHER_STAT_ADV_CAP_AUTONEG:
4450 		val = dp->anadv_autoneg;
4451 		break;
4452 
4453 	case ETHER_STAT_LP_CAP_1000FDX:
4454 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
4455 		break;
4456 
4457 	case ETHER_STAT_LP_CAP_1000HDX:
4458 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
4459 		break;
4460 
4461 	case ETHER_STAT_LP_CAP_100FDX:
4462 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
4463 		break;
4464 
4465 	case ETHER_STAT_LP_CAP_100HDX:
4466 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
4467 		break;
4468 
4469 	case ETHER_STAT_LP_CAP_10FDX:
4470 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
4471 		break;
4472 
4473 	case ETHER_STAT_LP_CAP_10HDX:
4474 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
4475 		break;
4476 
4477 	case ETHER_STAT_LP_CAP_ASMPAUSE:
4478 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASM_DIR);
4479 		break;
4480 
4481 	case ETHER_STAT_LP_CAP_PAUSE:
4482 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
4483 		break;
4484 
4485 	case ETHER_STAT_LP_CAP_AUTONEG:
4486 		val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4487 		break;
4488 
4489 	case ETHER_STAT_LINK_ASMPAUSE:
4490 		val = BOOLEAN(dp->flow_control & 2);
4491 		break;
4492 
4493 	case ETHER_STAT_LINK_PAUSE:
4494 		val = BOOLEAN(dp->flow_control & 1);
4495 		break;
4496 
4497 	case ETHER_STAT_LINK_AUTONEG:
4498 		val = dp->anadv_autoneg &&
4499 		    BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4500 		break;
4501 
4502 	case ETHER_STAT_LINK_DUPLEX:
4503 		val = (dp->mii_state == MII_STATE_LINKUP) ?
4504 		    (dp->full_duplex ? 2 : 1) : 0;
4505 		break;
4506 
4507 	case ETHER_STAT_TOOSHORT_ERRORS:
4508 		val = gstp->runt;
4509 		break;
4510 	case ETHER_STAT_LP_REMFAULT:
4511 		val = BOOLEAN(dp->mii_lpable & MII_AN_ADVERT_REMFAULT);
4512 		break;
4513 
4514 	case ETHER_STAT_JABBER_ERRORS:
4515 		val = gstp->jabber;
4516 		break;
4517 
4518 	case ETHER_STAT_CAP_100T4:
4519 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4520 		break;
4521 
4522 	case ETHER_STAT_ADV_CAP_100T4:
4523 		val = dp->anadv_100t4;
4524 		break;
4525 
4526 	case ETHER_STAT_LP_CAP_100T4:
4527 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
4528 		break;
4529 
4530 	default:
4531 #if GEM_DEBUG_LEVEL > 2
4532 		cmn_err(CE_WARN,
4533 		    "%s: unrecognized parameter value = %d",
4534 		    __func__, stat);
4535 #endif
4536 		return (ENOTSUP);
4537 	}
4538 
4539 	*valp = val;
4540 
4541 	return (0);
4542 }
4543 
4544 static int
4545 gem_m_unicst(void *arg, const uint8_t *mac)
4546 {
4547 	int		err = 0;
4548 	struct gem_dev	*dp = arg;
4549 
4550 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4551 
4552 	mutex_enter(&dp->intrlock);
4553 	if (dp->mac_suspended) {
4554 		mutex_exit(&dp->intrlock);
4555 		return (EIO);
4556 	}
4557 	bcopy(mac, dp->cur_addr.ether_addr_octet, ETHERADDRL);
4558 	dp->rxmode |= RXMODE_ENABLE;
4559 
4560 	if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4561 		err = EIO;
4562 	}
4563 	mutex_exit(&dp->intrlock);
4564 
4565 	return (err);
4566 }
4567 
4568 /*
4569  * gem_m_tx is used only for sending data packets into ethernet wire.
4570  */
4571 static mblk_t *
4572 gem_m_tx(void *arg, mblk_t *mp)
4573 {
4574 	uint32_t	flags = 0;
4575 	struct gem_dev	*dp = arg;
4576 	mblk_t		*tp;
4577 
4578 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4579 
4580 	ASSERT(dp->nic_state == NIC_STATE_ONLINE);
4581 	if (dp->mii_state != MII_STATE_LINKUP) {
4582 		/* Some nics hate to send packets when the link is down. */
4583 		while (mp) {
4584 			tp = mp->b_next;
4585 			mp->b_next = NULL;
4586 			freemsg(mp);
4587 			mp = tp;
4588 		}
4589 		return (NULL);
4590 	}
4591 
4592 	return (gem_send_common(dp, mp, flags));
4593 }
4594 
4595 static void
4596 gem_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
4597 {
4598 	DPRINTF(0, (CE_CONT, "!%s: %s: called",
4599 	    ((struct gem_dev *)arg)->name, __func__));
4600 
4601 	gem_mac_ioctl((struct gem_dev *)arg, wq, mp);
4602 }
4603 
/*
 * gem_m_getcapab: GLDv3 capability query entry point.
 * This framework advertises no optional capabilities (hardware checksum,
 * LSO, etc.), so every query is answered with B_FALSE.
 */
/* ARGSUSED */
static boolean_t
gem_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	return (B_FALSE);
}
4610 
4611 static void
4612 gem_gld3_init(struct gem_dev *dp, mac_register_t *macp)
4613 {
4614 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4615 	macp->m_driver = dp;
4616 	macp->m_dip = dp->dip;
4617 	macp->m_src_addr = dp->dev_addr.ether_addr_octet;
4618 	macp->m_callbacks = &gem_m_callbacks;
4619 	macp->m_min_sdu = 0;
4620 	macp->m_max_sdu = dp->mtu;
4621 
4622 	if (dp->misc_flag & GEM_VLAN) {
4623 		macp->m_margin = VTAG_SIZE;
4624 	}
4625 }
4626 
4627 /* ======================================================================== */
4628 /*
4629  * attach/detatch support
4630  */
4631 /* ======================================================================== */
4632 static void
4633 gem_read_conf(struct gem_dev *dp)
4634 {
4635 	int	val;
4636 
4637 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4638 
4639 	/*
4640 	 * Get media mode infomation from .conf file
4641 	 */
4642 	dp->anadv_autoneg = gem_prop_get_int(dp, "adv_autoneg_cap", 1) != 0;
4643 	dp->anadv_1000fdx = gem_prop_get_int(dp, "adv_1000fdx_cap", 1) != 0;
4644 	dp->anadv_1000hdx = gem_prop_get_int(dp, "adv_1000hdx_cap", 1) != 0;
4645 	dp->anadv_100t4   = gem_prop_get_int(dp, "adv_100T4_cap", 1) != 0;
4646 	dp->anadv_100fdx  = gem_prop_get_int(dp, "adv_100fdx_cap", 1) != 0;
4647 	dp->anadv_100hdx  = gem_prop_get_int(dp, "adv_100hdx_cap", 1) != 0;
4648 	dp->anadv_10fdx   = gem_prop_get_int(dp, "adv_10fdx_cap", 1) != 0;
4649 	dp->anadv_10hdx   = gem_prop_get_int(dp, "adv_10hdx_cap", 1) != 0;
4650 
4651 	if ((ddi_prop_exists(DDI_DEV_T_ANY, dp->dip,
4652 	    DDI_PROP_DONTPASS, "full-duplex"))) {
4653 		dp->full_duplex = gem_prop_get_int(dp, "full-duplex", 1) != 0;
4654 		dp->anadv_autoneg = B_FALSE;
4655 		if (dp->full_duplex) {
4656 			dp->anadv_1000hdx = B_FALSE;
4657 			dp->anadv_100hdx = B_FALSE;
4658 			dp->anadv_10hdx = B_FALSE;
4659 		} else {
4660 			dp->anadv_1000fdx = B_FALSE;
4661 			dp->anadv_100fdx = B_FALSE;
4662 			dp->anadv_10fdx = B_FALSE;
4663 		}
4664 	}
4665 
4666 	if ((val = gem_prop_get_int(dp, "speed", 0)) > 0) {
4667 		dp->anadv_autoneg = B_FALSE;
4668 		switch (val) {
4669 		case 1000:
4670 			dp->speed = GEM_SPD_1000;
4671 			dp->anadv_100t4   = B_FALSE;
4672 			dp->anadv_100fdx  = B_FALSE;
4673 			dp->anadv_100hdx  = B_FALSE;
4674 			dp->anadv_10fdx   = B_FALSE;
4675 			dp->anadv_10hdx   = B_FALSE;
4676 			break;
4677 		case 100:
4678 			dp->speed = GEM_SPD_100;
4679 			dp->anadv_1000fdx = B_FALSE;
4680 			dp->anadv_1000hdx = B_FALSE;
4681 			dp->anadv_10fdx   = B_FALSE;
4682 			dp->anadv_10hdx   = B_FALSE;
4683 			break;
4684 		case 10:
4685 			dp->speed = GEM_SPD_10;
4686 			dp->anadv_1000fdx = B_FALSE;
4687 			dp->anadv_1000hdx = B_FALSE;
4688 			dp->anadv_100t4   = B_FALSE;
4689 			dp->anadv_100fdx  = B_FALSE;
4690 			dp->anadv_100hdx  = B_FALSE;
4691 			break;
4692 		default:
4693 			cmn_err(CE_WARN,
4694 			    "!%s: property %s: illegal value:%d",
4695 			    dp->name, "speed", val);
4696 			dp->anadv_autoneg = B_TRUE;
4697 			break;
4698 		}
4699 	}
4700 
4701 	val = gem_prop_get_int(dp, "flow-control", dp->gc.gc_flow_control);
4702 	if (val > FLOW_CONTROL_RX_PAUSE || val < FLOW_CONTROL_NONE) {
4703 		cmn_err(CE_WARN,
4704 		    "!%s: property %s: illegal value:%d",
4705 		    dp->name, "flow-control", val);
4706 	} else {
4707 		val = min(val, dp->gc.gc_flow_control);
4708 	}
4709 	dp->anadv_flow_control = val;
4710 
4711 	if (gem_prop_get_int(dp, "nointr", 0)) {
4712 		dp->misc_flag |= GEM_NOINTR;
4713 		cmn_err(CE_NOTE, "!%s: polling mode enabled", dp->name);
4714 	}
4715 
4716 	dp->mtu = gem_prop_get_int(dp, "mtu", dp->mtu);
4717 	dp->txthr = gem_prop_get_int(dp, "txthr", dp->txthr);
4718 	dp->rxthr = gem_prop_get_int(dp, "rxthr", dp->rxthr);
4719 	dp->txmaxdma = gem_prop_get_int(dp, "txmaxdma", dp->txmaxdma);
4720 	dp->rxmaxdma = gem_prop_get_int(dp, "rxmaxdma", dp->rxmaxdma);
4721 }
4722 
4723 
4724 /*
4725  * Gem kstat support
4726  */
4727 
4728 #define	GEM_LOCAL_DATA_SIZE(gc)	\
4729 	(sizeof (struct gem_dev) + \
4730 	sizeof (struct mcast_addr) * GEM_MAXMC + \
4731 	sizeof (struct txbuf) * ((gc)->gc_tx_buf_size) + \
4732 	sizeof (void *) * ((gc)->gc_tx_buf_size))
4733 
4734 struct gem_dev *
4735 gem_do_attach(dev_info_t *dip, int port,
4736 	struct gem_conf *gc, void *base, ddi_acc_handle_t *regs_handlep,
4737 	void *lp, int lmsize)
4738 {
4739 	struct gem_dev		*dp;
4740 	int			i;
4741 	ddi_iblock_cookie_t	c;
4742 	mac_register_t		*macp = NULL;
4743 	int			ret;
4744 	int			unit;
4745 	int			nports;
4746 
4747 	unit = ddi_get_instance(dip);
4748 	if ((nports = gc->gc_nports) == 0) {
4749 		nports = 1;
4750 	}
4751 	if (nports == 1) {
4752 		ddi_set_driver_private(dip, NULL);
4753 	}
4754 
4755 	DPRINTF(2, (CE_CONT, "!gem%d: gem_do_attach: called cmd:ATTACH",
4756 	    unit));
4757 
4758 	/*
4759 	 * Allocate soft data structure
4760 	 */
4761 	dp = kmem_zalloc(GEM_LOCAL_DATA_SIZE(gc), KM_SLEEP);
4762 
4763 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
4764 		cmn_err(CE_WARN, "!gem%d: %s: mac_alloc failed",
4765 		    unit, __func__);
4766 		return (NULL);
4767 	}
4768 	/* ddi_set_driver_private(dip, dp); */
4769 
4770 	/* link to private area */
4771 	dp->private = lp;
4772 	dp->priv_size = lmsize;
4773 	dp->mc_list = (struct mcast_addr *)&dp[1];
4774 
4775 	dp->dip = dip;
4776 	(void) sprintf(dp->name, gc->gc_name, nports * unit + port);
4777 
4778 	/*
4779 	 * Get iblock cookie
4780 	 */
4781 	if (ddi_get_iblock_cookie(dip, 0, &c) != DDI_SUCCESS) {
4782 		cmn_err(CE_CONT,
4783 		    "!%s: gem_do_attach: ddi_get_iblock_cookie: failed",
4784 		    dp->name);
4785 		goto err_free_private;
4786 	}
4787 	dp->iblock_cookie = c;
4788 
4789 	/*
4790 	 * Initialize mutex's for this device.
4791 	 */
4792 	mutex_init(&dp->intrlock, NULL, MUTEX_DRIVER, (void *)c);
4793 	mutex_init(&dp->xmitlock, NULL, MUTEX_DRIVER, (void *)c);
4794 	cv_init(&dp->tx_drain_cv, NULL, CV_DRIVER, NULL);
4795 
4796 	/*
4797 	 * configure gem parameter
4798 	 */
4799 	dp->base_addr = base;
4800 	dp->regs_handle = *regs_handlep;
4801 	dp->gc = *gc;
4802 	gc = &dp->gc;
4803 	/* patch for simplify dma resource management */
4804 	gc->gc_tx_max_frags = 1;
4805 	gc->gc_tx_max_descs_per_pkt = 1;
4806 	gc->gc_tx_ring_size = gc->gc_tx_buf_size;
4807 	gc->gc_tx_ring_limit = gc->gc_tx_buf_limit;
4808 	gc->gc_tx_desc_write_oo = B_TRUE;
4809 
4810 	gc->gc_nports = nports;	/* fix nports */
4811 
4812 	/* fix copy threadsholds */
4813 	gc->gc_tx_copy_thresh = max(ETHERMIN, gc->gc_tx_copy_thresh);
4814 	gc->gc_rx_copy_thresh = max(ETHERMIN, gc->gc_rx_copy_thresh);
4815 
4816 	/* fix rx buffer boundary for iocache line size */
4817 	ASSERT(gc->gc_dma_attr_txbuf.dma_attr_align-1 == gc->gc_tx_buf_align);
4818 	ASSERT(gc->gc_dma_attr_rxbuf.dma_attr_align-1 == gc->gc_rx_buf_align);
4819 	gc->gc_rx_buf_align = max(gc->gc_rx_buf_align, IOC_LINESIZE - 1);
4820 	gc->gc_dma_attr_rxbuf.dma_attr_align = gc->gc_rx_buf_align + 1;
4821 
4822 	/* fix descriptor boundary for cache line size */
4823 	gc->gc_dma_attr_desc.dma_attr_align =
4824 	    max(gc->gc_dma_attr_desc.dma_attr_align, IOC_LINESIZE);
4825 
4826 	/* patch get_packet method */
4827 	if (gc->gc_get_packet == NULL) {
4828 		gc->gc_get_packet = &gem_get_packet_default;
4829 	}
4830 
4831 	/* patch get_rx_start method */
4832 	if (gc->gc_rx_start == NULL) {
4833 		gc->gc_rx_start = &gem_rx_start_default;
4834 	}
4835 
4836 	/* calculate descriptor area */
4837 	if (gc->gc_rx_desc_unit_shift >= 0) {
4838 		dp->rx_desc_size =
4839 		    ROUNDUP(gc->gc_rx_ring_size << gc->gc_rx_desc_unit_shift,
4840 		    gc->gc_dma_attr_desc.dma_attr_align);
4841 	}
4842 	if (gc->gc_tx_desc_unit_shift >= 0) {
4843 		dp->tx_desc_size =
4844 		    ROUNDUP(gc->gc_tx_ring_size << gc->gc_tx_desc_unit_shift,
4845 		    gc->gc_dma_attr_desc.dma_attr_align);
4846 	}
4847 
4848 	dp->mtu = ETHERMTU;
4849 	dp->tx_buf = (void *)&dp->mc_list[GEM_MAXMC];
4850 	/* link tx buffers */
4851 	for (i = 0; i < dp->gc.gc_tx_buf_size; i++) {
4852 		dp->tx_buf[i].txb_next =
4853 		    &dp->tx_buf[SLOT(i + 1, dp->gc.gc_tx_buf_size)];
4854 	}
4855 
4856 	dp->rxmode	   = 0;
4857 	dp->speed	   = GEM_SPD_10;	/* default is 10Mbps */
4858 	dp->full_duplex    = B_FALSE;		/* default is half */
4859 	dp->flow_control   = FLOW_CONTROL_NONE;
4860 	dp->poll_pkt_delay = 8;		/* typical coalease for rx packets */
4861 
4862 	/* performance tuning parameters */
4863 	dp->txthr    = ETHERMAX;	/* tx fifo threshold */
4864 	dp->txmaxdma = 16*4;		/* tx max dma burst size */
4865 	dp->rxthr    = 128;		/* rx fifo threshold */
4866 	dp->rxmaxdma = 16*4;		/* rx max dma burst size */
4867 
4868 	/*
4869 	 * Get media mode information from .conf file
4870 	 */
4871 	gem_read_conf(dp);
4872 
4873 	/* rx_buf_len is required buffer length without padding for alignment */
4874 	dp->rx_buf_len = MAXPKTBUF(dp) + dp->gc.gc_rx_header_len;
4875 
4876 	/*
4877 	 * Reset the chip
4878 	 */
4879 	mutex_enter(&dp->intrlock);
4880 	dp->nic_state = NIC_STATE_STOPPED;
4881 	ret = (*dp->gc.gc_reset_chip)(dp);
4882 	mutex_exit(&dp->intrlock);
4883 	if (ret != GEM_SUCCESS) {
4884 		goto err_free_regs;
4885 	}
4886 
4887 	/*
4888 	 * HW dependant paremeter initialization
4889 	 */
4890 	mutex_enter(&dp->intrlock);
4891 	ret = (*dp->gc.gc_attach_chip)(dp);
4892 	mutex_exit(&dp->intrlock);
4893 	if (ret != GEM_SUCCESS) {
4894 		goto err_free_regs;
4895 	}
4896 
4897 #ifdef DEBUG_MULTIFRAGS
4898 	dp->gc.gc_tx_copy_thresh = dp->mtu;
4899 #endif
4900 	/* allocate tx and rx resources */
4901 	if (gem_alloc_memory(dp)) {
4902 		goto err_free_regs;
4903 	}
4904 
4905 	DPRINTF(0, (CE_CONT,
4906 	    "!%s: at 0x%x, %02x:%02x:%02x:%02x:%02x:%02x",
4907 	    dp->name, (long)dp->base_addr,
4908 	    dp->dev_addr.ether_addr_octet[0],
4909 	    dp->dev_addr.ether_addr_octet[1],
4910 	    dp->dev_addr.ether_addr_octet[2],
4911 	    dp->dev_addr.ether_addr_octet[3],
4912 	    dp->dev_addr.ether_addr_octet[4],
4913 	    dp->dev_addr.ether_addr_octet[5]));
4914 
4915 	/* copy mac address */
4916 	dp->cur_addr = dp->dev_addr;
4917 
4918 	gem_gld3_init(dp, macp);
4919 
4920 	/* Probe MII phy (scan phy) */
4921 	dp->mii_lpable = 0;
4922 	dp->mii_advert = 0;
4923 	dp->mii_exp = 0;
4924 	dp->mii_ctl1000 = 0;
4925 	dp->mii_stat1000 = 0;
4926 	if ((*dp->gc.gc_mii_probe)(dp) != GEM_SUCCESS) {
4927 		goto err_free_ring;
4928 	}
4929 
4930 	/* mask unsupported abilities */
4931 	dp->anadv_autoneg &= BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4932 	dp->anadv_1000fdx &=
4933 	    BOOLEAN(dp->mii_xstatus &
4934 	    (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD));
4935 	dp->anadv_1000hdx &=
4936 	    BOOLEAN(dp->mii_xstatus &
4937 	    (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET));
4938 	dp->anadv_100t4  &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4939 	dp->anadv_100fdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4940 	dp->anadv_100hdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4941 	dp->anadv_10fdx  &= BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4942 	dp->anadv_10hdx  &= BOOLEAN(dp->mii_status & MII_STATUS_10);
4943 
4944 	gem_choose_forcedmode(dp);
4945 
4946 	/* initialize MII phy if required */
4947 	if (dp->gc.gc_mii_init) {
4948 		if ((*dp->gc.gc_mii_init)(dp) != GEM_SUCCESS) {
4949 			goto err_free_ring;
4950 		}
4951 	}
4952 
4953 	/*
4954 	 * initialize kstats including mii statistics
4955 	 */
4956 	gem_nd_setup(dp);
4957 
4958 	/*
4959 	 * Add interrupt to system.
4960 	 */
4961 	if (ret = mac_register(macp, &dp->mh)) {
4962 		cmn_err(CE_WARN, "!%s: mac_register failed, error:%d",
4963 		    dp->name, ret);
4964 		goto err_release_stats;
4965 	}
4966 	mac_free(macp);
4967 	macp = NULL;
4968 
4969 	if (dp->misc_flag & GEM_SOFTINTR) {
4970 		if (ddi_add_softintr(dip,
4971 		    DDI_SOFTINT_LOW, &dp->soft_id,
4972 		    NULL, NULL,
4973 		    (uint_t (*)(caddr_t))gem_intr,
4974 		    (caddr_t)dp) != DDI_SUCCESS) {
4975 			cmn_err(CE_WARN, "!%s: ddi_add_softintr failed",
4976 			    dp->name);
4977 			goto err_unregister;
4978 		}
4979 	} else if ((dp->misc_flag & GEM_NOINTR) == 0) {
4980 		if (ddi_add_intr(dip, 0, NULL, NULL,
4981 		    (uint_t (*)(caddr_t))gem_intr,
4982 		    (caddr_t)dp) != DDI_SUCCESS) {
4983 			cmn_err(CE_WARN, "!%s: ddi_add_intr failed", dp->name);
4984 			goto err_unregister;
4985 		}
4986 	} else {
4987 		/*
4988 		 * Dont use interrupt.
4989 		 * schedule first call of gem_intr_watcher
4990 		 */
4991 		dp->intr_watcher_id =
4992 		    timeout((void (*)(void *))gem_intr_watcher,
4993 		    (void *)dp, drv_usectohz(3*1000000));
4994 	}
4995 
4996 	/* link this device to dev_info */
4997 	dp->next = (struct gem_dev *)ddi_get_driver_private(dip);
4998 	dp->port = port;
4999 	ddi_set_driver_private(dip, (caddr_t)dp);
5000 
5001 	/* reset mii phy and start mii link watcher */
5002 	gem_mii_start(dp);
5003 
5004 	DPRINTF(2, (CE_CONT, "!gem_do_attach: return: success"));
5005 	return (dp);
5006 
5007 err_unregister:
5008 	(void) mac_unregister(dp->mh);
5009 err_release_stats:
5010 	/* release NDD resources */
5011 	gem_nd_cleanup(dp);
5012 
5013 err_free_ring:
5014 	gem_free_memory(dp);
5015 err_free_regs:
5016 	ddi_regs_map_free(&dp->regs_handle);
5017 err_free_locks:
5018 	mutex_destroy(&dp->xmitlock);
5019 	mutex_destroy(&dp->intrlock);
5020 	cv_destroy(&dp->tx_drain_cv);
5021 err_free_private:
5022 	if (macp) {
5023 		mac_free(macp);
5024 	}
5025 	kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(gc));
5026 
5027 	return (NULL);
5028 }
5029 
/*
 * gem_do_detach: tear down every gem_dev port linked to this dev_info.
 *
 * For each port: unregister from GLDv3, verify no rx buffers are loaned
 * out (panics if they are), stop the link watcher, remove the interrupt
 * (or polling) machinery, release kstats/NDD, DMA memory, locks, and the
 * soft state itself. The common private area and the shared register
 * mapping are released once, after the per-port loop.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if mac_unregister() refuses
 * (e.g. the mac is still in use); in that case teardown stops early.
 */
int
gem_do_detach(dev_info_t *dip)
{
	struct gem_dev	*dp;
	struct gem_dev	*tmp;
	caddr_t		private;
	int		priv_size;
	ddi_acc_handle_t	rh;

	dp = GEM_GET_DEV(dip);
	if (dp == NULL) {
		return (DDI_SUCCESS);
	}

	/*
	 * Save the shared resources now; they are freed only after every
	 * per-port gem_dev (which holds these fields) has been destroyed.
	 */
	rh = dp->regs_handle;
	private = dp->private;
	priv_size = dp->priv_size;

	while (dp) {
		/* unregister with gld v3 */
		if (mac_unregister(dp->mh) != 0) {
			return (DDI_FAILURE);
		}

		/* ensure any rx buffers are not used */
		if (dp->rx_buf_allocated != dp->rx_buf_freecnt) {
			/* resource is busy */
			cmn_err(CE_PANIC,
			    "!%s: %s: rxbuf is busy: allocated:%d, freecnt:%d",
			    dp->name, __func__,
			    dp->rx_buf_allocated, dp->rx_buf_freecnt);
			/* NOT REACHED */
		}

		/* stop mii link watcher */
		gem_mii_stop(dp);

		/* unregister interrupt handler */
		if (dp->misc_flag & GEM_SOFTINTR) {
			ddi_remove_softintr(dp->soft_id);
		} else if ((dp->misc_flag & GEM_NOINTR) == 0) {
			ddi_remove_intr(dip, 0, dp->iblock_cookie);
		} else {
			/* stop interrupt watcher */
			if (dp->intr_watcher_id) {
				/* untimeout() returns -1 while the handler runs */
				while (untimeout(dp->intr_watcher_id) == -1)
					;
				dp->intr_watcher_id = 0;
			}
		}

		/* release NDD resources */
		gem_nd_cleanup(dp);
		/* release buffers, descriptors and dma resources */
		gem_free_memory(dp);

		/* release locks and condition variables */
		mutex_destroy(&dp->xmitlock);
		mutex_destroy(&dp->intrlock);
		cv_destroy(&dp->tx_drain_cv);

		/* release basic memory resources */
		tmp = dp->next;
		kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(&dp->gc));
		dp = tmp;
	}

	/* release common private memory for the nic */
	kmem_free(private, priv_size);

	/* release register mapping resources */
	ddi_regs_map_free(&rh);

	DPRINTF(2, (CE_CONT, "!%s%d: gem_do_detach: return: success",
	    ddi_driver_name(dip), ddi_get_instance(dip)));

	return (DDI_SUCCESS);
}
5108 
5109 int
5110 gem_suspend(dev_info_t *dip)
5111 {
5112 	struct gem_dev	*dp;
5113 
5114 	/*
5115 	 * stop the device
5116 	 */
5117 	dp = GEM_GET_DEV(dip);
5118 	ASSERT(dp);
5119 
5120 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
5121 
5122 	for (; dp; dp = dp->next) {
5123 
5124 		/* stop mii link watcher */
5125 		gem_mii_stop(dp);
5126 
5127 		/* stop interrupt watcher for no-intr mode */
5128 		if (dp->misc_flag & GEM_NOINTR) {
5129 			if (dp->intr_watcher_id) {
5130 				while (untimeout(dp->intr_watcher_id) == -1)
5131 					;
5132 			}
5133 			dp->intr_watcher_id = 0;
5134 		}
5135 
5136 		/* stop tx timeout watcher */
5137 		if (dp->timeout_id) {
5138 			while (untimeout(dp->timeout_id) == -1)
5139 				;
5140 			dp->timeout_id = 0;
5141 		}
5142 
5143 		/* make the nic state inactive */
5144 		mutex_enter(&dp->intrlock);
5145 		(void) gem_mac_stop(dp, 0);
5146 		ASSERT(!dp->mac_active);
5147 
5148 		/* no further register access */
5149 		dp->mac_suspended = B_TRUE;
5150 		mutex_exit(&dp->intrlock);
5151 	}
5152 
5153 	/* XXX - power down the nic */
5154 
5155 	return (DDI_SUCCESS);
5156 }
5157 
/*
 * gem_resume: DDI_RESUME handling; mirror image of gem_suspend().
 *
 * For each port: clear the suspended flag, reset and re-initialize the
 * chip and phy, restart the interrupt watcher (no-intr mode) and mii
 * link watcher, re-program the rx filter, restart the tx timeout
 * watcher, and restart the mac if the link had been up.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE after resetting the chip of the
 * port that failed (earlier ports in the list remain resumed).
 */
int
gem_resume(dev_info_t *dip)
{
	struct gem_dev	*dp;

	/*
	 * restart the device
	 */
	dp = GEM_GET_DEV(dip);
	ASSERT(dp);

	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));

	for (; dp; dp = dp->next) {

		/*
		 * Bring up the nic after power up
		 */

		/* gem_xxx.c layer to setup power management state. */
		ASSERT(!dp->mac_active);

		/* reset the chip, because we are just after power up. */
		mutex_enter(&dp->intrlock);

		dp->mac_suspended = B_FALSE;
		dp->nic_state = NIC_STATE_STOPPED;

		if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
			cmn_err(CE_WARN, "%s: %s: failed to reset chip",
			    dp->name, __func__);
			mutex_exit(&dp->intrlock);
			goto err;
		}
		mutex_exit(&dp->intrlock);

		/* initialize mii phy because we are just after power up */
		if (dp->gc.gc_mii_init) {
			(void) (*dp->gc.gc_mii_init)(dp);
		}

		if (dp->misc_flag & GEM_NOINTR) {
			/*
			 * schedule first call of gem_intr_watcher
			 * instead of interrupts.
			 */
			dp->intr_watcher_id =
			    timeout((void (*)(void *))gem_intr_watcher,
			    (void *)dp, drv_usectohz(3*1000000));
		}

		/* restart mii link watcher */
		gem_mii_start(dp);

		/* restart mac */
		mutex_enter(&dp->intrlock);

		if (gem_mac_init(dp) != GEM_SUCCESS) {
			mutex_exit(&dp->intrlock);
			goto err_reset;
		}
		dp->nic_state = NIC_STATE_INITIALIZED;

		/* setup media mode if the link have been up */
		if (dp->mii_state == MII_STATE_LINKUP) {
			if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
				mutex_exit(&dp->intrlock);
				goto err_reset;
			}
		}

		/* enable mac address and rx filter */
		dp->rxmode |= RXMODE_ENABLE;
		if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
			mutex_exit(&dp->intrlock);
			goto err_reset;
		}
		dp->nic_state = NIC_STATE_ONLINE;

		/* restart tx timeout watcher */
		dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
		    (void *)dp,
		    dp->gc.gc_tx_timeout_interval);

		/* now the nic is fully functional */
		if (dp->mii_state == MII_STATE_LINKUP) {
			if (gem_mac_start(dp) != GEM_SUCCESS) {
				mutex_exit(&dp->intrlock);
				goto err_reset;
			}
		}
		mutex_exit(&dp->intrlock);
	}

	return (DDI_SUCCESS);

err_reset:
	/* cancel the interrupt watcher we may have just scheduled */
	if (dp->intr_watcher_id) {
		while (untimeout(dp->intr_watcher_id) == -1)
			;
		dp->intr_watcher_id = 0;
	}
	/* leave the failing port's chip in a quiescent, stopped state */
	mutex_enter(&dp->intrlock);
	(*dp->gc.gc_reset_chip)(dp);
	dp->nic_state = NIC_STATE_STOPPED;
	mutex_exit(&dp->intrlock);

err:
	return (DDI_FAILURE);
}
5268 
5269 /*
5270  * misc routines for PCI
5271  */
5272 uint8_t
5273 gem_search_pci_cap(dev_info_t *dip,
5274 		ddi_acc_handle_t conf_handle, uint8_t target)
5275 {
5276 	uint8_t		pci_cap_ptr;
5277 	uint32_t	pci_cap;
5278 
5279 	/* search power management capablities */
5280 	pci_cap_ptr = pci_config_get8(conf_handle, PCI_CONF_CAP_PTR);
5281 	while (pci_cap_ptr) {
5282 		/* read pci capability header */
5283 		pci_cap = pci_config_get32(conf_handle, pci_cap_ptr);
5284 		if ((pci_cap & 0xff) == target) {
5285 			/* found */
5286 			break;
5287 		}
5288 		/* get next_ptr */
5289 		pci_cap_ptr = (pci_cap >> 8) & 0xff;
5290 	}
5291 	return (pci_cap_ptr);
5292 }
5293 
/*
 * gem_pci_set_power_state: move the device into PCI power state
 * D<new_mode> (0..3) by rewriting the PMCSR register of the PCI power
 * management capability.
 * Returns DDI_SUCCESS, or DDI_FAILURE if the device exposes no PM
 * capability. NOTE(review): the capability's supported-state bits are
 * not validated before the transition (see "not yet" below).
 */
int
gem_pci_set_power_state(dev_info_t *dip,
		ddi_acc_handle_t conf_handle, uint_t new_mode)
{
	uint8_t		pci_cap_ptr;
	uint32_t	pmcsr;
	uint_t		unit;
	const char	*drv_name;

	ASSERT(new_mode < 4);

	unit = ddi_get_instance(dip);
	drv_name = ddi_driver_name(dip);

	/* search power management capabilities */
	pci_cap_ptr = gem_search_pci_cap(dip, conf_handle, PCI_CAP_ID_PM);

	if (pci_cap_ptr == 0) {
		cmn_err(CE_CONT,
		    "!%s%d: doesn't have pci power management capability",
		    drv_name, unit);
		return (DDI_FAILURE);
	}

	/* read power management capabilities */
	pmcsr = pci_config_get32(conf_handle, pci_cap_ptr + PCI_PMCSR);

	DPRINTF(0, (CE_CONT,
	    "!%s%d: pmc found at 0x%x: pmcsr: 0x%08x",
	    drv_name, unit, pci_cap_ptr, pmcsr));

	/*
	 * Is the requested power mode supported?
	 */
	/* not yet */

	/*
	 * move to new mode
	 */
	pmcsr = (pmcsr & ~PCI_PMCSR_STATE_MASK) | new_mode;
	pci_config_put32(conf_handle, pci_cap_ptr + PCI_PMCSR, pmcsr);

	return (DDI_SUCCESS);
}
5338 
5339 /*
5340  * select suitable register for by specified address space or register
5341  * offset in PCI config space
5342  */
/*
 * gem_pci_regs_map_setup: map the PCI register set selected by address
 * space type.
 *
 * Scans the "reg" property for the first entry whose pci_phys_hi field,
 * masked with `mask`, equals `which` (e.g. memory vs I/O space), then
 * maps that register set with ddi_regs_map_setup(9F). On success *basep
 * and *hp receive the mapped base address and access handle.
 * Returns DDI_SUCCESS or a DDI failure code.
 */
int
gem_pci_regs_map_setup(dev_info_t *dip, uint32_t which, uint32_t mask,
	struct ddi_device_acc_attr *attrp,
	caddr_t *basep, ddi_acc_handle_t *hp)
{
	struct pci_phys_spec	*regs;
	uint_t		len;
	uint_t		unit;
	uint_t		n;
	uint_t		i;
	int		ret;
	const char	*drv_name;

	unit = ddi_get_instance(dip);
	drv_name = ddi_driver_name(dip);

	/* Search IO-range or memory-range to be mapped */
	regs = NULL;
	len  = 0;

	if ((ret = ddi_prop_lookup_int_array(
	    DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", (void *)&regs, &len)) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "!%s%d: failed to get reg property (ret:%d)",
		    drv_name, unit, ret);
		return (DDI_FAILURE);
	}
	/* len is in ints; convert to the number of pci_phys_spec entries */
	n = len / (sizeof (struct pci_phys_spec) / sizeof (int));

	ASSERT(regs != NULL && len > 0);

#if GEM_DEBUG_LEVEL > 0
	for (i = 0; i < n; i++) {
		cmn_err(CE_CONT,
		    "!%s%d: regs[%d]: %08x.%08x.%08x.%08x.%08x",
		    drv_name, unit, i,
		    regs[i].pci_phys_hi,
		    regs[i].pci_phys_mid,
		    regs[i].pci_phys_low,
		    regs[i].pci_size_hi,
		    regs[i].pci_size_low);
	}
#endif
	for (i = 0; i < n; i++) {
		if ((regs[i].pci_phys_hi & mask) == which) {
			/* it's the requested space */
			ddi_prop_free(regs);
			goto address_range_found;
		}
	}
	ddi_prop_free(regs);
	return (DDI_FAILURE);

address_range_found:
	/* i still indexes the matching "reg" entry here */
	if ((ret = ddi_regs_map_setup(dip, i, basep, 0, 0, attrp, hp))
	    != DDI_SUCCESS) {
		cmn_err(CE_CONT,
		    "!%s%d: ddi_regs_map_setup failed (ret:%d)",
		    drv_name, unit, ret);
	}

	return (ret);
}
5407 
/*
 * gem_mod_init: wrapper around mac_init_ops(9F); called from the driver's
 * _init(9E) before mod_install() to prepare the dev_ops for GLDv3.
 */
void
gem_mod_init(struct dev_ops *dop, char *name)
{
	mac_init_ops(dop, name);
}
5413 
/*
 * gem_mod_fini: wrapper around mac_fini_ops(9F); called from the driver's
 * _fini(9E) after mod_remove() to undo gem_mod_init().
 */
void
gem_mod_fini(struct dev_ops *dop)
{
	mac_fini_ops(dop);
}
5419