xref: /titanic_50/usr/src/uts/common/io/sfe/sfe_util.c (revision 6f3a36cd4abb5c5058716ccff5c673379a4b802b)
1 /*
2  * sfe_util.c: general ethernet mac driver framework version 2.6
3  *
4  * Copyright (c) 2002-2008 Masayuki Murayama.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  *    this list of conditions and the following disclaimer in the documentation
14  *    and/or other materials provided with the distribution.
15  *
16  * 3. Neither the name of the author nor the names of its contributors may be
17  *    used to endorse or promote products derived from this software without
18  *    specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31  * DAMAGE.
32  */
33 
34 /*
35  * System Header files.
36  */
37 #include <sys/types.h>
38 #include <sys/conf.h>
39 #include <sys/debug.h>
40 #include <sys/kmem.h>
41 #include <sys/vtrace.h>
42 #include <sys/ethernet.h>
43 #include <sys/modctl.h>
44 #include <sys/errno.h>
45 #include <sys/ddi.h>
46 #include <sys/sunddi.h>
47 #include <sys/stream.h>		/* required for MBLK* */
48 #include <sys/strsun.h>		/* required for mionack() */
49 #include <sys/byteorder.h>
50 #include <sys/pci.h>
51 #include <inet/common.h>
52 #include <inet/led.h>
53 #include <inet/mi.h>
54 #include <inet/nd.h>
55 #include <sys/crc32.h>
56 
57 #include <sys/note.h>
58 
59 #include "sfe_mii.h"
60 #include "sfe_util.h"
61 
62 
63 
64 extern char ident[];
65 
66 /* Debugging support */
67 #ifdef GEM_DEBUG_LEVEL
68 static int gem_debug = GEM_DEBUG_LEVEL;
69 #define	DPRINTF(n, args)	if (gem_debug > (n)) cmn_err args
70 #else
71 #define	DPRINTF(n, args)
72 #undef ASSERT
73 #define	ASSERT(x)
74 #endif
75 
76 #define	IOC_LINESIZE	0x40	/* Is it right for amd64? */
77 
78 /*
79  * Useful macros and typedefs
80  */
81 #define	ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
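/*
 * Illustrative note (not part of the original source): ROUNDUP assumes
 * that "a" is a power of two.  For example, ROUNDUP(1518, IOC_LINESIZE)
 * is (1518 + 63) & ~63 = 1536, i.e. the smallest multiple of the 64-byte
 * I/O cache line size that can hold a 1518-byte frame.
 */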
82 
83 #define	GET_NET16(p)	((((uint8_t *)(p))[0] << 8)| ((uint8_t *)(p))[1])
84 #define	GET_ETHERTYPE(p)	GET_NET16(((uint8_t *)(p)) + ETHERADDRL*2)
85 
86 #define	GET_IPTYPEv4(p)	(((uint8_t *)(p))[sizeof (struct ether_header) + 9])
87 #define	GET_IPTYPEv6(p)	(((uint8_t *)(p))[sizeof (struct ether_header) + 6])
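/*
 * Illustrative note (not part of the original source): GET_NET16 reads a
 * 16-bit value in network (big-endian) byte order regardless of the host
 * byte order.  For an untagged IPv4 frame, GET_ETHERTYPE(p) therefore
 * evaluates to 0x0800 (ETHERTYPE_IP) and GET_IPTYPEv4(p) yields the IP
 * protocol number, e.g. 6 for TCP or 17 for UDP.
 */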
88 
89 
90 #ifndef INT32_MAX
91 #define	INT32_MAX	0x7fffffff
92 #endif
93 
94 #define	VTAG_OFF	(ETHERADDRL*2)
95 #ifndef VTAG_SIZE
96 #define	VTAG_SIZE	4
97 #endif
98 #ifndef VTAG_TPID
99 #define	VTAG_TPID	0x8100U
100 #endif
101 
102 #define	GET_TXBUF(dp, sn)	\
103 	&(dp)->tx_buf[SLOT((dp)->tx_slots_base + (sn), (dp)->gc.gc_tx_buf_size)]
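/*
 * Illustrative sketch (assumption: SLOT() maps a free-running sequence
 * number onto a ring index, as defined in sfe_util.h).  Tx buffers are
 * addressed by monotonically increasing sequence numbers, and GET_TXBUF
 * translates such a number into a slot of the circular tx_buf array:
 *
 *	struct txbuf	*tbp = GET_TXBUF(dp, dp->tx_free_head);
 */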
104 
105 #ifndef offsetof
106 #define	offsetof(t, m)	((long)&(((t *) 0)->m))
107 #endif
108 #define	TXFLAG_VTAG(flag)	\
109 	(((flag) & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT)
110 
111 #define	MAXPKTBUF(dp)	\
112 	((dp)->mtu + sizeof (struct ether_header) + VTAG_SIZE + ETHERFCSL)
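/*
 * Illustrative note (not part of the original source): with the default
 * ethernet mtu of 1500, MAXPKTBUF(dp) is 1500 + 14 + 4 + 4 = 1522 bytes,
 * i.e. payload + ethernet header + vlan tag + FCS.
 */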
113 
114 #define	WATCH_INTERVAL_FAST	drv_usectohz(100*1000)	/* 100mS */
115 #define	BOOLEAN(x)	((x) != 0)
116 
117 /*
118  * Macros to distinguish chip generations.
119  */
120 
121 /*
122  * Private functions
123  */
124 static void gem_mii_start(struct gem_dev *);
125 static void gem_mii_stop(struct gem_dev *);
126 
127 /* local buffer management */
128 static void gem_nd_setup(struct gem_dev *dp);
129 static void gem_nd_cleanup(struct gem_dev *dp);
130 static int gem_alloc_memory(struct gem_dev *);
131 static void gem_free_memory(struct gem_dev *);
132 static void gem_init_rx_ring(struct gem_dev *);
133 static void gem_init_tx_ring(struct gem_dev *);
134 __INLINE__ static void gem_append_rxbuf(struct gem_dev *, struct rxbuf *);
135 
136 static void gem_tx_timeout(struct gem_dev *);
137 static void gem_mii_link_watcher(struct gem_dev *dp);
138 static int gem_mac_init(struct gem_dev *dp);
139 static int gem_mac_start(struct gem_dev *dp);
140 static int gem_mac_stop(struct gem_dev *dp, uint_t flags);
141 static void gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp);
142 
143 static	struct ether_addr	gem_etherbroadcastaddr = {
144 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
145 };
146 
147 int gem_speed_value[] = {10, 100, 1000};
148 
149 /* ============================================================== */
150 /*
151  * Misc runtime routines
152  */
153 /* ============================================================== */
154 /*
155  * Ether CRC calculation according to 21143 data sheet
156  */
157 uint32_t
158 gem_ether_crc_le(const uint8_t *addr, int len)
159 {
160 	uint32_t	crc;
161 
162 	CRC32(crc, addr, ETHERADDRL, 0xffffffffU, crc32_table);
163 	return (crc);
164 }
165 
166 uint32_t
167 gem_ether_crc_be(const uint8_t *addr, int len)
168 {
169 	int		idx;
170 	int		bit;
171 	uint_t		data;
172 	uint32_t	crc;
173 #define	CRC32_POLY_BE	0x04c11db7
174 
175 	crc = 0xffffffff;
176 	for (idx = 0; idx < len; idx++) {
177 		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
178 			crc = (crc << 1)
179 			    ^ ((((crc >> 31) ^ data) & 1) ? CRC32_POLY_BE : 0);
180 		}
181 	}
182 	return (crc);
183 #undef	CRC32_POLY_BE
184 }
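/*
 * Illustrative sketch (not part of the original source): chip-specific
 * rx filter code typically derives a multicast hash index from a few
 * bits of one of the CRCs above, e.g.
 *
 *	uint32_t crc  = gem_ether_crc_be(ep->ether_addr_octet, ETHERADDRL);
 *	uint_t   hash = crc >> (32 - 7);
 *
 * where the number and position of the bits used are chip dependent.
 */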
185 
186 int
187 gem_prop_get_int(struct gem_dev *dp, char *prop_template, int def_val)
188 {
189 	char	propname[32];
190 
191 	(void) sprintf(propname, prop_template, dp->name);
192 
193 	return (ddi_prop_get_int(DDI_DEV_T_ANY, dp->dip,
194 	    DDI_PROP_DONTPASS, propname, def_val));
195 }
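/*
 * Illustrative usage (hypothetical property name, not part of the
 * original source):
 *
 *	val = gem_prop_get_int(dp, "%s-tx-copy-threshold", 256);
 *
 * expands the template with the device name (e.g. "sfe0") and returns
 * the .conf property value, or the default when the property is unset.
 */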
196 
197 static int
198 gem_population(uint32_t x)
199 {
200 	int	i;
201 	int	cnt;
202 
203 	cnt = 0;
204 	for (i = 0; i < 32; i++) {
205 		if (x & (1 << i)) {
206 			cnt++;
207 		}
208 	}
209 	return (cnt);
210 }
211 
212 #ifdef GEM_DEBUG_LEVEL
213 #ifdef GEM_DEBUG_VLAN
214 static void
215 gem_dump_packet(struct gem_dev *dp, char *title, mblk_t *mp,
216     boolean_t check_cksum)
217 {
218 	char	msg[180];
219 	uint8_t	buf[18+20+20];
220 	uint8_t	*p;
221 	size_t	offset;
222 	uint_t	ethertype;
223 	uint_t	proto;
224 	uint_t	ipproto = 0;
225 	uint_t	iplen;
226 	uint_t	iphlen;
227 	uint_t	tcplen;
228 	uint_t	udplen;
229 	uint_t	cksum;
230 	int	rest;
231 	int	len;
232 	char	*bp;
233 	mblk_t	*tp;
234 	extern uint_t	ip_cksum(mblk_t *, int, uint32_t);
235 
236 	msg[0] = 0;
237 	bp = msg;
238 
239 	rest = sizeof (buf);
240 	offset = 0;
241 	for (tp = mp; tp; tp = tp->b_cont) {
242 		len = tp->b_wptr - tp->b_rptr;
243 		len = min(rest, len);
244 		bcopy(tp->b_rptr, &buf[offset], len);
245 		rest -= len;
246 		offset += len;
247 		if (rest == 0) {
248 			break;
249 		}
250 	}
251 
252 	offset = 0;
253 	p = &buf[offset];
254 
255 	/* ethernet address */
256 	sprintf(bp,
257 	    "ether: %02x:%02x:%02x:%02x:%02x:%02x"
258 	    " -> %02x:%02x:%02x:%02x:%02x:%02x",
259 	    p[6], p[7], p[8], p[9], p[10], p[11],
260 	    p[0], p[1], p[2], p[3], p[4], p[5]);
261 	bp = &msg[strlen(msg)];
262 
263 	/* vlan tag and ethertype */
264 	ethertype = GET_ETHERTYPE(p);
265 	if (ethertype == VTAG_TPID) {
266 		sprintf(bp, " vtag:0x%04x", GET_NET16(&p[14]));
267 		bp = &msg[strlen(msg)];
268 
269 		offset += VTAG_SIZE;
270 		p = &buf[offset];
271 		ethertype = GET_ETHERTYPE(p);
272 	}
273 	sprintf(bp, " type:%04x", ethertype);
274 	bp = &msg[strlen(msg)];
275 
276 	/* ethernet packet length */
277 	sprintf(bp, " mblklen:%d", msgdsize(mp));
278 	bp = &msg[strlen(msg)];
279 	if (mp->b_cont) {
280 		sprintf(bp, "(");
281 		bp = &msg[strlen(msg)];
282 		for (tp = mp; tp; tp = tp->b_cont) {
283 			if (tp == mp) {
284 				sprintf(bp, "%d", tp->b_wptr - tp->b_rptr);
285 			} else {
286 				sprintf(bp, "+%d", tp->b_wptr - tp->b_rptr);
287 			}
288 			bp = &msg[strlen(msg)];
289 		}
290 		sprintf(bp, ")");
291 		bp = &msg[strlen(msg)];
292 	}
293 
294 	if (ethertype != ETHERTYPE_IP) {
295 		goto x;
296 	}
297 
298 	/* ip address */
299 	offset += sizeof (struct ether_header);
300 	p = &buf[offset];
301 	ipproto = p[9];
302 	iplen = GET_NET16(&p[2]);
303 	sprintf(bp, ", ip: %d.%d.%d.%d -> %d.%d.%d.%d proto:%d iplen:%d",
304 	    p[12], p[13], p[14], p[15],
305 	    p[16], p[17], p[18], p[19],
306 	    ipproto, iplen);
307 	bp = (void *)&msg[strlen(msg)];
308 
309 	iphlen = (p[0] & 0xf) * 4;
310 
311 	/* cksum for pseudo header */
312 	cksum = *(uint16_t *)&p[12];
313 	cksum += *(uint16_t *)&p[14];
314 	cksum += *(uint16_t *)&p[16];
315 	cksum += *(uint16_t *)&p[18];
316 	cksum += BE_16(ipproto);
317 
318 	/* tcp or udp protocol header */
319 	offset += iphlen;
320 	p = &buf[offset];
321 	if (ipproto == IPPROTO_TCP) {
322 		tcplen = iplen - iphlen;
323 		sprintf(bp, ", tcp: len:%d cksum:%x",
324 		    tcplen, GET_NET16(&p[16]));
325 		bp = (void *)&msg[strlen(msg)];
326 
327 		if (check_cksum) {
328 			cksum += BE_16(tcplen);
329 			cksum = (uint16_t)ip_cksum(mp, offset, cksum);
330 			sprintf(bp, " (%s)",
331 			    (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
332 			bp = (void *)&msg[strlen(msg)];
333 		}
334 	} else if (ipproto == IPPROTO_UDP) {
335 		udplen = GET_NET16(&p[4]);
336 		sprintf(bp, ", udp: len:%d cksum:%x",
337 		    udplen, GET_NET16(&p[6]));
338 		bp = (void *)&msg[strlen(msg)];
339 
340 		if (GET_NET16(&p[6]) && check_cksum) {
341 			cksum += *(uint16_t *)&p[4];
342 			cksum = (uint16_t)ip_cksum(mp, offset, cksum);
343 			sprintf(bp, " (%s)",
344 			    (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
345 			bp = (void *)&msg[strlen(msg)];
346 		}
347 	}
348 x:
349 	cmn_err(CE_CONT, "!%s: %s: %s", dp->name, title, msg);
350 }
351 #endif /* GEM_DEBUG_VLAN */
352 #endif /* GEM_DEBUG_LEVEL */
353 
354 /* ============================================================== */
355 /*
356  * IO cache flush
357  */
358 /* ============================================================== */
359 __INLINE__ void
360 gem_rx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
361 {
362 	int	n;
363 	int	m;
364 	int	rx_desc_unit_shift = dp->gc.gc_rx_desc_unit_shift;
365 
366 	/* sync active descriptors */
367 	if (rx_desc_unit_shift < 0 || nslot == 0) {
368 		/* no rx descriptor ring */
369 		return;
370 	}
371 
372 	n = dp->gc.gc_rx_ring_size - head;
373 	if ((m = nslot - n) > 0) {
374 		(void) ddi_dma_sync(dp->desc_dma_handle,
375 		    (off_t)0,
376 		    (size_t)(m << rx_desc_unit_shift),
377 		    how);
378 		nslot = n;
379 	}
380 
381 	(void) ddi_dma_sync(dp->desc_dma_handle,
382 	    (off_t)(head << rx_desc_unit_shift),
383 	    (size_t)(nslot << rx_desc_unit_shift),
384 	    how);
385 }
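/*
 * Illustrative note (not part of the original source): the sync is split
 * into two ddi_dma_sync() calls when the active range wraps around the
 * end of the ring.  For example, with a 64-entry rx ring,
 * gem_rx_desc_dma_sync(dp, 60, 10, ...) syncs slots 0..5 first and then
 * slots 60..63.
 */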
386 
387 __INLINE__ void
388 gem_tx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
389 {
390 	int	n;
391 	int	m;
392 	int	tx_desc_unit_shift = dp->gc.gc_tx_desc_unit_shift;
393 
394 	/* sync active descriptors */
395 	if (tx_desc_unit_shift < 0 || nslot == 0) {
396 		/* no tx descriptor ring */
397 		return;
398 	}
399 
400 	n = dp->gc.gc_tx_ring_size - head;
401 	if ((m = nslot - n) > 0) {
402 		(void) ddi_dma_sync(dp->desc_dma_handle,
403 		    (off_t)(dp->tx_ring_dma - dp->rx_ring_dma),
404 		    (size_t)(m << tx_desc_unit_shift),
405 		    how);
406 		nslot = n;
407 	}
408 
409 	(void) ddi_dma_sync(dp->desc_dma_handle,
410 	    (off_t)((head << tx_desc_unit_shift)
411 	    + (dp->tx_ring_dma - dp->rx_ring_dma)),
412 	    (size_t)(nslot << tx_desc_unit_shift),
413 	    how);
414 }
415 
416 static void
417 gem_rx_start_default(struct gem_dev *dp, int head, int nslot)
418 {
419 	gem_rx_desc_dma_sync(dp,
420 	    SLOT(head, dp->gc.gc_rx_ring_size), nslot,
421 	    DDI_DMA_SYNC_FORDEV);
422 }
423 
424 /* ============================================================== */
425 /*
426  * Buffer management
427  */
428 /* ============================================================== */
429 static void
430 gem_dump_txbuf(struct gem_dev *dp, int level, const char *title)
431 {
432 	cmn_err(level,
433 	    "!%s: %s: tx_active: %d[%d] %d[%d] (+%d), "
434 	    "tx_softq: %d[%d] %d[%d] (+%d), "
435 	    "tx_free: %d[%d] %d[%d] (+%d), "
436 	    "tx_desc: %d[%d] %d[%d] (+%d), "
437 	    "intr: %d[%d] (+%d), ",
438 	    dp->name, title,
439 	    dp->tx_active_head,
440 	    SLOT(dp->tx_active_head, dp->gc.gc_tx_buf_size),
441 	    dp->tx_active_tail,
442 	    SLOT(dp->tx_active_tail, dp->gc.gc_tx_buf_size),
443 	    dp->tx_active_tail - dp->tx_active_head,
444 	    dp->tx_softq_head,
445 	    SLOT(dp->tx_softq_head, dp->gc.gc_tx_buf_size),
446 	    dp->tx_softq_tail,
447 	    SLOT(dp->tx_softq_tail, dp->gc.gc_tx_buf_size),
448 	    dp->tx_softq_tail - dp->tx_softq_head,
449 	    dp->tx_free_head,
450 	    SLOT(dp->tx_free_head, dp->gc.gc_tx_buf_size),
451 	    dp->tx_free_tail,
452 	    SLOT(dp->tx_free_tail, dp->gc.gc_tx_buf_size),
453 	    dp->tx_free_tail - dp->tx_free_head,
454 	    dp->tx_desc_head,
455 	    SLOT(dp->tx_desc_head, dp->gc.gc_tx_ring_size),
456 	    dp->tx_desc_tail,
457 	    SLOT(dp->tx_desc_tail, dp->gc.gc_tx_ring_size),
458 	    dp->tx_desc_tail - dp->tx_desc_head,
459 	    dp->tx_desc_intr,
460 	    SLOT(dp->tx_desc_intr, dp->gc.gc_tx_ring_size),
461 	    dp->tx_desc_intr - dp->tx_desc_head);
462 }
463 
464 static void
465 gem_free_rxbuf(struct rxbuf *rbp)
466 {
467 	struct gem_dev	*dp;
468 
469 	dp = rbp->rxb_devp;
470 	ASSERT(mutex_owned(&dp->intrlock));
471 	rbp->rxb_next = dp->rx_buf_freelist;
472 	dp->rx_buf_freelist = rbp;
473 	dp->rx_buf_freecnt++;
474 }
475 
476 /*
477  * gem_get_rxbuf: supply a receive buffer which has been mapped into
478  * DMA space.
479  */
480 struct rxbuf *
481 gem_get_rxbuf(struct gem_dev *dp, int cansleep)
482 {
483 	struct rxbuf		*rbp;
484 	uint_t			count = 0;
485 	int			i;
486 	int			err;
487 
488 	ASSERT(mutex_owned(&dp->intrlock));
489 
490 	DPRINTF(3, (CE_CONT, "!gem_get_rxbuf: called freecnt:%d",
491 	    dp->rx_buf_freecnt));
492 	/*
493 	 * Get rx buffer management structure
494 	 */
495 	rbp = dp->rx_buf_freelist;
496 	if (rbp) {
497 		/* get one from the recycle list */
498 		ASSERT(dp->rx_buf_freecnt > 0);
499 
500 		dp->rx_buf_freelist = rbp->rxb_next;
501 		dp->rx_buf_freecnt--;
502 		rbp->rxb_next = NULL;
503 		return (rbp);
504 	}
505 
506 	/*
507 	 * Allocate a rx buffer management structure
508 	 */
509 	rbp = kmem_zalloc(sizeof (*rbp), cansleep ? KM_SLEEP : KM_NOSLEEP);
510 	if (rbp == NULL) {
511 		/* no memory */
512 		return (NULL);
513 	}
514 
515 	/*
516 	 * Prepare a back pointer to the device structure which will be
517 	 * referred to when the buffer is freed later.
518 	 */
519 	rbp->rxb_devp = dp;
520 
521 	/* allocate a dma handle for rx data buffer */
522 	if ((err = ddi_dma_alloc_handle(dp->dip,
523 	    &dp->gc.gc_dma_attr_rxbuf,
524 	    (cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT),
525 	    NULL, &rbp->rxb_dh)) != DDI_SUCCESS) {
526 
527 		cmn_err(CE_WARN,
528 		    "!%s: %s: ddi_dma_alloc_handle:1 failed, err=%d",
529 		    dp->name, __func__, err);
530 
531 		kmem_free(rbp, sizeof (struct rxbuf));
532 		return (NULL);
533 	}
534 
535 	/* allocate a bounce buffer for rx */
536 	if ((err = ddi_dma_mem_alloc(rbp->rxb_dh,
537 	    ROUNDUP(dp->rx_buf_len, IOC_LINESIZE),
538 	    &dp->gc.gc_buf_attr,
539 		/*
540 		 * if the nic requires a header at the top of receive buffers,
541 		 * it may access the rx buffer randomly.
542 		 */
543 	    (dp->gc.gc_rx_header_len > 0)
544 	    ? DDI_DMA_CONSISTENT : DDI_DMA_STREAMING,
545 	    cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
546 	    NULL,
547 	    &rbp->rxb_buf, &rbp->rxb_buf_len,
548 	    &rbp->rxb_bah)) != DDI_SUCCESS) {
549 
550 		cmn_err(CE_WARN,
551 		    "!%s: %s: ddi_dma_mem_alloc: failed, err=%d",
552 		    dp->name, __func__, err);
553 
554 		ddi_dma_free_handle(&rbp->rxb_dh);
555 		kmem_free(rbp, sizeof (struct rxbuf));
556 		return (NULL);
557 	}
558 
559 	/* Map the bounce buffer into DMA space */
560 	if ((err = ddi_dma_addr_bind_handle(rbp->rxb_dh,
561 	    NULL, rbp->rxb_buf, dp->rx_buf_len,
562 	    ((dp->gc.gc_rx_header_len > 0)
563 	    ?(DDI_DMA_RDWR | DDI_DMA_CONSISTENT)
564 	    :(DDI_DMA_READ | DDI_DMA_STREAMING)),
565 	    cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
566 	    NULL,
567 	    rbp->rxb_dmacookie,
568 	    &count)) != DDI_DMA_MAPPED) {
569 
570 		ASSERT(err != DDI_DMA_INUSE);
571 		DPRINTF(0, (CE_WARN,
572 		    "!%s: %s: ddi_dma_addr_bind_handle: failed, err=%d",
573 		    dp->name, __func__, err));
574 
575 		/*
576 		 * we failed to allocate a dma resource
577 		 * for the rx bounce buffer.
578 		 */
579 		ddi_dma_mem_free(&rbp->rxb_bah);
580 		ddi_dma_free_handle(&rbp->rxb_dh);
581 		kmem_free(rbp, sizeof (struct rxbuf));
582 		return (NULL);
583 	}
584 
585 	/* fetch the remaining cookies of the DMA mapping */
586 	for (i = 1; i < count; i++) {
587 		ddi_dma_nextcookie(rbp->rxb_dh, &rbp->rxb_dmacookie[i]);
588 	}
589 	rbp->rxb_nfrags = count;
590 
591 	/* Now we successfully prepared an rx buffer */
592 	dp->rx_buf_allocated++;
593 
594 	return (rbp);
595 }
596 
597 /* ============================================================== */
598 /*
599  * memory resource management
600  */
601 /* ============================================================== */
602 static int
603 gem_alloc_memory(struct gem_dev *dp)
604 {
605 	caddr_t			ring;
606 	caddr_t			buf;
607 	size_t			req_size;
608 	size_t			ring_len;
609 	size_t			buf_len;
610 	ddi_dma_cookie_t	ring_cookie;
611 	ddi_dma_cookie_t	buf_cookie;
612 	uint_t			count;
613 	int			i;
614 	int			err;
615 	struct txbuf		*tbp;
616 	int			tx_buf_len;
617 	ddi_dma_attr_t		dma_attr_txbounce;
618 
619 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
620 
621 	dp->desc_dma_handle = NULL;
622 	req_size = dp->rx_desc_size + dp->tx_desc_size + dp->gc.gc_io_area_size;
623 
624 	if (req_size > 0) {
625 		/*
626 		 * Alloc RX/TX descriptors and an I/O area.
627 		 */
628 		if ((err = ddi_dma_alloc_handle(dp->dip,
629 		    &dp->gc.gc_dma_attr_desc,
630 		    DDI_DMA_SLEEP, NULL,
631 		    &dp->desc_dma_handle)) != DDI_SUCCESS) {
632 			cmn_err(CE_WARN,
633 			    "!%s: %s: ddi_dma_alloc_handle failed: %d",
634 			    dp->name, __func__, err);
635 			return (ENOMEM);
636 		}
637 
638 		if ((err = ddi_dma_mem_alloc(dp->desc_dma_handle,
639 		    req_size, &dp->gc.gc_desc_attr,
640 		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
641 		    &ring, &ring_len,
642 		    &dp->desc_acc_handle)) != DDI_SUCCESS) {
643 			cmn_err(CE_WARN,
644 			    "!%s: %s: ddi_dma_mem_alloc failed: "
645 			    "ret %d, request size: %d",
646 			    dp->name, __func__, err, (int)req_size);
647 			ddi_dma_free_handle(&dp->desc_dma_handle);
648 			return (ENOMEM);
649 		}
650 
651 		if ((err = ddi_dma_addr_bind_handle(dp->desc_dma_handle,
652 		    NULL, ring, ring_len,
653 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
654 		    DDI_DMA_SLEEP, NULL,
655 		    &ring_cookie, &count)) != DDI_SUCCESS) {
656 			ASSERT(err != DDI_DMA_INUSE);
657 			cmn_err(CE_WARN,
658 			    "!%s: %s: ddi_dma_addr_bind_handle failed: %d",
659 			    dp->name, __func__, err);
660 			ddi_dma_mem_free(&dp->desc_acc_handle);
661 			ddi_dma_free_handle(&dp->desc_dma_handle);
662 			return (ENOMEM);
663 		}
664 		ASSERT(count == 1);
665 
666 		/* set base of rx descriptor ring */
667 		dp->rx_ring = ring;
668 		dp->rx_ring_dma = ring_cookie.dmac_laddress;
669 
670 		/* set base of tx descriptor ring */
671 		dp->tx_ring = dp->rx_ring + dp->rx_desc_size;
672 		dp->tx_ring_dma = dp->rx_ring_dma + dp->rx_desc_size;
673 
674 		/* set base of io area */
675 		dp->io_area = dp->tx_ring + dp->tx_desc_size;
676 		dp->io_area_dma = dp->tx_ring_dma + dp->tx_desc_size;
677 	}
678 
679 	/*
680 	 * Prepare DMA resources for tx packets
681 	 */
682 	ASSERT(dp->gc.gc_tx_buf_size > 0);
683 
684 	/* Special dma attribute for tx bounce buffers */
685 	dma_attr_txbounce = dp->gc.gc_dma_attr_txbuf;
686 	dma_attr_txbounce.dma_attr_sgllen = 1;
687 	dma_attr_txbounce.dma_attr_align =
688 	    max(dma_attr_txbounce.dma_attr_align, IOC_LINESIZE);
689 
690 	/* The size of a tx bounce buffer must be the max tx packet size. */
691 	tx_buf_len = MAXPKTBUF(dp);
692 	tx_buf_len = ROUNDUP(tx_buf_len, IOC_LINESIZE);
693 
694 	ASSERT(tx_buf_len >= ETHERMAX+ETHERFCSL);
695 
696 	for (i = 0, tbp = dp->tx_buf;
697 	    i < dp->gc.gc_tx_buf_size; i++, tbp++) {
698 
699 		/* setup bounce buffers for tx packets */
700 		if ((err = ddi_dma_alloc_handle(dp->dip,
701 		    &dma_attr_txbounce,
702 		    DDI_DMA_SLEEP, NULL,
703 		    &tbp->txb_bdh)) != DDI_SUCCESS) {
704 
705 			cmn_err(CE_WARN,
706 		    "!%s: %s ddi_dma_alloc_handle for bounce buffer failed:"
707 			    " err=%d, i=%d",
708 			    dp->name, __func__, err, i);
709 			goto err_alloc_dh;
710 		}
711 
712 		if ((err = ddi_dma_mem_alloc(tbp->txb_bdh,
713 		    tx_buf_len,
714 		    &dp->gc.gc_buf_attr,
715 		    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
716 		    &buf, &buf_len,
717 		    &tbp->txb_bah)) != DDI_SUCCESS) {
718 			cmn_err(CE_WARN,
719 		    "!%s: %s: ddi_dma_mem_alloc for bounce buffer failed: "
720 			    "ret %d, request size %d",
721 			    dp->name, __func__, err, tx_buf_len);
722 			ddi_dma_free_handle(&tbp->txb_bdh);
723 			goto err_alloc_dh;
724 		}
725 
726 		if ((err = ddi_dma_addr_bind_handle(tbp->txb_bdh,
727 		    NULL, buf, buf_len,
728 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
729 		    DDI_DMA_SLEEP, NULL,
730 		    &buf_cookie, &count)) != DDI_SUCCESS) {
731 			ASSERT(err != DDI_DMA_INUSE);
732 			cmn_err(CE_WARN,
733 	"!%s: %s: ddi_dma_addr_bind_handle for bounce buffer failed: %d",
734 			    dp->name, __func__, err);
735 			ddi_dma_mem_free(&tbp->txb_bah);
736 			ddi_dma_free_handle(&tbp->txb_bdh);
737 			goto err_alloc_dh;
738 		}
739 		ASSERT(count == 1);
740 		tbp->txb_buf = buf;
741 		tbp->txb_buf_dma = buf_cookie.dmac_laddress;
742 	}
743 
744 	return (0);
745 
746 err_alloc_dh:
747 	if (dp->gc.gc_tx_buf_size > 0) {
748 		while (i-- > 0) {
749 			(void) ddi_dma_unbind_handle(dp->tx_buf[i].txb_bdh);
750 			ddi_dma_mem_free(&dp->tx_buf[i].txb_bah);
751 			ddi_dma_free_handle(&dp->tx_buf[i].txb_bdh);
752 		}
753 	}
754 
755 	if (dp->desc_dma_handle) {
756 		(void) ddi_dma_unbind_handle(dp->desc_dma_handle);
757 		ddi_dma_mem_free(&dp->desc_acc_handle);
758 		ddi_dma_free_handle(&dp->desc_dma_handle);
759 		dp->desc_dma_handle = NULL;
760 	}
761 
762 	return (ENOMEM);
763 }
764 
765 static void
766 gem_free_memory(struct gem_dev *dp)
767 {
768 	int		i;
769 	struct rxbuf	*rbp;
770 	struct txbuf	*tbp;
771 
772 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
773 
774 	/* Free TX/RX descriptors and tx padding buffer */
775 	if (dp->desc_dma_handle) {
776 		(void) ddi_dma_unbind_handle(dp->desc_dma_handle);
777 		ddi_dma_mem_free(&dp->desc_acc_handle);
778 		ddi_dma_free_handle(&dp->desc_dma_handle);
779 		dp->desc_dma_handle = NULL;
780 	}
781 
782 	/* Free dma handles for Tx */
783 	for (i = dp->gc.gc_tx_buf_size, tbp = dp->tx_buf; i--; tbp++) {
784 		/* Free bounce buffer associated to each txbuf */
785 		(void) ddi_dma_unbind_handle(tbp->txb_bdh);
786 		ddi_dma_mem_free(&tbp->txb_bah);
787 		ddi_dma_free_handle(&tbp->txb_bdh);
788 	}
789 
790 	/* Free rx buffer */
791 	while ((rbp = dp->rx_buf_freelist) != NULL) {
792 
793 		ASSERT(dp->rx_buf_freecnt > 0);
794 
795 		dp->rx_buf_freelist = rbp->rxb_next;
796 		dp->rx_buf_freecnt--;
797 
798 		/* release DMA mapping */
799 		ASSERT(rbp->rxb_dh != NULL);
800 
801 		/* free dma handles for the rx buf */
802 		/* it always has a dma mapping */
803 		ASSERT(rbp->rxb_nfrags > 0);
804 		(void) ddi_dma_unbind_handle(rbp->rxb_dh);
805 
806 		/* free the associated bounce buffer and dma handle */
807 		ASSERT(rbp->rxb_bah != NULL);
808 		ddi_dma_mem_free(&rbp->rxb_bah);
809 		/* free the associated dma handle */
810 		ddi_dma_free_handle(&rbp->rxb_dh);
811 
812 		/* free the base memory of rx buffer management */
813 		kmem_free(rbp, sizeof (struct rxbuf));
814 	}
815 }
816 
817 /* ============================================================== */
818 /*
819  * Rx/Tx descriptor slot management
820  */
821 /* ============================================================== */
822 /*
823  * Initialize an empty rx ring.
824  */
825 static void
826 gem_init_rx_ring(struct gem_dev *dp)
827 {
828 	int		i;
829 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
830 
831 	DPRINTF(1, (CE_CONT, "!%s: %s ring_size:%d, buf_max:%d",
832 	    dp->name, __func__,
833 	    rx_ring_size, dp->gc.gc_rx_buf_max));
834 
835 	/* make a physical chain of rx descriptors */
836 	for (i = 0; i < rx_ring_size; i++) {
837 		(*dp->gc.gc_rx_desc_init)(dp, i);
838 	}
839 	gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
840 
841 	dp->rx_active_head = (seqnum_t)0;
842 	dp->rx_active_tail = (seqnum_t)0;
843 
844 	ASSERT(dp->rx_buf_head == (struct rxbuf *)NULL);
845 	ASSERT(dp->rx_buf_tail == (struct rxbuf *)NULL);
846 }
847 
848 /*
849  * Prepare rx buffers and put them into the rx buffer/descriptor ring.
850  */
851 static void
852 gem_prepare_rx_buf(struct gem_dev *dp)
853 {
854 	int		i;
855 	int		nrbuf;
856 	struct rxbuf	*rbp;
857 
858 	ASSERT(mutex_owned(&dp->intrlock));
859 
860 	/* Now we have no active buffers in rx ring */
861 
862 	nrbuf = min(dp->gc.gc_rx_ring_size, dp->gc.gc_rx_buf_max);
863 	for (i = 0; i < nrbuf; i++) {
864 		if ((rbp = gem_get_rxbuf(dp, B_TRUE)) == NULL) {
865 			break;
866 		}
867 		gem_append_rxbuf(dp, rbp);
868 	}
869 
870 	gem_rx_desc_dma_sync(dp,
871 	    0, dp->gc.gc_rx_ring_size, DDI_DMA_SYNC_FORDEV);
872 }
873 
874 /*
875  * Reclaim active rx buffers in rx buffer ring.
876  */
877 static void
878 gem_clean_rx_buf(struct gem_dev *dp)
879 {
880 	int		i;
881 	struct rxbuf	*rbp;
882 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
883 #ifdef GEM_DEBUG_LEVEL
884 	int		total;
885 #endif
886 	ASSERT(mutex_owned(&dp->intrlock));
887 
888 	DPRINTF(2, (CE_CONT, "!%s: %s: %d buffers are free",
889 	    dp->name, __func__, dp->rx_buf_freecnt));
890 	/*
891 	 * clean up HW descriptors
892 	 */
893 	for (i = 0; i < rx_ring_size; i++) {
894 		(*dp->gc.gc_rx_desc_clean)(dp, i);
895 	}
896 	gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
897 
898 #ifdef GEM_DEBUG_LEVEL
899 	total = 0;
900 #endif
901 	/*
902 	 * Reclaim allocated rx buffers
903 	 */
904 	while ((rbp = dp->rx_buf_head) != NULL) {
905 #ifdef GEM_DEBUG_LEVEL
906 		total++;
907 #endif
908 		/* remove the first one from rx buffer list */
909 		dp->rx_buf_head = rbp->rxb_next;
910 
911 		/* recycle the rxbuf */
912 		gem_free_rxbuf(rbp);
913 	}
914 	dp->rx_buf_tail = (struct rxbuf *)NULL;
915 
916 	DPRINTF(2, (CE_CONT,
917 	    "!%s: %s: %d buffers freed, total: %d free",
918 	    dp->name, __func__, total, dp->rx_buf_freecnt));
919 }
920 
921 /*
922  * Initialize an empty transmit buffer/descriptor ring
923  */
924 static void
925 gem_init_tx_ring(struct gem_dev *dp)
926 {
927 	int		i;
928 	int		tx_buf_size = dp->gc.gc_tx_buf_size;
929 	int		tx_ring_size = dp->gc.gc_tx_ring_size;
930 
931 	DPRINTF(2, (CE_CONT, "!%s: %s: ring_size:%d, buf_size:%d",
932 	    dp->name, __func__,
933 	    dp->gc.gc_tx_ring_size, dp->gc.gc_tx_buf_size));
934 
935 	ASSERT(!dp->mac_active);
936 
937 	/* initialize active list and free list */
938 	dp->tx_slots_base =
939 	    SLOT(dp->tx_slots_base + dp->tx_softq_head, tx_buf_size);
940 	dp->tx_softq_tail -= dp->tx_softq_head;
941 	dp->tx_softq_head = (seqnum_t)0;
942 
943 	dp->tx_active_head = dp->tx_softq_head;
944 	dp->tx_active_tail = dp->tx_softq_head;
945 
946 	dp->tx_free_head   = dp->tx_softq_tail;
947 	dp->tx_free_tail   = dp->gc.gc_tx_buf_limit;
948 
949 	dp->tx_desc_head = (seqnum_t)0;
950 	dp->tx_desc_tail = (seqnum_t)0;
951 	dp->tx_desc_intr = (seqnum_t)0;
952 
953 	for (i = 0; i < tx_ring_size; i++) {
954 		(*dp->gc.gc_tx_desc_init)(dp, i);
955 	}
956 	gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
957 }
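/*
 * Illustrative note (an observation about the code in this file, not part
 * of the original source): tx ring positions are kept as free-running
 * sequence numbers rather than array indices, ordered as
 *
 *	tx_active_head <= tx_active_tail <= tx_softq_head
 *	    <= tx_softq_tail <= tx_free_head <= tx_free_tail
 *	    <= tx_active_head + gc_tx_buf_limit
 *
 * SLOT() converts a sequence number into an array index only when a
 * buffer or descriptor is actually accessed.
 */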
958 
959 __INLINE__
960 static void
961 gem_txbuf_free_dma_resources(struct txbuf *tbp)
962 {
963 	if (tbp->txb_mp) {
964 		freemsg(tbp->txb_mp);
965 		tbp->txb_mp = NULL;
966 	}
967 	tbp->txb_nfrags = 0;
968 	tbp->txb_flag = 0;
969 }
970 #pragma inline(gem_txbuf_free_dma_resources)
971 
972 /*
973  * reclaim active tx buffers and reset positions in tx rings.
974  */
975 static void
976 gem_clean_tx_buf(struct gem_dev *dp)
977 {
978 	int		i;
979 	seqnum_t	head;
980 	seqnum_t	tail;
981 	seqnum_t	sn;
982 	struct txbuf	*tbp;
983 	int		tx_ring_size = dp->gc.gc_tx_ring_size;
984 #ifdef GEM_DEBUG_LEVEL
985 	int		err;
986 #endif
987 
988 	ASSERT(!dp->mac_active);
989 	ASSERT(dp->tx_busy == 0);
990 	ASSERT(dp->tx_softq_tail == dp->tx_free_head);
991 
992 	/*
993 	 * clean up all HW descriptors
994 	 */
995 	for (i = 0; i < tx_ring_size; i++) {
996 		(*dp->gc.gc_tx_desc_clean)(dp, i);
997 	}
998 	gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
999 
1000 	/* dequeue all active and loaded buffers */
1001 	head = dp->tx_active_head;
1002 	tail = dp->tx_softq_tail;
1003 
1004 	ASSERT(dp->tx_free_head - head >= 0);
1005 	tbp = GET_TXBUF(dp, head);
1006 	for (sn = head; sn != tail; sn++) {
1007 		gem_txbuf_free_dma_resources(tbp);
1008 		ASSERT(tbp->txb_mp == NULL);
1009 		dp->stats.errxmt++;
1010 		tbp = tbp->txb_next;
1011 	}
1012 
1013 #ifdef GEM_DEBUG_LEVEL
1014 	/* ensure that no dma resources for tx are still in use now */
1015 	err = 0;
1016 	while (sn != head + dp->gc.gc_tx_buf_size) {
1017 		if (tbp->txb_mp || tbp->txb_nfrags) {
1018 			DPRINTF(0, (CE_CONT,
1019 			    "%s: %s: sn:%d[%d] mp:%p nfrags:%d",
1020 			    dp->name, __func__,
1021 			    sn, SLOT(sn, dp->gc.gc_tx_buf_size),
1022 			    tbp->txb_mp, tbp->txb_nfrags));
1023 			err = 1;
1024 		}
1025 		sn++;
1026 		tbp = tbp->txb_next;
1027 	}
1028 
1029 	if (err) {
1030 		gem_dump_txbuf(dp, CE_WARN,
1031 		    "gem_clean_tx_buf: tbp->txb_mp != NULL");
1032 	}
1033 #endif
1034 	/* recycle buffers, now no active tx buffers in the ring */
1035 	dp->tx_free_tail += tail - head;
1036 	ASSERT(dp->tx_free_tail == dp->tx_free_head + dp->gc.gc_tx_buf_limit);
1037 
1038 	/* fix positions in tx buffer rings */
1039 	dp->tx_active_head = dp->tx_free_head;
1040 	dp->tx_active_tail = dp->tx_free_head;
1041 	dp->tx_softq_head  = dp->tx_free_head;
1042 	dp->tx_softq_tail  = dp->tx_free_head;
1043 }
1044 
1045 /*
1046  * Reclaim transmitted buffers from tx buffer/descriptor ring.
1047  */
1048 __INLINE__ int
1049 gem_reclaim_txbuf(struct gem_dev *dp)
1050 {
1051 	struct txbuf	*tbp;
1052 	uint_t		txstat;
1053 	int		err = GEM_SUCCESS;
1054 	seqnum_t	head;
1055 	seqnum_t	tail;
1056 	seqnum_t	sn;
1057 	seqnum_t	desc_head;
1058 	int		tx_ring_size = dp->gc.gc_tx_ring_size;
1059 	uint_t (*tx_desc_stat)(struct gem_dev *dp,
1060 	    int slot, int ndesc) = dp->gc.gc_tx_desc_stat;
1061 	clock_t		now;
1062 
1063 	now = ddi_get_lbolt();
1064 	if (now == (clock_t)0) {
1065 		/* make non-zero timestamp */
1066 		now--;
1067 	}
1068 
1069 	mutex_enter(&dp->xmitlock);
1070 
1071 	head = dp->tx_active_head;
1072 	tail = dp->tx_active_tail;
1073 
1074 #if GEM_DEBUG_LEVEL > 2
1075 	if (head != tail) {
1076 		cmn_err(CE_CONT, "!%s: %s: "
1077 		    "testing active_head:%d[%d], active_tail:%d[%d]",
1078 		    dp->name, __func__,
1079 		    head, SLOT(head, dp->gc.gc_tx_buf_size),
1080 		    tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1081 	}
1082 #endif
1083 #ifdef DEBUG
1084 	if (dp->tx_reclaim_busy == 0) {
1085 		/* check tx buffer management consistency */
1086 		ASSERT(dp->tx_free_tail - dp->tx_active_head
1087 		    == dp->gc.gc_tx_buf_limit);
1088 		/* EMPTY */
1089 	}
1090 #endif
1091 	dp->tx_reclaim_busy++;
1092 
1093 	/* sync all active HW descriptors */
1094 	gem_tx_desc_dma_sync(dp,
1095 	    SLOT(dp->tx_desc_head, tx_ring_size),
1096 	    dp->tx_desc_tail - dp->tx_desc_head,
1097 	    DDI_DMA_SYNC_FORKERNEL);
1098 
1099 	tbp = GET_TXBUF(dp, head);
1100 	desc_head = dp->tx_desc_head;
1101 	for (sn = head; sn != tail;
1102 	    dp->tx_active_head = (++sn), tbp = tbp->txb_next) {
1103 		int	ndescs;
1104 
1105 		ASSERT(tbp->txb_desc == desc_head);
1106 
1107 		ndescs = tbp->txb_ndescs;
1108 		if (ndescs == 0) {
1109 			/* skip errored descriptors */
1110 			continue;
1111 		}
1112 		txstat = (*tx_desc_stat)(dp,
1113 		    SLOT(tbp->txb_desc, tx_ring_size), ndescs);
1114 
1115 		if (txstat == 0) {
1116 			/* not transmitted yet */
1117 			break;
1118 		}
1119 
1120 		if (!dp->tx_blocked && (tbp->txb_flag & GEM_TXFLAG_INTR)) {
1121 			dp->tx_blocked = now;
1122 		}
1123 
1124 		ASSERT(txstat & (GEM_TX_DONE | GEM_TX_ERR));
1125 
1126 		if (txstat & GEM_TX_ERR) {
1127 			err = GEM_FAILURE;
1128 			cmn_err(CE_WARN, "!%s: tx error at desc %d[%d]",
1129 			    dp->name, sn, SLOT(sn, tx_ring_size));
1130 		}
1131 #if GEM_DEBUG_LEVEL > 4
1132 		if (now - tbp->txb_stime >= 50) {
1133 			cmn_err(CE_WARN, "!%s: tx delayed for %d ms",
1134 			    dp->name, (now - tbp->txb_stime)*10);
1135 		}
1136 #endif
1137 		/* free transmitted descriptors */
1138 		desc_head += ndescs;
1139 	}
1140 
1141 	if (dp->tx_desc_head != desc_head) {
1142 		/* we have reclaimed one or more tx buffers */
1143 		dp->tx_desc_head = desc_head;
1144 
1145 		/* If we passed the next interrupt position, update it */
1146 		if (desc_head - dp->tx_desc_intr > 0) {
1147 			dp->tx_desc_intr = desc_head;
1148 		}
1149 	}
1150 	mutex_exit(&dp->xmitlock);
1151 
1152 	/* free dma mapping resources associated with transmitted tx buffers */
1153 	tbp = GET_TXBUF(dp, head);
1154 	tail = sn;
1155 #if GEM_DEBUG_LEVEL > 2
1156 	if (head != tail) {
1157 		cmn_err(CE_CONT, "%s: freeing head:%d[%d], tail:%d[%d]",
1158 		    __func__,
1159 		    head, SLOT(head, dp->gc.gc_tx_buf_size),
1160 		    tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1161 	}
1162 #endif
1163 	for (sn = head; sn != tail; sn++, tbp = tbp->txb_next) {
1164 		gem_txbuf_free_dma_resources(tbp);
1165 	}
1166 
1167 	/* recycle the tx buffers */
1168 	mutex_enter(&dp->xmitlock);
1169 	if (--dp->tx_reclaim_busy == 0) {
1170 		/* we are the last thread who can update free tail */
1171 #if GEM_DEBUG_LEVEL > 4
1172 		/* check that all resources have been deallocated */
1173 		sn = dp->tx_free_tail;
1174 		tbp = GET_TXBUF(dp, sn);
1175 		while (sn != dp->tx_active_head + dp->gc.gc_tx_buf_limit) {
1176 			if (tbp->txb_nfrags) {
1177 				/* in use */
1178 				break;
1179 			}
1180 			ASSERT(tbp->txb_mp == NULL);
1181 			tbp = tbp->txb_next;
1182 			sn++;
1183 		}
1184 		ASSERT(dp->tx_active_head + dp->gc.gc_tx_buf_limit == sn);
1185 #endif
1186 		dp->tx_free_tail =
1187 		    dp->tx_active_head + dp->gc.gc_tx_buf_limit;
1188 	}
1189 	if (!dp->mac_active) {
1190 		/* someone may be waiting for me. */
1191 		cv_broadcast(&dp->tx_drain_cv);
1192 	}
1193 #if GEM_DEBUG_LEVEL > 2
1194 	cmn_err(CE_CONT, "!%s: %s: called, "
1195 	    "free_head:%d free_tail:%d(+%d) added:%d",
1196 	    dp->name, __func__,
1197 	    dp->tx_free_head, dp->tx_free_tail,
1198 	    dp->tx_free_tail - dp->tx_free_head, tail - head);
1199 #endif
1200 	mutex_exit(&dp->xmitlock);
1201 
1202 	return (err);
1203 }
1204 #pragma inline(gem_reclaim_txbuf)
1205 
1206 
1207 /*
1208  * Write tx descriptors in an out-of-order manner
1209  */
1210 static void
1211 gem_tx_load_descs_oo(struct gem_dev *dp,
1212 	seqnum_t start_slot, seqnum_t end_slot, uint64_t flags)
1213 {
1214 	seqnum_t	sn;
1215 	struct txbuf	*tbp;
1216 	int	tx_ring_size = dp->gc.gc_tx_ring_size;
1217 	int	(*tx_desc_write)
1218 	    (struct gem_dev *dp, int slot,
1219 	    ddi_dma_cookie_t *dmacookie,
1220 	    int frags, uint64_t flag) = dp->gc.gc_tx_desc_write;
1221 	clock_t	now = ddi_get_lbolt();
1222 
1223 	sn = start_slot;
1224 	tbp = GET_TXBUF(dp, sn);
1225 	do {
1226 #if GEM_DEBUG_LEVEL > 1
1227 		if (dp->tx_cnt < 100) {
1228 			dp->tx_cnt++;
1229 			flags |= GEM_TXFLAG_INTR;
1230 		}
1231 #endif
1232 		/* write a tx descriptor */
1233 		tbp->txb_desc = sn;
1234 		tbp->txb_ndescs = (*tx_desc_write)(dp,
1235 		    SLOT(sn, tx_ring_size),
1236 		    tbp->txb_dmacookie,
1237 		    tbp->txb_nfrags, flags | tbp->txb_flag);
1238 		tbp->txb_stime = now;
1239 		ASSERT(tbp->txb_ndescs == 1);
1240 
1241 		flags = 0;
1242 		sn++;
1243 		tbp = tbp->txb_next;
1244 	} while (sn != end_slot);
1245 }
1246 
1247 __INLINE__
1248 static size_t
1249 gem_setup_txbuf_copy(struct gem_dev *dp, mblk_t *mp, struct txbuf *tbp)
1250 {
1251 	size_t			min_pkt;
1252 	caddr_t			bp;
1253 	size_t			off;
1254 	mblk_t			*tp;
1255 	size_t			len;
1256 	uint64_t		flag;
1257 
1258 	ASSERT(tbp->txb_mp == NULL);
1259 
1260 	/* we use bounce buffer for the packet */
1261 	min_pkt = ETHERMIN;
1262 	bp = tbp->txb_buf;
1263 	off = 0;
1264 	tp = mp;
1265 
1266 	flag = tbp->txb_flag;
1267 	if (flag & GEM_TXFLAG_SWVTAG) {
1268 		/* need to increase min packet size */
1269 		min_pkt += VTAG_SIZE;
1270 		ASSERT((flag & GEM_TXFLAG_VTAG) == 0);
1271 	}
1272 
1273 	/* copy the rest */
1274 	for (; tp; tp = tp->b_cont) {
1275 		if ((len = (long)tp->b_wptr - (long)tp->b_rptr) > 0) {
1276 			bcopy(tp->b_rptr, &bp[off], len);
1277 			off += len;
1278 		}
1279 	}
1280 
1281 	if (off < min_pkt &&
1282 	    (min_pkt > ETHERMIN || !dp->gc.gc_tx_auto_pad)) {
1283 		/*
1284 		 * Extend the packet to the minimum packet size explicitly.
1285 		 * For software vlan packets, we shouldn't use the tx autopad
1286 		 * function because nics may not be aware of vlan;
1287 		 * we must keep 46 octets of payload even if we use vlan.
1288 		 */
1289 		bzero(&bp[off], min_pkt - off);
1290 		off = min_pkt;
1291 	}
1292 
1293 	(void) ddi_dma_sync(tbp->txb_bdh, (off_t)0, off, DDI_DMA_SYNC_FORDEV);
1294 
1295 	tbp->txb_dmacookie[0].dmac_laddress = tbp->txb_buf_dma;
1296 	tbp->txb_dmacookie[0].dmac_size = off;
1297 
1298 	DPRINTF(2, (CE_CONT,
1299 	    "!%s: %s: copy: addr:0x%llx len:0x%x, vtag:0x%04x, min_pkt:%d",
1300 	    dp->name, __func__,
1301 	    tbp->txb_dmacookie[0].dmac_laddress,
1302 	    tbp->txb_dmacookie[0].dmac_size,
1303 	    (flag & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT,
1304 	    min_pkt));
1305 
1306 	/* save misc info */
1307 	tbp->txb_mp = mp;
1308 	tbp->txb_nfrags = 1;
1309 #ifdef DEBUG_MULTIFRAGS
1310 	if (dp->gc.gc_tx_max_frags >= 3 &&
1311 	    tbp->txb_dmacookie[0].dmac_size > 16*3) {
1312 		tbp->txb_dmacookie[1].dmac_laddress =
1313 		    tbp->txb_dmacookie[0].dmac_laddress + 16;
1314 		tbp->txb_dmacookie[2].dmac_laddress =
1315 		    tbp->txb_dmacookie[1].dmac_laddress + 16;
1316 
1317 		tbp->txb_dmacookie[2].dmac_size =
1318 		    tbp->txb_dmacookie[0].dmac_size - 16*2;
1319 		tbp->txb_dmacookie[1].dmac_size = 16;
1320 		tbp->txb_dmacookie[0].dmac_size = 16;
1321 		tbp->txb_nfrags  = 3;
1322 	}
1323 #endif
1324 	return (off);
1325 }
1326 #pragma inline(gem_setup_txbuf_copy)
1327 
1328 __INLINE__
1329 static void
1330 gem_tx_start_unit(struct gem_dev *dp)
1331 {
1332 	seqnum_t	head;
1333 	seqnum_t	tail;
1334 	struct txbuf	*tbp_head;
1335 	struct txbuf	*tbp_tail;
1336 
1337 	/* update HW descriptors from soft queue */
1338 	ASSERT(mutex_owned(&dp->xmitlock));
1339 	ASSERT(dp->tx_softq_head == dp->tx_active_tail);
1340 
1341 	head = dp->tx_softq_head;
1342 	tail = dp->tx_softq_tail;
1343 
1344 	DPRINTF(1, (CE_CONT,
1345 	    "%s: %s: called, softq %d %d[+%d], desc %d %d[+%d]",
1346 	    dp->name, __func__, head, tail, tail - head,
1347 	    dp->tx_desc_head, dp->tx_desc_tail,
1348 	    dp->tx_desc_tail - dp->tx_desc_head));
1349 
1350 	ASSERT(tail - head > 0);
1351 
1352 	dp->tx_desc_tail = tail;
1353 
1354 	tbp_head = GET_TXBUF(dp, head);
1355 	tbp_tail = GET_TXBUF(dp, tail - 1);
1356 
1357 	ASSERT(tbp_tail->txb_desc + tbp_tail->txb_ndescs == dp->tx_desc_tail);
1358 
1359 	dp->gc.gc_tx_start(dp,
1360 	    SLOT(tbp_head->txb_desc, dp->gc.gc_tx_ring_size),
1361 	    tbp_tail->txb_desc + tbp_tail->txb_ndescs - tbp_head->txb_desc);
1362 
1363 	/* advance softq head and active tail */
1364 	dp->tx_softq_head = dp->tx_active_tail = tail;
1365 }
1366 #pragma inline(gem_tx_start_unit)
1367 
1368 #ifdef GEM_DEBUG_LEVEL
1369 static int gem_send_cnt[10];
1370 #endif
1371 #define	PKT_MIN_SIZE	(sizeof (struct ether_header) + 10 + VTAG_SIZE)
1372 #define	EHLEN	(sizeof (struct ether_header))
1373 /*
1374  * check ether packet type and ip protocol
1375  */
1376 static uint64_t
1377 gem_txbuf_options(struct gem_dev *dp, mblk_t *mp, uint8_t *bp)
1378 {
1379 	mblk_t		*tp;
1380 	ssize_t		len;
1381 	uint_t		vtag;
1382 	int		off;
1383 	uint64_t	flag;
1384 
1385 	flag = 0ULL;
1386 
1387 	/*
1388 	 * prepare a contiguous header of the packet for protocol analysis
1389 	 */
1390 	if ((long)mp->b_wptr - (long)mp->b_rptr < PKT_MIN_SIZE) {
1391 		/* we use work buffer to copy mblk */
1392 		for (tp = mp, off = 0;
1393 		    tp && (off < PKT_MIN_SIZE);
1394 		    tp = tp->b_cont, off += len) {
1395 			len = (long)tp->b_wptr - (long)tp->b_rptr;
1396 			len = min(len, PKT_MIN_SIZE - off);
1397 			bcopy(tp->b_rptr, &bp[off], len);
1398 		}
1399 	} else {
1400 		/* we can use mblk without copy */
1401 		bp = mp->b_rptr;
1402 	}
1403 
1404 	/* process vlan tag for GLD v3 */
1405 	if (GET_NET16(&bp[VTAG_OFF]) == VTAG_TPID) {
1406 		if (dp->misc_flag & GEM_VLAN_HARD) {
1407 			vtag = GET_NET16(&bp[VTAG_OFF + 2]);
1408 			ASSERT(vtag);
1409 			flag |= vtag << GEM_TXFLAG_VTAG_SHIFT;
1410 		} else {
1411 			flag |= GEM_TXFLAG_SWVTAG;
1412 		}
1413 	}
1414 	return (flag);
1415 }
1416 #undef EHLEN
1417 #undef PKT_MIN_SIZE
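/*
 * Illustrative note (not part of the original source): a vlan-tagged
 * frame starts with
 *
 *	dst[6] src[6] 0x81 0x00 <tci:2> <inner ethertype:2> ...
 *
 * so GET_NET16(&bp[VTAG_OFF]) == VTAG_TPID (0x8100) detects the tag.
 * When the chip supports GEM_VLAN_HARD, the 16-bit TCI is passed to it
 * via the GEM_TXFLAG_VTAG field; otherwise the tag is handled in
 * software via GEM_TXFLAG_SWVTAG.
 */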
1418 /*
1419  * gem_send_common is an exported function because hw-dependent routines may
1420  * use it for sending control frames like setup frames for the 2114x chipset.
1421  */
1422 mblk_t *
1423 gem_send_common(struct gem_dev *dp, mblk_t *mp_head, uint32_t flags)
1424 {
1425 	int			nmblk;
1426 	int			avail;
1427 	mblk_t			*tp;
1428 	mblk_t			*mp;
1429 	int			i;
1430 	struct txbuf		*tbp;
1431 	seqnum_t		head;
1432 	uint64_t		load_flags;
1433 	uint64_t		len_total = 0;
1434 	uint32_t		bcast = 0;
1435 	uint32_t		mcast = 0;
1436 
1437 	ASSERT(mp_head != NULL);
1438 
1439 	mp = mp_head;
1440 	nmblk = 1;
1441 	while ((mp = mp->b_next) != NULL) {
1442 		nmblk++;
1443 	}
1444 #ifdef GEM_DEBUG_LEVEL
1445 	gem_send_cnt[0]++;
1446 	gem_send_cnt[min(nmblk, 9)]++;
1447 #endif
1448 	/*
1449 	 * Acquire resources
1450 	 */
1451 	mutex_enter(&dp->xmitlock);
1452 	if (dp->mac_suspended) {
1453 		mutex_exit(&dp->xmitlock);
1454 		mp = mp_head;
1455 		while (mp) {
1456 			tp = mp->b_next;
1457 			freemsg(mp);
1458 			mp = tp;
1459 		}
1460 		return (NULL);
1461 	}
1462 
1463 	if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
1464 		/* don't send data packets while mac isn't active */
1465 		/* XXX - should we discard packets? */
1466 		mutex_exit(&dp->xmitlock);
1467 		return (mp_head);
1468 	}
1469 
1470 	/* allocate free slots */
1471 	head = dp->tx_free_head;
1472 	avail = dp->tx_free_tail - head;
1473 
1474 	DPRINTF(2, (CE_CONT,
1475 	    "!%s: %s: called, free_head:%d free_tail:%d(+%d) req:%d",
1476 	    dp->name, __func__,
1477 	    dp->tx_free_head, dp->tx_free_tail, avail, nmblk));
1478 
1479 	avail = min(avail, dp->tx_max_packets);
1480 
1481 	if (nmblk > avail) {
1482 		if (avail == 0) {
1483 			/* no resources; short cut */
1484 			DPRINTF(2, (CE_CONT, "!%s: no resources", __func__));
1485 			dp->tx_max_packets = max(dp->tx_max_packets - 1, 1);
1486 			goto done;
1487 		}
1488 		nmblk = avail;
1489 	}
1490 
1491 	dp->tx_free_head = head + nmblk;
1492 	load_flags = ((dp->tx_busy++) == 0) ? GEM_TXFLAG_HEAD : 0;
1493 
1494 	/* update last interrupt position if tx buffers are exhausted.  */
1495 	if (nmblk == avail) {
1496 		tbp = GET_TXBUF(dp, head + avail - 1);
1497 		tbp->txb_flag = GEM_TXFLAG_INTR;
1498 		dp->tx_desc_intr = head + avail;
1499 	}
1500 	mutex_exit(&dp->xmitlock);
1501 
1502 	tbp = GET_TXBUF(dp, head);
1503 
1504 	for (i = nmblk; i > 0; i--, tbp = tbp->txb_next) {
1505 		uint8_t		*bp;
1506 		uint64_t	txflag;
1507 
1508 		/* remove one from the mblk list */
1509 		ASSERT(mp_head != NULL);
1510 		mp = mp_head;
1511 		mp_head = mp_head->b_next;
1512 		mp->b_next = NULL;
1513 
1514 		/* statistics for non-unicast packets */
1515 		bp = mp->b_rptr;
1516 		if ((bp[0] & 1) && (flags & GEM_SEND_CTRL) == 0) {
1517 			if (bcmp(bp, gem_etherbroadcastaddr.ether_addr_octet,
1518 			    ETHERADDRL) == 0) {
1519 				bcast++;
1520 			} else {
1521 				mcast++;
1522 			}
1523 		}
1524 
1525 		/* save misc info */
1526 		txflag = tbp->txb_flag;
1527 		txflag |= (flags & GEM_SEND_CTRL) << GEM_TXFLAG_PRIVATE_SHIFT;
1528 		txflag |= gem_txbuf_options(dp, mp, (uint8_t *)tbp->txb_buf);
1529 		tbp->txb_flag = txflag;
1530 
1531 		len_total += gem_setup_txbuf_copy(dp, mp, tbp);
1532 	}
1533 
1534 	(void) gem_tx_load_descs_oo(dp, head, head + nmblk, load_flags);
1535 
1536 	/* Append the tbp at the tail of the active tx buffer list */
1537 	mutex_enter(&dp->xmitlock);
1538 
1539 	if ((--dp->tx_busy) == 0) {
1540 		/* extend the tail of softq, as new packets have become ready. */
1541 		dp->tx_softq_tail = dp->tx_free_head;
1542 
1543 		if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
1544 			/*
1545 			 * The device status has changed while we were
1546 			 * preparing tx bufs.
1547 			 * As we are the last one to make tx non-busy,
1548 			 * wake up anyone who may be waiting for us.
1549 			 */
1550 			cv_broadcast(&dp->tx_drain_cv);
1551 		} else {
1552 			ASSERT(dp->tx_softq_tail - dp->tx_softq_head > 0);
1553 			gem_tx_start_unit(dp);
1554 		}
1555 	}
1556 	dp->stats.obytes += len_total;
1557 	dp->stats.opackets += nmblk;
1558 	dp->stats.obcast += bcast;
1559 	dp->stats.omcast += mcast;
1560 done:
1561 	mutex_exit(&dp->xmitlock);
1562 
1563 	return (mp_head);
1564 }
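/*
 * Illustrative usage sketch (hypothetical routine, not part of the
 * original source): a chip-specific driver could transmit a single
 * control frame through gem_send_common() like this:
 *
 *	static int
 *	xxx_send_setup_frame(struct gem_dev *dp, mblk_t *mp)
 *	{
 *		ASSERT(mp->b_next == NULL);
 *		if (gem_send_common(dp, mp, GEM_SEND_CTRL) != NULL) {
 *			return (GEM_FAILURE);	(frame was not queued)
 *		}
 *		return (GEM_SUCCESS);
 *	}
 *
 * GEM_SEND_CTRL allows the frame to be queued even while the mac is not
 * active yet, and a NULL return value means all messages were consumed.
 */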
1565 
1566 /* ========================================================== */
1567 /*
1568  * error detection and restart routines
1569  */
1570 /* ========================================================== */
1571 int
1572 gem_restart_nic(struct gem_dev *dp, uint_t flags)
1573 {
1574 	ASSERT(mutex_owned(&dp->intrlock));
1575 
1576 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
1577 #ifdef GEM_DEBUG_LEVEL
1578 #if GEM_DEBUG_LEVEL > 1
1579 	gem_dump_txbuf(dp, CE_CONT, "gem_restart_nic");
1580 #endif
1581 #endif
1582 
1583 	if (dp->mac_suspended) {
1584 		/* should we return GEM_FAILURE ? */
1585 		return (GEM_FAILURE);
1586 	}
1587 
1588 	/*
1589 	 * We should avoid calling any routines except xxx_chip_reset
1590 	 * when we are resuming the system.
1591 	 */
1592 	if (dp->mac_active) {
1593 		if (flags & GEM_RESTART_KEEP_BUF) {
1594 			/* stop rx gracefully */
1595 			dp->rxmode &= ~RXMODE_ENABLE;
1596 			(void) (*dp->gc.gc_set_rx_filter)(dp);
1597 		}
1598 		(void) gem_mac_stop(dp, flags);
1599 	}
1600 
1601 	/* reset the chip. */
1602 	if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
1603 		cmn_err(CE_WARN, "%s: %s: failed to reset chip",
1604 		    dp->name, __func__);
1605 		goto err;
1606 	}
1607 
1608 	if (gem_mac_init(dp) != GEM_SUCCESS) {
1609 		goto err;
1610 	}
1611 
1612 	/* setup media mode if the link has been up */
1613 	if (dp->mii_state == MII_STATE_LINKUP) {
1614 		if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
1615 			goto err;
1616 		}
1617 	}
1618 
1619 	/* setup mac address and enable rx filter */
1620 	dp->rxmode |= RXMODE_ENABLE;
1621 	if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
1622 		goto err;
1623 	}
1624 
1625 	/*
1626 	 * XXX - a panic happened because of linkdown.
1627 	 * We must check mii_state here, because the link can go down just
1628 	 * before the restart event happens. If the link is down now,
1629 	 * gem_mac_start() will be called from gem_mii_link_check() when
1630 	 * the link comes up later.
1631 	 */
1632 	if (dp->mii_state == MII_STATE_LINKUP) {
1633 		/* restart the nic */
1634 		ASSERT(!dp->mac_active);
1635 		(void) gem_mac_start(dp);
1636 	}
1637 	return (GEM_SUCCESS);
1638 err:
1639 	return (GEM_FAILURE);
1640 }
1641 
1642 
1643 static void
1644 gem_tx_timeout(struct gem_dev *dp)
1645 {
1646 	clock_t		now;
1647 	boolean_t	tx_sched;
1648 	struct txbuf	*tbp;
1649 
1650 	mutex_enter(&dp->intrlock);
1651 
1652 	tx_sched = B_FALSE;
1653 	now = ddi_get_lbolt();
1654 
1655 	mutex_enter(&dp->xmitlock);
1656 	if (!dp->mac_active || dp->mii_state != MII_STATE_LINKUP) {
1657 		mutex_exit(&dp->xmitlock);
1658 		goto schedule_next;
1659 	}
1660 	mutex_exit(&dp->xmitlock);
1661 
1662 	/* reclaim transmitted buffers to check whether the transmitter hangs. */
1663 	if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
1664 		/* tx error happened, reset transmitter in the chip */
1665 		(void) gem_restart_nic(dp, 0);
1666 		tx_sched = B_TRUE;
1667 		dp->tx_blocked = (clock_t)0;
1668 
1669 		goto schedule_next;
1670 	}
1671 
1672 	mutex_enter(&dp->xmitlock);
1673 	/* check if the transmitter thread is stuck */
1674 	if (dp->tx_active_head == dp->tx_active_tail) {
1675 		/* no tx buffer is loaded to the nic */
1676 		if (dp->tx_blocked &&
1677 		    now - dp->tx_blocked > dp->gc.gc_tx_timeout_interval) {
1678 			gem_dump_txbuf(dp, CE_WARN,
1679 			    "gem_tx_timeout: tx blocked");
1680 			tx_sched = B_TRUE;
1681 			dp->tx_blocked = (clock_t)0;
1682 		}
1683 		mutex_exit(&dp->xmitlock);
1684 		goto schedule_next;
1685 	}
1686 
1687 	tbp = GET_TXBUF(dp, dp->tx_active_head);
1688 	if (now - tbp->txb_stime < dp->gc.gc_tx_timeout) {
1689 		mutex_exit(&dp->xmitlock);
1690 		goto schedule_next;
1691 	}
1692 	mutex_exit(&dp->xmitlock);
1693 
1694 	gem_dump_txbuf(dp, CE_WARN, "gem_tx_timeout: tx timeout");
1695 
1696 	/* discard untransmitted packets and restart tx.  */
1697 	(void) gem_restart_nic(dp, GEM_RESTART_NOWAIT);
1698 	tx_sched = B_TRUE;
1699 	dp->tx_blocked = (clock_t)0;
1700 
1701 schedule_next:
1702 	mutex_exit(&dp->intrlock);
1703 
1704 	/* restart the downstream if needed */
1705 	if (tx_sched) {
1706 		mac_tx_update(dp->mh);
1707 	}
1708 
1709 	DPRINTF(4, (CE_CONT,
1710 	    "!%s: blocked:%d active_head:%d active_tail:%d desc_intr:%d",
1711 	    dp->name, BOOLEAN(dp->tx_blocked),
1712 	    dp->tx_active_head, dp->tx_active_tail, dp->tx_desc_intr));
1713 	dp->timeout_id =
1714 	    timeout((void (*)(void *))gem_tx_timeout,
1715 	    (void *)dp, dp->gc.gc_tx_timeout_interval);
1716 }
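/*
 * Illustrative note (assumption about a typical configuration, not part
 * of the original source): both gc_tx_timeout and gc_tx_timeout_interval
 * are expressed in lbolt ticks, so a chip driver usually initializes
 * them with drv_usectohz(), e.g.
 *
 *	gc->gc_tx_timeout = drv_usectohz(5 * 1000000);		5 seconds
 *	gc->gc_tx_timeout_interval = drv_usectohz(1 * 1000000);	1 second
 *
 * which makes the watchdog above run once a second and declare a tx
 * timeout after five seconds without progress.
 */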
1717 
1718 /* ================================================================== */
1719 /*
1720  * Interrupt handler
1721  */
1722 /* ================================================================== */
1723 __INLINE__
1724 static void
1725 gem_append_rxbuf(struct gem_dev *dp, struct rxbuf *rbp_head)
1726 {
1727 	struct rxbuf	*rbp;
1728 	seqnum_t	tail;
1729 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
1730 
1731 	ASSERT(rbp_head != NULL);
1732 	ASSERT(mutex_owned(&dp->intrlock));
1733 
1734 	DPRINTF(3, (CE_CONT, "!%s: %s: slot_head:%d, slot_tail:%d",
1735 	    dp->name, __func__, dp->rx_active_head, dp->rx_active_tail));
1736 
1737 	/*
1738 	 * Add new buffers into active rx buffer list
1739 	 */
1740 	if (dp->rx_buf_head == NULL) {
1741 		dp->rx_buf_head = rbp_head;
1742 		ASSERT(dp->rx_buf_tail == NULL);
1743 	} else {
1744 		dp->rx_buf_tail->rxb_next = rbp_head;
1745 	}
1746 
1747 	tail = dp->rx_active_tail;
1748 	for (rbp = rbp_head; rbp; rbp = rbp->rxb_next) {
1749 		/* keep the tail of the rx buffer list up to date for the lower layer */
1750 		dp->rx_buf_tail = rbp;
1751 
1752 		dp->gc.gc_rx_desc_write(dp,
1753 		    SLOT(tail, rx_ring_size),
1754 		    rbp->rxb_dmacookie,
1755 		    rbp->rxb_nfrags);
1756 
1757 		dp->rx_active_tail = tail = tail + 1;
1758 	}
1759 }
1760 #pragma inline(gem_append_rxbuf)
1761 
1762 mblk_t *
1763 gem_get_packet_default(struct gem_dev *dp, struct rxbuf *rbp, size_t len)
1764 {
1765 	int		rx_header_len = dp->gc.gc_rx_header_len;
1766 	uint8_t		*bp;
1767 	mblk_t		*mp;
1768 
1769 	/* allocate a new mblk */
1770 	if (mp = allocb(len + VTAG_SIZE, BPRI_MED)) {
1771 		ASSERT(mp->b_next == NULL);
1772 		ASSERT(mp->b_cont == NULL);
1773 
1774 		mp->b_rptr += VTAG_SIZE;
1775 		bp = mp->b_rptr;
1776 		mp->b_wptr = bp + len;
1777 
1778 		/*
1779 		 * flush the range of the entire buffer to invalidate
1780 		 * all of the corresponding dirty entries in the iocache.
1781 		 */
1782 		(void) ddi_dma_sync(rbp->rxb_dh, rx_header_len,
1783 		    0, DDI_DMA_SYNC_FORKERNEL);
1784 
1785 		bcopy(rbp->rxb_buf + rx_header_len, bp, len);
1786 	}
1787 	return (mp);
1788 }
1789 
1790 #ifdef GEM_DEBUG_LEVEL
1791 uint_t	gem_rx_pkts[17];
1792 #endif
1793 
1794 
1795 int
1796 gem_receive(struct gem_dev *dp)
1797 {
1798 	uint64_t	len_total = 0;
1799 	struct rxbuf	*rbp;
1800 	mblk_t		*mp;
1801 	int		cnt = 0;
1802 	uint64_t	rxstat;
1803 	struct rxbuf	*newbufs;
1804 	struct rxbuf	**newbufs_tailp;
1805 	mblk_t		*rx_head;
1806 	mblk_t 		**rx_tailp;
1807 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
1808 	seqnum_t	active_head;
1809 	uint64_t	(*rx_desc_stat)(struct gem_dev *dp,
1810 	    int slot, int ndesc);
1811 	int		ethermin = ETHERMIN;
1812 	int		ethermax = dp->mtu + sizeof (struct ether_header);
1813 	int		rx_header_len = dp->gc.gc_rx_header_len;
1814 
1815 	ASSERT(mutex_owned(&dp->intrlock));
1816 
1817 	DPRINTF(3, (CE_CONT, "!%s: gem_receive: rx_buf_head:%p",
1818 	    dp->name, dp->rx_buf_head));
1819 
1820 	rx_desc_stat  = dp->gc.gc_rx_desc_stat;
1821 	newbufs_tailp = &newbufs;
1822 	rx_tailp = &rx_head;
1823 	for (active_head = dp->rx_active_head;
1824 	    (rbp = dp->rx_buf_head) != NULL; active_head++) {
1825 		int		len;
1826 		if (cnt == 0) {
1827 			cnt = max(dp->poll_pkt_delay*2, 10);
1828 			cnt = min(cnt,
1829 			    dp->rx_active_tail - active_head);
1830 			gem_rx_desc_dma_sync(dp,
1831 			    SLOT(active_head, rx_ring_size),
1832 			    cnt,
1833 			    DDI_DMA_SYNC_FORKERNEL);
1834 		}
1835 
1836 		if (rx_header_len > 0) {
1837 			(void) ddi_dma_sync(rbp->rxb_dh, 0,
1838 			    rx_header_len, DDI_DMA_SYNC_FORKERNEL);
1839 		}
1840 
1841 		if (((rxstat = (*rx_desc_stat)(dp,
1842 		    SLOT(active_head, rx_ring_size),
1843 		    rbp->rxb_nfrags))
1844 		    & (GEM_RX_DONE | GEM_RX_ERR)) == 0) {
1845 			/* not received yet */
1846 			break;
1847 		}
1848 
1849 		/* Remove the head of the rx buffer list */
1850 		dp->rx_buf_head = rbp->rxb_next;
1851 		cnt--;
1852 
1853 
1854 		if (rxstat & GEM_RX_ERR) {
1855 			goto next;
1856 		}
1857 
1858 		len = rxstat & GEM_RX_LEN;
1859 		DPRINTF(3, (CE_CONT, "!%s: %s: rxstat:0x%llx, len:0x%x",
1860 		    dp->name, __func__, rxstat, len));
1861 
1862 		/*
1863 		 * Copy the packet
1864 		 */
1865 		if ((mp = dp->gc.gc_get_packet(dp, rbp, len)) == NULL) {
1866 			/* no memory, discard the packet */
1867 			dp->stats.norcvbuf++;
1868 			goto next;
1869 		}
1870 
1871 		/*
1872 		 * Process VLAN tag
1873 		 */
1874 		ethermin = ETHERMIN;
1875 		ethermax = dp->mtu + sizeof (struct ether_header);
1876 		if (GET_NET16(mp->b_rptr + VTAG_OFF) == VTAG_TPID) {
1877 			ethermax += VTAG_SIZE;
1878 		}
1879 
1880 		/* check packet size */
1881 		if (len < ethermin) {
1882 			dp->stats.errrcv++;
1883 			dp->stats.runt++;
1884 			freemsg(mp);
1885 			goto next;
1886 		}
1887 
1888 		if (len > ethermax) {
1889 			dp->stats.errrcv++;
1890 			dp->stats.frame_too_long++;
1891 			freemsg(mp);
1892 			goto next;
1893 		}
1894 
1895 		len_total += len;
1896 
1897 #ifdef GEM_DEBUG_VLAN
1898 		if (GET_ETHERTYPE(mp->b_rptr) == VTAG_TPID) {
1899 			gem_dump_packet(dp, (char *)__func__, mp, B_TRUE);
1900 		}
1901 #endif
1902 		/* append the received packet to the temporary rx packet list */
1903 		*rx_tailp = mp;
1904 		rx_tailp  = &mp->b_next;
1905 
1906 		if (mp->b_rptr[0] & 1) {
1907 			if (bcmp(mp->b_rptr,
1908 			    gem_etherbroadcastaddr.ether_addr_octet,
1909 			    ETHERADDRL) == 0) {
1910 				dp->stats.rbcast++;
1911 			} else {
1912 				dp->stats.rmcast++;
1913 			}
1914 		}
1915 next:
1916 		ASSERT(rbp != NULL);
1917 
1918 		/* append the rx buffer to the temporary new buffer list */
1919 		*newbufs_tailp = rbp;
1920 		newbufs_tailp  = &rbp->rxb_next;
1921 	}
1922 
1923 	/* advance rx_active_head */
1924 	if ((cnt = active_head - dp->rx_active_head) > 0) {
1925 		dp->stats.rbytes += len_total;
1926 		dp->stats.rpackets += cnt;
1927 	}
1928 	dp->rx_active_head = active_head;
1929 
1930 	/* terminate the working list */
1931 	*newbufs_tailp = NULL;
1932 	*rx_tailp = NULL;
1933 
1934 	if (dp->rx_buf_head == NULL) {
1935 		dp->rx_buf_tail = NULL;
1936 	}
1937 
1938 	DPRINTF(4, (CE_CONT, "%s: %s: cnt:%d, rx_head:%p",
1939 	    dp->name, __func__, cnt, rx_head));
1940 
1941 	if (newbufs) {
1942 		/*
1943 		 * refill the rx list with new buffers
1944 		 */
1945 		seqnum_t	head;
1946 
1947 		/* save current tail */
1948 		head = dp->rx_active_tail;
1949 		gem_append_rxbuf(dp, newbufs);
1950 
1951 		/* call the hw-dependent rx start routine if one is provided */
1952 		dp->gc.gc_rx_start(dp,
1953 		    SLOT(head, rx_ring_size), dp->rx_active_tail - head);
1954 	}
1955 
1956 	if (rx_head) {
1957 		/*
1958 		 * send up received packets
1959 		 */
1960 		mutex_exit(&dp->intrlock);
1961 		mac_rx(dp->mh, dp->mac_rx_ring_ha, rx_head);
1962 		mutex_enter(&dp->intrlock);
1963 	}
1964 
1965 #ifdef GEM_DEBUG_LEVEL
1966 	gem_rx_pkts[min(cnt, sizeof (gem_rx_pkts)/sizeof (uint_t)-1)]++;
1967 #endif
1968 	return (cnt);
1969 }
1970 
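/*
 * gem_tx_done:
 * Reclaims completed tx buffers and decides whether the blocked tx
 * stream should be rescheduled.  Returns B_TRUE when the caller should
 * call mac_tx_update() to restart the downstream.
 */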
1971 boolean_t
1972 gem_tx_done(struct gem_dev *dp)
1973 {
1974 	boolean_t	tx_sched = B_FALSE;
1975 
1976 	if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
1977 		(void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
1978 		DPRINTF(2, (CE_CONT, "!%s: gem_tx_done: tx_desc: %d %d",
1979 		    dp->name, dp->tx_active_head, dp->tx_active_tail));
1980 		tx_sched = B_TRUE;
1981 		goto x;
1982 	}
1983 
1984 	mutex_enter(&dp->xmitlock);
1985 
1986 	/* XXX - we must not have any packets in soft queue */
1987 	ASSERT(dp->tx_softq_head == dp->tx_softq_tail);
1988 	/*
1989 	 * If no more free tx buffers are coming and the tx side is blocked,
1990 	 * it is worth rescheduling the downstream, i.e. the tx side.
1991 	 */
1992 	ASSERT(dp->tx_desc_intr - dp->tx_desc_head >= 0);
1993 	if (dp->tx_blocked && dp->tx_desc_intr == dp->tx_desc_head) {
1994 		/*
1995 		 * As no further tx-done interrupts are scheduled, this
1996 		 * is the last chance to kick the tx side, which may be
1997 		 * blocked now, otherwise the tx side never works again.
1998 		 */
1999 		tx_sched = B_TRUE;
2000 		dp->tx_blocked = (clock_t)0;
2001 		dp->tx_max_packets =
2002 		    min(dp->tx_max_packets + 2, dp->gc.gc_tx_buf_limit);
2003 	}
2004 
2005 	mutex_exit(&dp->xmitlock);
2006 
2007 	DPRINTF(3, (CE_CONT, "!%s: %s: ret: blocked:%d",
2008 	    dp->name, __func__, BOOLEAN(dp->tx_blocked)));
2009 x:
2010 	return (tx_sched);
2011 }
2012 
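/*
 * gem_intr:
 * Common interrupt handler.  Under intrlock it dispatches to the
 * chip-specific gc_interrupt routine; when the handler requests it via
 * INTR_RESTART_TX, mac_tx_update() is called outside the lock to resume
 * transmission.
 */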
2013 static uint_t
2014 gem_intr(struct gem_dev	*dp)
2015 {
2016 	uint_t		ret;
2017 
2018 	mutex_enter(&dp->intrlock);
2019 	if (dp->mac_suspended) {
2020 		mutex_exit(&dp->intrlock);
2021 		return (DDI_INTR_UNCLAIMED);
2022 	}
2023 	dp->intr_busy = B_TRUE;
2024 
2025 	ret = (*dp->gc.gc_interrupt)(dp);
2026 
2027 	if (ret == DDI_INTR_UNCLAIMED) {
2028 		dp->intr_busy = B_FALSE;
2029 		mutex_exit(&dp->intrlock);
2030 		return (ret);
2031 	}
2032 
2033 	if (!dp->mac_active) {
2034 		cv_broadcast(&dp->tx_drain_cv);
2035 	}
2036 
2037 
2038 	dp->stats.intr++;
2039 	dp->intr_busy = B_FALSE;
2040 
2041 	mutex_exit(&dp->intrlock);
2042 
2043 	if (ret & INTR_RESTART_TX) {
2044 		DPRINTF(4, (CE_CONT, "!%s: calling mac_tx_update", dp->name));
2045 		mac_tx_update(dp->mh);
2046 		ret &= ~INTR_RESTART_TX;
2047 	}
2048 	return (ret);
2049 }
2050 
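/*
 * gem_intr_watcher:
 * Polling fallback that invokes the interrupt handler from a timeout(9F)
 * callback every tick, presumably for chips whose interrupt line is
 * unavailable or unreliable.
 */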
2051 static void
2052 gem_intr_watcher(struct gem_dev *dp)
2053 {
2054 	(void) gem_intr(dp);
2055 
2056 	/* schedule the next call of gem_intr_watcher */
2057 	dp->intr_watcher_id =
2058 	    timeout((void (*)(void *))gem_intr_watcher, (void *)dp, 1);
2059 }
2060 
2061 /* ======================================================================== */
2062 /*
2063  * MII support routines
2064  */
2065 /* ======================================================================== */
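/*
 * gem_choose_forcedmode:
 * Derives the forced speed/duplex setting from the advertised
 * capabilities, preferring the highest advertised speed
 * (1000 -> 100 -> 10).
 */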
2066 static void
2067 gem_choose_forcedmode(struct gem_dev *dp)
2068 {
2069 	/* choose media mode */
2070 	if (dp->anadv_1000fdx || dp->anadv_1000hdx) {
2071 		dp->speed = GEM_SPD_1000;
2072 		dp->full_duplex = dp->anadv_1000fdx;
2073 	} else if (dp->anadv_100fdx || dp->anadv_100t4) {
2074 		dp->speed = GEM_SPD_100;
2075 		dp->full_duplex = B_TRUE;
2076 	} else if (dp->anadv_100hdx) {
2077 		dp->speed = GEM_SPD_100;
2078 		dp->full_duplex = B_FALSE;
2079 	} else {
2080 		dp->speed = GEM_SPD_10;
2081 		dp->full_duplex = dp->anadv_10fdx;
2082 	}
2083 }
2084 
2085 uint16_t
2086 gem_mii_read(struct gem_dev *dp, uint_t reg)
2087 {
2088 	if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2089 		(*dp->gc.gc_mii_sync)(dp);
2090 	}
2091 	return ((*dp->gc.gc_mii_read)(dp, reg));
2092 }
2093 
2094 void
2095 gem_mii_write(struct gem_dev *dp, uint_t reg, uint16_t val)
2096 {
2097 	if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2098 		(*dp->gc.gc_mii_sync)(dp);
2099 	}
2100 	(*dp->gc.gc_mii_write)(dp, reg, val);
2101 }
2102 
2103 #define	fc_cap_decode(x)	\
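/*
 * Flow control capabilities are handled as a 2-bit code:
 * bit0 = symmetric PAUSE, bit1 = asymmetric direction (ASM_DIR).
 * fc_cap_decode() extracts the code from an MII ability register;
 * fc_cap_encode[] in gem_mii_config_default() below is the inverse
 * mapping used when building the advertisement register.
 */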
2104 	((((x) & MII_ABILITY_PAUSE) ? 1 : 0) |	\
2105 	(((x) & MII_ABILITY_ASM_DIR) ? 2 : 0))
2106 
2107 int
2108 gem_mii_config_default(struct gem_dev *dp)
2109 {
2110 	uint16_t	mii_stat;
2111 	uint16_t	val;
2112 	static uint16_t fc_cap_encode[4] = {
2113 		/* none */		0,
2114 		/* symmetric */		MII_ABILITY_PAUSE,
2115 		/* tx */		MII_ABILITY_ASM_DIR,
2116 		/* rx-symmetric */	MII_ABILITY_PAUSE | MII_ABILITY_ASM_DIR,
2117 	};
2118 
2119 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2120 
2121 	/*
2122 	 * Configure bits in advertisement register
2123 	 */
2124 	mii_stat = dp->mii_status;
2125 
2126 	DPRINTF(1, (CE_CONT, "!%s: %s: MII_STATUS reg:%b",
2127 	    dp->name, __func__, mii_stat, MII_STATUS_BITS));
2128 
2129 	if ((mii_stat & MII_STATUS_ABILITY_TECH) == 0) {
2130 		/* odd: no technology ability bits are reported */
2131 		cmn_err(CE_WARN, "!%s: wrong ability bits: mii_status:%b",
2132 		    dp->name, mii_stat, MII_STATUS_BITS);
2133 		return (GEM_FAILURE);
2134 	}
2135 
2136 	/* Do not change the rest of the ability bits in the advert reg */
2137 	val = gem_mii_read(dp, MII_AN_ADVERT) & ~MII_ABILITY_ALL;
2138 
2139 	DPRINTF(0, (CE_CONT,
2140 	    "!%s: %s: 100T4:%d 100F:%d 100H:%d 10F:%d 10H:%d",
2141 	    dp->name, __func__,
2142 	    dp->anadv_100t4, dp->anadv_100fdx, dp->anadv_100hdx,
2143 	    dp->anadv_10fdx, dp->anadv_10hdx));
2144 
2145 	if (dp->anadv_100t4) {
2146 		val |= MII_ABILITY_100BASE_T4;
2147 	}
2148 	if (dp->anadv_100fdx) {
2149 		val |= MII_ABILITY_100BASE_TX_FD;
2150 	}
2151 	if (dp->anadv_100hdx) {
2152 		val |= MII_ABILITY_100BASE_TX;
2153 	}
2154 	if (dp->anadv_10fdx) {
2155 		val |= MII_ABILITY_10BASE_T_FD;
2156 	}
2157 	if (dp->anadv_10hdx) {
2158 		val |= MII_ABILITY_10BASE_T;
2159 	}
2160 
2161 	/* set flow control capability */
2162 	val |= fc_cap_encode[dp->anadv_flow_control];
2163 
2164 	DPRINTF(0, (CE_CONT,
2165 	    "!%s: %s: setting MII_AN_ADVERT reg:%b, mii_mode:%d, fc:%d",
2166 	    dp->name, __func__, val, MII_ABILITY_BITS, dp->gc.gc_mii_mode,
2167 	    dp->anadv_flow_control));
2168 
2169 	gem_mii_write(dp, MII_AN_ADVERT, val);
2170 
2171 	if (mii_stat & MII_STATUS_XSTATUS) {
2172 		/*
2173 		 * 1000Base-T GMII support
2174 		 */
2175 		if (!dp->anadv_autoneg) {
2176 			/* enable manual configuration */
2177 			val = MII_1000TC_CFG_EN;
2178 		} else {
2179 			val = 0;
2180 			if (dp->anadv_1000fdx) {
2181 				val |= MII_1000TC_ADV_FULL;
2182 			}
2183 			if (dp->anadv_1000hdx) {
2184 				val |= MII_1000TC_ADV_HALF;
2185 			}
2186 		}
2187 		DPRINTF(0, (CE_CONT,
2188 		    "!%s: %s: setting MII_1000TC reg:%b",
2189 		    dp->name, __func__, val, MII_1000TC_BITS));
2190 
2191 		gem_mii_write(dp, MII_1000TC, val);
2192 	}
2193 
2194 	return (GEM_SUCCESS);
2195 }
2196 
2197 #define	GEM_LINKUP(dp)		mac_link_update((dp)->mh, LINK_STATE_UP)
2198 #define	GEM_LINKDOWN(dp)	mac_link_update((dp)->mh, LINK_STATE_DOWN)
2199 
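/*
 * gem_fc_result[][] resolves the negotiated flow control mode from our
 * advertised capability (row) and the link partner's ability (column),
 * both encoded with fc_cap_decode().
 */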
2200 static uint8_t gem_fc_result[4 /* my cap */ ][4 /* lp cap */] = {
2201 /*	 none	symm	tx	rx/symm */
2202 /* none */
2203 	{FLOW_CONTROL_NONE,
2204 		FLOW_CONTROL_NONE,
2205 			FLOW_CONTROL_NONE,
2206 				FLOW_CONTROL_NONE},
2207 /* sym */
2208 	{FLOW_CONTROL_NONE,
2209 		FLOW_CONTROL_SYMMETRIC,
2210 			FLOW_CONTROL_NONE,
2211 				FLOW_CONTROL_SYMMETRIC},
2212 /* tx */
2213 	{FLOW_CONTROL_NONE,
2214 		FLOW_CONTROL_NONE,
2215 			FLOW_CONTROL_NONE,
2216 				FLOW_CONTROL_TX_PAUSE},
2217 /* rx/symm */
2218 	{FLOW_CONTROL_NONE,
2219 		FLOW_CONTROL_SYMMETRIC,
2220 			FLOW_CONTROL_RX_PAUSE,
2221 				FLOW_CONTROL_SYMMETRIC},
2222 };
2223 
2224 static char *gem_fc_type[] = {
2225 	"without",
2226 	"with symmetric",
2227 	"with tx",
2228 	"with rx",
2229 };
2230 
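/*
 * gem_mii_link_check:
 * The MII link state machine.  States advance roughly as
 *   UNKNOWN -> RESETTING -> AUTONEGOTIATING -> AN_DONE ->
 *   MEDIA_SETUP -> LINKDOWN <-> LINKUP
 * and the routine returns B_TRUE when the tx side should be rescheduled
 * via mac_tx_update().  It runs under intrlock, normally from the link
 * watcher timeout below.
 */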
2231 boolean_t
2232 gem_mii_link_check(struct gem_dev *dp)
2233 {
2234 	uint16_t	old_mii_state;
2235 	boolean_t	tx_sched = B_FALSE;
2236 	uint16_t	status;
2237 	uint16_t	advert;
2238 	uint16_t	lpable;
2239 	uint16_t	exp;
2240 	uint16_t	ctl1000;
2241 	uint16_t	stat1000;
2242 	uint16_t	val;
2243 	clock_t		now;
2244 	clock_t		diff;
2245 	int		linkdown_action;
2246 	boolean_t	fix_phy = B_FALSE;
2247 
2248 	now = ddi_get_lbolt();
2249 	old_mii_state = dp->mii_state;
2250 
2251 	DPRINTF(3, (CE_CONT, "!%s: %s: time:%d state:%d",
2252 	    dp->name, __func__, now, dp->mii_state));
2253 
2254 	diff = now - dp->mii_last_check;
2255 	dp->mii_last_check = now;
2256 
2257 	/*
2258 	 * For NWAM, don't show linkdown state right
2259 	 * after the system boots
2260 	 */
2261 	if (dp->linkup_delay > 0) {
2262 		if (dp->linkup_delay > diff) {
2263 			dp->linkup_delay -= diff;
2264 		} else {
2265 			/* link up timeout */
2266 			dp->linkup_delay = -1;
2267 		}
2268 	}
2269 
2270 next_nowait:
2271 	switch (dp->mii_state) {
2272 	case MII_STATE_UNKNOWN:
2273 		/* power-up, DP83840 requires 32 sync bits */
2274 		(*dp->gc.gc_mii_sync)(dp);
2275 		goto reset_phy;
2276 
2277 	case MII_STATE_RESETTING:
2278 		dp->mii_timer -= diff;
2279 		if (dp->mii_timer > 0) {
2280 			/* don't read phy registers in resetting */
2281 			dp->mii_interval = WATCH_INTERVAL_FAST;
2282 			goto next;
2283 		}
2284 
2285 		/* Timer expired, ensure reset bit is not set */
2286 
2287 		if (dp->mii_status & MII_STATUS_MFPRMBLSUPR) {
2288 			/* some phys need sync bits after reset */
2289 			(*dp->gc.gc_mii_sync)(dp);
2290 		}
2291 		val = gem_mii_read(dp, MII_CONTROL);
2292 		if (val & MII_CONTROL_RESET) {
2293 			cmn_err(CE_NOTE,
2294 			    "!%s: time:%ld resetting phy not complete."
2295 			    " mii_control:0x%b",
2296 			    dp->name, ddi_get_lbolt(),
2297 			    val, MII_CONTROL_BITS);
2298 		}
2299 
2300 		/* ensure neither isolated nor pwrdown nor auto-nego mode */
2301 		/* XXX -- this operation is required for NS DP83840A. */
2302 		gem_mii_write(dp, MII_CONTROL, 0);
2303 
2304 		/* As resetting PHY has completed, configure PHY registers */
2305 		if ((*dp->gc.gc_mii_config)(dp) != GEM_SUCCESS) {
2306 			/* we failed to configure PHY. */
2307 			goto reset_phy;
2308 		}
2309 
2310 		/* mii_config may disable auto-negotiation */
2311 		gem_choose_forcedmode(dp);
2312 
2313 		dp->mii_lpable = 0;
2314 		dp->mii_advert = 0;
2315 		dp->mii_exp = 0;
2316 		dp->mii_ctl1000 = 0;
2317 		dp->mii_stat1000 = 0;
2318 		dp->flow_control = FLOW_CONTROL_NONE;
2319 
2320 		if (!dp->anadv_autoneg) {
2321 			/* skip auto-negotiation phase */
2322 			dp->mii_state = MII_STATE_MEDIA_SETUP;
2323 			dp->mii_timer = 0;
2324 			dp->mii_interval = 0;
2325 			goto next_nowait;
2326 		}
2327 
2328 		/* Issue auto-negotiation command */
2329 		goto autonego;
2330 
2331 	case MII_STATE_AUTONEGOTIATING:
2332 		/*
2333 		 * Autonegotiation is in progress
2334 		 */
2335 		dp->mii_timer -= diff;
2336 		if (dp->mii_timer -
2337 		    (dp->gc.gc_mii_an_timeout
2338 		    - dp->gc.gc_mii_an_wait) > 0) {
2339 			/*
2340 			 * wait for a while, typically autonegotiation
2341 			 * completes in 2.3 - 2.5 sec.
2342 			 */
2343 			dp->mii_interval = WATCH_INTERVAL_FAST;
2344 			goto next;
2345 		}
2346 
2347 		/* read PHY status */
2348 		status = gem_mii_read(dp, MII_STATUS);
2349 		DPRINTF(4, (CE_CONT,
2350 		    "!%s: %s: called: mii_state:%d MII_STATUS reg:%b",
2351 		    dp->name, __func__, dp->mii_state,
2352 		    status, MII_STATUS_BITS));
2353 
2354 		if (status & MII_STATUS_REMFAULT) {
2355 			/*
2356 			 * The link partner reported that something wrong happened.
2357 			 * What should we do?
2358 			 */
2359 			cmn_err(CE_CONT,
2360 			    "!%s: auto-negotiation failed: remote fault",
2361 			    dp->name);
2362 			goto autonego;
2363 		}
2364 
2365 		if ((status & MII_STATUS_ANDONE) == 0) {
2366 			if (dp->mii_timer <= 0) {
2367 				/*
2368 				 * Auto-negotiation timed out;
2369 				 * try again w/o resetting the phy.
2370 				 */
2371 				if (!dp->mii_supress_msg) {
2372 					cmn_err(CE_WARN,
2373 				    "!%s: auto-negotiation failed: timeout",
2374 					    dp->name);
2375 					dp->mii_supress_msg = B_TRUE;
2376 				}
2377 				goto autonego;
2378 			}
2379 			/*
2380 			 * Auto-negotiation is in progress. Wait.
2381 			 */
2382 			dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2383 			goto next;
2384 		}
2385 
2386 		/*
2387 		 * Auto-negotiation has completed.
2388 		 * Assume linkdown and fall through.
2389 		 */
2390 		dp->mii_supress_msg = B_FALSE;
2391 		dp->mii_state = MII_STATE_AN_DONE;
2392 		DPRINTF(0, (CE_CONT,
2393 		    "!%s: auto-negotiation completed, MII_STATUS:%b",
2394 		    dp->name, status, MII_STATUS_BITS));
2395 
2396 		if (dp->gc.gc_mii_an_delay > 0) {
2397 			dp->mii_timer = dp->gc.gc_mii_an_delay;
2398 			dp->mii_interval = drv_usectohz(20*1000);
2399 			goto next;
2400 		}
2401 
2402 		dp->mii_timer = 0;
2403 		diff = 0;
2404 		goto next_nowait;
2405 
2406 	case MII_STATE_AN_DONE:
2407 		/*
2408 		 * Auto-negotiation is done. Now we can set up the media.
2409 		 */
2410 		dp->mii_timer -= diff;
2411 		if (dp->mii_timer > 0) {
2412 			/* wait for a while */
2413 			dp->mii_interval = WATCH_INTERVAL_FAST;
2414 			goto next;
2415 		}
2416 
2417 		/*
2418 		 * set up the result of auto negotiation
2419 		 */
2420 
2421 		/*
2422 		 * Read the registers required to determine the current
2423 		 * duplex mode and media speed.
2424 		 */
2425 		if (dp->gc.gc_mii_an_delay > 0) {
2426 			/*
2427 			 * As the link watcher context has been suspended,
2428 			 * 'status' is invalid. We must re-read the status register here.
2429 			 */
2430 			status = gem_mii_read(dp, MII_STATUS);
2431 		}
2432 		advert = gem_mii_read(dp, MII_AN_ADVERT);
2433 		lpable = gem_mii_read(dp, MII_AN_LPABLE);
2434 		exp = gem_mii_read(dp, MII_AN_EXPANSION);
2435 		if (exp == 0xffff) {
2436 			/* some phys don't have exp register */
2437 			exp = 0;
2438 		}
2439 		ctl1000  = 0;
2440 		stat1000 = 0;
2441 		if (dp->mii_status & MII_STATUS_XSTATUS) {
2442 			ctl1000  = gem_mii_read(dp, MII_1000TC);
2443 			stat1000 = gem_mii_read(dp, MII_1000TS);
2444 		}
2445 		dp->mii_lpable = lpable;
2446 		dp->mii_advert = advert;
2447 		dp->mii_exp = exp;
2448 		dp->mii_ctl1000  = ctl1000;
2449 		dp->mii_stat1000 = stat1000;
2450 
2451 		cmn_err(CE_CONT,
2452 		"!%s: auto-negotiation done, advert:%b, lpable:%b, exp:%b",
2453 		    dp->name,
2454 		    advert, MII_ABILITY_BITS,
2455 		    lpable, MII_ABILITY_BITS,
2456 		    exp, MII_AN_EXP_BITS);
2457 
2458 		if (dp->mii_status & MII_STATUS_XSTATUS) {
2459 			cmn_err(CE_CONT,
2460 			    "! MII_1000TC:%b, MII_1000TS:%b",
2461 			    ctl1000, MII_1000TC_BITS,
2462 			    stat1000, MII_1000TS_BITS);
2463 		}
2464 
2465 		if (gem_population(lpable) <= 1 &&
2466 		    (exp & MII_AN_EXP_LPCANAN) == 0) {
2467 			if ((advert & MII_ABILITY_TECH) != lpable) {
2468 				cmn_err(CE_WARN,
2469 				    "!%s: but the link partner doesn't seem"
2470 				    " to have auto-negotiation capability."
2471 				    " please check the link configuration.",
2472 				    dp->name);
2473 			}
2474 			/*
2475 			 * it should be the result of parallel detection, which
2476 			 * cannot detect duplex mode.
2477 			 */
2478 			if (lpable & MII_ABILITY_100BASE_TX) {
2479 				/*
2480 				 * we prefer full duplex mode for 100Mbps
2481 				 * connection, if we can.
2482 				 */
2483 				lpable |= advert & MII_ABILITY_100BASE_TX_FD;
2484 			}
2485 
2486 			if ((advert & lpable) == 0 &&
2487 			    lpable & MII_ABILITY_10BASE_T) {
2488 				lpable |= advert & MII_ABILITY_10BASE_T_FD;
2489 			}
2490 			/*
2491 			 * as the link partner cannot auto-negotiate, use
2492 			 * a fixed mode temporarily.
2493 			 */
2494 			fix_phy = B_TRUE;
2495 		} else if (lpable == 0) {
2496 			cmn_err(CE_WARN, "!%s: wrong lpable.", dp->name);
2497 			goto reset_phy;
2498 		}
2499 		/*
2500 		 * configure current link mode according to AN priority.
2501 		 */
2502 		val = advert & lpable;
2503 		if ((ctl1000 & MII_1000TC_ADV_FULL) &&
2504 		    (stat1000 & MII_1000TS_LP_FULL)) {
2505 			/* 1000BaseT & full duplex */
2506 			dp->speed	 = GEM_SPD_1000;
2507 			dp->full_duplex  = B_TRUE;
2508 		} else if ((ctl1000 & MII_1000TC_ADV_HALF) &&
2509 		    (stat1000 & MII_1000TS_LP_HALF)) {
2510 			/* 1000BaseT & half duplex */
2511 			dp->speed = GEM_SPD_1000;
2512 			dp->full_duplex = B_FALSE;
2513 		} else if (val & MII_ABILITY_100BASE_TX_FD) {
2514 			/* 100BaseTx & full duplex */
2515 			dp->speed = GEM_SPD_100;
2516 			dp->full_duplex = B_TRUE;
2517 		} else if (val & MII_ABILITY_100BASE_T4) {
2518 			/* 100BaseT4 & full duplex */
2519 			dp->speed = GEM_SPD_100;
2520 			dp->full_duplex = B_TRUE;
2521 		} else if (val & MII_ABILITY_100BASE_TX) {
2522 			/* 100BaseTx & half duplex */
2523 			dp->speed	 = GEM_SPD_100;
2524 			dp->full_duplex  = B_FALSE;
2525 		} else if (val & MII_ABILITY_10BASE_T_FD) {
2526 			/* 10BaseT & full duplex */
2527 			dp->speed	 = GEM_SPD_10;
2528 			dp->full_duplex  = B_TRUE;
2529 		} else if (val & MII_ABILITY_10BASE_T) {
2530 			/* 10BaseT & half duplex */
2531 			dp->speed	 = GEM_SPD_10;
2532 			dp->full_duplex  = B_FALSE;
2533 		} else {
2534 			/*
2535 			 * It seems that the link partner doesn't have
2536 			 * auto-negotiation capability and our PHY
2537 			 * could not report the correct current mode.
2538 			 * We guess the current mode from the mii_control register.
2539 			 */
2540 			val = gem_mii_read(dp, MII_CONTROL);
2541 
2542 			/* select 100m full or 10m half */
2543 			dp->speed = (val & MII_CONTROL_100MB) ?
2544 			    GEM_SPD_100 : GEM_SPD_10;
2545 			dp->full_duplex = dp->speed != GEM_SPD_10;
2546 			fix_phy = B_TRUE;
2547 
2548 			cmn_err(CE_NOTE,
2549 			    "!%s: auto-negotiation done but "
2550 			    "common ability not found.\n"
2551 			    "PHY state: control:%b advert:%b lpable:%b\n"
2552 			    "guessing %d Mbps %s duplex mode",
2553 			    dp->name,
2554 			    val, MII_CONTROL_BITS,
2555 			    advert, MII_ABILITY_BITS,
2556 			    lpable, MII_ABILITY_BITS,
2557 			    gem_speed_value[dp->speed],
2558 			    dp->full_duplex ? "full" : "half");
2559 		}
2560 
2561 		if (dp->full_duplex) {
2562 			dp->flow_control =
2563 			    gem_fc_result[fc_cap_decode(advert)]
2564 			    [fc_cap_decode(lpable)];
2565 		} else {
2566 			dp->flow_control = FLOW_CONTROL_NONE;
2567 		}
2568 		dp->mii_state = MII_STATE_MEDIA_SETUP;
2569 		/* FALLTHROUGH */
2570 
2571 	case MII_STATE_MEDIA_SETUP:
2572 		dp->mii_state = MII_STATE_LINKDOWN;
2573 		dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
2574 		DPRINTF(2, (CE_CONT, "!%s: setup media mode done", dp->name));
2575 		dp->mii_supress_msg = B_FALSE;
2576 
2577 		/* use short interval */
2578 		dp->mii_interval = WATCH_INTERVAL_FAST;
2579 
2580 		if ((!dp->anadv_autoneg) ||
2581 		    dp->gc.gc_mii_an_oneshot || fix_phy) {
2582 
2583 			/*
2584 			 * write specified mode to phy.
2585 			 */
2586 			val = gem_mii_read(dp, MII_CONTROL);
2587 			val &= ~(MII_CONTROL_SPEED | MII_CONTROL_FDUPLEX |
2588 			    MII_CONTROL_ANE | MII_CONTROL_RSAN);
2589 
2590 			if (dp->full_duplex) {
2591 				val |= MII_CONTROL_FDUPLEX;
2592 			}
2593 
2594 			switch (dp->speed) {
2595 			case GEM_SPD_1000:
2596 				val |= MII_CONTROL_1000MB;
2597 				break;
2598 
2599 			case GEM_SPD_100:
2600 				val |= MII_CONTROL_100MB;
2601 				break;
2602 
2603 			default:
2604 				cmn_err(CE_WARN, "%s: unknown speed:%d",
2605 				    dp->name, dp->speed);
2606 				/* FALLTHROUGH */
2607 			case GEM_SPD_10:
2608 				/* for GEM_SPD_10, do nothing */
2609 				break;
2610 			}
2611 
2612 			if (dp->mii_status & MII_STATUS_XSTATUS) {
2613 				gem_mii_write(dp,
2614 				    MII_1000TC, MII_1000TC_CFG_EN);
2615 			}
2616 			gem_mii_write(dp, MII_CONTROL, val);
2617 		}
2618 
2619 		if (dp->nic_state >= NIC_STATE_INITIALIZED) {
2620 			/* notify the result of auto-negotiation to mac */
2621 			(*dp->gc.gc_set_media)(dp);
2622 		}
2623 
2624 		if ((void *)dp->gc.gc_mii_tune_phy) {
2625 			/* for built-in sis900 */
2626 			/* XXX - this code should be removed.  */
2627 			(*dp->gc.gc_mii_tune_phy)(dp);
2628 		}
2629 
2630 		goto next_nowait;
2631 
2632 	case MII_STATE_LINKDOWN:
2633 		status = gem_mii_read(dp, MII_STATUS);
2634 		if (status & MII_STATUS_LINKUP) {
2635 			/*
2636 			 * Link going up
2637 			 */
2638 			dp->mii_state = MII_STATE_LINKUP;
2639 			dp->mii_supress_msg = B_FALSE;
2640 
2641 			DPRINTF(0, (CE_CONT,
2642 			    "!%s: link up detected: mii_stat:%b",
2643 			    dp->name, status, MII_STATUS_BITS));
2644 
2645 			/*
2646 			 * MII_CONTROL_100MB and  MII_CONTROL_FDUPLEX are
2647 			 * ignored when MII_CONTROL_ANE is set.
2648 			 */
2649 			cmn_err(CE_CONT,
2650 			    "!%s: Link up: %d Mbps %s duplex %s flow control",
2651 			    dp->name,
2652 			    gem_speed_value[dp->speed],
2653 			    dp->full_duplex ? "full" : "half",
2654 			    gem_fc_type[dp->flow_control]);
2655 
2656 			dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2657 
2658 			/* XXX - we need another timer to watch statistics */
2659 			if (dp->gc.gc_mii_hw_link_detection &&
2660 			    dp->nic_state == NIC_STATE_ONLINE) {
2661 				dp->mii_interval = 0;
2662 			}
2663 
2664 			if (dp->nic_state == NIC_STATE_ONLINE) {
2665 				if (!dp->mac_active) {
2666 					(void) gem_mac_start(dp);
2667 				}
2668 				tx_sched = B_TRUE;
2669 			}
2670 			goto next;
2671 		}
2672 
2673 		dp->mii_supress_msg = B_TRUE;
2674 		if (dp->anadv_autoneg) {
2675 			dp->mii_timer -= diff;
2676 			if (dp->mii_timer <= 0) {
2677 				/*
2678 				 * link down timer expired.
2679 				 * need to restart auto-negotiation.
2680 				 */
2681 				linkdown_action =
2682 				    dp->gc.gc_mii_linkdown_timeout_action;
2683 				goto restart_autonego;
2684 			}
2685 		}
2686 		/* don't change mii_state */
2687 		break;
2688 
2689 	case MII_STATE_LINKUP:
2690 		status = gem_mii_read(dp, MII_STATUS);
2691 		if ((status & MII_STATUS_LINKUP) == 0) {
2692 			/*
2693 			 * Link going down
2694 			 */
2695 			cmn_err(CE_NOTE,
2696 			    "!%s: link down detected: mii_stat:%b",
2697 			    dp->name, status, MII_STATUS_BITS);
2698 
2699 			if (dp->nic_state == NIC_STATE_ONLINE &&
2700 			    dp->mac_active &&
2701 			    dp->gc.gc_mii_stop_mac_on_linkdown) {
2702 				(void) gem_mac_stop(dp, 0);
2703 
2704 				if (dp->tx_blocked) {
2705 					/* drain tx */
2706 					tx_sched = B_TRUE;
2707 				}
2708 			}
2709 
2710 			if (dp->anadv_autoneg) {
2711 				/* need to restart auto-negotiation */
2712 				linkdown_action = dp->gc.gc_mii_linkdown_action;
2713 				goto restart_autonego;
2714 			}
2715 
2716 			dp->mii_state = MII_STATE_LINKDOWN;
2717 			dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
2718 
2719 			if ((void *)dp->gc.gc_mii_tune_phy) {
2720 				/* for built-in sis900 */
2721 				(*dp->gc.gc_mii_tune_phy)(dp);
2722 			}
2723 			dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2724 			goto next;
2725 		}
2726 
2727 		/* don't change mii_state */
2728 		if (dp->gc.gc_mii_hw_link_detection &&
2729 		    dp->nic_state == NIC_STATE_ONLINE) {
2730 			dp->mii_interval = 0;
2731 			goto next;
2732 		}
2733 		break;
2734 	}
2735 	dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2736 	goto next;
2737 
2738 	/* Actions on the end of state routine */
2739 
2740 restart_autonego:
2741 	switch (linkdown_action) {
2742 	case MII_ACTION_RESET:
2743 		if (!dp->mii_supress_msg) {
2744 			cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
2745 		}
2746 		dp->mii_supress_msg = B_TRUE;
2747 		goto reset_phy;
2748 
2749 	case MII_ACTION_NONE:
2750 		dp->mii_supress_msg = B_TRUE;
2751 		if (dp->gc.gc_mii_an_oneshot) {
2752 			goto autonego;
2753 		}
2754 		/* PHY will restart autonego automatically */
2755 		dp->mii_state = MII_STATE_AUTONEGOTIATING;
2756 		dp->mii_timer = dp->gc.gc_mii_an_timeout;
2757 		dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2758 		goto next;
2759 
2760 	case MII_ACTION_RSA:
2761 		if (!dp->mii_supress_msg) {
2762 			cmn_err(CE_CONT, "!%s: restarting auto-negotiation",
2763 			    dp->name);
2764 		}
2765 		dp->mii_supress_msg = B_TRUE;
2766 		goto autonego;
2767 
2768 	default:
2769 		cmn_err(CE_WARN, "!%s: unknown linkdown action: %d",
2770 		    dp->name, dp->gc.gc_mii_linkdown_action);
2771 		dp->mii_supress_msg = B_TRUE;
2772 	}
2773 	/* NOTREACHED */
2774 
2775 reset_phy:
2776 	if (!dp->mii_supress_msg) {
2777 		cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
2778 	}
2779 	dp->mii_state = MII_STATE_RESETTING;
2780 	dp->mii_timer = dp->gc.gc_mii_reset_timeout;
2781 	if (!dp->gc.gc_mii_dont_reset) {
2782 		gem_mii_write(dp, MII_CONTROL, MII_CONTROL_RESET);
2783 	}
2784 	dp->mii_interval = WATCH_INTERVAL_FAST;
2785 	goto next;
2786 
2787 autonego:
2788 	if (!dp->mii_supress_msg) {
2789 		cmn_err(CE_CONT, "!%s: auto-negotiation started", dp->name);
2790 	}
2791 	dp->mii_state = MII_STATE_AUTONEGOTIATING;
2792 	dp->mii_timer = dp->gc.gc_mii_an_timeout;
2793 
2794 	/* start/restart auto nego */
2795 	val = gem_mii_read(dp, MII_CONTROL) &
2796 	    ~(MII_CONTROL_ISOLATE | MII_CONTROL_PWRDN | MII_CONTROL_RESET);
2797 
2798 	gem_mii_write(dp, MII_CONTROL,
2799 	    val | MII_CONTROL_RSAN | MII_CONTROL_ANE);
2800 
2801 	dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2802 
2803 next:
2804 	if (dp->link_watcher_id == 0 && dp->mii_interval) {
2805 		/* we must schedule next mii_watcher */
2806 		dp->link_watcher_id =
2807 		    timeout((void (*)(void *))&gem_mii_link_watcher,
2808 		    (void *)dp, dp->mii_interval);
2809 	}
2810 
2811 	if (old_mii_state != dp->mii_state) {
2812 		/* notify new mii link state */
2813 		if (dp->mii_state == MII_STATE_LINKUP) {
2814 			dp->linkup_delay = 0;
2815 			GEM_LINKUP(dp);
2816 		} else if (dp->linkup_delay <= 0) {
2817 			GEM_LINKDOWN(dp);
2818 		}
2819 	} else if (dp->linkup_delay < 0) {
2820 		/* first linkup timeout */
2821 		dp->linkup_delay = 0;
2822 		GEM_LINKDOWN(dp);
2823 	}
2824 
2825 	return (tx_sched);
2826 }
2827 
2828 static void
2829 gem_mii_link_watcher(struct gem_dev *dp)
2830 {
2831 	boolean_t	tx_sched;
2832 
2833 	mutex_enter(&dp->intrlock);
2834 
2835 	dp->link_watcher_id = 0;
2836 	tx_sched = gem_mii_link_check(dp);
2837 #if GEM_DEBUG_LEVEL > 2
2838 	if (dp->link_watcher_id == 0) {
2839 		cmn_err(CE_CONT, "%s: link watcher stopped", dp->name);
2840 	}
2841 #endif
2842 	mutex_exit(&dp->intrlock);
2843 
2844 	if (tx_sched) {
2845 		/* kick potentially stopped downstream */
2846 		mac_tx_update(dp->mh);
2847 	}
2848 }
2849 
2850 int
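/*
 * gem_mii_probe_default:
 * Locates the PHY: first at the preconfigured address, then at every
 * address from gc_mii_addr_min to 31, and finally over the same range
 * again after writing 0 to MII_CONTROL first (presumably for PHYs that
 * answer only after being taken out of isolation or power-down).  It
 * also checks whether the pause/asym-pause bits stick in the
 * advertisement register and masks gc_flow_control accordingly.
 */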
2851 gem_mii_probe_default(struct gem_dev *dp)
2852 {
2853 	int8_t		phy;
2854 	uint16_t	status;
2855 	uint16_t	adv;
2856 	uint16_t	adv_org;
2857 
2858 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2859 
2860 	/*
2861 	 * Scan PHY
2862 	 */
2863 	/* ensure sync bits are sent */
2864 	dp->mii_status = 0;
2865 
2866 	/* Try default phy first */
2867 	if (dp->mii_phy_addr) {
2868 		status = gem_mii_read(dp, MII_STATUS);
2869 		if (status != 0xffff && status != 0) {
2870 			gem_mii_write(dp, MII_CONTROL, 0);
2871 			goto PHY_found;
2872 		}
2873 
2874 		if (dp->mii_phy_addr < 0) {
2875 			cmn_err(CE_NOTE,
2876 	    "!%s: failed to probe default internal and/or non-MII PHY",
2877 			    dp->name);
2878 			return (GEM_FAILURE);
2879 		}
2880 
2881 		cmn_err(CE_NOTE,
2882 		    "!%s: failed to probe default MII PHY at %d",
2883 		    dp->name, dp->mii_phy_addr);
2884 	}
2885 
2886 	/* Try all possible addresses */
2887 	for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
2888 		dp->mii_phy_addr = phy;
2889 		status = gem_mii_read(dp, MII_STATUS);
2890 
2891 		if (status != 0xffff && status != 0) {
2892 			gem_mii_write(dp, MII_CONTROL, 0);
2893 			goto PHY_found;
2894 		}
2895 	}
2896 
2897 	for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
2898 		dp->mii_phy_addr = phy;
2899 		gem_mii_write(dp, MII_CONTROL, 0);
2900 		status = gem_mii_read(dp, MII_STATUS);
2901 
2902 		if (status != 0xffff && status != 0) {
2903 			goto PHY_found;
2904 		}
2905 	}
2906 
2907 	cmn_err(CE_NOTE, "!%s: no MII PHY found", dp->name);
2908 	dp->mii_phy_addr = -1;
2909 
2910 	return (GEM_FAILURE);
2911 
2912 PHY_found:
2913 	dp->mii_status = status;
2914 	dp->mii_phy_id  = (gem_mii_read(dp, MII_PHYIDH) << 16) |
2915 	    gem_mii_read(dp, MII_PHYIDL);
2916 
2917 	if (dp->mii_phy_addr < 0) {
2918 		cmn_err(CE_CONT, "!%s: using internal/non-MII PHY(0x%08x)",
2919 		    dp->name, dp->mii_phy_id);
2920 	} else {
2921 		cmn_err(CE_CONT, "!%s: MII PHY (0x%08x) found at %d",
2922 		    dp->name, dp->mii_phy_id, dp->mii_phy_addr);
2923 	}
2924 
2925 	cmn_err(CE_CONT, "!%s: PHY control:%b, status:%b, advert:%b, lpar:%b",
2926 	    dp->name,
2927 	    gem_mii_read(dp, MII_CONTROL), MII_CONTROL_BITS,
2928 	    status, MII_STATUS_BITS,
2929 	    gem_mii_read(dp, MII_AN_ADVERT), MII_ABILITY_BITS,
2930 	    gem_mii_read(dp, MII_AN_LPABLE), MII_ABILITY_BITS);
2931 
2932 	dp->mii_xstatus = 0;
2933 	if (status & MII_STATUS_XSTATUS) {
2934 		dp->mii_xstatus = gem_mii_read(dp, MII_XSTATUS);
2935 
2936 		cmn_err(CE_CONT, "!%s: xstatus:%b",
2937 		    dp->name, dp->mii_xstatus, MII_XSTATUS_BITS);
2938 	}
2939 
2940 	/* check if the phy can advertise pause abilities */
2941 	adv_org = gem_mii_read(dp, MII_AN_ADVERT);
2942 
2943 	gem_mii_write(dp, MII_AN_ADVERT,
2944 	    MII_ABILITY_PAUSE | MII_ABILITY_ASM_DIR);
2945 
2946 	adv = gem_mii_read(dp, MII_AN_ADVERT);
2947 
2948 	if ((adv & MII_ABILITY_PAUSE) == 0) {
2949 		dp->gc.gc_flow_control &= ~1;
2950 	}
2951 
2952 	if ((adv & MII_ABILITY_ASM_DIR) == 0) {
2953 		dp->gc.gc_flow_control &= ~2;
2954 	}
2955 
2956 	gem_mii_write(dp, MII_AN_ADVERT, adv_org);
2957 
2958 	return (GEM_SUCCESS);
2959 }
2960 
2961 static void
2962 gem_mii_start(struct gem_dev *dp)
2963 {
2964 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2965 
2966 	/* make the first link check call */
2967 	dp->mii_state = MII_STATE_UNKNOWN;
2968 	dp->mii_last_check = ddi_get_lbolt();
2969 	dp->linkup_delay = dp->gc.gc_mii_linkdown_timeout;
2970 	(void) gem_mii_link_watcher(dp);
2971 }
2972 
2973 static void
2974 gem_mii_stop(struct gem_dev *dp)
2975 {
2976 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2977 
2978 	/* Ensure timer routine stopped */
2979 	mutex_enter(&dp->intrlock);
2980 	if (dp->link_watcher_id) {
2981 		while (untimeout(dp->link_watcher_id) == -1)
2982 			;
2983 		dp->link_watcher_id = 0;
2984 	}
2985 	mutex_exit(&dp->intrlock);
2986 }
2987 
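/*
 * gem_get_mac_addr_conf:
 * Reads the station address from the "mac-addr" property in the
 * driver's .conf file.  The value must be six colon-separated hex
 * octets and must not be all zero, e.g. (hypothetical entry):
 *
 *	mac-addr="00:11:22:33:44:55";
 */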
2988 boolean_t
2989 gem_get_mac_addr_conf(struct gem_dev *dp)
2990 {
2991 	char		propname[32];
2992 	char		*valstr;
2993 	uint8_t		mac[ETHERADDRL];
2994 	char		*cp;
2995 	int		c;
2996 	int		i;
2997 	int		j;
2998 	uint8_t		v;
2999 	uint8_t		d;
3000 	uint8_t		ored;
3001 
3002 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3003 	/*
3004 	 * Get ethernet address from .conf file
3005 	 */
3006 	(void) sprintf(propname, "mac-addr");
3007 	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dp->dip,
3008 	    DDI_PROP_DONTPASS, propname, &valstr)) !=
3009 	    DDI_PROP_SUCCESS) {
3010 		return (B_FALSE);
3011 	}
3012 
3013 	if (strlen(valstr) != ETHERADDRL*3-1) {
3014 		goto syntax_err;
3015 	}
3016 
3017 	cp = valstr;
3018 	j  = 0;
3019 	ored = 0;
3020 	for (;;) {
3021 		v = 0;
3022 		for (i = 0; i < 2; i++) {
3023 			c = *cp++;
3024 
3025 			if (c >= 'a' && c <= 'f') {
3026 				d = c - 'a' + 10;
3027 			} else if (c >= 'A' && c <= 'F') {
3028 				d = c - 'A' + 10;
3029 			} else if (c >= '0' && c <= '9') {
3030 				d = c - '0';
3031 			} else {
3032 				goto syntax_err;
3033 			}
3034 			v = (v << 4) | d;
3035 		}
3036 
3037 		mac[j++] = v;
3038 		ored |= v;
3039 		if (j == ETHERADDRL) {
3040 			/* done */
3041 			break;
3042 		}
3043 
3044 		c = *cp++;
3045 		if (c != ':') {
3046 			goto syntax_err;
3047 		}
3048 	}
3049 
3050 	if (ored == 0) {
3051 		goto err;
3052 	}
3053 	for (i = 0; i < ETHERADDRL; i++) {
3054 		dp->dev_addr.ether_addr_octet[i] = mac[i];
3055 	}
3056 	ddi_prop_free(valstr);
3057 	return (B_TRUE);
3058 
3059 syntax_err:
3060 	cmn_err(CE_CONT,
3061 	    "!%s: read mac addr: trying .conf: syntax err %s",
3062 	    dp->name, valstr);
3063 err:
3064 	ddi_prop_free(valstr);
3065 
3066 	return (B_FALSE);
3067 }
3068 
3069 
3070 /* ============================================================== */
3071 /*
3072  * internal start/stop interface
3073  */
3074 /* ============================================================== */
3075 static int
3076 gem_mac_set_rx_filter(struct gem_dev *dp)
3077 {
3078 	return ((*dp->gc.gc_set_rx_filter)(dp));
3079 }
3080 
3081 /*
3082  * gem_mac_init: cold start
3083  */
3084 static int
3085 gem_mac_init(struct gem_dev *dp)
3086 {
3087 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3088 
3089 	if (dp->mac_suspended) {
3090 		return (GEM_FAILURE);
3091 	}
3092 
3093 	dp->mac_active = B_FALSE;
3094 
3095 	gem_init_rx_ring(dp);
3096 	gem_init_tx_ring(dp);
3097 
3098 	/* reset transmitter state */
3099 	dp->tx_blocked = (clock_t)0;
3100 	dp->tx_busy = 0;
3101 	dp->tx_reclaim_busy = 0;
3102 	dp->tx_max_packets = dp->gc.gc_tx_buf_limit;
3103 
3104 	if ((*dp->gc.gc_init_chip)(dp) != GEM_SUCCESS) {
3105 		return (GEM_FAILURE);
3106 	}
3107 
3108 	gem_prepare_rx_buf(dp);
3109 
3110 	return (GEM_SUCCESS);
3111 }
3112 /*
3113  * gem_mac_start: warm start
3114  */
3115 static int
3116 gem_mac_start(struct gem_dev *dp)
3117 {
3118 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3119 
3120 	ASSERT(mutex_owned(&dp->intrlock));
3121 	ASSERT(dp->nic_state == NIC_STATE_ONLINE);
3122 	ASSERT(dp->mii_state ==  MII_STATE_LINKUP);
3123 
3124 	/* enable tx and rx */
3125 	mutex_enter(&dp->xmitlock);
3126 	if (dp->mac_suspended) {
3127 		mutex_exit(&dp->xmitlock);
3128 		return (GEM_FAILURE);
3129 	}
3130 	dp->mac_active = B_TRUE;
3131 	mutex_exit(&dp->xmitlock);
3132 
3133 	/* setup rx buffers */
3134 	(*dp->gc.gc_rx_start)(dp,
3135 	    SLOT(dp->rx_active_head, dp->gc.gc_rx_ring_size),
3136 	    dp->rx_active_tail - dp->rx_active_head);
3137 
3138 	if ((*dp->gc.gc_start_chip)(dp) != GEM_SUCCESS) {
3139 		cmn_err(CE_WARN, "%s: %s: start_chip: failed",
3140 		    dp->name, __func__);
3141 		return (GEM_FAILURE);
3142 	}
3143 
3144 	mutex_enter(&dp->xmitlock);
3145 
3146 	/* load untransmitted packets onto the nic */
3147 	ASSERT(dp->tx_softq_tail - dp->tx_softq_head >= 0);
3148 	if (dp->tx_softq_tail - dp->tx_softq_head > 0) {
3149 		gem_tx_load_descs_oo(dp,
3150 		    dp->tx_softq_head, dp->tx_softq_tail,
3151 		    GEM_TXFLAG_HEAD);
3152 		/* issue preloaded tx buffers */
3153 		gem_tx_start_unit(dp);
3154 	}
3155 
3156 	mutex_exit(&dp->xmitlock);
3157 
3158 	return (GEM_SUCCESS);
3159 }
3160 
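/*
 * gem_mac_stop:
 * Stops the nic after blocking transmits and, unless GEM_RESTART_NOWAIT
 * is set, draining the tx ring.  The drain timeout is sized from the
 * wire time of the outstanding buffers: with gem_speed_value[] in Mbps,
 * 8 * MAXPKTBUF / speed is the per-packet time in microseconds (roughly
 * 121 uS for a 1518-byte frame at 100 Mbps), doubled for margin and
 * multiplied by the number of active tx buffers.
 */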
3161 static int
3162 gem_mac_stop(struct gem_dev *dp, uint_t flags)
3163 {
3164 	int		i;
3165 	int		wait_time; /* in uS */
3166 #ifdef GEM_DEBUG_LEVEL
3167 	clock_t		now;
3168 #endif
3169 	int		ret = GEM_SUCCESS;
3170 
3171 	DPRINTF(1, (CE_CONT, "!%s: %s: called, rx_buf_free:%d",
3172 	    dp->name, __func__, dp->rx_buf_freecnt));
3173 
3174 	ASSERT(mutex_owned(&dp->intrlock));
3175 	ASSERT(!mutex_owned(&dp->xmitlock));
3176 
3177 	/*
3178 	 * Block transmits
3179 	 */
3180 	mutex_enter(&dp->xmitlock);
3181 	if (dp->mac_suspended) {
3182 		mutex_exit(&dp->xmitlock);
3183 		return (GEM_SUCCESS);
3184 	}
3185 	dp->mac_active = B_FALSE;
3186 
3187 	while (dp->tx_busy > 0) {
3188 		cv_wait(&dp->tx_drain_cv, &dp->xmitlock);
3189 	}
3190 	mutex_exit(&dp->xmitlock);
3191 
3192 	if ((flags & GEM_RESTART_NOWAIT) == 0) {
3193 		/*
3194 		 * Wait for all tx buffers to be sent.
3195 		 */
3196 		wait_time =
3197 		    2 * (8 * MAXPKTBUF(dp) / gem_speed_value[dp->speed]) *
3198 		    (dp->tx_active_tail - dp->tx_active_head);
3199 
3200 		DPRINTF(0, (CE_CONT, "%s: %s: max drain time: %d uS",
3201 		    dp->name, __func__, wait_time));
3202 		i = 0;
3203 #ifdef GEM_DEBUG_LEVEL
3204 		now = ddi_get_lbolt();
3205 #endif
3206 		while (dp->tx_active_tail != dp->tx_active_head) {
3207 			if (i > wait_time) {
3208 				/* timeout */
3209 				cmn_err(CE_NOTE, "%s: %s timeout: tx drain",
3210 				    dp->name, __func__);
3211 				break;
3212 			}
3213 			(void) gem_reclaim_txbuf(dp);
3214 			drv_usecwait(100);
3215 			i += 100;
3216 		}
3217 		DPRINTF(0, (CE_NOTE,
3218 		    "!%s: %s: the nic has drained in %d uS, real %d mS",
3219 		    dp->name, __func__, i,
3220 		    10*((int)(ddi_get_lbolt() - now))));
3221 	}
3222 
3223 	/*
3224 	 * Now we can stop the nic safely.
3225 	 */
3226 	if ((*dp->gc.gc_stop_chip)(dp) != GEM_SUCCESS) {
3227 		cmn_err(CE_NOTE, "%s: %s: resetting the chip to stop it",
3228 		    dp->name, __func__);
3229 		if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
3230 			cmn_err(CE_WARN, "%s: %s: failed to reset chip",
3231 			    dp->name, __func__);
3232 		}
3233 	}
3234 
3235 	/*
3236 	 * Clear all rx buffers
3237 	 */
3238 	if (flags & GEM_RESTART_KEEP_BUF) {
3239 		(void) gem_receive(dp);
3240 	}
3241 	gem_clean_rx_buf(dp);
3242 
3243 	/*
3244 	 * Update final statistics
3245 	 */
3246 	(*dp->gc.gc_get_stats)(dp);
3247 
3248 	/*
3249 	 * Clear all pending tx packets
3250 	 */
3251 	ASSERT(dp->tx_active_tail == dp->tx_softq_head);
3252 	ASSERT(dp->tx_softq_tail == dp->tx_free_head);
3253 	if (flags & GEM_RESTART_KEEP_BUF) {
3254 		/* restore active tx buffers */
3255 		dp->tx_active_tail = dp->tx_active_head;
3256 		dp->tx_softq_head  = dp->tx_active_head;
3257 	} else {
3258 		gem_clean_tx_buf(dp);
3259 	}
3260 
3261 	return (ret);
3262 }
3263 
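/*
 * gem_add_multicast:
 * Appends the address to the software multicast list (up to GEM_MAXMC
 * entries).  When more addresses are requested than the list can hold,
 * RXMODE_MULTI_OVF is set, presumably so that the chip-specific rx
 * filter can fall back to a less exact mode such as all-multicast, and
 * the new filter is pushed to the hardware via gc_set_rx_filter.
 */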
3264 static int
3265 gem_add_multicast(struct gem_dev *dp, const uint8_t *ep)
3266 {
3267 	int		cnt;
3268 	int		err;
3269 
3270 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3271 
3272 	mutex_enter(&dp->intrlock);
3273 	if (dp->mac_suspended) {
3274 		mutex_exit(&dp->intrlock);
3275 		return (GEM_FAILURE);
3276 	}
3277 
3278 	if (dp->mc_count_req++ < GEM_MAXMC) {
3279 		/* append the new address at the end of the mclist */
3280 		cnt = dp->mc_count;
3281 		bcopy(ep, dp->mc_list[cnt].addr.ether_addr_octet,
3282 		    ETHERADDRL);
3283 		if (dp->gc.gc_multicast_hash) {
3284 			dp->mc_list[cnt].hash =
3285 			    (*dp->gc.gc_multicast_hash)(dp, (uint8_t *)ep);
3286 		}
3287 		dp->mc_count = cnt + 1;
3288 	}
3289 
3290 	if (dp->mc_count_req != dp->mc_count) {
3291 		/* multicast address list overflow */
3292 		dp->rxmode |= RXMODE_MULTI_OVF;
3293 	} else {
3294 		dp->rxmode &= ~RXMODE_MULTI_OVF;
3295 	}
3296 
3297 	/* pass the new multicast list to the hardware */
3298 	err = gem_mac_set_rx_filter(dp);
3299 
3300 	mutex_exit(&dp->intrlock);
3301 
3302 	return (err);
3303 }
3304 
3305 static int
3306 gem_remove_multicast(struct gem_dev *dp, const uint8_t *ep)
3307 {
3308 	size_t		len;
3309 	int		i;
3310 	int		cnt;
3311 	int		err;
3312 
3313 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3314 
3315 	mutex_enter(&dp->intrlock);
3316 	if (dp->mac_suspended) {
3317 		mutex_exit(&dp->intrlock);
3318 		return (GEM_FAILURE);
3319 	}
3320 
3321 	dp->mc_count_req--;
3322 	cnt = dp->mc_count;
3323 	for (i = 0; i < cnt; i++) {
3324 		if (bcmp(ep, &dp->mc_list[i].addr, ETHERADDRL)) {
3325 			continue;
3326 		}
3327 		/* shrink the mclist by copying forward */
3328 		len = (cnt - (i + 1)) * sizeof (*dp->mc_list);
3329 		if (len > 0) {
3330 			bcopy(&dp->mc_list[i+1], &dp->mc_list[i], len);
3331 		}
3332 		dp->mc_count--;
3333 		break;
3334 	}
3335 
3336 	if (dp->mc_count_req != dp->mc_count) {
3337 		/* multicast address list overflow */
3338 		dp->rxmode |= RXMODE_MULTI_OVF;
3339 	} else {
3340 		dp->rxmode &= ~RXMODE_MULTI_OVF;
3341 	}
3342 	/* In gem v2, don't hold xmitlock on calling set_rx_filter */
3343 	err = gem_mac_set_rx_filter(dp);
3344 
3345 	mutex_exit(&dp->intrlock);
3346 
3347 	return (err);
3348 }
3349 
3350 /* ============================================================== */
3351 /*
3352  * ND interface
3353  */
3354 /* ============================================================== */
3355 enum {
3356 	PARAM_AUTONEG_CAP,
3357 	PARAM_PAUSE_CAP,
3358 	PARAM_ASYM_PAUSE_CAP,
3359 	PARAM_1000FDX_CAP,
3360 	PARAM_1000HDX_CAP,
3361 	PARAM_100T4_CAP,
3362 	PARAM_100FDX_CAP,
3363 	PARAM_100HDX_CAP,
3364 	PARAM_10FDX_CAP,
3365 	PARAM_10HDX_CAP,
3366 
3367 	PARAM_ADV_AUTONEG_CAP,
3368 	PARAM_ADV_PAUSE_CAP,
3369 	PARAM_ADV_ASYM_PAUSE_CAP,
3370 	PARAM_ADV_1000FDX_CAP,
3371 	PARAM_ADV_1000HDX_CAP,
3372 	PARAM_ADV_100T4_CAP,
3373 	PARAM_ADV_100FDX_CAP,
3374 	PARAM_ADV_100HDX_CAP,
3375 	PARAM_ADV_10FDX_CAP,
3376 	PARAM_ADV_10HDX_CAP,
3377 
3378 	PARAM_LP_AUTONEG_CAP,
3379 	PARAM_LP_PAUSE_CAP,
3380 	PARAM_LP_ASYM_PAUSE_CAP,
3381 	PARAM_LP_1000FDX_CAP,
3382 	PARAM_LP_1000HDX_CAP,
3383 	PARAM_LP_100T4_CAP,
3384 	PARAM_LP_100FDX_CAP,
3385 	PARAM_LP_100HDX_CAP,
3386 	PARAM_LP_10FDX_CAP,
3387 	PARAM_LP_10HDX_CAP,
3388 
3389 	PARAM_LINK_STATUS,
3390 	PARAM_LINK_SPEED,
3391 	PARAM_LINK_DUPLEX,
3392 
3393 	PARAM_LINK_AUTONEG,
3394 	PARAM_LINK_RX_PAUSE,
3395 	PARAM_LINK_TX_PAUSE,
3396 
3397 	PARAM_LOOP_MODE,
3398 	PARAM_MSI_CNT,
3399 
3400 #ifdef DEBUG_RESUME
3401 	PARAM_RESUME_TEST,
3402 #endif
3403 	PARAM_COUNT
3404 };
3405 
3406 enum ioc_reply {
3407 	IOC_INVAL = -1,				/* bad, NAK with EINVAL	*/
3408 	IOC_DONE,				/* OK, reply sent	*/
3409 	IOC_ACK,				/* OK, just send ACK	*/
3410 	IOC_REPLY,				/* OK, just send reply	*/
3411 	IOC_RESTART_ACK,			/* OK, restart & ACK	*/
3412 	IOC_RESTART_REPLY			/* OK, restart & reply	*/
3413 };
3414 
3415 struct gem_nd_arg {
3416 	struct gem_dev	*dp;
3417 	int		item;
3418 };
3419 
3420 static int
3421 gem_param_get(queue_t *q, mblk_t *mp, caddr_t arg, cred_t *credp)
3422 {
3423 	struct gem_dev	*dp = ((struct gem_nd_arg *)(void *)arg)->dp;
3424 	int		item = ((struct gem_nd_arg *)(void *)arg)->item;
3425 	long		val;
3426 
3427 	DPRINTF(0, (CE_CONT, "!%s: %s: called, item:%d",
3428 	    dp->name, __func__, item));
3429 
3430 	switch (item) {
3431 	case PARAM_AUTONEG_CAP:
3432 		val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
3433 		DPRINTF(0, (CE_CONT, "autoneg_cap:%d", val));
3434 		break;
3435 
3436 	case PARAM_PAUSE_CAP:
3437 		val = BOOLEAN(dp->gc.gc_flow_control & 1);
3438 		break;
3439 
3440 	case PARAM_ASYM_PAUSE_CAP:
3441 		val = BOOLEAN(dp->gc.gc_flow_control & 2);
3442 		break;
3443 
3444 	case PARAM_1000FDX_CAP:
3445 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
3446 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
3447 		break;
3448 
3449 	case PARAM_1000HDX_CAP:
3450 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
3451 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
3452 		break;
3453 
3454 	case PARAM_100T4_CAP:
3455 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
3456 		break;
3457 
3458 	case PARAM_100FDX_CAP:
3459 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
3460 		break;
3461 
3462 	case PARAM_100HDX_CAP:
3463 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
3464 		break;
3465 
3466 	case PARAM_10FDX_CAP:
3467 		val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
3468 		break;
3469 
3470 	case PARAM_10HDX_CAP:
3471 		val = BOOLEAN(dp->mii_status & MII_STATUS_10);
3472 		break;
3473 
3474 	case PARAM_ADV_AUTONEG_CAP:
3475 		val = dp->anadv_autoneg;
3476 		break;
3477 
3478 	case PARAM_ADV_PAUSE_CAP:
3479 		val = BOOLEAN(dp->anadv_flow_control & 1);
3480 		break;
3481 
3482 	case PARAM_ADV_ASYM_PAUSE_CAP:
3483 		val = BOOLEAN(dp->anadv_flow_control & 2);
3484 		break;
3485 
3486 	case PARAM_ADV_1000FDX_CAP:
3487 		val = dp->anadv_1000fdx;
3488 		break;
3489 
3490 	case PARAM_ADV_1000HDX_CAP:
3491 		val = dp->anadv_1000hdx;
3492 		break;
3493 
3494 	case PARAM_ADV_100T4_CAP:
3495 		val = dp->anadv_100t4;
3496 		break;
3497 
3498 	case PARAM_ADV_100FDX_CAP:
3499 		val = dp->anadv_100fdx;
3500 		break;
3501 
3502 	case PARAM_ADV_100HDX_CAP:
3503 		val = dp->anadv_100hdx;
3504 		break;
3505 
3506 	case PARAM_ADV_10FDX_CAP:
3507 		val = dp->anadv_10fdx;
3508 		break;
3509 
3510 	case PARAM_ADV_10HDX_CAP:
3511 		val = dp->anadv_10hdx;
3512 		break;
3513 
3514 	case PARAM_LP_AUTONEG_CAP:
3515 		val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3516 		break;
3517 
3518 	case PARAM_LP_PAUSE_CAP:
3519 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
3520 		break;
3521 
3522 	case PARAM_LP_ASYM_PAUSE_CAP:
3523 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASM_DIR);
3524 		break;
3525 
3526 	case PARAM_LP_1000FDX_CAP:
3527 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
3528 		break;
3529 
3530 	case PARAM_LP_1000HDX_CAP:
3531 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
3532 		break;
3533 
3534 	case PARAM_LP_100T4_CAP:
3535 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
3536 		break;
3537 
3538 	case PARAM_LP_100FDX_CAP:
3539 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
3540 		break;
3541 
3542 	case PARAM_LP_100HDX_CAP:
3543 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
3544 		break;
3545 
3546 	case PARAM_LP_10FDX_CAP:
3547 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
3548 		break;
3549 
3550 	case PARAM_LP_10HDX_CAP:
3551 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
3552 		break;
3553 
3554 	case PARAM_LINK_STATUS:
3555 		val = (dp->mii_state == MII_STATE_LINKUP);
3556 		break;
3557 
3558 	case PARAM_LINK_SPEED:
3559 		val = gem_speed_value[dp->speed];
3560 		break;
3561 
3562 	case PARAM_LINK_DUPLEX:
3563 		val = 0;
3564 		if (dp->mii_state == MII_STATE_LINKUP) {
3565 			val = dp->full_duplex ? 2 : 1;
3566 		}
3567 		break;
3568 
3569 	case PARAM_LINK_AUTONEG:
3570 		val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3571 		break;
3572 
3573 	case PARAM_LINK_RX_PAUSE:
3574 		val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3575 		    (dp->flow_control == FLOW_CONTROL_RX_PAUSE);
3576 		break;
3577 
3578 	case PARAM_LINK_TX_PAUSE:
3579 		val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3580 		    (dp->flow_control == FLOW_CONTROL_TX_PAUSE);
3581 		break;
3582 
3583 #ifdef DEBUG_RESUME
3584 	case PARAM_RESUME_TEST:
3585 		val = 0;
3586 		break;
3587 #endif
3588 	default:
3589 		cmn_err(CE_WARN, "%s: unimplemented ndd control (%d)",
3590 		    dp->name, item);
3591 		break;
3592 	}
3593 
3594 	(void) mi_mpprintf(mp, "%ld", val);
3595 
3596 	return (0);
3597 }
3598 
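/*
 * gem_param_set:
 * ndd set handler.  Each writable parameter accepts only 0 or 1 and is
 * rejected unless the corresponding PHY capability bit is present; after
 * updating the advertised capabilities it re-resolves the forced mode
 * and restarts the link state machine.
 */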
3599 static int
3600 gem_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t arg, cred_t *credp)
3601 {
3602 	struct gem_dev	*dp = ((struct gem_nd_arg *)(void *)arg)->dp;
3603 	int		item = ((struct gem_nd_arg *)(void *)arg)->item;
3604 	long		val;
3605 	char		*end;
3606 
3607 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3608 	if (ddi_strtol(value, &end, 10, &val)) {
3609 		return (EINVAL);
3610 	}
3611 	if (end == value) {
3612 		return (EINVAL);
3613 	}
3614 
3615 	switch (item) {
3616 	case PARAM_ADV_AUTONEG_CAP:
3617 		if (val != 0 && val != 1) {
3618 			goto err;
3619 		}
3620 		if (val && (dp->mii_status & MII_STATUS_CANAUTONEG) == 0) {
3621 			goto err;
3622 		}
3623 		dp->anadv_autoneg = (int)val;
3624 		break;
3625 
3626 	case PARAM_ADV_PAUSE_CAP:
3627 		if (val != 0 && val != 1) {
3628 			goto err;
3629 		}
3630 		if (val) {
3631 			dp->anadv_flow_control |= 1;
3632 		} else {
3633 			dp->anadv_flow_control &= ~1;
3634 		}
3635 		break;
3636 
3637 	case PARAM_ADV_ASYM_PAUSE_CAP:
3638 		if (val != 0 && val != 1) {
3639 			goto err;
3640 		}
3641 		if (val) {
3642 			dp->anadv_flow_control |= 2;
3643 		} else {
3644 			dp->anadv_flow_control &= ~2;
3645 		}
3646 		break;
3647 
3648 	case PARAM_ADV_1000FDX_CAP:
3649 		if (val != 0 && val != 1) {
3650 			goto err;
3651 		}
3652 		if (val && (dp->mii_xstatus &
3653 		    (MII_XSTATUS_1000BASET_FD |
3654 		    MII_XSTATUS_1000BASEX_FD)) == 0) {
3655 			goto err;
3656 		}
3657 		dp->anadv_1000fdx = (int)val;
3658 		break;
3659 
3660 	case PARAM_ADV_1000HDX_CAP:
3661 		if (val != 0 && val != 1) {
3662 			goto err;
3663 		}
3664 		if (val && (dp->mii_xstatus &
3665 		    (MII_XSTATUS_1000BASET | MII_XSTATUS_1000BASEX)) == 0) {
3666 			goto err;
3667 		}
3668 		dp->anadv_1000hdx = (int)val;
3669 		break;
3670 
3671 	case PARAM_ADV_100T4_CAP:
3672 		if (val != 0 && val != 1) {
3673 			goto err;
3674 		}
3675 		if (val && (dp->mii_status & MII_STATUS_100_BASE_T4) == 0) {
3676 			goto err;
3677 		}
3678 		dp->anadv_100t4 = (int)val;
3679 		break;
3680 
3681 	case PARAM_ADV_100FDX_CAP:
3682 		if (val != 0 && val != 1) {
3683 			goto err;
3684 		}
3685 		if (val && (dp->mii_status & MII_STATUS_100_BASEX_FD) == 0) {
3686 			goto err;
3687 		}
3688 		dp->anadv_100fdx = (int)val;
3689 		break;
3690 
3691 	case PARAM_ADV_100HDX_CAP:
3692 		if (val != 0 && val != 1) {
3693 			goto err;
3694 		}
3695 		if (val && (dp->mii_status & MII_STATUS_100_BASEX) == 0) {
3696 			goto err;
3697 		}
3698 		dp->anadv_100hdx = (int)val;
3699 		break;
3700 
3701 	case PARAM_ADV_10FDX_CAP:
3702 		if (val != 0 && val != 1) {
3703 			goto err;
3704 		}
3705 		if (val && (dp->mii_status & MII_STATUS_10_FD) == 0) {
3706 			goto err;
3707 		}
3708 		dp->anadv_10fdx = (int)val;
3709 		break;
3710 
3711 	case PARAM_ADV_10HDX_CAP:
3712 		if (val != 0 && val != 1) {
3713 			goto err;
3714 		}
3715 		if (val && (dp->mii_status & MII_STATUS_10) == 0) {
3716 			goto err;
3717 		}
3718 		dp->anadv_10hdx = (int)val;
3719 		break;
3720 	}
3721 
3722 	/* sync with PHY */
3723 	gem_choose_forcedmode(dp);
3724 
3725 	dp->mii_state = MII_STATE_UNKNOWN;
3726 	if (dp->gc.gc_mii_hw_link_detection && dp->link_watcher_id == 0) {
3727 		/* XXX - Can we ignore the return code ? */
3728 		(void) gem_mii_link_check(dp);
3729 	}
3730 
3731 	return (0);
3732 err:
3733 	return (EINVAL);
3734 }
3735 
3736 static void
3737 gem_nd_load(struct gem_dev *dp, char *name, ndgetf_t gf, ndsetf_t sf, int item)
3738 {
3739 	struct gem_nd_arg	*arg;
3740 
3741 	ASSERT(item >= 0);
3742 	ASSERT(item < PARAM_COUNT);
3743 
3744 	arg = &((struct gem_nd_arg *)(void *)dp->nd_arg_p)[item];
3745 	arg->dp = dp;
3746 	arg->item = item;
3747 
3748 	DPRINTF(2, (CE_CONT, "!%s: %s: name:%s, item:%d",
3749 	    dp->name, __func__, name, item));
3750 	(void) nd_load(&dp->nd_data_p, name, gf, sf, (caddr_t)arg);
3751 }
3752 
3753 static void
3754 gem_nd_setup(struct gem_dev *dp)
3755 {
3756 	DPRINTF(0, (CE_CONT, "!%s: %s: called, mii_status:0x%b",
3757 	    dp->name, __func__, dp->mii_status, MII_STATUS_BITS));
3758 
3759 	ASSERT(dp->nd_arg_p == NULL);
3760 
3761 	dp->nd_arg_p =
3762 	    kmem_zalloc(sizeof (struct gem_nd_arg) * PARAM_COUNT, KM_SLEEP);
3763 
3764 #define	SETFUNC(x)	((x) ? gem_param_set : NULL)
3765 
3766 	gem_nd_load(dp, "autoneg_cap",
3767 	    gem_param_get, NULL, PARAM_AUTONEG_CAP);
3768 	gem_nd_load(dp, "pause_cap",
3769 	    gem_param_get, NULL, PARAM_PAUSE_CAP);
3770 	gem_nd_load(dp, "asym_pause_cap",
3771 	    gem_param_get, NULL, PARAM_ASYM_PAUSE_CAP);
3772 	gem_nd_load(dp, "1000fdx_cap",
3773 	    gem_param_get, NULL, PARAM_1000FDX_CAP);
3774 	gem_nd_load(dp, "1000hdx_cap",
3775 	    gem_param_get, NULL, PARAM_1000HDX_CAP);
3776 	gem_nd_load(dp, "100T4_cap",
3777 	    gem_param_get, NULL, PARAM_100T4_CAP);
3778 	gem_nd_load(dp, "100fdx_cap",
3779 	    gem_param_get, NULL, PARAM_100FDX_CAP);
3780 	gem_nd_load(dp, "100hdx_cap",
3781 	    gem_param_get, NULL, PARAM_100HDX_CAP);
3782 	gem_nd_load(dp, "10fdx_cap",
3783 	    gem_param_get, NULL, PARAM_10FDX_CAP);
3784 	gem_nd_load(dp, "10hdx_cap",
3785 	    gem_param_get, NULL, PARAM_10HDX_CAP);
3786 
3787 	/* Our advertised capabilities */
3788 	gem_nd_load(dp, "adv_autoneg_cap", gem_param_get,
3789 	    SETFUNC(dp->mii_status & MII_STATUS_CANAUTONEG),
3790 	    PARAM_ADV_AUTONEG_CAP);
3791 	gem_nd_load(dp, "adv_pause_cap", gem_param_get,
3792 	    SETFUNC(dp->gc.gc_flow_control & 1),
3793 	    PARAM_ADV_PAUSE_CAP);
3794 	gem_nd_load(dp, "adv_asym_pause_cap", gem_param_get,
3795 	    SETFUNC(dp->gc.gc_flow_control & 2),
3796 	    PARAM_ADV_ASYM_PAUSE_CAP);
3797 	gem_nd_load(dp, "adv_1000fdx_cap", gem_param_get,
3798 	    SETFUNC(dp->mii_xstatus &
3799 	    (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD)),
3800 	    PARAM_ADV_1000FDX_CAP);
3801 	gem_nd_load(dp, "adv_1000hdx_cap", gem_param_get,
3802 	    SETFUNC(dp->mii_xstatus &
3803 	    (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET)),
3804 	    PARAM_ADV_1000HDX_CAP);
3805 	gem_nd_load(dp, "adv_100T4_cap", gem_param_get,
3806 	    SETFUNC((dp->mii_status & MII_STATUS_100_BASE_T4) &&
3807 	    !dp->mii_advert_ro),
3808 	    PARAM_ADV_100T4_CAP);
3809 	gem_nd_load(dp, "adv_100fdx_cap", gem_param_get,
3810 	    SETFUNC((dp->mii_status & MII_STATUS_100_BASEX_FD) &&
3811 	    !dp->mii_advert_ro),
3812 	    PARAM_ADV_100FDX_CAP);
3813 	gem_nd_load(dp, "adv_100hdx_cap", gem_param_get,
3814 	    SETFUNC((dp->mii_status & MII_STATUS_100_BASEX) &&
3815 	    !dp->mii_advert_ro),
3816 	    PARAM_ADV_100HDX_CAP);
3817 	gem_nd_load(dp, "adv_10fdx_cap", gem_param_get,
3818 	    SETFUNC((dp->mii_status & MII_STATUS_10_FD) &&
3819 	    !dp->mii_advert_ro),
3820 	    PARAM_ADV_10FDX_CAP);
3821 	gem_nd_load(dp, "adv_10hdx_cap", gem_param_get,
3822 	    SETFUNC((dp->mii_status & MII_STATUS_10) &&
3823 	    !dp->mii_advert_ro),
3824 	    PARAM_ADV_10HDX_CAP);
3825 
3826 	/* Partner's advertised capabilities */
3827 	gem_nd_load(dp, "lp_autoneg_cap",
3828 	    gem_param_get, NULL, PARAM_LP_AUTONEG_CAP);
3829 	gem_nd_load(dp, "lp_pause_cap",
3830 	    gem_param_get, NULL, PARAM_LP_PAUSE_CAP);
3831 	gem_nd_load(dp, "lp_asym_pause_cap",
3832 	    gem_param_get, NULL, PARAM_LP_ASYM_PAUSE_CAP);
3833 	gem_nd_load(dp, "lp_1000fdx_cap",
3834 	    gem_param_get, NULL, PARAM_LP_1000FDX_CAP);
3835 	gem_nd_load(dp, "lp_1000hdx_cap",
3836 	    gem_param_get, NULL, PARAM_LP_1000HDX_CAP);
3837 	gem_nd_load(dp, "lp_100T4_cap",
3838 	    gem_param_get, NULL, PARAM_LP_100T4_CAP);
3839 	gem_nd_load(dp, "lp_100fdx_cap",
3840 	    gem_param_get, NULL, PARAM_LP_100FDX_CAP);
3841 	gem_nd_load(dp, "lp_100hdx_cap",
3842 	    gem_param_get, NULL, PARAM_LP_100HDX_CAP);
3843 	gem_nd_load(dp, "lp_10fdx_cap",
3844 	    gem_param_get, NULL, PARAM_LP_10FDX_CAP);
3845 	gem_nd_load(dp, "lp_10hdx_cap",
3846 	    gem_param_get, NULL, PARAM_LP_10HDX_CAP);
3847 
3848 	/* Current operating modes */
3849 	gem_nd_load(dp, "link_status",
3850 	    gem_param_get, NULL, PARAM_LINK_STATUS);
3851 	gem_nd_load(dp, "link_speed",
3852 	    gem_param_get, NULL, PARAM_LINK_SPEED);
3853 	gem_nd_load(dp, "link_duplex",
3854 	    gem_param_get, NULL, PARAM_LINK_DUPLEX);
3855 	gem_nd_load(dp, "link_autoneg",
3856 	    gem_param_get, NULL, PARAM_LINK_AUTONEG);
3857 	gem_nd_load(dp, "link_rx_pause",
3858 	    gem_param_get, NULL, PARAM_LINK_RX_PAUSE);
3859 	gem_nd_load(dp, "link_tx_pause",
3860 	    gem_param_get, NULL, PARAM_LINK_TX_PAUSE);
3861 #ifdef DEBUG_RESUME
3862 	gem_nd_load(dp, "resume_test",
3863 	    gem_param_get, NULL, PARAM_RESUME_TEST);
3864 #endif
3865 #undef	SETFUNC
3866 }
3867 
3868 static
3869 enum ioc_reply
3870 gem_nd_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
3871 {
3872 	boolean_t	ok;
3873 
3874 	ASSERT(mutex_owned(&dp->intrlock));
3875 
3876 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3877 
3878 	switch (iocp->ioc_cmd) {
3879 	case ND_GET:
3880 		ok = nd_getset(wq, dp->nd_data_p, mp);
3881 		DPRINTF(0, (CE_CONT,
3882 		    "%s: get %s", dp->name, ok ? "OK" : "FAIL"));
3883 		return (ok ? IOC_REPLY : IOC_INVAL);
3884 
3885 	case ND_SET:
3886 		ok = nd_getset(wq, dp->nd_data_p, mp);
3887 
3888 		DPRINTF(0, (CE_CONT, "%s: set %s err %d",
3889 		    dp->name, ok ? "OK" : "FAIL", iocp->ioc_error));
3890 
3891 		if (!ok) {
3892 			return (IOC_INVAL);
3893 		}
3894 
3895 		if (iocp->ioc_error) {
3896 			return (IOC_REPLY);
3897 		}
3898 
3899 		return (IOC_RESTART_REPLY);
3900 	}
3901 
3902 	cmn_err(CE_WARN, "%s: invalid cmd 0x%x", dp->name, iocp->ioc_cmd);
3903 
3904 	return (IOC_INVAL);
3905 }
3906 
3907 static void
3908 gem_nd_cleanup(struct gem_dev *dp)
3909 {
3910 	ASSERT(dp->nd_data_p != NULL);
3911 	ASSERT(dp->nd_arg_p != NULL);
3912 
3913 	nd_free(&dp->nd_data_p);
3914 
3915 	kmem_free(dp->nd_arg_p, sizeof (struct gem_nd_arg) * PARAM_COUNT);
3916 	dp->nd_arg_p = NULL;
3917 }
3918 
3919 static void
3920 gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp)
3921 {
3922 	struct iocblk	*iocp;
3923 	enum ioc_reply	status;
3924 	int		cmd;
3925 
3926 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3927 
3928 	/*
3929 	 * Validate the command before bothering with the mutex ...
3930 	 */
3931 	iocp = (void *)mp->b_rptr;
3932 	iocp->ioc_error = 0;
3933 	cmd = iocp->ioc_cmd;
3934 
3935 	DPRINTF(0, (CE_CONT, "%s: %s cmd:0x%x", dp->name, __func__, cmd));
3936 
3937 	mutex_enter(&dp->intrlock);
3938 	mutex_enter(&dp->xmitlock);
3939 
3940 	switch (cmd) {
3941 	default:
3942 		_NOTE(NOTREACHED)
3943 		status = IOC_INVAL;
3944 		break;
3945 
3946 	case ND_GET:
3947 	case ND_SET:
3948 		status = gem_nd_ioctl(dp, wq, mp, iocp);
3949 		break;
3950 	}
3951 
3952 	mutex_exit(&dp->xmitlock);
3953 	mutex_exit(&dp->intrlock);
3954 
3955 #ifdef DEBUG_RESUME
3956 	if (cmd == ND_GET)  {
3957 		gem_suspend(dp->dip);
3958 		gem_resume(dp->dip);
3959 	}
3960 #endif
3961 	/*
3962 	 * Finally, decide how to reply
3963 	 */
3964 	switch (status) {
3965 	default:
3966 	case IOC_INVAL:
3967 		/*
3968 		 * Error, reply with a NAK and EINVAL or the specified error
3969 		 */
3970 		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
3971 		    EINVAL : iocp->ioc_error);
3972 		break;
3973 
3974 	case IOC_DONE:
3975 		/*
3976 		 * OK, reply already sent
3977 		 */
3978 		break;
3979 
3980 	case IOC_RESTART_ACK:
3981 	case IOC_ACK:
3982 		/*
3983 		 * OK, reply with an ACK
3984 		 */
3985 		miocack(wq, mp, 0, 0);
3986 		break;
3987 
3988 	case IOC_RESTART_REPLY:
3989 	case IOC_REPLY:
3990 		/*
3991 		 * OK, send prepared reply as ACK or NAK
3992 		 */
3993 		mp->b_datap->db_type =
3994 		    iocp->ioc_error == 0 ? M_IOCACK : M_IOCNAK;
3995 		qreply(wq, mp);
3996 		break;
3997 	}
3998 }
3999 
4000 #ifndef SYS_MAC_H
4001 #define	XCVR_UNDEFINED	0
4002 #define	XCVR_NONE	1
4003 #define	XCVR_10		2
4004 #define	XCVR_100T4	3
4005 #define	XCVR_100X	4
4006 #define	XCVR_100T2	5
4007 #define	XCVR_1000X	6
4008 #define	XCVR_1000T	7
4009 #endif
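/*
 * gem_mac_xcvr_inuse: derive the XCVR_* type of the transceiver in use
 * from the MII status and extended status registers.
 */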
4010 static int
4011 gem_mac_xcvr_inuse(struct gem_dev *dp)
4012 {
4013 	int	val = XCVR_UNDEFINED;
4014 
4015 	if ((dp->mii_status & MII_STATUS_XSTATUS) == 0) {
4016 		if (dp->mii_status & MII_STATUS_100_BASE_T4) {
4017 			val = XCVR_100T4;
4018 		} else if (dp->mii_status &
4019 		    (MII_STATUS_100_BASEX_FD |
4020 		    MII_STATUS_100_BASEX)) {
4021 			val = XCVR_100X;
4022 		} else if (dp->mii_status &
4023 		    (MII_STATUS_100_BASE_T2_FD |
4024 		    MII_STATUS_100_BASE_T2)) {
4025 			val = XCVR_100T2;
4026 		} else if (dp->mii_status &
4027 		    (MII_STATUS_10_FD | MII_STATUS_10)) {
4028 			val = XCVR_10;
4029 		}
4030 	} else if (dp->mii_xstatus &
4031 	    (MII_XSTATUS_1000BASET_FD | MII_XSTATUS_1000BASET)) {
4032 		val = XCVR_1000T;
4033 	} else if (dp->mii_xstatus &
4034 	    (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASEX)) {
4035 		val = XCVR_1000X;
4036 	}
4037 
4038 	return (val);
4039 }
4040 
4041 /* ============================================================== */
4042 /*
4043  * GLDv3 interface
4044  */
4045 /* ============================================================== */
4046 static int		gem_m_getstat(void *, uint_t, uint64_t *);
4047 static int		gem_m_start(void *);
4048 static void		gem_m_stop(void *);
4049 static int		gem_m_setpromisc(void *, boolean_t);
4050 static int		gem_m_multicst(void *, boolean_t, const uint8_t *);
4051 static int		gem_m_unicst(void *, const uint8_t *);
4052 static mblk_t		*gem_m_tx(void *, mblk_t *);
4053 static void		gem_m_resources(void *);
4054 static void		gem_m_ioctl(void *, queue_t *, mblk_t *);
4055 static boolean_t	gem_m_getcapab(void *, mac_capab_t, void *);
4056 
4057 #define	GEM_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)
4058 
4059 static mac_callbacks_t gem_m_callbacks = {
4060 	GEM_M_CALLBACK_FLAGS,
4061 	gem_m_getstat,
4062 	gem_m_start,
4063 	gem_m_stop,
4064 	gem_m_setpromisc,
4065 	gem_m_multicst,
4066 	gem_m_unicst,
4067 	gem_m_tx,
4068 	gem_m_resources,
4069 	gem_m_ioctl,
4070 	gem_m_getcapab,
4071 };
4072 
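/*
 * gem_m_start: GLDv3 mc_start entry point.
 * Initialize the hardware, program the rx filter, start the mac if the
 * link is already up, and arm the tx timeout watcher.
 */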
4073 static int
4074 gem_m_start(void *arg)
4075 {
4076 	int		err = 0;
4077 	struct gem_dev *dp = arg;
4078 
4079 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4080 
4081 	mutex_enter(&dp->intrlock);
4082 	if (dp->mac_suspended) {
4083 		err = EIO;
4084 		goto x;
4085 	}
4086 	if (gem_mac_init(dp) != GEM_SUCCESS) {
4087 		err = EIO;
4088 		goto x;
4089 	}
4090 	dp->nic_state = NIC_STATE_INITIALIZED;
4091 
4092 	/* reset rx filter state */
4093 	dp->mc_count = 0;
4094 	dp->mc_count_req = 0;
4095 
4097 	/* set up the media mode if the link is already up */
4097 	if (dp->mii_state == MII_STATE_LINKUP) {
4098 		(dp->gc.gc_set_media)(dp);
4099 	}
4100 
4101 	/* setup initial rx filter */
4102 	bcopy(dp->dev_addr.ether_addr_octet,
4103 	    dp->cur_addr.ether_addr_octet, ETHERADDRL);
4104 	dp->rxmode |= RXMODE_ENABLE;
4105 
4106 	if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4107 		err = EIO;
4108 		goto x;
4109 	}
4110 
4111 	dp->nic_state = NIC_STATE_ONLINE;
4112 	if (dp->mii_state == MII_STATE_LINKUP) {
4113 		if (gem_mac_start(dp) != GEM_SUCCESS) {
4114 			err = EIO;
4115 			goto x;
4116 		}
4117 	}
4118 
4119 	dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
4120 	    (void *)dp, dp->gc.gc_tx_timeout_interval);
4121 	mutex_exit(&dp->intrlock);
4122 
4123 	return (0);
4124 x:
4125 	dp->nic_state = NIC_STATE_STOPPED;
4126 	mutex_exit(&dp->intrlock);
4127 	return (err);
4128 }
4129 
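/*
 * gem_m_stop: GLDv3 mc_stop entry point.
 * Disable rx, cancel the tx timeout watcher, wait for the interrupt
 * handler to drain, and then stop the mac.
 */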
4130 static void
4131 gem_m_stop(void *arg)
4132 {
4133 	struct gem_dev	*dp = arg;
4134 
4135 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4136 
4137 	/* stop rx */
4138 	mutex_enter(&dp->intrlock);
4139 	if (dp->mac_suspended) {
4140 		mutex_exit(&dp->intrlock);
4141 		return;
4142 	}
4143 	dp->rxmode &= ~RXMODE_ENABLE;
4144 	(void) gem_mac_set_rx_filter(dp);
4145 	mutex_exit(&dp->intrlock);
4146 
4147 	/* stop tx timeout watcher */
4148 	if (dp->timeout_id) {
4149 		while (untimeout(dp->timeout_id) == -1)
4150 			;
4151 		dp->timeout_id = 0;
4152 	}
4153 
4154 	/* make the nic state inactive */
4155 	mutex_enter(&dp->intrlock);
4156 	if (dp->mac_suspended) {
4157 		mutex_exit(&dp->intrlock);
4158 		return;
4159 	}
4160 	dp->nic_state = NIC_STATE_STOPPED;
4161 
4162 	/* deassert mac_active to block the interrupt handler */
4163 	mutex_enter(&dp->xmitlock);
4164 	dp->mac_active = B_FALSE;
4165 	mutex_exit(&dp->xmitlock);
4166 
4167 	/* block interrupts */
4168 	while (dp->intr_busy) {
4169 		cv_wait(&dp->tx_drain_cv, &dp->intrlock);
4170 	}
4171 	(void) gem_mac_stop(dp, 0);
4172 	mutex_exit(&dp->intrlock);
4173 }
4174 
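/*
 * gem_m_multicst: GLDv3 mc_multicst entry point.
 * Add or remove a multicast address from the rx filter.
 */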
4175 static int
4176 gem_m_multicst(void *arg, boolean_t add, const uint8_t *ep)
4177 {
4178 	int		err;
4179 	int		ret;
4180 	struct gem_dev	*dp = arg;
4181 
4182 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4183 
4184 	if (add) {
4185 		ret = gem_add_multicast(dp, ep);
4186 	} else {
4187 		ret = gem_remove_multicast(dp, ep);
4188 	}
4189 
4190 	err = 0;
4191 	if (ret != GEM_SUCCESS) {
4192 		err = EIO;
4193 	}
4194 
4195 	return (err);
4196 }
4197 
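/*
 * gem_m_setpromisc: GLDv3 mc_setpromisc entry point.
 * Enable or disable promiscuous reception and reprogram the rx filter.
 */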
4198 static int
4199 gem_m_setpromisc(void *arg, boolean_t on)
4200 {
4201 	int		err = 0;	/* no error */
4202 	struct gem_dev	*dp = arg;
4203 
4204 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4205 
4206 	mutex_enter(&dp->intrlock);
4207 	if (dp->mac_suspended) {
4208 		mutex_exit(&dp->intrlock);
4209 		return (EIO);
4210 	}
4211 	if (on) {
4212 		dp->rxmode |= RXMODE_PROMISC;
4213 	} else {
4214 		dp->rxmode &= ~RXMODE_PROMISC;
4215 	}
4216 
4217 	if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4218 		err = EIO;
4219 	}
4220 	mutex_exit(&dp->intrlock);
4221 
4222 	return (err);
4223 }
4224 
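/*
 * gem_m_getstat: GLDv3 mc_getstat entry point.
 * Refresh the hardware counters via gc_get_stats and return the
 * requested MAC or Ethernet statistic.
 */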
4225 int
4226 gem_m_getstat(void *arg, uint_t stat, uint64_t *valp)
4227 {
4228 	struct gem_dev		*dp = arg;
4229 	struct gem_stats	*gstp = &dp->stats;
4230 	uint64_t		val = 0;
4231 
4232 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4233 
4234 	if (mutex_owned(&dp->intrlock)) {
4235 		if (dp->mac_suspended) {
4236 			return (EIO);
4237 		}
4238 	} else {
4239 		mutex_enter(&dp->intrlock);
4240 		if (dp->mac_suspended) {
4241 			mutex_exit(&dp->intrlock);
4242 			return (EIO);
4243 		}
4244 		mutex_exit(&dp->intrlock);
4245 	}
4246 
4247 	if ((*dp->gc.gc_get_stats)(dp) != GEM_SUCCESS) {
4248 		return (EIO);
4249 	}
4250 
4251 	switch (stat) {
4252 	case MAC_STAT_IFSPEED:
4253 		val = gem_speed_value[dp->speed] * 1000000ull;
4254 		break;
4255 
4256 	case MAC_STAT_MULTIRCV:
4257 		val = gstp->rmcast;
4258 		break;
4259 
4260 	case MAC_STAT_BRDCSTRCV:
4261 		val = gstp->rbcast;
4262 		break;
4263 
4264 	case MAC_STAT_MULTIXMT:
4265 		val = gstp->omcast;
4266 		break;
4267 
4268 	case MAC_STAT_BRDCSTXMT:
4269 		val = gstp->obcast;
4270 		break;
4271 
4272 	case MAC_STAT_NORCVBUF:
4273 		val = gstp->norcvbuf + gstp->missed;
4274 		break;
4275 
4276 	case MAC_STAT_IERRORS:
4277 		val = gstp->errrcv;
4278 		break;
4279 
4280 	case MAC_STAT_NOXMTBUF:
4281 		val = gstp->noxmtbuf;
4282 		break;
4283 
4284 	case MAC_STAT_OERRORS:
4285 		val = gstp->errxmt;
4286 		break;
4287 
4288 	case MAC_STAT_COLLISIONS:
4289 		val = gstp->collisions;
4290 		break;
4291 
4292 	case MAC_STAT_RBYTES:
4293 		val = gstp->rbytes;
4294 		break;
4295 
4296 	case MAC_STAT_IPACKETS:
4297 		val = gstp->rpackets;
4298 		break;
4299 
4300 	case MAC_STAT_OBYTES:
4301 		val = gstp->obytes;
4302 		break;
4303 
4304 	case MAC_STAT_OPACKETS:
4305 		val = gstp->opackets;
4306 		break;
4307 
4308 	case MAC_STAT_UNDERFLOWS:
4309 		val = gstp->underflow;
4310 		break;
4311 
4312 	case MAC_STAT_OVERFLOWS:
4313 		val = gstp->overflow;
4314 		break;
4315 
4316 	case ETHER_STAT_ALIGN_ERRORS:
4317 		val = gstp->frame;
4318 		break;
4319 
4320 	case ETHER_STAT_FCS_ERRORS:
4321 		val = gstp->crc;
4322 		break;
4323 
4324 	case ETHER_STAT_FIRST_COLLISIONS:
4325 		val = gstp->first_coll;
4326 		break;
4327 
4328 	case ETHER_STAT_MULTI_COLLISIONS:
4329 		val = gstp->multi_coll;
4330 		break;
4331 
4332 	case ETHER_STAT_SQE_ERRORS:
4333 		val = gstp->sqe;
4334 		break;
4335 
4336 	case ETHER_STAT_DEFER_XMTS:
4337 		val = gstp->defer;
4338 		break;
4339 
4340 	case ETHER_STAT_TX_LATE_COLLISIONS:
4341 		val = gstp->xmtlatecoll;
4342 		break;
4343 
4344 	case ETHER_STAT_EX_COLLISIONS:
4345 		val = gstp->excoll;
4346 		break;
4347 
4348 	case ETHER_STAT_MACXMT_ERRORS:
4349 		val = gstp->xmit_internal_err;
4350 		break;
4351 
4352 	case ETHER_STAT_CARRIER_ERRORS:
4353 		val = gstp->nocarrier;
4354 		break;
4355 
4356 	case ETHER_STAT_TOOLONG_ERRORS:
4357 		val = gstp->frame_too_long;
4358 		break;
4359 
4360 	case ETHER_STAT_MACRCV_ERRORS:
4361 		val = gstp->rcv_internal_err;
4362 		break;
4363 
4364 	case ETHER_STAT_XCVR_ADDR:
4365 		val = dp->mii_phy_addr;
4366 		break;
4367 
4368 	case ETHER_STAT_XCVR_ID:
4369 		val = dp->mii_phy_id;
4370 		break;
4371 
4372 	case ETHER_STAT_XCVR_INUSE:
4373 		val = gem_mac_xcvr_inuse(dp);
4374 		break;
4375 
4376 	case ETHER_STAT_CAP_1000FDX:
4377 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
4378 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
4379 		break;
4380 
4381 	case ETHER_STAT_CAP_1000HDX:
4382 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
4383 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
4384 		break;
4385 
4386 	case ETHER_STAT_CAP_100FDX:
4387 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4388 		break;
4389 
4390 	case ETHER_STAT_CAP_100HDX:
4391 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4392 		break;
4393 
4394 	case ETHER_STAT_CAP_10FDX:
4395 		val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4396 		break;
4397 
4398 	case ETHER_STAT_CAP_10HDX:
4399 		val = BOOLEAN(dp->mii_status & MII_STATUS_10);
4400 		break;
4401 
4402 	case ETHER_STAT_CAP_ASMPAUSE:
4403 		val = BOOLEAN(dp->gc.gc_flow_control & 2);
4404 		break;
4405 
4406 	case ETHER_STAT_CAP_PAUSE:
4407 		val = BOOLEAN(dp->gc.gc_flow_control & 1);
4408 		break;
4409 
4410 	case ETHER_STAT_CAP_AUTONEG:
4411 		val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4412 		break;
4413 
4414 	case ETHER_STAT_ADV_CAP_1000FDX:
4415 		val = dp->anadv_1000fdx;
4416 		break;
4417 
4418 	case ETHER_STAT_ADV_CAP_1000HDX:
4419 		val = dp->anadv_1000hdx;
4420 		break;
4421 
4422 	case ETHER_STAT_ADV_CAP_100FDX:
4423 		val = dp->anadv_100fdx;
4424 		break;
4425 
4426 	case ETHER_STAT_ADV_CAP_100HDX:
4427 		val = dp->anadv_100hdx;
4428 		break;
4429 
4430 	case ETHER_STAT_ADV_CAP_10FDX:
4431 		val = dp->anadv_10fdx;
4432 		break;
4433 
4434 	case ETHER_STAT_ADV_CAP_10HDX:
4435 		val = dp->anadv_10hdx;
4436 		break;
4437 
4438 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
4439 		val = BOOLEAN(dp->anadv_flow_control & 2);
4440 		break;
4441 
4442 	case ETHER_STAT_ADV_CAP_PAUSE:
4443 		val = BOOLEAN(dp->anadv_flow_control & 1);
4444 		break;
4445 
4446 	case ETHER_STAT_ADV_CAP_AUTONEG:
4447 		val = dp->anadv_autoneg;
4448 		break;
4449 
4450 	case ETHER_STAT_LP_CAP_1000FDX:
4451 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
4452 		break;
4453 
4454 	case ETHER_STAT_LP_CAP_1000HDX:
4455 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
4456 		break;
4457 
4458 	case ETHER_STAT_LP_CAP_100FDX:
4459 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
4460 		break;
4461 
4462 	case ETHER_STAT_LP_CAP_100HDX:
4463 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
4464 		break;
4465 
4466 	case ETHER_STAT_LP_CAP_10FDX:
4467 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
4468 		break;
4469 
4470 	case ETHER_STAT_LP_CAP_10HDX:
4471 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
4472 		break;
4473 
4474 	case ETHER_STAT_LP_CAP_ASMPAUSE:
4475 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASM_DIR);
4476 		break;
4477 
4478 	case ETHER_STAT_LP_CAP_PAUSE:
4479 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
4480 		break;
4481 
4482 	case ETHER_STAT_LP_CAP_AUTONEG:
4483 		val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4484 		break;
4485 
4486 	case ETHER_STAT_LINK_ASMPAUSE:
4487 		val = BOOLEAN(dp->flow_control & 2);
4488 		break;
4489 
4490 	case ETHER_STAT_LINK_PAUSE:
4491 		val = BOOLEAN(dp->flow_control & 1);
4492 		break;
4493 
4494 	case ETHER_STAT_LINK_AUTONEG:
4495 		val = dp->anadv_autoneg &&
4496 		    BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4497 		break;
4498 
4499 	case ETHER_STAT_LINK_DUPLEX:
4500 		val = (dp->mii_state == MII_STATE_LINKUP) ?
4501 		    (dp->full_duplex ? 2 : 1) : 0;
4502 		break;
4503 
4504 	case ETHER_STAT_TOOSHORT_ERRORS:
4505 		val = gstp->runt;
4506 		break;
4507 	case ETHER_STAT_LP_REMFAULT:
4508 		val = BOOLEAN(dp->mii_lpable & MII_AN_ADVERT_REMFAULT);
4509 		break;
4510 
4511 	case ETHER_STAT_JABBER_ERRORS:
4512 		val = gstp->jabber;
4513 		break;
4514 
4515 	case ETHER_STAT_CAP_100T4:
4516 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4517 		break;
4518 
4519 	case ETHER_STAT_ADV_CAP_100T4:
4520 		val = dp->anadv_100t4;
4521 		break;
4522 
4523 	case ETHER_STAT_LP_CAP_100T4:
4524 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
4525 		break;
4526 
4527 	default:
4528 #if GEM_DEBUG_LEVEL > 2
4529 		cmn_err(CE_WARN,
4530 		    "%s: unrecognized parameter value = %d",
4531 		    __func__, stat);
4532 #endif
4533 		return (ENOTSUP);
4534 	}
4535 
4536 	*valp = val;
4537 
4538 	return (0);
4539 }
4540 
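/*
 * gem_m_unicst: GLDv3 mc_unicst entry point.
 * Program the given unicast address as the current mac address.
 */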
4541 static int
4542 gem_m_unicst(void *arg, const uint8_t *mac)
4543 {
4544 	int		err = 0;
4545 	struct gem_dev	*dp = arg;
4546 
4547 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4548 
4549 	mutex_enter(&dp->intrlock);
4550 	if (dp->mac_suspended) {
4551 		mutex_exit(&dp->intrlock);
4552 		return (EIO);
4553 	}
4554 	bcopy(mac, dp->cur_addr.ether_addr_octet, ETHERADDRL);
4555 	dp->rxmode |= RXMODE_ENABLE;
4556 
4557 	if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4558 		err = EIO;
4559 	}
4560 	mutex_exit(&dp->intrlock);
4561 
4562 	return (err);
4563 }
4564 
4565 /*
4566  * gem_m_tx is used only for sending data packets onto the ethernet wire.
4567  */
4568 static mblk_t *
4569 gem_m_tx(void *arg, mblk_t *mp)
4570 {
4571 	uint32_t	flags = 0;
4572 	struct gem_dev	*dp = arg;
4573 	mblk_t		*tp;
4574 
4575 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4576 
4577 	ASSERT(dp->nic_state == NIC_STATE_ONLINE);
4578 	if (dp->mii_state != MII_STATE_LINKUP) {
4579 		/* Some NICs cannot send packets while the link is down. */
4580 		while (mp) {
4581 			tp = mp->b_next;
4582 			mp->b_next = NULL;
4583 			freemsg(mp);
4584 			mp = tp;
4585 		}
4586 		return (NULL);
4587 	}
4588 
4589 	return (gem_send_common(dp, mp, flags));
4590 }
4591 
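/*
 * gem_set_coalease: rx blanking callback registered via gem_m_resources;
 * stores the requested packet count, capped at half of the rx ring size,
 * as the rx poll delay.
 */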
4592 static void
4593 gem_set_coalease(void *arg, time_t ticks, uint_t count)
4594 {
4595 	struct gem_dev *dp = arg;
4596 	DPRINTF(1, (CE_CONT, "%s: %s: ticks:%d count:%d",
4597 	    dp->name, __func__, ticks, count));
4598 
4599 	mutex_enter(&dp->intrlock);
4600 	dp->poll_pkt_delay = min(count, dp->gc.gc_rx_ring_size/2);
4601 	mutex_exit(&dp->intrlock);
4602 }
4603 
4604 static void
4605 gem_m_resources(void *arg)
4606 {
4607 	struct gem_dev		*dp = arg;
4608 	mac_rx_fifo_t		mrf;
4609 
4610 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4611 
4612 	mutex_enter(&dp->intrlock);
4613 	mutex_enter(&dp->xmitlock);
4614 
4615 	/*
4616 	 * Register Rx rings as resources and save mac
4617 	 * resource id for future reference
4618 	 */
4619 	mrf.mrf_type = MAC_RX_FIFO;
4620 	mrf.mrf_blank = gem_set_coalease;
4621 	mrf.mrf_arg = (void *)dp;
4622 	mrf.mrf_normal_blank_time = 1; /* in uS */
4623 	mrf.mrf_normal_pkt_count = dp->poll_pkt_delay;
4624 
4625 	dp->mac_rx_ring_ha = mac_resource_add(dp->mh, (mac_resource_t *)&mrf);
4626 
4627 	mutex_exit(&dp->xmitlock);
4628 	mutex_exit(&dp->intrlock);
4629 }
4630 
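/*
 * gem_m_ioctl: GLDv3 mc_ioctl entry point; forwards to gem_mac_ioctl.
 */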
4631 static void
4632 gem_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
4633 {
4634 	DPRINTF(0, (CE_CONT, "!%s: %s: called",
4635 	    ((struct gem_dev *)arg)->name, __func__));
4636 
4637 	gem_mac_ioctl((struct gem_dev *)arg, wq, mp);
4638 }
4639 
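/*
 * gem_m_getcapab: GLDv3 mc_getcapab entry point.
 * Only MAC_CAPAB_POLL is advertised; all other capabilities are denied.
 */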
4640 static boolean_t
4641 gem_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
4642 {
4643 	boolean_t	ret;
4644 
4645 	ret = B_FALSE;
4646 	switch (cap) {
4647 	case MAC_CAPAB_POLL:
4648 		ret = B_TRUE;
4649 		break;
4650 	}
4651 	return (ret);
4652 }
4653 
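/*
 * gem_gld3_init: fill in the mac_register_t used to register this
 * device with the GLDv3 framework.
 */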
4654 static void
4655 gem_gld3_init(struct gem_dev *dp, mac_register_t *macp)
4656 {
4657 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4658 	macp->m_driver = dp;
4659 	macp->m_dip = dp->dip;
4660 	macp->m_src_addr = dp->dev_addr.ether_addr_octet;
4661 	macp->m_callbacks = &gem_m_callbacks;
4662 	macp->m_min_sdu = 0;
4663 	macp->m_max_sdu = dp->mtu;
4664 
4665 	if (dp->misc_flag & GEM_VLAN) {
4666 		macp->m_margin = VTAG_SIZE;
4667 	}
4668 }
4669 
4670 /* ======================================================================== */
4671 /*
4672  * attach/detach support
4673  */
4674 /* ======================================================================== */
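/*
 * gem_read_conf: read link mode, flow control and performance tuning
 * properties from the driver .conf file and apply them to the device.
 */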
4675 static void
4676 gem_read_conf(struct gem_dev *dp)
4677 {
4678 	int	val;
4679 
4680 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4681 
4682 	/*
4683 	 * Get media mode information from the .conf file
4684 	 */
4685 	dp->anadv_autoneg = gem_prop_get_int(dp, "adv_autoneg_cap", 1) != 0;
4686 	dp->anadv_1000fdx = gem_prop_get_int(dp, "adv_1000fdx_cap", 1) != 0;
4687 	dp->anadv_1000hdx = gem_prop_get_int(dp, "adv_1000hdx_cap", 1) != 0;
4688 	dp->anadv_100t4   = gem_prop_get_int(dp, "adv_100T4_cap", 1) != 0;
4689 	dp->anadv_100fdx  = gem_prop_get_int(dp, "adv_100fdx_cap", 1) != 0;
4690 	dp->anadv_100hdx  = gem_prop_get_int(dp, "adv_100hdx_cap", 1) != 0;
4691 	dp->anadv_10fdx   = gem_prop_get_int(dp, "adv_10fdx_cap", 1) != 0;
4692 	dp->anadv_10hdx   = gem_prop_get_int(dp, "adv_10hdx_cap", 1) != 0;
4693 
4694 	if ((ddi_prop_exists(DDI_DEV_T_ANY, dp->dip,
4695 	    DDI_PROP_DONTPASS, "full-duplex"))) {
4696 		dp->full_duplex = gem_prop_get_int(dp, "full-duplex", 1) != 0;
4697 		dp->anadv_autoneg = B_FALSE;
4698 		if (dp->full_duplex) {
4699 			dp->anadv_1000hdx = B_FALSE;
4700 			dp->anadv_100hdx = B_FALSE;
4701 			dp->anadv_10hdx = B_FALSE;
4702 		} else {
4703 			dp->anadv_1000fdx = B_FALSE;
4704 			dp->anadv_100fdx = B_FALSE;
4705 			dp->anadv_10fdx = B_FALSE;
4706 		}
4707 	}
4708 
4709 	if ((val = gem_prop_get_int(dp, "speed", 0)) > 0) {
4710 		dp->anadv_autoneg = B_FALSE;
4711 		switch (val) {
4712 		case 1000:
4713 			dp->speed = GEM_SPD_1000;
4714 			dp->anadv_100t4   = B_FALSE;
4715 			dp->anadv_100fdx  = B_FALSE;
4716 			dp->anadv_100hdx  = B_FALSE;
4717 			dp->anadv_10fdx   = B_FALSE;
4718 			dp->anadv_10hdx   = B_FALSE;
4719 			break;
4720 		case 100:
4721 			dp->speed = GEM_SPD_100;
4722 			dp->anadv_1000fdx = B_FALSE;
4723 			dp->anadv_1000hdx = B_FALSE;
4724 			dp->anadv_10fdx   = B_FALSE;
4725 			dp->anadv_10hdx   = B_FALSE;
4726 			break;
4727 		case 10:
4728 			dp->speed = GEM_SPD_10;
4729 			dp->anadv_1000fdx = B_FALSE;
4730 			dp->anadv_1000hdx = B_FALSE;
4731 			dp->anadv_100t4   = B_FALSE;
4732 			dp->anadv_100fdx  = B_FALSE;
4733 			dp->anadv_100hdx  = B_FALSE;
4734 			break;
4735 		default:
4736 			cmn_err(CE_WARN,
4737 			    "!%s: property %s: illegal value:%d",
4738 			    dp->name, "speed", val);
4739 			dp->anadv_autoneg = B_TRUE;
4740 			break;
4741 		}
4742 	}
4743 
4744 	val = gem_prop_get_int(dp, "flow-control", dp->gc.gc_flow_control);
4745 	if (val > FLOW_CONTROL_RX_PAUSE || val < FLOW_CONTROL_NONE) {
4746 		cmn_err(CE_WARN,
4747 		    "!%s: property %s: illegal value:%d",
4748 		    dp->name, "flow-control", val);
4749 	} else {
4750 		val = min(val, dp->gc.gc_flow_control);
4751 	}
4752 	dp->anadv_flow_control = val;
4753 
4754 	if (gem_prop_get_int(dp, "nointr", 0)) {
4755 		dp->misc_flag |= GEM_NOINTR;
4756 		cmn_err(CE_NOTE, "!%s: polling mode enabled", dp->name);
4757 	}
4758 
4759 	dp->mtu = gem_prop_get_int(dp, "mtu", dp->mtu);
4760 	dp->txthr = gem_prop_get_int(dp, "txthr", dp->txthr);
4761 	dp->rxthr = gem_prop_get_int(dp, "rxthr", dp->rxthr);
4762 	dp->txmaxdma = gem_prop_get_int(dp, "txmaxdma", dp->txmaxdma);
4763 	dp->rxmaxdma = gem_prop_get_int(dp, "rxmaxdma", dp->rxmaxdma);
4764 }
4765 
4766 
4767 /*
4768  * Gem kstat support
4769  */
4770 
4771 #define	GEM_LOCAL_DATA_SIZE(gc)	\
4772 	(sizeof (struct gem_dev) + \
4773 	sizeof (struct mcast_addr) * GEM_MAXMC + \
4774 	sizeof (struct txbuf) * ((gc)->gc_tx_buf_size) + \
4775 	sizeof (void *) * ((gc)->gc_tx_buf_size))
4776 
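/*
 * gem_do_attach: common attach processing for gem-based drivers.
 * Allocates and initializes the per-port soft state, allocates tx/rx
 * DMA resources, probes the PHY, sets up ndd parameters, registers the
 * port with the GLDv3 framework and installs the interrupt handler.
 * Returns the new gem_dev on success or NULL on failure.
 */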
4777 struct gem_dev *
4778 gem_do_attach(dev_info_t *dip, int port,
4779 	struct gem_conf *gc, void *base, ddi_acc_handle_t *regs_handlep,
4780 	void *lp, int lmsize)
4781 {
4782 	struct gem_dev		*dp;
4783 	int			i;
4784 	ddi_iblock_cookie_t	c;
4785 	mac_register_t		*macp = NULL;
4786 	int			ret;
4787 	int			unit;
4788 	int			nports;
4789 
4790 	unit = ddi_get_instance(dip);
4791 	if ((nports = gc->gc_nports) == 0) {
4792 		nports = 1;
4793 	}
4794 	if (nports == 1) {
4795 		ddi_set_driver_private(dip, NULL);
4796 	}
4797 
4798 	DPRINTF(2, (CE_CONT, "!gem%d: gem_do_attach: called cmd:ATTACH",
4799 	    unit));
4800 
4801 	/*
4802 	 * Allocate soft data structure
4803 	 */
4804 	dp = kmem_zalloc(GEM_LOCAL_DATA_SIZE(gc), KM_SLEEP);
4805 
4806 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
4807 		cmn_err(CE_WARN, "!gem%d: %s: mac_alloc failed",
4808 		    unit, __func__);
4809 		return (NULL);
4810 	}
4811 	/* ddi_set_driver_private(dip, dp); */
4812 
4813 	/* link to private area */
4814 	dp->private = lp;
4815 	dp->priv_size = lmsize;
4816 	dp->mc_list = (struct mcast_addr *)&dp[1];
4817 
4818 	dp->dip = dip;
4819 	(void) sprintf(dp->name, gc->gc_name, nports * unit + port);
4820 
4821 	/*
4822 	 * Get iblock cookie
4823 	 */
4824 	if (ddi_get_iblock_cookie(dip, 0, &c) != DDI_SUCCESS) {
4825 		cmn_err(CE_CONT,
4826 		    "!%s: gem_do_attach: ddi_get_iblock_cookie: failed",
4827 		    dp->name);
4828 		goto err_free_private;
4829 	}
4830 	dp->iblock_cookie = c;
4831 
4832 	/*
4833 	 * Initialize mutexes for this device.
4834 	 */
4835 	mutex_init(&dp->intrlock, NULL, MUTEX_DRIVER, (void *)c);
4836 	mutex_init(&dp->xmitlock, NULL, MUTEX_DRIVER, (void *)c);
4837 	cv_init(&dp->tx_drain_cv, NULL, CV_DRIVER, NULL);
4838 
4839 	/*
4840 	 * configure gem parameters
4841 	 */
4842 	dp->base_addr = base;
4843 	dp->regs_handle = *regs_handlep;
4844 	dp->gc = *gc;
4845 	gc = &dp->gc;
4846 	/* patch to simplify dma resource management */
4847 	gc->gc_tx_max_frags = 1;
4848 	gc->gc_tx_max_descs_per_pkt = 1;
4849 	gc->gc_tx_ring_size = gc->gc_tx_buf_size;
4850 	gc->gc_tx_ring_limit = gc->gc_tx_buf_limit;
4851 	gc->gc_tx_desc_write_oo = B_TRUE;
4852 
4853 	gc->gc_nports = nports;	/* fix nports */
4854 
4855 	/* fix copy thresholds */
4856 	gc->gc_tx_copy_thresh = max(ETHERMIN, gc->gc_tx_copy_thresh);
4857 	gc->gc_rx_copy_thresh = max(ETHERMIN, gc->gc_rx_copy_thresh);
4858 
4859 	/* fix rx buffer boundary for iocache line size */
4860 	ASSERT(gc->gc_dma_attr_txbuf.dma_attr_align-1 == gc->gc_tx_buf_align);
4861 	ASSERT(gc->gc_dma_attr_rxbuf.dma_attr_align-1 == gc->gc_rx_buf_align);
4862 	gc->gc_rx_buf_align = max(gc->gc_rx_buf_align, IOC_LINESIZE - 1);
4863 	gc->gc_dma_attr_rxbuf.dma_attr_align = gc->gc_rx_buf_align + 1;
4864 
4865 	/* fix descriptor boundary for cache line size */
4866 	gc->gc_dma_attr_desc.dma_attr_align =
4867 	    max(gc->gc_dma_attr_desc.dma_attr_align, IOC_LINESIZE);
4868 
4869 	/* patch get_packet method */
4870 	if (gc->gc_get_packet == NULL) {
4871 		gc->gc_get_packet = &gem_get_packet_default;
4872 	}
4873 
4874 	/* patch get_rx_start method */
4875 	if (gc->gc_rx_start == NULL) {
4876 		gc->gc_rx_start = &gem_rx_start_default;
4877 	}
4878 
4879 	/* calculate descriptor area */
4880 	if (gc->gc_rx_desc_unit_shift >= 0) {
4881 		dp->rx_desc_size =
4882 		    ROUNDUP(gc->gc_rx_ring_size << gc->gc_rx_desc_unit_shift,
4883 		    gc->gc_dma_attr_desc.dma_attr_align);
4884 	}
4885 	if (gc->gc_tx_desc_unit_shift >= 0) {
4886 		dp->tx_desc_size =
4887 		    ROUNDUP(gc->gc_tx_ring_size << gc->gc_tx_desc_unit_shift,
4888 		    gc->gc_dma_attr_desc.dma_attr_align);
4889 	}
4890 
4891 	dp->mtu = ETHERMTU;
4892 	dp->tx_buf = (void *)&dp->mc_list[GEM_MAXMC];
4893 	/* link tx buffers */
4894 	for (i = 0; i < dp->gc.gc_tx_buf_size; i++) {
4895 		dp->tx_buf[i].txb_next =
4896 		    &dp->tx_buf[SLOT(i + 1, dp->gc.gc_tx_buf_size)];
4897 	}
4898 
4899 	dp->rxmode	   = 0;
4900 	dp->speed	   = GEM_SPD_10;	/* default is 10Mbps */
4901 	dp->full_duplex    = B_FALSE;		/* default is half */
4902 	dp->flow_control   = FLOW_CONTROL_NONE;
4903 	dp->poll_pkt_delay = 8;		/* typical coalesce count for rx packets */
4904 
4905 	/* performance tuning parameters */
4906 	dp->txthr    = ETHERMAX;	/* tx fifo threshold */
4907 	dp->txmaxdma = 16*4;		/* tx max dma burst size */
4908 	dp->rxthr    = 128;		/* rx fifo threshold */
4909 	dp->rxmaxdma = 16*4;		/* rx max dma burst size */
4910 
4911 	/*
4912 	 * Get media mode information from .conf file
4913 	 */
4914 	gem_read_conf(dp);
4915 
4916 	/* rx_buf_len is the required buffer length without alignment padding */
4917 	dp->rx_buf_len = MAXPKTBUF(dp) + dp->gc.gc_rx_header_len;
4918 
4919 	/*
4920 	 * Reset the chip
4921 	 */
4922 	mutex_enter(&dp->intrlock);
4923 	dp->nic_state = NIC_STATE_STOPPED;
4924 	ret = (*dp->gc.gc_reset_chip)(dp);
4925 	mutex_exit(&dp->intrlock);
4926 	if (ret != GEM_SUCCESS) {
4927 		goto err_free_regs;
4928 	}
4929 
4930 	/*
4931 	 * HW dependent parameter initialization
4932 	 */
4933 	mutex_enter(&dp->intrlock);
4934 	ret = (*dp->gc.gc_attach_chip)(dp);
4935 	mutex_exit(&dp->intrlock);
4936 	if (ret != GEM_SUCCESS) {
4937 		goto err_free_regs;
4938 	}
4939 
4940 #ifdef DEBUG_MULTIFRAGS
4941 	dp->gc.gc_tx_copy_thresh = dp->mtu;
4942 #endif
4943 	/* allocate tx and rx resources */
4944 	if (gem_alloc_memory(dp)) {
4945 		goto err_free_regs;
4946 	}
4947 
4948 	DPRINTF(0, (CE_CONT,
4949 	    "!%s: at %p, %02x:%02x:%02x:%02x:%02x:%02x",
4950 	    dp->name, dp->base_addr,
4951 	    dp->dev_addr.ether_addr_octet[0],
4952 	    dp->dev_addr.ether_addr_octet[1],
4953 	    dp->dev_addr.ether_addr_octet[2],
4954 	    dp->dev_addr.ether_addr_octet[3],
4955 	    dp->dev_addr.ether_addr_octet[4],
4956 	    dp->dev_addr.ether_addr_octet[5]));
4957 
4958 	/* copy mac address */
4959 	dp->cur_addr = dp->dev_addr;
4960 
4961 	gem_gld3_init(dp, macp);
4962 
4963 	/* Probe MII phy (scan phy) */
4964 	dp->mii_lpable = 0;
4965 	dp->mii_advert = 0;
4966 	dp->mii_exp = 0;
4967 	dp->mii_ctl1000 = 0;
4968 	dp->mii_stat1000 = 0;
4969 	if ((*dp->gc.gc_mii_probe)(dp) != GEM_SUCCESS) {
4970 		goto err_free_ring;
4971 	}
4972 
4973 	/* mask unsupported abilities */
4974 	dp->anadv_autoneg &= BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4975 	dp->anadv_1000fdx &=
4976 	    BOOLEAN(dp->mii_xstatus &
4977 	    (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD));
4978 	dp->anadv_1000hdx &=
4979 	    BOOLEAN(dp->mii_xstatus &
4980 	    (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET));
4981 	dp->anadv_100t4  &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4982 	dp->anadv_100fdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4983 	dp->anadv_100hdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4984 	dp->anadv_10fdx  &= BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4985 	dp->anadv_10hdx  &= BOOLEAN(dp->mii_status & MII_STATUS_10);
4986 
4987 	gem_choose_forcedmode(dp);
4988 
4989 	/* initialize MII phy if required */
4990 	if (dp->gc.gc_mii_init) {
4991 		if ((*dp->gc.gc_mii_init)(dp) != GEM_SUCCESS) {
4992 			goto err_free_ring;
4993 		}
4994 	}
4995 
4996 	/*
4997 	 * set up ndd parameters, including mii status and statistics
4998 	 */
4999 	gem_nd_setup(dp);
5000 
5001 	/*
5002 	 * Register with the GLDv3 framework and add the interrupt.
5003 	 */
5004 	if (ret = mac_register(macp, &dp->mh)) {
5005 		cmn_err(CE_WARN, "!%s: mac_register failed, error:%d",
5006 		    dp->name, ret);
5007 		goto err_release_stats;
5008 	}
5009 	mac_free(macp);
5010 	macp = NULL;
5011 
5012 	if (dp->misc_flag & GEM_SOFTINTR) {
5013 		if (ddi_add_softintr(dip,
5014 		    DDI_SOFTINT_LOW, &dp->soft_id,
5015 		    NULL, NULL,
5016 		    (uint_t (*)(caddr_t))gem_intr,
5017 		    (caddr_t)dp) != DDI_SUCCESS) {
5018 			cmn_err(CE_WARN, "!%s: ddi_add_softintr failed",
5019 			    dp->name);
5020 			goto err_unregister;
5021 		}
5022 	} else if ((dp->misc_flag & GEM_NOINTR) == 0) {
5023 		if (ddi_add_intr(dip, 0, NULL, NULL,
5024 		    (uint_t (*)(caddr_t))gem_intr,
5025 		    (caddr_t)dp) != DDI_SUCCESS) {
5026 			cmn_err(CE_WARN, "!%s: ddi_add_intr failed", dp->name);
5027 			goto err_unregister;
5028 		}
5029 	} else {
5030 		/*
5031 		 * Don't use interrupts;
5032 		 * schedule the first call of gem_intr_watcher instead.
5033 		 */
5034 		dp->intr_watcher_id =
5035 		    timeout((void (*)(void *))gem_intr_watcher,
5036 		    (void *)dp, drv_usectohz(3*1000000));
5037 	}
5038 
5039 	/* link this device to dev_info */
5040 	dp->next = (struct gem_dev *)ddi_get_driver_private(dip);
5041 	dp->port = port;
5042 	ddi_set_driver_private(dip, (caddr_t)dp);
5043 
5044 	/* reset mii phy and start mii link watcher */
5045 	gem_mii_start(dp);
5046 
5047 	DPRINTF(2, (CE_CONT, "!gem_do_attach: return: success"));
5048 	return (dp);
5049 
5050 err_unregister:
5051 	(void) mac_unregister(dp->mh);
5052 err_release_stats:
5053 	/* release NDD resources */
5054 	gem_nd_cleanup(dp);
5055 
5056 err_free_ring:
5057 	gem_free_memory(dp);
5058 err_free_regs:
5059 	ddi_regs_map_free(&dp->regs_handle);
5060 err_free_locks:
5061 	mutex_destroy(&dp->xmitlock);
5062 	mutex_destroy(&dp->intrlock);
5063 	cv_destroy(&dp->tx_drain_cv);
5064 err_free_private:
5065 	if (macp) {
5066 		mac_free(macp);
5067 	}
5068 	kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(gc));
5069 
5070 	return (NULL);
5071 }
5072 
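/*
 * gem_do_detach: common detach processing.
 * Unregisters each port from the GLDv3 framework, removes the interrupt
 * handler, releases ndd, DMA and lock resources, and finally frees the
 * register mapping and the shared private area.
 */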
5073 int
5074 gem_do_detach(dev_info_t *dip)
5075 {
5076 	struct gem_dev	*dp;
5077 	struct gem_dev	*tmp;
5078 	caddr_t		private;
5079 	int		priv_size;
5080 	ddi_acc_handle_t	rh;
5081 
5082 	dp = GEM_GET_DEV(dip);
5083 	if (dp == NULL) {
5084 		return (DDI_SUCCESS);
5085 	}
5086 
5087 	rh = dp->regs_handle;
5088 	private = dp->private;
5089 	priv_size = dp->priv_size;
5090 
5091 	while (dp) {
5092 		/* unregister with gld v3 */
5093 		if (mac_unregister(dp->mh) != 0) {
5094 			return (DDI_FAILURE);
5095 		}
5096 
5097 		/* ensure no rx buffers are still in use */
5098 		if (dp->rx_buf_allocated != dp->rx_buf_freecnt) {
5099 			/* resource is busy */
5100 			cmn_err(CE_PANIC,
5101 			    "!%s: %s: rxbuf is busy: allocated:%d, freecnt:%d",
5102 			    dp->name, __func__,
5103 			    dp->rx_buf_allocated, dp->rx_buf_freecnt);
5104 			/* NOT REACHED */
5105 		}
5106 
5107 		/* stop mii link watcher */
5108 		gem_mii_stop(dp);
5109 
5110 		/* unregister interrupt handler */
5111 		if (dp->misc_flag & GEM_SOFTINTR) {
5112 			ddi_remove_softintr(dp->soft_id);
5113 		} else if ((dp->misc_flag & GEM_NOINTR) == 0) {
5114 			ddi_remove_intr(dip, 0, dp->iblock_cookie);
5115 		} else {
5116 			/* stop interrupt watcher */
5117 			if (dp->intr_watcher_id) {
5118 				while (untimeout(dp->intr_watcher_id) == -1)
5119 					;
5120 				dp->intr_watcher_id = 0;
5121 			}
5122 		}
5123 
5124 		/* release NDD resources */
5125 		gem_nd_cleanup(dp);
5126 		/* release buffers, descriptors and dma resources */
5127 		gem_free_memory(dp);
5128 
5129 		/* release locks and condition variables */
5130 		mutex_destroy(&dp->xmitlock);
5131 		mutex_destroy(&dp->intrlock);
5132 		cv_destroy(&dp->tx_drain_cv);
5133 
5134 		/* release basic memory resources */
5135 		tmp = dp->next;
5136 		kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(&dp->gc));
5137 		dp = tmp;
5138 	}
5139 
5140 	/* release common private memory for the nic */
5141 	kmem_free(private, priv_size);
5142 
5143 	/* release register mapping resources */
5144 	ddi_regs_map_free(&rh);
5145 
5146 	DPRINTF(2, (CE_CONT, "!%s%d: gem_do_detach: return: success",
5147 	    ddi_driver_name(dip), ddi_get_instance(dip)));
5148 
5149 	return (DDI_SUCCESS);
5150 }
5151 
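/*
 * gem_suspend: DDI_SUSPEND support.
 * Stops the link and tx timeout watchers and the mac for each port,
 * then marks the device suspended so that no further register access
 * occurs.
 */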
5152 int
5153 gem_suspend(dev_info_t *dip)
5154 {
5155 	struct gem_dev	*dp;
5156 
5157 	/*
5158 	 * stop the device
5159 	 */
5160 	dp = GEM_GET_DEV(dip);
5161 	ASSERT(dp);
5162 
5163 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
5164 
5165 	for (; dp; dp = dp->next) {
5166 
5167 		/* stop mii link watcher */
5168 		gem_mii_stop(dp);
5169 
5170 		/* stop interrupt watcher for no-intr mode */
5171 		if (dp->misc_flag & GEM_NOINTR) {
5172 			if (dp->intr_watcher_id) {
5173 				while (untimeout(dp->intr_watcher_id) == -1)
5174 					;
5175 			}
5176 			dp->intr_watcher_id = 0;
5177 		}
5178 
5179 		/* stop tx timeout watcher */
5180 		if (dp->timeout_id) {
5181 			while (untimeout(dp->timeout_id) == -1)
5182 				;
5183 			dp->timeout_id = 0;
5184 		}
5185 
5186 		/* make the nic state inactive */
5187 		mutex_enter(&dp->intrlock);
5188 		(void) gem_mac_stop(dp, 0);
5189 		ASSERT(!dp->mac_active);
5190 
5191 		/* no further register access */
5192 		dp->mac_suspended = B_TRUE;
5193 		mutex_exit(&dp->intrlock);
5194 	}
5195 
5196 	/* XXX - power down the nic */
5197 
5198 	return (DDI_SUCCESS);
5199 }
5200 
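/*
 * gem_resume: DDI_RESUME support.
 * Resets and reinitializes the chip and PHY for each port, restores the
 * rx filter and restarts the watchers and the mac.
 */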
5201 int
5202 gem_resume(dev_info_t *dip)
5203 {
5204 	struct gem_dev	*dp;
5205 
5206 	/*
5207 	 * restart the device
5208 	 */
5209 	dp = GEM_GET_DEV(dip);
5210 	ASSERT(dp);
5211 
5212 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
5213 
5214 	for (; dp; dp = dp->next) {
5215 
5216 		/*
5217 		 * Bring up the nic after power up
5218 		 */
5219 
5220 		/* the gem_xxx.c layer sets up the power management state. */
5221 		ASSERT(!dp->mac_active);
5222 
5223 		/* reset the chip, because we are just after power up. */
5224 		mutex_enter(&dp->intrlock);
5225 
5226 		dp->mac_suspended = B_FALSE;
5227 		dp->nic_state = NIC_STATE_STOPPED;
5228 
5229 		if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
5230 			cmn_err(CE_WARN, "%s: %s: failed to reset chip",
5231 			    dp->name, __func__);
5232 			mutex_exit(&dp->intrlock);
5233 			goto err;
5234 		}
5235 		mutex_exit(&dp->intrlock);
5236 
5237 		/* initialize mii phy because we are just after power up */
5238 		if (dp->gc.gc_mii_init) {
5239 			(void) (*dp->gc.gc_mii_init)(dp);
5240 		}
5241 
5242 		if (dp->misc_flag & GEM_NOINTR) {
5243 			/*
5244 			 * schedule first call of gem_intr_watcher
5245 			 * instead of interrupts.
5246 			 */
5247 			dp->intr_watcher_id =
5248 			    timeout((void (*)(void *))gem_intr_watcher,
5249 			    (void *)dp, drv_usectohz(3*1000000));
5250 		}
5251 
5252 		/* restart mii link watcher */
5253 		gem_mii_start(dp);
5254 
5255 		/* restart mac */
5256 		mutex_enter(&dp->intrlock);
5257 
5258 		if (gem_mac_init(dp) != GEM_SUCCESS) {
5259 			mutex_exit(&dp->intrlock);
5260 			goto err_reset;
5261 		}
5262 		dp->nic_state = NIC_STATE_INITIALIZED;
5263 
5264 		/* set up the media mode if the link is already up */
5265 		if (dp->mii_state == MII_STATE_LINKUP) {
5266 			if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
5267 				mutex_exit(&dp->intrlock);
5268 				goto err_reset;
5269 			}
5270 		}
5271 
5272 		/* enable mac address and rx filter */
5273 		dp->rxmode |= RXMODE_ENABLE;
5274 		if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
5275 			mutex_exit(&dp->intrlock);
5276 			goto err_reset;
5277 		}
5278 		dp->nic_state = NIC_STATE_ONLINE;
5279 
5280 		/* restart tx timeout watcher */
5281 		dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
5282 		    (void *)dp,
5283 		    dp->gc.gc_tx_timeout_interval);
5284 
5285 		/* now the nic is fully functional */
5286 		if (dp->mii_state == MII_STATE_LINKUP) {
5287 			if (gem_mac_start(dp) != GEM_SUCCESS) {
5288 				mutex_exit(&dp->intrlock);
5289 				goto err_reset;
5290 			}
5291 		}
5292 		mutex_exit(&dp->intrlock);
5293 	}
5294 
5295 	return (DDI_SUCCESS);
5296 
5297 err_reset:
5298 	if (dp->intr_watcher_id) {
5299 		while (untimeout(dp->intr_watcher_id) == -1)
5300 			;
5301 		dp->intr_watcher_id = 0;
5302 	}
5303 	mutex_enter(&dp->intrlock);
5304 	(*dp->gc.gc_reset_chip)(dp);
5305 	dp->nic_state = NIC_STATE_STOPPED;
5306 	mutex_exit(&dp->intrlock);
5307 
5308 err:
5309 	return (DDI_FAILURE);
5310 }
5311 
5312 /*
5313  * misc routines for PCI
5314  */
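/*
 * gem_search_pci_cap: walk the PCI capability list in configuration
 * space and return the offset of the capability with the given id,
 * or 0 if it is not present.
 */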
5315 uint8_t
5316 gem_search_pci_cap(dev_info_t *dip,
5317 		ddi_acc_handle_t conf_handle, uint8_t target)
5318 {
5319 	uint8_t		pci_cap_ptr;
5320 	uint32_t	pci_cap;
5321 
5322 	/* walk the pci capability list looking for the target capability */
5323 	pci_cap_ptr = pci_config_get8(conf_handle, PCI_CONF_CAP_PTR);
5324 	while (pci_cap_ptr) {
5325 		/* read pci capability header */
5326 		pci_cap = pci_config_get32(conf_handle, pci_cap_ptr);
5327 		if ((pci_cap & 0xff) == target) {
5328 			/* found */
5329 			break;
5330 		}
5331 		/* get next_ptr */
5332 		pci_cap_ptr = (pci_cap >> 8) & 0xff;
5333 	}
5334 	return (pci_cap_ptr);
5335 }
5336 
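/*
 * gem_pci_set_power_state: move the device into the given PCI power
 * state (D0-D3) by updating the PMCSR register of its power management
 * capability.
 */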
5337 int
5338 gem_pci_set_power_state(dev_info_t *dip,
5339 		ddi_acc_handle_t conf_handle, uint_t new_mode)
5340 {
5341 	uint8_t		pci_cap_ptr;
5342 	uint32_t	pmcsr;
5343 	uint_t		unit;
5344 	const char	*drv_name;
5345 
5346 	ASSERT(new_mode < 4);
5347 
5348 	unit = ddi_get_instance(dip);
5349 	drv_name = ddi_driver_name(dip);
5350 
5351 	/* search for the power management capability */
5352 	pci_cap_ptr = gem_search_pci_cap(dip, conf_handle, PCI_CAP_ID_PM);
5353 
5354 	if (pci_cap_ptr == 0) {
5355 		cmn_err(CE_CONT,
5356 		    "!%s%d: doesn't have pci power management capability",
5357 		    drv_name, unit);
5358 		return (DDI_FAILURE);
5359 	}
5360 
5361 	/* read the power management control/status register */
5362 	pmcsr = pci_config_get32(conf_handle, pci_cap_ptr + PCI_PMCSR);
5363 
5364 	DPRINTF(0, (CE_CONT,
5365 	    "!%s%d: pmc found at 0x%x: pmcsr: 0x%08x",
5366 	    drv_name, unit, pci_cap_ptr, pmcsr));
5367 
5368 	/*
5369 	 * Is the requested power mode supported?
5370 	 */
5371 	/* not yet */
5372 
5373 	/*
5374 	 * move to new mode
5375 	 */
5376 	pmcsr = (pmcsr & ~PCI_PMCSR_STATE_MASK) | new_mode;
5377 	pci_config_put32(conf_handle, pci_cap_ptr + PCI_PMCSR, pmcsr);
5378 
5379 	return (DDI_SUCCESS);
5380 }
5381 
5382 /*
5383  * select a suitable register set, identified either by its address space
5384  * type or by its register offset in PCI config space
5385  */
5386 int
5387 gem_pci_regs_map_setup(dev_info_t *dip, uint32_t which, uint32_t mask,
5388 	struct ddi_device_acc_attr *attrp,
5389 	caddr_t *basep, ddi_acc_handle_t *hp)
5390 {
5391 	struct pci_phys_spec	*regs;
5392 	uint_t		len;
5393 	uint_t		unit;
5394 	uint_t		n;
5395 	uint_t		i;
5396 	int		ret;
5397 	const char	*drv_name;
5398 
5399 	unit = ddi_get_instance(dip);
5400 	drv_name = ddi_driver_name(dip);
5401 
5402 	/* Search IO-range or memory-range to be mapped */
5403 	regs = NULL;
5404 	len  = 0;
5405 
5406 	if ((ret = ddi_prop_lookup_int_array(
5407 	    DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
5408 	    "reg", (void *)&regs, &len)) != DDI_PROP_SUCCESS) {
5409 		cmn_err(CE_WARN,
5410 		    "!%s%d: failed to get reg property (ret:%d)",
5411 		    drv_name, unit, ret);
5412 		return (DDI_FAILURE);
5413 	}
5414 	n = len / (sizeof (struct pci_phys_spec) / sizeof (int));
5415 
5416 	ASSERT(regs != NULL && len > 0);
5417 
5418 #if GEM_DEBUG_LEVEL > 0
5419 	for (i = 0; i < n; i++) {
5420 		cmn_err(CE_CONT,
5421 		    "!%s%d: regs[%d]: %08x.%08x.%08x.%08x.%08x",
5422 		    drv_name, unit, i,
5423 		    regs[i].pci_phys_hi,
5424 		    regs[i].pci_phys_mid,
5425 		    regs[i].pci_phys_low,
5426 		    regs[i].pci_size_hi,
5427 		    regs[i].pci_size_low);
5428 	}
5429 #endif
5430 	for (i = 0; i < n; i++) {
5431 		if ((regs[i].pci_phys_hi & mask) == which) {
5432 			/* it's the requested space */
5433 			ddi_prop_free(regs);
5434 			goto address_range_found;
5435 		}
5436 	}
5437 	ddi_prop_free(regs);
5438 	return (DDI_FAILURE);
5439 
5440 address_range_found:
5441 	if ((ret = ddi_regs_map_setup(dip, i, basep, 0, 0, attrp, hp))
5442 	    != DDI_SUCCESS) {
5443 		cmn_err(CE_CONT,
5444 		    "!%s%d: ddi_regs_map_setup failed (ret:%d)",
5445 		    drv_name, unit, ret);
5446 	}
5447 
5448 	return (ret);
5449 }
5450 
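/*
 * gem_mod_init/gem_mod_fini: thin wrappers around mac_init_ops() and
 * mac_fini_ops() for use from the driver's _init/_fini entry points.
 */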
5451 void
5452 gem_mod_init(struct dev_ops *dop, char *name)
5453 {
5454 	mac_init_ops(dop, name);
5455 }
5456 
5457 void
5458 gem_mod_fini(struct dev_ops *dop)
5459 {
5460 	mac_fini_ops(dop);
5461 }
5462