xref: /titanic_52/usr/src/uts/common/io/sfe/sfe_util.c (revision c1ecd8b9404ee0d96d93f02e82c441b9bb149a3d)
1 /*
2  * sfe_util.c: general ethernet mac driver framework version 2.6
3  *
4  * Copyright (c) 2002-2008 Masayuki Murayama.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  *    this list of conditions and the following disclaimer in the documentation
14  *    and/or other materials provided with the distribution.
15  *
16  * 3. Neither the name of the author nor the names of its contributors may be
17  *    used to endorse or promote products derived from this software without
18  *    specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31  * DAMAGE.
32  */
33 
34 #pragma ident	"%Z%%M%	%I%	%E% SMI"	/* sfe device driver */
35 
36 /*
37  * System Header files.
38  */
39 #include <sys/types.h>
40 #include <sys/conf.h>
41 #include <sys/debug.h>
42 #include <sys/kmem.h>
43 #include <sys/vtrace.h>
44 #include <sys/ethernet.h>
45 #include <sys/modctl.h>
46 #include <sys/errno.h>
47 #include <sys/ddi.h>
48 #include <sys/sunddi.h>
49 #include <sys/stream.h>		/* required for MBLK* */
50 #include <sys/strsun.h>		/* required for mionack() */
51 #include <sys/byteorder.h>
52 #include <sys/pci.h>
53 #include <inet/common.h>
54 #include <inet/led.h>
55 #include <inet/mi.h>
56 #include <inet/nd.h>
57 #include <sys/crc32.h>
58 
59 #include <sys/note.h>
60 
61 #include "sfe_mii.h"
62 #include "sfe_util.h"
63 
64 
65 
66 extern char ident[];
67 
68 /* Debugging support */
69 #ifdef GEM_DEBUG_LEVEL
70 static int gem_debug = GEM_DEBUG_LEVEL;
71 #define	DPRINTF(n, args)	if (gem_debug > (n)) cmn_err args
72 #else
73 #define	DPRINTF(n, args)
74 #undef ASSERT
75 #define	ASSERT(x)
76 #endif
77 
78 #define	IOC_LINESIZE	0x40	/* Is it right for amd64? */
79 
80 /*
81  * Useful macros and typedefs
82  */
83 #define	ROUNDUP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
84 
85 #define	GET_NET16(p)	((((uint8_t *)(p))[0] << 8)| ((uint8_t *)(p))[1])
86 #define	GET_ETHERTYPE(p)	GET_NET16(((uint8_t *)(p)) + ETHERADDRL*2)
87 
88 #define	GET_IPTYPEv4(p)	(((uint8_t *)(p))[sizeof (struct ether_header) + 9])
89 #define	GET_IPTYPEv6(p)	(((uint8_t *)(p))[sizeof (struct ether_header) + 6])
90 
91 
92 #ifndef INT32_MAX
93 #define	INT32_MAX	0x7fffffff
94 #endif
95 
96 #define	VTAG_OFF	(ETHERADDRL*2)
97 #ifndef VTAG_SIZE
98 #define	VTAG_SIZE	4
99 #endif
100 #ifndef VTAG_TPID
101 #define	VTAG_TPID	0x8100U
102 #endif
103 
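/*
 * GET_TXBUF(dp, sn) returns the txbuf management structure for tx
 * sequence number sn.  tx_slots_base is folded in so that sequence
 * numbers keep mapping to the same physical slots across ring
 * re-initialization (see gem_init_tx_ring below).
 */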
104 #define	GET_TXBUF(dp, sn)	\
105 	&(dp)->tx_buf[SLOT((dp)->tx_slots_base + (sn), (dp)->gc.gc_tx_buf_size)]
106 
107 #ifndef offsetof
108 #define	offsetof(t, m)	((long)&(((t *) 0)->m))
109 #endif
110 #define	TXFLAG_VTAG(flag)	\
111 	(((flag) & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT)
112 
113 #define	MAXPKTBUF(dp)	\
114 	((dp)->mtu + sizeof (struct ether_header) + VTAG_SIZE + ETHERFCSL)
115 
116 #define	WATCH_INTERVAL_FAST	drv_usectohz(100*1000)	/* 100mS */
117 #define	BOOLEAN(x)	((x) != 0)
118 
119 /*
120  * Macros to distinguish chip generations.
121  */
122 
123 /*
124  * Private functions
125  */
126 static void gem_mii_start(struct gem_dev *);
127 static void gem_mii_stop(struct gem_dev *);
128 
129 /* local buffer management */
130 static void gem_nd_setup(struct gem_dev *dp);
131 static void gem_nd_cleanup(struct gem_dev *dp);
132 static int gem_alloc_memory(struct gem_dev *);
133 static void gem_free_memory(struct gem_dev *);
134 static void gem_init_rx_ring(struct gem_dev *);
135 static void gem_init_tx_ring(struct gem_dev *);
136 __INLINE__ static void gem_append_rxbuf(struct gem_dev *, struct rxbuf *);
137 
138 static void gem_tx_timeout(struct gem_dev *);
139 static void gem_mii_link_watcher(struct gem_dev *dp);
140 static int gem_mac_init(struct gem_dev *dp);
141 static int gem_mac_start(struct gem_dev *dp);
142 static int gem_mac_stop(struct gem_dev *dp, uint_t flags);
143 static void gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp);
144 
145 static	struct ether_addr	gem_etherbroadcastaddr = {
146 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
147 };
148 
149 int gem_speed_value[] = {10, 100, 1000};
150 
151 /* ============================================================== */
152 /*
153  * Misc runtime routines
154  */
155 /* ============================================================== */
156 /*
157  * Ether CRC calculation according to 21143 data sheet
158  */
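/*
 * Note that gem_ether_crc_le() below computes the CRC over ETHERADDRL
 * bytes of the given buffer; its len argument is currently unused.
 */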
159 uint32_t
160 gem_ether_crc_le(const uint8_t *addr, int len)
161 {
162 	uint32_t	crc;
163 
164 	CRC32(crc, addr, ETHERADDRL, 0xffffffffU, crc32_table);
165 	return (crc);
166 }
167 
168 uint32_t
169 gem_ether_crc_be(const uint8_t *addr, int len)
170 {
171 	int		idx;
172 	int		bit;
173 	uint_t		data;
174 	uint32_t	crc;
175 #define	CRC32_POLY_BE	0x04c11db7
176 
177 	crc = 0xffffffff;
178 	for (idx = 0; idx < len; idx++) {
179 		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) {
180 			crc = (crc << 1)
181 			    ^ ((((crc >> 31) ^ data) & 1) ? CRC32_POLY_BE : 0);
182 		}
183 	}
184 	return (crc);
185 #undef	CRC32_POLY_BE
186 }
187 
188 int
189 gem_prop_get_int(struct gem_dev *dp, char *prop_template, int def_val)
190 {
191 	char	propname[32];
192 
193 	(void) sprintf(propname, prop_template, dp->name);
194 
195 	return (ddi_prop_get_int(DDI_DEV_T_ANY, dp->dip,
196 	    DDI_PROP_DONTPASS, propname, def_val));
197 }
198 
199 static int
200 gem_population(uint32_t x)
201 {
202 	int	i;
203 	int	cnt;
204 
205 	cnt = 0;
206 	for (i = 0; i < 32; i++) {
207 		if (x & (1 << i)) {
208 			cnt++;
209 		}
210 	}
211 	return (cnt);
212 }
213 
214 #ifdef GEM_DEBUG_LEVEL
215 #ifdef GEM_DEBUG_VLAN
216 static void
217 gem_dump_packet(struct gem_dev *dp, char *title, mblk_t *mp,
218     boolean_t check_cksum)
219 {
220 	char	msg[180];
221 	uint8_t	buf[18+20+20];
222 	uint8_t	*p;
223 	size_t	offset;
224 	uint_t	ethertype;
225 	uint_t	proto;
226 	uint_t	ipproto = 0;
227 	uint_t	iplen;
228 	uint_t	iphlen;
229 	uint_t	tcplen;
230 	uint_t	udplen;
231 	uint_t	cksum;
232 	int	rest;
233 	int	len;
234 	char	*bp;
235 	mblk_t	*tp;
236 	extern uint_t	ip_cksum(mblk_t *, int, uint32_t);
237 
238 	msg[0] = 0;
239 	bp = msg;
240 
241 	rest = sizeof (buf);
242 	offset = 0;
243 	for (tp = mp; tp; tp = tp->b_cont) {
244 		len = tp->b_wptr - tp->b_rptr;
245 		len = min(rest, len);
246 		bcopy(tp->b_rptr, &buf[offset], len);
247 		rest -= len;
248 		offset += len;
249 		if (rest == 0) {
250 			break;
251 		}
252 	}
253 
254 	offset = 0;
255 	p = &buf[offset];
256 
257 	/* ethernet address */
258 	sprintf(bp,
259 	    "ether: %02x:%02x:%02x:%02x:%02x:%02x"
260 	    " -> %02x:%02x:%02x:%02x:%02x:%02x",
261 	    p[6], p[7], p[8], p[9], p[10], p[11],
262 	    p[0], p[1], p[2], p[3], p[4], p[5]);
263 	bp = &msg[strlen(msg)];
264 
265 	/* vlan tag and ethertype */
266 	ethertype = GET_ETHERTYPE(p);
267 	if (ethertype == VTAG_TPID) {
268 		sprintf(bp, " vtag:0x%04x", GET_NET16(&p[14]));
269 		bp = &msg[strlen(msg)];
270 
271 		offset += VTAG_SIZE;
272 		p = &buf[offset];
273 		ethertype = GET_ETHERTYPE(p);
274 	}
275 	sprintf(bp, " type:%04x", ethertype);
276 	bp = &msg[strlen(msg)];
277 
278 	/* ethernet packet length */
279 	sprintf(bp, " mblklen:%d", msgdsize(mp));
280 	bp = &msg[strlen(msg)];
281 	if (mp->b_cont) {
282 		sprintf(bp, "(");
283 		bp = &msg[strlen(msg)];
284 		for (tp = mp; tp; tp = tp->b_cont) {
285 			if (tp == mp) {
286 				sprintf(bp, "%d", tp->b_wptr - tp->b_rptr);
287 			} else {
288 				sprintf(bp, "+%d", tp->b_wptr - tp->b_rptr);
289 			}
290 			bp = &msg[strlen(msg)];
291 		}
292 		sprintf(bp, ")");
293 		bp = &msg[strlen(msg)];
294 	}
295 
296 	if (ethertype != ETHERTYPE_IP) {
297 		goto x;
298 	}
299 
300 	/* ip address */
301 	offset += sizeof (struct ether_header);
302 	p = &buf[offset];
303 	ipproto = p[9];
304 	iplen = GET_NET16(&p[2]);
305 	sprintf(bp, ", ip: %d.%d.%d.%d -> %d.%d.%d.%d proto:%d iplen:%d",
306 	    p[12], p[13], p[14], p[15],
307 	    p[16], p[17], p[18], p[19],
308 	    ipproto, iplen);
309 	bp = (void *)&msg[strlen(msg)];
310 
311 	iphlen = (p[0] & 0xf) * 4;
312 
313 	/* cksum for pseudo header */
314 	cksum = *(uint16_t *)&p[12];
315 	cksum += *(uint16_t *)&p[14];
316 	cksum += *(uint16_t *)&p[16];
317 	cksum += *(uint16_t *)&p[18];
318 	cksum += BE_16(ipproto);
319 
320 	/* tcp or udp protocol header */
321 	offset += iphlen;
322 	p = &buf[offset];
323 	if (ipproto == IPPROTO_TCP) {
324 		tcplen = iplen - iphlen;
325 		sprintf(bp, ", tcp: len:%d cksum:%x",
326 		    tcplen, GET_NET16(&p[16]));
327 		bp = (void *)&msg[strlen(msg)];
328 
329 		if (check_cksum) {
330 			cksum += BE_16(tcplen);
331 			cksum = (uint16_t)ip_cksum(mp, offset, cksum);
332 			sprintf(bp, " (%s)",
333 			    (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
334 			bp = (void *)&msg[strlen(msg)];
335 		}
336 	} else if (ipproto == IPPROTO_UDP) {
337 		udplen = GET_NET16(&p[4]);
338 		sprintf(bp, ", udp: len:%d cksum:%x",
339 		    udplen, GET_NET16(&p[6]));
340 		bp = (void *)&msg[strlen(msg)];
341 
342 		if (GET_NET16(&p[6]) && check_cksum) {
343 			cksum += *(uint16_t *)&p[4];
344 			cksum = (uint16_t)ip_cksum(mp, offset, cksum);
345 			sprintf(bp, " (%s)",
346 			    (cksum == 0 || cksum == 0xffff) ? "ok" : "ng");
347 			bp = (void *)&msg[strlen(msg)];
348 		}
349 	}
350 x:
351 	cmn_err(CE_CONT, "!%s: %s: %s", dp->name, title, msg);
352 }
353 #endif /* GEM_DEBUG_VLAN */
354 #endif /* GEM_DEBUG_LEVEL */
355 
356 /* ============================================================== */
357 /*
358  * IO cache flush
359  */
360 /* ============================================================== */
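/*
 * The descriptor sync routines below flush or invalidate the rx/tx
 * descriptor rings.  When the requested range wraps around the end of
 * the ring, it is split into two ddi_dma_sync() calls: one for the
 * portion that wrapped to the top of the ring and one for the portion
 * at the tail.
 */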
361 __INLINE__ void
362 gem_rx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
363 {
364 	int	n;
365 	int	m;
366 	int	rx_desc_unit_shift = dp->gc.gc_rx_desc_unit_shift;
367 
368 	/* sync active descriptors */
369 	if (rx_desc_unit_shift < 0 || nslot == 0) {
370 		/* no rx descriptor ring */
371 		return;
372 	}
373 
374 	n = dp->gc.gc_rx_ring_size - head;
375 	if ((m = nslot - n) > 0) {
376 		(void) ddi_dma_sync(dp->desc_dma_handle,
377 		    (off_t)0,
378 		    (size_t)(m << rx_desc_unit_shift),
379 		    how);
380 		nslot = n;
381 	}
382 
383 	(void) ddi_dma_sync(dp->desc_dma_handle,
384 	    (off_t)(head << rx_desc_unit_shift),
385 	    (size_t)(nslot << rx_desc_unit_shift),
386 	    how);
387 }
388 
389 __INLINE__ void
390 gem_tx_desc_dma_sync(struct gem_dev *dp, int head, int nslot, int how)
391 {
392 	int	n;
393 	int	m;
394 	int	tx_desc_unit_shift = dp->gc.gc_tx_desc_unit_shift;
395 
396 	/* sync active descriptors */
397 	if (tx_desc_unit_shift < 0 || nslot == 0) {
398 		/* no tx descriptor ring */
399 		return;
400 	}
401 
402 	n = dp->gc.gc_tx_ring_size - head;
403 	if ((m = nslot - n) > 0) {
404 		(void) ddi_dma_sync(dp->desc_dma_handle,
405 		    (off_t)(dp->tx_ring_dma - dp->rx_ring_dma),
406 		    (size_t)(m << tx_desc_unit_shift),
407 		    how);
408 		nslot = n;
409 	}
410 
411 	(void) ddi_dma_sync(dp->desc_dma_handle,
412 	    (off_t)((head << tx_desc_unit_shift)
413 	    + (dp->tx_ring_dma - dp->rx_ring_dma)),
414 	    (size_t)(nslot << tx_desc_unit_shift),
415 	    how);
416 }
417 
418 static void
419 gem_rx_start_default(struct gem_dev *dp, int head, int nslot)
420 {
421 	gem_rx_desc_dma_sync(dp,
422 	    SLOT(head, dp->gc.gc_rx_ring_size), nslot,
423 	    DDI_DMA_SYNC_FORDEV);
424 }
425 
426 /* ============================================================== */
427 /*
428  * Buffer management
429  */
430 /* ============================================================== */
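/*
 * gem_dump_txbuf: log the current head/tail positions of the tx
 * buffer and descriptor rings for debugging.
 */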
431 static void
432 gem_dump_txbuf(struct gem_dev *dp, int level, const char *title)
433 {
434 	cmn_err(level,
435 	    "!%s: %s: tx_active: %d[%d] %d[%d] (+%d), "
436 	    "tx_softq: %d[%d] %d[%d] (+%d), "
437 	    "tx_free: %d[%d] %d[%d] (+%d), "
438 	    "tx_desc: %d[%d] %d[%d] (+%d), "
439 	    "intr: %d[%d] (+%d), ",
440 	    dp->name, title,
441 	    dp->tx_active_head,
442 	    SLOT(dp->tx_active_head, dp->gc.gc_tx_buf_size),
443 	    dp->tx_active_tail,
444 	    SLOT(dp->tx_active_tail, dp->gc.gc_tx_buf_size),
445 	    dp->tx_active_tail - dp->tx_active_head,
446 	    dp->tx_softq_head,
447 	    SLOT(dp->tx_softq_head, dp->gc.gc_tx_buf_size),
448 	    dp->tx_softq_tail,
449 	    SLOT(dp->tx_softq_tail, dp->gc.gc_tx_buf_size),
450 	    dp->tx_softq_tail - dp->tx_softq_head,
451 	    dp->tx_free_head,
452 	    SLOT(dp->tx_free_head, dp->gc.gc_tx_buf_size),
453 	    dp->tx_free_tail,
454 	    SLOT(dp->tx_free_tail, dp->gc.gc_tx_buf_size),
455 	    dp->tx_free_tail - dp->tx_free_head,
456 	    dp->tx_desc_head,
457 	    SLOT(dp->tx_desc_head, dp->gc.gc_tx_ring_size),
458 	    dp->tx_desc_tail,
459 	    SLOT(dp->tx_desc_tail, dp->gc.gc_tx_ring_size),
460 	    dp->tx_desc_tail - dp->tx_desc_head,
461 	    dp->tx_desc_intr,
462 	    SLOT(dp->tx_desc_intr, dp->gc.gc_tx_ring_size),
463 	    dp->tx_desc_intr - dp->tx_desc_head);
464 }
465 
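/*
 * gem_free_rxbuf: return an rx buffer to the per-device free list.
 * The DMA mapping is left intact so that the buffer can be handed out
 * again by gem_get_rxbuf() without rebinding.
 */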
466 static void
467 gem_free_rxbuf(struct rxbuf *rbp)
468 {
469 	struct gem_dev	*dp;
470 
471 	dp = rbp->rxb_devp;
472 	ASSERT(mutex_owned(&dp->intrlock));
473 	rbp->rxb_next = dp->rx_buf_freelist;
474 	dp->rx_buf_freelist = rbp;
475 	dp->rx_buf_freecnt++;
476 }
477 
478 /*
479  * gem_get_rxbuf: supply a receive buffer which has been mapped into
480  * DMA space.
481  */
482 struct rxbuf *
483 gem_get_rxbuf(struct gem_dev *dp, int cansleep)
484 {
485 	struct rxbuf		*rbp;
486 	uint_t			count = 0;
487 	int			i;
488 	int			err;
489 
490 	ASSERT(mutex_owned(&dp->intrlock));
491 
492 	DPRINTF(3, (CE_CONT, "!gem_get_rxbuf: called freecnt:%d",
493 	    dp->rx_buf_freecnt));
494 	/*
495 	 * Get rx buffer management structure
496 	 */
497 	rbp = dp->rx_buf_freelist;
498 	if (rbp) {
499 		/* get one from the recycle list */
500 		ASSERT(dp->rx_buf_freecnt > 0);
501 
502 		dp->rx_buf_freelist = rbp->rxb_next;
503 		dp->rx_buf_freecnt--;
504 		rbp->rxb_next = NULL;
505 		return (rbp);
506 	}
507 
508 	/*
509 	 * Allocate a rx buffer management structure
510 	 */
511 	rbp = kmem_zalloc(sizeof (*rbp), cansleep ? KM_SLEEP : KM_NOSLEEP);
512 	if (rbp == NULL) {
513 		/* no memory */
514 		return (NULL);
515 	}
516 
517 	/*
518 	 * Prepare a back pointer to the device structure which will be
519 	 * referred to when freeing the buffer later.
520 	 */
521 	rbp->rxb_devp = dp;
522 
523 	/* allocate a dma handle for rx data buffer */
524 	if ((err = ddi_dma_alloc_handle(dp->dip,
525 	    &dp->gc.gc_dma_attr_rxbuf,
526 	    (cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT),
527 	    NULL, &rbp->rxb_dh)) != DDI_SUCCESS) {
528 
529 		cmn_err(CE_WARN,
530 		    "!%s: %s: ddi_dma_alloc_handle:1 failed, err=%d",
531 		    dp->name, __func__, err);
532 
533 		kmem_free(rbp, sizeof (struct rxbuf));
534 		return (NULL);
535 	}
536 
537 	/* allocate a bounce buffer for rx */
538 	if ((err = ddi_dma_mem_alloc(rbp->rxb_dh,
539 	    ROUNDUP(dp->rx_buf_len, IOC_LINESIZE),
540 	    &dp->gc.gc_buf_attr,
541 		/*
542 		 * if the nic requires a header at the top of receive buffers,
543 		 * it may access the rx buffer randomly.
544 		 */
545 	    (dp->gc.gc_rx_header_len > 0)
546 	    ? DDI_DMA_CONSISTENT : DDI_DMA_STREAMING,
547 	    cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
548 	    NULL,
549 	    &rbp->rxb_buf, &rbp->rxb_buf_len,
550 	    &rbp->rxb_bah)) != DDI_SUCCESS) {
551 
552 		cmn_err(CE_WARN,
553 		    "!%s: %s: ddi_dma_mem_alloc: failed, err=%d",
554 		    dp->name, __func__, err);
555 
556 		ddi_dma_free_handle(&rbp->rxb_dh);
557 		kmem_free(rbp, sizeof (struct rxbuf));
558 		return (NULL);
559 	}
560 
561 	/* Map the bounce buffer into DMA space */
562 	if ((err = ddi_dma_addr_bind_handle(rbp->rxb_dh,
563 	    NULL, rbp->rxb_buf, dp->rx_buf_len,
564 	    ((dp->gc.gc_rx_header_len > 0)
565 	    ?(DDI_DMA_RDWR | DDI_DMA_CONSISTENT)
566 	    :(DDI_DMA_READ | DDI_DMA_STREAMING)),
567 	    cansleep ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT,
568 	    NULL,
569 	    rbp->rxb_dmacookie,
570 	    &count)) != DDI_DMA_MAPPED) {
571 
572 		ASSERT(err != DDI_DMA_INUSE);
573 		DPRINTF(0, (CE_WARN,
574 		    "!%s: %s: ddi_dma_addr_bind_handle: failed, err=%d",
575 		    dp->name, __func__, err));
576 
577 		/*
578 		 * we failed to allocate a dma resource
579 		 * for the rx bounce buffer.
580 		 */
581 		ddi_dma_mem_free(&rbp->rxb_bah);
582 		ddi_dma_free_handle(&rbp->rxb_dh);
583 		kmem_free(rbp, sizeof (struct rxbuf));
584 		return (NULL);
585 	}
586 
587 	/* correct the rest of the DMA mapping */
588 	for (i = 1; i < count; i++) {
589 		ddi_dma_nextcookie(rbp->rxb_dh, &rbp->rxb_dmacookie[i]);
590 	}
591 	rbp->rxb_nfrags = count;
592 
593 	/* Now we successfully prepared an rx buffer */
594 	dp->rx_buf_allocated++;
595 
596 	return (rbp);
597 }
598 
599 /* ============================================================== */
600 /*
601  * memory resource management
602  */
603 /* ============================================================== */
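/*
 * gem_alloc_memory below allocates the rx descriptors, tx descriptors
 * and the optional io area as one contiguous DMA-consistent region,
 * and then allocates and binds a DMA bounce buffer for each tx buffer
 * management structure.
 */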
604 static int
605 gem_alloc_memory(struct gem_dev *dp)
606 {
607 	caddr_t			ring;
608 	caddr_t			buf;
609 	size_t			req_size;
610 	size_t			ring_len;
611 	size_t			buf_len;
612 	ddi_dma_cookie_t	ring_cookie;
613 	ddi_dma_cookie_t	buf_cookie;
614 	uint_t			count;
615 	int			i;
616 	int			err;
617 	struct txbuf		*tbp;
618 	int			tx_buf_len;
619 	ddi_dma_attr_t		dma_attr_txbounce;
620 
621 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
622 
623 	dp->desc_dma_handle = NULL;
624 	req_size = dp->rx_desc_size + dp->tx_desc_size + dp->gc.gc_io_area_size;
625 
626 	if (req_size > 0) {
627 		/*
628 		 * Alloc RX/TX descriptors and an io area.
629 		 */
630 		if ((err = ddi_dma_alloc_handle(dp->dip,
631 		    &dp->gc.gc_dma_attr_desc,
632 		    DDI_DMA_SLEEP, NULL,
633 		    &dp->desc_dma_handle)) != DDI_SUCCESS) {
634 			cmn_err(CE_WARN,
635 			    "!%s: %s: ddi_dma_alloc_handle failed: %d",
636 			    dp->name, __func__, err);
637 			return (ENOMEM);
638 		}
639 
640 		if ((err = ddi_dma_mem_alloc(dp->desc_dma_handle,
641 		    req_size, &dp->gc.gc_desc_attr,
642 		    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
643 		    &ring, &ring_len,
644 		    &dp->desc_acc_handle)) != DDI_SUCCESS) {
645 			cmn_err(CE_WARN,
646 			    "!%s: %s: ddi_dma_mem_alloc failed: "
647 			    "ret %d, request size: %d",
648 			    dp->name, __func__, err, (int)req_size);
649 			ddi_dma_free_handle(&dp->desc_dma_handle);
650 			return (ENOMEM);
651 		}
652 
653 		if ((err = ddi_dma_addr_bind_handle(dp->desc_dma_handle,
654 		    NULL, ring, ring_len,
655 		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
656 		    DDI_DMA_SLEEP, NULL,
657 		    &ring_cookie, &count)) != DDI_SUCCESS) {
658 			ASSERT(err != DDI_DMA_INUSE);
659 			cmn_err(CE_WARN,
660 			    "!%s: %s: ddi_dma_addr_bind_handle failed: %d",
661 			    dp->name, __func__, err);
662 			ddi_dma_mem_free(&dp->desc_acc_handle);
663 			ddi_dma_free_handle(&dp->desc_dma_handle);
664 			return (ENOMEM);
665 		}
666 		ASSERT(count == 1);
667 
668 		/* set base of rx descriptor ring */
669 		dp->rx_ring = ring;
670 		dp->rx_ring_dma = ring_cookie.dmac_laddress;
671 
672 		/* set base of tx descriptor ring */
673 		dp->tx_ring = dp->rx_ring + dp->rx_desc_size;
674 		dp->tx_ring_dma = dp->rx_ring_dma + dp->rx_desc_size;
675 
676 		/* set base of io area */
677 		dp->io_area = dp->tx_ring + dp->tx_desc_size;
678 		dp->io_area_dma = dp->tx_ring_dma + dp->tx_desc_size;
679 	}
680 
681 	/*
682 	 * Prepare DMA resources for tx packets
683 	 */
684 	ASSERT(dp->gc.gc_tx_buf_size > 0);
685 
686 	/* Special dma attribute for tx bounce buffers */
687 	dma_attr_txbounce = dp->gc.gc_dma_attr_txbuf;
688 	dma_attr_txbounce.dma_attr_sgllen = 1;
689 	dma_attr_txbounce.dma_attr_align =
690 	    max(dma_attr_txbounce.dma_attr_align, IOC_LINESIZE);
691 
692 	/* Size for tx bounce buffers must be max tx packet size. */
693 	tx_buf_len = MAXPKTBUF(dp);
694 	tx_buf_len = ROUNDUP(tx_buf_len, IOC_LINESIZE);
695 
696 	ASSERT(tx_buf_len >= ETHERMAX+ETHERFCSL);
697 
698 	for (i = 0, tbp = dp->tx_buf;
699 	    i < dp->gc.gc_tx_buf_size; i++, tbp++) {
700 
701 		/* setup bounce buffers for tx packets */
702 		if ((err = ddi_dma_alloc_handle(dp->dip,
703 		    &dma_attr_txbounce,
704 		    DDI_DMA_SLEEP, NULL,
705 		    &tbp->txb_bdh)) != DDI_SUCCESS) {
706 
707 			cmn_err(CE_WARN,
708 		    "!%s: %s ddi_dma_alloc_handle for bounce buffer failed:"
709 			    " err=%d, i=%d",
710 			    dp->name, __func__, err, i);
711 			goto err_alloc_dh;
712 		}
713 
714 		if ((err = ddi_dma_mem_alloc(tbp->txb_bdh,
715 		    tx_buf_len,
716 		    &dp->gc.gc_buf_attr,
717 		    DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
718 		    &buf, &buf_len,
719 		    &tbp->txb_bah)) != DDI_SUCCESS) {
720 			cmn_err(CE_WARN,
721 		    "!%s: %s: ddi_dma_mem_alloc for bounce buffer failed: "
722 			    "ret %d, request size %d",
723 			    dp->name, __func__, err, tx_buf_len);
724 			ddi_dma_free_handle(&tbp->txb_bdh);
725 			goto err_alloc_dh;
726 		}
727 
728 		if ((err = ddi_dma_addr_bind_handle(tbp->txb_bdh,
729 		    NULL, buf, buf_len,
730 		    DDI_DMA_WRITE | DDI_DMA_STREAMING,
731 		    DDI_DMA_SLEEP, NULL,
732 		    &buf_cookie, &count)) != DDI_SUCCESS) {
733 				ASSERT(err != DDI_DMA_INUSE);
734 				cmn_err(CE_WARN,
735 	"!%s: %s: ddi_dma_addr_bind_handle for bounce buffer failed: %d",
736 				    dp->name, __func__, err);
737 				ddi_dma_mem_free(&tbp->txb_bah);
738 				ddi_dma_free_handle(&tbp->txb_bdh);
739 				goto err_alloc_dh;
740 		}
741 		ASSERT(count == 1);
742 		tbp->txb_buf = buf;
743 		tbp->txb_buf_dma = buf_cookie.dmac_laddress;
744 	}
745 
746 	return (0);
747 
748 err_alloc_dh:
749 	if (dp->gc.gc_tx_buf_size > 0) {
750 		while (i-- > 0) {
751 			(void) ddi_dma_unbind_handle(dp->tx_buf[i].txb_bdh);
752 			ddi_dma_mem_free(&dp->tx_buf[i].txb_bah);
753 			ddi_dma_free_handle(&dp->tx_buf[i].txb_bdh);
754 		}
755 	}
756 
757 	if (dp->desc_dma_handle) {
758 		(void) ddi_dma_unbind_handle(dp->desc_dma_handle);
759 		ddi_dma_mem_free(&dp->desc_acc_handle);
760 		ddi_dma_free_handle(&dp->desc_dma_handle);
761 		dp->desc_dma_handle = NULL;
762 	}
763 
764 	return (ENOMEM);
765 }
766 
767 static void
768 gem_free_memory(struct gem_dev *dp)
769 {
770 	int		i;
771 	struct rxbuf	*rbp;
772 	struct txbuf	*tbp;
773 
774 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
775 
776 	/* Free TX/RX descriptors and tx padding buffer */
777 	if (dp->desc_dma_handle) {
778 		(void) ddi_dma_unbind_handle(dp->desc_dma_handle);
779 		ddi_dma_mem_free(&dp->desc_acc_handle);
780 		ddi_dma_free_handle(&dp->desc_dma_handle);
781 		dp->desc_dma_handle = NULL;
782 	}
783 
784 	/* Free dma handles for Tx */
785 	for (i = dp->gc.gc_tx_buf_size, tbp = dp->tx_buf; i--; tbp++) {
786 		/* Free bounce buffer associated to each txbuf */
787 		(void) ddi_dma_unbind_handle(tbp->txb_bdh);
788 		ddi_dma_mem_free(&tbp->txb_bah);
789 		ddi_dma_free_handle(&tbp->txb_bdh);
790 	}
791 
792 	/* Free rx buffer */
793 	while ((rbp = dp->rx_buf_freelist) != NULL) {
794 
795 		ASSERT(dp->rx_buf_freecnt > 0);
796 
797 		dp->rx_buf_freelist = rbp->rxb_next;
798 		dp->rx_buf_freecnt--;
799 
800 		/* release DMA mapping */
801 		ASSERT(rbp->rxb_dh != NULL);
802 
803 		/* free dma handles for rx buf */
804 		/* it always has a dma mapping */
805 		ASSERT(rbp->rxb_nfrags > 0);
806 		(void) ddi_dma_unbind_handle(rbp->rxb_dh);
807 
808 		/* free the associated bounce buffer and dma handle */
809 		ASSERT(rbp->rxb_bah != NULL);
810 		ddi_dma_mem_free(&rbp->rxb_bah);
811 		/* free the associated dma handle */
812 		ddi_dma_free_handle(&rbp->rxb_dh);
813 
814 		/* free the base memory of rx buffer management */
815 		kmem_free(rbp, sizeof (struct rxbuf));
816 	}
817 }
818 
819 /* ============================================================== */
820 /*
821  * Rx/Tx descriptor slot management
822  */
823 /* ============================================================== */
824 /*
825  * Initialize an empty rx ring.
826  */
827 static void
828 gem_init_rx_ring(struct gem_dev *dp)
829 {
830 	int		i;
831 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
832 
833 	DPRINTF(1, (CE_CONT, "!%s: %s ring_size:%d, buf_max:%d",
834 	    dp->name, __func__,
835 	    rx_ring_size, dp->gc.gc_rx_buf_max));
836 
837 	/* make a physical chain of rx descriptors */
838 	for (i = 0; i < rx_ring_size; i++) {
839 		(*dp->gc.gc_rx_desc_init)(dp, i);
840 	}
841 	gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
842 
843 	dp->rx_active_head = (seqnum_t)0;
844 	dp->rx_active_tail = (seqnum_t)0;
845 
846 	ASSERT(dp->rx_buf_head == (struct rxbuf *)NULL);
847 	ASSERT(dp->rx_buf_tail == (struct rxbuf *)NULL);
848 }
849 
850 /*
851  * Prepare rx buffers and put them into the rx buffer/descriptor ring.
852  */
853 static void
854 gem_prepare_rx_buf(struct gem_dev *dp)
855 {
856 	int		i;
857 	int		nrbuf;
858 	struct rxbuf	*rbp;
859 
860 	ASSERT(mutex_owned(&dp->intrlock));
861 
862 	/* Now we have no active buffers in rx ring */
863 
864 	nrbuf = min(dp->gc.gc_rx_ring_size, dp->gc.gc_rx_buf_max);
865 	for (i = 0; i < nrbuf; i++) {
866 		if ((rbp = gem_get_rxbuf(dp, B_TRUE)) == NULL) {
867 			break;
868 		}
869 		gem_append_rxbuf(dp, rbp);
870 	}
871 
872 	gem_rx_desc_dma_sync(dp,
873 	    0, dp->gc.gc_rx_ring_size, DDI_DMA_SYNC_FORDEV);
874 }
875 
876 /*
877  * Reclaim active rx buffers in rx buffer ring.
878  */
879 static void
880 gem_clean_rx_buf(struct gem_dev *dp)
881 {
882 	int		i;
883 	struct rxbuf	*rbp;
884 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
885 #ifdef GEM_DEBUG_LEVEL
886 	int		total;
887 #endif
888 	ASSERT(mutex_owned(&dp->intrlock));
889 
890 	DPRINTF(2, (CE_CONT, "!%s: %s: %d buffers are free",
891 	    dp->name, __func__, dp->rx_buf_freecnt));
892 	/*
893 	 * clean up HW descriptors
894 	 */
895 	for (i = 0; i < rx_ring_size; i++) {
896 		(*dp->gc.gc_rx_desc_clean)(dp, i);
897 	}
898 	gem_rx_desc_dma_sync(dp, 0, rx_ring_size, DDI_DMA_SYNC_FORDEV);
899 
900 #ifdef GEM_DEBUG_LEVEL
901 	total = 0;
902 #endif
903 	/*
904 	 * Reclaim allocated rx buffers
905 	 */
906 	while ((rbp = dp->rx_buf_head) != NULL) {
907 #ifdef GEM_DEBUG_LEVEL
908 		total++;
909 #endif
910 		/* remove the first one from rx buffer list */
911 		dp->rx_buf_head = rbp->rxb_next;
912 
913 		/* recycle the rxbuf */
914 		gem_free_rxbuf(rbp);
915 	}
916 	dp->rx_buf_tail = (struct rxbuf *)NULL;
917 
918 	DPRINTF(2, (CE_CONT,
919 	    "!%s: %s: %d buffers freed, total: %d free",
920 	    dp->name, __func__, total, dp->rx_buf_freecnt));
921 }
922 
923 /*
924  * Initialize an empty transmit buffer/descriptor ring
925  */
926 static void
927 gem_init_tx_ring(struct gem_dev *dp)
928 {
929 	int		i;
930 	int		tx_buf_size = dp->gc.gc_tx_buf_size;
931 	int		tx_ring_size = dp->gc.gc_tx_ring_size;
932 
933 	DPRINTF(2, (CE_CONT, "!%s: %s: ring_size:%d, buf_size:%d",
934 	    dp->name, __func__,
935 	    dp->gc.gc_tx_ring_size, dp->gc.gc_tx_buf_size));
936 
937 	ASSERT(!dp->mac_active);
938 
939 	/* initialize active list and free list */
940 	dp->tx_slots_base =
941 	    SLOT(dp->tx_slots_base + dp->tx_softq_head, tx_buf_size);
942 	dp->tx_softq_tail -= dp->tx_softq_head;
943 	dp->tx_softq_head = (seqnum_t)0;
944 
945 	dp->tx_active_head = dp->tx_softq_head;
946 	dp->tx_active_tail = dp->tx_softq_head;
947 
948 	dp->tx_free_head   = dp->tx_softq_tail;
949 	dp->tx_free_tail   = dp->gc.gc_tx_buf_limit;
950 
951 	dp->tx_desc_head = (seqnum_t)0;
952 	dp->tx_desc_tail = (seqnum_t)0;
953 	dp->tx_desc_intr = (seqnum_t)0;
954 
955 	for (i = 0; i < tx_ring_size; i++) {
956 		(*dp->gc.gc_tx_desc_init)(dp, i);
957 	}
958 	gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
959 }
960 
961 __INLINE__
962 static void
963 gem_txbuf_free_dma_resources(struct txbuf *tbp)
964 {
965 	if (tbp->txb_mp) {
966 		freemsg(tbp->txb_mp);
967 		tbp->txb_mp = NULL;
968 	}
969 	tbp->txb_nfrags = 0;
970 	tbp->txb_flag = 0;
971 }
972 #pragma inline(gem_txbuf_free_dma_resources)
973 
974 /*
975  * reclaim active tx buffers and reset positions in tx rings.
976  */
977 static void
978 gem_clean_tx_buf(struct gem_dev *dp)
979 {
980 	int		i;
981 	seqnum_t	head;
982 	seqnum_t	tail;
983 	seqnum_t	sn;
984 	struct txbuf	*tbp;
985 	int		tx_ring_size = dp->gc.gc_tx_ring_size;
986 #ifdef GEM_DEBUG_LEVEL
987 	int		err;
988 #endif
989 
990 	ASSERT(!dp->mac_active);
991 	ASSERT(dp->tx_busy == 0);
992 	ASSERT(dp->tx_softq_tail == dp->tx_free_head);
993 
994 	/*
995 	 * clean up all HW descriptors
996 	 */
997 	for (i = 0; i < tx_ring_size; i++) {
998 		(*dp->gc.gc_tx_desc_clean)(dp, i);
999 	}
1000 	gem_tx_desc_dma_sync(dp, 0, tx_ring_size, DDI_DMA_SYNC_FORDEV);
1001 
1002 	/* dequeue all active and loaded buffers */
1003 	head = dp->tx_active_head;
1004 	tail = dp->tx_softq_tail;
1005 
1006 	ASSERT(dp->tx_free_head - head >= 0);
1007 	tbp = GET_TXBUF(dp, head);
1008 	for (sn = head; sn != tail; sn++) {
1009 		gem_txbuf_free_dma_resources(tbp);
1010 		ASSERT(tbp->txb_mp == NULL);
1011 		dp->stats.errxmt++;
1012 		tbp = tbp->txb_next;
1013 	}
1014 
1015 #ifdef GEM_DEBUG_LEVEL
1016 	/* ensure that no dma resources for tx are in use now */
1017 	err = 0;
1018 	while (sn != head + dp->gc.gc_tx_buf_size) {
1019 		if (tbp->txb_mp || tbp->txb_nfrags) {
1020 			DPRINTF(0, (CE_CONT,
1021 			    "%s: %s: sn:%d[%d] mp:%p nfrags:%d",
1022 			    dp->name, __func__,
1023 			    sn, SLOT(sn, dp->gc.gc_tx_buf_size),
1024 			    tbp->txb_mp, tbp->txb_nfrags));
1025 			err = 1;
1026 		}
1027 		sn++;
1028 		tbp = tbp->txb_next;
1029 	}
1030 
1031 	if (err) {
1032 		gem_dump_txbuf(dp, CE_WARN,
1033 		    "gem_clean_tx_buf: tbp->txb_mp != NULL");
1034 	}
1035 #endif
1036 	/* recycle buffers, now no active tx buffers in the ring */
1037 	dp->tx_free_tail += tail - head;
1038 	ASSERT(dp->tx_free_tail == dp->tx_free_head + dp->gc.gc_tx_buf_limit);
1039 
1040 	/* fix positions in tx buffer rings */
1041 	dp->tx_active_head = dp->tx_free_head;
1042 	dp->tx_active_tail = dp->tx_free_head;
1043 	dp->tx_softq_head  = dp->tx_free_head;
1044 	dp->tx_softq_tail  = dp->tx_free_head;
1045 }
1046 
1047 /*
1048  * Reclaim transmitted buffers from tx buffer/descriptor ring.
1049  */
1050 __INLINE__ int
1051 gem_reclaim_txbuf(struct gem_dev *dp)
1052 {
1053 	struct txbuf	*tbp;
1054 	uint_t		txstat;
1055 	int		err = GEM_SUCCESS;
1056 	seqnum_t	head;
1057 	seqnum_t	tail;
1058 	seqnum_t	sn;
1059 	seqnum_t	desc_head;
1060 	int		tx_ring_size = dp->gc.gc_tx_ring_size;
1061 	uint_t (*tx_desc_stat)(struct gem_dev *dp,
1062 	    int slot, int ndesc) = dp->gc.gc_tx_desc_stat;
1063 	clock_t		now;
1064 
1065 	now = ddi_get_lbolt();
1066 	if (now == (clock_t)0) {
1067 		/* make non-zero timestamp */
1068 		now--;
1069 	}
1070 
1071 	mutex_enter(&dp->xmitlock);
1072 
1073 	head = dp->tx_active_head;
1074 	tail = dp->tx_active_tail;
1075 
1076 #if GEM_DEBUG_LEVEL > 2
1077 	if (head != tail) {
1078 		cmn_err(CE_CONT, "!%s: %s: "
1079 		    "testing active_head:%d[%d], active_tail:%d[%d]",
1080 		    dp->name, __func__,
1081 		    head, SLOT(head, dp->gc.gc_tx_buf_size),
1082 		    tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1083 	}
1084 #endif
1085 #ifdef DEBUG
1086 	if (dp->tx_reclaim_busy == 0) {
1087 		/* check tx buffer management consistency */
1088 		ASSERT(dp->tx_free_tail - dp->tx_active_head
1089 		    == dp->gc.gc_tx_buf_limit);
1090 		/* EMPTY */
1091 	}
1092 #endif
1093 	dp->tx_reclaim_busy++;
1094 
1095 	/* sync all active HW descriptors */
1096 	gem_tx_desc_dma_sync(dp,
1097 	    SLOT(dp->tx_desc_head, tx_ring_size),
1098 	    dp->tx_desc_tail - dp->tx_desc_head,
1099 	    DDI_DMA_SYNC_FORKERNEL);
1100 
1101 	tbp = GET_TXBUF(dp, head);
1102 	desc_head = dp->tx_desc_head;
1103 	for (sn = head; sn != tail;
1104 	    dp->tx_active_head = (++sn), tbp = tbp->txb_next) {
1105 		int	ndescs;
1106 
1107 		ASSERT(tbp->txb_desc == desc_head);
1108 
1109 		ndescs = tbp->txb_ndescs;
1110 		if (ndescs == 0) {
1111 			/* skip errored descriptors */
1112 			continue;
1113 		}
1114 		txstat = (*tx_desc_stat)(dp,
1115 		    SLOT(tbp->txb_desc, tx_ring_size), ndescs);
1116 
1117 		if (txstat == 0) {
1118 			/* not transmitted yet */
1119 			break;
1120 		}
1121 
1122 		if (!dp->tx_blocked && (tbp->txb_flag & GEM_TXFLAG_INTR)) {
1123 			dp->tx_blocked = now;
1124 		}
1125 
1126 		ASSERT(txstat & (GEM_TX_DONE | GEM_TX_ERR));
1127 
1128 		if (txstat & GEM_TX_ERR) {
1129 			err = GEM_FAILURE;
1130 			cmn_err(CE_WARN, "!%s: tx error at desc %d[%d]",
1131 			    dp->name, sn, SLOT(sn, tx_ring_size));
1132 		}
1133 #if GEM_DEBUG_LEVEL > 4
1134 		if (now - tbp->txb_stime >= 50) {
1135 			cmn_err(CE_WARN, "!%s: tx delay while %d mS",
1136 			    dp->name, (now - tbp->txb_stime)*10);
1137 		}
1138 #endif
1139 		/* free transmitted descriptors */
1140 		desc_head += ndescs;
1141 	}
1142 
1143 	if (dp->tx_desc_head != desc_head) {
1144 		/* we have reclaimed one or more tx buffers */
1145 		dp->tx_desc_head = desc_head;
1146 
1147 		/* If we passed the next interrupt position, update it */
1148 		if (desc_head - dp->tx_desc_intr > 0) {
1149 			dp->tx_desc_intr = desc_head;
1150 		}
1151 	}
1152 	mutex_exit(&dp->xmitlock);
1153 
1154 	/* free dma mapping resources associated with transmitted tx buffers */
1155 	tbp = GET_TXBUF(dp, head);
1156 	tail = sn;
1157 #if GEM_DEBUG_LEVEL > 2
1158 	if (head != tail) {
1159 		cmn_err(CE_CONT, "%s: freeing head:%d[%d], tail:%d[%d]",
1160 		    __func__,
1161 		    head, SLOT(head, dp->gc.gc_tx_buf_size),
1162 		    tail, SLOT(tail, dp->gc.gc_tx_buf_size));
1163 	}
1164 #endif
1165 	for (sn = head; sn != tail; sn++, tbp = tbp->txb_next) {
1166 		gem_txbuf_free_dma_resources(tbp);
1167 	}
1168 
1169 	/* recycle the tx buffers */
1170 	mutex_enter(&dp->xmitlock);
1171 	if (--dp->tx_reclaim_busy == 0) {
1172 		/* we are the last thread who can update free tail */
1173 #if GEM_DEBUG_LEVEL > 4
1174 		/* check that all resources have been deallocated */
1175 		sn = dp->tx_free_tail;
1176 		tbp = GET_TXBUF(dp, sn);
1177 		while (sn != dp->tx_active_head + dp->gc.gc_tx_buf_limit) {
1178 			if (tbp->txb_nfrags) {
1179 				/* in use */
1180 				break;
1181 			}
1182 			ASSERT(tbp->txb_mp == NULL);
1183 			tbp = tbp->txb_next;
1184 			sn++;
1185 		}
1186 		ASSERT(dp->tx_active_head + dp->gc.gc_tx_buf_limit == sn);
1187 #endif
1188 		dp->tx_free_tail =
1189 		    dp->tx_active_head + dp->gc.gc_tx_buf_limit;
1190 	}
1191 	if (!dp->mac_active) {
1192 		/* someone may be waiting for me. */
1193 		cv_broadcast(&dp->tx_drain_cv);
1194 	}
1195 #if GEM_DEBUG_LEVEL > 2
1196 	cmn_err(CE_CONT, "!%s: %s: called, "
1197 	    "free_head:%d free_tail:%d(+%d) added:%d",
1198 	    dp->name, __func__,
1199 	    dp->tx_free_head, dp->tx_free_tail,
1200 	    dp->tx_free_tail - dp->tx_free_head, tail - head);
1201 #endif
1202 	mutex_exit(&dp->xmitlock);
1203 
1204 	return (err);
1205 }
1206 #pragma inline(gem_reclaim_txbuf)
1207 
1208 
1209 /*
1210  * Make tx descriptors in an out-of-order manner
1211  */
1212 static void
1213 gem_tx_load_descs_oo(struct gem_dev *dp,
1214 	seqnum_t start_slot, seqnum_t end_slot, uint64_t flags)
1215 {
1216 	seqnum_t	sn;
1217 	struct txbuf	*tbp;
1218 	int	tx_ring_size = dp->gc.gc_tx_ring_size;
1219 	int	(*tx_desc_write)
1220 	    (struct gem_dev *dp, int slot,
1221 	    ddi_dma_cookie_t *dmacookie,
1222 	    int frags, uint64_t flag) = dp->gc.gc_tx_desc_write;
1223 	clock_t	now = ddi_get_lbolt();
1224 
1225 	sn = start_slot;
1226 	tbp = GET_TXBUF(dp, sn);
1227 	do {
1228 #if GEM_DEBUG_LEVEL > 1
1229 		if (dp->tx_cnt < 100) {
1230 			dp->tx_cnt++;
1231 			flags |= GEM_TXFLAG_INTR;
1232 		}
1233 #endif
1234 		/* write a tx descriptor */
1235 		tbp->txb_desc = sn;
1236 		tbp->txb_ndescs = (*tx_desc_write)(dp,
1237 		    SLOT(sn, tx_ring_size),
1238 		    tbp->txb_dmacookie,
1239 		    tbp->txb_nfrags, flags | tbp->txb_flag);
1240 		tbp->txb_stime = now;
1241 		ASSERT(tbp->txb_ndescs == 1);
1242 
1243 		flags = 0;
1244 		sn++;
1245 		tbp = tbp->txb_next;
1246 	} while (sn != end_slot);
1247 }
1248 
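/*
 * gem_setup_txbuf_copy: copy the whole mblk chain into the tx bounce
 * buffer of tbp, pad it up to the minimum packet size when required,
 * and describe the result with a single DMA cookie.  Returns the
 * length of the copied packet.
 */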
1249 __INLINE__
1250 static size_t
1251 gem_setup_txbuf_copy(struct gem_dev *dp, mblk_t *mp, struct txbuf *tbp)
1252 {
1253 	size_t			min_pkt;
1254 	caddr_t			bp;
1255 	size_t			off;
1256 	mblk_t			*tp;
1257 	size_t			len;
1258 	uint64_t		flag;
1259 
1260 	ASSERT(tbp->txb_mp == NULL);
1261 
1262 	/* we use bounce buffer for the packet */
1263 	min_pkt = ETHERMIN;
1264 	bp = tbp->txb_buf;
1265 	off = 0;
1266 	tp = mp;
1267 
1268 	flag = tbp->txb_flag;
1269 	if (flag & GEM_TXFLAG_SWVTAG) {
1270 		/* need to increase min packet size */
1271 		min_pkt += VTAG_SIZE;
1272 		ASSERT((flag & GEM_TXFLAG_VTAG) == 0);
1273 	}
1274 
1275 	/* copy the rest */
1276 	for (; tp; tp = tp->b_cont) {
1277 		if ((len = (long)tp->b_wptr - (long)tp->b_rptr) > 0) {
1278 			bcopy(tp->b_rptr, &bp[off], len);
1279 			off += len;
1280 		}
1281 	}
1282 
1283 	if (off < min_pkt &&
1284 	    (min_pkt > ETHERMIN || !dp->gc.gc_tx_auto_pad)) {
1285 		/*
1286 		 * Extend the packet to the minimum packet size explicitly.
1287 		 * For software vlan packets, we shouldn't use the tx autopad
1288 		 * function because nics may not be aware of vlan;
1289 		 * we must keep 46 octets of payload even if we use vlan.
1290 		 */
1291 		bzero(&bp[off], min_pkt - off);
1292 		off = min_pkt;
1293 	}
1294 
1295 	(void) ddi_dma_sync(tbp->txb_bdh, (off_t)0, off, DDI_DMA_SYNC_FORDEV);
1296 
1297 	tbp->txb_dmacookie[0].dmac_laddress = tbp->txb_buf_dma;
1298 	tbp->txb_dmacookie[0].dmac_size = off;
1299 
1300 	DPRINTF(2, (CE_CONT,
1301 	    "!%s: %s: copy: addr:0x%llx len:0x%x, vtag:0x%04x, min_pkt:%d",
1302 	    dp->name, __func__,
1303 	    tbp->txb_dmacookie[0].dmac_laddress,
1304 	    tbp->txb_dmacookie[0].dmac_size,
1305 	    (flag & GEM_TXFLAG_VTAG) >> GEM_TXFLAG_VTAG_SHIFT,
1306 	    min_pkt));
1307 
1308 	/* save misc info */
1309 	tbp->txb_mp = mp;
1310 	tbp->txb_nfrags = 1;
1311 #ifdef DEBUG_MULTIFRAGS
1312 	if (dp->gc.gc_tx_max_frags >= 3 &&
1313 	    tbp->txb_dmacookie[0].dmac_size > 16*3) {
1314 		tbp->txb_dmacookie[1].dmac_laddress =
1315 		    tbp->txb_dmacookie[0].dmac_laddress + 16;
1316 		tbp->txb_dmacookie[2].dmac_laddress =
1317 		    tbp->txb_dmacookie[1].dmac_laddress + 16;
1318 
1319 		tbp->txb_dmacookie[2].dmac_size =
1320 		    tbp->txb_dmacookie[0].dmac_size - 16*2;
1321 		tbp->txb_dmacookie[1].dmac_size = 16;
1322 		tbp->txb_dmacookie[0].dmac_size = 16;
1323 		tbp->txb_nfrags  = 3;
1324 	}
1325 #endif
1326 	return (off);
1327 }
1328 #pragma inline(gem_setup_txbuf_copy)
1329 
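/*
 * gem_tx_start_unit: hand the tx buffers queued on the soft queue over
 * to the hardware via the hw-dependent gc_tx_start routine, then
 * advance the soft queue head and the active tail.
 */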
1330 __INLINE__
1331 static void
1332 gem_tx_start_unit(struct gem_dev *dp)
1333 {
1334 	seqnum_t	head;
1335 	seqnum_t	tail;
1336 	struct txbuf	*tbp_head;
1337 	struct txbuf	*tbp_tail;
1338 
1339 	/* update HW descriptors from soft queue */
1340 	ASSERT(mutex_owned(&dp->xmitlock));
1341 	ASSERT(dp->tx_softq_head == dp->tx_active_tail);
1342 
1343 	head = dp->tx_softq_head;
1344 	tail = dp->tx_softq_tail;
1345 
1346 	DPRINTF(1, (CE_CONT,
1347 	    "%s: %s: called, softq %d %d[+%d], desc %d %d[+%d]",
1348 	    dp->name, __func__, head, tail, tail - head,
1349 	    dp->tx_desc_head, dp->tx_desc_tail,
1350 	    dp->tx_desc_tail - dp->tx_desc_head));
1351 
1352 	ASSERT(tail - head > 0);
1353 
1354 	dp->tx_desc_tail = tail;
1355 
1356 	tbp_head = GET_TXBUF(dp, head);
1357 	tbp_tail = GET_TXBUF(dp, tail - 1);
1358 
1359 	ASSERT(tbp_tail->txb_desc + tbp_tail->txb_ndescs == dp->tx_desc_tail);
1360 
1361 	dp->gc.gc_tx_start(dp,
1362 	    SLOT(tbp_head->txb_desc, dp->gc.gc_tx_ring_size),
1363 	    tbp_tail->txb_desc + tbp_tail->txb_ndescs - tbp_head->txb_desc);
1364 
1365 	/* advance softq head and active tail */
1366 	dp->tx_softq_head = dp->tx_active_tail = tail;
1367 }
1368 #pragma inline(gem_tx_start_unit)
1369 
1370 #ifdef GEM_DEBUG_LEVEL
1371 static int gem_send_cnt[10];
1372 #endif
1373 #define	PKT_MIN_SIZE	(sizeof (struct ether_header) + 10 + VTAG_SIZE)
1374 #define	EHLEN	(sizeof (struct ether_header))
1375 /*
1376  * check ether packet type and ip protocol
1377  */
1378 static uint64_t
1379 gem_txbuf_options(struct gem_dev *dp, mblk_t *mp, uint8_t *bp)
1380 {
1381 	mblk_t		*tp;
1382 	ssize_t		len;
1383 	uint_t		vtag;
1384 	int		off;
1385 	uint64_t	flag;
1386 
1387 	flag = 0ULL;
1388 
1389 	/*
1390 	 * prepare a contiguous header of the packet for protocol analysis
1391 	 */
1392 	if ((long)mp->b_wptr - (long)mp->b_rptr < PKT_MIN_SIZE) {
1393 		/* we use work buffer to copy mblk */
1394 		for (tp = mp, off = 0;
1395 		    tp && (off < PKT_MIN_SIZE);
1396 		    tp = tp->b_cont, off += len) {
1397 			len = (long)tp->b_wptr - (long)tp->b_rptr;
1398 			len = min(len, PKT_MIN_SIZE - off);
1399 			bcopy(tp->b_rptr, &bp[off], len);
1400 		}
1401 	} else {
1402 		/* we can use mblk without copy */
1403 		bp = mp->b_rptr;
1404 	}
1405 
1406 	/* process vlan tag for GLD v3 */
1407 	if (GET_NET16(&bp[VTAG_OFF]) == VTAG_TPID) {
1408 		if (dp->misc_flag & GEM_VLAN_HARD) {
1409 			vtag = GET_NET16(&bp[VTAG_OFF + 2]);
1410 			ASSERT(vtag);
1411 			flag |= vtag << GEM_TXFLAG_VTAG_SHIFT;
1412 		} else {
1413 			flag |= GEM_TXFLAG_SWVTAG;
1414 		}
1415 	}
1416 	return (flag);
1417 }
1418 #undef EHLEN
1419 #undef PKT_MIN_SIZE
1420 /*
1421  * gem_send_common is an exported function because hw-dependent routines may
1422  * use it for sending control frames like setup frames for the 2114x chipset.
1423  */
1424 mblk_t *
1425 gem_send_common(struct gem_dev *dp, mblk_t *mp_head, uint32_t flags)
1426 {
1427 	int			nmblk;
1428 	int			avail;
1429 	mblk_t			*tp;
1430 	mblk_t			*mp;
1431 	int			i;
1432 	struct txbuf		*tbp;
1433 	seqnum_t		head;
1434 	uint64_t		load_flags;
1435 	uint64_t		len_total = 0;
1436 	uint32_t		bcast = 0;
1437 	uint32_t		mcast = 0;
1438 
1439 	ASSERT(mp_head != NULL);
1440 
1441 	mp = mp_head;
1442 	nmblk = 1;
1443 	while ((mp = mp->b_next) != NULL) {
1444 		nmblk++;
1445 	}
1446 #ifdef GEM_DEBUG_LEVEL
1447 	gem_send_cnt[0]++;
1448 	gem_send_cnt[min(nmblk, 9)]++;
1449 #endif
1450 	/*
1451 	 * Acquire resources
1452 	 */
1453 	mutex_enter(&dp->xmitlock);
1454 	if (dp->mac_suspended) {
1455 		mutex_exit(&dp->xmitlock);
1456 		mp = mp_head;
1457 		while (mp) {
1458 			tp = mp->b_next;
1459 			freemsg(mp);
1460 			mp = tp;
1461 		}
1462 		return (NULL);
1463 	}
1464 
1465 	if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
1466 		/* don't send data packets while mac isn't active */
1467 		/* XXX - should we discard packets? */
1468 		mutex_exit(&dp->xmitlock);
1469 		return (mp_head);
1470 	}
1471 
1472 	/* allocate free slots */
1473 	head = dp->tx_free_head;
1474 	avail = dp->tx_free_tail - head;
1475 
1476 	DPRINTF(2, (CE_CONT,
1477 	    "!%s: %s: called, free_head:%d free_tail:%d(+%d) req:%d",
1478 	    dp->name, __func__,
1479 	    dp->tx_free_head, dp->tx_free_tail, avail, nmblk));
1480 
1481 	avail = min(avail, dp->tx_max_packets);
1482 
1483 	if (nmblk > avail) {
1484 		if (avail == 0) {
1485 			/* no resources; short cut */
1486 			DPRINTF(2, (CE_CONT, "!%s: no resources", __func__));
1487 			dp->tx_max_packets = max(dp->tx_max_packets - 1, 1);
1488 			goto done;
1489 		}
1490 		nmblk = avail;
1491 	}
1492 
1493 	dp->tx_free_head = head + nmblk;
1494 	load_flags = ((dp->tx_busy++) == 0) ? GEM_TXFLAG_HEAD : 0;
1495 
1496 	/* update the last interrupt position if tx buffers are exhausted.  */
1497 	if (nmblk == avail) {
1498 		tbp = GET_TXBUF(dp, head + avail - 1);
1499 		tbp->txb_flag = GEM_TXFLAG_INTR;
1500 		dp->tx_desc_intr = head + avail;
1501 	}
1502 	mutex_exit(&dp->xmitlock);
1503 
1504 	tbp = GET_TXBUF(dp, head);
1505 
1506 	for (i = nmblk; i > 0; i--, tbp = tbp->txb_next) {
1507 		uint8_t		*bp;
1508 		uint64_t	txflag;
1509 
1510 		/* remove one from the mblk list */
1511 		ASSERT(mp_head != NULL);
1512 		mp = mp_head;
1513 		mp_head = mp_head->b_next;
1514 		mp->b_next = NULL;
1515 
1516 		/* statistics for non-unicast packets */
1517 		bp = mp->b_rptr;
1518 		if ((bp[0] & 1) && (flags & GEM_SEND_CTRL) == 0) {
1519 			if (bcmp(bp, gem_etherbroadcastaddr.ether_addr_octet,
1520 			    ETHERADDRL) == 0) {
1521 				bcast++;
1522 			} else {
1523 				mcast++;
1524 			}
1525 		}
1526 
1527 		/* save misc info */
1528 		txflag = tbp->txb_flag;
1529 		txflag |= (flags & GEM_SEND_CTRL) << GEM_TXFLAG_PRIVATE_SHIFT;
1530 		txflag |= gem_txbuf_options(dp, mp, (uint8_t *)tbp->txb_buf);
1531 		tbp->txb_flag = txflag;
1532 
1533 		len_total += gem_setup_txbuf_copy(dp, mp, tbp);
1534 	}
1535 
1536 	(void) gem_tx_load_descs_oo(dp, head, head + nmblk, load_flags);
1537 
1538 	/* Append the tbp at the tail of the active tx buffer list */
1539 	mutex_enter(&dp->xmitlock);
1540 
1541 	if ((--dp->tx_busy) == 0) {
1542 		/* extend the tail of softq, as new packets have become ready. */
1543 		dp->tx_softq_tail = dp->tx_free_head;
1544 
1545 		if (!dp->mac_active && (flags & GEM_SEND_CTRL) == 0) {
1546 			/*
1547 			 * The device status has changed while we were
1548 			 * preparing tx bufs.
1549 			 * As we are the last one to make tx non-busy,
1550 			 * wake up anyone who may be waiting for us.
1551 			 */
1552 			cv_broadcast(&dp->tx_drain_cv);
1553 		} else {
1554 			ASSERT(dp->tx_softq_tail - dp->tx_softq_head > 0);
1555 			gem_tx_start_unit(dp);
1556 		}
1557 	}
1558 	dp->stats.obytes += len_total;
1559 	dp->stats.opackets += nmblk;
1560 	dp->stats.obcast += bcast;
1561 	dp->stats.omcast += mcast;
1562 done:
1563 	mutex_exit(&dp->xmitlock);
1564 
1565 	return (mp_head);
1566 }
1567 
1568 /* ========================================================== */
1569 /*
1570  * error detection and restart routines
1571  */
1572 /* ========================================================== */
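/*
 * gem_restart_nic: stop the mac, reset and re-initialize the chip,
 * restore the media mode and the rx filter, and restart the mac if
 * the link is still up.  With GEM_RESTART_KEEP_BUF, rx is stopped
 * gracefully (via the rx filter) before the mac is stopped.
 */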
1573 int
1574 gem_restart_nic(struct gem_dev *dp, uint_t flags)
1575 {
1576 	ASSERT(mutex_owned(&dp->intrlock));
1577 
1578 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
1579 #ifdef GEM_DEBUG_LEVEL
1580 #if GEM_DEBUG_LEVEL > 1
1581 	gem_dump_txbuf(dp, CE_CONT, "gem_restart_nic");
1582 #endif
1583 #endif
1584 
1585 	if (dp->mac_suspended) {
1586 		/* should we return GEM_FAILURE ? */
1587 		return (GEM_FAILURE);
1588 	}
1589 
1590 	/*
1591 	 * We should avoid calling any routines except xxx_chip_reset
1592 	 * when we are resuming the system.
1593 	 */
1594 	if (dp->mac_active) {
1595 		if (flags & GEM_RESTART_KEEP_BUF) {
1596 			/* stop rx gracefully */
1597 			dp->rxmode &= ~RXMODE_ENABLE;
1598 			(void) (*dp->gc.gc_set_rx_filter)(dp);
1599 		}
1600 		(void) gem_mac_stop(dp, flags);
1601 	}
1602 
1603 	/* reset the chip. */
1604 	if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
1605 		cmn_err(CE_WARN, "%s: %s: failed to reset chip",
1606 		    dp->name, __func__);
1607 		goto err;
1608 	}
1609 
1610 	if (gem_mac_init(dp) != GEM_SUCCESS) {
1611 		goto err;
1612 	}
1613 
1614 	/* setup media mode if the link has been up */
1615 	if (dp->mii_state == MII_STATE_LINKUP) {
1616 		if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
1617 			goto err;
1618 		}
1619 	}
1620 
1621 	/* setup mac address and enable rx filter */
1622 	dp->rxmode |= RXMODE_ENABLE;
1623 	if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
1624 		goto err;
1625 	}
1626 
1627 	/*
1628 	 * XXX - a panic happened because of linkdown.
1629 	 * We must check mii_state here, because the link can go down just
1630 	 * before the restart event happens. If the link is down now,
1631 	 * gem_mac_start() will be called from gem_mii_link_check() when
1632 	 * the link comes up later.
1633 	 */
1634 	if (dp->mii_state == MII_STATE_LINKUP) {
1635 		/* restart the nic */
1636 		ASSERT(!dp->mac_active);
1637 		(void) gem_mac_start(dp);
1638 	}
1639 	return (GEM_SUCCESS);
1640 err:
1641 	return (GEM_FAILURE);
1642 }
1643 
1644 
1645 static void
1646 gem_tx_timeout(struct gem_dev *dp)
1647 {
1648 	clock_t		now;
1649 	boolean_t	tx_sched;
1650 	struct txbuf	*tbp;
1651 
1652 	mutex_enter(&dp->intrlock);
1653 
1654 	tx_sched = B_FALSE;
1655 	now = ddi_get_lbolt();
1656 
1657 	mutex_enter(&dp->xmitlock);
1658 	if (!dp->mac_active || dp->mii_state != MII_STATE_LINKUP) {
1659 		mutex_exit(&dp->xmitlock);
1660 		goto schedule_next;
1661 	}
1662 	mutex_exit(&dp->xmitlock);
1663 
1664 	/* reclaim transmitted buffers to check whether the transmitter hangs or not. */
1665 	if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
1666 		/* tx error happened, reset transmitter in the chip */
1667 		(void) gem_restart_nic(dp, 0);
1668 		tx_sched = B_TRUE;
1669 		dp->tx_blocked = (clock_t)0;
1670 
1671 		goto schedule_next;
1672 	}
1673 
1674 	mutex_enter(&dp->xmitlock);
1675 	/* check if the transmitter thread is stuck */
1676 	if (dp->tx_active_head == dp->tx_active_tail) {
1677 		/* no tx buffer is loaded to the nic */
1678 		if (dp->tx_blocked &&
1679 		    now - dp->tx_blocked > dp->gc.gc_tx_timeout_interval) {
1680 			gem_dump_txbuf(dp, CE_WARN,
1681 			    "gem_tx_timeout: tx blocked");
1682 			tx_sched = B_TRUE;
1683 			dp->tx_blocked = (clock_t)0;
1684 		}
1685 		mutex_exit(&dp->xmitlock);
1686 		goto schedule_next;
1687 	}
1688 
1689 	tbp = GET_TXBUF(dp, dp->tx_active_head);
1690 	if (now - tbp->txb_stime < dp->gc.gc_tx_timeout) {
1691 		mutex_exit(&dp->xmitlock);
1692 		goto schedule_next;
1693 	}
1694 	mutex_exit(&dp->xmitlock);
1695 
1696 	gem_dump_txbuf(dp, CE_WARN, "gem_tx_timeout: tx timeout");
1697 
1698 	/* discard untransmitted packet and restart tx.  */
1699 	(void) gem_restart_nic(dp, GEM_RESTART_NOWAIT);
1700 	tx_sched = B_TRUE;
1701 	dp->tx_blocked = (clock_t)0;
1702 
1703 schedule_next:
1704 	mutex_exit(&dp->intrlock);
1705 
1706 	/* restart the downstream if needed */
1707 	if (tx_sched) {
1708 		mac_tx_update(dp->mh);
1709 	}
1710 
1711 	DPRINTF(4, (CE_CONT,
1712 	    "!%s: blocked:%d active_head:%d active_tail:%d desc_intr:%d",
1713 	    dp->name, BOOLEAN(dp->tx_blocked),
1714 	    dp->tx_active_head, dp->tx_active_tail, dp->tx_desc_intr));
1715 	dp->timeout_id =
1716 	    timeout((void (*)(void *))gem_tx_timeout,
1717 	    (void *)dp, dp->gc.gc_tx_timeout_interval);
1718 }
1719 
1720 /* ================================================================== */
1721 /*
1722  * Interrupt handler
1723  */
1724 /* ================================================================== */
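/*
 * gem_append_rxbuf: add a chain of prepared rx buffers to the active
 * rx buffer list and write an rx descriptor for each of them.
 */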
1725 __INLINE__
1726 static void
1727 gem_append_rxbuf(struct gem_dev *dp, struct rxbuf *rbp_head)
1728 {
1729 	struct rxbuf	*rbp;
1730 	seqnum_t	tail;
1731 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
1732 
1733 	ASSERT(rbp_head != NULL);
1734 	ASSERT(mutex_owned(&dp->intrlock));
1735 
1736 	DPRINTF(3, (CE_CONT, "!%s: %s: slot_head:%d, slot_tail:%d",
1737 	    dp->name, __func__, dp->rx_active_head, dp->rx_active_tail));
1738 
1739 	/*
1740 	 * Add new buffers into active rx buffer list
1741 	 */
1742 	if (dp->rx_buf_head == NULL) {
1743 		dp->rx_buf_head = rbp_head;
1744 		ASSERT(dp->rx_buf_tail == NULL);
1745 	} else {
1746 		dp->rx_buf_tail->rxb_next = rbp_head;
1747 	}
1748 
1749 	tail = dp->rx_active_tail;
1750 	for (rbp = rbp_head; rbp; rbp = rbp->rxb_next) {
1751 		/* need to notify the tail for the lower layer */
1752 		dp->rx_buf_tail = rbp;
1753 
1754 		dp->gc.gc_rx_desc_write(dp,
1755 		    SLOT(tail, rx_ring_size),
1756 		    rbp->rxb_dmacookie,
1757 		    rbp->rxb_nfrags);
1758 
1759 		dp->rx_active_tail = tail = tail + 1;
1760 	}
1761 }
1762 #pragma inline(gem_append_rxbuf)
1763 
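/*
 * gem_get_packet_default: default gc_get_packet method.  Allocate a
 * new mblk, reserve VTAG_SIZE bytes of headroom, and copy the received
 * frame out of the rx bounce buffer.
 */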
1764 mblk_t *
1765 gem_get_packet_default(struct gem_dev *dp, struct rxbuf *rbp, size_t len)
1766 {
1767 	int		rx_header_len = dp->gc.gc_rx_header_len;
1768 	uint8_t		*bp;
1769 	mblk_t		*mp;
1770 
1771 	/* allocate a new mblk */
1772 	if (mp = allocb(len + VTAG_SIZE, BPRI_MED)) {
1773 		ASSERT(mp->b_next == NULL);
1774 		ASSERT(mp->b_cont == NULL);
1775 
1776 		mp->b_rptr += VTAG_SIZE;
1777 		bp = mp->b_rptr;
1778 		mp->b_wptr = bp + len;
1779 
1780 		/*
1781 		 * flush the range of the entire buffer to invalidate
1782 		 * all of the corresponding dirty entries in the iocache.
1783 		 */
1784 		(void) ddi_dma_sync(rbp->rxb_dh, rx_header_len,
1785 		    0, DDI_DMA_SYNC_FORKERNEL);
1786 
1787 		bcopy(rbp->rxb_buf + rx_header_len, bp, len);
1788 	}
1789 	return (mp);
1790 }
1791 
1792 #ifdef GEM_DEBUG_LEVEL
1793 uint_t	gem_rx_pkts[17];
1794 #endif
1795 
1796 
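/*
 * gem_receive: harvest received packets from the rx descriptor ring,
 * copy them into freshly allocated mblks, update rx statistics, refill
 * the ring with new buffers, and pass the packets up via mac_rx().
 */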
1797 int
1798 gem_receive(struct gem_dev *dp)
1799 {
1800 	uint64_t	len_total = 0;
1801 	struct rxbuf	*rbp;
1802 	mblk_t		*mp;
1803 	int		cnt = 0;
1804 	uint64_t	rxstat;
1805 	struct rxbuf	*newbufs;
1806 	struct rxbuf	**newbufs_tailp;
1807 	mblk_t		*rx_head;
1808 	mblk_t 		**rx_tailp;
1809 	int		rx_ring_size = dp->gc.gc_rx_ring_size;
1810 	seqnum_t	active_head;
1811 	uint64_t	(*rx_desc_stat)(struct gem_dev *dp,
1812 	    int slot, int ndesc);
1813 	int		ethermin = ETHERMIN;
1814 	int		ethermax = dp->mtu + sizeof (struct ether_header);
1815 	int		rx_header_len = dp->gc.gc_rx_header_len;
1816 
1817 	ASSERT(mutex_owned(&dp->intrlock));
1818 
1819 	DPRINTF(3, (CE_CONT, "!%s: gem_receive: rx_buf_head:%p",
1820 	    dp->name, dp->rx_buf_head));
1821 
1822 	rx_desc_stat  = dp->gc.gc_rx_desc_stat;
1823 	newbufs_tailp = &newbufs;
1824 	rx_tailp = &rx_head;
1825 	for (active_head = dp->rx_active_head;
1826 	    (rbp = dp->rx_buf_head) != NULL; active_head++) {
1827 		int		len;
1828 		if (cnt == 0) {
1829 			cnt = max(dp->poll_pkt_delay*2, 10);
1830 			cnt = min(cnt,
1831 			    dp->rx_active_tail - active_head);
1832 			gem_rx_desc_dma_sync(dp,
1833 			    SLOT(active_head, rx_ring_size),
1834 			    cnt,
1835 			    DDI_DMA_SYNC_FORKERNEL);
1836 		}
1837 
1838 		if (rx_header_len > 0) {
1839 			(void) ddi_dma_sync(rbp->rxb_dh, 0,
1840 			    rx_header_len, DDI_DMA_SYNC_FORKERNEL);
1841 		}
1842 
1843 		if (((rxstat = (*rx_desc_stat)(dp,
1844 		    SLOT(active_head, rx_ring_size),
1845 		    rbp->rxb_nfrags))
1846 		    & (GEM_RX_DONE | GEM_RX_ERR)) == 0) {
1847 			/* not received yet */
1848 			break;
1849 		}
1850 
1851 		/* Remove the head of the rx buffer list */
1852 		dp->rx_buf_head = rbp->rxb_next;
1853 		cnt--;
1854 
1855 
1856 		if (rxstat & GEM_RX_ERR) {
1857 			goto next;
1858 		}
1859 
1860 		len = rxstat & GEM_RX_LEN;
1861 		DPRINTF(3, (CE_CONT, "!%s: %s: rxstat:0x%llx, len:0x%x",
1862 		    dp->name, __func__, rxstat, len));
1863 
1864 		/*
1865 		 * Copy the packet
1866 		 */
1867 		if ((mp = dp->gc.gc_get_packet(dp, rbp, len)) == NULL) {
1868 			/* no memory, discard the packet */
1869 			dp->stats.norcvbuf++;
1870 			goto next;
1871 		}
1872 
1873 		/*
1874 		 * Process VLAN tag
1875 		 */
1876 		ethermin = ETHERMIN;
1877 		ethermax = dp->mtu + sizeof (struct ether_header);
1878 		if (GET_NET16(mp->b_rptr + VTAG_OFF) == VTAG_TPID) {
1879 			ethermax += VTAG_SIZE;
1880 		}
1881 
1882 		/* check packet size */
1883 		if (len < ethermin) {
1884 			dp->stats.errrcv++;
1885 			dp->stats.runt++;
1886 			freemsg(mp);
1887 			goto next;
1888 		}
1889 
1890 		if (len > ethermax) {
1891 			dp->stats.errrcv++;
1892 			dp->stats.frame_too_long++;
1893 			freemsg(mp);
1894 			goto next;
1895 		}
1896 
1897 		len_total += len;
1898 
1899 #ifdef GEM_DEBUG_VLAN
1900 		if (GET_ETHERTYPE(mp->b_rptr) == VTAG_TPID) {
1901 			gem_dump_packet(dp, (char *)__func__, mp, B_TRUE);
1902 		}
1903 #endif
1904 		/* append the received packet to the temporary rx packet list */
1905 		*rx_tailp = mp;
1906 		rx_tailp  = &mp->b_next;
1907 
1908 		if (mp->b_rptr[0] & 1) {
1909 			if (bcmp(mp->b_rptr,
1910 			    gem_etherbroadcastaddr.ether_addr_octet,
1911 			    ETHERADDRL) == 0) {
1912 				dp->stats.rbcast++;
1913 			} else {
1914 				dp->stats.rmcast++;
1915 			}
1916 		}
1917 next:
1918 		ASSERT(rbp != NULL);
1919 
1920 		/* append this rxbuf to the temporary new buffer list */
1921 		*newbufs_tailp = rbp;
1922 		newbufs_tailp  = &rbp->rxb_next;
1923 	}
1924 
1925 	/* advance rx_active_head */
1926 	if ((cnt = active_head - dp->rx_active_head) > 0) {
1927 		dp->stats.rbytes += len_total;
1928 		dp->stats.rpackets += cnt;
1929 	}
1930 	dp->rx_active_head = active_head;
1931 
1932 	/* terminate the working list */
1933 	*newbufs_tailp = NULL;
1934 	*rx_tailp = NULL;
1935 
1936 	if (dp->rx_buf_head == NULL) {
1937 		dp->rx_buf_tail = NULL;
1938 	}
1939 
1940 	DPRINTF(4, (CE_CONT, "%s: %s: cnt:%d, rx_head:%p",
1941 	    dp->name, __func__, cnt, rx_head));
1942 
1943 	if (newbufs) {
1944 		/*
1945 		 * refill the rx list with new buffers
1946 		 */
1947 		seqnum_t	head;
1948 
1949 		/* save current tail */
1950 		head = dp->rx_active_tail;
1951 		gem_append_rxbuf(dp, newbufs);
1952 
1953 		/* call the hardware-dependent rx start routine, if any */
1954 		dp->gc.gc_rx_start(dp,
1955 		    SLOT(head, rx_ring_size), dp->rx_active_tail - head);
1956 	}
1957 
1958 	if (rx_head) {
1959 		/*
1960 		 * send up received packets
1961 		 */
1962 		mutex_exit(&dp->intrlock);
1963 		mac_rx(dp->mh, dp->mac_rx_ring_ha, rx_head);
1964 		mutex_enter(&dp->intrlock);
1965 	}
1966 
1967 #ifdef GEM_DEBUG_LEVEL
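	/* debug aid: histogram of how many packets were processed per call */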
1968 	gem_rx_pkts[min(cnt, sizeof (gem_rx_pkts)/sizeof (uint_t)-1)]++;
1969 #endif
1970 	return (cnt);
1971 }
1972 
1973 boolean_t
1974 gem_tx_done(struct gem_dev *dp)
1975 {
1976 	boolean_t	tx_sched = B_FALSE;
1977 
1978 	if (gem_reclaim_txbuf(dp) != GEM_SUCCESS) {
1979 		(void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
1980 		DPRINTF(2, (CE_CONT, "!%s: gem_tx_done: tx_desc: %d %d",
1981 		    dp->name, dp->tx_active_head, dp->tx_active_tail));
1982 		tx_sched = B_TRUE;
1983 		goto x;
1984 	}
1985 
1986 	mutex_enter(&dp->xmitlock);
1987 
1988 	/* XXX - we must not have any packets in soft queue */
1989 	ASSERT(dp->tx_softq_head == dp->tx_softq_tail);
1990 	/*
1991 	 * If the tx side is blocked and we have no further chance to get
1992 	 * free tx buffers, it is worth rescheduling the downstream (tx) side.
1993 	 */
1994 	ASSERT(dp->tx_desc_intr - dp->tx_desc_head >= 0);
1995 	if (dp->tx_blocked && dp->tx_desc_intr == dp->tx_desc_head) {
1996 		/*
1997 		 * As no further tx-done interrupts are scheduled, this
1998 		 * is the last chance to kick tx side, which may be
1999 		 * blocked now, otherwise the tx side never works again.
2000 		 */
2001 		tx_sched = B_TRUE;
2002 		dp->tx_blocked = (clock_t)0;
2003 		dp->tx_max_packets =
2004 		    min(dp->tx_max_packets + 2, dp->gc.gc_tx_buf_limit);
2005 	}
2006 
2007 	mutex_exit(&dp->xmitlock);
2008 
2009 	DPRINTF(3, (CE_CONT, "!%s: %s: ret: blocked:%d",
2010 	    dp->name, __func__, BOOLEAN(dp->tx_blocked)));
2011 x:
2012 	return (tx_sched);
2013 }
2014 
2015 static uint_t
2016 gem_intr(struct gem_dev	*dp)
2017 {
2018 	uint_t		ret;
2019 
2020 	mutex_enter(&dp->intrlock);
2021 	if (dp->mac_suspended) {
2022 		mutex_exit(&dp->intrlock);
2023 		return (DDI_INTR_UNCLAIMED);
2024 	}
2025 	dp->intr_busy = B_TRUE;
2026 
2027 	ret = (*dp->gc.gc_interrupt)(dp);
2028 
2029 	if (ret == DDI_INTR_UNCLAIMED) {
2030 		dp->intr_busy = B_FALSE;
2031 		mutex_exit(&dp->intrlock);
2032 		return (ret);
2033 	}
2034 
2035 	if (!dp->mac_active) {
2036 		cv_broadcast(&dp->tx_drain_cv);
2037 	}
2038 
2039 
2040 	dp->stats.intr++;
2041 	dp->intr_busy = B_FALSE;
2042 
2043 	mutex_exit(&dp->intrlock);
2044 
2045 	if (ret & INTR_RESTART_TX) {
2046 		DPRINTF(4, (CE_CONT, "!%s: calling mac_tx_update", dp->name));
2047 		mac_tx_update(dp->mh);
2048 		ret &= ~INTR_RESTART_TX;
2049 	}
2050 	return (ret);
2051 }
2052 
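/*
 * gem_intr_watcher: poll-mode fallback that calls the interrupt handler
 * once per clock tick via timeout(9F); apparently intended for chips or
 * debug configurations where real interrupts are not delivered.
 */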
2053 static void
2054 gem_intr_watcher(struct gem_dev *dp)
2055 {
2056 	(void) gem_intr(dp);
2057 
2058 	/* schedule the next call of gem_intr_watcher */
2059 	dp->intr_watcher_id =
2060 	    timeout((void (*)(void *))gem_intr_watcher, (void *)dp, 1);
2061 }
2062 
2063 /* ======================================================================== */
2064 /*
2065  * MII support routines
2066  */
2067 /* ======================================================================== */
2068 static void
2069 gem_choose_forcedmode(struct gem_dev *dp)
2070 {
2071 	/* choose media mode */
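	/*
	 * Select the fastest advertised speed; the duplex setting follows
	 * the advertised full-duplex capability for that speed (note that
	 * 100BASE-T4 is grouped with the full-duplex 100Mbps case here).
	 */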
2072 	if (dp->anadv_1000fdx || dp->anadv_1000hdx) {
2073 		dp->speed = GEM_SPD_1000;
2074 		dp->full_duplex = dp->anadv_1000fdx;
2075 	} else if (dp->anadv_100fdx || dp->anadv_100t4) {
2076 		dp->speed = GEM_SPD_100;
2077 		dp->full_duplex = B_TRUE;
2078 	} else if (dp->anadv_100hdx) {
2079 		dp->speed = GEM_SPD_100;
2080 		dp->full_duplex = B_FALSE;
2081 	} else {
2082 		dp->speed = GEM_SPD_10;
2083 		dp->full_duplex = dp->anadv_10fdx;
2084 	}
2085 }
2086 
2087 uint16_t
2088 gem_mii_read(struct gem_dev *dp, uint_t reg)
2089 {
2090 	if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2091 		(*dp->gc.gc_mii_sync)(dp);
2092 	}
2093 	return ((*dp->gc.gc_mii_read)(dp, reg));
2094 }
2095 
2096 void
2097 gem_mii_write(struct gem_dev *dp, uint_t reg, uint16_t val)
2098 {
2099 	if ((dp->mii_status & MII_STATUS_MFPRMBLSUPR) == 0) {
2100 		(*dp->gc.gc_mii_sync)(dp);
2101 	}
2102 	(*dp->gc.gc_mii_write)(dp, reg, val);
2103 }
2104 
2105 #define	fc_cap_decode(x)	\
2106 	((((x) & MII_ABILITY_PAUSE) ? 1 : 0) |	\
2107 	(((x) & MII_ABILITY_ASM_DIR) ? 2 : 0))
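/*
 * fc_cap_decode() maps the PAUSE/ASM_DIR ability bits into a 2-bit
 * flow control capability index: 0 = none, 1 = symmetric (PAUSE),
 * 2 = asymmetric tx (ASM_DIR), 3 = rx/symmetric (PAUSE | ASM_DIR).
 * The same encoding is used for anadv_flow_control (see fc_cap_encode[]
 * below) and for the indices of the gem_fc_result[] table.
 */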
2108 
2109 int
2110 gem_mii_config_default(struct gem_dev *dp)
2111 {
2112 	uint16_t	mii_stat;
2113 	uint16_t	val;
2114 	static uint16_t fc_cap_encode[4] = {
2115 		/* none */		0,
2116 		/* symmetric */		MII_ABILITY_PAUSE,
2117 		/* tx */		MII_ABILITY_ASM_DIR,
2118 		/* rx-symmetric */	MII_ABILITY_PAUSE | MII_ABILITY_ASM_DIR,
2119 	};
2120 
2121 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2122 
2123 	/*
2124 	 * Configure bits in advertisement register
2125 	 */
2126 	mii_stat = dp->mii_status;
2127 
2128 	DPRINTF(1, (CE_CONT, "!%s: %s: MII_STATUS reg:%b",
2129 	    dp->name, __func__, mii_stat, MII_STATUS_BITS));
2130 
2131 	if ((mii_stat & MII_STATUS_ABILITY_TECH) == 0) {
2132 		/* it's funny */
2133 		cmn_err(CE_WARN, "!%s: wrong ability bits: mii_status:%b",
2134 		    dp->name, mii_stat, MII_STATUS_BITS);
2135 		return (GEM_FAILURE);
2136 	}
2137 
2138 	/* Do not change the rest of the ability bits in the advert reg */
2139 	val = gem_mii_read(dp, MII_AN_ADVERT) & ~MII_ABILITY_ALL;
2140 
2141 	DPRINTF(0, (CE_CONT,
2142 	    "!%s: %s: 100T4:%d 100F:%d 100H:%d 10F:%d 10H:%d",
2143 	    dp->name, __func__,
2144 	    dp->anadv_100t4, dp->anadv_100fdx, dp->anadv_100hdx,
2145 	    dp->anadv_10fdx, dp->anadv_10hdx));
2146 
2147 	if (dp->anadv_100t4) {
2148 		val |= MII_ABILITY_100BASE_T4;
2149 	}
2150 	if (dp->anadv_100fdx) {
2151 		val |= MII_ABILITY_100BASE_TX_FD;
2152 	}
2153 	if (dp->anadv_100hdx) {
2154 		val |= MII_ABILITY_100BASE_TX;
2155 	}
2156 	if (dp->anadv_10fdx) {
2157 		val |= MII_ABILITY_10BASE_T_FD;
2158 	}
2159 	if (dp->anadv_10hdx) {
2160 		val |= MII_ABILITY_10BASE_T;
2161 	}
2162 
2163 	/* set flow control capability */
2164 	val |= fc_cap_encode[dp->anadv_flow_control];
2165 
2166 	DPRINTF(0, (CE_CONT,
2167 	    "!%s: %s: setting MII_AN_ADVERT reg:%b, mii_mode:%d, fc:%d",
2168 	    dp->name, __func__, val, MII_ABILITY_BITS, dp->gc.gc_mii_mode,
2169 	    dp->anadv_flow_control));
2170 
2171 	gem_mii_write(dp, MII_AN_ADVERT, val);
2172 
2173 	if (mii_stat & MII_STATUS_XSTATUS) {
2174 		/*
2175 		 * 1000Base-T GMII support
2176 		 */
2177 		if (!dp->anadv_autoneg) {
2178 			/* enable manual configuration */
2179 			val = MII_1000TC_CFG_EN;
2180 		} else {
2181 			val = 0;
2182 			if (dp->anadv_1000fdx) {
2183 				val |= MII_1000TC_ADV_FULL;
2184 			}
2185 			if (dp->anadv_1000hdx) {
2186 				val |= MII_1000TC_ADV_HALF;
2187 			}
2188 		}
2189 		DPRINTF(0, (CE_CONT,
2190 		    "!%s: %s: setting MII_1000TC reg:%b",
2191 		    dp->name, __func__, val, MII_1000TC_BITS));
2192 
2193 		gem_mii_write(dp, MII_1000TC, val);
2194 	}
2195 
2196 	return (GEM_SUCCESS);
2197 }
2198 
2199 #define	GEM_LINKUP(dp)		mac_link_update((dp)->mh, LINK_STATE_UP)
2200 #define	GEM_LINKDOWN(dp)	mac_link_update((dp)->mh, LINK_STATE_DOWN)
2201 
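/*
 * Flow control resolution table: the row is our advertised capability,
 * fc_cap_decode(advert), and the column is the link partner's,
 * fc_cap_decode(lpable).  The entries appear to follow the usual IEEE
 * 802.3 pause resolution rules; see gem_mii_link_check() for its use.
 */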
2202 static uint8_t gem_fc_result[4 /* my cap */ ][4 /* lp cap */] = {
2203 /*	 none	symm	tx	rx/symm */
2204 /* none */
2205 	{FLOW_CONTROL_NONE,
2206 		FLOW_CONTROL_NONE,
2207 			FLOW_CONTROL_NONE,
2208 				FLOW_CONTROL_NONE},
2209 /* sym */
2210 	{FLOW_CONTROL_NONE,
2211 		FLOW_CONTROL_SYMMETRIC,
2212 			FLOW_CONTROL_NONE,
2213 				FLOW_CONTROL_SYMMETRIC},
2214 /* tx */
2215 	{FLOW_CONTROL_NONE,
2216 		FLOW_CONTROL_NONE,
2217 			FLOW_CONTROL_NONE,
2218 				FLOW_CONTROL_TX_PAUSE},
2219 /* rx/symm */
2220 	{FLOW_CONTROL_NONE,
2221 		FLOW_CONTROL_SYMMETRIC,
2222 			FLOW_CONTROL_RX_PAUSE,
2223 				FLOW_CONTROL_SYMMETRIC},
2224 };
2225 
2226 static char *gem_fc_type[] = {
2227 	"without",
2228 	"with symmetric",
2229 	"with tx",
2230 	"with rx",
2231 };
2232 
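/*
 * gem_mii_link_check: one step of the MII link state machine.
 * Roughly: UNKNOWN -> RESETTING -> AUTONEGOTIATING -> AN_DONE ->
 * MEDIA_SETUP -> LINKDOWN <-> LINKUP.  It returns B_TRUE when the
 * caller should kick the tx side with mac_tx_update().
 */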
2233 boolean_t
2234 gem_mii_link_check(struct gem_dev *dp)
2235 {
2236 	uint16_t	old_mii_state;
2237 	boolean_t	tx_sched = B_FALSE;
2238 	uint16_t	status;
2239 	uint16_t	advert;
2240 	uint16_t	lpable;
2241 	uint16_t	exp;
2242 	uint16_t	ctl1000;
2243 	uint16_t	stat1000;
2244 	uint16_t	val;
2245 	clock_t		now;
2246 	clock_t		diff;
2247 	int		linkdown_action;
2248 	boolean_t	fix_phy = B_FALSE;
2249 
2250 	now = ddi_get_lbolt();
2251 	old_mii_state = dp->mii_state;
2252 
2253 	DPRINTF(3, (CE_CONT, "!%s: %s: time:%d state:%d",
2254 	    dp->name, __func__, now, dp->mii_state));
2255 
2256 	diff = now - dp->mii_last_check;
2257 	dp->mii_last_check = now;
2258 
2259 	/*
2260 	 * For NWAM, don't show linkdown state right
2261 	 * after the system boots
2262 	 */
2263 	if (dp->linkup_delay > 0) {
2264 		if (dp->linkup_delay > diff) {
2265 			dp->linkup_delay -= diff;
2266 		} else {
2267 			/* link up timeout */
2268 			dp->linkup_delay = -1;
2269 		}
2270 	}
2271 
2272 next_nowait:
2273 	switch (dp->mii_state) {
2274 	case MII_STATE_UNKNOWN:
2275 		/* power-up, DP83840 requires 32 sync bits */
2276 		(*dp->gc.gc_mii_sync)(dp);
2277 		goto reset_phy;
2278 
2279 	case MII_STATE_RESETTING:
2280 		dp->mii_timer -= diff;
2281 		if (dp->mii_timer > 0) {
2282 			/* don't read phy registers while the PHY is resetting */
2283 			dp->mii_interval = WATCH_INTERVAL_FAST;
2284 			goto next;
2285 		}
2286 
2287 		/* Timer expired, ensure reset bit is not set */
2288 
2289 		if (dp->mii_status & MII_STATUS_MFPRMBLSUPR) {
2290 			/* some phys need sync bits after reset */
2291 			(*dp->gc.gc_mii_sync)(dp);
2292 		}
2293 		val = gem_mii_read(dp, MII_CONTROL);
2294 		if (val & MII_CONTROL_RESET) {
2295 			cmn_err(CE_NOTE,
2296 			    "!%s: time:%ld resetting phy not complete."
2297 			    " mii_control:0x%b",
2298 			    dp->name, ddi_get_lbolt(),
2299 			    val, MII_CONTROL_BITS);
2300 		}
2301 
2302 		/* ensure neither isolated nor pwrdown nor auto-nego mode */
2303 		/* XXX -- this operation is required for NS DP83840A. */
2304 		gem_mii_write(dp, MII_CONTROL, 0);
2305 
2306 		/* As the PHY reset has completed, configure the PHY registers */
2307 		if ((*dp->gc.gc_mii_config)(dp) != GEM_SUCCESS) {
2308 			/* we failed to configure PHY. */
2309 			goto reset_phy;
2310 		}
2311 
2312 		/* mii_config may disable auto-negotiation */
2313 		gem_choose_forcedmode(dp);
2314 
2315 		dp->mii_lpable = 0;
2316 		dp->mii_advert = 0;
2317 		dp->mii_exp = 0;
2318 		dp->mii_ctl1000 = 0;
2319 		dp->mii_stat1000 = 0;
2320 		dp->flow_control = FLOW_CONTROL_NONE;
2321 
2322 		if (!dp->anadv_autoneg) {
2323 			/* skip auto-negotiation phase */
2324 			dp->mii_state = MII_STATE_MEDIA_SETUP;
2325 			dp->mii_timer = 0;
2326 			dp->mii_interval = 0;
2327 			goto next_nowait;
2328 		}
2329 
2330 		/* Issue auto-negotiation command */
2331 		goto autonego;
2332 
2333 	case MII_STATE_AUTONEGOTIATING:
2334 		/*
2335 		 * Autonegotiation is in progress
2336 		 */
2337 		dp->mii_timer -= diff;
2338 		if (dp->mii_timer -
2339 		    (dp->gc.gc_mii_an_timeout
2340 		    - dp->gc.gc_mii_an_wait) > 0) {
2341 			/*
2342 			 * wait for a while, typically autonegotiation
2343 			 * completes in 2.3 - 2.5 sec.
2344 			 */
2345 			dp->mii_interval = WATCH_INTERVAL_FAST;
2346 			goto next;
2347 		}
2348 
2349 		/* read PHY status */
2350 		status = gem_mii_read(dp, MII_STATUS);
2351 		DPRINTF(4, (CE_CONT,
2352 		    "!%s: %s: called: mii_state:%d MII_STATUS reg:%b",
2353 		    dp->name, __func__, dp->mii_state,
2354 		    status, MII_STATUS_BITS));
2355 
2356 		if (status & MII_STATUS_REMFAULT) {
2357 			/*
2358 			 * The link partner told us something wrong happened.
2359 			 * What do we do?
2360 			 */
2361 			cmn_err(CE_CONT,
2362 			    "!%s: auto-negotiation failed: remote fault",
2363 			    dp->name);
2364 			goto autonego;
2365 		}
2366 
2367 		if ((status & MII_STATUS_ANDONE) == 0) {
2368 			if (dp->mii_timer <= 0) {
2369 				/*
2370 				 * Auto-negotiation timed out;
2371 				 * try again without resetting the PHY.
2372 				 */
2373 				if (!dp->mii_supress_msg) {
2374 					cmn_err(CE_WARN,
2375 				    "!%s: auto-negotiation failed: timeout",
2376 					    dp->name);
2377 					dp->mii_supress_msg = B_TRUE;
2378 				}
2379 				goto autonego;
2380 			}
2381 			/*
2382 			 * Auto-negotiation is in progress. Wait.
2383 			 */
2384 			dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2385 			goto next;
2386 		}
2387 
2388 		/*
2389 		 * Auto-negotiation has completed.
2390 		 * Assume linkdown and fall through.
2391 		 */
2392 		dp->mii_supress_msg = B_FALSE;
2393 		dp->mii_state = MII_STATE_AN_DONE;
2394 		DPRINTF(0, (CE_CONT,
2395 		    "!%s: auto-negotiation completed, MII_STATUS:%b",
2396 		    dp->name, status, MII_STATUS_BITS));
2397 
2398 		if (dp->gc.gc_mii_an_delay > 0) {
2399 			dp->mii_timer = dp->gc.gc_mii_an_delay;
2400 			dp->mii_interval = drv_usectohz(20*1000);
2401 			goto next;
2402 		}
2403 
2404 		dp->mii_timer = 0;
2405 		diff = 0;
2406 		goto next_nowait;
2407 
2408 	case MII_STATE_AN_DONE:
2409 		/*
2410 		 * Auto-negotiation is done. Now we can set up the media.
2411 		 */
2412 		dp->mii_timer -= diff;
2413 		if (dp->mii_timer > 0) {
2414 			/* wait for a while */
2415 			dp->mii_interval = WATCH_INTERVAL_FAST;
2416 			goto next;
2417 		}
2418 
2419 		/*
2420 		 * set up the result of auto negotiation
2421 		 */
2422 
2423 		/*
2424 		 * Read the registers required to determine the current
2425 		 * duplex mode and media speed.
2426 		 */
2427 		if (dp->gc.gc_mii_an_delay > 0) {
2428 			/*
2429 			 * As the link watcher context has been suspended,
2430 			 * 'status' is invalid. We must re-read the status register here.
2431 			 */
2432 			status = gem_mii_read(dp, MII_STATUS);
2433 		}
2434 		advert = gem_mii_read(dp, MII_AN_ADVERT);
2435 		lpable = gem_mii_read(dp, MII_AN_LPABLE);
2436 		exp = gem_mii_read(dp, MII_AN_EXPANSION);
2437 		if (exp == 0xffff) {
2438 			/* some phys don't have exp register */
2439 			exp = 0;
2440 		}
2441 		ctl1000  = 0;
2442 		stat1000 = 0;
2443 		if (dp->mii_status & MII_STATUS_XSTATUS) {
2444 			ctl1000  = gem_mii_read(dp, MII_1000TC);
2445 			stat1000 = gem_mii_read(dp, MII_1000TS);
2446 		}
2447 		dp->mii_lpable = lpable;
2448 		dp->mii_advert = advert;
2449 		dp->mii_exp = exp;
2450 		dp->mii_ctl1000  = ctl1000;
2451 		dp->mii_stat1000 = stat1000;
2452 
2453 		cmn_err(CE_CONT,
2454 		"!%s: auto-negotiation done, advert:%b, lpable:%b, exp:%b",
2455 		    dp->name,
2456 		    advert, MII_ABILITY_BITS,
2457 		    lpable, MII_ABILITY_BITS,
2458 		    exp, MII_AN_EXP_BITS);
2459 
2460 		if (dp->mii_status & MII_STATUS_XSTATUS) {
2461 			cmn_err(CE_CONT,
2462 			    "! MII_1000TC:%b, MII_1000TS:%b",
2463 			    ctl1000, MII_1000TC_BITS,
2464 			    stat1000, MII_1000TS_BITS);
2465 		}
2466 
2467 		if (gem_population(lpable) <= 1 &&
2468 		    (exp & MII_AN_EXP_LPCANAN) == 0) {
2469 			if ((advert & MII_ABILITY_TECH) != lpable) {
2470 				cmn_err(CE_WARN,
2471 				    "!%s: but the link partner doesn't seem"
2472 				    " to have auto-negotiation capability."
2473 				    " Please check the link configuration.",
2474 				    dp->name);
2475 			}
2476 			/*
2477 			 * it should be the result of parallel detection, which
2478 			 * cannot detect duplex mode.
2479 			 */
2480 			if (lpable & MII_ABILITY_100BASE_TX) {
2481 				/*
2482 				 * we prefer full duplex mode for 100Mbps
2483 				 * connection, if we can.
2484 				 */
2485 				lpable |= advert & MII_ABILITY_100BASE_TX_FD;
2486 			}
2487 
2488 			if ((advert & lpable) == 0 &&
2489 			    lpable & MII_ABILITY_10BASE_T) {
2490 				lpable |= advert & MII_ABILITY_10BASE_T_FD;
2491 			}
2492 			/*
2493 			 * as the link partner cannot auto-negotiate, use
2494 			 * the fixed mode temporarily.
2495 			 */
2496 			fix_phy = B_TRUE;
2497 		} else if (lpable == 0) {
2498 			cmn_err(CE_WARN, "!%s: wrong lpable.", dp->name);
2499 			goto reset_phy;
2500 		}
2501 		/*
2502 		 * configure current link mode according to AN priority.
2503 		 */
2504 		val = advert & lpable;
2505 		if ((ctl1000 & MII_1000TC_ADV_FULL) &&
2506 		    (stat1000 & MII_1000TS_LP_FULL)) {
2507 			/* 1000BaseT & full duplex */
2508 			dp->speed	 = GEM_SPD_1000;
2509 			dp->full_duplex  = B_TRUE;
2510 		} else if ((ctl1000 & MII_1000TC_ADV_HALF) &&
2511 		    (stat1000 & MII_1000TS_LP_HALF)) {
2512 			/* 1000BaseT & half duplex */
2513 			dp->speed = GEM_SPD_1000;
2514 			dp->full_duplex = B_FALSE;
2515 		} else if (val & MII_ABILITY_100BASE_TX_FD) {
2516 			/* 100BaseTx & full duplex */
2517 			dp->speed = GEM_SPD_100;
2518 			dp->full_duplex = B_TRUE;
2519 		} else if (val & MII_ABILITY_100BASE_T4) {
2520 			/* 100BaseT4 & full duplex */
2521 			dp->speed = GEM_SPD_100;
2522 			dp->full_duplex = B_TRUE;
2523 		} else if (val & MII_ABILITY_100BASE_TX) {
2524 			/* 100BaseTx & half duplex */
2525 			dp->speed	 = GEM_SPD_100;
2526 			dp->full_duplex  = B_FALSE;
2527 		} else if (val & MII_ABILITY_10BASE_T_FD) {
2528 			/* 10BaseT & full duplex */
2529 			dp->speed	 = GEM_SPD_10;
2530 			dp->full_duplex  = B_TRUE;
2531 		} else if (val & MII_ABILITY_10BASE_T) {
2532 			/* 10BaseT & half duplex */
2533 			dp->speed	 = GEM_SPD_10;
2534 			dp->full_duplex  = B_FALSE;
2535 		} else {
2536 			/*
2537 			 * It seems that the link partner doesn't have
2538 			 * auto-negotiation capability and our PHY
2539 			 * could not report the correct current mode.
2540 			 * We guess the current mode from the mii_control register.
2541 			 */
2542 			val = gem_mii_read(dp, MII_CONTROL);
2543 
2544 			/* select 100m full or 10m half */
2545 			dp->speed = (val & MII_CONTROL_100MB) ?
2546 			    GEM_SPD_100 : GEM_SPD_10;
2547 			dp->full_duplex = dp->speed != GEM_SPD_10;
2548 			fix_phy = B_TRUE;
2549 
2550 			cmn_err(CE_NOTE,
2551 			    "!%s: auto-negotiation done but "
2552 			    "common ability not found.\n"
2553 			    "PHY state: control:%b advert:%b lpable:%b\n"
2554 			    "guessing %d Mbps %s duplex mode",
2555 			    dp->name,
2556 			    val, MII_CONTROL_BITS,
2557 			    advert, MII_ABILITY_BITS,
2558 			    lpable, MII_ABILITY_BITS,
2559 			    gem_speed_value[dp->speed],
2560 			    dp->full_duplex ? "full" : "half");
2561 		}
2562 
2563 		if (dp->full_duplex) {
2564 			dp->flow_control =
2565 			    gem_fc_result[fc_cap_decode(advert)]
2566 			    [fc_cap_decode(lpable)];
2567 		} else {
2568 			dp->flow_control = FLOW_CONTROL_NONE;
2569 		}
2570 		dp->mii_state = MII_STATE_MEDIA_SETUP;
2571 		/* FALLTHROUGH */
2572 
2573 	case MII_STATE_MEDIA_SETUP:
2574 		dp->mii_state = MII_STATE_LINKDOWN;
2575 		dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
2576 		DPRINTF(2, (CE_CONT, "!%s: media mode setup done", dp->name));
2577 		dp->mii_supress_msg = B_FALSE;
2578 
2579 		/* use short interval */
2580 		dp->mii_interval = WATCH_INTERVAL_FAST;
2581 
2582 		if ((!dp->anadv_autoneg) ||
2583 		    dp->gc.gc_mii_an_oneshot || fix_phy) {
2584 
2585 			/*
2586 			 * write specified mode to phy.
2587 			 */
2588 			val = gem_mii_read(dp, MII_CONTROL);
2589 			val &= ~(MII_CONTROL_SPEED | MII_CONTROL_FDUPLEX |
2590 			    MII_CONTROL_ANE | MII_CONTROL_RSAN);
2591 
2592 			if (dp->full_duplex) {
2593 				val |= MII_CONTROL_FDUPLEX;
2594 			}
2595 
2596 			switch (dp->speed) {
2597 			case GEM_SPD_1000:
2598 				val |= MII_CONTROL_1000MB;
2599 				break;
2600 
2601 			case GEM_SPD_100:
2602 				val |= MII_CONTROL_100MB;
2603 				break;
2604 
2605 			default:
2606 				cmn_err(CE_WARN, "%s: unknown speed:%d",
2607 				    dp->name, dp->speed);
2608 				/* FALLTHROUGH */
2609 			case GEM_SPD_10:
2610 				/* for GEM_SPD_10, do nothing */
2611 				break;
2612 			}
2613 
2614 			if (dp->mii_status & MII_STATUS_XSTATUS) {
2615 				gem_mii_write(dp,
2616 				    MII_1000TC, MII_1000TC_CFG_EN);
2617 			}
2618 			gem_mii_write(dp, MII_CONTROL, val);
2619 		}
2620 
2621 		if (dp->nic_state >= NIC_STATE_INITIALIZED) {
2622 			/* notify the result of auto-negotiation to mac */
2623 			(*dp->gc.gc_set_media)(dp);
2624 		}
2625 
2626 		if ((void *)dp->gc.gc_mii_tune_phy) {
2627 			/* for built-in sis900 */
2628 			/* XXX - this code should be removed.  */
2629 			(*dp->gc.gc_mii_tune_phy)(dp);
2630 		}
2631 
2632 		goto next_nowait;
2633 
2634 	case MII_STATE_LINKDOWN:
2635 		status = gem_mii_read(dp, MII_STATUS);
2636 		if (status & MII_STATUS_LINKUP) {
2637 			/*
2638 			 * Link going up
2639 			 */
2640 			dp->mii_state = MII_STATE_LINKUP;
2641 			dp->mii_supress_msg = B_FALSE;
2642 
2643 			DPRINTF(0, (CE_CONT,
2644 			    "!%s: link up detected: mii_stat:%b",
2645 			    dp->name, status, MII_STATUS_BITS));
2646 
2647 			/*
2648 			 * MII_CONTROL_100MB and  MII_CONTROL_FDUPLEX are
2649 			 * ignored when MII_CONTROL_ANE is set.
2650 			 */
2651 			cmn_err(CE_CONT,
2652 			    "!%s: Link up: %d Mbps %s duplex %s flow control",
2653 			    dp->name,
2654 			    gem_speed_value[dp->speed],
2655 			    dp->full_duplex ? "full" : "half",
2656 			    gem_fc_type[dp->flow_control]);
2657 
2658 			dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2659 
2660 			/* XXX - we need another timer to watch statistics */
2661 			if (dp->gc.gc_mii_hw_link_detection &&
2662 			    dp->nic_state == NIC_STATE_ONLINE) {
2663 				dp->mii_interval = 0;
2664 			}
2665 
2666 			if (dp->nic_state == NIC_STATE_ONLINE) {
2667 				if (!dp->mac_active) {
2668 					(void) gem_mac_start(dp);
2669 				}
2670 				tx_sched = B_TRUE;
2671 			}
2672 			goto next;
2673 		}
2674 
2675 		dp->mii_supress_msg = B_TRUE;
2676 		if (dp->anadv_autoneg) {
2677 			dp->mii_timer -= diff;
2678 			if (dp->mii_timer <= 0) {
2679 				/*
2680 				 * link down timer expired.
2681 				 * need to restart auto-negotiation.
2682 				 */
2683 				linkdown_action =
2684 				    dp->gc.gc_mii_linkdown_timeout_action;
2685 				goto restart_autonego;
2686 			}
2687 		}
2688 		/* don't change mii_state */
2689 		break;
2690 
2691 	case MII_STATE_LINKUP:
2692 		status = gem_mii_read(dp, MII_STATUS);
2693 		if ((status & MII_STATUS_LINKUP) == 0) {
2694 			/*
2695 			 * Link going down
2696 			 */
2697 			cmn_err(CE_NOTE,
2698 			    "!%s: link down detected: mii_stat:%b",
2699 			    dp->name, status, MII_STATUS_BITS);
2700 
2701 			if (dp->nic_state == NIC_STATE_ONLINE &&
2702 			    dp->mac_active &&
2703 			    dp->gc.gc_mii_stop_mac_on_linkdown) {
2704 				(void) gem_mac_stop(dp, 0);
2705 
2706 				if (dp->tx_blocked) {
2707 					/* drain tx */
2708 					tx_sched = B_TRUE;
2709 				}
2710 			}
2711 
2712 			if (dp->anadv_autoneg) {
2713 				/* need to restart auto-negotiation */
2714 				linkdown_action = dp->gc.gc_mii_linkdown_action;
2715 				goto restart_autonego;
2716 			}
2717 
2718 			dp->mii_state = MII_STATE_LINKDOWN;
2719 			dp->mii_timer = dp->gc.gc_mii_linkdown_timeout;
2720 
2721 			if ((void *)dp->gc.gc_mii_tune_phy) {
2722 				/* for built-in sis900 */
2723 				(*dp->gc.gc_mii_tune_phy)(dp);
2724 			}
2725 			dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2726 			goto next;
2727 		}
2728 
2729 		/* don't change mii_state */
2730 		if (dp->gc.gc_mii_hw_link_detection &&
2731 		    dp->nic_state == NIC_STATE_ONLINE) {
2732 			dp->mii_interval = 0;
2733 			goto next;
2734 		}
2735 		break;
2736 	}
2737 	dp->mii_interval = dp->gc.gc_mii_link_watch_interval;
2738 	goto next;
2739 
2740 	/* Actions on the end of state routine */
2741 
2742 restart_autonego:
2743 	switch (linkdown_action) {
2744 	case MII_ACTION_RESET:
2745 		if (!dp->mii_supress_msg) {
2746 			cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
2747 		}
2748 		dp->mii_supress_msg = B_TRUE;
2749 		goto reset_phy;
2750 
2751 	case MII_ACTION_NONE:
2752 		dp->mii_supress_msg = B_TRUE;
2753 		if (dp->gc.gc_mii_an_oneshot) {
2754 			goto autonego;
2755 		}
2756 		/* PHY will restart autonego automatically */
2757 		dp->mii_state = MII_STATE_AUTONEGOTIATING;
2758 		dp->mii_timer = dp->gc.gc_mii_an_timeout;
2759 		dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2760 		goto next;
2761 
2762 	case MII_ACTION_RSA:
2763 		if (!dp->mii_supress_msg) {
2764 			cmn_err(CE_CONT, "!%s: restarting auto-negotiation",
2765 			    dp->name);
2766 		}
2767 		dp->mii_supress_msg = B_TRUE;
2768 		goto autonego;
2769 
2770 	default:
2771 		cmn_err(CE_WARN, "!%s: unknown linkdown action: %d",
2772 		    dp->name, dp->gc.gc_mii_linkdown_action);
2773 		dp->mii_supress_msg = B_TRUE;
2774 	}
2775 	/* NOTREACHED */
2776 
2777 reset_phy:
2778 	if (!dp->mii_supress_msg) {
2779 		cmn_err(CE_CONT, "!%s: resetting PHY", dp->name);
2780 	}
2781 	dp->mii_state = MII_STATE_RESETTING;
2782 	dp->mii_timer = dp->gc.gc_mii_reset_timeout;
2783 	if (!dp->gc.gc_mii_dont_reset) {
2784 		gem_mii_write(dp, MII_CONTROL, MII_CONTROL_RESET);
2785 	}
2786 	dp->mii_interval = WATCH_INTERVAL_FAST;
2787 	goto next;
2788 
2789 autonego:
2790 	if (!dp->mii_supress_msg) {
2791 		cmn_err(CE_CONT, "!%s: auto-negotiation started", dp->name);
2792 	}
2793 	dp->mii_state = MII_STATE_AUTONEGOTIATING;
2794 	dp->mii_timer = dp->gc.gc_mii_an_timeout;
2795 
2796 	/* start/restart auto nego */
2797 	val = gem_mii_read(dp, MII_CONTROL) &
2798 	    ~(MII_CONTROL_ISOLATE | MII_CONTROL_PWRDN | MII_CONTROL_RESET);
2799 
2800 	gem_mii_write(dp, MII_CONTROL,
2801 	    val | MII_CONTROL_RSAN | MII_CONTROL_ANE);
2802 
2803 	dp->mii_interval = dp->gc.gc_mii_an_watch_interval;
2804 
2805 next:
2806 	if (dp->link_watcher_id == 0 && dp->mii_interval) {
2807 		/* we must schedule next mii_watcher */
2808 		dp->link_watcher_id =
2809 		    timeout((void (*)(void *))&gem_mii_link_watcher,
2810 		    (void *)dp, dp->mii_interval);
2811 	}
2812 
2813 	if (old_mii_state != dp->mii_state) {
2814 		/* notify new mii link state */
2815 		if (dp->mii_state == MII_STATE_LINKUP) {
2816 			dp->linkup_delay = 0;
2817 			GEM_LINKUP(dp);
2818 		} else if (dp->linkup_delay <= 0) {
2819 			GEM_LINKDOWN(dp);
2820 		}
2821 	} else if (dp->linkup_delay < 0) {
2822 		/* first linkup timeout */
2823 		dp->linkup_delay = 0;
2824 		GEM_LINKDOWN(dp);
2825 	}
2826 
2827 	return (tx_sched);
2828 }
2829 
2830 static void
2831 gem_mii_link_watcher(struct gem_dev *dp)
2832 {
2833 	boolean_t	tx_sched;
2834 
2835 	mutex_enter(&dp->intrlock);
2836 
2837 	dp->link_watcher_id = 0;
2838 	tx_sched = gem_mii_link_check(dp);
2839 #if GEM_DEBUG_LEVEL > 2
2840 	if (dp->link_watcher_id == 0) {
2841 		cmn_err(CE_CONT, "%s: link watcher stopped", dp->name);
2842 	}
2843 #endif
2844 	mutex_exit(&dp->intrlock);
2845 
2846 	if (tx_sched) {
2847 		/* kick potentially stopped downstream */
2848 		mac_tx_update(dp->mh);
2849 	}
2850 }
2851 
2852 int
2853 gem_mii_probe_default(struct gem_dev *dp)
2854 {
2855 	int8_t		phy;
2856 	uint16_t	status;
2857 	uint16_t	adv;
2858 	uint16_t	adv_org;
2859 
2860 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2861 
2862 	/*
2863 	 * Scan PHY
2864 	 */
2865 	/* ensure sync bits are sent */
2866 	dp->mii_status = 0;
2867 
2868 	/* Try default phy first */
2869 	if (dp->mii_phy_addr) {
2870 		status = gem_mii_read(dp, MII_STATUS);
2871 		if (status != 0xffff && status != 0) {
2872 			gem_mii_write(dp, MII_CONTROL, 0);
2873 			goto PHY_found;
2874 		}
2875 
2876 		if (dp->mii_phy_addr < 0) {
2877 			cmn_err(CE_NOTE,
2878 	    "!%s: failed to probe default internal and/or non-MII PHY",
2879 			    dp->name);
2880 			return (GEM_FAILURE);
2881 		}
2882 
2883 		cmn_err(CE_NOTE,
2884 		    "!%s: failed to probe default MII PHY at %d",
2885 		    dp->name, dp->mii_phy_addr);
2886 	}
2887 
2888 	/* Try all possible addresses */
2889 	for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
2890 		dp->mii_phy_addr = phy;
2891 		status = gem_mii_read(dp, MII_STATUS);
2892 
2893 		if (status != 0xffff && status != 0) {
2894 			gem_mii_write(dp, MII_CONTROL, 0);
2895 			goto PHY_found;
2896 		}
2897 	}
2898 
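	/*
	 * Second pass: write 0 to MII_CONTROL before reading the status,
	 * apparently to wake up PHYs that power up in isolate/power-down
	 * mode and therefore don't respond during the first pass.
	 */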
2899 	for (phy = dp->gc.gc_mii_addr_min; phy < 32; phy++) {
2900 		dp->mii_phy_addr = phy;
2901 		gem_mii_write(dp, MII_CONTROL, 0);
2902 		status = gem_mii_read(dp, MII_STATUS);
2903 
2904 		if (status != 0xffff && status != 0) {
2905 			goto PHY_found;
2906 		}
2907 	}
2908 
2909 	cmn_err(CE_NOTE, "!%s: no MII PHY found", dp->name);
2910 	dp->mii_phy_addr = -1;
2911 
2912 	return (GEM_FAILURE);
2913 
2914 PHY_found:
2915 	dp->mii_status = status;
2916 	dp->mii_phy_id  = (gem_mii_read(dp, MII_PHYIDH) << 16) |
2917 	    gem_mii_read(dp, MII_PHYIDL);
2918 
2919 	if (dp->mii_phy_addr < 0) {
2920 		cmn_err(CE_CONT, "!%s: using internal/non-MII PHY(0x%08x)",
2921 		    dp->name, dp->mii_phy_id);
2922 	} else {
2923 		cmn_err(CE_CONT, "!%s: MII PHY (0x%08x) found at %d",
2924 		    dp->name, dp->mii_phy_id, dp->mii_phy_addr);
2925 	}
2926 
2927 	cmn_err(CE_CONT, "!%s: PHY control:%b, status:%b, advert:%b, lpar:%b",
2928 	    dp->name,
2929 	    gem_mii_read(dp, MII_CONTROL), MII_CONTROL_BITS,
2930 	    status, MII_STATUS_BITS,
2931 	    gem_mii_read(dp, MII_AN_ADVERT), MII_ABILITY_BITS,
2932 	    gem_mii_read(dp, MII_AN_LPABLE), MII_ABILITY_BITS);
2933 
2934 	dp->mii_xstatus = 0;
2935 	if (status & MII_STATUS_XSTATUS) {
2936 		dp->mii_xstatus = gem_mii_read(dp, MII_XSTATUS);
2937 
2938 		cmn_err(CE_CONT, "!%s: xstatus:%b",
2939 		    dp->name, dp->mii_xstatus, MII_XSTATUS_BITS);
2940 	}
2941 
2942 	/* check if the phy can advertise pause abilities */
2943 	adv_org = gem_mii_read(dp, MII_AN_ADVERT);
2944 
2945 	gem_mii_write(dp, MII_AN_ADVERT,
2946 	    MII_ABILITY_PAUSE | MII_ABILITY_ASM_DIR);
2947 
2948 	adv = gem_mii_read(dp, MII_AN_ADVERT);
2949 
2950 	if ((adv & MII_ABILITY_PAUSE) == 0) {
2951 		dp->gc.gc_flow_control &= ~1;
2952 	}
2953 
2954 	if ((adv & MII_ABILITY_ASM_DIR) == 0) {
2955 		dp->gc.gc_flow_control &= ~2;
2956 	}
2957 
2958 	gem_mii_write(dp, MII_AN_ADVERT, adv_org);
2959 
2960 	return (GEM_SUCCESS);
2961 }
2962 
2963 static void
2964 gem_mii_start(struct gem_dev *dp)
2965 {
2966 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2967 
2968 	/* make the first call of the link check */
2969 	dp->mii_state = MII_STATE_UNKNOWN;
2970 	dp->mii_last_check = ddi_get_lbolt();
2971 	dp->linkup_delay = dp->gc.gc_mii_linkdown_timeout;
2972 	(void) gem_mii_link_watcher(dp);
2973 }
2974 
2975 static void
2976 gem_mii_stop(struct gem_dev *dp)
2977 {
2978 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
2979 
2980 	/* Ensure timer routine stopped */
2981 	mutex_enter(&dp->intrlock);
2982 	if (dp->link_watcher_id) {
2983 		while (untimeout(dp->link_watcher_id) == -1)
2984 			;
2985 		dp->link_watcher_id = 0;
2986 	}
2987 	mutex_exit(&dp->intrlock);
2988 }
2989 
2990 boolean_t
2991 gem_get_mac_addr_conf(struct gem_dev *dp)
2992 {
2993 	char		propname[32];
2994 	char		*valstr;
2995 	uint8_t		mac[ETHERADDRL];
2996 	char		*cp;
2997 	int		c;
2998 	int		i;
2999 	int		j;
3000 	uint8_t		v;
3001 	uint8_t		d;
3002 	uint8_t		ored;
3003 
3004 	DPRINTF(3, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3005 	/*
3006 	 * Get ethernet address from .conf file
3007 	 */
3008 	(void) sprintf(propname, "mac-addr");
3009 	if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dp->dip,
3010 	    DDI_PROP_DONTPASS, propname, &valstr)) !=
3011 	    DDI_PROP_SUCCESS) {
3012 		return (B_FALSE);
3013 	}
3014 
3015 	if (strlen(valstr) != ETHERADDRL*3-1) {
3016 		goto syntax_err;
3017 	}
3018 
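	/*
	 * Parse the "xx:xx:xx:xx:xx:xx" style string: two hex digits per
	 * octet, separated by colons.  'ored' accumulates the OR of all
	 * octets so that an all-zero address can be rejected below.
	 */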
3019 	cp = valstr;
3020 	j  = 0;
3021 	ored = 0;
3022 	for (;;) {
3023 		v = 0;
3024 		for (i = 0; i < 2; i++) {
3025 			c = *cp++;
3026 
3027 			if (c >= 'a' && c <= 'f') {
3028 				d = c - 'a' + 10;
3029 			} else if (c >= 'A' && c <= 'F') {
3030 				d = c - 'A' + 10;
3031 			} else if (c >= '0' && c <= '9') {
3032 				d = c - '0';
3033 			} else {
3034 				goto syntax_err;
3035 			}
3036 			v = (v << 4) | d;
3037 		}
3038 
3039 		mac[j++] = v;
3040 		ored |= v;
3041 		if (j == ETHERADDRL) {
3042 			/* done */
3043 			break;
3044 		}
3045 
3046 		c = *cp++;
3047 		if (c != ':') {
3048 			goto syntax_err;
3049 		}
3050 	}
3051 
3052 	if (ored == 0) {
3053 		goto err;
3054 	}
3055 	for (i = 0; i < ETHERADDRL; i++) {
3056 		dp->dev_addr.ether_addr_octet[i] = mac[i];
3057 	}
3058 	ddi_prop_free(valstr);
3059 	return (B_TRUE);
3060 
3061 syntax_err:
3062 	cmn_err(CE_CONT,
3063 	    "!%s: read mac addr: trying .conf: syntax err %s",
3064 	    dp->name, valstr);
3065 err:
3066 	ddi_prop_free(valstr);
3067 
3068 	return (B_FALSE);
3069 }
3070 
3071 
3072 /* ============================================================== */
3073 /*
3074  * internal start/stop interface
3075  */
3076 /* ============================================================== */
3077 static int
3078 gem_mac_set_rx_filter(struct gem_dev *dp)
3079 {
3080 	return ((*dp->gc.gc_set_rx_filter)(dp));
3081 }
3082 
3083 /*
3084  * gem_mac_init: cold start
3085  */
3086 static int
3087 gem_mac_init(struct gem_dev *dp)
3088 {
3089 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3090 
3091 	if (dp->mac_suspended) {
3092 		return (GEM_FAILURE);
3093 	}
3094 
3095 	dp->mac_active = B_FALSE;
3096 
3097 	gem_init_rx_ring(dp);
3098 	gem_init_tx_ring(dp);
3099 
3100 	/* reset transmitter state */
3101 	dp->tx_blocked = (clock_t)0;
3102 	dp->tx_busy = 0;
3103 	dp->tx_reclaim_busy = 0;
3104 	dp->tx_max_packets = dp->gc.gc_tx_buf_limit;
3105 
3106 	if ((*dp->gc.gc_init_chip)(dp) != GEM_SUCCESS) {
3107 		return (GEM_FAILURE);
3108 	}
3109 
3110 	gem_prepare_rx_buf(dp);
3111 
3112 	return (GEM_SUCCESS);
3113 }
3114 /*
3115  * gem_mac_start: warm start
3116  */
3117 static int
3118 gem_mac_start(struct gem_dev *dp)
3119 {
3120 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3121 
3122 	ASSERT(mutex_owned(&dp->intrlock));
3123 	ASSERT(dp->nic_state == NIC_STATE_ONLINE);
3124 	ASSERT(dp->mii_state ==  MII_STATE_LINKUP);
3125 
3126 	/* enable tx and rx */
3127 	mutex_enter(&dp->xmitlock);
3128 	if (dp->mac_suspended) {
3129 		mutex_exit(&dp->xmitlock);
3130 		return (GEM_FAILURE);
3131 	}
3132 	dp->mac_active = B_TRUE;
3133 	mutex_exit(&dp->xmitlock);
3134 
3135 	/* setup rx buffers */
3136 	(*dp->gc.gc_rx_start)(dp,
3137 	    SLOT(dp->rx_active_head, dp->gc.gc_rx_ring_size),
3138 	    dp->rx_active_tail - dp->rx_active_head);
3139 
3140 	if ((*dp->gc.gc_start_chip)(dp) != GEM_SUCCESS) {
3141 		cmn_err(CE_WARN, "%s: %s: start_chip: failed",
3142 		    dp->name, __func__);
3143 		return (GEM_FAILURE);
3144 	}
3145 
3146 	mutex_enter(&dp->xmitlock);
3147 
3148 	/* load untransmitted packets into the nic */
3149 	ASSERT(dp->tx_softq_tail - dp->tx_softq_head >= 0);
3150 	if (dp->tx_softq_tail - dp->tx_softq_head > 0) {
3151 		gem_tx_load_descs_oo(dp,
3152 		    dp->tx_softq_head, dp->tx_softq_tail,
3153 		    GEM_TXFLAG_HEAD);
3154 		/* issue preloaded tx buffers */
3155 		gem_tx_start_unit(dp);
3156 	}
3157 
3158 	mutex_exit(&dp->xmitlock);
3159 
3160 	return (GEM_SUCCESS);
3161 }
3162 
3163 static int
3164 gem_mac_stop(struct gem_dev *dp, uint_t flags)
3165 {
3166 	int		i;
3167 	int		wait_time; /* in uS */
3168 #ifdef GEM_DEBUG_LEVEL
3169 	clock_t		now;
3170 #endif
3171 	int		ret = GEM_SUCCESS;
3172 
3173 	DPRINTF(1, (CE_CONT, "!%s: %s: called, rx_buf_free:%d",
3174 	    dp->name, __func__, dp->rx_buf_freecnt));
3175 
3176 	ASSERT(mutex_owned(&dp->intrlock));
3177 	ASSERT(!mutex_owned(&dp->xmitlock));
3178 
3179 	/*
3180 	 * Block transmits
3181 	 */
3182 	mutex_enter(&dp->xmitlock);
3183 	if (dp->mac_suspended) {
3184 		mutex_exit(&dp->xmitlock);
3185 		return (GEM_SUCCESS);
3186 	}
3187 	dp->mac_active = B_FALSE;
3188 
3189 	while (dp->tx_busy > 0) {
3190 		cv_wait(&dp->tx_drain_cv, &dp->xmitlock);
3191 	}
3192 	mutex_exit(&dp->xmitlock);
3193 
3194 	if ((flags & GEM_RESTART_NOWAIT) == 0) {
3195 		/*
3196 		 * Wait for all tx buffers to be sent.
3197 		 */
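		/*
		 * Rough upper bound of the drain time in microseconds:
		 * 8 bits/byte * max packet size / speed in Mbps gives the
		 * per-packet transmit time in usec, doubled (presumably
		 * for margin) and multiplied by the number of outstanding
		 * tx buffers.
		 */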
3198 		wait_time =
3199 		    2 * (8 * MAXPKTBUF(dp) / gem_speed_value[dp->speed]) *
3200 		    (dp->tx_active_tail - dp->tx_active_head);
3201 
3202 		DPRINTF(0, (CE_CONT, "%s: %s: max drain time: %d uS",
3203 		    dp->name, __func__, wait_time));
3204 		i = 0;
3205 #ifdef GEM_DEBUG_LEVEL
3206 		now = ddi_get_lbolt();
3207 #endif
3208 		while (dp->tx_active_tail != dp->tx_active_head) {
3209 			if (i > wait_time) {
3210 				/* timeout */
3211 				cmn_err(CE_NOTE, "%s: %s timeout: tx drain",
3212 				    dp->name, __func__);
3213 				break;
3214 			}
3215 			(void) gem_reclaim_txbuf(dp);
3216 			drv_usecwait(100);
3217 			i += 100;
3218 		}
3219 		DPRINTF(0, (CE_NOTE,
3220 		    "!%s: %s: the nic has drained in %d uS, real %d mS",
3221 		    dp->name, __func__, i,
3222 		    10*((int)(ddi_get_lbolt() - now))));
3223 	}
3224 
3225 	/*
3226 	 * Now we can stop the nic safely.
3227 	 */
3228 	if ((*dp->gc.gc_stop_chip)(dp) != GEM_SUCCESS) {
3229 		cmn_err(CE_NOTE, "%s: %s: resetting the chip to stop it",
3230 		    dp->name, __func__);
3231 		if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
3232 			cmn_err(CE_WARN, "%s: %s: failed to reset chip",
3233 			    dp->name, __func__);
3234 		}
3235 	}
3236 
3237 	/*
3238 	 * Clear all rx buffers
3239 	 */
3240 	if (flags & GEM_RESTART_KEEP_BUF) {
3241 		(void) gem_receive(dp);
3242 	}
3243 	gem_clean_rx_buf(dp);
3244 
3245 	/*
3246 	 * Update final statistics
3247 	 */
3248 	(*dp->gc.gc_get_stats)(dp);
3249 
3250 	/*
3251 	 * Clear all pending tx packets
3252 	 */
3253 	ASSERT(dp->tx_active_tail == dp->tx_softq_head);
3254 	ASSERT(dp->tx_softq_tail == dp->tx_free_head);
3255 	if (flags & GEM_RESTART_KEEP_BUF) {
3256 		/* restore active tx buffers */
3257 		dp->tx_active_tail = dp->tx_active_head;
3258 		dp->tx_softq_head  = dp->tx_active_head;
3259 	} else {
3260 		gem_clean_tx_buf(dp);
3261 	}
3262 
3263 	return (ret);
3264 }
3265 
3266 static int
3267 gem_add_multicast(struct gem_dev *dp, const uint8_t *ep)
3268 {
3269 	int		cnt;
3270 	int		err;
3271 
3272 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3273 
3274 	mutex_enter(&dp->intrlock);
3275 	if (dp->mac_suspended) {
3276 		mutex_exit(&dp->intrlock);
3277 		return (GEM_FAILURE);
3278 	}
3279 
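	/*
	 * The perfect-match list holds at most GEM_MAXMC entries;
	 * mc_count_req counts all requested addresses while mc_count only
	 * counts those actually kept.  When they differ, RXMODE_MULTI_OVF
	 * is set, presumably so that the chip-specific set_rx_filter
	 * routine can fall back to a less selective (e.g. all-multicast)
	 * mode.
	 */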
3280 	if (dp->mc_count_req++ < GEM_MAXMC) {
3281 		/* append the new address at the end of the mclist */
3282 		cnt = dp->mc_count;
3283 		bcopy(ep, dp->mc_list[cnt].addr.ether_addr_octet,
3284 		    ETHERADDRL);
3285 		if (dp->gc.gc_multicast_hash) {
3286 			dp->mc_list[cnt].hash =
3287 			    (*dp->gc.gc_multicast_hash)(dp, (uint8_t *)ep);
3288 		}
3289 		dp->mc_count = cnt + 1;
3290 	}
3291 
3292 	if (dp->mc_count_req != dp->mc_count) {
3293 		/* multicast address list overflow */
3294 		dp->rxmode |= RXMODE_MULTI_OVF;
3295 	} else {
3296 		dp->rxmode &= ~RXMODE_MULTI_OVF;
3297 	}
3298 
3299 	/* pass the new multicast list to the hardware */
3300 	err = gem_mac_set_rx_filter(dp);
3301 
3302 	mutex_exit(&dp->intrlock);
3303 
3304 	return (err);
3305 }
3306 
3307 static int
3308 gem_remove_multicast(struct gem_dev *dp, const uint8_t *ep)
3309 {
3310 	size_t		len;
3311 	int		i;
3312 	int		cnt;
3313 	int		err;
3314 
3315 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3316 
3317 	mutex_enter(&dp->intrlock);
3318 	if (dp->mac_suspended) {
3319 		mutex_exit(&dp->intrlock);
3320 		return (GEM_FAILURE);
3321 	}
3322 
3323 	dp->mc_count_req--;
3324 	cnt = dp->mc_count;
3325 	for (i = 0; i < cnt; i++) {
3326 		if (bcmp(ep, &dp->mc_list[i].addr, ETHERADDRL)) {
3327 			continue;
3328 		}
3329 		/* shrink the mclist by copying forward */
3330 		len = (cnt - (i + 1)) * sizeof (*dp->mc_list);
3331 		if (len > 0) {
3332 			bcopy(&dp->mc_list[i+1], &dp->mc_list[i], len);
3333 		}
3334 		dp->mc_count--;
3335 		break;
3336 	}
3337 
3338 	if (dp->mc_count_req != dp->mc_count) {
3339 		/* multicast address list overflow */
3340 		dp->rxmode |= RXMODE_MULTI_OVF;
3341 	} else {
3342 		dp->rxmode &= ~RXMODE_MULTI_OVF;
3343 	}
3344 	/* In gem v2, don't hold xmitlock when calling set_rx_filter */
3345 	err = gem_mac_set_rx_filter(dp);
3346 
3347 	mutex_exit(&dp->intrlock);
3348 
3349 	return (err);
3350 }
3351 
3352 /* ============================================================== */
3353 /*
3354  * ND interface
3355  */
3356 /* ============================================================== */
3357 enum {
3358 	PARAM_AUTONEG_CAP,
3359 	PARAM_PAUSE_CAP,
3360 	PARAM_ASYM_PAUSE_CAP,
3361 	PARAM_1000FDX_CAP,
3362 	PARAM_1000HDX_CAP,
3363 	PARAM_100T4_CAP,
3364 	PARAM_100FDX_CAP,
3365 	PARAM_100HDX_CAP,
3366 	PARAM_10FDX_CAP,
3367 	PARAM_10HDX_CAP,
3368 
3369 	PARAM_ADV_AUTONEG_CAP,
3370 	PARAM_ADV_PAUSE_CAP,
3371 	PARAM_ADV_ASYM_PAUSE_CAP,
3372 	PARAM_ADV_1000FDX_CAP,
3373 	PARAM_ADV_1000HDX_CAP,
3374 	PARAM_ADV_100T4_CAP,
3375 	PARAM_ADV_100FDX_CAP,
3376 	PARAM_ADV_100HDX_CAP,
3377 	PARAM_ADV_10FDX_CAP,
3378 	PARAM_ADV_10HDX_CAP,
3379 
3380 	PARAM_LP_AUTONEG_CAP,
3381 	PARAM_LP_PAUSE_CAP,
3382 	PARAM_LP_ASYM_PAUSE_CAP,
3383 	PARAM_LP_1000FDX_CAP,
3384 	PARAM_LP_1000HDX_CAP,
3385 	PARAM_LP_100T4_CAP,
3386 	PARAM_LP_100FDX_CAP,
3387 	PARAM_LP_100HDX_CAP,
3388 	PARAM_LP_10FDX_CAP,
3389 	PARAM_LP_10HDX_CAP,
3390 
3391 	PARAM_LINK_STATUS,
3392 	PARAM_LINK_SPEED,
3393 	PARAM_LINK_DUPLEX,
3394 
3395 	PARAM_LINK_AUTONEG,
3396 	PARAM_LINK_RX_PAUSE,
3397 	PARAM_LINK_TX_PAUSE,
3398 
3399 	PARAM_LOOP_MODE,
3400 	PARAM_MSI_CNT,
3401 
3402 #ifdef DEBUG_RESUME
3403 	PARAM_RESUME_TEST,
3404 #endif
3405 	PARAM_COUNT
3406 };
3407 
3408 enum ioc_reply {
3409 	IOC_INVAL = -1,				/* bad, NAK with EINVAL	*/
3410 	IOC_DONE,				/* OK, reply sent	*/
3411 	IOC_ACK,				/* OK, just send ACK	*/
3412 	IOC_REPLY,				/* OK, just send reply	*/
3413 	IOC_RESTART_ACK,			/* OK, restart & ACK	*/
3414 	IOC_RESTART_REPLY			/* OK, restart & reply	*/
3415 };
3416 
3417 struct gem_nd_arg {
3418 	struct gem_dev	*dp;
3419 	int		item;
3420 };
3421 
3422 static int
3423 gem_param_get(queue_t *q, mblk_t *mp, caddr_t arg, cred_t *credp)
3424 {
3425 	struct gem_dev	*dp = ((struct gem_nd_arg *)(void *)arg)->dp;
3426 	int		item = ((struct gem_nd_arg *)(void *)arg)->item;
3427 	long		val;
3428 
3429 	DPRINTF(0, (CE_CONT, "!%s: %s: called, item:%d",
3430 	    dp->name, __func__, item));
3431 
3432 	switch (item) {
3433 	case PARAM_AUTONEG_CAP:
3434 		val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
3435 		DPRINTF(0, (CE_CONT, "autoneg_cap:%d", val));
3436 		break;
3437 
3438 	case PARAM_PAUSE_CAP:
3439 		val = BOOLEAN(dp->gc.gc_flow_control & 1);
3440 		break;
3441 
3442 	case PARAM_ASYM_PAUSE_CAP:
3443 		val = BOOLEAN(dp->gc.gc_flow_control & 2);
3444 		break;
3445 
3446 	case PARAM_1000FDX_CAP:
3447 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
3448 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
3449 		break;
3450 
3451 	case PARAM_1000HDX_CAP:
3452 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
3453 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
3454 		break;
3455 
3456 	case PARAM_100T4_CAP:
3457 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
3458 		break;
3459 
3460 	case PARAM_100FDX_CAP:
3461 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
3462 		break;
3463 
3464 	case PARAM_100HDX_CAP:
3465 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
3466 		break;
3467 
3468 	case PARAM_10FDX_CAP:
3469 		val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
3470 		break;
3471 
3472 	case PARAM_10HDX_CAP:
3473 		val = BOOLEAN(dp->mii_status & MII_STATUS_10);
3474 		break;
3475 
3476 	case PARAM_ADV_AUTONEG_CAP:
3477 		val = dp->anadv_autoneg;
3478 		break;
3479 
3480 	case PARAM_ADV_PAUSE_CAP:
3481 		val = BOOLEAN(dp->anadv_flow_control & 1);
3482 		break;
3483 
3484 	case PARAM_ADV_ASYM_PAUSE_CAP:
3485 		val = BOOLEAN(dp->anadv_flow_control & 2);
3486 		break;
3487 
3488 	case PARAM_ADV_1000FDX_CAP:
3489 		val = dp->anadv_1000fdx;
3490 		break;
3491 
3492 	case PARAM_ADV_1000HDX_CAP:
3493 		val = dp->anadv_1000hdx;
3494 		break;
3495 
3496 	case PARAM_ADV_100T4_CAP:
3497 		val = dp->anadv_100t4;
3498 		break;
3499 
3500 	case PARAM_ADV_100FDX_CAP:
3501 		val = dp->anadv_100fdx;
3502 		break;
3503 
3504 	case PARAM_ADV_100HDX_CAP:
3505 		val = dp->anadv_100hdx;
3506 		break;
3507 
3508 	case PARAM_ADV_10FDX_CAP:
3509 		val = dp->anadv_10fdx;
3510 		break;
3511 
3512 	case PARAM_ADV_10HDX_CAP:
3513 		val = dp->anadv_10hdx;
3514 		break;
3515 
3516 	case PARAM_LP_AUTONEG_CAP:
3517 		val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3518 		break;
3519 
3520 	case PARAM_LP_PAUSE_CAP:
3521 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
3522 		break;
3523 
3524 	case PARAM_LP_ASYM_PAUSE_CAP:
3525 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASM_DIR);
3526 		break;
3527 
3528 	case PARAM_LP_1000FDX_CAP:
3529 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
3530 		break;
3531 
3532 	case PARAM_LP_1000HDX_CAP:
3533 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
3534 		break;
3535 
3536 	case PARAM_LP_100T4_CAP:
3537 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
3538 		break;
3539 
3540 	case PARAM_LP_100FDX_CAP:
3541 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
3542 		break;
3543 
3544 	case PARAM_LP_100HDX_CAP:
3545 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
3546 		break;
3547 
3548 	case PARAM_LP_10FDX_CAP:
3549 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
3550 		break;
3551 
3552 	case PARAM_LP_10HDX_CAP:
3553 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
3554 		break;
3555 
3556 	case PARAM_LINK_STATUS:
3557 		val = (dp->mii_state == MII_STATE_LINKUP);
3558 		break;
3559 
3560 	case PARAM_LINK_SPEED:
3561 		val = gem_speed_value[dp->speed];
3562 		break;
3563 
3564 	case PARAM_LINK_DUPLEX:
3565 		val = 0;
3566 		if (dp->mii_state == MII_STATE_LINKUP) {
3567 			val = dp->full_duplex ? 2 : 1;
3568 		}
3569 		break;
3570 
3571 	case PARAM_LINK_AUTONEG:
3572 		val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
3573 		break;
3574 
3575 	case PARAM_LINK_RX_PAUSE:
3576 		val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3577 		    (dp->flow_control == FLOW_CONTROL_RX_PAUSE);
3578 		break;
3579 
3580 	case PARAM_LINK_TX_PAUSE:
3581 		val = (dp->flow_control == FLOW_CONTROL_SYMMETRIC) ||
3582 		    (dp->flow_control == FLOW_CONTROL_TX_PAUSE);
3583 		break;
3584 
3585 #ifdef DEBUG_RESUME
3586 	case PARAM_RESUME_TEST:
3587 		val = 0;
3588 		break;
3589 #endif
3590 	default:
3591 		cmn_err(CE_WARN, "%s: unimplemented ndd control (%d)",
3592 		    dp->name, item);
3593 		break;
3594 	}
3595 
3596 	(void) mi_mpprintf(mp, "%ld", val);
3597 
3598 	return (0);
3599 }
3600 
3601 static int
3602 gem_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t arg, cred_t *credp)
3603 {
3604 	struct gem_dev	*dp = ((struct gem_nd_arg *)(void *)arg)->dp;
3605 	int		item = ((struct gem_nd_arg *)(void *)arg)->item;
3606 	long		val;
3607 	char		*end;
3608 
3609 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3610 	if (ddi_strtol(value, &end, 10, &val)) {
3611 		return (EINVAL);
3612 	}
3613 	if (end == value) {
3614 		return (EINVAL);
3615 	}
3616 
3617 	switch (item) {
3618 	case PARAM_ADV_AUTONEG_CAP:
3619 		if (val != 0 && val != 1) {
3620 			goto err;
3621 		}
3622 		if (val && (dp->mii_status & MII_STATUS_CANAUTONEG) == 0) {
3623 			goto err;
3624 		}
3625 		dp->anadv_autoneg = (int)val;
3626 		break;
3627 
3628 	case PARAM_ADV_PAUSE_CAP:
3629 		if (val != 0 && val != 1) {
3630 			goto err;
3631 		}
3632 		if (val) {
3633 			dp->anadv_flow_control |= 1;
3634 		} else {
3635 			dp->anadv_flow_control &= ~1;
3636 		}
3637 		break;
3638 
3639 	case PARAM_ADV_ASYM_PAUSE_CAP:
3640 		if (val != 0 && val != 1) {
3641 			goto err;
3642 		}
3643 		if (val) {
3644 			dp->anadv_flow_control |= 2;
3645 		} else {
3646 			dp->anadv_flow_control &= ~2;
3647 		}
3648 		break;
3649 
3650 	case PARAM_ADV_1000FDX_CAP:
3651 		if (val != 0 && val != 1) {
3652 			goto err;
3653 		}
3654 		if (val && (dp->mii_xstatus &
3655 		    (MII_XSTATUS_1000BASET_FD |
3656 		    MII_XSTATUS_1000BASEX_FD)) == 0) {
3657 			goto err;
3658 		}
3659 		dp->anadv_1000fdx = (int)val;
3660 		break;
3661 
3662 	case PARAM_ADV_1000HDX_CAP:
3663 		if (val != 0 && val != 1) {
3664 			goto err;
3665 		}
3666 		if (val && (dp->mii_xstatus &
3667 		    (MII_XSTATUS_1000BASET | MII_XSTATUS_1000BASEX)) == 0) {
3668 			goto err;
3669 		}
3670 		dp->anadv_1000hdx = (int)val;
3671 		break;
3672 
3673 	case PARAM_ADV_100T4_CAP:
3674 		if (val != 0 && val != 1) {
3675 			goto err;
3676 		}
3677 		if (val && (dp->mii_status & MII_STATUS_100_BASE_T4) == 0) {
3678 			goto err;
3679 		}
3680 		dp->anadv_100t4 = (int)val;
3681 		break;
3682 
3683 	case PARAM_ADV_100FDX_CAP:
3684 		if (val != 0 && val != 1) {
3685 			goto err;
3686 		}
3687 		if (val && (dp->mii_status & MII_STATUS_100_BASEX_FD) == 0) {
3688 			goto err;
3689 		}
3690 		dp->anadv_100fdx = (int)val;
3691 		break;
3692 
3693 	case PARAM_ADV_100HDX_CAP:
3694 		if (val != 0 && val != 1) {
3695 			goto err;
3696 		}
3697 		if (val && (dp->mii_status & MII_STATUS_100_BASEX) == 0) {
3698 			goto err;
3699 		}
3700 		dp->anadv_100hdx = (int)val;
3701 		break;
3702 
3703 	case PARAM_ADV_10FDX_CAP:
3704 		if (val != 0 && val != 1) {
3705 			goto err;
3706 		}
3707 		if (val && (dp->mii_status & MII_STATUS_10_FD) == 0) {
3708 			goto err;
3709 		}
3710 		dp->anadv_10fdx = (int)val;
3711 		break;
3712 
3713 	case PARAM_ADV_10HDX_CAP:
3714 		if (val != 0 && val != 1) {
3715 			goto err;
3716 		}
3717 		if (val && (dp->mii_status & MII_STATUS_10) == 0) {
3718 			goto err;
3719 		}
3720 		dp->anadv_10hdx = (int)val;
3721 		break;
3722 	}
3723 
3724 	/* sync with PHY */
3725 	gem_choose_forcedmode(dp);
3726 
3727 	dp->mii_state = MII_STATE_UNKNOWN;
3728 	if (dp->gc.gc_mii_hw_link_detection && dp->link_watcher_id == 0) {
3729 		/* XXX - Can we ignore the return code ? */
3730 		(void) gem_mii_link_check(dp);
3731 	}
3732 
3733 	return (0);
3734 err:
3735 	return (EINVAL);
3736 }
3737 
3738 static void
3739 gem_nd_load(struct gem_dev *dp, char *name, ndgetf_t gf, ndsetf_t sf, int item)
3740 {
3741 	struct gem_nd_arg	*arg;
3742 
3743 	ASSERT(item >= 0);
3744 	ASSERT(item < PARAM_COUNT);
3745 
3746 	arg = &((struct gem_nd_arg *)(void *)dp->nd_arg_p)[item];
3747 	arg->dp = dp;
3748 	arg->item = item;
3749 
3750 	DPRINTF(2, (CE_CONT, "!%s: %s: name:%s, item:%d",
3751 	    dp->name, __func__, name, item));
3752 	(void) nd_load(&dp->nd_data_p, name, gf, sf, (caddr_t)arg);
3753 }
3754 
3755 static void
3756 gem_nd_setup(struct gem_dev *dp)
3757 {
3758 	DPRINTF(0, (CE_CONT, "!%s: %s: called, mii_status:0x%b",
3759 	    dp->name, __func__, dp->mii_status, MII_STATUS_BITS));
3760 
3761 	ASSERT(dp->nd_arg_p == NULL);
3762 
3763 	dp->nd_arg_p =
3764 	    kmem_zalloc(sizeof (struct gem_nd_arg) * PARAM_COUNT, KM_SLEEP);
3765 
3766 #define	SETFUNC(x)	((x) ? gem_param_set : NULL)
3767 
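	/*
	 * SETFUNC() makes an ndd parameter writable only when the
	 * corresponding capability is present; otherwise the parameter
	 * gets no set routine and stays read-only.  These parameters are
	 * accessed with ndd(1M), e.g. something like
	 * "ndd -get /dev/sfe0 link_speed" (the exact device node name
	 * depends on the leaf driver; this is just an illustration).
	 */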
3768 	gem_nd_load(dp, "autoneg_cap",
3769 	    gem_param_get, NULL, PARAM_AUTONEG_CAP);
3770 	gem_nd_load(dp, "pause_cap",
3771 	    gem_param_get, NULL, PARAM_PAUSE_CAP);
3772 	gem_nd_load(dp, "asym_pause_cap",
3773 	    gem_param_get, NULL, PARAM_ASYM_PAUSE_CAP);
3774 	gem_nd_load(dp, "1000fdx_cap",
3775 	    gem_param_get, NULL, PARAM_1000FDX_CAP);
3776 	gem_nd_load(dp, "1000hdx_cap",
3777 	    gem_param_get, NULL, PARAM_1000HDX_CAP);
3778 	gem_nd_load(dp, "100T4_cap",
3779 	    gem_param_get, NULL, PARAM_100T4_CAP);
3780 	gem_nd_load(dp, "100fdx_cap",
3781 	    gem_param_get, NULL, PARAM_100FDX_CAP);
3782 	gem_nd_load(dp, "100hdx_cap",
3783 	    gem_param_get, NULL, PARAM_100HDX_CAP);
3784 	gem_nd_load(dp, "10fdx_cap",
3785 	    gem_param_get, NULL, PARAM_10FDX_CAP);
3786 	gem_nd_load(dp, "10hdx_cap",
3787 	    gem_param_get, NULL, PARAM_10HDX_CAP);
3788 
3789 	/* Our advertised capabilities */
3790 	gem_nd_load(dp, "adv_autoneg_cap", gem_param_get,
3791 	    SETFUNC(dp->mii_status & MII_STATUS_CANAUTONEG),
3792 	    PARAM_ADV_AUTONEG_CAP);
3793 	gem_nd_load(dp, "adv_pause_cap", gem_param_get,
3794 	    SETFUNC(dp->gc.gc_flow_control & 1),
3795 	    PARAM_ADV_PAUSE_CAP);
3796 	gem_nd_load(dp, "adv_asym_pause_cap", gem_param_get,
3797 	    SETFUNC(dp->gc.gc_flow_control & 2),
3798 	    PARAM_ADV_ASYM_PAUSE_CAP);
3799 	gem_nd_load(dp, "adv_1000fdx_cap", gem_param_get,
3800 	    SETFUNC(dp->mii_xstatus &
3801 	    (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD)),
3802 	    PARAM_ADV_1000FDX_CAP);
3803 	gem_nd_load(dp, "adv_1000hdx_cap", gem_param_get,
3804 	    SETFUNC(dp->mii_xstatus &
3805 	    (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET)),
3806 	    PARAM_ADV_1000HDX_CAP);
3807 	gem_nd_load(dp, "adv_100T4_cap", gem_param_get,
3808 	    SETFUNC((dp->mii_status & MII_STATUS_100_BASE_T4) &&
3809 	    !dp->mii_advert_ro),
3810 	    PARAM_ADV_100T4_CAP);
3811 	gem_nd_load(dp, "adv_100fdx_cap", gem_param_get,
3812 	    SETFUNC((dp->mii_status & MII_STATUS_100_BASEX_FD) &&
3813 	    !dp->mii_advert_ro),
3814 	    PARAM_ADV_100FDX_CAP);
3815 	gem_nd_load(dp, "adv_100hdx_cap", gem_param_get,
3816 	    SETFUNC((dp->mii_status & MII_STATUS_100_BASEX) &&
3817 	    !dp->mii_advert_ro),
3818 	    PARAM_ADV_100HDX_CAP);
3819 	gem_nd_load(dp, "adv_10fdx_cap", gem_param_get,
3820 	    SETFUNC((dp->mii_status & MII_STATUS_10_FD) &&
3821 	    !dp->mii_advert_ro),
3822 	    PARAM_ADV_10FDX_CAP);
3823 	gem_nd_load(dp, "adv_10hdx_cap", gem_param_get,
3824 	    SETFUNC((dp->mii_status & MII_STATUS_10) &&
3825 	    !dp->mii_advert_ro),
3826 	    PARAM_ADV_10HDX_CAP);
3827 
3828 	/* Partner's advertised capabilities */
3829 	gem_nd_load(dp, "lp_autoneg_cap",
3830 	    gem_param_get, NULL, PARAM_LP_AUTONEG_CAP);
3831 	gem_nd_load(dp, "lp_pause_cap",
3832 	    gem_param_get, NULL, PARAM_LP_PAUSE_CAP);
3833 	gem_nd_load(dp, "lp_asym_pause_cap",
3834 	    gem_param_get, NULL, PARAM_LP_ASYM_PAUSE_CAP);
3835 	gem_nd_load(dp, "lp_1000fdx_cap",
3836 	    gem_param_get, NULL, PARAM_LP_1000FDX_CAP);
3837 	gem_nd_load(dp, "lp_1000hdx_cap",
3838 	    gem_param_get, NULL, PARAM_LP_1000HDX_CAP);
3839 	gem_nd_load(dp, "lp_100T4_cap",
3840 	    gem_param_get, NULL, PARAM_LP_100T4_CAP);
3841 	gem_nd_load(dp, "lp_100fdx_cap",
3842 	    gem_param_get, NULL, PARAM_LP_100FDX_CAP);
3843 	gem_nd_load(dp, "lp_100hdx_cap",
3844 	    gem_param_get, NULL, PARAM_LP_100HDX_CAP);
3845 	gem_nd_load(dp, "lp_10fdx_cap",
3846 	    gem_param_get, NULL, PARAM_LP_10FDX_CAP);
3847 	gem_nd_load(dp, "lp_10hdx_cap",
3848 	    gem_param_get, NULL, PARAM_LP_10HDX_CAP);
3849 
3850 	/* Current operating modes */
3851 	gem_nd_load(dp, "link_status",
3852 	    gem_param_get, NULL, PARAM_LINK_STATUS);
3853 	gem_nd_load(dp, "link_speed",
3854 	    gem_param_get, NULL, PARAM_LINK_SPEED);
3855 	gem_nd_load(dp, "link_duplex",
3856 	    gem_param_get, NULL, PARAM_LINK_DUPLEX);
3857 	gem_nd_load(dp, "link_autoneg",
3858 	    gem_param_get, NULL, PARAM_LINK_AUTONEG);
3859 	gem_nd_load(dp, "link_rx_pause",
3860 	    gem_param_get, NULL, PARAM_LINK_RX_PAUSE);
3861 	gem_nd_load(dp, "link_tx_pause",
3862 	    gem_param_get, NULL, PARAM_LINK_TX_PAUSE);
3863 #ifdef DEBUG_RESUME
3864 	gem_nd_load(dp, "resume_test",
3865 	    gem_param_get, NULL, PARAM_RESUME_TEST);
3866 #endif
3867 #undef	SETFUNC
3868 }
3869 
3870 static
3871 enum ioc_reply
3872 gem_nd_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp, struct iocblk *iocp)
3873 {
3874 	boolean_t	ok;
3875 
3876 	ASSERT(mutex_owned(&dp->intrlock));
3877 
3878 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3879 
3880 	switch (iocp->ioc_cmd) {
3881 	case ND_GET:
3882 		ok = nd_getset(wq, dp->nd_data_p, mp);
3883 		DPRINTF(0, (CE_CONT,
3884 		    "%s: get %s", dp->name, ok ? "OK" : "FAIL"));
3885 		return (ok ? IOC_REPLY : IOC_INVAL);
3886 
3887 	case ND_SET:
3888 		ok = nd_getset(wq, dp->nd_data_p, mp);
3889 
3890 		DPRINTF(0, (CE_CONT, "%s: set %s err %d",
3891 		    dp->name, ok ? "OK" : "FAIL", iocp->ioc_error));
3892 
3893 		if (!ok) {
3894 			return (IOC_INVAL);
3895 		}
3896 
3897 		if (iocp->ioc_error) {
3898 			return (IOC_REPLY);
3899 		}
3900 
3901 		return (IOC_RESTART_REPLY);
3902 	}
3903 
3904 	cmn_err(CE_WARN, "%s: invalid cmd 0x%x", dp->name, iocp->ioc_cmd);
3905 
3906 	return (IOC_INVAL);
3907 }
3908 
3909 static void
3910 gem_nd_cleanup(struct gem_dev *dp)
3911 {
3912 	ASSERT(dp->nd_data_p != NULL);
3913 	ASSERT(dp->nd_arg_p != NULL);
3914 
3915 	nd_free(&dp->nd_data_p);
3916 
3917 	kmem_free(dp->nd_arg_p, sizeof (struct gem_nd_arg) * PARAM_COUNT);
3918 	dp->nd_arg_p = NULL;
3919 }
3920 
3921 static void
3922 gem_mac_ioctl(struct gem_dev *dp, queue_t *wq, mblk_t *mp)
3923 {
3924 	struct iocblk	*iocp;
3925 	enum ioc_reply	status;
3926 	int		cmd;
3927 
3928 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
3929 
3930 	/*
3931 	 * Validate the command before bothering with the mutex ...
3932 	 */
3933 	iocp = (void *)mp->b_rptr;
3934 	iocp->ioc_error = 0;
3935 	cmd = iocp->ioc_cmd;
3936 
3937 	DPRINTF(0, (CE_CONT, "%s: %s cmd:0x%x", dp->name, __func__, cmd));
3938 
3939 	mutex_enter(&dp->intrlock);
3940 	mutex_enter(&dp->xmitlock);
3941 
3942 	switch (cmd) {
3943 	default:
3944 		_NOTE(NOTREACHED)
3945 		status = IOC_INVAL;
3946 		break;
3947 
3948 	case ND_GET:
3949 	case ND_SET:
3950 		status = gem_nd_ioctl(dp, wq, mp, iocp);
3951 		break;
3952 	}
3953 
3954 	mutex_exit(&dp->xmitlock);
3955 	mutex_exit(&dp->intrlock);
3956 
3957 #ifdef DEBUG_RESUME
3958 	if (cmd == ND_GET)  {
3959 		gem_suspend(dp->dip);
3960 		gem_resume(dp->dip);
3961 	}
3962 #endif
3963 	/*
3964 	 * Finally, decide how to reply
3965 	 */
3966 	switch (status) {
3967 	default:
3968 	case IOC_INVAL:
3969 		/*
3970 		 * Error, reply with a NAK and EINVAL or the specified error
3971 		 */
3972 		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
3973 		    EINVAL : iocp->ioc_error);
3974 		break;
3975 
3976 	case IOC_DONE:
3977 		/*
3978 		 * OK, reply already sent
3979 		 */
3980 		break;
3981 
3982 	case IOC_RESTART_ACK:
3983 	case IOC_ACK:
3984 		/*
3985 		 * OK, reply with an ACK
3986 		 */
3987 		miocack(wq, mp, 0, 0);
3988 		break;
3989 
3990 	case IOC_RESTART_REPLY:
3991 	case IOC_REPLY:
3992 		/*
3993 		 * OK, send prepared reply as ACK or NAK
3994 		 */
3995 		mp->b_datap->db_type =
3996 		    iocp->ioc_error == 0 ? M_IOCACK : M_IOCNAK;
3997 		qreply(wq, mp);
3998 		break;
3999 	}
4000 }
4001 
4002 #ifndef SYS_MAC_H
4003 #define	XCVR_UNDEFINED	0
4004 #define	XCVR_NONE	1
4005 #define	XCVR_10		2
4006 #define	XCVR_100T4	3
4007 #define	XCVR_100X	4
4008 #define	XCVR_100T2	5
4009 #define	XCVR_1000X	6
4010 #define	XCVR_1000T	7
4011 #endif
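/*
 * Map the current MII status/extended-status bits to the XCVR_* code
 * reported through ETHER_STAT_XCVR_INUSE: gigabit abilities from the
 * extended status register take precedence, otherwise the fastest
 * 100/10 Mbps ability found is reported.
 */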
4012 static int
4013 gem_mac_xcvr_inuse(struct gem_dev *dp)
4014 {
4015 	int	val = XCVR_UNDEFINED;
4016 
4017 	if ((dp->mii_status & MII_STATUS_XSTATUS) == 0) {
4018 		if (dp->mii_status & MII_STATUS_100_BASE_T4) {
4019 			val = XCVR_100T4;
4020 		} else if (dp->mii_status &
4021 		    (MII_STATUS_100_BASEX_FD |
4022 		    MII_STATUS_100_BASEX)) {
4023 			val = XCVR_100X;
4024 		} else if (dp->mii_status &
4025 		    (MII_STATUS_100_BASE_T2_FD |
4026 		    MII_STATUS_100_BASE_T2)) {
4027 			val = XCVR_100T2;
4028 		} else if (dp->mii_status &
4029 		    (MII_STATUS_10_FD | MII_STATUS_10)) {
4030 			val = XCVR_10;
4031 		}
4032 	} else if (dp->mii_xstatus &
4033 	    (MII_XSTATUS_1000BASET_FD | MII_XSTATUS_1000BASET)) {
4034 		val = XCVR_1000T;
4035 	} else if (dp->mii_xstatus &
4036 	    (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASEX)) {
4037 		val = XCVR_1000X;
4038 	}
4039 
4040 	return (val);
4041 }
4042 
4043 /* ============================================================== */
4044 /*
4045  * GLDv3 interface
4046  */
4047 /* ============================================================== */
4048 static int		gem_m_getstat(void *, uint_t, uint64_t *);
4049 static int		gem_m_start(void *);
4050 static void		gem_m_stop(void *);
4051 static int		gem_m_setpromisc(void *, boolean_t);
4052 static int		gem_m_multicst(void *, boolean_t, const uint8_t *);
4053 static int		gem_m_unicst(void *, const uint8_t *);
4054 static mblk_t		*gem_m_tx(void *, mblk_t *);
4055 static void		gem_m_resources(void *);
4056 static void		gem_m_ioctl(void *, queue_t *, mblk_t *);
4057 static boolean_t	gem_m_getcapab(void *, mac_capab_t, void *);
4058 
4059 #define	GEM_M_CALLBACK_FLAGS	(MC_RESOURCES | MC_IOCTL | MC_GETCAPAB)
4060 
4061 static mac_callbacks_t gem_m_callbacks = {
4062 	GEM_M_CALLBACK_FLAGS,
4063 	gem_m_getstat,
4064 	gem_m_start,
4065 	gem_m_stop,
4066 	gem_m_setpromisc,
4067 	gem_m_multicst,
4068 	gem_m_unicst,
4069 	gem_m_tx,
4070 	gem_m_resources,
4071 	gem_m_ioctl,
4072 	gem_m_getcapab,
4073 };
4074 
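/*
 * mc_start(9E) entry point: initialize the chip, program the rx filter
 * with the factory mac address, start the mac if the link is already up,
 * and arm the tx timeout watcher.  On any failure the nic is left in the
 * STOPPED state and EIO is returned to the MAC layer.
 */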
4075 static int
4076 gem_m_start(void *arg)
4077 {
4078 	int		err = 0;
4079 	struct gem_dev *dp = arg;
4080 
4081 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4082 
4083 	mutex_enter(&dp->intrlock);
4084 	if (dp->mac_suspended) {
4085 		err = EIO;
4086 		goto x;
4087 	}
4088 	if (gem_mac_init(dp) != GEM_SUCCESS) {
4089 		err = EIO;
4090 		goto x;
4091 	}
4092 	dp->nic_state = NIC_STATE_INITIALIZED;
4093 
4094 	/* reset rx filter state */
4095 	dp->mc_count = 0;
4096 	dp->mc_count_req = 0;
4097 
4098 	/* set up the media mode if the link is already up */
4099 	if (dp->mii_state == MII_STATE_LINKUP) {
4100 		(dp->gc.gc_set_media)(dp);
4101 	}
4102 
4103 	/* setup initial rx filter */
4104 	bcopy(dp->dev_addr.ether_addr_octet,
4105 	    dp->cur_addr.ether_addr_octet, ETHERADDRL);
4106 	dp->rxmode |= RXMODE_ENABLE;
4107 
4108 	if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4109 		err = EIO;
4110 		goto x;
4111 	}
4112 
4113 	dp->nic_state = NIC_STATE_ONLINE;
4114 	if (dp->mii_state == MII_STATE_LINKUP) {
4115 		if (gem_mac_start(dp) != GEM_SUCCESS) {
4116 			err = EIO;
4117 			goto x;
4118 		}
4119 	}
4120 
4121 	dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
4122 	    (void *)dp, dp->gc.gc_tx_timeout_interval);
4123 	mutex_exit(&dp->intrlock);
4124 
4125 	return (0);
4126 x:
4127 	dp->nic_state = NIC_STATE_STOPPED;
4128 	mutex_exit(&dp->intrlock);
4129 	return (err);
4130 }
4131 
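/*
 * mc_stop(9E) entry point.  Rx is disabled first so no new packets
 * arrive, then intrlock is dropped while the tx timeout is cancelled
 * (the timeout handler takes the same locks, so untimeout() must not be
 * called with them held), and finally any running interrupt handler is
 * drained before the mac is stopped.
 */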
4132 static void
4133 gem_m_stop(void *arg)
4134 {
4135 	struct gem_dev	*dp = arg;
4136 
4137 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4138 
4139 	/* stop rx */
4140 	mutex_enter(&dp->intrlock);
4141 	if (dp->mac_suspended) {
4142 		mutex_exit(&dp->intrlock);
4143 		return;
4144 	}
4145 	dp->rxmode &= ~RXMODE_ENABLE;
4146 	(void) gem_mac_set_rx_filter(dp);
4147 	mutex_exit(&dp->intrlock);
4148 
4149 	/* stop tx timeout watcher */
4150 	if (dp->timeout_id) {
4151 		while (untimeout(dp->timeout_id) == -1)
4152 			;
4153 		dp->timeout_id = 0;
4154 	}
4155 
4156 	/* make the nic state inactive */
4157 	mutex_enter(&dp->intrlock);
4158 	if (dp->mac_suspended) {
4159 		mutex_exit(&dp->intrlock);
4160 		return;
4161 	}
4162 	dp->nic_state = NIC_STATE_STOPPED;
4163 
4164 	/* deassert mac_active so the interrupt handler backs off */
4165 	mutex_enter(&dp->xmitlock);
4166 	dp->mac_active = B_FALSE;
4167 	mutex_exit(&dp->xmitlock);
4168 
4169 	/* wait for any running interrupt handler to finish */
4170 	while (dp->intr_busy) {
4171 		cv_wait(&dp->tx_drain_cv, &dp->intrlock);
4172 	}
4173 	(void) gem_mac_stop(dp, 0);
4174 	mutex_exit(&dp->intrlock);
4175 }
4176 
4177 static int
4178 gem_m_multicst(void *arg, boolean_t add, const uint8_t *ep)
4179 {
4180 	int		err;
4181 	int		ret;
4182 	struct gem_dev	*dp = arg;
4183 
4184 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4185 
4186 	if (add) {
4187 		ret = gem_add_multicast(dp, ep);
4188 	} else {
4189 		ret = gem_remove_multicast(dp, ep);
4190 	}
4191 
4192 	err = 0;
4193 	if (ret != GEM_SUCCESS) {
4194 		err = EIO;
4195 	}
4196 
4197 	return (err);
4198 }
4199 
4200 static int
4201 gem_m_setpromisc(void *arg, boolean_t on)
4202 {
4203 	int		err = 0;	/* no error */
4204 	struct gem_dev	*dp = arg;
4205 
4206 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4207 
4208 	mutex_enter(&dp->intrlock);
4209 	if (dp->mac_suspended) {
4210 		mutex_exit(&dp->intrlock);
4211 		return (EIO);
4212 	}
4213 	if (on) {
4214 		dp->rxmode |= RXMODE_PROMISC;
4215 	} else {
4216 		dp->rxmode &= ~RXMODE_PROMISC;
4217 	}
4218 
4219 	if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4220 		err = EIO;
4221 	}
4222 	mutex_exit(&dp->intrlock);
4223 
4224 	return (err);
4225 }
4226 
4227 static int
4228 gem_m_getstat(void *arg, uint_t stat, uint64_t *valp)
4229 {
4230 	struct gem_dev		*dp = arg;
4231 	struct gem_stats	*gstp = &dp->stats;
4232 	uint64_t		val = 0;
4233 
4234 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4235 
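	/*
	 * This routine may be entered with intrlock already held, so only
	 * take the lock when the caller does not own it; in either case
	 * bail out with EIO while the device is suspended.
	 */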
4236 	if (mutex_owned(&dp->intrlock)) {
4237 		if (dp->mac_suspended) {
4238 			return (EIO);
4239 		}
4240 	} else {
4241 		mutex_enter(&dp->intrlock);
4242 		if (dp->mac_suspended) {
4243 			mutex_exit(&dp->intrlock);
4244 			return (EIO);
4245 		}
4246 		mutex_exit(&dp->intrlock);
4247 	}
4248 
4249 	if ((*dp->gc.gc_get_stats)(dp) != GEM_SUCCESS) {
4250 		return (EIO);
4251 	}
4252 
4253 	switch (stat) {
4254 	case MAC_STAT_IFSPEED:
4255 		val = gem_speed_value[dp->speed] * 1000000ull;
4256 		break;
4257 
4258 	case MAC_STAT_MULTIRCV:
4259 		val = gstp->rmcast;
4260 		break;
4261 
4262 	case MAC_STAT_BRDCSTRCV:
4263 		val = gstp->rbcast;
4264 		break;
4265 
4266 	case MAC_STAT_MULTIXMT:
4267 		val = gstp->omcast;
4268 		break;
4269 
4270 	case MAC_STAT_BRDCSTXMT:
4271 		val = gstp->obcast;
4272 		break;
4273 
4274 	case MAC_STAT_NORCVBUF:
4275 		val = gstp->norcvbuf + gstp->missed;
4276 		break;
4277 
4278 	case MAC_STAT_IERRORS:
4279 		val = gstp->errrcv;
4280 		break;
4281 
4282 	case MAC_STAT_NOXMTBUF:
4283 		val = gstp->noxmtbuf;
4284 		break;
4285 
4286 	case MAC_STAT_OERRORS:
4287 		val = gstp->errxmt;
4288 		break;
4289 
4290 	case MAC_STAT_COLLISIONS:
4291 		val = gstp->collisions;
4292 		break;
4293 
4294 	case MAC_STAT_RBYTES:
4295 		val = gstp->rbytes;
4296 		break;
4297 
4298 	case MAC_STAT_IPACKETS:
4299 		val = gstp->rpackets;
4300 		break;
4301 
4302 	case MAC_STAT_OBYTES:
4303 		val = gstp->obytes;
4304 		break;
4305 
4306 	case MAC_STAT_OPACKETS:
4307 		val = gstp->opackets;
4308 		break;
4309 
4310 	case MAC_STAT_UNDERFLOWS:
4311 		val = gstp->underflow;
4312 		break;
4313 
4314 	case MAC_STAT_OVERFLOWS:
4315 		val = gstp->overflow;
4316 		break;
4317 
4318 	case ETHER_STAT_ALIGN_ERRORS:
4319 		val = gstp->frame;
4320 		break;
4321 
4322 	case ETHER_STAT_FCS_ERRORS:
4323 		val = gstp->crc;
4324 		break;
4325 
4326 	case ETHER_STAT_FIRST_COLLISIONS:
4327 		val = gstp->first_coll;
4328 		break;
4329 
4330 	case ETHER_STAT_MULTI_COLLISIONS:
4331 		val = gstp->multi_coll;
4332 		break;
4333 
4334 	case ETHER_STAT_SQE_ERRORS:
4335 		val = gstp->sqe;
4336 		break;
4337 
4338 	case ETHER_STAT_DEFER_XMTS:
4339 		val = gstp->defer;
4340 		break;
4341 
4342 	case ETHER_STAT_TX_LATE_COLLISIONS:
4343 		val = gstp->xmtlatecoll;
4344 		break;
4345 
4346 	case ETHER_STAT_EX_COLLISIONS:
4347 		val = gstp->excoll;
4348 		break;
4349 
4350 	case ETHER_STAT_MACXMT_ERRORS:
4351 		val = gstp->xmit_internal_err;
4352 		break;
4353 
4354 	case ETHER_STAT_CARRIER_ERRORS:
4355 		val = gstp->nocarrier;
4356 		break;
4357 
4358 	case ETHER_STAT_TOOLONG_ERRORS:
4359 		val = gstp->frame_too_long;
4360 		break;
4361 
4362 	case ETHER_STAT_MACRCV_ERRORS:
4363 		val = gstp->rcv_internal_err;
4364 		break;
4365 
4366 	case ETHER_STAT_XCVR_ADDR:
4367 		val = dp->mii_phy_addr;
4368 		break;
4369 
4370 	case ETHER_STAT_XCVR_ID:
4371 		val = dp->mii_phy_id;
4372 		break;
4373 
4374 	case ETHER_STAT_XCVR_INUSE:
4375 		val = gem_mac_xcvr_inuse(dp);
4376 		break;
4377 
4378 	case ETHER_STAT_CAP_1000FDX:
4379 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET_FD) ||
4380 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX_FD);
4381 		break;
4382 
4383 	case ETHER_STAT_CAP_1000HDX:
4384 		val = (dp->mii_xstatus & MII_XSTATUS_1000BASET) ||
4385 		    (dp->mii_xstatus & MII_XSTATUS_1000BASEX);
4386 		break;
4387 
4388 	case ETHER_STAT_CAP_100FDX:
4389 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4390 		break;
4391 
4392 	case ETHER_STAT_CAP_100HDX:
4393 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4394 		break;
4395 
4396 	case ETHER_STAT_CAP_10FDX:
4397 		val = BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4398 		break;
4399 
4400 	case ETHER_STAT_CAP_10HDX:
4401 		val = BOOLEAN(dp->mii_status & MII_STATUS_10);
4402 		break;
4403 
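	/*
	 * For the pause capabilities below, bit 0 of the flow-control
	 * value is treated as symmetric pause and bit 1 as asymmetric
	 * pause (see the FLOW_CONTROL_* handling in gem_read_conf()).
	 */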
4404 	case ETHER_STAT_CAP_ASMPAUSE:
4405 		val = BOOLEAN(dp->gc.gc_flow_control & 2);
4406 		break;
4407 
4408 	case ETHER_STAT_CAP_PAUSE:
4409 		val = BOOLEAN(dp->gc.gc_flow_control & 1);
4410 		break;
4411 
4412 	case ETHER_STAT_CAP_AUTONEG:
4413 		val = BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4414 		break;
4415 
4416 	case ETHER_STAT_ADV_CAP_1000FDX:
4417 		val = dp->anadv_1000fdx;
4418 		break;
4419 
4420 	case ETHER_STAT_ADV_CAP_1000HDX:
4421 		val = dp->anadv_1000hdx;
4422 		break;
4423 
4424 	case ETHER_STAT_ADV_CAP_100FDX:
4425 		val = dp->anadv_100fdx;
4426 		break;
4427 
4428 	case ETHER_STAT_ADV_CAP_100HDX:
4429 		val = dp->anadv_100hdx;
4430 		break;
4431 
4432 	case ETHER_STAT_ADV_CAP_10FDX:
4433 		val = dp->anadv_10fdx;
4434 		break;
4435 
4436 	case ETHER_STAT_ADV_CAP_10HDX:
4437 		val = dp->anadv_10hdx;
4438 		break;
4439 
4440 	case ETHER_STAT_ADV_CAP_ASMPAUSE:
4441 		val = BOOLEAN(dp->anadv_flow_control & 2);
4442 		break;
4443 
4444 	case ETHER_STAT_ADV_CAP_PAUSE:
4445 		val = BOOLEAN(dp->anadv_flow_control & 1);
4446 		break;
4447 
4448 	case ETHER_STAT_ADV_CAP_AUTONEG:
4449 		val = dp->anadv_autoneg;
4450 		break;
4451 
4452 	case ETHER_STAT_LP_CAP_1000FDX:
4453 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_FULL);
4454 		break;
4455 
4456 	case ETHER_STAT_LP_CAP_1000HDX:
4457 		val = BOOLEAN(dp->mii_stat1000 & MII_1000TS_LP_HALF);
4458 		break;
4459 
4460 	case ETHER_STAT_LP_CAP_100FDX:
4461 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX_FD);
4462 		break;
4463 
4464 	case ETHER_STAT_LP_CAP_100HDX:
4465 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_TX);
4466 		break;
4467 
4468 	case ETHER_STAT_LP_CAP_10FDX:
4469 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T_FD);
4470 		break;
4471 
4472 	case ETHER_STAT_LP_CAP_10HDX:
4473 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_10BASE_T);
4474 		break;
4475 
4476 	case ETHER_STAT_LP_CAP_ASMPAUSE:
4477 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_ASM_DIR);
4478 		break;
4479 
4480 	case ETHER_STAT_LP_CAP_PAUSE:
4481 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_PAUSE);
4482 		break;
4483 
4484 	case ETHER_STAT_LP_CAP_AUTONEG:
4485 		val = BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4486 		break;
4487 
4488 	case ETHER_STAT_LINK_ASMPAUSE:
4489 		val = BOOLEAN(dp->flow_control & 2);
4490 		break;
4491 
4492 	case ETHER_STAT_LINK_PAUSE:
4493 		val = BOOLEAN(dp->flow_control & 1);
4494 		break;
4495 
4496 	case ETHER_STAT_LINK_AUTONEG:
4497 		val = dp->anadv_autoneg &&
4498 		    BOOLEAN(dp->mii_exp & MII_AN_EXP_LPCANAN);
4499 		break;
4500 
4501 	case ETHER_STAT_LINK_DUPLEX:
4502 		val = (dp->mii_state == MII_STATE_LINKUP) ?
4503 		    (dp->full_duplex ? 2 : 1) : 0;
4504 		break;
4505 
4506 	case ETHER_STAT_TOOSHORT_ERRORS:
4507 		val = gstp->runt;
4508 		break;
4509 	case ETHER_STAT_LP_REMFAULT:
4510 		val = BOOLEAN(dp->mii_lpable & MII_AN_ADVERT_REMFAULT);
4511 		break;
4512 
4513 	case ETHER_STAT_JABBER_ERRORS:
4514 		val = gstp->jabber;
4515 		break;
4516 
4517 	case ETHER_STAT_CAP_100T4:
4518 		val = BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4519 		break;
4520 
4521 	case ETHER_STAT_ADV_CAP_100T4:
4522 		val = dp->anadv_100t4;
4523 		break;
4524 
4525 	case ETHER_STAT_LP_CAP_100T4:
4526 		val = BOOLEAN(dp->mii_lpable & MII_ABILITY_100BASE_T4);
4527 		break;
4528 
4529 	default:
4530 #if GEM_DEBUG_LEVEL > 2
4531 		cmn_err(CE_WARN,
4532 		    "%s: unrecognized parameter value = %d",
4533 		    __func__, stat);
4534 #endif
4535 		return (ENOTSUP);
4536 	}
4537 
4538 	*valp = val;
4539 
4540 	return (0);
4541 }
4542 
4543 static int
4544 gem_m_unicst(void *arg, const uint8_t *mac)
4545 {
4546 	int		err = 0;
4547 	struct gem_dev	*dp = arg;
4548 
4549 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4550 
4551 	mutex_enter(&dp->intrlock);
4552 	if (dp->mac_suspended) {
4553 		mutex_exit(&dp->intrlock);
4554 		return (EIO);
4555 	}
4556 	bcopy(mac, dp->cur_addr.ether_addr_octet, ETHERADDRL);
4557 	dp->rxmode |= RXMODE_ENABLE;
4558 
4559 	if (gem_mac_set_rx_filter(dp) != GEM_SUCCESS) {
4560 		err = EIO;
4561 	}
4562 	mutex_exit(&dp->intrlock);
4563 
4564 	return (err);
4565 }
4566 
4567 /*
4568  * gem_m_tx is used only for sending data packets onto the ethernet wire.
4569  */
4570 static mblk_t *
4571 gem_m_tx(void *arg, mblk_t *mp)
4572 {
4573 	uint32_t	flags = 0;
4574 	struct gem_dev	*dp = arg;
4575 	mblk_t		*tp;
4576 
4577 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4578 
4579 	ASSERT(dp->nic_state == NIC_STATE_ONLINE);
4580 	if (dp->mii_state != MII_STATE_LINKUP) {
4581 		/* Some NICs misbehave if asked to send while the link is down. */
4582 		while (mp) {
4583 			tp = mp->b_next;
4584 			mp->b_next = NULL;
4585 			freemsg(mp);
4586 			mp = tp;
4587 		}
4588 		return (NULL);
4589 	}
4590 
4591 	return (gem_send_common(dp, mp, flags));
4592 }
4593 
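/*
 * Rx interrupt blanking callback registered through the MAC_RX_FIFO
 * resource below; the MAC layer calls it to adjust how many received
 * packets may be coalesced per poll.  The count is capped at half of
 * the rx ring size.
 */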
4594 static void
4595 gem_set_coalease(void *arg, time_t ticks, uint_t count)
4596 {
4597 	struct gem_dev *dp = arg;
4598 	DPRINTF(1, (CE_CONT, "%s: %s: ticks:%d count:%d",
4599 	    dp->name, __func__, ticks, count));
4600 
4601 	mutex_enter(&dp->intrlock);
4602 	dp->poll_pkt_delay = min(count, dp->gc.gc_rx_ring_size/2);
4603 	mutex_exit(&dp->intrlock);
4604 }
4605 
4606 static void
4607 gem_m_resources(void *arg)
4608 {
4609 	struct gem_dev		*dp = arg;
4610 	mac_rx_fifo_t		mrf;
4611 
4612 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4613 
4614 	mutex_enter(&dp->intrlock);
4615 	mutex_enter(&dp->xmitlock);
4616 
4617 	/*
4618 	 * Register Rx rings as resources and save mac
4619 	 * resource id for future reference
4620 	 */
4621 	mrf.mrf_type = MAC_RX_FIFO;
4622 	mrf.mrf_blank = gem_set_coalease;
4623 	mrf.mrf_arg = (void *)dp;
4624 	mrf.mrf_normal_blank_time = 1; /* in uS */
4625 	mrf.mrf_normal_pkt_count = dp->poll_pkt_delay;
4626 
4627 	dp->mac_rx_ring_ha = mac_resource_add(dp->mh, (mac_resource_t *)&mrf);
4628 
4629 	mutex_exit(&dp->xmitlock);
4630 	mutex_exit(&dp->intrlock);
4631 }
4632 
4633 static void
4634 gem_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
4635 {
4636 	DPRINTF(0, (CE_CONT, "!%s: %s: called",
4637 	    ((struct gem_dev *)arg)->name, __func__));
4638 
4639 	gem_mac_ioctl((struct gem_dev *)arg, wq, mp);
4640 }
4641 
4642 static boolean_t
4643 gem_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
4644 {
4645 	boolean_t	ret;
4646 
4647 	ret = B_FALSE;
4648 	switch (cap) {
4649 	case MAC_CAPAB_POLL:
4650 		ret = B_TRUE;
4651 		break;
4652 	}
4653 	return (ret);
4654 }
4655 
4656 static void
4657 gem_gld3_init(struct gem_dev *dp, mac_register_t *macp)
4658 {
4659 	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
4660 	macp->m_driver = dp;
4661 	macp->m_dip = dp->dip;
4662 	macp->m_src_addr = dp->dev_addr.ether_addr_octet;
4663 	macp->m_callbacks = &gem_m_callbacks;
4664 	macp->m_min_sdu = 0;
4665 	macp->m_max_sdu = dp->mtu;
4666 
4667 	if (dp->misc_flag & GEM_VLAN) {
4668 		macp->m_margin = VTAG_SIZE;
4669 	}
4670 }
4671 
4672 /* ======================================================================== */
4673 /*
4674  * attach/detach support
4675  */
4676 /* ======================================================================== */
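/*
 * gem_read_conf() picks up the user-tunable link properties from the
 * leaf driver's .conf file.  A hypothetical driver.conf fragment that
 * forces 100Mbps full duplex with flow control disabled might look like
 * this (property names as parsed below; values are illustrative only):
 *
 *	adv_autoneg_cap=0;
 *	speed=100;
 *	full-duplex=1;
 *	flow-control=0;
 */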
4677 static void
4678 gem_read_conf(struct gem_dev *dp)
4679 {
4680 	int	val;
4681 
4682 	DPRINTF(1, (CE_CONT, "!%s: %s: called", dp->name, __func__));
4683 
4684 	/*
4685 	 * Get media mode information from the .conf file
4686 	 */
4687 	dp->anadv_autoneg = gem_prop_get_int(dp, "adv_autoneg_cap", 1) != 0;
4688 	dp->anadv_1000fdx = gem_prop_get_int(dp, "adv_1000fdx_cap", 1) != 0;
4689 	dp->anadv_1000hdx = gem_prop_get_int(dp, "adv_1000hdx_cap", 1) != 0;
4690 	dp->anadv_100t4   = gem_prop_get_int(dp, "adv_100T4_cap", 1) != 0;
4691 	dp->anadv_100fdx  = gem_prop_get_int(dp, "adv_100fdx_cap", 1) != 0;
4692 	dp->anadv_100hdx  = gem_prop_get_int(dp, "adv_100hdx_cap", 1) != 0;
4693 	dp->anadv_10fdx   = gem_prop_get_int(dp, "adv_10fdx_cap", 1) != 0;
4694 	dp->anadv_10hdx   = gem_prop_get_int(dp, "adv_10hdx_cap", 1) != 0;
4695 
4696 	if ((ddi_prop_exists(DDI_DEV_T_ANY, dp->dip,
4697 	    DDI_PROP_DONTPASS, "full-duplex"))) {
4698 		dp->full_duplex = gem_prop_get_int(dp, "full-duplex", 1) != 0;
4699 		dp->anadv_autoneg = B_FALSE;
4700 		if (dp->full_duplex) {
4701 			dp->anadv_1000hdx = B_FALSE;
4702 			dp->anadv_100hdx = B_FALSE;
4703 			dp->anadv_10hdx = B_FALSE;
4704 		} else {
4705 			dp->anadv_1000fdx = B_FALSE;
4706 			dp->anadv_100fdx = B_FALSE;
4707 			dp->anadv_10fdx = B_FALSE;
4708 		}
4709 	}
4710 
4711 	if ((val = gem_prop_get_int(dp, "speed", 0)) > 0) {
4712 		dp->anadv_autoneg = B_FALSE;
4713 		switch (val) {
4714 		case 1000:
4715 			dp->speed = GEM_SPD_1000;
4716 			dp->anadv_100t4   = B_FALSE;
4717 			dp->anadv_100fdx  = B_FALSE;
4718 			dp->anadv_100hdx  = B_FALSE;
4719 			dp->anadv_10fdx   = B_FALSE;
4720 			dp->anadv_10hdx   = B_FALSE;
4721 			break;
4722 		case 100:
4723 			dp->speed = GEM_SPD_100;
4724 			dp->anadv_1000fdx = B_FALSE;
4725 			dp->anadv_1000hdx = B_FALSE;
4726 			dp->anadv_10fdx   = B_FALSE;
4727 			dp->anadv_10hdx   = B_FALSE;
4728 			break;
4729 		case 10:
4730 			dp->speed = GEM_SPD_10;
4731 			dp->anadv_1000fdx = B_FALSE;
4732 			dp->anadv_1000hdx = B_FALSE;
4733 			dp->anadv_100t4   = B_FALSE;
4734 			dp->anadv_100fdx  = B_FALSE;
4735 			dp->anadv_100hdx  = B_FALSE;
4736 			break;
4737 		default:
4738 			cmn_err(CE_WARN,
4739 			    "!%s: property %s: illegal value:%d",
4740 			    dp->name, "speed", val);
4741 			dp->anadv_autoneg = B_TRUE;
4742 			break;
4743 		}
4744 	}
4745 
4746 	val = gem_prop_get_int(dp, "flow-control", dp->gc.gc_flow_control);
4747 	if (val < FLOW_CONTROL_NONE || val > FLOW_CONTROL_RX_PAUSE) {
4748 		cmn_err(CE_WARN, "!%s: property %s: illegal value:%d",
4749 		    dp->name, "flow-control", val);
4750 		val = dp->gc.gc_flow_control;	/* fall back to the default */
4751 	} else {
4752 		val = min(val, dp->gc.gc_flow_control);
4753 	}
4754 	dp->anadv_flow_control = val;
4755 
4756 	if (gem_prop_get_int(dp, "nointr", 0)) {
4757 		dp->misc_flag |= GEM_NOINTR;
4758 		cmn_err(CE_NOTE, "!%s: polling mode enabled", dp->name);
4759 	}
4760 
4761 	dp->mtu = gem_prop_get_int(dp, "mtu", dp->mtu);
4762 	dp->txthr = gem_prop_get_int(dp, "txthr", dp->txthr);
4763 	dp->rxthr = gem_prop_get_int(dp, "rxthr", dp->rxthr);
4764 	dp->txmaxdma = gem_prop_get_int(dp, "txmaxdma", dp->txmaxdma);
4765 	dp->rxmaxdma = gem_prop_get_int(dp, "rxmaxdma", dp->rxmaxdma);
4766 }
4767 
4768 
4769 /*
4770  * Gem soft state allocation and attach/detach support
4771  */
4772 
4773 #define	GEM_LOCAL_DATA_SIZE(gc)	\
4774 	(sizeof (struct gem_dev) + \
4775 	sizeof (struct mcast_addr) * GEM_MAXMC + \
4776 	sizeof (struct txbuf) * ((gc)->gc_tx_buf_size) + \
4777 	sizeof (void *) * ((gc)->gc_tx_buf_size))
4778 
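/*
 * GEM_LOCAL_DATA_SIZE() sizes the whole soft state as one allocation:
 * the gem_dev structure itself, followed by the multicast address list,
 * the txbuf array and a per-txbuf pointer array.  gem_do_attach() carves
 * dp->mc_list and dp->tx_buf out of this area below.
 */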
4779 struct gem_dev *
4780 gem_do_attach(dev_info_t *dip, int port,
4781 	struct gem_conf *gc, void *base, ddi_acc_handle_t *regs_handlep,
4782 	void *lp, int lmsize)
4783 {
4784 	struct gem_dev		*dp;
4785 	int			i;
4786 	ddi_iblock_cookie_t	c;
4787 	mac_register_t		*macp = NULL;
4788 	int			ret;
4789 	int			unit;
4790 	int			nports;
4791 
4792 	unit = ddi_get_instance(dip);
4793 	if ((nports = gc->gc_nports) == 0) {
4794 		nports = 1;
4795 	}
4796 	if (nports == 1) {
4797 		ddi_set_driver_private(dip, NULL);
4798 	}
4799 
4800 	DPRINTF(2, (CE_CONT, "!gem%d: gem_do_attach: called cmd:ATTACH",
4801 	    unit));
4802 
4803 	/*
4804 	 * Allocate soft data structure
4805 	 */
4806 	dp = kmem_zalloc(GEM_LOCAL_DATA_SIZE(gc), KM_SLEEP);
4807 
4808 	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
4809 		cmn_err(CE_WARN, "!gem%d: %s: mac_alloc failed",
4810 		    unit, __func__);
4811 		goto err_free_private;	/* free the soft state allocated above */
4812 	}
4813 	/* ddi_set_driver_private(dip, dp); */
4814 
4815 	/* link to private area */
4816 	dp->private = lp;
4817 	dp->priv_size = lmsize;
4818 	dp->mc_list = (struct mcast_addr *)&dp[1];
4819 
4820 	dp->dip = dip;
4821 	(void) sprintf(dp->name, gc->gc_name, nports * unit + port);
4822 
4823 	/*
4824 	 * Get iblock cookie
4825 	 */
4826 	if (ddi_get_iblock_cookie(dip, 0, &c) != DDI_SUCCESS) {
4827 		cmn_err(CE_CONT,
4828 		    "!%s: gem_do_attach: ddi_get_iblock_cookie: failed",
4829 		    dp->name);
4830 		goto err_free_private;
4831 	}
4832 	dp->iblock_cookie = c;
4833 
4834 	/*
4835 	 * Initialize mutex's for this device.
4836 	 */
4837 	mutex_init(&dp->intrlock, NULL, MUTEX_DRIVER, (void *)c);
4838 	mutex_init(&dp->xmitlock, NULL, MUTEX_DRIVER, (void *)c);
4839 	cv_init(&dp->tx_drain_cv, NULL, CV_DRIVER, NULL);
4840 
4841 	/*
4842 	 * configure gem parameter
4843 	 */
4844 	dp->base_addr = base;
4845 	dp->regs_handle = *regs_handlep;
4846 	dp->gc = *gc;
4847 	gc = &dp->gc;
4848 	/* patch to simplify dma resource management */
4849 	gc->gc_tx_max_frags = 1;
4850 	gc->gc_tx_max_descs_per_pkt = 1;
4851 	gc->gc_tx_ring_size = gc->gc_tx_buf_size;
4852 	gc->gc_tx_ring_limit = gc->gc_tx_buf_limit;
4853 	gc->gc_tx_desc_write_oo = B_TRUE;
4854 
4855 	gc->gc_nports = nports;	/* fix nports */
4856 
4857 	/* fix copy thresholds */
4858 	gc->gc_tx_copy_thresh = max(ETHERMIN, gc->gc_tx_copy_thresh);
4859 	gc->gc_rx_copy_thresh = max(ETHERMIN, gc->gc_rx_copy_thresh);
4860 
4861 	/* fix rx buffer boundary for iocache line size */
4862 	ASSERT(gc->gc_dma_attr_txbuf.dma_attr_align-1 == gc->gc_tx_buf_align);
4863 	ASSERT(gc->gc_dma_attr_rxbuf.dma_attr_align-1 == gc->gc_rx_buf_align);
4864 	gc->gc_rx_buf_align = max(gc->gc_rx_buf_align, IOC_LINESIZE - 1);
4865 	gc->gc_dma_attr_rxbuf.dma_attr_align = gc->gc_rx_buf_align + 1;
4866 
4867 	/* fix descriptor boundary for cache line size */
4868 	gc->gc_dma_attr_desc.dma_attr_align =
4869 	    max(gc->gc_dma_attr_desc.dma_attr_align, IOC_LINESIZE);
4870 
4871 	/* patch get_packet method */
4872 	if (gc->gc_get_packet == NULL) {
4873 		gc->gc_get_packet = &gem_get_packet_default;
4874 	}
4875 
4876 	/* patch get_rx_start method */
4877 	if (gc->gc_rx_start == NULL) {
4878 		gc->gc_rx_start = &gem_rx_start_default;
4879 	}
4880 
4881 	/* calculate descriptor area */
4882 	if (gc->gc_rx_desc_unit_shift >= 0) {
4883 		dp->rx_desc_size =
4884 		    ROUNDUP(gc->gc_rx_ring_size << gc->gc_rx_desc_unit_shift,
4885 		    gc->gc_dma_attr_desc.dma_attr_align);
4886 	}
4887 	if (gc->gc_tx_desc_unit_shift >= 0) {
4888 		dp->tx_desc_size =
4889 		    ROUNDUP(gc->gc_tx_ring_size << gc->gc_tx_desc_unit_shift,
4890 		    gc->gc_dma_attr_desc.dma_attr_align);
4891 	}
4892 
4893 	dp->mtu = ETHERMTU;
4894 	dp->tx_buf = (void *)&dp->mc_list[GEM_MAXMC];
4895 	/* link tx buffers */
4896 	for (i = 0; i < dp->gc.gc_tx_buf_size; i++) {
4897 		dp->tx_buf[i].txb_next =
4898 		    &dp->tx_buf[SLOT(i + 1, dp->gc.gc_tx_buf_size)];
4899 	}
4900 
4901 	dp->rxmode	   = 0;
4902 	dp->speed	   = GEM_SPD_10;	/* default is 10Mbps */
4903 	dp->full_duplex    = B_FALSE;		/* default is half */
4904 	dp->flow_control   = FLOW_CONTROL_NONE;
4905 	dp->poll_pkt_delay = 8;		/* typical coalease for rx packets */
4906 	dp->poll_pkt_delay = 8;		/* typical coalesce count for rx packets */
4907 	/* performance tuning parameters */
4908 	dp->txthr    = ETHERMAX;	/* tx fifo threshold */
4909 	dp->txmaxdma = 16*4;		/* tx max dma burst size */
4910 	dp->rxthr    = 128;		/* rx fifo threshold */
4911 	dp->rxmaxdma = 16*4;		/* rx max dma burst size */
4912 
4913 	/*
4914 	 * Get media mode information from .conf file
4915 	 */
4916 	gem_read_conf(dp);
4917 
4918 	/* rx_buf_len is the required buffer length, excluding alignment padding */
4919 	dp->rx_buf_len = MAXPKTBUF(dp) + dp->gc.gc_rx_header_len;
4920 
4921 	/*
4922 	 * Reset the chip
4923 	 */
4924 	mutex_enter(&dp->intrlock);
4925 	dp->nic_state = NIC_STATE_STOPPED;
4926 	ret = (*dp->gc.gc_reset_chip)(dp);
4927 	mutex_exit(&dp->intrlock);
4928 	if (ret != GEM_SUCCESS) {
4929 		goto err_free_regs;
4930 	}
4931 
4932 	/*
4933 	 * HW dependent parameter initialization
4934 	 */
4935 	mutex_enter(&dp->intrlock);
4936 	ret = (*dp->gc.gc_attach_chip)(dp);
4937 	mutex_exit(&dp->intrlock);
4938 	if (ret != GEM_SUCCESS) {
4939 		goto err_free_regs;
4940 	}
4941 
4942 #ifdef DEBUG_MULTIFRAGS
4943 	dp->gc.gc_tx_copy_thresh = dp->mtu;
4944 #endif
4945 	/* allocate tx and rx resources */
4946 	if (gem_alloc_memory(dp)) {
4947 		goto err_free_regs;
4948 	}
4949 
4950 	DPRINTF(0, (CE_CONT,
4951 	    "!%s: at 0x%x, %02x:%02x:%02x:%02x:%02x:%02x",
4952 	    dp->name, (long)dp->base_addr,
4953 	    dp->dev_addr.ether_addr_octet[0],
4954 	    dp->dev_addr.ether_addr_octet[1],
4955 	    dp->dev_addr.ether_addr_octet[2],
4956 	    dp->dev_addr.ether_addr_octet[3],
4957 	    dp->dev_addr.ether_addr_octet[4],
4958 	    dp->dev_addr.ether_addr_octet[5]));
4959 
4960 	/* copy mac address */
4961 	dp->cur_addr = dp->dev_addr;
4962 
4963 	gem_gld3_init(dp, macp);
4964 
4965 	/* Probe MII phy (scan phy) */
4966 	dp->mii_lpable = 0;
4967 	dp->mii_advert = 0;
4968 	dp->mii_exp = 0;
4969 	dp->mii_ctl1000 = 0;
4970 	dp->mii_stat1000 = 0;
4971 	if ((*dp->gc.gc_mii_probe)(dp) != GEM_SUCCESS) {
4972 		goto err_free_ring;
4973 	}
4974 
4975 	/* mask unsupported abilities */
4976 	dp->anadv_autoneg &= BOOLEAN(dp->mii_status & MII_STATUS_CANAUTONEG);
4977 	dp->anadv_1000fdx &=
4978 	    BOOLEAN(dp->mii_xstatus &
4979 	    (MII_XSTATUS_1000BASEX_FD | MII_XSTATUS_1000BASET_FD));
4980 	dp->anadv_1000hdx &=
4981 	    BOOLEAN(dp->mii_xstatus &
4982 	    (MII_XSTATUS_1000BASEX | MII_XSTATUS_1000BASET));
4983 	dp->anadv_100t4  &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASE_T4);
4984 	dp->anadv_100fdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX_FD);
4985 	dp->anadv_100hdx &= BOOLEAN(dp->mii_status & MII_STATUS_100_BASEX);
4986 	dp->anadv_10fdx  &= BOOLEAN(dp->mii_status & MII_STATUS_10_FD);
4987 	dp->anadv_10hdx  &= BOOLEAN(dp->mii_status & MII_STATUS_10);
4988 
4989 	gem_choose_forcedmode(dp);
4990 
4991 	/* initialize MII phy if required */
4992 	if (dp->gc.gc_mii_init) {
4993 		if ((*dp->gc.gc_mii_init)(dp) != GEM_SUCCESS) {
4994 			goto err_free_ring;
4995 		}
4996 	}
4997 
4998 	/*
4999 	 * initialize kstats including mii statistics
5000 	 */
5001 	gem_nd_setup(dp);
5002 
5003 	/*
5004 	 * Register with the GLDv3 MAC layer and install the interrupt handler.
5005 	 */
5006 	if (ret = mac_register(macp, &dp->mh)) {
5007 		cmn_err(CE_WARN, "!%s: mac_register failed, error:%d",
5008 		    dp->name, ret);
5009 		goto err_release_stats;
5010 	}
5011 	mac_free(macp);
5012 	macp = NULL;
5013 
5014 	if (dp->misc_flag & GEM_SOFTINTR) {
5015 		if (ddi_add_softintr(dip,
5016 		    DDI_SOFTINT_LOW, &dp->soft_id,
5017 		    NULL, NULL,
5018 		    (uint_t (*)(caddr_t))gem_intr,
5019 		    (caddr_t)dp) != DDI_SUCCESS) {
5020 			cmn_err(CE_WARN, "!%s: ddi_add_softintr failed",
5021 			    dp->name);
5022 			goto err_unregister;
5023 		}
5024 	} else if ((dp->misc_flag & GEM_NOINTR) == 0) {
5025 		if (ddi_add_intr(dip, 0, NULL, NULL,
5026 		    (uint_t (*)(caddr_t))gem_intr,
5027 		    (caddr_t)dp) != DDI_SUCCESS) {
5028 			cmn_err(CE_WARN, "!%s: ddi_add_intr failed", dp->name);
5029 			goto err_unregister;
5030 		}
5031 	} else {
5032 		/*
5033 		 * Don't use interrupts;
5034 		 * schedule the first call of gem_intr_watcher instead.
5035 		 */
5036 		dp->intr_watcher_id =
5037 		    timeout((void (*)(void *))gem_intr_watcher,
5038 		    (void *)dp, drv_usectohz(3*1000000));
5039 	}
5040 
5041 	/* link this device to dev_info */
5042 	dp->next = (struct gem_dev *)ddi_get_driver_private(dip);
5043 	dp->port = port;
5044 	ddi_set_driver_private(dip, (caddr_t)dp);
5045 
5046 	/* reset mii phy and start mii link watcher */
5047 	gem_mii_start(dp);
5048 
5049 	DPRINTF(2, (CE_CONT, "!gem_do_attach: return: success"));
5050 	return (dp);
5051 
5052 err_unregister:
5053 	(void) mac_unregister(dp->mh);
5054 err_release_stats:
5055 	/* release NDD resources */
5056 	gem_nd_cleanup(dp);
5057 
5058 err_free_ring:
5059 	gem_free_memory(dp);
5060 err_free_regs:
5061 	ddi_regs_map_free(&dp->regs_handle);
5062 err_free_locks:
5063 	mutex_destroy(&dp->xmitlock);
5064 	mutex_destroy(&dp->intrlock);
5065 	cv_destroy(&dp->tx_drain_cv);
5066 err_free_private:
5067 	if (macp) {
5068 		mac_free(macp);
5069 	}
5070 	kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(gc));
5071 
5072 	return (NULL);
5073 }
5074 
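/*
 * Detach every port hanging off this dev_info.  The per-port gem_dev
 * structures are linked through dp->next; the register mapping and the
 * chip private area are shared by all ports and are released once at
 * the end.
 */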
5075 int
5076 gem_do_detach(dev_info_t *dip)
5077 {
5078 	struct gem_dev	*dp;
5079 	struct gem_dev	*tmp;
5080 	caddr_t		private;
5081 	int		priv_size;
5082 	ddi_acc_handle_t	rh;
5083 
5084 	dp = GEM_GET_DEV(dip);
5085 	if (dp == NULL) {
5086 		return (DDI_SUCCESS);
5087 	}
5088 
5089 	rh = dp->regs_handle;
5090 	private = dp->private;
5091 	priv_size = dp->priv_size;
5092 
5093 	while (dp) {
5094 		/* unregister with gld v3 */
5095 		if (mac_unregister(dp->mh) != 0) {
5096 			return (DDI_FAILURE);
5097 		}
5098 
5099 		/* ensure no rx buffers are still in use */
5100 		if (dp->rx_buf_allocated != dp->rx_buf_freecnt) {
5101 			/* resource is busy */
5102 			cmn_err(CE_PANIC,
5103 			    "!%s: %s: rxbuf is busy: allocated:%d, freecnt:%d",
5104 			    dp->name, __func__,
5105 			    dp->rx_buf_allocated, dp->rx_buf_freecnt);
5106 			/* NOT REACHED */
5107 		}
5108 
5109 		/* stop mii link watcher */
5110 		gem_mii_stop(dp);
5111 
5112 		/* unregister interrupt handler */
5113 		if (dp->misc_flag & GEM_SOFTINTR) {
5114 			ddi_remove_softintr(dp->soft_id);
5115 		} else if ((dp->misc_flag & GEM_NOINTR) == 0) {
5116 			ddi_remove_intr(dip, 0, dp->iblock_cookie);
5117 		} else {
5118 			/* stop interrupt watcher */
5119 			if (dp->intr_watcher_id) {
5120 				while (untimeout(dp->intr_watcher_id) == -1)
5121 					;
5122 				dp->intr_watcher_id = 0;
5123 			}
5124 		}
5125 
5126 		/* release NDD resources */
5127 		gem_nd_cleanup(dp);
5128 		/* release buffers, descriptors and dma resources */
5129 		gem_free_memory(dp);
5130 
5131 		/* release locks and condition variables */
5132 		mutex_destroy(&dp->xmitlock);
5133 		mutex_destroy(&dp->intrlock);
5134 		cv_destroy(&dp->tx_drain_cv);
5135 
5136 		/* release basic memory resources */
5137 		tmp = dp->next;
5138 		kmem_free((caddr_t)dp, GEM_LOCAL_DATA_SIZE(&dp->gc));
5139 		dp = tmp;
5140 	}
5141 
5142 	/* release common private memory for the nic */
5143 	kmem_free(private, priv_size);
5144 
5145 	/* release register mapping resources */
5146 	ddi_regs_map_free(&rh);
5147 
5148 	DPRINTF(2, (CE_CONT, "!%s%d: gem_do_detach: return: success",
5149 	    ddi_driver_name(dip), ddi_get_instance(dip)));
5150 
5151 	return (DDI_SUCCESS);
5152 }
5153 
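/*
 * DDI_SUSPEND support: stop the link and tx watchers, shut the mac down
 * and set mac_suspended so that later entry points fail with EIO until
 * gem_resume() reinitializes the chip.
 */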
5154 int
5155 gem_suspend(dev_info_t *dip)
5156 {
5157 	struct gem_dev	*dp;
5158 
5159 	/*
5160 	 * stop the device
5161 	 */
5162 	dp = GEM_GET_DEV(dip);
5163 	ASSERT(dp);
5164 
5165 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
5166 
5167 	for (; dp; dp = dp->next) {
5168 
5169 		/* stop mii link watcher */
5170 		gem_mii_stop(dp);
5171 
5172 		/* stop interrupt watcher for no-intr mode */
5173 		if (dp->misc_flag & GEM_NOINTR) {
5174 			if (dp->intr_watcher_id) {
5175 				while (untimeout(dp->intr_watcher_id) == -1)
5176 					;
5177 			}
5178 			dp->intr_watcher_id = 0;
5179 		}
5180 
5181 		/* stop tx timeout watcher */
5182 		if (dp->timeout_id) {
5183 			while (untimeout(dp->timeout_id) == -1)
5184 				;
5185 			dp->timeout_id = 0;
5186 		}
5187 
5188 		/* make the nic state inactive */
5189 		mutex_enter(&dp->intrlock);
5190 		(void) gem_mac_stop(dp, 0);
5191 		ASSERT(!dp->mac_active);
5192 
5193 		/* no further register access */
5194 		dp->mac_suspended = B_TRUE;
5195 		mutex_exit(&dp->intrlock);
5196 	}
5197 
5198 	/* XXX - power down the nic */
5199 
5200 	return (DDI_SUCCESS);
5201 }
5202 
5203 int
5204 gem_resume(dev_info_t *dip)
5205 {
5206 	struct gem_dev	*dp;
5207 
5208 	/*
5209 	 * restart the device
5210 	 */
5211 	dp = GEM_GET_DEV(dip);
5212 	ASSERT(dp);
5213 
5214 	DPRINTF(0, (CE_CONT, "!%s: %s: called", dp->name, __func__));
5215 
5216 	for (; dp; dp = dp->next) {
5217 
5218 		/*
5219 		 * Bring up the nic after power up
5220 		 */
5221 
5222 		/* the gem_xxx.c layer is expected to set up power management. */
5223 		ASSERT(!dp->mac_active);
5224 
5225 		/* reset the chip, because we are just after power up. */
5226 		mutex_enter(&dp->intrlock);
5227 
5228 		dp->mac_suspended = B_FALSE;
5229 		dp->nic_state = NIC_STATE_STOPPED;
5230 
5231 		if ((*dp->gc.gc_reset_chip)(dp) != GEM_SUCCESS) {
5232 			cmn_err(CE_WARN, "%s: %s: failed to reset chip",
5233 			    dp->name, __func__);
5234 			mutex_exit(&dp->intrlock);
5235 			goto err;
5236 		}
5237 		mutex_exit(&dp->intrlock);
5238 
5239 		/* initialize mii phy because we are just after power up */
5240 		if (dp->gc.gc_mii_init) {
5241 			(void) (*dp->gc.gc_mii_init)(dp);
5242 		}
5243 
5244 		if (dp->misc_flag & GEM_NOINTR) {
5245 			/*
5246 			 * schedule first call of gem_intr_watcher
5247 			 * instead of interrupts.
5248 			 */
5249 			dp->intr_watcher_id =
5250 			    timeout((void (*)(void *))gem_intr_watcher,
5251 			    (void *)dp, drv_usectohz(3*1000000));
5252 		}
5253 
5254 		/* restart mii link watcher */
5255 		gem_mii_start(dp);
5256 
5257 		/* restart mac */
5258 		mutex_enter(&dp->intrlock);
5259 
5260 		if (gem_mac_init(dp) != GEM_SUCCESS) {
5261 			mutex_exit(&dp->intrlock);
5262 			goto err_reset;
5263 		}
5264 		dp->nic_state = NIC_STATE_INITIALIZED;
5265 
5266 		/* setup media mode if the link have been up */
5267 		/* set up the media mode if the link is already up */
5268 			if ((dp->gc.gc_set_media)(dp) != GEM_SUCCESS) {
5269 				mutex_exit(&dp->intrlock);
5270 				goto err_reset;
5271 			}
5272 		}
5273 
5274 		/* enable mac address and rx filter */
5275 		dp->rxmode |= RXMODE_ENABLE;
5276 		if ((*dp->gc.gc_set_rx_filter)(dp) != GEM_SUCCESS) {
5277 			mutex_exit(&dp->intrlock);
5278 			goto err_reset;
5279 		}
5280 		dp->nic_state = NIC_STATE_ONLINE;
5281 
5282 		/* restart tx timeout watcher */
5283 		dp->timeout_id = timeout((void (*)(void *))gem_tx_timeout,
5284 		    (void *)dp,
5285 		    dp->gc.gc_tx_timeout_interval);
5286 
5287 		/* now the nic is fully functional */
5288 		if (dp->mii_state == MII_STATE_LINKUP) {
5289 			if (gem_mac_start(dp) != GEM_SUCCESS) {
5290 				mutex_exit(&dp->intrlock);
5291 				goto err_reset;
5292 			}
5293 		}
5294 		mutex_exit(&dp->intrlock);
5295 	}
5296 
5297 	return (DDI_SUCCESS);
5298 
5299 err_reset:
5300 	if (dp->intr_watcher_id) {
5301 		while (untimeout(dp->intr_watcher_id) == -1)
5302 			;
5303 		dp->intr_watcher_id = 0;
5304 	}
5305 	mutex_enter(&dp->intrlock);
5306 	(*dp->gc.gc_reset_chip)(dp);
5307 	dp->nic_state = NIC_STATE_STOPPED;
5308 	mutex_exit(&dp->intrlock);
5309 
5310 err:
5311 	return (DDI_FAILURE);
5312 }
5313 
5314 /*
5315  * misc routines for PCI
5316  */
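/*
 * A leaf driver would typically call these helpers from its attach(9E)
 * routine; an illustrative sketch (PCI_PMCSR_D0 is assumed to be
 * available from <sys/pci.h>):
 *
 *	ddi_acc_handle_t	conf;
 *
 *	if (pci_config_setup(dip, &conf) == DDI_SUCCESS) {
 *		(void) gem_pci_set_power_state(dip, conf, PCI_PMCSR_D0);
 *		pci_config_teardown(&conf);
 *	}
 */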
5317 uint8_t
5318 gem_search_pci_cap(dev_info_t *dip,
5319 		ddi_acc_handle_t conf_handle, uint8_t target)
5320 {
5321 	uint8_t		pci_cap_ptr;
5322 	uint32_t	pci_cap;
5323 
5324 	/* walk the capability list looking for the target capability */
5325 	pci_cap_ptr = pci_config_get8(conf_handle, PCI_CONF_CAP_PTR);
5326 	while (pci_cap_ptr) {
5327 		/* read pci capability header */
5328 		pci_cap = pci_config_get32(conf_handle, pci_cap_ptr);
5329 		if ((pci_cap & 0xff) == target) {
5330 			/* found */
5331 			break;
5332 		}
5333 		/* get next_ptr */
5334 		pci_cap_ptr = (pci_cap >> 8) & 0xff;
5335 	}
5336 	return (pci_cap_ptr);
5337 }
5338 
5339 int
5340 gem_pci_set_power_state(dev_info_t *dip,
5341 		ddi_acc_handle_t conf_handle, uint_t new_mode)
5342 {
5343 	uint8_t		pci_cap_ptr;
5344 	uint32_t	pmcsr;
5345 	uint_t		unit;
5346 	const char	*drv_name;
5347 
5348 	ASSERT(new_mode < 4);
5349 
5350 	unit = ddi_get_instance(dip);
5351 	drv_name = ddi_driver_name(dip);
5352 
5353 	/* search for the power management capability */
5354 	pci_cap_ptr = gem_search_pci_cap(dip, conf_handle, PCI_CAP_ID_PM);
5355 
5356 	if (pci_cap_ptr == 0) {
5357 		cmn_err(CE_CONT,
5358 		    "!%s%d: doesn't have pci power management capability",
5359 		    drv_name, unit);
5360 		return (DDI_FAILURE);
5361 	}
5362 
5363 	/* read the power management control/status register */
5364 	pmcsr = pci_config_get32(conf_handle, pci_cap_ptr + PCI_PMCSR);
5365 
5366 	DPRINTF(0, (CE_CONT,
5367 	    "!%s%d: pmc found at 0x%x: pmcsr: 0x%08x",
5368 	    drv_name, unit, pci_cap_ptr, pmcsr));
5369 
5370 	/*
5371 	 * Is the requested power mode supported?
5372 	 */
5373 	/* not yet */
5374 
5375 	/*
5376 	 * move to new mode
5377 	 */
5378 	pmcsr = (pmcsr & ~PCI_PMCSR_STATE_MASK) | new_mode;
5379 	pci_config_put32(conf_handle, pci_cap_ptr + PCI_PMCSR, pmcsr);
5380 
5381 	return (DDI_SUCCESS);
5382 }
5383 
5384 /*
5385  * Select and map the register set that matches the specified address
5386  * space type or register offset in PCI config space.
5387  */
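/*
 * For example, mapping the first 32-bit memory BAR might look like this
 * (illustrative; the PCI_ADDR_* constants are assumed from <sys/pci.h>):
 *
 *	ret = gem_pci_regs_map_setup(dip, PCI_ADDR_MEM32, PCI_ADDR_MASK,
 *	    &dev_attr, &base, &regs_handle);
 */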
5388 int
5389 gem_pci_regs_map_setup(dev_info_t *dip, uint32_t which, uint32_t mask,
5390 	struct ddi_device_acc_attr *attrp,
5391 	caddr_t *basep, ddi_acc_handle_t *hp)
5392 {
5393 	struct pci_phys_spec	*regs;
5394 	uint_t		len;
5395 	uint_t		unit;
5396 	uint_t		n;
5397 	uint_t		i;
5398 	int		ret;
5399 	const char	*drv_name;
5400 
5401 	unit = ddi_get_instance(dip);
5402 	drv_name = ddi_driver_name(dip);
5403 
5404 	/* Search IO-range or memory-range to be mapped */
5405 	regs = NULL;
5406 	len  = 0;
5407 
5408 	if ((ret = ddi_prop_lookup_int_array(
5409 	    DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
5410 	    "reg", (void *)&regs, &len)) != DDI_PROP_SUCCESS) {
5411 		cmn_err(CE_WARN,
5412 		    "!%s%d: failed to get reg property (ret:%d)",
5413 		    drv_name, unit, ret);
5414 		return (DDI_FAILURE);
5415 	}
5416 	n = len / (sizeof (struct pci_phys_spec) / sizeof (int));
5417 
5418 	ASSERT(regs != NULL && len > 0);
5419 
5420 #if GEM_DEBUG_LEVEL > 0
5421 	for (i = 0; i < n; i++) {
5422 		cmn_err(CE_CONT,
5423 		    "!%s%d: regs[%d]: %08x.%08x.%08x.%08x.%08x",
5424 		    drv_name, unit, i,
5425 		    regs[i].pci_phys_hi,
5426 		    regs[i].pci_phys_mid,
5427 		    regs[i].pci_phys_low,
5428 		    regs[i].pci_size_hi,
5429 		    regs[i].pci_size_low);
5430 	}
5431 #endif
5432 	for (i = 0; i < n; i++) {
5433 		if ((regs[i].pci_phys_hi & mask) == which) {
5434 			/* it's the requested space */
5435 			ddi_prop_free(regs);
5436 			goto address_range_found;
5437 		}
5438 	}
5439 	ddi_prop_free(regs);
5440 	return (DDI_FAILURE);
5441 
5442 address_range_found:
5443 	if ((ret = ddi_regs_map_setup(dip, i, basep, 0, 0, attrp, hp))
5444 	    != DDI_SUCCESS) {
5445 		cmn_err(CE_CONT,
5446 		    "!%s%d: ddi_regs_map_setup failed (ret:%d)",
5447 		    drv_name, unit, ret);
5448 	}
5449 
5450 	return (ret);
5451 }
5452 
5453 void
5454 gem_mod_init(struct dev_ops *dop, char *name)
5455 {
5456 	mac_init_ops(dop, name);
5457 }
5458 
5459 void
5460 gem_mod_fini(struct dev_ops *dop)
5461 {
5462 	mac_fini_ops(dop);
5463 }
5464