xref: /titanic_50/usr/src/uts/common/io/sfe/sfe.c (revision 2dd2efa5a06a9befe46075cf41e16f57533c9f98)
1 /*
2  *  sfe.c : DP83815/DP83816/SiS900 Fast Ethernet MAC driver for Solaris
3  *
4  * Copyright (c) 2002-2007 Masayuki Murayama.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  *    this list of conditions and the following disclaimer in the documentation
14  *    and/or other materials provided with the distribution.
15  *
16  * 3. Neither the name of the author nor the names of its contributors may be
17  *    used to endorse or promote products derived from this software without
18  *    specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31  * DAMAGE.
32  */
33 
34 #pragma ident	"%Z%%M%	%I%	%E% SMI"	/* sfe device driver */
35 
36 /*
37  * System Header files.
38  */
39 #include <sys/types.h>
40 #include <sys/conf.h>
41 #include <sys/debug.h>
42 #include <sys/kmem.h>
43 #include <sys/modctl.h>
44 #include <sys/errno.h>
45 #include <sys/ddi.h>
46 #include <sys/sunddi.h>
47 #include <sys/byteorder.h>
48 #include <sys/ethernet.h>
49 #include <sys/pci.h>
50 
51 #include "sfe_mii.h"
52 #include "sfe_util.h"
53 #include "sfereg.h"
54 
/* module identification string; VERSION is supplied by the build environment */
char	ident[] = "sis900/dp83815 driver v" VERSION;
56 
57 /* Debugging support */
58 #ifdef DEBUG_LEVEL
59 static int sfe_debug = DEBUG_LEVEL;
60 #if DEBUG_LEVEL > 4
61 #define	CONS	"^"
62 #else
63 #define	CONS	"!"
64 #endif
65 #define	DPRINTF(n, args)	if (sfe_debug > (n)) cmn_err args
66 #else
67 #define	CONS	"!"
68 #define	DPRINTF(n, args)
69 #endif
70 
71 /*
72  * Useful macros and typedefs
73  */
74 #define	ONESEC		(drv_usectohz(1*1000000))
75 #define	ROUNDUP2(x, a)	(((x) + (a) - 1) & ~((a) - 1))
76 
77 /*
78  * Our configuration
79  */
80 #define	MAXTXFRAGS	1
81 #define	MAXRXFRAGS	1
82 
83 #ifndef	TX_BUF_SIZE
84 #define	TX_BUF_SIZE	64
85 #endif
86 #ifndef	TX_RING_SIZE
87 #if MAXTXFRAGS == 1
88 #define	TX_RING_SIZE	TX_BUF_SIZE
89 #else
90 #define	TX_RING_SIZE	(TX_BUF_SIZE * 4)
91 #endif
92 #endif
93 
94 #ifndef	RX_BUF_SIZE
95 #define	RX_BUF_SIZE	256
96 #endif
97 #ifndef	RX_RING_SIZE
98 #define	RX_RING_SIZE	RX_BUF_SIZE
99 #endif
100 
101 #define	OUR_INTR_BITS	\
102 	(ISR_DPERR | ISR_SSERR | ISR_RMABT | ISR_RTABT | ISR_RXSOVR |	\
103 	ISR_TXURN | ISR_TXDESC | ISR_TXERR |	\
104 	ISR_RXORN | ISR_RXIDLE | ISR_RXOK | ISR_RXERR)
105 
106 #define	USE_MULTICAST_HASHTBL
107 
108 static int	sfe_tx_copy_thresh = 256;
109 static int	sfe_rx_copy_thresh = 256;
110 
111 /* special PHY registers for SIS900 */
112 #define	MII_CONFIG1	0x0010
113 #define	MII_CONFIG2	0x0011
114 #define	MII_MASK	0x0013
115 #define	MII_RESV	0x0014
116 
117 #define	PHY_MASK		0xfffffff0
118 #define	PHY_SIS900_INTERNAL	0x001d8000
119 #define	PHY_ICS1893		0x0015f440
120 
121 
122 #define	SFE_DESC_SIZE	16	/* including pads rounding up to power of 2 */
123 
124 /*
125  * Supported chips
126  */
struct chip_info {
	uint16_t	venid;		/* PCI vendor id */
	uint16_t	devid;		/* PCI device id */
	char		*chip_name;	/* human readable chip name */
	int		chip_type;	/* one of the CHIPTYPE_* values below */
#define	CHIPTYPE_DP83815	0
#define	CHIPTYPE_SIS900		1
};
135 
136 /*
137  * Chip dependent MAC state
138  */
struct sfe_dev {
	/* misc HW information */
	struct chip_info	*chip;		/* matched entry of sfe_chiptbl[] */
	uint32_t		our_intr_bits;	/* interrupt mask currently in use */
	uint32_t		cr;		/* soft copy of the CR register */
	uint_t			tx_drain_threshold;	/* tx fifo threshold, in bytes */
	uint_t			tx_fill_threshold;	/* tx fifo threshold, in bytes */
	uint_t			rx_drain_threshold;	/* rx fifo threshold, in bytes */
	uint_t			rx_fill_threshold;	/* rx fifo threshold, in bytes */
	uint8_t			revid;	/* revision from PCI configuration */
	/* chip-specific routine to read the factory mac address */
	boolean_t		(*get_mac_addr)(struct gem_dev *);
	uint8_t			mac_addr[ETHERADDRL];	/* mac address cache */
	uint8_t			bridge_revid;
};
153 
154 /*
155  * Hardware information
156  */
struct chip_info sfe_chiptbl[] = {
	{ 0x1039, 0x0900, "SiS900", CHIPTYPE_SIS900, },
	{ 0x100b, 0x0020, "DP83815/83816", CHIPTYPE_DP83815, },
	{ 0x1039, 0x7016, "SiS7016", CHIPTYPE_SIS900, },
};
/* number of entries in sfe_chiptbl[] */
#define	CHIPTABLESIZE (sizeof (sfe_chiptbl)/sizeof (struct chip_info))
163 
164 /* ======================================================== */
165 
166 /* mii operations */
167 static void  sfe_mii_sync_dp83815(struct gem_dev *);
168 static void  sfe_mii_sync_sis900(struct gem_dev *);
169 static uint16_t  sfe_mii_read_dp83815(struct gem_dev *, uint_t);
170 static uint16_t  sfe_mii_read_sis900(struct gem_dev *, uint_t);
171 static void sfe_mii_write_dp83815(struct gem_dev *, uint_t, uint16_t);
172 static void sfe_mii_write_sis900(struct gem_dev *, uint_t, uint16_t);
173 static void sfe_set_eq_sis630(struct gem_dev *dp);
174 /* nic operations */
175 static int sfe_reset_chip_sis900(struct gem_dev *);
176 static int sfe_reset_chip_dp83815(struct gem_dev *);
177 static int sfe_init_chip(struct gem_dev *);
178 static int sfe_start_chip(struct gem_dev *);
179 static int sfe_stop_chip(struct gem_dev *);
180 static int sfe_set_media(struct gem_dev *);
181 static int sfe_set_rx_filter_dp83815(struct gem_dev *);
182 static int sfe_set_rx_filter_sis900(struct gem_dev *);
183 static int sfe_get_stats(struct gem_dev *);
184 static int sfe_attach_chip(struct gem_dev *);
185 
186 /* descriptor operations */
187 static int sfe_tx_desc_write(struct gem_dev *dp, int slot,
188 		    ddi_dma_cookie_t *dmacookie, int frags, uint64_t flags);
189 static void sfe_tx_start(struct gem_dev *dp, int startslot, int nslot);
190 static void sfe_rx_desc_write(struct gem_dev *dp, int slot,
191 		    ddi_dma_cookie_t *dmacookie, int frags);
192 static uint_t sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc);
193 static uint64_t sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc);
194 
195 static void sfe_tx_desc_init(struct gem_dev *dp, int slot);
196 static void sfe_rx_desc_init(struct gem_dev *dp, int slot);
197 static void sfe_tx_desc_clean(struct gem_dev *dp, int slot);
198 static void sfe_rx_desc_clean(struct gem_dev *dp, int slot);
199 
200 /* interrupt handler */
201 static uint_t sfe_interrupt(struct gem_dev *dp);
202 
203 /* ======================================================== */
204 
205 /* mapping attributes */
206 /* Data access requirements. */
static struct ddi_device_acc_attr sfe_dev_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,	/* chip registers are little endian */
	DDI_STRICTORDER_ACC
};

/* On sparc, Buffers should be native endian for speed */
static struct ddi_device_acc_attr sfe_buf_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,	/* native endianness */
	DDI_STRICTORDER_ACC
};

/* DMA attributes for tx/rx buffers; align and sgllen are patched at attach */
static ddi_dma_attr_t sfe_dma_attr_buf = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xffffffffull,		/* dma_attr_addr_hi */
	0x00000fffull,		/* dma_attr_count_max */
	0, /* patched later */	/* dma_attr_align */
	0x000003fc,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0x00000fffull,		/* dma_attr_maxxfer */
	0xffffffffull,		/* dma_attr_seg */
	0, /* patched later */	/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

/* DMA attributes for the descriptor rings: 16 byte aligned (SFE_DESC_SIZE) */
static ddi_dma_attr_t sfe_dma_attr_desc = {
	DMA_ATTR_V0,		/* dma_attr_version */
	16,			/* dma_attr_addr_lo */
	0xffffffffull,		/* dma_attr_addr_hi */
	0xffffffffull,		/* dma_attr_count_max */
	16,			/* dma_attr_align */
	0x000003fc,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0xffffffffull,		/* dma_attr_maxxfer */
	0xffffffffull,		/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

/*
 * NOTE(review): presumably a tunable selecting PCI memory space (vs I/O
 * space) register mapping at attach time — confirm against the attach code.
 */
uint32_t sfe_use_pcimemspace = 0;
251 
252 /* ======================================================== */
253 /*
254  * HW manipulation routines
255  */
256 /* ======================================================== */
257 
#define	SFE_EEPROM_DELAY(dp)	\
	{ (void) INL(dp, EROMAR); (void) INL(dp, EROMAR); }
#define	EE_CMD_READ	6
#define	EE_CMD_SHIFT	6

/*
 * Read one 16bit word from the serial EEPROM by bit-banging the
 * EROMAR register.  `offset' is the word index to read.  The read
 * command and address are clocked out MSB first, then 16 data bits
 * are clocked back in.
 */
static uint16_t
sfe_read_eeprom(struct gem_dev *dp, uint_t offset)
{
	int		eedi;
	int		i;
	uint16_t	ret;

	/* ensure de-assert chip select */
	OUTL(dp, EROMAR, 0);
	SFE_EEPROM_DELAY(dp);
	OUTL(dp, EROMAR, EROMAR_EESK);
	SFE_EEPROM_DELAY(dp);

	/* assert chip select */
	offset |= EE_CMD_READ << EE_CMD_SHIFT;

	/* clock out the 3bit command and 6bit word address, MSB first */
	for (i = 8; i >= 0; i--) {
		/* make command */
		eedi = ((offset >> i) & 1) << EROMAR_EEDI_SHIFT;

		/* send 1 bit */
		OUTL(dp, EROMAR, EROMAR_EECS | eedi);
		SFE_EEPROM_DELAY(dp);
		OUTL(dp, EROMAR, EROMAR_EECS | eedi | EROMAR_EESK);
		SFE_EEPROM_DELAY(dp);
	}

	OUTL(dp, EROMAR, EROMAR_EECS);

	/* clock in the 16 data bits, MSB first */
	ret = 0;
	for (i = 0; i < 16; i++) {
		/* Get 1 bit */
		OUTL(dp, EROMAR, EROMAR_EECS);
		SFE_EEPROM_DELAY(dp);
		OUTL(dp, EROMAR, EROMAR_EECS | EROMAR_EESK);
		SFE_EEPROM_DELAY(dp);

		ret = (ret << 1) | ((INL(dp, EROMAR) >> EROMAR_EEDO_SHIFT) & 1);
	}

	/* de-assert chip select again */
	OUTL(dp, EROMAR, 0);
	SFE_EEPROM_DELAY(dp);

	return (ret);
}
#undef SFE_EEPROM_DELAY
309 
310 static boolean_t
311 sfe_get_mac_addr_dp83815(struct gem_dev *dp)
312 {
313 	uint8_t		*mac;
314 	uint_t		val;
315 	int		i;
316 
317 #define	BITSET(p, ix, v)	(p)[(ix)/8] |= ((v) ? 1 : 0) << ((ix) & 0x7)
318 
319 	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
320 
321 	mac = dp->dev_addr.ether_addr_octet;
322 
323 	/* first of all, clear MAC address buffer */
324 	bzero(mac, ETHERADDRL);
325 
326 	/* get bit 0 */
327 	val = sfe_read_eeprom(dp, 0x6);
328 	BITSET(mac, 0, val & 1);
329 
330 	/* get bit 1 - 16 */
331 	val = sfe_read_eeprom(dp, 0x7);
332 	for (i = 0; i < 16; i++) {
333 		BITSET(mac, 1 + i, val & (1 << (15 - i)));
334 	}
335 
336 	/* get bit 17 -  32 */
337 	val = sfe_read_eeprom(dp, 0x8);
338 	for (i = 0; i < 16; i++) {
339 		BITSET(mac, 17 + i, val & (1 << (15 - i)));
340 	}
341 
342 	/* get bit 33 -  47 */
343 	val = sfe_read_eeprom(dp, 0x9);
344 	for (i = 0; i < 15; i++) {
345 		BITSET(mac, 33 + i, val & (1 << (15 - i)));
346 	}
347 
348 	return (B_TRUE);
349 #undef BITSET
350 }
351 
352 static boolean_t
353 sfe_get_mac_addr_sis900(struct gem_dev *dp)
354 {
355 	uint_t		val;
356 	int		i;
357 	uint8_t		*mac;
358 
359 	mac = dp->dev_addr.ether_addr_octet;
360 
361 	for (i = 0; i < ETHERADDRL/2; i++) {
362 		val = sfe_read_eeprom(dp, 0x8 + i);
363 		*mac++ = (uint8_t)val;
364 		*mac++ = (uint8_t)(val >> 8);
365 	}
366 
367 	return (B_TRUE);
368 }
369 
370 static dev_info_t *
371 sfe_search_pci_dev_subr(dev_info_t *cur_node, int vendor_id, int device_id)
372 {
373 	dev_info_t	*child_id;
374 	dev_info_t	*ret;
375 	int		vid, did;
376 
377 	if (cur_node == NULL) {
378 		return (NULL);
379 	}
380 
381 	/* check brothers */
382 	do {
383 		vid = ddi_prop_get_int(DDI_DEV_T_ANY, cur_node,
384 		    DDI_PROP_DONTPASS, "vendor-id", -1);
385 		did = ddi_prop_get_int(DDI_DEV_T_ANY, cur_node,
386 		    DDI_PROP_DONTPASS, "device-id", -1);
387 
388 		if (vid == vendor_id && did == device_id) {
389 			/* found */
390 			return (cur_node);
391 		}
392 
393 		/* check children */
394 		if ((child_id = ddi_get_child(cur_node)) != NULL) {
395 			if ((ret = sfe_search_pci_dev_subr(child_id,
396 			    vendor_id, device_id)) != NULL) {
397 				return (ret);
398 			}
399 		}
400 
401 	} while ((cur_node = ddi_get_next_sibling(cur_node)) != NULL);
402 
403 	/* not found */
404 	return (NULL);
405 }
406 
407 static dev_info_t *
408 sfe_search_pci_dev(int vendor_id, int device_id)
409 {
410 	return (sfe_search_pci_dev_subr(ddi_root_node(), vendor_id, device_id));
411 }
412 
413 /* Avoid undefined symbol for non IA architectures */
414 #pragma weak	inb
415 #pragma weak	outb
416 
417 static boolean_t
418 sfe_get_mac_addr_sis630e(struct gem_dev *dp)
419 {
420 	int		i;
421 	dev_info_t	*isa_bridge;
422 	ddi_acc_handle_t isa_handle;
423 	int		reg;
424 
425 	if (inb == NULL || outb == NULL) {
426 		/* this is not IA architecture */
427 		return (B_FALSE);
428 	}
429 
430 	if ((isa_bridge = sfe_search_pci_dev(0x1039, 0x8)) == NULL) {
431 		cmn_err(CE_WARN, "%s: failed to find isa-bridge pci1039,8",
432 		    dp->name);
433 		return (B_FALSE);
434 	}
435 
436 	if (pci_config_setup(isa_bridge, &isa_handle) != DDI_SUCCESS) {
437 		cmn_err(CE_WARN, "%s: ddi_regs_map_setup failed",
438 		    dp->name);
439 		return (B_FALSE);
440 	}
441 
442 	/* enable to access CMOS RAM */
443 	reg = pci_config_get8(isa_handle, 0x48);
444 	pci_config_put8(isa_handle, 0x48, reg | 0x40);
445 
446 	for (i = 0; i < ETHERADDRL; i++) {
447 		outb(0x70, 0x09 + i);
448 		dp->dev_addr.ether_addr_octet[i] = inb(0x71);
449 	}
450 
451 	/* disable to access CMOS RAM */
452 	pci_config_put8(isa_handle, 0x48, reg);
453 	pci_config_teardown(&isa_handle);
454 
455 	return (B_TRUE);
456 }
457 
/*
 * Read the factory mac address for SiS635: ask the chip to reload its
 * rx filter contents (CR_RELOAD), then read the station address back
 * through the receive filter address/data register pair.
 */
static boolean_t
sfe_get_mac_addr_sis635(struct gem_dev *dp)
{
	int		i;
	uint32_t	rfcr;
	uint16_t	v;
	struct sfe_dev	*lp = dp->private;

	DPRINTF(2, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
	rfcr = INL(dp, RFCR);	/* save current rx filter mode */

	/* reload the rx filter contents */
	OUTL(dp, CR, lp->cr | CR_RELOAD);
	OUTL(dp, CR, lp->cr);

	/* disable packet filtering before reading filter */
	OUTL(dp, RFCR, rfcr & ~RFCR_RFEN);

	/* load MAC addr from filter data register */
	for (i = 0; i < ETHERADDRL; i += 2) {
		OUTL(dp, RFCR,
		    (RFADDR_MAC_SIS900 + (i/2)) << RFCR_RFADDR_SHIFT_SIS900);
		v = INL(dp, RFDR);	/* one 16bit word, little endian */
		dp->dev_addr.ether_addr_octet[i] = (uint8_t)v;
		dp->dev_addr.ether_addr_octet[i+1] = (uint8_t)(v >> 8);
	}

	/* re-enable packet filtering */
	OUTL(dp, RFCR, rfcr | RFCR_RFEN);

	return (B_TRUE);
}
489 
/*
 * Read the factory mac address for SiS962: the EEPROM is arbitrated,
 * so request access (EEREQ), wait for the grant (EEGNT), read the
 * address the same way as sis900, then release the EEPROM (EEDONE).
 */
static boolean_t
sfe_get_mac_addr_sis962(struct gem_dev *dp)
{
	boolean_t	ret;
	int		i;

	ret = B_FALSE;

	/* rise request signal to access EEPROM */
	OUTL(dp, MEAR, EROMAR_EEREQ);
	for (i = 0; (INL(dp, MEAR) & EROMAR_EEGNT) == 0; i++) {
		if (i > 200) {
			/* failed to acquire eeprom within about 2ms */
			cmn_err(CE_NOTE,
			    CONS "%s: failed to access eeprom", dp->name);
			goto x;
		}
		drv_usecwait(10);
	}
	ret = sfe_get_mac_addr_sis900(dp);
x:
	/* release EEPROM */
	OUTL(dp, MEAR, EROMAR_EEDONE);

	return (ret);
}
516 
/*
 * Reset the sis900 core: issue a full chip/tx/rx reset, wait for both
 * tx and rx reset completion, then program the configuration register.
 * Returns GEM_SUCCESS or GEM_FAILURE on reset timeout.
 */
static int
sfe_reset_chip_sis900(struct gem_dev *dp)
{
	int		i;
	uint32_t	done;
	uint32_t	val;
	struct sfe_dev	*lp = dp->private;

	DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__));

	/* invalidate mac addr cache */
	bzero(lp->mac_addr, sizeof (lp->mac_addr));

	lp->cr = 0;

	/* inhibit interrupt */
	OUTL(dp, IMR, 0);
	if (lp->our_intr_bits == 0) {
		/* we can clear interrupt source safely. */
		(void) INL(dp, ISR);
	}

	/* disable the rx filter */
	OUTL(dp, RFCR, 0);

	/* reset the chip and both descriptor rings */
	OUTL(dp, CR, CR_RST | CR_TXR | CR_RXR);
	drv_usecwait(10);

	/* wait until both tx and rx resets have completed (10ms max) */
	done = 0;
	for (i = 0; done != (ISR_TXRCMP | ISR_RXRCMP); i++) {
		if (i > 1000) {
			cmn_err(CE_WARN, "%s: chip reset timeout", dp->name);
			return (GEM_FAILURE);
		}
		done |= INL(dp, ISR) & (ISR_TXRCMP | ISR_RXRCMP);
		drv_usecwait(10);
	}

	if (lp->revid == SIS630ET_900_REV) {
		/* this revision needs CR_ACCESSMODE kept set in CR */
		lp->cr |= CR_ACCESSMODE;
		OUTL(dp, CR, lp->cr | INL(dp, CR));
	}

	/* Configuration register: enable PCI parity */
	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b",
	    dp->name, INL(dp, CFG), CFG_BITS_SIS900));
	val = CFG_PESEL;
	if (lp->revid >= SIS635A_900_REV ||
	    lp->revid == SIS900B_900_REV) {
		/* what is this ? */
		val |= CFG_RND_CNT;
	}
	OUTL(dp, CFG, val);
	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name,
	    INL(dp, CFG), CFG_BITS_SIS900));

	return (GEM_SUCCESS);
}
574 
/*
 * Reset the dp83815 core: issue CR_RST and poll until it self-clears,
 * clear the power-management state, then program the configuration
 * register.  Returns GEM_SUCCESS or GEM_FAILURE on reset timeout.
 */
static int
sfe_reset_chip_dp83815(struct gem_dev *dp)
{
	int		i;
	struct sfe_dev	*lp = dp->private;

	DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__));

	/* invalidate mac addr cache */
	bzero(lp->mac_addr, sizeof (lp->mac_addr));

	lp->cr = 0;

	/* inhibit interrupts */
	OUTL(dp, IMR, 0);
	if (lp->our_intr_bits == 0) {
		/* we can clear interrupt source safely. */
		(void) INL(dp, ISR);
	}

	/* disable the rx filter */
	OUTL(dp, RFCR, 0);

	/* issue a soft reset; CR_RST clears itself when done */
	OUTL(dp, CR, CR_RST);
	drv_usecwait(10);

	for (i = 0; INL(dp, CR) & CR_RST; i++) {
		if (i > 100) {
			/* give up after about 1ms */
			cmn_err(CE_WARN, "!%s: chip reset timeout", dp->name);
			return (GEM_FAILURE);
		}
		drv_usecwait(10);
	}
	DPRINTF(0, (CE_CONT, "!%s: chip reset in %duS", dp->name, i*10));

	/*
	 * NOTE(review): CCSR_PMESTS presumably clears a pending PME
	 * status bit before the register is zeroed — confirm against
	 * the DP83815 datasheet.
	 */
	OUTL(dp, CCSR, CCSR_PMESTS);
	OUTL(dp, CCSR, 0);

	/* Configuration register: enable PCI parity */
	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b",
	    dp->name, INL(dp, CFG), CFG_BITS_DP83815));
	OUTL(dp, CFG, CFG_PESEL | CFG_PAUSE_ADV);
	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name,
	    INL(dp, CFG), CFG_BITS_DP83815));

	return (GEM_SUCCESS);
}
621 
/*
 * Initialize the chip after reset: keep interrupts masked and load the
 * tx/rx descriptor ring base addresses.  Always returns GEM_SUCCESS.
 */
static int
sfe_init_chip(struct gem_dev *dp)
{
	/* Configuration register: have been set up in sfe_chip_reset */

	/* PCI test control register: do nothing */

	/* Interrupt status register : do nothing */

	/* Interrupt mask register: clear, but leave lp->our_intr_bits */
	OUTL(dp, IMR, 0);

	/* Enhanced PHY Access register (sis900): do nothing */

	/* Transmit Descriptor Pointer register: base addr of TX ring */
	OUTL(dp, TXDP, dp->tx_ring_dma);

	/* Receive descriptor pointer register: base addr of RX ring */
	OUTL(dp, RXDP, dp->rx_ring_dma);

	return (GEM_SUCCESS);
}
644 
645 static uint_t
646 sfe_mcast_hash(struct gem_dev *dp, uint8_t *addr)
647 {
648 	return (gem_ether_crc_be(addr, ETHERADDRL));
649 }
650 
651 #ifdef DEBUG_LEVEL
652 static void
653 sfe_rxfilter_dump(struct gem_dev *dp, int start, int end)
654 {
655 	int		i;
656 	int		j;
657 	uint16_t	ram[0x10];
658 
659 	cmn_err(CE_CONT, "!%s: rx filter ram dump:", dp->name);
660 #define	WORDS_PER_LINE	4
661 	for (i = start; i < end; i += WORDS_PER_LINE*2) {
662 		for (j = 0; j < WORDS_PER_LINE; j++) {
663 			OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i + j*2);
664 			ram[j] = INL(dp, RFDR);
665 		}
666 
667 		cmn_err(CE_CONT, "!0x%02x: 0x%04x 0x%04x 0x%04x 0x%04x",
668 		    i, ram[0], ram[1], ram[2], ram[3]);
669 		}
670 
671 #undef	WORDS_PER_LINE
672 }
673 #endif
674 
/* rx filter ram offsets of the four dp83815 perfect match patterns */
static uint_t	sfe_rf_perfect_base_dp83815[] = {
	RFADDR_PMATCH0_DP83815,
	RFADDR_PMATCH1_DP83815,
	RFADDR_PMATCH2_DP83815,
	RFADDR_PMATCH3_DP83815,
};
681 
/*
 * Program the dp83815 receive filter from dp->rxmode and the multicast
 * list.  Filter mode is chosen in order of preference: promiscuous,
 * all-multicast (also when the list overflows the hash table), the
 * 512bit multicast hash table (more than 4 addresses), or up to 4
 * perfect match patterns.  The station address is written to the
 * filter ram only when it differs from the cached copy.
 */
static int
sfe_set_rx_filter_dp83815(struct gem_dev *dp)
{
	int		i;
	int		j;
	uint32_t	mode;
	uint8_t		*mac = dp->cur_addr.ether_addr_octet;
	uint16_t	hash_tbl[32];
	struct sfe_dev	*lp = dp->private;

	DPRINTF(1, (CE_CONT, CONS "%s: %s: called, mc_count:%d, mode:0x%b",
	    dp->name, __func__, dp->mc_count, dp->rxmode, RXMODE_BITS));

#if DEBUG_LEVEL > 0
	for (i = 0; i < dp->mc_count; i++) {
		cmn_err(CE_CONT,
		"!%s: adding mcast(%d) %02x:%02x:%02x:%02x:%02x:%02x",
		    dp->name, i,
		    dp->mc_list[i].addr.ether_addr_octet[0],
		    dp->mc_list[i].addr.ether_addr_octet[1],
		    dp->mc_list[i].addr.ether_addr_octet[2],
		    dp->mc_list[i].addr.ether_addr_octet[3],
		    dp->mc_list[i].addr.ether_addr_octet[4],
		    dp->mc_list[i].addr.ether_addr_octet[5]);
	}
#endif
	if ((dp->rxmode & RXMODE_ENABLE) == 0) {
		/* disable rx filter */
		OUTL(dp, RFCR, 0);
		return (GEM_SUCCESS);
	}

	/*
	 * Set Receive filter control register
	 */
	if (dp->rxmode & RXMODE_PROMISC) {
		/* all broadcast, all multicast, all physical */
		mode = RFCR_AAB | RFCR_AAM | RFCR_AAP;
	} else if ((dp->rxmode & RXMODE_ALLMULTI) || dp->mc_count > 16*32/2) {
		/* all broadcast, all multicast, physical for the chip */
		mode = RFCR_AAB | RFCR_AAM | RFCR_APM_DP83815;
	} else if (dp->mc_count > 4) {
		/*
		 * Use multicast hash table,
		 * accept all broadcast and physical for the chip.
		 */
		mode = RFCR_AAB | RFCR_MHEN_DP83815 | RFCR_APM_DP83815;

		bzero(hash_tbl, sizeof (hash_tbl));
		for (i = 0; i < dp->mc_count; i++) {
			/* the table is indexed by the top 9 hash bits */
			j = dp->mc_list[i].hash >> (32 - 9);
			hash_tbl[j / 16] |= 1 << (j % 16);
		}
	} else {
		/*
		 * Use pattern match filter for multicast address,
		 * accept all broadcast and physical for the chip
		 */
		/* need to enable corresponding pattern registers */
		mode = RFCR_AAB | RFCR_APM_DP83815 |
		    (((1 << dp->mc_count) - 1) << RFCR_APAT_SHIFT);
	}

#if DEBUG_LEVEL > 1
	cmn_err(CE_CONT,
	    "!%s: mac %02x:%02x:%02x:%02x:%02x:%02x"
	    "  cache %02x:%02x:%02x:%02x:%02x:%02x",
	    dp->name, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
	    lp->mac_addr[0], lp->mac_addr[1],
	    lp->mac_addr[2], lp->mac_addr[3],
	    lp->mac_addr[4], lp->mac_addr[5]);
#endif
	if (bcmp(mac, lp->mac_addr, ETHERADDRL) != 0) {
		/*
		 * XXX - need to *disable* rx filter to load mac address for
		 * the chip. otherwise, we cannot setup rxfilter correctly.
		 */
		/* setup perfect match register for my station address */
		for (i = 0; i < ETHERADDRL; i += 2) {
			OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i);
			OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]);
		}

		bcopy(mac, lp->mac_addr, ETHERADDRL);
	}

#if DEBUG_LEVEL > 3
	/* clear pattern ram */
	for (j = 0x200; j < 0x380; j += 2) {
		OUTL(dp, RFCR, j);
		OUTL(dp, RFDR, 0);
	}
#endif
	if (mode & RFCR_APAT_DP83815) {
		/* setup multicast address into pattern match registers */
		for (j = 0; j < dp->mc_count; j++) {
			mac = &dp->mc_list[j].addr.ether_addr_octet[0];
			for (i = 0; i < ETHERADDRL; i += 2) {
				OUTL(dp, RFCR,
				    sfe_rf_perfect_base_dp83815[j] + i*2);
				OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]);
			}
		}

		/* setup pattern count registers */
		OUTL(dp, RFCR, RFADDR_PCOUNT01_DP83815);
		OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL);
		OUTL(dp, RFCR, RFADDR_PCOUNT23_DP83815);
		OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL);
	}

	if (mode & RFCR_MHEN_DP83815) {
		/* Load Multicast hash table */
		for (i = 0; i < 32; i++) {
			/* for DP83815, index is in byte */
			OUTL(dp, RFCR, RFADDR_MULTICAST_DP83815 + i*2);
			OUTL(dp, RFDR, hash_tbl[i]);
		}
	}
#if DEBUG_LEVEL > 2
	sfe_rxfilter_dump(dp, 0, 0x10);
	sfe_rxfilter_dump(dp, 0x200, 0x380);
#endif
	/* Set rx filter mode and enable rx filter */
	OUTL(dp, RFCR, RFCR_RFEN | mode);

	return (GEM_SUCCESS);
}
810 
/*
 * Program the sis900 receive filter from dp->rxmode and the multicast
 * list.  The hardware hash table size depends on the chip revision
 * (128 or 256 bits); the mode is promiscuous, all-multicast, or the
 * hash table.  The station address is rewritten only when it differs
 * from the cached copy.
 */
static int
sfe_set_rx_filter_sis900(struct gem_dev *dp)
{
	int		i;
	uint32_t	mode;
	uint16_t	hash_tbl[16];
	uint8_t		*mac = dp->cur_addr.ether_addr_octet;
	int		hash_size;
	int		hash_shift;
	struct sfe_dev	*lp = dp->private;

	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));

	if ((dp->rxmode & RXMODE_ENABLE) == 0) {
		/* disable rx filter */
		OUTL(dp, RFCR, 0);
		return (GEM_SUCCESS);
	}

	/*
	 * determine hardware hash table size in word.
	 */
	hash_shift = 25;
	if (lp->revid >= SIS635A_900_REV || lp->revid == SIS900B_900_REV) {
		/* newer revisions have a twice larger hash table */
		hash_shift = 24;
	}
	hash_size = (1 << (32 - hash_shift)) / 16;
	bzero(hash_tbl, sizeof (hash_tbl));

	/* Set Receive filter control register */

	if (dp->rxmode & RXMODE_PROMISC) {
		/* all broadcast, all multicast, all physical */
		mode = RFCR_AAB | RFCR_AAM | RFCR_AAP;
	} else if ((dp->rxmode & RXMODE_ALLMULTI) ||
	    dp->mc_count > hash_size*16/2) {
		/* all broadcast, all multicast, physical for the chip */
		mode = RFCR_AAB | RFCR_AAM;
	} else {
		/* all broadcast, physical for the chip */
		mode = RFCR_AAB;
	}

	/* make hash table */
	for (i = 0; i < dp->mc_count; i++) {
		uint_t	h;
		h = dp->mc_list[i].hash >> hash_shift;
		hash_tbl[h / 16] |= 1 << (h % 16);
	}

	if (bcmp(mac, lp->mac_addr, ETHERADDRL) != 0) {
		/* load the new mac address into the filter ram */
		for (i = 0; i < ETHERADDRL/2; i++) {
			/* For sis900, index is in word */
			OUTL(dp, RFCR,
			    (RFADDR_MAC_SIS900+i) << RFCR_RFADDR_SHIFT_SIS900);
			OUTL(dp, RFDR, (mac[i*2+1] << 8) | mac[i*2]);
		}

		bcopy(mac, lp->mac_addr, ETHERADDRL);
	}

	/* Load Multicast hash table */
	for (i = 0; i < hash_size; i++) {
		/* For sis900, index is in word */
		OUTL(dp, RFCR,
		    (RFADDR_MULTICAST_SIS900 + i) << RFCR_RFADDR_SHIFT_SIS900);
		OUTL(dp, RFDR, hash_tbl[i]);
	}

	/* Load rx filter mode and enable rx filter */
	OUTL(dp, RFCR, RFCR_RFEN | mode);

	return (GEM_SUCCESS);
}
886 
/*
 * Start the nic core: install the interrupt mask and enable the
 * receiver.  Always returns GEM_SUCCESS.
 */
static int
sfe_start_chip(struct gem_dev *dp)
{
	struct sfe_dev	*lp = dp->private;

	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));

	/*
	 * setup interrupt mask, which shouldn't include ISR_TOK
	 * to improve performance.
	 */
	lp->our_intr_bits = OUR_INTR_BITS;

	/* enable interrupt */
	if ((dp->misc_flag & GEM_NOINTR) == 0) {
		OUTL(dp, IER, 1);
		OUTL(dp, IMR, lp->our_intr_bits);
	}

	/* Kick RX */
	OUTL(dp, CR, lp->cr | CR_RXE);

	return (GEM_SUCCESS);
}
911 
912 /*
913  * Stop nic core gracefully.
914  */
static int
sfe_stop_chip(struct gem_dev *dp)
{
	struct sfe_dev	*lp = dp->private;
	uint32_t	done;
	int		i;

	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));

	/*
	 * Although we inhibit interrupt here, we don't clear soft copy of
	 * interrupt mask to avoid bogus interrupts.
	 */
	OUTL(dp, IMR, 0);

	/* stop TX and RX immediately */
	OUTL(dp, CR, lp->cr | CR_TXR | CR_RXR);

	/* wait until both reset-complete bits are seen (10ms max) */
	done = 0;
	for (i = 0; done != (ISR_RXRCMP | ISR_TXRCMP); i++) {
		if (i > 1000) {
			/*
			 * As gem layer will call sfe_reset_chip(),
			 * we don't need to reset further
			 */
			cmn_err(CE_NOTE, "!%s: %s: Tx/Rx reset timeout",
			    dp->name, __func__);

			return (GEM_FAILURE);
		}
		done |= INL(dp, ISR) & (ISR_RXRCMP | ISR_TXRCMP);
		drv_usecwait(10);
	}

	return (GEM_SUCCESS);
}
951 
952 /*
953  * Setup media mode
954  */
955 static uint_t
956 sfe_mxdma_value[] = { 512, 4, 8, 16, 32, 64, 128, 256, };
957 
958 static uint_t
959 sfe_encode_mxdma(uint_t burstsize)
960 {
961 	int	i;
962 
963 	if (burstsize > 256) {
964 		/* choose 512 */
965 		return (0);
966 	}
967 
968 	for (i = 1; i < 8; i++) {
969 		if (burstsize <= sfe_mxdma_value[i]) {
970 			break;
971 		}
972 	}
973 	return (i);
974 }
975 
976 static int
977 sfe_set_media(struct gem_dev *dp)
978 {
979 	uint32_t	txcfg;
980 	uint32_t	rxcfg;
981 	uint32_t	pcr;
982 	uint32_t	val;
983 	uint32_t	txmxdma;
984 	uint32_t	rxmxdma;
985 	struct sfe_dev	*lp = dp->private;
986 #ifdef DEBUG_LEVEL
987 	extern int	gem_speed_value[];
988 #endif
989 	DPRINTF(2, (CE_CONT, CONS "%s: %s: %s duplex, %d Mbps",
990 	    dp->name, __func__,
991 	    dp->full_duplex ? "full" : "half", gem_speed_value[dp->speed]));
992 
993 	/* initialize txcfg and rxcfg */
994 	txcfg = TXCFG_ATP;
995 	if (dp->full_duplex) {
996 		txcfg |= (TXCFG_CSI | TXCFG_HBI);
997 	}
998 	rxcfg =	RXCFG_AEP | RXCFG_ARP;
999 	if (dp->full_duplex) {
1000 		rxcfg |= RXCFG_ATX;
1001 	}
1002 
1003 	/* select txmxdma and rxmxdma, maxmum burst length */
1004 	if (lp->chip->chip_type == CHIPTYPE_SIS900) {
1005 #ifdef DEBUG_SIS900_EDB
1006 		val = CFG_EDB_MASTER;
1007 #else
1008 		val = INL(dp, CFG) & CFG_EDB_MASTER;
1009 #endif
1010 		if (val) {
1011 			/*
1012 			 * sis900 built-in cores:
1013 			 * max burst length must be fixed to 64
1014 			 */
1015 			txmxdma = 64;
1016 			rxmxdma = 64;
1017 		} else {
1018 			/*
1019 			 * sis900 pci chipset:
1020 			 * the vendor recommended to fix max burst length
1021 			 * to 512
1022 			 */
1023 			txmxdma = 512;
1024 			rxmxdma = 512;
1025 		}
1026 	} else {
1027 		/*
1028 		 * NS dp83815/816:
1029 		 * use user defined or default for tx/rx max burst length
1030 		 */
1031 		txmxdma = max(dp->txmaxdma, 256);
1032 		rxmxdma = max(dp->rxmaxdma, 256);
1033 	}
1034 
1035 
1036 	/* tx high water mark */
1037 	lp->tx_drain_threshold = ROUNDUP2(dp->txthr, TXCFG_FIFO_UNIT);
1038 
1039 	/* determine tx_fill_threshold accroding drain threshold */
1040 	lp->tx_fill_threshold =
1041 	    TXFIFOSIZE - lp->tx_drain_threshold - TXCFG_FIFO_UNIT;
1042 
1043 	/* tune txmxdma not to exceed tx_fill_threshold */
1044 	for (; ; ) {
1045 		/* normalize txmxdma requested */
1046 		val = sfe_encode_mxdma(txmxdma);
1047 		txmxdma = sfe_mxdma_value[val];
1048 
1049 		if (txmxdma <= lp->tx_fill_threshold) {
1050 			break;
1051 		}
1052 		/* select new txmxdma */
1053 		txmxdma = txmxdma / 2;
1054 	}
1055 	txcfg |= val << TXCFG_MXDMA_SHIFT;
1056 
1057 	/* encode rxmxdma, maxmum burst length for rx */
1058 	val = sfe_encode_mxdma(rxmxdma);
1059 	txcfg |= val << RXCFG_MXDMA_SHIFT;
1060 	rxmxdma = sfe_mxdma_value[val];
1061 
1062 	/* receive starting threshold - it have only 5bit-wide field */
1063 	val = ROUNDUP2(max(dp->rxthr, ETHERMIN), RXCFG_FIFO_UNIT);
1064 	lp->rx_drain_threshold =
1065 	    min(val, (RXCFG_DRTH >> RXCFG_DRTH_SHIFT) * RXCFG_FIFO_UNIT);
1066 
1067 	DPRINTF(0, (CE_CONT,
1068 	    "%s: %s: tx: drain:%d(rest %d) fill:%d mxdma:%d,"
1069 	    " rx: drain:%d mxdma:%d",
1070 	    dp->name, __func__,
1071 	    lp->tx_drain_threshold, TXFIFOSIZE - lp->tx_drain_threshold,
1072 	    lp->tx_fill_threshold, txmxdma,
1073 	    lp->rx_drain_threshold, rxmxdma));
1074 
1075 	ASSERT(lp->tx_drain_threshold < 64*TXCFG_FIFO_UNIT);
1076 	ASSERT(lp->tx_fill_threshold < 64*TXCFG_FIFO_UNIT);
1077 	ASSERT(lp->rx_drain_threshold < 32*RXCFG_FIFO_UNIT);
1078 
1079 	txcfg |= ((lp->tx_fill_threshold/TXCFG_FIFO_UNIT) << TXCFG_FLTH_SHIFT)
1080 	    | (lp->tx_drain_threshold/TXCFG_FIFO_UNIT);
1081 	OUTL(dp, TXCFG, txcfg);
1082 
1083 	rxcfg |= ((lp->rx_drain_threshold/RXCFG_FIFO_UNIT) << RXCFG_DRTH_SHIFT);
1084 	if (lp->chip->chip_type == CHIPTYPE_DP83815) {
1085 		rxcfg |= RXCFG_ALP_DP83815;
1086 	}
1087 	OUTL(dp, RXCFG, rxcfg);
1088 
1089 	DPRINTF(0, (CE_CONT, CONS "%s: %s: txcfg:%b rxcfg:%b",
1090 	    dp->name, __func__,
1091 	    txcfg, TXCFG_BITS, rxcfg, RXCFG_BITS));
1092 
1093 	/* Flow control */
1094 	if (lp->chip->chip_type == CHIPTYPE_DP83815) {
1095 		pcr = INL(dp, PCR);
1096 		switch (dp->flow_control) {
1097 		case FLOW_CONTROL_SYMMETRIC:
1098 		case FLOW_CONTROL_RX_PAUSE:
1099 			OUTL(dp, PCR, pcr | PCR_PSEN | PCR_PS_MCAST);
1100 			break;
1101 
1102 		default:
1103 			OUTL(dp, PCR,
1104 			    pcr & ~(PCR_PSEN | PCR_PS_MCAST | PCR_PS_DA));
1105 			break;
1106 		}
1107 		DPRINTF(2, (CE_CONT, CONS "%s: PCR: %b", dp->name,
1108 		    INL(dp, PCR), PCR_BITS));
1109 
1110 	} else if (lp->chip->chip_type == CHIPTYPE_SIS900) {
1111 		switch (dp->flow_control) {
1112 		case FLOW_CONTROL_SYMMETRIC:
1113 		case FLOW_CONTROL_RX_PAUSE:
1114 			OUTL(dp, FLOWCTL, FLOWCTL_FLOWEN);
1115 			break;
1116 		default:
1117 			OUTL(dp, FLOWCTL, 0);
1118 			break;
1119 		}
1120 		DPRINTF(2, (CE_CONT, CONS "%s: FLOWCTL: %b",
1121 		    dp->name, INL(dp, FLOWCTL), FLOWCTL_BITS));
1122 	}
1123 	return (GEM_SUCCESS);
1124 }
1125 
/*
 * gc_get_stats callback for the gem framework.
 * Hardware statistics are already accumulated on the fly in
 * sfe_tx_desc_stat()/sfe_rx_desc_stat(), so there is nothing to
 * collect here.
 */
static int
sfe_get_stats(struct gem_dev *dp)
{
	/* do nothing */
	return (GEM_SUCCESS);
}
1132 
1133 /*
1134  * descriptor manipulations
1135  */
/*
 * Setup one tx descriptor for a single-fragment packet.
 * The descriptor is normally handed to the hardware immediately
 * (CMDSTS_OWN); for the first descriptor of a packet
 * (GEM_TXFLAG_HEAD) the OWN bit is deliberately left clear and is
 * set later in sfe_tx_start(), after the rest of the chain has been
 * flushed to memory.
 * Returns the number of descriptors consumed (always 1, enforced by
 * the ASSERT below).
 */
static int
sfe_tx_desc_write(struct gem_dev *dp, int slot,
		ddi_dma_cookie_t *dmacookie, int frags, uint64_t flags)
{
	uint32_t		mark;
	struct sfe_desc		*tdp;
	ddi_dma_cookie_t	*dcp;
#if DEBUG_LEVEL > 1
	int			i;

	cmn_err(CE_CONT,
	    CONS "%s: time:%d %s seqnum: %d, slot %d, frags: %d flags: %llx",
	    dp->name, ddi_get_lbolt(), __func__,
	    dp->tx_desc_tail, slot, frags, flags);

	for (i = 0; i < frags; i++) {
		cmn_err(CE_CONT, CONS "%d: addr: 0x%x, len: 0x%x",
		    i, dmacookie[i].dmac_address, dmacookie[i].dmac_size);
}
#endif
	/*
	 * write tx descriptor in reversed order.
	 */
#if DEBUG_LEVEL > 3
	flags |= GEM_TXFLAG_INTR;
#endif
	/* also request a tx-complete interrupt when asked for */
	mark = (flags & GEM_TXFLAG_INTR)
			? (CMDSTS_OWN | CMDSTS_INTR) : CMDSTS_OWN;

	ASSERT(frags == 1);
	dcp = &dmacookie[0];
	if (flags & GEM_TXFLAG_HEAD) {
		/* defer the ownership transfer until sfe_tx_start() */
		mark &= ~CMDSTS_OWN;
	}

	/* the buffer address must be valid before cmdsts grants ownership */
	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
	tdp->d_bufptr = LE_32((uint32_t)dcp->dmac_address);
	tdp->d_cmdsts = LE_32(mark | (uint32_t)dcp->dmac_size);

	return (frags);
}
1177 
/*
 * Hand the descriptor chain at [start_slot .. start_slot + nslot - 1]
 * to the hardware and kick the transmitter.
 * The head descriptor's OWN bit was left clear by sfe_tx_desc_write();
 * it is set here only after the trailing descriptors have been synced
 * for the device, so the NIC never sees a partially written chain.
 */
static void
sfe_tx_start(struct gem_dev *dp, int start_slot, int nslot)
{
	struct sfe_desc		*tdp;
	struct sfe_dev		*lp = dp->private;

	/* flush everything after the head descriptor first */
	if (nslot > 1) {
		gem_tx_desc_dma_sync(dp,
		    SLOT(start_slot + 1, TX_RING_SIZE),
		    nslot - 1, DDI_DMA_SYNC_FORDEV);
	}

	/* now grant ownership of the whole chain in one store */
	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * start_slot];
	tdp->d_cmdsts |= LE_32(CMDSTS_OWN);

	gem_tx_desc_dma_sync(dp, start_slot, 1, DDI_DMA_SYNC_FORDEV);

	/*
	 * Let the Transmit Buffer Manager Fill state machine active.
	 */
	if (dp->mac_active) {
		OUTL(dp, CR, lp->cr | CR_TXE);
	}
}
1202 
/*
 * Setup one rx descriptor to receive into the given dma buffer.
 * The OWN bit is left clear; on the receive ring that means the NIC
 * owns the buffer — the NIC sets CMDSTS_OWN once a frame has been
 * stored (see sfe_rx_desc_stat()).  CMDSTS_INTR requests an rx
 * interrupt for this descriptor, and the low bits carry the buffer
 * size.
 */
static void
sfe_rx_desc_write(struct gem_dev *dp, int slot,
	    ddi_dma_cookie_t *dmacookie, int frags)
{
	struct sfe_desc		*rdp;
#if DEBUG_LEVEL > 2
	int			i;

	ASSERT(frags == 1);

	cmn_err(CE_CONT, CONS
	    "%s: %s seqnum: %d, slot %d, frags: %d",
	    dp->name, __func__, dp->rx_active_tail, slot, frags);
	for (i = 0; i < frags; i++) {
		cmn_err(CE_CONT, CONS "  frag: %d addr: 0x%llx, len: 0x%lx",
		    i, dmacookie[i].dmac_address, dmacookie[i].dmac_size);
	}
#endif
	/* for the last slot of the packet */
	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];

	rdp->d_bufptr = LE_32((uint32_t)dmacookie->dmac_address);
	rdp->d_cmdsts = LE_32(CMDSTS_INTR | (uint32_t)dmacookie->dmac_size);
}
1227 
1228 static uint_t
1229 sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc)
1230 {
1231 	struct sfe_desc		*tdp;
1232 	uint32_t		status;
1233 	int			cols;
1234 #ifdef DEBUG_LEVEL
1235 	int			i;
1236 	clock_t			delay;
1237 #endif
1238 	/* check status of the last descriptor */
1239 	tdp = (void *)
1240 	    &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot + ndesc - 1, TX_RING_SIZE)];
1241 
1242 	status = LE_32(tdp->d_cmdsts);
1243 
1244 	DPRINTF(2, (CE_CONT, CONS "%s: time:%ld %s: slot:%d, status:0x%b",
1245 	    dp->name, ddi_get_lbolt(), __func__,
1246 	    slot, status, TXSTAT_BITS));
1247 
1248 	if (status & CMDSTS_OWN) {
1249 		/*
1250 		 * not yet transmitted
1251 		 */
1252 		return (0);
1253 	}
1254 
1255 	if (status & CMDSTS_MORE) {
1256 		/* XXX - the hardware problem but don't panic the system */
1257 		/* avoid lint bug for %b format string including 32nd bit */
1258 		cmn_err(CE_NOTE, CONS
1259 		    "%s: tx status bits incorrect:  slot:%d, status:0x%x",
1260 		    dp->name, slot, status);
1261 	}
1262 
1263 #if DEBUG_LEVEL > 3
1264 	delay = (ddi_get_lbolt() - dp->tx_buf_head->txb_stime) * 10;
1265 	if (delay >= 50) {
1266 		DPRINTF(0, (CE_NOTE, "%s: tx deferred %d mS: slot %d",
1267 		    dp->name, delay, slot));
1268 	}
1269 #endif
1270 
1271 #if DEBUG_LEVEL > 3
1272 	for (i = 0; i < nfrag-1; i++) {
1273 		uint32_t	s;
1274 		int		n;
1275 
1276 		n = SLOT(slot + i, TX_RING_SIZE);
1277 		s = LE_32(
1278 		    ((struct sfe_desc *)((void *)
1279 		    &dp->tx_ring[SFE_DESC_SIZE * n]))->d_cmdsts);
1280 
1281 		ASSERT(s & CMDSTS_MORE);
1282 		ASSERT((s & CMDSTS_OWN) == 0);
1283 	}
1284 #endif
1285 
1286 	/*
1287 	 *  collect statistics
1288 	 */
1289 	if ((status & CMDSTS_OK) == 0) {
1290 
1291 		/* failed to transmit the packet */
1292 
1293 		DPRINTF(0, (CE_CONT, CONS "%s: Transmit error, Tx status %b",
1294 		    dp->name, status, TXSTAT_BITS));
1295 
1296 		dp->stats.errxmt++;
1297 
1298 		if (status & CMDSTS_TFU) {
1299 			dp->stats.underflow++;
1300 		} else if (status & CMDSTS_CRS) {
1301 			dp->stats.nocarrier++;
1302 		} else if (status & CMDSTS_OWC) {
1303 			dp->stats.xmtlatecoll++;
1304 		} else if ((!dp->full_duplex) && (status & CMDSTS_EC)) {
1305 			dp->stats.excoll++;
1306 			dp->stats.collisions += 16;
1307 		} else {
1308 			dp->stats.xmit_internal_err++;
1309 		}
1310 	} else if (!dp->full_duplex) {
1311 		cols = (status >> CMDSTS_CCNT_SHIFT) & CCNT_MASK;
1312 
1313 		if (cols > 0) {
1314 			if (cols == 1) {
1315 				dp->stats.first_coll++;
1316 			} else /* (cols > 1) */ {
1317 				dp->stats.multi_coll++;
1318 			}
1319 			dp->stats.collisions += cols;
1320 		} else if (status & CMDSTS_TD) {
1321 			dp->stats.defer++;
1322 		}
1323 	}
1324 	return (GEM_TX_DONE);
1325 }
1326 
/*
 * Check the receive status of the descriptor at [slot] and collect
 * receive-error statistics.
 * Returns 0 while the NIC still owns the buffer (CMDSTS_OWN clear on
 * the rx ring means "not yet received"), otherwise GEM_RX_DONE or'ed
 * with GEM_RX_ERR for corrupted frames, or with the frame length
 * (FCS stripped) on success.
 * ndesc is unused: rx buffers always occupy a single descriptor.
 */
static uint64_t
sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc)
{
	struct sfe_desc		*rdp;
	uint_t			len;
	uint_t			flag;
	uint32_t		status;

	flag = GEM_RX_DONE;

	/* Dont read ISR because we cannot ack only to rx interrupt. */

	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];

	status = LE_32(rdp->d_cmdsts);

	DPRINTF(2, (CE_CONT, CONS "%s: time:%ld %s: slot:%d, status:0x%b",
	    dp->name, ddi_get_lbolt(), __func__,
	    slot, status, RXSTAT_BITS));

	if ((status & CMDSTS_OWN) == 0) {
		/*
		 * No more received packets because
		 * this buffer is owned by NIC.
		 */
		return (0);
	}

#define	RX_ERR_BITS \
	(CMDSTS_RXA | CMDSTS_RXO | CMDSTS_LONG | CMDSTS_RUNT | \
		CMDSTS_ISE | CMDSTS_CRCE | CMDSTS_FAE | CMDSTS_MORE)

	if (status & RX_ERR_BITS) {
		/*
		 * Packet with error received
		 */
		DPRINTF(0, (CE_CONT, CONS "%s: Corrupted packet "
		    "received, buffer status: %b",
		    dp->name, status, RXSTAT_BITS));

		/* collect statistics information */
		/* the first matching error bit determines the counter */
		dp->stats.errrcv++;

		if (status & CMDSTS_RXO) {
			dp->stats.overflow++;
		} else if (status & (CMDSTS_LONG | CMDSTS_MORE)) {
			dp->stats.frame_too_long++;
		} else if (status & CMDSTS_RUNT) {
			dp->stats.runt++;
		} else if (status & (CMDSTS_ISE | CMDSTS_FAE)) {
			dp->stats.frame++;
		} else if (status & CMDSTS_CRCE) {
			dp->stats.crc++;
		} else {
			dp->stats.rcv_internal_err++;
		}

		return (flag | GEM_RX_ERR);
	}

	/*
	 * this packet was received without errors
	 */
	/* strip the trailing frame check sequence from the length */
	if ((len = (status & CMDSTS_SIZE)) >= ETHERFCSL) {
		len -= ETHERFCSL;
	}

#if DEBUG_LEVEL > 10
{
	int	i;
	uint8_t	*bp = dp->rx_buf_head->rxb_buf;

	cmn_err(CE_CONT, CONS "%s: len:%d", dp->name, len);

	for (i = 0; i < 60; i += 10) {
		cmn_err(CE_CONT, CONS
		    "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x",
		    bp[0], bp[1], bp[2], bp[3], bp[4],
		    bp[5], bp[6], bp[7], bp[8], bp[9]);
	}
	bp += 10;
}
#endif
	return (flag | (len & GEM_RX_LEN));
}
1412 
1413 static void
1414 sfe_tx_desc_init(struct gem_dev *dp, int slot)
1415 {
1416 	struct sfe_desc		*tdp;
1417 	uint32_t		here;
1418 
1419 	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
1420 
1421 	/* don't clear d_link field, which have a valid pointer */
1422 	tdp->d_cmdsts = 0;
1423 
1424 	/* make a link to this from the previous descriptor */
1425 	here = ((uint32_t)dp->tx_ring_dma) + SFE_DESC_SIZE*slot;
1426 
1427 	tdp = (void *)
1428 	    &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot - 1, TX_RING_SIZE)];
1429 	tdp->d_link = LE_32(here);
1430 }
1431 
1432 static void
1433 sfe_rx_desc_init(struct gem_dev *dp, int slot)
1434 {
1435 	struct sfe_desc		*rdp;
1436 	uint32_t		here;
1437 
1438 	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
1439 
1440 	/* don't clear d_link field, which have a valid pointer */
1441 	rdp->d_cmdsts = LE_32(CMDSTS_OWN);
1442 
1443 	/* make a link to this from the previous descriptor */
1444 	here = ((uint32_t)dp->rx_ring_dma) + SFE_DESC_SIZE*slot;
1445 
1446 	rdp = (void *)
1447 	    &dp->rx_ring[SFE_DESC_SIZE * SLOT(slot - 1, RX_RING_SIZE)];
1448 	rdp->d_link = LE_32(here);
1449 }
1450 
1451 static void
1452 sfe_tx_desc_clean(struct gem_dev *dp, int slot)
1453 {
1454 	struct sfe_desc		*tdp;
1455 
1456 	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
1457 	tdp->d_cmdsts = 0;
1458 }
1459 
1460 static void
1461 sfe_rx_desc_clean(struct gem_dev *dp, int slot)
1462 {
1463 	struct sfe_desc		*rdp;
1464 
1465 	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
1466 	rdp->d_cmdsts = LE_32(CMDSTS_OWN);
1467 }
1468 
1469 /*
1470  * Device depend interrupt handler
1471  */
/*
 * Interrupt service routine.
 * Reading ISR clears all pending causes at once (see the note in
 * sfe_rx_desc_stat: "we cannot ack only to rx interrupt"), so both rx
 * and tx events are processed from the single latched value.
 * Returns DDI_INTR_UNCLAIMED or DDI_INTR_CLAIMED, the latter possibly
 * or'ed with INTR_RESTART_TX to ask the gem framework to restart
 * transmission.
 */
static uint_t
sfe_interrupt(struct gem_dev *dp)
{
	uint32_t	isr;
	uint_t		flags = 0;
	boolean_t	need_to_reset = B_FALSE;
	struct sfe_dev	*lp = dp->private;

	/* read reason and clear interrupt */
	isr = INL(dp, ISR);

	if ((isr & lp->our_intr_bits) == 0) {
		/* we are not the interrupt source */
		return (DDI_INTR_UNCLAIMED);
	}

	DPRINTF(3, (CE_CONT,
	    CONS "%s: time:%ld %s:called: isr:0x%b rx_active_head: %d",
	    dp->name, ddi_get_lbolt(), __func__,
	    isr, INTR_BITS, dp->rx_active_head));

	if (!dp->mac_active) {
		/* the device is going to stop */
		lp->our_intr_bits = 0;
		return (DDI_INTR_CLAIMED);
	}

	isr &= lp->our_intr_bits;

	if (isr & (ISR_RXSOVR | ISR_RXORN | ISR_RXIDLE | ISR_RXERR |
	    ISR_RXDESC | ISR_RXOK)) {
		(void) gem_receive(dp);

		if (isr & (ISR_RXSOVR | ISR_RXORN)) {
			DPRINTF(0, (CE_CONT,
			    CONS "%s: rx fifo overrun: isr %b",
			    dp->name, isr, INTR_BITS));
			/* no need restart rx */
			dp->stats.overflow++;
		}

		if (isr & ISR_RXIDLE) {
			DPRINTF(0, (CE_CONT,
			    CONS "%s: rx buffer ran out: isr %b",
			    dp->name, isr, INTR_BITS));

			dp->stats.norcvbuf++;

			/*
			 * Make RXDP points the head of receive
			 * buffer list.
			 */
			OUTL(dp, RXDP, dp->rx_ring_dma +
			    SFE_DESC_SIZE *
			    SLOT(dp->rx_active_head, RX_RING_SIZE));

			/* Restart the receive engine */
			OUTL(dp, CR, lp->cr | CR_RXE);
		}
	}

	if (isr & (ISR_TXURN | ISR_TXERR | ISR_TXDESC |
	    ISR_TXIDLE | ISR_TXOK)) {
		/* need to reclaim tx buffers */
		if (gem_tx_done(dp)) {
			flags |= INTR_RESTART_TX;
		}

		/*
		 * XXX - tx error statistics will be counted in
		 * sfe_tx_desc_stat() and no need to restart tx on errors.
		 */
	}

	if (isr & (ISR_DPERR | ISR_SSERR | ISR_RMABT | ISR_RTABT)) {
		cmn_err(CE_WARN, "%s: ERROR interrupt: isr %b.",
		    dp->name, isr, INTR_BITS);
		need_to_reset = B_TRUE;
	}
	/* NOTE(review): no goto targets this label in this function */
reset:
	if (need_to_reset) {
		(void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
		flags |= INTR_RESTART_TX;
	}

	DPRINTF(5, (CE_CONT, CONS "%s: %s: return: isr: %b",
	    dp->name, __func__, isr, INTR_BITS));

	return (DDI_INTR_CLAIMED | flags);
}
1562 
1563 /* ======================================================== */
1564 /*
1565  * HW depend MII routine
1566  */
1567 /* ======================================================== */
1568 
1569 /*
1570  * MII routines for NS DP83815
1571  */
/*
 * gc_mii_sync callback for DP83815: the internal PHY registers are
 * memory-mapped (see sfe_mii_read_dp83815), so no MDIO preamble is
 * needed.
 */
static void
sfe_mii_sync_dp83815(struct gem_dev *dp)
{
	/* do nothing */
}
1577 
1578 static uint16_t
1579 sfe_mii_read_dp83815(struct gem_dev *dp, uint_t offset)
1580 {
1581 	DPRINTF(4, (CE_CONT, CONS"%s: %s: offset 0x%x",
1582 	    dp->name, __func__, offset));
1583 	return ((uint16_t)INL(dp, MII_REGS_BASE + offset*4));
1584 }
1585 
1586 static void
1587 sfe_mii_write_dp83815(struct gem_dev *dp, uint_t offset, uint16_t val)
1588 {
1589 	DPRINTF(4, (CE_CONT, CONS"%s: %s: offset 0x%x 0x%x",
1590 	    dp->name, __func__, offset, val));
1591 	OUTL(dp, MII_REGS_BASE + offset*4, val);
1592 }
1593 
/*
 * gc_mii_config callback for DP83815.
 * Applies the PHY "DSP" register patch that NS documented for the
 * CVNG silicon revision, then falls through to the common gem PHY
 * configuration.
 */
static int
sfe_mii_config_dp83815(struct gem_dev *dp)
{
	uint32_t	srr;

	/* silicon revision determines whether the patch is needed */
	srr = INL(dp, SRR) & SRR_REV;

	DPRINTF(0, (CE_CONT, CONS "%s: srr:0x%04x %04x %04x %04x %04x %04x",
	    dp->name, srr,
	    INW(dp, 0x00cc),	/* PGSEL */
	    INW(dp, 0x00e4),	/* PMDCSR */
	    INW(dp, 0x00fc),	/* TSTDAT */
	    INW(dp, 0x00f4),	/* DSPCFG */
	    INW(dp, 0x00f8)));	/* SDCFG */

	if (srr == SRR_REV_CVNG) {
		/*
		 * NS datasheet says that DP83815CVNG needs following
		 * registers to be patched for optimizing its performance.
		 * A report said that CRC errors on RX were disappeared
		 * with the patch.
		 * The values are magic numbers from the NS errata;
		 * do not change them.
		 */
		OUTW(dp, 0x00cc, 0x0001);	/* PGSEL */
		OUTW(dp, 0x00e4, 0x189c);	/* PMDCSR */
		OUTW(dp, 0x00fc, 0x0000);	/* TSTDAT */
		OUTW(dp, 0x00f4, 0x5040);	/* DSPCFG */
		OUTW(dp, 0x00f8, 0x008c);	/* SDCFG */

		DPRINTF(0, (CE_CONT,
		    CONS "%s: PHY patched %04x %04x %04x %04x %04x",
		    dp->name,
		    INW(dp, 0x00cc),	/* PGSEL */
		    INW(dp, 0x00e4),	/* PMDCSR */
		    INW(dp, 0x00fc),	/* TSTDAT */
		    INW(dp, 0x00f4),	/* DSPCFG */
		    INW(dp, 0x00f8)));	/* SDCFG */
	}

	return (gem_mii_config_default(dp));
}
1634 
1635 
1636 /*
1637  * MII routines for SiS900
1638  */
1639 #define	MDIO_DELAY(dp)	{(void) INL(dp, MEAR); }
1640 static void
1641 sfe_mii_sync_sis900(struct gem_dev *dp)
1642 {
1643 	int	i;
1644 
1645 	for (i = 0; i < 32; i++) {
1646 		OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO);
1647 		MDIO_DELAY(dp);
1648 		OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO | MEAR_MDC);
1649 		MDIO_DELAY(dp);
1650 	}
1651 }
1652 
/*
 * gc_mii_config callback for SiS900 variants.
 * Applies PHY specific workarounds (ICS1893, SiS630E internal PHY)
 * and tunes the SiS630 equalizer before running the common gem PHY
 * configuration.
 */
static int
sfe_mii_config_sis900(struct gem_dev *dp)
{
	struct sfe_dev	*lp = dp->private;

	/* Do chip depend setup */
	if ((dp->mii_phy_id & PHY_MASK) == PHY_ICS1893) {
		/* workaround for ICS1893 PHY */
		/* magic register/value from vendor code — undocumented here */
		gem_mii_write(dp, 0x0018, 0xD200);
	}

	if (lp->revid == SIS630E_900_REV) {
		/*
		 * SiS 630E has bugs on default values
		 * of PHY registers
		 */
		gem_mii_write(dp, MII_AN_ADVERT, 0x05e1);
		gem_mii_write(dp, MII_CONFIG1, 0x0022);
		gem_mii_write(dp, MII_CONFIG2, 0xff00);
		gem_mii_write(dp, MII_MASK,    0xffc0);
	}
	sfe_set_eq_sis630(dp);

	return (gem_mii_config_default(dp));
}
1678 
1679 static uint16_t
1680 sfe_mii_read_sis900(struct gem_dev *dp, uint_t reg)
1681 {
1682 	uint32_t	cmd;
1683 	uint16_t	ret;
1684 	int		i;
1685 	uint32_t	data;
1686 
1687 	cmd = MII_READ_CMD(dp->mii_phy_addr, reg);
1688 
1689 	for (i = 31; i >= 18; i--) {
1690 		data = ((cmd >> i) & 1) <<  MEAR_MDIO_SHIFT;
1691 		OUTL(dp, MEAR, data | MEAR_MDDIR);
1692 		MDIO_DELAY(dp);
1693 		OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC);
1694 		MDIO_DELAY(dp);
1695 	}
1696 
1697 	/* turn around cycle */
1698 	OUTL(dp, MEAR, data | MEAR_MDDIR);
1699 	MDIO_DELAY(dp);
1700 
1701 	/* get response from PHY */
1702 	OUTL(dp, MEAR, MEAR_MDC);
1703 	MDIO_DELAY(dp);
1704 	OUTL(dp, MEAR, 0);
1705 
1706 #if DEBUG_LEBEL > 0
1707 	if (INL(dp, MEAR) & MEAR_MDIO) {
1708 		cmn_err(CE_WARN, "%s: PHY@%d not responded",
1709 		    dp->name, dp->mii_phy_addr);
1710 	}
1711 #endif
1712 	/* terminate response cycle */
1713 	OUTL(dp, MEAR, MEAR_MDC);
1714 
1715 	ret = 0;	/* to avoid lint errors */
1716 	for (i = 16; i > 0; i--) {
1717 		OUTL(dp, MEAR, 0);
1718 		ret = (ret << 1) | ((INL(dp, MEAR) >> MEAR_MDIO_SHIFT) & 1);
1719 		OUTL(dp, MEAR, MEAR_MDC);
1720 		MDIO_DELAY(dp);
1721 	}
1722 
1723 	/* terminate data transmission from PHY */
1724 	OUTL(dp, MEAR, 0);
1725 	MDIO_DELAY(dp);
1726 	OUTL(dp, MEAR, MEAR_MDC);
1727 	MDIO_DELAY(dp);
1728 
1729 	return (ret);
1730 }
1731 
/*
 * Write a PHY register via MDIO bit-banging on the MEAR register.
 * The 32-bit command frame built by MII_WRITE_CMD (presumably
 * preamble/start/op/addr/reg/data — see the macro definition) is
 * clocked out msb first, then the cycle is terminated with two idle
 * clocks.
 */
static void
sfe_mii_write_sis900(struct gem_dev *dp, uint_t reg, uint16_t val)
{
	uint32_t	cmd;
	int		i;
	uint32_t	data;

	cmd = MII_WRITE_CMD(dp->mii_phy_addr, reg, val);

	/* shift the whole frame out, one MDC clock per bit */
	for (i = 31; i >= 0; i--) {
		data = ((cmd >> i) & 1) << MEAR_MDIO_SHIFT;
		OUTL(dp, MEAR, data | MEAR_MDDIR);
		MDIO_DELAY(dp);
		OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC);
		MDIO_DELAY(dp);
	}

	/* send two 0s to terminate write cycle. */
	for (i = 0; i < 2; i++) {
		OUTL(dp, MEAR, MEAR_MDDIR);
		MDIO_DELAY(dp);
		OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDC);
		MDIO_DELAY(dp);
	}
	OUTL(dp, MEAR, MEAR_MDDIR);
	MDIO_DELAY(dp);
	OUTL(dp, MEAR, MEAR_MDC);
	MDIO_DELAY(dp);
}
1761 #undef MDIO_DELAY
1762 
/*
 * Tune the equalizer of the SiS630 internal PHY.
 * While the link is up, the equalizer value is sampled repeatedly
 * from the vendor specific MII_RESV register and a new setting is
 * derived from the observed min/max according to chip and host-bridge
 * revision specific rules; while the link is down, a different
 * MII_RESV setting is written instead.
 * The bit patterns (0x2200, 0x6000, masks, thresholds) appear to
 * follow SiS reference code and are not documented here —
 * NOTE(review): verify against the vendor driver before changing.
 */
static void
sfe_set_eq_sis630(struct gem_dev *dp)
{
	uint16_t	reg14h;
	uint16_t	eq_value;
	uint16_t	max_value;
	uint16_t	min_value;
	int		i;
	uint8_t		rev;
	struct sfe_dev	*lp = dp->private;

	rev = lp->revid;

	if (!(rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
	    rev == SIS630A_900_REV || rev == SIS630ET_900_REV)) {
		/* it doesn't have a internal PHY */
		return;
	}

	if (dp->mii_state == MII_STATE_LINKUP) {
		reg14h = gem_mii_read(dp, MII_RESV);
		gem_mii_write(dp, MII_RESV, (0x2200 | reg14h) & 0xBFFF);

		/* sample the equalizer value 10 times, track min/max */
		eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3;
		max_value = min_value = eq_value;
		for (i = 1; i < 10; i++) {
			eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3;
			max_value = max(eq_value, max_value);
			min_value = min(eq_value, min_value);
		}

		/* for 630E, rule to determine the equalizer value */
		if (rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
		    rev == SIS630ET_900_REV) {
			if (max_value < 5) {
				eq_value = max_value;
			} else if (5 <= max_value && max_value < 15) {
				eq_value =
				    max(max_value + 1,
				    min_value + 2);
			} else if (15 <= max_value) {
				eq_value =
				    max(max_value + 5,
				    min_value + 6);
			}
		}
		/* for 630B0&B1, rule to determine the equalizer value */
		else
		if (rev == SIS630A_900_REV &&
		    (lp->bridge_revid == SIS630B0 ||
		    lp->bridge_revid == SIS630B1)) {

			if (max_value == 0) {
				eq_value = 3;
			} else {
				eq_value = (max_value + min_value + 1)/2;
			}
		}
		/* write equalizer value and setting */
		reg14h = gem_mii_read(dp, MII_RESV) & ~0x02f8;
		reg14h |= 0x6000 | (eq_value << 3);
		gem_mii_write(dp, MII_RESV, reg14h);
	} else {
		/* link down: restore the default/auto setting */
		reg14h = (gem_mii_read(dp, MII_RESV) & ~0x4000) | 0x2000;
		if (rev == SIS630A_900_REV &&
		    (lp->bridge_revid == SIS630B0 ||
		    lp->bridge_revid == SIS630B1)) {

			reg14h |= 0x0200;
		}
		gem_mii_write(dp, MII_RESV, reg14h);
	}
}
1836 
1837 /* ======================================================== */
1838 /*
1839  * OS depend (device driver) routine
1840  */
1841 /* ======================================================== */
/*
 * Select the revision dependent routine used to read the factory mac
 * address, and cache the host bridge revision id that the SiS630
 * equalizer workaround in sfe_set_eq_sis630() depends on.
 */
static void
sfe_chipinfo_init_sis900(struct gem_dev *dp)
{
	int		rev;
	struct sfe_dev	*lp = (struct sfe_dev *)dp->private;

	rev = lp->revid;

	if (rev == SIS630E_900_REV /* 0x81 */) {
		/* sis630E */
		lp->get_mac_addr = &sfe_get_mac_addr_sis630e;
	} else if (rev > 0x81 && rev <= 0x90) {
		/* 630S, 630EA1, 630ET, 635A */
		lp->get_mac_addr = &sfe_get_mac_addr_sis635;
	} else if (rev == SIS962_900_REV /* 0x91 */) {
		/* sis962 or later */
		lp->get_mac_addr = &sfe_get_mac_addr_sis962;
	} else {
		/* sis900 */
		lp->get_mac_addr = &sfe_get_mac_addr_sis900;
	}

	/* default: no known host bridge (see sfe_set_eq_sis630) */
	lp->bridge_revid = 0;

	if (rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
	    rev == SIS630A_900_REV || rev ==  SIS630ET_900_REV) {
		/*
		 * read host bridge revision
		 */
		dev_info_t	*bridge;
		ddi_acc_handle_t bridge_handle;

		if ((bridge = sfe_search_pci_dev(0x1039, 0x630)) == NULL) {
			cmn_err(CE_WARN,
			    "%s: cannot find host bridge (pci1039,630)",
			    dp->name);
			return;
		}

		if (pci_config_setup(bridge, &bridge_handle) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s: pci_config_setup failed",
			    dp->name);
			return;
		}

		lp->bridge_revid =
		    pci_config_get8(bridge_handle, PCI_CONF_REVID);
		pci_config_teardown(&bridge_handle);
	}
}
1892 
1893 static int
1894 sfe_attach_chip(struct gem_dev *dp)
1895 {
1896 	struct sfe_dev		*lp = (struct sfe_dev *)dp->private;
1897 
1898 	DPRINTF(4, (CE_CONT, CONS "!%s: %s called", dp->name, __func__));
1899 
1900 	/* setup chip-depend get_mac_address function */
1901 	if (lp->chip->chip_type == CHIPTYPE_SIS900) {
1902 		sfe_chipinfo_init_sis900(dp);
1903 	} else {
1904 		lp->get_mac_addr = &sfe_get_mac_addr_dp83815;
1905 	}
1906 
1907 	/* read MAC address */
1908 	if (!(lp->get_mac_addr)(dp)) {
1909 		cmn_err(CE_WARN,
1910 		    "!%s: %s: failed to get factory mac address"
1911 		    " please specify a mac address in sfe.conf",
1912 		    dp->name, __func__);
1913 		return (GEM_FAILURE);
1914 	}
1915 
1916 	if (lp->chip->chip_type == CHIPTYPE_DP83815) {
1917 		dp->mii_phy_addr = -1;	/* no need to scan PHY */
1918 		dp->misc_flag |= GEM_VLAN_SOFT;
1919 		dp->txthr += 4; /* VTAG_SIZE */
1920 	}
1921 	dp->txthr = min(dp->txthr, TXFIFOSIZE - 2);
1922 
1923 	return (GEM_SUCCESS);
1924 }
1925 
1926 static int
1927 sfeattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1928 {
1929 	int			unit;
1930 	const char		*drv_name;
1931 	int			i;
1932 	ddi_acc_handle_t	conf_handle;
1933 	uint16_t		vid;
1934 	uint16_t		did;
1935 	uint8_t			rev;
1936 #ifdef DEBUG_LEVEL
1937 	uint32_t		iline;
1938 	uint8_t			latim;
1939 #endif
1940 	struct chip_info	*p;
1941 	struct gem_dev		*dp;
1942 	struct sfe_dev		*lp;
1943 	caddr_t			base;
1944 	ddi_acc_handle_t	regs_ha;
1945 	struct gem_conf		*gcp;
1946 
1947 	unit = ddi_get_instance(dip);
1948 	drv_name = ddi_driver_name(dip);
1949 
1950 	DPRINTF(3, (CE_CONT, CONS "%s%d: sfeattach: called", drv_name, unit));
1951 
1952 	/*
1953 	 * Common codes after power-up
1954 	 */
1955 	if (pci_config_setup(dip, &conf_handle) != DDI_SUCCESS) {
1956 		cmn_err(CE_WARN, "%s%d: ddi_regs_map_setup failed",
1957 		    drv_name, unit);
1958 		goto err;
1959 	}
1960 
1961 	vid  = pci_config_get16(conf_handle, PCI_CONF_VENID);
1962 	did  = pci_config_get16(conf_handle, PCI_CONF_DEVID);
1963 	rev  = pci_config_get16(conf_handle, PCI_CONF_REVID);
1964 #ifdef DEBUG_LEVEL
1965 	iline =	pci_config_get32(conf_handle, PCI_CONF_ILINE),
1966 	    latim = pci_config_get8(conf_handle, PCI_CONF_LATENCY_TIMER);
1967 #endif
1968 #ifdef DEBUG_BUILT_IN_SIS900
1969 	rev  = SIS630E_900_REV;
1970 #endif
1971 	for (i = 0, p = sfe_chiptbl; i < CHIPTABLESIZE; i++, p++) {
1972 		if (p->venid == vid && p->devid == did) {
1973 			/* found */
1974 			goto chip_found;
1975 		}
1976 	}
1977 
1978 	/* Not found */
1979 	cmn_err(CE_WARN,
1980 	    "%s%d: sfe_attach: wrong PCI venid/devid (0x%x, 0x%x)",
1981 	    drv_name, unit, vid, did);
1982 	pci_config_teardown(&conf_handle);
1983 	goto err;
1984 
1985 chip_found:
1986 	pci_config_put16(conf_handle, PCI_CONF_COMM,
1987 	    PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME |
1988 	    pci_config_get16(conf_handle, PCI_CONF_COMM));
1989 
1990 	/* ensure D0 mode */
1991 	(void) gem_pci_set_power_state(dip, conf_handle, PCI_PMCSR_D0);
1992 
1993 	pci_config_teardown(&conf_handle);
1994 
1995 	switch (cmd) {
1996 	case DDI_RESUME:
1997 		return (gem_resume(dip));
1998 
1999 	case DDI_ATTACH:
2000 
2001 		DPRINTF(0, (CE_CONT,
2002 		    CONS "%s%d: ilr 0x%08x, latency_timer:0x%02x",
2003 		    drv_name, unit, iline, latim));
2004 
2005 		/*
2006 		 * Map in the device registers.
2007 		 */
2008 		if (gem_pci_regs_map_setup(dip,
2009 		    (sfe_use_pcimemspace && p->chip_type == CHIPTYPE_DP83815)
2010 		    ? PCI_ADDR_MEM32 : PCI_ADDR_IO, PCI_ADDR_MASK,
2011 		    &sfe_dev_attr, &base, &regs_ha) != DDI_SUCCESS) {
2012 			cmn_err(CE_WARN,
2013 			    "%s%d: ddi_regs_map_setup failed",
2014 			    drv_name, unit);
2015 			goto err;
2016 		}
2017 
2018 		/*
2019 		 * construct gem configuration
2020 		 */
2021 		gcp = kmem_zalloc(sizeof (*gcp), KM_SLEEP);
2022 
2023 		/* name */
2024 		(void) sprintf(gcp->gc_name, "%s%d", drv_name, unit);
2025 
2026 		/* consistency on tx and rx */
2027 		gcp->gc_tx_buf_align = sizeof (uint8_t) - 1;
2028 		gcp->gc_tx_max_frags = MAXTXFRAGS;
2029 		gcp->gc_tx_max_descs_per_pkt = gcp->gc_tx_max_frags;
2030 		gcp->gc_tx_desc_unit_shift = 4;	/* 16 byte */
2031 		gcp->gc_tx_buf_size  = TX_BUF_SIZE;
2032 		gcp->gc_tx_buf_limit = gcp->gc_tx_buf_size;
2033 		gcp->gc_tx_ring_size = TX_RING_SIZE;
2034 		gcp->gc_tx_ring_limit = gcp->gc_tx_ring_size;
2035 		gcp->gc_tx_auto_pad  = B_TRUE;
2036 		gcp->gc_tx_copy_thresh = sfe_tx_copy_thresh;
2037 		gcp->gc_tx_desc_write_oo = B_TRUE;
2038 
2039 		gcp->gc_rx_buf_align = sizeof (uint8_t) - 1;
2040 		gcp->gc_rx_max_frags = MAXRXFRAGS;
2041 		gcp->gc_rx_desc_unit_shift = 4;
2042 		gcp->gc_rx_ring_size = RX_RING_SIZE;
2043 		gcp->gc_rx_buf_max   = RX_BUF_SIZE;
2044 		gcp->gc_rx_copy_thresh = sfe_rx_copy_thresh;
2045 
2046 		/* map attributes */
2047 		gcp->gc_dev_attr = sfe_dev_attr;
2048 		gcp->gc_buf_attr = sfe_buf_attr;
2049 		gcp->gc_desc_attr = sfe_buf_attr;
2050 
2051 		/* dma attributes */
2052 		gcp->gc_dma_attr_desc = sfe_dma_attr_desc;
2053 
2054 		gcp->gc_dma_attr_txbuf = sfe_dma_attr_buf;
2055 		gcp->gc_dma_attr_txbuf.dma_attr_align = gcp->gc_tx_buf_align+1;
2056 		gcp->gc_dma_attr_txbuf.dma_attr_sgllen = gcp->gc_tx_max_frags;
2057 
2058 		gcp->gc_dma_attr_rxbuf = sfe_dma_attr_buf;
2059 		gcp->gc_dma_attr_rxbuf.dma_attr_align = gcp->gc_rx_buf_align+1;
2060 		gcp->gc_dma_attr_rxbuf.dma_attr_sgllen = gcp->gc_rx_max_frags;
2061 
2062 		/* time out parameters */
2063 		gcp->gc_tx_timeout = 3*ONESEC;
2064 		gcp->gc_tx_timeout_interval = ONESEC;
2065 
2066 		/* MII timeout parameters */
2067 		gcp->gc_mii_link_watch_interval = ONESEC;
2068 		gcp->gc_mii_an_watch_interval   = ONESEC/5;
2069 		gcp->gc_mii_reset_timeout = MII_RESET_TIMEOUT;	/* 1 sec */
2070 		gcp->gc_mii_an_timeout = MII_AN_TIMEOUT;	/* 5 sec */
2071 		gcp->gc_mii_an_wait = 0;
2072 		gcp->gc_mii_linkdown_timeout = MII_LINKDOWN_TIMEOUT;
2073 
2074 		/* setting for general PHY */
2075 		gcp->gc_mii_an_delay = 0;
2076 		gcp->gc_mii_linkdown_action = MII_ACTION_RSA;
2077 		gcp->gc_mii_linkdown_timeout_action = MII_ACTION_RESET;
2078 		gcp->gc_mii_dont_reset = B_FALSE;
2079 
2080 
2081 		/* I/O methods */
2082 
2083 		/* mac operation */
2084 		gcp->gc_attach_chip = &sfe_attach_chip;
2085 		if (p->chip_type == CHIPTYPE_DP83815) {
2086 			gcp->gc_reset_chip = &sfe_reset_chip_dp83815;
2087 		} else {
2088 			gcp->gc_reset_chip = &sfe_reset_chip_sis900;
2089 		}
2090 		gcp->gc_init_chip  = &sfe_init_chip;
2091 		gcp->gc_start_chip = &sfe_start_chip;
2092 		gcp->gc_stop_chip  = &sfe_stop_chip;
2093 #ifdef USE_MULTICAST_HASHTBL
2094 		gcp->gc_multicast_hash = &sfe_mcast_hash;
2095 #endif
2096 		if (p->chip_type == CHIPTYPE_DP83815) {
2097 			gcp->gc_set_rx_filter = &sfe_set_rx_filter_dp83815;
2098 		} else {
2099 			gcp->gc_set_rx_filter = &sfe_set_rx_filter_sis900;
2100 		}
2101 		gcp->gc_set_media = &sfe_set_media;
2102 		gcp->gc_get_stats = &sfe_get_stats;
2103 		gcp->gc_interrupt = &sfe_interrupt;
2104 
2105 		/* descriptor operation */
2106 		gcp->gc_tx_desc_write = &sfe_tx_desc_write;
2107 		gcp->gc_tx_start = &sfe_tx_start;
2108 		gcp->gc_rx_desc_write = &sfe_rx_desc_write;
2109 		gcp->gc_rx_start = NULL;
2110 
2111 		gcp->gc_tx_desc_stat = &sfe_tx_desc_stat;
2112 		gcp->gc_rx_desc_stat = &sfe_rx_desc_stat;
2113 		gcp->gc_tx_desc_init = &sfe_tx_desc_init;
2114 		gcp->gc_rx_desc_init = &sfe_rx_desc_init;
2115 		gcp->gc_tx_desc_clean = &sfe_tx_desc_clean;
2116 		gcp->gc_rx_desc_clean = &sfe_rx_desc_clean;
2117 
2118 		/* mii operations */
2119 		if (p->chip_type == CHIPTYPE_DP83815) {
2120 			gcp->gc_mii_probe = &gem_mii_probe_default;
2121 			gcp->gc_mii_init = NULL;
2122 			gcp->gc_mii_config = &sfe_mii_config_dp83815;
2123 			gcp->gc_mii_sync = &sfe_mii_sync_dp83815;
2124 			gcp->gc_mii_read = &sfe_mii_read_dp83815;
2125 			gcp->gc_mii_write = &sfe_mii_write_dp83815;
2126 			gcp->gc_mii_tune_phy = NULL;
2127 			gcp->gc_flow_control = FLOW_CONTROL_NONE;
2128 		} else {
2129 			gcp->gc_mii_probe = &gem_mii_probe_default;
2130 			gcp->gc_mii_init = NULL;
2131 			gcp->gc_mii_config = &sfe_mii_config_sis900;
2132 			gcp->gc_mii_sync = &sfe_mii_sync_sis900;
2133 			gcp->gc_mii_read = &sfe_mii_read_sis900;
2134 			gcp->gc_mii_write = &sfe_mii_write_sis900;
2135 			gcp->gc_mii_tune_phy = &sfe_set_eq_sis630;
2136 			gcp->gc_flow_control = FLOW_CONTROL_RX_PAUSE;
2137 		}
2138 
2139 		lp = kmem_zalloc(sizeof (*lp), KM_SLEEP);
2140 		lp->chip = p;
2141 		lp->revid = rev;
2142 
2143 		cmn_err(CE_CONT, CONS "%s%d: chip:%s rev:0x%02x",
2144 		    drv_name, unit, p->chip_name, rev);
2145 
2146 		dp = gem_do_attach(dip, 0, gcp, base, &regs_ha,
2147 		    lp, sizeof (*lp));
2148 		kmem_free(gcp, sizeof (*gcp));
2149 
2150 		if (dp == NULL) {
2151 			goto err_freelp;
2152 		}
2153 
2154 		return (DDI_SUCCESS);
2155 
2156 err_freelp:
2157 		kmem_free(lp, sizeof (struct sfe_dev));
2158 err:
2159 		return (DDI_FAILURE);
2160 	}
2161 	return (DDI_FAILURE);
2162 }
2163 
2164 static int
2165 sfedetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2166 {
2167 	switch (cmd) {
2168 	case DDI_SUSPEND:
2169 		return (gem_suspend(dip));
2170 
2171 	case DDI_DETACH:
2172 		return (gem_do_detach(dip));
2173 	}
2174 	return (DDI_FAILURE);
2175 }
2176 
2177 /* ======================================================== */
2178 /*
2179  * OS depend (loadable streams driver) routine
2180  */
2181 /* ======================================================== */
/* driver entry points generated by the DDI macro; D_MP = MT-safe */
DDI_DEFINE_STREAM_OPS(sfe_ops, nulldev, nulldev, sfeattach, sfedetach,
    nodev, NULL, D_MP, NULL);

/* loadable module linkage: a single driver module */
static struct modldrv modldrv = {
	&mod_driverops,	/* Type of module.  This one is a driver */
	ident,
	&sfe_ops,	/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
2194 
2195 /* ======================================================== */
2196 /*
2197  * Loadable module support
2198  */
2199 /* ======================================================== */
2200 int
2201 _init(void)
2202 {
2203 	int 	status;
2204 
2205 	DPRINTF(2, (CE_CONT, CONS "sfe: _init: called"));
2206 	gem_mod_init(&sfe_ops, "sfe");
2207 	status = mod_install(&modlinkage);
2208 	if (status != DDI_SUCCESS) {
2209 		gem_mod_fini(&sfe_ops);
2210 	}
2211 	return (status);
2212 }
2213 
2214 /*
2215  * _fini : done
2216  */
2217 int
2218 _fini(void)
2219 {
2220 	int	status;
2221 
2222 	DPRINTF(2, (CE_CONT, CONS "sfe: _fini: called"));
2223 	status = mod_remove(&modlinkage);
2224 	if (status == DDI_SUCCESS) {
2225 		gem_mod_fini(&sfe_ops);
2226 	}
2227 	return (status);
2228 }
2229 
/*
 * _info: report module information via the common mod_info framework.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
2235