xref: /titanic_44/usr/src/uts/common/io/sfe/sfe.c (revision d67944fbe3fa0b31893a7116a09b0718eecf6078)
1 /*
2  *  sfe.c : DP83815/DP83816/SiS900 Fast Ethernet MAC driver for Solaris
3  *
4  * Copyright (c) 2002-2008 Masayuki Murayama.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  *    this list of conditions and the following disclaimer in the documentation
14  *    and/or other materials provided with the distribution.
15  *
16  * 3. Neither the name of the author nor the names of its contributors may be
17  *    used to endorse or promote products derived from this software without
18  *    specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31  * DAMAGE.
32  */
33 
34 /* Avoid undefined symbol for non IA architectures */
35 #pragma weak	inb
36 #pragma weak	outb
37 
38 /*
39  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
40  * Use is subject to license terms.
41  */
42 
43 /*
44  * System Header files.
45  */
46 #include <sys/types.h>
47 #include <sys/conf.h>
48 #include <sys/debug.h>
49 #include <sys/kmem.h>
50 #include <sys/modctl.h>
51 #include <sys/errno.h>
52 #include <sys/ddi.h>
53 #include <sys/sunddi.h>
54 #include <sys/byteorder.h>
55 #include <sys/ethernet.h>
56 #include <sys/pci.h>
57 
58 #include "sfe_mii.h"
59 #include "sfe_util.h"
60 #include "sfereg.h"
61 
/* module identification string (version tag) */
char	ident[] = "sis900/dp83815 driver v" "2.6.1t30os";
63 
64 /* Debugging support */
65 #ifdef DEBUG_LEVEL
66 static int sfe_debug = DEBUG_LEVEL;
67 #if DEBUG_LEVEL > 4
68 #define	CONS	"^"
69 #else
70 #define	CONS	"!"
71 #endif
72 #define	DPRINTF(n, args)	if (sfe_debug > (n)) cmn_err args
73 #else
74 #define	CONS	"!"
75 #define	DPRINTF(n, args)
76 #endif
77 
78 /*
79  * Useful macros and typedefs
80  */
81 #define	ONESEC		(drv_usectohz(1*1000000))
82 #define	ROUNDUP2(x, a)	(((x) + (a) - 1) & ~((a) - 1))
83 
84 /*
85  * Our configuration
86  */
87 #define	MAXTXFRAGS	1
88 #define	MAXRXFRAGS	1
89 
90 #ifndef	TX_BUF_SIZE
91 #define	TX_BUF_SIZE	64
92 #endif
93 #ifndef	TX_RING_SIZE
94 #if MAXTXFRAGS == 1
95 #define	TX_RING_SIZE	TX_BUF_SIZE
96 #else
97 #define	TX_RING_SIZE	(TX_BUF_SIZE * 4)
98 #endif
99 #endif
100 
101 #ifndef	RX_BUF_SIZE
102 #define	RX_BUF_SIZE	256
103 #endif
104 #ifndef	RX_RING_SIZE
105 #define	RX_RING_SIZE	RX_BUF_SIZE
106 #endif
107 
108 #define	OUR_INTR_BITS	\
109 	(ISR_DPERR | ISR_SSERR | ISR_RMABT | ISR_RTABT | ISR_RXSOVR |	\
110 	ISR_TXURN | ISR_TXDESC | ISR_TXERR |	\
111 	ISR_RXORN | ISR_RXIDLE | ISR_RXOK | ISR_RXERR)
112 
113 #define	USE_MULTICAST_HASHTBL
114 
115 static int	sfe_tx_copy_thresh = 256;
116 static int	sfe_rx_copy_thresh = 256;
117 
118 /* special PHY registers for SIS900 */
119 #define	MII_CONFIG1	0x0010
120 #define	MII_CONFIG2	0x0011
121 #define	MII_MASK	0x0013
122 #define	MII_RESV	0x0014
123 
124 #define	PHY_MASK		0xfffffff0
125 #define	PHY_SIS900_INTERNAL	0x001d8000
126 #define	PHY_ICS1893		0x0015f440
127 
128 
129 #define	SFE_DESC_SIZE	16	/* including pads rounding up to power of 2 */
130 
131 /*
132  * Supported chips
133  */
struct chip_info {
	uint16_t	venid;		/* PCI vendor id */
	uint16_t	devid;		/* PCI device id */
	char		*chip_name;	/* human readable chip name */
	int		chip_type;	/* one of the CHIPTYPE_* codes below */
#define	CHIPTYPE_DP83815	0
#define	CHIPTYPE_SIS900		1
};
142 
143 /*
144  * Chip dependent MAC state
145  */
struct sfe_dev {
	/* misc HW information */
	struct chip_info	*chip;		/* entry in sfe_chiptbl[] */
	uint32_t		our_intr_bits;	/* soft copy of IMR */
	uint32_t		isr_pended;	/* ISR bits seen while masked */
	uint32_t		cr;		/* soft copy of CR register */
	uint_t			tx_drain_threshold;	/* in bytes */
	uint_t			tx_fill_threshold;	/* in bytes */
	uint_t			rx_drain_threshold;	/* in bytes */
	uint_t			rx_fill_threshold;	/* in bytes */
	uint8_t			revid;	/* revision from PCI configuration */
	boolean_t		(*get_mac_addr)(struct gem_dev *);
	uint8_t			mac_addr[ETHERADDRL];	/* cached station addr */
	uint8_t			bridge_revid;
};
161 
162 /*
163  * Hardware information
164  */
struct chip_info sfe_chiptbl[] = {
	/* vendor, device, name, chip type */
	{ 0x1039, 0x0900, "SiS900", CHIPTYPE_SIS900, },
	{ 0x100b, 0x0020, "DP83815/83816", CHIPTYPE_DP83815, },
	{ 0x1039, 0x7016, "SiS7016", CHIPTYPE_SIS900, },
};
170 #define	CHIPTABLESIZE (sizeof (sfe_chiptbl)/sizeof (struct chip_info))
171 
172 /* ======================================================== */
173 
174 /* mii operations */
175 static void  sfe_mii_sync_dp83815(struct gem_dev *);
176 static void  sfe_mii_sync_sis900(struct gem_dev *);
177 static uint16_t  sfe_mii_read_dp83815(struct gem_dev *, uint_t);
178 static uint16_t  sfe_mii_read_sis900(struct gem_dev *, uint_t);
179 static void sfe_mii_write_dp83815(struct gem_dev *, uint_t, uint16_t);
180 static void sfe_mii_write_sis900(struct gem_dev *, uint_t, uint16_t);
181 static void sfe_set_eq_sis630(struct gem_dev *dp);
182 /* nic operations */
183 static int sfe_reset_chip_sis900(struct gem_dev *);
184 static int sfe_reset_chip_dp83815(struct gem_dev *);
185 static int sfe_init_chip(struct gem_dev *);
186 static int sfe_start_chip(struct gem_dev *);
187 static int sfe_stop_chip(struct gem_dev *);
188 static int sfe_set_media(struct gem_dev *);
189 static int sfe_set_rx_filter_dp83815(struct gem_dev *);
190 static int sfe_set_rx_filter_sis900(struct gem_dev *);
191 static int sfe_get_stats(struct gem_dev *);
192 static int sfe_attach_chip(struct gem_dev *);
193 
194 /* descriptor operations */
195 static int sfe_tx_desc_write(struct gem_dev *dp, int slot,
196 		    ddi_dma_cookie_t *dmacookie, int frags, uint64_t flags);
197 static void sfe_tx_start(struct gem_dev *dp, int startslot, int nslot);
198 static void sfe_rx_desc_write(struct gem_dev *dp, int slot,
199 		    ddi_dma_cookie_t *dmacookie, int frags);
200 static uint_t sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc);
201 static uint64_t sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc);
202 
203 static void sfe_tx_desc_init(struct gem_dev *dp, int slot);
204 static void sfe_rx_desc_init(struct gem_dev *dp, int slot);
205 static void sfe_tx_desc_clean(struct gem_dev *dp, int slot);
206 static void sfe_rx_desc_clean(struct gem_dev *dp, int slot);
207 
208 /* interrupt handler */
209 static uint_t sfe_interrupt(struct gem_dev *dp);
210 
211 /* ======================================================== */
212 
213 /* mapping attributes */
214 /* Data access requirements. */
static struct ddi_device_acc_attr sfe_dev_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,	/* chip registers are little endian */
	DDI_STRICTORDER_ACC
};

/* On sparc, Buffers should be native endian for speed */
static struct ddi_device_acc_attr sfe_buf_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,	/* native endianness */
	DDI_STRICTORDER_ACC
};
227 
/*
 * DMA attributes for tx/rx data buffers.  The alignment and
 * scatter/gather length fields are patched later — presumably in the
 * attach path before the first DMA binding; confirm against caller.
 */
static ddi_dma_attr_t sfe_dma_attr_buf = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xffffffffull,		/* dma_attr_addr_hi */
	0x00000fffull,		/* dma_attr_count_max */
	0, /* patched later */	/* dma_attr_align */
	0x000003fc,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0x00000fffull,		/* dma_attr_maxxfer */
	0xffffffffull,		/* dma_attr_seg */
	0, /* patched later */	/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};

/* DMA attributes for the tx/rx descriptor rings (16 byte aligned) */
static ddi_dma_attr_t sfe_dma_attr_desc = {
	DMA_ATTR_V0,		/* dma_attr_version */
	16,			/* dma_attr_addr_lo */
	0xffffffffull,		/* dma_attr_addr_hi */
	0xffffffffull,		/* dma_attr_count_max */
	16,			/* dma_attr_align */
	0x000003fc,		/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	0xffffffffull,		/* dma_attr_maxxfer */
	0xffffffffull,		/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};
257 
/*
 * Tunable; non-zero presumably selects PCI memory-space register
 * mapping instead of I/O space — confirm against the attach code.
 */
uint32_t sfe_use_pcimemspace = 0;
259 
260 /* ======================================================== */
261 /*
262  * HW manipulation routines
263  */
264 /* ======================================================== */
265 
266 #define	SFE_EEPROM_DELAY(dp)	\
267 	{ (void) INL(dp, EROMAR); (void) INL(dp, EROMAR); }
268 #define	EE_CMD_READ	6
269 #define	EE_CMD_SHIFT	6
270 
/*
 * sfe_read_eeprom: read one 16-bit word at word `offset' from the
 * serial EEPROM by bit-banging the EROMAR register.
 * A 9-bit command (READ opcode in the top bits, word offset below)
 * is clocked out MSB first, then 16 data bits are clocked in MSB
 * first.  Each level change is followed by two dummy reads
 * (SFE_EEPROM_DELAY) to satisfy EEPROM timing.
 */
static uint16_t
sfe_read_eeprom(struct gem_dev *dp, uint_t offset)
{
	int		eedi;
	int		i;
	uint16_t	ret;

	/* ensure de-assert chip select */
	OUTL(dp, EROMAR, 0);
	SFE_EEPROM_DELAY(dp);
	OUTL(dp, EROMAR, EROMAR_EESK);
	SFE_EEPROM_DELAY(dp);

	/* assert chip select */
	offset |= EE_CMD_READ << EE_CMD_SHIFT;

	for (i = 8; i >= 0; i--) {
		/* make command */
		eedi = ((offset >> i) & 1) << EROMAR_EEDI_SHIFT;

		/* send 1 bit: data is latched on the rising edge of EESK */
		OUTL(dp, EROMAR, EROMAR_EECS | eedi);
		SFE_EEPROM_DELAY(dp);
		OUTL(dp, EROMAR, EROMAR_EECS | eedi | EROMAR_EESK);
		SFE_EEPROM_DELAY(dp);
	}

	OUTL(dp, EROMAR, EROMAR_EECS);

	ret = 0;
	for (i = 0; i < 16; i++) {
		/* Get 1 bit, MSB first */
		OUTL(dp, EROMAR, EROMAR_EECS);
		SFE_EEPROM_DELAY(dp);
		OUTL(dp, EROMAR, EROMAR_EECS | EROMAR_EESK);
		SFE_EEPROM_DELAY(dp);

		ret = (ret << 1) | ((INL(dp, EROMAR) >> EROMAR_EEDO_SHIFT) & 1);
	}

	/* de-assert chip select */
	OUTL(dp, EROMAR, 0);
	SFE_EEPROM_DELAY(dp);

	return (ret);
}
316 #undef SFE_EEPROM_DELAY
317 
/*
 * sfe_get_mac_addr_dp83815: extract the factory mac address from
 * EEPROM words 0x6 - 0x9, where the 48-bit address is stored without
 * byte alignment; it is re-assembled one bit at a time into
 * dp->dev_addr.  Always returns B_TRUE.
 */
static boolean_t
sfe_get_mac_addr_dp83815(struct gem_dev *dp)
{
	uint8_t		*mac;
	uint_t		val;
	int		i;

/* set bit `ix' of byte array `p' (LSB-first within each byte) to v */
#define	BITSET(p, ix, v)	(p)[(ix)/8] |= ((v) ? 1 : 0) << ((ix) & 0x7)

	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));

	mac = dp->dev_addr.ether_addr_octet;

	/* first of all, clear MAC address buffer */
	bzero(mac, ETHERADDRL);

	/* get bit 0 from the LSB of word 0x6 */
	val = sfe_read_eeprom(dp, 0x6);
	BITSET(mac, 0, val & 1);

	/* get bit 1 - 16 from word 0x7, MSB first */
	val = sfe_read_eeprom(dp, 0x7);
	for (i = 0; i < 16; i++) {
		BITSET(mac, 1 + i, val & (1 << (15 - i)));
	}

	/* get bit 17 -  32 from word 0x8 */
	val = sfe_read_eeprom(dp, 0x8);
	for (i = 0; i < 16; i++) {
		BITSET(mac, 17 + i, val & (1 << (15 - i)));
	}

	/* get bit 33 -  47 from word 0x9 (only 15 bits used here) */
	val = sfe_read_eeprom(dp, 0x9);
	for (i = 0; i < 15; i++) {
		BITSET(mac, 33 + i, val & (1 << (15 - i)));
	}

	return (B_TRUE);
#undef BITSET
}
359 
360 static boolean_t
sfe_get_mac_addr_sis900(struct gem_dev * dp)361 sfe_get_mac_addr_sis900(struct gem_dev *dp)
362 {
363 	uint_t		val;
364 	int		i;
365 	uint8_t		*mac;
366 
367 	mac = dp->dev_addr.ether_addr_octet;
368 
369 	for (i = 0; i < ETHERADDRL/2; i++) {
370 		val = sfe_read_eeprom(dp, 0x8 + i);
371 		*mac++ = (uint8_t)val;
372 		*mac++ = (uint8_t)(val >> 8);
373 	}
374 
375 	return (B_TRUE);
376 }
377 
378 static dev_info_t *
sfe_search_pci_dev_subr(dev_info_t * cur_node,int vendor_id,int device_id)379 sfe_search_pci_dev_subr(dev_info_t *cur_node, int vendor_id, int device_id)
380 {
381 	dev_info_t	*child_id;
382 	dev_info_t	*ret;
383 	int		vid, did;
384 
385 	if (cur_node == NULL) {
386 		return (NULL);
387 	}
388 
389 	/* check brothers */
390 	do {
391 		vid = ddi_prop_get_int(DDI_DEV_T_ANY, cur_node,
392 		    DDI_PROP_DONTPASS, "vendor-id", -1);
393 		did = ddi_prop_get_int(DDI_DEV_T_ANY, cur_node,
394 		    DDI_PROP_DONTPASS, "device-id", -1);
395 
396 		if (vid == vendor_id && did == device_id) {
397 			/* found */
398 			return (cur_node);
399 		}
400 
401 		/* check children */
402 		if ((child_id = ddi_get_child(cur_node)) != NULL) {
403 			if ((ret = sfe_search_pci_dev_subr(child_id,
404 			    vendor_id, device_id)) != NULL) {
405 				return (ret);
406 			}
407 		}
408 
409 	} while ((cur_node = ddi_get_next_sibling(cur_node)) != NULL);
410 
411 	/* not found */
412 	return (NULL);
413 }
414 
415 static dev_info_t *
sfe_search_pci_dev(int vendor_id,int device_id)416 sfe_search_pci_dev(int vendor_id, int device_id)
417 {
418 	return (sfe_search_pci_dev_subr(ddi_root_node(), vendor_id, device_id));
419 }
420 
/*
 * sfe_get_mac_addr_sis630e: read the factory mac address from the
 * legacy RTC/CMOS ram (offsets 0x09 - 0x0e) through the SiS
 * isa-bridge (pci1039,8).  Only usable on IA platforms: inb/outb are
 * declared as weak symbols at the top of this file and are NULL on
 * other architectures, in which case B_FALSE is returned.
 */
static boolean_t
sfe_get_mac_addr_sis630e(struct gem_dev *dp)
{
	int		i;
	dev_info_t	*isa_bridge;
	ddi_acc_handle_t isa_handle;
	int		reg;

	if (inb == NULL || outb == NULL) {
		/* this is not IA architecture */
		return (B_FALSE);
	}

	if ((isa_bridge = sfe_search_pci_dev(0x1039, 0x8)) == NULL) {
		cmn_err(CE_WARN, "%s: failed to find isa-bridge pci1039,8",
		    dp->name);
		return (B_FALSE);
	}

	if (pci_config_setup(isa_bridge, &isa_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: ddi_regs_map_setup failed",
		    dp->name);
		return (B_FALSE);
	}

	/* enable to access CMOS RAM (bit 0x40 in bridge config reg 0x48) */
	reg = pci_config_get8(isa_handle, 0x48);
	pci_config_put8(isa_handle, 0x48, reg | 0x40);

	/* read octets via the CMOS index (0x70) / data (0x71) ports */
	for (i = 0; i < ETHERADDRL; i++) {
		outb(0x70, 0x09 + i);
		dp->dev_addr.ether_addr_octet[i] = inb(0x71);
	}

	/* disable to access CMOS RAM, restoring the original value */
	pci_config_put8(isa_handle, 0x48, reg);
	pci_config_teardown(&isa_handle);

	return (B_TRUE);
}
461 
/*
 * sfe_get_mac_addr_sis635: recover the factory mac address from the
 * chip's receive filter ram.  CR_RELOAD makes the chip reload the
 * filter from EEPROM; the address is then read back two octets at a
 * time through the RFCR/RFDR indirection, with packet filtering
 * temporarily disabled.  Always returns B_TRUE.
 */
static boolean_t
sfe_get_mac_addr_sis635(struct gem_dev *dp)
{
	int		i;
	uint32_t	rfcr;
	uint16_t	v;
	struct sfe_dev	*lp = dp->private;

	DPRINTF(2, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
	rfcr = INL(dp, RFCR);

	/* trigger reload of the filter from EEPROM */
	OUTL(dp, CR, lp->cr | CR_RELOAD);
	OUTL(dp, CR, lp->cr);

	/* disable packet filtering before reading filter */
	OUTL(dp, RFCR, rfcr & ~RFCR_RFEN);

	/* load MAC addr from filter data register */
	for (i = 0; i < ETHERADDRL; i += 2) {
		OUTL(dp, RFCR,
		    (RFADDR_MAC_SIS900 + (i/2)) << RFCR_RFADDR_SHIFT_SIS900);
		v = INL(dp, RFDR);
		dp->dev_addr.ether_addr_octet[i] = (uint8_t)v;
		dp->dev_addr.ether_addr_octet[i+1] = (uint8_t)(v >> 8);
	}

	/* re-enable packet filtering */
	OUTL(dp, RFCR, rfcr | RFCR_RFEN);

	return (B_TRUE);
}
493 
/*
 * sfe_get_mac_addr_sis962: like sfe_get_mac_addr_sis900(), but the
 * EEPROM is shared, so access must be arbitrated first via the
 * request/grant handshake in the MEAR register.  Gives up after
 * roughly 2ms (200 polls x 10us) if the grant never arrives.
 */
static boolean_t
sfe_get_mac_addr_sis962(struct gem_dev *dp)
{
	boolean_t	ret;
	int		i;

	ret = B_FALSE;

	/* rise request signal to access EEPROM */
	OUTL(dp, MEAR, EROMAR_EEREQ);
	for (i = 0; (INL(dp, MEAR) & EROMAR_EEGNT) == 0; i++) {
		if (i > 200) {
			/* failed to acquire eeprom */
			cmn_err(CE_NOTE,
			    CONS "%s: failed to access eeprom", dp->name);
			goto x;
		}
		drv_usecwait(10);
	}
	ret = sfe_get_mac_addr_sis900(dp);
x:
	/* release EEPROM */
	OUTL(dp, MEAR, EROMAR_EEDONE);

	return (ret);
}
520 
/*
 * sfe_reset_chip_sis900: full reset of a SiS900-type core.
 * Interrupts are masked (already-asserted bits are latched into
 * lp->isr_pended for the interrupt handler), the rx filter is
 * cleared, and a combined chip/tx/rx reset is issued and polled for
 * completion.  Returns GEM_SUCCESS or GEM_FAILURE on timeout.
 */
static int
sfe_reset_chip_sis900(struct gem_dev *dp)
{
	int		i;
	uint32_t	done;
	uint32_t	val;
	struct sfe_dev	*lp = dp->private;

	DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__));

	/* invalidate mac addr cache */
	bzero(lp->mac_addr, sizeof (lp->mac_addr));

	lp->cr = 0;

	/* inhibit interrupt */
	OUTL(dp, IMR, 0);
	/* remember interrupts that fired while masked */
	lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits;

	OUTLINL(dp, RFCR, 0);

	/* issue chip, tx and rx reset at once */
	OUTL(dp, CR, CR_RST | CR_TXR | CR_RXR);
	drv_usecwait(10);

	/* wait until both tx and rx reset-complete bits accumulate */
	done = 0;
	for (i = 0; done != (ISR_TXRCMP | ISR_RXRCMP); i++) {
		if (i > 1000) {
			cmn_err(CE_WARN, "%s: chip reset timeout", dp->name);
			return (GEM_FAILURE);
		}
		done |= INL(dp, ISR) & (ISR_TXRCMP | ISR_RXRCMP);
		drv_usecwait(10);
	}

	if (lp->revid == SIS630ET_900_REV) {
		/* SIS630E-T needs the access-mode bit set in CR */
		lp->cr |= CR_ACCESSMODE;
		OUTL(dp, CR, lp->cr | INL(dp, CR));
	}

	/* Configuration register: enable PCI parity */
	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b",
	    dp->name, INL(dp, CFG), CFG_BITS_SIS900));
	val = 0;
	if (lp->revid >= SIS635A_900_REV ||
	    lp->revid == SIS900B_900_REV) {
		/* what is this ? */
		val |= CFG_RND_CNT;
	}
	OUTL(dp, CFG, val);
	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name,
	    INL(dp, CFG), CFG_BITS_SIS900));

	return (GEM_SUCCESS);
}
575 
/*
 * sfe_reset_chip_dp83815: full reset of a DP83815/83816 core.
 * Interrupts are masked (pending bits latched into lp->isr_pended),
 * the rx filter is cleared, CR_RST is issued and polled until it
 * self-clears, then power-management/wake status is cleared and the
 * CFG register is reprogrammed preserving the strap/autoneg bits.
 * Returns GEM_SUCCESS or GEM_FAILURE on timeout.
 */
static int
sfe_reset_chip_dp83815(struct gem_dev *dp)
{
	int		i;
	uint32_t	val;
	struct sfe_dev	*lp = dp->private;

	DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__));

	/* invalidate mac addr cache */
	bzero(lp->mac_addr, sizeof (lp->mac_addr));

	lp->cr = 0;

	/* inhibit interrupts */
	OUTL(dp, IMR, 0);
	/* remember interrupts that fired while masked */
	lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits;

	OUTL(dp, RFCR, 0);

	OUTL(dp, CR, CR_RST);
	drv_usecwait(10);

	/* CR_RST is self-clearing; poll until the chip comes back */
	for (i = 0; INL(dp, CR) & CR_RST; i++) {
		if (i > 100) {
			cmn_err(CE_WARN, "!%s: chip reset timeout", dp->name);
			return (GEM_FAILURE);
		}
		drv_usecwait(10);
	}
	DPRINTF(0, (CE_CONT, "!%s: chip reset in %duS", dp->name, i*10));

	/* clear PME status, then disable it */
	OUTL(dp, CCSR, CCSR_PMESTS);
	OUTL(dp, CCSR, 0);

	/* Configuration register: enable PCI parity */
	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b",
	    dp->name, INL(dp, CFG), CFG_BITS_DP83815));
	/* keep hardware strap / autoneg-select bits, advertise pause */
	val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
	OUTL(dp, CFG, val | CFG_PAUSE_ADV);
	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name,
	    INL(dp, CFG), CFG_BITS_DP83815));

	return (GEM_SUCCESS);
}
621 
622 static int
sfe_init_chip(struct gem_dev * dp)623 sfe_init_chip(struct gem_dev *dp)
624 {
625 	/* Configuration register: have been set up in sfe_chip_reset */
626 
627 	/* PCI test control register: do nothing */
628 
629 	/* Interrupt status register : do nothing */
630 
631 	/* Interrupt mask register: clear, but leave lp->our_intr_bits */
632 	OUTL(dp, IMR, 0);
633 
634 	/* Enhanced PHY Access register (sis900): do nothing */
635 
636 	/* Transmit Descriptor Pointer register: base addr of TX ring */
637 	OUTL(dp, TXDP, dp->tx_ring_dma);
638 
639 	/* Receive descriptor pointer register: base addr of RX ring */
640 	OUTL(dp, RXDP, dp->rx_ring_dma);
641 
642 	return (GEM_SUCCESS);
643 }
644 
/*
 * sfe_mcast_hash: multicast hash callback for the gem layer.
 * The hardware hash is the big-endian ethernet CRC of the address;
 * the callers shift it down to the width of the hash table.
 */
static uint_t
sfe_mcast_hash(struct gem_dev *dp, uint8_t *addr)
{
	return (gem_ether_crc_be(addr, ETHERADDRL));
}
650 
#ifdef DEBUG_LEVEL
/*
 * sfe_rxfilter_dump: debug helper; dump the receive filter ram from
 * byte offset `start' to `end' via the RFCR/RFDR indirection,
 * four 16-bit words per output line.
 */
static void
sfe_rxfilter_dump(struct gem_dev *dp, int start, int end)
{
	int		i;
	int		j;
	uint16_t	ram[0x10];

	cmn_err(CE_CONT, "!%s: rx filter ram dump:", dp->name);
#define	WORDS_PER_LINE	4
	for (i = start; i < end; i += WORDS_PER_LINE*2) {
		for (j = 0; j < WORDS_PER_LINE; j++) {
			OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i + j*2);
			ram[j] = INL(dp, RFDR);
		}

		cmn_err(CE_CONT, "!0x%02x: 0x%04x 0x%04x 0x%04x 0x%04x",
		    i, ram[0], ram[1], ram[2], ram[3]);
	}
#undef	WORDS_PER_LINE
}
#endif
674 
/* filter-ram base offsets of the four DP83815 perfect-match patterns */
static uint_t	sfe_rf_perfect_base_dp83815[] = {
	RFADDR_PMATCH0_DP83815,
	RFADDR_PMATCH1_DP83815,
	RFADDR_PMATCH2_DP83815,
	RFADDR_PMATCH3_DP83815,
};
681 
/*
 * sfe_set_rx_filter_dp83815: program the DP83815 receive filter from
 * dp->rxmode / dp->mc_list.  Mode selection, by decreasing cost:
 *   - promiscuous: accept everything;
 *   - allmulti or too many groups: accept all multicast;
 *   - more than 4 groups: 512-bit multicast hash table;
 *   - up to 4 groups: per-address pattern-match registers.
 * The station address is written to the perfect-match registers only
 * when it differs from the cached copy in lp->mac_addr.
 * Always returns GEM_SUCCESS.
 */
static int
sfe_set_rx_filter_dp83815(struct gem_dev *dp)
{
	int		i;
	int		j;
	uint32_t	mode;
	uint8_t		*mac = dp->cur_addr.ether_addr_octet;
	uint16_t	hash_tbl[32];
	struct sfe_dev	*lp = dp->private;

	DPRINTF(1, (CE_CONT, CONS "%s: %s: called, mc_count:%d, mode:0x%b",
	    dp->name, __func__, dp->mc_count, dp->rxmode, RXMODE_BITS));

#if DEBUG_LEVEL > 0
	for (i = 0; i < dp->mc_count; i++) {
		cmn_err(CE_CONT,
		"!%s: adding mcast(%d) %02x:%02x:%02x:%02x:%02x:%02x",
		    dp->name, i,
		    dp->mc_list[i].addr.ether_addr_octet[0],
		    dp->mc_list[i].addr.ether_addr_octet[1],
		    dp->mc_list[i].addr.ether_addr_octet[2],
		    dp->mc_list[i].addr.ether_addr_octet[3],
		    dp->mc_list[i].addr.ether_addr_octet[4],
		    dp->mc_list[i].addr.ether_addr_octet[5]);
	}
#endif
	if ((dp->rxmode & RXMODE_ENABLE) == 0) {
		/* disable rx filter */
		OUTL(dp, RFCR, 0);
		return (GEM_SUCCESS);
	}

	/*
	 * Set Receive filter control register
	 */
	if (dp->rxmode & RXMODE_PROMISC) {
		/* all broadcast, all multicast, all physical */
		mode = RFCR_AAB | RFCR_AAM | RFCR_AAP;
	} else if ((dp->rxmode & RXMODE_ALLMULTI) || dp->mc_count > 16*32/2) {
		/* all broadcast, all multicast, physical for the chip */
		mode = RFCR_AAB | RFCR_AAM | RFCR_APM_DP83815;
	} else if (dp->mc_count > 4) {
		/*
		 * Use multicast hash table,
		 * accept all broadcast and physical for the chip.
		 */
		mode = RFCR_AAB | RFCR_MHEN_DP83815 | RFCR_APM_DP83815;

		bzero(hash_tbl, sizeof (hash_tbl));
		for (i = 0; i < dp->mc_count; i++) {
			/* hash index is the top 9 bits of the crc */
			j = dp->mc_list[i].hash >> (32 - 9);
			hash_tbl[j / 16] |= 1 << (j % 16);
		}
	} else {
		/*
		 * Use pattern mach filter for multicast address,
		 * accept all broadcast and physical for the chip
		 */
		/* need to enable corresponding pattern registers */
		mode = RFCR_AAB | RFCR_APM_DP83815 |
		    (((1 << dp->mc_count) - 1) << RFCR_APAT_SHIFT);
	}

#if DEBUG_LEVEL > 1
	cmn_err(CE_CONT,
	    "!%s: mac %02x:%02x:%02x:%02x:%02x:%02x"
	    "  cache %02x:%02x:%02x:%02x:%02x:%02x",
	    dp->name, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
	    lp->mac_addr[0], lp->mac_addr[1],
	    lp->mac_addr[2], lp->mac_addr[3],
	    lp->mac_addr[4], lp->mac_addr[5]);
#endif
	if (bcmp(mac, lp->mac_addr, ETHERADDRL) != 0) {
		/*
		 * XXX - need to *disable* rx filter to load mac address for
		 * the chip. otherwise, we cannot setup rxfilter correctly.
		 */
		/* setup perfect match register for my station address */
		for (i = 0; i < ETHERADDRL; i += 2) {
			OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i);
			OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]);
		}

		bcopy(mac, lp->mac_addr, ETHERADDRL);
	}

#if DEBUG_LEVEL > 3
	/* clear pattern ram */
	for (j = 0x200; j < 0x380; j += 2) {
		OUTL(dp, RFCR, j);
		OUTL(dp, RFDR, 0);
	}
#endif
	if (mode & RFCR_APAT_DP83815) {
		/* setup multicast address into pattern match registers */
		for (j = 0; j < dp->mc_count; j++) {
			mac = &dp->mc_list[j].addr.ether_addr_octet[0];
			for (i = 0; i < ETHERADDRL; i += 2) {
				OUTL(dp, RFCR,
				    sfe_rf_perfect_base_dp83815[j] + i*2);
				OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]);
			}
		}

		/* setup pattern count registers (6 bytes per pattern) */
		OUTL(dp, RFCR, RFADDR_PCOUNT01_DP83815);
		OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL);
		OUTL(dp, RFCR, RFADDR_PCOUNT23_DP83815);
		OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL);
	}

	if (mode & RFCR_MHEN_DP83815) {
		/* Load Multicast hash table */
		for (i = 0; i < 32; i++) {
			/* for DP83815, index is in byte */
			OUTL(dp, RFCR, RFADDR_MULTICAST_DP83815 + i*2);
			OUTL(dp, RFDR, hash_tbl[i]);
		}
	}
#if DEBUG_LEVEL > 2
	sfe_rxfilter_dump(dp, 0, 0x10);
	sfe_rxfilter_dump(dp, 0x200, 0x380);
#endif
	/* Set rx filter mode and enable rx filter */
	OUTL(dp, RFCR, RFCR_RFEN | mode);

	return (GEM_SUCCESS);
}
810 
/*
 * sfe_set_rx_filter_sis900: program the SiS900 receive filter from
 * dp->rxmode / dp->mc_list.  The multicast hash table is 128 or 256
 * bits wide depending on the chip revision.  The station address and
 * hash table are loaded through the RFCR/RFDR indirection; the cached
 * lp->mac_addr avoids rewriting an unchanged station address.
 * Always returns GEM_SUCCESS.
 */
static int
sfe_set_rx_filter_sis900(struct gem_dev *dp)
{
	int		i;
	uint32_t	mode;
	uint16_t	hash_tbl[16];
	uint8_t		*mac = dp->cur_addr.ether_addr_octet;
	int		hash_size;
	int		hash_shift;
	struct sfe_dev	*lp = dp->private;

	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));

	if ((dp->rxmode & RXMODE_ENABLE) == 0) {
		/* disable rx filter */
		OUTLINL(dp, RFCR, 0);
		return (GEM_SUCCESS);
	}

	/*
	 * determine hardware hash table size in word.
	 */
	hash_shift = 25;	/* 128-bit table: top 7 bits of the crc */
	if (lp->revid >= SIS635A_900_REV || lp->revid == SIS900B_900_REV) {
		hash_shift = 24;	/* 256-bit table: top 8 bits */
	}
	hash_size = (1 << (32 - hash_shift)) / 16;
	bzero(hash_tbl, sizeof (hash_tbl));

	/* Set Receive filter control register */

	if (dp->rxmode & RXMODE_PROMISC) {
		/* all broadcast, all multicast, all physical */
		mode = RFCR_AAB | RFCR_AAM | RFCR_AAP;
	} else if ((dp->rxmode & RXMODE_ALLMULTI) ||
	    dp->mc_count > hash_size*16/2) {
		/* all broadcast, all multicast, physical for the chip */
		mode = RFCR_AAB | RFCR_AAM;
	} else {
		/* all broadcast, physical for the chip */
		mode = RFCR_AAB;
	}

	/* make hash table */
	for (i = 0; i < dp->mc_count; i++) {
		uint_t	h;
		h = dp->mc_list[i].hash >> hash_shift;
		hash_tbl[h / 16] |= 1 << (h % 16);
	}

	if (bcmp(mac, lp->mac_addr, ETHERADDRL) != 0) {
		/* Disable Rx filter and load mac address */
		for (i = 0; i < ETHERADDRL/2; i++) {
			/* For sis900, index is in word */
			OUTLINL(dp, RFCR,
			    (RFADDR_MAC_SIS900+i) << RFCR_RFADDR_SHIFT_SIS900);
			OUTLINL(dp, RFDR, (mac[i*2+1] << 8) | mac[i*2]);
		}

		bcopy(mac, lp->mac_addr, ETHERADDRL);
	}

	/* Load Multicast hash table */
	for (i = 0; i < hash_size; i++) {
		/* For sis900, index is in word */
		OUTLINL(dp, RFCR,
		    (RFADDR_MULTICAST_SIS900 + i) << RFCR_RFADDR_SHIFT_SIS900);
		OUTLINL(dp, RFDR, hash_tbl[i]);
	}

	/* Load rx filter mode and enable rx filter */
	OUTLINL(dp, RFCR, RFCR_RFEN | mode);

	return (GEM_SUCCESS);
}
886 
887 static int
sfe_start_chip(struct gem_dev * dp)888 sfe_start_chip(struct gem_dev *dp)
889 {
890 	struct sfe_dev	*lp = dp->private;
891 
892 	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
893 
894 	/*
895 	 * setup interrupt mask, which shouldn't include ISR_TOK
896 	 * to improve performance.
897 	 */
898 	lp->our_intr_bits = OUR_INTR_BITS;
899 
900 	/* enable interrupt */
901 	if ((dp->misc_flag & GEM_NOINTR) == 0) {
902 		OUTL(dp, IER, 1);
903 		OUTL(dp, IMR, lp->our_intr_bits);
904 	}
905 
906 	/* Kick RX */
907 	OUTL(dp, CR, lp->cr | CR_RXE);
908 
909 	return (GEM_SUCCESS);
910 }
911 
912 /*
913  * Stop nic core gracefully.
914  */
/*
 * sfe_stop_chip: stop tx and rx, polling until both reset-complete
 * bits are seen.  Interrupt bits observed while polling are latched
 * into lp->isr_pended so the interrupt handler does not lose them.
 * Returns GEM_SUCCESS, or GEM_FAILURE after ~10ms without completion.
 */
static int
sfe_stop_chip(struct gem_dev *dp)
{
	struct sfe_dev	*lp = dp->private;
	uint32_t	done;
	int		i;
	uint32_t	val;

	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));

	/*
	 * Although we inhibit interrupt here, we don't clear soft copy of
	 * interrupt mask to avoid bogus interrupts.
	 */
	OUTL(dp, IMR, 0);

	/* stop TX and RX immediately */
	OUTL(dp, CR, lp->cr | CR_TXR | CR_RXR);

	done = 0;
	for (i = 0; done != (ISR_RXRCMP | ISR_TXRCMP); i++) {
		if (i > 1000) {
			/*
			 * As gem layer will call sfe_reset_chip(),
			 * we don't need to reset further
			 */
			cmn_err(CE_NOTE, "!%s: %s: Tx/Rx reset timeout",
			    dp->name, __func__);

			return (GEM_FAILURE);
		}
		val = INL(dp, ISR);
		done |= val & (ISR_RXRCMP | ISR_TXRCMP);
		/* remember interrupts that fired while masked */
		lp->isr_pended |= val & lp->our_intr_bits;
		drv_usecwait(10);
	}

	return (GEM_SUCCESS);
}
954 
#ifndef	__sparc
/*
 * Stop nic core gracefully for quiesce
 */
/*
 * sfe_stop_chip_quiesce: quiesce(9E) variant of sfe_stop_chip().
 * Same register sequence, but returns DDI_* codes and performs no
 * logging — quiesce runs in a context where such services must be
 * avoided.  Note lp->cr is intentionally not OR-ed into CR here.
 */
static int
sfe_stop_chip_quiesce(struct gem_dev *dp)
{
	struct sfe_dev	*lp = dp->private;
	uint32_t	done;
	int		i;
	uint32_t	val;

	/*
	 * Although we inhibit interrupt here, we don't clear soft copy of
	 * interrupt mask to avoid bogus interrupts.
	 */
	OUTL(dp, IMR, 0);

	/* stop TX and RX immediately */
	OUTL(dp, CR, CR_TXR | CR_RXR);

	done = 0;
	for (i = 0; done != (ISR_RXRCMP | ISR_TXRCMP); i++) {
		if (i > 1000) {
			/*
			 * As gem layer will call sfe_reset_chip(),
			 * we don't need to reset further
			 */

			return (DDI_FAILURE);
		}
		val = INL(dp, ISR);
		done |= val & (ISR_RXRCMP | ISR_TXRCMP);
		/* remember interrupts that fired while masked */
		lp->isr_pended |= val & lp->our_intr_bits;
		drv_usecwait(10);
	}
	return (DDI_SUCCESS);
}
#endif
994 
995 /*
996  * Setup media mode
997  */
/* burst length in bytes for each MXDMA field encoding (0 => 512) */
static uint_t
sfe_mxdma_value[] = { 512, 4, 8, 16, 32, 64, 128, 256, };

/*
 * sfe_encode_mxdma: map a requested dma burst length in bytes to the
 * corresponding MXDMA field encoding.  Requests above 256 select the
 * 512-byte code (0); otherwise the smallest encoding whose burst
 * length covers the request is returned.
 */
static uint_t
sfe_encode_mxdma(uint_t burstsize)
{
	uint_t	code;

	if (burstsize > 256) {
		/* too big for a fixed code: choose 512 */
		return (0);
	}

	code = 1;
	while (code < 7 && sfe_mxdma_value[code] < burstsize) {
		code++;
	}
	return (code);
}
1018 
/*
 * sfe_set_media: program the tx/rx configuration registers for the
 * link mode (speed/duplex) negotiated by the MII layer: maximum DMA
 * burst lengths, tx/rx FIFO drain and fill thresholds, and pause
 * based flow control.  Always returns GEM_SUCCESS.
 */
static int
sfe_set_media(struct gem_dev *dp)
{
	uint32_t	txcfg;
	uint32_t	rxcfg;
	uint32_t	pcr;
	uint32_t	val;
	uint32_t	txmxdma;
	uint32_t	rxmxdma;
	struct sfe_dev	*lp = dp->private;
#ifdef DEBUG_LEVEL
	extern int	gem_speed_value[];
#endif
	DPRINTF(2, (CE_CONT, CONS "%s: %s: %s duplex, %d Mbps",
	    dp->name, __func__,
	    dp->full_duplex ? "full" : "half", gem_speed_value[dp->speed]));

	/* initialize txcfg and rxcfg */
	txcfg = TXCFG_ATP;
	if (dp->full_duplex) {
		/* full duplex: add TXCFG_CSI | TXCFG_HBI to tx config */
		txcfg |= (TXCFG_CSI | TXCFG_HBI);
	}
	rxcfg = RXCFG_AEP | RXCFG_ARP;
	if (dp->full_duplex) {
		rxcfg |= RXCFG_ATX;
	}

	/* select txmxdma and rxmxdma, maximum burst length */
	if (lp->chip->chip_type == CHIPTYPE_SIS900) {
#ifdef DEBUG_SIS900_EDB
		val = CFG_EDB_MASTER;
#else
		val = INL(dp, CFG) & CFG_EDB_MASTER;
#endif
		if (val) {
			/*
			 * sis900 built-in cores:
			 * max burst length must be fixed to 64
			 */
			txmxdma = 64;
			rxmxdma = 64;
		} else {
			/*
			 * sis900 pci chipset:
			 * the vendor recommended to fix max burst length
			 * to 512
			 */
			txmxdma = 512;
			rxmxdma = 512;
		}
	} else {
		/*
		 * NS dp83815/816:
		 * use user defined or default for tx/rx max burst length
		 */
		txmxdma = max(dp->txmaxdma, 256);
		rxmxdma = max(dp->rxmaxdma, 256);
	}


	/* tx high water mark, rounded up to the FIFO unit size */
	lp->tx_drain_threshold = ROUNDUP2(dp->txthr, TXCFG_FIFO_UNIT);

	/* determine tx_fill_threshold according to the drain threshold */
	lp->tx_fill_threshold =
	    TXFIFOSIZE - lp->tx_drain_threshold - TXCFG_FIFO_UNIT;

	/* tune txmxdma not to exceed tx_fill_threshold */
	for (; ; ) {
		/* normalize txmxdma requested to a supported burst size */
		val = sfe_encode_mxdma(txmxdma);
		txmxdma = sfe_mxdma_value[val];

		if (txmxdma <= lp->tx_fill_threshold) {
			break;
		}
		/* too big: halve and retry with the next smaller size */
		txmxdma = txmxdma / 2;
	}
	txcfg |= val << TXCFG_MXDMA_SHIFT;

	/* encode rxmxdma, maximum burst length for rx */
	val = sfe_encode_mxdma(rxmxdma);
	rxcfg |= val << RXCFG_MXDMA_SHIFT;
	rxmxdma = sfe_mxdma_value[val];

	/* receive starting threshold - it has only a 5bit-wide field */
	val = ROUNDUP2(max(dp->rxthr, ETHERMIN), RXCFG_FIFO_UNIT);
	lp->rx_drain_threshold =
	    min(val, (RXCFG_DRTH >> RXCFG_DRTH_SHIFT) * RXCFG_FIFO_UNIT);

	DPRINTF(0, (CE_CONT,
	    "%s: %s: tx: drain:%d(rest %d) fill:%d mxdma:%d,"
	    " rx: drain:%d mxdma:%d",
	    dp->name, __func__,
	    lp->tx_drain_threshold, TXFIFOSIZE - lp->tx_drain_threshold,
	    lp->tx_fill_threshold, txmxdma,
	    lp->rx_drain_threshold, rxmxdma));

	ASSERT(lp->tx_drain_threshold < 64*TXCFG_FIFO_UNIT);
	ASSERT(lp->tx_fill_threshold < 64*TXCFG_FIFO_UNIT);
	ASSERT(lp->rx_drain_threshold < 32*RXCFG_FIFO_UNIT);

	/* merge the thresholds (in FIFO units) into the config words */
	txcfg |= ((lp->tx_fill_threshold/TXCFG_FIFO_UNIT) << TXCFG_FLTH_SHIFT)
	    | (lp->tx_drain_threshold/TXCFG_FIFO_UNIT);
	OUTL(dp, TXCFG, txcfg);

	rxcfg |= ((lp->rx_drain_threshold/RXCFG_FIFO_UNIT) << RXCFG_DRTH_SHIFT);
	if (lp->chip->chip_type == CHIPTYPE_DP83815) {
		rxcfg |= RXCFG_ALP_DP83815;
	}
	OUTL(dp, RXCFG, rxcfg);

	DPRINTF(0, (CE_CONT, CONS "%s: %s: txcfg:%b rxcfg:%b",
	    dp->name, __func__,
	    txcfg, TXCFG_BITS, rxcfg, RXCFG_BITS));

	/* Flow control: DP83815 uses PCR, SiS900 uses FLOWCTL */
	if (lp->chip->chip_type == CHIPTYPE_DP83815) {
		pcr = INL(dp, PCR);
		switch (dp->flow_control) {
		case FLOW_CONTROL_SYMMETRIC:
		case FLOW_CONTROL_RX_PAUSE:
			OUTL(dp, PCR, pcr | PCR_PSEN | PCR_PS_MCAST);
			break;

		default:
			OUTL(dp, PCR,
			    pcr & ~(PCR_PSEN | PCR_PS_MCAST | PCR_PS_DA));
			break;
		}
		DPRINTF(2, (CE_CONT, CONS "%s: PCR: %b", dp->name,
		    INL(dp, PCR), PCR_BITS));

	} else if (lp->chip->chip_type == CHIPTYPE_SIS900) {
		switch (dp->flow_control) {
		case FLOW_CONTROL_SYMMETRIC:
		case FLOW_CONTROL_RX_PAUSE:
			OUTL(dp, FLOWCTL, FLOWCTL_FLOWEN);
			break;
		default:
			OUTL(dp, FLOWCTL, 0);
			break;
		}
		DPRINTF(2, (CE_CONT, CONS "%s: FLOWCTL: %b",
		    dp->name, INL(dp, FLOWCTL), FLOWCTL_BITS));
	}
	return (GEM_SUCCESS);
}
1168 
1169 static int
sfe_get_stats(struct gem_dev * dp)1170 sfe_get_stats(struct gem_dev *dp)
1171 {
1172 	/* do nothing */
1173 	return (GEM_SUCCESS);
1174 }
1175 
1176 /*
1177  * descriptor manipulations
1178  */
/*
 * sfe_tx_desc_write: build the hardware tx descriptor for the packet
 * placed at the given ring slot.  Only single-fragment packets are
 * supported (ASSERT(frags == 1)).  For the first descriptor of a
 * burst (GEM_TXFLAG_HEAD) CMDSTS_OWN is withheld; sfe_tx_start()
 * sets it afterwards so the NIC never sees a half-written burst.
 * Returns the number of descriptors consumed (== frags).
 */
static int
sfe_tx_desc_write(struct gem_dev *dp, int slot,
		ddi_dma_cookie_t *dmacookie, int frags, uint64_t flags)
{
	uint32_t		mark;
	struct sfe_desc		*tdp;
	ddi_dma_cookie_t	*dcp;
	uint32_t		tmp0;
#if DEBUG_LEVEL > 2
	int			i;

	cmn_err(CE_CONT,
	    CONS "%s: time:%d %s seqnum: %d, slot %d, frags: %d flags: %llx",
	    dp->name, ddi_get_lbolt(), __func__,
	    dp->tx_desc_tail, slot, frags, flags);

	for (i = 0; i < frags; i++) {
		cmn_err(CE_CONT, CONS "%d: addr: 0x%x, len: 0x%x",
		    i, dmacookie[i].dmac_address, dmacookie[i].dmac_size);
	}
#endif
	/*
	 * write tx descriptor in reversed order.
	 */
#if DEBUG_LEVEL > 3
	flags |= GEM_TXFLAG_INTR;
#endif
	/* request a tx-complete interrupt if the caller asked for one */
	mark = (flags & GEM_TXFLAG_INTR)
	    ? (CMDSTS_OWN | CMDSTS_INTR) : CMDSTS_OWN;

	ASSERT(frags == 1);
	dcp = &dmacookie[0];
	if (flags & GEM_TXFLAG_HEAD) {
		/* head of a burst: don't hand it to the NIC yet */
		mark &= ~CMDSTS_OWN;
	}

	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
	tmp0 = (uint32_t)dcp->dmac_address;
	mark |= (uint32_t)dcp->dmac_size;
	/* set the buffer pointer first, publish the status word last */
	tdp->d_bufptr = LE_32(tmp0);
	tdp->d_cmdsts = LE_32(mark);

	return (frags);
}
1223 
/*
 * sfe_tx_start: hand the descriptor burst [start_slot, start_slot +
 * nslot - 1] over to the hardware.  The head descriptor was written
 * without CMDSTS_OWN by sfe_tx_desc_write(); set it here only after
 * the trailing descriptors have been synced for the device, then
 * kick the transmit engine.
 */
static void
sfe_tx_start(struct gem_dev *dp, int start_slot, int nslot)
{
	uint_t			tx_ring_size = dp->gc.gc_tx_ring_size;
	struct sfe_desc		*tdp;
	struct sfe_dev		*lp = dp->private;

	if (nslot > 1) {
		/* sync the trailing descriptors before exposing the head */
		gem_tx_desc_dma_sync(dp,
		    SLOT(start_slot + 1, tx_ring_size),
		    nslot - 1, DDI_DMA_SYNC_FORDEV);
	}

	/* now give the head descriptor to the NIC */
	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * start_slot];
	tdp->d_cmdsts |= LE_32(CMDSTS_OWN);

	gem_tx_desc_dma_sync(dp, start_slot, 1, DDI_DMA_SYNC_FORDEV);

	/*
	 * Let the Transmit Buffer Manager Fill state machine active.
	 */
	if (dp->mac_active) {
		OUTL(dp, CR, lp->cr | CR_TXE);
	}
}
1249 
/*
 * sfe_rx_desc_write: arm the rx descriptor at the given slot with a
 * single receive buffer.  CMDSTS_INTR requests an interrupt on
 * completion; the size field carries the buffer length.  Note the
 * OWN bit is NOT set here, which on the rx side means the descriptor
 * is available to the NIC (see sfe_rx_desc_stat, where a set OWN bit
 * indicates a completed receive).
 */
static void
sfe_rx_desc_write(struct gem_dev *dp, int slot,
	    ddi_dma_cookie_t *dmacookie, int frags)
{
	struct sfe_desc		*rdp;
	uint32_t		tmp0;
	uint32_t		tmp1;
#if DEBUG_LEVEL > 2
	int			i;

	ASSERT(frags == 1);

	cmn_err(CE_CONT, CONS
	    "%s: %s seqnum: %d, slot %d, frags: %d",
	    dp->name, __func__, dp->rx_active_tail, slot, frags);
	for (i = 0; i < frags; i++) {
		cmn_err(CE_CONT, CONS "  frag: %d addr: 0x%llx, len: 0x%lx",
		    i, dmacookie[i].dmac_address, dmacookie[i].dmac_size);
	}
#endif
	/* for the last slot of the packet */
	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];

	tmp0 = (uint32_t)dmacookie->dmac_address;
	tmp1 = CMDSTS_INTR | (uint32_t)dmacookie->dmac_size;
	/* buffer pointer first, status word last */
	rdp->d_bufptr = LE_32(tmp0);
	rdp->d_cmdsts = LE_32(tmp1);
}
1278 
/*
 * sfe_tx_desc_stat: examine the status of a transmitted packet that
 * occupied ndesc descriptors starting at slot.  Returns 0 if the
 * packet has not been transmitted yet (descriptor still owned by the
 * NIC), otherwise GEM_TX_DONE after folding the completion status
 * into the driver statistics.
 */
static uint_t
sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc)
{
	uint_t			tx_ring_size = dp->gc.gc_tx_ring_size;
	struct sfe_desc		*tdp;
	uint32_t		status;
	int			cols;
	struct sfe_dev		*lp = dp->private;
#ifdef DEBUG_LEVEL
	int			i;
	clock_t			delay;
#endif
	/* check status of the last descriptor */
	tdp = (void *)
	    &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot + ndesc - 1, tx_ring_size)];

	/*
	 * Don't use LE_32() directly to refer tdp->d_cmdsts.
	 * It is not atomic for big endian cpus.
	 */
	status = tdp->d_cmdsts;
	status = LE_32(status);

	DPRINTF(2, (CE_CONT, CONS "%s: time:%ld %s: slot:%d, status:0x%b",
	    dp->name, ddi_get_lbolt(), __func__,
	    slot, status, TXSTAT_BITS));

	if (status & CMDSTS_OWN) {
		/*
		 * not yet transmitted
		 */
		/* workaround for tx hang: re-kick the transmit engine */
		if (lp->chip->chip_type == CHIPTYPE_DP83815 &&
		    dp->mac_active) {
			OUTL(dp, CR, lp->cr | CR_TXE);
		}
		return (0);
	}

	if (status & CMDSTS_MORE) {
		/* XXX - the hardware problem but don't panic the system */
		/* avoid lint bug for %b format string including 32nd bit */
		cmn_err(CE_NOTE, CONS
		    "%s: tx status bits incorrect:  slot:%d, status:0x%x",
		    dp->name, slot, status);
	}

#if DEBUG_LEVEL > 3
	delay = (ddi_get_lbolt() - dp->tx_buf_head->txb_stime) * 10;
	if (delay >= 50) {
		DPRINTF(0, (CE_NOTE, "%s: tx deferred %d mS: slot %d",
		    dp->name, delay, slot));
	}
#endif

#if DEBUG_LEVEL > 3
	/*
	 * NOTE(review): `nfrag' is not declared in this function; this
	 * debug-only block would not compile with DEBUG_LEVEL > 3.
	 * Verify before enabling.
	 */
	for (i = 0; i < nfrag-1; i++) {
		uint32_t	s;
		int		n;

		n = SLOT(slot + i, tx_ring_size);
		s = LE_32(
		    ((struct sfe_desc *)((void *)
		    &dp->tx_ring[SFE_DESC_SIZE * n]))->d_cmdsts);

		ASSERT(s & CMDSTS_MORE);
		ASSERT((s & CMDSTS_OWN) == 0);
	}
#endif

	/*
	 *  collect statistics
	 */
	if ((status & CMDSTS_OK) == 0) {

		/* failed to transmit the packet */

		DPRINTF(0, (CE_CONT, CONS "%s: Transmit error, Tx status %b",
		    dp->name, status, TXSTAT_BITS));

		dp->stats.errxmt++;

		/* classify the failure; first matching cause wins */
		if (status & CMDSTS_TFU) {
			dp->stats.underflow++;
		} else if (status & CMDSTS_CRS) {
			dp->stats.nocarrier++;
		} else if (status & CMDSTS_OWC) {
			dp->stats.xmtlatecoll++;
		} else if ((!dp->full_duplex) && (status & CMDSTS_EC)) {
			dp->stats.excoll++;
			dp->stats.collisions += 16;
		} else {
			dp->stats.xmit_internal_err++;
		}
	} else if (!dp->full_duplex) {
		/* successful half-duplex tx: account collisions/deferral */
		cols = (status >> CMDSTS_CCNT_SHIFT) & CCNT_MASK;

		if (cols > 0) {
			if (cols == 1) {
				dp->stats.first_coll++;
			} else /* (cols > 1) */ {
				dp->stats.multi_coll++;
			}
			dp->stats.collisions += cols;
		} else if (status & CMDSTS_TD) {
			dp->stats.defer++;
		}
	}
	return (GEM_TX_DONE);
}
1389 
/*
 * sfe_rx_desc_stat: examine the rx descriptor at the given slot.
 * Returns 0 when no packet has arrived (descriptor still owned by
 * the NIC), GEM_RX_DONE | GEM_RX_ERR on a corrupted packet (after
 * updating statistics), or GEM_RX_DONE with the payload length
 * (FCS stripped) encoded in GEM_RX_LEN on success.
 */
static uint64_t
sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc)
{
	struct sfe_desc		*rdp;
	uint_t			len;
	uint_t			flag;
	uint32_t		status;

	flag = GEM_RX_DONE;

	/* Dont read ISR because we cannot ack only to rx interrupt. */

	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];

	/*
	 * Don't use LE_32() directly to refer rdp->d_cmdsts.
	 * It is not atomic for big endian cpus.
	 */
	status = rdp->d_cmdsts;
	status = LE_32(status);

	DPRINTF(2, (CE_CONT, CONS "%s: time:%ld %s: slot:%d, status:0x%b",
	    dp->name, ddi_get_lbolt(), __func__,
	    slot, status, RXSTAT_BITS));

	if ((status & CMDSTS_OWN) == 0) {
		/*
		 * No more received packets because
		 * this buffer is owned by NIC.
		 */
		return (0);
	}

#define	RX_ERR_BITS \
	(CMDSTS_RXA | CMDSTS_RXO | CMDSTS_LONG | CMDSTS_RUNT | \
		CMDSTS_ISE | CMDSTS_CRCE | CMDSTS_FAE | CMDSTS_MORE)

	if (status & RX_ERR_BITS) {
		/*
		 * Packet with error received
		 */
		DPRINTF(0, (CE_CONT, CONS "%s: Corrupted packet "
		    "received, buffer status: %b",
		    dp->name, status, RXSTAT_BITS));

		/* collect statistics information */
		dp->stats.errrcv++;

		/* classify the error; first matching cause wins */
		if (status & CMDSTS_RXO) {
			dp->stats.overflow++;
		} else if (status & (CMDSTS_LONG | CMDSTS_MORE)) {
			dp->stats.frame_too_long++;
		} else if (status & CMDSTS_RUNT) {
			dp->stats.runt++;
		} else if (status & (CMDSTS_ISE | CMDSTS_FAE)) {
			dp->stats.frame++;
		} else if (status & CMDSTS_CRCE) {
			dp->stats.crc++;
		} else {
			dp->stats.rcv_internal_err++;
		}

		return (flag | GEM_RX_ERR);
	}

	/*
	 * this packet was received without errors;
	 * strip the trailing FCS from the reported length
	 */
	if ((len = (status & CMDSTS_SIZE)) >= ETHERFCSL) {
		len -= ETHERFCSL;
	}

#if DEBUG_LEVEL > 10
{
	int	i;
	uint8_t	*bp = dp->rx_buf_head->rxb_buf;

	cmn_err(CE_CONT, CONS "%s: len:%d", dp->name, len);

	for (i = 0; i < 60; i += 10) {
		cmn_err(CE_CONT, CONS
		    "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x",
		    bp[0], bp[1], bp[2], bp[3], bp[4],
		    bp[5], bp[6], bp[7], bp[8], bp[9]);
	}
	bp += 10;
}
#endif
	return (flag | (len & GEM_RX_LEN));
}
1480 
/*
 * sfe_tx_desc_init: initialize the tx descriptor at the given slot
 * and chain it into the ring by pointing the previous descriptor's
 * d_link at it.  The slot's own d_link is left alone because it
 * already holds a valid pointer once the ring is fully initialized.
 */
static void
sfe_tx_desc_init(struct gem_dev *dp, int slot)
{
	uint_t			tx_ring_size = dp->gc.gc_tx_ring_size;
	struct sfe_desc		*tdp;
	uint32_t		here;

	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];

	/* don't clear d_link field, which have a valid pointer */
	tdp->d_cmdsts = 0;

	/* make a link to this from the previous descriptor */
	here = ((uint32_t)dp->tx_ring_dma) + SFE_DESC_SIZE*slot;

	tdp = (void *)
	    &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot - 1, tx_ring_size)];
	tdp->d_link = LE_32(here);
}
1500 
/*
 * sfe_rx_desc_init: initialize the rx descriptor at the given slot
 * and chain it into the ring by pointing the previous descriptor's
 * d_link at it.  CMDSTS_OWN is set in the initial state (cf.
 * sfe_rx_desc_stat, which treats a cleared OWN as "NIC still owns
 * this buffer").
 */
static void
sfe_rx_desc_init(struct gem_dev *dp, int slot)
{
	uint_t			rx_ring_size = dp->gc.gc_rx_ring_size;
	struct sfe_desc		*rdp;
	uint32_t		here;

	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];

	/* don't clear d_link field, which have a valid pointer */
	rdp->d_cmdsts = LE_32(CMDSTS_OWN);

	/* make a link to this from the previous descriptor */
	here = ((uint32_t)dp->rx_ring_dma) + SFE_DESC_SIZE*slot;

	rdp = (void *)
	    &dp->rx_ring[SFE_DESC_SIZE * SLOT(slot - 1, rx_ring_size)];
	rdp->d_link = LE_32(here);
}
1520 
1521 static void
sfe_tx_desc_clean(struct gem_dev * dp,int slot)1522 sfe_tx_desc_clean(struct gem_dev *dp, int slot)
1523 {
1524 	struct sfe_desc		*tdp;
1525 
1526 	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
1527 	tdp->d_cmdsts = 0;
1528 }
1529 
1530 static void
sfe_rx_desc_clean(struct gem_dev * dp,int slot)1531 sfe_rx_desc_clean(struct gem_dev *dp, int slot)
1532 {
1533 	struct sfe_desc		*rdp;
1534 
1535 	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
1536 	rdp->d_cmdsts = LE_32(CMDSTS_OWN);
1537 }
1538 
1539 /*
 * Device dependent interrupt handler
1541  */
/*
 * sfe_interrupt: device interrupt handler.  Reads and thereby clears
 * the ISR, merges in any interrupt status latched by a previous
 * non-interrupt path (lp->isr_pended), dispatches rx and tx
 * processing, and requests a full chip restart on fatal bus errors.
 * Returns DDI_INTR_UNCLAIMED when we were not the source, otherwise
 * DDI_INTR_CLAIMED possibly or'ed with INTR_RESTART_TX.
 */
static uint_t
sfe_interrupt(struct gem_dev *dp)
{
	uint_t		rx_ring_size = dp->gc.gc_rx_ring_size;
	uint32_t	isr;
	uint32_t	isr_bogus;
	uint_t		flags = 0;
	boolean_t	need_to_reset = B_FALSE;
	struct sfe_dev	*lp = dp->private;

	/* read reason and clear interrupt */
	isr = INL(dp, ISR);

	/* pick up interrupt status saved outside the handler, if any */
	isr_bogus = lp->isr_pended;
	lp->isr_pended = 0;

	if (((isr | isr_bogus) & lp->our_intr_bits) == 0) {
		/* we are not the interrupt source */
		return (DDI_INTR_UNCLAIMED);
	}

	DPRINTF(3, (CE_CONT,
	    CONS "%s: time:%ld %s:called: isr:0x%b rx_active_head: %d",
	    dp->name, ddi_get_lbolt(), __func__,
	    isr, INTR_BITS, dp->rx_active_head));

	if (!dp->mac_active) {
		/* the device is going to stop; ignore further interrupts */
		lp->our_intr_bits = 0;
		return (DDI_INTR_CLAIMED);
	}

	isr &= lp->our_intr_bits;

	if (isr & (ISR_RXSOVR | ISR_RXORN | ISR_RXIDLE | ISR_RXERR |
	    ISR_RXDESC | ISR_RXOK)) {
		(void) gem_receive(dp);

		if (isr & (ISR_RXSOVR | ISR_RXORN)) {
			DPRINTF(0, (CE_CONT,
			    CONS "%s: rx fifo overrun: isr %b",
			    dp->name, isr, INTR_BITS));
			/* no need restart rx */
			dp->stats.overflow++;
		}

		if (isr & ISR_RXIDLE) {
			DPRINTF(0, (CE_CONT,
			    CONS "%s: rx buffer ran out: isr %b",
			    dp->name, isr, INTR_BITS));

			dp->stats.norcvbuf++;

			/*
			 * Make RXDP points the head of receive
			 * buffer list.
			 */
			OUTL(dp, RXDP, dp->rx_ring_dma +
			    SFE_DESC_SIZE *
			    SLOT(dp->rx_active_head, rx_ring_size));

			/* Restart the receive engine */
			OUTL(dp, CR, lp->cr | CR_RXE);
		}
	}

	if (isr & (ISR_TXURN | ISR_TXERR | ISR_TXDESC |
	    ISR_TXIDLE | ISR_TXOK)) {
		/* need to reclaim tx buffers */
		if (gem_tx_done(dp)) {
			flags |= INTR_RESTART_TX;
		}
		/*
		 * XXX - tx error statistics will be counted in
		 * sfe_tx_desc_stat() and no need to restart tx on errors.
		 */
	}

	if (isr & (ISR_DPERR | ISR_SSERR | ISR_RMABT | ISR_RTABT)) {
		/* fatal PCI bus error: restart the whole chip */
		cmn_err(CE_WARN, "%s: ERROR interrupt: isr %b.",
		    dp->name, isr, INTR_BITS);
		need_to_reset = B_TRUE;
	}
	/* NOTE(review): no goto targets this label in this function */
reset:
	if (need_to_reset) {
		(void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
		flags |= INTR_RESTART_TX;
	}

	DPRINTF(5, (CE_CONT, CONS "%s: %s: return: isr: %b",
	    dp->name, __func__, isr, INTR_BITS));

	return (DDI_INTR_CLAIMED | flags);
}
1636 
1637 /* ======================================================== */
1638 /*
 * HW dependent MII routines
1640  */
1641 /* ======================================================== */
1642 
1643 /*
1644  * MII routines for NS DP83815
1645  */
/*
 * sfe_mii_sync_dp83815: MII sync callback for the DP83815 internal
 * PHY.  The internal PHY registers are accessed through memory-mapped
 * MAC registers (see sfe_mii_read/write_dp83815), so no MDIO preamble
 * is required.
 */
static void
sfe_mii_sync_dp83815(struct gem_dev *dp)
{
	/* do nothing */
}
1651 
1652 static uint16_t
sfe_mii_read_dp83815(struct gem_dev * dp,uint_t offset)1653 sfe_mii_read_dp83815(struct gem_dev *dp, uint_t offset)
1654 {
1655 	DPRINTF(4, (CE_CONT, CONS"%s: %s: offset 0x%x",
1656 	    dp->name, __func__, offset));
1657 	return ((uint16_t)INL(dp, MII_REGS_BASE + offset*4));
1658 }
1659 
1660 static void
sfe_mii_write_dp83815(struct gem_dev * dp,uint_t offset,uint16_t val)1661 sfe_mii_write_dp83815(struct gem_dev *dp, uint_t offset, uint16_t val)
1662 {
1663 	DPRINTF(4, (CE_CONT, CONS"%s: %s: offset 0x%x 0x%x",
1664 	    dp->name, __func__, offset, val));
1665 	OUTL(dp, MII_REGS_BASE + offset*4, val);
1666 }
1667 
/*
 * sfe_mii_config_dp83815: apply silicon-revision specific PHY
 * register patches recommended by National Semiconductor before
 * falling through to the generic gem MII configuration.  The patched
 * registers (PGSEL/PMDCSR/TSTDAT/DSPCFG/SDCFG) are undocumented test
 * and DSP configuration registers addressed by raw offsets.
 */
static int
sfe_mii_config_dp83815(struct gem_dev *dp)
{
	uint32_t	srr;

	/* silicon revision selects which patch sequence to apply */
	srr = INL(dp, SRR) & SRR_REV;

	DPRINTF(0, (CE_CONT, CONS "%s: srr:0x%04x %04x %04x %04x %04x %04x",
	    dp->name, srr,
	    INW(dp, 0x00cc),	/* PGSEL */
	    INW(dp, 0x00e4),	/* PMDCSR */
	    INW(dp, 0x00fc),	/* TSTDAT */
	    INW(dp, 0x00f4),	/* DSPCFG */
	    INW(dp, 0x00f8)));	/* SDCFG */

	if (srr == SRR_REV_DP83815CVNG) {
		/*
		 * NS datasheet says that DP83815CVNG needs following
		 * registers to be patched for optimizing its performance.
		 * A report said that CRC errors on RX disappeared
		 * with the patch.
		 */
		OUTW(dp, 0x00cc, 0x0001);	/* PGSEL */
		OUTW(dp, 0x00e4, 0x189c);	/* PMDCSR */
		OUTW(dp, 0x00fc, 0x0000);	/* TSTDAT */
		OUTW(dp, 0x00f4, 0x5040);	/* DSPCFG */
		OUTW(dp, 0x00f8, 0x008c);	/* SDCFG */
		OUTW(dp, 0x00cc, 0x0000);	/* PGSEL */

		DPRINTF(0, (CE_CONT,
		    CONS "%s: PHY patched %04x %04x %04x %04x %04x",
		    dp->name,
		    INW(dp, 0x00cc),	/* PGSEL */
		    INW(dp, 0x00e4),	/* PMDCSR */
		    INW(dp, 0x00fc),	/* TSTDAT */
		    INW(dp, 0x00f4),	/* DSPCFG */
		    INW(dp, 0x00f8)));	/* SDCFG */
	} else if (((srr ^ SRR_REV_DP83815DVNG) & 0xff00) == 0 ||
	    ((srr ^ SRR_REV_DP83816AVNG) & 0xff00) == 0) {
		/*
		 * Additional packets for later chipset
		 */
		OUTW(dp, 0x00cc, 0x0001);	/* PGSEL */
		OUTW(dp, 0x00e4, 0x189c);	/* PMDCSR */
		OUTW(dp, 0x00cc, 0x0000);	/* PGSEL */

		DPRINTF(0, (CE_CONT,
		    CONS "%s: PHY patched %04x %04x",
		    dp->name,
		    INW(dp, 0x00cc),	/* PGSEL */
		    INW(dp, 0x00e4)));	/* PMDCSR */
	}

	return (gem_mii_config_default(dp));
}
1723 
/*
 * sfe_mii_probe_dp83815: locate the PHY for a DP83815.  Try an
 * external PHY first using the bit-banged sis900-style MDIO routines;
 * if none answers, switch the gem MII callbacks to the memory-mapped
 * internal PHY accessors, reset the internal PHY, and probe again.
 */
static int
sfe_mii_probe_dp83815(struct gem_dev *dp)
{
	uint32_t	val;

	/* try external phy first */
	DPRINTF(0, (CE_CONT, CONS "%s: %s: trying external phy",
	    dp->name, __func__));
	dp->mii_phy_addr = 0;
	dp->gc.gc_mii_sync = &sfe_mii_sync_sis900;
	dp->gc.gc_mii_read = &sfe_mii_read_sis900;
	dp->gc.gc_mii_write = &sfe_mii_write_sis900;

	/* route MII access to the external PHY, internal one disabled */
	val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
	OUTL(dp, CFG, val | CFG_EXT_PHY | CFG_PHY_DIS);

	if (gem_mii_probe_default(dp) == GEM_SUCCESS) {
		return (GEM_SUCCESS);
	}

	/* switch to internal phy */
	DPRINTF(0, (CE_CONT, CONS "%s: %s: switching to internal phy",
	    dp->name, __func__));
	dp->mii_phy_addr = -1;	/* -1 selects the internal PHY */
	dp->gc.gc_mii_sync = &sfe_mii_sync_dp83815;
	dp->gc.gc_mii_read = &sfe_mii_read_dp83815;
	dp->gc.gc_mii_write = &sfe_mii_write_dp83815;

	val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
	OUTL(dp, CFG, val | CFG_PAUSE_ADV | CFG_PHY_RST);
	drv_usecwait(100);	/* keep to assert RST bit for a while */
	OUTL(dp, CFG, val | CFG_PAUSE_ADV);

	/* wait for PHY reset */
	delay(drv_usectohz(10000));

	return (gem_mii_probe_default(dp));
}
1762 
1763 static int
sfe_mii_init_dp83815(struct gem_dev * dp)1764 sfe_mii_init_dp83815(struct gem_dev *dp)
1765 {
1766 	uint32_t	val;
1767 
1768 	val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
1769 
1770 	if (dp->mii_phy_addr == -1) {
1771 		/* select internal phy */
1772 		OUTL(dp, CFG, val | CFG_PAUSE_ADV);
1773 	} else {
1774 		/* select external phy */
1775 		OUTL(dp, CFG, val | CFG_EXT_PHY | CFG_PHY_DIS);
1776 	}
1777 
1778 	return (GEM_SUCCESS);
1779 }
1780 
1781 /*
1782  * MII routines for SiS900
1783  */
1784 #define	MDIO_DELAY(dp)	{(void) INL(dp, MEAR); (void) INL(dp, MEAR); }
/*
 * sfe_mii_sync_sis900: send the MDIO preamble by bit-banging the MEAR
 * register: 32 '1' bits with the data line driven, clocking MDC low
 * then high for each bit, to bring the PHY management interface to
 * an idle/synchronized state.
 */
static void
sfe_mii_sync_sis900(struct gem_dev *dp)
{
	int	i;

	/* send 32 ONE's to make MII line idle */
	for (i = 0; i < 32; i++) {
		OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO);
		MDIO_DELAY(dp);
		OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO | MEAR_MDC);
		MDIO_DELAY(dp);
	}
}
1798 
/*
 * sfe_mii_config_sis900: apply PHY- and revision-specific workarounds
 * (ICS1893 PHY quirk, SiS630E bad register defaults), run the SiS630
 * equalizer tuning, then fall through to the generic gem MII
 * configuration.
 */
static int
sfe_mii_config_sis900(struct gem_dev *dp)
{
	struct sfe_dev	*lp = dp->private;

	/* Do chip depend setup */
	if ((dp->mii_phy_id & PHY_MASK) == PHY_ICS1893) {
		/* workaround for ICS1893 PHY */
		gem_mii_write(dp, 0x0018, 0xD200);
	}

	if (lp->revid == SIS630E_900_REV) {
		/*
		 * SiS 630E has bugs on default values
		 * of PHY registers
		 */
		gem_mii_write(dp, MII_AN_ADVERT, 0x05e1);
		gem_mii_write(dp, MII_CONFIG1, 0x0022);
		gem_mii_write(dp, MII_CONFIG2, 0xff00);
		gem_mii_write(dp, MII_MASK,    0xffc0);
	}
	sfe_set_eq_sis630(dp);

	return (gem_mii_config_default(dp));
}
1824 
1825 static uint16_t
sfe_mii_read_sis900(struct gem_dev * dp,uint_t reg)1826 sfe_mii_read_sis900(struct gem_dev *dp, uint_t reg)
1827 {
1828 	uint32_t	cmd;
1829 	uint16_t	ret;
1830 	int		i;
1831 	uint32_t	data;
1832 
1833 	cmd = MII_READ_CMD(dp->mii_phy_addr, reg);
1834 
1835 	for (i = 31; i >= 18; i--) {
1836 		data = ((cmd >> i) & 1) <<  MEAR_MDIO_SHIFT;
1837 		OUTL(dp, MEAR, data | MEAR_MDDIR);
1838 		MDIO_DELAY(dp);
1839 		OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC);
1840 		MDIO_DELAY(dp);
1841 	}
1842 
1843 	/* turn around cycle */
1844 	OUTL(dp, MEAR, 0);
1845 	MDIO_DELAY(dp);
1846 
1847 	/* get response from PHY */
1848 	OUTL(dp, MEAR, MEAR_MDC);
1849 	MDIO_DELAY(dp);
1850 
1851 	OUTL(dp, MEAR, 0);
1852 #if DEBUG_LEBEL > 0
1853 	(void) INL(dp, MEAR);	/* delay */
1854 	if (INL(dp, MEAR) & MEAR_MDIO) {
1855 		cmn_err(CE_WARN, "%s: PHY@%d not responded",
1856 		    dp->name, dp->mii_phy_addr);
1857 	}
1858 #else
1859 	MDIO_DELAY(dp);
1860 #endif
1861 	/* terminate response cycle */
1862 	OUTL(dp, MEAR, MEAR_MDC);
1863 	MDIO_DELAY(dp);
1864 
1865 	ret = 0;	/* to avoid lint errors */
1866 	for (i = 16; i > 0; i--) {
1867 		OUTL(dp, MEAR, 0);
1868 		(void) INL(dp, MEAR);	/* delay */
1869 		ret = (ret << 1) | ((INL(dp, MEAR) >> MEAR_MDIO_SHIFT) & 1);
1870 		OUTL(dp, MEAR, MEAR_MDC);
1871 		MDIO_DELAY(dp);
1872 	}
1873 
1874 	/* send two idle(Z) bits to terminate the read cycle */
1875 	for (i = 0; i < 2; i++) {
1876 		OUTL(dp, MEAR, 0);
1877 		MDIO_DELAY(dp);
1878 		OUTL(dp, MEAR, MEAR_MDC);
1879 		MDIO_DELAY(dp);
1880 	}
1881 
1882 	return (ret);
1883 }
1884 
/*
 * sfe_mii_write_sis900: write a PHY register by bit-banging the MDIO
 * protocol through the MEAR register: shift out the full 32bit write
 * command frame (including the 16bit value) MSB first, then release
 * the bus with two idle bits.
 */
static void
sfe_mii_write_sis900(struct gem_dev *dp, uint_t reg, uint16_t val)
{
	uint32_t	cmd;
	int		i;
	uint32_t	data;

	cmd = MII_WRITE_CMD(dp->mii_phy_addr, reg, val);

	/* shift out all 32 bits, most significant bit first */
	for (i = 31; i >= 0; i--) {
		data = ((cmd >> i) & 1) << MEAR_MDIO_SHIFT;
		OUTL(dp, MEAR, data | MEAR_MDDIR);
		MDIO_DELAY(dp);
		OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC);
		MDIO_DELAY(dp);
	}

	/* send two idle(Z) bits to terminate the write cycle. */
	for (i = 0; i < 2; i++) {
		OUTL(dp, MEAR, 0);
		MDIO_DELAY(dp);
		OUTL(dp, MEAR, MEAR_MDC);
		MDIO_DELAY(dp);
	}
}
1910 #undef MDIO_DELAY
1911 
/*
 * sfe_set_eq_sis630: tune the receive equalizer of the SiS630
 * family's internal PHY through the vendor-reserved MII register
 * MII_RESV (0x14).  When the link is up, the equalizer value is
 * sampled repeatedly and a revision-specific rule picks the value to
 * program; when the link is down, autocalibration is re-enabled.
 * No-op for revisions without the internal PHY.
 */
static void
sfe_set_eq_sis630(struct gem_dev *dp)
{
	uint16_t	reg14h;
	uint16_t	eq_value;
	uint16_t	max_value;
	uint16_t	min_value;
	int		i;
	uint8_t		rev;
	struct sfe_dev	*lp = dp->private;

	rev = lp->revid;

	if (!(rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
	    rev == SIS630A_900_REV || rev == SIS630ET_900_REV)) {
		/* it doesn't have a internal PHY */
		return;
	}

	if (dp->mii_state == MII_STATE_LINKUP) {
		reg14h = gem_mii_read(dp, MII_RESV);
		gem_mii_write(dp, MII_RESV, (0x2200 | reg14h) & 0xBFFF);

		/* sample the equalizer value 10 times, track min/max */
		eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3;
		max_value = min_value = eq_value;
		for (i = 1; i < 10; i++) {
			eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3;
			max_value = max(eq_value, max_value);
			min_value = min(eq_value, min_value);
		}

		/* for 630E, rule to determine the equalizer value */
		if (rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
		    rev == SIS630ET_900_REV) {
			if (max_value < 5) {
				eq_value = max_value;
			} else if (5 <= max_value && max_value < 15) {
				eq_value =
				    max(max_value + 1,
				    min_value + 2);
			} else if (15 <= max_value) {
				eq_value =
				    max(max_value + 5,
				    min_value + 6);
			}
		}
		/* for 630B0&B1, rule to determine the equalizer value */
		else
		if (rev == SIS630A_900_REV &&
		    (lp->bridge_revid == SIS630B0 ||
		    lp->bridge_revid == SIS630B1)) {

			if (max_value == 0) {
				eq_value = 3;
			} else {
				eq_value = (max_value + min_value + 1)/2;
			}
		}
		/* write equalizer value and setting */
		reg14h = gem_mii_read(dp, MII_RESV) & ~0x02f8;
		reg14h |= 0x6000 | (eq_value << 3);
		gem_mii_write(dp, MII_RESV, reg14h);
	} else {
		/* link down: re-enable equalizer autocalibration */
		reg14h = (gem_mii_read(dp, MII_RESV) & ~0x4000) | 0x2000;
		if (rev == SIS630A_900_REV &&
		    (lp->bridge_revid == SIS630B0 ||
		    lp->bridge_revid == SIS630B1)) {

			reg14h |= 0x0200;
		}
		gem_mii_write(dp, MII_RESV, reg14h);
	}
}
1985 
1986 /* ======================================================== */
1987 /*
 * OS dependent (device driver) routines
1989  */
1990 /* ======================================================== */
/*
 * sfe_chipinfo_init_sis900: select the revision-specific MAC address
 * reader for SiS900-family chips and, for SiS630 variants, record
 * the host bridge (pci1039,630) revision, which sfe_set_eq_sis630()
 * consults for equalizer tuning.
 */
static void
sfe_chipinfo_init_sis900(struct gem_dev *dp)
{
	int		rev;
	struct sfe_dev	*lp = (struct sfe_dev *)dp->private;

	rev = lp->revid;

	if (rev == SIS630E_900_REV /* 0x81 */) {
		/* sis630E */
		lp->get_mac_addr = &sfe_get_mac_addr_sis630e;
	} else if (rev > 0x81 && rev <= 0x90) {
		/* 630S, 630EA1, 630ET, 635A */
		lp->get_mac_addr = &sfe_get_mac_addr_sis635;
	} else if (rev == SIS962_900_REV /* 0x91 */) {
		/* sis962 or later */
		lp->get_mac_addr = &sfe_get_mac_addr_sis962;
	} else {
		/* sis900 */
		lp->get_mac_addr = &sfe_get_mac_addr_sis900;
	}

	lp->bridge_revid = 0;

	if (rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
	    rev == SIS630A_900_REV || rev ==  SIS630ET_900_REV) {
		/*
		 * read host bridge revision
		 */
		dev_info_t	*bridge;
		ddi_acc_handle_t bridge_handle;

		if ((bridge = sfe_search_pci_dev(0x1039, 0x630)) == NULL) {
			cmn_err(CE_WARN,
			    "%s: cannot find host bridge (pci1039,630)",
			    dp->name);
			return;
		}

		if (pci_config_setup(bridge, &bridge_handle) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s: pci_config_setup failed",
			    dp->name);
			return;
		}

		lp->bridge_revid =
		    pci_config_get8(bridge_handle, PCI_CONF_REVID);
		pci_config_teardown(&bridge_handle);
	}
}
2041 
/*
 * sfe_attach_chip: chip-specific part of attach.  Selects the MAC
 * address reader for the chip family, reads the factory MAC address,
 * and applies DP83815-specific adjustments (no PHY scan, software
 * VLAN tagging, tx threshold headroom for the VLAN tag).
 * Returns GEM_SUCCESS, or GEM_FAILURE if no MAC address is found.
 */
static int
sfe_attach_chip(struct gem_dev *dp)
{
	struct sfe_dev		*lp = (struct sfe_dev *)dp->private;

	DPRINTF(4, (CE_CONT, CONS "!%s: %s called", dp->name, __func__));

	/* setup chip-depend get_mac_address function */
	if (lp->chip->chip_type == CHIPTYPE_SIS900) {
		sfe_chipinfo_init_sis900(dp);
	} else {
		lp->get_mac_addr = &sfe_get_mac_addr_dp83815;
	}

	/* read MAC address */
	if (!(lp->get_mac_addr)(dp)) {
		cmn_err(CE_WARN,
		    "!%s: %s: failed to get factory mac address"
		    " please specify a mac address in sfe.conf",
		    dp->name, __func__);
		return (GEM_FAILURE);
	}

	if (lp->chip->chip_type == CHIPTYPE_DP83815) {
		dp->mii_phy_addr = -1;	/* no need to scan PHY */
		dp->misc_flag |= GEM_VLAN_SOFT;
		dp->txthr += 4; /* VTAG_SIZE */
	}
	/* clamp the tx threshold below the FIFO size */
	dp->txthr = min(dp->txthr, TXFIFOSIZE - 2);

	return (GEM_SUCCESS);
}
2074 
2075 static int
sfeattach(dev_info_t * dip,ddi_attach_cmd_t cmd)2076 sfeattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2077 {
2078 	int			unit;
2079 	const char		*drv_name;
2080 	int			i;
2081 	ddi_acc_handle_t	conf_handle;
2082 	uint16_t		vid;
2083 	uint16_t		did;
2084 	uint8_t			rev;
2085 #ifdef DEBUG_LEVEL
2086 	uint32_t		iline;
2087 	uint8_t			latim;
2088 #endif
2089 	struct chip_info	*p;
2090 	struct gem_dev		*dp;
2091 	struct sfe_dev		*lp;
2092 	caddr_t			base;
2093 	ddi_acc_handle_t	regs_ha;
2094 	struct gem_conf		*gcp;
2095 
2096 	unit = ddi_get_instance(dip);
2097 	drv_name = ddi_driver_name(dip);
2098 
2099 	DPRINTF(3, (CE_CONT, CONS "%s%d: sfeattach: called", drv_name, unit));
2100 
2101 	/*
2102 	 * Common codes after power-up
2103 	 */
2104 	if (pci_config_setup(dip, &conf_handle) != DDI_SUCCESS) {
2105 		cmn_err(CE_WARN, "%s%d: ddi_regs_map_setup failed",
2106 		    drv_name, unit);
2107 		goto err;
2108 	}
2109 
2110 	vid  = pci_config_get16(conf_handle, PCI_CONF_VENID);
2111 	did  = pci_config_get16(conf_handle, PCI_CONF_DEVID);
2112 	rev  = pci_config_get16(conf_handle, PCI_CONF_REVID);
2113 #ifdef DEBUG_LEVEL
2114 	iline = pci_config_get32(conf_handle, PCI_CONF_ILINE);
2115 	latim = pci_config_get8(conf_handle, PCI_CONF_LATENCY_TIMER);
2116 #endif
2117 #ifdef DEBUG_BUILT_IN_SIS900
2118 	rev  = SIS630E_900_REV;
2119 #endif
2120 	for (i = 0, p = sfe_chiptbl; i < CHIPTABLESIZE; i++, p++) {
2121 		if (p->venid == vid && p->devid == did) {
2122 			/* found */
2123 			goto chip_found;
2124 		}
2125 	}
2126 
2127 	/* Not found */
2128 	cmn_err(CE_WARN,
2129 	    "%s%d: sfe_attach: wrong PCI venid/devid (0x%x, 0x%x)",
2130 	    drv_name, unit, vid, did);
2131 	pci_config_teardown(&conf_handle);
2132 	goto err;
2133 
2134 chip_found:
2135 	pci_config_put16(conf_handle, PCI_CONF_COMM,
2136 	    PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME |
2137 	    pci_config_get16(conf_handle, PCI_CONF_COMM));
2138 
2139 	/* ensure D0 mode */
2140 	(void) gem_pci_set_power_state(dip, conf_handle, PCI_PMCSR_D0);
2141 
2142 	pci_config_teardown(&conf_handle);
2143 
2144 	switch (cmd) {
2145 	case DDI_RESUME:
2146 		return (gem_resume(dip));
2147 
2148 	case DDI_ATTACH:
2149 
2150 		DPRINTF(0, (CE_CONT,
2151 		    CONS "%s%d: ilr 0x%08x, latency_timer:0x%02x",
2152 		    drv_name, unit, iline, latim));
2153 
2154 		/*
2155 		 * Map in the device registers.
2156 		 */
2157 		if (gem_pci_regs_map_setup(dip,
2158 		    (sfe_use_pcimemspace && p->chip_type == CHIPTYPE_DP83815)
2159 		    ? PCI_ADDR_MEM32 : PCI_ADDR_IO, PCI_ADDR_MASK,
2160 		    &sfe_dev_attr, &base, &regs_ha) != DDI_SUCCESS) {
2161 			cmn_err(CE_WARN,
2162 			    "%s%d: ddi_regs_map_setup failed",
2163 			    drv_name, unit);
2164 			goto err;
2165 		}
2166 
2167 		/*
2168 		 * construct gem configuration
2169 		 */
2170 		gcp = kmem_zalloc(sizeof (*gcp), KM_SLEEP);
2171 
2172 		/* name */
2173 		(void) sprintf(gcp->gc_name, "%s%d", drv_name, unit);
2174 
2175 		/* consistency on tx and rx */
2176 		gcp->gc_tx_buf_align = sizeof (uint8_t) - 1;
2177 		gcp->gc_tx_max_frags = MAXTXFRAGS;
2178 		gcp->gc_tx_max_descs_per_pkt = gcp->gc_tx_max_frags;
2179 		gcp->gc_tx_desc_unit_shift = 4;	/* 16 byte */
2180 		gcp->gc_tx_buf_size  = TX_BUF_SIZE;
2181 		gcp->gc_tx_buf_limit = gcp->gc_tx_buf_size;
2182 		gcp->gc_tx_ring_size = TX_RING_SIZE;
2183 		gcp->gc_tx_ring_limit = gcp->gc_tx_ring_size;
2184 		gcp->gc_tx_auto_pad  = B_TRUE;
2185 		gcp->gc_tx_copy_thresh = sfe_tx_copy_thresh;
2186 		gcp->gc_tx_desc_write_oo = B_TRUE;
2187 
2188 		gcp->gc_rx_buf_align = sizeof (uint8_t) - 1;
2189 		gcp->gc_rx_max_frags = MAXRXFRAGS;
2190 		gcp->gc_rx_desc_unit_shift = 4;
2191 		gcp->gc_rx_ring_size = RX_RING_SIZE;
2192 		gcp->gc_rx_buf_max   = RX_BUF_SIZE;
2193 		gcp->gc_rx_copy_thresh = sfe_rx_copy_thresh;
2194 
2195 		/* map attributes */
2196 		gcp->gc_dev_attr = sfe_dev_attr;
2197 		gcp->gc_buf_attr = sfe_buf_attr;
2198 		gcp->gc_desc_attr = sfe_buf_attr;
2199 
2200 		/* dma attributes */
2201 		gcp->gc_dma_attr_desc = sfe_dma_attr_desc;
2202 
2203 		gcp->gc_dma_attr_txbuf = sfe_dma_attr_buf;
2204 		gcp->gc_dma_attr_txbuf.dma_attr_align = gcp->gc_tx_buf_align+1;
2205 		gcp->gc_dma_attr_txbuf.dma_attr_sgllen = gcp->gc_tx_max_frags;
2206 
2207 		gcp->gc_dma_attr_rxbuf = sfe_dma_attr_buf;
2208 		gcp->gc_dma_attr_rxbuf.dma_attr_align = gcp->gc_rx_buf_align+1;
2209 		gcp->gc_dma_attr_rxbuf.dma_attr_sgllen = gcp->gc_rx_max_frags;
2210 
2211 		/* time out parameters */
2212 		gcp->gc_tx_timeout = 3*ONESEC;
2213 		gcp->gc_tx_timeout_interval = ONESEC;
2214 		if (p->chip_type == CHIPTYPE_DP83815) {
2215 			/* workaround for tx hang */
2216 			gcp->gc_tx_timeout_interval = ONESEC/20; /* 50mS */
2217 		}
2218 
2219 		/* MII timeout parameters */
2220 		gcp->gc_mii_link_watch_interval = ONESEC;
2221 		gcp->gc_mii_an_watch_interval   = ONESEC/5;
2222 		gcp->gc_mii_reset_timeout = MII_RESET_TIMEOUT;	/* 1 sec */
2223 		gcp->gc_mii_an_timeout = MII_AN_TIMEOUT;	/* 5 sec */
2224 		gcp->gc_mii_an_wait = 0;
2225 		gcp->gc_mii_linkdown_timeout = MII_LINKDOWN_TIMEOUT;
2226 
2227 		/* setting for general PHY */
2228 		gcp->gc_mii_an_delay = 0;
2229 		gcp->gc_mii_linkdown_action = MII_ACTION_RSA;
2230 		gcp->gc_mii_linkdown_timeout_action = MII_ACTION_RESET;
2231 		gcp->gc_mii_dont_reset = B_FALSE;
2232 
2233 
2234 		/* I/O methods */
2235 
2236 		/* mac operation */
2237 		gcp->gc_attach_chip = &sfe_attach_chip;
2238 		if (p->chip_type == CHIPTYPE_DP83815) {
2239 			gcp->gc_reset_chip = &sfe_reset_chip_dp83815;
2240 		} else {
2241 			gcp->gc_reset_chip = &sfe_reset_chip_sis900;
2242 		}
2243 		gcp->gc_init_chip  = &sfe_init_chip;
2244 		gcp->gc_start_chip = &sfe_start_chip;
2245 		gcp->gc_stop_chip  = &sfe_stop_chip;
2246 #ifdef USE_MULTICAST_HASHTBL
2247 		gcp->gc_multicast_hash = &sfe_mcast_hash;
2248 #endif
2249 		if (p->chip_type == CHIPTYPE_DP83815) {
2250 			gcp->gc_set_rx_filter = &sfe_set_rx_filter_dp83815;
2251 		} else {
2252 			gcp->gc_set_rx_filter = &sfe_set_rx_filter_sis900;
2253 		}
2254 		gcp->gc_set_media = &sfe_set_media;
2255 		gcp->gc_get_stats = &sfe_get_stats;
2256 		gcp->gc_interrupt = &sfe_interrupt;
2257 
2258 		/* descriptor operation */
2259 		gcp->gc_tx_desc_write = &sfe_tx_desc_write;
2260 		gcp->gc_tx_start = &sfe_tx_start;
2261 		gcp->gc_rx_desc_write = &sfe_rx_desc_write;
2262 		gcp->gc_rx_start = NULL;
2263 
2264 		gcp->gc_tx_desc_stat = &sfe_tx_desc_stat;
2265 		gcp->gc_rx_desc_stat = &sfe_rx_desc_stat;
2266 		gcp->gc_tx_desc_init = &sfe_tx_desc_init;
2267 		gcp->gc_rx_desc_init = &sfe_rx_desc_init;
2268 		gcp->gc_tx_desc_clean = &sfe_tx_desc_clean;
2269 		gcp->gc_rx_desc_clean = &sfe_rx_desc_clean;
2270 
2271 		/* mii operations */
2272 		if (p->chip_type == CHIPTYPE_DP83815) {
2273 			gcp->gc_mii_probe = &sfe_mii_probe_dp83815;
2274 			gcp->gc_mii_init = &sfe_mii_init_dp83815;
2275 			gcp->gc_mii_config = &sfe_mii_config_dp83815;
2276 			gcp->gc_mii_sync = &sfe_mii_sync_dp83815;
2277 			gcp->gc_mii_read = &sfe_mii_read_dp83815;
2278 			gcp->gc_mii_write = &sfe_mii_write_dp83815;
2279 			gcp->gc_mii_tune_phy = NULL;
2280 			gcp->gc_flow_control = FLOW_CONTROL_NONE;
2281 		} else {
2282 			gcp->gc_mii_probe = &gem_mii_probe_default;
2283 			gcp->gc_mii_init = NULL;
2284 			gcp->gc_mii_config = &sfe_mii_config_sis900;
2285 			gcp->gc_mii_sync = &sfe_mii_sync_sis900;
2286 			gcp->gc_mii_read = &sfe_mii_read_sis900;
2287 			gcp->gc_mii_write = &sfe_mii_write_sis900;
2288 			gcp->gc_mii_tune_phy = &sfe_set_eq_sis630;
2289 			gcp->gc_flow_control = FLOW_CONTROL_RX_PAUSE;
2290 		}
2291 
2292 		lp = kmem_zalloc(sizeof (*lp), KM_SLEEP);
2293 		lp->chip = p;
2294 		lp->revid = rev;
2295 		lp->our_intr_bits = 0;
2296 		lp->isr_pended = 0;
2297 
2298 		cmn_err(CE_CONT, CONS "%s%d: chip:%s rev:0x%02x",
2299 		    drv_name, unit, p->chip_name, rev);
2300 
2301 		dp = gem_do_attach(dip, 0, gcp, base, &regs_ha,
2302 		    lp, sizeof (*lp));
2303 		kmem_free(gcp, sizeof (*gcp));
2304 
2305 		if (dp == NULL) {
2306 			goto err_freelp;
2307 		}
2308 
2309 		return (DDI_SUCCESS);
2310 
2311 err_freelp:
2312 		kmem_free(lp, sizeof (struct sfe_dev));
2313 err:
2314 		return (DDI_FAILURE);
2315 	}
2316 	return (DDI_FAILURE);
2317 }
2318 
2319 static int
sfedetach(dev_info_t * dip,ddi_detach_cmd_t cmd)2320 sfedetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2321 {
2322 	switch (cmd) {
2323 	case DDI_SUSPEND:
2324 		return (gem_suspend(dip));
2325 
2326 	case DDI_DETACH:
2327 		return (gem_do_detach(dip));
2328 	}
2329 	return (DDI_FAILURE);
2330 }
2331 
2332 /*
2333  * quiesce(9E) entry point.
2334  *
2335  * This function is called when the system is single-threaded at high
2336  * PIL with preemption disabled. Therefore, this function must not be
2337  * blocked.
2338  *
2339  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
2340  * DDI_FAILURE indicates an error condition and should almost never happen.
2341  */
2342 #ifdef	__sparc
2343 #define	sfe_quiesce	ddi_quiesce_not_supported
2344 #else
2345 static int
sfe_quiesce(dev_info_t * dip)2346 sfe_quiesce(dev_info_t *dip)
2347 {
2348 	struct gem_dev	*dp;
2349 	int	ret = 0;
2350 
2351 	dp = GEM_GET_DEV(dip);
2352 
2353 	if (dp == NULL)
2354 		return (DDI_FAILURE);
2355 
2356 	ret = sfe_stop_chip_quiesce(dp);
2357 
2358 	return (ret);
2359 }
2360 #endif
2361 
2362 /* ======================================================== */
2363 /*
2364  * OS depend (loadable streams driver) routine
2365  */
2366 /* ======================================================== */
/*
 * dev_ops for the driver.  The open/close and devo_power slots are
 * stubbed (nulldev/nodev): packet I/O goes through the gem framework,
 * so only attach/detach (and quiesce, for fast reboot) matter here.
 */
DDI_DEFINE_STREAM_OPS(sfe_ops, nulldev, nulldev, sfeattach, sfedetach,
	nodev, NULL, D_MP, NULL, sfe_quiesce);

/* loadable-module linkage: this module is a device driver */
static struct modldrv modldrv = {
	&mod_driverops,	/* Type of module.  This one is a driver */
	ident,
	&sfe_ops,	/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};
2379 
2380 /* ======================================================== */
2381 /*
2382  * Loadable module support
2383  */
2384 /* ======================================================== */
2385 int
_init(void)2386 _init(void)
2387 {
2388 	int 	status;
2389 
2390 	DPRINTF(2, (CE_CONT, CONS "sfe: _init: called"));
2391 	gem_mod_init(&sfe_ops, "sfe");
2392 	status = mod_install(&modlinkage);
2393 	if (status != DDI_SUCCESS) {
2394 		gem_mod_fini(&sfe_ops);
2395 	}
2396 	return (status);
2397 }
2398 
2399 /*
2400  * _fini : done
2401  */
2402 int
_fini(void)2403 _fini(void)
2404 {
2405 	int	status;
2406 
2407 	DPRINTF(2, (CE_CONT, CONS "sfe: _fini: called"));
2408 	status = mod_remove(&modlinkage);
2409 	if (status == DDI_SUCCESS) {
2410 		gem_mod_fini(&sfe_ops);
2411 	}
2412 	return (status);
2413 }
2414 
2415 int
_info(struct modinfo * modinfop)2416 _info(struct modinfo *modinfop)
2417 {
2418 	return (mod_info(&modlinkage, modinfop));
2419 }
2420