xref: /illumos-gate/usr/src/uts/common/io/sfe/sfe.c (revision c960f9b3528f78a850977a14c798c0af000b665c)
1 /*
2  *  sfe.c : DP83815/DP83816/SiS900 Fast Ethernet MAC driver for Solaris
3  *
4  * Copyright (c) 2002-2008 Masayuki Murayama.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  *    this list of conditions and the following disclaimer in the documentation
14  *    and/or other materials provided with the distribution.
15  *
16  * 3. Neither the name of the author nor the names of its contributors may be
17  *    used to endorse or promote products derived from this software without
18  *    specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31  * DAMAGE.
32  */
33 
34 /*
35  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
36  * Use is subject to license terms.
37  * Copyright 2024 MNX Cloud, Inc.
38  */
39 
40 /*
41  * System Header files.
42  */
43 #include <sys/types.h>
44 #include <sys/conf.h>
45 #include <sys/debug.h>
46 #include <sys/kmem.h>
47 #include <sys/modctl.h>
48 #include <sys/errno.h>
49 #include <sys/ddi.h>
50 #include <sys/sunddi.h>
51 #include <sys/byteorder.h>
52 #include <sys/ethernet.h>
53 #include <sys/pci.h>
54 
55 #include "sfe_mii.h"
56 #include "sfe_util.h"
57 #include "sfereg.h"
58 
59 char	ident[] = "sis900/dp83815 driver v" "2.6.1t30os";
60 
61 /* Debugging support */
62 #ifdef DEBUG_LEVEL
63 static int sfe_debug = DEBUG_LEVEL;
64 #if DEBUG_LEVEL > 4
65 #define	CONS	"^"
66 #else
67 #define	CONS	"!"
68 #endif
69 #define	DPRINTF(n, args)	if (sfe_debug > (n)) cmn_err args
70 #else
71 #define	CONS	"!"
72 #define	DPRINTF(n, args)
73 #endif
74 
75 /*
76  * Useful macros and typedefs
77  */
78 #define	ONESEC		(drv_usectohz(1*1000000))
79 #define	ROUNDUP2(x, a)	(((x) + (a) - 1) & ~((a) - 1))
80 
81 /*
82  * Our configuration
83  */
84 #define	MAXTXFRAGS	1
85 #define	MAXRXFRAGS	1
86 
87 #ifndef	TX_BUF_SIZE
88 #define	TX_BUF_SIZE	64
89 #endif
90 #ifndef	TX_RING_SIZE
91 #if MAXTXFRAGS == 1
92 #define	TX_RING_SIZE	TX_BUF_SIZE
93 #else
94 #define	TX_RING_SIZE	(TX_BUF_SIZE * 4)
95 #endif
96 #endif
97 
98 #ifndef	RX_BUF_SIZE
99 #define	RX_BUF_SIZE	256
100 #endif
101 #ifndef	RX_RING_SIZE
102 #define	RX_RING_SIZE	RX_BUF_SIZE
103 #endif
104 
105 #define	OUR_INTR_BITS	\
106 	(ISR_DPERR | ISR_SSERR | ISR_RMABT | ISR_RTABT | ISR_RXSOVR |	\
107 	ISR_TXURN | ISR_TXDESC | ISR_TXERR |	\
108 	ISR_RXORN | ISR_RXIDLE | ISR_RXOK | ISR_RXERR)
109 
110 #define	USE_MULTICAST_HASHTBL
111 
112 static int	sfe_tx_copy_thresh = 256;
113 static int	sfe_rx_copy_thresh = 256;
114 
115 /* special PHY registers for SIS900 */
116 #define	MII_CONFIG1	0x0010
117 #define	MII_CONFIG2	0x0011
118 #define	MII_MASK	0x0013
119 #define	MII_RESV	0x0014
120 
121 #define	PHY_MASK		0xfffffff0
122 #define	PHY_SIS900_INTERNAL	0x001d8000
123 #define	PHY_ICS1893		0x0015f440
124 
125 
126 #define	SFE_DESC_SIZE	16	/* including pads rounding up to power of 2 */
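/*
 * A reference sketch of the descriptor this driver manipulates.  The
 * authoritative definition lives in sfereg.h; judging from the d_link,
 * d_cmdsts and d_bufptr accesses below, it presumably follows the common
 * DP83815/SiS900 layout, padded to SFE_DESC_SIZE (16) bytes:
 *
 *	struct sfe_desc {
 *		volatile uint32_t	d_link;	   // dma address of next descriptor
 *		volatile uint32_t	d_cmdsts;  // CMDSTS_OWN etc. | buffer length
 *		volatile uint32_t	d_bufptr;  // dma address of the buffer
 *		volatile uint32_t	d_pad;	   // pad up to a power of 2
 *	};
 */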
127 
128 /*
129  * Supported chips
130  */
131 struct chip_info {
132 	uint16_t	venid;
133 	uint16_t	devid;
134 	char		*chip_name;
135 	int		chip_type;
136 #define	CHIPTYPE_DP83815	0
137 #define	CHIPTYPE_SIS900		1
138 };
139 
140 /*
141  * Chip dependent MAC state
142  */
143 struct sfe_dev {
144 	/* misc HW information */
145 	struct chip_info	*chip;
146 	uint32_t		our_intr_bits;
147 	uint32_t		isr_pended;
148 	uint32_t		cr;
149 	uint_t			tx_drain_threshold;
150 	uint_t			tx_fill_threshold;
151 	uint_t			rx_drain_threshold;
152 	uint_t			rx_fill_threshold;
153 	uint8_t			revid;	/* revision from PCI configuration */
154 	boolean_t		(*get_mac_addr)(struct gem_dev *);
155 	uint8_t			mac_addr[ETHERADDRL];
156 	uint8_t			bridge_revid;
157 };
158 
159 /*
160  * Hardware information
161  */
162 struct chip_info sfe_chiptbl[] = {
163 	{ 0x1039, 0x0900, "SiS900", CHIPTYPE_SIS900, },
164 	{ 0x100b, 0x0020, "DP83815/83816", CHIPTYPE_DP83815, },
165 	{ 0x1039, 0x7016, "SiS7016", CHIPTYPE_SIS900, },
166 };
167 #define	CHIPTABLESIZE (sizeof (sfe_chiptbl)/sizeof (struct chip_info))
168 
169 /* ======================================================== */
170 
171 /* mii operations */
172 static void  sfe_mii_sync_dp83815(struct gem_dev *);
173 static void  sfe_mii_sync_sis900(struct gem_dev *);
174 static uint16_t  sfe_mii_read_dp83815(struct gem_dev *, uint_t);
175 static uint16_t  sfe_mii_read_sis900(struct gem_dev *, uint_t);
176 static void sfe_mii_write_dp83815(struct gem_dev *, uint_t, uint16_t);
177 static void sfe_mii_write_sis900(struct gem_dev *, uint_t, uint16_t);
178 static void sfe_set_eq_sis630(struct gem_dev *);
179 /* nic operations */
180 static int sfe_reset_chip_sis900(struct gem_dev *);
181 static int sfe_reset_chip_dp83815(struct gem_dev *);
182 static int sfe_init_chip(struct gem_dev *);
183 static int sfe_start_chip(struct gem_dev *);
184 static int sfe_stop_chip(struct gem_dev *);
185 static int sfe_set_media(struct gem_dev *);
186 static int sfe_set_rx_filter_dp83815(struct gem_dev *);
187 static int sfe_set_rx_filter_sis900(struct gem_dev *);
188 static int sfe_get_stats(struct gem_dev *);
189 static int sfe_attach_chip(struct gem_dev *);
190 
191 /* descriptor operations */
192 static int sfe_tx_desc_write(struct gem_dev *dp, int slot,
193 		    ddi_dma_cookie_t *dmacookie, int frags, uint64_t flags);
194 static void sfe_tx_start(struct gem_dev *dp, int startslot, int nslot);
195 static void sfe_rx_desc_write(struct gem_dev *dp, int slot,
196 		    ddi_dma_cookie_t *dmacookie, int frags);
197 static uint_t sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc);
198 static uint64_t sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc);
199 
200 static void sfe_tx_desc_init(struct gem_dev *dp, int slot);
201 static void sfe_rx_desc_init(struct gem_dev *dp, int slot);
202 static void sfe_tx_desc_clean(struct gem_dev *dp, int slot);
203 static void sfe_rx_desc_clean(struct gem_dev *dp, int slot);
204 
205 /* interrupt handler */
206 static uint_t sfe_interrupt(struct gem_dev *dp);
207 
208 /* ======================================================== */
209 
210 /* mapping attributes */
211 /* Data access requirements. */
212 static struct ddi_device_acc_attr sfe_dev_attr = {
213 	DDI_DEVICE_ATTR_V0,
214 	DDI_STRUCTURE_LE_ACC,
215 	DDI_STRICTORDER_ACC
216 };
217 
218 /* On sparc, buffers should be native endian for speed */
219 static struct ddi_device_acc_attr sfe_buf_attr = {
220 	DDI_DEVICE_ATTR_V0,
221 	DDI_NEVERSWAP_ACC,	/* native endianness */
222 	DDI_STRICTORDER_ACC
223 };
224 
225 static ddi_dma_attr_t sfe_dma_attr_buf = {
226 	DMA_ATTR_V0,		/* dma_attr_version */
227 	0,			/* dma_attr_addr_lo */
228 	0xffffffffull,		/* dma_attr_addr_hi */
229 	0x00000fffull,		/* dma_attr_count_max */
230 	0, /* patched later */	/* dma_attr_align */
231 	0x000003fc,		/* dma_attr_burstsizes */
232 	1,			/* dma_attr_minxfer */
233 	0x00000fffull,		/* dma_attr_maxxfer */
234 	0xffffffffull,		/* dma_attr_seg */
235 	0, /* patched later */	/* dma_attr_sgllen */
236 	1,			/* dma_attr_granular */
237 	0			/* dma_attr_flags */
238 };
239 
240 static ddi_dma_attr_t sfe_dma_attr_desc = {
241 	DMA_ATTR_V0,		/* dma_attr_version */
242 	16,			/* dma_attr_addr_lo */
243 	0xffffffffull,		/* dma_attr_addr_hi */
244 	0xffffffffull,		/* dma_attr_count_max */
245 	16,			/* dma_attr_align */
246 	0x000003fc,		/* dma_attr_burstsizes */
247 	1,			/* dma_attr_minxfer */
248 	0xffffffffull,		/* dma_attr_maxxfer */
249 	0xffffffffull,		/* dma_attr_seg */
250 	1,			/* dma_attr_sgllen */
251 	1,			/* dma_attr_granular */
252 	0			/* dma_attr_flags */
253 };
254 
255 uint32_t sfe_use_pcimemspace = 0;
256 
257 /* ======================================================== */
258 /*
259  * HW manipulation routines
260  */
261 /* ======================================================== */
262 
263 #define	SFE_EEPROM_DELAY(dp)	\
264 	{ (void) INL(dp, EROMAR); (void) INL(dp, EROMAR); }
265 #define	EE_CMD_READ	6
266 #define	EE_CMD_SHIFT	6
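/*
 * sfe_read_eeprom() below bit-bangs a 93C46-style 3-wire serial EEPROM
 * (an assumption based on the EROMAR bit names; the exact part may vary
 * by board): with chip select (EECS) asserted it clocks out a 9-bit
 * command on EEDI - the 3-bit READ opcode 110b followed by a 6-bit word
 * offset - and then clocks in a 16-bit word on EEDO, MSB first.  For
 * example, reading word 0x8 shifts out 110001000b.
 */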
267 
268 static uint16_t
269 sfe_read_eeprom(struct gem_dev *dp, uint_t offset)
270 {
271 	int		eedi;
272 	int		i;
273 	uint16_t	ret;
274 
275 	/* ensure chip select is de-asserted */
276 	OUTL(dp, EROMAR, 0);
277 	SFE_EEPROM_DELAY(dp);
278 	OUTL(dp, EROMAR, EROMAR_EESK);
279 	SFE_EEPROM_DELAY(dp);
280 
281 	/* assert chip select */
282 	offset |= EE_CMD_READ << EE_CMD_SHIFT;
283 
284 	for (i = 8; i >= 0; i--) {
285 		/* make command */
286 		eedi = ((offset >> i) & 1) << EROMAR_EEDI_SHIFT;
287 
288 		/* send 1 bit */
289 		OUTL(dp, EROMAR, EROMAR_EECS | eedi);
290 		SFE_EEPROM_DELAY(dp);
291 		OUTL(dp, EROMAR, EROMAR_EECS | eedi | EROMAR_EESK);
292 		SFE_EEPROM_DELAY(dp);
293 	}
294 
295 	OUTL(dp, EROMAR, EROMAR_EECS);
296 
297 	ret = 0;
298 	for (i = 0; i < 16; i++) {
299 		/* Get 1 bit */
300 		OUTL(dp, EROMAR, EROMAR_EECS);
301 		SFE_EEPROM_DELAY(dp);
302 		OUTL(dp, EROMAR, EROMAR_EECS | EROMAR_EESK);
303 		SFE_EEPROM_DELAY(dp);
304 
305 		ret = (ret << 1) | ((INL(dp, EROMAR) >> EROMAR_EEDO_SHIFT) & 1);
306 	}
307 
308 	OUTL(dp, EROMAR, 0);
309 	SFE_EEPROM_DELAY(dp);
310 
311 	return (ret);
312 }
313 #undef SFE_EEPROM_DELAY
314 
315 static boolean_t
316 sfe_get_mac_addr_dp83815(struct gem_dev *dp)
317 {
318 	uint8_t		*mac;
319 	uint_t		val;
320 	int		i;
321 
322 #define	BITSET(p, ix, v)	(p)[(ix)/8] |= ((v) ? 1 : 0) << ((ix) & 0x7)
323 
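	/*
	 * On the DP83815 the factory MAC address is stored bit-shifted
	 * across EEPROM words 6..9 rather than byte-aligned, so it is
	 * reassembled one bit at a time with BITSET(): bit 0 comes from
	 * word 6, bits 1-16 from word 7, bits 17-32 from word 8, and
	 * bits 33-47 from word 9.
	 */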
324 	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
325 
326 	mac = dp->dev_addr.ether_addr_octet;
327 
328 	/* first of all, clear MAC address buffer */
329 	bzero(mac, ETHERADDRL);
330 
331 	/* get bit 0 */
332 	val = sfe_read_eeprom(dp, 0x6);
333 	BITSET(mac, 0, val & 1);
334 
335 	/* get bit 1 - 16 */
336 	val = sfe_read_eeprom(dp, 0x7);
337 	for (i = 0; i < 16; i++) {
338 		BITSET(mac, 1 + i, val & (1 << (15 - i)));
339 	}
340 
341 	/* get bit 17 - 32 */
342 	val = sfe_read_eeprom(dp, 0x8);
343 	for (i = 0; i < 16; i++) {
344 		BITSET(mac, 17 + i, val & (1 << (15 - i)));
345 	}
346 
347 	/* get bit 33 - 47 */
348 	val = sfe_read_eeprom(dp, 0x9);
349 	for (i = 0; i < 15; i++) {
350 		BITSET(mac, 33 + i, val & (1 << (15 - i)));
351 	}
352 
353 	return (B_TRUE);
354 #undef BITSET
355 }
356 
357 static boolean_t
358 sfe_get_mac_addr_sis900(struct gem_dev *dp)
359 {
360 	uint_t		val;
361 	int		i;
362 	uint8_t		*mac;
363 
364 	mac = dp->dev_addr.ether_addr_octet;
365 
366 	for (i = 0; i < ETHERADDRL/2; i++) {
367 		val = sfe_read_eeprom(dp, 0x8 + i);
368 		*mac++ = (uint8_t)val;
369 		*mac++ = (uint8_t)(val >> 8);
370 	}
371 
372 	return (B_TRUE);
373 }
374 
375 static dev_info_t *
376 sfe_search_pci_dev_subr(dev_info_t *cur_node, int vendor_id, int device_id)
377 {
378 	dev_info_t	*child_id;
379 	dev_info_t	*ret;
380 	int		vid, did;
381 
382 	if (cur_node == NULL) {
383 		return (NULL);
384 	}
385 
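	/*
	 * Depth-first walk over the device tree: scan this node and its
	 * siblings, recursing into each child, until a node whose
	 * "vendor-id"/"device-id" properties match is found.
	 */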
386 	/* check siblings */
387 	do {
388 		vid = ddi_prop_get_int(DDI_DEV_T_ANY, cur_node,
389 		    DDI_PROP_DONTPASS, "vendor-id", -1);
390 		did = ddi_prop_get_int(DDI_DEV_T_ANY, cur_node,
391 		    DDI_PROP_DONTPASS, "device-id", -1);
392 
393 		if (vid == vendor_id && did == device_id) {
394 			/* found */
395 			return (cur_node);
396 		}
397 
398 		/* check children */
399 		if ((child_id = ddi_get_child(cur_node)) != NULL) {
400 			if ((ret = sfe_search_pci_dev_subr(child_id,
401 			    vendor_id, device_id)) != NULL) {
402 				return (ret);
403 			}
404 		}
405 
406 	} while ((cur_node = ddi_get_next_sibling(cur_node)) != NULL);
407 
408 	/* not found */
409 	return (NULL);
410 }
411 
412 static dev_info_t *
413 sfe_search_pci_dev(int vendor_id, int device_id)
414 {
415 	return (sfe_search_pci_dev_subr(ddi_root_node(), vendor_id, device_id));
416 }
417 
418 static boolean_t
419 sfe_get_mac_addr_sis962(struct gem_dev *dp)
420 {
421 	boolean_t	ret;
422 	int		i;
423 
424 	ret = B_FALSE;
425 
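	/*
	 * On the SiS962 the EEPROM is shared, so it must be requested
	 * via EEREQ and the grant (EEGNT) polled for before the plain
	 * sis900 read routine can be used; EEDONE releases it afterwards.
	 */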
426 	/* raise the request signal to access the EEPROM */
427 	OUTL(dp, MEAR, EROMAR_EEREQ);
428 	for (i = 0; (INL(dp, MEAR) & EROMAR_EEGNT) == 0; i++) {
429 		if (i > 200) {
430 			/* failed to acquire eeprom */
431 			cmn_err(CE_NOTE,
432 			    CONS "%s: failed to access eeprom", dp->name);
433 			goto x;
434 		}
435 		drv_usecwait(10);
436 	}
437 	ret = sfe_get_mac_addr_sis900(dp);
438 x:
439 	/* release EEPROM */
440 	OUTL(dp, MEAR, EROMAR_EEDONE);
441 
442 	return (ret);
443 }
444 
445 static int
446 sfe_reset_chip_sis900(struct gem_dev *dp)
447 {
448 	int		i;
449 	uint32_t	done;
450 	uint32_t	val;
451 	struct sfe_dev	*lp = dp->private;
452 
453 	DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__));
454 
455 	/* invalidate mac addr cache */
456 	bzero(lp->mac_addr, sizeof (lp->mac_addr));
457 
458 	lp->cr = 0;
459 
460 	/* inhibit interrupt */
461 	OUTL(dp, IMR, 0);
462 	lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits;
463 
464 	OUTLINL(dp, RFCR, 0);
465 
466 	OUTL(dp, CR, CR_RST | CR_TXR | CR_RXR);
467 	drv_usecwait(10);
468 
469 	done = 0;
470 	for (i = 0; done != (ISR_TXRCMP | ISR_RXRCMP); i++) {
471 		if (i > 1000) {
472 			cmn_err(CE_WARN, "%s: chip reset timeout", dp->name);
473 			return (GEM_FAILURE);
474 		}
475 		done |= INL(dp, ISR) & (ISR_TXRCMP | ISR_RXRCMP);
476 		drv_usecwait(10);
477 	}
478 
479 	if (lp->revid == SIS630ET_900_REV) {
480 		lp->cr |= CR_ACCESSMODE;
481 		OUTL(dp, CR, lp->cr | INL(dp, CR));
482 	}
483 
484 	/* Configuration register: enable PCI parity */
485 	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b",
486 	    dp->name, INL(dp, CFG), CFG_BITS_SIS900));
487 	val = 0;
488 	if (lp->revid >= SIS635A_900_REV ||
489 	    lp->revid == SIS900B_900_REV) {
490 		/* what is this ? */
491 		val |= CFG_RND_CNT;
492 	}
493 	OUTL(dp, CFG, val);
494 	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name,
495 	    INL(dp, CFG), CFG_BITS_SIS900));
496 
497 	return (GEM_SUCCESS);
498 }
499 
500 static int
501 sfe_reset_chip_dp83815(struct gem_dev *dp)
502 {
503 	int		i;
504 	uint32_t	val;
505 	struct sfe_dev	*lp = dp->private;
506 
507 	DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__));
508 
509 	/* invalidate mac addr cache */
510 	bzero(lp->mac_addr, sizeof (lp->mac_addr));
511 
512 	lp->cr = 0;
513 
514 	/* inhibit interrupts */
515 	OUTL(dp, IMR, 0);
516 	lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits;
517 
518 	OUTL(dp, RFCR, 0);
519 
520 	OUTL(dp, CR, CR_RST);
521 	drv_usecwait(10);
522 
523 	for (i = 0; INL(dp, CR) & CR_RST; i++) {
524 		if (i > 100) {
525 			cmn_err(CE_WARN, "!%s: chip reset timeout", dp->name);
526 			return (GEM_FAILURE);
527 		}
528 		drv_usecwait(10);
529 	}
530 	DPRINTF(0, (CE_CONT, "!%s: chip reset in %duS", dp->name, i*10));
531 
532 	OUTL(dp, CCSR, CCSR_PMESTS);
533 	OUTL(dp, CCSR, 0);
534 
535 	/* Configuration register: enable PCI parity */
536 	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b",
537 	    dp->name, INL(dp, CFG), CFG_BITS_DP83815));
538 	val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
539 	OUTL(dp, CFG, val | CFG_PAUSE_ADV);
540 	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name,
541 	    INL(dp, CFG), CFG_BITS_DP83815));
542 
543 	return (GEM_SUCCESS);
544 }
545 
546 static int
547 sfe_init_chip(struct gem_dev *dp)
548 {
549 	/* Configuration register: has been set up in sfe_reset_chip() */
550 
551 	/* PCI test control register: do nothing */
552 
553 	/* Interrupt status register : do nothing */
554 
555 	/* Interrupt mask register: clear, but leave lp->our_intr_bits */
556 	OUTL(dp, IMR, 0);
557 
558 	/* Enhanced PHY Access register (sis900): do nothing */
559 
560 	/* Transmit Descriptor Pointer register: base addr of TX ring */
561 	OUTL(dp, TXDP, dp->tx_ring_dma);
562 
563 	/* Receive descriptor pointer register: base addr of RX ring */
564 	OUTL(dp, RXDP, dp->rx_ring_dma);
565 
566 	return (GEM_SUCCESS);
567 }
568 
569 static uint_t
570 sfe_mcast_hash(struct gem_dev *dp, uint8_t *addr)
571 {
572 	return (gem_ether_crc_be(addr, ETHERADDRL));
573 }
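/*
 * The big-endian CRC computed above is the full 32-bit hash; each rx
 * filter routine below picks out the bits its chip needs.  The DP83815
 * uses the top 9 bits as an index into a 512-bit hash table
 * (hash >> (32 - 9)), while the SiS900 variants use the top 7 or 8
 * bits (hash >> 25 or hash >> 24) depending on the chip revision.
 */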
574 
575 #ifdef DEBUG_LEVEL
576 static void
577 sfe_rxfilter_dump(struct gem_dev *dp, int start, int end)
578 {
579 	int		i;
580 	int		j;
581 	uint16_t	ram[0x10];
582 
583 	cmn_err(CE_CONT, "!%s: rx filter ram dump:", dp->name);
584 #define	WORDS_PER_LINE	4
585 	for (i = start; i < end; i += WORDS_PER_LINE*2) {
586 		for (j = 0; j < WORDS_PER_LINE; j++) {
587 			OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i + j*2);
588 			ram[j] = INL(dp, RFDR);
589 		}
590 
591 		cmn_err(CE_CONT, "!0x%02x: 0x%04x 0x%04x 0x%04x 0x%04x",
592 		    i, ram[0], ram[1], ram[2], ram[3]);
593 		}
594 
595 #undef	WORDS_PER_LINE
596 }
597 #endif
598 
599 static uint_t	sfe_rf_perfect_base_dp83815[] = {
600 	RFADDR_PMATCH0_DP83815,
601 	RFADDR_PMATCH1_DP83815,
602 	RFADDR_PMATCH2_DP83815,
603 	RFADDR_PMATCH3_DP83815,
604 };
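/*
 * The DP83815 rx filter provides four "perfect match" pattern buffers
 * in filter RAM, at the base offsets listed above.
 * sfe_set_rx_filter_dp83815() loads up to four multicast addresses
 * into them when the multicast list is small, enabling each via the
 * RFCR_APAT bits, and falls back to the 512-bit hash table for
 * larger lists.
 */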
605 
606 static int
607 sfe_set_rx_filter_dp83815(struct gem_dev *dp)
608 {
609 	int		i;
610 	int		j;
611 	uint32_t	mode;
612 	uint8_t		*mac = dp->cur_addr.ether_addr_octet;
613 	uint16_t	hash_tbl[32];
614 	struct sfe_dev	*lp = dp->private;
615 
616 	DPRINTF(1, (CE_CONT, CONS "%s: %s: called, mc_count:%d, mode:0x%b",
617 	    dp->name, __func__, dp->mc_count, dp->rxmode, RXMODE_BITS));
618 
619 #if DEBUG_LEVEL > 0
620 	for (i = 0; i < dp->mc_count; i++) {
621 		cmn_err(CE_CONT,
622 		"!%s: adding mcast(%d) %02x:%02x:%02x:%02x:%02x:%02x",
623 		    dp->name, i,
624 		    dp->mc_list[i].addr.ether_addr_octet[0],
625 		    dp->mc_list[i].addr.ether_addr_octet[1],
626 		    dp->mc_list[i].addr.ether_addr_octet[2],
627 		    dp->mc_list[i].addr.ether_addr_octet[3],
628 		    dp->mc_list[i].addr.ether_addr_octet[4],
629 		    dp->mc_list[i].addr.ether_addr_octet[5]);
630 	}
631 #endif
632 	if ((dp->rxmode & RXMODE_ENABLE) == 0) {
633 		/* disable rx filter */
634 		OUTL(dp, RFCR, 0);
635 		return (GEM_SUCCESS);
636 	}
637 
638 	/*
639 	 * Set Receive filter control register
640 	 */
641 	if (dp->rxmode & RXMODE_PROMISC) {
642 		/* all broadcast, all multicast, all physical */
643 		mode = RFCR_AAB | RFCR_AAM | RFCR_AAP;
644 	} else if ((dp->rxmode & RXMODE_ALLMULTI) || dp->mc_count > 16*32/2) {
645 		/* all broadcast, all multicast, physical for the chip */
646 		mode = RFCR_AAB | RFCR_AAM | RFCR_APM_DP83815;
647 	} else if (dp->mc_count > 4) {
648 		/*
649 		 * Use multicast hash table,
650 		 * accept all broadcast and physical for the chip.
651 		 */
652 		mode = RFCR_AAB | RFCR_MHEN_DP83815 | RFCR_APM_DP83815;
653 
654 		bzero(hash_tbl, sizeof (hash_tbl));
655 		for (i = 0; i < dp->mc_count; i++) {
656 			j = dp->mc_list[i].hash >> (32 - 9);
657 			hash_tbl[j / 16] |= 1 << (j % 16);
658 		}
659 	} else {
660 		/*
661 		 * Use the pattern match filter for multicast addresses,
662 		 * accept all broadcast and physical for the chip
663 		 */
664 		/* need to enable corresponding pattern registers */
665 		mode = RFCR_AAB | RFCR_APM_DP83815 |
666 		    (((1 << dp->mc_count) - 1) << RFCR_APAT_SHIFT);
667 	}
668 
669 #if DEBUG_LEVEL > 1
670 	cmn_err(CE_CONT,
671 	    "!%s: mac %02x:%02x:%02x:%02x:%02x:%02x"
672 	    "  cache %02x:%02x:%02x:%02x:%02x:%02x",
673 	    dp->name, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
674 	    lp->mac_addr[0], lp->mac_addr[1],
675 	    lp->mac_addr[2], lp->mac_addr[3],
676 	    lp->mac_addr[4], lp->mac_addr[5]);
677 #endif
678 	if (bcmp(mac, lp->mac_addr, ETHERADDRL) != 0) {
679 		/*
680 		 * XXX - need to *disable* rx filter to load mac address for
681 		 * the chip. otherwise, we cannot setup rxfilter correctly.
682 		 */
683 		/* setup perfect match register for my station address */
684 		for (i = 0; i < ETHERADDRL; i += 2) {
685 			OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i);
686 			OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]);
687 		}
688 
689 		bcopy(mac, lp->mac_addr, ETHERADDRL);
690 	}
691 
692 #if DEBUG_LEVEL > 3
693 	/* clear pattern ram */
694 	for (j = 0x200; j < 0x380; j += 2) {
695 		OUTL(dp, RFCR, j);
696 		OUTL(dp, RFDR, 0);
697 	}
698 #endif
699 	if (mode & RFCR_APAT_DP83815) {
700 		/* setup multicast address into pattern match registers */
701 		for (j = 0; j < dp->mc_count; j++) {
702 			mac = &dp->mc_list[j].addr.ether_addr_octet[0];
703 			for (i = 0; i < ETHERADDRL; i += 2) {
704 				OUTL(dp, RFCR,
705 				    sfe_rf_perfect_base_dp83815[j] + i*2);
706 				OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]);
707 			}
708 		}
709 
710 		/* setup pattern count registers */
711 		OUTL(dp, RFCR, RFADDR_PCOUNT01_DP83815);
712 		OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL);
713 		OUTL(dp, RFCR, RFADDR_PCOUNT23_DP83815);
714 		OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL);
715 	}
716 
717 	if (mode & RFCR_MHEN_DP83815) {
718 		/* Load Multicast hash table */
719 		for (i = 0; i < 32; i++) {
720 			/* for DP83815, index is in byte */
721 			OUTL(dp, RFCR, RFADDR_MULTICAST_DP83815 + i*2);
722 			OUTL(dp, RFDR, hash_tbl[i]);
723 		}
724 	}
725 #if DEBUG_LEVEL > 2
726 	sfe_rxfilter_dump(dp, 0, 0x10);
727 	sfe_rxfilter_dump(dp, 0x200, 0x380);
728 #endif
729 	/* Set rx filter mode and enable rx filter */
730 	OUTL(dp, RFCR, RFCR_RFEN | mode);
731 
732 	return (GEM_SUCCESS);
733 }
734 
735 static int
736 sfe_set_rx_filter_sis900(struct gem_dev *dp)
737 {
738 	int		i;
739 	uint32_t	mode;
740 	uint16_t	hash_tbl[16];
741 	uint8_t		*mac = dp->cur_addr.ether_addr_octet;
742 	int		hash_size;
743 	int		hash_shift;
744 	struct sfe_dev	*lp = dp->private;
745 
746 	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
747 
748 	if ((dp->rxmode & RXMODE_ENABLE) == 0) {
749 		/* disable rx filter */
750 		OUTLINL(dp, RFCR, 0);
751 		return (GEM_SUCCESS);
752 	}
753 
754 	/*
755 	 * determine hardware hash table size in word.
756 	 */
757 	hash_shift = 25;
758 	if (lp->revid >= SIS635A_900_REV || lp->revid == SIS900B_900_REV) {
759 		hash_shift = 24;
760 	}
761 	hash_size = (1 << (32 - hash_shift)) / 16;
762 	bzero(hash_tbl, sizeof (hash_tbl));
763 
764 	/* Set Receive filter control register */
765 
766 	if (dp->rxmode & RXMODE_PROMISC) {
767 		/* all broadcast, all multicast, all physical */
768 		mode = RFCR_AAB | RFCR_AAM | RFCR_AAP;
769 	} else if ((dp->rxmode & RXMODE_ALLMULTI) ||
770 	    dp->mc_count > hash_size*16/2) {
771 		/* all broadcast, all multicast, physical for the chip */
772 		mode = RFCR_AAB | RFCR_AAM;
773 	} else {
774 		/* all broadcast, physical for the chip */
775 		mode = RFCR_AAB;
776 	}
777 
778 	/* make hash table */
779 	for (i = 0; i < dp->mc_count; i++) {
780 		uint_t	h;
781 		h = dp->mc_list[i].hash >> hash_shift;
782 		hash_tbl[h / 16] |= 1 << (h % 16);
783 	}
784 
785 	if (bcmp(mac, lp->mac_addr, ETHERADDRL) != 0) {
786 		/* Disable Rx filter and load mac address */
787 		for (i = 0; i < ETHERADDRL/2; i++) {
788 			/* For sis900, index is in word */
789 			OUTLINL(dp, RFCR,
790 			    (RFADDR_MAC_SIS900+i) << RFCR_RFADDR_SHIFT_SIS900);
791 			OUTLINL(dp, RFDR, (mac[i*2+1] << 8) | mac[i*2]);
792 		}
793 
794 		bcopy(mac, lp->mac_addr, ETHERADDRL);
795 	}
796 
797 	/* Load Multicast hash table */
798 	for (i = 0; i < hash_size; i++) {
799 		/* For sis900, index is in word */
800 		OUTLINL(dp, RFCR,
801 		    (RFADDR_MULTICAST_SIS900 + i) << RFCR_RFADDR_SHIFT_SIS900);
802 		OUTLINL(dp, RFDR, hash_tbl[i]);
803 	}
804 
805 	/* Load rx filter mode and enable rx filter */
806 	OUTLINL(dp, RFCR, RFCR_RFEN | mode);
807 
808 	return (GEM_SUCCESS);
809 }
810 
811 static int
812 sfe_start_chip(struct gem_dev *dp)
813 {
814 	struct sfe_dev	*lp = dp->private;
815 
816 	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
817 
818 	/*
819 	 * setup interrupt mask, which shouldn't include ISR_TOK
820 	 * to improve performance.
821 	 */
822 	lp->our_intr_bits = OUR_INTR_BITS;
823 
824 	/* enable interrupt */
825 	if ((dp->misc_flag & GEM_NOINTR) == 0) {
826 		OUTL(dp, IER, 1);
827 		OUTL(dp, IMR, lp->our_intr_bits);
828 	}
829 
830 	/* Kick RX */
831 	OUTL(dp, CR, lp->cr | CR_RXE);
832 
833 	return (GEM_SUCCESS);
834 }
835 
836 /*
837  * Stop nic core gracefully.
838  */
839 static int
840 sfe_stop_chip(struct gem_dev *dp)
841 {
842 	struct sfe_dev	*lp = dp->private;
843 	uint32_t	done;
844 	int		i;
845 	uint32_t	val;
846 
847 	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
848 
849 	/*
850 	 * Although we inhibit interrupts here, we don't clear the soft copy
851 	 * of the interrupt mask, to avoid bogus interrupts.
852 	 */
853 	OUTL(dp, IMR, 0);
854 
855 	/* stop TX and RX immediately */
856 	OUTL(dp, CR, lp->cr | CR_TXR | CR_RXR);
857 
858 	done = 0;
859 	for (i = 0; done != (ISR_RXRCMP | ISR_TXRCMP); i++) {
860 		if (i > 1000) {
861 			/*
862 			 * As the gem layer will call sfe_reset_chip(),
863 			 * we don't need to reset further.
864 			 */
865 			cmn_err(CE_NOTE, "!%s: %s: Tx/Rx reset timeout",
866 			    dp->name, __func__);
867 
868 			return (GEM_FAILURE);
869 		}
870 		val = INL(dp, ISR);
871 		done |= val & (ISR_RXRCMP | ISR_TXRCMP);
872 		lp->isr_pended |= val & lp->our_intr_bits;
873 		drv_usecwait(10);
874 	}
875 
876 	return (GEM_SUCCESS);
877 }
878 
879 #ifndef	__sparc
880 /*
881  * Stop nic core gracefully for quiesce
882  */
883 static int
884 sfe_stop_chip_quiesce(struct gem_dev *dp)
885 {
886 	struct sfe_dev	*lp = dp->private;
887 	uint32_t	done;
888 	int		i;
889 	uint32_t	val;
890 
891 	/*
892 	 * Although we inhibit interrupts here, we don't clear the soft copy
893 	 * of the interrupt mask, to avoid bogus interrupts.
894 	 */
895 	OUTL(dp, IMR, 0);
896 
897 	/* stop TX and RX immediately */
898 	OUTL(dp, CR, CR_TXR | CR_RXR);
899 
900 	done = 0;
901 	for (i = 0; done != (ISR_RXRCMP | ISR_TXRCMP); i++) {
902 		if (i > 1000) {
903 			/*
904 			 * As the gem layer will call sfe_reset_chip(),
905 			 * we don't need to reset further.
906 			 */
907 
908 			return (DDI_FAILURE);
909 		}
910 		val = INL(dp, ISR);
911 		done |= val & (ISR_RXRCMP | ISR_TXRCMP);
912 		lp->isr_pended |= val & lp->our_intr_bits;
913 		drv_usecwait(10);
914 	}
915 	return (DDI_SUCCESS);
916 }
917 #endif
918 
919 /*
920  * Setup media mode
921  */
922 static uint_t
923 sfe_mxdma_value[] = { 512, 4, 8, 16, 32, 64, 128, 256, };
924 
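/*
 * sfe_encode_mxdma() maps a byte count onto the 3-bit MXDMA field via
 * the table above: code 0 selects 512 bytes, and codes 1..7 select
 * 4, 8, 16, 32, 64, 128 and 256 bytes.  For example, a requested burst
 * of 100 bytes rounds up to code 6 (128 bytes).
 */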
925 static uint_t
926 sfe_encode_mxdma(uint_t burstsize)
927 {
928 	int	i;
929 
930 	if (burstsize > 256) {
931 		/* choose 512 */
932 		return (0);
933 	}
934 
935 	for (i = 1; i < 8; i++) {
936 		if (burstsize <= sfe_mxdma_value[i]) {
937 			break;
938 		}
939 	}
940 	return (i);
941 }
942 
943 static int
944 sfe_set_media(struct gem_dev *dp)
945 {
946 	uint32_t	txcfg;
947 	uint32_t	rxcfg;
948 	uint32_t	pcr;
949 	uint32_t	val;
950 	uint32_t	txmxdma;
951 	uint32_t	rxmxdma;
952 	struct sfe_dev	*lp = dp->private;
953 #ifdef DEBUG_LEVEL
954 	extern int	gem_speed_value[];
955 #endif
956 	DPRINTF(2, (CE_CONT, CONS "%s: %s: %s duplex, %d Mbps",
957 	    dp->name, __func__,
958 	    dp->full_duplex ? "full" : "half", gem_speed_value[dp->speed]));
959 
960 	/* initialize txcfg and rxcfg */
961 	txcfg = TXCFG_ATP;
962 	if (dp->full_duplex) {
963 		txcfg |= (TXCFG_CSI | TXCFG_HBI);
964 	}
965 	rxcfg = RXCFG_AEP | RXCFG_ARP;
966 	if (dp->full_duplex) {
967 		rxcfg |= RXCFG_ATX;
968 	}
969 
970 	/* select txmxdma and rxmxdma, maximum burst length */
971 	if (lp->chip->chip_type == CHIPTYPE_SIS900) {
972 #ifdef DEBUG_SIS900_EDB
973 		val = CFG_EDB_MASTER;
974 #else
975 		val = INL(dp, CFG) & CFG_EDB_MASTER;
976 #endif
977 		if (val) {
978 			/*
979 			 * sis900 built-in cores:
980 			 * max burst length must be fixed to 64
981 			 */
982 			txmxdma = 64;
983 			rxmxdma = 64;
984 		} else {
985 			/*
986 			 * sis900 pci chipset:
987 			 * the vendor recommended to fix max burst length
988 			 * to 512
989 			 */
990 			txmxdma = 512;
991 			rxmxdma = 512;
992 		}
993 	} else {
994 		/*
995 		 * NS dp83815/816:
996 		 * use user defined or default for tx/rx max burst length
997 		 */
998 		txmxdma = max(dp->txmaxdma, 256);
999 		rxmxdma = max(dp->rxmaxdma, 256);
1000 	}
1001 
1002 
1003 	/* tx high water mark */
1004 	lp->tx_drain_threshold = ROUNDUP2(dp->txthr, TXCFG_FIFO_UNIT);
1005 
1006 	/* determine tx_fill_threshold according to the drain threshold */
1007 	lp->tx_fill_threshold =
1008 	    TXFIFOSIZE - lp->tx_drain_threshold - TXCFG_FIFO_UNIT;
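	/*
	 * A worked example, assuming a 2KB tx FIFO (TXFIFOSIZE == 2048)
	 * and a 32-byte TXCFG_FIFO_UNIT: with dp->txthr == 256, the
	 * drain threshold is 256 and the fill threshold becomes
	 * 2048 - 256 - 32 = 1760, which the loop below then uses to cap
	 * the tx burst size.
	 */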
1009 
1010 	/* tune txmxdma not to exceed tx_fill_threshold */
1011 	for (; ; ) {
1012 		/* normalize txmxdma requested */
1013 		val = sfe_encode_mxdma(txmxdma);
1014 		txmxdma = sfe_mxdma_value[val];
1015 
1016 		if (txmxdma <= lp->tx_fill_threshold) {
1017 			break;
1018 		}
1019 		/* select new txmxdma */
1020 		txmxdma = txmxdma / 2;
1021 	}
1022 	txcfg |= val << TXCFG_MXDMA_SHIFT;
1023 
1024 	/* encode rxmxdma, maximum burst length for rx */
1025 	val = sfe_encode_mxdma(rxmxdma);
1026 	rxcfg |= val << RXCFG_MXDMA_SHIFT;
1027 	rxmxdma = sfe_mxdma_value[val];
1028 
1029 	/* receive starting threshold - it has only a 5-bit-wide field */
1030 	val = ROUNDUP2(max(dp->rxthr, ETHERMIN), RXCFG_FIFO_UNIT);
1031 	lp->rx_drain_threshold =
1032 	    min(val, (RXCFG_DRTH >> RXCFG_DRTH_SHIFT) * RXCFG_FIFO_UNIT);
1033 
1034 	DPRINTF(0, (CE_CONT,
1035 	    "%s: %s: tx: drain:%d(rest %d) fill:%d mxdma:%d,"
1036 	    " rx: drain:%d mxdma:%d",
1037 	    dp->name, __func__,
1038 	    lp->tx_drain_threshold, TXFIFOSIZE - lp->tx_drain_threshold,
1039 	    lp->tx_fill_threshold, txmxdma,
1040 	    lp->rx_drain_threshold, rxmxdma));
1041 
1042 	ASSERT(lp->tx_drain_threshold < 64*TXCFG_FIFO_UNIT);
1043 	ASSERT(lp->tx_fill_threshold < 64*TXCFG_FIFO_UNIT);
1044 	ASSERT(lp->rx_drain_threshold < 32*RXCFG_FIFO_UNIT);
1045 
1046 	txcfg |= ((lp->tx_fill_threshold/TXCFG_FIFO_UNIT) << TXCFG_FLTH_SHIFT)
1047 	    | (lp->tx_drain_threshold/TXCFG_FIFO_UNIT);
1048 	OUTL(dp, TXCFG, txcfg);
1049 
1050 	rxcfg |= ((lp->rx_drain_threshold/RXCFG_FIFO_UNIT) << RXCFG_DRTH_SHIFT);
1051 	if (lp->chip->chip_type == CHIPTYPE_DP83815) {
1052 		rxcfg |= RXCFG_ALP_DP83815;
1053 	}
1054 	OUTL(dp, RXCFG, rxcfg);
1055 
1056 	DPRINTF(0, (CE_CONT, CONS "%s: %s: txcfg:%b rxcfg:%b",
1057 	    dp->name, __func__,
1058 	    txcfg, TXCFG_BITS, rxcfg, RXCFG_BITS));
1059 
1060 	/* Flow control */
1061 	if (lp->chip->chip_type == CHIPTYPE_DP83815) {
1062 		pcr = INL(dp, PCR);
1063 		switch (dp->flow_control) {
1064 		case FLOW_CONTROL_SYMMETRIC:
1065 		case FLOW_CONTROL_RX_PAUSE:
1066 			OUTL(dp, PCR, pcr | PCR_PSEN | PCR_PS_MCAST);
1067 			break;
1068 
1069 		default:
1070 			OUTL(dp, PCR,
1071 			    pcr & ~(PCR_PSEN | PCR_PS_MCAST | PCR_PS_DA));
1072 			break;
1073 		}
1074 		DPRINTF(2, (CE_CONT, CONS "%s: PCR: %b", dp->name,
1075 		    INL(dp, PCR), PCR_BITS));
1076 
1077 	} else if (lp->chip->chip_type == CHIPTYPE_SIS900) {
1078 		switch (dp->flow_control) {
1079 		case FLOW_CONTROL_SYMMETRIC:
1080 		case FLOW_CONTROL_RX_PAUSE:
1081 			OUTL(dp, FLOWCTL, FLOWCTL_FLOWEN);
1082 			break;
1083 		default:
1084 			OUTL(dp, FLOWCTL, 0);
1085 			break;
1086 		}
1087 		DPRINTF(2, (CE_CONT, CONS "%s: FLOWCTL: %b",
1088 		    dp->name, INL(dp, FLOWCTL), FLOWCTL_BITS));
1089 	}
1090 	return (GEM_SUCCESS);
1091 }
1092 
1093 static int
1094 sfe_get_stats(struct gem_dev *dp)
1095 {
1096 	/* do nothing */
1097 	return (GEM_SUCCESS);
1098 }
1099 
1100 /*
1101  * descriptor manipulations
1102  */
1103 static int
1104 sfe_tx_desc_write(struct gem_dev *dp, int slot,
1105     ddi_dma_cookie_t *dmacookie, int frags, uint64_t flags)
1106 {
1107 	uint32_t		mark;
1108 	struct sfe_desc		*tdp;
1109 	ddi_dma_cookie_t	*dcp;
1110 	uint32_t		tmp0;
1111 #if DEBUG_LEVEL > 2
1112 	int			i;
1113 
1114 	cmn_err(CE_CONT,
1115 	    CONS "%s: time:%d %s seqnum: %d, slot %d, frags: %d flags: %llx",
1116 	    dp->name, ddi_get_lbolt(), __func__,
1117 	    dp->tx_desc_tail, slot, frags, flags);
1118 
1119 	for (i = 0; i < frags; i++) {
1120 		cmn_err(CE_CONT, CONS "%d: addr: 0x%x, len: 0x%x",
1121 		    i, dmacookie[i].dmac_address, dmacookie[i].dmac_size);
1122 	}
1123 #endif
1124 	/*
1125 	 * write the tx descriptor in reverse order.
1126 	 */
1127 #if DEBUG_LEVEL > 3
1128 	flags |= GEM_TXFLAG_INTR;
1129 #endif
1130 	mark = (flags & GEM_TXFLAG_INTR)
1131 	    ? (CMDSTS_OWN | CMDSTS_INTR) : CMDSTS_OWN;
1132 
1133 	ASSERT(frags == 1);
1134 	dcp = &dmacookie[0];
1135 	if (flags & GEM_TXFLAG_HEAD) {
1136 		mark &= ~CMDSTS_OWN;
1137 	}
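	/*
	 * Note that the OWN bit is deliberately withheld from the first
	 * descriptor of a packet here; sfe_tx_start() sets it after the
	 * rest of the chain has been synced, so the NIC never sees a
	 * partially built chain.
	 */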
1138 
1139 	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
1140 	tmp0 = (uint32_t)dcp->dmac_address;
1141 	mark |= (uint32_t)dcp->dmac_size;
1142 	tdp->d_bufptr = LE_32(tmp0);
1143 	tdp->d_cmdsts = LE_32(mark);
1144 
1145 	return (frags);
1146 }
1147 
1148 static void
1149 sfe_tx_start(struct gem_dev *dp, int start_slot, int nslot)
1150 {
1151 	uint_t			tx_ring_size = dp->gc.gc_tx_ring_size;
1152 	struct sfe_desc		*tdp;
1153 	struct sfe_dev		*lp = dp->private;
1154 
1155 	if (nslot > 1) {
1156 		gem_tx_desc_dma_sync(dp,
1157 		    SLOT(start_slot + 1, tx_ring_size),
1158 		    nslot - 1, DDI_DMA_SYNC_FORDEV);
1159 	}
1160 
1161 	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * start_slot];
1162 	tdp->d_cmdsts |= LE_32(CMDSTS_OWN);
1163 
1164 	gem_tx_desc_dma_sync(dp, start_slot, 1, DDI_DMA_SYNC_FORDEV);
1165 
1166 	/*
1167 	 * Activate the Transmit Buffer Manager Fill state machine.
1168 	 */
1169 	if (dp->mac_active) {
1170 		OUTL(dp, CR, lp->cr | CR_TXE);
1171 	}
1172 }
1173 
1174 static void
1175 sfe_rx_desc_write(struct gem_dev *dp, int slot,
1176     ddi_dma_cookie_t *dmacookie, int frags)
1177 {
1178 	struct sfe_desc		*rdp;
1179 	uint32_t		tmp0;
1180 	uint32_t		tmp1;
1181 #if DEBUG_LEVEL > 2
1182 	int			i;
1183 
1184 	ASSERT(frags == 1);
1185 
1186 	cmn_err(CE_CONT, CONS
1187 	    "%s: %s seqnum: %d, slot %d, frags: %d",
1188 	    dp->name, __func__, dp->rx_active_tail, slot, frags);
1189 	for (i = 0; i < frags; i++) {
1190 		cmn_err(CE_CONT, CONS "  frag: %d addr: 0x%llx, len: 0x%lx",
1191 		    i, dmacookie[i].dmac_address, dmacookie[i].dmac_size);
1192 	}
1193 #endif
1194 	/* for the last slot of the packet */
1195 	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
1196 
1197 	tmp0 = (uint32_t)dmacookie->dmac_address;
1198 	tmp1 = CMDSTS_INTR | (uint32_t)dmacookie->dmac_size;
1199 	rdp->d_bufptr = LE_32(tmp0);
1200 	rdp->d_cmdsts = LE_32(tmp1);
1201 }
1202 
1203 static uint_t
1204 sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc)
1205 {
1206 	uint_t			tx_ring_size = dp->gc.gc_tx_ring_size;
1207 	struct sfe_desc		*tdp;
1208 	uint32_t		status;
1209 	int			cols;
1210 	struct sfe_dev		*lp = dp->private;
1211 #ifdef DEBUG_LEVEL
1212 	int			i;
1213 	clock_t			delay;
1214 #endif
1215 	/* check status of the last descriptor */
1216 	tdp = (void *)
1217 	    &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot + ndesc - 1, tx_ring_size)];
1218 
1219 	/*
1220 	 * Don't use LE_32() directly on the tdp->d_cmdsts reference;
1221 	 * it is not atomic for big endian cpus.
1222 	 */
1223 	status = tdp->d_cmdsts;
1224 	status = LE_32(status);
1225 
1226 	DPRINTF(2, (CE_CONT, CONS "%s: time:%ld %s: slot:%d, status:0x%b",
1227 	    dp->name, ddi_get_lbolt(), __func__,
1228 	    slot, status, TXSTAT_BITS));
1229 
1230 	if (status & CMDSTS_OWN) {
1231 		/*
1232 		 * not yet transmitted
1233 		 */
1234 		/* workaround for tx hang */
1235 		if (lp->chip->chip_type == CHIPTYPE_DP83815 &&
1236 		    dp->mac_active) {
1237 			OUTL(dp, CR, lp->cr | CR_TXE);
1238 		}
1239 		return (0);
1240 	}
1241 
1242 	if (status & CMDSTS_MORE) {
1243 		/* XXX - a hardware problem, but don't panic the system */
1244 		/* avoid lint bug for %b format string including 32nd bit */
1245 		cmn_err(CE_NOTE, CONS
1246 		    "%s: tx status bits incorrect:  slot:%d, status:0x%x",
1247 		    dp->name, slot, status);
1248 	}
1249 
1250 #if DEBUG_LEVEL > 3
1251 	delay = (ddi_get_lbolt() - dp->tx_buf_head->txb_stime) * 10;
1252 	if (delay >= 50) {
1253 		DPRINTF(0, (CE_NOTE, "%s: tx deferred %d mS: slot %d",
1254 		    dp->name, delay, slot));
1255 	}
1256 #endif
1257 
1258 #if DEBUG_LEVEL > 3
1259 	for (i = 0; i < ndesc - 1; i++) {
1260 		uint32_t	s;
1261 		int		n;
1262 
1263 		n = SLOT(slot + i, tx_ring_size);
1264 		s = LE_32(
1265 		    ((struct sfe_desc *)((void *)
1266 		    &dp->tx_ring[SFE_DESC_SIZE * n]))->d_cmdsts);
1267 
1268 		ASSERT(s & CMDSTS_MORE);
1269 		ASSERT((s & CMDSTS_OWN) == 0);
1270 	}
1271 #endif
1272 
1273 	/*
1274 	 *  collect statistics
1275 	 */
1276 	if ((status & CMDSTS_OK) == 0) {
1277 
1278 		/* failed to transmit the packet */
1279 
1280 		DPRINTF(0, (CE_CONT, CONS "%s: Transmit error, Tx status %b",
1281 		    dp->name, status, TXSTAT_BITS));
1282 
1283 		dp->stats.errxmt++;
1284 
1285 		if (status & CMDSTS_TFU) {
1286 			dp->stats.underflow++;
1287 		} else if (status & CMDSTS_CRS) {
1288 			dp->stats.nocarrier++;
1289 		} else if (status & CMDSTS_OWC) {
1290 			dp->stats.xmtlatecoll++;
1291 		} else if ((!dp->full_duplex) && (status & CMDSTS_EC)) {
1292 			dp->stats.excoll++;
1293 			dp->stats.collisions += 16;
1294 		} else {
1295 			dp->stats.xmit_internal_err++;
1296 		}
1297 	} else if (!dp->full_duplex) {
1298 		cols = (status >> CMDSTS_CCNT_SHIFT) & CCNT_MASK;
1299 
1300 		if (cols > 0) {
1301 			if (cols == 1) {
1302 				dp->stats.first_coll++;
1303 			} else /* (cols > 1) */ {
1304 				dp->stats.multi_coll++;
1305 			}
1306 			dp->stats.collisions += cols;
1307 		} else if (status & CMDSTS_TD) {
1308 			dp->stats.defer++;
1309 		}
1310 	}
1311 	return (GEM_TX_DONE);
1312 }
1313 
1314 static uint64_t
1315 sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc)
1316 {
1317 	struct sfe_desc		*rdp;
1318 	uint_t			len;
1319 	uint_t			flag;
1320 	uint32_t		status;
1321 
1322 	flag = GEM_RX_DONE;
1323 
1324 	/* Don't read ISR because we cannot ack only the rx interrupt. */
1325 
1326 	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
1327 
1328 	/*
1329 	 * Don't use LE_32() directly on the rdp->d_cmdsts reference;
1330 	 * it is not atomic for big endian cpus.
1331 	 */
1332 	status = rdp->d_cmdsts;
1333 	status = LE_32(status);
1334 
1335 	DPRINTF(2, (CE_CONT, CONS "%s: time:%ld %s: slot:%d, status:0x%b",
1336 	    dp->name, ddi_get_lbolt(), __func__,
1337 	    slot, status, RXSTAT_BITS));
1338 
1339 	if ((status & CMDSTS_OWN) == 0) {
1340 		/*
1341 		 * No more received packets because
1342 		 * this buffer is owned by the NIC.
1343 		 */
1344 		return (0);
1345 	}
1346 
1347 #define	RX_ERR_BITS \
1348 	(CMDSTS_RXA | CMDSTS_RXO | CMDSTS_LONG | CMDSTS_RUNT | \
1349 		CMDSTS_ISE | CMDSTS_CRCE | CMDSTS_FAE | CMDSTS_MORE)
1350 
1351 	if (status & RX_ERR_BITS) {
1352 		/*
1353 		 * Packet with error received
1354 		 */
1355 		DPRINTF(0, (CE_CONT, CONS "%s: Corrupted packet "
1356 		    "received, buffer status: %b",
1357 		    dp->name, status, RXSTAT_BITS));
1358 
1359 		/* collect statistics information */
1360 		dp->stats.errrcv++;
1361 
1362 		if (status & CMDSTS_RXO) {
1363 			dp->stats.overflow++;
1364 		} else if (status & (CMDSTS_LONG | CMDSTS_MORE)) {
1365 			dp->stats.frame_too_long++;
1366 		} else if (status & CMDSTS_RUNT) {
1367 			dp->stats.runt++;
1368 		} else if (status & (CMDSTS_ISE | CMDSTS_FAE)) {
1369 			dp->stats.frame++;
1370 		} else if (status & CMDSTS_CRCE) {
1371 			dp->stats.crc++;
1372 		} else {
1373 			dp->stats.rcv_internal_err++;
1374 		}
1375 
1376 		return (flag | GEM_RX_ERR);
1377 	}
1378 
1379 	/*
1380 	 * this packet was received without errors
1381 	 */
1382 	if ((len = (status & CMDSTS_SIZE)) >= ETHERFCSL) {
1383 		len -= ETHERFCSL;
1384 	}
1385 
1386 #if DEBUG_LEVEL > 10
1387 {
1388 	int	i;
1389 	uint8_t	*bp = dp->rx_buf_head->rxb_buf;
1390 
1391 	cmn_err(CE_CONT, CONS "%s: len:%d", dp->name, len);
1392 
1393 	for (i = 0; i < 60; i += 10) {
1394 		cmn_err(CE_CONT, CONS
1395 		    "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x",
1396 		    bp[0], bp[1], bp[2], bp[3], bp[4],
1397 		    bp[5], bp[6], bp[7], bp[8], bp[9]);
1398 	}
1399 	bp += 10;
1400 }
1401 #endif
1402 	return (flag | (len & GEM_RX_LEN));
1403 }
1404 
1405 static void
1406 sfe_tx_desc_init(struct gem_dev *dp, int slot)
1407 {
1408 	uint_t			tx_ring_size = dp->gc.gc_tx_ring_size;
1409 	struct sfe_desc		*tdp;
1410 	uint32_t		here;
1411 
1412 	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
1413 
1414 	/* don't clear the d_link field, which has a valid pointer */
1415 	tdp->d_cmdsts = 0;
1416 
1417 	/* make a link to this from the previous descriptor */
1418 	here = ((uint32_t)dp->tx_ring_dma) + SFE_DESC_SIZE*slot;
1419 
1420 	tdp = (void *)
1421 	    &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot - 1, tx_ring_size)];
1422 	tdp->d_link = LE_32(here);
1423 }
1424 
1425 static void
1426 sfe_rx_desc_init(struct gem_dev *dp, int slot)
1427 {
1428 	uint_t			rx_ring_size = dp->gc.gc_rx_ring_size;
1429 	struct sfe_desc		*rdp;
1430 	uint32_t		here;
1431 
1432 	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
1433 
1434 	/* don't clear the d_link field, which has a valid pointer */
1435 	rdp->d_cmdsts = LE_32(CMDSTS_OWN);
1436 
1437 	/* make a link to this from the previous descriptor */
1438 	here = ((uint32_t)dp->rx_ring_dma) + SFE_DESC_SIZE*slot;
1439 
1440 	rdp = (void *)
1441 	    &dp->rx_ring[SFE_DESC_SIZE * SLOT(slot - 1, rx_ring_size)];
1442 	rdp->d_link = LE_32(here);
1443 }
1444 
1445 static void
1446 sfe_tx_desc_clean(struct gem_dev *dp, int slot)
1447 {
1448 	struct sfe_desc		*tdp;
1449 
1450 	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
1451 	tdp->d_cmdsts = 0;
1452 }
1453 
1454 static void
1455 sfe_rx_desc_clean(struct gem_dev *dp, int slot)
1456 {
1457 	struct sfe_desc		*rdp;
1458 
1459 	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
1460 	rdp->d_cmdsts = LE_32(CMDSTS_OWN);
1461 }
1462 
1463 /*
1464  * Device dependent interrupt handler
1465  */
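/*
 * A note on lp->isr_pended: ISR is read-to-clear, and the reset/stop
 * paths above read it while interrupts are masked.  Any of our bits
 * observed there are latched into isr_pended so that this handler can
 * still claim the already-acknowledged interrupt instead of reporting
 * it as unclaimed.
 */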
1466 static uint_t
1467 sfe_interrupt(struct gem_dev *dp)
1468 {
1469 	uint_t		rx_ring_size = dp->gc.gc_rx_ring_size;
1470 	uint32_t	isr;
1471 	uint32_t	isr_bogus;
1472 	uint_t		flags = 0;
1473 	struct sfe_dev	*lp = dp->private;
1474 
1475 	/* read reason and clear interrupt */
1476 	isr = INL(dp, ISR);
1477 
1478 	isr_bogus = lp->isr_pended;
1479 	lp->isr_pended = 0;
1480 
1481 	if (((isr | isr_bogus) & lp->our_intr_bits) == 0) {
1482 		/* we are not the interrupt source */
1483 		return (DDI_INTR_UNCLAIMED);
1484 	}
1485 
1486 	DPRINTF(3, (CE_CONT,
1487 	    CONS "%s: time:%ld %s:called: isr:0x%b rx_active_head: %d",
1488 	    dp->name, ddi_get_lbolt(), __func__,
1489 	    isr, INTR_BITS, dp->rx_active_head));
1490 
1491 	if (!dp->mac_active) {
1492 		/* the device is going to stop */
1493 		lp->our_intr_bits = 0;
1494 		return (DDI_INTR_CLAIMED);
1495 	}
1496 
1497 	isr &= lp->our_intr_bits;
1498 
1499 	if (isr & (ISR_RXSOVR | ISR_RXORN | ISR_RXIDLE | ISR_RXERR |
1500 	    ISR_RXDESC | ISR_RXOK)) {
1501 		(void) gem_receive(dp);
1502 
1503 		if (isr & (ISR_RXSOVR | ISR_RXORN)) {
1504 			DPRINTF(0, (CE_CONT,
1505 			    CONS "%s: rx fifo overrun: isr %b",
1506 			    dp->name, isr, INTR_BITS));
1507 			/* no need to restart rx */
1508 			dp->stats.overflow++;
1509 		}
1510 
1511 		if (isr & ISR_RXIDLE) {
1512 			DPRINTF(0, (CE_CONT,
1513 			    CONS "%s: rx buffer ran out: isr %b",
1514 			    dp->name, isr, INTR_BITS));
1515 
1516 			dp->stats.norcvbuf++;
1517 
1518 			/*
1519 			 * Make RXDP points the head of receive
1520 			 * buffer list.
1521 			 */
1522 			OUTL(dp, RXDP, dp->rx_ring_dma +
1523 			    SFE_DESC_SIZE *
1524 			    SLOT(dp->rx_active_head, rx_ring_size));
1525 
1526 			/* Restart the receive engine */
1527 			OUTL(dp, CR, lp->cr | CR_RXE);
1528 		}
1529 	}
1530 
1531 	if (isr & (ISR_TXURN | ISR_TXERR | ISR_TXDESC |
1532 	    ISR_TXIDLE | ISR_TXOK)) {
1533 		/* need to reclaim tx buffers */
1534 		if (gem_tx_done(dp)) {
1535 			flags |= INTR_RESTART_TX;
1536 		}
1537 		/*
1538 		 * XXX - tx error statistics will be counted in
1539 		 * sfe_tx_desc_stat() and no need to restart tx on errors.
1540 		 */
1541 	}
1542 
1543 	if (isr & (ISR_DPERR | ISR_SSERR | ISR_RMABT | ISR_RTABT)) {
1544 		cmn_err(CE_WARN, "%s: ERROR interrupt: isr %b.",
1545 		    dp->name, isr, INTR_BITS);
1546 
1547 		(void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
1548 		flags |= INTR_RESTART_TX;
1549 	}
1550 
1551 	DPRINTF(5, (CE_CONT, CONS "%s: %s: return: isr: %b",
1552 	    dp->name, __func__, isr, INTR_BITS));
1553 
1554 	return (DDI_INTR_CLAIMED | flags);
1555 }
1556 
1557 /* ======================================================== */
1558 /*
1559  * HW dependent MII routines
1560  */
1561 /* ======================================================== */
1562 
1563 /*
1564  * MII routines for NS DP83815
1565  */
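/*
 * The DP83815's internal PHY registers are memory mapped at
 * MII_REGS_BASE with a 4-byte stride, so no MDIO serial protocol is
 * needed: the read and write routines below are plain register
 * accesses, and sfe_mii_sync_dp83815() has nothing to do.
 */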
1566 static void
1567 sfe_mii_sync_dp83815(struct gem_dev *dp)
1568 {
1569 	/* do nothing */
1570 }
1571 
1572 static uint16_t
1573 sfe_mii_read_dp83815(struct gem_dev *dp, uint_t offset)
1574 {
1575 	DPRINTF(4, (CE_CONT, CONS"%s: %s: offset 0x%x",
1576 	    dp->name, __func__, offset));
1577 	return ((uint16_t)INL(dp, MII_REGS_BASE + offset*4));
1578 }
1579 
1580 static void
1581 sfe_mii_write_dp83815(struct gem_dev *dp, uint_t offset, uint16_t val)
1582 {
1583 	DPRINTF(4, (CE_CONT, CONS"%s: %s: offset 0x%x 0x%x",
1584 	    dp->name, __func__, offset, val));
1585 	OUTL(dp, MII_REGS_BASE + offset*4, val);
1586 }
1587 
1588 static int
1589 sfe_mii_config_dp83815(struct gem_dev *dp)
1590 {
1591 	uint32_t	srr;
1592 
1593 	srr = INL(dp, SRR) & SRR_REV;
1594 
1595 	DPRINTF(0, (CE_CONT, CONS "%s: srr:0x%04x %04x %04x %04x %04x %04x",
1596 	    dp->name, srr,
1597 	    INW(dp, 0x00cc),	/* PGSEL */
1598 	    INW(dp, 0x00e4),	/* PMDCSR */
1599 	    INW(dp, 0x00fc),	/* TSTDAT */
1600 	    INW(dp, 0x00f4),	/* DSPCFG */
1601 	    INW(dp, 0x00f8)));	/* SDCFG */
1602 
1603 	if (srr == SRR_REV_DP83815CVNG) {
1604 		/*
1605 		 * The NS datasheet says that the DP83815CVNG needs the
1606 		 * following registers patched to optimize its performance.
1607 		 * A report said that CRC errors on RX disappeared
1608 		 * with the patch.
1609 		 */
1610 		OUTW(dp, 0x00cc, 0x0001);	/* PGSEL */
1611 		OUTW(dp, 0x00e4, 0x189c);	/* PMDCSR */
1612 		OUTW(dp, 0x00fc, 0x0000);	/* TSTDAT */
1613 		OUTW(dp, 0x00f4, 0x5040);	/* DSPCFG */
1614 		OUTW(dp, 0x00f8, 0x008c);	/* SDCFG */
1615 		OUTW(dp, 0x00cc, 0x0000);	/* PGSEL */
1616 
1617 		DPRINTF(0, (CE_CONT,
1618 		    CONS "%s: PHY patched %04x %04x %04x %04x %04x",
1619 		    dp->name,
1620 		    INW(dp, 0x00cc),	/* PGSEL */
1621 		    INW(dp, 0x00e4),	/* PMDCSR */
1622 		    INW(dp, 0x00fc),	/* TSTDAT */
1623 		    INW(dp, 0x00f4),	/* DSPCFG */
1624 		    INW(dp, 0x00f8)));	/* SDCFG */
1625 	} else if (((srr ^ SRR_REV_DP83815DVNG) & 0xff00) == 0 ||
1626 	    ((srr ^ SRR_REV_DP83816AVNG) & 0xff00) == 0) {
1627 		/*
1628 		 * Additional patches for later chipsets
1629 		 */
1630 		OUTW(dp, 0x00cc, 0x0001);	/* PGSEL */
1631 		OUTW(dp, 0x00e4, 0x189c);	/* PMDCSR */
1632 		OUTW(dp, 0x00cc, 0x0000);	/* PGSEL */
1633 
1634 		DPRINTF(0, (CE_CONT,
1635 		    CONS "%s: PHY patched %04x %04x",
1636 		    dp->name,
1637 		    INW(dp, 0x00cc),	/* PGSEL */
1638 		    INW(dp, 0x00e4)));	/* PMDCSR */
1639 	}
1640 
1641 	return (gem_mii_config_default(dp));
1642 }
1643 
1644 static int
1645 sfe_mii_probe_dp83815(struct gem_dev *dp)
1646 {
1647 	uint32_t	val;
1648 
1649 	/* try external phy first */
1650 	DPRINTF(0, (CE_CONT, CONS "%s: %s: trying external phy",
1651 	    dp->name, __func__));
1652 	dp->mii_phy_addr = 0;
1653 	dp->gc.gc_mii_sync = &sfe_mii_sync_sis900;
1654 	dp->gc.gc_mii_read = &sfe_mii_read_sis900;
1655 	dp->gc.gc_mii_write = &sfe_mii_write_sis900;
1656 
1657 	val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
1658 	OUTL(dp, CFG, val | CFG_EXT_PHY | CFG_PHY_DIS);
1659 
1660 	if (gem_mii_probe_default(dp) == GEM_SUCCESS) {
1661 		return (GEM_SUCCESS);
1662 	}
1663 
1664 	/* switch to internal phy */
1665 	DPRINTF(0, (CE_CONT, CONS "%s: %s: switching to internal phy",
1666 	    dp->name, __func__));
1667 	dp->mii_phy_addr = -1;
1668 	dp->gc.gc_mii_sync = &sfe_mii_sync_dp83815;
1669 	dp->gc.gc_mii_read = &sfe_mii_read_dp83815;
1670 	dp->gc.gc_mii_write = &sfe_mii_write_dp83815;
1671 
1672 	val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
1673 	OUTL(dp, CFG, val | CFG_PAUSE_ADV | CFG_PHY_RST);
1674 	drv_usecwait(100);	/* keep the RST bit asserted for a while */
1675 	OUTL(dp, CFG, val | CFG_PAUSE_ADV);
1676 
1677 	/* wait for PHY reset */
1678 	delay(drv_usectohz(10000));
1679 
1680 	return (gem_mii_probe_default(dp));
1681 }
1682 
1683 static int
1684 sfe_mii_init_dp83815(struct gem_dev *dp)
1685 {
1686 	uint32_t	val;
1687 
1688 	val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
1689 
1690 	if (dp->mii_phy_addr == -1) {
1691 		/* select internal phy */
1692 		OUTL(dp, CFG, val | CFG_PAUSE_ADV);
1693 	} else {
1694 		/* select external phy */
1695 		OUTL(dp, CFG, val | CFG_EXT_PHY | CFG_PHY_DIS);
1696 	}
1697 
1698 	return (GEM_SUCCESS);
1699 }
1700 
1701 /*
1702  * MII routines for SiS900
1703  */
1704 #define	MDIO_DELAY(dp)	{(void) INL(dp, MEAR); (void) INL(dp, MEAR); }
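/*
 * The routines below bit-bang a standard IEEE 802.3 clause-22 MDIO
 * frame through the MEAR register: a preamble of 32 ones
 * (sfe_mii_sync_sis900), then start (01), opcode (10 read / 01 write),
 * a 5-bit PHY address, a 5-bit register address, a turnaround, and
 * 16 data bits, clocked by toggling MEAR_MDC.
 */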
1705 static void
1706 sfe_mii_sync_sis900(struct gem_dev *dp)
1707 {
1708 	int	i;
1709 
1710 	/* send 32 ONEs to make the MII line idle */
1711 	for (i = 0; i < 32; i++) {
1712 		OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO);
1713 		MDIO_DELAY(dp);
1714 		OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO | MEAR_MDC);
1715 		MDIO_DELAY(dp);
1716 	}
1717 }
1718 
1719 static int
1720 sfe_mii_config_sis900(struct gem_dev *dp)
1721 {
1722 	struct sfe_dev	*lp = dp->private;
1723 
1724 	/* Do chip dependent setup */
1725 	if ((dp->mii_phy_id & PHY_MASK) == PHY_ICS1893) {
1726 		/* workaround for ICS1893 PHY */
1727 		gem_mii_write(dp, 0x0018, 0xD200);
1728 	}
1729 
1730 	if (lp->revid == SIS630E_900_REV) {
1731 		/*
1732 		 * The SiS 630E has buggy default values
1733 		 * in its PHY registers.
1734 		 */
1735 		gem_mii_write(dp, MII_AN_ADVERT, 0x05e1);
1736 		gem_mii_write(dp, MII_CONFIG1, 0x0022);
1737 		gem_mii_write(dp, MII_CONFIG2, 0xff00);
1738 		gem_mii_write(dp, MII_MASK,    0xffc0);
1739 	}
1740 	sfe_set_eq_sis630(dp);
1741 
1742 	return (gem_mii_config_default(dp));
1743 }
1744 
1745 static uint16_t
1746 sfe_mii_read_sis900(struct gem_dev *dp, uint_t reg)
1747 {
1748 	uint32_t	cmd;
1749 	uint16_t	ret;
1750 	int		i;
1751 	uint32_t	data;
1752 
1753 	cmd = MII_READ_CMD(dp->mii_phy_addr, reg);
1754 
1755 	for (i = 31; i >= 18; i--) {
1756 		data = ((cmd >> i) & 1) <<  MEAR_MDIO_SHIFT;
1757 		OUTL(dp, MEAR, data | MEAR_MDDIR);
1758 		MDIO_DELAY(dp);
1759 		OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC);
1760 		MDIO_DELAY(dp);
1761 	}
1762 
1763 	/* turn around cycle */
1764 	OUTL(dp, MEAR, 0);
1765 	MDIO_DELAY(dp);
1766 
1767 	/* get response from PHY */
1768 	OUTL(dp, MEAR, MEAR_MDC);
1769 	MDIO_DELAY(dp);
1770 
1771 	OUTL(dp, MEAR, 0);
1772 #if DEBUG_LEVEL > 0
1773 	(void) INL(dp, MEAR);	/* delay */
1774 	if (INL(dp, MEAR) & MEAR_MDIO) {
1775 		cmn_err(CE_WARN, "%s: PHY@%d did not respond",
1776 		    dp->name, dp->mii_phy_addr);
1777 	}
1778 #else
1779 	MDIO_DELAY(dp);
1780 #endif
1781 	/* terminate response cycle */
1782 	OUTL(dp, MEAR, MEAR_MDC);
1783 	MDIO_DELAY(dp);
1784 
1785 	ret = 0;	/* to avoid lint errors */
1786 	for (i = 16; i > 0; i--) {
1787 		OUTL(dp, MEAR, 0);
1788 		(void) INL(dp, MEAR);	/* delay */
1789 		ret = (ret << 1) | ((INL(dp, MEAR) >> MEAR_MDIO_SHIFT) & 1);
1790 		OUTL(dp, MEAR, MEAR_MDC);
1791 		MDIO_DELAY(dp);
1792 	}
1793 
1794 	/* send two idle(Z) bits to terminate the read cycle */
1795 	for (i = 0; i < 2; i++) {
1796 		OUTL(dp, MEAR, 0);
1797 		MDIO_DELAY(dp);
1798 		OUTL(dp, MEAR, MEAR_MDC);
1799 		MDIO_DELAY(dp);
1800 	}
1801 
1802 	return (ret);
1803 }
1804 
1805 static void
1806 sfe_mii_write_sis900(struct gem_dev *dp, uint_t reg, uint16_t val)
1807 {
1808 	uint32_t	cmd;
1809 	int		i;
1810 	uint32_t	data;
1811 
1812 	cmd = MII_WRITE_CMD(dp->mii_phy_addr, reg, val);
1813 
1814 	for (i = 31; i >= 0; i--) {
1815 		data = ((cmd >> i) & 1) << MEAR_MDIO_SHIFT;
1816 		OUTL(dp, MEAR, data | MEAR_MDDIR);
1817 		MDIO_DELAY(dp);
1818 		OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC);
1819 		MDIO_DELAY(dp);
1820 	}
1821 
1822 	/* send two idle(Z) bits to terminate the write cycle. */
1823 	for (i = 0; i < 2; i++) {
1824 		OUTL(dp, MEAR, 0);
1825 		MDIO_DELAY(dp);
1826 		OUTL(dp, MEAR, MEAR_MDC);
1827 		MDIO_DELAY(dp);
1828 	}
1829 }
1830 #undef MDIO_DELAY
1831 
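/*
 * sfe_set_eq_sis630() implements equalizer tuning for the PHY embedded
 * in SiS630-family chipsets: while the link is up it samples the
 * equalizer value (MII_RESV bits 7:3) ten times, then derives a new
 * setting from the minimum and maximum observed, using
 * revision-specific rules for 630E/EA1/ET versus 630A with a
 * 630B0/B1 host bridge.
 */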
1832 static void
1833 sfe_set_eq_sis630(struct gem_dev *dp)
1834 {
1835 	uint16_t	reg14h;
1836 	uint16_t	eq_value;
1837 	uint16_t	max_value;
1838 	uint16_t	min_value;
1839 	int		i;
1840 	uint8_t		rev;
1841 	struct sfe_dev	*lp = dp->private;
1842 
1843 	rev = lp->revid;
1844 
1845 	if (!(rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
1846 	    rev == SIS630A_900_REV || rev == SIS630ET_900_REV)) {
1847 		/* it doesn't have an internal PHY */
1848 		return;
1849 	}
1850 
1851 	if (dp->mii_state == MII_STATE_LINKUP) {
1852 		reg14h = gem_mii_read(dp, MII_RESV);
1853 		gem_mii_write(dp, MII_RESV, (0x2200 | reg14h) & 0xBFFF);
1854 
1855 		eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3;
1856 		max_value = min_value = eq_value;
1857 		for (i = 1; i < 10; i++) {
1858 			eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3;
1859 			max_value = max(eq_value, max_value);
1860 			min_value = min(eq_value, min_value);
1861 		}
1862 
1863 		/* for 630E, rule to determine the equalizer value */
1864 		if (rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
1865 		    rev == SIS630ET_900_REV) {
1866 			if (max_value < 5) {
1867 				eq_value = max_value;
1868 			} else if (5 <= max_value && max_value < 15) {
1869 				eq_value =
1870 				    max(max_value + 1,
1871 				    min_value + 2);
1872 			} else if (15 <= max_value) {
1873 				eq_value =
1874 				    max(max_value + 5,
1875 				    min_value + 6);
1876 			}
1877 		}
1878 		/* for 630B0&B1, rule to determine the equalizer value */
1879 		else
1880 		if (rev == SIS630A_900_REV &&
1881 		    (lp->bridge_revid == SIS630B0 ||
1882 		    lp->bridge_revid == SIS630B1)) {
1883 
1884 			if (max_value == 0) {
1885 				eq_value = 3;
1886 			} else {
1887 				eq_value = (max_value + min_value + 1)/2;
1888 			}
1889 		}
1890 		/* write equalizer value and setting */
1891 		reg14h = gem_mii_read(dp, MII_RESV) & ~0x02f8;
1892 		reg14h |= 0x6000 | (eq_value << 3);
1893 		gem_mii_write(dp, MII_RESV, reg14h);
1894 	} else {
1895 		reg14h = (gem_mii_read(dp, MII_RESV) & ~0x4000) | 0x2000;
1896 		if (rev == SIS630A_900_REV &&
1897 		    (lp->bridge_revid == SIS630B0 ||
1898 		    lp->bridge_revid == SIS630B1)) {
1899 
1900 			reg14h |= 0x0200;
1901 		}
1902 		gem_mii_write(dp, MII_RESV, reg14h);
1903 	}
1904 }
1905 
1906 /* ======================================================== */
1907 /*
1908  * OS-dependent (device driver) routines
1909  */
1910 /* ======================================================== */
1911 static void
1912 sfe_chipinfo_init_sis900(struct gem_dev *dp)
1913 {
1914 	int		rev;
1915 	struct sfe_dev	*lp = (struct sfe_dev *)dp->private;
1916 
1917 	rev = lp->revid;
1918 
1919 	if (rev == SIS962_900_REV /* 0x91 */) {
1920 		/* sis962 or later */
1921 		lp->get_mac_addr = &sfe_get_mac_addr_sis962;
1922 	} else {
1923 		/* sis900 */
1924 		lp->get_mac_addr = &sfe_get_mac_addr_sis900;
1925 	}
1926 
1927 	lp->bridge_revid = 0;
1928 
1929 	if (rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
1930 	    rev == SIS630A_900_REV || rev == SIS630ET_900_REV) {
1931 		/*
1932 		 * read the host bridge revision; sfe_set_eq_sis630() keys off it
1933 		 */
1934 		dev_info_t	*bridge;
1935 		ddi_acc_handle_t bridge_handle;
1936 
1937 		if ((bridge = sfe_search_pci_dev(0x1039, 0x630)) == NULL) {
1938 			cmn_err(CE_WARN,
1939 			    "%s: cannot find host bridge (pci1039,630)",
1940 			    dp->name);
1941 			return;
1942 		}
1943 
1944 		if (pci_config_setup(bridge, &bridge_handle) != DDI_SUCCESS) {
1945 			cmn_err(CE_WARN, "%s: pci_config_setup failed",
1946 			    dp->name);
1947 			return;
1948 		}
1949 
1950 		lp->bridge_revid =
1951 		    pci_config_get8(bridge_handle, PCI_CONF_REVID);
1952 		pci_config_teardown(&bridge_handle);
1953 	}
1954 }
1955 
1956 static int
1957 sfe_attach_chip(struct gem_dev *dp)
1958 {
1959 	struct sfe_dev		*lp = (struct sfe_dev *)dp->private;
1960 
1961 	DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__));
1962 
1963 	/* set up the chip-dependent get_mac_addr function */
1964 	if (lp->chip->chip_type == CHIPTYPE_SIS900) {
1965 		sfe_chipinfo_init_sis900(dp);
1966 	} else {
1967 		lp->get_mac_addr = &sfe_get_mac_addr_dp83815;
1968 	}
1969 
1970 	/* read MAC address */
1971 	if (!(lp->get_mac_addr)(dp)) {
1972 		cmn_err(CE_WARN,
1973 		    "!%s: %s: failed to get factory MAC address;"
1974 		    " please specify a MAC address in sfe.conf",
1975 		    dp->name, __func__);
1976 		return (GEM_FAILURE);
1977 	}
1978 
1979 	if (lp->chip->chip_type == CHIPTYPE_DP83815) {
1980 		dp->mii_phy_addr = -1;	/* no need to scan PHY */
1981 		dp->misc_flag |= GEM_VLAN_SOFT;
1982 		dp->txthr += 4; /* VTAG_SIZE */
1983 	}
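	/* clamp the tx threshold so it never exceeds the tx FIFO capacity */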
1984 	dp->txthr = min(dp->txthr, TXFIFOSIZE - 2);
1985 
1986 	return (GEM_SUCCESS);
1987 }
1988 
1989 static int
1990 sfeattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1991 {
1992 	int			unit;
1993 	const char		*drv_name;
1994 	int			i;
1995 	ddi_acc_handle_t	conf_handle;
1996 	uint16_t		vid;
1997 	uint16_t		did;
1998 	uint8_t			rev;
1999 #ifdef DEBUG_LEVEL
2000 	uint32_t		iline;
2001 	uint8_t			latim;
2002 #endif
2003 	struct chip_info	*p;
2004 	struct gem_dev		*dp;
2005 	struct sfe_dev		*lp;
2006 	caddr_t			base;
2007 	ddi_acc_handle_t	regs_ha;
2008 	struct gem_conf		*gcp;
2009 
2010 	unit = ddi_get_instance(dip);
2011 	drv_name = ddi_driver_name(dip);
2012 
2013 	DPRINTF(3, (CE_CONT, CONS "%s%d: sfeattach: called", drv_name, unit));
2014 
2015 	/*
2016 	 * Common code after power-up
2017 	 */
2018 	if (pci_config_setup(dip, &conf_handle) != DDI_SUCCESS) {
2019 		cmn_err(CE_WARN, "%s%d: pci_config_setup failed",
2020 		    drv_name, unit);
2021 		goto err;
2022 	}
2023 
2024 	vid  = pci_config_get16(conf_handle, PCI_CONF_VENID);
2025 	did  = pci_config_get16(conf_handle, PCI_CONF_DEVID);
2026 	rev  = pci_config_get8(conf_handle, PCI_CONF_REVID);
2027 #ifdef DEBUG_LEVEL
2028 	iline = pci_config_get32(conf_handle, PCI_CONF_ILINE);
2029 	latim = pci_config_get8(conf_handle, PCI_CONF_LATENCY_TIMER);
2030 #endif
2031 #ifdef DEBUG_BUILT_IN_SIS900
2032 	rev  = SIS630E_900_REV;
2033 #endif
2034 	for (i = 0, p = sfe_chiptbl; i < CHIPTABLESIZE; i++, p++) {
2035 		if (p->venid == vid && p->devid == did) {
2036 			/* found */
2037 			goto chip_found;
2038 		}
2039 	}
2040 
2041 	/* Not found */
2042 	cmn_err(CE_WARN,
2043 	    "%s%d: sfeattach: unsupported PCI venid/devid (0x%x, 0x%x)",
2044 	    drv_name, unit, vid, did);
2045 	pci_config_teardown(&conf_handle);
2046 	goto err;
2047 
2048 chip_found:
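	/* enable I/O space, memory space and bus-master accesses */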
2049 	pci_config_put16(conf_handle, PCI_CONF_COMM,
2050 	    PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME |
2051 	    pci_config_get16(conf_handle, PCI_CONF_COMM));
2052 
2053 	/* ensure D0 mode */
2054 	(void) gem_pci_set_power_state(dip, conf_handle, PCI_PMCSR_D0);
2055 
2056 	pci_config_teardown(&conf_handle);
2057 
2058 	switch (cmd) {
2059 	case DDI_RESUME:
2060 		return (gem_resume(dip));
2061 
2062 	case DDI_ATTACH:
2063 
2064 		DPRINTF(0, (CE_CONT,
2065 		    CONS "%s%d: ilr 0x%08x, latency_timer:0x%02x",
2066 		    drv_name, unit, iline, latim));
2067 
2068 		/*
2069 		 * Map in the device registers.
2070 		 */
2071 		if (gem_pci_regs_map_setup(dip,
2072 		    (sfe_use_pcimemspace && p->chip_type == CHIPTYPE_DP83815)
2073 		    ? PCI_ADDR_MEM32 : PCI_ADDR_IO, PCI_ADDR_MASK,
2074 		    &sfe_dev_attr, &base, &regs_ha) != DDI_SUCCESS) {
2075 			cmn_err(CE_WARN,
2076 			    "%s%d: ddi_regs_map_setup failed",
2077 			    drv_name, unit);
2078 			goto err;
2079 		}
2080 
2081 		/*
2082 		 * construct gem configuration
2083 		 */
2084 		gcp = kmem_zalloc(sizeof (*gcp), KM_SLEEP);
2085 
2086 		/* name */
2087 		(void) sprintf(gcp->gc_name, "%s%d", drv_name, unit);
2088 
2089 		/* tx and rx descriptor and buffer configuration */
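		/* note: the *_align fields hold a byte mask, i.e. (alignment - 1) */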
2090 		gcp->gc_tx_buf_align = sizeof (uint8_t) - 1;
2091 		gcp->gc_tx_max_frags = MAXTXFRAGS;
2092 		gcp->gc_tx_max_descs_per_pkt = gcp->gc_tx_max_frags;
2093 		gcp->gc_tx_desc_unit_shift = 4;	/* 16 byte */
2094 		gcp->gc_tx_buf_size  = TX_BUF_SIZE;
2095 		gcp->gc_tx_buf_limit = gcp->gc_tx_buf_size;
2096 		gcp->gc_tx_ring_size = TX_RING_SIZE;
2097 		gcp->gc_tx_ring_limit = gcp->gc_tx_ring_size;
2098 		gcp->gc_tx_auto_pad  = B_TRUE;
2099 		gcp->gc_tx_copy_thresh = sfe_tx_copy_thresh;
2100 		gcp->gc_tx_desc_write_oo = B_TRUE;
2101 
2102 		gcp->gc_rx_buf_align = sizeof (uint8_t) - 1;
2103 		gcp->gc_rx_max_frags = MAXRXFRAGS;
2104 		gcp->gc_rx_desc_unit_shift = 4;
2105 		gcp->gc_rx_ring_size = RX_RING_SIZE;
2106 		gcp->gc_rx_buf_max   = RX_BUF_SIZE;
2107 		gcp->gc_rx_copy_thresh = sfe_rx_copy_thresh;
2108 
2109 		/* map attributes */
2110 		gcp->gc_dev_attr = sfe_dev_attr;
2111 		gcp->gc_buf_attr = sfe_buf_attr;
2112 		gcp->gc_desc_attr = sfe_buf_attr;
2113 
2114 		/* dma attributes */
2115 		gcp->gc_dma_attr_desc = sfe_dma_attr_desc;
2116 
2117 		gcp->gc_dma_attr_txbuf = sfe_dma_attr_buf;
2118 		gcp->gc_dma_attr_txbuf.dma_attr_align = gcp->gc_tx_buf_align+1;
2119 		gcp->gc_dma_attr_txbuf.dma_attr_sgllen = gcp->gc_tx_max_frags;
2120 
2121 		gcp->gc_dma_attr_rxbuf = sfe_dma_attr_buf;
2122 		gcp->gc_dma_attr_rxbuf.dma_attr_align = gcp->gc_rx_buf_align+1;
2123 		gcp->gc_dma_attr_rxbuf.dma_attr_sgllen = gcp->gc_rx_max_frags;
2124 
2125 		/* timeout parameters */
2126 		gcp->gc_tx_timeout = 3*ONESEC;
2127 		gcp->gc_tx_timeout_interval = ONESEC;
2128 		if (p->chip_type == CHIPTYPE_DP83815) {
2129 			/* workaround for tx hang */
2130 			gcp->gc_tx_timeout_interval = ONESEC/20; /* 50 ms */
2131 		}
2132 
2133 		/* MII timeout parameters */
2134 		gcp->gc_mii_link_watch_interval = ONESEC;
2135 		gcp->gc_mii_an_watch_interval   = ONESEC/5;
2136 		gcp->gc_mii_reset_timeout = MII_RESET_TIMEOUT;	/* 1 sec */
2137 		gcp->gc_mii_an_timeout = MII_AN_TIMEOUT;	/* 5 sec */
2138 		gcp->gc_mii_an_wait = 0;
2139 		gcp->gc_mii_linkdown_timeout = MII_LINKDOWN_TIMEOUT;
2140 
2141 		/* settings for a generic PHY */
2142 		gcp->gc_mii_an_delay = 0;
2143 		gcp->gc_mii_linkdown_action = MII_ACTION_RSA;
2144 		gcp->gc_mii_linkdown_timeout_action = MII_ACTION_RESET;
2145 		gcp->gc_mii_dont_reset = B_FALSE;
2146 
2147 
2148 		/* I/O methods */
2149 
2150 		/* mac operation */
2151 		gcp->gc_attach_chip = &sfe_attach_chip;
2152 		if (p->chip_type == CHIPTYPE_DP83815) {
2153 			gcp->gc_reset_chip = &sfe_reset_chip_dp83815;
2154 		} else {
2155 			gcp->gc_reset_chip = &sfe_reset_chip_sis900;
2156 		}
2157 		gcp->gc_init_chip  = &sfe_init_chip;
2158 		gcp->gc_start_chip = &sfe_start_chip;
2159 		gcp->gc_stop_chip  = &sfe_stop_chip;
2160 #ifdef USE_MULTICAST_HASHTBL
2161 		gcp->gc_multicast_hash = &sfe_mcast_hash;
2162 #endif
2163 		if (p->chip_type == CHIPTYPE_DP83815) {
2164 			gcp->gc_set_rx_filter = &sfe_set_rx_filter_dp83815;
2165 		} else {
2166 			gcp->gc_set_rx_filter = &sfe_set_rx_filter_sis900;
2167 		}
2168 		gcp->gc_set_media = &sfe_set_media;
2169 		gcp->gc_get_stats = &sfe_get_stats;
2170 		gcp->gc_interrupt = &sfe_interrupt;
2171 
2172 		/* descriptor operation */
2173 		gcp->gc_tx_desc_write = &sfe_tx_desc_write;
2174 		gcp->gc_tx_start = &sfe_tx_start;
2175 		gcp->gc_rx_desc_write = &sfe_rx_desc_write;
2176 		gcp->gc_rx_start = NULL;
2177 
2178 		gcp->gc_tx_desc_stat = &sfe_tx_desc_stat;
2179 		gcp->gc_rx_desc_stat = &sfe_rx_desc_stat;
2180 		gcp->gc_tx_desc_init = &sfe_tx_desc_init;
2181 		gcp->gc_rx_desc_init = &sfe_rx_desc_init;
2182 		gcp->gc_tx_desc_clean = &sfe_tx_desc_clean;
2183 		gcp->gc_rx_desc_clean = &sfe_rx_desc_clean;
2184 
2185 		/* mii operations */
2186 		if (p->chip_type == CHIPTYPE_DP83815) {
2187 			gcp->gc_mii_probe = &sfe_mii_probe_dp83815;
2188 			gcp->gc_mii_init = &sfe_mii_init_dp83815;
2189 			gcp->gc_mii_config = &sfe_mii_config_dp83815;
2190 			gcp->gc_mii_sync = &sfe_mii_sync_dp83815;
2191 			gcp->gc_mii_read = &sfe_mii_read_dp83815;
2192 			gcp->gc_mii_write = &sfe_mii_write_dp83815;
2193 			gcp->gc_mii_tune_phy = NULL;
2194 			gcp->gc_flow_control = FLOW_CONTROL_NONE;
2195 		} else {
2196 			gcp->gc_mii_probe = &gem_mii_probe_default;
2197 			gcp->gc_mii_init = NULL;
2198 			gcp->gc_mii_config = &sfe_mii_config_sis900;
2199 			gcp->gc_mii_sync = &sfe_mii_sync_sis900;
2200 			gcp->gc_mii_read = &sfe_mii_read_sis900;
2201 			gcp->gc_mii_write = &sfe_mii_write_sis900;
2202 			gcp->gc_mii_tune_phy = &sfe_set_eq_sis630;
2203 			gcp->gc_flow_control = FLOW_CONTROL_RX_PAUSE;
2204 		}
2205 
2206 		lp = kmem_zalloc(sizeof (*lp), KM_SLEEP);
2207 		lp->chip = p;
2208 		lp->revid = rev;
2209 		lp->our_intr_bits = 0;
2210 		lp->isr_pended = 0;
2211 
2212 		cmn_err(CE_CONT, CONS "%s%d: chip:%s rev:0x%02x",
2213 		    drv_name, unit, p->chip_name, rev);
2214 
2215 		dp = gem_do_attach(dip, 0, gcp, base, &regs_ha,
2216 		    lp, sizeof (*lp));
2217 		kmem_free(gcp, sizeof (*gcp));
2218 
2219 		if (dp == NULL) {
2220 			goto err_freelp;
2221 		}
2222 
2223 		return (DDI_SUCCESS);
2224 
2225 err_freelp:
2226 		kmem_free(lp, sizeof (*lp));
2227 err:
2228 		return (DDI_FAILURE);
2229 	}
2230 	return (DDI_FAILURE);
2231 }
2232 
2233 static int
2234 sfedetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2235 {
2236 	switch (cmd) {
2237 	case DDI_SUSPEND:
2238 		return (gem_suspend(dip));
2239 
2240 	case DDI_DETACH:
2241 		return (gem_do_detach(dip));
2242 	}
2243 	return (DDI_FAILURE);
2244 }
2245 
2246 /*
2247  * quiesce(9E) entry point.
2248  *
2249  * This function is called when the system is single-threaded at high
2250  * PIL with preemption disabled. Therefore, this function must not
2251  * block.
2252  *
2253  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
2254  * DDI_FAILURE indicates an error condition and should almost never happen.
2255  */
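/*
 * On SPARC this driver uses the ddi_quiesce_not_supported(9F) stub,
 * presumably because fast reboot, the main consumer of quiesce(9E),
 * is x86-only.
 */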
2256 #ifdef	__sparc
2257 #define	sfe_quiesce	ddi_quiesce_not_supported
2258 #else
2259 static int
2260 sfe_quiesce(dev_info_t *dip)
2261 {
2262 	struct gem_dev	*dp;
2263 	int	ret = 0;
2264 
2265 	dp = GEM_GET_DEV(dip);
2266 
2267 	if (dp == NULL)
2268 		return (DDI_FAILURE);
2269 
2270 	ret = sfe_stop_chip_quiesce(dp);
2271 
2272 	return (ret);
2273 }
2274 #endif
2275 
2276 /* ======================================================== */
2277 /*
2278  * OS-dependent (loadable streams driver) routines
2279  */
2280 /* ======================================================== */
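/*
 * DDI entry-point vector: identify and probe are nulldev, devo_reset
 * is nodev, no getinfo routine or streams table is supplied, and the
 * driver is flagged D_MP (safe for concurrent entry).
 */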
2281 DDI_DEFINE_STREAM_OPS(sfe_ops, nulldev, nulldev, sfeattach, sfedetach,
2282     nodev, NULL, D_MP, NULL, sfe_quiesce);
2283 
2284 static struct modldrv modldrv = {
2285 	&mod_driverops,	/* Type of module.  This one is a driver */
2286 	ident,
2287 	&sfe_ops,	/* driver ops */
2288 };
2289 
2290 static struct modlinkage modlinkage = {
2291 	MODREV_1, &modldrv, NULL
2292 };
2293 
2294 /* ======================================================== */
2295 /*
2296  * Loadable module support
2297  */
2298 /* ======================================================== */
2299 int
2300 _init(void)
2301 {
2302 	int	status;
2303 
2304 	DPRINTF(2, (CE_CONT, CONS "sfe: _init: called"));
2305 	gem_mod_init(&sfe_ops, "sfe");
2306 	status = mod_install(&modlinkage);
2307 	if (status != DDI_SUCCESS) {
2308 		gem_mod_fini(&sfe_ops);
2309 	}
2310 	return (status);
2311 }
2312 
2313 /*
2314  * _fini: module unload entry point
2315  */
2316 int
2317 _fini(void)
2318 {
2319 	int	status;
2320 
2321 	DPRINTF(2, (CE_CONT, CONS "sfe: _fini: called"));
2322 	status = mod_remove(&modlinkage);
2323 	if (status == DDI_SUCCESS) {
2324 		gem_mod_fini(&sfe_ops);
2325 	}
2326 	return (status);
2327 }
2328 
2329 int
2330 _info(struct modinfo *modinfop)
2331 {
2332 	return (mod_info(&modlinkage, modinfop));
2333 }
2334