xref: /illumos-gate/usr/src/uts/common/io/sfe/sfe.c (revision 3b8e64428fecd54234c133286f9c0009ad8940b0)
1 /*
2  *  sfe.c : DP83815/DP83816/SiS900 Fast Ethernet MAC driver for Solaris
3  *
4  * Copyright (c) 2002-2008 Masayuki Murayama.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  *    this list of conditions and the following disclaimer in the documentation
14  *    and/or other materials provided with the distribution.
15  *
16  * 3. Neither the name of the author nor the names of its contributors may be
17  *    used to endorse or promote products derived from this software without
18  *    specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31  * DAMAGE.
32  */
33 
34 /*
35  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
36  * Use is subject to license terms.
37  */
38 
39 /*
40  * System Header files.
41  */
42 #include <sys/types.h>
43 #include <sys/conf.h>
44 #include <sys/debug.h>
45 #include <sys/kmem.h>
46 #include <sys/modctl.h>
47 #include <sys/errno.h>
48 #include <sys/ddi.h>
49 #include <sys/sunddi.h>
50 #include <sys/byteorder.h>
51 #include <sys/ethernet.h>
52 #include <sys/pci.h>
53 
54 #include "sfe_mii.h"
55 #include "sfe_util.h"
56 #include "sfereg.h"
57 
58 char	ident[] = "sis900/dp83815 driver v" "2.6.1t30os";
59 
60 /* Debugging support */
61 #ifdef DEBUG_LEVEL
62 static int sfe_debug = DEBUG_LEVEL;
63 #if DEBUG_LEVEL > 4
64 #define	CONS	"^"
65 #else
66 #define	CONS	"!"
67 #endif
68 #define	DPRINTF(n, args)	if (sfe_debug > (n)) cmn_err args
69 #else
70 #define	CONS	"!"
71 #define	DPRINTF(n, args)
72 #endif
73 
74 /*
75  * Useful macros and typedefs
76  */
77 #define	ONESEC		(drv_usectohz(1*1000000))
78 #define	ROUNDUP2(x, a)	(((x) + (a) - 1) & ~((a) - 1))
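/*
 * Editor's note: ROUNDUP2() rounds x up to the next multiple of a,
 * where a must be a power of two.  For example ROUNDUP2(100, 32) == 128
 * and ROUNDUP2(128, 32) == 128; sfe_set_media() below uses it to align
 * FIFO thresholds to the hardware's FIFO units.
 */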
79 
80 /*
81  * Our configuration
82  */
83 #define	MAXTXFRAGS	1
84 #define	MAXRXFRAGS	1
85 
86 #ifndef	TX_BUF_SIZE
87 #define	TX_BUF_SIZE	64
88 #endif
89 #ifndef	TX_RING_SIZE
90 #if MAXTXFRAGS == 1
91 #define	TX_RING_SIZE	TX_BUF_SIZE
92 #else
93 #define	TX_RING_SIZE	(TX_BUF_SIZE * 4)
94 #endif
95 #endif
96 
97 #ifndef	RX_BUF_SIZE
98 #define	RX_BUF_SIZE	256
99 #endif
100 #ifndef	RX_RING_SIZE
101 #define	RX_RING_SIZE	RX_BUF_SIZE
102 #endif
103 
104 #define	OUR_INTR_BITS	\
105 	(ISR_DPERR | ISR_SSERR | ISR_RMABT | ISR_RTABT | ISR_RXSOVR |	\
106 	ISR_TXURN | ISR_TXDESC | ISR_TXERR |	\
107 	ISR_RXORN | ISR_RXIDLE | ISR_RXOK | ISR_RXERR)
108 
109 #define	USE_MULTICAST_HASHTBL
110 
111 static int	sfe_tx_copy_thresh = 256;
112 static int	sfe_rx_copy_thresh = 256;
113 
114 /* special PHY registers for SIS900 */
115 #define	MII_CONFIG1	0x0010
116 #define	MII_CONFIG2	0x0011
117 #define	MII_MASK	0x0013
118 #define	MII_RESV	0x0014
119 
120 #define	PHY_MASK		0xfffffff0
121 #define	PHY_SIS900_INTERNAL	0x001d8000
122 #define	PHY_ICS1893		0x0015f440
123 
124 
125 #define	SFE_DESC_SIZE	16	/* including pads rounding up to power of 2 */
126 
127 /*
128  * Supported chips
129  */
130 struct chip_info {
131 	uint16_t	venid;
132 	uint16_t	devid;
133 	char		*chip_name;
134 	int		chip_type;
135 #define	CHIPTYPE_DP83815	0
136 #define	CHIPTYPE_SIS900		1
137 };
138 
139 /*
140  * Chip dependent MAC state
141  */
142 struct sfe_dev {
143 	/* misc HW information */
144 	struct chip_info	*chip;
145 	uint32_t		our_intr_bits;
146 	uint32_t		isr_pended;
147 	uint32_t		cr;
148 	uint_t			tx_drain_threshold;
149 	uint_t			tx_fill_threshold;
150 	uint_t			rx_drain_threshold;
151 	uint_t			rx_fill_threshold;
152 	uint8_t			revid;	/* revision from PCI configuration */
153 	boolean_t		(*get_mac_addr)(struct gem_dev *);
154 	uint8_t			mac_addr[ETHERADDRL];
155 	uint8_t			bridge_revid;
156 };
157 
158 /*
159  * Hardware information
160  */
161 struct chip_info sfe_chiptbl[] = {
162 	{ 0x1039, 0x0900, "SiS900", CHIPTYPE_SIS900, },
163 	{ 0x100b, 0x0020, "DP83815/83816", CHIPTYPE_DP83815, },
164 	{ 0x1039, 0x7016, "SiS7016", CHIPTYPE_SIS900, },
165 };
166 #define	CHIPTABLESIZE (sizeof (sfe_chiptbl)/sizeof (struct chip_info))
167 
168 /* ======================================================== */
169 
170 /* mii operations */
171 static void  sfe_mii_sync_dp83815(struct gem_dev *);
172 static void  sfe_mii_sync_sis900(struct gem_dev *);
173 static uint16_t  sfe_mii_read_dp83815(struct gem_dev *, uint_t);
174 static uint16_t  sfe_mii_read_sis900(struct gem_dev *, uint_t);
175 static void sfe_mii_write_dp83815(struct gem_dev *, uint_t, uint16_t);
176 static void sfe_mii_write_sis900(struct gem_dev *, uint_t, uint16_t);
177 static void sfe_set_eq_sis630(struct gem_dev *);
178 /* nic operations */
179 static int sfe_reset_chip_sis900(struct gem_dev *);
180 static int sfe_reset_chip_dp83815(struct gem_dev *);
181 static int sfe_init_chip(struct gem_dev *);
182 static int sfe_start_chip(struct gem_dev *);
183 static int sfe_stop_chip(struct gem_dev *);
184 static int sfe_set_media(struct gem_dev *);
185 static int sfe_set_rx_filter_dp83815(struct gem_dev *);
186 static int sfe_set_rx_filter_sis900(struct gem_dev *);
187 static int sfe_get_stats(struct gem_dev *);
188 static int sfe_attach_chip(struct gem_dev *);
189 
190 /* descriptor operations */
191 static int sfe_tx_desc_write(struct gem_dev *dp, int slot,
192 		    ddi_dma_cookie_t *dmacookie, int frags, uint64_t flags);
193 static void sfe_tx_start(struct gem_dev *dp, int startslot, int nslot);
194 static void sfe_rx_desc_write(struct gem_dev *dp, int slot,
195 		    ddi_dma_cookie_t *dmacookie, int frags);
196 static uint_t sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc);
197 static uint64_t sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc);
198 
199 static void sfe_tx_desc_init(struct gem_dev *dp, int slot);
200 static void sfe_rx_desc_init(struct gem_dev *dp, int slot);
201 static void sfe_tx_desc_clean(struct gem_dev *dp, int slot);
202 static void sfe_rx_desc_clean(struct gem_dev *dp, int slot);
203 
204 /* interrupt handler */
205 static uint_t sfe_interrupt(struct gem_dev *dp);
206 
207 /* ======================================================== */
208 
209 /* mapping attributes */
210 /* Data access requirements. */
211 static struct ddi_device_acc_attr sfe_dev_attr = {
212 	DDI_DEVICE_ATTR_V0,
213 	DDI_STRUCTURE_LE_ACC,
214 	DDI_STRICTORDER_ACC
215 };
216 
217 /* On sparc, buffers should be native endian for speed */
218 static struct ddi_device_acc_attr sfe_buf_attr = {
219 	DDI_DEVICE_ATTR_V0,
220 	DDI_NEVERSWAP_ACC,	/* native endianness */
221 	DDI_STRICTORDER_ACC
222 };
223 
224 static ddi_dma_attr_t sfe_dma_attr_buf = {
225 	DMA_ATTR_V0,		/* dma_attr_version */
226 	0,			/* dma_attr_addr_lo */
227 	0xffffffffull,		/* dma_attr_addr_hi */
228 	0x00000fffull,		/* dma_attr_count_max */
229 	0, /* patched later */	/* dma_attr_align */
230 	0x000003fc,		/* dma_attr_burstsizes */
231 	1,			/* dma_attr_minxfer */
232 	0x00000fffull,		/* dma_attr_maxxfer */
233 	0xffffffffull,		/* dma_attr_seg */
234 	0, /* patched later */	/* dma_attr_sgllen */
235 	1,			/* dma_attr_granular */
236 	0			/* dma_attr_flags */
237 };
238 
239 static ddi_dma_attr_t sfe_dma_attr_desc = {
240 	DMA_ATTR_V0,		/* dma_attr_version */
241 	16,			/* dma_attr_addr_lo */
242 	0xffffffffull,		/* dma_attr_addr_hi */
243 	0xffffffffull,		/* dma_attr_count_max */
244 	16,			/* dma_attr_align */
245 	0x000003fc,		/* dma_attr_burstsizes */
246 	1,			/* dma_attr_minxfer */
247 	0xffffffffull,		/* dma_attr_maxxfer */
248 	0xffffffffull,		/* dma_attr_seg */
249 	1,			/* dma_attr_sgllen */
250 	1,			/* dma_attr_granular */
251 	0			/* dma_attr_flags */
252 };
253 
254 uint32_t sfe_use_pcimemspace = 0;
255 
256 /* ======================================================== */
257 /*
258  * HW manipulation routines
259  */
260 /* ======================================================== */
261 
262 #define	SFE_EEPROM_DELAY(dp)	\
263 	{ (void) INL(dp, EROMAR); (void) INL(dp, EROMAR); }
264 #define	EE_CMD_READ	6
265 #define	EE_CMD_SHIFT	6
266 
267 static uint16_t
268 sfe_read_eeprom(struct gem_dev *dp, uint_t offset)
269 {
270 	int		eedi;
271 	int		i;
272 	uint16_t	ret;
273 
274 	/* ensure de-assert chip select */
275 	OUTL(dp, EROMAR, 0);
276 	SFE_EEPROM_DELAY(dp);
277 	OUTL(dp, EROMAR, EROMAR_EESK);
278 	SFE_EEPROM_DELAY(dp);
279 
280 	/* assert chip select */
281 	offset |= EE_CMD_READ << EE_CMD_SHIFT;
282 
283 	for (i = 8; i >= 0; i--) {
284 		/* make command */
285 		eedi = ((offset >> i) & 1) << EROMAR_EEDI_SHIFT;
286 
287 		/* send 1 bit */
288 		OUTL(dp, EROMAR, EROMAR_EECS | eedi);
289 		SFE_EEPROM_DELAY(dp);
290 		OUTL(dp, EROMAR, EROMAR_EECS | eedi | EROMAR_EESK);
291 		SFE_EEPROM_DELAY(dp);
292 	}
293 
294 	OUTL(dp, EROMAR, EROMAR_EECS);
295 
296 	ret = 0;
297 	for (i = 0; i < 16; i++) {
298 		/* Get 1 bit */
299 		OUTL(dp, EROMAR, EROMAR_EECS);
300 		SFE_EEPROM_DELAY(dp);
301 		OUTL(dp, EROMAR, EROMAR_EECS | EROMAR_EESK);
302 		SFE_EEPROM_DELAY(dp);
303 
304 		ret = (ret << 1) | ((INL(dp, EROMAR) >> EROMAR_EEDO_SHIFT) & 1);
305 	}
306 
307 	OUTL(dp, EROMAR, 0);
308 	SFE_EEPROM_DELAY(dp);
309 
310 	return (ret);
311 }
312 #undef SFE_EEPROM_DELAY
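
/*
 * Editor's note: sfe_read_eeprom() above bit-bangs a 3-wire
 * (Microwire-style) serial EEPROM: nine command/address bits are
 * shifted out MSB first (the 110 read opcode from EE_CMD_READ,
 * followed by a 6-bit word offset), then 16 data bits are clocked
 * back in.  A minimal usage sketch, splitting one 16-bit word into
 * two bytes the way the callers below do:
 *
 *	uint16_t	val = sfe_read_eeprom(dp, 0x8);
 *	mac[0] = (uint8_t)val;		// low byte first
 *	mac[1] = (uint8_t)(val >> 8);
 */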
313 
314 static boolean_t
315 sfe_get_mac_addr_dp83815(struct gem_dev *dp)
316 {
317 	uint8_t		*mac;
318 	uint_t		val;
319 	int		i;
320 
321 #define	BITSET(p, ix, v)	(p)[(ix)/8] |= ((v) ? 1 : 0) << ((ix) & 0x7)
322 
323 	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
324 
325 	mac = dp->dev_addr.ether_addr_octet;
326 
327 	/* first of all, clear MAC address buffer */
328 	bzero(mac, ETHERADDRL);
329 
330 	/* get bit 0 */
331 	val = sfe_read_eeprom(dp, 0x6);
332 	BITSET(mac, 0, val & 1);
333 
334 	/* get bit 1 - 16 */
335 	val = sfe_read_eeprom(dp, 0x7);
336 	for (i = 0; i < 16; i++) {
337 		BITSET(mac, 1 + i, val & (1 << (15 - i)));
338 	}
339 
340 	/* get bit 17 -  32 */
341 	val = sfe_read_eeprom(dp, 0x8);
342 	for (i = 0; i < 16; i++) {
343 		BITSET(mac, 17 + i, val & (1 << (15 - i)));
344 	}
345 
346 	/* get bit 33 -  47 */
347 	val = sfe_read_eeprom(dp, 0x9);
348 	for (i = 0; i < 15; i++) {
349 		BITSET(mac, 33 + i, val & (1 << (15 - i)));
350 	}
351 
352 	return (B_TRUE);
353 #undef BITSET
354 }
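
/*
 * Editor's note: on the DP83815 the 48 bits of the MAC address are
 * stored unaligned across EEPROM words 6..9, so the BITSET() macro
 * above reassembles the address one bit at a time, LSB-first within
 * each output byte.  For example BITSET(mac, 9, 1) sets bit 1 of
 * mac[1], since 9/8 == 1 and (9 & 0x7) == 1.
 */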
355 
356 static boolean_t
357 sfe_get_mac_addr_sis900(struct gem_dev *dp)
358 {
359 	uint_t		val;
360 	int		i;
361 	uint8_t		*mac;
362 
363 	mac = dp->dev_addr.ether_addr_octet;
364 
365 	for (i = 0; i < ETHERADDRL/2; i++) {
366 		val = sfe_read_eeprom(dp, 0x8 + i);
367 		*mac++ = (uint8_t)val;
368 		*mac++ = (uint8_t)(val >> 8);
369 	}
370 
371 	return (B_TRUE);
372 }
373 
374 static dev_info_t *
375 sfe_search_pci_dev_subr(dev_info_t *cur_node, int vendor_id, int device_id)
376 {
377 	dev_info_t	*child_id;
378 	dev_info_t	*ret;
379 	int		vid, did;
380 
381 	if (cur_node == NULL) {
382 		return (NULL);
383 	}
384 
385 	/* check brothers */
386 	do {
387 		vid = ddi_prop_get_int(DDI_DEV_T_ANY, cur_node,
388 		    DDI_PROP_DONTPASS, "vendor-id", -1);
389 		did = ddi_prop_get_int(DDI_DEV_T_ANY, cur_node,
390 		    DDI_PROP_DONTPASS, "device-id", -1);
391 
392 		if (vid == vendor_id && did == device_id) {
393 			/* found */
394 			return (cur_node);
395 		}
396 
397 		/* check children */
398 		if ((child_id = ddi_get_child(cur_node)) != NULL) {
399 			if ((ret = sfe_search_pci_dev_subr(child_id,
400 			    vendor_id, device_id)) != NULL) {
401 				return (ret);
402 			}
403 		}
404 
405 	} while ((cur_node = ddi_get_next_sibling(cur_node)) != NULL);
406 
407 	/* not found */
408 	return (NULL);
409 }
410 
411 static dev_info_t *
412 sfe_search_pci_dev(int vendor_id, int device_id)
413 {
414 	return (sfe_search_pci_dev_subr(ddi_root_node(), vendor_id, device_id));
415 }
416 
417 static boolean_t
418 sfe_get_mac_addr_sis962(struct gem_dev *dp)
419 {
420 	boolean_t	ret;
421 	int		i;
422 
423 	ret = B_FALSE;
424 
425 	/* raise the request signal to access the EEPROM */
426 	OUTL(dp, MEAR, EROMAR_EEREQ);
427 	for (i = 0; (INL(dp, MEAR) & EROMAR_EEGNT) == 0; i++) {
428 		if (i > 200) {
429 			/* failed to acquire eeprom */
430 			cmn_err(CE_NOTE,
431 			    CONS "%s: failed to access eeprom", dp->name);
432 			goto x;
433 		}
434 		drv_usecwait(10);
435 	}
436 	ret = sfe_get_mac_addr_sis900(dp);
437 x:
438 	/* release EEPROM */
439 	OUTL(dp, MEAR, EROMAR_EEDONE);
440 
441 	return (ret);
442 }
443 
444 static int
445 sfe_reset_chip_sis900(struct gem_dev *dp)
446 {
447 	int		i;
448 	uint32_t	done;
449 	uint32_t	val;
450 	struct sfe_dev	*lp = dp->private;
451 
452 	DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__));
453 
454 	/* invalidate mac addr cache */
455 	bzero(lp->mac_addr, sizeof (lp->mac_addr));
456 
457 	lp->cr = 0;
458 
459 	/* inhibit interrupt */
460 	OUTL(dp, IMR, 0);
461 	lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits;
462 
463 	OUTLINL(dp, RFCR, 0);
464 
465 	OUTL(dp, CR, CR_RST | CR_TXR | CR_RXR);
466 	drv_usecwait(10);
467 
468 	done = 0;
469 	for (i = 0; done != (ISR_TXRCMP | ISR_RXRCMP); i++) {
470 		if (i > 1000) {
471 			cmn_err(CE_WARN, "%s: chip reset timeout", dp->name);
472 			return (GEM_FAILURE);
473 		}
474 		done |= INL(dp, ISR) & (ISR_TXRCMP | ISR_RXRCMP);
475 		drv_usecwait(10);
476 	}
477 
478 	if (lp->revid == SIS630ET_900_REV) {
479 		lp->cr |= CR_ACCESSMODE;
480 		OUTL(dp, CR, lp->cr | INL(dp, CR));
481 	}
482 
483 	/* Configuration register: enable PCI parity */
484 	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b",
485 	    dp->name, INL(dp, CFG), CFG_BITS_SIS900));
486 	val = 0;
487 	if (lp->revid >= SIS635A_900_REV ||
488 	    lp->revid == SIS900B_900_REV) {
489 		/* what is this ? */
490 		val |= CFG_RND_CNT;
491 	}
492 	OUTL(dp, CFG, val);
493 	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name,
494 	    INL(dp, CFG), CFG_BITS_SIS900));
495 
496 	return (GEM_SUCCESS);
497 }
498 
499 static int
500 sfe_reset_chip_dp83815(struct gem_dev *dp)
501 {
502 	int		i;
503 	uint32_t	val;
504 	struct sfe_dev	*lp = dp->private;
505 
506 	DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__));
507 
508 	/* invalidate mac addr cache */
509 	bzero(lp->mac_addr, sizeof (lp->mac_addr));
510 
511 	lp->cr = 0;
512 
513 	/* inhibit interrupts */
514 	OUTL(dp, IMR, 0);
515 	lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits;
516 
517 	OUTL(dp, RFCR, 0);
518 
519 	OUTL(dp, CR, CR_RST);
520 	drv_usecwait(10);
521 
522 	for (i = 0; INL(dp, CR) & CR_RST; i++) {
523 		if (i > 100) {
524 			cmn_err(CE_WARN, "!%s: chip reset timeout", dp->name);
525 			return (GEM_FAILURE);
526 		}
527 		drv_usecwait(10);
528 	}
529 	DPRINTF(0, (CE_CONT, "!%s: chip reset in %duS", dp->name, i*10));
530 
531 	OUTL(dp, CCSR, CCSR_PMESTS);
532 	OUTL(dp, CCSR, 0);
533 
534 	/* Configuration register: enable PCI parity */
535 	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b",
536 	    dp->name, INL(dp, CFG), CFG_BITS_DP83815));
537 	val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
538 	OUTL(dp, CFG, val | CFG_PAUSE_ADV);
539 	DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name,
540 	    INL(dp, CFG), CFG_BITS_DP83815));
541 
542 	return (GEM_SUCCESS);
543 }
544 
545 static int
546 sfe_init_chip(struct gem_dev *dp)
547 {
548 	/* Configuration register: have been set up in sfe_chip_reset */
549 
550 	/* PCI test control register: do nothing */
551 
552 	/* Interrupt status register : do nothing */
553 
554 	/* Interrupt mask register: clear, but leave lp->our_intr_bits */
555 	OUTL(dp, IMR, 0);
556 
557 	/* Enhanced PHY Access register (sis900): do nothing */
558 
559 	/* Transmit Descriptor Pointer register: base addr of TX ring */
560 	OUTL(dp, TXDP, dp->tx_ring_dma);
561 
562 	/* Receive descriptor pointer register: base addr of RX ring */
563 	OUTL(dp, RXDP, dp->rx_ring_dma);
564 
565 	return (GEM_SUCCESS);
566 }
567 
568 static uint_t
569 sfe_mcast_hash(struct gem_dev *dp, uint8_t *addr)
570 {
571 	return (gem_ether_crc_be(addr, ETHERADDRL));
572 }
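
/*
 * Editor's note: the rx filter routines below index their multicast
 * hash tables with the top bits of this CRC (9 bits on the DP83815,
 * 7 or 8 bits on SiS900 depending on revision).  A self-contained
 * sketch of a big-endian Ethernet CRC32, assuming gem_ether_crc_be()
 * in sfe_util.c uses the conventional driver implementation; the
 * function name here is hypothetical:
 */
#if 0
static uint32_t
ether_crc_be_sketch(const uint8_t *addr, int len)
{
	uint32_t	crc = 0xffffffffU;
	uint_t		data;
	int		idx, bit;

	for (idx = 0; idx < len; idx++) {
		/* feed each byte LSB-first into an MSB-first register */
		for (data = addr[idx], bit = 0; bit < 8; bit++, data >>= 1) {
			crc = (crc << 1) ^
			    ((((crc >> 31) ^ data) & 1) ? 0x04c11db7U : 0);
		}
	}
	return (crc);
}
#endif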
573 
574 #ifdef DEBUG_LEVEL
575 static void
576 sfe_rxfilter_dump(struct gem_dev *dp, int start, int end)
577 {
578 	int		i;
579 	int		j;
580 	uint16_t	ram[0x10];
581 
582 	cmn_err(CE_CONT, "!%s: rx filter ram dump:", dp->name);
583 #define	WORDS_PER_LINE	4
584 	for (i = start; i < end; i += WORDS_PER_LINE*2) {
585 		for (j = 0; j < WORDS_PER_LINE; j++) {
586 			OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i + j*2);
587 			ram[j] = INL(dp, RFDR);
588 		}
589 
590 		cmn_err(CE_CONT, "!0x%02x: 0x%04x 0x%04x 0x%04x 0x%04x",
591 		    i, ram[0], ram[1], ram[2], ram[3]);
592 		}
593 
594 #undef	WORDS_PER_LINE
595 }
596 #endif
597 
598 static uint_t	sfe_rf_perfect_base_dp83815[] = {
599 	RFADDR_PMATCH0_DP83815,
600 	RFADDR_PMATCH1_DP83815,
601 	RFADDR_PMATCH2_DP83815,
602 	RFADDR_PMATCH3_DP83815,
603 };
604 
605 static int
606 sfe_set_rx_filter_dp83815(struct gem_dev *dp)
607 {
608 	int		i;
609 	int		j;
610 	uint32_t	mode;
611 	uint8_t		*mac = dp->cur_addr.ether_addr_octet;
612 	uint16_t	hash_tbl[32];
613 	struct sfe_dev	*lp = dp->private;
614 
615 	DPRINTF(1, (CE_CONT, CONS "%s: %s: called, mc_count:%d, mode:0x%b",
616 	    dp->name, __func__, dp->mc_count, dp->rxmode, RXMODE_BITS));
617 
618 #if DEBUG_LEVEL > 0
619 	for (i = 0; i < dp->mc_count; i++) {
620 		cmn_err(CE_CONT,
621 		"!%s: adding mcast(%d) %02x:%02x:%02x:%02x:%02x:%02x",
622 		    dp->name, i,
623 		    dp->mc_list[i].addr.ether_addr_octet[0],
624 		    dp->mc_list[i].addr.ether_addr_octet[1],
625 		    dp->mc_list[i].addr.ether_addr_octet[2],
626 		    dp->mc_list[i].addr.ether_addr_octet[3],
627 		    dp->mc_list[i].addr.ether_addr_octet[4],
628 		    dp->mc_list[i].addr.ether_addr_octet[5]);
629 	}
630 #endif
631 	if ((dp->rxmode & RXMODE_ENABLE) == 0) {
632 		/* disable rx filter */
633 		OUTL(dp, RFCR, 0);
634 		return (GEM_SUCCESS);
635 	}
636 
637 	/*
638 	 * Set Receive filter control register
639 	 */
640 	if (dp->rxmode & RXMODE_PROMISC) {
641 		/* all broadcast, all multicast, all physical */
642 		mode = RFCR_AAB | RFCR_AAM | RFCR_AAP;
643 	} else if ((dp->rxmode & RXMODE_ALLMULTI) || dp->mc_count > 16*32/2) {
644 		/* all broadcast, all multicast, physical for the chip */
645 		mode = RFCR_AAB | RFCR_AAM | RFCR_APM_DP83815;
646 	} else if (dp->mc_count > 4) {
647 		/*
648 		 * Use multicast hash table,
649 		 * accept all broadcast and physical for the chip.
650 		 */
651 		mode = RFCR_AAB | RFCR_MHEN_DP83815 | RFCR_APM_DP83815;
652 
653 		bzero(hash_tbl, sizeof (hash_tbl));
654 		for (i = 0; i < dp->mc_count; i++) {
655 			j = dp->mc_list[i].hash >> (32 - 9);
656 			hash_tbl[j / 16] |= 1 << (j % 16);
657 		}
658 	} else {
659 		/*
660 		 * Use the pattern match filter for multicast addresses,
661 		 * accept all broadcast and physical for the chip
662 		 */
663 		/* need to enable corresponding pattern registers */
664 		mode = RFCR_AAB | RFCR_APM_DP83815 |
665 		    (((1 << dp->mc_count) - 1) << RFCR_APAT_SHIFT);
666 	}
667 
668 #if DEBUG_LEVEL > 1
669 	cmn_err(CE_CONT,
670 	    "!%s: mac %02x:%02x:%02x:%02x:%02x:%02x"
671 	    "  cache %02x:%02x:%02x:%02x:%02x:%02x",
672 	    dp->name, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
673 	    lp->mac_addr[0], lp->mac_addr[1],
674 	    lp->mac_addr[2], lp->mac_addr[3],
675 	    lp->mac_addr[4], lp->mac_addr[5]);
676 #endif
677 	if (bcmp(mac, lp->mac_addr, ETHERADDRL) != 0) {
678 		/*
679 		 * XXX - need to *disable* the rx filter to load the mac address
680 		 * into the chip; otherwise, we cannot set up the rx filter correctly.
681 		 */
682 		/* setup perfect match register for my station address */
683 		for (i = 0; i < ETHERADDRL; i += 2) {
684 			OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i);
685 			OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]);
686 		}
687 
688 		bcopy(mac, lp->mac_addr, ETHERADDRL);
689 	}
690 
691 #if DEBUG_LEVEL > 3
692 	/* clear pattern ram */
693 	for (j = 0x200; j < 0x380; j += 2) {
694 		OUTL(dp, RFCR, j);
695 		OUTL(dp, RFDR, 0);
696 	}
697 #endif
698 	if (mode & RFCR_APAT_DP83815) {
699 		/* setup multicast address into pattern match registers */
700 		for (j = 0; j < dp->mc_count; j++) {
701 			mac = &dp->mc_list[j].addr.ether_addr_octet[0];
702 			for (i = 0; i < ETHERADDRL; i += 2) {
703 				OUTL(dp, RFCR,
704 				    sfe_rf_perfect_base_dp83815[j] + i*2);
705 				OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]);
706 			}
707 		}
708 
709 		/* setup pattern count registers */
710 		OUTL(dp, RFCR, RFADDR_PCOUNT01_DP83815);
711 		OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL);
712 		OUTL(dp, RFCR, RFADDR_PCOUNT23_DP83815);
713 		OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL);
714 	}
715 
716 	if (mode & RFCR_MHEN_DP83815) {
717 		/* Load Multicast hash table */
718 		for (i = 0; i < 32; i++) {
719 			/* for DP83815, index is in byte */
720 			OUTL(dp, RFCR, RFADDR_MULTICAST_DP83815 + i*2);
721 			OUTL(dp, RFDR, hash_tbl[i]);
722 		}
723 	}
724 #if DEBUG_LEVEL > 2
725 	sfe_rxfilter_dump(dp, 0, 0x10);
726 	sfe_rxfilter_dump(dp, 0x200, 0x380);
727 #endif
728 	/* Set rx filter mode and enable rx filter */
729 	OUTL(dp, RFCR, RFCR_RFEN | mode);
730 
731 	return (GEM_SUCCESS);
732 }
733 
734 static int
735 sfe_set_rx_filter_sis900(struct gem_dev *dp)
736 {
737 	int		i;
738 	uint32_t	mode;
739 	uint16_t	hash_tbl[16];
740 	uint8_t		*mac = dp->cur_addr.ether_addr_octet;
741 	int		hash_size;
742 	int		hash_shift;
743 	struct sfe_dev	*lp = dp->private;
744 
745 	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
746 
747 	if ((dp->rxmode & RXMODE_ENABLE) == 0) {
748 		/* disable rx filter */
749 		OUTLINL(dp, RFCR, 0);
750 		return (GEM_SUCCESS);
751 	}
752 
753 	/*
754 	 * determine the hardware hash table size in words.
755 	 */
756 	hash_shift = 25;
757 	if (lp->revid >= SIS635A_900_REV || lp->revid == SIS900B_900_REV) {
758 		hash_shift = 24;
759 	}
760 	hash_size = (1 << (32 - hash_shift)) / 16;
761 	bzero(hash_tbl, sizeof (hash_tbl));
762 
763 	/* Set Receive filter control register */
764 
765 	if (dp->rxmode & RXMODE_PROMISC) {
766 		/* all broadcast, all multicast, all physical */
767 		mode = RFCR_AAB | RFCR_AAM | RFCR_AAP;
768 	} else if ((dp->rxmode & RXMODE_ALLMULTI) ||
769 	    dp->mc_count > hash_size*16/2) {
770 		/* all broadcast, all multicast, physical for the chip */
771 		mode = RFCR_AAB | RFCR_AAM;
772 	} else {
773 		/* all broadcast, physical for the chip */
774 		mode = RFCR_AAB;
775 	}
776 
777 	/* make hash table */
778 	for (i = 0; i < dp->mc_count; i++) {
779 		uint_t	h;
780 		h = dp->mc_list[i].hash >> hash_shift;
781 		hash_tbl[h / 16] |= 1 << (h % 16);
782 	}
783 
784 	if (bcmp(mac, lp->mac_addr, ETHERADDRL) != 0) {
785 		/* Disable Rx filter and load mac address */
786 		for (i = 0; i < ETHERADDRL/2; i++) {
787 			/* For sis900, index is in word */
788 			OUTLINL(dp, RFCR,
789 			    (RFADDR_MAC_SIS900+i) << RFCR_RFADDR_SHIFT_SIS900);
790 			OUTLINL(dp, RFDR, (mac[i*2+1] << 8) | mac[i*2]);
791 		}
792 
793 		bcopy(mac, lp->mac_addr, ETHERADDRL);
794 	}
795 
796 	/* Load Multicast hash table */
797 	for (i = 0; i < hash_size; i++) {
798 		/* For sis900, index is in word */
799 		OUTLINL(dp, RFCR,
800 		    (RFADDR_MULTICAST_SIS900 + i) << RFCR_RFADDR_SHIFT_SIS900);
801 		OUTLINL(dp, RFDR, hash_tbl[i]);
802 	}
803 
804 	/* Load rx filter mode and enable rx filter */
805 	OUTLINL(dp, RFCR, RFCR_RFEN | mode);
806 
807 	return (GEM_SUCCESS);
808 }
809 
810 static int
811 sfe_start_chip(struct gem_dev *dp)
812 {
813 	struct sfe_dev	*lp = dp->private;
814 
815 	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
816 
817 	/*
818 	 * Set up the interrupt mask; it shouldn't include ISR_TOK,
819 	 * to improve performance.
820 	 */
821 	lp->our_intr_bits = OUR_INTR_BITS;
822 
823 	/* enable interrupt */
824 	if ((dp->misc_flag & GEM_NOINTR) == 0) {
825 		OUTL(dp, IER, 1);
826 		OUTL(dp, IMR, lp->our_intr_bits);
827 	}
828 
829 	/* Kick RX */
830 	OUTL(dp, CR, lp->cr | CR_RXE);
831 
832 	return (GEM_SUCCESS);
833 }
834 
835 /*
836  * Stop nic core gracefully.
837  */
838 static int
839 sfe_stop_chip(struct gem_dev *dp)
840 {
841 	struct sfe_dev	*lp = dp->private;
842 	uint32_t	done;
843 	int		i;
844 	uint32_t	val;
845 
846 	DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
847 
848 	/*
849 	 * Although we inhibit interrupts here, we don't clear the soft copy
850 	 * of the interrupt mask, to avoid bogus interrupts.
851 	 */
852 	OUTL(dp, IMR, 0);
853 
854 	/* stop TX and RX immediately */
855 	OUTL(dp, CR, lp->cr | CR_TXR | CR_RXR);
856 
857 	done = 0;
858 	for (i = 0; done != (ISR_RXRCMP | ISR_TXRCMP); i++) {
859 		if (i > 1000) {
860 			/*
861 			 * As the gem layer will call sfe_reset_chip(),
862 			 * we don't need to reset further
863 			 */
864 			cmn_err(CE_NOTE, "!%s: %s: Tx/Rx reset timeout",
865 			    dp->name, __func__);
866 
867 			return (GEM_FAILURE);
868 		}
869 		val = INL(dp, ISR);
870 		done |= val & (ISR_RXRCMP | ISR_TXRCMP);
871 		lp->isr_pended |= val & lp->our_intr_bits;
872 		drv_usecwait(10);
873 	}
874 
875 	return (GEM_SUCCESS);
876 }
877 
878 #ifndef	__sparc
879 /*
880  * Stop nic core gracefully for quiesce
881  */
882 static int
883 sfe_stop_chip_quiesce(struct gem_dev *dp)
884 {
885 	struct sfe_dev	*lp = dp->private;
886 	uint32_t	done;
887 	int		i;
888 	uint32_t	val;
889 
890 	/*
891 	 * Although we inhibit interrupts here, we don't clear the soft copy
892 	 * of the interrupt mask, to avoid bogus interrupts.
893 	 */
894 	OUTL(dp, IMR, 0);
895 
896 	/* stop TX and RX immediately */
897 	OUTL(dp, CR, CR_TXR | CR_RXR);
898 
899 	done = 0;
900 	for (i = 0; done != (ISR_RXRCMP | ISR_TXRCMP); i++) {
901 		if (i > 1000) {
902 			/*
903 			 * As the gem layer will call sfe_reset_chip(),
904 			 * we don't need to reset further
905 			 */
906 
907 			return (DDI_FAILURE);
908 		}
909 		val = INL(dp, ISR);
910 		done |= val & (ISR_RXRCMP | ISR_TXRCMP);
911 		lp->isr_pended |= val & lp->our_intr_bits;
912 		drv_usecwait(10);
913 	}
914 	return (DDI_SUCCESS);
915 }
916 #endif
917 
918 /*
919  * Setup media mode
920  */
921 static uint_t
922 sfe_mxdma_value[] = { 512, 4, 8, 16, 32, 64, 128, 256, };
923 
924 static uint_t
925 sfe_encode_mxdma(uint_t burstsize)
926 {
927 	int	i;
928 
929 	if (burstsize > 256) {
930 		/* choose 512 */
931 		return (0);
932 	}
933 
934 	for (i = 1; i < 8; i++) {
935 		if (burstsize <= sfe_mxdma_value[i]) {
936 			break;
937 		}
938 	}
939 	return (i);
940 }
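
/*
 * Editor's note: the return value of sfe_encode_mxdma() is the index
 * into sfe_mxdma_value[], with index 0 meaning the largest burst
 * (512 bytes).  For example sfe_encode_mxdma(100) returns 6, because
 * 128 is the smallest supported burst not less than 100; the tuning
 * loop in sfe_set_media() below then uses sfe_mxdma_value[6] == 128
 * to normalize the requested size.
 */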
941 
942 static int
943 sfe_set_media(struct gem_dev *dp)
944 {
945 	uint32_t	txcfg;
946 	uint32_t	rxcfg;
947 	uint32_t	pcr;
948 	uint32_t	val;
949 	uint32_t	txmxdma;
950 	uint32_t	rxmxdma;
951 	struct sfe_dev	*lp = dp->private;
952 #ifdef DEBUG_LEVEL
953 	extern int	gem_speed_value[];
954 #endif
955 	DPRINTF(2, (CE_CONT, CONS "%s: %s: %s duplex, %d Mbps",
956 	    dp->name, __func__,
957 	    dp->full_duplex ? "full" : "half", gem_speed_value[dp->speed]));
958 
959 	/* initialize txcfg and rxcfg */
960 	txcfg = TXCFG_ATP;
961 	if (dp->full_duplex) {
962 		txcfg |= (TXCFG_CSI | TXCFG_HBI);
963 	}
964 	rxcfg = RXCFG_AEP | RXCFG_ARP;
965 	if (dp->full_duplex) {
966 		rxcfg |= RXCFG_ATX;
967 	}
968 
969 	/* select txmxdma and rxmxdma, the maximum burst lengths */
970 	if (lp->chip->chip_type == CHIPTYPE_SIS900) {
971 #ifdef DEBUG_SIS900_EDB
972 		val = CFG_EDB_MASTER;
973 #else
974 		val = INL(dp, CFG) & CFG_EDB_MASTER;
975 #endif
976 		if (val) {
977 			/*
978 			 * sis900 built-in cores:
979 			 * max burst length must be fixed to 64
980 			 */
981 			txmxdma = 64;
982 			rxmxdma = 64;
983 		} else {
984 			/*
985 			 * sis900 pci chipset:
986 			 * the vendor recommended to fix max burst length
987 			 * to 512
988 			 */
989 			txmxdma = 512;
990 			rxmxdma = 512;
991 		}
992 	} else {
993 		/*
994 		 * NS dp83815/816:
995 		 * use user defined or default for tx/rx max burst length
996 		 */
997 		txmxdma = max(dp->txmaxdma, 256);
998 		rxmxdma = max(dp->rxmaxdma, 256);
999 	}
1000 
1001 
1002 	/* tx high water mark */
1003 	lp->tx_drain_threshold = ROUNDUP2(dp->txthr, TXCFG_FIFO_UNIT);
1004 
1005 	/* determine tx_fill_threshold according to the drain threshold */
1006 	lp->tx_fill_threshold =
1007 	    TXFIFOSIZE - lp->tx_drain_threshold - TXCFG_FIFO_UNIT;
1008 
1009 	/* tune txmxdma not to exceed tx_fill_threshold */
1010 	for (; ; ) {
1011 		/* normalize txmxdma requested */
1012 		val = sfe_encode_mxdma(txmxdma);
1013 		txmxdma = sfe_mxdma_value[val];
1014 
1015 		if (txmxdma <= lp->tx_fill_threshold) {
1016 			break;
1017 		}
1018 		/* select new txmxdma */
1019 		txmxdma = txmxdma / 2;
1020 	}
1021 	txcfg |= val << TXCFG_MXDMA_SHIFT;
1022 
1023 	/* encode rxmxdma, the maximum burst length for rx */
1024 	val = sfe_encode_mxdma(rxmxdma);
1025 	rxcfg |= val << RXCFG_MXDMA_SHIFT;
1026 	rxmxdma = sfe_mxdma_value[val];
1027 
1028 	/* receive starting threshold - the field is only 5 bits wide */
1029 	val = ROUNDUP2(max(dp->rxthr, ETHERMIN), RXCFG_FIFO_UNIT);
1030 	lp->rx_drain_threshold =
1031 	    min(val, (RXCFG_DRTH >> RXCFG_DRTH_SHIFT) * RXCFG_FIFO_UNIT);
1032 
1033 	DPRINTF(0, (CE_CONT,
1034 	    "%s: %s: tx: drain:%d(rest %d) fill:%d mxdma:%d,"
1035 	    " rx: drain:%d mxdma:%d",
1036 	    dp->name, __func__,
1037 	    lp->tx_drain_threshold, TXFIFOSIZE - lp->tx_drain_threshold,
1038 	    lp->tx_fill_threshold, txmxdma,
1039 	    lp->rx_drain_threshold, rxmxdma));
1040 
1041 	ASSERT(lp->tx_drain_threshold < 64*TXCFG_FIFO_UNIT);
1042 	ASSERT(lp->tx_fill_threshold < 64*TXCFG_FIFO_UNIT);
1043 	ASSERT(lp->rx_drain_threshold < 32*RXCFG_FIFO_UNIT);
1044 
1045 	txcfg |= ((lp->tx_fill_threshold/TXCFG_FIFO_UNIT) << TXCFG_FLTH_SHIFT)
1046 	    | (lp->tx_drain_threshold/TXCFG_FIFO_UNIT);
1047 	OUTL(dp, TXCFG, txcfg);
1048 
1049 	rxcfg |= ((lp->rx_drain_threshold/RXCFG_FIFO_UNIT) << RXCFG_DRTH_SHIFT);
1050 	if (lp->chip->chip_type == CHIPTYPE_DP83815) {
1051 		rxcfg |= RXCFG_ALP_DP83815;
1052 	}
1053 	OUTL(dp, RXCFG, rxcfg);
1054 
1055 	DPRINTF(0, (CE_CONT, CONS "%s: %s: txcfg:%b rxcfg:%b",
1056 	    dp->name, __func__,
1057 	    txcfg, TXCFG_BITS, rxcfg, RXCFG_BITS));
1058 
1059 	/* Flow control */
1060 	if (lp->chip->chip_type == CHIPTYPE_DP83815) {
1061 		pcr = INL(dp, PCR);
1062 		switch (dp->flow_control) {
1063 		case FLOW_CONTROL_SYMMETRIC:
1064 		case FLOW_CONTROL_RX_PAUSE:
1065 			OUTL(dp, PCR, pcr | PCR_PSEN | PCR_PS_MCAST);
1066 			break;
1067 
1068 		default:
1069 			OUTL(dp, PCR,
1070 			    pcr & ~(PCR_PSEN | PCR_PS_MCAST | PCR_PS_DA));
1071 			break;
1072 		}
1073 		DPRINTF(2, (CE_CONT, CONS "%s: PCR: %b", dp->name,
1074 		    INL(dp, PCR), PCR_BITS));
1075 
1076 	} else if (lp->chip->chip_type == CHIPTYPE_SIS900) {
1077 		switch (dp->flow_control) {
1078 		case FLOW_CONTROL_SYMMETRIC:
1079 		case FLOW_CONTROL_RX_PAUSE:
1080 			OUTL(dp, FLOWCTL, FLOWCTL_FLOWEN);
1081 			break;
1082 		default:
1083 			OUTL(dp, FLOWCTL, 0);
1084 			break;
1085 		}
1086 		DPRINTF(2, (CE_CONT, CONS "%s: FLOWCTL: %b",
1087 		    dp->name, INL(dp, FLOWCTL), FLOWCTL_BITS));
1088 	}
1089 	return (GEM_SUCCESS);
1090 }
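
/*
 * Editor's note: a worked example of the FIFO threshold arithmetic in
 * sfe_set_media(), assuming the DP83815's 2KB tx FIFO (i.e.
 * TXFIFOSIZE == 2048 and TXCFG_FIFO_UNIT == 32) and dp->txthr == 256:
 * tx_drain_threshold = ROUNDUP2(256, 32) = 256, tx_fill_threshold =
 * 2048 - 256 - 32 = 1760, and a requested txmxdma of 512 already
 * satisfies 512 <= 1760, so the tuning loop exits on its first pass
 * with the encoding for a 512-byte burst.
 */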
1091 
1092 static int
1093 sfe_get_stats(struct gem_dev *dp)
1094 {
1095 	/* do nothing */
1096 	return (GEM_SUCCESS);
1097 }
1098 
1099 /*
1100  * descriptor manipulations
1101  */
1102 static int
1103 sfe_tx_desc_write(struct gem_dev *dp, int slot,
1104     ddi_dma_cookie_t *dmacookie, int frags, uint64_t flags)
1105 {
1106 	uint32_t		mark;
1107 	struct sfe_desc		*tdp;
1108 	ddi_dma_cookie_t	*dcp;
1109 	uint32_t		tmp0;
1110 #if DEBUG_LEVEL > 2
1111 	int			i;
1112 
1113 	cmn_err(CE_CONT,
1114 	    CONS "%s: time:%d %s seqnum: %d, slot %d, frags: %d flags: %llx",
1115 	    dp->name, ddi_get_lbolt(), __func__,
1116 	    dp->tx_desc_tail, slot, frags, flags);
1117 
1118 	for (i = 0; i < frags; i++) {
1119 		cmn_err(CE_CONT, CONS "%d: addr: 0x%x, len: 0x%x",
1120 		    i, dmacookie[i].dmac_address, dmacookie[i].dmac_size);
1121 	}
1122 #endif
1123 	/*
1124 	 * write the tx descriptor fields in reverse order.
1125 	 */
1126 #if DEBUG_LEVEL > 3
1127 	flags |= GEM_TXFLAG_INTR;
1128 #endif
1129 	mark = (flags & GEM_TXFLAG_INTR)
1130 	    ? (CMDSTS_OWN | CMDSTS_INTR) : CMDSTS_OWN;
1131 
1132 	ASSERT(frags == 1);
1133 	dcp = &dmacookie[0];
1134 	if (flags & GEM_TXFLAG_HEAD) {
1135 		mark &= ~CMDSTS_OWN;
1136 	}
1137 
1138 	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
1139 	tmp0 = (uint32_t)dcp->dmac_address;
1140 	mark |= (uint32_t)dcp->dmac_size;
1141 	tdp->d_bufptr = LE_32(tmp0);
1142 	tdp->d_cmdsts = LE_32(mark);
1143 
1144 	return (frags);
1145 }
1146 
1147 static void
1148 sfe_tx_start(struct gem_dev *dp, int start_slot, int nslot)
1149 {
1150 	uint_t			tx_ring_size = dp->gc.gc_tx_ring_size;
1151 	struct sfe_desc		*tdp;
1152 	struct sfe_dev		*lp = dp->private;
1153 
1154 	if (nslot > 1) {
1155 		gem_tx_desc_dma_sync(dp,
1156 		    SLOT(start_slot + 1, tx_ring_size),
1157 		    nslot - 1, DDI_DMA_SYNC_FORDEV);
1158 	}
1159 
1160 	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * start_slot];
1161 	tdp->d_cmdsts |= LE_32(CMDSTS_OWN);
1162 
1163 	gem_tx_desc_dma_sync(dp, start_slot, 1, DDI_DMA_SYNC_FORDEV);
1164 
1165 	/*
1166 	 * Make the Transmit Buffer Manager Fill state machine active.
1167 	 */
1168 	if (dp->mac_active) {
1169 		OUTL(dp, CR, lp->cr | CR_TXE);
1170 	}
1171 }
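
/*
 * Editor's note: sfe_tx_desc_write() and sfe_tx_start() cooperate on
 * the CMDSTS_OWN bit so the NIC never races a half-written batch of
 * descriptors: the head descriptor of a batch is written with OWN
 * clear, the rest of the batch is DMA-synced first, and only then is
 * OWN set on the head and the transmitter kicked.  In outline:
 *
 *	sfe_tx_desc_write(dp, head, ...);	// GEM_TXFLAG_HEAD: OWN clear
 *	sfe_tx_desc_write(dp, head + 1, ...);	// OWN set immediately
 *	sfe_tx_start(dp, head, nslot);		// sync, set OWN, kick CR_TXE
 */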
1172 
1173 static void
1174 sfe_rx_desc_write(struct gem_dev *dp, int slot,
1175     ddi_dma_cookie_t *dmacookie, int frags)
1176 {
1177 	struct sfe_desc		*rdp;
1178 	uint32_t		tmp0;
1179 	uint32_t		tmp1;
1180 #if DEBUG_LEVEL > 2
1181 	int			i;
1182 
1183 	ASSERT(frags == 1);
1184 
1185 	cmn_err(CE_CONT, CONS
1186 	    "%s: %s seqnum: %d, slot %d, frags: %d",
1187 	    dp->name, __func__, dp->rx_active_tail, slot, frags);
1188 	for (i = 0; i < frags; i++) {
1189 		cmn_err(CE_CONT, CONS "  frag: %d addr: 0x%llx, len: 0x%lx",
1190 		    i, dmacookie[i].dmac_address, dmacookie[i].dmac_size);
1191 	}
1192 #endif
1193 	/* for the last slot of the packet */
1194 	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
1195 
1196 	tmp0 = (uint32_t)dmacookie->dmac_address;
1197 	tmp1 = CMDSTS_INTR | (uint32_t)dmacookie->dmac_size;
1198 	rdp->d_bufptr = LE_32(tmp0);
1199 	rdp->d_cmdsts = LE_32(tmp1);
1200 }
1201 
1202 static uint_t
1203 sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc)
1204 {
1205 	uint_t			tx_ring_size = dp->gc.gc_tx_ring_size;
1206 	struct sfe_desc		*tdp;
1207 	uint32_t		status;
1208 	int			cols;
1209 	struct sfe_dev		*lp = dp->private;
1210 #ifdef DEBUG_LEVEL
1211 	int			i;
1212 	clock_t			delay;
1213 #endif
1214 	/* check status of the last descriptor */
1215 	tdp = (void *)
1216 	    &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot + ndesc - 1, tx_ring_size)];
1217 
1218 	/*
1219 	 * Don't use LE_32() directly to refer to tdp->d_cmdsts.
1220 	 * It is not atomic on big endian cpus.
1221 	 */
1222 	status = tdp->d_cmdsts;
1223 	status = LE_32(status);
1224 
1225 	DPRINTF(2, (CE_CONT, CONS "%s: time:%ld %s: slot:%d, status:0x%b",
1226 	    dp->name, ddi_get_lbolt(), __func__,
1227 	    slot, status, TXSTAT_BITS));
1228 
1229 	if (status & CMDSTS_OWN) {
1230 		/*
1231 		 * not yet transmitted
1232 		 */
1233 		/* workaround for tx hang */
1234 		if (lp->chip->chip_type == CHIPTYPE_DP83815 &&
1235 		    dp->mac_active) {
1236 			OUTL(dp, CR, lp->cr | CR_TXE);
1237 		}
1238 		return (0);
1239 	}
1240 
1241 	if (status & CMDSTS_MORE) {
1242 		/* XXX - a hardware problem, but don't panic the system */
1243 		/* avoid lint bug for %b format string including 32nd bit */
1244 		cmn_err(CE_NOTE, CONS
1245 		    "%s: tx status bits incorrect:  slot:%d, status:0x%x",
1246 		    dp->name, slot, status);
1247 	}
1248 
1249 #if DEBUG_LEVEL > 3
1250 	delay = (ddi_get_lbolt() - dp->tx_buf_head->txb_stime) * 10;
1251 	if (delay >= 50) {
1252 		DPRINTF(0, (CE_NOTE, "%s: tx deferred %d mS: slot %d",
1253 		    dp->name, delay, slot));
1254 	}
1255 #endif
1256 
1257 #if DEBUG_LEVEL > 3
1258 	for (i = 0; i < ndesc - 1; i++) {	/* editor's fix: nfrag was undefined */
1259 		uint32_t	s;
1260 		int		n;
1261 
1262 		n = SLOT(slot + i, tx_ring_size);
1263 		s = LE_32(
1264 		    ((struct sfe_desc *)((void *)
1265 		    &dp->tx_ring[SFE_DESC_SIZE * n]))->d_cmdsts);
1266 
1267 		ASSERT(s & CMDSTS_MORE);
1268 		ASSERT((s & CMDSTS_OWN) == 0);
1269 	}
1270 #endif
1271 
1272 	/*
1273 	 *  collect statistics
1274 	 */
1275 	if ((status & CMDSTS_OK) == 0) {
1276 
1277 		/* failed to transmit the packet */
1278 
1279 		DPRINTF(0, (CE_CONT, CONS "%s: Transmit error, Tx status %b",
1280 		    dp->name, status, TXSTAT_BITS));
1281 
1282 		dp->stats.errxmt++;
1283 
1284 		if (status & CMDSTS_TFU) {
1285 			dp->stats.underflow++;
1286 		} else if (status & CMDSTS_CRS) {
1287 			dp->stats.nocarrier++;
1288 		} else if (status & CMDSTS_OWC) {
1289 			dp->stats.xmtlatecoll++;
1290 		} else if ((!dp->full_duplex) && (status & CMDSTS_EC)) {
1291 			dp->stats.excoll++;
1292 			dp->stats.collisions += 16;
1293 		} else {
1294 			dp->stats.xmit_internal_err++;
1295 		}
1296 	} else if (!dp->full_duplex) {
1297 		cols = (status >> CMDSTS_CCNT_SHIFT) & CCNT_MASK;
1298 
1299 		if (cols > 0) {
1300 			if (cols == 1) {
1301 				dp->stats.first_coll++;
1302 			} else /* (cols > 1) */ {
1303 				dp->stats.multi_coll++;
1304 			}
1305 			dp->stats.collisions += cols;
1306 		} else if (status & CMDSTS_TD) {
1307 			dp->stats.defer++;
1308 		}
1309 	}
1310 	return (GEM_TX_DONE);
1311 }
1312 
1313 static uint64_t
1314 sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc)
1315 {
1316 	struct sfe_desc		*rdp;
1317 	uint_t			len;
1318 	uint_t			flag;
1319 	uint32_t		status;
1320 
1321 	flag = GEM_RX_DONE;
1322 
1323 	/* Don't read ISR because we cannot ack only the rx interrupt. */
1324 
1325 	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
1326 
1327 	/*
1328 	 * Don't use LE_32() directly to refer to rdp->d_cmdsts.
1329 	 * It is not atomic on big endian cpus.
1330 	 */
1331 	status = rdp->d_cmdsts;
1332 	status = LE_32(status);
1333 
1334 	DPRINTF(2, (CE_CONT, CONS "%s: time:%ld %s: slot:%d, status:0x%b",
1335 	    dp->name, ddi_get_lbolt(), __func__,
1336 	    slot, status, RXSTAT_BITS));
1337 
1338 	if ((status & CMDSTS_OWN) == 0) {
1339 		/*
1340 		 * No more received packets because
1341 		 * this buffer is still owned by the NIC.
1342 		 */
1343 		return (0);
1344 	}
1345 
1346 #define	RX_ERR_BITS \
1347 	(CMDSTS_RXA | CMDSTS_RXO | CMDSTS_LONG | CMDSTS_RUNT | \
1348 		CMDSTS_ISE | CMDSTS_CRCE | CMDSTS_FAE | CMDSTS_MORE)
1349 
1350 	if (status & RX_ERR_BITS) {
1351 		/*
1352 		 * Packet with error received
1353 		 */
1354 		DPRINTF(0, (CE_CONT, CONS "%s: Corrupted packet "
1355 		    "received, buffer status: %b",
1356 		    dp->name, status, RXSTAT_BITS));
1357 
1358 		/* collect statistics information */
1359 		dp->stats.errrcv++;
1360 
1361 		if (status & CMDSTS_RXO) {
1362 			dp->stats.overflow++;
1363 		} else if (status & (CMDSTS_LONG | CMDSTS_MORE)) {
1364 			dp->stats.frame_too_long++;
1365 		} else if (status & CMDSTS_RUNT) {
1366 			dp->stats.runt++;
1367 		} else if (status & (CMDSTS_ISE | CMDSTS_FAE)) {
1368 			dp->stats.frame++;
1369 		} else if (status & CMDSTS_CRCE) {
1370 			dp->stats.crc++;
1371 		} else {
1372 			dp->stats.rcv_internal_err++;
1373 		}
1374 
1375 		return (flag | GEM_RX_ERR);
1376 	}
1377 
1378 	/*
1379 	 * this packet was received without errors
1380 	 */
1381 	if ((len = (status & CMDSTS_SIZE)) >= ETHERFCSL) {
1382 		len -= ETHERFCSL;
1383 	}
1384 
1385 #if DEBUG_LEVEL > 10
1386 {
1387 	int	i;
1388 	uint8_t	*bp = dp->rx_buf_head->rxb_buf;
1389 
1390 	cmn_err(CE_CONT, CONS "%s: len:%d", dp->name, len);
1391 
1392 	for (i = 0; i < 60; i += 10) {
1393 		cmn_err(CE_CONT, CONS
1394 		    "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x",
1395 		    bp[0], bp[1], bp[2], bp[3], bp[4],
1396 		    bp[5], bp[6], bp[7], bp[8], bp[9]);
1397 		bp += 10;	/* editor's fix: advance within the loop */
1398 	}
1399 }
1400 #endif
1401 	return (flag | (len & GEM_RX_LEN));
1402 }
1403 
1404 static void
1405 sfe_tx_desc_init(struct gem_dev *dp, int slot)
1406 {
1407 	uint_t			tx_ring_size = dp->gc.gc_tx_ring_size;
1408 	struct sfe_desc		*tdp;
1409 	uint32_t		here;
1410 
1411 	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
1412 
1413 	/* don't clear the d_link field, which has a valid pointer */
1414 	tdp->d_cmdsts = 0;
1415 
1416 	/* make a link to this from the previous descriptor */
1417 	here = ((uint32_t)dp->tx_ring_dma) + SFE_DESC_SIZE*slot;
1418 
1419 	tdp = (void *)
1420 	    &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot - 1, tx_ring_size)];
1421 	tdp->d_link = LE_32(here);
1422 }
1423 
1424 static void
1425 sfe_rx_desc_init(struct gem_dev *dp, int slot)
1426 {
1427 	uint_t			rx_ring_size = dp->gc.gc_rx_ring_size;
1428 	struct sfe_desc		*rdp;
1429 	uint32_t		here;
1430 
1431 	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
1432 
1433 	/* don't clear the d_link field, which has a valid pointer */
1434 	rdp->d_cmdsts = LE_32(CMDSTS_OWN);
1435 
1436 	/* make a link to this from the previous descriptor */
1437 	here = ((uint32_t)dp->rx_ring_dma) + SFE_DESC_SIZE*slot;
1438 
1439 	rdp = (void *)
1440 	    &dp->rx_ring[SFE_DESC_SIZE * SLOT(slot - 1, rx_ring_size)];
1441 	rdp->d_link = LE_32(here);
1442 }
1443 
1444 static void
1445 sfe_tx_desc_clean(struct gem_dev *dp, int slot)
1446 {
1447 	struct sfe_desc		*tdp;
1448 
1449 	tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
1450 	tdp->d_cmdsts = 0;
1451 }
1452 
1453 static void
1454 sfe_rx_desc_clean(struct gem_dev *dp, int slot)
1455 {
1456 	struct sfe_desc		*rdp;
1457 
1458 	rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
1459 	rdp->d_cmdsts = LE_32(CMDSTS_OWN);
1460 }
1461 
1462 /*
1463  * Device depend interrupt handler
1464  */
1465 static uint_t
1466 sfe_interrupt(struct gem_dev *dp)
1467 {
1468 	uint_t		rx_ring_size = dp->gc.gc_rx_ring_size;
1469 	uint32_t	isr;
1470 	uint32_t	isr_bogus;
1471 	uint_t		flags = 0;
1472 	boolean_t	need_to_reset = B_FALSE;
1473 	struct sfe_dev	*lp = dp->private;
1474 
1475 	/* read reason and clear interrupt */
1476 	isr = INL(dp, ISR);
1477 
1478 	isr_bogus = lp->isr_pended;
1479 	lp->isr_pended = 0;
1480 
1481 	if (((isr | isr_bogus) & lp->our_intr_bits) == 0) {
1482 		/* we are not the interrupt source */
1483 		return (DDI_INTR_UNCLAIMED);
1484 	}
1485 
1486 	DPRINTF(3, (CE_CONT,
1487 	    CONS "%s: time:%ld %s:called: isr:0x%b rx_active_head: %d",
1488 	    dp->name, ddi_get_lbolt(), __func__,
1489 	    isr, INTR_BITS, dp->rx_active_head));
1490 
1491 	if (!dp->mac_active) {
1492 		/* the device is going to stop */
1493 		lp->our_intr_bits = 0;
1494 		return (DDI_INTR_CLAIMED);
1495 	}
1496 
1497 	isr &= lp->our_intr_bits;
1498 
1499 	if (isr & (ISR_RXSOVR | ISR_RXORN | ISR_RXIDLE | ISR_RXERR |
1500 	    ISR_RXDESC | ISR_RXOK)) {
1501 		(void) gem_receive(dp);
1502 
1503 		if (isr & (ISR_RXSOVR | ISR_RXORN)) {
1504 			DPRINTF(0, (CE_CONT,
1505 			    CONS "%s: rx fifo overrun: isr %b",
1506 			    dp->name, isr, INTR_BITS));
1507 			/* no need restart rx */
1508 			dp->stats.overflow++;
1509 		}
1510 
1511 		if (isr & ISR_RXIDLE) {
1512 			DPRINTF(0, (CE_CONT,
1513 			    CONS "%s: rx buffer ran out: isr %b",
1514 			    dp->name, isr, INTR_BITS));
1515 
1516 			dp->stats.norcvbuf++;
1517 
1518 			/*
1519 			 * Make RXDP point to the head of the receive
1520 			 * buffer list.
1521 			 */
1522 			OUTL(dp, RXDP, dp->rx_ring_dma +
1523 			    SFE_DESC_SIZE *
1524 			    SLOT(dp->rx_active_head, rx_ring_size));
1525 
1526 			/* Restart the receive engine */
1527 			OUTL(dp, CR, lp->cr | CR_RXE);
1528 		}
1529 	}
1530 
1531 	if (isr & (ISR_TXURN | ISR_TXERR | ISR_TXDESC |
1532 	    ISR_TXIDLE | ISR_TXOK)) {
1533 		/* need to reclaim tx buffers */
1534 		if (gem_tx_done(dp)) {
1535 			flags |= INTR_RESTART_TX;
1536 		}
1537 		/*
1538 		 * XXX - tx error statistics will be counted in
1539 		 * sfe_tx_desc_stat() and no need to restart tx on errors.
1540 		 */
1541 	}
1542 
1543 	if (isr & (ISR_DPERR | ISR_SSERR | ISR_RMABT | ISR_RTABT)) {
1544 		cmn_err(CE_WARN, "%s: ERROR interrupt: isr %b.",
1545 		    dp->name, isr, INTR_BITS);
1546 		need_to_reset = B_TRUE;
1547 	}
1548 reset:
1549 	if (need_to_reset) {
1550 		(void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
1551 		flags |= INTR_RESTART_TX;
1552 	}
1553 
1554 	DPRINTF(5, (CE_CONT, CONS "%s: %s: return: isr: %b",
1555 	    dp->name, __func__, isr, INTR_BITS));
1556 
1557 	return (DDI_INTR_CLAIMED | flags);
1558 }
1559 
1560 /* ======================================================== */
1561 /*
1562  * HW depend MII routine
1563  */
1564 /* ======================================================== */
1565 
1566 /*
1567  * MII routines for NS DP83815
1568  */
1569 static void
1570 sfe_mii_sync_dp83815(struct gem_dev *dp)
1571 {
1572 	/* do nothing */
1573 }
1574 
1575 static uint16_t
1576 sfe_mii_read_dp83815(struct gem_dev *dp, uint_t offset)
1577 {
1578 	DPRINTF(4, (CE_CONT, CONS"%s: %s: offset 0x%x",
1579 	    dp->name, __func__, offset));
1580 	return ((uint16_t)INL(dp, MII_REGS_BASE + offset*4));
1581 }
1582 
1583 static void
1584 sfe_mii_write_dp83815(struct gem_dev *dp, uint_t offset, uint16_t val)
1585 {
1586 	DPRINTF(4, (CE_CONT, CONS"%s: %s: offset 0x%x 0x%x",
1587 	    dp->name, __func__, offset, val));
1588 	OUTL(dp, MII_REGS_BASE + offset*4, val);
1589 }
1590 
1591 static int
1592 sfe_mii_config_dp83815(struct gem_dev *dp)
1593 {
1594 	uint32_t	srr;
1595 
1596 	srr = INL(dp, SRR) & SRR_REV;
1597 
1598 	DPRINTF(0, (CE_CONT, CONS "%s: srr:0x%04x %04x %04x %04x %04x %04x",
1599 	    dp->name, srr,
1600 	    INW(dp, 0x00cc),	/* PGSEL */
1601 	    INW(dp, 0x00e4),	/* PMDCSR */
1602 	    INW(dp, 0x00fc),	/* TSTDAT */
1603 	    INW(dp, 0x00f4),	/* DSPCFG */
1604 	    INW(dp, 0x00f8)));	/* SDCFG */
1605 
1606 	if (srr == SRR_REV_DP83815CVNG) {
1607 		/*
1608 		 * The NS datasheet says that DP83815CVNG needs the following
1609 		 * registers to be patched to optimize its performance.
1610 		 * A report said that CRC errors on RX disappeared
1611 		 * with the patch.
1612 		 */
1613 		OUTW(dp, 0x00cc, 0x0001);	/* PGSEL */
1614 		OUTW(dp, 0x00e4, 0x189c);	/* PMDCSR */
1615 		OUTW(dp, 0x00fc, 0x0000);	/* TSTDAT */
1616 		OUTW(dp, 0x00f4, 0x5040);	/* DSPCFG */
1617 		OUTW(dp, 0x00f8, 0x008c);	/* SDCFG */
1618 		OUTW(dp, 0x00cc, 0x0000);	/* PGSEL */
1619 
1620 		DPRINTF(0, (CE_CONT,
1621 		    CONS "%s: PHY patched %04x %04x %04x %04x %04x",
1622 		    dp->name,
1623 		    INW(dp, 0x00cc),	/* PGSEL */
1624 		    INW(dp, 0x00e4),	/* PMDCSR */
1625 		    INW(dp, 0x00fc),	/* TSTDAT */
1626 		    INW(dp, 0x00f4),	/* DSPCFG */
1627 		    INW(dp, 0x00f8)));	/* SDCFG */
1628 	} else if (((srr ^ SRR_REV_DP83815DVNG) & 0xff00) == 0 ||
1629 	    ((srr ^ SRR_REV_DP83816AVNG) & 0xff00) == 0) {
1630 		/*
1631 		 * Additional patches for later chipsets
1632 		 */
1633 		OUTW(dp, 0x00cc, 0x0001);	/* PGSEL */
1634 		OUTW(dp, 0x00e4, 0x189c);	/* PMDCSR */
1635 		OUTW(dp, 0x00cc, 0x0000);	/* PGSEL */
1636 
1637 		DPRINTF(0, (CE_CONT,
1638 		    CONS "%s: PHY patched %04x %04x",
1639 		    dp->name,
1640 		    INW(dp, 0x00cc),	/* PGSEL */
1641 		    INW(dp, 0x00e4)));	/* PMDCSR */
1642 	}
1643 
1644 	return (gem_mii_config_default(dp));
1645 }
1646 
1647 static int
1648 sfe_mii_probe_dp83815(struct gem_dev *dp)
1649 {
1650 	uint32_t	val;
1651 
1652 	/* try external phy first */
1653 	DPRINTF(0, (CE_CONT, CONS "%s: %s: trying external phy",
1654 	    dp->name, __func__));
1655 	dp->mii_phy_addr = 0;
1656 	dp->gc.gc_mii_sync = &sfe_mii_sync_sis900;
1657 	dp->gc.gc_mii_read = &sfe_mii_read_sis900;
1658 	dp->gc.gc_mii_write = &sfe_mii_write_sis900;
1659 
1660 	val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
1661 	OUTL(dp, CFG, val | CFG_EXT_PHY | CFG_PHY_DIS);
1662 
1663 	if (gem_mii_probe_default(dp) == GEM_SUCCESS) {
1664 		return (GEM_SUCCESS);
1665 	}
1666 
1667 	/* switch to internal phy */
1668 	DPRINTF(0, (CE_CONT, CONS "%s: %s: switching to internal phy",
1669 	    dp->name, __func__));
1670 	dp->mii_phy_addr = -1;
1671 	dp->gc.gc_mii_sync = &sfe_mii_sync_dp83815;
1672 	dp->gc.gc_mii_read = &sfe_mii_read_dp83815;
1673 	dp->gc.gc_mii_write = &sfe_mii_write_dp83815;
1674 
1675 	val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
1676 	OUTL(dp, CFG, val | CFG_PAUSE_ADV | CFG_PHY_RST);
1677 	drv_usecwait(100);	/* keep the RST bit asserted for a while */
1678 	OUTL(dp, CFG, val | CFG_PAUSE_ADV);
1679 
1680 	/* wait for PHY reset */
1681 	delay(drv_usectohz(10000));
1682 
1683 	return (gem_mii_probe_default(dp));
1684 }
1685 
1686 static int
1687 sfe_mii_init_dp83815(struct gem_dev *dp)
1688 {
1689 	uint32_t	val;
1690 
1691 	val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
1692 
1693 	if (dp->mii_phy_addr == -1) {
1694 		/* select internal phy */
1695 		OUTL(dp, CFG, val | CFG_PAUSE_ADV);
1696 	} else {
1697 		/* select external phy */
1698 		OUTL(dp, CFG, val | CFG_EXT_PHY | CFG_PHY_DIS);
1699 	}
1700 
1701 	return (GEM_SUCCESS);
1702 }
1703 
1704 /*
1705  * MII routines for SiS900
1706  */
1707 #define	MDIO_DELAY(dp)	{(void) INL(dp, MEAR); (void) INL(dp, MEAR); }
1708 static void
1709 sfe_mii_sync_sis900(struct gem_dev *dp)
1710 {
1711 	int	i;
1712 
1713 	/* send 32 ONEs to make the MII line idle */
1714 	for (i = 0; i < 32; i++) {
1715 		OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO);
1716 		MDIO_DELAY(dp);
1717 		OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO | MEAR_MDC);
1718 		MDIO_DELAY(dp);
1719 	}
1720 }
1721 
1722 static int
1723 sfe_mii_config_sis900(struct gem_dev *dp)
1724 {
1725 	struct sfe_dev	*lp = dp->private;
1726 
1727 	/* Do chip depend setup */
1728 	if ((dp->mii_phy_id & PHY_MASK) == PHY_ICS1893) {
1729 		/* workaround for ICS1893 PHY */
1730 		gem_mii_write(dp, 0x0018, 0xD200);
1731 	}
1732 
1733 	if (lp->revid == SIS630E_900_REV) {
1734 		/*
1735 		 * SiS 630E has bugs on default values
1736 		 * of PHY registers
1737 		 */
1738 		gem_mii_write(dp, MII_AN_ADVERT, 0x05e1);
1739 		gem_mii_write(dp, MII_CONFIG1, 0x0022);
1740 		gem_mii_write(dp, MII_CONFIG2, 0xff00);
1741 		gem_mii_write(dp, MII_MASK,    0xffc0);
1742 	}
1743 	sfe_set_eq_sis630(dp);
1744 
1745 	return (gem_mii_config_default(dp));
1746 }
1747 
1748 static uint16_t
1749 sfe_mii_read_sis900(struct gem_dev *dp, uint_t reg)
1750 {
1751 	uint32_t	cmd;
1752 	uint16_t	ret;
1753 	int		i;
1754 	uint32_t	data;
1755 
1756 	cmd = MII_READ_CMD(dp->mii_phy_addr, reg);
1757 
1758 	for (i = 31; i >= 18; i--) {
1759 		data = ((cmd >> i) & 1) <<  MEAR_MDIO_SHIFT;
1760 		OUTL(dp, MEAR, data | MEAR_MDDIR);
1761 		MDIO_DELAY(dp);
1762 		OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC);
1763 		MDIO_DELAY(dp);
1764 	}
1765 
1766 	/* turn around cycle */
1767 	OUTL(dp, MEAR, 0);
1768 	MDIO_DELAY(dp);
1769 
1770 	/* get response from PHY */
1771 	OUTL(dp, MEAR, MEAR_MDC);
1772 	MDIO_DELAY(dp);
1773 
1774 	OUTL(dp, MEAR, 0);
1775 #if DEBUG_LEVEL > 0
1776 	(void) INL(dp, MEAR);	/* delay */
1777 	if (INL(dp, MEAR) & MEAR_MDIO) {
1778 		cmn_err(CE_WARN, "%s: PHY@%d did not respond",
1779 		    dp->name, dp->mii_phy_addr);
1780 	}
1781 #else
1782 	MDIO_DELAY(dp);
1783 #endif
1784 	/* terminate response cycle */
1785 	OUTL(dp, MEAR, MEAR_MDC);
1786 	MDIO_DELAY(dp);
1787 
1788 	ret = 0;	/* to avoid lint errors */
1789 	for (i = 16; i > 0; i--) {
1790 		OUTL(dp, MEAR, 0);
1791 		(void) INL(dp, MEAR);	/* delay */
1792 		ret = (ret << 1) | ((INL(dp, MEAR) >> MEAR_MDIO_SHIFT) & 1);
1793 		OUTL(dp, MEAR, MEAR_MDC);
1794 		MDIO_DELAY(dp);
1795 	}
1796 
1797 	/* send two idle(Z) bits to terminate the read cycle */
1798 	for (i = 0; i < 2; i++) {
1799 		OUTL(dp, MEAR, 0);
1800 		MDIO_DELAY(dp);
1801 		OUTL(dp, MEAR, MEAR_MDC);
1802 		MDIO_DELAY(dp);
1803 	}
1804 
1805 	return (ret);
1806 }
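
/*
 * Editor's note: the bit-banged sequence above follows IEEE 802.3
 * clause 22 MDIO framing.  Assuming MII_READ_CMD() in sfe_mii.h packs
 * the usual 14 significant bits into bits 31..18, the frame shifted
 * out MSB first is:
 *
 *	01	start
 *	10	read opcode
 *	aaaaa	5-bit PHY address
 *	rrrrr	5-bit register address
 *
 * followed by a turnaround cycle in which the driver releases the
 * line, and 16 data bits clocked in from the PHY.
 */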
1807 
1808 static void
1809 sfe_mii_write_sis900(struct gem_dev *dp, uint_t reg, uint16_t val)
1810 {
1811 	uint32_t	cmd;
1812 	int		i;
1813 	uint32_t	data;
1814 
1815 	cmd = MII_WRITE_CMD(dp->mii_phy_addr, reg, val);
1816 
1817 	for (i = 31; i >= 0; i--) {
1818 		data = ((cmd >> i) & 1) << MEAR_MDIO_SHIFT;
1819 		OUTL(dp, MEAR, data | MEAR_MDDIR);
1820 		MDIO_DELAY(dp);
1821 		OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC);
1822 		MDIO_DELAY(dp);
1823 	}
1824 
1825 	/* send two idle(Z) bits to terminate the write cycle. */
1826 	for (i = 0; i < 2; i++) {
1827 		OUTL(dp, MEAR, 0);
1828 		MDIO_DELAY(dp);
1829 		OUTL(dp, MEAR, MEAR_MDC);
1830 		MDIO_DELAY(dp);
1831 	}
1832 }
1833 #undef MDIO_DELAY
1834 
1835 static void
1836 sfe_set_eq_sis630(struct gem_dev *dp)
1837 {
1838 	uint16_t	reg14h;
1839 	uint16_t	eq_value;
1840 	uint16_t	max_value;
1841 	uint16_t	min_value;
1842 	int		i;
1843 	uint8_t		rev;
1844 	struct sfe_dev	*lp = dp->private;
1845 
1846 	rev = lp->revid;
1847 
1848 	if (!(rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
1849 	    rev == SIS630A_900_REV || rev == SIS630ET_900_REV)) {
1850 		/* it doesn't have an internal PHY */
1851 		return;
1852 	}
1853 
1854 	if (dp->mii_state == MII_STATE_LINKUP) {
1855 		reg14h = gem_mii_read(dp, MII_RESV);
1856 		gem_mii_write(dp, MII_RESV, (0x2200 | reg14h) & 0xBFFF);
1857 
1858 		eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3;
1859 		max_value = min_value = eq_value;
1860 		for (i = 1; i < 10; i++) {
1861 			eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3;
1862 			max_value = max(eq_value, max_value);
1863 			min_value = min(eq_value, min_value);
1864 		}
1865 
1866 		/* for 630E, rule to determine the equalizer value */
1867 		if (rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
1868 		    rev == SIS630ET_900_REV) {
1869 			if (max_value < 5) {
1870 				eq_value = max_value;
1871 			} else if (5 <= max_value && max_value < 15) {
1872 				eq_value =
1873 				    max(max_value + 1,
1874 				    min_value + 2);
1875 			} else if (15 <= max_value) {
1876 				eq_value =
1877 				    max(max_value + 5,
1878 				    min_value + 6);
1879 			}
1880 		}
1881 		/* for 630B0&B1, rule to determine the equalizer value */
1882 		else
1883 		if (rev == SIS630A_900_REV &&
1884 		    (lp->bridge_revid == SIS630B0 ||
1885 		    lp->bridge_revid == SIS630B1)) {
1886 
1887 			if (max_value == 0) {
1888 				eq_value = 3;
1889 			} else {
1890 				eq_value = (max_value + min_value + 1)/2;
1891 			}
1892 		}
1893 		/* write equalizer value and setting */
1894 		reg14h = gem_mii_read(dp, MII_RESV) & ~0x02f8;
1895 		reg14h |= 0x6000 | (eq_value << 3);
1896 		gem_mii_write(dp, MII_RESV, reg14h);
1897 	} else {
1898 		reg14h = (gem_mii_read(dp, MII_RESV) & ~0x4000) | 0x2000;
1899 		if (rev == SIS630A_900_REV &&
1900 		    (lp->bridge_revid == SIS630B0 ||
1901 		    lp->bridge_revid == SIS630B1)) {
1902 
1903 			reg14h |= 0x0200;
1904 		}
1905 		gem_mii_write(dp, MII_RESV, reg14h);
1906 	}
1907 }
1908 
1909 /* ======================================================== */
1910 /*
1911  * OS depend (device driver) routine
1912  */
1913 /* ======================================================== */
1914 static void
1915 sfe_chipinfo_init_sis900(struct gem_dev *dp)
1916 {
1917 	int		rev;
1918 	struct sfe_dev	*lp = (struct sfe_dev *)dp->private;
1919 
1920 	rev = lp->revid;
1921 
1922 	if (rev == SIS962_900_REV /* 0x91 */) {
1923 		/* sis962 or later */
1924 		lp->get_mac_addr = &sfe_get_mac_addr_sis962;
1925 	} else {
1926 		/* sis900 */
1927 		lp->get_mac_addr = &sfe_get_mac_addr_sis900;
1928 	}
1929 
1930 	lp->bridge_revid = 0;
1931 
1932 	if (rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
1933 	    rev == SIS630A_900_REV || rev ==  SIS630ET_900_REV) {
1934 		/*
1935 		 * read the host bridge revision; sfe_set_eq_sis630() needs it
1936 		 */
1937 		dev_info_t	*bridge;
1938 		ddi_acc_handle_t bridge_handle;
1939 
1940 		if ((bridge = sfe_search_pci_dev(0x1039, 0x630)) == NULL) {
1941 			cmn_err(CE_WARN,
1942 			    "%s: cannot find host bridge (pci1039,630)",
1943 			    dp->name);
1944 			return;
1945 		}
1946 
1947 		if (pci_config_setup(bridge, &bridge_handle) != DDI_SUCCESS) {
1948 			cmn_err(CE_WARN, "%s: pci_config_setup failed",
1949 			    dp->name);
1950 			return;
1951 		}
1952 
1953 		lp->bridge_revid =
1954 		    pci_config_get8(bridge_handle, PCI_CONF_REVID);
1955 		pci_config_teardown(&bridge_handle);
1956 	}
1957 }
1958 
1959 static int
1960 sfe_attach_chip(struct gem_dev *dp)
1961 {
1962 	struct sfe_dev		*lp = (struct sfe_dev *)dp->private;
1963 
1964 	DPRINTF(4, (CE_CONT, CONS "!%s: %s called", dp->name, __func__));
1965 
1966 	/* setup chip-depend get_mac_address function */
1967 	if (lp->chip->chip_type == CHIPTYPE_SIS900) {
1968 		sfe_chipinfo_init_sis900(dp);
1969 	} else {
1970 		lp->get_mac_addr = &sfe_get_mac_addr_dp83815;
1971 	}
1972 
1973 	/* read MAC address */
1974 	if (!(lp->get_mac_addr)(dp)) {
1975 		cmn_err(CE_WARN,
1976 		    "!%s: %s: failed to get factory mac address;"
1977 		    " please specify a mac address in sfe.conf",
1978 		    dp->name, __func__);
1979 		return (GEM_FAILURE);
1980 	}
1981 
1982 	if (lp->chip->chip_type == CHIPTYPE_DP83815) {
1983 		dp->mii_phy_addr = -1;	/* no need to scan PHY */
1984 		dp->misc_flag |= GEM_VLAN_SOFT;
1985 		dp->txthr += 4; /* VTAG_SIZE */
1986 	}
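	/* the tx drain threshold must fit within the tx FIFO */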
1987 	dp->txthr = min(dp->txthr, TXFIFOSIZE - 2);
1988 
1989 	return (GEM_SUCCESS);
1990 }
1991 
1992 static int
1993 sfeattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1994 {
1995 	int			unit;
1996 	const char		*drv_name;
1997 	int			i;
1998 	ddi_acc_handle_t	conf_handle;
1999 	uint16_t		vid;
2000 	uint16_t		did;
2001 	uint8_t			rev;
2002 #ifdef DEBUG_LEVEL
2003 	uint32_t		iline;
2004 	uint8_t			latim;
2005 #endif
2006 	struct chip_info	*p;
2007 	struct gem_dev		*dp;
2008 	struct sfe_dev		*lp;
2009 	caddr_t			base;
2010 	ddi_acc_handle_t	regs_ha;
2011 	struct gem_conf		*gcp;
2012 
2013 	unit = ddi_get_instance(dip);
2014 	drv_name = ddi_driver_name(dip);
2015 
2016 	DPRINTF(3, (CE_CONT, CONS "%s%d: sfeattach: called", drv_name, unit));
2017 
2018 	/*
2019 	 * Common initialization code after power-up
2020 	 */
2021 	if (pci_config_setup(dip, &conf_handle) != DDI_SUCCESS) {
2022 		cmn_err(CE_WARN, "%s%d: pci_config_setup failed",
2023 		    drv_name, unit);
2024 		goto err;
2025 	}
2026 
2027 	vid  = pci_config_get16(conf_handle, PCI_CONF_VENID);
2028 	did  = pci_config_get16(conf_handle, PCI_CONF_DEVID);
2029 	rev  = pci_config_get8(conf_handle, PCI_CONF_REVID);
2030 #ifdef DEBUG_LEVEL
2031 	iline = pci_config_get32(conf_handle, PCI_CONF_ILINE);
2032 	latim = pci_config_get8(conf_handle, PCI_CONF_LATENCY_TIMER);
2033 #endif
2034 #ifdef DEBUG_BUILT_IN_SIS900
2035 	rev  = SIS630E_900_REV;
2036 #endif
2037 	for (i = 0, p = sfe_chiptbl; i < CHIPTABLESIZE; i++, p++) {
2038 		if (p->venid == vid && p->devid == did) {
2039 			/* found */
2040 			goto chip_found;
2041 		}
2042 	}
2043 
2044 	/* Not found */
2045 	cmn_err(CE_WARN,
2046 	    "%s%d: sfeattach: unsupported PCI venid/devid (0x%x, 0x%x)",
2047 	    drv_name, unit, vid, did);
2048 	pci_config_teardown(&conf_handle);
2049 	goto err;
2050 
2051 chip_found:
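	/* enable I/O space, memory space and bus-master accesses */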
2052 	pci_config_put16(conf_handle, PCI_CONF_COMM,
2053 	    PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME |
2054 	    pci_config_get16(conf_handle, PCI_CONF_COMM));
2055 
2056 	/* ensure D0 mode */
2057 	(void) gem_pci_set_power_state(dip, conf_handle, PCI_PMCSR_D0);
2058 
2059 	pci_config_teardown(&conf_handle);
2060 
2061 	switch (cmd) {
2062 	case DDI_RESUME:
2063 		return (gem_resume(dip));
2064 
2065 	case DDI_ATTACH:
2066 
2067 		DPRINTF(0, (CE_CONT,
2068 		    CONS "%s%d: ilr 0x%08x, latency_timer:0x%02x",
2069 		    drv_name, unit, iline, latim));
2070 
2071 		/*
2072 		 * Map in the device registers.
2073 		 */
2074 		if (gem_pci_regs_map_setup(dip,
2075 		    (sfe_use_pcimemspace && p->chip_type == CHIPTYPE_DP83815)
2076 		    ? PCI_ADDR_MEM32 : PCI_ADDR_IO, PCI_ADDR_MASK,
2077 		    &sfe_dev_attr, &base, &regs_ha) != DDI_SUCCESS) {
2078 			cmn_err(CE_WARN,
2079 			    "%s%d: ddi_regs_map_setup failed",
2080 			    drv_name, unit);
2081 			goto err;
2082 		}
2083 
2084 		/*
2085 		 * construct gem configuration
2086 		 */
2087 		gcp = kmem_zalloc(sizeof (*gcp), KM_SLEEP);
2088 
2089 		/* name */
2090 		(void) sprintf(gcp->gc_name, "%s%d", drv_name, unit);
2091 
2092 		/* consistency on tx and rx */
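		/*
		 * note: gem alignment fields hold (2^n - 1) masks;
		 * dma_attr_align below is derived as mask + 1
		 */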
2093 		gcp->gc_tx_buf_align = sizeof (uint8_t) - 1;
2094 		gcp->gc_tx_max_frags = MAXTXFRAGS;
2095 		gcp->gc_tx_max_descs_per_pkt = gcp->gc_tx_max_frags;
2096 		gcp->gc_tx_desc_unit_shift = 4;	/* 16 byte */
2097 		gcp->gc_tx_buf_size  = TX_BUF_SIZE;
2098 		gcp->gc_tx_buf_limit = gcp->gc_tx_buf_size;
2099 		gcp->gc_tx_ring_size = TX_RING_SIZE;
2100 		gcp->gc_tx_ring_limit = gcp->gc_tx_ring_size;
2101 		gcp->gc_tx_auto_pad  = B_TRUE;
2102 		gcp->gc_tx_copy_thresh = sfe_tx_copy_thresh;
2103 		gcp->gc_tx_desc_write_oo = B_TRUE;
2104 
2105 		gcp->gc_rx_buf_align = sizeof (uint8_t) - 1;
2106 		gcp->gc_rx_max_frags = MAXRXFRAGS;
2107 		gcp->gc_rx_desc_unit_shift = 4;
2108 		gcp->gc_rx_ring_size = RX_RING_SIZE;
2109 		gcp->gc_rx_buf_max   = RX_BUF_SIZE;
2110 		gcp->gc_rx_copy_thresh = sfe_rx_copy_thresh;
2111 
2112 		/* map attributes */
2113 		gcp->gc_dev_attr = sfe_dev_attr;
2114 		gcp->gc_buf_attr = sfe_buf_attr;
2115 		gcp->gc_desc_attr = sfe_buf_attr;
2116 
2117 		/* dma attributes */
2118 		gcp->gc_dma_attr_desc = sfe_dma_attr_desc;
2119 
2120 		gcp->gc_dma_attr_txbuf = sfe_dma_attr_buf;
2121 		gcp->gc_dma_attr_txbuf.dma_attr_align = gcp->gc_tx_buf_align+1;
2122 		gcp->gc_dma_attr_txbuf.dma_attr_sgllen = gcp->gc_tx_max_frags;
2123 
2124 		gcp->gc_dma_attr_rxbuf = sfe_dma_attr_buf;
2125 		gcp->gc_dma_attr_rxbuf.dma_attr_align = gcp->gc_rx_buf_align+1;
2126 		gcp->gc_dma_attr_rxbuf.dma_attr_sgllen = gcp->gc_rx_max_frags;
2127 
2128 		/* time out parameters */
2129 		gcp->gc_tx_timeout = 3*ONESEC;
2130 		gcp->gc_tx_timeout_interval = ONESEC;
2131 		if (p->chip_type == CHIPTYPE_DP83815) {
2132 			/* workaround for tx hang */
2133 			gcp->gc_tx_timeout_interval = ONESEC/20; /* 50 ms */
2134 		}
2135 
2136 		/* MII timeout parameters */
2137 		gcp->gc_mii_link_watch_interval = ONESEC;
2138 		gcp->gc_mii_an_watch_interval   = ONESEC/5;
2139 		gcp->gc_mii_reset_timeout = MII_RESET_TIMEOUT;	/* 1 sec */
2140 		gcp->gc_mii_an_timeout = MII_AN_TIMEOUT;	/* 5 sec */
2141 		gcp->gc_mii_an_wait = 0;
2142 		gcp->gc_mii_linkdown_timeout = MII_LINKDOWN_TIMEOUT;
2143 
2144 		/* setting for general PHY */
2145 		gcp->gc_mii_an_delay = 0;
2146 		gcp->gc_mii_linkdown_action = MII_ACTION_RSA;
2147 		gcp->gc_mii_linkdown_timeout_action = MII_ACTION_RESET;
2148 		gcp->gc_mii_dont_reset = B_FALSE;
2149 
2150 
2151 		/* I/O methods */
2152 
2153 		/* mac operation */
2154 		gcp->gc_attach_chip = &sfe_attach_chip;
2155 		if (p->chip_type == CHIPTYPE_DP83815) {
2156 			gcp->gc_reset_chip = &sfe_reset_chip_dp83815;
2157 		} else {
2158 			gcp->gc_reset_chip = &sfe_reset_chip_sis900;
2159 		}
2160 		gcp->gc_init_chip  = &sfe_init_chip;
2161 		gcp->gc_start_chip = &sfe_start_chip;
2162 		gcp->gc_stop_chip  = &sfe_stop_chip;
2163 #ifdef USE_MULTICAST_HASHTBL
2164 		gcp->gc_multicast_hash = &sfe_mcast_hash;
2165 #endif
2166 		if (p->chip_type == CHIPTYPE_DP83815) {
2167 			gcp->gc_set_rx_filter = &sfe_set_rx_filter_dp83815;
2168 		} else {
2169 			gcp->gc_set_rx_filter = &sfe_set_rx_filter_sis900;
2170 		}
2171 		gcp->gc_set_media = &sfe_set_media;
2172 		gcp->gc_get_stats = &sfe_get_stats;
2173 		gcp->gc_interrupt = &sfe_interrupt;
2174 
2175 		/* descriptor operation */
2176 		gcp->gc_tx_desc_write = &sfe_tx_desc_write;
2177 		gcp->gc_tx_start = &sfe_tx_start;
2178 		gcp->gc_rx_desc_write = &sfe_rx_desc_write;
2179 		gcp->gc_rx_start = NULL;
2180 
2181 		gcp->gc_tx_desc_stat = &sfe_tx_desc_stat;
2182 		gcp->gc_rx_desc_stat = &sfe_rx_desc_stat;
2183 		gcp->gc_tx_desc_init = &sfe_tx_desc_init;
2184 		gcp->gc_rx_desc_init = &sfe_rx_desc_init;
2185 		gcp->gc_tx_desc_clean = &sfe_tx_desc_clean;
2186 		gcp->gc_rx_desc_clean = &sfe_rx_desc_clean;
2187 
2188 		/* mii operations */
2189 		if (p->chip_type == CHIPTYPE_DP83815) {
2190 			gcp->gc_mii_probe = &sfe_mii_probe_dp83815;
2191 			gcp->gc_mii_init = &sfe_mii_init_dp83815;
2192 			gcp->gc_mii_config = &sfe_mii_config_dp83815;
2193 			gcp->gc_mii_sync = &sfe_mii_sync_dp83815;
2194 			gcp->gc_mii_read = &sfe_mii_read_dp83815;
2195 			gcp->gc_mii_write = &sfe_mii_write_dp83815;
2196 			gcp->gc_mii_tune_phy = NULL;
2197 			gcp->gc_flow_control = FLOW_CONTROL_NONE;
2198 		} else {
2199 			gcp->gc_mii_probe = &gem_mii_probe_default;
2200 			gcp->gc_mii_init = NULL;
2201 			gcp->gc_mii_config = &sfe_mii_config_sis900;
2202 			gcp->gc_mii_sync = &sfe_mii_sync_sis900;
2203 			gcp->gc_mii_read = &sfe_mii_read_sis900;
2204 			gcp->gc_mii_write = &sfe_mii_write_sis900;
2205 			gcp->gc_mii_tune_phy = &sfe_set_eq_sis630;
2206 			gcp->gc_flow_control = FLOW_CONTROL_RX_PAUSE;
2207 		}
2208 
2209 		lp = kmem_zalloc(sizeof (*lp), KM_SLEEP);
2210 		lp->chip = p;
2211 		lp->revid = rev;
2212 		lp->our_intr_bits = 0;
2213 		lp->isr_pended = 0;
2214 
2215 		cmn_err(CE_CONT, CONS "%s%d: chip:%s rev:0x%02x",
2216 		    drv_name, unit, p->chip_name, rev);
2217 
2218 		dp = gem_do_attach(dip, 0, gcp, base, &regs_ha,
2219 		    lp, sizeof (*lp));
2220 		kmem_free(gcp, sizeof (*gcp));
2221 
2222 		if (dp == NULL) {
2223 			goto err_freelp;
2224 		}
2225 
2226 		return (DDI_SUCCESS);
2227 
2228 err_freelp:
2229 		kmem_free(lp, sizeof (struct sfe_dev));
2230 err:
2231 		return (DDI_FAILURE);
2232 	}
2233 	return (DDI_FAILURE);
2234 }
2235 
2236 static int
2237 sfedetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2238 {
2239 	switch (cmd) {
2240 	case DDI_SUSPEND:
2241 		return (gem_suspend(dip));
2242 
2243 	case DDI_DETACH:
2244 		return (gem_do_detach(dip));
2245 	}
2246 	return (DDI_FAILURE);
2247 }
2248 
2249 /*
2250  * quiesce(9E) entry point.
2251  *
2252  * This function is called when the system is single-threaded at high
2253  * PIL with preemption disabled. Therefore, this function must not be
2254  * blocked.
2255  *
2256  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
2257  * DDI_FAILURE indicates an error condition and should almost never happen.
2258  */
2259 #ifdef	__sparc
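/*
 * sparc falls back to the DDI stub, presumably because fast reboot,
 * the main consumer of quiesce(9E), is x86-only.
 */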
2260 #define	sfe_quiesce	ddi_quiesce_not_supported
2261 #else
2262 static int
2263 sfe_quiesce(dev_info_t *dip)
2264 {
2265 	struct gem_dev	*dp;
2266 	int	ret = 0;
2267 
2268 	dp = GEM_GET_DEV(dip);
2269 
2270 	if (dp == NULL)
2271 		return (DDI_FAILURE);
2272 
2273 	ret = sfe_stop_chip_quiesce(dp);
2274 
2275 	return (ret);
2276 }
2277 #endif
2278 
2279 /* ======================================================== */
2280 /*
2281  * OS depend (loadable streams driver) routine
2282  */
2283 /* ======================================================== */
2284 DDI_DEFINE_STREAM_OPS(sfe_ops, nulldev, nulldev, sfeattach, sfedetach,
2285     nodev, NULL, D_MP, NULL, sfe_quiesce);
2286 
2287 static struct modldrv modldrv = {
2288 	&mod_driverops,	/* Type of module.  This one is a driver */
2289 	ident,
2290 	&sfe_ops,	/* driver ops */
2291 };
2292 
2293 static struct modlinkage modlinkage = {
2294 	MODREV_1, &modldrv, NULL
2295 };
2296 
2297 /* ======================================================== */
2298 /*
2299  * Loadable module support
2300  */
2301 /* ======================================================== */
2302 int
2303 _init(void)
2304 {
2305 	int	status;
2306 
2307 	DPRINTF(2, (CE_CONT, CONS "sfe: _init: called"));
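	/*
	 * register with the gem framework first; this is undone
	 * below if mod_install() fails
	 */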
2308 	gem_mod_init(&sfe_ops, "sfe");
2309 	status = mod_install(&modlinkage);
2310 	if (status != DDI_SUCCESS) {
2311 		gem_mod_fini(&sfe_ops);
2312 	}
2313 	return (status);
2314 }
2315 
2316 /*
2317  * _fini : done
2318  */
2319 int
2320 _fini(void)
2321 {
2322 	int	status;
2323 
2324 	DPRINTF(2, (CE_CONT, CONS "sfe: _fini: called"));
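	/*
	 * mod_remove() fails while instances are still attached;
	 * tear down the gem state only on success
	 */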
2325 	status = mod_remove(&modlinkage);
2326 	if (status == DDI_SUCCESS) {
2327 		gem_mod_fini(&sfe_ops);
2328 	}
2329 	return (status);
2330 }
2331 
2332 int
2333 _info(struct modinfo *modinfop)
2334 {
2335 	return (mod_info(&modlinkage, modinfop));
2336 }
2337