1f8919bdaSduboff /*
2f8919bdaSduboff * sfe.c : DP83815/DP83816/SiS900 Fast Ethernet MAC driver for Solaris
3f8919bdaSduboff *
423d366e3Sduboff * Copyright (c) 2002-2008 Masayuki Murayama. All rights reserved.
5f8919bdaSduboff *
6f8919bdaSduboff * Redistribution and use in source and binary forms, with or without
7f8919bdaSduboff * modification, are permitted provided that the following conditions are met:
8f8919bdaSduboff *
9f8919bdaSduboff * 1. Redistributions of source code must retain the above copyright notice,
10f8919bdaSduboff * this list of conditions and the following disclaimer.
11f8919bdaSduboff *
12f8919bdaSduboff * 2. Redistributions in binary form must reproduce the above copyright notice,
13f8919bdaSduboff * this list of conditions and the following disclaimer in the documentation
14f8919bdaSduboff * and/or other materials provided with the distribution.
15f8919bdaSduboff *
16f8919bdaSduboff * 3. Neither the name of the author nor the names of its contributors may be
17f8919bdaSduboff * used to endorse or promote products derived from this software without
18f8919bdaSduboff * specific prior written permission.
19f8919bdaSduboff *
20f8919bdaSduboff * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21f8919bdaSduboff * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22f8919bdaSduboff * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23f8919bdaSduboff * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24f8919bdaSduboff * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25f8919bdaSduboff * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26f8919bdaSduboff * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27f8919bdaSduboff * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28f8919bdaSduboff * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29f8919bdaSduboff * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30f8919bdaSduboff * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31f8919bdaSduboff * DAMAGE.
32f8919bdaSduboff */
33f8919bdaSduboff
34*d67944fbSScott Rotondo /* Avoid undefined symbols on non-IA architectures */
35*d67944fbSScott Rotondo #pragma weak inb
36*d67944fbSScott Rotondo #pragma weak outb
37*d67944fbSScott Rotondo
3819397407SSherry Moore /*
3953560dfaSSherry Moore * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
4019397407SSherry Moore * Use is subject to license terms.
4119397407SSherry Moore */
42f8919bdaSduboff
43f8919bdaSduboff /*
44f8919bdaSduboff * System Header files.
45f8919bdaSduboff */
46f8919bdaSduboff #include <sys/types.h>
47f8919bdaSduboff #include <sys/conf.h>
48f8919bdaSduboff #include <sys/debug.h>
49f8919bdaSduboff #include <sys/kmem.h>
50f8919bdaSduboff #include <sys/modctl.h>
51f8919bdaSduboff #include <sys/errno.h>
52f8919bdaSduboff #include <sys/ddi.h>
53f8919bdaSduboff #include <sys/sunddi.h>
54f8919bdaSduboff #include <sys/byteorder.h>
55f8919bdaSduboff #include <sys/ethernet.h>
56f8919bdaSduboff #include <sys/pci.h>
57f8919bdaSduboff
58f8919bdaSduboff #include "sfe_mii.h"
59f8919bdaSduboff #include "sfe_util.h"
60f8919bdaSduboff #include "sfereg.h"
61f8919bdaSduboff
6253560dfaSSherry Moore char ident[] = "sis900/dp83815 driver v" "2.6.1t30os";
63f8919bdaSduboff
64f8919bdaSduboff /* Debugging support */
65f8919bdaSduboff #ifdef DEBUG_LEVEL
66f8919bdaSduboff static int sfe_debug = DEBUG_LEVEL;
67f8919bdaSduboff #if DEBUG_LEVEL > 4
68f8919bdaSduboff #define CONS "^"
69f8919bdaSduboff #else
70f8919bdaSduboff #define CONS "!"
71f8919bdaSduboff #endif
72f8919bdaSduboff #define DPRINTF(n, args) if (sfe_debug > (n)) cmn_err args
73f8919bdaSduboff #else
74f8919bdaSduboff #define CONS "!"
75f8919bdaSduboff #define DPRINTF(n, args)
76f8919bdaSduboff #endif
77f8919bdaSduboff
78f8919bdaSduboff /*
79f8919bdaSduboff * Useful macros and typedefs
80f8919bdaSduboff */
81f8919bdaSduboff #define ONESEC (drv_usectohz(1*1000000))
82f8919bdaSduboff #define ROUNDUP2(x, a) (((x) + (a) - 1) & ~((a) - 1))
83f8919bdaSduboff
84f8919bdaSduboff /*
85f8919bdaSduboff * Our configuration
86f8919bdaSduboff */
87f8919bdaSduboff #define MAXTXFRAGS 1
88f8919bdaSduboff #define MAXRXFRAGS 1
89f8919bdaSduboff
90f8919bdaSduboff #ifndef TX_BUF_SIZE
91f8919bdaSduboff #define TX_BUF_SIZE 64
92f8919bdaSduboff #endif
93f8919bdaSduboff #ifndef TX_RING_SIZE
94f8919bdaSduboff #if MAXTXFRAGS == 1
95f8919bdaSduboff #define TX_RING_SIZE TX_BUF_SIZE
96f8919bdaSduboff #else
97f8919bdaSduboff #define TX_RING_SIZE (TX_BUF_SIZE * 4)
98f8919bdaSduboff #endif
99f8919bdaSduboff #endif
100f8919bdaSduboff
101f8919bdaSduboff #ifndef RX_BUF_SIZE
102f8919bdaSduboff #define RX_BUF_SIZE 256
103f8919bdaSduboff #endif
104f8919bdaSduboff #ifndef RX_RING_SIZE
105f8919bdaSduboff #define RX_RING_SIZE RX_BUF_SIZE
106f8919bdaSduboff #endif
107f8919bdaSduboff
108f8919bdaSduboff #define OUR_INTR_BITS \
109f8919bdaSduboff (ISR_DPERR | ISR_SSERR | ISR_RMABT | ISR_RTABT | ISR_RXSOVR | \
110f8919bdaSduboff ISR_TXURN | ISR_TXDESC | ISR_TXERR | \
111f8919bdaSduboff ISR_RXORN | ISR_RXIDLE | ISR_RXOK | ISR_RXERR)
112f8919bdaSduboff
113f8919bdaSduboff #define USE_MULTICAST_HASHTBL
114f8919bdaSduboff
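/*
 * Copy thresholds, presumably consumed by the gem framework as its
 * bcopy/DMA-bind boundaries: packets no larger than these are copied
 * into preallocated buffers rather than DMA-bound directly.
 */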
115f8919bdaSduboff static int sfe_tx_copy_thresh = 256;
116f8919bdaSduboff static int sfe_rx_copy_thresh = 256;
117f8919bdaSduboff
118f8919bdaSduboff /* special PHY registers for SIS900 */
119f8919bdaSduboff #define MII_CONFIG1 0x0010
120f8919bdaSduboff #define MII_CONFIG2 0x0011
121f8919bdaSduboff #define MII_MASK 0x0013
122f8919bdaSduboff #define MII_RESV 0x0014
123f8919bdaSduboff
124f8919bdaSduboff #define PHY_MASK 0xfffffff0
125f8919bdaSduboff #define PHY_SIS900_INTERNAL 0x001d8000
126f8919bdaSduboff #define PHY_ICS1893 0x0015f440
127f8919bdaSduboff
128f8919bdaSduboff
129f8919bdaSduboff #define SFE_DESC_SIZE 16 /* including padding to round up to a power of 2 */
130f8919bdaSduboff
131f8919bdaSduboff /*
132f8919bdaSduboff * Supported chips
133f8919bdaSduboff */
134f8919bdaSduboff struct chip_info {
135f8919bdaSduboff uint16_t venid;
136f8919bdaSduboff uint16_t devid;
137f8919bdaSduboff char *chip_name;
138f8919bdaSduboff int chip_type;
139f8919bdaSduboff #define CHIPTYPE_DP83815 0
140f8919bdaSduboff #define CHIPTYPE_SIS900 1
141f8919bdaSduboff };
142f8919bdaSduboff
143f8919bdaSduboff /*
144f8919bdaSduboff * Chip dependent MAC state
145f8919bdaSduboff */
146f8919bdaSduboff struct sfe_dev {
147f8919bdaSduboff /* misc HW information */
148f8919bdaSduboff struct chip_info *chip;
149f8919bdaSduboff uint32_t our_intr_bits;
15023d366e3Sduboff uint32_t isr_pended;
151f8919bdaSduboff uint32_t cr;
152f8919bdaSduboff uint_t tx_drain_threshold;
153f8919bdaSduboff uint_t tx_fill_threshold;
154f8919bdaSduboff uint_t rx_drain_threshold;
155f8919bdaSduboff uint_t rx_fill_threshold;
156f8919bdaSduboff uint8_t revid; /* revision from PCI configuration */
157f8919bdaSduboff boolean_t (*get_mac_addr)(struct gem_dev *);
158f8919bdaSduboff uint8_t mac_addr[ETHERADDRL];
159f8919bdaSduboff uint8_t bridge_revid;
160f8919bdaSduboff };
161f8919bdaSduboff
162f8919bdaSduboff /*
163f8919bdaSduboff * Hardware information
164f8919bdaSduboff */
165f8919bdaSduboff struct chip_info sfe_chiptbl[] = {
166f8919bdaSduboff { 0x1039, 0x0900, "SiS900", CHIPTYPE_SIS900, },
167f8919bdaSduboff { 0x100b, 0x0020, "DP83815/83816", CHIPTYPE_DP83815, },
168f8919bdaSduboff { 0x1039, 0x7016, "SiS7016", CHIPTYPE_SIS900, },
169f8919bdaSduboff };
170f8919bdaSduboff #define CHIPTABLESIZE (sizeof (sfe_chiptbl)/sizeof (struct chip_info))
171f8919bdaSduboff
172f8919bdaSduboff /* ======================================================== */
173f8919bdaSduboff
174f8919bdaSduboff /* mii operations */
175f8919bdaSduboff static void sfe_mii_sync_dp83815(struct gem_dev *);
176f8919bdaSduboff static void sfe_mii_sync_sis900(struct gem_dev *);
177f8919bdaSduboff static uint16_t sfe_mii_read_dp83815(struct gem_dev *, uint_t);
178f8919bdaSduboff static uint16_t sfe_mii_read_sis900(struct gem_dev *, uint_t);
179f8919bdaSduboff static void sfe_mii_write_dp83815(struct gem_dev *, uint_t, uint16_t);
180f8919bdaSduboff static void sfe_mii_write_sis900(struct gem_dev *, uint_t, uint16_t);
181f8919bdaSduboff static void sfe_set_eq_sis630(struct gem_dev *dp);
182f8919bdaSduboff /* nic operations */
183f8919bdaSduboff static int sfe_reset_chip_sis900(struct gem_dev *);
184f8919bdaSduboff static int sfe_reset_chip_dp83815(struct gem_dev *);
185f8919bdaSduboff static int sfe_init_chip(struct gem_dev *);
186f8919bdaSduboff static int sfe_start_chip(struct gem_dev *);
187f8919bdaSduboff static int sfe_stop_chip(struct gem_dev *);
188f8919bdaSduboff static int sfe_set_media(struct gem_dev *);
189f8919bdaSduboff static int sfe_set_rx_filter_dp83815(struct gem_dev *);
190f8919bdaSduboff static int sfe_set_rx_filter_sis900(struct gem_dev *);
191f8919bdaSduboff static int sfe_get_stats(struct gem_dev *);
192f8919bdaSduboff static int sfe_attach_chip(struct gem_dev *);
193f8919bdaSduboff
194f8919bdaSduboff /* descriptor operations */
195f8919bdaSduboff static int sfe_tx_desc_write(struct gem_dev *dp, int slot,
196f8919bdaSduboff ddi_dma_cookie_t *dmacookie, int frags, uint64_t flags);
197f8919bdaSduboff static void sfe_tx_start(struct gem_dev *dp, int startslot, int nslot);
198f8919bdaSduboff static void sfe_rx_desc_write(struct gem_dev *dp, int slot,
199f8919bdaSduboff ddi_dma_cookie_t *dmacookie, int frags);
200f8919bdaSduboff static uint_t sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc);
201f8919bdaSduboff static uint64_t sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc);
202f8919bdaSduboff
203f8919bdaSduboff static void sfe_tx_desc_init(struct gem_dev *dp, int slot);
204f8919bdaSduboff static void sfe_rx_desc_init(struct gem_dev *dp, int slot);
205f8919bdaSduboff static void sfe_tx_desc_clean(struct gem_dev *dp, int slot);
206f8919bdaSduboff static void sfe_rx_desc_clean(struct gem_dev *dp, int slot);
207f8919bdaSduboff
208f8919bdaSduboff /* interrupt handler */
209f8919bdaSduboff static uint_t sfe_interrupt(struct gem_dev *dp);
210f8919bdaSduboff
211f8919bdaSduboff /* ======================================================== */
212f8919bdaSduboff
213f8919bdaSduboff /* mapping attributes */
214f8919bdaSduboff /* Data access requirements. */
215f8919bdaSduboff static struct ddi_device_acc_attr sfe_dev_attr = {
216f8919bdaSduboff DDI_DEVICE_ATTR_V0,
217f8919bdaSduboff DDI_STRUCTURE_LE_ACC,
218f8919bdaSduboff DDI_STRICTORDER_ACC
219f8919bdaSduboff };
220f8919bdaSduboff
221f8919bdaSduboff /* On sparc, buffers should be native endian for speed */
222f8919bdaSduboff static struct ddi_device_acc_attr sfe_buf_attr = {
223f8919bdaSduboff DDI_DEVICE_ATTR_V0,
224f8919bdaSduboff DDI_NEVERSWAP_ACC, /* native endianness */
225f8919bdaSduboff DDI_STRICTORDER_ACC
226f8919bdaSduboff };
227f8919bdaSduboff
228f8919bdaSduboff static ddi_dma_attr_t sfe_dma_attr_buf = {
229f8919bdaSduboff DMA_ATTR_V0, /* dma_attr_version */
230f8919bdaSduboff 0, /* dma_attr_addr_lo */
231f8919bdaSduboff 0xffffffffull, /* dma_attr_addr_hi */
232f8919bdaSduboff 0x00000fffull, /* dma_attr_count_max */
233f8919bdaSduboff 0, /* patched later */ /* dma_attr_align */
234f8919bdaSduboff 0x000003fc, /* dma_attr_burstsizes */
235f8919bdaSduboff 1, /* dma_attr_minxfer */
236f8919bdaSduboff 0x00000fffull, /* dma_attr_maxxfer */
237f8919bdaSduboff 0xffffffffull, /* dma_attr_seg */
238f8919bdaSduboff 0, /* patched later */ /* dma_attr_sgllen */
239f8919bdaSduboff 1, /* dma_attr_granular */
240f8919bdaSduboff 0 /* dma_attr_flags */
241f8919bdaSduboff };
242f8919bdaSduboff
243f8919bdaSduboff static ddi_dma_attr_t sfe_dma_attr_desc = {
244f8919bdaSduboff DMA_ATTR_V0, /* dma_attr_version */
245f8919bdaSduboff 16, /* dma_attr_addr_lo */
246f8919bdaSduboff 0xffffffffull, /* dma_attr_addr_hi */
247f8919bdaSduboff 0xffffffffull, /* dma_attr_count_max */
248f8919bdaSduboff 16, /* dma_attr_align */
249f8919bdaSduboff 0x000003fc, /* dma_attr_burstsizes */
250f8919bdaSduboff 1, /* dma_attr_minxfer */
251f8919bdaSduboff 0xffffffffull, /* dma_attr_maxxfer */
252f8919bdaSduboff 0xffffffffull, /* dma_attr_seg */
253f8919bdaSduboff 1, /* dma_attr_sgllen */
254f8919bdaSduboff 1, /* dma_attr_granular */
255f8919bdaSduboff 0 /* dma_attr_flags */
256f8919bdaSduboff };
257f8919bdaSduboff
258f8919bdaSduboff uint32_t sfe_use_pcimemspace = 0;
259f8919bdaSduboff
260f8919bdaSduboff /* ======================================================== */
261f8919bdaSduboff /*
262f8919bdaSduboff * HW manipulation routines
263f8919bdaSduboff */
264f8919bdaSduboff /* ======================================================== */
265f8919bdaSduboff
266f8919bdaSduboff #define SFE_EEPROM_DELAY(dp) \
267f8919bdaSduboff { (void) INL(dp, EROMAR); (void) INL(dp, EROMAR); }
268f8919bdaSduboff #define EE_CMD_READ 6
269f8919bdaSduboff #define EE_CMD_SHIFT 6
270f8919bdaSduboff
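/*
 * sfe_read_eeprom() bit-bangs the serial EEPROM through the EROMAR
 * register: a 9-bit command (start/read opcode 110 plus a 6-bit word
 * address) is shifted out MSB first on EEDI while toggling EESK, then
 * 16 data bits are shifted back in MSB first from EEDO.
 */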
271f8919bdaSduboff static uint16_t
272f8919bdaSduboff sfe_read_eeprom(struct gem_dev *dp, uint_t offset)
273f8919bdaSduboff {
274f8919bdaSduboff int eedi;
275f8919bdaSduboff int i;
276f8919bdaSduboff uint16_t ret;
277f8919bdaSduboff
278f8919bdaSduboff 	/* ensure chip select is de-asserted */
279f8919bdaSduboff OUTL(dp, EROMAR, 0);
280f8919bdaSduboff SFE_EEPROM_DELAY(dp);
281f8919bdaSduboff OUTL(dp, EROMAR, EROMAR_EESK);
282f8919bdaSduboff SFE_EEPROM_DELAY(dp);
283f8919bdaSduboff
284f8919bdaSduboff /* assert chip select */
285f8919bdaSduboff offset |= EE_CMD_READ << EE_CMD_SHIFT;
286f8919bdaSduboff
287f8919bdaSduboff for (i = 8; i >= 0; i--) {
288f8919bdaSduboff /* make command */
289f8919bdaSduboff eedi = ((offset >> i) & 1) << EROMAR_EEDI_SHIFT;
290f8919bdaSduboff
291f8919bdaSduboff /* send 1 bit */
292f8919bdaSduboff OUTL(dp, EROMAR, EROMAR_EECS | eedi);
293f8919bdaSduboff SFE_EEPROM_DELAY(dp);
294f8919bdaSduboff OUTL(dp, EROMAR, EROMAR_EECS | eedi | EROMAR_EESK);
295f8919bdaSduboff SFE_EEPROM_DELAY(dp);
296f8919bdaSduboff }
297f8919bdaSduboff
298f8919bdaSduboff OUTL(dp, EROMAR, EROMAR_EECS);
299f8919bdaSduboff
300f8919bdaSduboff ret = 0;
301f8919bdaSduboff for (i = 0; i < 16; i++) {
302f8919bdaSduboff /* Get 1 bit */
303f8919bdaSduboff OUTL(dp, EROMAR, EROMAR_EECS);
304f8919bdaSduboff SFE_EEPROM_DELAY(dp);
305f8919bdaSduboff OUTL(dp, EROMAR, EROMAR_EECS | EROMAR_EESK);
306f8919bdaSduboff SFE_EEPROM_DELAY(dp);
307f8919bdaSduboff
308f8919bdaSduboff ret = (ret << 1) | ((INL(dp, EROMAR) >> EROMAR_EEDO_SHIFT) & 1);
309f8919bdaSduboff }
310f8919bdaSduboff
311f8919bdaSduboff OUTL(dp, EROMAR, 0);
312f8919bdaSduboff SFE_EEPROM_DELAY(dp);
313f8919bdaSduboff
314f8919bdaSduboff return (ret);
315f8919bdaSduboff }
316f8919bdaSduboff #undef SFE_EEPROM_DELAY
317f8919bdaSduboff
318f8919bdaSduboff static boolean_t
319f8919bdaSduboff sfe_get_mac_addr_dp83815(struct gem_dev *dp)
320f8919bdaSduboff {
321f8919bdaSduboff uint8_t *mac;
322f8919bdaSduboff uint_t val;
323f8919bdaSduboff int i;
324f8919bdaSduboff
325f8919bdaSduboff #define BITSET(p, ix, v) (p)[(ix)/8] |= ((v) ? 1 : 0) << ((ix) & 0x7)
326f8919bdaSduboff
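	/*
	 * The DP83815 stores the factory MAC address bit-packed across
	 * EEPROM words 0x6-0x9 with a one-bit offset, so it is
	 * reassembled one bit at a time with the BITSET() macro.
	 */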
327f8919bdaSduboff DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
328f8919bdaSduboff
329f8919bdaSduboff mac = dp->dev_addr.ether_addr_octet;
330f8919bdaSduboff
331f8919bdaSduboff /* first of all, clear MAC address buffer */
332f8919bdaSduboff bzero(mac, ETHERADDRL);
333f8919bdaSduboff
334f8919bdaSduboff /* get bit 0 */
335f8919bdaSduboff val = sfe_read_eeprom(dp, 0x6);
336f8919bdaSduboff BITSET(mac, 0, val & 1);
337f8919bdaSduboff
338f8919bdaSduboff /* get bit 1 - 16 */
339f8919bdaSduboff val = sfe_read_eeprom(dp, 0x7);
340f8919bdaSduboff for (i = 0; i < 16; i++) {
341f8919bdaSduboff BITSET(mac, 1 + i, val & (1 << (15 - i)));
342f8919bdaSduboff }
343f8919bdaSduboff
344f8919bdaSduboff /* get bit 17 - 32 */
345f8919bdaSduboff val = sfe_read_eeprom(dp, 0x8);
346f8919bdaSduboff for (i = 0; i < 16; i++) {
347f8919bdaSduboff BITSET(mac, 17 + i, val & (1 << (15 - i)));
348f8919bdaSduboff }
349f8919bdaSduboff
350f8919bdaSduboff /* get bit 33 - 47 */
351f8919bdaSduboff val = sfe_read_eeprom(dp, 0x9);
352f8919bdaSduboff for (i = 0; i < 15; i++) {
353f8919bdaSduboff BITSET(mac, 33 + i, val & (1 << (15 - i)));
354f8919bdaSduboff }
355f8919bdaSduboff
356f8919bdaSduboff return (B_TRUE);
357f8919bdaSduboff #undef BITSET
358f8919bdaSduboff }
359f8919bdaSduboff
360f8919bdaSduboff static boolean_t
361f8919bdaSduboff sfe_get_mac_addr_sis900(struct gem_dev *dp)
362f8919bdaSduboff {
363f8919bdaSduboff uint_t val;
364f8919bdaSduboff int i;
365f8919bdaSduboff uint8_t *mac;
366f8919bdaSduboff
367f8919bdaSduboff mac = dp->dev_addr.ether_addr_octet;
368f8919bdaSduboff
369f8919bdaSduboff for (i = 0; i < ETHERADDRL/2; i++) {
370f8919bdaSduboff val = sfe_read_eeprom(dp, 0x8 + i);
371f8919bdaSduboff *mac++ = (uint8_t)val;
372f8919bdaSduboff *mac++ = (uint8_t)(val >> 8);
373f8919bdaSduboff }
374f8919bdaSduboff
375f8919bdaSduboff return (B_TRUE);
376f8919bdaSduboff }
377f8919bdaSduboff
378f8919bdaSduboff static dev_info_t *
379f8919bdaSduboff sfe_search_pci_dev_subr(dev_info_t *cur_node, int vendor_id, int device_id)
380f8919bdaSduboff {
381f8919bdaSduboff dev_info_t *child_id;
382f8919bdaSduboff dev_info_t *ret;
383f8919bdaSduboff int vid, did;
384f8919bdaSduboff
385f8919bdaSduboff if (cur_node == NULL) {
386f8919bdaSduboff return (NULL);
387f8919bdaSduboff }
388f8919bdaSduboff
389f8919bdaSduboff /* check brothers */
390f8919bdaSduboff do {
391f8919bdaSduboff vid = ddi_prop_get_int(DDI_DEV_T_ANY, cur_node,
392f8919bdaSduboff DDI_PROP_DONTPASS, "vendor-id", -1);
393f8919bdaSduboff did = ddi_prop_get_int(DDI_DEV_T_ANY, cur_node,
394f8919bdaSduboff DDI_PROP_DONTPASS, "device-id", -1);
395f8919bdaSduboff
396f8919bdaSduboff if (vid == vendor_id && did == device_id) {
397f8919bdaSduboff /* found */
398f8919bdaSduboff return (cur_node);
399f8919bdaSduboff }
400f8919bdaSduboff
401f8919bdaSduboff /* check children */
402f8919bdaSduboff if ((child_id = ddi_get_child(cur_node)) != NULL) {
403f8919bdaSduboff if ((ret = sfe_search_pci_dev_subr(child_id,
404f8919bdaSduboff vendor_id, device_id)) != NULL) {
405f8919bdaSduboff return (ret);
406f8919bdaSduboff }
407f8919bdaSduboff }
408f8919bdaSduboff
409f8919bdaSduboff } while ((cur_node = ddi_get_next_sibling(cur_node)) != NULL);
410f8919bdaSduboff
411f8919bdaSduboff /* not found */
412f8919bdaSduboff return (NULL);
413f8919bdaSduboff }
414f8919bdaSduboff
415f8919bdaSduboff static dev_info_t *
416f8919bdaSduboff sfe_search_pci_dev(int vendor_id, int device_id)
417f8919bdaSduboff {
418f8919bdaSduboff return (sfe_search_pci_dev_subr(ddi_root_node(), vendor_id, device_id));
419f8919bdaSduboff }
420f8919bdaSduboff
421f8919bdaSduboff static boolean_t
422f8919bdaSduboff sfe_get_mac_addr_sis630e(struct gem_dev *dp)
423f8919bdaSduboff {
424f8919bdaSduboff int i;
425f8919bdaSduboff dev_info_t *isa_bridge;
426f8919bdaSduboff ddi_acc_handle_t isa_handle;
427f8919bdaSduboff int reg;
428f8919bdaSduboff
429f8919bdaSduboff if (inb == NULL || outb == NULL) {
430f8919bdaSduboff /* this is not IA architecture */
431f8919bdaSduboff return (B_FALSE);
432f8919bdaSduboff }
433f8919bdaSduboff
434f8919bdaSduboff if ((isa_bridge = sfe_search_pci_dev(0x1039, 0x8)) == NULL) {
435f8919bdaSduboff cmn_err(CE_WARN, "%s: failed to find isa-bridge pci1039,8",
436f8919bdaSduboff dp->name);
437f8919bdaSduboff return (B_FALSE);
438f8919bdaSduboff }
439f8919bdaSduboff
440f8919bdaSduboff if (pci_config_setup(isa_bridge, &isa_handle) != DDI_SUCCESS) {
441f8919bdaSduboff cmn_err(CE_WARN, "%s: ddi_regs_map_setup failed",
442f8919bdaSduboff dp->name);
443f8919bdaSduboff return (B_FALSE);
444f8919bdaSduboff }
445f8919bdaSduboff
446f8919bdaSduboff 	/* enable access to CMOS RAM */
447f8919bdaSduboff reg = pci_config_get8(isa_handle, 0x48);
448f8919bdaSduboff pci_config_put8(isa_handle, 0x48, reg | 0x40);
449f8919bdaSduboff
450f8919bdaSduboff for (i = 0; i < ETHERADDRL; i++) {
451f8919bdaSduboff outb(0x70, 0x09 + i);
452f8919bdaSduboff dp->dev_addr.ether_addr_octet[i] = inb(0x71);
453f8919bdaSduboff }
454f8919bdaSduboff
455f8919bdaSduboff 	/* disable access to CMOS RAM */
456f8919bdaSduboff pci_config_put8(isa_handle, 0x48, reg);
457f8919bdaSduboff pci_config_teardown(&isa_handle);
458f8919bdaSduboff
459f8919bdaSduboff return (B_TRUE);
460f8919bdaSduboff }
461f8919bdaSduboff
462f8919bdaSduboff static boolean_t
463f8919bdaSduboff sfe_get_mac_addr_sis635(struct gem_dev *dp)
464f8919bdaSduboff {
465f8919bdaSduboff int i;
466f8919bdaSduboff uint32_t rfcr;
467f8919bdaSduboff uint16_t v;
468f8919bdaSduboff struct sfe_dev *lp = dp->private;
469f8919bdaSduboff
470f8919bdaSduboff DPRINTF(2, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
471f8919bdaSduboff rfcr = INL(dp, RFCR);
472f8919bdaSduboff
473f8919bdaSduboff OUTL(dp, CR, lp->cr | CR_RELOAD);
474f8919bdaSduboff OUTL(dp, CR, lp->cr);
475f8919bdaSduboff
476f8919bdaSduboff /* disable packet filtering before reading filter */
477f8919bdaSduboff OUTL(dp, RFCR, rfcr & ~RFCR_RFEN);
478f8919bdaSduboff
479f8919bdaSduboff /* load MAC addr from filter data register */
480f8919bdaSduboff for (i = 0; i < ETHERADDRL; i += 2) {
481f8919bdaSduboff OUTL(dp, RFCR,
482f8919bdaSduboff (RFADDR_MAC_SIS900 + (i/2)) << RFCR_RFADDR_SHIFT_SIS900);
483f8919bdaSduboff v = INL(dp, RFDR);
484f8919bdaSduboff dp->dev_addr.ether_addr_octet[i] = (uint8_t)v;
485f8919bdaSduboff dp->dev_addr.ether_addr_octet[i+1] = (uint8_t)(v >> 8);
486f8919bdaSduboff }
487f8919bdaSduboff
488f8919bdaSduboff /* re-enable packet filtering */
489f8919bdaSduboff OUTL(dp, RFCR, rfcr | RFCR_RFEN);
490f8919bdaSduboff
491f8919bdaSduboff return (B_TRUE);
492f8919bdaSduboff }
493f8919bdaSduboff
494f8919bdaSduboff static boolean_t
495f8919bdaSduboff sfe_get_mac_addr_sis962(struct gem_dev *dp)
496f8919bdaSduboff {
497f8919bdaSduboff boolean_t ret;
498f8919bdaSduboff int i;
499f8919bdaSduboff
500f8919bdaSduboff ret = B_FALSE;
501f8919bdaSduboff
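	/*
	 * The SiS962 EEPROM is arbitrated (presumably shared with other
	 * functions): request ownership via EEREQ, poll for EEGNT for up
	 * to ~2ms, and release it with EEDONE when done.
	 */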
502f8919bdaSduboff 	/* raise the request signal to access the EEPROM */
503f8919bdaSduboff OUTL(dp, MEAR, EROMAR_EEREQ);
504f8919bdaSduboff for (i = 0; (INL(dp, MEAR) & EROMAR_EEGNT) == 0; i++) {
505f8919bdaSduboff if (i > 200) {
506f8919bdaSduboff /* failed to acquire eeprom */
507f8919bdaSduboff cmn_err(CE_NOTE,
508f8919bdaSduboff CONS "%s: failed to access eeprom", dp->name);
509f8919bdaSduboff goto x;
510f8919bdaSduboff }
511f8919bdaSduboff drv_usecwait(10);
512f8919bdaSduboff }
513f8919bdaSduboff ret = sfe_get_mac_addr_sis900(dp);
514f8919bdaSduboff x:
515f8919bdaSduboff /* release EEPROM */
516f8919bdaSduboff OUTL(dp, MEAR, EROMAR_EEDONE);
517f8919bdaSduboff
518f8919bdaSduboff return (ret);
519f8919bdaSduboff }
520f8919bdaSduboff
521f8919bdaSduboff static int
522f8919bdaSduboff sfe_reset_chip_sis900(struct gem_dev *dp)
523f8919bdaSduboff {
524f8919bdaSduboff int i;
525f8919bdaSduboff uint32_t done;
526f8919bdaSduboff uint32_t val;
527f8919bdaSduboff struct sfe_dev *lp = dp->private;
528f8919bdaSduboff
529f8919bdaSduboff DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__));
530f8919bdaSduboff
531f8919bdaSduboff /* invalidate mac addr cache */
532f8919bdaSduboff bzero(lp->mac_addr, sizeof (lp->mac_addr));
533f8919bdaSduboff
534f8919bdaSduboff lp->cr = 0;
535f8919bdaSduboff
536f8919bdaSduboff /* inhibit interrupt */
537f8919bdaSduboff OUTL(dp, IMR, 0);
53823d366e3Sduboff lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits;
539f8919bdaSduboff
540915ebf8dSAlan Duboff OUTLINL(dp, RFCR, 0);
541f8919bdaSduboff
542f8919bdaSduboff OUTL(dp, CR, CR_RST | CR_TXR | CR_RXR);
543f8919bdaSduboff drv_usecwait(10);
544f8919bdaSduboff
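	/*
	 * Wait up to ~10ms (1000 x 10us) for the chip to report both tx
	 * and rx reset completion in the ISR.
	 */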
545f8919bdaSduboff done = 0;
546f8919bdaSduboff for (i = 0; done != (ISR_TXRCMP | ISR_RXRCMP); i++) {
547f8919bdaSduboff if (i > 1000) {
548f8919bdaSduboff cmn_err(CE_WARN, "%s: chip reset timeout", dp->name);
549f8919bdaSduboff return (GEM_FAILURE);
550f8919bdaSduboff }
551f8919bdaSduboff done |= INL(dp, ISR) & (ISR_TXRCMP | ISR_RXRCMP);
552f8919bdaSduboff drv_usecwait(10);
553f8919bdaSduboff }
554f8919bdaSduboff
555f8919bdaSduboff if (lp->revid == SIS630ET_900_REV) {
556f8919bdaSduboff lp->cr |= CR_ACCESSMODE;
557f8919bdaSduboff OUTL(dp, CR, lp->cr | INL(dp, CR));
558f8919bdaSduboff }
559f8919bdaSduboff
560f8919bdaSduboff /* Configuration register: enable PCI parity */
561f8919bdaSduboff DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b",
562f8919bdaSduboff dp->name, INL(dp, CFG), CFG_BITS_SIS900));
56323d366e3Sduboff val = 0;
564f8919bdaSduboff if (lp->revid >= SIS635A_900_REV ||
565f8919bdaSduboff lp->revid == SIS900B_900_REV) {
566f8919bdaSduboff /* what is this ? */
567f8919bdaSduboff val |= CFG_RND_CNT;
568f8919bdaSduboff }
569f8919bdaSduboff OUTL(dp, CFG, val);
570f8919bdaSduboff DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name,
571f8919bdaSduboff INL(dp, CFG), CFG_BITS_SIS900));
572f8919bdaSduboff
573f8919bdaSduboff return (GEM_SUCCESS);
574f8919bdaSduboff }
575f8919bdaSduboff
576f8919bdaSduboff static int
577f8919bdaSduboff sfe_reset_chip_dp83815(struct gem_dev *dp)
578f8919bdaSduboff {
579f8919bdaSduboff int i;
58023d366e3Sduboff uint32_t val;
581f8919bdaSduboff struct sfe_dev *lp = dp->private;
582f8919bdaSduboff
583f8919bdaSduboff DPRINTF(4, (CE_CONT, CONS "%s: %s called", dp->name, __func__));
584f8919bdaSduboff
585f8919bdaSduboff /* invalidate mac addr cache */
586f8919bdaSduboff bzero(lp->mac_addr, sizeof (lp->mac_addr));
587f8919bdaSduboff
588f8919bdaSduboff lp->cr = 0;
589f8919bdaSduboff
590f8919bdaSduboff /* inhibit interrupts */
591f8919bdaSduboff OUTL(dp, IMR, 0);
59223d366e3Sduboff lp->isr_pended |= INL(dp, ISR) & lp->our_intr_bits;
593f8919bdaSduboff
594f8919bdaSduboff OUTL(dp, RFCR, 0);
595f8919bdaSduboff
596f8919bdaSduboff OUTL(dp, CR, CR_RST);
597f8919bdaSduboff drv_usecwait(10);
598f8919bdaSduboff
599f8919bdaSduboff for (i = 0; INL(dp, CR) & CR_RST; i++) {
600f8919bdaSduboff if (i > 100) {
601f8919bdaSduboff cmn_err(CE_WARN, "!%s: chip reset timeout", dp->name);
602f8919bdaSduboff return (GEM_FAILURE);
603f8919bdaSduboff }
604f8919bdaSduboff drv_usecwait(10);
605f8919bdaSduboff }
606f8919bdaSduboff DPRINTF(0, (CE_CONT, "!%s: chip reset in %duS", dp->name, i*10));
607f8919bdaSduboff
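	/*
	 * The two CCSR writes below presumably clear PME status latched
	 * across a power-management wake event (judging by the
	 * CCSR_PMESTS bit name) before reconfiguring the chip.
	 */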
608f8919bdaSduboff OUTL(dp, CCSR, CCSR_PMESTS);
609f8919bdaSduboff OUTL(dp, CCSR, 0);
610f8919bdaSduboff
611f8919bdaSduboff /* Configuration register: enable PCI parity */
612f8919bdaSduboff DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b",
613f8919bdaSduboff dp->name, INL(dp, CFG), CFG_BITS_DP83815));
61423d366e3Sduboff val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
61523d366e3Sduboff OUTL(dp, CFG, val | CFG_PAUSE_ADV);
616f8919bdaSduboff DPRINTF(2, (CE_CONT, CONS "%s: cfg:%b", dp->name,
617f8919bdaSduboff INL(dp, CFG), CFG_BITS_DP83815));
618f8919bdaSduboff
619f8919bdaSduboff return (GEM_SUCCESS);
620f8919bdaSduboff }
621f8919bdaSduboff
622f8919bdaSduboff static int
623f8919bdaSduboff sfe_init_chip(struct gem_dev *dp)
624f8919bdaSduboff {
625f8919bdaSduboff /* Configuration register: have been set up in sfe_chip_reset */
626f8919bdaSduboff
627f8919bdaSduboff /* PCI test control register: do nothing */
628f8919bdaSduboff
629f8919bdaSduboff /* Interrupt status register : do nothing */
630f8919bdaSduboff
631f8919bdaSduboff /* Interrupt mask register: clear, but leave lp->our_intr_bits */
632f8919bdaSduboff OUTL(dp, IMR, 0);
633f8919bdaSduboff
634f8919bdaSduboff /* Enhanced PHY Access register (sis900): do nothing */
635f8919bdaSduboff
636f8919bdaSduboff /* Transmit Descriptor Pointer register: base addr of TX ring */
637f8919bdaSduboff OUTL(dp, TXDP, dp->tx_ring_dma);
638f8919bdaSduboff
639f8919bdaSduboff /* Receive descriptor pointer register: base addr of RX ring */
640f8919bdaSduboff OUTL(dp, RXDP, dp->rx_ring_dma);
641f8919bdaSduboff
642f8919bdaSduboff return (GEM_SUCCESS);
643f8919bdaSduboff }
644f8919bdaSduboff
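/*
 * The multicast hash is the big-endian CRC32 of the address; the rx
 * filter routines below use its most significant bits as the index
 * into the chip's hash table.
 */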
645f8919bdaSduboff static uint_t
646f8919bdaSduboff sfe_mcast_hash(struct gem_dev *dp, uint8_t *addr)
647f8919bdaSduboff {
648f8919bdaSduboff return (gem_ether_crc_be(addr, ETHERADDRL));
649f8919bdaSduboff }
650f8919bdaSduboff
651f8919bdaSduboff #ifdef DEBUG_LEVEL
652f8919bdaSduboff static void
653f8919bdaSduboff sfe_rxfilter_dump(struct gem_dev *dp, int start, int end)
654f8919bdaSduboff {
655f8919bdaSduboff int i;
656f8919bdaSduboff int j;
657f8919bdaSduboff uint16_t ram[0x10];
658f8919bdaSduboff
659f8919bdaSduboff cmn_err(CE_CONT, "!%s: rx filter ram dump:", dp->name);
660f8919bdaSduboff #define WORDS_PER_LINE 4
661f8919bdaSduboff for (i = start; i < end; i += WORDS_PER_LINE*2) {
662f8919bdaSduboff for (j = 0; j < WORDS_PER_LINE; j++) {
663f8919bdaSduboff OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i + j*2);
664f8919bdaSduboff ram[j] = INL(dp, RFDR);
665f8919bdaSduboff }
666f8919bdaSduboff
667f8919bdaSduboff cmn_err(CE_CONT, "!0x%02x: 0x%04x 0x%04x 0x%04x 0x%04x",
668f8919bdaSduboff i, ram[0], ram[1], ram[2], ram[3]);
669f8919bdaSduboff }
670f8919bdaSduboff
671f8919bdaSduboff #undef WORDS_PER_LINE
672f8919bdaSduboff }
673f8919bdaSduboff #endif
674f8919bdaSduboff
675f8919bdaSduboff static uint_t sfe_rf_perfect_base_dp83815[] = {
676f8919bdaSduboff RFADDR_PMATCH0_DP83815,
677f8919bdaSduboff RFADDR_PMATCH1_DP83815,
678f8919bdaSduboff RFADDR_PMATCH2_DP83815,
679f8919bdaSduboff RFADDR_PMATCH3_DP83815,
680f8919bdaSduboff };
681f8919bdaSduboff
682f8919bdaSduboff static int
683f8919bdaSduboff sfe_set_rx_filter_dp83815(struct gem_dev *dp)
684f8919bdaSduboff {
685f8919bdaSduboff int i;
686f8919bdaSduboff int j;
687f8919bdaSduboff uint32_t mode;
688f8919bdaSduboff uint8_t *mac = dp->cur_addr.ether_addr_octet;
689f8919bdaSduboff uint16_t hash_tbl[32];
690f8919bdaSduboff struct sfe_dev *lp = dp->private;
691f8919bdaSduboff
692f8919bdaSduboff DPRINTF(1, (CE_CONT, CONS "%s: %s: called, mc_count:%d, mode:0x%b",
693f8919bdaSduboff dp->name, __func__, dp->mc_count, dp->rxmode, RXMODE_BITS));
694f8919bdaSduboff
695f8919bdaSduboff #if DEBUG_LEVEL > 0
696f8919bdaSduboff for (i = 0; i < dp->mc_count; i++) {
697f8919bdaSduboff cmn_err(CE_CONT,
698f8919bdaSduboff "!%s: adding mcast(%d) %02x:%02x:%02x:%02x:%02x:%02x",
699f8919bdaSduboff dp->name, i,
700f8919bdaSduboff dp->mc_list[i].addr.ether_addr_octet[0],
701f8919bdaSduboff dp->mc_list[i].addr.ether_addr_octet[1],
702f8919bdaSduboff dp->mc_list[i].addr.ether_addr_octet[2],
703f8919bdaSduboff dp->mc_list[i].addr.ether_addr_octet[3],
704f8919bdaSduboff dp->mc_list[i].addr.ether_addr_octet[4],
705f8919bdaSduboff dp->mc_list[i].addr.ether_addr_octet[5]);
706f8919bdaSduboff }
707f8919bdaSduboff #endif
708f8919bdaSduboff if ((dp->rxmode & RXMODE_ENABLE) == 0) {
709f8919bdaSduboff /* disable rx filter */
710f8919bdaSduboff OUTL(dp, RFCR, 0);
711f8919bdaSduboff return (GEM_SUCCESS);
712f8919bdaSduboff }
713f8919bdaSduboff
714f8919bdaSduboff /*
715f8919bdaSduboff * Set Receive filter control register
716f8919bdaSduboff */
717f8919bdaSduboff if (dp->rxmode & RXMODE_PROMISC) {
718f8919bdaSduboff /* all broadcast, all multicast, all physical */
719f8919bdaSduboff mode = RFCR_AAB | RFCR_AAM | RFCR_AAP;
720f8919bdaSduboff } else if ((dp->rxmode & RXMODE_ALLMULTI) || dp->mc_count > 16*32/2) {
721f8919bdaSduboff /* all broadcast, all multicast, physical for the chip */
722f8919bdaSduboff mode = RFCR_AAB | RFCR_AAM | RFCR_APM_DP83815;
723f8919bdaSduboff } else if (dp->mc_count > 4) {
724f8919bdaSduboff /*
725f8919bdaSduboff * Use multicast hash table,
726f8919bdaSduboff * accept all broadcast and physical for the chip.
727f8919bdaSduboff */
728f8919bdaSduboff mode = RFCR_AAB | RFCR_MHEN_DP83815 | RFCR_APM_DP83815;
729f8919bdaSduboff
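		/*
		 * The DP83815 hash table is 512 bits held in 32 16-bit
		 * filter RAM words; the upper 9 bits of the CRC select
		 * the bit to set.
		 */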
730f8919bdaSduboff bzero(hash_tbl, sizeof (hash_tbl));
731f8919bdaSduboff for (i = 0; i < dp->mc_count; i++) {
732f8919bdaSduboff j = dp->mc_list[i].hash >> (32 - 9);
733f8919bdaSduboff hash_tbl[j / 16] |= 1 << (j % 16);
734f8919bdaSduboff }
735f8919bdaSduboff } else {
736f8919bdaSduboff /*
737f8919bdaSduboff 		 * Use the pattern match filter for multicast addresses,
738f8919bdaSduboff * accept all broadcast and physical for the chip
739f8919bdaSduboff */
740f8919bdaSduboff /* need to enable corresponding pattern registers */
741f8919bdaSduboff mode = RFCR_AAB | RFCR_APM_DP83815 |
742f8919bdaSduboff (((1 << dp->mc_count) - 1) << RFCR_APAT_SHIFT);
743f8919bdaSduboff }
744f8919bdaSduboff
745f8919bdaSduboff #if DEBUG_LEVEL > 1
746f8919bdaSduboff cmn_err(CE_CONT,
747f8919bdaSduboff "!%s: mac %02x:%02x:%02x:%02x:%02x:%02x"
748f8919bdaSduboff " cache %02x:%02x:%02x:%02x:%02x:%02x",
749f8919bdaSduboff dp->name, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
750f8919bdaSduboff lp->mac_addr[0], lp->mac_addr[1],
751f8919bdaSduboff lp->mac_addr[2], lp->mac_addr[3],
752f8919bdaSduboff lp->mac_addr[4], lp->mac_addr[5]);
753f8919bdaSduboff #endif
754f8919bdaSduboff if (bcmp(mac, lp->mac_addr, ETHERADDRL) != 0) {
755f8919bdaSduboff /*
756f8919bdaSduboff 		 * XXX - need to *disable* the rx filter to load the mac address
757f8919bdaSduboff 		 * into the chip; otherwise, we cannot set up the rx filter correctly.
758f8919bdaSduboff */
759f8919bdaSduboff /* setup perfect match register for my station address */
760f8919bdaSduboff for (i = 0; i < ETHERADDRL; i += 2) {
761f8919bdaSduboff OUTL(dp, RFCR, RFADDR_MAC_DP83815 + i);
762f8919bdaSduboff OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]);
763f8919bdaSduboff }
764f8919bdaSduboff
765f8919bdaSduboff bcopy(mac, lp->mac_addr, ETHERADDRL);
766f8919bdaSduboff }
767f8919bdaSduboff
768f8919bdaSduboff #if DEBUG_LEVEL > 3
769f8919bdaSduboff /* clear pattern ram */
770f8919bdaSduboff for (j = 0x200; j < 0x380; j += 2) {
771f8919bdaSduboff OUTL(dp, RFCR, j);
772f8919bdaSduboff OUTL(dp, RFDR, 0);
773f8919bdaSduboff }
774f8919bdaSduboff #endif
775f8919bdaSduboff if (mode & RFCR_APAT_DP83815) {
776f8919bdaSduboff /* setup multicast address into pattern match registers */
777f8919bdaSduboff for (j = 0; j < dp->mc_count; j++) {
778f8919bdaSduboff mac = &dp->mc_list[j].addr.ether_addr_octet[0];
779f8919bdaSduboff for (i = 0; i < ETHERADDRL; i += 2) {
780f8919bdaSduboff OUTL(dp, RFCR,
781f8919bdaSduboff sfe_rf_perfect_base_dp83815[j] + i*2);
782f8919bdaSduboff OUTL(dp, RFDR, (mac[i+1] << 8) | mac[i]);
783f8919bdaSduboff }
784f8919bdaSduboff }
785f8919bdaSduboff
786f8919bdaSduboff /* setup pattern count registers */
787f8919bdaSduboff OUTL(dp, RFCR, RFADDR_PCOUNT01_DP83815);
788f8919bdaSduboff OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL);
789f8919bdaSduboff OUTL(dp, RFCR, RFADDR_PCOUNT23_DP83815);
790f8919bdaSduboff OUTL(dp, RFDR, (ETHERADDRL << 8) | ETHERADDRL);
791f8919bdaSduboff }
792f8919bdaSduboff
793f8919bdaSduboff if (mode & RFCR_MHEN_DP83815) {
794f8919bdaSduboff /* Load Multicast hash table */
795f8919bdaSduboff for (i = 0; i < 32; i++) {
796f8919bdaSduboff 			/* for DP83815, the index is in bytes */
797f8919bdaSduboff OUTL(dp, RFCR, RFADDR_MULTICAST_DP83815 + i*2);
798f8919bdaSduboff OUTL(dp, RFDR, hash_tbl[i]);
799f8919bdaSduboff }
800f8919bdaSduboff }
801f8919bdaSduboff #if DEBUG_LEVEL > 2
802f8919bdaSduboff sfe_rxfilter_dump(dp, 0, 0x10);
803f8919bdaSduboff sfe_rxfilter_dump(dp, 0x200, 0x380);
804f8919bdaSduboff #endif
805f8919bdaSduboff /* Set rx filter mode and enable rx filter */
806f8919bdaSduboff OUTL(dp, RFCR, RFCR_RFEN | mode);
807f8919bdaSduboff
808f8919bdaSduboff return (GEM_SUCCESS);
809f8919bdaSduboff }
810f8919bdaSduboff
811f8919bdaSduboff static int
812f8919bdaSduboff sfe_set_rx_filter_sis900(struct gem_dev *dp)
813f8919bdaSduboff {
814f8919bdaSduboff int i;
815f8919bdaSduboff uint32_t mode;
816f8919bdaSduboff uint16_t hash_tbl[16];
817f8919bdaSduboff uint8_t *mac = dp->cur_addr.ether_addr_octet;
818f8919bdaSduboff int hash_size;
819f8919bdaSduboff int hash_shift;
820f8919bdaSduboff struct sfe_dev *lp = dp->private;
821f8919bdaSduboff
822f8919bdaSduboff DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
823f8919bdaSduboff
824f8919bdaSduboff if ((dp->rxmode & RXMODE_ENABLE) == 0) {
825915ebf8dSAlan Duboff /* disable rx filter */
826915ebf8dSAlan Duboff OUTLINL(dp, RFCR, 0);
827f8919bdaSduboff return (GEM_SUCCESS);
828f8919bdaSduboff }
829f8919bdaSduboff
830f8919bdaSduboff /*
831f8919bdaSduboff 	 * determine the hardware hash table size in words.
832f8919bdaSduboff */
833f8919bdaSduboff hash_shift = 25;
834f8919bdaSduboff if (lp->revid >= SIS635A_900_REV || lp->revid == SIS900B_900_REV) {
835f8919bdaSduboff hash_shift = 24;
836f8919bdaSduboff }
837f8919bdaSduboff hash_size = (1 << (32 - hash_shift)) / 16;
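	/*
	 * A hash_shift of 25 (24 on newer revisions) means the upper 7
	 * (or 8) bits of the CRC index the table: 128 or 256 bits held
	 * in 8 or 16 16-bit filter words.
	 */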
838f8919bdaSduboff bzero(hash_tbl, sizeof (hash_tbl));
839f8919bdaSduboff
840f8919bdaSduboff /* Set Receive filter control register */
841f8919bdaSduboff
842f8919bdaSduboff if (dp->rxmode & RXMODE_PROMISC) {
843f8919bdaSduboff /* all broadcast, all multicast, all physical */
844f8919bdaSduboff mode = RFCR_AAB | RFCR_AAM | RFCR_AAP;
845f8919bdaSduboff } else if ((dp->rxmode & RXMODE_ALLMULTI) ||
846f8919bdaSduboff dp->mc_count > hash_size*16/2) {
847f8919bdaSduboff /* all broadcast, all multicast, physical for the chip */
848f8919bdaSduboff mode = RFCR_AAB | RFCR_AAM;
849f8919bdaSduboff } else {
850f8919bdaSduboff /* all broadcast, physical for the chip */
851f8919bdaSduboff mode = RFCR_AAB;
852f8919bdaSduboff }
853f8919bdaSduboff
854f8919bdaSduboff /* make hash table */
855f8919bdaSduboff for (i = 0; i < dp->mc_count; i++) {
856f8919bdaSduboff uint_t h;
857f8919bdaSduboff h = dp->mc_list[i].hash >> hash_shift;
858f8919bdaSduboff hash_tbl[h / 16] |= 1 << (h % 16);
859f8919bdaSduboff }
860f8919bdaSduboff
861f8919bdaSduboff if (bcmp(mac, lp->mac_addr, ETHERADDRL) != 0) {
862f8919bdaSduboff /* Disable Rx filter and load mac address */
863f8919bdaSduboff for (i = 0; i < ETHERADDRL/2; i++) {
864f8919bdaSduboff 			/* For sis900, the index is in words */
865915ebf8dSAlan Duboff OUTLINL(dp, RFCR,
866f8919bdaSduboff (RFADDR_MAC_SIS900+i) << RFCR_RFADDR_SHIFT_SIS900);
867915ebf8dSAlan Duboff OUTLINL(dp, RFDR, (mac[i*2+1] << 8) | mac[i*2]);
868f8919bdaSduboff }
869f8919bdaSduboff
870f8919bdaSduboff bcopy(mac, lp->mac_addr, ETHERADDRL);
871f8919bdaSduboff }
872f8919bdaSduboff
873f8919bdaSduboff /* Load Multicast hash table */
874f8919bdaSduboff for (i = 0; i < hash_size; i++) {
875f8919bdaSduboff 		/* For sis900, the index is in words */
876915ebf8dSAlan Duboff OUTLINL(dp, RFCR,
877f8919bdaSduboff (RFADDR_MULTICAST_SIS900 + i) << RFCR_RFADDR_SHIFT_SIS900);
878915ebf8dSAlan Duboff OUTLINL(dp, RFDR, hash_tbl[i]);
879f8919bdaSduboff }
880f8919bdaSduboff
881f8919bdaSduboff /* Load rx filter mode and enable rx filter */
882915ebf8dSAlan Duboff OUTLINL(dp, RFCR, RFCR_RFEN | mode);
883f8919bdaSduboff
884f8919bdaSduboff return (GEM_SUCCESS);
885f8919bdaSduboff }
886f8919bdaSduboff
887f8919bdaSduboff static int
888f8919bdaSduboff sfe_start_chip(struct gem_dev *dp)
889f8919bdaSduboff {
890f8919bdaSduboff struct sfe_dev *lp = dp->private;
891f8919bdaSduboff
892f8919bdaSduboff DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
893f8919bdaSduboff
894f8919bdaSduboff /*
895f8919bdaSduboff 	 * set up the interrupt mask, which shouldn't include ISR_TOK,
896f8919bdaSduboff 	 * to improve performance.
897f8919bdaSduboff */
898f8919bdaSduboff lp->our_intr_bits = OUR_INTR_BITS;
899f8919bdaSduboff
900f8919bdaSduboff /* enable interrupt */
901f8919bdaSduboff if ((dp->misc_flag & GEM_NOINTR) == 0) {
902f8919bdaSduboff OUTL(dp, IER, 1);
903f8919bdaSduboff OUTL(dp, IMR, lp->our_intr_bits);
904f8919bdaSduboff }
905f8919bdaSduboff
906f8919bdaSduboff /* Kick RX */
907f8919bdaSduboff OUTL(dp, CR, lp->cr | CR_RXE);
908f8919bdaSduboff
909f8919bdaSduboff return (GEM_SUCCESS);
910f8919bdaSduboff }
911f8919bdaSduboff
912f8919bdaSduboff /*
913f8919bdaSduboff * Stop nic core gracefully.
914f8919bdaSduboff */
915f8919bdaSduboff static int
916f8919bdaSduboff sfe_stop_chip(struct gem_dev *dp)
917f8919bdaSduboff {
918f8919bdaSduboff struct sfe_dev *lp = dp->private;
919f8919bdaSduboff uint32_t done;
920f8919bdaSduboff int i;
92123d366e3Sduboff uint32_t val;
922f8919bdaSduboff
923f8919bdaSduboff DPRINTF(4, (CE_CONT, CONS "%s: %s: called", dp->name, __func__));
924f8919bdaSduboff
925f8919bdaSduboff /*
926f8919bdaSduboff 	 * Although we inhibit interrupts here, we don't clear the soft copy
927f8919bdaSduboff 	 * of the interrupt mask, to avoid bogus interrupts.
928f8919bdaSduboff */
929f8919bdaSduboff OUTL(dp, IMR, 0);
930f8919bdaSduboff
931f8919bdaSduboff /* stop TX and RX immediately */
932f8919bdaSduboff OUTL(dp, CR, lp->cr | CR_TXR | CR_RXR);
933f8919bdaSduboff
934f8919bdaSduboff done = 0;
935f8919bdaSduboff for (i = 0; done != (ISR_RXRCMP | ISR_TXRCMP); i++) {
936f8919bdaSduboff if (i > 1000) {
937f8919bdaSduboff /*
938f8919bdaSduboff 			 * As the gem layer will call sfe_reset_chip(),
939f8919bdaSduboff 			 * we don't need to reset further.
940f8919bdaSduboff */
941f8919bdaSduboff cmn_err(CE_NOTE, "!%s: %s: Tx/Rx reset timeout",
942f8919bdaSduboff dp->name, __func__);
943f8919bdaSduboff
944f8919bdaSduboff return (GEM_FAILURE);
945f8919bdaSduboff }
94623d366e3Sduboff val = INL(dp, ISR);
94723d366e3Sduboff done |= val & (ISR_RXRCMP | ISR_TXRCMP);
94823d366e3Sduboff lp->isr_pended |= val & lp->our_intr_bits;
949f8919bdaSduboff drv_usecwait(10);
950f8919bdaSduboff }
951f8919bdaSduboff
952f8919bdaSduboff return (GEM_SUCCESS);
953f8919bdaSduboff }
954f8919bdaSduboff
95553560dfaSSherry Moore #ifndef __sparc
95653560dfaSSherry Moore /*
95753560dfaSSherry Moore * Stop nic core gracefully for quiesce
95853560dfaSSherry Moore */
95953560dfaSSherry Moore static int
96053560dfaSSherry Moore sfe_stop_chip_quiesce(struct gem_dev *dp)
96153560dfaSSherry Moore {
96253560dfaSSherry Moore struct sfe_dev *lp = dp->private;
96353560dfaSSherry Moore uint32_t done;
96453560dfaSSherry Moore int i;
96553560dfaSSherry Moore uint32_t val;
96653560dfaSSherry Moore
96753560dfaSSherry Moore /*
96853560dfaSSherry Moore 	 * Although we inhibit interrupts here, we don't clear the soft copy
96953560dfaSSherry Moore 	 * of the interrupt mask, to avoid bogus interrupts.
97053560dfaSSherry Moore */
97153560dfaSSherry Moore OUTL(dp, IMR, 0);
97253560dfaSSherry Moore
97353560dfaSSherry Moore /* stop TX and RX immediately */
97453560dfaSSherry Moore OUTL(dp, CR, CR_TXR | CR_RXR);
97553560dfaSSherry Moore
97653560dfaSSherry Moore done = 0;
97753560dfaSSherry Moore for (i = 0; done != (ISR_RXRCMP | ISR_TXRCMP); i++) {
97853560dfaSSherry Moore if (i > 1000) {
97953560dfaSSherry Moore /*
98053560dfaSSherry Moore 			 * As the gem layer will call sfe_reset_chip(),
98153560dfaSSherry Moore 			 * we don't need to reset further.
98253560dfaSSherry Moore */
98353560dfaSSherry Moore
98453560dfaSSherry Moore return (DDI_FAILURE);
98553560dfaSSherry Moore }
98653560dfaSSherry Moore val = INL(dp, ISR);
98753560dfaSSherry Moore done |= val & (ISR_RXRCMP | ISR_TXRCMP);
98853560dfaSSherry Moore lp->isr_pended |= val & lp->our_intr_bits;
98953560dfaSSherry Moore drv_usecwait(10);
99053560dfaSSherry Moore }
99153560dfaSSherry Moore return (DDI_SUCCESS);
99253560dfaSSherry Moore }
99353560dfaSSherry Moore #endif
99453560dfaSSherry Moore
995f8919bdaSduboff /*
996f8919bdaSduboff * Setup media mode
997f8919bdaSduboff */
998f8919bdaSduboff static uint_t
999f8919bdaSduboff sfe_mxdma_value[] = { 512, 4, 8, 16, 32, 64, 128, 256, };
1000f8919bdaSduboff
1001f8919bdaSduboff static uint_t
1002f8919bdaSduboff sfe_encode_mxdma(uint_t burstsize)
1003f8919bdaSduboff {
1004f8919bdaSduboff int i;
1005f8919bdaSduboff
1006f8919bdaSduboff if (burstsize > 256) {
1007f8919bdaSduboff /* choose 512 */
1008f8919bdaSduboff return (0);
1009f8919bdaSduboff }
1010f8919bdaSduboff
1011f8919bdaSduboff for (i = 1; i < 8; i++) {
1012f8919bdaSduboff if (burstsize <= sfe_mxdma_value[i]) {
1013f8919bdaSduboff break;
1014f8919bdaSduboff }
1015f8919bdaSduboff }
1016f8919bdaSduboff return (i);
1017f8919bdaSduboff }
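/*
 * Example: sfe_encode_mxdma(100) returns 6, selecting the next burst
 * size not smaller than the request (128 bytes); requests above 256
 * return 0, which sfe_mxdma_value[] maps to 512 bytes.
 */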
1018f8919bdaSduboff
1019f8919bdaSduboff static int
1020f8919bdaSduboff sfe_set_media(struct gem_dev *dp)
1021f8919bdaSduboff {
1022f8919bdaSduboff uint32_t txcfg;
1023f8919bdaSduboff uint32_t rxcfg;
1024f8919bdaSduboff uint32_t pcr;
1025f8919bdaSduboff uint32_t val;
1026f8919bdaSduboff uint32_t txmxdma;
1027f8919bdaSduboff uint32_t rxmxdma;
1028f8919bdaSduboff struct sfe_dev *lp = dp->private;
1029f8919bdaSduboff #ifdef DEBUG_LEVEL
1030f8919bdaSduboff extern int gem_speed_value[];
1031f8919bdaSduboff #endif
1032f8919bdaSduboff DPRINTF(2, (CE_CONT, CONS "%s: %s: %s duplex, %d Mbps",
1033f8919bdaSduboff dp->name, __func__,
1034f8919bdaSduboff dp->full_duplex ? "full" : "half", gem_speed_value[dp->speed]));
1035f8919bdaSduboff
1036f8919bdaSduboff /* initialize txcfg and rxcfg */
1037f8919bdaSduboff txcfg = TXCFG_ATP;
1038f8919bdaSduboff if (dp->full_duplex) {
1039f8919bdaSduboff txcfg |= (TXCFG_CSI | TXCFG_HBI);
1040f8919bdaSduboff }
1041f8919bdaSduboff rxcfg = RXCFG_AEP | RXCFG_ARP;
1042f8919bdaSduboff if (dp->full_duplex) {
1043f8919bdaSduboff rxcfg |= RXCFG_ATX;
1044f8919bdaSduboff }
1045f8919bdaSduboff
1046f8919bdaSduboff 	/* select txmxdma and rxmxdma, the maximum burst lengths */
1047f8919bdaSduboff if (lp->chip->chip_type == CHIPTYPE_SIS900) {
1048f8919bdaSduboff #ifdef DEBUG_SIS900_EDB
1049f8919bdaSduboff val = CFG_EDB_MASTER;
1050f8919bdaSduboff #else
1051f8919bdaSduboff val = INL(dp, CFG) & CFG_EDB_MASTER;
1052f8919bdaSduboff #endif
1053f8919bdaSduboff if (val) {
1054f8919bdaSduboff /*
1055f8919bdaSduboff * sis900 built-in cores:
1056f8919bdaSduboff * max burst length must be fixed to 64
1057f8919bdaSduboff */
1058f8919bdaSduboff txmxdma = 64;
1059f8919bdaSduboff rxmxdma = 64;
1060f8919bdaSduboff } else {
1061f8919bdaSduboff /*
1062f8919bdaSduboff * sis900 pci chipset:
1063f8919bdaSduboff 			 * the vendor recommends fixing the max burst length
1064f8919bdaSduboff 			 * to 512
1065f8919bdaSduboff */
1066f8919bdaSduboff txmxdma = 512;
1067f8919bdaSduboff rxmxdma = 512;
1068f8919bdaSduboff }
1069f8919bdaSduboff } else {
1070f8919bdaSduboff /*
1071f8919bdaSduboff * NS dp83815/816:
1072f8919bdaSduboff * use user defined or default for tx/rx max burst length
1073f8919bdaSduboff */
1074f8919bdaSduboff txmxdma = max(dp->txmaxdma, 256);
1075f8919bdaSduboff rxmxdma = max(dp->rxmaxdma, 256);
1076f8919bdaSduboff }
1077f8919bdaSduboff
1078f8919bdaSduboff
1079f8919bdaSduboff /* tx high water mark */
1080f8919bdaSduboff lp->tx_drain_threshold = ROUNDUP2(dp->txthr, TXCFG_FIFO_UNIT);
1081f8919bdaSduboff
1082f8919bdaSduboff 	/* determine tx_fill_threshold according to the drain threshold */
1083f8919bdaSduboff lp->tx_fill_threshold =
1084f8919bdaSduboff TXFIFOSIZE - lp->tx_drain_threshold - TXCFG_FIFO_UNIT;
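	/*
	 * The fill threshold is chosen so that drain + fill leave one
	 * TXCFG_FIFO_UNIT of slack within the TXFIFOSIZE-byte tx FIFO.
	 */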
1085f8919bdaSduboff
1086f8919bdaSduboff /* tune txmxdma not to exceed tx_fill_threshold */
1087f8919bdaSduboff for (; ; ) {
1088f8919bdaSduboff /* normalize txmxdma requested */
1089f8919bdaSduboff val = sfe_encode_mxdma(txmxdma);
1090f8919bdaSduboff txmxdma = sfe_mxdma_value[val];
1091f8919bdaSduboff
1092f8919bdaSduboff if (txmxdma <= lp->tx_fill_threshold) {
1093f8919bdaSduboff break;
1094f8919bdaSduboff }
1095f8919bdaSduboff /* select new txmxdma */
1096f8919bdaSduboff txmxdma = txmxdma / 2;
1097f8919bdaSduboff }
1098f8919bdaSduboff txcfg |= val << TXCFG_MXDMA_SHIFT;
1099f8919bdaSduboff
1100f8919bdaSduboff 	/* encode rxmxdma, the maximum burst length for rx */
1101f8919bdaSduboff val = sfe_encode_mxdma(rxmxdma);
110223d366e3Sduboff rxcfg |= val << RXCFG_MXDMA_SHIFT;
1103f8919bdaSduboff rxmxdma = sfe_mxdma_value[val];
1104f8919bdaSduboff
1105f8919bdaSduboff 	/* receive starting threshold - the field is only 5 bits wide */
1106f8919bdaSduboff val = ROUNDUP2(max(dp->rxthr, ETHERMIN), RXCFG_FIFO_UNIT);
1107f8919bdaSduboff lp->rx_drain_threshold =
1108f8919bdaSduboff min(val, (RXCFG_DRTH >> RXCFG_DRTH_SHIFT) * RXCFG_FIFO_UNIT);
1109f8919bdaSduboff
1110f8919bdaSduboff DPRINTF(0, (CE_CONT,
1111f8919bdaSduboff "%s: %s: tx: drain:%d(rest %d) fill:%d mxdma:%d,"
1112f8919bdaSduboff " rx: drain:%d mxdma:%d",
1113f8919bdaSduboff dp->name, __func__,
1114f8919bdaSduboff lp->tx_drain_threshold, TXFIFOSIZE - lp->tx_drain_threshold,
1115f8919bdaSduboff lp->tx_fill_threshold, txmxdma,
1116f8919bdaSduboff lp->rx_drain_threshold, rxmxdma));
1117f8919bdaSduboff
1118f8919bdaSduboff ASSERT(lp->tx_drain_threshold < 64*TXCFG_FIFO_UNIT);
1119f8919bdaSduboff ASSERT(lp->tx_fill_threshold < 64*TXCFG_FIFO_UNIT);
1120f8919bdaSduboff ASSERT(lp->rx_drain_threshold < 32*RXCFG_FIFO_UNIT);
1121f8919bdaSduboff
1122f8919bdaSduboff txcfg |= ((lp->tx_fill_threshold/TXCFG_FIFO_UNIT) << TXCFG_FLTH_SHIFT)
1123f8919bdaSduboff | (lp->tx_drain_threshold/TXCFG_FIFO_UNIT);
1124f8919bdaSduboff OUTL(dp, TXCFG, txcfg);
1125f8919bdaSduboff
1126f8919bdaSduboff rxcfg |= ((lp->rx_drain_threshold/RXCFG_FIFO_UNIT) << RXCFG_DRTH_SHIFT);
1127f8919bdaSduboff if (lp->chip->chip_type == CHIPTYPE_DP83815) {
1128f8919bdaSduboff rxcfg |= RXCFG_ALP_DP83815;
1129f8919bdaSduboff }
1130f8919bdaSduboff OUTL(dp, RXCFG, rxcfg);
1131f8919bdaSduboff
1132f8919bdaSduboff DPRINTF(0, (CE_CONT, CONS "%s: %s: txcfg:%b rxcfg:%b",
1133f8919bdaSduboff dp->name, __func__,
1134f8919bdaSduboff txcfg, TXCFG_BITS, rxcfg, RXCFG_BITS));
1135f8919bdaSduboff
1136f8919bdaSduboff /* Flow control */
1137f8919bdaSduboff if (lp->chip->chip_type == CHIPTYPE_DP83815) {
1138f8919bdaSduboff pcr = INL(dp, PCR);
1139f8919bdaSduboff switch (dp->flow_control) {
1140f8919bdaSduboff case FLOW_CONTROL_SYMMETRIC:
1141f8919bdaSduboff case FLOW_CONTROL_RX_PAUSE:
1142f8919bdaSduboff OUTL(dp, PCR, pcr | PCR_PSEN | PCR_PS_MCAST);
1143f8919bdaSduboff break;
1144f8919bdaSduboff
1145f8919bdaSduboff default:
1146f8919bdaSduboff OUTL(dp, PCR,
1147f8919bdaSduboff pcr & ~(PCR_PSEN | PCR_PS_MCAST | PCR_PS_DA));
1148f8919bdaSduboff break;
1149f8919bdaSduboff }
1150f8919bdaSduboff DPRINTF(2, (CE_CONT, CONS "%s: PCR: %b", dp->name,
1151f8919bdaSduboff INL(dp, PCR), PCR_BITS));
1152f8919bdaSduboff
1153f8919bdaSduboff } else if (lp->chip->chip_type == CHIPTYPE_SIS900) {
1154f8919bdaSduboff switch (dp->flow_control) {
1155f8919bdaSduboff case FLOW_CONTROL_SYMMETRIC:
1156f8919bdaSduboff case FLOW_CONTROL_RX_PAUSE:
1157f8919bdaSduboff OUTL(dp, FLOWCTL, FLOWCTL_FLOWEN);
1158f8919bdaSduboff break;
1159f8919bdaSduboff default:
1160f8919bdaSduboff OUTL(dp, FLOWCTL, 0);
1161f8919bdaSduboff break;
1162f8919bdaSduboff }
1163f8919bdaSduboff DPRINTF(2, (CE_CONT, CONS "%s: FLOWCTL: %b",
1164f8919bdaSduboff dp->name, INL(dp, FLOWCTL), FLOWCTL_BITS));
1165f8919bdaSduboff }
1166f8919bdaSduboff return (GEM_SUCCESS);
1167f8919bdaSduboff }
1168f8919bdaSduboff
1169f8919bdaSduboff static int
1170f8919bdaSduboff sfe_get_stats(struct gem_dev *dp)
1171f8919bdaSduboff {
1172f8919bdaSduboff /* do nothing */
1173f8919bdaSduboff return (GEM_SUCCESS);
1174f8919bdaSduboff }
1175f8919bdaSduboff
1176f8919bdaSduboff /*
1177f8919bdaSduboff * descriptor manipulations
1178f8919bdaSduboff */
1179f8919bdaSduboff static int
1180f8919bdaSduboff sfe_tx_desc_write(struct gem_dev *dp, int slot,
1181f8919bdaSduboff ddi_dma_cookie_t *dmacookie, int frags, uint64_t flags)
1182f8919bdaSduboff {
1183f8919bdaSduboff uint32_t mark;
1184f8919bdaSduboff struct sfe_desc *tdp;
1185f8919bdaSduboff ddi_dma_cookie_t *dcp;
118623d366e3Sduboff uint32_t tmp0;
118723d366e3Sduboff #if DEBUG_LEVEL > 2
1188f8919bdaSduboff int i;
1189f8919bdaSduboff
1190f8919bdaSduboff cmn_err(CE_CONT,
1191f8919bdaSduboff CONS "%s: time:%d %s seqnum: %d, slot %d, frags: %d flags: %llx",
1192f8919bdaSduboff dp->name, ddi_get_lbolt(), __func__,
1193f8919bdaSduboff dp->tx_desc_tail, slot, frags, flags);
1194f8919bdaSduboff
1195f8919bdaSduboff for (i = 0; i < frags; i++) {
1196f8919bdaSduboff cmn_err(CE_CONT, CONS "%d: addr: 0x%x, len: 0x%x",
1197f8919bdaSduboff i, dmacookie[i].dmac_address, dmacookie[i].dmac_size);
1198f8919bdaSduboff }
1199f8919bdaSduboff #endif
1200f8919bdaSduboff /*
1201f8919bdaSduboff * write tx descriptor in reversed order.
1202f8919bdaSduboff */
1203f8919bdaSduboff #if DEBUG_LEVEL > 3
1204f8919bdaSduboff flags |= GEM_TXFLAG_INTR;
1205f8919bdaSduboff #endif
1206f8919bdaSduboff mark = (flags & GEM_TXFLAG_INTR)
1207f8919bdaSduboff ? (CMDSTS_OWN | CMDSTS_INTR) : CMDSTS_OWN;
1208f8919bdaSduboff
1209f8919bdaSduboff ASSERT(frags == 1);
1210f8919bdaSduboff dcp = &dmacookie[0];
1211f8919bdaSduboff if (flags & GEM_TXFLAG_HEAD) {
1212f8919bdaSduboff mark &= ~CMDSTS_OWN;
1213f8919bdaSduboff }
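	/*
	 * Ownership of a packet's first descriptor is withheld here and
	 * granted later in sfe_tx_start(), so the chip never sees a
	 * partially built descriptor chain.
	 */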
1214f8919bdaSduboff
1215f8919bdaSduboff tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
121623d366e3Sduboff tmp0 = (uint32_t)dcp->dmac_address;
121723d366e3Sduboff mark |= (uint32_t)dcp->dmac_size;
121823d366e3Sduboff tdp->d_bufptr = LE_32(tmp0);
121923d366e3Sduboff tdp->d_cmdsts = LE_32(mark);
1220f8919bdaSduboff
1221f8919bdaSduboff return (frags);
1222f8919bdaSduboff }
1223f8919bdaSduboff
1224f8919bdaSduboff static void
1225f8919bdaSduboff sfe_tx_start(struct gem_dev *dp, int start_slot, int nslot)
1226f8919bdaSduboff {
122723d366e3Sduboff uint_t tx_ring_size = dp->gc.gc_tx_ring_size;
1228f8919bdaSduboff struct sfe_desc *tdp;
1229f8919bdaSduboff struct sfe_dev *lp = dp->private;
1230f8919bdaSduboff
1231f8919bdaSduboff if (nslot > 1) {
1232f8919bdaSduboff gem_tx_desc_dma_sync(dp,
123323d366e3Sduboff SLOT(start_slot + 1, tx_ring_size),
1234f8919bdaSduboff nslot - 1, DDI_DMA_SYNC_FORDEV);
1235f8919bdaSduboff }
1236f8919bdaSduboff
1237f8919bdaSduboff tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * start_slot];
1238f8919bdaSduboff tdp->d_cmdsts |= LE_32(CMDSTS_OWN);
1239f8919bdaSduboff
1240f8919bdaSduboff gem_tx_desc_dma_sync(dp, start_slot, 1, DDI_DMA_SYNC_FORDEV);
1241f8919bdaSduboff
1242f8919bdaSduboff /*
1243f8919bdaSduboff 	 * Make the Transmit Buffer Manager Fill state machine active.
1244f8919bdaSduboff */
1245f8919bdaSduboff if (dp->mac_active) {
1246f8919bdaSduboff OUTL(dp, CR, lp->cr | CR_TXE);
1247f8919bdaSduboff }
1248f8919bdaSduboff }
1249f8919bdaSduboff
1250f8919bdaSduboff static void
1251f8919bdaSduboff sfe_rx_desc_write(struct gem_dev *dp, int slot,
1252f8919bdaSduboff ddi_dma_cookie_t *dmacookie, int frags)
1253f8919bdaSduboff {
1254f8919bdaSduboff struct sfe_desc *rdp;
125523d366e3Sduboff uint32_t tmp0;
125623d366e3Sduboff uint32_t tmp1;
1257f8919bdaSduboff #if DEBUG_LEVEL > 2
1258f8919bdaSduboff int i;
1259f8919bdaSduboff
1260f8919bdaSduboff ASSERT(frags == 1);
1261f8919bdaSduboff
1262f8919bdaSduboff cmn_err(CE_CONT, CONS
1263f8919bdaSduboff "%s: %s seqnum: %d, slot %d, frags: %d",
1264f8919bdaSduboff dp->name, __func__, dp->rx_active_tail, slot, frags);
1265f8919bdaSduboff for (i = 0; i < frags; i++) {
1266f8919bdaSduboff cmn_err(CE_CONT, CONS " frag: %d addr: 0x%llx, len: 0x%lx",
1267f8919bdaSduboff i, dmacookie[i].dmac_address, dmacookie[i].dmac_size);
1268f8919bdaSduboff }
1269f8919bdaSduboff #endif
1270f8919bdaSduboff /* for the last slot of the packet */
1271f8919bdaSduboff rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
1272f8919bdaSduboff
127323d366e3Sduboff tmp0 = (uint32_t)dmacookie->dmac_address;
127423d366e3Sduboff tmp1 = CMDSTS_INTR | (uint32_t)dmacookie->dmac_size;
127523d366e3Sduboff rdp->d_bufptr = LE_32(tmp0);
127623d366e3Sduboff rdp->d_cmdsts = LE_32(tmp1);
1277f8919bdaSduboff }
1278f8919bdaSduboff
1279f8919bdaSduboff static uint_t
1280f8919bdaSduboff sfe_tx_desc_stat(struct gem_dev *dp, int slot, int ndesc)
1281f8919bdaSduboff {
128223d366e3Sduboff uint_t tx_ring_size = dp->gc.gc_tx_ring_size;
1283f8919bdaSduboff struct sfe_desc *tdp;
1284f8919bdaSduboff uint32_t status;
1285f8919bdaSduboff int cols;
128623d366e3Sduboff struct sfe_dev *lp = dp->private;
1287f8919bdaSduboff #ifdef DEBUG_LEVEL
1288f8919bdaSduboff int i;
1289f8919bdaSduboff clock_t delay;
1290f8919bdaSduboff #endif
1291f8919bdaSduboff /* check status of the last descriptor */
1292f8919bdaSduboff tdp = (void *)
129323d366e3Sduboff &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot + ndesc - 1, tx_ring_size)];
1294f8919bdaSduboff
129523d366e3Sduboff /*
129623d366e3Sduboff 	 * Don't use LE_32() directly to refer to tdp->d_cmdsts;
129723d366e3Sduboff 	 * it is not atomic on big endian cpus.
129823d366e3Sduboff */
129923d366e3Sduboff status = tdp->d_cmdsts;
130023d366e3Sduboff status = LE_32(status);
1301f8919bdaSduboff
1302f8919bdaSduboff DPRINTF(2, (CE_CONT, CONS "%s: time:%ld %s: slot:%d, status:0x%b",
1303f8919bdaSduboff dp->name, ddi_get_lbolt(), __func__,
1304f8919bdaSduboff slot, status, TXSTAT_BITS));
1305f8919bdaSduboff
1306f8919bdaSduboff if (status & CMDSTS_OWN) {
1307f8919bdaSduboff /*
1308f8919bdaSduboff * not yet transmitted
1309f8919bdaSduboff */
131023d366e3Sduboff /* workaround for tx hang */
131123d366e3Sduboff if (lp->chip->chip_type == CHIPTYPE_DP83815 &&
131223d366e3Sduboff dp->mac_active) {
131323d366e3Sduboff OUTL(dp, CR, lp->cr | CR_TXE);
131423d366e3Sduboff }
1315f8919bdaSduboff return (0);
1316f8919bdaSduboff }
1317f8919bdaSduboff
1318f8919bdaSduboff if (status & CMDSTS_MORE) {
1319f8919bdaSduboff /* XXX - a hardware problem, but don't panic the system */
1320f8919bdaSduboff /* %x is used instead of %b to avoid a lint bug with the 32nd bit */
1321f8919bdaSduboff cmn_err(CE_NOTE, CONS
1322f8919bdaSduboff "%s: tx status bits incorrect: slot:%d, status:0x%x",
1323f8919bdaSduboff dp->name, slot, status);
1324f8919bdaSduboff }
1325f8919bdaSduboff
1326f8919bdaSduboff #if DEBUG_LEVEL > 3
1327f8919bdaSduboff delay = (ddi_get_lbolt() - dp->tx_buf_head->txb_stime) * 10;
1328f8919bdaSduboff if (delay >= 50) {
1329f8919bdaSduboff DPRINTF(0, (CE_NOTE, "%s: tx deferred %d mS: slot %d",
1330f8919bdaSduboff dp->name, delay, slot));
1331f8919bdaSduboff }
1332f8919bdaSduboff #endif
1333f8919bdaSduboff
1334f8919bdaSduboff #if DEBUG_LEVEL > 3
1335f8919bdaSduboff for (i = 0; i < ndesc - 1; i++) {
1336f8919bdaSduboff uint32_t s;
1337f8919bdaSduboff int n;
1338f8919bdaSduboff
133923d366e3Sduboff n = SLOT(slot + i, tx_ring_size);
1340f8919bdaSduboff s = LE_32(
1341f8919bdaSduboff ((struct sfe_desc *)((void *)
1342f8919bdaSduboff &dp->tx_ring[SFE_DESC_SIZE * n]))->d_cmdsts);
1343f8919bdaSduboff
1344f8919bdaSduboff ASSERT(s & CMDSTS_MORE);
1345f8919bdaSduboff ASSERT((s & CMDSTS_OWN) == 0);
1346f8919bdaSduboff }
1347f8919bdaSduboff #endif
1348f8919bdaSduboff
1349f8919bdaSduboff /*
1350f8919bdaSduboff * collect statistics
1351f8919bdaSduboff */
1352f8919bdaSduboff if ((status & CMDSTS_OK) == 0) {
1353f8919bdaSduboff
1354f8919bdaSduboff /* failed to transmit the packet */
1355f8919bdaSduboff
1356f8919bdaSduboff DPRINTF(0, (CE_CONT, CONS "%s: Transmit error, Tx status %b",
1357f8919bdaSduboff dp->name, status, TXSTAT_BITS));
1358f8919bdaSduboff
1359f8919bdaSduboff dp->stats.errxmt++;
1360f8919bdaSduboff
1361f8919bdaSduboff if (status & CMDSTS_TFU) {
1362f8919bdaSduboff dp->stats.underflow++;
1363f8919bdaSduboff } else if (status & CMDSTS_CRS) {
1364f8919bdaSduboff dp->stats.nocarrier++;
1365f8919bdaSduboff } else if (status & CMDSTS_OWC) {
1366f8919bdaSduboff dp->stats.xmtlatecoll++;
1367f8919bdaSduboff } else if ((!dp->full_duplex) && (status & CMDSTS_EC)) {
1368f8919bdaSduboff dp->stats.excoll++;
1369f8919bdaSduboff dp->stats.collisions += 16;
1370f8919bdaSduboff } else {
1371f8919bdaSduboff dp->stats.xmit_internal_err++;
1372f8919bdaSduboff }
1373f8919bdaSduboff } else if (!dp->full_duplex) {
1374f8919bdaSduboff cols = (status >> CMDSTS_CCNT_SHIFT) & CCNT_MASK;
1375f8919bdaSduboff
1376f8919bdaSduboff if (cols > 0) {
1377f8919bdaSduboff if (cols == 1) {
1378f8919bdaSduboff dp->stats.first_coll++;
1379f8919bdaSduboff } else /* (cols > 1) */ {
1380f8919bdaSduboff dp->stats.multi_coll++;
1381f8919bdaSduboff }
1382f8919bdaSduboff dp->stats.collisions += cols;
1383f8919bdaSduboff } else if (status & CMDSTS_TD) {
1384f8919bdaSduboff dp->stats.defer++;
1385f8919bdaSduboff }
1386f8919bdaSduboff }
1387f8919bdaSduboff return (GEM_TX_DONE);
1388f8919bdaSduboff }
1389f8919bdaSduboff
1390f8919bdaSduboff static uint64_t
1391f8919bdaSduboff sfe_rx_desc_stat(struct gem_dev *dp, int slot, int ndesc)
1392f8919bdaSduboff {
1393f8919bdaSduboff struct sfe_desc *rdp;
1394f8919bdaSduboff uint_t len;
1395f8919bdaSduboff uint_t flag;
1396f8919bdaSduboff uint32_t status;
1397f8919bdaSduboff
1398f8919bdaSduboff flag = GEM_RX_DONE;
1399f8919bdaSduboff
1400f8919bdaSduboff /* Don't read ISR here because we cannot ack only the rx interrupt. */
1401f8919bdaSduboff
1402f8919bdaSduboff rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
1403f8919bdaSduboff
140423d366e3Sduboff /*
140523d366e3Sduboff * Don't apply LE_32() directly to rdp->d_cmdsts;
140623d366e3Sduboff * the access is not atomic on big-endian CPUs.
140723d366e3Sduboff */
140823d366e3Sduboff status = rdp->d_cmdsts;
140923d366e3Sduboff status = LE_32(status);
1410f8919bdaSduboff
1411f8919bdaSduboff DPRINTF(2, (CE_CONT, CONS "%s: time:%ld %s: slot:%d, status:0x%b",
1412f8919bdaSduboff dp->name, ddi_get_lbolt(), __func__,
1413f8919bdaSduboff slot, status, RXSTAT_BITS));
1414f8919bdaSduboff
1415f8919bdaSduboff if ((status & CMDSTS_OWN) == 0) {
1416f8919bdaSduboff /*
1417f8919bdaSduboff * No more received packets; this buffer is
1418f8919bdaSduboff * still owned by the NIC.
1419f8919bdaSduboff */
1420f8919bdaSduboff return (0);
1421f8919bdaSduboff }
1422f8919bdaSduboff
1423f8919bdaSduboff #define RX_ERR_BITS \
1424f8919bdaSduboff (CMDSTS_RXA | CMDSTS_RXO | CMDSTS_LONG | CMDSTS_RUNT | \
1425f8919bdaSduboff CMDSTS_ISE | CMDSTS_CRCE | CMDSTS_FAE | CMDSTS_MORE)
1426f8919bdaSduboff
1427f8919bdaSduboff if (status & RX_ERR_BITS) {
1428f8919bdaSduboff /*
1429f8919bdaSduboff * Packet with error received
1430f8919bdaSduboff */
1431f8919bdaSduboff DPRINTF(0, (CE_CONT, CONS "%s: Corrupted packet "
1432f8919bdaSduboff "received, buffer status: %b",
1433f8919bdaSduboff dp->name, status, RXSTAT_BITS));
1434f8919bdaSduboff
1435f8919bdaSduboff /* collect statistics information */
1436f8919bdaSduboff dp->stats.errrcv++;
1437f8919bdaSduboff
1438f8919bdaSduboff if (status & CMDSTS_RXO) {
1439f8919bdaSduboff dp->stats.overflow++;
1440f8919bdaSduboff } else if (status & (CMDSTS_LONG | CMDSTS_MORE)) {
1441f8919bdaSduboff dp->stats.frame_too_long++;
1442f8919bdaSduboff } else if (status & CMDSTS_RUNT) {
1443f8919bdaSduboff dp->stats.runt++;
1444f8919bdaSduboff } else if (status & (CMDSTS_ISE | CMDSTS_FAE)) {
1445f8919bdaSduboff dp->stats.frame++;
1446f8919bdaSduboff } else if (status & CMDSTS_CRCE) {
1447f8919bdaSduboff dp->stats.crc++;
1448f8919bdaSduboff } else {
1449f8919bdaSduboff dp->stats.rcv_internal_err++;
1450f8919bdaSduboff }
1451f8919bdaSduboff
1452f8919bdaSduboff return (flag | GEM_RX_ERR);
1453f8919bdaSduboff }
1454f8919bdaSduboff
1455f8919bdaSduboff /*
1456f8919bdaSduboff * this packet was received without errors
1457f8919bdaSduboff */
1458f8919bdaSduboff if ((len = (status & CMDSTS_SIZE)) >= ETHERFCSL) {
1459f8919bdaSduboff len -= ETHERFCSL;
1460f8919bdaSduboff }
1461f8919bdaSduboff
1462f8919bdaSduboff #if DEBUG_LEVEL > 10
1463f8919bdaSduboff {
1464f8919bdaSduboff int i;
1465f8919bdaSduboff uint8_t *bp = dp->rx_buf_head->rxb_buf;
1466f8919bdaSduboff
1467f8919bdaSduboff cmn_err(CE_CONT, CONS "%s: len:%d", dp->name, len);
1468f8919bdaSduboff
1469f8919bdaSduboff for (i = 0; i < 60; i += 10) {
1470f8919bdaSduboff cmn_err(CE_CONT, CONS
1471f8919bdaSduboff "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x",
1472f8919bdaSduboff bp[0], bp[1], bp[2], bp[3], bp[4],
1473f8919bdaSduboff bp[5], bp[6], bp[7], bp[8], bp[9]);
1474f8919bdaSduboff bp += 10;
1475f8919bdaSduboff }
1476f8919bdaSduboff }
1477f8919bdaSduboff #endif
1478f8919bdaSduboff return (flag | (len & GEM_RX_LEN));
1479f8919bdaSduboff }
1480f8919bdaSduboff
1481f8919bdaSduboff static void
1482f8919bdaSduboff sfe_tx_desc_init(struct gem_dev *dp, int slot)
1483f8919bdaSduboff {
148423d366e3Sduboff uint_t tx_ring_size = dp->gc.gc_tx_ring_size;
1485f8919bdaSduboff struct sfe_desc *tdp;
1486f8919bdaSduboff uint32_t here;
1487f8919bdaSduboff
1488f8919bdaSduboff tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
1489f8919bdaSduboff
1490f8919bdaSduboff /* don't clear the d_link field, which holds a valid pointer */
1491f8919bdaSduboff tdp->d_cmdsts = 0;
1492f8919bdaSduboff
1493f8919bdaSduboff /* make a link to this from the previous descriptor */
1494f8919bdaSduboff here = ((uint32_t)dp->tx_ring_dma) + SFE_DESC_SIZE*slot;
1495f8919bdaSduboff
1496f8919bdaSduboff tdp = (void *)
149723d366e3Sduboff &dp->tx_ring[SFE_DESC_SIZE * SLOT(slot - 1, tx_ring_size)];
1498f8919bdaSduboff tdp->d_link = LE_32(here);
1499f8919bdaSduboff }
1500f8919bdaSduboff
1501f8919bdaSduboff static void
1502f8919bdaSduboff sfe_rx_desc_init(struct gem_dev *dp, int slot)
1503f8919bdaSduboff {
150423d366e3Sduboff uint_t rx_ring_size = dp->gc.gc_rx_ring_size;
1505f8919bdaSduboff struct sfe_desc *rdp;
1506f8919bdaSduboff uint32_t here;
1507f8919bdaSduboff
1508f8919bdaSduboff rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
1509f8919bdaSduboff
1510f8919bdaSduboff /* don't clear the d_link field, which holds a valid pointer */
1511f8919bdaSduboff rdp->d_cmdsts = LE_32(CMDSTS_OWN);
1512f8919bdaSduboff
1513f8919bdaSduboff /* make a link to this from the previous descriptor */
1514f8919bdaSduboff here = ((uint32_t)dp->rx_ring_dma) + SFE_DESC_SIZE*slot;
1515f8919bdaSduboff
1516f8919bdaSduboff rdp = (void *)
151723d366e3Sduboff &dp->rx_ring[SFE_DESC_SIZE * SLOT(slot - 1, rx_ring_size)];
1518f8919bdaSduboff rdp->d_link = LE_32(here);
1519f8919bdaSduboff }
1520f8919bdaSduboff
1521f8919bdaSduboff static void
1522f8919bdaSduboff sfe_tx_desc_clean(struct gem_dev *dp, int slot)
1523f8919bdaSduboff {
1524f8919bdaSduboff struct sfe_desc *tdp;
1525f8919bdaSduboff
1526f8919bdaSduboff tdp = (void *)&dp->tx_ring[SFE_DESC_SIZE * slot];
1527f8919bdaSduboff tdp->d_cmdsts = 0;
1528f8919bdaSduboff }
1529f8919bdaSduboff
1530f8919bdaSduboff static void
1531f8919bdaSduboff sfe_rx_desc_clean(struct gem_dev *dp, int slot)
1532f8919bdaSduboff {
1533f8919bdaSduboff struct sfe_desc *rdp;
1534f8919bdaSduboff
1535f8919bdaSduboff rdp = (void *)&dp->rx_ring[SFE_DESC_SIZE * slot];
1536f8919bdaSduboff rdp->d_cmdsts = LE_32(CMDSTS_OWN);
1537f8919bdaSduboff }
1538f8919bdaSduboff
1539f8919bdaSduboff /*
1540f8919bdaSduboff * Device-dependent interrupt handler
1541f8919bdaSduboff */
1542f8919bdaSduboff static uint_t
1543f8919bdaSduboff sfe_interrupt(struct gem_dev *dp)
1544f8919bdaSduboff {
154523d366e3Sduboff uint_t rx_ring_size = dp->gc.gc_rx_ring_size;
1546f8919bdaSduboff uint32_t isr;
154723d366e3Sduboff uint32_t isr_bogus;
1548f8919bdaSduboff uint_t flags = 0;
1549f8919bdaSduboff boolean_t need_to_reset = B_FALSE;
1550f8919bdaSduboff struct sfe_dev *lp = dp->private;
1551f8919bdaSduboff
1552f8919bdaSduboff /* read reason and clear interrupt */
1553f8919bdaSduboff isr = INL(dp, ISR);
1554f8919bdaSduboff
155523d366e3Sduboff isr_bogus = lp->isr_pended;
155623d366e3Sduboff lp->isr_pended = 0;
155723d366e3Sduboff
155823d366e3Sduboff if (((isr | isr_bogus) & lp->our_intr_bits) == 0) {
1559f8919bdaSduboff /* we are not the interrupt source */
1560f8919bdaSduboff return (DDI_INTR_UNCLAIMED);
1561f8919bdaSduboff }
1562f8919bdaSduboff
1563f8919bdaSduboff DPRINTF(3, (CE_CONT,
1564f8919bdaSduboff CONS "%s: time:%ld %s:called: isr:0x%b rx_active_head: %d",
1565f8919bdaSduboff dp->name, ddi_get_lbolt(), __func__,
1566f8919bdaSduboff isr, INTR_BITS, dp->rx_active_head));
1567f8919bdaSduboff
1568f8919bdaSduboff if (!dp->mac_active) {
1569f8919bdaSduboff /* the device is going to stop */
1570f8919bdaSduboff lp->our_intr_bits = 0;
1571f8919bdaSduboff return (DDI_INTR_CLAIMED);
1572f8919bdaSduboff }
1573f8919bdaSduboff
1574f8919bdaSduboff isr &= lp->our_intr_bits;
1575f8919bdaSduboff
1576f8919bdaSduboff if (isr & (ISR_RXSOVR | ISR_RXORN | ISR_RXIDLE | ISR_RXERR |
1577f8919bdaSduboff ISR_RXDESC | ISR_RXOK)) {
1578f8919bdaSduboff (void) gem_receive(dp);
1579f8919bdaSduboff
1580f8919bdaSduboff if (isr & (ISR_RXSOVR | ISR_RXORN)) {
1581f8919bdaSduboff DPRINTF(0, (CE_CONT,
1582f8919bdaSduboff CONS "%s: rx fifo overrun: isr %b",
1583f8919bdaSduboff dp->name, isr, INTR_BITS));
1584f8919bdaSduboff /* no need to restart rx */
1585f8919bdaSduboff dp->stats.overflow++;
1586f8919bdaSduboff }
1587f8919bdaSduboff
1588f8919bdaSduboff if (isr & ISR_RXIDLE) {
1589f8919bdaSduboff DPRINTF(0, (CE_CONT,
1590f8919bdaSduboff CONS "%s: rx buffer ran out: isr %b",
1591f8919bdaSduboff dp->name, isr, INTR_BITS));
1592f8919bdaSduboff
1593f8919bdaSduboff dp->stats.norcvbuf++;
1594f8919bdaSduboff
1595f8919bdaSduboff /*
1596f8919bdaSduboff * Make RXDP point to the head of the receive
1597f8919bdaSduboff * buffer list.
1598f8919bdaSduboff */
1599f8919bdaSduboff OUTL(dp, RXDP, dp->rx_ring_dma +
1600f8919bdaSduboff SFE_DESC_SIZE *
160123d366e3Sduboff SLOT(dp->rx_active_head, rx_ring_size));
1602f8919bdaSduboff
1603f8919bdaSduboff /* Restart the receive engine */
1604f8919bdaSduboff OUTL(dp, CR, lp->cr | CR_RXE);
1605f8919bdaSduboff }
1606f8919bdaSduboff }
1607f8919bdaSduboff
1608f8919bdaSduboff if (isr & (ISR_TXURN | ISR_TXERR | ISR_TXDESC |
1609f8919bdaSduboff ISR_TXIDLE | ISR_TXOK)) {
1610f8919bdaSduboff /* need to reclaim tx buffers */
1611f8919bdaSduboff if (gem_tx_done(dp)) {
1612f8919bdaSduboff flags |= INTR_RESTART_TX;
1613f8919bdaSduboff }
1614f8919bdaSduboff /*
1615f8919bdaSduboff * XXX - tx error statistics are counted in
1616f8919bdaSduboff * sfe_tx_desc_stat(); there is no need to restart tx on errors.
1617f8919bdaSduboff */
1618f8919bdaSduboff }
1619f8919bdaSduboff
1620f8919bdaSduboff if (isr & (ISR_DPERR | ISR_SSERR | ISR_RMABT | ISR_RTABT)) {
1621f8919bdaSduboff cmn_err(CE_WARN, "%s: ERROR interrupt: isr %b.",
1622f8919bdaSduboff dp->name, isr, INTR_BITS);
1623f8919bdaSduboff need_to_reset = B_TRUE;
1624f8919bdaSduboff }
1625f8919bdaSduboff reset:
1626f8919bdaSduboff if (need_to_reset) {
1627f8919bdaSduboff (void) gem_restart_nic(dp, GEM_RESTART_KEEP_BUF);
1628f8919bdaSduboff flags |= INTR_RESTART_TX;
1629f8919bdaSduboff }
1630f8919bdaSduboff
1631f8919bdaSduboff DPRINTF(5, (CE_CONT, CONS "%s: %s: return: isr: %b",
1632f8919bdaSduboff dp->name, __func__, isr, INTR_BITS));
1633f8919bdaSduboff
1634f8919bdaSduboff return (DDI_INTR_CLAIMED | flags);
1635f8919bdaSduboff }
1636f8919bdaSduboff
1637f8919bdaSduboff /* ======================================================== */
1638f8919bdaSduboff /*
1639f8919bdaSduboff * HW-dependent MII routines
1640f8919bdaSduboff */
1641f8919bdaSduboff /* ======================================================== */
1642f8919bdaSduboff
1643f8919bdaSduboff /*
1644f8919bdaSduboff * MII routines for NS DP83815
1645f8919bdaSduboff */
1646f8919bdaSduboff static void
1647f8919bdaSduboff sfe_mii_sync_dp83815(struct gem_dev *dp)
1648f8919bdaSduboff {
1649f8919bdaSduboff /* do nothing */
1650f8919bdaSduboff }
1651f8919bdaSduboff
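/*
 * The DP83815 maps its internal PHY registers directly into the MAC
 * register space at MII_REGS_BASE, one 32-bit word per MII register,
 * so register access here is a plain load or store and no MDIO
 * bit-banging is required.
 */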
1652f8919bdaSduboff static uint16_t
1653f8919bdaSduboff sfe_mii_read_dp83815(struct gem_dev *dp, uint_t offset)
1654f8919bdaSduboff {
1655f8919bdaSduboff DPRINTF(4, (CE_CONT, CONS"%s: %s: offset 0x%x",
1656f8919bdaSduboff dp->name, __func__, offset));
1657f8919bdaSduboff return ((uint16_t)INL(dp, MII_REGS_BASE + offset*4));
1658f8919bdaSduboff }
1659f8919bdaSduboff
1660f8919bdaSduboff static void
1661f8919bdaSduboff sfe_mii_write_dp83815(struct gem_dev *dp, uint_t offset, uint16_t val)
1662f8919bdaSduboff {
1663f8919bdaSduboff DPRINTF(4, (CE_CONT, CONS"%s: %s: offset 0x%x 0x%x",
1664f8919bdaSduboff dp->name, __func__, offset, val));
1665f8919bdaSduboff OUTL(dp, MII_REGS_BASE + offset*4, val);
1666f8919bdaSduboff }
1667f8919bdaSduboff
1668f8919bdaSduboff static int
1669f8919bdaSduboff sfe_mii_config_dp83815(struct gem_dev *dp)
1670f8919bdaSduboff {
1671f8919bdaSduboff uint32_t srr;
1672f8919bdaSduboff
1673f8919bdaSduboff srr = INL(dp, SRR) & SRR_REV;
1674f8919bdaSduboff
1675f8919bdaSduboff DPRINTF(0, (CE_CONT, CONS "%s: srr:0x%04x %04x %04x %04x %04x %04x",
1676f8919bdaSduboff dp->name, srr,
1677f8919bdaSduboff INW(dp, 0x00cc), /* PGSEL */
1678f8919bdaSduboff INW(dp, 0x00e4), /* PMDCSR */
1679f8919bdaSduboff INW(dp, 0x00fc), /* TSTDAT */
1680f8919bdaSduboff INW(dp, 0x00f4), /* DSPCFG */
1681f8919bdaSduboff INW(dp, 0x00f8))); /* SDCFG */
1682f8919bdaSduboff
168323d366e3Sduboff if (srr == SRR_REV_DP83815CVNG) {
1684f8919bdaSduboff /*
1685f8919bdaSduboff * The NS datasheet says that the DP83815CVNG needs the following
1686f8919bdaSduboff * registers to be patched to optimize its performance.
168723d366e3Sduboff * A field report says that RX CRC errors disappeared
1688f8919bdaSduboff * with this patch.
1689f8919bdaSduboff */
1690f8919bdaSduboff OUTW(dp, 0x00cc, 0x0001); /* PGSEL */
1691f8919bdaSduboff OUTW(dp, 0x00e4, 0x189c); /* PMDCSR */
1692f8919bdaSduboff OUTW(dp, 0x00fc, 0x0000); /* TSTDAT */
1693f8919bdaSduboff OUTW(dp, 0x00f4, 0x5040); /* DSPCFG */
1694f8919bdaSduboff OUTW(dp, 0x00f8, 0x008c); /* SDCFG */
169523d366e3Sduboff OUTW(dp, 0x00cc, 0x0000); /* PGSEL */
1696f8919bdaSduboff
1697f8919bdaSduboff DPRINTF(0, (CE_CONT,
1698f8919bdaSduboff CONS "%s: PHY patched %04x %04x %04x %04x %04x",
1699f8919bdaSduboff dp->name,
1700f8919bdaSduboff INW(dp, 0x00cc), /* PGSEL */
1701f8919bdaSduboff INW(dp, 0x00e4), /* PMDCSR */
1702f8919bdaSduboff INW(dp, 0x00fc), /* TSTDAT */
1703f8919bdaSduboff INW(dp, 0x00f4), /* DSPCFG */
1704f8919bdaSduboff INW(dp, 0x00f8))); /* SDCFG */
170523d366e3Sduboff } else if (((srr ^ SRR_REV_DP83815DVNG) & 0xff00) == 0 ||
170623d366e3Sduboff ((srr ^ SRR_REV_DP83816AVNG) & 0xff00) == 0) {
170723d366e3Sduboff /*
170823d366e3Sduboff * Additional register patches for later chip revisions
170923d366e3Sduboff */
171023d366e3Sduboff OUTW(dp, 0x00cc, 0x0001); /* PGSEL */
171123d366e3Sduboff OUTW(dp, 0x00e4, 0x189c); /* PMDCSR */
171223d366e3Sduboff OUTW(dp, 0x00cc, 0x0000); /* PGSEL */
171323d366e3Sduboff
171423d366e3Sduboff DPRINTF(0, (CE_CONT,
171523d366e3Sduboff CONS "%s: PHY patched %04x %04x",
171623d366e3Sduboff dp->name,
171723d366e3Sduboff INW(dp, 0x00cc), /* PGSEL */
171823d366e3Sduboff INW(dp, 0x00e4))); /* PMDCSR */
1719f8919bdaSduboff }
1720f8919bdaSduboff
1721f8919bdaSduboff return (gem_mii_config_default(dp));
1722f8919bdaSduboff }
1723f8919bdaSduboff
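/*
 * PHY probe strategy for the DP83815/DP83816: first look for an
 * external PHY through the MDIO interface (reusing the SiS900
 * bit-bang routines); if none answers, switch back to the internal
 * PHY, reset it, and probe again through the memory-mapped MII
 * registers.
 */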
172423d366e3Sduboff static int
172523d366e3Sduboff sfe_mii_probe_dp83815(struct gem_dev *dp)
172623d366e3Sduboff {
172723d366e3Sduboff uint32_t val;
172823d366e3Sduboff
172923d366e3Sduboff /* try external phy first */
173023d366e3Sduboff DPRINTF(0, (CE_CONT, CONS "%s: %s: trying external phy",
173123d366e3Sduboff dp->name, __func__));
173223d366e3Sduboff dp->mii_phy_addr = 0;
173323d366e3Sduboff dp->gc.gc_mii_sync = &sfe_mii_sync_sis900;
173423d366e3Sduboff dp->gc.gc_mii_read = &sfe_mii_read_sis900;
173523d366e3Sduboff dp->gc.gc_mii_write = &sfe_mii_write_sis900;
173623d366e3Sduboff
173723d366e3Sduboff val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
173823d366e3Sduboff OUTL(dp, CFG, val | CFG_EXT_PHY | CFG_PHY_DIS);
173923d366e3Sduboff
174023d366e3Sduboff if (gem_mii_probe_default(dp) == GEM_SUCCESS) {
174123d366e3Sduboff return (GEM_SUCCESS);
174223d366e3Sduboff }
174323d366e3Sduboff
174423d366e3Sduboff /* switch to internal phy */
174523d366e3Sduboff DPRINTF(0, (CE_CONT, CONS "%s: %s: switching to internal phy",
174623d366e3Sduboff dp->name, __func__));
174723d366e3Sduboff dp->mii_phy_addr = -1;
174823d366e3Sduboff dp->gc.gc_mii_sync = &sfe_mii_sync_dp83815;
174923d366e3Sduboff dp->gc.gc_mii_read = &sfe_mii_read_dp83815;
175023d366e3Sduboff dp->gc.gc_mii_write = &sfe_mii_write_dp83815;
175123d366e3Sduboff
175223d366e3Sduboff val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
175323d366e3Sduboff OUTL(dp, CFG, val | CFG_PAUSE_ADV | CFG_PHY_RST);
175423d366e3Sduboff drv_usecwait(100); /* keep the RST bit asserted for a while */
175523d366e3Sduboff OUTL(dp, CFG, val | CFG_PAUSE_ADV);
175623d366e3Sduboff
175723d366e3Sduboff /* wait for PHY reset */
175823d366e3Sduboff delay(drv_usectohz(10000));
175923d366e3Sduboff
176023d366e3Sduboff return (gem_mii_probe_default(dp));
176123d366e3Sduboff }
176223d366e3Sduboff
176323d366e3Sduboff static int
176423d366e3Sduboff sfe_mii_init_dp83815(struct gem_dev *dp)
176523d366e3Sduboff {
176623d366e3Sduboff uint32_t val;
176723d366e3Sduboff
176823d366e3Sduboff val = INL(dp, CFG) & (CFG_ANEG_SEL | CFG_PHY_CFG);
176923d366e3Sduboff
177023d366e3Sduboff if (dp->mii_phy_addr == -1) {
177123d366e3Sduboff /* select internal phy */
177223d366e3Sduboff OUTL(dp, CFG, val | CFG_PAUSE_ADV);
177323d366e3Sduboff } else {
177423d366e3Sduboff /* select external phy */
177523d366e3Sduboff OUTL(dp, CFG, val | CFG_EXT_PHY | CFG_PHY_DIS);
177623d366e3Sduboff }
177723d366e3Sduboff
177823d366e3Sduboff return (GEM_SUCCESS);
177923d366e3Sduboff }
1780f8919bdaSduboff
1781f8919bdaSduboff /*
1782f8919bdaSduboff * MII routines for SiS900
1783f8919bdaSduboff */
178423d366e3Sduboff #define MDIO_DELAY(dp) {(void) INL(dp, MEAR); (void) INL(dp, MEAR); }
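/*
 * Two dummy reads of MEAR per MDC phase serve as the delay; each read
 * has to cross the PCI bus, which presumably holds MDC high or low
 * long enough to satisfy the MII management interface timing.
 */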
1785f8919bdaSduboff static void
1786f8919bdaSduboff sfe_mii_sync_sis900(struct gem_dev *dp)
1787f8919bdaSduboff {
1788f8919bdaSduboff int i;
1789f8919bdaSduboff
179023d366e3Sduboff /* send 32 ones to make the MII line idle */
1791f8919bdaSduboff for (i = 0; i < 32; i++) {
1792f8919bdaSduboff OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO);
1793f8919bdaSduboff MDIO_DELAY(dp);
1794f8919bdaSduboff OUTL(dp, MEAR, MEAR_MDDIR | MEAR_MDIO | MEAR_MDC);
1795f8919bdaSduboff MDIO_DELAY(dp);
1796f8919bdaSduboff }
1797f8919bdaSduboff }
1798f8919bdaSduboff
1799f8919bdaSduboff static int
1800f8919bdaSduboff sfe_mii_config_sis900(struct gem_dev *dp)
1801f8919bdaSduboff {
1802f8919bdaSduboff struct sfe_dev *lp = dp->private;
1803f8919bdaSduboff
1804f8919bdaSduboff /* Do chip depend setup */
1805f8919bdaSduboff if ((dp->mii_phy_id & PHY_MASK) == PHY_ICS1893) {
1806f8919bdaSduboff /* workaround for ICS1893 PHY */
1807f8919bdaSduboff gem_mii_write(dp, 0x0018, 0xD200);
1808f8919bdaSduboff }
1809f8919bdaSduboff
1810f8919bdaSduboff if (lp->revid == SIS630E_900_REV) {
1811f8919bdaSduboff /*
1812f8919bdaSduboff * The SiS 630E has buggy default values
1813f8919bdaSduboff * in its PHY registers.
1814f8919bdaSduboff */
1815f8919bdaSduboff gem_mii_write(dp, MII_AN_ADVERT, 0x05e1);
1816f8919bdaSduboff gem_mii_write(dp, MII_CONFIG1, 0x0022);
1817f8919bdaSduboff gem_mii_write(dp, MII_CONFIG2, 0xff00);
1818f8919bdaSduboff gem_mii_write(dp, MII_MASK, 0xffc0);
1819f8919bdaSduboff }
1820f8919bdaSduboff sfe_set_eq_sis630(dp);
1821f8919bdaSduboff
1822f8919bdaSduboff return (gem_mii_config_default(dp));
1823f8919bdaSduboff }
1824f8919bdaSduboff
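/*
 * sfe_mii_read_sis900() bit-bangs a clause-22 MDIO read frame through
 * MEAR: the command word built by MII_READ_CMD() is shifted out MSB
 * first (only the upper 14 bits, start/opcode/phy/reg, are driven),
 * followed by a turnaround cycle, 16 data bits clocked in from the
 * PHY, and two idle cycles to finish the transaction.
 */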
1825f8919bdaSduboff static uint16_t
1826f8919bdaSduboff sfe_mii_read_sis900(struct gem_dev *dp, uint_t reg)
1827f8919bdaSduboff {
1828f8919bdaSduboff uint32_t cmd;
1829f8919bdaSduboff uint16_t ret;
1830f8919bdaSduboff int i;
1831f8919bdaSduboff uint32_t data;
1832f8919bdaSduboff
1833f8919bdaSduboff cmd = MII_READ_CMD(dp->mii_phy_addr, reg);
1834f8919bdaSduboff
1835f8919bdaSduboff for (i = 31; i >= 18; i--) {
1836f8919bdaSduboff data = ((cmd >> i) & 1) << MEAR_MDIO_SHIFT;
1837f8919bdaSduboff OUTL(dp, MEAR, data | MEAR_MDDIR);
1838f8919bdaSduboff MDIO_DELAY(dp);
1839f8919bdaSduboff OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC);
1840f8919bdaSduboff MDIO_DELAY(dp);
1841f8919bdaSduboff }
1842f8919bdaSduboff
1843f8919bdaSduboff /* turnaround cycle */
184423d366e3Sduboff OUTL(dp, MEAR, 0);
1845f8919bdaSduboff MDIO_DELAY(dp);
1846f8919bdaSduboff
1847f8919bdaSduboff /* get response from PHY */
1848f8919bdaSduboff OUTL(dp, MEAR, MEAR_MDC);
1849f8919bdaSduboff MDIO_DELAY(dp);
1850f8919bdaSduboff
185123d366e3Sduboff OUTL(dp, MEAR, 0);
1852f8919bdaSduboff #if DEBUG_LEVEL > 0
185323d366e3Sduboff (void) INL(dp, MEAR); /* delay */
1854f8919bdaSduboff if (INL(dp, MEAR) & MEAR_MDIO) {
1855f8919bdaSduboff cmn_err(CE_WARN, "%s: PHY@%d not responded",
1856f8919bdaSduboff dp->name, dp->mii_phy_addr);
1857f8919bdaSduboff }
185823d366e3Sduboff #else
185923d366e3Sduboff MDIO_DELAY(dp);
1860f8919bdaSduboff #endif
1861f8919bdaSduboff /* terminate response cycle */
1862f8919bdaSduboff OUTL(dp, MEAR, MEAR_MDC);
186323d366e3Sduboff MDIO_DELAY(dp);
1864f8919bdaSduboff
1865f8919bdaSduboff ret = 0; /* to avoid lint errors */
1866f8919bdaSduboff for (i = 16; i > 0; i--) {
1867f8919bdaSduboff OUTL(dp, MEAR, 0);
186823d366e3Sduboff (void) INL(dp, MEAR); /* delay */
1869f8919bdaSduboff ret = (ret << 1) | ((INL(dp, MEAR) >> MEAR_MDIO_SHIFT) & 1);
1870f8919bdaSduboff OUTL(dp, MEAR, MEAR_MDC);
1871f8919bdaSduboff MDIO_DELAY(dp);
1872f8919bdaSduboff }
1873f8919bdaSduboff
187423d366e3Sduboff /* send two idle(Z) bits to terminate the read cycle */
187523d366e3Sduboff for (i = 0; i < 2; i++) {
1876f8919bdaSduboff OUTL(dp, MEAR, 0);
1877f8919bdaSduboff MDIO_DELAY(dp);
1878f8919bdaSduboff OUTL(dp, MEAR, MEAR_MDC);
1879f8919bdaSduboff MDIO_DELAY(dp);
188023d366e3Sduboff }
1881f8919bdaSduboff
1882f8919bdaSduboff return (ret);
1883f8919bdaSduboff }
1884f8919bdaSduboff
1885f8919bdaSduboff static void
1886f8919bdaSduboff sfe_mii_write_sis900(struct gem_dev *dp, uint_t reg, uint16_t val)
1887f8919bdaSduboff {
1888f8919bdaSduboff uint32_t cmd;
1889f8919bdaSduboff int i;
1890f8919bdaSduboff uint32_t data;
1891f8919bdaSduboff
1892f8919bdaSduboff cmd = MII_WRITE_CMD(dp->mii_phy_addr, reg, val);
1893f8919bdaSduboff
1894f8919bdaSduboff for (i = 31; i >= 0; i--) {
1895f8919bdaSduboff data = ((cmd >> i) & 1) << MEAR_MDIO_SHIFT;
1896f8919bdaSduboff OUTL(dp, MEAR, data | MEAR_MDDIR);
1897f8919bdaSduboff MDIO_DELAY(dp);
1898f8919bdaSduboff OUTL(dp, MEAR, data | MEAR_MDDIR | MEAR_MDC);
1899f8919bdaSduboff MDIO_DELAY(dp);
1900f8919bdaSduboff }
1901f8919bdaSduboff
190223d366e3Sduboff /* send two idle(Z) bits to terminate the write cycle. */
1903f8919bdaSduboff for (i = 0; i < 2; i++) {
190423d366e3Sduboff OUTL(dp, MEAR, 0);
1905f8919bdaSduboff MDIO_DELAY(dp);
1906f8919bdaSduboff OUTL(dp, MEAR, MEAR_MDC);
1907f8919bdaSduboff MDIO_DELAY(dp);
1908f8919bdaSduboff }
190923d366e3Sduboff }
1910f8919bdaSduboff #undef MDIO_DELAY
1911f8919bdaSduboff
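/*
 * sfe_set_eq_sis630() tunes the receive equalizer of the PHY embedded
 * in SiS 630-family chipsets. While the link is up it samples the
 * equalizer field of the vendor-reserved MII register (MII_RESV) ten
 * times and then programs a value derived from the observed minimum
 * and maximum according to revision-specific rules; otherwise it
 * writes a link-down default back to the same register.
 */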
1912f8919bdaSduboff static void
1913f8919bdaSduboff sfe_set_eq_sis630(struct gem_dev *dp)
1914f8919bdaSduboff {
1915f8919bdaSduboff uint16_t reg14h;
1916f8919bdaSduboff uint16_t eq_value;
1917f8919bdaSduboff uint16_t max_value;
1918f8919bdaSduboff uint16_t min_value;
1919f8919bdaSduboff int i;
1920f8919bdaSduboff uint8_t rev;
1921f8919bdaSduboff struct sfe_dev *lp = dp->private;
1922f8919bdaSduboff
1923f8919bdaSduboff rev = lp->revid;
1924f8919bdaSduboff
1925f8919bdaSduboff if (!(rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
1926f8919bdaSduboff rev == SIS630A_900_REV || rev == SIS630ET_900_REV)) {
1927f8919bdaSduboff /* this revision doesn't have an internal PHY */
1928f8919bdaSduboff return;
1929f8919bdaSduboff }
1930f8919bdaSduboff
1931f8919bdaSduboff if (dp->mii_state == MII_STATE_LINKUP) {
1932f8919bdaSduboff reg14h = gem_mii_read(dp, MII_RESV);
1933f8919bdaSduboff gem_mii_write(dp, MII_RESV, (0x2200 | reg14h) & 0xBFFF);
1934f8919bdaSduboff
1935f8919bdaSduboff eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3;
1936f8919bdaSduboff max_value = min_value = eq_value;
1937f8919bdaSduboff for (i = 1; i < 10; i++) {
1938f8919bdaSduboff eq_value = (0x00f8 & gem_mii_read(dp, MII_RESV)) >> 3;
1939f8919bdaSduboff max_value = max(eq_value, max_value);
1940f8919bdaSduboff min_value = min(eq_value, min_value);
1941f8919bdaSduboff }
1942f8919bdaSduboff
1943f8919bdaSduboff /* for 630E, rule to determine the equalizer value */
1944f8919bdaSduboff if (rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
1945f8919bdaSduboff rev == SIS630ET_900_REV) {
1946f8919bdaSduboff if (max_value < 5) {
1947f8919bdaSduboff eq_value = max_value;
1948f8919bdaSduboff } else if (5 <= max_value && max_value < 15) {
1949f8919bdaSduboff eq_value =
1950f8919bdaSduboff max(max_value + 1,
1951f8919bdaSduboff min_value + 2);
1952f8919bdaSduboff } else if (15 <= max_value) {
1953f8919bdaSduboff eq_value =
1954f8919bdaSduboff max(max_value + 5,
1955f8919bdaSduboff min_value + 6);
1956f8919bdaSduboff }
1957f8919bdaSduboff }
1958f8919bdaSduboff /* for 630B0&B1, rule to determine the equalizer value */
1959f8919bdaSduboff else
1960f8919bdaSduboff if (rev == SIS630A_900_REV &&
1961f8919bdaSduboff (lp->bridge_revid == SIS630B0 ||
1962f8919bdaSduboff lp->bridge_revid == SIS630B1)) {
1963f8919bdaSduboff
1964f8919bdaSduboff if (max_value == 0) {
1965f8919bdaSduboff eq_value = 3;
1966f8919bdaSduboff } else {
1967f8919bdaSduboff eq_value = (max_value + min_value + 1)/2;
1968f8919bdaSduboff }
1969f8919bdaSduboff }
1970f8919bdaSduboff /* write equalizer value and setting */
1971f8919bdaSduboff reg14h = gem_mii_read(dp, MII_RESV) & ~0x02f8;
1972f8919bdaSduboff reg14h |= 0x6000 | (eq_value << 3);
1973f8919bdaSduboff gem_mii_write(dp, MII_RESV, reg14h);
1974f8919bdaSduboff } else {
1975f8919bdaSduboff reg14h = (gem_mii_read(dp, MII_RESV) & ~0x4000) | 0x2000;
1976f8919bdaSduboff if (rev == SIS630A_900_REV &&
1977f8919bdaSduboff (lp->bridge_revid == SIS630B0 ||
1978f8919bdaSduboff lp->bridge_revid == SIS630B1)) {
1979f8919bdaSduboff
1980f8919bdaSduboff reg14h |= 0x0200;
1981f8919bdaSduboff }
1982f8919bdaSduboff gem_mii_write(dp, MII_RESV, reg14h);
1983f8919bdaSduboff }
1984f8919bdaSduboff }
1985f8919bdaSduboff
1986f8919bdaSduboff /* ======================================================== */
1987f8919bdaSduboff /*
1988f8919bdaSduboff * OS-dependent (device driver) routines
1989f8919bdaSduboff */
1990f8919bdaSduboff /* ======================================================== */
1991f8919bdaSduboff static void
1992f8919bdaSduboff sfe_chipinfo_init_sis900(struct gem_dev *dp)
1993f8919bdaSduboff {
1994f8919bdaSduboff int rev;
1995f8919bdaSduboff struct sfe_dev *lp = (struct sfe_dev *)dp->private;
1996f8919bdaSduboff
1997f8919bdaSduboff rev = lp->revid;
1998f8919bdaSduboff
1999f8919bdaSduboff if (rev == SIS630E_900_REV /* 0x81 */) {
2000f8919bdaSduboff /* sis630E */
2001f8919bdaSduboff lp->get_mac_addr = &sfe_get_mac_addr_sis630e;
2002f8919bdaSduboff } else if (rev > 0x81 && rev <= 0x90) {
2003f8919bdaSduboff /* 630S, 630EA1, 630ET, 635A */
2004f8919bdaSduboff lp->get_mac_addr = &sfe_get_mac_addr_sis635;
2005f8919bdaSduboff } else if (rev == SIS962_900_REV /* 0x91 */) {
2006f8919bdaSduboff /* sis962 or later */
2007f8919bdaSduboff lp->get_mac_addr = &sfe_get_mac_addr_sis962;
2008f8919bdaSduboff } else {
2009f8919bdaSduboff /* sis900 */
2010f8919bdaSduboff lp->get_mac_addr = &sfe_get_mac_addr_sis900;
2011f8919bdaSduboff }
2012f8919bdaSduboff
2013f8919bdaSduboff lp->bridge_revid = 0;
2014f8919bdaSduboff
2015f8919bdaSduboff if (rev == SIS630E_900_REV || rev == SIS630EA1_900_REV ||
2016f8919bdaSduboff rev == SIS630A_900_REV || rev == SIS630ET_900_REV) {
2017f8919bdaSduboff /*
2018f8919bdaSduboff * read host bridge revision
2019f8919bdaSduboff */
2020f8919bdaSduboff dev_info_t *bridge;
2021f8919bdaSduboff ddi_acc_handle_t bridge_handle;
2022f8919bdaSduboff
2023f8919bdaSduboff if ((bridge = sfe_search_pci_dev(0x1039, 0x630)) == NULL) {
2024f8919bdaSduboff cmn_err(CE_WARN,
2025f8919bdaSduboff "%s: cannot find host bridge (pci1039,630)",
2026f8919bdaSduboff dp->name);
2027f8919bdaSduboff return;
2028f8919bdaSduboff }
2029f8919bdaSduboff
2030f8919bdaSduboff if (pci_config_setup(bridge, &bridge_handle) != DDI_SUCCESS) {
2031f8919bdaSduboff cmn_err(CE_WARN, "%s: pci_config_setup failed",
2032f8919bdaSduboff dp->name);
2033f8919bdaSduboff return;
2034f8919bdaSduboff }
2035f8919bdaSduboff
2036f8919bdaSduboff lp->bridge_revid =
2037f8919bdaSduboff pci_config_get8(bridge_handle, PCI_CONF_REVID);
2038f8919bdaSduboff pci_config_teardown(&bridge_handle);
2039f8919bdaSduboff }
2040f8919bdaSduboff }
2041f8919bdaSduboff
2042f8919bdaSduboff static int
2043f8919bdaSduboff sfe_attach_chip(struct gem_dev *dp)
2044f8919bdaSduboff {
2045f8919bdaSduboff struct sfe_dev *lp = (struct sfe_dev *)dp->private;
2046f8919bdaSduboff
2047f8919bdaSduboff DPRINTF(4, (CE_CONT, CONS "!%s: %s called", dp->name, __func__));
2048f8919bdaSduboff
2049f8919bdaSduboff /* set up the chip-dependent get_mac_addr function */
2050f8919bdaSduboff if (lp->chip->chip_type == CHIPTYPE_SIS900) {
2051f8919bdaSduboff sfe_chipinfo_init_sis900(dp);
2052f8919bdaSduboff } else {
2053f8919bdaSduboff lp->get_mac_addr = &sfe_get_mac_addr_dp83815;
2054f8919bdaSduboff }
2055f8919bdaSduboff
2056f8919bdaSduboff /* read MAC address */
2057f8919bdaSduboff if (!(lp->get_mac_addr)(dp)) {
2058f8919bdaSduboff cmn_err(CE_WARN,
2059f8919bdaSduboff "!%s: %s: failed to get the factory mac address;"
2060f8919bdaSduboff " please specify a mac address in sfe.conf",
2061f8919bdaSduboff dp->name, __func__);
2062f8919bdaSduboff return (GEM_FAILURE);
2063f8919bdaSduboff }
2064f8919bdaSduboff
2065f8919bdaSduboff if (lp->chip->chip_type == CHIPTYPE_DP83815) {
2066f8919bdaSduboff dp->mii_phy_addr = -1; /* no need to scan PHY */
2067f8919bdaSduboff dp->misc_flag |= GEM_VLAN_SOFT;
2068f8919bdaSduboff dp->txthr += 4; /* VTAG_SIZE */
2069f8919bdaSduboff }
2070f8919bdaSduboff dp->txthr = min(dp->txthr, TXFIFOSIZE - 2);
2071f8919bdaSduboff
2072f8919bdaSduboff return (GEM_SUCCESS);
2073f8919bdaSduboff }
2074f8919bdaSduboff
2075f8919bdaSduboff static int
2076f8919bdaSduboff sfeattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2077f8919bdaSduboff {
2078f8919bdaSduboff int unit;
2079f8919bdaSduboff const char *drv_name;
2080f8919bdaSduboff int i;
2081f8919bdaSduboff ddi_acc_handle_t conf_handle;
2082f8919bdaSduboff uint16_t vid;
2083f8919bdaSduboff uint16_t did;
2084f8919bdaSduboff uint8_t rev;
2085f8919bdaSduboff #ifdef DEBUG_LEVEL
2086f8919bdaSduboff uint32_t iline;
2087f8919bdaSduboff uint8_t latim;
2088f8919bdaSduboff #endif
2089f8919bdaSduboff struct chip_info *p;
2090f8919bdaSduboff struct gem_dev *dp;
2091f8919bdaSduboff struct sfe_dev *lp;
2092f8919bdaSduboff caddr_t base;
2093f8919bdaSduboff ddi_acc_handle_t regs_ha;
2094f8919bdaSduboff struct gem_conf *gcp;
2095f8919bdaSduboff
2096f8919bdaSduboff unit = ddi_get_instance(dip);
2097f8919bdaSduboff drv_name = ddi_driver_name(dip);
2098f8919bdaSduboff
2099f8919bdaSduboff DPRINTF(3, (CE_CONT, CONS "%s%d: sfeattach: called", drv_name, unit));
2100f8919bdaSduboff
2101f8919bdaSduboff /*
2102f8919bdaSduboff * Common code after power-up
2103f8919bdaSduboff */
2104f8919bdaSduboff if (pci_config_setup(dip, &conf_handle) != DDI_SUCCESS) {
2105f8919bdaSduboff cmn_err(CE_WARN, "%s%d: pci_config_setup failed",
2106f8919bdaSduboff drv_name, unit);
2107f8919bdaSduboff goto err;
2108f8919bdaSduboff }
2109f8919bdaSduboff
2110f8919bdaSduboff vid = pci_config_get16(conf_handle, PCI_CONF_VENID);
2111f8919bdaSduboff did = pci_config_get16(conf_handle, PCI_CONF_DEVID);
2112f8919bdaSduboff rev = pci_config_get16(conf_handle, PCI_CONF_REVID);
2113f8919bdaSduboff #ifdef DEBUG_LEVEL
211423d366e3Sduboff iline = pci_config_get32(conf_handle, PCI_CONF_ILINE);
2115f8919bdaSduboff latim = pci_config_get8(conf_handle, PCI_CONF_LATENCY_TIMER);
2116f8919bdaSduboff #endif
2117f8919bdaSduboff #ifdef DEBUG_BUILT_IN_SIS900
2118f8919bdaSduboff rev = SIS630E_900_REV;
2119f8919bdaSduboff #endif
2120f8919bdaSduboff for (i = 0, p = sfe_chiptbl; i < CHIPTABLESIZE; i++, p++) {
2121f8919bdaSduboff if (p->venid == vid && p->devid == did) {
2122f8919bdaSduboff /* found */
2123f8919bdaSduboff goto chip_found;
2124f8919bdaSduboff }
2125f8919bdaSduboff }
2126f8919bdaSduboff
2127f8919bdaSduboff /* Not found */
2128f8919bdaSduboff cmn_err(CE_WARN,
2129f8919bdaSduboff "%s%d: sfe_attach: wrong PCI venid/devid (0x%x, 0x%x)",
2130f8919bdaSduboff drv_name, unit, vid, did);
2131f8919bdaSduboff pci_config_teardown(&conf_handle);
2132f8919bdaSduboff goto err;
2133f8919bdaSduboff
2134f8919bdaSduboff chip_found:
2135f8919bdaSduboff pci_config_put16(conf_handle, PCI_CONF_COMM,
2136f8919bdaSduboff PCI_COMM_IO | PCI_COMM_MAE | PCI_COMM_ME |
2137f8919bdaSduboff pci_config_get16(conf_handle, PCI_CONF_COMM));
2138f8919bdaSduboff
2139f8919bdaSduboff /* ensure D0 mode */
2140f8919bdaSduboff (void) gem_pci_set_power_state(dip, conf_handle, PCI_PMCSR_D0);
2141f8919bdaSduboff
2142f8919bdaSduboff pci_config_teardown(&conf_handle);
2143f8919bdaSduboff
2144f8919bdaSduboff switch (cmd) {
2145f8919bdaSduboff case DDI_RESUME:
2146f8919bdaSduboff return (gem_resume(dip));
2147f8919bdaSduboff
2148f8919bdaSduboff case DDI_ATTACH:
2149f8919bdaSduboff
2150f8919bdaSduboff DPRINTF(0, (CE_CONT,
2151f8919bdaSduboff CONS "%s%d: ilr 0x%08x, latency_timer:0x%02x",
2152f8919bdaSduboff drv_name, unit, iline, latim));
2153f8919bdaSduboff
2154f8919bdaSduboff /*
2155f8919bdaSduboff * Map in the device registers.
2156f8919bdaSduboff */
2157f8919bdaSduboff if (gem_pci_regs_map_setup(dip,
2158f8919bdaSduboff (sfe_use_pcimemspace && p->chip_type == CHIPTYPE_DP83815)
2159f8919bdaSduboff ? PCI_ADDR_MEM32 : PCI_ADDR_IO, PCI_ADDR_MASK,
2160f8919bdaSduboff &sfe_dev_attr, &base, ®s_ha) != DDI_SUCCESS) {
2161f8919bdaSduboff cmn_err(CE_WARN,
2162f8919bdaSduboff "%s%d: ddi_regs_map_setup failed",
2163f8919bdaSduboff drv_name, unit);
2164f8919bdaSduboff goto err;
2165f8919bdaSduboff }
2166f8919bdaSduboff
2167f8919bdaSduboff /*
2168f8919bdaSduboff * construct gem configuration
2169f8919bdaSduboff */
2170f8919bdaSduboff gcp = kmem_zalloc(sizeof (*gcp), KM_SLEEP);
2171f8919bdaSduboff
2172f8919bdaSduboff /* name */
2173f8919bdaSduboff (void) sprintf(gcp->gc_name, "%s%d", drv_name, unit);
2174f8919bdaSduboff
2175f8919bdaSduboff /* tx and rx buffer/descriptor configuration */
2176f8919bdaSduboff gcp->gc_tx_buf_align = sizeof (uint8_t) - 1;
2177f8919bdaSduboff gcp->gc_tx_max_frags = MAXTXFRAGS;
2178f8919bdaSduboff gcp->gc_tx_max_descs_per_pkt = gcp->gc_tx_max_frags;
2179f8919bdaSduboff gcp->gc_tx_desc_unit_shift = 4; /* 16 byte */
2180f8919bdaSduboff gcp->gc_tx_buf_size = TX_BUF_SIZE;
2181f8919bdaSduboff gcp->gc_tx_buf_limit = gcp->gc_tx_buf_size;
2182f8919bdaSduboff gcp->gc_tx_ring_size = TX_RING_SIZE;
2183f8919bdaSduboff gcp->gc_tx_ring_limit = gcp->gc_tx_ring_size;
2184f8919bdaSduboff gcp->gc_tx_auto_pad = B_TRUE;
2185f8919bdaSduboff gcp->gc_tx_copy_thresh = sfe_tx_copy_thresh;
2186f8919bdaSduboff gcp->gc_tx_desc_write_oo = B_TRUE;
2187f8919bdaSduboff
2188f8919bdaSduboff gcp->gc_rx_buf_align = sizeof (uint8_t) - 1;
2189f8919bdaSduboff gcp->gc_rx_max_frags = MAXRXFRAGS;
2190f8919bdaSduboff gcp->gc_rx_desc_unit_shift = 4;
2191f8919bdaSduboff gcp->gc_rx_ring_size = RX_RING_SIZE;
2192f8919bdaSduboff gcp->gc_rx_buf_max = RX_BUF_SIZE;
2193f8919bdaSduboff gcp->gc_rx_copy_thresh = sfe_rx_copy_thresh;
2194f8919bdaSduboff
2195f8919bdaSduboff /* map attributes */
2196f8919bdaSduboff gcp->gc_dev_attr = sfe_dev_attr;
2197f8919bdaSduboff gcp->gc_buf_attr = sfe_buf_attr;
2198f8919bdaSduboff gcp->gc_desc_attr = sfe_buf_attr;
2199f8919bdaSduboff
2200f8919bdaSduboff /* dma attributes */
2201f8919bdaSduboff gcp->gc_dma_attr_desc = sfe_dma_attr_desc;
2202f8919bdaSduboff
2203f8919bdaSduboff gcp->gc_dma_attr_txbuf = sfe_dma_attr_buf;
2204f8919bdaSduboff gcp->gc_dma_attr_txbuf.dma_attr_align = gcp->gc_tx_buf_align+1;
2205f8919bdaSduboff gcp->gc_dma_attr_txbuf.dma_attr_sgllen = gcp->gc_tx_max_frags;
2206f8919bdaSduboff
2207f8919bdaSduboff gcp->gc_dma_attr_rxbuf = sfe_dma_attr_buf;
2208f8919bdaSduboff gcp->gc_dma_attr_rxbuf.dma_attr_align = gcp->gc_rx_buf_align+1;
2209f8919bdaSduboff gcp->gc_dma_attr_rxbuf.dma_attr_sgllen = gcp->gc_rx_max_frags;
2210f8919bdaSduboff
2211f8919bdaSduboff /* timeout parameters */
2212f8919bdaSduboff gcp->gc_tx_timeout = 3*ONESEC;
2213f8919bdaSduboff gcp->gc_tx_timeout_interval = ONESEC;
221423d366e3Sduboff if (p->chip_type == CHIPTYPE_DP83815) {
221523d366e3Sduboff /* workaround for tx hang */
221623d366e3Sduboff gcp->gc_tx_timeout_interval = ONESEC/20; /* 50mS */
221723d366e3Sduboff }
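	/*
	 * The shorter interval presumably makes the gem framework's tx
	 * watchdog run often enough for the CR_TXE re-kick workaround in
	 * sfe_tx_desc_stat() to recover a stalled transmitter quickly.
	 */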
2218f8919bdaSduboff
2219f8919bdaSduboff /* MII timeout parameters */
2220f8919bdaSduboff gcp->gc_mii_link_watch_interval = ONESEC;
2221f8919bdaSduboff gcp->gc_mii_an_watch_interval = ONESEC/5;
2222f8919bdaSduboff gcp->gc_mii_reset_timeout = MII_RESET_TIMEOUT; /* 1 sec */
2223f8919bdaSduboff gcp->gc_mii_an_timeout = MII_AN_TIMEOUT; /* 5 sec */
2224f8919bdaSduboff gcp->gc_mii_an_wait = 0;
2225f8919bdaSduboff gcp->gc_mii_linkdown_timeout = MII_LINKDOWN_TIMEOUT;
2226f8919bdaSduboff
2227f8919bdaSduboff /* setting for general PHY */
2228f8919bdaSduboff gcp->gc_mii_an_delay = 0;
2229f8919bdaSduboff gcp->gc_mii_linkdown_action = MII_ACTION_RSA;
2230f8919bdaSduboff gcp->gc_mii_linkdown_timeout_action = MII_ACTION_RESET;
2231f8919bdaSduboff gcp->gc_mii_dont_reset = B_FALSE;
2232f8919bdaSduboff
2233f8919bdaSduboff
2234f8919bdaSduboff /* I/O methods */
2235f8919bdaSduboff
2236f8919bdaSduboff /* mac operation */
2237f8919bdaSduboff gcp->gc_attach_chip = &sfe_attach_chip;
2238f8919bdaSduboff if (p->chip_type == CHIPTYPE_DP83815) {
2239f8919bdaSduboff gcp->gc_reset_chip = &sfe_reset_chip_dp83815;
2240f8919bdaSduboff } else {
2241f8919bdaSduboff gcp->gc_reset_chip = &sfe_reset_chip_sis900;
2242f8919bdaSduboff }
2243f8919bdaSduboff gcp->gc_init_chip = &sfe_init_chip;
2244f8919bdaSduboff gcp->gc_start_chip = &sfe_start_chip;
2245f8919bdaSduboff gcp->gc_stop_chip = &sfe_stop_chip;
2246f8919bdaSduboff #ifdef USE_MULTICAST_HASHTBL
2247f8919bdaSduboff gcp->gc_multicast_hash = &sfe_mcast_hash;
2248f8919bdaSduboff #endif
2249f8919bdaSduboff if (p->chip_type == CHIPTYPE_DP83815) {
2250f8919bdaSduboff gcp->gc_set_rx_filter = &sfe_set_rx_filter_dp83815;
2251f8919bdaSduboff } else {
2252f8919bdaSduboff gcp->gc_set_rx_filter = &sfe_set_rx_filter_sis900;
2253f8919bdaSduboff }
2254f8919bdaSduboff gcp->gc_set_media = &sfe_set_media;
2255f8919bdaSduboff gcp->gc_get_stats = &sfe_get_stats;
2256f8919bdaSduboff gcp->gc_interrupt = &sfe_interrupt;
2257f8919bdaSduboff
2258f8919bdaSduboff /* descriptor operation */
2259f8919bdaSduboff gcp->gc_tx_desc_write = &sfe_tx_desc_write;
2260f8919bdaSduboff gcp->gc_tx_start = &sfe_tx_start;
2261f8919bdaSduboff gcp->gc_rx_desc_write = &sfe_rx_desc_write;
2262f8919bdaSduboff gcp->gc_rx_start = NULL;
2263f8919bdaSduboff
2264f8919bdaSduboff gcp->gc_tx_desc_stat = &sfe_tx_desc_stat;
2265f8919bdaSduboff gcp->gc_rx_desc_stat = &sfe_rx_desc_stat;
2266f8919bdaSduboff gcp->gc_tx_desc_init = &sfe_tx_desc_init;
2267f8919bdaSduboff gcp->gc_rx_desc_init = &sfe_rx_desc_init;
2268f8919bdaSduboff gcp->gc_tx_desc_clean = &sfe_tx_desc_clean;
2269f8919bdaSduboff gcp->gc_rx_desc_clean = &sfe_rx_desc_clean;
2270f8919bdaSduboff
2271f8919bdaSduboff /* mii operations */
2272f8919bdaSduboff if (p->chip_type == CHIPTYPE_DP83815) {
227323d366e3Sduboff gcp->gc_mii_probe = &sfe_mii_probe_dp83815;
227423d366e3Sduboff gcp->gc_mii_init = &sfe_mii_init_dp83815;
2275f8919bdaSduboff gcp->gc_mii_config = &sfe_mii_config_dp83815;
2276f8919bdaSduboff gcp->gc_mii_sync = &sfe_mii_sync_dp83815;
2277f8919bdaSduboff gcp->gc_mii_read = &sfe_mii_read_dp83815;
2278f8919bdaSduboff gcp->gc_mii_write = &sfe_mii_write_dp83815;
2279f8919bdaSduboff gcp->gc_mii_tune_phy = NULL;
2280f8919bdaSduboff gcp->gc_flow_control = FLOW_CONTROL_NONE;
2281f8919bdaSduboff } else {
2282f8919bdaSduboff gcp->gc_mii_probe = &gem_mii_probe_default;
2283f8919bdaSduboff gcp->gc_mii_init = NULL;
2284f8919bdaSduboff gcp->gc_mii_config = &sfe_mii_config_sis900;
2285f8919bdaSduboff gcp->gc_mii_sync = &sfe_mii_sync_sis900;
2286f8919bdaSduboff gcp->gc_mii_read = &sfe_mii_read_sis900;
2287f8919bdaSduboff gcp->gc_mii_write = &sfe_mii_write_sis900;
2288f8919bdaSduboff gcp->gc_mii_tune_phy = &sfe_set_eq_sis630;
2289f8919bdaSduboff gcp->gc_flow_control = FLOW_CONTROL_RX_PAUSE;
2290f8919bdaSduboff }
2291f8919bdaSduboff
2292f8919bdaSduboff lp = kmem_zalloc(sizeof (*lp), KM_SLEEP);
2293f8919bdaSduboff lp->chip = p;
2294f8919bdaSduboff lp->revid = rev;
229523d366e3Sduboff lp->our_intr_bits = 0;
229623d366e3Sduboff lp->isr_pended = 0;
2297f8919bdaSduboff
2298f8919bdaSduboff cmn_err(CE_CONT, CONS "%s%d: chip:%s rev:0x%02x",
2299f8919bdaSduboff drv_name, unit, p->chip_name, rev);
2300f8919bdaSduboff
2301f8919bdaSduboff dp = gem_do_attach(dip, 0, gcp, base, ®s_ha,
2302f8919bdaSduboff lp, sizeof (*lp));
2303f8919bdaSduboff kmem_free(gcp, sizeof (*gcp));
2304f8919bdaSduboff
2305f8919bdaSduboff if (dp == NULL) {
2306f8919bdaSduboff goto err_freelp;
2307f8919bdaSduboff }
2308f8919bdaSduboff
2309f8919bdaSduboff return (DDI_SUCCESS);
2310f8919bdaSduboff
2311f8919bdaSduboff err_freelp:
2312f8919bdaSduboff kmem_free(lp, sizeof (struct sfe_dev));
2313f8919bdaSduboff err:
2314f8919bdaSduboff return (DDI_FAILURE);
2315f8919bdaSduboff }
2316f8919bdaSduboff return (DDI_FAILURE);
2317f8919bdaSduboff }
2318f8919bdaSduboff
2319f8919bdaSduboff static int
2320f8919bdaSduboff sfedetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2321f8919bdaSduboff {
2322f8919bdaSduboff switch (cmd) {
2323f8919bdaSduboff case DDI_SUSPEND:
2324f8919bdaSduboff return (gem_suspend(dip));
2325f8919bdaSduboff
2326f8919bdaSduboff case DDI_DETACH:
2327f8919bdaSduboff return (gem_do_detach(dip));
2328f8919bdaSduboff }
2329f8919bdaSduboff return (DDI_FAILURE);
2330f8919bdaSduboff }
2331f8919bdaSduboff
233253560dfaSSherry Moore /*
233353560dfaSSherry Moore * quiesce(9E) entry point.
233453560dfaSSherry Moore *
233553560dfaSSherry Moore * This function is called when the system is single-threaded at high
233653560dfaSSherry Moore * PIL with preemption disabled. Therefore, this function must not
233753560dfaSSherry Moore * block.
233853560dfaSSherry Moore *
233953560dfaSSherry Moore * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
234053560dfaSSherry Moore * DDI_FAILURE indicates an error condition and should almost never happen.
234153560dfaSSherry Moore */
234253560dfaSSherry Moore #ifdef __sparc
234353560dfaSSherry Moore #define sfe_quiesce ddi_quiesce_not_supported
234453560dfaSSherry Moore #else
234553560dfaSSherry Moore static int
234653560dfaSSherry Moore sfe_quiesce(dev_info_t *dip)
234753560dfaSSherry Moore {
234853560dfaSSherry Moore struct gem_dev *dp;
234953560dfaSSherry Moore int ret = 0;
235053560dfaSSherry Moore
235153560dfaSSherry Moore dp = GEM_GET_DEV(dip);
235253560dfaSSherry Moore
235353560dfaSSherry Moore if (dp == NULL)
235453560dfaSSherry Moore return (DDI_FAILURE);
235553560dfaSSherry Moore
235653560dfaSSherry Moore ret = sfe_stop_chip_quiesce(dp);
235753560dfaSSherry Moore
235853560dfaSSherry Moore return (ret);
235953560dfaSSherry Moore }
236053560dfaSSherry Moore #endif
236153560dfaSSherry Moore
2362f8919bdaSduboff /* ======================================================== */
2363f8919bdaSduboff /*
2364f8919bdaSduboff * OS-dependent (loadable streams driver) routines
2365f8919bdaSduboff */
2366f8919bdaSduboff /* ======================================================== */
2367f8919bdaSduboff DDI_DEFINE_STREAM_OPS(sfe_ops, nulldev, nulldev, sfeattach, sfedetach,
236853560dfaSSherry Moore nodev, NULL, D_MP, NULL, sfe_quiesce);
2369f8919bdaSduboff
2370f8919bdaSduboff static struct modldrv modldrv = {
2371f8919bdaSduboff &mod_driverops, /* Type of module. This one is a driver */
2372f8919bdaSduboff ident,
2373f8919bdaSduboff &sfe_ops, /* driver ops */
2374f8919bdaSduboff };
2375f8919bdaSduboff
2376f8919bdaSduboff static struct modlinkage modlinkage = {
2377f8919bdaSduboff MODREV_1, &modldrv, NULL
2378f8919bdaSduboff };
2379f8919bdaSduboff
2380f8919bdaSduboff /* ======================================================== */
2381f8919bdaSduboff /*
2382f8919bdaSduboff * Loadable module support
2383f8919bdaSduboff */
2384f8919bdaSduboff /* ======================================================== */
2385f8919bdaSduboff int
2386f8919bdaSduboff _init(void)
2387f8919bdaSduboff {
2388f8919bdaSduboff int status;
2389f8919bdaSduboff
2390f8919bdaSduboff DPRINTF(2, (CE_CONT, CONS "sfe: _init: called"));
2391f8919bdaSduboff gem_mod_init(&sfe_ops, "sfe");
2392f8919bdaSduboff status = mod_install(&modlinkage);
2393f8919bdaSduboff if (status != DDI_SUCCESS) {
2394f8919bdaSduboff gem_mod_fini(&sfe_ops);
2395f8919bdaSduboff }
2396f8919bdaSduboff return (status);
2397f8919bdaSduboff }
2398f8919bdaSduboff
2399f8919bdaSduboff /*
2400f8919bdaSduboff * _fini : done
2401f8919bdaSduboff */
2402f8919bdaSduboff int
2403f8919bdaSduboff _fini(void)
2404f8919bdaSduboff {
2405f8919bdaSduboff int status;
2406f8919bdaSduboff
2407f8919bdaSduboff DPRINTF(2, (CE_CONT, CONS "sfe: _fini: called"));
2408f8919bdaSduboff status = mod_remove(&modlinkage);
2409f8919bdaSduboff if (status == DDI_SUCCESS) {
2410f8919bdaSduboff gem_mod_fini(&sfe_ops);
2411f8919bdaSduboff }
2412f8919bdaSduboff return (status);
2413f8919bdaSduboff }
2414f8919bdaSduboff
2415f8919bdaSduboff int
2416f8919bdaSduboff _info(struct modinfo *modinfop)
2417f8919bdaSduboff {
2418f8919bdaSduboff return (mod_info(&modlinkage, modinfop));
2419f8919bdaSduboff }
2420