xref: /freebsd/sys/dev/sk/if_sk.c (revision 74bf4e164ba5851606a27d4feff27717452583e5)
1 /*	$OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998, 1999, 2000
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 /*
35  * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
36  *
37  * Permission to use, copy, modify, and distribute this software for any
38  * purpose with or without fee is hereby granted, provided that the above
39  * copyright notice and this permission notice appear in all copies.
40  *
41  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48  */
49 
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
52 
53 /*
54  * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55  * the SK-984x series adapters, both single port and dual port.
56  * References:
57  * 	The XaQti XMAC II datasheet,
58  *  http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59  *	The SysKonnect GEnesis manual, http://www.syskonnect.com
60  *
61  * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62  * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63  * convenience to others until Vitesse corrects this problem:
64  *
65  * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
66  *
67  * Written by Bill Paul <wpaul@ee.columbia.edu>
68  * Department of Electrical Engineering
69  * Columbia University, New York City
70  */
71 /*
72  * The SysKonnect gigabit ethernet adapters consist of two main
73  * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
74  * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
75  * components and a PHY while the GEnesis controller provides a PCI
76  * interface with DMA support. Each card may have between 512K and
77  * 2MB of SRAM on board depending on the configuration.
78  *
79  * The SysKonnect GEnesis controller can have either one or two XMAC
80  * chips connected to it, allowing single or dual port NIC configurations.
81  * SysKonnect has the distinction of being the only vendor on the market
82  * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
83  * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
84  * XMAC registers. This driver takes advantage of these features to allow
85  * both XMACs to operate as independent interfaces.
86  */
87 
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/sockio.h>
91 #include <sys/mbuf.h>
92 #include <sys/malloc.h>
93 #include <sys/kernel.h>
94 #include <sys/module.h>
95 #include <sys/socket.h>
96 #include <sys/queue.h>
97 
98 #include <net/if.h>
99 #include <net/if_arp.h>
100 #include <net/ethernet.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103 
104 #include <net/bpf.h>
105 
106 #include <vm/vm.h>              /* for vtophys */
107 #include <vm/pmap.h>            /* for vtophys */
108 #include <machine/bus_pio.h>
109 #include <machine/bus_memio.h>
110 #include <machine/bus.h>
111 #include <machine/resource.h>
112 #include <sys/bus.h>
113 #include <sys/rman.h>
114 
115 #include <dev/mii/mii.h>
116 #include <dev/mii/miivar.h>
117 #include <dev/mii/brgphyreg.h>
118 
119 #include <dev/pci/pcireg.h>
120 #include <dev/pci/pcivar.h>
121 
122 #if 0
123 #define SK_USEIOSPACE
124 #endif
125 
126 #include <pci/if_skreg.h>
127 #include <pci/xmaciireg.h>
128 #include <pci/yukonreg.h>
129 
130 MODULE_DEPEND(sk, pci, 1, 1, 1);
131 MODULE_DEPEND(sk, ether, 1, 1, 1);
132 MODULE_DEPEND(sk, miibus, 1, 1, 1);
133 
134 /* "controller miibus0" required.  See GENERIC if you get errors here. */
135 #include "miibus_if.h"
136 
137 #ifndef lint
138 static const char rcsid[] =
139   "$FreeBSD$";
140 #endif
141 
142 static struct sk_type sk_devs[] = {
143 	{
144 		VENDORID_SK,
145 		DEVICEID_SK_V1,
146 		"SysKonnect Gigabit Ethernet (V1.0)"
147 	},
148 	{
149 		VENDORID_SK,
150 		DEVICEID_SK_V2,
151 		"SysKonnect Gigabit Ethernet (V2.0)"
152 	},
153 	{
154 		VENDORID_MARVELL,
155 		DEVICEID_SK_V2,
156 		"Marvell Gigabit Ethernet"
157 	},
158 	{
159 		VENDORID_MARVELL,
160 		DEVICEID_BELKIN_5005,
161 		"Belkin F5D5005 Gigabit Ethernet"
162 	},
163 	{
164 		VENDORID_3COM,
165 		DEVICEID_3COM_3C940,
166 		"3Com 3C940 Gigabit Ethernet"
167 	},
168 	{
169 		VENDORID_LINKSYS,
170 		DEVICEID_LINKSYS_EG1032,
171 		"Linksys EG1032 Gigabit Ethernet"
172 	},
173 	{
174 		VENDORID_DLINK,
175 		DEVICEID_DLINK_DGE530T,
176 		"D-Link DGE-530T Gigabit Ethernet"
177 	},
178 	{ 0, 0, NULL }
179 };
180 
181 static int skc_probe		(device_t);
182 static int skc_attach		(device_t);
183 static int skc_detach		(device_t);
184 static void skc_shutdown	(device_t);
185 static int sk_detach		(device_t);
186 static int sk_probe		(device_t);
187 static int sk_attach		(device_t);
188 static void sk_tick		(void *);
189 static void sk_intr		(void *);
190 static void sk_intr_xmac	(struct sk_if_softc *);
191 static void sk_intr_bcom	(struct sk_if_softc *);
192 static void sk_intr_yukon	(struct sk_if_softc *);
193 static void sk_rxeof		(struct sk_if_softc *);
194 static void sk_txeof		(struct sk_if_softc *);
195 static int sk_encap		(struct sk_if_softc *, struct mbuf *,
196 					u_int32_t *);
197 static void sk_start		(struct ifnet *);
198 static int sk_ioctl		(struct ifnet *, u_long, caddr_t);
199 static void sk_init		(void *);
200 static void sk_init_xmac	(struct sk_if_softc *);
201 static void sk_init_yukon	(struct sk_if_softc *);
202 static void sk_stop		(struct sk_if_softc *);
203 static void sk_watchdog		(struct ifnet *);
204 static int sk_ifmedia_upd	(struct ifnet *);
205 static void sk_ifmedia_sts	(struct ifnet *, struct ifmediareq *);
206 static void sk_reset		(struct sk_softc *);
207 static int sk_newbuf		(struct sk_if_softc *,
208 					struct sk_chain *, struct mbuf *);
209 static int sk_alloc_jumbo_mem	(struct sk_if_softc *);
210 static void *sk_jalloc		(struct sk_if_softc *);
211 static void sk_jfree		(void *, void *);
212 static int sk_init_rx_ring	(struct sk_if_softc *);
213 static void sk_init_tx_ring	(struct sk_if_softc *);
214 static u_int32_t sk_win_read_4	(struct sk_softc *, int);
215 static u_int16_t sk_win_read_2	(struct sk_softc *, int);
216 static u_int8_t sk_win_read_1	(struct sk_softc *, int);
217 static void sk_win_write_4	(struct sk_softc *, int, u_int32_t);
218 static void sk_win_write_2	(struct sk_softc *, int, u_int32_t);
219 static void sk_win_write_1	(struct sk_softc *, int, u_int32_t);
220 static u_int8_t sk_vpd_readbyte	(struct sk_softc *, int);
221 static void sk_vpd_read_res	(struct sk_softc *, struct vpd_res *, int);
222 static void sk_vpd_read		(struct sk_softc *);
223 
224 static int sk_miibus_readreg	(device_t, int, int);
225 static int sk_miibus_writereg	(device_t, int, int, int);
226 static void sk_miibus_statchg	(device_t);
227 
228 static int sk_xmac_miibus_readreg	(struct sk_if_softc *, int, int);
229 static int sk_xmac_miibus_writereg	(struct sk_if_softc *, int, int,
230 						int);
231 static void sk_xmac_miibus_statchg	(struct sk_if_softc *);
232 
233 static int sk_marv_miibus_readreg	(struct sk_if_softc *, int, int);
234 static int sk_marv_miibus_writereg	(struct sk_if_softc *, int, int,
235 						int);
236 static void sk_marv_miibus_statchg	(struct sk_if_softc *);
237 
238 static uint32_t sk_xmchash	(const uint8_t *);
239 static uint32_t sk_gmchash	(const uint8_t *);
240 static void sk_setfilt		(struct sk_if_softc *, caddr_t, int);
241 static void sk_setmulti		(struct sk_if_softc *);
242 static void sk_setpromisc	(struct sk_if_softc *);
243 
244 #ifdef SK_USEIOSPACE
245 #define SK_RES		SYS_RES_IOPORT
246 #define SK_RID		SK_PCI_LOIO
247 #else
248 #define SK_RES		SYS_RES_MEMORY
249 #define SK_RID		SK_PCI_LOMEM
250 #endif
251 
252 /*
253  * Note that we have newbus methods for both the GEnesis controller
254  * itself and the XMAC(s). The XMACs are children of the GEnesis, and
255  * the miibus code is a child of the XMACs. We need to do it this way
256  * so that the miibus drivers can access the PHY registers on the
257  * right PHY. It's not quite what I had in mind, but it's the only
258  * design that achieves the desired effect.
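 *
 * The resulting device hierarchy looks roughly like this (the second
 * sk instance and its miibus exist only on dual port cards):
 *
 *	skc0 (GEnesis/Yukon controller)
 *	    sk0 (port A MAC) -- miibus -- PHY
 *	    sk1 (port B MAC) -- miibus -- PHY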
259  */
260 static device_method_t skc_methods[] = {
261 	/* Device interface */
262 	DEVMETHOD(device_probe,		skc_probe),
263 	DEVMETHOD(device_attach,	skc_attach),
264 	DEVMETHOD(device_detach,	skc_detach),
265 	DEVMETHOD(device_shutdown,	skc_shutdown),
266 
267 	/* bus interface */
268 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
269 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
270 
271 	{ 0, 0 }
272 };
273 
274 static driver_t skc_driver = {
275 	"skc",
276 	skc_methods,
277 	sizeof(struct sk_softc)
278 };
279 
280 static devclass_t skc_devclass;
281 
282 static device_method_t sk_methods[] = {
283 	/* Device interface */
284 	DEVMETHOD(device_probe,		sk_probe),
285 	DEVMETHOD(device_attach,	sk_attach),
286 	DEVMETHOD(device_detach,	sk_detach),
287 	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
288 
289 	/* bus interface */
290 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
291 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
292 
293 	/* MII interface */
294 	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
295 	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
296 	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),
297 
298 	{ 0, 0 }
299 };
300 
301 static driver_t sk_driver = {
302 	"sk",
303 	sk_methods,
304 	sizeof(struct sk_if_softc)
305 };
306 
307 static devclass_t sk_devclass;
308 
309 DRIVER_MODULE(sk, pci, skc_driver, skc_devclass, 0, 0);
310 DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
311 DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
312 
313 #define SK_SETBIT(sc, reg, x)		\
314 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
315 
316 #define SK_CLRBIT(sc, reg, x)		\
317 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
318 
319 #define SK_WIN_SETBIT_4(sc, reg, x)	\
320 	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
321 
322 #define SK_WIN_CLRBIT_4(sc, reg, x)	\
323 	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
324 
325 #define SK_WIN_SETBIT_2(sc, reg, x)	\
326 	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
327 
328 #define SK_WIN_CLRBIT_2(sc, reg, x)	\
329 	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
330 
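/*
 * Register window accessors. With SK_USEIOSPACE defined, only a small
 * window of the register file is visible through the I/O BAR at any one
 * time: each access first selects a window by writing the upper register
 * bits to the RAP (register address port) and then touches the register
 * at its offset within that window (the SK_WIN() and SK_REG() macros
 * from if_skreg.h supply the two pieces). With memory-mapped access the
 * whole register file is visible directly and no banking is needed.
 */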
331 static u_int32_t
332 sk_win_read_4(sc, reg)
333 	struct sk_softc		*sc;
334 	int			reg;
335 {
336 #ifdef SK_USEIOSPACE
337 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
338 	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
339 #else
340 	return(CSR_READ_4(sc, reg));
341 #endif
342 }
343 
344 static u_int16_t
345 sk_win_read_2(sc, reg)
346 	struct sk_softc		*sc;
347 	int			reg;
348 {
349 #ifdef SK_USEIOSPACE
350 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
351 	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
352 #else
353 	return(CSR_READ_2(sc, reg));
354 #endif
355 }
356 
357 static u_int8_t
358 sk_win_read_1(sc, reg)
359 	struct sk_softc		*sc;
360 	int			reg;
361 {
362 #ifdef SK_USEIOSPACE
363 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
364 	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
365 #else
366 	return(CSR_READ_1(sc, reg));
367 #endif
368 }
369 
370 static void
371 sk_win_write_4(sc, reg, val)
372 	struct sk_softc		*sc;
373 	int			reg;
374 	u_int32_t		val;
375 {
376 #ifdef SK_USEIOSPACE
377 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
378 	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
379 #else
380 	CSR_WRITE_4(sc, reg, val);
381 #endif
382 	return;
383 }
384 
385 static void
386 sk_win_write_2(sc, reg, val)
387 	struct sk_softc		*sc;
388 	int			reg;
389 	u_int32_t		val;
390 {
391 #ifdef SK_USEIOSPACE
392 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
393 	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
394 #else
395 	CSR_WRITE_2(sc, reg, val);
396 #endif
397 	return;
398 }
399 
400 static void
401 sk_win_write_1(sc, reg, val)
402 	struct sk_softc		*sc;
403 	int			reg;
404 	u_int32_t		val;
405 {
406 #ifdef SK_USEIOSPACE
407 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
408 	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
409 #else
410 	CSR_WRITE_1(sc, reg, val);
411 #endif
412 	return;
413 }
414 
415 /*
416  * The VPD EEPROM contains Vital Product Data, as suggested in
417  * the PCI 2.1 specification. The VPD data is separated into areas
418  * denoted by resource IDs. The SysKonnect VPD contains an ID string
419  * resource (the name of the adapter), a read-only area resource
420  * containing various key/data fields and a read/write area which
421  * can be used to store asset management information or log messages.
422  * We read the ID string and read-only area into buffers attached to
423  * the controller softc structure for later use. At the moment,
424  * we only use the ID string during skc_attach().
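 *
 * Each resource begins with a small header which is read below into a
 * struct vpd_res: a resource tag (vr_id) and a payload length (vr_len);
 * vr_pad is examined here only to detect a blank EEPROM. The payload
 * itself follows the header and is vr_len bytes long.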
425  */
426 static u_int8_t
427 sk_vpd_readbyte(sc, addr)
428 	struct sk_softc		*sc;
429 	int			addr;
430 {
431 	int			i;
432 
433 	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
434 	for (i = 0; i < SK_TIMEOUT; i++) {
435 		DELAY(1);
436 		if (sk_win_read_2(sc,
437 		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
438 			break;
439 	}
440 
441 	if (i == SK_TIMEOUT)
442 		return(0);
443 
444 	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
445 }
446 
447 static void
448 sk_vpd_read_res(sc, res, addr)
449 	struct sk_softc		*sc;
450 	struct vpd_res		*res;
451 	int			addr;
452 {
453 	int			i;
454 	u_int8_t		*ptr;
455 
456 	ptr = (u_int8_t *)res;
457 	for (i = 0; i < sizeof(struct vpd_res); i++)
458 		ptr[i] = sk_vpd_readbyte(sc, i + addr);
459 
460 	return;
461 }
462 
463 static void
464 sk_vpd_read(sc)
465 	struct sk_softc		*sc;
466 {
467 	int			pos = 0, i;
468 	struct vpd_res		res;
469 
470 	if (sc->sk_vpd_prodname != NULL)
471 		free(sc->sk_vpd_prodname, M_DEVBUF);
472 	if (sc->sk_vpd_readonly != NULL)
473 		free(sc->sk_vpd_readonly, M_DEVBUF);
474 	sc->sk_vpd_prodname = NULL;
475 	sc->sk_vpd_readonly = NULL;
476 
477 	sk_vpd_read_res(sc, &res, pos);
478 
479 	/*
480 	 * Bail out quietly if the eeprom appears to be missing or empty.
481 	 */
482 	if (res.vr_id == 0xff && res.vr_len == 0xff && res.vr_pad == 0xff)
483 		return;
484 
485 	if (res.vr_id != VPD_RES_ID) {
486 		printf("skc%d: bad VPD resource id: expected %x got %x\n",
487 		    sc->sk_unit, VPD_RES_ID, res.vr_id);
488 		return;
489 	}
490 
491 	pos += sizeof(res);
492 	sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
	if (sc->sk_vpd_prodname == NULL)
		return;
493 	for (i = 0; i < res.vr_len; i++)
494 		sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
495 	sc->sk_vpd_prodname[i] = '\0';
496 	pos += i;
497 
498 	sk_vpd_read_res(sc, &res, pos);
499 
500 	if (res.vr_id != VPD_RES_READ) {
501 		printf("skc%d: bad VPD resource id: expected %x got %x\n",
502 		    sc->sk_unit, VPD_RES_READ, res.vr_id);
503 		return;
504 	}
505 
506 	pos += sizeof(res);
507 	sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
	if (sc->sk_vpd_readonly == NULL)
		return;
508 	for (i = 0; i < res.vr_len; i++)
509 		sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
510 
511 	return;
512 }
513 
514 static int
515 sk_miibus_readreg(dev, phy, reg)
516 	device_t		dev;
517 	int			phy, reg;
518 {
519 	struct sk_if_softc	*sc_if;
520 
521 	sc_if = device_get_softc(dev);
522 
523 	switch(sc_if->sk_softc->sk_type) {
524 	case SK_GENESIS:
525 		return(sk_xmac_miibus_readreg(sc_if, phy, reg));
526 	case SK_YUKON:
527 		return(sk_marv_miibus_readreg(sc_if, phy, reg));
528 	}
529 
530 	return(0);
531 }
532 
533 static int
534 sk_miibus_writereg(dev, phy, reg, val)
535 	device_t		dev;
536 	int			phy, reg, val;
537 {
538 	struct sk_if_softc	*sc_if;
539 
540 	sc_if = device_get_softc(dev);
541 
542 	switch(sc_if->sk_softc->sk_type) {
543 	case SK_GENESIS:
544 		return(sk_xmac_miibus_writereg(sc_if, phy, reg, val));
545 	case SK_YUKON:
546 		return(sk_marv_miibus_writereg(sc_if, phy, reg, val));
547 	}
548 
549 	return(0);
550 }
551 
552 static void
553 sk_miibus_statchg(dev)
554 	device_t		dev;
555 {
556 	struct sk_if_softc	*sc_if;
557 
558 	sc_if = device_get_softc(dev);
559 
560 	switch(sc_if->sk_softc->sk_type) {
561 	case SK_GENESIS:
562 		sk_xmac_miibus_statchg(sc_if);
563 		break;
564 	case SK_YUKON:
565 		sk_marv_miibus_statchg(sc_if);
566 		break;
567 	}
568 
569 	return;
570 }
571 
572 static int
573 sk_xmac_miibus_readreg(sc_if, phy, reg)
574 	struct sk_if_softc	*sc_if;
575 	int			phy, reg;
576 {
577 	int			i;
578 
579 	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
580 		return(0);
581 
582 	SK_IF_LOCK(sc_if);
583 	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
584 	SK_XM_READ_2(sc_if, XM_PHY_DATA);
585 	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
586 		for (i = 0; i < SK_TIMEOUT; i++) {
587 			DELAY(1);
588 			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
589 			    XM_MMUCMD_PHYDATARDY)
590 				break;
591 		}
592 
593 		if (i == SK_TIMEOUT) {
594 			printf("sk%d: phy failed to come ready\n",
595 			    sc_if->sk_unit);
596 			SK_IF_UNLOCK(sc_if);
597 			return(0);
598 		}
599 	}
600 	DELAY(1);
601 	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
602 	SK_IF_UNLOCK(sc_if);
603 	return(i);
604 }
605 
606 static int
607 sk_xmac_miibus_writereg(sc_if, phy, reg, val)
608 	struct sk_if_softc	*sc_if;
609 	int			phy, reg, val;
610 {
611 	int			i;
612 
613 	SK_IF_LOCK(sc_if);
614 	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
615 	for (i = 0; i < SK_TIMEOUT; i++) {
616 		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
617 			break;
618 	}
619 
620 	if (i == SK_TIMEOUT) {
621 		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
622 		SK_IF_UNLOCK(sc_if);
623 		return(ETIMEDOUT);
624 	}
625 
626 	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
627 	for (i = 0; i < SK_TIMEOUT; i++) {
628 		DELAY(1);
629 		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
630 			break;
631 	}
632 	SK_IF_UNLOCK(sc_if);
633 	if (i == SK_TIMEOUT)
634 		printf("sk%d: phy write timed out\n", sc_if->sk_unit);
635 
636 	return(0);
637 }
638 
639 static void
640 sk_xmac_miibus_statchg(sc_if)
641 	struct sk_if_softc	*sc_if;
642 {
643 	struct mii_data		*mii;
644 
645 	mii = device_get_softc(sc_if->sk_miibus);
646 
647 	SK_IF_LOCK(sc_if);
648 	/*
649 	 * If this is a GMII PHY, manually set the XMAC's
650 	 * duplex mode accordingly.
651 	 */
652 	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
653 		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
654 			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
655 		} else {
656 			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
657 		}
658 	}
659 	SK_IF_UNLOCK(sc_if);
660 
661 	return;
662 }
663 
664 static int
665 sk_marv_miibus_readreg(sc_if, phy, reg)
666 	struct sk_if_softc	*sc_if;
667 	int			phy, reg;
668 {
669 	u_int16_t		val;
670 	int			i;
671 
672 	if (phy != 0 ||
673 	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
674 	     sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
675 		return(0);
676 	}
677 
678 	SK_IF_LOCK(sc_if);
679 	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
680 		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
681 
682 	for (i = 0; i < SK_TIMEOUT; i++) {
683 		DELAY(1);
684 		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
685 		if (val & YU_SMICR_READ_VALID)
686 			break;
687 	}
688 
689 	if (i == SK_TIMEOUT) {
690 		printf("sk%d: phy failed to come ready\n",
691 		    sc_if->sk_unit);
692 		SK_IF_UNLOCK(sc_if);
693 		return(0);
694 	}
695 
696 	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
697 	SK_IF_UNLOCK(sc_if);
698 
699 	return(val);
700 }
701 
702 static int
703 sk_marv_miibus_writereg(sc_if, phy, reg, val)
704 	struct sk_if_softc	*sc_if;
705 	int			phy, reg, val;
706 {
707 	int			i;
708 
709 	SK_IF_LOCK(sc_if);
710 	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
711 	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
712 		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
713 
	/* Wait for the write transaction to complete (BUSY cleared). */
714 	for (i = 0; i < SK_TIMEOUT; i++) {
715 		DELAY(1);
716 		if (!(SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY))
717 			break;
718 	}
719 	SK_IF_UNLOCK(sc_if);
720 
721 	return(0);
722 }
723 
724 static void
725 sk_marv_miibus_statchg(sc_if)
726 	struct sk_if_softc	*sc_if;
727 {
728 	return;
729 }
730 
731 #define HASH_BITS		6
732 
733 static u_int32_t
734 sk_xmchash(addr)
735 	const uint8_t *addr;
736 {
737 	uint32_t crc;
738 
739 	/* Compute CRC for the address value. */
740 	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
741 
742 	return (~crc & ((1 << HASH_BITS) - 1));
743 }
744 
745 /* gmchash is just a big endian crc */
746 static u_int32_t
747 sk_gmchash(addr)
748 	const uint8_t *addr;
749 {
750 	uint32_t crc;
751 
752 	/* Compute CRC for the address value. */
753 	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
754 
755 	return (crc & ((1 << HASH_BITS) - 1));
756 }
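
/*
 * Both hash routines return a 6-bit value (0-63) selecting a single bit
 * in the chip's 64-bit multicast hash filter; sk_setmulti() below keeps
 * that table in two 32-bit words, hashes[0] for bits 0-31 and hashes[1]
 * for bits 32-63.
 */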
757 
758 static void
759 sk_setfilt(sc_if, addr, slot)
760 	struct sk_if_softc	*sc_if;
761 	caddr_t			addr;
762 	int			slot;
763 {
764 	int			base;
765 
766 	base = XM_RXFILT_ENTRY(slot);
767 
768 	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
769 	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
770 	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
771 
772 	return;
773 }
774 
775 static void
776 sk_setmulti(sc_if)
777 	struct sk_if_softc	*sc_if;
778 {
779 	struct sk_softc		*sc = sc_if->sk_softc;
780 	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
781 	u_int32_t		hashes[2] = { 0, 0 };
782 	int			h = 0, i;
783 	struct ifmultiaddr	*ifma;
784 	u_int8_t		dummy[] = { 0, 0, 0, 0, 0 ,0 };
785 
786 
787 	/* First, zot all the existing filters. */
788 	switch(sc->sk_type) {
789 	case SK_GENESIS:
790 		for (i = 1; i < XM_RXFILT_MAX; i++)
791 			sk_setfilt(sc_if, (caddr_t)&dummy, i);
792 
793 		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
794 		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
795 		break;
796 	case SK_YUKON:
797 		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
798 		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
799 		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
800 		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
801 		break;
802 	}
803 
804 	/* Now program new ones. */
805 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
806 		hashes[0] = 0xFFFFFFFF;
807 		hashes[1] = 0xFFFFFFFF;
808 	} else {
809 		i = 1;
810 		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
811 			if (ifma->ifma_addr->sa_family != AF_LINK)
812 				continue;
813 			/*
814 			 * Program the first XM_RXFILT_MAX multicast groups
815 			 * into the perfect filter. For all others,
816 			 * use the hash table.
817 			 */
818 			if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
819 				sk_setfilt(sc_if,
820 			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
821 				i++;
822 				continue;
823 			}
824 
825 			switch(sc->sk_type) {
826 			case SK_GENESIS:
827 				h = sk_xmchash(
828 					LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
829 				break;
830 			case SK_YUKON:
831 				h = sk_gmchash(
832 					LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
833 				break;
834 			}
835 			if (h < 32)
836 				hashes[0] |= (1 << h);
837 			else
838 				hashes[1] |= (1 << (h - 32));
839 		}
840 	}
841 
842 	switch(sc->sk_type) {
843 	case SK_GENESIS:
844 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
845 			       XM_MODE_RX_USE_PERFECT);
846 		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
847 		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
848 		break;
849 	case SK_YUKON:
850 		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
851 		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
852 		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
853 		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
854 		break;
855 	}
856 
857 	return;
858 }
859 
860 static void
861 sk_setpromisc(sc_if)
862 	struct sk_if_softc	*sc_if;
863 {
864 	struct sk_softc		*sc = sc_if->sk_softc;
865 	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
866 
867 	switch(sc->sk_type) {
868 	case SK_GENESIS:
869 		if (ifp->if_flags & IFF_PROMISC) {
870 			SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
871 		} else {
872 			SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
873 		}
874 		break;
875 	case SK_YUKON:
876 		if (ifp->if_flags & IFF_PROMISC) {
877 			SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
878 			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
879 		} else {
880 			SK_YU_SETBIT_2(sc_if, YUKON_RCR,
881 			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
882 		}
883 		break;
884 	}
885 
886 	return;
887 }
888 
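/*
 * Initialize the RX descriptor ring. The descriptors live in the
 * contiguous sk_rdata area allocated in sk_attach(); each software
 * sk_chain entry points at its hardware descriptor, and the hardware
 * sk_next fields hold physical (vtophys) addresses that link the ring
 * back onto itself.
 */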
889 static int
890 sk_init_rx_ring(sc_if)
891 	struct sk_if_softc	*sc_if;
892 {
893 	struct sk_chain_data	*cd = &sc_if->sk_cdata;
894 	struct sk_ring_data	*rd = sc_if->sk_rdata;
895 	int			i;
896 
897 	bzero((char *)rd->sk_rx_ring,
898 	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
899 
900 	for (i = 0; i < SK_RX_RING_CNT; i++) {
901 		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
902 		if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
903 			return(ENOBUFS);
904 		if (i == (SK_RX_RING_CNT - 1)) {
905 			cd->sk_rx_chain[i].sk_next =
906 			    &cd->sk_rx_chain[0];
907 			rd->sk_rx_ring[i].sk_next =
908 			    vtophys(&rd->sk_rx_ring[0]);
909 		} else {
910 			cd->sk_rx_chain[i].sk_next =
911 			    &cd->sk_rx_chain[i + 1];
912 			rd->sk_rx_ring[i].sk_next =
913 			    vtophys(&rd->sk_rx_ring[i + 1]);
914 		}
915 	}
916 
917 	sc_if->sk_cdata.sk_rx_prod = 0;
918 	sc_if->sk_cdata.sk_rx_cons = 0;
919 
920 	return(0);
921 }
922 
923 static void
924 sk_init_tx_ring(sc_if)
925 	struct sk_if_softc	*sc_if;
926 {
927 	struct sk_chain_data	*cd = &sc_if->sk_cdata;
928 	struct sk_ring_data	*rd = sc_if->sk_rdata;
929 	int			i;
930 
931 	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
932 	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
933 
934 	for (i = 0; i < SK_TX_RING_CNT; i++) {
935 		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
936 		if (i == (SK_TX_RING_CNT - 1)) {
937 			cd->sk_tx_chain[i].sk_next =
938 			    &cd->sk_tx_chain[0];
939 			rd->sk_tx_ring[i].sk_next =
940 			    vtophys(&rd->sk_tx_ring[0]);
941 		} else {
942 			cd->sk_tx_chain[i].sk_next =
943 			    &cd->sk_tx_chain[i + 1];
944 			rd->sk_tx_ring[i].sk_next =
945 			    vtophys(&rd->sk_tx_ring[i + 1]);
946 		}
947 	}
948 
949 	sc_if->sk_cdata.sk_tx_prod = 0;
950 	sc_if->sk_cdata.sk_tx_cons = 0;
951 	sc_if->sk_cdata.sk_tx_cnt = 0;
952 
953 	return;
954 }
955 
956 static int
957 sk_newbuf(sc_if, c, m)
958 	struct sk_if_softc	*sc_if;
959 	struct sk_chain		*c;
960 	struct mbuf		*m;
961 {
962 	struct mbuf		*m_new = NULL;
963 	struct sk_rx_desc	*r;
964 
965 	if (m == NULL) {
966 		caddr_t			*buf = NULL;
967 
968 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
969 		if (m_new == NULL)
970 			return(ENOBUFS);
971 
972 		/* Allocate the jumbo buffer */
973 		buf = sk_jalloc(sc_if);
974 		if (buf == NULL) {
975 			m_freem(m_new);
976 #ifdef SK_VERBOSE
977 			printf("sk%d: jumbo allocation failed "
978 			    "-- packet dropped!\n", sc_if->sk_unit);
979 #endif
980 			return(ENOBUFS);
981 		}
982 
983 		/* Attach the buffer to the mbuf */
984 		MEXTADD(m_new, buf, SK_JLEN, sk_jfree,
985 		    (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV);
986 		m_new->m_data = (void *)buf;
987 		m_new->m_pkthdr.len = m_new->m_len = SK_JLEN;
988 	} else {
989 		/*
990 	 	 * We're re-using a previously allocated mbuf;
991 		 * be sure to re-init pointers and lengths to
992 		 * default values.
993 		 */
994 		m_new = m;
995 		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
996 		m_new->m_data = m_new->m_ext.ext_buf;
997 	}
998 
999 	/*
1000 	 * Adjust alignment so packet payload begins on a
1001 	 * longword boundary. Mandatory for Alpha, useful on
1002 	 * x86 too.
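	 * ETHER_ALIGN is 2, so shifting the payload by two bytes leaves
	 * the IP header, which follows the 14-byte Ethernet header,
	 * aligned on a 32-bit boundary.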
1003 	 */
1004 	m_adj(m_new, ETHER_ALIGN);
1005 
1006 	r = c->sk_desc;
1007 	c->sk_mbuf = m_new;
1008 	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
1009 	r->sk_ctl = m_new->m_len | SK_RXSTAT;
1010 
1011 	return(0);
1012 }
1013 
1014 /*
1015  * Allocate jumbo buffer storage. The SysKonnect adapters support
1016  * "jumbograms" (9K frames), although SysKonnect doesn't currently
1017  * use them in their drivers. In order for us to use them, we need
1018  * large 9K receive buffers, however standard mbuf clusters are only
1019  * 2048 bytes in size. Consequently, we need to allocate and manage
1020  * our own jumbo buffer pool. Fortunately, this does not require an
1021  * excessive amount of additional code.
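 *
 * The pool is one contiguous allocation of SK_JMEM bytes carved into
 * SK_JSLOTS slots of SK_JLEN bytes each (SK_JMEM is assumed to equal
 * SK_JSLOTS * SK_JLEN, per if_skreg.h); free and in-use slots are
 * tracked on the sk_jfree_listhead and sk_jinuse_listhead lists.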
1022  */
1023 static int
1024 sk_alloc_jumbo_mem(sc_if)
1025 	struct sk_if_softc	*sc_if;
1026 {
1027 	caddr_t			ptr;
1028 	register int		i;
1029 	struct sk_jpool_entry   *entry;
1030 
1031 	/* Grab a big chunk o' storage. */
1032 	sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
1033 	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1034 
1035 	if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
1036 		printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
1037 		return(ENOBUFS);
1038 	}
1039 
1040 	SLIST_INIT(&sc_if->sk_jfree_listhead);
1041 	SLIST_INIT(&sc_if->sk_jinuse_listhead);
1042 
1043 	/*
1044 	 * Now divide it up into 9K pieces and save the addresses
1045 	 * in an array.
1046 	 */
1047 	ptr = sc_if->sk_cdata.sk_jumbo_buf;
1048 	for (i = 0; i < SK_JSLOTS; i++) {
1049 		sc_if->sk_cdata.sk_jslots[i] = ptr;
1050 		ptr += SK_JLEN;
1051 		entry = malloc(sizeof(struct sk_jpool_entry),
1052 		    M_DEVBUF, M_NOWAIT);
1053 		if (entry == NULL) {
1054 			free(sc_if->sk_cdata.sk_jumbo_buf, M_DEVBUF);
1055 			sc_if->sk_cdata.sk_jumbo_buf = NULL;
1056 			printf("sk%d: no memory for jumbo "
1057 			    "buffer queue!\n", sc_if->sk_unit);
1058 			return(ENOBUFS);
1059 		}
1060 		entry->slot = i;
1061 		SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
1062 		    entry, jpool_entries);
1063 	}
1064 
1065 	return(0);
1066 }
1067 
1068 /*
1069  * Allocate a jumbo buffer.
1070  */
1071 static void *
1072 sk_jalloc(sc_if)
1073 	struct sk_if_softc	*sc_if;
1074 {
1075 	struct sk_jpool_entry   *entry;
1076 
1077 	entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
1078 
1079 	if (entry == NULL) {
1080 #ifdef SK_VERBOSE
1081 		printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
1082 #endif
1083 		return(NULL);
1084 	}
1085 
1086 	SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
1087 	SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
1088 	return(sc_if->sk_cdata.sk_jslots[entry->slot]);
1089 }
1090 
1091 /*
1092  * Release a jumbo buffer.
1093  */
1094 static void
1095 sk_jfree(buf, args)
1096 	void			*buf;
1097 	void			*args;
1098 {
1099 	struct sk_if_softc	*sc_if;
1100 	int		        i;
1101 	struct sk_jpool_entry   *entry;
1102 
1103 	/* Extract the softc struct pointer. */
1104 	sc_if = (struct sk_if_softc *)args;
1105 
1106 	if (sc_if == NULL)
1107 		panic("sk_jfree: didn't get softc pointer!");
1108 
1109 	/* calculate the slot this buffer belongs to */
1110 	i = ((vm_offset_t)buf
1111 	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
1112 
1113 	if ((i < 0) || (i >= SK_JSLOTS))
1114 		panic("sk_jfree: asked to free buffer that we don't manage!");
1115 
1116 	entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
1117 	if (entry == NULL)
1118 		panic("sk_jfree: buffer not in use!");
1119 	entry->slot = i;
1120 	SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
1121 	SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);
1122 
1123 	return;
1124 }
1125 
1126 /*
1127  * Set media options.
1128  */
1129 static int
1130 sk_ifmedia_upd(ifp)
1131 	struct ifnet		*ifp;
1132 {
1133 	struct sk_if_softc	*sc_if = ifp->if_softc;
1134 	struct mii_data		*mii;
1135 
1136 	mii = device_get_softc(sc_if->sk_miibus);
1137 	sk_init(sc_if);
1138 	mii_mediachg(mii);
1139 
1140 	return(0);
1141 }
1142 
1143 /*
1144  * Report current media status.
1145  */
1146 static void
1147 sk_ifmedia_sts(ifp, ifmr)
1148 	struct ifnet		*ifp;
1149 	struct ifmediareq	*ifmr;
1150 {
1151 	struct sk_if_softc	*sc_if;
1152 	struct mii_data		*mii;
1153 
1154 	sc_if = ifp->if_softc;
1155 	mii = device_get_softc(sc_if->sk_miibus);
1156 
1157 	mii_pollstat(mii);
1158 	ifmr->ifm_active = mii->mii_media_active;
1159 	ifmr->ifm_status = mii->mii_media_status;
1160 
1161 	return;
1162 }
1163 
1164 static int
1165 sk_ioctl(ifp, command, data)
1166 	struct ifnet		*ifp;
1167 	u_long			command;
1168 	caddr_t			data;
1169 {
1170 	struct sk_if_softc	*sc_if = ifp->if_softc;
1171 	struct ifreq		*ifr = (struct ifreq *) data;
1172 	int			error = 0;
1173 	struct mii_data		*mii;
1174 
1175 	SK_IF_LOCK(sc_if);
1176 
1177 	switch(command) {
1178 	case SIOCSIFMTU:
1179 		if (ifr->ifr_mtu > SK_JUMBO_MTU)
1180 			error = EINVAL;
1181 		else {
1182 			ifp->if_mtu = ifr->ifr_mtu;
1183 			sk_init(sc_if);
1184 		}
1185 		break;
1186 	case SIOCSIFFLAGS:
1187 		if (ifp->if_flags & IFF_UP) {
1188 			if (ifp->if_flags & IFF_RUNNING) {
1189 				if ((ifp->if_flags ^ sc_if->sk_if_flags)
1190 				    & IFF_PROMISC) {
1191 					sk_setpromisc(sc_if);
1192 					sk_setmulti(sc_if);
1193 				}
1194 			} else
1195 				sk_init(sc_if);
1196 		} else {
1197 			if (ifp->if_flags & IFF_RUNNING)
1198 				sk_stop(sc_if);
1199 		}
1200 		sc_if->sk_if_flags = ifp->if_flags;
1201 		error = 0;
1202 		break;
1203 	case SIOCADDMULTI:
1204 	case SIOCDELMULTI:
1205 		sk_setmulti(sc_if);
1206 		error = 0;
1207 		break;
1208 	case SIOCGIFMEDIA:
1209 	case SIOCSIFMEDIA:
1210 		mii = device_get_softc(sc_if->sk_miibus);
1211 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1212 		break;
1213 	default:
1214 		error = ether_ioctl(ifp, command, data);
1215 		break;
1216 	}
1217 
1218 	SK_IF_UNLOCK(sc_if);
1219 
1220 	return(error);
1221 }
1222 
1223 /*
1224  * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1225  * IDs against our list and return a device name if we find a match.
1226  */
1227 static int
1228 skc_probe(dev)
1229 	device_t		dev;
1230 {
1231 	struct sk_softc		*sc;
1232 	struct sk_type		*t = sk_devs;
1233 
1234 	sc = device_get_softc(dev);
1235 
1236 	while(t->sk_name != NULL) {
1237 		if ((pci_get_vendor(dev) == t->sk_vid) &&
1238 		    (pci_get_device(dev) == t->sk_did)) {
1239 			device_set_desc(dev, t->sk_name);
1240 			return(0);
1241 		}
1242 		t++;
1243 	}
1244 
1245 	return(ENXIO);
1246 }
1247 
1248 /*
1249  * Force the GEnesis into reset, then bring it out of reset.
1250  */
1251 static void
1252 sk_reset(sc)
1253 	struct sk_softc		*sc;
1254 {
1255 	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
1256 	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
1257 	if (sc->sk_type == SK_YUKON)
1258 		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
1259 
1260 	DELAY(1000);
1261 	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
1262 	DELAY(2);
1263 	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
1264 	if (sc->sk_type == SK_YUKON)
1265 		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
1266 
1267 	if (sc->sk_type == SK_GENESIS) {
1268 		/* Configure packet arbiter */
1269 		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
1270 		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
1271 		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
1272 		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
1273 		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
1274 	}
1275 
1276 	/* Enable RAM interface */
1277 	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
1278 
1279 	/*
1280 	 * Configure interrupt moderation. The moderation timer
1281 	 * defers interrupts specified in the interrupt moderation
1282 	 * timer mask based on the timeout specified in the interrupt
1283 	 * moderation timer init register. Each bit in the timer
1284 	 * register represents 18.825ns, so to specify a timeout in
1285 	 * microseconds, we have to multiply by 54.
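	 * As an example, the SK_IM_USECS(200) used below should work out
	 * to roughly 200 * 54 = 10800 timer ticks, i.e. about 200us
	 * between moderated interrupts.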
1286 	 */
1287 	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
1288 	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
1289 	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
1290 	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
1291 
1292 	return;
1293 }
1294 
1295 static int
1296 sk_probe(dev)
1297 	device_t		dev;
1298 {
1299 	struct sk_softc		*sc;
1300 
1301 	sc = device_get_softc(device_get_parent(dev));
1302 
1303 	/*
1304 	 * Not much to do here. We always know there will be
1305 	 * at least one XMAC present, and if there are two,
1306 	 * skc_attach() will create a second device instance
1307 	 * for us.
1308 	 */
1309 	switch (sc->sk_type) {
1310 	case SK_GENESIS:
1311 		device_set_desc(dev, "XaQti Corp. XMAC II");
1312 		break;
1313 	case SK_YUKON:
1314 		device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
1315 		break;
1316 	}
1317 
1318 	return(0);
1319 }
1320 
1321 /*
1322  * Each XMAC chip is attached as a separate logical IP interface.
1323  * Single port cards will have only one logical interface of course.
1324  */
1325 static int
1326 sk_attach(dev)
1327 	device_t		dev;
1328 {
1329 	struct sk_softc		*sc;
1330 	struct sk_if_softc	*sc_if;
1331 	struct ifnet		*ifp;
1332 	int			i, port, error;
1333 
1334 	if (dev == NULL)
1335 		return(EINVAL);
1336 
1337 	error = 0;
1338 	sc_if = device_get_softc(dev);
1339 	sc = device_get_softc(device_get_parent(dev));
1340 	port = *(int *)device_get_ivars(dev);
1341 	free(device_get_ivars(dev), M_DEVBUF);
1342 	device_set_ivars(dev, NULL);
1343 
1344 	sc_if->sk_dev = dev;
1345 	sc_if->sk_unit = device_get_unit(dev);
1346 	sc_if->sk_port = port;
1347 	sc_if->sk_softc = sc;
1348 	sc->sk_if[port] = sc_if;
1349 	if (port == SK_PORT_A)
1350 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1351 	if (port == SK_PORT_B)
1352 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1353 
1354 	/* Allocate the descriptor queues. */
1355 	sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
1356 	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1357 
1358 	if (sc_if->sk_rdata == NULL) {
1359 		printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
1360 		error = ENOMEM;
1361 		goto fail;
1362 	}
1363 
1364 	bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));
1365 
1366 	/* Try to allocate memory for jumbo buffers. */
1367 	if (sk_alloc_jumbo_mem(sc_if)) {
1368 		printf("sk%d: jumbo buffer allocation failed\n",
1369 		    sc_if->sk_unit);
1370 		error = ENOMEM;
1371 		goto fail;
1372 	}
1373 
1374 	ifp = &sc_if->arpcom.ac_if;
1375 	ifp->if_softc = sc_if;
1376 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1377 	ifp->if_mtu = ETHERMTU;
1378 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1379 	ifp->if_ioctl = sk_ioctl;
1380 	ifp->if_start = sk_start;
1381 	ifp->if_watchdog = sk_watchdog;
1382 	ifp->if_init = sk_init;
1383 	ifp->if_baudrate = 1000000000;
1384 	ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1;
1385 
1386 	callout_handle_init(&sc_if->sk_tick_ch);
1387 
1388 	/*
1389 	 * Get station address for this interface. Note that
1390 	 * dual port cards actually come with three station
1391 	 * addresses: one for each port, plus an extra. The
1392 	 * extra one is used by the SysKonnect driver software
1393 	 * as a 'virtual' station address for when both ports
1394 	 * are operating in failover mode. Currently we don't
1395 	 * use this extra address.
1396 	 */
1397 	SK_LOCK(sc);
1398 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1399 		sc_if->arpcom.ac_enaddr[i] =
1400 		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1401 
1402 	/*
1403 	 * Set up RAM buffer addresses. The NIC will have a certain
1404 	 * amount of SRAM on it, somewhere between 512K and 2MB. We
1405 	 * need to divide this up a) between the transmitter and
1406  	 * receiver and b) between the two XMACs, if this is a
1407 	 * dual port NIC. Our algorithm is to divide up the memory
1408 	 * evenly so that everyone gets a fair share.
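	 * For example, a single-MAC card with 1MB of SRAM gives receive
	 * and transmit 512K each, while a dual port card gives each of
	 * the four buffers 256K. The start/end values programmed below
	 * are expressed in units of sizeof(u_int64_t) (8 bytes).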
1409 	 */
1410 	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1411 		u_int32_t		chunk, val;
1412 
1413 		chunk = sc->sk_ramsize / 2;
1414 		val = sc->sk_rboff / sizeof(u_int64_t);
1415 		sc_if->sk_rx_ramstart = val;
1416 		val += (chunk / sizeof(u_int64_t));
1417 		sc_if->sk_rx_ramend = val - 1;
1418 		sc_if->sk_tx_ramstart = val;
1419 		val += (chunk / sizeof(u_int64_t));
1420 		sc_if->sk_tx_ramend = val - 1;
1421 	} else {
1422 		u_int32_t		chunk, val;
1423 
1424 		chunk = sc->sk_ramsize / 4;
1425 		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1426 		    sizeof(u_int64_t);
1427 		sc_if->sk_rx_ramstart = val;
1428 		val += (chunk / sizeof(u_int64_t));
1429 		sc_if->sk_rx_ramend = val - 1;
1430 		sc_if->sk_tx_ramstart = val;
1431 		val += (chunk / sizeof(u_int64_t));
1432 		sc_if->sk_tx_ramend = val - 1;
1433 	}
1434 
1435 	/* Read and save PHY type and set PHY address */
1436 	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1437 	switch(sc_if->sk_phytype) {
1438 	case SK_PHYTYPE_XMAC:
1439 		sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1440 		break;
1441 	case SK_PHYTYPE_BCOM:
1442 		sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1443 		break;
1444 	case SK_PHYTYPE_MARV_COPPER:
1445 		sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1446 		break;
1447 	default:
1448 		printf("skc%d: unsupported PHY type: %d\n",
1449 		    sc->sk_unit, sc_if->sk_phytype);
1450 		error = ENODEV;
1451 		SK_UNLOCK(sc);
1452 		goto fail;
1453 	}
1454 
1455 
1456 	/*
1457 	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
1458 	 */
1459 	SK_UNLOCK(sc);
1460 	ether_ifattach(ifp, sc_if->arpcom.ac_enaddr);
1461 	SK_LOCK(sc);
1462 
1463 	/*
1464 	 * Do miibus setup.
1465 	 */
1466 	switch (sc->sk_type) {
1467 	case SK_GENESIS:
1468 		sk_init_xmac(sc_if);
1469 		break;
1470 	case SK_YUKON:
1471 		sk_init_yukon(sc_if);
1472 		break;
1473 	}
1474 
1475 	SK_UNLOCK(sc);
1476 	if (mii_phy_probe(dev, &sc_if->sk_miibus,
1477 	    sk_ifmedia_upd, sk_ifmedia_sts)) {
1478 		printf("sk%d: no PHY found!\n", sc_if->sk_unit);
1479 		ether_ifdetach(ifp);
1480 		error = ENXIO;
1481 		goto fail;
1482 	}
1483 
1484 fail:
1485 	if (error) {
1486 		/* Access should be ok even though lock has been dropped */
1487 		sc->sk_if[port] = NULL;
1488 		sk_detach(dev);
1489 	}
1490 
1491 	return(error);
1492 }
1493 
1494 /*
1495  * Attach the interface. Allocate softc structures, do ifmedia
1496  * setup and ethernet/BPF attach.
1497  */
1498 static int
1499 skc_attach(dev)
1500 	device_t		dev;
1501 {
1502 	struct sk_softc		*sc;
1503 	int			unit, error = 0, rid, *port;
1504 
1505 	sc = device_get_softc(dev);
1506 	unit = device_get_unit(dev);
1507 
1508 	mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1509 	    MTX_DEF | MTX_RECURSE);
1510 	/*
1511 	 * Map control/status registers.
1512 	 */
1513 	pci_enable_busmaster(dev);
1514 
1515 	rid = SK_RID;
1516 	sc->sk_res = bus_alloc_resource_any(dev, SK_RES, &rid, RF_ACTIVE);
1517 
1518 	if (sc->sk_res == NULL) {
1519 		printf("skc%d: couldn't map ports/memory\n", unit);
1520 		error = ENXIO;
1521 		goto fail;
1522 	}
1523 
1524 	sc->sk_btag = rman_get_bustag(sc->sk_res);
1525 	sc->sk_bhandle = rman_get_bushandle(sc->sk_res);
1526 
1527 	/* Allocate interrupt */
1528 	rid = 0;
1529 	sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1530 	    RF_SHAREABLE | RF_ACTIVE);
1531 
1532 	if (sc->sk_irq == NULL) {
1533 		printf("skc%d: couldn't map interrupt\n", unit);
1534 		error = ENXIO;
1535 		goto fail;
1536 	}
1537 
1538 	/* Set adapter type */
1539 	switch (pci_get_device(dev)) {
1540 	case DEVICEID_SK_V1:
1541 		sc->sk_type = SK_GENESIS;
1542 		break;
1543 	case DEVICEID_SK_V2:
1544 	case DEVICEID_BELKIN_5005:
1545 	case DEVICEID_3COM_3C940:
1546 	case DEVICEID_LINKSYS_EG1032:
1547 	case DEVICEID_DLINK_DGE530T:
1548 		sc->sk_type = SK_YUKON;
1549 		break;
1550 	default:
1551 		printf("skc%d: unknown device!\n", unit);
1552 		error = ENXIO;
1553 		goto fail;
1554 	}
1555 
1556 	/* Reset the adapter. */
1557 	sk_reset(sc);
1558 
1559 	sc->sk_unit = unit;
1560 
1561 	/* Read and save vital product data from EEPROM. */
1562 	sk_vpd_read(sc);
1563 
1564 	if (sc->sk_type == SK_GENESIS) {
1565 		/* Read and save RAM size and RAMbuffer offset */
1566 		switch(sk_win_read_1(sc, SK_EPROM0)) {
1567 		case SK_RAMSIZE_512K_64:
1568 			sc->sk_ramsize = 0x80000;
1569 			sc->sk_rboff = SK_RBOFF_0;
1570 			break;
1571 		case SK_RAMSIZE_1024K_64:
1572 			sc->sk_ramsize = 0x100000;
1573 			sc->sk_rboff = SK_RBOFF_80000;
1574 			break;
1575 		case SK_RAMSIZE_1024K_128:
1576 			sc->sk_ramsize = 0x100000;
1577 			sc->sk_rboff = SK_RBOFF_0;
1578 			break;
1579 		case SK_RAMSIZE_2048K_128:
1580 			sc->sk_ramsize = 0x200000;
1581 			sc->sk_rboff = SK_RBOFF_0;
1582 			break;
1583 		default:
1584 			printf("skc%d: unknown ram size: %d\n",
1585 			    sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
1586 			error = ENXIO;
1587 			goto fail;
1588 		}
1589 	} else {
1590 		sc->sk_ramsize = 0x20000;
1591 		sc->sk_rboff = SK_RBOFF_0;
1592 	}
1593 
1594 	/* Read and save physical media type */
1595 	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
1596 	case SK_PMD_1000BASESX:
1597 		sc->sk_pmd = IFM_1000_SX;
1598 		break;
1599 	case SK_PMD_1000BASELX:
1600 		sc->sk_pmd = IFM_1000_LX;
1601 		break;
1602 	case SK_PMD_1000BASECX:
1603 		sc->sk_pmd = IFM_1000_CX;
1604 		break;
1605 	case SK_PMD_1000BASETX:
1606 		sc->sk_pmd = IFM_1000_T;
1607 		break;
1608 	default:
1609 		printf("skc%d: unknown media type: 0x%x\n",
1610 		    sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
1611 		error = ENXIO;
1612 		goto fail;
1613 	}
1614 
1615 	/* Announce the product name. */
1616 	if (sc->sk_vpd_prodname != NULL)
1617 	    printf("skc%d: %s\n", sc->sk_unit, sc->sk_vpd_prodname);
1618 	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1619 	port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1620 	*port = SK_PORT_A;
1621 	device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1622 
1623 	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1624 		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1625 		port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1626 		*port = SK_PORT_B;
1627 		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1628 	}
1629 
1630 	/* Turn on the 'driver is loaded' LED. */
1631 	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1632 
1633 	bus_generic_attach(dev);
1634 
1635 	/* Hook interrupt last to avoid having to lock softc */
1636 	error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET|INTR_MPSAFE,
1637 	    sk_intr, sc, &sc->sk_intrhand);
1638 
1639 	if (error) {
1640 		printf("skc%d: couldn't set up irq\n", unit);
1641 		goto fail;
1642 	}
1643 
1644 fail:
1645 	if (error)
1646 		skc_detach(dev);
1647 
1648 	return(error);
1649 }
1650 
1651 /*
1652  * Shutdown hardware and free up resources. This can be called any
1653  * time after the mutex has been initialized. It is called in both
1654  * the error case in attach and the normal detach case so it needs
1655  * to be careful about only freeing resources that have actually been
1656  * allocated.
1657  */
1658 static int
1659 sk_detach(dev)
1660 	device_t		dev;
1661 {
1662 	struct sk_if_softc	*sc_if;
1663 	struct ifnet		*ifp;
1664 
1665 	sc_if = device_get_softc(dev);
1666 	KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
1667 	    ("sk mutex not initialized in sk_detach"));
1668 	SK_IF_LOCK(sc_if);
1669 
1670 	ifp = &sc_if->arpcom.ac_if;
1671 	/* These should only be active if sk_attach succeeded */
1672 	if (device_is_attached(dev)) {
1673 		sk_stop(sc_if);
1674 		/* Can't hold locks while calling detach */
1675 		SK_IF_UNLOCK(sc_if);
1676 		ether_ifdetach(ifp);
1677 		SK_IF_LOCK(sc_if);
1678 	}
1679 	/*
1680 	 * We're generally called from skc_detach() which is using
1681 	 * device_delete_child() to get to here. It's already trashed
1682 	 * miibus for us, so don't do it here or we'll panic.
1683 	 */
1684 	/*
1685 	if (sc_if->sk_miibus != NULL)
1686 		device_delete_child(dev, sc_if->sk_miibus);
1687 	*/
1688 	bus_generic_detach(dev);
1689 	if (sc_if->sk_cdata.sk_jumbo_buf != NULL)
1690 		contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
1691 	if (sc_if->sk_rdata != NULL) {
1692 		contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data),
1693 		    M_DEVBUF);
1694 	}
1695 	SK_IF_UNLOCK(sc_if);
1696 
1697 	return(0);
1698 }
1699 
1700 static int
1701 skc_detach(dev)
1702 	device_t		dev;
1703 {
1704 	struct sk_softc		*sc;
1705 
1706 	sc = device_get_softc(dev);
1707 	KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));
1708 
1709 	if (device_is_alive(dev)) {
1710 		if (sc->sk_devs[SK_PORT_A] != NULL)
1711 			device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
1712 		if (sc->sk_devs[SK_PORT_B] != NULL)
1713 			device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
1714 		bus_generic_detach(dev);
1715 	}
1716 
1717 	if (sc->sk_intrhand)
1718 		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1719 	if (sc->sk_irq)
1720 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1721 	if (sc->sk_res)
1722 		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1723 
1724 	mtx_destroy(&sc->sk_mtx);
1725 
1726 	return(0);
1727 }
1728 
1729 static int
1730 sk_encap(sc_if, m_head, txidx)
1731 	struct sk_if_softc	*sc_if;
1732 	struct mbuf		*m_head;
1733 	u_int32_t		*txidx;
1734 {
1735 	struct sk_tx_desc	*f = NULL;
1736 	struct mbuf		*m;
1737 	u_int32_t		frag, cur, cnt = 0;
1738 
1739 	m = m_head;
1740 	cur = frag = *txidx;
1741 
1742 	/*
1743 	 * Start packing the mbufs in this chain into
1744 	 * the fragment pointers. Stop when we run out
1745 	 * of fragments or hit the end of the mbuf chain.
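	 * Every fragment after the first has its OWN bit set here; the
	 * first descriptor's OWN bit is only set once the whole chain
	 * has been built (below), so the NIC never sees a partially
	 * constructed frame.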
1746 	 */
1747 	for (m = m_head; m != NULL; m = m->m_next) {
1748 		if (m->m_len != 0) {
1749 			if ((SK_TX_RING_CNT -
1750 			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
1751 				return(ENOBUFS);
1752 			f = &sc_if->sk_rdata->sk_tx_ring[frag];
1753 			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
1754 			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
1755 			if (cnt == 0)
1756 				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
1757 			else
1758 				f->sk_ctl |= SK_TXCTL_OWN;
1759 			cur = frag;
1760 			SK_INC(frag, SK_TX_RING_CNT);
1761 			cnt++;
1762 		}
1763 	}
1764 
1765 	if (m != NULL)
1766 		return(ENOBUFS);
1767 
1768 	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
1769 		SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
1770 	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
1771 	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
1772 	sc_if->sk_cdata.sk_tx_cnt += cnt;
1773 
1774 	*txidx = frag;
1775 
1776 	return(0);
1777 }
1778 
1779 static void
1780 sk_start(ifp)
1781 	struct ifnet		*ifp;
1782 {
1783 	struct sk_softc		*sc;
1784 	struct sk_if_softc	*sc_if;
1785 	struct mbuf		*m_head = NULL;
1786 	u_int32_t		idx;
1787 
1788 	sc_if = ifp->if_softc;
1789 	sc = sc_if->sk_softc;
1790 
1791 	SK_IF_LOCK(sc_if);
1792 
1793 	idx = sc_if->sk_cdata.sk_tx_prod;
1794 
1795 	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
1796 		IF_DEQUEUE(&ifp->if_snd, m_head);
1797 		if (m_head == NULL)
1798 			break;
1799 
1800 		/*
1801 		 * Pack the data into the transmit ring. If we
1802 		 * don't have room, set the OACTIVE flag and wait
1803 		 * for the NIC to drain the ring.
1804 		 */
1805 		if (sk_encap(sc_if, m_head, &idx)) {
1806 			IF_PREPEND(&ifp->if_snd, m_head);
1807 			ifp->if_flags |= IFF_OACTIVE;
1808 			break;
1809 		}
1810 
1811 		/*
1812 		 * If there's a BPF listener, bounce a copy of this frame
1813 		 * to him.
1814 		 */
1815 		BPF_MTAP(ifp, m_head);
1816 	}
1817 
1818 	/* Transmit */
1819 	sc_if->sk_cdata.sk_tx_prod = idx;
1820 	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
1821 
1822 	/* Set a timeout in case the chip goes out to lunch. */
1823 	ifp->if_timer = 5;
1824 	SK_IF_UNLOCK(sc_if);
1825 
1826 	return;
1827 }
1828 
1829 
1830 static void
1831 sk_watchdog(ifp)
1832 	struct ifnet		*ifp;
1833 {
1834 	struct sk_if_softc	*sc_if;
1835 
1836 	sc_if = ifp->if_softc;
1837 
1838 	printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
1839 	sk_init(sc_if);
1840 
1841 	return;
1842 }
1843 
1844 static void
1845 skc_shutdown(dev)
1846 	device_t		dev;
1847 {
1848 	struct sk_softc		*sc;
1849 
1850 	sc = device_get_softc(dev);
1851 	SK_LOCK(sc);
1852 
1853 	/* Turn off the 'driver is loaded' LED. */
1854 	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1855 
1856 	/*
1857 	 * Reset the GEnesis controller. Doing this should also
1858 	 * assert the resets on the attached XMAC(s).
1859 	 */
1860 	sk_reset(sc);
1861 	SK_UNLOCK(sc);
1862 
1863 	return;
1864 }
1865 
1866 static void
1867 sk_rxeof(sc_if)
1868 	struct sk_if_softc	*sc_if;
1869 {
1870 	struct sk_softc		*sc;
1871 	struct mbuf		*m;
1872 	struct ifnet		*ifp;
1873 	struct sk_chain		*cur_rx;
1874 	int			total_len = 0;
1875 	int			i;
1876 	u_int32_t		rxstat;
1877 
1878 	sc = sc_if->sk_softc;
1879 	ifp = &sc_if->arpcom.ac_if;
1880 	i = sc_if->sk_cdata.sk_rx_prod;
1881 	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
1882 
1883 	SK_LOCK_ASSERT(sc);
1884 
1885 	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {
1886 
1887 		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
1888 		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
1889 		m = cur_rx->sk_mbuf;
1890 		cur_rx->sk_mbuf = NULL;
1891 		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
1892 		SK_INC(i, SK_RX_RING_CNT);
1893 
1894 		if (rxstat & XM_RXSTAT_ERRFRAME) {
1895 			ifp->if_ierrors++;
1896 			sk_newbuf(sc_if, cur_rx, m);
1897 			continue;
1898 		}
1899 
1900 		/*
1901 		 * Try to allocate a new jumbo buffer. If that
1902 		 * fails, copy the packet to mbufs and put the
1903 		 * jumbo buffer back in the ring so it can be
1904 		 * re-used. If allocating mbufs fails, then we
1905 		 * have to drop the packet.
1906 		 */
1907 		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
1908 			struct mbuf		*m0;
1909 			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
1910 			    ifp, NULL);
1911 			sk_newbuf(sc_if, cur_rx, m);
1912 			if (m0 == NULL) {
1913 				printf("sk%d: no receive buffers "
1914 				    "available -- packet dropped!\n",
1915 				    sc_if->sk_unit);
1916 				ifp->if_ierrors++;
1917 				continue;
1918 			}
1919 			m = m0;
1920 		} else {
1921 			m->m_pkthdr.rcvif = ifp;
1922 			m->m_pkthdr.len = m->m_len = total_len;
1923 		}
1924 
1925 		ifp->if_ipackets++;
1926 		SK_UNLOCK(sc);
1927 		(*ifp->if_input)(ifp, m);
1928 		SK_LOCK(sc);
1929 	}
1930 
1931 	sc_if->sk_cdata.sk_rx_prod = i;
1932 
1933 	return;
1934 }
1935 
1936 static void
1937 sk_txeof(sc_if)
1938 	struct sk_if_softc	*sc_if;
1939 {
1940 	struct sk_tx_desc	*cur_tx = NULL;
1941 	struct ifnet		*ifp;
1942 	u_int32_t		idx;
1943 
1944 	ifp = &sc_if->arpcom.ac_if;
1945 
1946 	/*
1947 	 * Go through our tx ring and free mbufs for those
1948 	 * frames that have been sent.
1949 	 */
1950 	idx = sc_if->sk_cdata.sk_tx_cons;
1951 	while(idx != sc_if->sk_cdata.sk_tx_prod) {
1952 		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
1953 		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
1954 			break;
1955 		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
1956 			ifp->if_opackets++;
1957 		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
1958 			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
1959 			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
1960 		}
1961 		sc_if->sk_cdata.sk_tx_cnt--;
1962 		SK_INC(idx, SK_TX_RING_CNT);
1963 		ifp->if_timer = 0;
1964 	}
1965 
1966 	sc_if->sk_cdata.sk_tx_cons = idx;
1967 
1968 	if (cur_tx != NULL)
1969 		ifp->if_flags &= ~IFF_OACTIVE;
1970 
1971 	return;
1972 }
1973 
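/*
 * Periodic link timer: for Broadcom PHYs simply run the PHY interrupt
 * handler; for the XMAC's internal PHY, poll the GP0 pin (wired to
 * link_sync) and re-enable the GP0 interrupt once the link is back.
 */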
1974 static void
1975 sk_tick(xsc_if)
1976 	void			*xsc_if;
1977 {
1978 	struct sk_if_softc	*sc_if;
1979 	struct mii_data		*mii;
1980 	struct ifnet		*ifp;
1981 	int			i;
1982 
1983 	sc_if = xsc_if;
1984 	SK_IF_LOCK(sc_if);
1985 	ifp = &sc_if->arpcom.ac_if;
1986 	mii = device_get_softc(sc_if->sk_miibus);
1987 
1988 	if (!(ifp->if_flags & IFF_UP)) {
1989 		SK_IF_UNLOCK(sc_if);
1990 		return;
1991 	}
1992 
1993 	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
1994 		sk_intr_bcom(sc_if);
1995 		SK_IF_UNLOCK(sc_if);
1996 		return;
1997 	}
1998 
1999 	/*
2000 	 * According to SysKonnect, the correct way to verify that
2001 	 * the link has come back up is to poll bit 0 of the GPIO
2002 	 * register three times. This pin has the signal from the
2003 	 * link_sync pin connected to it; if we read the same link
2004 	 * state 3 times in a row, we know the link is up.
2005 	 */
2006 	for (i = 0; i < 3; i++) {
2007 		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
2008 			break;
2009 	}
2010 
2011 	if (i != 3) {
2012 		sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
2013 		SK_IF_UNLOCK(sc_if);
2014 		return;
2015 	}
2016 
2017 	/* Turn the GP0 interrupt back on. */
2018 	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2019 	SK_XM_READ_2(sc_if, XM_ISR);
2020 	mii_tick(mii);
2021 	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);
2022 
2023 	SK_IF_UNLOCK(sc_if);
2024 	return;
2025 }
2026 
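/*
 * Service an interrupt from the Broadcom 1000baseTX PHY attached via
 * the XMAC's GMII interface: briefly disable the MAC's TX/RX state
 * machines, read and acknowledge the PHY interrupt, and update the
 * link state and link LED accordingly.
 */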
2027 static void
2028 sk_intr_bcom(sc_if)
2029 	struct sk_if_softc	*sc_if;
2030 {
2031 	struct mii_data		*mii;
2032 	struct ifnet		*ifp;
2033 	int			status;
2034 	mii = device_get_softc(sc_if->sk_miibus);
2035 	ifp = &sc_if->arpcom.ac_if;
2036 
2037 	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2038 
2039 	/*
2040 	 * Read the PHY interrupt register to make sure
2041 	 * we clear any pending interrupts.
2042 	 */
2043 	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);
2044 
2045 	if (!(ifp->if_flags & IFF_RUNNING)) {
2046 		sk_init_xmac(sc_if);
2047 		return;
2048 	}
2049 
2050 	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
2051 		int			lstat;
2052 		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
2053 		    BRGPHY_MII_AUXSTS);
2054 
2055 		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
2056 			mii_mediachg(mii);
2057 			/* Turn off the link LED. */
2058 			SK_IF_WRITE_1(sc_if, 0,
2059 			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
2060 			sc_if->sk_link = 0;
2061 		} else if (status & BRGPHY_ISR_LNK_CHG) {
2062 			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2063 			    BRGPHY_MII_IMR, 0xFF00);
2064 			mii_tick(mii);
2065 			sc_if->sk_link = 1;
2066 			/* Turn on the link LED. */
2067 			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
2068 			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
2069 			    SK_LINKLED_BLINK_OFF);
2070 		} else {
2071 			mii_tick(mii);
2072 			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
2073 		}
2074 	}
2075 
2076 	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2077 
2078 	return;
2079 }
2080 
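/*
 * Service an XMAC interrupt: on GP0 or autonegotiation events start
 * the link tick timer to watch for link resync, and flush the
 * transmit/receive FIFOs on underrun/overrun conditions.
 */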
2081 static void
2082 sk_intr_xmac(sc_if)
2083 	struct sk_if_softc	*sc_if;
2084 {
2085 	struct sk_softc		*sc;
2086 	u_int16_t		status;
2087 
2088 	sc = sc_if->sk_softc;
2089 	status = SK_XM_READ_2(sc_if, XM_ISR);
2090 
2091 	/*
2092 	 * Link has gone down. Start MII tick timeout to
2093 	 * watch for link resync.
2094 	 */
2095 	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
2096 		if (status & XM_ISR_GP0_SET) {
2097 			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2098 			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
2099 		}
2100 
2101 		if (status & XM_ISR_AUTONEG_DONE) {
2102 			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
2103 		}
2104 	}
2105 
2106 	if (status & XM_IMR_TX_UNDERRUN)
2107 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
2108 
2109 	if (status & XM_IMR_RX_OVERRUN)
2110 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
2111 
2112 	status = SK_XM_READ_2(sc_if, XM_ISR);
2113 
2114 	return;
2115 }
2116 
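/*
 * Service a GMAC (Yukon) interrupt. Reading the interrupt source
 * register appears to be sufficient to acknowledge it; no further
 * handling is done here.
 */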
2117 static void
2118 sk_intr_yukon(sc_if)
2119 	struct sk_if_softc	*sc_if;
2120 {
2121 	int status;
2122 
2123 	status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2124 
2125 	return;
2126 }
2127 
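/*
 * Main interrupt handler, shared by both ports: loop while any
 * unmasked bits are set in the interrupt source register, dispatching
 * receive, transmit and MAC events to the appropriate port, then
 * restore the interrupt mask and kick off any pending transmissions.
 */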
2128 static void
2129 sk_intr(xsc)
2130 	void			*xsc;
2131 {
2132 	struct sk_softc		*sc = xsc;
2133 	struct sk_if_softc	*sc_if0 = NULL, *sc_if1 = NULL;
2134 	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
2135 	u_int32_t		status;
2136 
2137 	SK_LOCK(sc);
2138 
2139 	sc_if0 = sc->sk_if[SK_PORT_A];
2140 	sc_if1 = sc->sk_if[SK_PORT_B];
2141 
2142 	if (sc_if0 != NULL)
2143 		ifp0 = &sc_if0->arpcom.ac_if;
2144 	if (sc_if1 != NULL)
2145 		ifp1 = &sc_if1->arpcom.ac_if;
2146 
2147 	for (;;) {
2148 		status = CSR_READ_4(sc, SK_ISSR);
2149 		if (!(status & sc->sk_intrmask))
2150 			break;
2151 
2152 		/* Handle receive interrupts first. */
2153 		if (status & SK_ISR_RX1_EOF) {
2154 			sk_rxeof(sc_if0);
2155 			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
2156 			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2157 		}
2158 		if (status & SK_ISR_RX2_EOF) {
2159 			sk_rxeof(sc_if1);
2160 			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
2161 			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2162 		}
2163 
2164 		/* Then transmit interrupts. */
2165 		if (status & SK_ISR_TX1_S_EOF) {
2166 			sk_txeof(sc_if0);
2167 			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
2168 			    SK_TXBMU_CLR_IRQ_EOF);
2169 		}
2170 		if (status & SK_ISR_TX2_S_EOF) {
2171 			sk_txeof(sc_if1);
2172 			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
2173 			    SK_TXBMU_CLR_IRQ_EOF);
2174 		}
2175 
2176 		/* Then MAC interrupts. */
2177 		if (status & SK_ISR_MAC1 && ifp0->if_flags & IFF_RUNNING) {
2178 			if (sc->sk_type == SK_GENESIS)
2179 				sk_intr_xmac(sc_if0);
2180 			else
2181 				sk_intr_yukon(sc_if0);
2182 		}
2183 
2184 		if (status & SK_ISR_MAC2 && ifp1->if_flags & IFF_RUNNING) {
2185 			if (sc->sk_type == SK_GENESIS)
2186 				sk_intr_xmac(sc_if1);
2187 			else
2188 				sk_intr_yukon(sc_if1);
2189 		}
2190 
2191 		if (status & SK_ISR_EXTERNAL_REG) {
2192 			if (ifp0 != NULL &&
2193 			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
2194 				sk_intr_bcom(sc_if0);
2195 			if (ifp1 != NULL &&
2196 			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
2197 				sk_intr_bcom(sc_if1);
2198 		}
2199 	}
2200 
2201 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2202 
2203 	if (ifp0 != NULL && ifp0->if_snd.ifq_head != NULL)
2204 		sk_start(ifp0);
2205 	if (ifp1 != NULL && ifp1->if_snd.ifq_head != NULL)
2206 		sk_start(ifp1);
2207 
2208 	SK_UNLOCK(sc);
2209 
2210 	return;
2211 }
2212 
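/*
 * Bring the XMAC out of reset and program it: set up any external
 * (Broadcom) PHY, load the station address, configure broadcast and
 * error-frame handling, enable interrupts and initialize the MAC
 * arbiter for the detected XMAC revision.
 */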
2213 static void
2214 sk_init_xmac(sc_if)
2215 	struct sk_if_softc	*sc_if;
2216 {
2217 	struct sk_softc		*sc;
2218 	struct ifnet		*ifp;
2219 	struct sk_bcom_hack	bhack[] = {
2220 	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
2221 	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
2222 	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
2223 	{ 0, 0 } };
2224 
2225 	sc = sc_if->sk_softc;
2226 	ifp = &sc_if->arpcom.ac_if;
2227 
2228 	/* Unreset the XMAC. */
2229 	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
2230 	DELAY(1000);
2231 
2232 	/* Reset the XMAC's internal state. */
2233 	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2234 
2235 	/* Save the XMAC II revision */
2236 	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
2237 
2238 	/*
2239 	 * Perform additional initialization for external PHYs,
2240 	 * namely for the 1000baseTX cards that use the XMAC's
2241 	 * GMII mode.
2242 	 */
2243 	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2244 		int			i = 0;
2245 		u_int32_t		val;
2246 
2247 		/* Take PHY out of reset. */
2248 		val = sk_win_read_4(sc, SK_GPIO);
2249 		if (sc_if->sk_port == SK_PORT_A)
2250 			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
2251 		else
2252 			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
2253 		sk_win_write_4(sc, SK_GPIO, val);
2254 
2255 		/* Enable GMII mode on the XMAC. */
2256 		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
2257 
2258 		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2259 		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
2260 		DELAY(10000);
2261 		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2262 		    BRGPHY_MII_IMR, 0xFFF0);
2263 
2264 		/*
2265 		 * Early versions of the BCM5400 apparently have
2266 		 * a bug that requires them to have their reserved
2267 		 * registers initialized to some magic values. I don't
2268 		 * know what the numbers do, I'm just the messenger.
2269 		 */
2270 		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
2271 		    == 0x6041) {
2272 			while (bhack[i].reg) {
2273 				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2274 				    bhack[i].reg, bhack[i].val);
2275 				i++;
2276 			}
2277 		}
2278 	}
2279 
2280 	/* Set station address */
2281 	SK_XM_WRITE_2(sc_if, XM_PAR0,
2282 	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
2283 	SK_XM_WRITE_2(sc_if, XM_PAR1,
2284 	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
2285 	SK_XM_WRITE_2(sc_if, XM_PAR2,
2286 	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
2287 	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
2288 
2289 	if (ifp->if_flags & IFF_BROADCAST) {
2290 		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
2291 	} else {
2292 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
2293 	}
2294 
2295 	/* We don't need the FCS appended to the packet. */
2296 	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
2297 
2298 	/* We want short frames padded to 60 bytes. */
2299 	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
2300 
2301 	/*
2302 	 * Enable the reception of all error frames. This is
2303 	 * a necessary evil due to the design of the XMAC. The
2304 	 * XMAC's receive FIFO is only 8K in size, however jumbo
2305 	 * frames can be up to 9000 bytes in length. When bad
2306 	 * frame filtering is enabled, the XMAC's RX FIFO operates
2307 	 * in 'store and forward' mode. For this to work, the
2308 	 * entire frame has to fit into the FIFO, but that means
2309 	 * that jumbo frames larger than 8192 bytes will be
2310 	 * truncated. Disabling all bad frame filtering causes
2311 	 * the RX FIFO to operate in streaming mode, in which
2312 	 * case the XMAC will start transferring frames out of the
2313 	 * RX FIFO as soon as the FIFO threshold is reached.
2314 	 */
2315 	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
2316 	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
2317 	    XM_MODE_RX_INRANGELEN);
2318 
2319 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2320 		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
2321 	else
2322 		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
2323 
2324 	/*
2325 	 * Bump up the transmit threshold. This helps hold off transmit
2326 	 * underruns when we're blasting traffic from both ports at once.
2327 	 */
2328 	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
2329 
2330 	/* Set promiscuous mode */
2331 	sk_setpromisc(sc_if);
2332 
2333 	/* Set multicast filter */
2334 	sk_setmulti(sc_if);
2335 
2336 	/* Clear and enable interrupts */
2337 	SK_XM_READ_2(sc_if, XM_ISR);
2338 	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
2339 		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
2340 	else
2341 		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2342 
2343 	/* Configure MAC arbiter */
2344 	switch (sc_if->sk_xmac_rev) {
2345 	case XM_XMAC_REV_B2:
2346 		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
2347 		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
2348 		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
2349 		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
2350 		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
2351 		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
2352 		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
2353 		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
2354 		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2355 		break;
2356 	case XM_XMAC_REV_C1:
2357 		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
2358 		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
2359 		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
2360 		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
2361 		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
2362 		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
2363 		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
2364 		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
2365 		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2366 		break;
2367 	default:
2368 		break;
2369 	}
2370 	sk_win_write_2(sc, SK_MACARB_CTL,
2371 	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
2372 
2373 	sc_if->sk_link = 1;
2374 
2375 	return;
2376 }
2377 
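/*
 * Reset and program the Yukon GMAC and GPHY: select copper or fiber
 * operation based on the PMD type, set the MAC transmit and serial
 * mode parameters, load the station address and bring the MAC FIFOs
 * online.
 */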
2378 static void
2379 sk_init_yukon(sc_if)
2380 	struct sk_if_softc	*sc_if;
2381 {
2382 	u_int32_t		phy;
2383 	u_int16_t		reg;
2384 	int			i;
2385 
2386 	/* GMAC and GPHY Reset */
2387 	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
2388 	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
2389 	DELAY(1000);
2390 	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR);
2391 	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
2392 	DELAY(1000);
2393 
2394 	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
2395 		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
2396 
2397 	switch (sc_if->sk_softc->sk_pmd) {
2398 	case IFM_1000_SX:
2399 	case IFM_1000_LX:
2400 		phy |= SK_GPHY_FIBER;
2401 		break;
2402 
2403 	case IFM_1000_CX:
2404 	case IFM_1000_T:
2405 		phy |= SK_GPHY_COPPER;
2406 		break;
2407 	}
2408 
2409 	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
2410 	DELAY(1000);
2411 	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
2412 	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
2413 		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
2414 
2415 	/* unused read of the interrupt source register */
2416 	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2417 
2418 	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
2419 
2420 	/* MIB Counter Clear Mode set */
2421 	reg |= YU_PAR_MIB_CLR;
2422 	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2423 
2424 	/* MIB Counter Clear Mode clear */
2425 	reg &= ~YU_PAR_MIB_CLR;
2426 	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2427 
2428 	/* receive control reg */
2429 	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);
2430 
2431 	/* transmit parameter register */
2432 	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
2433 		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
2434 
2435 	/* serial mode register */
2436 	SK_YU_WRITE_2(sc_if, YUKON_SMR, YU_SMR_DATA_BLIND(0x1c) |
2437 		      YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e));
2438 
2439 	/* Setup Yukon's address */
2440 	for (i = 0; i < 3; i++) {
2441 		/* Write Source Address 1 (unicast filter) */
2442 		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
2443 			      sc_if->arpcom.ac_enaddr[i * 2] |
2444 			      sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
2445 	}
2446 
2447 	for (i = 0; i < 3; i++) {
2448 		reg = sk_win_read_2(sc_if->sk_softc,
2449 				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
2450 		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
2451 	}
2452 
2453 	/* Set promiscuous mode */
2454 	sk_setpromisc(sc_if);
2455 
2456 	/* Set multicast filter */
2457 	sk_setmulti(sc_if);
2458 
2459 	/* Program the interrupt masks for the MIB counter overflows */
2460 	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
2461 	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
2462 	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
2463 
2464 	/* Configure RX MAC FIFO */
2465 	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
2466 	SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON);
2467 
2468 	/* Configure TX MAC FIFO */
2469 	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
2470 	SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
2471 }
2472 
2473 /*
2474  * Note that to properly initialize any part of the GEnesis chip,
2475  * you first have to take it out of reset mode.
2476  */
2477 static void
2478 sk_init(xsc)
2479 	void			*xsc;
2480 {
2481 	struct sk_if_softc	*sc_if = xsc;
2482 	struct sk_softc		*sc;
2483 	struct ifnet		*ifp;
2484 	struct mii_data		*mii;
2485 	u_int16_t		reg;
2486 
2487 	SK_IF_LOCK(sc_if);
2488 
2489 	ifp = &sc_if->arpcom.ac_if;
2490 	sc = sc_if->sk_softc;
2491 	mii = device_get_softc(sc_if->sk_miibus);
2492 
2493 	/* Cancel pending I/O and free all RX/TX buffers. */
2494 	sk_stop(sc_if);
2495 
2496 	if (sc->sk_type == SK_GENESIS) {
2497 		/* Configure LINK_SYNC LED */
2498 		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
2499 		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
2500 			SK_LINKLED_LINKSYNC_ON);
2501 
2502 		/* Configure RX LED */
2503 		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
2504 			SK_RXLEDCTL_COUNTER_START);
2505 
2506 		/* Configure TX LED */
2507 		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
2508 			SK_TXLEDCTL_COUNTER_START);
2509 	}
2510 
2511 	/* Configure I2C registers */
2512 
2513 	/* Configure XMAC(s) */
2514 	switch (sc->sk_type) {
2515 	case SK_GENESIS:
2516 		sk_init_xmac(sc_if);
2517 		break;
2518 	case SK_YUKON:
2519 		sk_init_yukon(sc_if);
2520 		break;
2521 	}
2522 	mii_mediachg(mii);
2523 
2524 	if (sc->sk_type == SK_GENESIS) {
2525 		/* Configure MAC FIFOs */
2526 		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
2527 		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
2528 		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
2529 
2530 		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
2531 		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
2532 		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
2533 	}
2534 
2535 	/* Configure transmit arbiter(s) */
2536 	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
2537 	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
2538 
2539 	/* Configure RAMbuffers */
2540 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
2541 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
2542 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
2543 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
2544 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
2545 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
2546 
2547 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
2548 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
2549 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
2550 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
2551 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
2552 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
2553 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
2554 
2555 	/* Configure BMUs */
2556 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
2557 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
2558 	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
2559 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);
2560 
2561 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
2562 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
2563 	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
2564 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);
2565 
2566 	/* Init descriptors */
2567 	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
2568 		printf("sk%d: initialization failed: no "
2569 		    "memory for rx buffers\n", sc_if->sk_unit);
2570 		sk_stop(sc_if);
2571 		SK_IF_UNLOCK(sc_if);
2572 		return;
2573 	}
2574 	sk_init_tx_ring(sc_if);
2575 
2576 	/* Configure interrupt handling */
2577 	CSR_READ_4(sc, SK_ISSR);
2578 	if (sc_if->sk_port == SK_PORT_A)
2579 		sc->sk_intrmask |= SK_INTRS1;
2580 	else
2581 		sc->sk_intrmask |= SK_INTRS2;
2582 
2583 	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
2584 
2585 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2586 
2587 	/* Start BMUs. */
2588 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
2589 
2590 	switch (sc->sk_type) {
2591 	case SK_GENESIS:
2592 		/* Enable XMACs TX and RX state machines */
2593 		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
2594 		SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2595 		break;
2596 	case SK_YUKON:
2597 		reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
2598 		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
2599 		reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN);
2600 		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
		break;
2601 	}
2602 
2603 	ifp->if_flags |= IFF_RUNNING;
2604 	ifp->if_flags &= ~IFF_OACTIVE;
2605 
2606 	SK_IF_UNLOCK(sc_if);
2607 
2608 	return;
2609 }
2610 
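/*
 * Stop the adapter and free any mbufs still allocated to the RX and
 * TX rings: cancel the link timer, put the PHY and MAC back into
 * reset, take the BMUs and RAM buffers offline, turn off the LEDs and
 * disable this port's interrupts.
 */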
2611 static void
2612 sk_stop(sc_if)
2613 	struct sk_if_softc	*sc_if;
2614 {
2615 	int			i;
2616 	struct sk_softc		*sc;
2617 	struct ifnet		*ifp;
2618 
2619 	SK_IF_LOCK(sc_if);
2620 	sc = sc_if->sk_softc;
2621 	ifp = &sc_if->arpcom.ac_if;
2622 
2623 	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);
2624 
2625 	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2626 		u_int32_t		val;
2627 
2628 		/* Put PHY back into reset. */
2629 		val = sk_win_read_4(sc, SK_GPIO);
2630 		if (sc_if->sk_port == SK_PORT_A) {
2631 			val |= SK_GPIO_DIR0;
2632 			val &= ~SK_GPIO_DAT0;
2633 		} else {
2634 			val |= SK_GPIO_DIR2;
2635 			val &= ~SK_GPIO_DAT2;
2636 		}
2637 		sk_win_write_4(sc, SK_GPIO, val);
2638 	}
2639 
2640 	/* Turn off various components of this interface. */
2641 	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2642 	switch (sc->sk_type) {
2643 	case SK_GENESIS:
2644 		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
2645 		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
2646 		break;
2647 	case SK_YUKON:
2648 		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
2649 		SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
2650 		break;
2651 	}
2652 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
2653 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2654 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
2655 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2656 	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
2657 	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2658 	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2659 	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
2660 	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
2661 
2662 	/* Disable interrupts */
2663 	if (sc_if->sk_port == SK_PORT_A)
2664 		sc->sk_intrmask &= ~SK_INTRS1;
2665 	else
2666 		sc->sk_intrmask &= ~SK_INTRS2;
2667 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2668 
2669 	SK_XM_READ_2(sc_if, XM_ISR);
2670 	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2671 
2672 	/* Free RX and TX mbufs still in the queues. */
2673 	for (i = 0; i < SK_RX_RING_CNT; i++) {
2674 		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
2675 			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
2676 			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
2677 		}
2678 	}
2679 
2680 	for (i = 0; i < SK_TX_RING_CNT; i++) {
2681 		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
2682 			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
2683 			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
2684 		}
2685 	}
2686 
2687 	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
2688 	SK_IF_UNLOCK(sc_if);
2689 	return;
2690 }
2691