xref: /freebsd/sys/dev/sk/if_sk.c (revision 77a0943ded95b9e6438f7db70c4a28e4d93946d4)
1 /*
2  * Copyright (c) 1997, 1998, 1999, 2000
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  * $FreeBSD$
33  */
34 
35 /*
36  * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
37  * the SK-984x series adapters, both single port and dual port.
38  * References:
39  * 	The XaQti XMAC II datasheet,
40  *  http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
41  *	The SysKonnect GEnesis manual, http://www.syskonnect.com
42  *
 43  * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
44  * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
45  * convenience to others until Vitesse corrects this problem:
46  *
47  * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
48  *
49  * Written by Bill Paul <wpaul@ee.columbia.edu>
50  * Department of Electrical Engineering
51  * Columbia University, New York City
52  */
53 
54 /*
55  * The SysKonnect gigabit ethernet adapters consist of two main
56  * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
57  * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
58  * components and a PHY while the GEnesis controller provides a PCI
59  * interface with DMA support. Each card may have between 512K and
60  * 2MB of SRAM on board depending on the configuration.
61  *
62  * The SysKonnect GEnesis controller can have either one or two XMAC
63  * chips connected to it, allowing single or dual port NIC configurations.
64  * SysKonnect has the distinction of being the only vendor on the market
65  * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
66  * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
67  * XMAC registers. This driver takes advantage of these features to allow
68  * both XMACs to operate as independent interfaces.
69  */
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/sockio.h>
74 #include <sys/mbuf.h>
75 #include <sys/malloc.h>
76 #include <sys/kernel.h>
77 #include <sys/socket.h>
78 #include <sys/queue.h>
79 
80 #include <net/if.h>
81 #include <net/if_arp.h>
82 #include <net/ethernet.h>
83 #include <net/if_dl.h>
84 #include <net/if_media.h>
85 
86 #include <net/bpf.h>
87 
88 #include <vm/vm.h>              /* for vtophys */
89 #include <vm/pmap.h>            /* for vtophys */
90 #include <machine/bus_pio.h>
91 #include <machine/bus_memio.h>
92 #include <machine/bus.h>
93 #include <machine/resource.h>
94 #include <sys/bus.h>
95 #include <sys/rman.h>
96 
97 #include <dev/mii/mii.h>
98 #include <dev/mii/miivar.h>
99 #include <dev/mii/brgphyreg.h>
100 
101 #include <pci/pcireg.h>
102 #include <pci/pcivar.h>
103 
104 #define SK_USEIOSPACE
105 
106 #include <pci/if_skreg.h>
107 #include <pci/xmaciireg.h>
108 
109 MODULE_DEPEND(sk, miibus, 1, 1, 1);
110 
111 /* "controller miibus0" required.  See GENERIC if you get errors here. */
112 #include "miibus_if.h"
113 
114 #ifndef lint
115 static const char rcsid[] =
116   "$FreeBSD$";
117 #endif
118 
119 static struct sk_type sk_devs[] = {
120 	{ SK_VENDORID, SK_DEVICEID_GE, "SysKonnect Gigabit Ethernet" },
121 	{ 0, 0, NULL }
122 };
123 
124 static int sk_probe		__P((device_t));
125 static int sk_attach		__P((device_t));
126 static int sk_detach		__P((device_t));
127 static int sk_detach_xmac	__P((device_t));
128 static int sk_probe_xmac	__P((device_t));
129 static int sk_attach_xmac	__P((device_t));
130 static void sk_tick		__P((void *));
131 static void sk_intr		__P((void *));
132 static void sk_intr_xmac	__P((struct sk_if_softc *));
133 static void sk_intr_bcom	__P((struct sk_if_softc *));
134 static void sk_rxeof		__P((struct sk_if_softc *));
135 static void sk_txeof		__P((struct sk_if_softc *));
136 static int sk_encap		__P((struct sk_if_softc *, struct mbuf *,
137 					u_int32_t *));
138 static void sk_start		__P((struct ifnet *));
139 static int sk_ioctl		__P((struct ifnet *, u_long, caddr_t));
140 static void sk_init		__P((void *));
141 static void sk_init_xmac	__P((struct sk_if_softc *));
142 static void sk_stop		__P((struct sk_if_softc *));
143 static void sk_watchdog		__P((struct ifnet *));
144 static void sk_shutdown		__P((device_t));
145 static int sk_ifmedia_upd	__P((struct ifnet *));
146 static void sk_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
147 static void sk_reset		__P((struct sk_softc *));
148 static int sk_newbuf		__P((struct sk_if_softc *,
149 					struct sk_chain *, struct mbuf *));
150 static int sk_alloc_jumbo_mem	__P((struct sk_if_softc *));
151 static void *sk_jalloc		__P((struct sk_if_softc *));
152 static void sk_jfree		__P((caddr_t, void *));
153 static int sk_init_rx_ring	__P((struct sk_if_softc *));
154 static void sk_init_tx_ring	__P((struct sk_if_softc *));
155 static u_int32_t sk_win_read_4	__P((struct sk_softc *, int));
156 static u_int16_t sk_win_read_2	__P((struct sk_softc *, int));
157 static u_int8_t sk_win_read_1	__P((struct sk_softc *, int));
158 static void sk_win_write_4	__P((struct sk_softc *, int, u_int32_t));
159 static void sk_win_write_2	__P((struct sk_softc *, int, u_int32_t));
160 static void sk_win_write_1	__P((struct sk_softc *, int, u_int32_t));
161 static u_int8_t sk_vpd_readbyte	__P((struct sk_softc *, int));
162 static void sk_vpd_read_res	__P((struct sk_softc *,
163 					struct vpd_res *, int));
164 static void sk_vpd_read		__P((struct sk_softc *));
165 
166 static int sk_miibus_readreg	__P((device_t, int, int));
167 static int sk_miibus_writereg	__P((device_t, int, int, int));
168 static void sk_miibus_statchg	__P((device_t));
169 
170 static u_int32_t sk_calchash	__P((caddr_t));
171 static void sk_setfilt		__P((struct sk_if_softc *, caddr_t, int));
172 static void sk_setmulti		__P((struct sk_if_softc *));
173 
174 #ifdef SK_USEIOSPACE
175 #define SK_RES		SYS_RES_IOPORT
176 #define SK_RID		SK_PCI_LOIO
177 #else
178 #define SK_RES		SYS_RES_MEMORY
179 #define SK_RID		SK_PCI_LOMEM
180 #endif
181 
182 /*
183  * Note that we have newbus methods for both the GEnesis controller
184  * itself and the XMAC(s). The XMACs are children of the GEnesis, and
185  * the miibus code is a child of the XMACs. We need to do it this way
186  * so that the miibus drivers can access the PHY registers on the
187  * right PHY. It's not quite what I had in mind, but it's the only
188  * design that achieves the desired effect.
189  */
190 static device_method_t skc_methods[] = {
191 	/* Device interface */
192 	DEVMETHOD(device_probe,		sk_probe),
193 	DEVMETHOD(device_attach,	sk_attach),
194 	DEVMETHOD(device_detach,	sk_detach),
195 	DEVMETHOD(device_shutdown,	sk_shutdown),
196 
197 	/* bus interface */
198 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
199 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
200 
201 	{ 0, 0 }
202 };
203 
204 static driver_t skc_driver = {
205 	"skc",
206 	skc_methods,
207 	sizeof(struct sk_softc)
208 };
209 
210 static devclass_t skc_devclass;
211 
212 static device_method_t sk_methods[] = {
213 	/* Device interface */
214 	DEVMETHOD(device_probe,		sk_probe_xmac),
215 	DEVMETHOD(device_attach,	sk_attach_xmac),
216 	DEVMETHOD(device_detach,	sk_detach_xmac),
217 	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
218 
219 	/* bus interface */
220 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
221 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
222 
223 	/* MII interface */
224 	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
225 	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
226 	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),
227 
228 	{ 0, 0 }
229 };
230 
231 static driver_t sk_driver = {
232 	"sk",
233 	sk_methods,
234 	sizeof(struct sk_if_softc)
235 };
236 
237 static devclass_t sk_devclass;
238 
239 DRIVER_MODULE(if_sk, pci, skc_driver, skc_devclass, 0, 0);
240 DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
241 DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
242 
243 #define SK_SETBIT(sc, reg, x)		\
244 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
245 
246 #define SK_CLRBIT(sc, reg, x)		\
247 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
248 
249 #define SK_WIN_SETBIT_4(sc, reg, x)	\
250 	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
251 
252 #define SK_WIN_CLRBIT_4(sc, reg, x)	\
253 	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
254 
255 #define SK_WIN_SETBIT_2(sc, reg, x)	\
256 	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
257 
258 #define SK_WIN_CLRBIT_2(sc, reg, x)	\
259 	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
260 
261 static u_int32_t sk_win_read_4(sc, reg)
262 	struct sk_softc		*sc;
263 	int			reg;
264 {
265 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
266 	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
267 }
268 
269 static u_int16_t sk_win_read_2(sc, reg)
270 	struct sk_softc		*sc;
271 	int			reg;
272 {
273 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
274 	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
275 }
276 
277 static u_int8_t sk_win_read_1(sc, reg)
278 	struct sk_softc		*sc;
279 	int			reg;
280 {
281 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
282 	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
283 }
284 
285 static void sk_win_write_4(sc, reg, val)
286 	struct sk_softc		*sc;
287 	int			reg;
288 	u_int32_t		val;
289 {
290 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
291 	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
292 	return;
293 }
294 
295 static void sk_win_write_2(sc, reg, val)
296 	struct sk_softc		*sc;
297 	int			reg;
298 	u_int32_t		val;
299 {
300 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
301 	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), (u_int32_t)val);
302 	return;
303 }
304 
305 static void sk_win_write_1(sc, reg, val)
306 	struct sk_softc		*sc;
307 	int			reg;
308 	u_int32_t		val;
309 {
310 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
311 	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
312 	return;
313 }
314 
315 /*
316  * The VPD EEPROM contains Vital Product Data, as suggested in
317  * the PCI 2.1 specification. The VPD data is separared into areas
318  * denoted by resource IDs. The SysKonnect VPD contains an ID string
319  * resource (the name of the adapter), a read-only area resource
320  * containing various key/data fields and a read/write area which
321  * can be used to store asset management information or log messages.
322  * We read the ID string and read-only into buffers attached to
323  * the controller softc structure for later use. At the moment,
324  * we only use the ID string during sk_attach().
325  */
326 static u_int8_t sk_vpd_readbyte(sc, addr)
327 	struct sk_softc		*sc;
328 	int			addr;
329 {
330 	int			i;
331 
332 	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
333 	for (i = 0; i < SK_TIMEOUT; i++) {
334 		DELAY(1);
335 		if (sk_win_read_2(sc,
336 		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
337 			break;
338 	}
339 
340 	if (i == SK_TIMEOUT)
341 		return(0);
342 
343 	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
344 }
345 
346 static void sk_vpd_read_res(sc, res, addr)
347 	struct sk_softc		*sc;
348 	struct vpd_res		*res;
349 	int			addr;
350 {
351 	int			i;
352 	u_int8_t		*ptr;
353 
354 	ptr = (u_int8_t *)res;
355 	for (i = 0; i < sizeof(struct vpd_res); i++)
356 		ptr[i] = sk_vpd_readbyte(sc, i + addr);
357 
358 	return;
359 }
360 
361 static void sk_vpd_read(sc)
362 	struct sk_softc		*sc;
363 {
364 	int			pos = 0, i;
365 	struct vpd_res		res;
366 
367 	if (sc->sk_vpd_prodname != NULL)
368 		free(sc->sk_vpd_prodname, M_DEVBUF);
369 	if (sc->sk_vpd_readonly != NULL)
370 		free(sc->sk_vpd_readonly, M_DEVBUF);
371 	sc->sk_vpd_prodname = NULL;
372 	sc->sk_vpd_readonly = NULL;
373 
374 	sk_vpd_read_res(sc, &res, pos);
375 
376 	if (res.vr_id != VPD_RES_ID) {
377 		printf("skc%d: bad VPD resource id: expected %x got %x\n",
378 		    sc->sk_unit, VPD_RES_ID, res.vr_id);
379 		return;
380 	}
381 
382 	pos += sizeof(res);
383 	sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
384 	for (i = 0; i < res.vr_len; i++)
385 		sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
386 	sc->sk_vpd_prodname[i] = '\0';
387 	pos += i;
388 
389 	sk_vpd_read_res(sc, &res, pos);
390 
391 	if (res.vr_id != VPD_RES_READ) {
392 		printf("skc%d: bad VPD resource id: expected %x got %x\n",
393 		    sc->sk_unit, VPD_RES_READ, res.vr_id);
394 		return;
395 	}
396 
397 	pos += sizeof(res);
398 	sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
399 	for (i = 0; i < res.vr_len + 1; i++)
400 		sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
401 
402 	return;
403 }
404 
405 static int sk_miibus_readreg(dev, phy, reg)
406 	device_t		dev;
407 	int			phy, reg;
408 {
409 	struct sk_if_softc	*sc_if;
410 	int			i;
411 
412 	sc_if = device_get_softc(dev);
413 
414 	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
415 		return(0);
416 
417 	SK_IF_LOCK(sc_if);
418 
419 	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
420 	SK_XM_READ_2(sc_if, XM_PHY_DATA);
421 	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
422 		for (i = 0; i < SK_TIMEOUT; i++) {
423 			DELAY(1);
424 			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
425 			    XM_MMUCMD_PHYDATARDY)
426 				break;
427 		}
428 
429 		if (i == SK_TIMEOUT) {
430 			printf("sk%d: phy failed to come ready\n",
431 			    sc_if->sk_unit);
432 			return(0);
433 		}
434 	}
435 	DELAY(1);
436 	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
437 	SK_IF_UNLOCK(sc_if);
438 	return(i);
439 }
440 
441 static int sk_miibus_writereg(dev, phy, reg, val)
442 	device_t		dev;
443 	int			phy, reg, val;
444 {
445 	struct sk_if_softc	*sc_if;
446 	int			i;
447 
448 	sc_if = device_get_softc(dev);
449 	SK_IF_LOCK(sc_if);
450 
451 	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
452 	for (i = 0; i < SK_TIMEOUT; i++) {
453 		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
454 			break;
455 	}
456 
457 	if (i == SK_TIMEOUT) {
458 		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
459 		return(ETIMEDOUT);
460 	}
461 
462 	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
463 	for (i = 0; i < SK_TIMEOUT; i++) {
464 		DELAY(1);
465 		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
466 			break;
467 	}
468 
469 	SK_IF_UNLOCK(sc_if);
470 
471 	if (i == SK_TIMEOUT)
472 		printf("sk%d: phy write timed out\n", sc_if->sk_unit);
473 
474 	return(0);
475 }
476 
477 static void sk_miibus_statchg(dev)
478 	device_t		dev;
479 {
480 	struct sk_if_softc	*sc_if;
481 	struct mii_data		*mii;
482 
483 	sc_if = device_get_softc(dev);
484 	mii = device_get_softc(sc_if->sk_miibus);
485 	SK_IF_LOCK(sc_if);
486 	/*
487 	 * If this is a GMII PHY, manually set the XMAC's
488 	 * duplex mode accordingly.
489 	 */
490 	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
491 		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
492 			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
493 		} else {
494 			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
495 		}
496 	}
497 	SK_IF_UNLOCK(sc_if);
498 
499 	return;
500 }
501 
502 #define SK_POLY		0xEDB88320
503 #define SK_BITS		6
504 
505 static u_int32_t sk_calchash(addr)
506 	caddr_t			addr;
507 {
508 	u_int32_t		idx, bit, data, crc;
509 
510 	/* Compute CRC for the address value. */
511 	crc = 0xFFFFFFFF; /* initial value */
512 
513 	for (idx = 0; idx < 6; idx++) {
514 		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
515 			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? SK_POLY : 0);
516 	}
517 
518 	return (~crc & ((1 << SK_BITS) - 1));
519 }
520 
521 static void sk_setfilt(sc_if, addr, slot)
522 	struct sk_if_softc	*sc_if;
523 	caddr_t			addr;
524 	int			slot;
525 {
526 	int			base;
527 
528 	base = XM_RXFILT_ENTRY(slot);
529 
530 	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
531 	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
532 	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
533 
534 	return;
535 }
536 
/*
 * Program the XMAC's multicast filters from the interface's
 * multicast address list: the first XM_RXFILT_MAX - 1 groups go
 * into the perfect filter, the rest into the 64-bit hash table
 * (registers XM_MAR0/XM_MAR2, slot chosen by sk_calchash()).
 */
static void sk_setmulti(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct ifnet		*ifp;
	u_int32_t		hashes[2] = { 0, 0 };
	int			h, i;
	struct ifmultiaddr	*ifma;
	u_int8_t		dummy[] = { 0, 0, 0, 0, 0 ,0 };

	ifp = &sc_if->arpcom.ac_if;

	/*
	 * First, zot all the existing filters.  Filter slots start
	 * at 1: slot 0 is presumably the station address -- confirm
	 * against sk_init_xmac().
	 */
	for (i = 1; i < XM_RXFILT_MAX; i++)
		sk_setfilt(sc_if, (caddr_t)&dummy, i);
	SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
	SK_XM_WRITE_4(sc_if, XM_MAR2, 0);

	/* Now program new ones. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		/* Accept all multicast: set every hash bit. */
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		/* First find the tail of the list. */
		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
					ifma = ifma->ifma_link.le_next) {
			if (ifma->ifma_link.le_next == NULL)
				break;
		}
		/*
		 * Now traverse the list backwards.
		 * NOTE(review): this walk relies on <sys/queue.h> LIST
		 * internals -- le_prev points at the previous entry's
		 * le_next field, and the cast back to struct ifmultiaddr *
		 * only works if ifma_link is the first member.  Verify
		 * before reusing this pattern.
		 */
		for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs;
			ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (i < XM_RXFILT_MAX) {
				sk_setfilt(sc_if,
			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
				i++;
				continue;
			}

			h = sk_calchash(
				LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
	}

	/* Enable both filter modes and load the hash table. */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
	    XM_MODE_RX_USE_PERFECT);
	SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
	SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);

	return;
}
599 
600 static int sk_init_rx_ring(sc_if)
601 	struct sk_if_softc	*sc_if;
602 {
603 	struct sk_chain_data	*cd;
604 	struct sk_ring_data	*rd;
605 	int			i;
606 
607 	cd = &sc_if->sk_cdata;
608 	rd = sc_if->sk_rdata;
609 
610 	bzero((char *)rd->sk_rx_ring,
611 	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
612 
613 	for (i = 0; i < SK_RX_RING_CNT; i++) {
614 		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
615 		if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
616 			return(ENOBUFS);
617 		if (i == (SK_RX_RING_CNT - 1)) {
618 			cd->sk_rx_chain[i].sk_next =
619 			    &cd->sk_rx_chain[0];
620 			rd->sk_rx_ring[i].sk_next =
621 			    vtophys(&rd->sk_rx_ring[0]);
622 		} else {
623 			cd->sk_rx_chain[i].sk_next =
624 			    &cd->sk_rx_chain[i + 1];
625 			rd->sk_rx_ring[i].sk_next =
626 			    vtophys(&rd->sk_rx_ring[i + 1]);
627 		}
628 	}
629 
630 	sc_if->sk_cdata.sk_rx_prod = 0;
631 	sc_if->sk_cdata.sk_rx_cons = 0;
632 
633 	return(0);
634 }
635 
636 static void sk_init_tx_ring(sc_if)
637 	struct sk_if_softc	*sc_if;
638 {
639 	struct sk_chain_data	*cd;
640 	struct sk_ring_data	*rd;
641 	int			i;
642 
643 	cd = &sc_if->sk_cdata;
644 	rd = sc_if->sk_rdata;
645 
646 	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
647 	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
648 
649 	for (i = 0; i < SK_TX_RING_CNT; i++) {
650 		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
651 		if (i == (SK_TX_RING_CNT - 1)) {
652 			cd->sk_tx_chain[i].sk_next =
653 			    &cd->sk_tx_chain[0];
654 			rd->sk_tx_ring[i].sk_next =
655 			    vtophys(&rd->sk_tx_ring[0]);
656 		} else {
657 			cd->sk_tx_chain[i].sk_next =
658 			    &cd->sk_tx_chain[i + 1];
659 			rd->sk_tx_ring[i].sk_next =
660 			    vtophys(&rd->sk_tx_ring[i + 1]);
661 		}
662 	}
663 
664 	sc_if->sk_cdata.sk_tx_prod = 0;
665 	sc_if->sk_cdata.sk_tx_cons = 0;
666 	sc_if->sk_cdata.sk_tx_cnt = 0;
667 
668 	return;
669 }
670 
671 static int sk_newbuf(sc_if, c, m)
672 	struct sk_if_softc	*sc_if;
673 	struct sk_chain		*c;
674 	struct mbuf		*m;
675 {
676 	struct mbuf		*m_new = NULL;
677 	struct sk_rx_desc	*r;
678 
679 	if (m == NULL) {
680 		caddr_t			*buf = NULL;
681 
682 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
683 		if (m_new == NULL) {
684 			printf("sk%d: no memory for rx list -- "
685 			    "packet dropped!\n", sc_if->sk_unit);
686 			return(ENOBUFS);
687 		}
688 
689 		/* Allocate the jumbo buffer */
690 		buf = sk_jalloc(sc_if);
691 		if (buf == NULL) {
692 			m_freem(m_new);
693 #ifdef SK_VERBOSE
694 			printf("sk%d: jumbo allocation failed "
695 			    "-- packet dropped!\n", sc_if->sk_unit);
696 #endif
697 			return(ENOBUFS);
698 		}
699 
700 		/* Attach the buffer to the mbuf */
701 		MEXTADD(m_new, buf, SK_JLEN, sk_jfree,
702 		    (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV);
703 		m_new->m_data = (void *)buf;
704 		m_new->m_pkthdr.len = m_new->m_len = SK_JLEN;
705 	} else {
706 		/*
707 	 	 * We're re-using a previously allocated mbuf;
708 		 * be sure to re-init pointers and lengths to
709 		 * default values.
710 		 */
711 		m_new = m;
712 		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
713 		m_new->m_data = m_new->m_ext.ext_buf;
714 	}
715 
716 	/*
717 	 * Adjust alignment so packet payload begins on a
718 	 * longword boundary. Mandatory for Alpha, useful on
719 	 * x86 too.
720 	 */
721 	m_adj(m_new, ETHER_ALIGN);
722 
723 	r = c->sk_desc;
724 	c->sk_mbuf = m_new;
725 	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
726 	r->sk_ctl = m_new->m_len | SK_RXSTAT;
727 
728 	return(0);
729 }
730 
731 /*
732  * Allocate jumbo buffer storage. The SysKonnect adapters support
733  * "jumbograms" (9K frames), although SysKonnect doesn't currently
734  * use them in their drivers. In order for us to use them, we need
735  * large 9K receive buffers, however standard mbuf clusters are only
736  * 2048 bytes in size. Consequently, we need to allocate and manage
737  * our own jumbo buffer pool. Fortunately, this does not require an
738  * excessive amount of additional code.
739  */
740 static int sk_alloc_jumbo_mem(sc_if)
741 	struct sk_if_softc	*sc_if;
742 {
743 	caddr_t			ptr;
744 	register int		i;
745 	struct sk_jpool_entry   *entry;
746 
747 	/* Grab a big chunk o' storage. */
748 	sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
749 	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
750 
751 	if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
752 		printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
753 		return(ENOBUFS);
754 	}
755 
756 	SLIST_INIT(&sc_if->sk_jfree_listhead);
757 	SLIST_INIT(&sc_if->sk_jinuse_listhead);
758 
759 	/*
760 	 * Now divide it up into 9K pieces and save the addresses
761 	 * in an array.
762 	 */
763 	ptr = sc_if->sk_cdata.sk_jumbo_buf;
764 	for (i = 0; i < SK_JSLOTS; i++) {
765 		sc_if->sk_cdata.sk_jslots[i] = ptr;
766 		ptr += SK_JLEN;
767 		entry = malloc(sizeof(struct sk_jpool_entry),
768 		    M_DEVBUF, M_NOWAIT);
769 		if (entry == NULL) {
770 			free(sc_if->sk_cdata.sk_jumbo_buf, M_DEVBUF);
771 			sc_if->sk_cdata.sk_jumbo_buf = NULL;
772 			printf("sk%d: no memory for jumbo "
773 			    "buffer queue!\n", sc_if->sk_unit);
774 			return(ENOBUFS);
775 		}
776 		entry->slot = i;
777 		SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
778 		    entry, jpool_entries);
779 	}
780 
781 	return(0);
782 }
783 
784 /*
785  * Allocate a jumbo buffer.
786  */
787 static void *sk_jalloc(sc_if)
788 	struct sk_if_softc	*sc_if;
789 {
790 	struct sk_jpool_entry   *entry;
791 
792 	entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
793 
794 	if (entry == NULL) {
795 #ifdef SK_VERBOSE
796 		printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
797 #endif
798 		return(NULL);
799 	}
800 
801 	SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
802 	SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
803 	return(sc_if->sk_cdata.sk_jslots[entry->slot]);
804 }
805 
806 /*
807  * Release a jumbo buffer.
808  */
809 static void sk_jfree(buf, args)
810 	caddr_t			buf;
811 	void			*args;
812 {
813 	struct sk_if_softc	*sc_if;
814 	int		        i;
815 	struct sk_jpool_entry   *entry;
816 
817 	/* Extract the softc struct pointer. */
818 	sc_if = (struct sk_if_softc *)args;
819 
820 	if (sc_if == NULL)
821 		panic("sk_jfree: didn't get softc pointer!");
822 
823 	/* calculate the slot this buffer belongs to */
824 	i = ((vm_offset_t)buf
825 	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
826 
827 	if ((i < 0) || (i >= SK_JSLOTS))
828 		panic("sk_jfree: asked to free buffer that we don't manage!");
829 
830 	entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
831 	if (entry == NULL)
832 		panic("sk_jfree: buffer not in use!");
833 	entry->slot = i;
834 	SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
835 	SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);
836 
837 	return;
838 }
839 
840 /*
841  * Set media options.
842  */
843 static int sk_ifmedia_upd(ifp)
844 	struct ifnet		*ifp;
845 {
846 	struct sk_if_softc	*sc_if;
847 	struct mii_data		*mii;
848 
849 	sc_if = ifp->if_softc;
850 	mii = device_get_softc(sc_if->sk_miibus);
851 	sk_init(sc_if);
852 	mii_mediachg(mii);
853 
854 	return(0);
855 }
856 
857 /*
858  * Report current media status.
859  */
860 static void sk_ifmedia_sts(ifp, ifmr)
861 	struct ifnet		*ifp;
862 	struct ifmediareq	*ifmr;
863 {
864 	struct sk_if_softc	*sc_if;
865 	struct mii_data		*mii;
866 
867 	sc_if = ifp->if_softc;
868 	mii = device_get_softc(sc_if->sk_miibus);
869 
870 	mii_pollstat(mii);
871 	ifmr->ifm_active = mii->mii_media_active;
872 	ifmr->ifm_status = mii->mii_media_status;
873 
874 	return;
875 }
876 
/*
 * Handle socket ioctl requests for this interface.  The whole
 * handler runs under the per-interface lock.  Unknown commands
 * return EINVAL.
 */
static int sk_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct sk_if_softc	*sc_if = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			error = 0;
	struct mii_data		*mii;

	SK_IF_LOCK(sc_if);

	switch(command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		/* Any MTU up to the jumbo-frame maximum is accepted. */
		if (ifr->ifr_mtu > SK_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			/* Re-init so the chip picks up the new MTU. */
			sk_init(sc_if);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC flag changed while running,
			 * toggle the receiver mode bit directly instead
			 * of doing a full re-init.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc_if->sk_if_flags & IFF_PROMISC)) {
				SK_XM_SETBIT_4(sc_if, XM_MODE,
				    XM_MODE_RX_PROMISC);
				sk_setmulti(sc_if);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc_if->sk_if_flags & IFF_PROMISC) {
				SK_XM_CLRBIT_4(sc_if, XM_MODE,
				    XM_MODE_RX_PROMISC);
				sk_setmulti(sc_if);
			} else
				sk_init(sc_if);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sk_stop(sc_if);
		}
		/* Remember the flags so the next call can diff them. */
		sc_if->sk_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		sk_setmulti(sc_if);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Media requests are delegated to the MII layer. */
		mii = device_get_softc(sc_if->sk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	SK_IF_UNLOCK(sc_if);

	return(error);
}
944 
945 /*
946  * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
947  * IDs against our list and return a device name if we find a match.
948  */
949 static int sk_probe(dev)
950 	device_t		dev;
951 {
952 	struct sk_type		*t;
953 
954 	t = sk_devs;
955 
956 	while(t->sk_name != NULL) {
957 		if ((pci_get_vendor(dev) == t->sk_vid) &&
958 		    (pci_get_device(dev) == t->sk_did)) {
959 			device_set_desc(dev, t->sk_name);
960 			return(0);
961 		}
962 		t++;
963 	}
964 
965 	return(ENXIO);
966 }
967 
/*
 * Force the GEnesis into reset, then bring it out of reset.
 */
static void sk_reset(sc)
	struct sk_softc		*sc;
{
	/* Assert software and master reset, then release both. */
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_RESET);
	DELAY(1000);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_UNRESET);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_UNRESET);

	/* Configure packet arbiter */
	sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
	sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
         * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents 18.825ns, so to specify a timeout in
	 * microseconds, we have to multiply by 54.
	 */
        sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
        sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
        sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

	return;
}
1005 
1006 static int sk_probe_xmac(dev)
1007 	device_t		dev;
1008 {
1009 	/*
1010 	 * Not much to do here. We always know there will be
1011 	 * at least one XMAC present, and if there are two,
1012 	 * sk_attach() will create a second device instance
1013 	 * for us.
1014 	 */
1015 	device_set_desc(dev, "XaQti Corp. XMAC II");
1016 
1017 	return(0);
1018 }
1019 
1020 /*
1021  * Each XMAC chip is attached as a separate logical IP interface.
1022  * Single port cards will have only one logical interface of course.
1023  */
1024 static int sk_attach_xmac(dev)
1025 	device_t		dev;
1026 {
1027 	struct sk_softc		*sc;
1028 	struct sk_if_softc	*sc_if;
1029 	struct ifnet		*ifp;
1030 	int			i, port;
1031 
1032 	if (dev == NULL)
1033 		return(EINVAL);
1034 
1035 	sc_if = device_get_softc(dev);
1036 	sc = device_get_softc(device_get_parent(dev));
1037 	SK_LOCK(sc);
1038 	port = *(int *)device_get_ivars(dev);
1039 	free(device_get_ivars(dev), M_DEVBUF);
1040 	device_set_ivars(dev, NULL);
1041 	sc_if->sk_dev = dev;
1042 
1043 	bzero((char *)sc_if, sizeof(struct sk_if_softc));
1044 
1045 	sc_if->sk_dev = dev;
1046 	sc_if->sk_unit = device_get_unit(dev);
1047 	sc_if->sk_port = port;
1048 	sc_if->sk_softc = sc;
1049 	sc->sk_if[port] = sc_if;
1050 	if (port == SK_PORT_A)
1051 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1052 	if (port == SK_PORT_B)
1053 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1054 
1055 	/*
1056 	 * Get station address for this interface. Note that
1057 	 * dual port cards actually come with three station
1058 	 * addresses: one for each port, plus an extra. The
1059 	 * extra one is used by the SysKonnect driver software
1060 	 * as a 'virtual' station address for when both ports
1061 	 * are operating in failover mode. Currently we don't
1062 	 * use this extra address.
1063 	 */
1064 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1065 		sc_if->arpcom.ac_enaddr[i] =
1066 		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1067 
1068 	printf("sk%d: Ethernet address: %6D\n",
1069 	    sc_if->sk_unit, sc_if->arpcom.ac_enaddr, ":");
1070 
1071 	/*
1072 	 * Set up RAM buffer addresses. The NIC will have a certain
1073 	 * amount of SRAM on it, somewhere between 512K and 2MB. We
1074 	 * need to divide this up a) between the transmitter and
1075  	 * receiver and b) between the two XMACs, if this is a
1076 	 * dual port NIC. Our algotithm is to divide up the memory
1077 	 * evenly so that everyone gets a fair share.
1078 	 */
1079 	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1080 		u_int32_t		chunk, val;
1081 
1082 		chunk = sc->sk_ramsize / 2;
1083 		val = sc->sk_rboff / sizeof(u_int64_t);
1084 		sc_if->sk_rx_ramstart = val;
1085 		val += (chunk / sizeof(u_int64_t));
1086 		sc_if->sk_rx_ramend = val - 1;
1087 		sc_if->sk_tx_ramstart = val;
1088 		val += (chunk / sizeof(u_int64_t));
1089 		sc_if->sk_tx_ramend = val - 1;
1090 	} else {
1091 		u_int32_t		chunk, val;
1092 
1093 		chunk = sc->sk_ramsize / 4;
1094 		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1095 		    sizeof(u_int64_t);
1096 		sc_if->sk_rx_ramstart = val;
1097 		val += (chunk / sizeof(u_int64_t));
1098 		sc_if->sk_rx_ramend = val - 1;
1099 		sc_if->sk_tx_ramstart = val;
1100 		val += (chunk / sizeof(u_int64_t));
1101 		sc_if->sk_tx_ramend = val - 1;
1102 	}
1103 
1104 	/* Read and save PHY type and set PHY address */
1105 	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1106 	switch(sc_if->sk_phytype) {
1107 	case SK_PHYTYPE_XMAC:
1108 		sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1109 		break;
1110 	case SK_PHYTYPE_BCOM:
1111 		sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1112 		break;
1113 	default:
1114 		printf("skc%d: unsupported PHY type: %d\n",
1115 		    sc->sk_unit, sc_if->sk_phytype);
1116 		SK_UNLOCK(sc);
1117 		return(ENODEV);
1118 	}
1119 
1120 	/* Allocate the descriptor queues. */
1121 	sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
1122 	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1123 
1124 	if (sc_if->sk_rdata == NULL) {
1125 		printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
1126 		sc->sk_if[port] = NULL;
1127 		SK_UNLOCK(sc);
1128 		return(ENOMEM);
1129 	}
1130 
1131 	bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));
1132 
1133 	/* Try to allocate memory for jumbo buffers. */
1134 	if (sk_alloc_jumbo_mem(sc_if)) {
1135 		printf("sk%d: jumbo buffer allocation failed\n",
1136 		    sc_if->sk_unit);
1137 		contigfree(sc_if->sk_rdata,
1138 		    sizeof(struct sk_ring_data), M_DEVBUF);
1139 		sc->sk_if[port] = NULL;
1140 		SK_UNLOCK(sc);
1141 		return(ENOMEM);
1142 	}
1143 
1144 	ifp = &sc_if->arpcom.ac_if;
1145 	ifp->if_softc = sc_if;
1146 	ifp->if_unit = sc_if->sk_unit;
1147 	ifp->if_name = "sk";
1148 	ifp->if_mtu = ETHERMTU;
1149 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1150 	ifp->if_ioctl = sk_ioctl;
1151 	ifp->if_output = ether_output;
1152 	ifp->if_start = sk_start;
1153 	ifp->if_watchdog = sk_watchdog;
1154 	ifp->if_init = sk_init;
1155 	ifp->if_baudrate = 1000000000;
1156 	ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1;
1157 
1158 	/*
1159 	 * Do miibus setup.
1160 	 */
1161 	sk_init_xmac(sc_if);
1162 	if (mii_phy_probe(dev, &sc_if->sk_miibus,
1163 	    sk_ifmedia_upd, sk_ifmedia_sts)) {
1164 		printf("skc%d: no PHY found!\n", sc_if->sk_unit);
1165 		contigfree(sc_if->sk_rdata,
1166 		    sizeof(struct sk_ring_data), M_DEVBUF);
1167 		SK_UNLOCK(sc);
1168 		return(ENXIO);
1169 	}
1170 
1171 	/*
1172 	 * Call MI attach routine.
1173 	 */
1174 	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
1175 	callout_handle_init(&sc_if->sk_tick_ch);
1176 
1177 	SK_UNLOCK(sc);
1178 
1179 	return(0);
1180 }
1181 
1182 /*
1183  * Attach the interface. Allocate softc structures, do ifmedia
1184  * setup and ethernet/BPF attach.
1185  */
1186 static int sk_attach(dev)
1187 	device_t		dev;
1188 {
1189 	u_int32_t		command;
1190 	struct sk_softc		*sc;
1191 	int			unit, error = 0, rid, *port;
1192 
1193 	sc = device_get_softc(dev);
1194 	unit = device_get_unit(dev);
1195 	bzero(sc, sizeof(struct sk_softc));
1196 
1197 	/*
1198 	 * Handle power management nonsense.
1199 	 */
1200 	command = pci_read_config(dev, SK_PCI_CAPID, 4) & 0x000000FF;
1201 	if (command == 0x01) {
1202 
1203 		command = pci_read_config(dev, SK_PCI_PWRMGMTCTRL, 4);
1204 		if (command & SK_PSTATE_MASK) {
1205 			u_int32_t		iobase, membase, irq;
1206 
1207 			/* Save important PCI config data. */
1208 			iobase = pci_read_config(dev, SK_PCI_LOIO, 4);
1209 			membase = pci_read_config(dev, SK_PCI_LOMEM, 4);
1210 			irq = pci_read_config(dev, SK_PCI_INTLINE, 4);
1211 
1212 			/* Reset the power state. */
1213 			printf("skc%d: chip is in D%d power mode "
1214 			"-- setting to D0\n", unit, command & SK_PSTATE_MASK);
1215 			command &= 0xFFFFFFFC;
1216 			pci_write_config(dev, SK_PCI_PWRMGMTCTRL, command, 4);
1217 
1218 			/* Restore PCI config data. */
1219 			pci_write_config(dev, SK_PCI_LOIO, iobase, 4);
1220 			pci_write_config(dev, SK_PCI_LOMEM, membase, 4);
1221 			pci_write_config(dev, SK_PCI_INTLINE, irq, 4);
1222 		}
1223 	}
1224 
1225 	/*
1226 	 * Map control/status registers.
1227 	 */
1228 	command = pci_read_config(dev, PCIR_COMMAND, 4);
1229 	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
1230 	pci_write_config(dev, PCIR_COMMAND, command, 4);
1231 	command = pci_read_config(dev, PCIR_COMMAND, 4);
1232 
1233 #ifdef SK_USEIOSPACE
1234 	if (!(command & PCIM_CMD_PORTEN)) {
1235 		printf("skc%d: failed to enable I/O ports!\n", unit);
1236 		error = ENXIO;
1237 		goto fail;
1238 	}
1239 #else
1240 	if (!(command & PCIM_CMD_MEMEN)) {
1241 		printf("skc%d: failed to enable memory mapping!\n", unit);
1242 		error = ENXIO;
1243 		goto fail;
1244 	}
1245 #endif
1246 
1247 	rid = SK_RID;
1248 	sc->sk_res = bus_alloc_resource(dev, SK_RES, &rid,
1249 	    0, ~0, 1, RF_ACTIVE);
1250 
1251 	if (sc->sk_res == NULL) {
1252 		printf("sk%d: couldn't map ports/memory\n", unit);
1253 		error = ENXIO;
1254 		goto fail;
1255 	}
1256 
1257 	sc->sk_btag = rman_get_bustag(sc->sk_res);
1258 	sc->sk_bhandle = rman_get_bushandle(sc->sk_res);
1259 
1260 	/* Allocate interrupt */
1261 	rid = 0;
1262 	sc->sk_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1263 	    RF_SHAREABLE | RF_ACTIVE);
1264 
1265 	if (sc->sk_irq == NULL) {
1266 		printf("skc%d: couldn't map interrupt\n", unit);
1267 		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1268 		error = ENXIO;
1269 		goto fail;
1270 	}
1271 
1272 	error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET,
1273 	    sk_intr, sc, &sc->sk_intrhand);
1274 
1275 	if (error) {
1276 		printf("skc%d: couldn't set up irq\n", unit);
1277 		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1278 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1279 		goto fail;
1280 	}
1281 
1282 	mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_DEF);
1283 	SK_LOCK(sc);
1284 	/* Reset the adapter. */
1285 	sk_reset(sc);
1286 
1287 	sc->sk_unit = unit;
1288 
1289 	/* Read and save vital product data from EEPROM. */
1290 	sk_vpd_read(sc);
1291 
1292 	/* Read and save RAM size and RAMbuffer offset */
1293 	switch(sk_win_read_1(sc, SK_EPROM0)) {
1294 	case SK_RAMSIZE_512K_64:
1295 		sc->sk_ramsize = 0x80000;
1296 		sc->sk_rboff = SK_RBOFF_0;
1297 		break;
1298 	case SK_RAMSIZE_1024K_64:
1299 		sc->sk_ramsize = 0x100000;
1300 		sc->sk_rboff = SK_RBOFF_80000;
1301 		break;
1302 	case SK_RAMSIZE_1024K_128:
1303 		sc->sk_ramsize = 0x100000;
1304 		sc->sk_rboff = SK_RBOFF_0;
1305 		break;
1306 	case SK_RAMSIZE_2048K_128:
1307 		sc->sk_ramsize = 0x200000;
1308 		sc->sk_rboff = SK_RBOFF_0;
1309 		break;
1310 	default:
1311 		printf("skc%d: unknown ram size: %d\n",
1312 		    sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
1313 		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1314 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1315 		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1316 		error = ENXIO;
1317 		goto fail;
1318 		break;
1319 	}
1320 
1321 	/* Read and save physical media type */
1322 	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
1323 	case SK_PMD_1000BASESX:
1324 		sc->sk_pmd = IFM_1000_SX;
1325 		break;
1326 	case SK_PMD_1000BASELX:
1327 		sc->sk_pmd = IFM_1000_LX;
1328 		break;
1329 	case SK_PMD_1000BASECX:
1330 		sc->sk_pmd = IFM_1000_CX;
1331 		break;
1332 	case SK_PMD_1000BASETX:
1333 		sc->sk_pmd = IFM_1000_TX;
1334 		break;
1335 	default:
1336 		printf("skc%d: unknown media type: 0x%x\n",
1337 		    sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
1338 		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1339 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1340 		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1341 		error = ENXIO;
1342 		goto fail;
1343 	}
1344 
1345 	/* Announce the product name. */
1346 	printf("skc%d: %s\n", sc->sk_unit, sc->sk_vpd_prodname);
1347 	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1348 	port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1349 	*port = SK_PORT_A;
1350 	device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1351 
1352 	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1353 		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1354 		port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1355 		*port = SK_PORT_B;
1356 		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1357 	}
1358 
1359 	/* Turn on the 'driver is loaded' LED. */
1360 	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1361 
1362 	bus_generic_attach(dev);
1363 	SK_UNLOCK(sc);
1364 	return(0);
1365 
1366 fail:
1367 	SK_UNLOCK(sc);
1368 	mtx_destroy(&sc->sk_mtx);
1369 	return(error);
1370 }
1371 
static int sk_detach_xmac(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;

	/*
	 * Detach one logical XMAC interface: stop the port, detach
	 * it from the network stack, and free the per-port memory
	 * allocated in sk_attach_xmac().
	 */
	sc = device_get_softc(device_get_parent(dev));
	sc_if = device_get_softc(dev);
	SK_IF_LOCK(sc_if);

	ifp = &sc_if->arpcom.ac_if;
	/* Halt RX/TX and cancel timers before tearing anything down. */
	sk_stop(sc_if);
	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
	bus_generic_detach(dev);
	if (sc_if->sk_miibus != NULL)
		device_delete_child(dev, sc_if->sk_miibus);
	/* Release the jumbo buffer pool and the descriptor rings. */
	contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
	contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data), M_DEVBUF);
	SK_IF_UNLOCK(sc_if);

	return(0);
}
1395 
static int sk_detach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	/*
	 * Detach the controller: delete both logical port children
	 * (their own detach routines free per-port state), then
	 * tear down the interrupt handler and release the bus
	 * resources acquired in sk_attach().
	 */
	sc = device_get_softc(dev);
	SK_LOCK(sc);

	bus_generic_detach(dev);
	if (sc->sk_devs[SK_PORT_A] != NULL)
		device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
	if (sc->sk_devs[SK_PORT_B] != NULL)
		device_delete_child(dev, sc->sk_devs[SK_PORT_B]);

	bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
	bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);

	/* The mutex must be released before it can be destroyed. */
	SK_UNLOCK(sc);
	mtx_destroy(&sc->sk_mtx);

	return(0);
}
1419 
1420 static int sk_encap(sc_if, m_head, txidx)
1421         struct sk_if_softc	*sc_if;
1422         struct mbuf		*m_head;
1423         u_int32_t		*txidx;
1424 {
1425 	struct sk_tx_desc	*f = NULL;
1426 	struct mbuf		*m;
1427 	u_int32_t		frag, cur, cnt = 0;
1428 
1429 	m = m_head;
1430 	cur = frag = *txidx;
1431 
1432 	/*
1433 	 * Start packing the mbufs in this chain into
1434 	 * the fragment pointers. Stop when we run out
1435 	 * of fragments or hit the end of the mbuf chain.
1436 	 */
1437 	for (m = m_head; m != NULL; m = m->m_next) {
1438 		if (m->m_len != 0) {
1439 			if ((SK_TX_RING_CNT -
1440 			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
1441 				return(ENOBUFS);
1442 			f = &sc_if->sk_rdata->sk_tx_ring[frag];
1443 			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
1444 			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
1445 			if (cnt == 0)
1446 				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
1447 			else
1448 				f->sk_ctl |= SK_TXCTL_OWN;
1449 			cur = frag;
1450 			SK_INC(frag, SK_TX_RING_CNT);
1451 			cnt++;
1452 		}
1453 	}
1454 
1455 	if (m != NULL)
1456 		return(ENOBUFS);
1457 
1458 	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
1459 		SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
1460 	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
1461 	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
1462 	sc_if->sk_cdata.sk_tx_cnt += cnt;
1463 
1464 	*txidx = frag;
1465 
1466 	return(0);
1467 }
1468 
static void sk_start(ifp)
	struct ifnet		*ifp;
{
        struct sk_softc		*sc;
        struct sk_if_softc	*sc_if;
        struct mbuf		*m_head = NULL;
        u_int32_t		idx;

	/*
	 * Transmit start routine: drain the interface send queue
	 * into the TX descriptor ring, then kick the BMU to start
	 * transmission.
	 */
	sc_if = ifp->if_softc;
	sc = sc_if->sk_softc;

	SK_IF_LOCK(sc_if);

	idx = sc_if->sk_cdata.sk_tx_prod;

	/* Stop when the next producer slot still holds an mbuf. */
	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, m_head, &idx)) {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp, m_head);
	}

	/* Transmit */
	sc_if->sk_cdata.sk_tx_prod = idx;
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;
	SK_IF_UNLOCK(sc_if);

	return;
}
1518 
1519 
1520 static void sk_watchdog(ifp)
1521 	struct ifnet		*ifp;
1522 {
1523 	struct sk_if_softc	*sc_if;
1524 
1525 	sc_if = ifp->if_softc;
1526 
1527 	printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
1528 	sk_init(sc_if);
1529 
1530 	return;
1531 }
1532 
static void sk_shutdown(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	/*
	 * System shutdown hook: quiesce the controller so it does
	 * not keep DMAing after the OS has gone away.
	 */
	sc = device_get_softc(dev);
	SK_LOCK(sc);

	/* Turn off the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);

	/*
	 * Reset the GEnesis controller. Doing this should also
	 * assert the resets on the attached XMAC(s).
	 */
	sk_reset(sc);
	SK_UNLOCK(sc);

	return;
}
1553 
1554 static void sk_rxeof(sc_if)
1555 	struct sk_if_softc	*sc_if;
1556 {
1557 	struct ether_header	*eh;
1558 	struct mbuf		*m;
1559 	struct ifnet		*ifp;
1560 	struct sk_chain		*cur_rx;
1561 	int			total_len = 0;
1562 	int			i;
1563 	u_int32_t		rxstat;
1564 
1565 	ifp = &sc_if->arpcom.ac_if;
1566 	i = sc_if->sk_cdata.sk_rx_prod;
1567 	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
1568 
1569 	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {
1570 
1571 		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
1572 		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
1573 		m = cur_rx->sk_mbuf;
1574 		cur_rx->sk_mbuf = NULL;
1575 		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
1576 		SK_INC(i, SK_RX_RING_CNT);
1577 
1578 		if (rxstat & XM_RXSTAT_ERRFRAME) {
1579 			ifp->if_ierrors++;
1580 			sk_newbuf(sc_if, cur_rx, m);
1581 			continue;
1582 		}
1583 
1584 		/*
1585 		 * Try to allocate a new jumbo buffer. If that
1586 		 * fails, copy the packet to mbufs and put the
1587 		 * jumbo buffer back in the ring so it can be
1588 		 * re-used. If allocating mbufs fails, then we
1589 		 * have to drop the packet.
1590 		 */
1591 		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
1592 			struct mbuf		*m0;
1593 			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
1594 			    total_len + ETHER_ALIGN, 0, ifp, NULL);
1595 			sk_newbuf(sc_if, cur_rx, m);
1596 			if (m0 == NULL) {
1597 				printf("sk%d: no receive buffers "
1598 				    "available -- packet dropped!\n",
1599 				    sc_if->sk_unit);
1600 				ifp->if_ierrors++;
1601 				continue;
1602 			}
1603 			m_adj(m0, ETHER_ALIGN);
1604 			m = m0;
1605 		} else {
1606 			m->m_pkthdr.rcvif = ifp;
1607 			m->m_pkthdr.len = m->m_len = total_len;
1608 		}
1609 
1610 		ifp->if_ipackets++;
1611 		eh = mtod(m, struct ether_header *);
1612 
1613 		/* Remove header from mbuf and pass it on. */
1614 		m_adj(m, sizeof(struct ether_header));
1615 		ether_input(ifp, eh, m);
1616 	}
1617 
1618 	sc_if->sk_cdata.sk_rx_prod = i;
1619 
1620 	return;
1621 }
1622 
1623 static void sk_txeof(sc_if)
1624 	struct sk_if_softc	*sc_if;
1625 {
1626 	struct sk_tx_desc	*cur_tx = NULL;
1627 	struct ifnet		*ifp;
1628 	u_int32_t		idx;
1629 
1630 	ifp = &sc_if->arpcom.ac_if;
1631 
1632 	/*
1633 	 * Go through our tx ring and free mbufs for those
1634 	 * frames that have been sent.
1635 	 */
1636 	idx = sc_if->sk_cdata.sk_tx_cons;
1637 	while(idx != sc_if->sk_cdata.sk_tx_prod) {
1638 		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
1639 		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
1640 			break;
1641 		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
1642 			ifp->if_opackets++;
1643 		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
1644 			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
1645 			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
1646 		}
1647 		sc_if->sk_cdata.sk_tx_cnt--;
1648 		SK_INC(idx, SK_TX_RING_CNT);
1649 		ifp->if_timer = 0;
1650 	}
1651 
1652 	sc_if->sk_cdata.sk_tx_cons = idx;
1653 
1654 	if (cur_tx != NULL)
1655 		ifp->if_flags &= ~IFF_OACTIVE;
1656 
1657 	return;
1658 }
1659 
static void sk_tick(xsc_if)
	void			*xsc_if;
{
	struct sk_if_softc	*sc_if;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			i;

	/*
	 * Periodic timer, armed from the XMAC interrupt handler
	 * while the link is down: poll for link resync and run the
	 * MII tick, rescheduling itself until the link comes back.
	 */
	sc_if = xsc_if;
	SK_IF_LOCK(sc_if);
	ifp = &sc_if->arpcom.ac_if;
	mii = device_get_softc(sc_if->sk_miibus);

	if (!(ifp->if_flags & IFF_UP)) {
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/* Broadcom PHYs are handled by the BCOM interrupt path instead. */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link_sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	/* Link still down: try again in a second. */
	if (i != 3) {
		sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	SK_XM_READ_2(sc_if, XM_ISR);
	mii_tick(mii);
	mii_pollstat(mii);
	/* Link is back; cancel any pending rearm of this timer. */
	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);

	SK_IF_UNLOCK(sc_if);
	return;
}
1712 
static void sk_intr_bcom(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			status;

	/*
	 * Service a Broadcom PHY interrupt: acknowledge the PHY,
	 * track link up/down transitions, and drive the link LED
	 * accordingly. RX/TX are paused on the XMAC while the PHY
	 * is being serviced and re-enabled on the way out.
	 */
	sc = sc_if->sk_softc;
	mii = device_get_softc(sc_if->sk_miibus);
	ifp = &sc_if->arpcom.ac_if;

	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	status = sk_miibus_readreg(sc_if->sk_dev,
	    SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

	/* Not running: just reinitialize the MAC (which re-enables RX/TX). */
	if (!(ifp->if_flags & IFF_RUNNING)) {
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
		int			lstat;
		lstat = sk_miibus_readreg(sc_if->sk_dev,
		    SK_PHYADDR_BCOM, BRGPHY_MII_AUXSTS);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link lost: renegotiate. */
			mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			/* Link (re)established: mask PHY interrupts. */
			sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM,
	    		    BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
			    SK_LINKLED_BLINK_OFF);
			mii_pollstat(mii);
		} else {
			/* Autoneg still in progress: poll again later. */
			mii_tick(mii);
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}
	}

	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	return;
}
1770 
static void sk_intr_xmac(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	u_int16_t		status;
	struct mii_data		*mii;

	/*
	 * Service an XMAC interrupt: handle link-state changes for
	 * the internal XMAC PHY and recover from TX underruns and
	 * RX overruns by flushing the corresponding FIFO.
	 */
	sc = sc_if->sk_softc;
	mii = device_get_softc(sc_if->sk_miibus);
	status = SK_XM_READ_2(sc_if, XM_ISR);

	/*
	 * Link has gone down. Start MII tick timeout to
	 * watch for link resync.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
		if (status & XM_ISR_GP0_SET) {
			/* Mask GP0 until sk_tick() sees the link again. */
			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}

		if (status & XM_ISR_AUTONEG_DONE) {
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}
	}

	if (status & XM_IMR_TX_UNDERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

	if (status & XM_IMR_RX_OVERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);

	/* Re-read ISR to acknowledge/clear any remaining conditions. */
	status = SK_XM_READ_2(sc_if, XM_ISR);

	return;
}
1807 
1808 static void sk_intr(xsc)
1809 	void			*xsc;
1810 {
1811 	struct sk_softc		*sc = xsc;
1812 	struct sk_if_softc	*sc_if0 = NULL, *sc_if1 = NULL;
1813 	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
1814 	u_int32_t		status;
1815 
1816 	SK_LOCK(sc);
1817 
1818 	sc_if0 = sc->sk_if[SK_PORT_A];
1819 	sc_if1 = sc->sk_if[SK_PORT_B];
1820 
1821 	if (sc_if0 != NULL)
1822 		ifp0 = &sc_if0->arpcom.ac_if;
1823 	if (sc_if1 != NULL)
1824 		ifp1 = &sc_if1->arpcom.ac_if;
1825 
1826 	for (;;) {
1827 		status = CSR_READ_4(sc, SK_ISSR);
1828 		if (!(status & sc->sk_intrmask))
1829 			break;
1830 
1831 		/* Handle receive interrupts first. */
1832 		if (status & SK_ISR_RX1_EOF) {
1833 			sk_rxeof(sc_if0);
1834 			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
1835 			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1836 		}
1837 		if (status & SK_ISR_RX2_EOF) {
1838 			sk_rxeof(sc_if1);
1839 			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
1840 			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1841 		}
1842 
1843 		/* Then transmit interrupts. */
1844 		if (status & SK_ISR_TX1_S_EOF) {
1845 			sk_txeof(sc_if0);
1846 			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
1847 			    SK_TXBMU_CLR_IRQ_EOF);
1848 		}
1849 		if (status & SK_ISR_TX2_S_EOF) {
1850 			sk_txeof(sc_if1);
1851 			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
1852 			    SK_TXBMU_CLR_IRQ_EOF);
1853 		}
1854 
1855 		/* Then MAC interrupts. */
1856 		if (status & SK_ISR_MAC1 &&
1857 		    ifp0->if_flags & IFF_RUNNING)
1858 			sk_intr_xmac(sc_if0);
1859 
1860 		if (status & SK_ISR_MAC2 &&
1861 		    ifp1->if_flags & IFF_RUNNING)
1862 			sk_intr_xmac(sc_if1);
1863 
1864 		if (status & SK_ISR_EXTERNAL_REG) {
1865 			if (ifp0 != NULL)
1866 				sk_intr_bcom(sc_if0);
1867 			if (ifp1 != NULL)
1868 				sk_intr_bcom(sc_if1);
1869 		}
1870 	}
1871 
1872 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
1873 
1874 	if (ifp0 != NULL && ifp0->if_snd.ifq_head != NULL)
1875 		sk_start(ifp0);
1876 	if (ifp1 != NULL && ifp1->if_snd.ifq_head != NULL)
1877 		sk_start(ifp1);
1878 
1879 	SK_UNLOCK(sc);
1880 
1881 	return;
1882 }
1883 
static void sk_init_xmac(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	struct sk_bcom_hack	bhack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	{ 0, 0 } };

	/*
	 * Bring the XMAC out of reset and program it: station
	 * address, RX filtering mode, FIFO thresholds, interrupt
	 * masks and the MAC arbiter, plus Broadcom PHY bring-up
	 * for 1000baseTX boards.
	 */
	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int			i = 0;
		u_int32_t		val;

		/* Take PHY out of reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM,
		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
		DELAY(10000);
		sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM,
		    BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		if (sk_miibus_readreg(sc_if->sk_dev,
		    SK_PHYADDR_BCOM, 0x03) == 0x6041) {
			while(bhack[i].reg) {
				sk_miibus_writereg(sc_if->sk_dev,
				    SK_PHYADDR_BCOM, bhack[i].reg,
				    bhack[i].val);
				i++;
			}
		}
	}

	/* Set station address */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	/* Mirror the interface flags into the RX filter mode. */
	if (ifp->if_flags & IFF_PROMISC) {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	} else {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	}

	if (ifp->if_flags & IFF_BROADCAST) {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	} else {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	}

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
	    XM_MODE_RX_INRANGELEN);

	/* Allow oversized frames only when the MTU calls for them. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
	else
		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/* Clear and enable interrupts */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter: timing values depend on chip revision. */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	sc_if->sk_link = 1;

	return;
}
2051 
2052 /*
2053  * Note that to properly initialize any part of the GEnesis chip,
2054  * you first have to take it out of reset mode.
2055  */
2056 static void sk_init(xsc)
2057 	void			*xsc;
2058 {
2059 	struct sk_if_softc	*sc_if = xsc;
2060 	struct sk_softc		*sc;
2061 	struct ifnet		*ifp;
2062 	struct mii_data		*mii;
2063 
2064 	SK_IF_LOCK(sc_if);
2065 
2066 	ifp = &sc_if->arpcom.ac_if;
2067 	sc = sc_if->sk_softc;
2068 	mii = device_get_softc(sc_if->sk_miibus);
2069 
2070 	/* Cancel pending I/O and free all RX/TX buffers. */
2071 	sk_stop(sc_if);
2072 
2073 	/* Configure LINK_SYNC LED */
2074 	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
2075 	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_ON);
2076 
2077 	/* Configure RX LED */
2078 	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_START);
2079 
2080 	/* Configure TX LED */
2081 	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_START);
2082 
2083 	/* Configure I2C registers */
2084 
2085 	/* Configure XMAC(s) */
2086 	sk_init_xmac(sc_if);
2087 	mii_mediachg(mii);
2088 
2089 	/* Configure MAC FIFOs */
2090 	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
2091 	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
2092 	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
2093 
2094 	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
2095 	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
2096 	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
2097 
2098 	/* Configure transmit arbiter(s) */
2099 	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
2100 	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
2101 
2102 	/* Configure RAMbuffers */
2103 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
2104 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
2105 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
2106 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
2107 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
2108 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
2109 
2110 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
2111 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
2112 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
2113 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
2114 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
2115 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
2116 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
2117 
2118 	/* Configure BMUs */
2119 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
2120 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
2121 	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
2122 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);
2123 
2124 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
2125 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
2126 	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
2127 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);
2128 
2129 	/* Init descriptors */
2130 	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
2131 		printf("sk%d: initialization failed: no "
2132 		    "memory for rx buffers\n", sc_if->sk_unit);
2133 		sk_stop(sc_if);
2134 		SK_IF_UNLOCK(sc_if);
2135 		return;
2136 	}
2137 	sk_init_tx_ring(sc_if);
2138 
2139 	/* Configure interrupt handling */
2140 	CSR_READ_4(sc, SK_ISSR);
2141 	if (sc_if->sk_port == SK_PORT_A)
2142 		sc->sk_intrmask |= SK_INTRS1;
2143 	else
2144 		sc->sk_intrmask |= SK_INTRS2;
2145 
2146 	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
2147 
2148 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2149 
2150 	/* Start BMUs. */
2151 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
2152 
2153 	/* Enable XMACs TX and RX state machines */
2154 	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
2155 	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2156 
2157 	ifp->if_flags |= IFF_RUNNING;
2158 	ifp->if_flags &= ~IFF_OACTIVE;
2159 
2160 	SK_IF_UNLOCK(sc_if);
2161 
2162 	return;
2163 }
2164 
/*
 * sk_stop: quiesce the port.  Cancels the periodic tick callout, holds
 * the PHY in reset (Broadcom only), resets the XMAC, FIFOs, BMUs, RAM
 * buffers and LEDs, masks this port's interrupts, and frees every mbuf
 * still attached to the RX/TX descriptor rings.  As with sk_init(),
 * the register writes are order sensitive.
 */
static void sk_stop(sc_if)
	struct sk_if_softc	*sc_if;
{
	int			i;
	struct sk_softc		*sc;
	struct ifnet		*ifp;

	SK_IF_LOCK(sc_if);
	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/* Cancel the periodic sk_tick timer for this interface. */
	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		u_int32_t		val;

		/*
		 * Put PHY back into reset: drive the per-port GPIO pin
		 * low (DIR set = output, DAT cleared).  Port A uses
		 * GPIO bit 0, port B uses bit 2.
		 */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A) {
			val |= SK_GPIO_DIR0;
			val &= ~SK_GPIO_DAT0;
		} else {
			val |= SK_GPIO_DIR2;
			val &= ~SK_GPIO_DAT2;
		}
		sk_win_write_4(sc, SK_GPIO, val);
	}

	/* Turn off various components of this interface. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	/*
	 * NOTE(review): the TX LED register is written with the RX LED
	 * stop value; SK_TXLEDCTL_COUNTER_STOP was likely intended.
	 * Harmless if the two constants share the same value -- confirm
	 * against if_skreg.h before changing.
	 */
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Disable interrupts: drop this port's sources from the mask. */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_INTRS1;
	else
		sc->sk_intrmask &= ~SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/*
	 * Drain and mask XMAC interrupts as well (ISR read presumably
	 * clears latched status -- TODO confirm; IMR all-ones masks
	 * everything).
	 */
	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
		}
	}

	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
		}
	}

	/* Mark the interface down. */
	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
	SK_IF_UNLOCK(sc_if);
	return;
}
2236