xref: /freebsd/sys/dev/sk/if_sk.c (revision c68159a6d8eede11766cf13896d0f7670dbd51aa)
1 /*
2  * Copyright (c) 1997, 1998, 1999, 2000
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  * $FreeBSD$
33  */
34 
35 /*
36  * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
37  * the SK-984x series adapters, both single port and dual port.
38  * References:
39  * 	The XaQti XMAC II datasheet,
40  *  http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
41  *	The SysKonnect GEnesis manual, http://www.syskonnect.com
42  *
 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
44  * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
45  * convenience to others until Vitesse corrects this problem:
46  *
47  * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
48  *
49  * Written by Bill Paul <wpaul@ee.columbia.edu>
50  * Department of Electrical Engineering
51  * Columbia University, New York City
52  */
53 
54 /*
55  * The SysKonnect gigabit ethernet adapters consist of two main
56  * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
57  * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
58  * components and a PHY while the GEnesis controller provides a PCI
59  * interface with DMA support. Each card may have between 512K and
60  * 2MB of SRAM on board depending on the configuration.
61  *
62  * The SysKonnect GEnesis controller can have either one or two XMAC
63  * chips connected to it, allowing single or dual port NIC configurations.
64  * SysKonnect has the distinction of being the only vendor on the market
65  * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
66  * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
67  * XMAC registers. This driver takes advantage of these features to allow
68  * both XMACs to operate as independent interfaces.
69  */
70 
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/sockio.h>
74 #include <sys/mbuf.h>
75 #include <sys/malloc.h>
76 #include <sys/kernel.h>
77 #include <sys/socket.h>
78 #include <sys/queue.h>
79 
80 #include <net/if.h>
81 #include <net/if_arp.h>
82 #include <net/ethernet.h>
83 #include <net/if_dl.h>
84 #include <net/if_media.h>
85 
86 #include <net/bpf.h>
87 
88 #include <vm/vm.h>              /* for vtophys */
89 #include <vm/pmap.h>            /* for vtophys */
90 #include <machine/bus_pio.h>
91 #include <machine/bus_memio.h>
92 #include <machine/bus.h>
93 #include <machine/resource.h>
94 #include <sys/bus.h>
95 #include <sys/rman.h>
96 
97 #include <dev/mii/mii.h>
98 #include <dev/mii/miivar.h>
99 #include <dev/mii/brgphyreg.h>
100 
101 #include <pci/pcireg.h>
102 #include <pci/pcivar.h>
103 
104 #define SK_USEIOSPACE
105 
106 #include <pci/if_skreg.h>
107 #include <pci/xmaciireg.h>
108 
109 MODULE_DEPEND(sk, miibus, 1, 1, 1);
110 
111 /* "controller miibus0" required.  See GENERIC if you get errors here. */
112 #include "miibus_if.h"
113 
114 #ifndef lint
115 static const char rcsid[] =
116   "$FreeBSD$";
117 #endif
118 
/*
 * Table of PCI vendor/device IDs this driver supports; scanned by
 * sk_probe() and terminated by an all-zero sentinel entry.
 */
static struct sk_type sk_devs[] = {
	{ SK_VENDORID, SK_DEVICEID_GE, "SysKonnect Gigabit Ethernet" },
	{ 0, 0, NULL }
};
123 
124 static int sk_probe		__P((device_t));
125 static int sk_attach		__P((device_t));
126 static int sk_detach		__P((device_t));
127 static int sk_detach_xmac	__P((device_t));
128 static int sk_probe_xmac	__P((device_t));
129 static int sk_attach_xmac	__P((device_t));
130 static void sk_tick		__P((void *));
131 static void sk_intr		__P((void *));
132 static void sk_intr_xmac	__P((struct sk_if_softc *));
133 static void sk_intr_bcom	__P((struct sk_if_softc *));
134 static void sk_rxeof		__P((struct sk_if_softc *));
135 static void sk_txeof		__P((struct sk_if_softc *));
136 static int sk_encap		__P((struct sk_if_softc *, struct mbuf *,
137 					u_int32_t *));
138 static void sk_start		__P((struct ifnet *));
139 static int sk_ioctl		__P((struct ifnet *, u_long, caddr_t));
140 static void sk_init		__P((void *));
141 static void sk_init_xmac	__P((struct sk_if_softc *));
142 static void sk_stop		__P((struct sk_if_softc *));
143 static void sk_watchdog		__P((struct ifnet *));
144 static void sk_shutdown		__P((device_t));
145 static int sk_ifmedia_upd	__P((struct ifnet *));
146 static void sk_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
147 static void sk_reset		__P((struct sk_softc *));
148 static int sk_newbuf		__P((struct sk_if_softc *,
149 					struct sk_chain *, struct mbuf *));
150 static int sk_alloc_jumbo_mem	__P((struct sk_if_softc *));
151 static void *sk_jalloc		__P((struct sk_if_softc *));
152 static void sk_jfree		__P((caddr_t, void *));
153 static int sk_init_rx_ring	__P((struct sk_if_softc *));
154 static void sk_init_tx_ring	__P((struct sk_if_softc *));
155 static u_int32_t sk_win_read_4	__P((struct sk_softc *, int));
156 static u_int16_t sk_win_read_2	__P((struct sk_softc *, int));
157 static u_int8_t sk_win_read_1	__P((struct sk_softc *, int));
158 static void sk_win_write_4	__P((struct sk_softc *, int, u_int32_t));
159 static void sk_win_write_2	__P((struct sk_softc *, int, u_int32_t));
160 static void sk_win_write_1	__P((struct sk_softc *, int, u_int32_t));
161 static u_int8_t sk_vpd_readbyte	__P((struct sk_softc *, int));
162 static void sk_vpd_read_res	__P((struct sk_softc *,
163 					struct vpd_res *, int));
164 static void sk_vpd_read		__P((struct sk_softc *));
165 
166 static int sk_miibus_readreg	__P((device_t, int, int));
167 static int sk_miibus_writereg	__P((device_t, int, int, int));
168 static void sk_miibus_statchg	__P((device_t));
169 
170 static u_int32_t sk_calchash	__P((caddr_t));
171 static void sk_setfilt		__P((struct sk_if_softc *, caddr_t, int));
172 static void sk_setmulti		__P((struct sk_if_softc *));
173 
174 #ifdef SK_USEIOSPACE
175 #define SK_RES		SYS_RES_IOPORT
176 #define SK_RID		SK_PCI_LOIO
177 #else
178 #define SK_RES		SYS_RES_MEMORY
179 #define SK_RID		SK_PCI_LOMEM
180 #endif
181 
182 /*
183  * Note that we have newbus methods for both the GEnesis controller
184  * itself and the XMAC(s). The XMACs are children of the GEnesis, and
185  * the miibus code is a child of the XMACs. We need to do it this way
186  * so that the miibus drivers can access the PHY registers on the
187  * right PHY. It's not quite what I had in mind, but it's the only
188  * design that achieves the desired effect.
189  */
/* newbus method table for the GEnesis controller ("skc") device itself. */
static device_method_t skc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sk_probe),
	DEVMETHOD(device_attach,	sk_attach),
	DEVMETHOD(device_detach,	sk_detach),
	DEVMETHOD(device_shutdown,	sk_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};

static driver_t skc_driver = {
	"skc",
	skc_methods,
	sizeof(struct sk_softc)
};

static devclass_t skc_devclass;

/*
 * newbus method table for the per-port XMAC II ("sk") child devices.
 * These also export the MII register accessors used by the miibus
 * children hanging off each XMAC.
 */
static device_method_t sk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sk_probe_xmac),
	DEVMETHOD(device_attach,	sk_attach_xmac),
	DEVMETHOD(device_detach,	sk_detach_xmac),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),

	{ 0, 0 }
};

static driver_t sk_driver = {
	"sk",
	sk_methods,
	sizeof(struct sk_if_softc)
};

static devclass_t sk_devclass;

/* Attachment chain: skc on pci, sk on skc, miibus on sk. */
DRIVER_MODULE(if_sk, pci, skc_driver, skc_devclass, 0, 0);
DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
241 DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
242 
/*
 * Read-modify-write helpers for setting/clearing bits in controller
 * CSRs and windowed registers. The mask argument is parenthesized in
 * the expansion: without that, a compound mask such as FOO|BAR passed
 * to a CLRBIT macro would expand as "& ~FOO|BAR", which complements
 * only the first constant and then ORs in the second -- silently
 * corrupting the register value.
 */
#define SK_SETBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define SK_CLRBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

#define SK_WIN_SETBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | (x))

#define SK_WIN_CLRBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~(x))

#define SK_WIN_SETBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | (x))

#define SK_WIN_CLRBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~(x))
260 
/*
 * The GEnesis exposes most of its registers through a movable window.
 * Each access first selects the proper window by programming the RAP
 * register, then touches the register's offset within the window.
 */
static u_int32_t
sk_win_read_4(struct sk_softc *sc, int reg)
{
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return (CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
}

static u_int16_t
sk_win_read_2(struct sk_softc *sc, int reg)
{
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return (CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
}

static u_int8_t
sk_win_read_1(struct sk_softc *sc, int reg)
{
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return (CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
}

static void
sk_win_write_4(struct sk_softc *sc, int reg, u_int32_t val)
{
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
}

static void
sk_win_write_2(struct sk_softc *sc, int reg, u_int32_t val)
{
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), (u_int32_t)val);
}

static void
sk_win_write_1(struct sk_softc *sc, int reg, u_int32_t val)
{
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
}
314 
315 /*
316  * The VPD EEPROM contains Vital Product Data, as suggested in
 * the PCI 2.1 specification. The VPD data is separated into areas
318  * denoted by resource IDs. The SysKonnect VPD contains an ID string
319  * resource (the name of the adapter), a read-only area resource
320  * containing various key/data fields and a read/write area which
321  * can be used to store asset management information or log messages.
322  * We read the ID string and read-only into buffers attached to
323  * the controller softc structure for later use. At the moment,
324  * we only use the ID string during sk_attach().
325  */
326 static u_int8_t sk_vpd_readbyte(sc, addr)
327 	struct sk_softc		*sc;
328 	int			addr;
329 {
330 	int			i;
331 
332 	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
333 	for (i = 0; i < SK_TIMEOUT; i++) {
334 		DELAY(1);
335 		if (sk_win_read_2(sc,
336 		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
337 			break;
338 	}
339 
340 	if (i == SK_TIMEOUT)
341 		return(0);
342 
343 	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
344 }
345 
346 static void sk_vpd_read_res(sc, res, addr)
347 	struct sk_softc		*sc;
348 	struct vpd_res		*res;
349 	int			addr;
350 {
351 	int			i;
352 	u_int8_t		*ptr;
353 
354 	ptr = (u_int8_t *)res;
355 	for (i = 0; i < sizeof(struct vpd_res); i++)
356 		ptr[i] = sk_vpd_readbyte(sc, i + addr);
357 
358 	return;
359 }
360 
361 static void sk_vpd_read(sc)
362 	struct sk_softc		*sc;
363 {
364 	int			pos = 0, i;
365 	struct vpd_res		res;
366 
367 	if (sc->sk_vpd_prodname != NULL)
368 		free(sc->sk_vpd_prodname, M_DEVBUF);
369 	if (sc->sk_vpd_readonly != NULL)
370 		free(sc->sk_vpd_readonly, M_DEVBUF);
371 	sc->sk_vpd_prodname = NULL;
372 	sc->sk_vpd_readonly = NULL;
373 
374 	sk_vpd_read_res(sc, &res, pos);
375 
376 	if (res.vr_id != VPD_RES_ID) {
377 		printf("skc%d: bad VPD resource id: expected %x got %x\n",
378 		    sc->sk_unit, VPD_RES_ID, res.vr_id);
379 		return;
380 	}
381 
382 	pos += sizeof(res);
383 	sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
384 	for (i = 0; i < res.vr_len; i++)
385 		sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
386 	sc->sk_vpd_prodname[i] = '\0';
387 	pos += i;
388 
389 	sk_vpd_read_res(sc, &res, pos);
390 
391 	if (res.vr_id != VPD_RES_READ) {
392 		printf("skc%d: bad VPD resource id: expected %x got %x\n",
393 		    sc->sk_unit, VPD_RES_READ, res.vr_id);
394 		return;
395 	}
396 
397 	pos += sizeof(res);
398 	sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
399 	for (i = 0; i < res.vr_len + 1; i++)
400 		sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
401 
402 	return;
403 }
404 
405 static int sk_miibus_readreg(dev, phy, reg)
406 	device_t		dev;
407 	int			phy, reg;
408 {
409 	struct sk_if_softc	*sc_if;
410 	int			i;
411 
412 	sc_if = device_get_softc(dev);
413 
414 	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
415 		return(0);
416 
417 	SK_IF_LOCK(sc_if);
418 
419 	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
420 	SK_XM_READ_2(sc_if, XM_PHY_DATA);
421 	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
422 		for (i = 0; i < SK_TIMEOUT; i++) {
423 			DELAY(1);
424 			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
425 			    XM_MMUCMD_PHYDATARDY)
426 				break;
427 		}
428 
429 		if (i == SK_TIMEOUT) {
430 			printf("sk%d: phy failed to come ready\n",
431 			    sc_if->sk_unit);
432 			return(0);
433 		}
434 	}
435 	DELAY(1);
436 	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
437 	SK_IF_UNLOCK(sc_if);
438 	return(i);
439 }
440 
441 static int sk_miibus_writereg(dev, phy, reg, val)
442 	device_t		dev;
443 	int			phy, reg, val;
444 {
445 	struct sk_if_softc	*sc_if;
446 	int			i;
447 
448 	sc_if = device_get_softc(dev);
449 	SK_IF_LOCK(sc_if);
450 
451 	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
452 	for (i = 0; i < SK_TIMEOUT; i++) {
453 		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
454 			break;
455 	}
456 
457 	if (i == SK_TIMEOUT) {
458 		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
459 		return(ETIMEDOUT);
460 	}
461 
462 	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
463 	for (i = 0; i < SK_TIMEOUT; i++) {
464 		DELAY(1);
465 		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
466 			break;
467 	}
468 
469 	SK_IF_UNLOCK(sc_if);
470 
471 	if (i == SK_TIMEOUT)
472 		printf("sk%d: phy write timed out\n", sc_if->sk_unit);
473 
474 	return(0);
475 }
476 
477 static void sk_miibus_statchg(dev)
478 	device_t		dev;
479 {
480 	struct sk_if_softc	*sc_if;
481 	struct mii_data		*mii;
482 
483 	sc_if = device_get_softc(dev);
484 	mii = device_get_softc(sc_if->sk_miibus);
485 	SK_IF_LOCK(sc_if);
486 	/*
487 	 * If this is a GMII PHY, manually set the XMAC's
488 	 * duplex mode accordingly.
489 	 */
490 	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
491 		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
492 			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
493 		} else {
494 			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
495 		}
496 	}
497 	SK_IF_UNLOCK(sc_if);
498 
499 	return;
500 }
501 
#define SK_POLY		0xEDB88320
#define SK_BITS		6

/*
 * Compute the multicast hash for a 6-byte station address: a
 * reflected CRC-32 (seed 0xFFFFFFFF, polynomial SK_POLY), of which
 * the low SK_BITS bits of the inverted result index the hash table.
 */
static u_int32_t
sk_calchash(caddr_t addr)
{
	u_int32_t		crc, byte;
	int			octet, bit;

	crc = 0xFFFFFFFF;	/* CRC-32 initial value */

	for (octet = 0; octet < 6; octet++) {
		byte = *addr++;
		for (bit = 0; bit < 8; bit++) {
			if ((crc ^ byte) & 1)
				crc = (crc >> 1) ^ SK_POLY;
			else
				crc >>= 1;
			byte >>= 1;
		}
	}

	return (~crc & ((1 << SK_BITS) - 1));
}
520 
521 static void sk_setfilt(sc_if, addr, slot)
522 	struct sk_if_softc	*sc_if;
523 	caddr_t			addr;
524 	int			slot;
525 {
526 	int			base;
527 
528 	base = XM_RXFILT_ENTRY(slot);
529 
530 	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
531 	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
532 	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
533 
534 	return;
535 }
536 
/*
 * Program the XMAC receive filter from the interface's multicast
 * list: the first XM_RXFILT_MAX - 1 groups go into perfect-match
 * slots (slot 0 is reserved for the station address) and the rest
 * fall back to the 64-bit hash table built with sk_calchash().
 */
static void sk_setmulti(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct ifnet		*ifp;
	u_int32_t		hashes[2] = { 0, 0 };
	int			h, i;
	struct ifmultiaddr	*ifma;
	u_int8_t		dummy[] = { 0, 0, 0, 0, 0 ,0 };

	ifp = &sc_if->arpcom.ac_if;

	/* First, zot all the existing filters. */
	for (i = 1; i < XM_RXFILT_MAX; i++)
		sk_setfilt(sc_if, (caddr_t)&dummy, i);
	SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
	SK_XM_WRITE_4(sc_if, XM_MAR2, 0);

	/* Now program new ones. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		/* Accept all multicast: saturate the hash table. */
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		/* First find the tail of the list. */
		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
					ifma = ifma->ifma_link.le_next) {
			if (ifma->ifma_link.le_next == NULL)
				break;
		}
		/*
		 * Now traverse the list backwards. NOTE(review): this
		 * casts le_prev (a pointer to the previous element's
		 * le_next field) back to an element pointer, and uses
		 * the address of the list head as the stop sentinel --
		 * it depends on the old LIST layout; verify before
		 * porting.
		 */
		for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs;
			ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (i < XM_RXFILT_MAX) {
				sk_setfilt(sc_if,
			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
				i++;
				continue;
			}

			h = sk_calchash(
				LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
	}

	/* Enable both filtering modes and load the hash table. */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
	    XM_MODE_RX_USE_PERFECT);
	SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
	SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);

	return;
}
599 
600 static int sk_init_rx_ring(sc_if)
601 	struct sk_if_softc	*sc_if;
602 {
603 	struct sk_chain_data	*cd;
604 	struct sk_ring_data	*rd;
605 	int			i;
606 
607 	cd = &sc_if->sk_cdata;
608 	rd = sc_if->sk_rdata;
609 
610 	bzero((char *)rd->sk_rx_ring,
611 	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
612 
613 	for (i = 0; i < SK_RX_RING_CNT; i++) {
614 		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
615 		if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
616 			return(ENOBUFS);
617 		if (i == (SK_RX_RING_CNT - 1)) {
618 			cd->sk_rx_chain[i].sk_next =
619 			    &cd->sk_rx_chain[0];
620 			rd->sk_rx_ring[i].sk_next =
621 			    vtophys(&rd->sk_rx_ring[0]);
622 		} else {
623 			cd->sk_rx_chain[i].sk_next =
624 			    &cd->sk_rx_chain[i + 1];
625 			rd->sk_rx_ring[i].sk_next =
626 			    vtophys(&rd->sk_rx_ring[i + 1]);
627 		}
628 	}
629 
630 	sc_if->sk_cdata.sk_rx_prod = 0;
631 	sc_if->sk_cdata.sk_rx_cons = 0;
632 
633 	return(0);
634 }
635 
636 static void sk_init_tx_ring(sc_if)
637 	struct sk_if_softc	*sc_if;
638 {
639 	struct sk_chain_data	*cd;
640 	struct sk_ring_data	*rd;
641 	int			i;
642 
643 	cd = &sc_if->sk_cdata;
644 	rd = sc_if->sk_rdata;
645 
646 	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
647 	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
648 
649 	for (i = 0; i < SK_TX_RING_CNT; i++) {
650 		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
651 		if (i == (SK_TX_RING_CNT - 1)) {
652 			cd->sk_tx_chain[i].sk_next =
653 			    &cd->sk_tx_chain[0];
654 			rd->sk_tx_ring[i].sk_next =
655 			    vtophys(&rd->sk_tx_ring[0]);
656 		} else {
657 			cd->sk_tx_chain[i].sk_next =
658 			    &cd->sk_tx_chain[i + 1];
659 			rd->sk_tx_ring[i].sk_next =
660 			    vtophys(&rd->sk_tx_ring[i + 1]);
661 		}
662 	}
663 
664 	sc_if->sk_cdata.sk_tx_prod = 0;
665 	sc_if->sk_cdata.sk_tx_cons = 0;
666 	sc_if->sk_cdata.sk_tx_cnt = 0;
667 
668 	return;
669 }
670 
/*
 * Stock a receive slot with an mbuf backed by one of our private 9K
 * jumbo buffers. If 'm' is NULL, allocate a fresh mbuf header and
 * attach a jumbo buffer via MEXTADD (sk_jfree reclaims it later);
 * otherwise recycle the caller's mbuf, resetting its pointers and
 * lengths. Returns 0 on success or ENOBUFS on allocation failure.
 */
static int sk_newbuf(sc_if, c, m)
	struct sk_if_softc	*sc_if;
	struct sk_chain		*c;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;
	struct sk_rx_desc	*r;

	if (m == NULL) {
		caddr_t			*buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("sk%d: no memory for rx list -- "
			    "packet dropped!\n", sc_if->sk_unit);
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = sk_jalloc(sc_if);
		if (buf == NULL) {
			/* Pool exhausted: release the mbuf header too. */
			m_freem(m_new);
#ifdef SK_VERBOSE
			printf("sk%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc_if->sk_unit);
#endif
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf */
		MEXTADD(m_new, buf, SK_JLEN, sk_jfree,
		    (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV);
		m_new->m_data = (void *)buf;
		m_new->m_pkthdr.len = m_new->m_len = SK_JLEN;
	} else {
		/*
	 	 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	 */
	m_adj(m_new, ETHER_ALIGN);

	/* Publish the buffer's physical address and length to the NIC. */
	r = c->sk_desc;
	c->sk_mbuf = m_new;
	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
	r->sk_ctl = m_new->m_len | SK_RXSTAT;

	return(0);
}
730 
731 /*
732  * Allocate jumbo buffer storage. The SysKonnect adapters support
733  * "jumbograms" (9K frames), although SysKonnect doesn't currently
734  * use them in their drivers. In order for us to use them, we need
735  * large 9K receive buffers, however standard mbuf clusters are only
736  * 2048 bytes in size. Consequently, we need to allocate and manage
737  * our own jumbo buffer pool. Fortunately, this does not require an
738  * excessive amount of additional code.
739  */
740 static int sk_alloc_jumbo_mem(sc_if)
741 	struct sk_if_softc	*sc_if;
742 {
743 	caddr_t			ptr;
744 	register int		i;
745 	struct sk_jpool_entry   *entry;
746 
747 	/* Grab a big chunk o' storage. */
748 	sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
749 	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
750 
751 	if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
752 		printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
753 		return(ENOBUFS);
754 	}
755 
756 	SLIST_INIT(&sc_if->sk_jfree_listhead);
757 	SLIST_INIT(&sc_if->sk_jinuse_listhead);
758 
759 	/*
760 	 * Now divide it up into 9K pieces and save the addresses
761 	 * in an array.
762 	 */
763 	ptr = sc_if->sk_cdata.sk_jumbo_buf;
764 	for (i = 0; i < SK_JSLOTS; i++) {
765 		sc_if->sk_cdata.sk_jslots[i] = ptr;
766 		ptr += SK_JLEN;
767 		entry = malloc(sizeof(struct sk_jpool_entry),
768 		    M_DEVBUF, M_NOWAIT);
769 		if (entry == NULL) {
770 			free(sc_if->sk_cdata.sk_jumbo_buf, M_DEVBUF);
771 			sc_if->sk_cdata.sk_jumbo_buf = NULL;
772 			printf("sk%d: no memory for jumbo "
773 			    "buffer queue!\n", sc_if->sk_unit);
774 			return(ENOBUFS);
775 		}
776 		entry->slot = i;
777 		SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
778 		    entry, jpool_entries);
779 	}
780 
781 	return(0);
782 }
783 
784 /*
785  * Allocate a jumbo buffer.
786  */
787 static void *sk_jalloc(sc_if)
788 	struct sk_if_softc	*sc_if;
789 {
790 	struct sk_jpool_entry   *entry;
791 
792 	entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
793 
794 	if (entry == NULL) {
795 #ifdef SK_VERBOSE
796 		printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
797 #endif
798 		return(NULL);
799 	}
800 
801 	SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
802 	SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
803 	return(sc_if->sk_cdata.sk_jslots[entry->slot]);
804 }
805 
806 /*
807  * Release a jumbo buffer.
808  */
809 static void sk_jfree(buf, args)
810 	caddr_t			buf;
811 	void			*args;
812 {
813 	struct sk_if_softc	*sc_if;
814 	int		        i;
815 	struct sk_jpool_entry   *entry;
816 
817 	/* Extract the softc struct pointer. */
818 	sc_if = (struct sk_if_softc *)args;
819 
820 	if (sc_if == NULL)
821 		panic("sk_jfree: didn't get softc pointer!");
822 
823 	/* calculate the slot this buffer belongs to */
824 	i = ((vm_offset_t)buf
825 	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
826 
827 	if ((i < 0) || (i >= SK_JSLOTS))
828 		panic("sk_jfree: asked to free buffer that we don't manage!");
829 
830 	entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
831 	if (entry == NULL)
832 		panic("sk_jfree: buffer not in use!");
833 	entry->slot = i;
834 	SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
835 	SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);
836 
837 	return;
838 }
839 
840 /*
841  * Set media options.
842  */
843 static int sk_ifmedia_upd(ifp)
844 	struct ifnet		*ifp;
845 {
846 	struct sk_if_softc	*sc_if;
847 	struct mii_data		*mii;
848 
849 	sc_if = ifp->if_softc;
850 	mii = device_get_softc(sc_if->sk_miibus);
851 	sk_init(sc_if);
852 	mii_mediachg(mii);
853 
854 	return(0);
855 }
856 
857 /*
858  * Report current media status.
859  */
860 static void sk_ifmedia_sts(ifp, ifmr)
861 	struct ifnet		*ifp;
862 	struct ifmediareq	*ifmr;
863 {
864 	struct sk_if_softc	*sc_if;
865 	struct mii_data		*mii;
866 
867 	sc_if = ifp->if_softc;
868 	mii = device_get_softc(sc_if->sk_miibus);
869 
870 	mii_pollstat(mii);
871 	ifmr->ifm_active = mii->mii_media_active;
872 	ifmr->ifm_status = mii->mii_media_status;
873 
874 	return;
875 }
876 
/*
 * Handle socket ioctls for the interface. All cases run with the
 * per-interface lock held; media ioctls are forwarded to miibus.
 */
static int sk_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct sk_if_softc	*sc_if = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			error = 0;
	struct mii_data		*mii;

	SK_IF_LOCK(sc_if);

	switch(command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		/* Anything up to the jumbo-frame maximum is accepted. */
		if (ifr->ifr_mtu > SK_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			sk_init(sc_if);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC bit changed while already
			 * running, toggle it in XM_MODE rather than doing
			 * a full reinit; otherwise fall back to sk_init().
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc_if->sk_if_flags & IFF_PROMISC)) {
				SK_XM_SETBIT_4(sc_if, XM_MODE,
				    XM_MODE_RX_PROMISC);
				sk_setmulti(sc_if);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc_if->sk_if_flags & IFF_PROMISC) {
				SK_XM_CLRBIT_4(sc_if, XM_MODE,
				    XM_MODE_RX_PROMISC);
				sk_setmulti(sc_if);
			} else
				sk_init(sc_if);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sk_stop(sc_if);
		}
		/* Remember flags so the next delta can be detected. */
		sc_if->sk_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		sk_setmulti(sc_if);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->sk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	SK_IF_UNLOCK(sc_if);

	return(error);
}
944 
945 /*
946  * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
947  * IDs against our list and return a device name if we find a match.
948  */
949 static int sk_probe(dev)
950 	device_t		dev;
951 {
952 	struct sk_type		*t;
953 
954 	t = sk_devs;
955 
956 	while(t->sk_name != NULL) {
957 		if ((pci_get_vendor(dev) == t->sk_vid) &&
958 		    (pci_get_device(dev) == t->sk_did)) {
959 			device_set_desc(dev, t->sk_name);
960 			return(0);
961 		}
962 		t++;
963 	}
964 
965 	return(ENXIO);
966 }
967 
/*
 * Force the GEnesis into reset, then bring it out of reset and
 * re-establish baseline operating state (packet arbiter, RAM
 * interface, interrupt moderation).
 */
static void sk_reset(sc)
	struct sk_softc		*sc;
{
	/* Pulse both the software and master reset bits. */
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_RESET);
	DELAY(1000);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_UNRESET);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_UNRESET);

	/* Configure packet arbiter */
	sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
	sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
         * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents 18.825ns, so to specify a timeout in
	 * microseconds, we have to multiply by 54.
	 * NOTE(review): 18.825ns * 54 is ~1.017us; presumably
	 * SK_IM_USECS() encapsulates that scaling -- confirm in the
	 * register header.
	 */
        sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
        sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
        sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

	return;
}
1005 
1006 static int sk_probe_xmac(dev)
1007 	device_t		dev;
1008 {
1009 	/*
1010 	 * Not much to do here. We always know there will be
1011 	 * at least one XMAC present, and if there are two,
1012 	 * sk_attach() will create a second device instance
1013 	 * for us.
1014 	 */
1015 	device_set_desc(dev, "XaQti Corp. XMAC II");
1016 
1017 	return(0);
1018 }
1019 
1020 /*
1021  * Each XMAC chip is attached as a separate logical IP interface.
1022  * Single port cards will have only one logical interface of course.
1023  */
1024 static int sk_attach_xmac(dev)
1025 	device_t		dev;
1026 {
1027 	struct sk_softc		*sc;
1028 	struct sk_if_softc	*sc_if;
1029 	struct ifnet		*ifp;
1030 	int			i, port;
1031 
1032 	if (dev == NULL)
1033 		return(EINVAL);
1034 
1035 	sc_if = device_get_softc(dev);
1036 	sc = device_get_softc(device_get_parent(dev));
1037 	SK_LOCK(sc);
1038 	port = *(int *)device_get_ivars(dev);
1039 	free(device_get_ivars(dev), M_DEVBUF);
1040 	device_set_ivars(dev, NULL);
1041 	sc_if->sk_dev = dev;
1042 
1043 	bzero((char *)sc_if, sizeof(struct sk_if_softc));
1044 
1045 	sc_if->sk_dev = dev;
1046 	sc_if->sk_unit = device_get_unit(dev);
1047 	sc_if->sk_port = port;
1048 	sc_if->sk_softc = sc;
1049 	sc->sk_if[port] = sc_if;
1050 	if (port == SK_PORT_A)
1051 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1052 	if (port == SK_PORT_B)
1053 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1054 
1055 	/*
1056 	 * Get station address for this interface. Note that
1057 	 * dual port cards actually come with three station
1058 	 * addresses: one for each port, plus an extra. The
1059 	 * extra one is used by the SysKonnect driver software
1060 	 * as a 'virtual' station address for when both ports
1061 	 * are operating in failover mode. Currently we don't
1062 	 * use this extra address.
1063 	 */
1064 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1065 		sc_if->arpcom.ac_enaddr[i] =
1066 		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1067 
1068 	printf("sk%d: Ethernet address: %6D\n",
1069 	    sc_if->sk_unit, sc_if->arpcom.ac_enaddr, ":");
1070 
1071 	/*
1072 	 * Set up RAM buffer addresses. The NIC will have a certain
1073 	 * amount of SRAM on it, somewhere between 512K and 2MB. We
1074 	 * need to divide this up a) between the transmitter and
1075  	 * receiver and b) between the two XMACs, if this is a
1076 	 * dual port NIC. Our algotithm is to divide up the memory
1077 	 * evenly so that everyone gets a fair share.
1078 	 */
1079 	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1080 		u_int32_t		chunk, val;
1081 
1082 		chunk = sc->sk_ramsize / 2;
1083 		val = sc->sk_rboff / sizeof(u_int64_t);
1084 		sc_if->sk_rx_ramstart = val;
1085 		val += (chunk / sizeof(u_int64_t));
1086 		sc_if->sk_rx_ramend = val - 1;
1087 		sc_if->sk_tx_ramstart = val;
1088 		val += (chunk / sizeof(u_int64_t));
1089 		sc_if->sk_tx_ramend = val - 1;
1090 	} else {
1091 		u_int32_t		chunk, val;
1092 
1093 		chunk = sc->sk_ramsize / 4;
1094 		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1095 		    sizeof(u_int64_t);
1096 		sc_if->sk_rx_ramstart = val;
1097 		val += (chunk / sizeof(u_int64_t));
1098 		sc_if->sk_rx_ramend = val - 1;
1099 		sc_if->sk_tx_ramstart = val;
1100 		val += (chunk / sizeof(u_int64_t));
1101 		sc_if->sk_tx_ramend = val - 1;
1102 	}
1103 
1104 	/* Read and save PHY type and set PHY address */
1105 	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1106 	switch(sc_if->sk_phytype) {
1107 	case SK_PHYTYPE_XMAC:
1108 		sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1109 		break;
1110 	case SK_PHYTYPE_BCOM:
1111 		sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1112 		break;
1113 	default:
1114 		printf("skc%d: unsupported PHY type: %d\n",
1115 		    sc->sk_unit, sc_if->sk_phytype);
1116 		SK_UNLOCK(sc);
1117 		return(ENODEV);
1118 	}
1119 
1120 	/* Allocate the descriptor queues. */
1121 	sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
1122 	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1123 
1124 	if (sc_if->sk_rdata == NULL) {
1125 		printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
1126 		sc->sk_if[port] = NULL;
1127 		SK_UNLOCK(sc);
1128 		return(ENOMEM);
1129 	}
1130 
1131 	bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));
1132 
1133 	/* Try to allocate memory for jumbo buffers. */
1134 	if (sk_alloc_jumbo_mem(sc_if)) {
1135 		printf("sk%d: jumbo buffer allocation failed\n",
1136 		    sc_if->sk_unit);
1137 		contigfree(sc_if->sk_rdata,
1138 		    sizeof(struct sk_ring_data), M_DEVBUF);
1139 		sc->sk_if[port] = NULL;
1140 		SK_UNLOCK(sc);
1141 		return(ENOMEM);
1142 	}
1143 
1144 	ifp = &sc_if->arpcom.ac_if;
1145 	ifp->if_softc = sc_if;
1146 	ifp->if_unit = sc_if->sk_unit;
1147 	ifp->if_name = "sk";
1148 	ifp->if_mtu = ETHERMTU;
1149 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1150 	ifp->if_ioctl = sk_ioctl;
1151 	ifp->if_output = ether_output;
1152 	ifp->if_start = sk_start;
1153 	ifp->if_watchdog = sk_watchdog;
1154 	ifp->if_init = sk_init;
1155 	ifp->if_baudrate = 1000000000;
1156 	ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1;
1157 
1158 	/*
1159 	 * Do miibus setup.
1160 	 */
1161 	sk_init_xmac(sc_if);
1162 	if (mii_phy_probe(dev, &sc_if->sk_miibus,
1163 	    sk_ifmedia_upd, sk_ifmedia_sts)) {
1164 		printf("skc%d: no PHY found!\n", sc_if->sk_unit);
1165 		contigfree(sc_if->sk_rdata,
1166 		    sizeof(struct sk_ring_data), M_DEVBUF);
1167 		SK_UNLOCK(sc);
1168 		return(ENXIO);
1169 	}
1170 
1171 	/*
1172 	 * Call MI attach routine.
1173 	 */
1174 	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
1175 	callout_handle_init(&sc_if->sk_tick_ch);
1176 
1177 	SK_UNLOCK(sc);
1178 
1179 	return(0);
1180 }
1181 
1182 /*
1183  * Attach the interface. Allocate softc structures, do ifmedia
1184  * setup and ethernet/BPF attach.
1185  */
1186 static int sk_attach(dev)
1187 	device_t		dev;
1188 {
1189 	u_int32_t		command;
1190 	struct sk_softc		*sc;
1191 	int			unit, error = 0, rid, *port;
1192 
1193 	sc = device_get_softc(dev);
1194 	unit = device_get_unit(dev);
1195 	bzero(sc, sizeof(struct sk_softc));
1196 
1197 	mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_DEF);
1198 	SK_LOCK(sc);
1199 
1200 	/*
1201 	 * Handle power management nonsense.
1202 	 */
1203 	command = pci_read_config(dev, SK_PCI_CAPID, 4) & 0x000000FF;
1204 	if (command == 0x01) {
1205 
1206 		command = pci_read_config(dev, SK_PCI_PWRMGMTCTRL, 4);
1207 		if (command & SK_PSTATE_MASK) {
1208 			u_int32_t		iobase, membase, irq;
1209 
1210 			/* Save important PCI config data. */
1211 			iobase = pci_read_config(dev, SK_PCI_LOIO, 4);
1212 			membase = pci_read_config(dev, SK_PCI_LOMEM, 4);
1213 			irq = pci_read_config(dev, SK_PCI_INTLINE, 4);
1214 
1215 			/* Reset the power state. */
1216 			printf("skc%d: chip is in D%d power mode "
1217 			"-- setting to D0\n", unit, command & SK_PSTATE_MASK);
1218 			command &= 0xFFFFFFFC;
1219 			pci_write_config(dev, SK_PCI_PWRMGMTCTRL, command, 4);
1220 
1221 			/* Restore PCI config data. */
1222 			pci_write_config(dev, SK_PCI_LOIO, iobase, 4);
1223 			pci_write_config(dev, SK_PCI_LOMEM, membase, 4);
1224 			pci_write_config(dev, SK_PCI_INTLINE, irq, 4);
1225 		}
1226 	}
1227 
1228 	/*
1229 	 * Map control/status registers.
1230 	 */
1231 	command = pci_read_config(dev, PCIR_COMMAND, 4);
1232 	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
1233 	pci_write_config(dev, PCIR_COMMAND, command, 4);
1234 	command = pci_read_config(dev, PCIR_COMMAND, 4);
1235 
1236 #ifdef SK_USEIOSPACE
1237 	if (!(command & PCIM_CMD_PORTEN)) {
1238 		printf("skc%d: failed to enable I/O ports!\n", unit);
1239 		error = ENXIO;
1240 		goto fail;
1241 	}
1242 #else
1243 	if (!(command & PCIM_CMD_MEMEN)) {
1244 		printf("skc%d: failed to enable memory mapping!\n", unit);
1245 		error = ENXIO;
1246 		goto fail;
1247 	}
1248 #endif
1249 
1250 	rid = SK_RID;
1251 	sc->sk_res = bus_alloc_resource(dev, SK_RES, &rid,
1252 	    0, ~0, 1, RF_ACTIVE);
1253 
1254 	if (sc->sk_res == NULL) {
1255 		printf("sk%d: couldn't map ports/memory\n", unit);
1256 		error = ENXIO;
1257 		goto fail;
1258 	}
1259 
1260 	sc->sk_btag = rman_get_bustag(sc->sk_res);
1261 	sc->sk_bhandle = rman_get_bushandle(sc->sk_res);
1262 
1263 	/* Allocate interrupt */
1264 	rid = 0;
1265 	sc->sk_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1266 	    RF_SHAREABLE | RF_ACTIVE);
1267 
1268 	if (sc->sk_irq == NULL) {
1269 		printf("skc%d: couldn't map interrupt\n", unit);
1270 		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1271 		error = ENXIO;
1272 		goto fail;
1273 	}
1274 
1275 	error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET,
1276 	    sk_intr, sc, &sc->sk_intrhand);
1277 
1278 	if (error) {
1279 		printf("skc%d: couldn't set up irq\n", unit);
1280 		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1281 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1282 		goto fail;
1283 	}
1284 
1285 	/* Reset the adapter. */
1286 	sk_reset(sc);
1287 
1288 	sc->sk_unit = unit;
1289 
1290 	/* Read and save vital product data from EEPROM. */
1291 	sk_vpd_read(sc);
1292 
1293 	/* Read and save RAM size and RAMbuffer offset */
1294 	switch(sk_win_read_1(sc, SK_EPROM0)) {
1295 	case SK_RAMSIZE_512K_64:
1296 		sc->sk_ramsize = 0x80000;
1297 		sc->sk_rboff = SK_RBOFF_0;
1298 		break;
1299 	case SK_RAMSIZE_1024K_64:
1300 		sc->sk_ramsize = 0x100000;
1301 		sc->sk_rboff = SK_RBOFF_80000;
1302 		break;
1303 	case SK_RAMSIZE_1024K_128:
1304 		sc->sk_ramsize = 0x100000;
1305 		sc->sk_rboff = SK_RBOFF_0;
1306 		break;
1307 	case SK_RAMSIZE_2048K_128:
1308 		sc->sk_ramsize = 0x200000;
1309 		sc->sk_rboff = SK_RBOFF_0;
1310 		break;
1311 	default:
1312 		printf("skc%d: unknown ram size: %d\n",
1313 		    sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
1314 		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1315 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1316 		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1317 		error = ENXIO;
1318 		goto fail;
1319 		break;
1320 	}
1321 
1322 	/* Read and save physical media type */
1323 	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
1324 	case SK_PMD_1000BASESX:
1325 		sc->sk_pmd = IFM_1000_SX;
1326 		break;
1327 	case SK_PMD_1000BASELX:
1328 		sc->sk_pmd = IFM_1000_LX;
1329 		break;
1330 	case SK_PMD_1000BASECX:
1331 		sc->sk_pmd = IFM_1000_CX;
1332 		break;
1333 	case SK_PMD_1000BASETX:
1334 		sc->sk_pmd = IFM_1000_TX;
1335 		break;
1336 	default:
1337 		printf("skc%d: unknown media type: 0x%x\n",
1338 		    sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
1339 		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1340 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1341 		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1342 		error = ENXIO;
1343 		goto fail;
1344 	}
1345 
1346 	/* Announce the product name. */
1347 	printf("skc%d: %s\n", sc->sk_unit, sc->sk_vpd_prodname);
1348 	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1349 	port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1350 	*port = SK_PORT_A;
1351 	device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1352 
1353 	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1354 		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1355 		port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1356 		*port = SK_PORT_B;
1357 		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1358 	}
1359 
1360 	/* Turn on the 'driver is loaded' LED. */
1361 	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1362 
1363 	bus_generic_attach(dev);
1364 	SK_UNLOCK(sc);
1365 	return(0);
1366 
1367 fail:
1368 	SK_UNLOCK(sc);
1369 	mtx_destroy(&sc->sk_mtx);
1370 	return(error);
1371 }
1372 
/*
 * Detach a single XMAC port: stop the chip, detach from the network
 * stack, delete the miibus child and free the descriptor ring and
 * jumbo buffer memory.  The parent skc device releases the shared
 * bus resources in sk_detach().
 */
static int sk_detach_xmac(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;

	sc = device_get_softc(device_get_parent(dev));
	sc_if = device_get_softc(dev);
	SK_IF_LOCK(sc_if);

	ifp = &sc_if->arpcom.ac_if;
	/* Quiesce the hardware before tearing down software state. */
	sk_stop(sc_if);
	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
	bus_generic_detach(dev);
	if (sc_if->sk_miibus != NULL)
		device_delete_child(dev, sc_if->sk_miibus);
	/* Free the jumbo buffer pool and the RX/TX descriptor rings. */
	contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
	contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data), M_DEVBUF);
	SK_IF_UNLOCK(sc_if);

	return(0);
}
1396 
/*
 * Detach the parent skc controller: detach/delete both possible
 * "sk" port children, then release the interrupt handler, IRQ and
 * register resources acquired in sk_attach(), and finally destroy
 * the softc mutex.
 */
static int sk_detach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	sc = device_get_softc(dev);
	SK_LOCK(sc);

	/* Detach the per-port interfaces before freeing shared state. */
	bus_generic_detach(dev);
	if (sc->sk_devs[SK_PORT_A] != NULL)
		device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
	if (sc->sk_devs[SK_PORT_B] != NULL)
		device_delete_child(dev, sc->sk_devs[SK_PORT_B]);

	bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
	bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);

	SK_UNLOCK(sc);
	mtx_destroy(&sc->sk_mtx);

	return(0);
}
1420 
/*
 * Encapsulate an mbuf chain into the transmit descriptor ring
 * starting at *txidx.  Returns ENOBUFS (leaving the ring
 * untouched from the NIC's point of view) if there is not enough
 * room; on success advances *txidx past the consumed descriptors.
 */
static int sk_encap(sc_if, m_head, txidx)
        struct sk_if_softc	*sc_if;
        struct mbuf		*m_head;
        u_int32_t		*txidx;
{
	struct sk_tx_desc	*f = NULL;
	struct mbuf		*m;
	u_int32_t		frag, cur, cnt = 0;

	m = m_head;
	cur = frag = *txidx;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			/* Always keep at least 2 descriptors free. */
			if ((SK_TX_RING_CNT -
			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc_if->sk_rdata->sk_tx_ring[frag];
			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
			/*
			 * The OWN bit on the FIRST descriptor is
			 * deliberately withheld here and only set
			 * after the whole chain is built (below),
			 * so the NIC can't start on a partial frame.
			 */
			if (cnt == 0)
				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
			else
				f->sk_ctl |= SK_TXCTL_OWN;
			cur = frag;
			SK_INC(frag, SK_TX_RING_CNT);
			cnt++;
		}
	}

	if (m != NULL)
		return(ENOBUFS);

	/* Mark the last fragment and ask for an EOF interrupt. */
	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
		SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	/* Hand the completed chain to the NIC in one step. */
	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
	sc_if->sk_cdata.sk_tx_cnt += cnt;

	*txidx = frag;

	return(0);
}
1469 
1470 static void sk_start(ifp)
1471 	struct ifnet		*ifp;
1472 {
1473         struct sk_softc		*sc;
1474         struct sk_if_softc	*sc_if;
1475         struct mbuf		*m_head = NULL;
1476         u_int32_t		idx;
1477 
1478 	sc_if = ifp->if_softc;
1479 	sc = sc_if->sk_softc;
1480 
1481 	SK_IF_LOCK(sc_if);
1482 
1483 	idx = sc_if->sk_cdata.sk_tx_prod;
1484 
1485 	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
1486 		IF_DEQUEUE(&ifp->if_snd, m_head);
1487 		if (m_head == NULL)
1488 			break;
1489 
1490 		/*
1491 		 * Pack the data into the transmit ring. If we
1492 		 * don't have room, set the OACTIVE flag and wait
1493 		 * for the NIC to drain the ring.
1494 		 */
1495 		if (sk_encap(sc_if, m_head, &idx)) {
1496 			IF_PREPEND(&ifp->if_snd, m_head);
1497 			ifp->if_flags |= IFF_OACTIVE;
1498 			break;
1499 		}
1500 
1501 		/*
1502 		 * If there's a BPF listener, bounce a copy of this frame
1503 		 * to him.
1504 		 */
1505 		if (ifp->if_bpf)
1506 			bpf_mtap(ifp, m_head);
1507 	}
1508 
1509 	/* Transmit */
1510 	sc_if->sk_cdata.sk_tx_prod = idx;
1511 	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
1512 
1513 	/* Set a timeout in case the chip goes out to lunch. */
1514 	ifp->if_timer = 5;
1515 	SK_IF_UNLOCK(sc_if);
1516 
1517 	return;
1518 }
1519 
1520 
1521 static void sk_watchdog(ifp)
1522 	struct ifnet		*ifp;
1523 {
1524 	struct sk_if_softc	*sc_if;
1525 
1526 	sc_if = ifp->if_softc;
1527 
1528 	printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
1529 	sk_init(sc_if);
1530 
1531 	return;
1532 }
1533 
1534 static void sk_shutdown(dev)
1535 	device_t		dev;
1536 {
1537 	struct sk_softc		*sc;
1538 
1539 	sc = device_get_softc(dev);
1540 	SK_LOCK(sc);
1541 
1542 	/* Turn off the 'driver is loaded' LED. */
1543 	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1544 
1545 	/*
1546 	 * Reset the GEnesis controller. Doing this should also
1547 	 * assert the resets on the attached XMAC(s).
1548 	 */
1549 	sk_reset(sc);
1550 	SK_UNLOCK(sc);
1551 
1552 	return;
1553 }
1554 
/*
 * RX completion handler: harvest every descriptor the NIC has
 * handed back (OWN bit clear), pass good frames up the stack and
 * recycle or replenish the jumbo receive buffers.
 */
static void sk_rxeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sk_chain		*cur_rx;
	int			total_len = 0;
	int			i;
	u_int32_t		rxstat;

	ifp = &sc_if->arpcom.ac_if;
	i = sc_if->sk_cdata.sk_rx_prod;
	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];

	/* Stop at the first descriptor still owned by the NIC. */
	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {

		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
		SK_INC(i, SK_RX_RING_CNT);

		if (rxstat & XM_RXSTAT_ERRFRAME) {
			/* Bad frame: recycle the buffer back into the ring. */
			ifp->if_ierrors++;
			sk_newbuf(sc_if, cur_rx, m);
			continue;
		}

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
			struct mbuf		*m0;
			/*
			 * Copy including ETHER_ALIGN bytes of leading
			 * pad so the payload stays aligned after the
			 * m_adj() below.
			 */
			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
			    total_len + ETHER_ALIGN, 0, ifp, NULL);
			sk_newbuf(sc_if, cur_rx, m);
			if (m0 == NULL) {
				printf("sk%d: no receive buffers "
				    "available -- packet dropped!\n",
				    sc_if->sk_unit);
				ifp->if_ierrors++;
				continue;
			}
			m_adj(m0, ETHER_ALIGN);
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);

		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof(struct ether_header));
		ether_input(ifp, eh, m);
	}

	/* Remember where to resume harvesting next time. */
	sc_if->sk_cdata.sk_rx_prod = i;

	return;
}
1623 
/*
 * TX completion handler: walk the ring from the consumer index,
 * reclaiming descriptors the NIC has released and freeing the
 * associated mbuf chains.
 */
static void sk_txeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_tx_desc	*cur_tx = NULL;
	struct ifnet		*ifp;
	u_int32_t		idx;

	ifp = &sc_if->arpcom.ac_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = sc_if->sk_cdata.sk_tx_cons;
	while(idx != sc_if->sk_cdata.sk_tx_prod) {
		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
		/* Still owned by the NIC: nothing more to reclaim. */
		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
			break;
		/* Count one packet per frame, not per fragment. */
		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
			ifp->if_opackets++;
		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(idx, SK_TX_RING_CNT);
		/* Forward progress was made; disarm the watchdog. */
		ifp->if_timer = 0;
	}

	sc_if->sk_cdata.sk_tx_cons = idx;

	/* Ring space was freed, so transmission may resume. */
	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;

	return;
}
1660 
/*
 * Periodic (1 Hz) link poller, armed from sk_intr_xmac() when the
 * link drops.  For the internal XMAC PHY it samples the GP0 pin
 * (wired to link_sync); once the link reads up three times in a
 * row the GP0 interrupt is re-enabled and the timeout cancelled.
 */
static void sk_tick(xsc_if)
	void			*xsc_if;
{
	struct sk_if_softc	*sc_if;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			i;

	sc_if = xsc_if;
	SK_IF_LOCK(sc_if);
	ifp = &sc_if->arpcom.ac_if;
	mii = device_get_softc(sc_if->sk_miibus);

	/* Interface went down in the meantime: just stop polling. */
	if (!(ifp->if_flags & IFF_UP)) {
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/* Broadcom PHYs are handled by their own interrupt routine. */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link_sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	/* Link not stable yet: try again in one second. */
	if (i != 3) {
		sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	SK_XM_READ_2(sc_if, XM_ISR);	/* reading ISR clears pending bits */
	mii_tick(mii);
	mii_pollstat(mii);
	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);

	SK_IF_UNLOCK(sc_if);
	return;
}
1713 
/*
 * Service a Broadcom PHY interrupt: clear the PHY's pending
 * interrupts and track link state changes, updating the link LED
 * and the driver's sk_link flag.  The XMAC's TX/RX paths are
 * disabled around the PHY access and re-enabled afterwards.
 */
static void sk_intr_bcom(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			status;

	sc = sc_if->sk_softc;
	mii = device_get_softc(sc_if->sk_miibus);
	ifp = &sc_if->arpcom.ac_if;

	/* Pause the MAC while we poke at the PHY. */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	status = sk_miibus_readreg(sc_if->sk_dev,
	    SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

	/*
	 * Not running: reinitialize the whole MAC instead.
	 * NOTE(review): this path returns without re-enabling
	 * TX/RX here -- presumably sk_init_xmac() reprograms the
	 * MMU command register; verify against sk_init_xmac().
	 */
	if (!(ifp->if_flags & IFF_RUNNING)) {
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
		int			lstat;
		lstat = sk_miibus_readreg(sc_if->sk_dev,
		    SK_PHYADDR_BCOM, BRGPHY_MII_AUXSTS);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link lost: renegotiate and note it. */
			mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			/* Link (re)established: unmask PHY interrupts. */
			sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM,
	    		    BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
			    SK_LINKLED_BLINK_OFF);
			mii_pollstat(mii);
		} else {
			/* No link yet: keep polling via sk_tick(). */
			mii_tick(mii);
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}
	}

	/* Resume the MAC. */
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	return;
}
1771 
/*
 * Service an XMAC interrupt: on link loss or autonegotiation
 * completion (internal PHY only) arm the sk_tick() poller, and
 * flush the FIFOs on TX underrun / RX overrun conditions.
 */
static void sk_intr_xmac(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	u_int16_t		status;
	struct mii_data		*mii;

	sc = sc_if->sk_softc;
	mii = device_get_softc(sc_if->sk_miibus);
	status = SK_XM_READ_2(sc_if, XM_ISR);

	/*
	 * Link has gone down. Start MII tick timeout to
	 * watch for link resync.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
		if (status & XM_ISR_GP0_SET) {
			/* Mask GP0 until sk_tick() sees the link back up. */
			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}

		if (status & XM_ISR_AUTONEG_DONE) {
			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		}
	}

	if (status & XM_IMR_TX_UNDERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

	if (status & XM_IMR_RX_OVERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);

	/* Re-read ISR to acknowledge anything that arrived meanwhile. */
	status = SK_XM_READ_2(sc_if, XM_ISR);

	return;
}
1808 
1809 static void sk_intr(xsc)
1810 	void			*xsc;
1811 {
1812 	struct sk_softc		*sc = xsc;
1813 	struct sk_if_softc	*sc_if0 = NULL, *sc_if1 = NULL;
1814 	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
1815 	u_int32_t		status;
1816 
1817 	SK_LOCK(sc);
1818 
1819 	sc_if0 = sc->sk_if[SK_PORT_A];
1820 	sc_if1 = sc->sk_if[SK_PORT_B];
1821 
1822 	if (sc_if0 != NULL)
1823 		ifp0 = &sc_if0->arpcom.ac_if;
1824 	if (sc_if1 != NULL)
1825 		ifp1 = &sc_if1->arpcom.ac_if;
1826 
1827 	for (;;) {
1828 		status = CSR_READ_4(sc, SK_ISSR);
1829 		if (!(status & sc->sk_intrmask))
1830 			break;
1831 
1832 		/* Handle receive interrupts first. */
1833 		if (status & SK_ISR_RX1_EOF) {
1834 			sk_rxeof(sc_if0);
1835 			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
1836 			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1837 		}
1838 		if (status & SK_ISR_RX2_EOF) {
1839 			sk_rxeof(sc_if1);
1840 			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
1841 			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1842 		}
1843 
1844 		/* Then transmit interrupts. */
1845 		if (status & SK_ISR_TX1_S_EOF) {
1846 			sk_txeof(sc_if0);
1847 			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
1848 			    SK_TXBMU_CLR_IRQ_EOF);
1849 		}
1850 		if (status & SK_ISR_TX2_S_EOF) {
1851 			sk_txeof(sc_if1);
1852 			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
1853 			    SK_TXBMU_CLR_IRQ_EOF);
1854 		}
1855 
1856 		/* Then MAC interrupts. */
1857 		if (status & SK_ISR_MAC1 &&
1858 		    ifp0->if_flags & IFF_RUNNING)
1859 			sk_intr_xmac(sc_if0);
1860 
1861 		if (status & SK_ISR_MAC2 &&
1862 		    ifp1->if_flags & IFF_RUNNING)
1863 			sk_intr_xmac(sc_if1);
1864 
1865 		if (status & SK_ISR_EXTERNAL_REG) {
1866 			if (ifp0 != NULL)
1867 				sk_intr_bcom(sc_if0);
1868 			if (ifp1 != NULL)
1869 				sk_intr_bcom(sc_if1);
1870 		}
1871 	}
1872 
1873 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
1874 
1875 	if (ifp0 != NULL && ifp0->if_snd.ifq_head != NULL)
1876 		sk_start(ifp0);
1877 	if (ifp1 != NULL && ifp1->if_snd.ifq_head != NULL)
1878 		sk_start(ifp1);
1879 
1880 	SK_UNLOCK(sc);
1881 
1882 	return;
1883 }
1884 
/*
 * Bring an XMAC out of reset and program it from scratch: station
 * address, RX filtering mode, FCS stripping, error-frame handling,
 * interrupt masks and MAC arbiter timing.  For Broadcom (GMII)
 * PHYs this also takes the PHY out of reset and applies the
 * BCM5400 register workaround.
 */
static void sk_init_xmac(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	/* Magic register/value pairs for the early-BCM5400 workaround. */
	struct sk_bcom_hack	bhack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	{ 0, 0 } };

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int			i = 0;
		u_int32_t		val;

		/* Take PHY out of reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		/* Reset the PHY, then unmask its interrupts. */
		sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM,
		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
		DELAY(10000);
		sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM,
		    BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		if (sk_miibus_readreg(sc_if->sk_dev,
		    SK_PHYADDR_BCOM, 0x03) == 0x6041) {
			while(bhack[i].reg) {
				sk_miibus_writereg(sc_if->sk_dev,
				    SK_PHYADDR_BCOM, bhack[i].reg,
				    bhack[i].val);
				i++;
			}
		}
	}

	/*
	 * Set station address.
	 * NOTE(review): these 16-bit loads assume ac_enaddr is
	 * 2-byte aligned -- confirm for the target platforms.
	 */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	if (ifp->if_flags & IFF_PROMISC) {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	} else {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	}

	if (ifp->if_flags & IFF_BROADCAST) {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	} else {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	}

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
	    XM_MODE_RX_INRANGELEN);

	/* Permit oversized frames only when the MTU requires them. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
	else
		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/* Clear and enable interrupts */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter timing per XMAC revision. */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	sc_if->sk_link = 1;

	return;
}
2052 
2053 /*
2054  * Note that to properly initialize any part of the GEnesis chip,
2055  * you first have to take it out of reset mode.
2056  */
static void sk_init(xsc)
	void			*xsc;
{
	struct sk_if_softc	*sc_if = xsc;
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	struct mii_data		*mii;

	/*
	 * Bring the port up from scratch.  The register writes below are
	 * order-dependent: the chip must be configured bottom-up (MAC,
	 * FIFOs, transmit arbiter, RAM buffers, BMUs) before the receive
	 * BMU is started at the end.
	 */
	SK_IF_LOCK(sc_if);

	ifp = &sc_if->arpcom.ac_if;
	sc = sc_if->sk_softc;
	mii = device_get_softc(sc_if->sk_miibus);

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(sc_if);

	/* Configure LINK_SYNC LED */
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_ON);

	/* Configure RX LED */
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_START);

	/* Configure TX LED */
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_START);

	/* Configure I2C registers */
	/* (intentionally empty: no I2C setup is performed here) */

	/* Configure XMAC(s) */
	sk_init_xmac(sc_if);
	/* Kick the PHY/media layer to (re)negotiate the link. */
	mii_mediachg(mii);

	/* Configure MAC FIFOs: take them out of reset, then enable. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/*
	 * Configure RAMbuffers: point read/write pointers at the start
	 * of this port's on-chip RAM region (bounds were computed at
	 * attach time into sk_rx_ramstart/sk_rx_ramend etc.).
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	/* TX RAM buffer runs in store-and-forward mode. */
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/*
	 * Configure BMUs: hand each DMA engine the physical address of
	 * the first descriptor in its ring.  vtophys() implies the rings
	 * must live in physically contiguous memory; only the low 32
	 * address bits are programmed (the HI half is zeroed).
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);

	/*
	 * Init descriptors.  RX ring setup can fail if mbuf clusters
	 * are exhausted; in that case tear the port back down and bail.
	 */
	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("sk%d: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_unit);
		sk_stop(sc_if);
		SK_IF_UNLOCK(sc_if);
		return;
	}
	sk_init_tx_ring(sc_if);

	/*
	 * Configure interrupt handling.  The ISSR read's result is
	 * discarded; presumably it acknowledges any stale interrupt
	 * status before unmasking (read-to-clear) -- confirm against
	 * the GEnesis manual.  Each port unmasks only its own sources.
	 */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	/* Enable XMACs TX and RX state machines */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/* Mark the interface up and ready to accept packets. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	SK_IF_UNLOCK(sc_if);

	return;
}
2165 
2166 static void sk_stop(sc_if)
2167 	struct sk_if_softc	*sc_if;
2168 {
2169 	int			i;
2170 	struct sk_softc		*sc;
2171 	struct ifnet		*ifp;
2172 
2173 	SK_IF_LOCK(sc_if);
2174 	sc = sc_if->sk_softc;
2175 	ifp = &sc_if->arpcom.ac_if;
2176 
2177 	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);
2178 
2179 	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2180 		u_int32_t		val;
2181 
2182 		/* Put PHY back into reset. */
2183 		val = sk_win_read_4(sc, SK_GPIO);
2184 		if (sc_if->sk_port == SK_PORT_A) {
2185 			val |= SK_GPIO_DIR0;
2186 			val &= ~SK_GPIO_DAT0;
2187 		} else {
2188 			val |= SK_GPIO_DIR2;
2189 			val &= ~SK_GPIO_DAT2;
2190 		}
2191 		sk_win_write_4(sc, SK_GPIO, val);
2192 	}
2193 
2194 	/* Turn off various components of this interface. */
2195 	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2196 	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
2197 	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
2198 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
2199 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2200 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
2201 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2202 	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
2203 	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2204 	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2205 	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
2206 	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
2207 
2208 	/* Disable interrupts */
2209 	if (sc_if->sk_port == SK_PORT_A)
2210 		sc->sk_intrmask &= ~SK_INTRS1;
2211 	else
2212 		sc->sk_intrmask &= ~SK_INTRS2;
2213 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2214 
2215 	SK_XM_READ_2(sc_if, XM_ISR);
2216 	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2217 
2218 	/* Free RX and TX mbufs still in the queues. */
2219 	for (i = 0; i < SK_RX_RING_CNT; i++) {
2220 		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
2221 			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
2222 			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
2223 		}
2224 	}
2225 
2226 	for (i = 0; i < SK_TX_RING_CNT; i++) {
2227 		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
2228 			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
2229 			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
2230 		}
2231 	}
2232 
2233 	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
2234 	SK_IF_UNLOCK(sc_if);
2235 	return;
2236 }
2237