xref: /freebsd/sys/dev/sk/if_sk.c (revision d429ea332342fcb98d27a350d0c4944bf9aec3f9)
1 /*	$OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997, 1998, 1999, 2000
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 /*-
35  * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
36  *
37  * Permission to use, copy, modify, and distribute this software for any
38  * purpose with or without fee is hereby granted, provided that the above
39  * copyright notice and this permission notice appear in all copies.
40  *
41  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48  */
49 
50 #include <sys/cdefs.h>
51 __FBSDID("$FreeBSD$");
52 
53 /*
54  * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55  * the SK-984x series adapters, both single port and dual port.
56  * References:
57  * 	The XaQti XMAC II datasheet,
58  *  http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59  *	The SysKonnect GEnesis manual, http://www.syskonnect.com
60  *
61  * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62  * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63  * convenience to others until Vitesse corrects this problem:
64  *
65  * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
66  *
67  * Written by Bill Paul <wpaul@ee.columbia.edu>
68  * Department of Electrical Engineering
69  * Columbia University, New York City
70  */
71 /*
72  * The SysKonnect gigabit ethernet adapters consist of two main
73  * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
74  * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
75  * components and a PHY while the GEnesis controller provides a PCI
76  * interface with DMA support. Each card may have between 512K and
77  * 2MB of SRAM on board depending on the configuration.
78  *
79  * The SysKonnect GEnesis controller can have either one or two XMAC
80  * chips connected to it, allowing single or dual port NIC configurations.
81  * SysKonnect has the distinction of being the only vendor on the market
82  * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
83  * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
84  * XMAC registers. This driver takes advantage of these features to allow
85  * both XMACs to operate as independent interfaces.
86  */
87 
88 #include <sys/param.h>
89 #include <sys/systm.h>
90 #include <sys/sockio.h>
91 #include <sys/mbuf.h>
92 #include <sys/malloc.h>
93 #include <sys/kernel.h>
94 #include <sys/module.h>
95 #include <sys/socket.h>
96 #include <sys/queue.h>
97 #include <sys/sysctl.h>
98 
99 #include <net/if.h>
100 #include <net/if_arp.h>
101 #include <net/ethernet.h>
102 #include <net/if_dl.h>
103 #include <net/if_media.h>
104 #include <net/if_types.h>
105 
106 #include <net/bpf.h>
107 
108 #include <vm/vm.h>              /* for vtophys */
109 #include <vm/pmap.h>            /* for vtophys */
110 #include <machine/bus.h>
111 #include <machine/resource.h>
112 #include <sys/bus.h>
113 #include <sys/rman.h>
114 
115 #include <dev/mii/mii.h>
116 #include <dev/mii/miivar.h>
117 #include <dev/mii/brgphyreg.h>
118 
119 #include <dev/pci/pcireg.h>
120 #include <dev/pci/pcivar.h>
121 
122 #if 0
123 #define SK_USEIOSPACE
124 #endif
125 
126 #include <pci/if_skreg.h>
127 #include <pci/xmaciireg.h>
128 #include <pci/yukonreg.h>
129 
130 MODULE_DEPEND(sk, pci, 1, 1, 1);
131 MODULE_DEPEND(sk, ether, 1, 1, 1);
132 MODULE_DEPEND(sk, miibus, 1, 1, 1);
133 
134 /* "controller miibus0" required.  See GENERIC if you get errors here. */
135 #include "miibus_if.h"
136 
137 #ifndef lint
138 static const char rcsid[] =
139   "$FreeBSD$";
140 #endif
141 
142 static struct sk_type sk_devs[] = {
143 	{
144 		VENDORID_SK,
145 		DEVICEID_SK_V1,
146 		"SysKonnect Gigabit Ethernet (V1.0)"
147 	},
148 	{
149 		VENDORID_SK,
150 		DEVICEID_SK_V2,
151 		"SysKonnect Gigabit Ethernet (V2.0)"
152 	},
153 	{
154 		VENDORID_MARVELL,
155 		DEVICEID_SK_V2,
156 		"Marvell Gigabit Ethernet"
157 	},
158 	{
159 		VENDORID_MARVELL,
160 		DEVICEID_BELKIN_5005,
161 		"Belkin F5D5005 Gigabit Ethernet"
162 	},
163 	{
164 		VENDORID_3COM,
165 		DEVICEID_3COM_3C940,
166 		"3Com 3C940 Gigabit Ethernet"
167 	},
168 	{
169 		VENDORID_LINKSYS,
170 		DEVICEID_LINKSYS_EG1032,
171 		"Linksys EG1032 Gigabit Ethernet"
172 	},
173 	{
174 		VENDORID_DLINK,
175 		DEVICEID_DLINK_DGE530T,
176 		"D-Link DGE-530T Gigabit Ethernet"
177 	},
178 	{ 0, 0, NULL }
179 };
180 
181 static int skc_probe(device_t);
182 static int skc_attach(device_t);
183 static int skc_detach(device_t);
184 static void skc_shutdown(device_t);
185 static int sk_detach(device_t);
186 static int sk_probe(device_t);
187 static int sk_attach(device_t);
188 static void sk_tick(void *);
189 static void sk_intr(void *);
190 static void sk_intr_xmac(struct sk_if_softc *);
191 static void sk_intr_bcom(struct sk_if_softc *);
192 static void sk_intr_yukon(struct sk_if_softc *);
193 static void sk_rxeof(struct sk_if_softc *);
194 static void sk_txeof(struct sk_if_softc *);
195 static int sk_encap(struct sk_if_softc *, struct mbuf *,
196 					u_int32_t *);
197 static void sk_start(struct ifnet *);
198 static int sk_ioctl(struct ifnet *, u_long, caddr_t);
199 static void sk_init(void *);
200 static void sk_init_xmac(struct sk_if_softc *);
201 static void sk_init_yukon(struct sk_if_softc *);
202 static void sk_stop(struct sk_if_softc *);
203 static void sk_watchdog(struct ifnet *);
204 static int sk_ifmedia_upd(struct ifnet *);
205 static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
206 static void sk_reset(struct sk_softc *);
207 static int sk_newbuf(struct sk_if_softc *,
208 					struct sk_chain *, struct mbuf *);
209 static int sk_alloc_jumbo_mem(struct sk_if_softc *);
210 static void sk_free_jumbo_mem(struct sk_if_softc *);
211 static void *sk_jalloc(struct sk_if_softc *);
212 static void sk_jfree(void *, void *);
213 static int sk_init_rx_ring(struct sk_if_softc *);
214 static void sk_init_tx_ring(struct sk_if_softc *);
215 static u_int32_t sk_win_read_4(struct sk_softc *, int);
216 static u_int16_t sk_win_read_2(struct sk_softc *, int);
217 static u_int8_t sk_win_read_1(struct sk_softc *, int);
218 static void sk_win_write_4(struct sk_softc *, int, u_int32_t);
219 static void sk_win_write_2(struct sk_softc *, int, u_int32_t);
220 static void sk_win_write_1(struct sk_softc *, int, u_int32_t);
221 static u_int8_t sk_vpd_readbyte(struct sk_softc *, int);
222 static void sk_vpd_read_res(struct sk_softc *, struct vpd_res *, int);
223 static void sk_vpd_read(struct sk_softc *);
224 
225 static int sk_miibus_readreg(device_t, int, int);
226 static int sk_miibus_writereg(device_t, int, int, int);
227 static void sk_miibus_statchg(device_t);
228 
229 static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
230 static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int,
231 						int);
232 static void sk_xmac_miibus_statchg(struct sk_if_softc *);
233 
234 static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
235 static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int,
236 						int);
237 static void sk_marv_miibus_statchg(struct sk_if_softc *);
238 
239 static uint32_t sk_xmchash(const uint8_t *);
240 static uint32_t sk_gmchash(const uint8_t *);
241 static void sk_setfilt(struct sk_if_softc *, caddr_t, int);
242 static void sk_setmulti(struct sk_if_softc *);
243 static void sk_setpromisc(struct sk_if_softc *);
244 
245 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high);
246 static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS);
247 
248 #ifdef SK_USEIOSPACE
249 #define SK_RES		SYS_RES_IOPORT
250 #define SK_RID		SK_PCI_LOIO
251 #else
252 #define SK_RES		SYS_RES_MEMORY
253 #define SK_RID		SK_PCI_LOMEM
254 #endif
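
/*
 * A note on register access, for reference: when SK_USEIOSPACE is
 * defined the chip is programmed through a small I/O window, so the
 * sk_win_*() helpers below first select a window by writing
 * SK_WIN(reg) to the RAP register and then access the register at
 * SK_WIN_BASE + SK_REG(reg).  With memory-mapped access (the default
 * here, since SK_USEIOSPACE is left undefined) the register offset is
 * used directly.
 */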
255 
256 /*
257  * Note that we have newbus methods for both the GEnesis controller
258  * itself and the XMAC(s). The XMACs are children of the GEnesis, and
259  * the miibus code is a child of the XMACs. We need to do it this way
260  * so that the miibus drivers can access the PHY registers on the
261  * right PHY. It's not quite what I had in mind, but it's the only
262  * design that achieves the desired effect.
263  */
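
/*
 * For illustration, the hierarchy this produces on a dual port card
 * looks roughly like the following (unit numbers are examples only):
 *
 *	skc0 (GEnesis/Yukon controller, PCI device)
 *	+-- sk0 (port A MAC)  -- miibus -- PHY
 *	+-- sk1 (port B MAC)  -- miibus -- PHY
 */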
264 static device_method_t skc_methods[] = {
265 	/* Device interface */
266 	DEVMETHOD(device_probe,		skc_probe),
267 	DEVMETHOD(device_attach,	skc_attach),
268 	DEVMETHOD(device_detach,	skc_detach),
269 	DEVMETHOD(device_shutdown,	skc_shutdown),
270 
271 	/* bus interface */
272 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
273 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
274 
275 	{ 0, 0 }
276 };
277 
278 static driver_t skc_driver = {
279 	"skc",
280 	skc_methods,
281 	sizeof(struct sk_softc)
282 };
283 
284 static devclass_t skc_devclass;
285 
286 static device_method_t sk_methods[] = {
287 	/* Device interface */
288 	DEVMETHOD(device_probe,		sk_probe),
289 	DEVMETHOD(device_attach,	sk_attach),
290 	DEVMETHOD(device_detach,	sk_detach),
291 	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
292 
293 	/* bus interface */
294 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
295 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
296 
297 	/* MII interface */
298 	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
299 	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
300 	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),
301 
302 	{ 0, 0 }
303 };
304 
305 static driver_t sk_driver = {
306 	"sk",
307 	sk_methods,
308 	sizeof(struct sk_if_softc)
309 };
310 
311 static devclass_t sk_devclass;
312 
313 DRIVER_MODULE(sk, pci, skc_driver, skc_devclass, 0, 0);
314 DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
315 DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
316 
317 #define SK_SETBIT(sc, reg, x)		\
318 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
319 
320 #define SK_CLRBIT(sc, reg, x)		\
321 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
322 
323 #define SK_WIN_SETBIT_4(sc, reg, x)	\
324 	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
325 
326 #define SK_WIN_CLRBIT_4(sc, reg, x)	\
327 	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
328 
329 #define SK_WIN_SETBIT_2(sc, reg, x)	\
330 	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
331 
332 #define SK_WIN_CLRBIT_2(sc, reg, x)	\
333 	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
334 
335 static u_int32_t
336 sk_win_read_4(sc, reg)
337 	struct sk_softc		*sc;
338 	int			reg;
339 {
340 #ifdef SK_USEIOSPACE
341 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
342 	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
343 #else
344 	return(CSR_READ_4(sc, reg));
345 #endif
346 }
347 
348 static u_int16_t
349 sk_win_read_2(sc, reg)
350 	struct sk_softc		*sc;
351 	int			reg;
352 {
353 #ifdef SK_USEIOSPACE
354 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
355 	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
356 #else
357 	return(CSR_READ_2(sc, reg));
358 #endif
359 }
360 
361 static u_int8_t
362 sk_win_read_1(sc, reg)
363 	struct sk_softc		*sc;
364 	int			reg;
365 {
366 #ifdef SK_USEIOSPACE
367 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
368 	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
369 #else
370 	return(CSR_READ_1(sc, reg));
371 #endif
372 }
373 
374 static void
375 sk_win_write_4(sc, reg, val)
376 	struct sk_softc		*sc;
377 	int			reg;
378 	u_int32_t		val;
379 {
380 #ifdef SK_USEIOSPACE
381 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
382 	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
383 #else
384 	CSR_WRITE_4(sc, reg, val);
385 #endif
386 	return;
387 }
388 
389 static void
390 sk_win_write_2(sc, reg, val)
391 	struct sk_softc		*sc;
392 	int			reg;
393 	u_int32_t		val;
394 {
395 #ifdef SK_USEIOSPACE
396 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
397 	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
398 #else
399 	CSR_WRITE_2(sc, reg, val);
400 #endif
401 	return;
402 }
403 
404 static void
405 sk_win_write_1(sc, reg, val)
406 	struct sk_softc		*sc;
407 	int			reg;
408 	u_int32_t		val;
409 {
410 #ifdef SK_USEIOSPACE
411 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
412 	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
413 #else
414 	CSR_WRITE_1(sc, reg, val);
415 #endif
416 	return;
417 }
418 
419 /*
420  * The VPD EEPROM contains Vital Product Data, as suggested in
421  * the PCI 2.1 specification. The VPD data is separated into areas
422  * denoted by resource IDs. The SysKonnect VPD contains an ID string
423  * resource (the name of the adapter), a read-only area resource
424  * containing various key/data fields and a read/write area which
425  * can be used to store asset management information or log messages.
426  * We read the ID string and read-only area into buffers attached to
427  * the controller softc structure for later use. At the moment,
428  * we only use the ID string during skc_attach().
429  */
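
/*
 * For illustration, sk_vpd_read() below walks a layout along these
 * lines (offsets and contents are examples, not a fixed format):
 *
 *	pos 0:                   struct vpd_res, vr_id == VPD_RES_ID
 *	pos sizeof(vpd_res):     product name string, vr_len bytes
 *	following:               struct vpd_res, vr_id == VPD_RES_READ
 *	following:               read-only key/data area, vr_len bytes
 */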
430 static u_int8_t
431 sk_vpd_readbyte(sc, addr)
432 	struct sk_softc		*sc;
433 	int			addr;
434 {
435 	int			i;
436 
437 	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
438 	for (i = 0; i < SK_TIMEOUT; i++) {
439 		DELAY(1);
440 		if (sk_win_read_2(sc,
441 		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
442 			break;
443 	}
444 
445 	if (i == SK_TIMEOUT)
446 		return(0);
447 
448 	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
449 }
450 
451 static void
452 sk_vpd_read_res(sc, res, addr)
453 	struct sk_softc		*sc;
454 	struct vpd_res		*res;
455 	int			addr;
456 {
457 	int			i;
458 	u_int8_t		*ptr;
459 
460 	ptr = (u_int8_t *)res;
461 	for (i = 0; i < sizeof(struct vpd_res); i++)
462 		ptr[i] = sk_vpd_readbyte(sc, i + addr);
463 
464 	return;
465 }
466 
467 static void
468 sk_vpd_read(sc)
469 	struct sk_softc		*sc;
470 {
471 	int			pos = 0, i;
472 	struct vpd_res		res;
473 
474 	if (sc->sk_vpd_prodname != NULL)
475 		free(sc->sk_vpd_prodname, M_DEVBUF);
476 	if (sc->sk_vpd_readonly != NULL)
477 		free(sc->sk_vpd_readonly, M_DEVBUF);
478 	sc->sk_vpd_prodname = NULL;
479 	sc->sk_vpd_readonly = NULL;
480 	sc->sk_vpd_readonly_len = 0;
481 
482 	sk_vpd_read_res(sc, &res, pos);
483 
484 	/*
485 	 * Bail out quietly if the eeprom appears to be missing or empty.
486 	 */
487 	if (res.vr_id == 0xff && res.vr_len == 0xff && res.vr_pad == 0xff)
488 		return;
489 
490 	if (res.vr_id != VPD_RES_ID) {
491 		printf("skc%d: bad VPD resource id: expected %x got %x\n",
492 		    sc->sk_unit, VPD_RES_ID, res.vr_id);
493 		return;
494 	}
495 
496 	pos += sizeof(res);
497 	sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
498 	if (sc->sk_vpd_prodname != NULL) {
499 		for (i = 0; i < res.vr_len; i++)
500 			sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
501 		sc->sk_vpd_prodname[i] = '\0';
502 	}
503 	pos += res.vr_len;
504 
505 	sk_vpd_read_res(sc, &res, pos);
506 
507 	if (res.vr_id != VPD_RES_READ) {
508 		printf("skc%d: bad VPD resource id: expected %x got %x\n",
509 		    sc->sk_unit, VPD_RES_READ, res.vr_id);
510 		return;
511 	}
512 
513 	pos += sizeof(res);
514 	sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
515 	for (i = 0; i < res.vr_len; i++)
516 		sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
517 	sc->sk_vpd_readonly_len = res.vr_len;
518 
519 	return;
520 }
521 
522 static int
523 sk_miibus_readreg(dev, phy, reg)
524 	device_t		dev;
525 	int			phy, reg;
526 {
527 	struct sk_if_softc	*sc_if;
528 
529 	sc_if = device_get_softc(dev);
530 
531 	switch(sc_if->sk_softc->sk_type) {
532 	case SK_GENESIS:
533 		return(sk_xmac_miibus_readreg(sc_if, phy, reg));
534 	case SK_YUKON:
535 	case SK_YUKON_LITE:
536 	case SK_YUKON_LP:
537 		return(sk_marv_miibus_readreg(sc_if, phy, reg));
538 	}
539 
540 	return(0);
541 }
542 
543 static int
544 sk_miibus_writereg(dev, phy, reg, val)
545 	device_t		dev;
546 	int			phy, reg, val;
547 {
548 	struct sk_if_softc	*sc_if;
549 
550 	sc_if = device_get_softc(dev);
551 
552 	switch(sc_if->sk_softc->sk_type) {
553 	case SK_GENESIS:
554 		return(sk_xmac_miibus_writereg(sc_if, phy, reg, val));
555 	case SK_YUKON:
556 	case SK_YUKON_LITE:
557 	case SK_YUKON_LP:
558 		return(sk_marv_miibus_writereg(sc_if, phy, reg, val));
559 	}
560 
561 	return(0);
562 }
563 
564 static void
565 sk_miibus_statchg(dev)
566 	device_t		dev;
567 {
568 	struct sk_if_softc	*sc_if;
569 
570 	sc_if = device_get_softc(dev);
571 
572 	switch(sc_if->sk_softc->sk_type) {
573 	case SK_GENESIS:
574 		sk_xmac_miibus_statchg(sc_if);
575 		break;
576 	case SK_YUKON:
577 	case SK_YUKON_LITE:
578 	case SK_YUKON_LP:
579 		sk_marv_miibus_statchg(sc_if);
580 		break;
581 	}
582 
583 	return;
584 }
585 
586 static int
587 sk_xmac_miibus_readreg(sc_if, phy, reg)
588 	struct sk_if_softc	*sc_if;
589 	int			phy, reg;
590 {
591 	int			i;
592 
593 	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
594 		return(0);
595 
596 	SK_IF_LOCK(sc_if);
597 	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
598 	SK_XM_READ_2(sc_if, XM_PHY_DATA);
599 	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
600 		for (i = 0; i < SK_TIMEOUT; i++) {
601 			DELAY(1);
602 			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
603 			    XM_MMUCMD_PHYDATARDY)
604 				break;
605 		}
606 
607 		if (i == SK_TIMEOUT) {
608 			printf("sk%d: phy failed to come ready\n",
609 			    sc_if->sk_unit);
610 			SK_IF_UNLOCK(sc_if);
611 			return(0);
612 		}
613 	}
614 	DELAY(1);
615 	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
616 	SK_IF_UNLOCK(sc_if);
617 	return(i);
618 }
619 
620 static int
621 sk_xmac_miibus_writereg(sc_if, phy, reg, val)
622 	struct sk_if_softc	*sc_if;
623 	int			phy, reg, val;
624 {
625 	int			i;
626 
627 	SK_IF_LOCK(sc_if);
628 	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
629 	for (i = 0; i < SK_TIMEOUT; i++) {
630 		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
631 			break;
632 	}
633 
634 	if (i == SK_TIMEOUT) {
635 		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
636 		SK_IF_UNLOCK(sc_if);
637 		return(ETIMEDOUT);
638 	}
639 
640 	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
641 	for (i = 0; i < SK_TIMEOUT; i++) {
642 		DELAY(1);
643 		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
644 			break;
645 	}
646 	SK_IF_UNLOCK(sc_if);
647 	if (i == SK_TIMEOUT)
648 		printf("sk%d: phy write timed out\n", sc_if->sk_unit);
649 
650 	return(0);
651 }
652 
653 static void
654 sk_xmac_miibus_statchg(sc_if)
655 	struct sk_if_softc	*sc_if;
656 {
657 	struct mii_data		*mii;
658 
659 	mii = device_get_softc(sc_if->sk_miibus);
660 
661 	SK_IF_LOCK(sc_if);
662 	/*
663 	 * If this is a GMII PHY, manually set the XMAC's
664 	 * duplex mode accordingly.
665 	 */
666 	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
667 		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
668 			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
669 		} else {
670 			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
671 		}
672 	}
673 	SK_IF_UNLOCK(sc_if);
674 
675 	return;
676 }
677 
678 static int
679 sk_marv_miibus_readreg(sc_if, phy, reg)
680 	struct sk_if_softc	*sc_if;
681 	int			phy, reg;
682 {
683 	u_int16_t		val;
684 	int			i;
685 
686 	if (phy != 0 ||
687 	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
688 	     sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
689 		return(0);
690 	}
691 
692 	SK_IF_LOCK(sc_if);
693         SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
694 		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
695 
696 	for (i = 0; i < SK_TIMEOUT; i++) {
697 		DELAY(1);
698 		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
699 		if (val & YU_SMICR_READ_VALID)
700 			break;
701 	}
702 
703 	if (i == SK_TIMEOUT) {
704 		printf("sk%d: phy failed to come ready\n",
705 		    sc_if->sk_unit);
706 		SK_IF_UNLOCK(sc_if);
707 		return(0);
708 	}
709 
710 	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
711 	SK_IF_UNLOCK(sc_if);
712 
713 	return(val);
714 }
715 
716 static int
717 sk_marv_miibus_writereg(sc_if, phy, reg, val)
718 	struct sk_if_softc	*sc_if;
719 	int			phy, reg, val;
720 {
721 	int			i;
722 
723 	SK_IF_LOCK(sc_if);
724 	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
725 	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
726 		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
727 
728 	for (i = 0; i < SK_TIMEOUT; i++) {
729 		DELAY(1);
730 		if (!(SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY))
731 			break;
732 	}
733 	SK_IF_UNLOCK(sc_if);
734 
735 	return(0);
736 }
737 
738 static void
739 sk_marv_miibus_statchg(sc_if)
740 	struct sk_if_softc	*sc_if;
741 {
742 	return;
743 }
744 
745 #define HASH_BITS		6
746 
747 static u_int32_t
748 sk_xmchash(addr)
749 	const uint8_t *addr;
750 {
751 	uint32_t crc;
752 
753 	/* Compute CRC for the address value. */
754 	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
755 
756 	return (~crc & ((1 << HASH_BITS) - 1));
757 }
758 
759 /* gmchash is just a big endian crc */
760 static u_int32_t
761 sk_gmchash(addr)
762 	const uint8_t *addr;
763 {
764 	uint32_t crc;
765 
766 	/* Compute CRC for the address value. */
767 	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
768 
769 	return (crc & ((1 << HASH_BITS) - 1));
770 }
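
/*
 * Both hash routines return a 6-bit bucket (0..63).  sk_setmulti()
 * below folds that into a 64-bit filter kept as two 32-bit words:
 * bucket 37, for example, sets bit (37 - 32) == 5 of hashes[1].
 */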
771 
772 static void
773 sk_setfilt(sc_if, addr, slot)
774 	struct sk_if_softc	*sc_if;
775 	caddr_t			addr;
776 	int			slot;
777 {
778 	int			base;
779 
780 	base = XM_RXFILT_ENTRY(slot);
781 
782 	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
783 	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
784 	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
785 
786 	return;
787 }
788 
789 static void
790 sk_setmulti(sc_if)
791 	struct sk_if_softc	*sc_if;
792 {
793 	struct sk_softc		*sc = sc_if->sk_softc;
794 	struct ifnet		*ifp = sc_if->sk_ifp;
795 	u_int32_t		hashes[2] = { 0, 0 };
796 	int			h = 0, i;
797 	struct ifmultiaddr	*ifma;
798 	u_int8_t		dummy[] = { 0, 0, 0, 0, 0, 0 };
799 
800 
801 	/* First, zot all the existing filters. */
802 	switch(sc->sk_type) {
803 	case SK_GENESIS:
804 		for (i = 1; i < XM_RXFILT_MAX; i++)
805 			sk_setfilt(sc_if, (caddr_t)&dummy, i);
806 
807 		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
808 		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
809 		break;
810 	case SK_YUKON:
811 	case SK_YUKON_LITE:
812 	case SK_YUKON_LP:
813 		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
814 		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
815 		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
816 		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
817 		break;
818 	}
819 
820 	/* Now program new ones. */
821 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
822 		hashes[0] = 0xFFFFFFFF;
823 		hashes[1] = 0xFFFFFFFF;
824 	} else {
825 		i = 1;
826 		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
827 			if (ifma->ifma_addr->sa_family != AF_LINK)
828 				continue;
829 			/*
830 			 * Program the first XM_RXFILT_MAX multicast groups
831 			 * into the perfect filter. For all others,
832 			 * use the hash table.
833 			 */
834 			if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
835 				sk_setfilt(sc_if,
836 			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
837 				i++;
838 				continue;
839 			}
840 
841 			switch(sc->sk_type) {
842 			case SK_GENESIS:
843 				h = sk_xmchash(
844 					LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
845 				break;
846 			case SK_YUKON:
847 			case SK_YUKON_LITE:
848 			case SK_YUKON_LP:
849 				h = sk_gmchash(
850 					LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
851 				break;
852 			}
853 			if (h < 32)
854 				hashes[0] |= (1 << h);
855 			else
856 				hashes[1] |= (1 << (h - 32));
857 		}
858 	}
859 
860 	switch(sc->sk_type) {
861 	case SK_GENESIS:
862 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
863 			       XM_MODE_RX_USE_PERFECT);
864 		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
865 		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
866 		break;
867 	case SK_YUKON:
868 	case SK_YUKON_LITE:
869 	case SK_YUKON_LP:
870 		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
871 		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
872 		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
873 		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
874 		break;
875 	}
876 
877 	return;
878 }
879 
880 static void
881 sk_setpromisc(sc_if)
882 	struct sk_if_softc	*sc_if;
883 {
884 	struct sk_softc		*sc = sc_if->sk_softc;
885 	struct ifnet		*ifp = sc_if->sk_ifp;
886 
887 	switch(sc->sk_type) {
888 	case SK_GENESIS:
889 		if (ifp->if_flags & IFF_PROMISC) {
890 			SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
891 		} else {
892 			SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
893 		}
894 		break;
895 	case SK_YUKON:
896 	case SK_YUKON_LITE:
897 	case SK_YUKON_LP:
898 		if (ifp->if_flags & IFF_PROMISC) {
899 			SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
900 			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
901 		} else {
902 			SK_YU_SETBIT_2(sc_if, YUKON_RCR,
903 			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
904 		}
905 		break;
906 	}
907 
908 	return;
909 }
910 
911 static int
912 sk_init_rx_ring(sc_if)
913 	struct sk_if_softc	*sc_if;
914 {
915 	struct sk_chain_data	*cd = &sc_if->sk_cdata;
916 	struct sk_ring_data	*rd = sc_if->sk_rdata;
917 	int			i;
918 
919 	bzero((char *)rd->sk_rx_ring,
920 	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
921 
922 	for (i = 0; i < SK_RX_RING_CNT; i++) {
923 		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
924 		if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
925 			return(ENOBUFS);
926 		if (i == (SK_RX_RING_CNT - 1)) {
927 			cd->sk_rx_chain[i].sk_next =
928 			    &cd->sk_rx_chain[0];
929 			rd->sk_rx_ring[i].sk_next =
930 			    vtophys(&rd->sk_rx_ring[0]);
931 		} else {
932 			cd->sk_rx_chain[i].sk_next =
933 			    &cd->sk_rx_chain[i + 1];
934 			rd->sk_rx_ring[i].sk_next =
935 			    vtophys(&rd->sk_rx_ring[i + 1]);
936 		}
937 	}
938 
939 	sc_if->sk_cdata.sk_rx_prod = 0;
940 	sc_if->sk_cdata.sk_rx_cons = 0;
941 
942 	return(0);
943 }
944 
945 static void
946 sk_init_tx_ring(sc_if)
947 	struct sk_if_softc	*sc_if;
948 {
949 	struct sk_chain_data	*cd = &sc_if->sk_cdata;
950 	struct sk_ring_data	*rd = sc_if->sk_rdata;
951 	int			i;
952 
953 	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
954 	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
955 
956 	for (i = 0; i < SK_TX_RING_CNT; i++) {
957 		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
958 		if (i == (SK_TX_RING_CNT - 1)) {
959 			cd->sk_tx_chain[i].sk_next =
960 			    &cd->sk_tx_chain[0];
961 			rd->sk_tx_ring[i].sk_next =
962 			    vtophys(&rd->sk_tx_ring[0]);
963 		} else {
964 			cd->sk_tx_chain[i].sk_next =
965 			    &cd->sk_tx_chain[i + 1];
966 			rd->sk_tx_ring[i].sk_next =
967 			    vtophys(&rd->sk_tx_ring[i + 1]);
968 		}
969 	}
970 
971 	sc_if->sk_cdata.sk_tx_prod = 0;
972 	sc_if->sk_cdata.sk_tx_cons = 0;
973 	sc_if->sk_cdata.sk_tx_cnt = 0;
974 
975 	return;
976 }
977 
978 static int
979 sk_newbuf(sc_if, c, m)
980 	struct sk_if_softc	*sc_if;
981 	struct sk_chain		*c;
982 	struct mbuf		*m;
983 {
984 	struct mbuf		*m_new = NULL;
985 	struct sk_rx_desc	*r;
986 
987 	if (m == NULL) {
988 		caddr_t			*buf = NULL;
989 
990 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
991 		if (m_new == NULL)
992 			return(ENOBUFS);
993 
994 		/* Allocate the jumbo buffer */
995 		buf = sk_jalloc(sc_if);
996 		if (buf == NULL) {
997 			m_freem(m_new);
998 #ifdef SK_VERBOSE
999 			printf("sk%d: jumbo allocation failed "
1000 			    "-- packet dropped!\n", sc_if->sk_unit);
1001 #endif
1002 			return(ENOBUFS);
1003 		}
1004 
1005 		/* Attach the buffer to the mbuf */
1006 		MEXTADD(m_new, buf, SK_JLEN, sk_jfree,
1007 		    (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV);
1008 		m_new->m_data = (void *)buf;
1009 		m_new->m_pkthdr.len = m_new->m_len = SK_JLEN;
1010 	} else {
1011 		/*
1012 	 	 * We're re-using a previously allocated mbuf;
1013 		 * be sure to re-init pointers and lengths to
1014 		 * default values.
1015 		 */
1016 		m_new = m;
1017 		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
1018 		m_new->m_data = m_new->m_ext.ext_buf;
1019 	}
1020 
1021 	/*
1022 	 * Adjust alignment so packet payload begins on a
1023 	 * longword boundary. Mandatory for Alpha, useful on
1024 	 * x86 too.
1025 	 */
1026 	m_adj(m_new, ETHER_ALIGN);
1027 
1028 	r = c->sk_desc;
1029 	c->sk_mbuf = m_new;
1030 	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
1031 	r->sk_ctl = m_new->m_len | SK_RXSTAT;
1032 
1033 	return(0);
1034 }
1035 
1036 /*
1037  * Allocate jumbo buffer storage. The SysKonnect adapters support
1038  * "jumbograms" (9K frames), although SysKonnect doesn't currently
1039  * use them in their drivers. In order for us to use them, we need
1040  * large 9K receive buffers, however standard mbuf clusters are only
1041  * 2048 bytes in size. Consequently, we need to allocate and manage
1042  * our own jumbo buffer pool. Fortunately, this does not require an
1043  * excessive amount of additional code.
1044  */
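
/*
 * A sketch of the arithmetic, assuming the usual definitions in
 * if_skreg.h: sk_alloc_jumbo_mem() below contigmalloc()s one
 * physically contiguous chunk of SK_JMEM bytes (roughly
 * SK_JSLOTS * SK_JLEN) and carves it into SK_JSLOTS buffers of
 * SK_JLEN bytes, each large enough for a 9K jumbo frame plus
 * alignment slop.
 */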
1045 static int
1046 sk_alloc_jumbo_mem(sc_if)
1047 	struct sk_if_softc	*sc_if;
1048 {
1049 	caddr_t			ptr;
1050 	register int		i;
1051 	struct sk_jpool_entry   *entry;
1052 
1053 	/* Grab a big chunk o' storage. */
1054 	sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
1055 	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1056 
1057 	if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
1058 		printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
1059 		return(ENOBUFS);
1060 	}
1061 
1062 	mtx_init(&sc_if->sk_jlist_mtx, "sk_jlist_mtx", NULL, MTX_DEF);
1063 
1064 	SLIST_INIT(&sc_if->sk_jfree_listhead);
1065 	SLIST_INIT(&sc_if->sk_jinuse_listhead);
1066 
1067 	/*
1068 	 * Now divide it up into 9K pieces and save the addresses
1069 	 * in an array.
1070 	 */
1071 	ptr = sc_if->sk_cdata.sk_jumbo_buf;
1072 	for (i = 0; i < SK_JSLOTS; i++) {
1073 		sc_if->sk_cdata.sk_jslots[i] = ptr;
1074 		ptr += SK_JLEN;
1075 		entry = malloc(sizeof(struct sk_jpool_entry),
1076 		    M_DEVBUF, M_NOWAIT);
1077 		if (entry == NULL) {
1078 			sk_free_jumbo_mem(sc_if);
1079 			sc_if->sk_cdata.sk_jumbo_buf = NULL;
1080 			printf("sk%d: no memory for jumbo "
1081 			    "buffer queue!\n", sc_if->sk_unit);
1082 			return(ENOBUFS);
1083 		}
1084 		entry->slot = i;
1085 		SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
1086 		    entry, jpool_entries);
1087 	}
1088 
1089 	return(0);
1090 }
1091 
1092 static void
1093 sk_free_jumbo_mem(sc_if)
1094 	struct sk_if_softc	*sc_if;
1095 {
1096 	struct sk_jpool_entry	*entry;
1097 
1098 	SK_JLIST_LOCK(sc_if);
1099 
1100 	/* We cannot release external mbuf storage while in use. */
1101 	if (!SLIST_EMPTY(&sc_if->sk_jinuse_listhead)) {
1102 		printf("sk%d: will leak jumbo buffer memory!\n", sc_if->sk_unit);
1103 		SK_JLIST_UNLOCK(sc_if);
1104 		return;
1105 	}
1106 
1107 	while (!SLIST_EMPTY(&sc_if->sk_jfree_listhead)) {
1108 		entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
1109 		SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
1110 		free(entry, M_DEVBUF);
1111 	}
1112 
1113 	SK_JLIST_UNLOCK(sc_if);
1114 
1115 	mtx_destroy(&sc_if->sk_jlist_mtx);
1116 
1117 	contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
1118 
1119 	return;
1120 }
1121 
1122 /*
1123  * Allocate a jumbo buffer.
1124  */
1125 static void *
1126 sk_jalloc(sc_if)
1127 	struct sk_if_softc	*sc_if;
1128 {
1129 	struct sk_jpool_entry   *entry;
1130 
1131 	SK_JLIST_LOCK(sc_if);
1132 
1133 	entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
1134 
1135 	if (entry == NULL) {
1136 #ifdef SK_VERBOSE
1137 		printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
1138 #endif
1139 		SK_JLIST_UNLOCK(sc_if);
1140 		return(NULL);
1141 	}
1142 
1143 	SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
1144 	SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
1145 
1146 	SK_JLIST_UNLOCK(sc_if);
1147 
1148 	return(sc_if->sk_cdata.sk_jslots[entry->slot]);
1149 }
1150 
1151 /*
1152  * Release a jumbo buffer.
1153  */
1154 static void
1155 sk_jfree(buf, args)
1156 	void			*buf;
1157 	void			*args;
1158 {
1159 	struct sk_if_softc	*sc_if;
1160 	int		        i;
1161 	struct sk_jpool_entry   *entry;
1162 
1163 	/* Extract the softc struct pointer. */
1164 	sc_if = (struct sk_if_softc *)args;
1165 	if (sc_if == NULL)
1166 		panic("sk_jfree: didn't get softc pointer!");
1167 
1168 	SK_JLIST_LOCK(sc_if);
1169 
1170 	/* calculate the slot this buffer belongs to */
1171 	i = ((vm_offset_t)buf
1172 	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
1173 
1174 	if ((i < 0) || (i >= SK_JSLOTS))
1175 		panic("sk_jfree: asked to free buffer that we don't manage!");
1176 
1177 	entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
1178 	if (entry == NULL)
1179 		panic("sk_jfree: buffer not in use!");
1180 	entry->slot = i;
1181 	SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
1182 	SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);
1183 	if (SLIST_EMPTY(&sc_if->sk_jinuse_listhead))
1184 		wakeup(sc_if);
1185 
1186 	SK_JLIST_UNLOCK(sc_if);
1187 	return;
1188 }
1189 
1190 /*
1191  * Set media options.
1192  */
1193 static int
1194 sk_ifmedia_upd(ifp)
1195 	struct ifnet		*ifp;
1196 {
1197 	struct sk_if_softc	*sc_if = ifp->if_softc;
1198 	struct mii_data		*mii;
1199 
1200 	mii = device_get_softc(sc_if->sk_miibus);
1201 	sk_init(sc_if);
1202 	mii_mediachg(mii);
1203 
1204 	return(0);
1205 }
1206 
1207 /*
1208  * Report current media status.
1209  */
1210 static void
1211 sk_ifmedia_sts(ifp, ifmr)
1212 	struct ifnet		*ifp;
1213 	struct ifmediareq	*ifmr;
1214 {
1215 	struct sk_if_softc	*sc_if;
1216 	struct mii_data		*mii;
1217 
1218 	sc_if = ifp->if_softc;
1219 	mii = device_get_softc(sc_if->sk_miibus);
1220 
1221 	mii_pollstat(mii);
1222 	ifmr->ifm_active = mii->mii_media_active;
1223 	ifmr->ifm_status = mii->mii_media_status;
1224 
1225 	return;
1226 }
1227 
1228 static int
1229 sk_ioctl(ifp, command, data)
1230 	struct ifnet		*ifp;
1231 	u_long			command;
1232 	caddr_t			data;
1233 {
1234 	struct sk_if_softc	*sc_if = ifp->if_softc;
1235 	struct ifreq		*ifr = (struct ifreq *) data;
1236 	int			error = 0;
1237 	struct mii_data		*mii;
1238 
1239 	switch(command) {
1240 	case SIOCSIFMTU:
1241 		if (ifr->ifr_mtu > SK_JUMBO_MTU)
1242 			error = EINVAL;
1243 		else {
1244 			ifp->if_mtu = ifr->ifr_mtu;
1245 			ifp->if_flags &= ~IFF_RUNNING;
1246 			sk_init(sc_if);
1247 		}
1248 		break;
1249 	case SIOCSIFFLAGS:
1250 		SK_IF_LOCK(sc_if);
1251 		if (ifp->if_flags & IFF_UP) {
1252 			if (ifp->if_flags & IFF_RUNNING) {
1253 				if ((ifp->if_flags ^ sc_if->sk_if_flags)
1254 				    & IFF_PROMISC) {
1255 					sk_setpromisc(sc_if);
1256 					sk_setmulti(sc_if);
1257 				}
1258 			} else
1259 				sk_init(sc_if);
1260 		} else {
1261 			if (ifp->if_flags & IFF_RUNNING)
1262 				sk_stop(sc_if);
1263 		}
1264 		sc_if->sk_if_flags = ifp->if_flags;
1265 		SK_IF_UNLOCK(sc_if);
1266 		error = 0;
1267 		break;
1268 	case SIOCADDMULTI:
1269 	case SIOCDELMULTI:
1270 		if (ifp->if_flags & IFF_RUNNING) {
1271 			SK_IF_LOCK(sc_if);
1272 			sk_setmulti(sc_if);
1273 			SK_IF_UNLOCK(sc_if);
1274 			error = 0;
1275 		}
1276 		break;
1277 	case SIOCGIFMEDIA:
1278 	case SIOCSIFMEDIA:
1279 		mii = device_get_softc(sc_if->sk_miibus);
1280 		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1281 		break;
1282 	default:
1283 		error = ether_ioctl(ifp, command, data);
1284 		break;
1285 	}
1286 
1287 	return(error);
1288 }
1289 
1290 /*
1291  * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
1292  * IDs against our list and return a device name if we find a match.
1293  */
1294 static int
1295 skc_probe(dev)
1296 	device_t		dev;
1297 {
1298 	struct sk_softc		*sc;
1299 	struct sk_type		*t = sk_devs;
1300 
1301 	sc = device_get_softc(dev);
1302 
1303 	while(t->sk_name != NULL) {
1304 		if ((pci_get_vendor(dev) == t->sk_vid) &&
1305 		    (pci_get_device(dev) == t->sk_did)) {
1306 			device_set_desc(dev, t->sk_name);
1307 			return (BUS_PROBE_DEFAULT);
1308 		}
1309 		t++;
1310 	}
1311 
1312 	return(ENXIO);
1313 }
1314 
1315 /*
1316  * Force the GEnesis into reset, then bring it out of reset.
1317  */
1318 static void
1319 sk_reset(sc)
1320 	struct sk_softc		*sc;
1321 {
1322 	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
1323 	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
1324 	if (SK_YUKON_FAMILY(sc->sk_type))
1325 		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
1326 
1327 	DELAY(1000);
1328 	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
1329 	DELAY(2);
1330 	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
1331 	if (SK_YUKON_FAMILY(sc->sk_type))
1332 		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
1333 
1334 	if (sc->sk_type == SK_GENESIS) {
1335 		/* Configure packet arbiter */
1336 		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
1337 		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
1338 		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
1339 		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
1340 		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
1341 	}
1342 
1343 	/* Enable RAM interface */
1344 	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
1345 
1346 	/*
1347          * Configure interrupt moderation. The moderation timer
1348 	 * defers interrupts specified in the interrupt moderation
1349 	 * timer mask based on the timeout specified in the interrupt
1350 	 * moderation timer init register. Each bit in the timer
1351 	 * register represents 18.825ns, so to specify a timeout in
1352 	 * microseconds, we have to multiply by 54.
1353 	 */
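	/*
	 * Example: with the default setting of SK_IM_DEFAULT
	 * microseconds, SK_IM_USECS() should program on the order of
	 * SK_IM_DEFAULT * 54 ticks into SK_IMTIMERINIT, since
	 * 1 us / 18.825 ns is roughly 54.
	 */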
1354 	printf("skc%d: interrupt moderation is %d us\n",
1355 	    sc->sk_unit, sc->sk_int_mod);
1356 	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod));
1357 	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
1358 	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
1359 	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
1360 
1361 	return;
1362 }
1363 
1364 static int
1365 sk_probe(dev)
1366 	device_t		dev;
1367 {
1368 	struct sk_softc		*sc;
1369 
1370 	sc = device_get_softc(device_get_parent(dev));
1371 
1372 	/*
1373 	 * Not much to do here. We always know there will be
1374 	 * at least one XMAC present, and if there are two,
1375 	 * skc_attach() will create a second device instance
1376 	 * for us.
1377 	 */
1378 	switch (sc->sk_type) {
1379 	case SK_GENESIS:
1380 		device_set_desc(dev, "XaQti Corp. XMAC II");
1381 		break;
1382 	case SK_YUKON:
1383 	case SK_YUKON_LITE:
1384 	case SK_YUKON_LP:
1385 		device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon");
1386 		break;
1387 	}
1388 
1389 	return (BUS_PROBE_DEFAULT);
1390 }
1391 
1392 /*
1393  * Each XMAC chip is attached as a separate logical IP interface.
1394  * Single port cards will have only one logical interface of course.
1395  */
1396 static int
1397 sk_attach(dev)
1398 	device_t		dev;
1399 {
1400 	struct sk_softc		*sc;
1401 	struct sk_if_softc	*sc_if;
1402 	struct ifnet		*ifp;
1403 	int			i, port, error;
1404 	u_char			eaddr[6];
1405 
1406 	if (dev == NULL)
1407 		return(EINVAL);
1408 
1409 	error = 0;
1410 	sc_if = device_get_softc(dev);
1411 	sc = device_get_softc(device_get_parent(dev));
1412 	port = *(int *)device_get_ivars(dev);
1413 
1414 	sc_if->sk_dev = dev;
1415 	sc_if->sk_unit = device_get_unit(dev);
1416 	sc_if->sk_port = port;
1417 	sc_if->sk_softc = sc;
1418 	sc->sk_if[port] = sc_if;
1419 	if (port == SK_PORT_A)
1420 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1421 	if (port == SK_PORT_B)
1422 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1423 
1424 	/* Allocate the descriptor queues. */
1425 	sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
1426 	    M_NOWAIT | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);
1427 
1428 	if (sc_if->sk_rdata == NULL) {
1429 		printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
1430 		error = ENOMEM;
1431 		goto fail;
1432 	}
1433 
1434 	/* Try to allocate memory for jumbo buffers. */
1435 	if (sk_alloc_jumbo_mem(sc_if)) {
1436 		printf("sk%d: jumbo buffer allocation failed\n",
1437 		    sc_if->sk_unit);
1438 		error = ENOMEM;
1439 		goto fail;
1440 	}
1441 
1442 	ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
1443 	if (ifp == NULL) {
1444 		printf("sk%d: can not if_alloc()\n", sc_if->sk_unit);
1445 		error = ENOSPC;
1446 		goto fail;
1447 	}
1448 	ifp->if_softc = sc_if;
1449 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1450 	ifp->if_mtu = ETHERMTU;
1451 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1452 	ifp->if_ioctl = sk_ioctl;
1453 	ifp->if_start = sk_start;
1454 	ifp->if_watchdog = sk_watchdog;
1455 	ifp->if_init = sk_init;
1456 	ifp->if_baudrate = 1000000000;
1457 	IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
1458 	ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1;
1459 	IFQ_SET_READY(&ifp->if_snd);
1460 
1461 	callout_handle_init(&sc_if->sk_tick_ch);
1462 
1463 	/*
1464 	 * Get station address for this interface. Note that
1465 	 * dual port cards actually come with three station
1466 	 * addresses: one for each port, plus an extra. The
1467 	 * extra one is used by the SysKonnect driver software
1468 	 * as a 'virtual' station address for when both ports
1469 	 * are operating in failover mode. Currently we don't
1470 	 * use this extra address.
1471 	 */
1472 	SK_LOCK(sc);
1473 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1474 		eaddr[i] =
1475 		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1476 
1477 	/*
1478 	 * Set up RAM buffer addresses. The NIC will have a certain
1479 	 * amount of SRAM on it, somewhere between 512K and 2MB. We
1480 	 * need to divide this up a) between the transmitter and
1481  	 * receiver and b) between the two XMACs, if this is a
1482 	 * dual port NIC. Our algorithm is to divide up the memory
1483 	 * evenly so that everyone gets a fair share.
1484 	 */
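	/*
	 * Worked example: a single-MAC card with 1MB of SRAM ends up
	 * with a 512K receive chunk and a 512K transmit chunk; the
	 * start/end values computed below are expressed in 8-byte
	 * (sizeof(u_int64_t)) units.
	 */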
1485 	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1486 		u_int32_t		chunk, val;
1487 
1488 		chunk = sc->sk_ramsize / 2;
1489 		val = sc->sk_rboff / sizeof(u_int64_t);
1490 		sc_if->sk_rx_ramstart = val;
1491 		val += (chunk / sizeof(u_int64_t));
1492 		sc_if->sk_rx_ramend = val - 1;
1493 		sc_if->sk_tx_ramstart = val;
1494 		val += (chunk / sizeof(u_int64_t));
1495 		sc_if->sk_tx_ramend = val - 1;
1496 	} else {
1497 		u_int32_t		chunk, val;
1498 
1499 		chunk = sc->sk_ramsize / 4;
1500 		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1501 		    sizeof(u_int64_t);
1502 		sc_if->sk_rx_ramstart = val;
1503 		val += (chunk / sizeof(u_int64_t));
1504 		sc_if->sk_rx_ramend = val - 1;
1505 		sc_if->sk_tx_ramstart = val;
1506 		val += (chunk / sizeof(u_int64_t));
1507 		sc_if->sk_tx_ramend = val - 1;
1508 	}
1509 
1510 	/* Read and save PHY type and set PHY address */
1511 	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1512 	switch(sc_if->sk_phytype) {
1513 	case SK_PHYTYPE_XMAC:
1514 		sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1515 		break;
1516 	case SK_PHYTYPE_BCOM:
1517 		sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1518 		break;
1519 	case SK_PHYTYPE_MARV_COPPER:
1520 		sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1521 		break;
1522 	default:
1523 		printf("skc%d: unsupported PHY type: %d\n",
1524 		    sc->sk_unit, sc_if->sk_phytype);
1525 		error = ENODEV;
1526 		SK_UNLOCK(sc);
1527 		if_free(ifp);
1528 		goto fail;
1529 	}
1530 
1531 
1532 	/*
1533 	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
1534 	 */
1535 	SK_UNLOCK(sc);
1536 	ether_ifattach(ifp, eaddr);
1537 	SK_LOCK(sc);
1538 
1539 	/*
1540 	 * Do miibus setup.
1541 	 */
1542 	switch (sc->sk_type) {
1543 	case SK_GENESIS:
1544 		sk_init_xmac(sc_if);
1545 		break;
1546 	case SK_YUKON:
1547 	case SK_YUKON_LITE:
1548 	case SK_YUKON_LP:
1549 		sk_init_yukon(sc_if);
1550 		break;
1551 	}
1552 
1553 	SK_UNLOCK(sc);
1554 	if (mii_phy_probe(dev, &sc_if->sk_miibus,
1555 	    sk_ifmedia_upd, sk_ifmedia_sts)) {
1556 		printf("sk%d: no PHY found!\n", sc_if->sk_unit);
1557 		ether_ifdetach(ifp);
1558 		if_free(ifp);
1559 		error = ENXIO;
1560 		goto fail;
1561 	}
1562 
1563 fail:
1564 	if (error) {
1565 		/* Access should be ok even though lock has been dropped */
1566 		sc->sk_if[port] = NULL;
1567 		sk_detach(dev);
1568 	}
1569 
1570 	return(error);
1571 }
1572 
1573 /*
1574  * Attach the interface. Allocate softc structures, do ifmedia
1575  * setup and ethernet/BPF attach.
1576  */
1577 static int
1578 skc_attach(dev)
1579 	device_t		dev;
1580 {
1581 	struct sk_softc		*sc;
1582 	int			unit, error = 0, rid, *port;
1583 	uint8_t			skrs;
1584 	char			*pname, *revstr;
1585 
1586 	sc = device_get_softc(dev);
1587 	unit = device_get_unit(dev);
1588 
1589 	mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
1590 	    MTX_DEF | MTX_RECURSE);
1591 	/*
1592 	 * Map control/status registers.
1593 	 */
1594 	pci_enable_busmaster(dev);
1595 
1596 	rid = SK_RID;
1597 	sc->sk_res = bus_alloc_resource_any(dev, SK_RES, &rid, RF_ACTIVE);
1598 
1599 	if (sc->sk_res == NULL) {
1600 		printf("skc%d: couldn't map ports/memory\n", unit);
1601 		error = ENXIO;
1602 		goto fail;
1603 	}
1604 
1605 	sc->sk_btag = rman_get_bustag(sc->sk_res);
1606 	sc->sk_bhandle = rman_get_bushandle(sc->sk_res);
1607 
1608 	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
1609 	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf;
1610 
1611 	/* Bail out if chip is not recognized. */
1612 	if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) {
1613 		printf("skc%d: unknown device: chipver=%02x, rev=%x\n",
1614 			unit, sc->sk_type, sc->sk_rev);
1615 		error = ENXIO;
1616 		goto fail;
1617 	}
1618 
1619 	/* Allocate interrupt */
1620 	rid = 0;
1621 	sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1622 	    RF_SHAREABLE | RF_ACTIVE);
1623 
1624 	if (sc->sk_irq == NULL) {
1625 		printf("skc%d: couldn't map interrupt\n", unit);
1626 		error = ENXIO;
1627 		goto fail;
1628 	}
1629 
1630 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
1631 		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1632 		OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW,
1633 		&sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I",
1634 		"SK interrupt moderation");
1635 
1636 	/* Pull in device tunables. */
1637 	sc->sk_int_mod = SK_IM_DEFAULT;
1638 	error = resource_int_value(device_get_name(dev), unit,
1639 		"int_mod", &sc->sk_int_mod);
1640 	if (error == 0) {
1641 		if (sc->sk_int_mod < SK_IM_MIN ||
1642 		    sc->sk_int_mod > SK_IM_MAX) {
1643 			printf("skc%d: int_mod value out of range; "
1644 			    "using default: %d\n", unit, SK_IM_DEFAULT);
1645 			sc->sk_int_mod = SK_IM_DEFAULT;
1646 		}
1647 	}
1648 
1649 	sc->sk_unit = unit;
1650 
1651 	/* Reset the adapter. */
1652 	sk_reset(sc);
1653 
1654 	/* Read and save vital product data from EEPROM. */
1655 	sk_vpd_read(sc);
1656 
1657 	skrs = sk_win_read_1(sc, SK_EPROM0);
1658 	if (sc->sk_type == SK_GENESIS) {
1659 		/* Read and save RAM size and RAMbuffer offset */
1660 		switch(skrs) {
1661 		case SK_RAMSIZE_512K_64:
1662 			sc->sk_ramsize = 0x80000;
1663 			sc->sk_rboff = SK_RBOFF_0;
1664 			break;
1665 		case SK_RAMSIZE_1024K_64:
1666 			sc->sk_ramsize = 0x100000;
1667 			sc->sk_rboff = SK_RBOFF_80000;
1668 			break;
1669 		case SK_RAMSIZE_1024K_128:
1670 			sc->sk_ramsize = 0x100000;
1671 			sc->sk_rboff = SK_RBOFF_0;
1672 			break;
1673 		case SK_RAMSIZE_2048K_128:
1674 			sc->sk_ramsize = 0x200000;
1675 			sc->sk_rboff = SK_RBOFF_0;
1676 			break;
1677 		default:
1678 			printf("skc%d: unknown ram size: %d\n",
1679 			    sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
1680 			error = ENXIO;
1681 			goto fail;
1682 		}
1683 	} else { /* SK_YUKON_FAMILY */
1684 		if (skrs == 0x00)
1685 			sc->sk_ramsize = 0x20000;
1686 		else
1687 			sc->sk_ramsize = skrs * (1<<12);
1688 		sc->sk_rboff = SK_RBOFF_0;
1689 	}
1690 
1691 	/* Read and save physical media type */
1692 	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
1693 	case SK_PMD_1000BASESX:
1694 		sc->sk_pmd = IFM_1000_SX;
1695 		break;
1696 	case SK_PMD_1000BASELX:
1697 		sc->sk_pmd = IFM_1000_LX;
1698 		break;
1699 	case SK_PMD_1000BASECX:
1700 		sc->sk_pmd = IFM_1000_CX;
1701 		break;
1702 	case SK_PMD_1000BASETX:
1703 		sc->sk_pmd = IFM_1000_T;
1704 		break;
1705 	default:
1706 		printf("skc%d: unknown media type: 0x%x\n",
1707 		    sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
1708 		error = ENXIO;
1709 		goto fail;
1710 	}
1711 
1712 	/* Determine whether to name it with VPD PN or just make it up.
1713 	 * Marvell Yukon VPD PN frequently seems to be bogus. */
1714 	switch (pci_get_device(dev)) {
1715 	case DEVICEID_SK_V1:
1716 	case DEVICEID_BELKIN_5005:
1717 	case DEVICEID_3COM_3C940:
1718 	case DEVICEID_LINKSYS_EG1032:
1719 	case DEVICEID_DLINK_DGE530T:
1720 		/* Stay with VPD PN. */
1721 		pname = sc->sk_vpd_prodname;
1722 		break;
1723 	case DEVICEID_SK_V2:
1724 		/* YUKON VPD PN might bear no resemblance to reality. */
1725 		switch (sc->sk_type) {
1726 		case SK_GENESIS:
1727 			/* Stay with VPD PN. */
1728 			pname = sc->sk_vpd_prodname;
1729 			break;
1730 		case SK_YUKON:
1731 			pname = "Marvell Yukon Gigabit Ethernet";
1732 			break;
1733 		case SK_YUKON_LITE:
1734 			pname = "Marvell Yukon Lite Gigabit Ethernet";
1735 			break;
1736 		case SK_YUKON_LP:
1737 			pname = "Marvell Yukon LP Gigabit Ethernet";
1738 			break;
1739 		default:
1740 			pname = "Marvell Yukon (Unknown) Gigabit Ethernet";
1741 			break;
1742 		}
1743 
1744 		/* Yukon Lite Rev. A0 needs special test. */
1745 		if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
1746 			u_int32_t far;
1747 			u_int8_t testbyte;
1748 
1749 			/* Save flash address register before testing. */
1750 			far = sk_win_read_4(sc, SK_EP_ADDR);
1751 
1752 			sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff);
1753 			testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03);
1754 
1755 			if (testbyte != 0x00) {
1756 				/* Yukon Lite Rev. A0 detected. */
1757 				sc->sk_type = SK_YUKON_LITE;
1758 				sc->sk_rev = SK_YUKON_LITE_REV_A0;
1759 				/* Restore flash address register. */
1760 				sk_win_write_4(sc, SK_EP_ADDR, far);
1761 			}
1762 		}
1763 		break;
1764 	default:
1765 		device_printf(dev, "unknown device: vendor=%04x, device=%04x, "
1766 			"chipver=%02x, rev=%x\n",
1767 			pci_get_vendor(dev), pci_get_device(dev),
1768 			sc->sk_type, sc->sk_rev);
1769 		error = ENXIO;
1770 		goto fail;
1771 	}
1772 
1773 	if (sc->sk_type == SK_YUKON_LITE) {
1774 		switch (sc->sk_rev) {
1775 		case SK_YUKON_LITE_REV_A0:
1776 			revstr = "A0";
1777 			break;
1778 		case SK_YUKON_LITE_REV_A1:
1779 			revstr = "A1";
1780 			break;
1781 		case SK_YUKON_LITE_REV_A3:
1782 			revstr = "A3";
1783 			break;
1784 		default:
1785 			revstr = "";
1786 			break;
1787 		}
1788 	} else {
1789 		revstr = "";
1790 	}
1791 
1792 	/* Announce the product name and more VPD data if available. */
1793 	device_printf(dev, "%s rev. %s(0x%x)\n",
1794 		pname != NULL ? pname : "<unknown>", revstr, sc->sk_rev);
1795 
1796 	if (bootverbose) {
1797 		if (sc->sk_vpd_readonly != NULL &&
1798 		    sc->sk_vpd_readonly_len != 0) {
1799 			char buf[256];
1800 			char *dp = sc->sk_vpd_readonly;
1801 			uint16_t l, len = sc->sk_vpd_readonly_len;
1802 
1803 			while (len >= 3) {
1804 				if ((*dp == 'P' && *(dp+1) == 'N') ||
1805 				    (*dp == 'E' && *(dp+1) == 'C') ||
1806 				    (*dp == 'M' && *(dp+1) == 'N') ||
1807 				    (*dp == 'S' && *(dp+1) == 'N')) {
1808 					l = 0;
1809 					while (l < *(dp+2)) {
1810 						buf[l] = *(dp+3+l);
1811 						++l;
1812 					}
1813 					buf[l] = '\0';
1814 					device_printf(dev, "%c%c: %s\n",
1815 					    *dp, *(dp+1), buf);
1816 					len -= (3 + l);
1817 					dp += (3 + l);
1818 				} else {
1819 					len -= (3 + *(dp+2));
1820 					dp += (3 + *(dp+2));
1821 				}
1822 			}
1823 		}
1824 		device_printf(dev, "chip ver  = 0x%02x\n", sc->sk_type);
1825 		device_printf(dev, "chip rev  = 0x%02x\n", sc->sk_rev);
1826 		device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs);
1827 		device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
1828 	}
1829 
1830 	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1831 	if (sc->sk_devs[SK_PORT_A] == NULL) {
1832 		device_printf(dev, "failed to add child for PORT_A\n");
1833 		error = ENXIO;
1834 		goto fail;
1835 	}
1836 	port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1837 	if (port == NULL) {
1838 		device_printf(dev, "failed to allocate memory for "
1839 		    "ivars of PORT_A\n");
1840 		error = ENXIO;
1841 		goto fail;
1842 	}
1843 	*port = SK_PORT_A;
1844 	device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1845 
1846 	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1847 		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1848 		if (sc->sk_devs[SK_PORT_B] == NULL) {
1849 			device_printf(dev, "failed to add child for PORT_B\n");
1850 			error = ENXIO;
1851 			goto fail;
1852 		}
1853 		port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
1854 		if (port == NULL) {
1855 			device_printf(dev, "failed to allocate memory for "
1856 			    "ivars of PORT_B\n");
1857 			error = ENXIO;
1858 			goto fail;
1859 		}
1860 		*port = SK_PORT_B;
1861 		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1862 	}
1863 
1864 	/* Turn on the 'driver is loaded' LED. */
1865 	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1866 
1867 	bus_generic_attach(dev);
1868 
1869 	/* Hook interrupt last to avoid having to lock softc */
1870 	error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET|INTR_MPSAFE,
1871 	    sk_intr, sc, &sc->sk_intrhand);
1872 
1873 	if (error) {
1874 		printf("skc%d: couldn't set up irq\n", unit);
1875 		goto fail;
1876 	}
1877 
1878 fail:
1879 	if (error)
1880 		skc_detach(dev);
1881 
1882 	return(error);
1883 }
1884 
1885 /*
1886  * Shutdown hardware and free up resources. This can be called any
1887  * time after the mutex has been initialized. It is called in both
1888  * the error case in attach and the normal detach case so it needs
1889  * to be careful about only freeing resources that have actually been
1890  * allocated.
1891  */
1892 static int
1893 sk_detach(dev)
1894 	device_t		dev;
1895 {
1896 	struct sk_if_softc	*sc_if;
1897 	struct ifnet		*ifp;
1898 
1899 	sc_if = device_get_softc(dev);
1900 	KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx),
1901 	    ("sk mutex not initialized in sk_detach"));
1902 	SK_IF_LOCK(sc_if);
1903 
1904 	ifp = sc_if->sk_ifp;
1905 	/* These should only be active if sk_attach() succeeded */
1906 	if (device_is_attached(dev)) {
1907 		sk_stop(sc_if);
1908 		/* Can't hold locks while calling detach */
1909 		SK_IF_UNLOCK(sc_if);
1910 		ether_ifdetach(ifp);
1911 		if_free(ifp);
1912 		SK_IF_LOCK(sc_if);
1913 	}
1914 	/*
1915 	 * We're generally called from skc_detach() which is using
1916 	 * device_delete_child() to get to here. It's already trashed
1917 	 * miibus for us, so don't do it here or we'll panic.
1918 	 */
1919 	/*
1920 	if (sc_if->sk_miibus != NULL)
1921 		device_delete_child(dev, sc_if->sk_miibus);
1922 	*/
1923 	bus_generic_detach(dev);
1924 	if (sc_if->sk_cdata.sk_jumbo_buf != NULL)
1925 		sk_free_jumbo_mem(sc_if);
1926 	if (sc_if->sk_rdata != NULL) {
1927 		contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data),
1928 		    M_DEVBUF);
1929 	}
1930 	SK_IF_UNLOCK(sc_if);
1931 
1932 	return(0);
1933 }
1934 
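/*
 * Detach the controller. Deletes both port children (freeing their
 * ivars), then tears down the interrupt handler, releases the IRQ and
 * memory resources and the VPD strings, and destroys the softc mutex.
 */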
1935 static int
1936 skc_detach(dev)
1937 	device_t		dev;
1938 {
1939 	struct sk_softc		*sc;
1940 
1941 	sc = device_get_softc(dev);
1942 	KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));
1943 
1944 	if (device_is_alive(dev)) {
1945 		if (sc->sk_devs[SK_PORT_A] != NULL) {
1946 			free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
1947 			device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
1948 		}
1949 		if (sc->sk_devs[SK_PORT_B] != NULL) {
1950 			free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
1951 			device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
1952 		}
1953 		bus_generic_detach(dev);
1954 	}
1955 
1956 	if (sc->sk_vpd_prodname != NULL)
1957 		free(sc->sk_vpd_prodname, M_DEVBUF);
1958 	if (sc->sk_vpd_readonly != NULL)
1959 		free(sc->sk_vpd_readonly, M_DEVBUF);
1960 
1961 	if (sc->sk_intrhand)
1962 		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1963 	if (sc->sk_irq)
1964 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1965 	if (sc->sk_res)
1966 		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1967 
1968 	mtx_destroy(&sc->sk_mtx);
1969 
1970 	return(0);
1971 }
1972 
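/*
 * Encapsulate an mbuf chain in the TX ring: one descriptor per
 * non-empty mbuf, with the data address taken via vtophys(). The OWN
 * bit on the first descriptor is set last so the chip only ever sees
 * a completed chain. Returns ENOBUFS if the ring is too full.
 */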
1973 static int
1974 sk_encap(sc_if, m_head, txidx)
1975         struct sk_if_softc	*sc_if;
1976         struct mbuf		*m_head;
1977         u_int32_t		*txidx;
1978 {
1979 	struct sk_tx_desc	*f = NULL;
1980 	struct mbuf		*m;
1981 	u_int32_t		frag, cur, cnt = 0;
1982 
1983 	SK_IF_LOCK_ASSERT(sc_if);
1984 
1985 	m = m_head;
1986 	cur = frag = *txidx;
1987 
1988 	/*
1989 	 * Start packing the mbufs in this chain into
1990 	 * the fragment pointers. Stop when we run out
1991 	 * of fragments or hit the end of the mbuf chain.
1992 	 */
1993 	for (m = m_head; m != NULL; m = m->m_next) {
1994 		if (m->m_len != 0) {
1995 			if ((SK_TX_RING_CNT -
1996 			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
1997 				return(ENOBUFS);
1998 			f = &sc_if->sk_rdata->sk_tx_ring[frag];
1999 			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
2000 			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
2001 			if (cnt == 0)
2002 				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
2003 			else
2004 				f->sk_ctl |= SK_TXCTL_OWN;
2005 			cur = frag;
2006 			SK_INC(frag, SK_TX_RING_CNT);
2007 			cnt++;
2008 		}
2009 	}
2010 
2011 	if (m != NULL)
2012 		return(ENOBUFS);
2013 
2014 	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
2015 		SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
2016 	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
2017 	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
2018 	sc_if->sk_cdata.sk_tx_cnt += cnt;
2019 
2020 	*txidx = frag;
2021 
2022 	return(0);
2023 }
2024 
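/*
 * Transmit start routine: dequeue packets from the interface send
 * queue and hand them to sk_encap() until the TX ring fills up. If
 * anything was queued, kick the transmit BMU and arm the watchdog.
 */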
2025 static void
2026 sk_start(ifp)
2027 	struct ifnet		*ifp;
2028 {
2029         struct sk_softc		*sc;
2030         struct sk_if_softc	*sc_if;
2031         struct mbuf		*m_head = NULL;
2032         u_int32_t		idx;
2033 
2034 	sc_if = ifp->if_softc;
2035 	sc = sc_if->sk_softc;
2036 
2037 	SK_IF_LOCK(sc_if);
2038 
2039 	idx = sc_if->sk_cdata.sk_tx_prod;
2040 
2041 	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
2042 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
2043 		if (m_head == NULL)
2044 			break;
2045 
2046 		/*
2047 		 * Pack the data into the transmit ring. If we
2048 		 * don't have room, set the OACTIVE flag and wait
2049 		 * for the NIC to drain the ring.
2050 		 */
2051 		if (sk_encap(sc_if, m_head, &idx)) {
2052 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
2053 			ifp->if_flags |= IFF_OACTIVE;
2054 			break;
2055 		}
2056 
2057 		/*
2058 		 * If there's a BPF listener, bounce a copy of this frame
2059 		 * to him.
2060 		 */
2061 		BPF_MTAP(ifp, m_head);
2062 	}
2063 
2064 	/* Transmit */
2065 	if (idx != sc_if->sk_cdata.sk_tx_prod) {
2066 		sc_if->sk_cdata.sk_tx_prod = idx;
2067 		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
2068 
2069 		/* Set a timeout in case the chip goes out to lunch. */
2070 		ifp->if_timer = 5;
2071 	}
2072 	SK_IF_UNLOCK(sc_if);
2073 
2074 	return;
2075 }
2076 
2077 
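/*
 * A transmit did not complete within the watchdog period; report it
 * and reinitialize the interface.
 */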
2078 static void
2079 sk_watchdog(ifp)
2080 	struct ifnet		*ifp;
2081 {
2082 	struct sk_if_softc	*sc_if;
2083 
2084 	sc_if = ifp->if_softc;
2085 
2086 	printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
2087 	ifp->if_flags &= ~IFF_RUNNING;
2088 	sk_init(sc_if);
2089 
2090 	return;
2091 }
2092 
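/*
 * Shutdown method: turn off the driver LED and reset the controller
 * so it is quiescent before the system goes down.
 */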
2093 static void
2094 skc_shutdown(dev)
2095 	device_t		dev;
2096 {
2097 	struct sk_softc		*sc;
2098 
2099 	sc = device_get_softc(dev);
2100 	SK_LOCK(sc);
2101 
2102 	/* Turn off the 'driver is loaded' LED. */
2103 	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
2104 
2105 	/*
2106 	 * Reset the GEnesis controller. Doing this should also
2107 	 * assert the resets on the attached XMAC(s).
2108 	 */
2109 	sk_reset(sc);
2110 	SK_UNLOCK(sc);
2111 
2112 	return;
2113 }
2114 
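/*
 * Receive completion handler. Walk the RX ring as long as we own the
 * descriptors: bad frames are counted and their buffers recycled; for
 * good frames we try to load a fresh jumbo buffer and, failing that,
 * copy the packet into ordinary mbufs with m_devget() so the jumbo
 * buffer can go back into the ring. Good packets are handed to
 * if_input with the driver lock dropped.
 */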
2115 static void
2116 sk_rxeof(sc_if)
2117 	struct sk_if_softc	*sc_if;
2118 {
2119 	struct sk_softc		*sc;
2120 	struct mbuf		*m;
2121 	struct ifnet		*ifp;
2122 	struct sk_chain		*cur_rx;
2123 	int			total_len = 0;
2124 	int			i;
2125 	u_int32_t		rxstat;
2126 
2127 	sc = sc_if->sk_softc;
2128 	ifp = sc_if->sk_ifp;
2129 	i = sc_if->sk_cdata.sk_rx_prod;
2130 	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
2131 
2132 	SK_LOCK_ASSERT(sc);
2133 
2134 	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {
2135 
2136 		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
2137 		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
2138 		m = cur_rx->sk_mbuf;
2139 		cur_rx->sk_mbuf = NULL;
2140 		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
2141 		SK_INC(i, SK_RX_RING_CNT);
2142 
2143 		if (rxstat & XM_RXSTAT_ERRFRAME) {
2144 			ifp->if_ierrors++;
2145 			sk_newbuf(sc_if, cur_rx, m);
2146 			continue;
2147 		}
2148 
2149 		/*
2150 		 * Try to allocate a new jumbo buffer. If that
2151 		 * fails, copy the packet to mbufs and put the
2152 		 * jumbo buffer back in the ring so it can be
2153 		 * re-used. If allocating mbufs fails, then we
2154 		 * have to drop the packet.
2155 		 */
2156 		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
2157 			struct mbuf		*m0;
2158 			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
2159 			    ifp, NULL);
2160 			sk_newbuf(sc_if, cur_rx, m);
2161 			if (m0 == NULL) {
2162 				printf("sk%d: no receive buffers "
2163 				    "available -- packet dropped!\n",
2164 				    sc_if->sk_unit);
2165 				ifp->if_ierrors++;
2166 				continue;
2167 			}
2168 			m = m0;
2169 		} else {
2170 			m->m_pkthdr.rcvif = ifp;
2171 			m->m_pkthdr.len = m->m_len = total_len;
2172 		}
2173 
2174 		ifp->if_ipackets++;
2175 		SK_UNLOCK(sc);
2176 		(*ifp->if_input)(ifp, m);
2177 		SK_LOCK(sc);
2178 	}
2179 
2180 	sc_if->sk_cdata.sk_rx_prod = i;
2181 
2182 	return;
2183 }
2184 
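/*
 * Transmit completion handler: reclaim descriptors the chip has
 * finished with, free the associated mbufs, clear the watchdog timer
 * once the ring drains and clear IFF_OACTIVE when room frees up.
 */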
2185 static void
2186 sk_txeof(sc_if)
2187 	struct sk_if_softc	*sc_if;
2188 {
2189 	struct sk_softc		*sc;
2190 	struct sk_tx_desc	*cur_tx;
2191 	struct ifnet		*ifp;
2192 	u_int32_t		idx;
2193 
2194 	sc = sc_if->sk_softc;
2195 	ifp = sc_if->sk_ifp;
2196 
2197 	/*
2198 	 * Go through our tx ring and free mbufs for those
2199 	 * frames that have been sent.
2200 	 */
2201 	idx = sc_if->sk_cdata.sk_tx_cons;
2202 	while(idx != sc_if->sk_cdata.sk_tx_prod) {
2203 		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
2204 		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
2205 			break;
2206 		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
2207 			ifp->if_opackets++;
2208 		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
2209 			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
2210 			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
2211 		}
2212 		sc_if->sk_cdata.sk_tx_cnt--;
2213 		SK_INC(idx, SK_TX_RING_CNT);
2214 	}
2215 
2216 	if (sc_if->sk_cdata.sk_tx_cnt == 0) {
2217 		ifp->if_timer = 0;
2218 	} else /* nudge chip to keep tx ring moving */
2219 		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
2220 
2221 	if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2)
2222 		ifp->if_flags &= ~IFF_OACTIVE;
2223 
2224 	sc_if->sk_cdata.sk_tx_cons = idx;
2225 }
2226 
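/*
 * Periodic timer used to watch for the XMAC PHY link coming back up;
 * see the GPIO polling comment in the body. For Broadcom PHYs the
 * work is deferred to sk_intr_bcom() instead.
 */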
2227 static void
2228 sk_tick(xsc_if)
2229 	void			*xsc_if;
2230 {
2231 	struct sk_if_softc	*sc_if;
2232 	struct mii_data		*mii;
2233 	struct ifnet		*ifp;
2234 	int			i;
2235 
2236 	sc_if = xsc_if;
2237 	SK_IF_LOCK(sc_if);
2238 	ifp = sc_if->sk_ifp;
2239 	mii = device_get_softc(sc_if->sk_miibus);
2240 
2241 	if (!(ifp->if_flags & IFF_UP)) {
2242 		SK_IF_UNLOCK(sc_if);
2243 		return;
2244 	}
2245 
2246 	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2247 		sk_intr_bcom(sc_if);
2248 		SK_IF_UNLOCK(sc_if);
2249 		return;
2250 	}
2251 
2252 	/*
2253 	 * According to SysKonnect, the correct way to verify that
2254 	 * the link has come back up is to poll bit 0 of the GPIO
2255 	 * register three times. This pin has the signal from the
2256 	 * link_sync pin connected to it; if we read the same link
2257 	 * state 3 times in a row, we know the link is up.
2258 	 */
2259 	for (i = 0; i < 3; i++) {
2260 		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
2261 			break;
2262 	}
2263 
2264 	if (i != 3) {
2265 		sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
2266 		SK_IF_UNLOCK(sc_if);
2267 		return;
2268 	}
2269 
2270 	/* Turn the GP0 interrupt back on. */
2271 	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2272 	SK_XM_READ_2(sc_if, XM_ISR);
2273 	mii_tick(mii);
2274 	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);
2275 
2276 	SK_IF_UNLOCK(sc_if);
2277 	return;
2278 }
2279 
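/*
 * Service an interrupt from the Broadcom PHY used on GMII-based
 * Genesis boards: pause the XMAC TX/RX state machines, read (and so
 * clear) the PHY interrupt status, update the link state and link LED
 * on a link change, then re-enable the MAC.
 */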
2280 static void
2281 sk_intr_bcom(sc_if)
2282 	struct sk_if_softc	*sc_if;
2283 {
2284 	struct mii_data		*mii;
2285 	struct ifnet		*ifp;
2286 	int			status;
2287 	mii = device_get_softc(sc_if->sk_miibus);
2288 	ifp = sc_if->sk_ifp;
2289 
2290 	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2291 
2292 	/*
2293 	 * Read the PHY interrupt register to make sure
2294 	 * we clear any pending interrupts.
2295 	 */
2296 	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);
2297 
2298 	if (!(ifp->if_flags & IFF_RUNNING)) {
2299 		sk_init_xmac(sc_if);
2300 		return;
2301 	}
2302 
2303 	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
2304 		int			lstat;
2305 		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
2306 		    BRGPHY_MII_AUXSTS);
2307 
2308 		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
2309 			mii_mediachg(mii);
2310 			/* Turn off the link LED. */
2311 			SK_IF_WRITE_1(sc_if, 0,
2312 			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
2313 			sc_if->sk_link = 0;
2314 		} else if (status & BRGPHY_ISR_LNK_CHG) {
2315 			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2316 	    		    BRGPHY_MII_IMR, 0xFF00);
2317 			mii_tick(mii);
2318 			sc_if->sk_link = 1;
2319 			/* Turn on the link LED. */
2320 			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
2321 			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
2322 			    SK_LINKLED_BLINK_OFF);
2323 		} else {
2324 			mii_tick(mii);
2325 			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
2326 		}
2327 	}
2328 
2329 	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2330 
2331 	return;
2332 }
2333 
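/*
 * Service an interrupt from the XMAC itself: on a GP0 (link) event,
 * mask GP0 and start the sk_tick() timer to watch for resync; flush
 * the TX or RX FIFO on underrun/overrun.
 */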
2334 static void
2335 sk_intr_xmac(sc_if)
2336 	struct sk_if_softc	*sc_if;
2337 {
2338 	struct sk_softc		*sc;
2339 	u_int16_t		status;
2340 
2341 	sc = sc_if->sk_softc;
2342 	status = SK_XM_READ_2(sc_if, XM_ISR);
2343 
2344 	/*
2345 	 * Link has gone down. Start MII tick timeout to
2346 	 * watch for link resync.
2347 	 */
2348 	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
2349 		if (status & XM_ISR_GP0_SET) {
2350 			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2351 			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
2352 		}
2353 
2354 		if (status & XM_ISR_AUTONEG_DONE) {
2355 			sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
2356 		}
2357 	}
2358 
2359 	if (status & XM_IMR_TX_UNDERRUN)
2360 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
2361 
2362 	if (status & XM_IMR_RX_OVERRUN)
2363 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
2364 
2365 	status = SK_XM_READ_2(sc_if, XM_ISR);
2366 
2367 	return;
2368 }
2369 
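/*
 * Service a GMAC interrupt on Yukon parts. For now we only read the
 * interrupt source register; the status is not otherwise used.
 */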
2370 static void
2371 sk_intr_yukon(sc_if)
2372 	struct sk_if_softc	*sc_if;
2373 {
2374 	int status;
2375 
2376 	status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2377 
2378 	return;
2379 }
2380 
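/*
 * Main interrupt handler, shared by both ports. Loop on the interrupt
 * source register and dispatch RX/TX completions, MAC interrupts and
 * external (PHY) interrupts to the appropriate port, then re-arm the
 * interrupt mask and restart transmission on any port with pending
 * packets.
 */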
2381 static void
2382 sk_intr(xsc)
2383 	void			*xsc;
2384 {
2385 	struct sk_softc		*sc = xsc;
2386 	struct sk_if_softc	*sc_if0 = NULL, *sc_if1 = NULL;
2387 	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
2388 	u_int32_t		status;
2389 
2390 	SK_LOCK(sc);
2391 
2392 	sc_if0 = sc->sk_if[SK_PORT_A];
2393 	sc_if1 = sc->sk_if[SK_PORT_B];
2394 
2395 	if (sc_if0 != NULL)
2396 		ifp0 = sc_if0->sk_ifp;
2397 	if (sc_if1 != NULL)
2398 		ifp1 = sc_if1->sk_ifp;
2399 
2400 	for (;;) {
2401 		status = CSR_READ_4(sc, SK_ISSR);
2402 		if (!(status & sc->sk_intrmask))
2403 			break;
2404 
2405 		/* Handle receive interrupts first. */
2406 		if (status & SK_ISR_RX1_EOF) {
2407 			sk_rxeof(sc_if0);
2408 			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
2409 			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2410 		}
2411 		if (status & SK_ISR_RX2_EOF) {
2412 			sk_rxeof(sc_if1);
2413 			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
2414 			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2415 		}
2416 
2417 		/* Then transmit interrupts. */
2418 		if (status & SK_ISR_TX1_S_EOF) {
2419 			sk_txeof(sc_if0);
2420 			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
2421 			    SK_TXBMU_CLR_IRQ_EOF);
2422 		}
2423 		if (status & SK_ISR_TX2_S_EOF) {
2424 			sk_txeof(sc_if1);
2425 			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
2426 			    SK_TXBMU_CLR_IRQ_EOF);
2427 		}
2428 
2429 		/* Then MAC interrupts. */
2430 		if (status & SK_ISR_MAC1 && ifp0->if_flags & IFF_RUNNING) {
2431 			if (sc->sk_type == SK_GENESIS)
2432 				sk_intr_xmac(sc_if0);
2433 			else
2434 				sk_intr_yukon(sc_if0);
2435 		}
2436 
2437 		if (status & SK_ISR_MAC2 && ifp1->if_flags & IFF_RUNNING) {
2438 			if (sc->sk_type == SK_GENESIS)
2439 				sk_intr_xmac(sc_if1);
2440 			else
2441 				sk_intr_yukon(sc_if1);
2442 		}
2443 
2444 		if (status & SK_ISR_EXTERNAL_REG) {
2445 			if (ifp0 != NULL &&
2446 			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
2447 				sk_intr_bcom(sc_if0);
2448 			if (ifp1 != NULL &&
2449 			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
2450 				sk_intr_bcom(sc_if1);
2451 		}
2452 	}
2453 
2454 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2455 
2456 	if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd))
2457 		sk_start(ifp0);
2458 	if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd))
2459 		sk_start(ifp1);
2460 
2461 	SK_UNLOCK(sc);
2462 
2463 	return;
2464 }
2465 
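/*
 * Bring the XMAC out of reset and program it: station address,
 * broadcast/FCS/padding handling, error-frame acceptance (see the
 * FIFO streaming comment below), promiscuous and multicast filters,
 * interrupt mask and the MAC arbiter values for this XMAC revision.
 * For boards with a Broadcom PHY, the PHY is also released from reset
 * and the XMAC is switched to GMII mode here.
 */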
2466 static void
2467 sk_init_xmac(sc_if)
2468 	struct sk_if_softc	*sc_if;
2469 {
2470 	struct sk_softc		*sc;
2471 	struct ifnet		*ifp;
2472 	struct sk_bcom_hack	bhack[] = {
2473 	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
2474 	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
2475 	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
2476 	{ 0, 0 } };
2477 
2478 	sc = sc_if->sk_softc;
2479 	ifp = sc_if->sk_ifp;
2480 
2481 	/* Unreset the XMAC. */
2482 	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
2483 	DELAY(1000);
2484 
2485 	/* Reset the XMAC's internal state. */
2486 	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2487 
2488 	/* Save the XMAC II revision */
2489 	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
2490 
2491 	/*
2492 	 * Perform additional initialization for external PHYs,
2493 	 * namely for the 1000baseTX cards that use the XMAC's
2494 	 * GMII mode.
2495 	 */
2496 	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2497 		int			i = 0;
2498 		u_int32_t		val;
2499 
2500 		/* Take PHY out of reset. */
2501 		val = sk_win_read_4(sc, SK_GPIO);
2502 		if (sc_if->sk_port == SK_PORT_A)
2503 			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
2504 		else
2505 			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
2506 		sk_win_write_4(sc, SK_GPIO, val);
2507 
2508 		/* Enable GMII mode on the XMAC. */
2509 		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
2510 
2511 		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2512 		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
2513 		DELAY(10000);
2514 		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2515 		    BRGPHY_MII_IMR, 0xFFF0);
2516 
2517 		/*
2518 		 * Early versions of the BCM5400 apparently have
2519 		 * a bug that requires them to have their reserved
2520 		 * registers initialized to some magic values. I don't
2521 		 * know what the numbers do, I'm just the messenger.
2522 		 */
2523 		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
2524 		    == 0x6041) {
2525 			while(bhack[i].reg) {
2526 				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2527 				    bhack[i].reg, bhack[i].val);
2528 				i++;
2529 			}
2530 		}
2531 	}
2532 
2533 	/* Set station address */
2534 	SK_XM_WRITE_2(sc_if, XM_PAR0,
2535 	    *(u_int16_t *)(&IFP2ENADDR(sc_if->sk_ifp)[0]));
2536 	SK_XM_WRITE_2(sc_if, XM_PAR1,
2537 	    *(u_int16_t *)(&IFP2ENADDR(sc_if->sk_ifp)[2]));
2538 	SK_XM_WRITE_2(sc_if, XM_PAR2,
2539 	    *(u_int16_t *)(&IFP2ENADDR(sc_if->sk_ifp)[4]));
2540 	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
2541 
2542 	if (ifp->if_flags & IFF_BROADCAST) {
2543 		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
2544 	} else {
2545 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
2546 	}
2547 
2548 	/* We don't need the FCS appended to the packet. */
2549 	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
2550 
2551 	/* We want short frames padded to 60 bytes. */
2552 	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
2553 
2554 	/*
2555 	 * Enable the reception of all error frames. This is
2556 	 * a necessary evil due to the design of the XMAC. The
2557 	 * XMAC's receive FIFO is only 8K in size, however jumbo
2558 	 * frames can be up to 9000 bytes in length. When bad
2559 	 * frame filtering is enabled, the XMAC's RX FIFO operates
2560 	 * in 'store and forward' mode. For this to work, the
2561 	 * entire frame has to fit into the FIFO, but that means
2562 	 * that jumbo frames larger than 8192 bytes will be
2563 	 * truncated. Disabling all bad frame filtering causes
2564 	 * the RX FIFO to operate in streaming mode, in which
2565 	 * case the XMAC will start transferring frames out of the
2566 	 * RX FIFO as soon as the FIFO threshold is reached.
2567 	 */
2568 	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
2569 	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
2570 	    XM_MODE_RX_INRANGELEN);
2571 
2572 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2573 		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
2574 	else
2575 		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
2576 
2577 	/*
2578 	 * Bump up the transmit threshold. This helps hold off transmit
2579 	 * underruns when we're blasting traffic from both ports at once.
2580 	 */
2581 	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
2582 
2583 	/* Set promiscuous mode */
2584 	sk_setpromisc(sc_if);
2585 
2586 	/* Set multicast filter */
2587 	sk_setmulti(sc_if);
2588 
2589 	/* Clear and enable interrupts */
2590 	SK_XM_READ_2(sc_if, XM_ISR);
2591 	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
2592 		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
2593 	else
2594 		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2595 
2596 	/* Configure MAC arbiter */
2597 	switch(sc_if->sk_xmac_rev) {
2598 	case XM_XMAC_REV_B2:
2599 		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
2600 		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
2601 		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
2602 		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
2603 		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
2604 		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
2605 		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
2606 		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
2607 		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2608 		break;
2609 	case XM_XMAC_REV_C1:
2610 		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
2611 		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
2612 		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
2613 		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
2614 		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
2615 		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
2616 		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
2617 		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
2618 		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2619 		break;
2620 	default:
2621 		break;
2622 	}
2623 	sk_win_write_2(sc, SK_MACARB_CTL,
2624 	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
2625 
2626 	sc_if->sk_link = 1;
2627 
2628 	return;
2629 }
2630 
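/*
 * Reset and program the Yukon GMAC/GPHY: select copper or fiber PHY
 * mode, toggle the MIB counter clear mode, set the receive, transmit
 * and serial mode parameters (including the jumbo frame length), load
 * the station addresses and filters, and bring the RX/TX MAC FIFOs
 * online.
 */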
2631 static void
2632 sk_init_yukon(sc_if)
2633 	struct sk_if_softc	*sc_if;
2634 {
2635 	u_int32_t		phy;
2636 	u_int16_t		reg;
2637 	struct sk_softc		*sc;
2638 	struct ifnet		*ifp;
2639 	int			i;
2640 
2641 	sc = sc_if->sk_softc;
2642 	ifp = sc_if->sk_ifp;
2643 
2644 	if (sc->sk_type == SK_YUKON_LITE &&
2645 	    sc->sk_rev == SK_YUKON_LITE_REV_A3) {
2646 		/* Take PHY out of reset. */
2647 		sk_win_write_4(sc, SK_GPIO,
2648 			(sk_win_read_4(sc, SK_GPIO) | SK_GPIO_DIR9) & ~SK_GPIO_DAT9);
2649 	}
2650 
2651 	/* GMAC and GPHY Reset */
2652 	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
2653 	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
2654 	DELAY(1000);
2655 	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR);
2656 	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
2657 	DELAY(1000);
2658 
2659 	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
2660 		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
2661 
2662 	switch(sc_if->sk_softc->sk_pmd) {
2663 	case IFM_1000_SX:
2664 	case IFM_1000_LX:
2665 		phy |= SK_GPHY_FIBER;
2666 		break;
2667 
2668 	case IFM_1000_CX:
2669 	case IFM_1000_T:
2670 		phy |= SK_GPHY_COPPER;
2671 		break;
2672 	}
2673 
2674 	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
2675 	DELAY(1000);
2676 	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
2677 	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
2678 		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
2679 
2680 	/* unused read of the interrupt source register */
2681 	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2682 
2683 	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
2684 
2685 	/* MIB Counter Clear Mode set */
2686 	reg |= YU_PAR_MIB_CLR;
2687 	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2688 
2689 	/* MIB Counter Clear Mode clear */
2690 	reg &= ~YU_PAR_MIB_CLR;
2691 	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2692 
2693 	/* receive control reg */
2694 	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);
2695 
2696 	/* transmit parameter register */
2697 	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
2698 		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
2699 
2700 	/* serial mode register */
2701 	reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
2702 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
2703 		reg |= YU_SMR_MFL_JUMBO;
2704 	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);
2705 
2706 	/* Setup Yukon's address */
2707 	for (i = 0; i < 3; i++) {
2708 		/* Write Source Address 1 (unicast filter) */
2709 		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
2710 			      IFP2ENADDR(sc_if->sk_ifp)[i * 2] |
2711 			      IFP2ENADDR(sc_if->sk_ifp)[i * 2 + 1] << 8);
2712 	}
2713 
2714 	for (i = 0; i < 3; i++) {
2715 		reg = sk_win_read_2(sc_if->sk_softc,
2716 				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
2717 		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
2718 	}
2719 
2720 	/* Set promiscuous mode */
2721 	sk_setpromisc(sc_if);
2722 
2723 	/* Set multicast filter */
2724 	sk_setmulti(sc_if);
2725 
2726 	/* enable interrupt mask for counter overflows */
2727 	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
2728 	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
2729 	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
2730 
2731 	/* Configure RX MAC FIFO */
2732 	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
2733 	SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON);
2734 
2735 	/* Configure TX MAC FIFO */
2736 	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
2737 	SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
2738 }
2739 
2740 /*
2741  * Note that to properly initialize any part of the GEnesis chip,
2742  * you first have to take it out of reset mode.
2743  */
2744 static void
2745 sk_init(xsc)
2746 	void			*xsc;
2747 {
2748 	struct sk_if_softc	*sc_if = xsc;
2749 	struct sk_softc		*sc;
2750 	struct ifnet		*ifp;
2751 	struct mii_data		*mii;
2752 	u_int16_t		reg;
2753 	u_int32_t		imr;
2754 
2755 	SK_IF_LOCK(sc_if);
2756 
2757 	ifp = sc_if->sk_ifp;
2758 	sc = sc_if->sk_softc;
2759 	mii = device_get_softc(sc_if->sk_miibus);
2760 
2761 	if (ifp->if_flags & IFF_RUNNING) {
2762 		SK_IF_UNLOCK(sc_if);
2763 		return;
2764 	}
2765 
2766 	/* Cancel pending I/O and free all RX/TX buffers. */
2767 	sk_stop(sc_if);
2768 
2769 	if (sc->sk_type == SK_GENESIS) {
2770 		/* Configure LINK_SYNC LED */
2771 		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
2772 		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
2773 			SK_LINKLED_LINKSYNC_ON);
2774 
2775 		/* Configure RX LED */
2776 		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
2777 			SK_RXLEDCTL_COUNTER_START);
2778 
2779 		/* Configure TX LED */
2780 		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
2781 			SK_TXLEDCTL_COUNTER_START);
2782 	}
2783 
2784 	/* Configure I2C registers */
2785 
2786 	/* Configure XMAC(s) */
2787 	switch (sc->sk_type) {
2788 	case SK_GENESIS:
2789 		sk_init_xmac(sc_if);
2790 		break;
2791 	case SK_YUKON:
2792 	case SK_YUKON_LITE:
2793 	case SK_YUKON_LP:
2794 		sk_init_yukon(sc_if);
2795 		break;
2796 	}
2797 	mii_mediachg(mii);
2798 
2799 	if (sc->sk_type == SK_GENESIS) {
2800 		/* Configure MAC FIFOs */
2801 		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
2802 		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
2803 		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
2804 
2805 		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
2806 		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
2807 		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
2808 	}
2809 
2810 	/* Configure transmit arbiter(s) */
2811 	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
2812 	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
2813 
2814 	/* Configure RAMbuffers */
2815 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
2816 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
2817 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
2818 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
2819 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
2820 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
2821 
2822 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
2823 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
2824 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
2825 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
2826 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
2827 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
2828 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
2829 
2830 	/* Configure BMUs */
2831 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
2832 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
2833 	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
2834 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);
2835 
2836 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
2837 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
2838 	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
2839 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);
2840 
2841 	/* Init descriptors */
2842 	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
2843 		printf("sk%d: initialization failed: no "
2844 		    "memory for rx buffers\n", sc_if->sk_unit);
2845 		sk_stop(sc_if);
2846 		SK_IF_UNLOCK(sc_if);
2847 		return;
2848 	}
2849 	sk_init_tx_ring(sc_if);
2850 
2851 	/* Set interrupt moderation if changed via sysctl. */
2852 	/* SK_LOCK(sc); */
2853 	imr = sk_win_read_4(sc, SK_IMTIMERINIT);
2854 	if (imr != SK_IM_USECS(sc->sk_int_mod)) {
2855 		sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod));
2856 		printf("skc%d: interrupt moderation is %d us\n",
2857 		    sc->sk_unit, sc->sk_int_mod);
2858 	}
2859 	/* SK_UNLOCK(sc); */
2860 
2861 	/* Configure interrupt handling */
2862 	CSR_READ_4(sc, SK_ISSR);
2863 	if (sc_if->sk_port == SK_PORT_A)
2864 		sc->sk_intrmask |= SK_INTRS1;
2865 	else
2866 		sc->sk_intrmask |= SK_INTRS2;
2867 
2868 	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
2869 
2870 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2871 
2872 	/* Start BMUs. */
2873 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
2874 
2875 	switch(sc->sk_type) {
2876 	case SK_GENESIS:
2877 		/* Enable XMACs TX and RX state machines */
2878 		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
2879 		SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2880 		break;
2881 	case SK_YUKON:
2882 	case SK_YUKON_LITE:
2883 	case SK_YUKON_LP:
2884 		reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
2885 		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
2886 		reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN);
2887 		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
2888 	}
2889 
2890 	ifp->if_flags |= IFF_RUNNING;
2891 	ifp->if_flags &= ~IFF_OACTIVE;
2892 
2893 	SK_IF_UNLOCK(sc_if);
2894 
2895 	return;
2896 }
2897 
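/*
 * Stop one port: cancel the tick timer, put a Broadcom PHY back into
 * reset, reset the MAC and FIFOs, take the BMUs and RAM buffers
 * offline, turn off the LEDs, mask this port's interrupts and free
 * any mbufs still sitting on the RX and TX rings.
 */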
2898 static void
2899 sk_stop(sc_if)
2900 	struct sk_if_softc	*sc_if;
2901 {
2902 	int			i;
2903 	struct sk_softc		*sc;
2904 	struct ifnet		*ifp;
2905 
2906 	SK_IF_LOCK(sc_if);
2907 	sc = sc_if->sk_softc;
2908 	ifp = sc_if->sk_ifp;
2909 
2910 	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);
2911 
2912 	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2913 		u_int32_t		val;
2914 
2915 		/* Put PHY back into reset. */
2916 		val = sk_win_read_4(sc, SK_GPIO);
2917 		if (sc_if->sk_port == SK_PORT_A) {
2918 			val |= SK_GPIO_DIR0;
2919 			val &= ~SK_GPIO_DAT0;
2920 		} else {
2921 			val |= SK_GPIO_DIR2;
2922 			val &= ~SK_GPIO_DAT2;
2923 		}
2924 		sk_win_write_4(sc, SK_GPIO, val);
2925 	}
2926 
2927 	/* Turn off various components of this interface. */
2928 	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2929 	switch (sc->sk_type) {
2930 	case SK_GENESIS:
2931 		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
2932 		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
2933 		break;
2934 	case SK_YUKON:
2935 	case SK_YUKON_LITE:
2936 	case SK_YUKON_LP:
2937 		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
2938 		SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
2939 		break;
2940 	}
2941 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
2942 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2943 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
2944 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
2945 	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
2946 	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2947 	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2948 	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
2949 	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
2950 
2951 	/* Disable interrupts */
2952 	if (sc_if->sk_port == SK_PORT_A)
2953 		sc->sk_intrmask &= ~SK_INTRS1;
2954 	else
2955 		sc->sk_intrmask &= ~SK_INTRS2;
2956 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2957 
2958 	SK_XM_READ_2(sc_if, XM_ISR);
2959 	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2960 
2961 	/* Free RX and TX mbufs still in the queues. */
2962 	for (i = 0; i < SK_RX_RING_CNT; i++) {
2963 		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
2964 			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
2965 			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
2966 		}
2967 	}
2968 
2969 	for (i = 0; i < SK_TX_RING_CNT; i++) {
2970 		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
2971 			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
2972 			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
2973 		}
2974 	}
2975 
2976 	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
2977 	SK_IF_UNLOCK(sc_if);
2978 	return;
2979 }
2980 
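/*
 * Generic sysctl handler that accepts a new integer value only if it
 * lies within [low, high].
 */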
2981 static int
2982 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2983 {
2984 	int error, value;
2985 
2986 	if (!arg1)
2987 		return (EINVAL);
2988 	value = *(int *)arg1;
2989 	error = sysctl_handle_int(oidp, &value, 0, req);
2990 	if (error || !req->newptr)
2991 		return (error);
2992 	if (value < low || value > high)
2993 		return (EINVAL);
2994 	*(int *)arg1 = value;
2995 	return (0);
2996 }
2997 
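/* Bound the interrupt moderation tunable to [SK_IM_MIN, SK_IM_MAX]. */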
2998 static int
2999 sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS)
3000 {
3001 	return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX));
3002 }
3003