xref: /freebsd/sys/dev/sk/if_sk.c (revision daf1cffce2e07931f27c6c6998652e90df6ba87e)
1 /*
2  * Copyright (c) 1997, 1998, 1999
3  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  * $FreeBSD$
33  */
34 
35 /*
36  * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
37  * the SK-984x series adapters, both single port and dual port.
38  * References:
39  * 	The XaQti XMAC II datasheet, http://www.xaqti.com
40  *	The SysKonnect GEnesis manual, http://www.syskonnect.com
41  *
42  * Written by Bill Paul <wpaul@ee.columbia.edu>
43  * Department of Electrical Engineering
44  * Columbia University, New York City
45  */
46 
47 /*
48  * The SysKonnect gigabit ethernet adapters consist of two main
49  * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
50  * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
51  * components and a PHY while the GEnesis controller provides a PCI
52  * interface with DMA support. Each card may have between 512K and
53  * 2MB of SRAM on board depending on the configuration.
54  *
55  * The SysKonnect GEnesis controller can have either one or two XMAC
56  * chips connected to it, allowing single or dual port NIC configurations.
57  * SysKonnect has the distinction of being the only vendor on the market
58  * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
59  * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
60  * XMAC registers. This driver takes advantage of these features to allow
61  * both XMACs to operate as independent interfaces.
62  */
63 
64 #include <sys/param.h>
65 #include <sys/systm.h>
66 #include <sys/sockio.h>
67 #include <sys/mbuf.h>
68 #include <sys/malloc.h>
69 #include <sys/kernel.h>
70 #include <sys/socket.h>
71 #include <sys/queue.h>
72 
73 #include <net/if.h>
74 #include <net/if_arp.h>
75 #include <net/ethernet.h>
76 #include <net/if_dl.h>
77 #include <net/if_media.h>
78 
79 #include <net/bpf.h>
80 
81 #include <vm/vm.h>              /* for vtophys */
82 #include <vm/pmap.h>            /* for vtophys */
83 #include <machine/clock.h>      /* for DELAY */
84 #include <machine/bus_pio.h>
85 #include <machine/bus_memio.h>
86 #include <machine/bus.h>
87 #include <machine/resource.h>
88 #include <sys/bus.h>
89 #include <sys/rman.h>
90 
91 #include <pci/pcireg.h>
92 #include <pci/pcivar.h>
93 
94 #define SK_USEIOSPACE
95 
96 #include <pci/if_skreg.h>
97 #include <pci/xmaciireg.h>
98 
99 #ifndef lint
100 static const char rcsid[] =
101   "$FreeBSD$";
102 #endif
103 
104 static struct sk_type sk_devs[] = {
105 	{ SK_VENDORID, SK_DEVICEID_GE, "SysKonnect Gigabit Ethernet" },
106 	{ 0, 0, NULL }
107 };
108 
109 static int sk_probe		__P((device_t));
110 static int sk_attach		__P((device_t));
111 static int sk_detach		__P((device_t));
112 static int sk_attach_xmac	__P((struct sk_softc *, int));
113 static void sk_intr		__P((void *));
114 static void sk_intr_xmac	__P((struct sk_if_softc *));
115 static void sk_rxeof		__P((struct sk_if_softc *));
116 static void sk_txeof		__P((struct sk_if_softc *));
117 static int sk_encap		__P((struct sk_if_softc *, struct mbuf *,
118 					u_int32_t *));
119 static void sk_start		__P((struct ifnet *));
120 static int sk_ioctl		__P((struct ifnet *, u_long, caddr_t));
121 static void sk_init		__P((void *));
122 static void sk_init_xmac	__P((struct sk_if_softc *));
123 static void sk_stop		__P((struct sk_if_softc *));
124 static void sk_watchdog		__P((struct ifnet *));
125 static void sk_shutdown		__P((device_t));
126 static int sk_ifmedia_upd	__P((struct ifnet *));
127 static void sk_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
128 static void sk_reset		__P((struct sk_softc *));
129 static int sk_newbuf		__P((struct sk_if_softc *,
130 					struct sk_chain *, struct mbuf *));
131 static int sk_alloc_jumbo_mem	__P((struct sk_if_softc *));
132 static void *sk_jalloc		__P((struct sk_if_softc *));
133 static void sk_jfree		__P((caddr_t, u_int));
134 static void sk_jref		__P((caddr_t, u_int));
135 static int sk_init_rx_ring	__P((struct sk_if_softc *));
136 static void sk_init_tx_ring	__P((struct sk_if_softc *));
137 #ifdef notdef
138 static u_int32_t sk_win_read_4	__P((struct sk_softc *, int));
139 #endif
140 static u_int16_t sk_win_read_2	__P((struct sk_softc *, int));
141 static u_int8_t sk_win_read_1	__P((struct sk_softc *, int));
142 static void sk_win_write_4	__P((struct sk_softc *, int, u_int32_t));
143 static void sk_win_write_2	__P((struct sk_softc *, int, u_int32_t));
144 static void sk_win_write_1	__P((struct sk_softc *, int, u_int32_t));
145 static u_int8_t sk_vpd_readbyte	__P((struct sk_softc *, int));
146 static void sk_vpd_read_res	__P((struct sk_softc *,
147 					struct vpd_res *, int));
148 static void sk_vpd_read		__P((struct sk_softc *));
149 static u_int16_t sk_phy_readreg	__P((struct sk_if_softc *, int));
150 static void sk_phy_writereg	__P((struct sk_if_softc *, int, u_int32_t));
151 static u_int32_t sk_calchash	__P((caddr_t));
152 static void sk_setfilt		__P((struct sk_if_softc *, caddr_t, int));
153 static void sk_setmulti		__P((struct sk_if_softc *));
154 
155 #ifdef SK_USEIOSPACE
156 #define SK_RES		SYS_RES_IOPORT
157 #define SK_RID		SK_PCI_LOIO
158 #else
159 #define SK_RES		SYS_RES_MEMORY
160 #define SK_RID		SK_PCI_LOMEM
161 #endif
162 
163 static device_method_t sk_methods[] = {
164 	/* Device interface */
165 	DEVMETHOD(device_probe,		sk_probe),
166 	DEVMETHOD(device_attach,	sk_attach),
167 	DEVMETHOD(device_detach,	sk_detach),
168 	DEVMETHOD(device_shutdown,	sk_shutdown),
169 	{ 0, 0 }
170 };
171 
172 static driver_t sk_driver = {
173 	"skc",
174 	sk_methods,
175 	sizeof(struct sk_softc)
176 };
177 
178 static devclass_t sk_devclass;
179 
180 DRIVER_MODULE(if_sk, pci, sk_driver, sk_devclass, 0, 0);
181 
182 #define SK_SETBIT(sc, reg, x)		\
183 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)
184 
185 #define SK_CLRBIT(sc, reg, x)		\
186 	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)
187 
188 #define SK_WIN_SETBIT_4(sc, reg, x)	\
189 	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)
190 
191 #define SK_WIN_CLRBIT_4(sc, reg, x)	\
192 	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)
193 
194 #define SK_WIN_SETBIT_2(sc, reg, x)	\
195 	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)
196 
197 #define SK_WIN_CLRBIT_2(sc, reg, x)	\
198 	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)
199 
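/*
 * Indirect (windowed) register access: most GEnesis registers are
 * reached by writing the register's window number (SK_WIN(reg)) to
 * the RAP register and then accessing SK_WIN_BASE + SK_REG(reg).
 * The helpers below wrap this two-step sequence.
 */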
200 #ifdef notdef
201 static u_int32_t sk_win_read_4(sc, reg)
202 	struct sk_softc		*sc;
203 	int			reg;
204 {
205 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
206 	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
207 }
208 #endif
209 
210 static u_int16_t sk_win_read_2(sc, reg)
211 	struct sk_softc		*sc;
212 	int			reg;
213 {
214 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
215 	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
216 }
217 
218 static u_int8_t sk_win_read_1(sc, reg)
219 	struct sk_softc		*sc;
220 	int			reg;
221 {
222 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
223 	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
224 }
225 
226 static void sk_win_write_4(sc, reg, val)
227 	struct sk_softc		*sc;
228 	int			reg;
229 	u_int32_t		val;
230 {
231 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
232 	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
233 	return;
234 }
235 
236 static void sk_win_write_2(sc, reg, val)
237 	struct sk_softc		*sc;
238 	int			reg;
239 	u_int32_t		val;
240 {
241 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
242 	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), (u_int32_t)val);
243 	return;
244 }
245 
246 static void sk_win_write_1(sc, reg, val)
247 	struct sk_softc		*sc;
248 	int			reg;
249 	u_int32_t		val;
250 {
251 	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
252 	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
253 	return;
254 }
255 
256 /*
257  * The VPD EEPROM contains Vital Product Data, as suggested in
258  * the PCI 2.1 specification. The VPD data is separared into areas
259  * denoted by resource IDs. The SysKonnect VPD contains an ID string
260  * resource (the name of the adapter), a read-only area resource
261  * containing various key/data fields and a read/write area which
262  * can be used to store asset management information or log messages.
263  * We read the ID string and read-only into buffers attached to
264  * the controller softc structure for later use. At the moment,
265  * we only use the ID string during sk_attach().
266  */
267 static u_int8_t sk_vpd_readbyte(sc, addr)
268 	struct sk_softc		*sc;
269 	int			addr;
270 {
271 	int			i;
272 
273 	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
274 	for (i = 0; i < SK_TIMEOUT; i++) {
275 		DELAY(1);
276 		if (sk_win_read_2(sc,
277 		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
278 			break;
279 	}
280 
281 	if (i == SK_TIMEOUT)
282 		return(0);
283 
284 	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
285 }
286 
287 static void sk_vpd_read_res(sc, res, addr)
288 	struct sk_softc		*sc;
289 	struct vpd_res		*res;
290 	int			addr;
291 {
292 	int			i;
293 	u_int8_t		*ptr;
294 
295 	ptr = (u_int8_t *)res;
296 	for (i = 0; i < sizeof(struct vpd_res); i++)
297 		ptr[i] = sk_vpd_readbyte(sc, i + addr);
298 
299 	return;
300 }
301 
302 static void sk_vpd_read(sc)
303 	struct sk_softc		*sc;
304 {
305 	int			pos = 0, i;
306 	struct vpd_res		res;
307 
308 	if (sc->sk_vpd_prodname != NULL)
309 		free(sc->sk_vpd_prodname, M_DEVBUF);
310 	if (sc->sk_vpd_readonly != NULL)
311 		free(sc->sk_vpd_readonly, M_DEVBUF);
312 	sc->sk_vpd_prodname = NULL;
313 	sc->sk_vpd_readonly = NULL;
314 
315 	sk_vpd_read_res(sc, &res, pos);
316 
317 	if (res.vr_id != VPD_RES_ID) {
318 		printf("skc%d: bad VPD resource id: expected %x got %x\n",
319 		    sc->sk_unit, VPD_RES_ID, res.vr_id);
320 		return;
321 	}
322 
323 	pos += sizeof(res);
324 	sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
325 	for (i = 0; i < res.vr_len; i++)
326 		sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
327 	sc->sk_vpd_prodname[i] = '\0';
328 	pos += i;
329 
330 	sk_vpd_read_res(sc, &res, pos);
331 
332 	if (res.vr_id != VPD_RES_READ) {
333 		printf("skc%d: bad VPD resource id: expected %x got %x\n",
334 		    sc->sk_unit, VPD_RES_READ, res.vr_id);
335 		return;
336 	}
337 
338 	pos += sizeof(res);
339 	sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
340 	for (i = 0; i < res.vr_len; i++)
341 		sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
342 
343 	return;
344 }
345 
346 static u_int16_t sk_phy_readreg(sc_if, reg)
347 	struct sk_if_softc	*sc_if;
348 	int			reg;
349 {
350 	int			i;
351 
352 	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg);
353 	for (i = 0; i < SK_TIMEOUT; i++) {
354 		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
355 			break;
356 	}
357 
358 	if (i == SK_TIMEOUT) {
359 		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
360 		return(0);
361 	}
362 
363 	return(SK_XM_READ_2(sc_if, XM_PHY_DATA));
364 }
365 
366 static void sk_phy_writereg(sc_if, reg, val)
367 	struct sk_if_softc	*sc_if;
368 	int			reg;
369 	u_int32_t		val;
370 {
371 	int			i;
372 
373 	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg);
374 	for (i = 0; i < SK_TIMEOUT; i++) {
375 		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
376 			break;
377 	}
378 
379 	if (i == SK_TIMEOUT) {
380 		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
381 		return;
382 	}
383 
384 	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
385 	for (i = 0; i < SK_TIMEOUT; i++) {
386 		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
387 			break;
388 	}
389 
390 	if (i == SK_TIMEOUT)
391 		printf("sk%d: phy write timed out\n", sc_if->sk_unit);
392 
393 	return;
394 }
395 
396 #define SK_POLY		0xEDB88320
397 #define SK_BITS		6
398 
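/*
 * Compute the multicast hash: a reflected CRC-32 (polynomial SK_POLY)
 * over the 6-byte ethernet address, complemented and truncated to the
 * low SK_BITS (6) bits. This yields a bucket index of 0-63 spread
 * across the two 32-bit hash registers (XM_MAR0/XM_MAR2).
 */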
399 static u_int32_t sk_calchash(addr)
400 	caddr_t			addr;
401 {
402 	u_int32_t		idx, bit, data, crc;
403 
404 	/* Compute CRC for the address value. */
405 	crc = 0xFFFFFFFF; /* initial value */
406 
407 	for (idx = 0; idx < 6; idx++) {
408 		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
409 			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? SK_POLY : 0);
410 	}
411 
412 	return (~crc & ((1 << SK_BITS) - 1));
413 }
414 
415 static void sk_setfilt(sc_if, addr, slot)
416 	struct sk_if_softc	*sc_if;
417 	caddr_t			addr;
418 	int			slot;
419 {
420 	int			base;
421 
422 	base = XM_RXFILT_ENTRY(slot);
423 
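	/*
	 * Each perfect filter entry holds a complete 48-bit station
	 * address, programmed as three consecutive 16-bit words.
	 */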
424 	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
425 	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
426 	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));
427 
428 	return;
429 }
430 
431 static void sk_setmulti(sc_if)
432 	struct sk_if_softc	*sc_if;
433 {
434 	struct ifnet		*ifp;
435 	u_int32_t		hashes[2] = { 0, 0 };
436 	int			h, i;
437 	struct ifmultiaddr	*ifma;
438 	u_int8_t		dummy[] = { 0, 0, 0, 0, 0, 0 };
439 
440 	ifp = &sc_if->arpcom.ac_if;
441 
442 	/* First, zot all the existing filters. */
443 	for (i = 1; i < XM_RXFILT_MAX; i++)
444 		sk_setfilt(sc_if, (caddr_t)&dummy, i);
445 	SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
446 	SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
447 
448 	/* Now program new ones. */
449 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
450 		hashes[0] = 0xFFFFFFFF;
451 		hashes[1] = 0xFFFFFFFF;
452 	} else {
453 		i = 1;
454 		/* First find the tail of the list. */
455 		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
456 					ifma = ifma->ifma_link.le_next) {
457 			if (ifma->ifma_link.le_next == NULL)
458 				break;
459 		}
460 		/* Now traverse the list backwards. */
461 		for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs;
462 			ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) {
463 			if (ifma->ifma_addr->sa_family != AF_LINK)
464 				continue;
465 			/*
466 			 * Program the first XM_RXFILT_MAX multicast groups
467 			 * into the perfect filter. For all others,
468 			 * use the hash table.
469 			 */
470 			if (i < XM_RXFILT_MAX) {
471 				sk_setfilt(sc_if,
472 			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
473 				i++;
474 				continue;
475 			}
476 
477 			h = sk_calchash(
478 				LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
479 			if (h < 32)
480 				hashes[0] |= (1 << h);
481 			else
482 				hashes[1] |= (1 << (h - 32));
483 		}
484 	}
485 
486 	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
487 	    XM_MODE_RX_USE_PERFECT);
488 	SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
489 	SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
490 
491 	return;
492 }
493 
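/*
 * The RX and TX descriptor rings are circular: each hardware
 * descriptor's sk_next field holds the physical address (vtophys())
 * of the next descriptor so the BMU can follow the ring by DMA, with
 * the last entry wrapping back to the first. The sk_chain arrays
 * mirror this linkage with virtual pointers for the driver's own use.
 */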
494 static int sk_init_rx_ring(sc_if)
495 	struct sk_if_softc	*sc_if;
496 {
497 	struct sk_chain_data	*cd;
498 	struct sk_ring_data	*rd;
499 	int			i;
500 
501 	cd = &sc_if->sk_cdata;
502 	rd = sc_if->sk_rdata;
503 
504 	bzero((char *)rd->sk_rx_ring,
505 	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
506 
507 	for (i = 0; i < SK_RX_RING_CNT; i++) {
508 		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
509 		if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
510 			return(ENOBUFS);
511 		if (i == (SK_RX_RING_CNT - 1)) {
512 			cd->sk_rx_chain[i].sk_next =
513 			    &cd->sk_rx_chain[0];
514 			rd->sk_rx_ring[i].sk_next =
515 			    vtophys(&rd->sk_rx_ring[0]);
516 		} else {
517 			cd->sk_rx_chain[i].sk_next =
518 			    &cd->sk_rx_chain[i + 1];
519 			rd->sk_rx_ring[i].sk_next =
520 			    vtophys(&rd->sk_rx_ring[i + 1]);
521 		}
522 	}
523 
524 	sc_if->sk_cdata.sk_rx_prod = 0;
525 	sc_if->sk_cdata.sk_rx_cons = 0;
526 
527 	return(0);
528 }
529 
530 static void sk_init_tx_ring(sc_if)
531 	struct sk_if_softc	*sc_if;
532 {
533 	struct sk_chain_data	*cd;
534 	struct sk_ring_data	*rd;
535 	int			i;
536 
537 	cd = &sc_if->sk_cdata;
538 	rd = sc_if->sk_rdata;
539 
540 	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
541 	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
542 
543 	for (i = 0; i < SK_TX_RING_CNT; i++) {
544 		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
545 		if (i == (SK_TX_RING_CNT - 1)) {
546 			cd->sk_tx_chain[i].sk_next =
547 			    &cd->sk_tx_chain[0];
548 			rd->sk_tx_ring[i].sk_next =
549 			    vtophys(&rd->sk_tx_ring[0]);
550 		} else {
551 			cd->sk_tx_chain[i].sk_next =
552 			    &cd->sk_tx_chain[i + 1];
553 			rd->sk_tx_ring[i].sk_next =
554 			    vtophys(&rd->sk_tx_ring[i + 1]);
555 		}
556 	}
557 
558 	sc_if->sk_cdata.sk_tx_prod = 0;
559 	sc_if->sk_cdata.sk_tx_cons = 0;
560 	sc_if->sk_cdata.sk_tx_cnt = 0;
561 
562 	return;
563 }
564 
565 static int sk_newbuf(sc_if, c, m)
566 	struct sk_if_softc	*sc_if;
567 	struct sk_chain		*c;
568 	struct mbuf		*m;
569 {
570 	struct mbuf		*m_new = NULL;
571 	struct sk_rx_desc	*r;
572 
573 	if (m == NULL) {
574 		caddr_t			*buf = NULL;
575 
576 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
577 		if (m_new == NULL) {
578 			printf("sk%d: no memory for rx list -- "
579 			    "packet dropped!\n", sc_if->sk_unit);
580 			return(ENOBUFS);
581 		}
582 
583 		/* Allocate the jumbo buffer */
584 		buf = sk_jalloc(sc_if);
585 		if (buf == NULL) {
586 			m_freem(m_new);
587 #ifdef SK_VERBOSE
588 			printf("sk%d: jumbo allocation failed "
589 			    "-- packet dropped!\n", sc_if->sk_unit);
590 #endif
591 			return(ENOBUFS);
592 		}
593 
594 		/* Attach the buffer to the mbuf */
595 		m_new->m_data = m_new->m_ext.ext_buf = (void *)buf;
596 		m_new->m_flags |= M_EXT;
597 		m_new->m_ext.ext_size = m_new->m_pkthdr.len =
598 		    m_new->m_len = SK_MCLBYTES;
599 		m_new->m_ext.ext_free = sk_jfree;
600 		m_new->m_ext.ext_ref = sk_jref;
601 	} else {
602 		/*
603 	 	 * We're re-using a previously allocated mbuf;
604 		 * be sure to re-init pointers and lengths to
605 		 * default values.
606 		 */
607 		m_new = m;
608 		m_new->m_len = m_new->m_pkthdr.len = SK_MCLBYTES;
609 		m_new->m_data = m_new->m_ext.ext_buf;
610 	}
611 
612 	/*
613 	 * Adjust alignment so packet payload begins on a
614 	 * longword boundary. Mandatory for Alpha, useful on
615 	 * x86 too.
616 	 */
617 	m_adj(m_new, ETHER_ALIGN);
618 
619 	r = c->sk_desc;
620 	c->sk_mbuf = m_new;
621 	r->sk_data_lo = vtophys(mtod(m_new, caddr_t));
622 	r->sk_ctl = m_new->m_len | SK_RXSTAT;
623 
624 	return(0);
625 }
626 
627 /*
628  * Allocate jumbo buffer storage. The SysKonnect adapters support
629  * "jumbograms" (9K frames), although SysKonnect doesn't currently
630  * use them in their drivers. In order for us to use them, we need
631  * large 9K receive buffers, however standard mbuf clusters are only
632  * 2048 bytes in size. Consequently, we need to allocate and manage
633  * our own jumbo buffer pool. Fortunately, this does not require an
634  * excessive amount of additional code.
635  */
636 static int sk_alloc_jumbo_mem(sc_if)
637 	struct sk_if_softc	*sc_if;
638 {
639 	caddr_t			ptr;
640 	register int		i;
641 	struct sk_jpool_entry   *entry;
642 
643 	/* Grab a big chunk o' storage. */
644 	sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF,
645 	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
646 
647 	if (sc_if->sk_cdata.sk_jumbo_buf == NULL) {
648 		printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit);
649 		return(ENOBUFS);
650 	}
651 
652 	SLIST_INIT(&sc_if->sk_jfree_listhead);
653 	SLIST_INIT(&sc_if->sk_jinuse_listhead);
654 
655 	/*
656 	 * Now divide it up into 9K pieces and save the addresses
657 	 * in an array. Note that we play an evil trick here by using
658 	 * the first few bytes in the buffer to hold the address
659 	 * of the softc structure for this interface. This is because
660 	 * sk_jfree() needs it, but it is called by the mbuf management
661 	 * code which will not pass it to us explicitly.
662 	 */
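	/*
	 * Each slot therefore looks like this (SK_JLEN is presumably
	 * sizeof(u_int64_t) + SK_MCLBYTES, which is what the slot
	 * arithmetic in sk_jref()/sk_jfree() relies on):
	 *
	 *	[ softc pointer ][ SK_MCLBYTES of buffer space ]
	 */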
663 	ptr = sc_if->sk_cdata.sk_jumbo_buf;
664 	for (i = 0; i < SK_JSLOTS; i++) {
665 		u_int64_t		**aptr;
666 		aptr = (u_int64_t **)ptr;
667 		aptr[0] = (u_int64_t *)sc_if;
668 		ptr += sizeof(u_int64_t);
669 		sc_if->sk_cdata.sk_jslots[i].sk_buf = ptr;
670 		sc_if->sk_cdata.sk_jslots[i].sk_inuse = 0;
671 		ptr += SK_MCLBYTES;
672 		entry = malloc(sizeof(struct sk_jpool_entry),
673 		    M_DEVBUF, M_NOWAIT);
674 		if (entry == NULL) {
675 			free(sc_if->sk_cdata.sk_jumbo_buf, M_DEVBUF);
676 			sc_if->sk_cdata.sk_jumbo_buf = NULL;
677 			printf("sk%d: no memory for jumbo "
678 			    "buffer queue!\n", sc_if->sk_unit);
679 			return(ENOBUFS);
680 		}
681 		entry->slot = i;
682 		SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
683 		    entry, jpool_entries);
684 	}
685 
686 	return(0);
687 }
688 
689 /*
690  * Allocate a jumbo buffer.
691  */
692 static void *sk_jalloc(sc_if)
693 	struct sk_if_softc	*sc_if;
694 {
695 	struct sk_jpool_entry   *entry;
696 
697 	entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
698 
699 	if (entry == NULL) {
700 #ifdef SK_VERBOSE
701 		printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
702 #endif
703 		return(NULL);
704 	}
705 
706 	SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
707 	SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
708 	sc_if->sk_cdata.sk_jslots[entry->slot].sk_inuse = 1;
709 	return(sc_if->sk_cdata.sk_jslots[entry->slot].sk_buf);
710 }
711 
712 /*
713  * Adjust usage count on a jumbo buffer. In general this doesn't
714  * get used much because our jumbo buffers don't get passed around
715  * a lot, but it's implemented for correctness.
716  */
717 static void sk_jref(buf, size)
718 	caddr_t			buf;
719 	u_int			size;
720 {
721 	struct sk_if_softc	*sc_if;
722 	u_int64_t		**aptr;
723 	register int		i;
724 
725 	/* Extract the softc struct pointer. */
726 	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
727 	sc_if = (struct sk_if_softc *)(aptr[0]);
728 
729 	if (sc_if == NULL)
730 		panic("sk_jref: can't find softc pointer!");
731 
732 	if (size != SK_MCLBYTES)
733 		panic("sk_jref: adjusting refcount of buf of wrong size!");
734 
735 	/* calculate the slot this buffer belongs to */
736 
737 	i = ((vm_offset_t)aptr
738 	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
739 
740 	if ((i < 0) || (i >= SK_JSLOTS))
741 		panic("sk_jref: asked to reference buffer "
742 		    "that we don't manage!");
743 	else if (sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0)
744 		panic("sk_jref: buffer already free!");
745 	else
746 		sc_if->sk_cdata.sk_jslots[i].sk_inuse++;
747 
748 	return;
749 }
750 
751 /*
752  * Release a jumbo buffer.
753  */
754 static void sk_jfree(buf, size)
755 	caddr_t			buf;
756 	u_int			size;
757 {
758 	struct sk_if_softc	*sc_if;
759 	u_int64_t		**aptr;
760 	int		        i;
761 	struct sk_jpool_entry   *entry;
762 
763 	/* Extract the softc struct pointer. */
764 	aptr = (u_int64_t **)(buf - sizeof(u_int64_t));
765 	sc_if = (struct sk_if_softc *)(aptr[0]);
766 
767 	if (sc_if == NULL)
768 		panic("sk_jfree: can't find softc pointer!");
769 
770 	if (size != SK_MCLBYTES)
771 		panic("sk_jfree: freeing buffer of wrong size!");
772 
773 	/* calculate the slot this buffer belongs to */
774 
775 	i = ((vm_offset_t)aptr
776 	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;
777 
778 	if ((i < 0) || (i >= SK_JSLOTS))
779 		panic("sk_jfree: asked to free buffer that we don't manage!");
780 	else if (sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0)
781 		panic("sk_jfree: buffer already free!");
782 	else {
783 		sc_if->sk_cdata.sk_jslots[i].sk_inuse--;
784 		if(sc_if->sk_cdata.sk_jslots[i].sk_inuse == 0) {
785 			entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
786 			if (entry == NULL)
787 				panic("sk_jfree: buffer not in use!");
788 			entry->slot = i;
789 			SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead,
790 					  jpool_entries);
791 			SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
792 					  entry, jpool_entries);
793 		}
794 	}
795 
796 	return;
797 }
798 
799 /*
800  * Set media options.
801  */
802 static int sk_ifmedia_upd(ifp)
803 	struct ifnet		*ifp;
804 {
805 	struct sk_if_softc	*sc_if;
806 	struct ifmedia		*ifm;
807 
808 	sc_if = ifp->if_softc;
809 	ifm = &sc_if->ifmedia;
810 
811 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
812 		return(EINVAL);
813 
814 	switch(IFM_SUBTYPE(ifm->ifm_media)) {
815 	case IFM_AUTO:
816 		sk_phy_writereg(sc_if, XM_PHY_BMCR,
817 		    XM_BMCR_RENEGOTIATE|XM_BMCR_AUTONEGENBL);
818 		break;
819 	case IFM_1000_LX:
820 	case IFM_1000_SX:
821 	case IFM_1000_CX:
822 	case IFM_1000_TX:
823 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
824 			sk_phy_writereg(sc_if, XM_PHY_BMCR, XM_BMCR_DUPLEX);
825 		else
826 			sk_phy_writereg(sc_if, XM_PHY_BMCR, 0);
827 		break;
828 	default:
829 		printf("sk%d: invalid media selected\n", sc_if->sk_unit);
830 		return(EINVAL);
831 		break;
832 	}
833 
834 	return(0);
835 }
836 
837 /*
838  * Report current media status.
839  */
840 static void sk_ifmedia_sts(ifp, ifmr)
841 	struct ifnet		*ifp;
842 	struct ifmediareq	*ifmr;
843 {
844 	struct sk_softc		*sc;
845 	struct sk_if_softc	*sc_if;
846 	u_int16_t		bmsr, extsts;
847 
848 	sc_if = ifp->if_softc;
849 	sc = sc_if->sk_softc;
850 
851 	ifmr->ifm_status = IFM_AVALID;
852 	ifmr->ifm_active = IFM_ETHER;
853 
854 	bmsr = sk_phy_readreg(sc_if, XM_PHY_BMSR);
855 	extsts = sk_phy_readreg(sc_if, XM_PHY_EXTSTS);
856 
857 	if (!(bmsr & XM_BMSR_LINKSTAT))
858 		return;
859 
860 	ifmr->ifm_status |= IFM_ACTIVE;
861 	ifmr->ifm_active |= sc->sk_pmd;
862 	if (extsts & XM_EXTSTS_FULLDUPLEX)
863 		ifmr->ifm_active |= IFM_FDX;
864 	else
865 		ifmr->ifm_active |= IFM_HDX;
866 
867 	return;
868 }
869 
870 static int sk_ioctl(ifp, command, data)
871 	struct ifnet		*ifp;
872 	u_long			command;
873 	caddr_t			data;
874 {
875 	struct sk_if_softc	*sc_if = ifp->if_softc;
876 	struct ifreq		*ifr = (struct ifreq *) data;
877 	int			s, error = 0;
878 
879 	s = splimp();
880 
881 	switch(command) {
882 	case SIOCSIFADDR:
883 	case SIOCGIFADDR:
884 		error = ether_ioctl(ifp, command, data);
885 		break;
886 	case SIOCSIFMTU:
887 		if (ifr->ifr_mtu > SK_JUMBO_MTU)
888 			error = EINVAL;
889 		else {
890 			ifp->if_mtu = ifr->ifr_mtu;
891 			sk_init(sc_if);
892 		}
893 		break;
894 	case SIOCSIFFLAGS:
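		/*
		 * If the interface is coming up and only the PROMISC
		 * flag changed while it is already running, just
		 * reprogram the receive mode and multicast filter;
		 * otherwise do a full sk_init(). If it is going down,
		 * stop it.
		 */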
895 		if (ifp->if_flags & IFF_UP) {
896 			if (ifp->if_flags & IFF_RUNNING &&
897 			    ifp->if_flags & IFF_PROMISC &&
898 			    !(sc_if->sk_if_flags & IFF_PROMISC)) {
899 				SK_XM_SETBIT_4(sc_if, XM_MODE,
900 				    XM_MODE_RX_PROMISC);
901 				sk_setmulti(sc_if);
902 			} else if (ifp->if_flags & IFF_RUNNING &&
903 			    !(ifp->if_flags & IFF_PROMISC) &&
904 			    sc_if->sk_if_flags & IFF_PROMISC) {
905 				SK_XM_CLRBIT_4(sc_if, XM_MODE,
906 				    XM_MODE_RX_PROMISC);
907 				sk_setmulti(sc_if);
908 			} else
909 				sk_init(sc_if);
910 		} else {
911 			if (ifp->if_flags & IFF_RUNNING)
912 				sk_stop(sc_if);
913 		}
914 		sc_if->sk_if_flags = ifp->if_flags;
915 		error = 0;
916 		break;
917 	case SIOCADDMULTI:
918 	case SIOCDELMULTI:
919 		sk_setmulti(sc_if);
920 		error = 0;
921 		break;
922 	case SIOCGIFMEDIA:
923 	case SIOCSIFMEDIA:
924 		error = ifmedia_ioctl(ifp, ifr, &sc_if->ifmedia, command);
925 		break;
926 	default:
927 		error = EINVAL;
928 		break;
929 	}
930 
931 	(void)splx(s);
932 
933 	return(error);
934 }
935 
936 /*
937  * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
938  * IDs against our list and return a device name if we find a match.
939  */
940 static int sk_probe(dev)
941 	device_t		dev;
942 {
943 	struct sk_type		*t;
944 
945 	t = sk_devs;
946 
947 	while(t->sk_name != NULL) {
948 		if ((pci_get_vendor(dev) == t->sk_vid) &&
949 		    (pci_get_device(dev) == t->sk_did)) {
950 			device_set_desc(dev, t->sk_name);
951 			return(0);
952 		}
953 		t++;
954 	}
955 
956 	return(ENXIO);
957 }
958 
959 /*
960  * Force the GEnesis into reset, then bring it out of reset.
961  */
962 static void sk_reset(sc)
963 	struct sk_softc		*sc;
964 {
965 	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_RESET);
966 	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_RESET);
967 	DELAY(1000);
968 	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_UNRESET);
969 	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
970 
971 	/* Configure packet arbiter */
972 	sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
973 	sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
974 	sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
975 	sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
976 	sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
977 
978 	/* Enable RAM interface */
979 	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
980 
981 	/*
982          * Configure interrupt moderation. The moderation timer
983 	 * defers interrupts specified in the interrupt moderation
984 	 * timer mask based on the timeout specified in the interrupt
985 	 * moderation timer init register. Each bit in the timer
986 	 * register represents 18.825ns, so to specify a timeout in
987 	 * microseconds, we have to multiply by 54.
988 	 */
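	/*
	 * Example: assuming SK_IM_USECS(us) expands to (us) * 54, the
	 * value programmed below is 200 * 54 = 10800 timer ticks, i.e.
	 * a moderation timeout of roughly 200 microseconds.
	 */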
989         sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
990         sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
991 	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
992         sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
993 
994 	return;
995 }
996 
997 /*
998  * Each XMAC chip is attached as a separate logical IP interface.
999  * Single port cards will have only one logical interface of course.
1000  */
1001 static int sk_attach_xmac(sc, port)
1002 	struct sk_softc		*sc;
1003 	int			port;
1004 {
1005 	struct sk_if_softc	*sc_if;
1006 	struct ifnet		*ifp;
1007 	int			i;
1008 	char			ifname[64];
1009 
1010 	if (sc == NULL)
1011 		return(EINVAL);
1012 
1013 	if (port != SK_PORT_A && port != SK_PORT_B)
1014 		return(EINVAL);
1015 
1016 	sc_if = malloc(sizeof(struct sk_if_softc), M_DEVBUF, M_NOWAIT);
1017 	if (sc_if == NULL) {
1018 		printf("skc%d: no memory for interface softc!\n", sc->sk_unit);
1019 		return(ENOMEM);
1020 	}
1021 	bzero((char *)sc_if, sizeof(struct sk_if_softc));
1022 
1023 	for (i = 0; i < SK_MAXUNIT; i++) {
1024 		sprintf(ifname, "sk%d", i);
1025 		if (ifunit(ifname) == NULL)
1026 			break;
1027 	}
1028 
1029 	if (i == SK_MAXUNIT) {
1030 		printf("skc%d: too many sk units\n", sc->sk_unit);
1031 		free(sc_if, M_DEVBUF);
1032 		return(ENODEV);
1033 	}
1034 
1035 	sc_if->sk_unit = i;
1036 	sc_if->sk_port = port;
1037 	sc_if->sk_softc = sc;
1038 	sc->sk_if[port] = sc_if;
1039 	if (port == SK_PORT_A)
1040 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1041 	if (port == SK_PORT_B)
1042 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1043 
1044 	/*
1045 	 * Get station address for this interface. Note that
1046 	 * dual port cards actually come with three station
1047 	 * addresses: one for each port, plus an extra. The
1048 	 * extra one is used by the SysKonnect driver software
1049 	 * as a 'virtual' station address for when both ports
1050 	 * are operating in failover mode. Currently we don't
1051 	 * use this extra address.
1052 	 */
1053 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1054 		sc_if->arpcom.ac_enaddr[i] =
1055 		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);
1056 
1057 	printf("sk%d: <XaQti Corp. XMAC II> at skc%d port %d\n",
1058 	    sc_if->sk_unit, sc->sk_unit, port);
1059 
1060 	printf("sk%d: Ethernet address: %6D\n",
1061 	    sc_if->sk_unit, sc_if->arpcom.ac_enaddr, ":");
1062 
1063 	/*
1064 	 * Set up RAM buffer addresses. The NIC will have a certain
1065 	 * amount of SRAM on it, somewhere between 512K and 2MB. We
1066 	 * need to divide this up a) between the transmitter and
1067  	 * receiver and b) between the two XMACs, if this is a
1068 	 * dual port NIC. Our algorithm is to divide up the memory
1069 	 * evenly so that everyone gets a fair share.
1070 	 */
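	/*
	 * For example, a single-MAC board with 1MB of SRAM gets 512K
	 * for receive and 512K for transmit, while a dual-MAC board
	 * with the same 1MB gets 256K RX + 256K TX per port. The
	 * start/end values are expressed in 8-byte units, hence the
	 * division by sizeof(u_int64_t).
	 */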
1071 	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1072 		u_int32_t		chunk, val;
1073 
1074 		chunk = sc->sk_ramsize / 2;
1075 		val = sc->sk_rboff / sizeof(u_int64_t);
1076 		sc_if->sk_rx_ramstart = val;
1077 		val += (chunk / sizeof(u_int64_t));
1078 		sc_if->sk_rx_ramend = val - 1;
1079 		sc_if->sk_tx_ramstart = val;
1080 		val += (chunk / sizeof(u_int64_t));
1081 		sc_if->sk_tx_ramend = val - 1;
1082 	} else {
1083 		u_int32_t		chunk, val;
1084 
1085 		chunk = sc->sk_ramsize / 4;
1086 		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1087 		    sizeof(u_int64_t);
1088 		sc_if->sk_rx_ramstart = val;
1089 		val += (chunk / sizeof(u_int64_t));
1090 		sc_if->sk_rx_ramend = val - 1;
1091 		sc_if->sk_tx_ramstart = val;
1092 		val += (chunk / sizeof(u_int64_t));
1093 		sc_if->sk_tx_ramend = val - 1;
1094 	}
1095 
1096 	/* Allocate the descriptor queues. */
1097 	sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
1098 	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
1099 
1100 	if (sc_if->sk_rdata == NULL) {
1101 		printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
1102 		free(sc_if, M_DEVBUF);
1103 		sc->sk_if[port] = NULL;
1104 		return(ENOMEM);
1105 	}
1106 
1107 	bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));
1108 
1109 	/* Try to allocate memory for jumbo buffers. */
1110 	if (sk_alloc_jumbo_mem(sc_if)) {
1111 		printf("sk%d: jumbo buffer allocation failed\n",
1112 		    sc_if->sk_unit);
1113 		free(sc_if->sk_rdata, M_DEVBUF);
1114 		free(sc_if, M_DEVBUF);
1115 		sc->sk_if[port] = NULL;
1116 		return(ENOMEM);
1117 	}
1118 
1119 	ifp = &sc_if->arpcom.ac_if;
1120 	ifp->if_softc = sc_if;
1121 	ifp->if_unit = sc_if->sk_unit;
1122 	ifp->if_name = "sk";
1123 	ifp->if_mtu = ETHERMTU;
1124 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1125 	ifp->if_ioctl = sk_ioctl;
1126 	ifp->if_output = ether_output;
1127 	ifp->if_start = sk_start;
1128 	ifp->if_watchdog = sk_watchdog;
1129 	ifp->if_init = sk_init;
1130 	ifp->if_baudrate = 1000000000;
1131 	ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1;
1132 
1133 	/*
1134 	 * Do ifmedia setup.
1135 	 */
1136 	ifmedia_init(&sc_if->ifmedia, 0, sk_ifmedia_upd, sk_ifmedia_sts);
1137 	ifmedia_add(&sc_if->ifmedia, IFM_ETHER|sc->sk_pmd, 0, NULL);
1138 	ifmedia_add(&sc_if->ifmedia, IFM_ETHER|sc->sk_pmd|IFM_FDX, 0, NULL);
1139 	ifmedia_add(&sc_if->ifmedia, IFM_ETHER|sc->sk_pmd|IFM_HDX, 0, NULL);
1140 	ifmedia_add(&sc_if->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
1141 	ifmedia_set(&sc_if->ifmedia, IFM_ETHER|IFM_AUTO);
1142 
1143 	/*
1144 	 * Call MI attach routines.
1145 	 */
1146 	if_attach(ifp);
1147 	ether_ifattach(ifp);
1148 
1149 	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
1150 
1151 	return(0);
1152 }
1153 
1154 /*
1155  * Attach the interface. Allocate softc structures, do ifmedia
1156  * setup and ethernet/BPF attach.
1157  */
1158 static int sk_attach(dev)
1159 	device_t		dev;
1160 {
1161 	int			s;
1162 	u_int32_t		command;
1163 	struct sk_softc		*sc;
1164 	int			unit, error = 0, rid;
1165 
1166 	s = splimp();
1167 
1168 	sc = device_get_softc(dev);
1169 	unit = device_get_unit(dev);
1170 	bzero(sc, sizeof(struct sk_softc));
1171 
1172 	/*
1173 	 * Handle power management nonsense.
1174 	 */
1175 	command = pci_read_config(dev, SK_PCI_CAPID, 4) & 0x000000FF;
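	/* Capability ID 0x01 is the PCI power management capability. */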
1176 	if (command == 0x01) {
1177 
1178 		command = pci_read_config(dev, SK_PCI_PWRMGMTCTRL, 4);
1179 		if (command & SK_PSTATE_MASK) {
1180 			u_int32_t		iobase, membase, irq;
1181 
1182 			/* Save important PCI config data. */
1183 			iobase = pci_read_config(dev, SK_PCI_LOIO, 4);
1184 			membase = pci_read_config(dev, SK_PCI_LOMEM, 4);
1185 			irq = pci_read_config(dev, SK_PCI_INTLINE, 4);
1186 
1187 			/* Reset the power state. */
1188 			printf("skc%d: chip is in D%d power mode "
1189 			"-- setting to D0\n", unit, command & SK_PSTATE_MASK);
1190 			command &= 0xFFFFFFFC;
1191 			pci_write_config(dev, SK_PCI_PWRMGMTCTRL, command, 4);
1192 
1193 			/* Restore PCI config data. */
1194 			pci_write_config(dev, SK_PCI_LOIO, iobase, 4);
1195 			pci_write_config(dev, SK_PCI_LOMEM, membase, 4);
1196 			pci_write_config(dev, SK_PCI_INTLINE, irq, 4);
1197 		}
1198 	}
1199 
1200 	/*
1201 	 * Map control/status registers.
1202 	 */
1203 	command = pci_read_config(dev, PCI_COMMAND_STATUS_REG, 4);
1204 	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
1205 	pci_write_config(dev, PCI_COMMAND_STATUS_REG, command, 4);
1206 	command = pci_read_config(dev, PCI_COMMAND_STATUS_REG, 4);
1207 
1208 #ifdef SK_USEIOSPACE
1209 	if (!(command & PCIM_CMD_PORTEN)) {
1210 		printf("skc%d: failed to enable I/O ports!\n", unit);
1211 		error = ENXIO;
1212 		goto fail;
1213 	}
1214 #else
1215 	if (!(command & PCIM_CMD_MEMEN)) {
1216 		printf("skc%d: failed to enable memory mapping!\n", unit);
1217 		error = ENXIO;
1218 		goto fail;
1219 	}
1220 #endif
1221 
1222 	rid = SK_RID;
1223 	sc->sk_res = bus_alloc_resource(dev, SK_RES, &rid,
1224 	    0, ~0, 1, RF_ACTIVE);
1225 
1226 	if (sc->sk_res == NULL) {
1227 		printf("skc%d: couldn't map ports/memory\n", unit);
1228 		error = ENXIO;
1229 		goto fail;
1230 	}
1231 
1232 	sc->sk_btag = rman_get_bustag(sc->sk_res);
1233 	sc->sk_bhandle = rman_get_bushandle(sc->sk_res);
1234 
1235 	/* Allocate interrupt */
1236 	rid = 0;
1237 	sc->sk_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
1238 	    RF_SHAREABLE | RF_ACTIVE);
1239 
1240 	if (sc->sk_irq == NULL) {
1241 		printf("skc%d: couldn't map interrupt\n", unit);
1242 		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1243 		error = ENXIO;
1244 		goto fail;
1245 	}
1246 
1247 	error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET,
1248 	    sk_intr, sc, &sc->sk_intrhand);
1249 
1250 	if (error) {
1251 		printf("skc%d: couldn't set up irq\n", unit);
1252 		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1253 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1254 		goto fail;
1255 	}
1256 
1257 	/* Reset the adapter. */
1258 	sk_reset(sc);
1259 
1260 	sc->sk_unit = unit;
1261 
1262 	/* Read and save vital product data from EEPROM. */
1263 	sk_vpd_read(sc);
1264 
1265 	/* Read and save RAM size and RAMbuffer offset */
1266 	switch(sk_win_read_1(sc, SK_EPROM0)) {
1267 	case SK_RAMSIZE_512K_64:
1268 		sc->sk_ramsize = 0x80000;
1269 		sc->sk_rboff = SK_RBOFF_0;
1270 		break;
1271 	case SK_RAMSIZE_1024K_64:
1272 		sc->sk_ramsize = 0x100000;
1273 		sc->sk_rboff = SK_RBOFF_80000;
1274 		break;
1275 	case SK_RAMSIZE_1024K_128:
1276 		sc->sk_ramsize = 0x100000;
1277 		sc->sk_rboff = SK_RBOFF_0;
1278 		break;
1279 	case SK_RAMSIZE_2048K_128:
1280 		sc->sk_ramsize = 0x200000;
1281 		sc->sk_rboff = SK_RBOFF_0;
1282 		break;
1283 	default:
1284 		printf("skc%d: unknown ram size: %d\n",
1285 		    sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
1286 		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1287 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1288 		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1289 		error = ENXIO;
1290 		goto fail;
1291 		break;
1292 	}
1293 
1294 	/* Read and save physical media type */
1295 	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
1296 	case SK_PMD_1000BASESX:
1297 		sc->sk_pmd = IFM_1000_SX;
1298 		break;
1299 	case SK_PMD_1000BASELX:
1300 		sc->sk_pmd = IFM_1000_LX;
1301 		break;
1302 	case SK_PMD_1000BASECX:
1303 		sc->sk_pmd = IFM_1000_CX;
1304 		break;
1305 	case SK_PMD_1000BASETX:
1306 		sc->sk_pmd = IFM_1000_TX;
1307 		break;
1308 	default:
1309 		printf("skc%d: unknown media type: 0x%x\n",
1310 		    sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
1311 		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1312 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1313 		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1314 		error = ENXIO;
1315 		goto fail;
1316 	}
1317 
1318 	/* Announce the product name. */
1319 	printf("skc%d: %s\n", sc->sk_unit, sc->sk_vpd_prodname);
1320 
1321 	sk_attach_xmac(sc, SK_PORT_A);
1322 	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC))
1323 		sk_attach_xmac(sc, SK_PORT_B);
1324 
1325 	/* Turn on the 'driver is loaded' LED. */
1326 	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1327 
1328 fail:
1329 	splx(s);
1330 	return(error);
1331 }
1332 
1333 static int sk_detach(dev)
1334 	device_t		dev;
1335 {
1336 	struct sk_softc		*sc;
1337 	struct sk_if_softc	*sc_if0 = NULL, *sc_if1 = NULL;
1338 	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
1339 	int			s;
1340 
1341 	s = splimp();
1342 
1343 	sc = device_get_softc(dev);
1344 	sc_if0 = sc->sk_if[SK_PORT_A];
1345 	ifp0 = &sc_if0->arpcom.ac_if;
1346 	sk_stop(sc_if0);
1347 	if_detach(ifp0);
1348 	contigfree(sc_if0->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
1349 	ifmedia_removeall(&sc_if0->ifmedia);
1350 	free(sc->sk_if[SK_PORT_A], M_DEVBUF);
1351 	if (sc->sk_if[SK_PORT_B] != NULL) {
1352 		sc_if1 = sc->sk_if[SK_PORT_B];
1353 		ifp1 = &sc_if1->arpcom.ac_if;
1354 		sk_stop(sc_if1);
1355 		if_detach(ifp1);
1356 		contigfree(sc_if1->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
1357 		ifmedia_removeall(&sc_if1->ifmedia);
1358 		free(sc->sk_if[SK_PORT_B], M_DEVBUF);
1359 	}
1360 
1361 	bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1362 	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
1363 	bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
1364 
1365 	splx(s);
1366 
1367 	return(0);
1368 }
1369 
1370 static int sk_encap(sc_if, m_head, txidx)
1371         struct sk_if_softc	*sc_if;
1372         struct mbuf		*m_head;
1373         u_int32_t		*txidx;
1374 {
1375 	struct sk_tx_desc	*f = NULL;
1376 	struct mbuf		*m;
1377 	u_int32_t		frag, cur, cnt = 0;
1378 
1379 	m = m_head;
1380 	cur = frag = *txidx;
1381 
1382 	/*
1383 	 * Start packing the mbufs in this chain into
1384 	 * the fragment pointers. Stop when we run out
1385 	 * of fragments or hit the end of the mbuf chain.
1386 	 */
1387 	for (m = m_head; m != NULL; m = m->m_next) {
1388 		if (m->m_len != 0) {
1389 			if ((SK_TX_RING_CNT -
1390 			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
1391 				return(ENOBUFS);
1392 			f = &sc_if->sk_rdata->sk_tx_ring[frag];
1393 			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
1394 			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
1395 			if (cnt == 0)
1396 				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
1397 			else
1398 				f->sk_ctl |= SK_TXCTL_OWN;
1399 			cur = frag;
1400 			SK_INC(frag, SK_TX_RING_CNT);
1401 			cnt++;
1402 		}
1403 	}
1404 
1405 	if (m != NULL)
1406 		return(ENOBUFS);
1407 
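	/*
	 * Terminate the chain: mark the final fragment, record the
	 * mbuf, and only then set the OWN bit on the first descriptor
	 * so the NIC never sees a partially built chain.
	 */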
1408 	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
1409 		SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
1410 	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
1411 	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
1412 	sc_if->sk_cdata.sk_tx_cnt += cnt;
1413 
1414 	*txidx = frag;
1415 
1416 	return(0);
1417 }
1418 
1419 static void sk_start(ifp)
1420 	struct ifnet		*ifp;
1421 {
1422         struct sk_softc		*sc;
1423         struct sk_if_softc	*sc_if;
1424         struct mbuf		*m_head = NULL;
1425         u_int32_t		idx;
1426 
1427 	sc_if = ifp->if_softc;
1428 	sc = sc_if->sk_softc;
1429 
1430 	idx = sc_if->sk_cdata.sk_tx_prod;
1431 
1432 	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
1433 		IF_DEQUEUE(&ifp->if_snd, m_head);
1434 		if (m_head == NULL)
1435 			break;
1436 
1437 		/*
1438 		 * Pack the data into the transmit ring. If we
1439 		 * don't have room, set the OACTIVE flag and wait
1440 		 * for the NIC to drain the ring.
1441 		 */
1442 		if (sk_encap(sc_if, m_head, &idx)) {
1443 			IF_PREPEND(&ifp->if_snd, m_head);
1444 			ifp->if_flags |= IFF_OACTIVE;
1445 			break;
1446 		}
1447 
1448 		/*
1449 		 * If there's a BPF listener, bounce a copy of this frame
1450 		 * to him.
1451 		 */
1452 		if (ifp->if_bpf)
1453 			bpf_mtap(ifp, m_head);
1454 	}
1455 
1456 	/* Transmit */
1457 	sc_if->sk_cdata.sk_tx_prod = idx;
1458 	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
1459 
1460 	/* Set a timeout in case the chip goes out to lunch. */
1461 	ifp->if_timer = 5;
1462 
1463 	return;
1464 }
1465 
1466 
1467 static void sk_watchdog(ifp)
1468 	struct ifnet		*ifp;
1469 {
1470 	struct sk_if_softc	*sc_if;
1471 
1472 	sc_if = ifp->if_softc;
1473 
1474 	printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
1475 	sk_init(sc_if);
1476 
1477 	return;
1478 }
1479 
1480 static void sk_shutdown(dev)
1481 	device_t		dev;
1482 {
1483 	struct sk_softc		*sc;
1484 
1485 	sc = device_get_softc(dev);
1486 
1487 	/* Turn off the 'driver is loaded' LED. */
1488 	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1489 
1490 	/*
1491 	 * Reset the GEnesis controller. Doing this should also
1492 	 * assert the resets on the attached XMAC(s).
1493 	 */
1494 	sk_reset(sc);
1495 
1496 	return;
1497 }
1498 
1499 static void sk_rxeof(sc_if)
1500 	struct sk_if_softc	*sc_if;
1501 {
1502 	struct ether_header	*eh;
1503 	struct mbuf		*m;
1504 	struct ifnet		*ifp;
1505 	struct sk_chain		*cur_rx;
1506 	int			total_len = 0;
1507 	int			i;
1508 	u_int32_t		rxstat;
1509 
1510 	ifp = &sc_if->arpcom.ac_if;
1511 	i = sc_if->sk_cdata.sk_rx_prod;
1512 	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
1513 
1514 	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {
1515 
1516 		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
1517 		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
1518 		m = cur_rx->sk_mbuf;
1519 		cur_rx->sk_mbuf = NULL;
1520 		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
1521 		SK_INC(i, SK_RX_RING_CNT);
1522 
1523 		if (rxstat & XM_RXSTAT_ERRFRAME) {
1524 			ifp->if_ierrors++;
1525 			sk_newbuf(sc_if, cur_rx, m);
1526 			continue;
1527 		}
1528 
1529 		/*
1530 		 * Try to allocate a new jumbo buffer. If that
1531 		 * fails, copy the packet to mbufs and put the
1532 		 * jumbo buffer back in the ring so it can be
1533 		 * re-used. If allocating mbufs fails, then we
1534 		 * have to drop the packet.
1535 		 */
1536 		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
1537 			struct mbuf		*m0;
1538 			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
1539 			    total_len + ETHER_ALIGN, 0, ifp, NULL);
1540 			sk_newbuf(sc_if, cur_rx, m);
1541 			if (m0 == NULL) {
1542 				printf("sk%d: no receive buffers "
1543 				    "available -- packet dropped!\n",
1544 				    sc_if->sk_unit);
1545 				ifp->if_ierrors++;
1546 				continue;
1547 			}
1548 			m_adj(m0, ETHER_ALIGN);
1549 			m = m0;
1550 		} else {
1551 			m->m_pkthdr.rcvif = ifp;
1552 			m->m_pkthdr.len = m->m_len = total_len;
1553 		}
1554 
1555 		ifp->if_ipackets++;
1556 		eh = mtod(m, struct ether_header *);
1557 
1558 		if (ifp->if_bpf) {
1559 			bpf_mtap(ifp, m);
1560 			if (ifp->if_flags & IFF_PROMISC &&
1561 			    (bcmp(eh->ether_dhost, sc_if->arpcom.ac_enaddr,
1562 			    ETHER_ADDR_LEN) && !(eh->ether_dhost[0] & 1))) {
1563 				m_freem(m);
1564 				continue;
1565 			}
1566 		}
1567 
1568 		/* Remove header from mbuf and pass it on. */
1569 		m_adj(m, sizeof(struct ether_header));
1570 		ether_input(ifp, eh, m);
1571 	}
1572 
1573 	sc_if->sk_cdata.sk_rx_prod = i;
1574 
1575 	return;
1576 }
1577 
1578 static void sk_txeof(sc_if)
1579 	struct sk_if_softc	*sc_if;
1580 {
1581 	struct sk_tx_desc	*cur_tx = NULL;
1582 	struct ifnet		*ifp;
1583 	u_int32_t		idx;
1584 
1585 	ifp = &sc_if->arpcom.ac_if;
1586 
1587 	/*
1588 	 * Go through our tx ring and free mbufs for those
1589 	 * frames that have been sent.
1590 	 */
1591 	idx = sc_if->sk_cdata.sk_tx_cons;
1592 	while(idx != sc_if->sk_cdata.sk_tx_prod) {
1593 		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
1594 		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
1595 			break;
1596 		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
1597 			ifp->if_opackets++;
1598 		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
1599 			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
1600 			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
1601 		}
1602 		sc_if->sk_cdata.sk_tx_cnt--;
1603 		SK_INC(idx, SK_TX_RING_CNT);
1604 		ifp->if_timer = 0;
1605 	}
1606 
1607 	sc_if->sk_cdata.sk_tx_cons = idx;
1608 
1609 	if (cur_tx != NULL)
1610 		ifp->if_flags &= ~IFF_OACTIVE;
1611 
1612 	return;
1613 }
1614 
1615 static void sk_intr_xmac(sc_if)
1616 	struct sk_if_softc	*sc_if;
1617 {
1618 	struct sk_softc		*sc;
1619 	u_int16_t		status;
1620 	u_int16_t		bmsr;
1621 
1622 	sc = sc_if->sk_softc;
1623 	status = SK_XM_READ_2(sc_if, XM_ISR);
1624 
1625 	if (status & XM_ISR_LINKEVENT) {
1626 		SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_LINKEVENT);
1627 		if (sc_if->sk_link == 1) {
1628 			printf("sk%d: gigabit link down\n", sc_if->sk_unit);
1629 			sc_if->sk_link = 0;
1630 		}
1631 	}
1632 
1633 	if (status & XM_ISR_AUTONEG_DONE) {
1634 		bmsr = sk_phy_readreg(sc_if, XM_PHY_BMSR);
1635 		if (bmsr & XM_BMSR_LINKSTAT) {
1636 			sc_if->sk_link = 1;
1637 			SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_LINKEVENT);
1638 			printf("sk%d: gigabit link up\n", sc_if->sk_unit);
1639 		}
1640 	}
1641 
1642 	if (status & XM_IMR_TX_UNDERRUN)
1643 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
1644 
1645 	if (status & XM_IMR_RX_OVERRUN)
1646 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
1647 
1648 	return;
1649 }
1650 
1651 static void sk_intr(xsc)
1652 	void			*xsc;
1653 {
1654 	struct sk_softc		*sc = xsc;
1655 	struct sk_if_softc	*sc_if0 = NULL, *sc_if1 = NULL;
1656 	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
1657 	u_int32_t		status;
1658 
1659 	sc_if0 = sc->sk_if[SK_PORT_A];
1660 	sc_if1 = sc->sk_if[SK_PORT_B];
1661 
1662 	if (sc_if0 != NULL)
1663 		ifp0 = &sc_if0->arpcom.ac_if;
1664 	if (sc_if1 != NULL)
1665 		ifp1 = &sc_if1->arpcom.ac_if;
1666 
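	/*
	 * Service all pending sources until the ISR no longer shows
	 * any of the events we care about, then re-arm the chip by
	 * rewriting the interrupt mask register.
	 */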
1667 	for (;;) {
1668 		status = CSR_READ_4(sc, SK_ISSR);
1669 		if (!(status & sc->sk_intrmask))
1670 			break;
1671 
1672 		/* Handle receive interrupts first. */
1673 		if (status & SK_ISR_RX1_EOF) {
1674 			sk_rxeof(sc_if0);
1675 			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
1676 			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1677 		}
1678 		if (status & SK_ISR_RX2_EOF) {
1679 			sk_rxeof(sc_if1);
1680 			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
1681 			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
1682 		}
1683 
1684 		/* Then transmit interrupts. */
1685 		if (status & SK_ISR_TX1_S_EOF) {
1686 			sk_txeof(sc_if0);
1687 			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
1688 			    SK_TXBMU_CLR_IRQ_EOF);
1689 		}
1690 		if (status & SK_ISR_TX2_S_EOF) {
1691 			sk_txeof(sc_if1);
1692 			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
1693 			    SK_TXBMU_CLR_IRQ_EOF);
1694 		}
1695 
1696 		/* Then MAC interrupts. */
1697 		if (status & SK_ISR_MAC1)
1698 			sk_intr_xmac(sc_if0);
1699 
1700 		if (status & SK_ISR_MAC2)
1701 			sk_intr_xmac(sc_if1);
1702 	}
1703 
1704 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
1705 
1706 	if (ifp0 != NULL && ifp0->if_snd.ifq_head != NULL)
1707 		sk_start(ifp0);
1708 	if (ifp1 != NULL && ifp1->if_snd.ifq_head != NULL)
1709 		sk_start(ifp1);
1710 
1711 	return;
1712 }
1713 
1714 static void sk_init_xmac(sc_if)
1715 	struct sk_if_softc	*sc_if;
1716 {
1717 	struct sk_softc		*sc;
1718 	struct ifnet		*ifp;
1719 
1720 	sc = sc_if->sk_softc;
1721 	ifp = &sc_if->arpcom.ac_if;
1722 
1723 	/* Unreset the XMAC. */
1724 	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
1725 	DELAY(1000);
1726 
1727 	/* Save the XMAC II revision */
1728 	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
1729 
1730 	/* Set station address */
1731 	SK_XM_WRITE_2(sc_if, XM_PAR0,
1732 	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
1733 	SK_XM_WRITE_2(sc_if, XM_PAR1,
1734 	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
1735 	SK_XM_WRITE_2(sc_if, XM_PAR2,
1736 	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
1737 	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
1738 
1739 	if (ifp->if_flags & IFF_PROMISC) {
1740 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
1741 	} else {
1742 		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
1743 	}
1744 
1745 	if (ifp->if_flags & IFF_BROADCAST) {
1746 		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
1747 	} else {
1748 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
1749 	}
1750 
1751 	/* We don't need the FCS appended to the packet. */
1752 	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
1753 
1754 	/* We want short frames padded to 60 bytes. */
1755 	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
1756 
1757 	 * Enable the reception of all error frames. This is
1758 	 * Enable the reception of all error frames. This is is
1759 	 * a necessary evil due to the design of the XMAC. The
1760 	 * XMAC's receive FIFO is only 8K in size, however jumbo
1761 	 * frames can be up to 9000 bytes in length. When bad
1762 	 * frame filtering is enabled, the XMAC's RX FIFO operates
1763 	 * in 'store and forward' mode. For this to work, the
1764 	 * entire frame has to fit into the FIFO, but that means
1765 	 * that jumbo frames larger than 8192 bytes will be
1766 	 * truncated. Disabling all bad frame filtering causes
1767 	 * the RX FIFO to operate in streaming mode, in which
1768 	 * case the XMAC will start transferring frames out of the
1769 	 * RX FIFO as soon as the FIFO threshold is reached.
1770 	 */
1771 	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
1772 	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
1773 	    XM_MODE_RX_INRANGELEN);
1774 
1775 	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
1776 		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
1777 	else
1778 		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
1779 
1780 	/*
1781 	 * Bump up the transmit threshold. This helps hold off transmit
1782 	 * underruns when we're blasting traffic from both ports at once.
1783 	 */
1784 	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
1785 
1786 	/* Set multicast filter */
1787 	sk_setmulti(sc_if);
1788 
1789 	/* Clear and enable interrupts */
1790 	SK_XM_READ_2(sc_if, XM_ISR);
1791 	SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
1792 
1793 	sc_if->sk_link = 0;
1794 
1795 	/* Configure MAC arbiter */
1796 	switch(sc_if->sk_xmac_rev) {
1797 	case XM_XMAC_REV_B2:
1798 		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
1799 		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
1800 		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
1801 		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
1802 		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
1803 		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
1804 		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
1805 		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
1806 		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
1807 		break;
1808 	case XM_XMAC_REV_C1:
1809 		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
1810 		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
1811 		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
1812 		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
1813 		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
1814 		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
1815 		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
1816 		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
1817 		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
1818 		break;
1819 	default:
1820 		break;
1821 	}
1822 	sk_win_write_2(sc, SK_MACARB_CTL,
1823 	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
1824 
1825 	return;
1826 }
1827 
1828 /*
1829  * Note that to properly initialize any part of the GEnesis chip,
1830  * you first have to take it out of reset mode.
1831  */
1832 static void sk_init(xsc)
1833 	void			*xsc;
1834 {
1835 	struct sk_if_softc	*sc_if = xsc;
1836 	struct sk_softc		*sc;
1837 	struct ifnet		*ifp;
1838 	int			s;
1839 
1840 	s = splimp();
1841 
1842 	ifp = &sc_if->arpcom.ac_if;
1843 	sc = sc_if->sk_softc;
1844 
1845 	/* Cancel pending I/O and free all RX/TX buffers. */
1846 	sk_stop(sc_if);
1847 
1848 	/* Configure LINK_SYNC LED */
1849 	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
1850 	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_ON);
1851 
1852 	/* Configure RX LED */
1853 	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_START);
1854 
1855 	/* Configure TX LED */
1856 	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_START);
1857 
1858 	/* Configure I2C registers */
1859 
1860 	/* Configure XMAC(s) */
1861 	sk_init_xmac(sc_if);
1862 
1863 	/* Configure MAC FIFOs */
1864 	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
1865 	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
1866 	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
1867 
1868 	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
1869 	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
1870 	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
1871 
1872 	/* Configure transmit arbiter(s) */
1873 	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
1874 	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
1875 
1876 	/* Configure RAMbuffers */
1877 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
1878 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
1879 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
1880 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
1881 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
1882 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
1883 
1884 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
1885 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
1886 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
1887 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
1888 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
1889 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
1890 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
1891 
1892 	/* Configure BMUs */
1893 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
1894 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
1895 	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
1896 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);
1897 
1898 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
1899 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
1900 	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
1901 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);
1902 
1903 	/* Init descriptors */
1904 	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
1905 		printf("sk%d: initialization failed: no "
1906 		    "memory for rx buffers\n", sc_if->sk_unit);
1907 		sk_stop(sc_if);
1908 		(void)splx(s);
1909 		return;
1910 	}
1911 	sk_init_tx_ring(sc_if);
1912 
1913 	/* Configure interrupt handling */
1914 	CSR_READ_4(sc, SK_ISSR);
1915 	if (sc_if->sk_port == SK_PORT_A)
1916 		sc->sk_intrmask |= SK_INTRS1;
1917 	else
1918 		sc->sk_intrmask |= SK_INTRS2;
1919 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
1920 
1921 	/* Start BMUs. */
1922 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
1923 
1924 	/* Enable XMACs TX and RX state machines */
1925 	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
1926 
1927 	ifp->if_flags |= IFF_RUNNING;
1928 	ifp->if_flags &= ~IFF_OACTIVE;
1929 
1930 	splx(s);
1931 
1932 	return;
1933 }
1934 
1935 static void sk_stop(sc_if)
1936 	struct sk_if_softc	*sc_if;
1937 {
1938 	int			i;
1939 	struct sk_softc		*sc;
1940 	struct ifnet		*ifp;
1941 
1942 	sc = sc_if->sk_softc;
1943 	ifp = &sc_if->arpcom.ac_if;
1944 
1945 	/* Turn off various components of this interface. */
1946 	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
1947 	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
1948 	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
1949 	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
1950 	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
1951 	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
1952 	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
1953 	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
1954 	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
1955 	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
1956 	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
1957 
1958 	/* Disable interrupts */
1959 	if (sc_if->sk_port == SK_PORT_A)
1960 		sc->sk_intrmask &= ~SK_INTRS1;
1961 	else
1962 		sc->sk_intrmask &= ~SK_INTRS2;
1963 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
1964 
1965 	/* Free RX and TX mbufs still in the queues. */
1966 	for (i = 0; i < SK_RX_RING_CNT; i++) {
1967 		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
1968 			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
1969 			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
1970 		}
1971 	}
1972 
1973 	for (i = 0; i < SK_TX_RING_CNT; i++) {
1974 		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
1975 			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
1976 			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
1977 		}
1978 	}
1979 
1980 	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
1981 
1982 	return;
1983 }
1984