xref: /freebsd/sys/dev/lge/if_lge.c (revision 87569f75a91f298c52a71823c04d41cf53c88889)
1 /*-
2  * Copyright (c) 2001 Wind River Systems
3  * Copyright (c) 1997, 1998, 1999, 2000, 2001
4  *	Bill Paul <william.paul@windriver.com>.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by Bill Paul.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 /*
38  * Level 1 LXT1001 gigabit ethernet driver for FreeBSD. Public
39  * documentation not available, but ask me nicely.
40  *
41  * The Level 1 chip is used on some D-Link, SMC and Addtron NICs.
42  * It's a 64-bit PCI part that supports TCP/IP checksum offload,
43  * VLAN tagging/insertion, GMII and TBI (1000baseX) ports. There
44  * are three supported methods for data transfer between host and
45  * NIC: programmed I/O, traditional scatter/gather DMA and Packet
46  * Propulsion Technology (tm) DMA. The latter mechanism is a form
47  * of double buffer DMA where the packet data is copied to a
 * pre-allocated DMA buffer whose physical address has been loaded
49  * into a table at device initialization time. The rationale is that
50  * the virtual to physical address translation needed for normal
51  * scatter/gather DMA is more expensive than the data copy needed
52  * for double buffering. This may be true in Windows NT and the like,
53  * but it isn't true for us, at least on the x86 arch. This driver
54  * uses the scatter/gather I/O method for both TX and RX.
55  *
56  * The LXT1001 only supports TCP/IP checksum offload on receive.
57  * Also, the VLAN tagging is done using a 16-entry table which allows
58  * the chip to perform hardware filtering based on VLAN tags. Sadly,
59  * our vlan support doesn't currently play well with this kind of
60  * hardware support.
61  *
62  * Special thanks to:
63  * - Jeff James at Intel, for arranging to have the LXT1001 manual
64  *   released (at long last)
65  * - Beny Chen at D-Link, for actually sending it to me
66  * - Brad Short and Keith Alexis at SMC, for sending me sample
67  *   SMC9462SX and SMC9462TX adapters for testing
68  * - Paul Saab at Y!, for not killing me (though it remains to be seen
69  *   if in fact he did me much of a favor)
70  */
71 
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/sockio.h>
75 #include <sys/mbuf.h>
76 #include <sys/malloc.h>
77 #include <sys/kernel.h>
78 #include <sys/module.h>
79 #include <sys/socket.h>
80 
81 #include <net/if.h>
82 #include <net/if_arp.h>
83 #include <net/ethernet.h>
84 #include <net/if_dl.h>
85 #include <net/if_media.h>
86 #include <net/if_types.h>
87 
88 #include <net/bpf.h>
89 
90 #include <vm/vm.h>              /* for vtophys */
91 #include <vm/pmap.h>            /* for vtophys */
92 #include <machine/clock.h>      /* for DELAY */
93 #include <machine/bus.h>
94 #include <machine/resource.h>
95 #include <sys/bus.h>
96 #include <sys/rman.h>
97 
98 #include <dev/mii/mii.h>
99 #include <dev/mii/miivar.h>
100 
101 #include <dev/pci/pcireg.h>
102 #include <dev/pci/pcivar.h>
103 
104 #define LGE_USEIOSPACE
105 
106 #include <dev/lge/if_lgereg.h>
107 
108 /* "device miibus" required.  See GENERIC if you get errors here. */
109 #include "miibus_if.h"
110 
111 /*
112  * Various supported device vendors/types and their names.
113  */
114 static struct lge_type lge_devs[] = {
115 	{ LGE_VENDORID, LGE_DEVICEID, "Level 1 Gigabit Ethernet" },
116 	{ 0, 0, NULL }
117 };
118 
119 static int lge_probe(device_t);
120 static int lge_attach(device_t);
121 static int lge_detach(device_t);
122 
123 static int lge_alloc_jumbo_mem(struct lge_softc *);
124 static void lge_free_jumbo_mem(struct lge_softc *);
125 static void *lge_jalloc(struct lge_softc *);
126 static void lge_jfree(void *, void *);
127 
128 static int lge_newbuf(struct lge_softc *, struct lge_rx_desc *, struct mbuf *);
129 static int lge_encap(struct lge_softc *, struct mbuf *, u_int32_t *);
130 static void lge_rxeof(struct lge_softc *, int);
131 static void lge_rxeoc(struct lge_softc *);
132 static void lge_txeof(struct lge_softc *);
133 static void lge_intr(void *);
134 static void lge_tick(void *);
135 static void lge_start(struct ifnet *);
136 static void lge_start_locked(struct ifnet *);
137 static int lge_ioctl(struct ifnet *, u_long, caddr_t);
138 static void lge_init(void *);
139 static void lge_init_locked(struct lge_softc *);
140 static void lge_stop(struct lge_softc *);
141 static void lge_watchdog(struct ifnet *);
142 static void lge_shutdown(device_t);
143 static int lge_ifmedia_upd(struct ifnet *);
144 static void lge_ifmedia_upd_locked(struct ifnet *);
145 static void lge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
146 
147 static void lge_eeprom_getword(struct lge_softc *, int, u_int16_t *);
148 static void lge_read_eeprom(struct lge_softc *, caddr_t, int, int, int);
149 
150 static int lge_miibus_readreg(device_t, int, int);
151 static int lge_miibus_writereg(device_t, int, int, int);
152 static void lge_miibus_statchg(device_t);
153 
154 static void lge_setmulti(struct lge_softc *);
155 static void lge_reset(struct lge_softc *);
156 static int lge_list_rx_init(struct lge_softc *);
157 static int lge_list_tx_init(struct lge_softc *);
158 
159 #ifdef LGE_USEIOSPACE
160 #define LGE_RES			SYS_RES_IOPORT
161 #define LGE_RID			LGE_PCI_LOIO
162 #else
163 #define LGE_RES			SYS_RES_MEMORY
164 #define LGE_RID			LGE_PCI_LOMEM
165 #endif
166 
167 static device_method_t lge_methods[] = {
168 	/* Device interface */
169 	DEVMETHOD(device_probe,		lge_probe),
170 	DEVMETHOD(device_attach,	lge_attach),
171 	DEVMETHOD(device_detach,	lge_detach),
172 	DEVMETHOD(device_shutdown,	lge_shutdown),
173 
174 	/* bus interface */
175 	DEVMETHOD(bus_print_child,	bus_generic_print_child),
176 	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
177 
178 	/* MII interface */
179 	DEVMETHOD(miibus_readreg,	lge_miibus_readreg),
180 	DEVMETHOD(miibus_writereg,	lge_miibus_writereg),
181 	DEVMETHOD(miibus_statchg,	lge_miibus_statchg),
182 
183 	{ 0, 0 }
184 };
185 
186 static driver_t lge_driver = {
187 	"lge",
188 	lge_methods,
189 	sizeof(struct lge_softc)
190 };
191 
192 static devclass_t lge_devclass;
193 
194 DRIVER_MODULE(lge, pci, lge_driver, lge_devclass, 0, 0);
195 DRIVER_MODULE(miibus, lge, miibus_driver, miibus_devclass, 0, 0);
196 MODULE_DEPEND(lge, pci, 1, 1, 1);
197 MODULE_DEPEND(lge, ether, 1, 1, 1);
198 MODULE_DEPEND(lge, miibus, 1, 1, 1);
199 
200 #define LGE_SETBIT(sc, reg, x)				\
201 	CSR_WRITE_4(sc, reg,				\
202 		CSR_READ_4(sc, reg) | (x))
203 
204 #define LGE_CLRBIT(sc, reg, x)				\
205 	CSR_WRITE_4(sc, reg,				\
206 		CSR_READ_4(sc, reg) & ~(x))
207 
208 #define SIO_SET(x)					\
209 	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) | x)
210 
211 #define SIO_CLR(x)					\
212 	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) & ~x)
213 
/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * The result is stored in *dest; on timeout, *dest is left unmodified.
 */
static void
lge_eeprom_getword(sc, addr, dest)
	struct lge_softc	*sc;
	int			addr;
	u_int16_t		*dest;
{
	register int		i;
	u_int32_t		val;

	/*
	 * Issue a single-access read.  The EEPROM is accessed in
	 * 32-bit (two-word) units, hence the addr >> 1 in the command.
	 */
	CSR_WRITE_4(sc, LGE_EECTL, LGE_EECTL_CMD_READ|
	    LGE_EECTL_SINGLEACCESS|((addr >> 1) << 8));

	/* Poll until the chip clears the read command bit. */
	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_EECTL) & LGE_EECTL_CMD_READ))
			break;

	if (i == LGE_TIMEOUT) {
		if_printf(sc->lge_ifp, "EEPROM read timed out\n");
		return;
	}

	val = CSR_READ_4(sc, LGE_EEDATA);

	/* Odd word addresses live in the high half of the data register. */
	if (addr & 1)
		*dest = (val >> 16) & 0xFFFF;
	else
		*dest = val & 0xFFFF;

	return;
}
247 
248 /*
249  * Read a sequence of words from the EEPROM.
250  */
251 static void
252 lge_read_eeprom(sc, dest, off, cnt, swap)
253 	struct lge_softc	*sc;
254 	caddr_t			dest;
255 	int			off;
256 	int			cnt;
257 	int			swap;
258 {
259 	int			i;
260 	u_int16_t		word = 0, *ptr;
261 
262 	for (i = 0; i < cnt; i++) {
263 		lge_eeprom_getword(sc, off + i, &word);
264 		ptr = (u_int16_t *)(dest + (i * 2));
265 		if (swap)
266 			*ptr = ntohs(word);
267 		else
268 			*ptr = word;
269 	}
270 
271 	return;
272 }
273 
/*
 * Read PHY register 'reg' on PHY 'phy' via the GMII management
 * interface.  Returns the 16-bit register value, or 0 on timeout
 * (or for the masked-off internal PHY; see below).
 */
static int
lge_miibus_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct lge_softc	*sc;
	int			i;

	sc = device_get_softc(dev);

	/*
	 * If we have a non-PCS PHY, pretend that the internal
	 * autoneg stuff at PHY address 0 isn't there so that
	 * the miibus code will find only the GMII PHY.
	 */
	if (sc->lge_pcs == 0 && phy == 0)
		return(0);

	CSR_WRITE_4(sc, LGE_GMIICTL, (phy << 8) | reg | LGE_GMIICMD_READ);

	/* Wait for the management interface to go idle. */
	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
			break;

	if (i == LGE_TIMEOUT) {
		if_printf(sc->lge_ifp, "PHY read timed out\n");
		return(0);
	}

	/* Read data comes back in the upper 16 bits of the register. */
	return(CSR_READ_4(sc, LGE_GMIICTL) >> 16);
}
305 
/*
 * Write 'data' to PHY register 'reg' on PHY 'phy' via the GMII
 * management interface.  Always returns 0, even on timeout.
 */
static int
lge_miibus_writereg(dev, phy, reg, data)
	device_t		dev;
	int			phy, reg, data;
{
	struct lge_softc	*sc;
	int			i;

	sc = device_get_softc(dev);

	/* The data word occupies the upper 16 bits of the command. */
	CSR_WRITE_4(sc, LGE_GMIICTL,
	    (data << 16) | (phy << 8) | reg | LGE_GMIICMD_WRITE);

	/* Wait for the management interface to go idle. */
	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
			break;

	if (i == LGE_TIMEOUT) {
		if_printf(sc->lge_ifp, "PHY write timed out\n");
		return(0);
	}

	return(0);
}
330 
/*
 * MII status change callback: program the MAC's speed and duplex
 * bits in the GMII mode register to match the media the PHY
 * currently reports as active.
 */
static void
lge_miibus_statchg(dev)
	device_t		dev;
{
	struct lge_softc	*sc;
	struct mii_data		*mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->lge_miibus);

	/* Clear the current speed selection, then set the new one. */
	LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_SPEED);
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
		break;
	case IFM_100_TX:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_100);
		break;
	case IFM_10_T:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_10);
		break;
	default:
		/*
		 * Choose something, even if it's wrong. Clearing
		 * all the bits will hose autoneg on the internal
		 * PHY.
		 */
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
		break;
	}

	/* Mirror the PHY's duplex setting in the MAC. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
	} else {
		LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
	}

	return;
}
371 
/*
 * Program the multicast filter.  In allmulti/promiscuous mode both
 * 32-bit hash registers are set to all-ones; otherwise each
 * link-level multicast address is hashed (top 6 bits of the
 * big-endian CRC32) into one of the 64 filter bits.
 */
static void
lge_setmulti(sc)
	struct lge_softc	*sc;
{
	struct ifnet		*ifp;
	struct ifmultiaddr	*ifma;
	u_int32_t		h = 0, hashes[2] = { 0, 0 };

	ifp = sc->lge_ifp;
	LGE_LOCK_ASSERT(sc);

	/* Make sure multicast hash table is enabled. */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_MCAST);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, LGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, LGE_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, LGE_MAR0, 0);
	CSR_WRITE_4(sc, LGE_MAR1, 0);

	/* now program new ones */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}
	IF_ADDR_UNLOCK(ifp);

	CSR_WRITE_4(sc, LGE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, LGE_MAR1, hashes[1]);

	return;
}
415 
/*
 * Issue a software reset, poll until the chip clears the reset bit
 * (or we give up after LGE_TIMEOUT iterations), then let the chip
 * settle.
 */
static void
lge_reset(sc)
	struct lge_softc	*sc;
{
	register int		i;

	LGE_SETBIT(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_SOFTRST);

	for (i = 0; i < LGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, LGE_MODE1) & LGE_MODE1_SOFTRST))
			break;
	}

	if (i == LGE_TIMEOUT)
		if_printf(sc->lge_ifp, "reset never completed\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	return;
}
437 
438 /*
439  * Probe for a Level 1 chip. Check the PCI vendor and device
440  * IDs against our list and return a device name if we find a match.
441  */
442 static int
443 lge_probe(dev)
444 	device_t		dev;
445 {
446 	struct lge_type		*t;
447 
448 	t = lge_devs;
449 
450 	while(t->lge_name != NULL) {
451 		if ((pci_get_vendor(dev) == t->lge_vid) &&
452 		    (pci_get_device(dev) == t->lge_did)) {
453 			device_set_desc(dev, t->lge_name);
454 			return(BUS_PROBE_DEFAULT);
455 		}
456 		t++;
457 	}
458 
459 	return(ENXIO);
460 }
461 
462 /*
463  * Attach the interface. Allocate softc structures, do ifmedia
464  * setup and ethernet/BPF attach.
465  */
466 static int
467 lge_attach(dev)
468 	device_t		dev;
469 {
470 	u_char			eaddr[ETHER_ADDR_LEN];
471 	struct lge_softc	*sc;
472 	struct ifnet		*ifp = NULL;
473 	int			error = 0, rid;
474 
475 	sc = device_get_softc(dev);
476 	mtx_init(&sc->lge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
477 	    MTX_DEF);
478 	callout_init_mtx(&sc->lge_stat_callout, &sc->lge_mtx, 0);
479 
480 	/*
481 	 * Map control/status registers.
482 	 */
483 	pci_enable_busmaster(dev);
484 
485 	rid = LGE_RID;
486 	sc->lge_res = bus_alloc_resource_any(dev, LGE_RES, &rid, RF_ACTIVE);
487 
488 	if (sc->lge_res == NULL) {
489 		device_printf(dev, "couldn't map ports/memory\n");
490 		error = ENXIO;
491 		goto fail;
492 	}
493 
494 	sc->lge_btag = rman_get_bustag(sc->lge_res);
495 	sc->lge_bhandle = rman_get_bushandle(sc->lge_res);
496 
497 	/* Allocate interrupt */
498 	rid = 0;
499 	sc->lge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
500 	    RF_SHAREABLE | RF_ACTIVE);
501 
502 	if (sc->lge_irq == NULL) {
503 		device_printf(dev, "couldn't map interrupt\n");
504 		error = ENXIO;
505 		goto fail;
506 	}
507 
508 	/* Reset the adapter. */
509 	lge_reset(sc);
510 
511 	/*
512 	 * Get station address from the EEPROM.
513 	 */
514 	lge_read_eeprom(sc, (caddr_t)&eaddr[0], LGE_EE_NODEADDR_0, 1, 0);
515 	lge_read_eeprom(sc, (caddr_t)&eaddr[2], LGE_EE_NODEADDR_1, 1, 0);
516 	lge_read_eeprom(sc, (caddr_t)&eaddr[4], LGE_EE_NODEADDR_2, 1, 0);
517 
518 	sc->lge_ldata = contigmalloc(sizeof(struct lge_list_data), M_DEVBUF,
519 	    M_NOWAIT | M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0);
520 
521 	if (sc->lge_ldata == NULL) {
522 		device_printf(dev, "no memory for list buffers!\n");
523 		error = ENXIO;
524 		goto fail;
525 	}
526 
527 	/* Try to allocate memory for jumbo buffers. */
528 	if (lge_alloc_jumbo_mem(sc)) {
529 		device_printf(dev, "jumbo buffer allocation failed\n");
530 		error = ENXIO;
531 		goto fail;
532 	}
533 
534 	ifp = sc->lge_ifp = if_alloc(IFT_ETHER);
535 	if (ifp == NULL) {
536 		device_printf(dev, "can not if_alloc()\n");
537 		lge_free_jumbo_mem(sc);
538 		error = ENOSPC;
539 		goto fail;
540 	}
541 	ifp->if_softc = sc;
542 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
543 	ifp->if_mtu = ETHERMTU;
544 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
545 	ifp->if_ioctl = lge_ioctl;
546 	ifp->if_start = lge_start;
547 	ifp->if_watchdog = lge_watchdog;
548 	ifp->if_init = lge_init;
549 	ifp->if_snd.ifq_maxlen = LGE_TX_LIST_CNT - 1;
550 	ifp->if_capabilities = IFCAP_RXCSUM;
551 	ifp->if_capenable = ifp->if_capabilities;
552 
553 	if (CSR_READ_4(sc, LGE_GMIIMODE) & LGE_GMIIMODE_PCSENH)
554 		sc->lge_pcs = 1;
555 	else
556 		sc->lge_pcs = 0;
557 
558 	/*
559 	 * Do MII setup.
560 	 */
561 	if (mii_phy_probe(dev, &sc->lge_miibus,
562 	    lge_ifmedia_upd, lge_ifmedia_sts)) {
563 		device_printf(dev, "MII without any PHY!\n");
564 		lge_free_jumbo_mem(sc);
565 		error = ENXIO;
566 		goto fail;
567 	}
568 
569 	/*
570 	 * Call MI attach routine.
571 	 */
572 	ether_ifattach(ifp, eaddr);
573 
574 	error = bus_setup_intr(dev, sc->lge_irq, INTR_TYPE_NET | INTR_MPSAFE,
575 	    lge_intr, sc, &sc->lge_intrhand);
576 
577 	if (error) {
578 		ether_ifdetach(ifp);
579 		device_printf(dev, "couldn't set up irq\n");
580 		goto fail;
581 	}
582 	return (0);
583 
584 fail:
585 	if (sc->lge_ldata)
586 		contigfree(sc->lge_ldata,
587 		    sizeof(struct lge_list_data), M_DEVBUF);
588 	if (ifp)
589 		if_free(ifp);
590 	if (sc->lge_irq)
591 		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
592 	if (sc->lge_res)
593 		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
594 	mtx_destroy(&sc->lge_mtx);
595 	return(error);
596 }
597 
/*
 * Tear down the device: quiesce the chip, detach from the network
 * stack and miibus, release bus resources, and free descriptor and
 * jumbo buffer memory.  Always returns 0.
 */
static int
lge_detach(dev)
	device_t		dev;
{
	struct lge_softc	*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	ifp = sc->lge_ifp;

	/* Stop the hardware before detaching from the stack. */
	LGE_LOCK(sc);
	lge_reset(sc);
	lge_stop(sc);
	LGE_UNLOCK(sc);
	callout_drain(&sc->lge_stat_callout);
	ether_ifdetach(ifp);

	bus_generic_detach(dev);
	device_delete_child(dev, sc->lge_miibus);

	bus_teardown_intr(dev, sc->lge_irq, sc->lge_intrhand);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
	bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);

	contigfree(sc->lge_ldata, sizeof(struct lge_list_data), M_DEVBUF);
	if_free(ifp);
	lge_free_jumbo_mem(sc);
	mtx_destroy(&sc->lge_mtx);

	return(0);
}
629 
630 /*
631  * Initialize the transmit descriptors.
632  */
633 static int
634 lge_list_tx_init(sc)
635 	struct lge_softc	*sc;
636 {
637 	struct lge_list_data	*ld;
638 	struct lge_ring_data	*cd;
639 	int			i;
640 
641 	cd = &sc->lge_cdata;
642 	ld = sc->lge_ldata;
643 	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
644 		ld->lge_tx_list[i].lge_mbuf = NULL;
645 		ld->lge_tx_list[i].lge_ctl = 0;
646 	}
647 
648 	cd->lge_tx_prod = cd->lge_tx_cons = 0;
649 
650 	return(0);
651 }
652 
653 
/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.  Returns 0, or ENOBUFS if mbuf/jumbo
 * allocation fails.
 */
static int
lge_list_rx_init(sc)
	struct lge_softc	*sc;
{
	struct lge_list_data	*ld;
	struct lge_ring_data	*cd;
	int			i;

	ld = sc->lge_ldata;
	cd = &sc->lge_cdata;

	cd->lge_rx_prod = cd->lge_rx_cons = 0;

	/* Only 32-bit descriptor addresses are used. */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);

	/* Hand descriptors to the chip until its RX command FIFO fills. */
	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
		if (CSR_READ_1(sc, LGE_RXCMDFREE_8BIT) == 0)
			break;
		if (lge_newbuf(sc, &ld->lge_rx_list[i], NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	/* Clear possible 'rx command queue empty' interrupt. */
	CSR_READ_4(sc, LGE_ISR);

	return(0);
}
686 
/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * If 'm' is NULL, a fresh mbuf header and jumbo buffer are allocated;
 * otherwise the caller's mbuf (and its existing external buffer) is
 * recycled.  The descriptor is then handed to the chip.  Returns 0
 * on success or ENOBUFS.
 */
static int
lge_newbuf(sc, c, m)
	struct lge_softc	*sc;
	struct lge_rx_desc	*c;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;
	caddr_t			*buf = NULL;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			if_printf(sc->lge_ifp, "no memory for rx list "
			    "-- packet dropped!\n");
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = lge_jalloc(sc);
		if (buf == NULL) {
#ifdef LGE_VERBOSE
			if_printf(sc->lge_ifp, "jumbo allocation failed "
			    "-- packet dropped!\n");
#endif
			m_freem(m_new);
			return(ENOBUFS);
		}
		/* Attach the buffer to the mbuf */
		m_new->m_data = (void *)buf;
		m_new->m_len = m_new->m_pkthdr.len = LGE_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, LGE_JUMBO_FRAMELEN, lge_jfree,
		    (struct lge_softc *)sc, 0, EXT_NET_DRV);
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = LGE_JUMBO_FRAMELEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	*/
	m_adj(m_new, ETHER_ALIGN);

	c->lge_mbuf = m_new;
	c->lge_fragptr_hi = 0;
	c->lge_fragptr_lo = vtophys(mtod(m_new, caddr_t));
	c->lge_fraglen = m_new->m_len;
	c->lge_ctl = m_new->m_len | LGE_RXCTL_WANTINTR | LGE_FRAGCNT(1);
	c->lge_sts = 0;

	/*
	 * Put this buffer in the RX command FIFO. To do this,
	 * we just write the physical address of the descriptor
	 * into the RX descriptor address registers. Note that
	 * there are two registers, one high DWORD and one low
	 * DWORD, which lets us specify a 64-bit address if
	 * desired. We only use a 32-bit address for now.
	 * Writing to the low DWORD register is what actually
	 * causes the command to be issued, so we do that
	 * last.
	 */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_LO, vtophys(c));
	LGE_INC(sc->lge_cdata.lge_rx_prod, LGE_RX_LIST_CNT);

	return(0);
}
758 
759 static int
760 lge_alloc_jumbo_mem(sc)
761 	struct lge_softc	*sc;
762 {
763 	caddr_t			ptr;
764 	register int		i;
765 	struct lge_jpool_entry   *entry;
766 
767 	/* Grab a big chunk o' storage. */
768 	sc->lge_cdata.lge_jumbo_buf = contigmalloc(LGE_JMEM, M_DEVBUF,
769 	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);
770 
771 	if (sc->lge_cdata.lge_jumbo_buf == NULL) {
772 		if_printf(sc->lge_ifp, "no memory for jumbo buffers!\n");
773 		return(ENOBUFS);
774 	}
775 
776 	SLIST_INIT(&sc->lge_jfree_listhead);
777 	SLIST_INIT(&sc->lge_jinuse_listhead);
778 
779 	/*
780 	 * Now divide it up into 9K pieces and save the addresses
781 	 * in an array.
782 	 */
783 	ptr = sc->lge_cdata.lge_jumbo_buf;
784 	for (i = 0; i < LGE_JSLOTS; i++) {
785 		sc->lge_cdata.lge_jslots[i] = ptr;
786 		ptr += LGE_JLEN;
787 		entry = malloc(sizeof(struct lge_jpool_entry),
788 		    M_DEVBUF, M_NOWAIT);
789 		if (entry == NULL) {
790 			if_printf(sc->lge_ifp, "no memory for jumbo "
791 			    "buffer queue!\n");
792 			return(ENOBUFS);
793 		}
794 		entry->slot = i;
795 		SLIST_INSERT_HEAD(&sc->lge_jfree_listhead,
796 		    entry, jpool_entries);
797 	}
798 
799 	return(0);
800 }
801 
802 static void
803 lge_free_jumbo_mem(sc)
804 	struct lge_softc	*sc;
805 {
806 	int			i;
807 	struct lge_jpool_entry	*entry;
808 
809 	for (i = 0; i < LGE_JSLOTS; i++) {
810 		entry = SLIST_FIRST(&sc->lge_jfree_listhead);
811 		SLIST_REMOVE_HEAD(&sc->lge_jfree_listhead, jpool_entries);
812 		free(entry, M_DEVBUF);
813 	}
814 
815 	contigfree(sc->lge_cdata.lge_jumbo_buf, LGE_JMEM, M_DEVBUF);
816 
817 	return;
818 }
819 
/*
 * Allocate a jumbo buffer: pop a pool entry off the free list, move
 * it to the in-use list, and return the address of its 9K slot.
 * Returns NULL if the pool is exhausted.
 */
static void *
lge_jalloc(sc)
	struct lge_softc	*sc;
{
	struct lge_jpool_entry   *entry;

	entry = SLIST_FIRST(&sc->lge_jfree_listhead);

	if (entry == NULL) {
#ifdef LGE_VERBOSE
		if_printf(sc->lge_ifp, "no free jumbo buffers\n");
#endif
		return(NULL);
	}

	SLIST_REMOVE_HEAD(&sc->lge_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->lge_jinuse_listhead, entry, jpool_entries);
	return(sc->lge_cdata.lge_jslots[entry->slot]);
}
842 
/*
 * Release a jumbo buffer.  Installed as the external-buffer free
 * routine via MEXTADD() in lge_newbuf(); 'args' is the softc.
 */
static void
lge_jfree(buf, args)
	void			*buf;
	void			*args;
{
	struct lge_softc	*sc;
	int		        i;
	struct lge_jpool_entry   *entry;

	/* Extract the softc struct pointer. */
	sc = args;

	if (sc == NULL)
		panic("lge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vm_offset_t)buf
	     - (vm_offset_t)sc->lge_cdata.lge_jumbo_buf) / LGE_JLEN;

	if ((i < 0) || (i >= LGE_JSLOTS))
		panic("lge_jfree: asked to free buffer that we don't manage!");

	/*
	 * NOTE(review): the entry taken here is simply the head of the
	 * in-use list, not necessarily the entry recorded for this
	 * particular buffer; its slot field is overwritten below.  This
	 * works because pool entries are interchangeable placeholders,
	 * but it means the in-use list does not track which entry maps
	 * to which slot.
	 */
	entry = SLIST_FIRST(&sc->lge_jinuse_listhead);
	if (entry == NULL)
		panic("lge_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->lge_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->lge_jfree_listhead, entry, jpool_entries);

	return;
}
877 
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.  'cnt' is the number of completed RX
 * DMA operations reported by the ISR (see LGE_RX_DMACNT in the
 * interrupt handler).  Called with the driver lock held; the lock is
 * dropped around if_input().
 */
static void
lge_rxeof(sc, cnt)
	struct lge_softc	*sc;
	int			cnt;
{
        struct mbuf		*m;
        struct ifnet		*ifp;
	struct lge_rx_desc	*cur_rx;
	int			c, i, total_len = 0;
	u_int32_t		rxsts, rxctl;

	ifp = sc->lge_ifp;

	/* Find out how many frames were processed. */
	c = cnt;
	i = sc->lge_cdata.lge_rx_cons;

	/* Suck them in. */
	while(c) {
		struct mbuf		*m0 = NULL;

		cur_rx = &sc->lge_ldata->lge_rx_list[i];
		rxctl = cur_rx->lge_ctl;
		rxsts = cur_rx->lge_sts;
		m = cur_rx->lge_mbuf;
		cur_rx->lge_mbuf = NULL;
		total_len = LGE_RXBYTES(cur_rx);
		LGE_INC(i, LGE_RX_LIST_CNT);
		c--;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
	 	 * comes up in the ring.
		 */
		if (rxctl & LGE_RXCTL_ERRMASK) {
			ifp->if_ierrors++;
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			continue;
		}

		/*
		 * Try to replace the current mbuf with a fresh one; if
		 * that fails, copy the frame out and recycle the old
		 * mbuf back into the ring so RX can continue.
		 */
		if (lge_newbuf(sc, &LGE_RXTAIL(sc), NULL) == ENOBUFS) {
			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
			    ifp, NULL);
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			if (m0 == NULL) {
				if_printf(ifp, "no receive buffers "
				    "available -- packet dropped!\n");
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;

		/*
		 * Do IP checksum checking.
		 * NOTE(review): CSUM_IP_VALID is set whenever the
		 * IPCSUMERR bit is clear, even for frames not flagged
		 * as IP -- confirm this is intended for this chip.
		 */
		if (rxsts & LGE_RXSTS_ISIP)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		if (!(rxsts & LGE_RXSTS_IPCSUMERR))
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		if ((rxsts & LGE_RXSTS_ISTCP &&
		    !(rxsts & LGE_RXSTS_TCPCSUMERR)) ||
		    (rxsts & LGE_RXSTS_ISUDP &&
		    !(rxsts & LGE_RXSTS_UDPCSUMERR))) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}

		/* Drop the lock around the upcall into the stack. */
		LGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		LGE_LOCK(sc);
	}

	sc->lge_cdata.lge_rx_cons = i;

	return;
}
965 
966 static void
967 lge_rxeoc(sc)
968 	struct lge_softc	*sc;
969 {
970 	struct ifnet		*ifp;
971 
972 	ifp = sc->lge_ifp;
973 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
974 	lge_init_locked(sc);
975 	return;
976 }
977 
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers: walk from the consumer index toward the producer
 * index, freeing one descriptor's mbuf per completed DMA reported by
 * the chip's TX-DMA-done count register.
 */

static void
lge_txeof(sc)
	struct lge_softc	*sc;
{
	struct lge_tx_desc	*cur_tx = NULL;
	struct ifnet		*ifp;
	u_int32_t		idx, txdone;

	ifp = sc->lge_ifp;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->lge_cdata.lge_tx_cons;
	txdone = CSR_READ_1(sc, LGE_TXDMADONE_8BIT);

	while (idx != sc->lge_cdata.lge_tx_prod && txdone) {
		cur_tx = &sc->lge_ldata->lge_tx_list[idx];

		ifp->if_opackets++;
		if (cur_tx->lge_mbuf != NULL) {
			m_freem(cur_tx->lge_mbuf);
			cur_tx->lge_mbuf = NULL;
		}
		cur_tx->lge_ctl = 0;

		txdone--;
		LGE_INC(idx, LGE_TX_LIST_CNT);
		ifp->if_timer = 0;
	}

	sc->lge_cdata.lge_tx_cons = idx;

	/* If we reclaimed anything, the ring has room again. */
	if (cur_tx != NULL)
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return;
}
1025 
/*
 * Once-per-second timer: accumulate collision statistics from the
 * chip's statistics registers and, while link is down, poll the PHY
 * for link state.  When link comes up, kick the transmit queue.
 * Reschedules itself via the stat callout.
 */
static void
lge_tick(xsc)
	void			*xsc;
{
	struct lge_softc	*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;

	sc = xsc;
	ifp = sc->lge_ifp;
	LGE_LOCK_ASSERT(sc);

	/* Select a statistics counter, then read its value. */
	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_SINGLE_COLL_PKTS);
	ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);
	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_MULTI_COLL_PKTS);
	ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);

	if (!sc->lge_link) {
		mii = device_get_softc(sc->lge_miibus);
		mii_tick(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->lge_link++;
			if (bootverbose &&
		  	    (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T))
				if_printf(ifp, "gigabit link up\n");
			/* Restart output that was waiting for link. */
			if (ifp->if_snd.ifq_head != NULL)
				lge_start_locked(ifp);
		}
	}

	callout_reset(&sc->lge_stat_callout, hz, lge_tick, sc);

	return;
}
1062 
/*
 * Interrupt handler: service TX completions, RX completions, RX
 * command FIFO underruns and PHY events, then re-enable interrupts
 * (reading the ISR clears the master interrupt-enable bit).
 */
static void
lge_intr(arg)
	void			*arg;
{
	struct lge_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status;

	sc = arg;
	ifp = sc->lge_ifp;
	LGE_LOCK(sc);

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		lge_stop(sc);
		LGE_UNLOCK(sc);
		return;
	}

	for (;;) {
		/*
		 * Reading the ISR register clears all interrupts, and
		 * clears the 'interrupts enabled' bit in the IMR
		 * register.
		 */
		status = CSR_READ_4(sc, LGE_ISR);

		if ((status & LGE_INTRS) == 0)
			break;

		if ((status & (LGE_ISR_TXCMDFIFO_EMPTY|LGE_ISR_TXDMA_DONE)))
			lge_txeof(sc);

		if (status & LGE_ISR_RXDMA_DONE)
			lge_rxeof(sc, LGE_RX_DMACNT(status));

		if (status & LGE_ISR_RXCMDFIFO_EMPTY)
			lge_rxeoc(sc);

		if (status & LGE_ISR_PHY_INTR) {
			/* Force lge_tick() to re-poll link state now. */
			sc->lge_link = 0;
			callout_stop(&sc->lge_stat_callout);
			lge_tick(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|LGE_IMR_INTR_ENB);

	if (ifp->if_snd.ifq_head != NULL)
		lge_start_locked(ifp);

	LGE_UNLOCK(sc);
	return;
}
1118 
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.  On success the descriptor is
 * queued to the chip and *txidx advances; returns 0 or ENOBUFS.
 */
static int
lge_encap(sc, m_head, txidx)
	struct lge_softc	*sc;
	struct mbuf		*m_head;
	u_int32_t		*txidx;
{
	struct lge_frag		*f = NULL;
	struct lge_tx_desc	*cur_tx;
	struct mbuf		*m;
	int			frag = 0, tot_len = 0;

	/*
 	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
 	 * of fragments or hit the end of the mbuf chain.
	 *
	 * NOTE(review): 'frag' is never checked against the size of
	 * the lge_frags array, so a chain with more non-empty mbufs
	 * than the descriptor can hold would overflow it, and the
	 * ENOBUFS check below can never fire (the loop only exits
	 * with m == NULL).  Confirm against the fragment-array size
	 * in if_lgereg.h.
	 */
	m = m_head;
	cur_tx = &sc->lge_ldata->lge_tx_list[*txidx];
	frag = 0;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			tot_len += m->m_len;
			f = &cur_tx->lge_frags[frag];
			f->lge_fraglen = m->m_len;
			f->lge_fragptr_lo = vtophys(mtod(m, vm_offset_t));
			f->lge_fragptr_hi = 0;
			frag++;
		}
	}

	if (m != NULL)
		return(ENOBUFS);

	cur_tx->lge_mbuf = m_head;
	cur_tx->lge_ctl = LGE_TXCTL_WANTINTR|LGE_FRAGCNT(frag)|tot_len;
	LGE_INC((*txidx), LGE_TX_LIST_CNT);

	/* Queue for transmit */
	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_LO, vtophys(cur_tx));

	return(0);
}
1166 
1167 /*
1168  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1169  * to the mbuf data regions directly in the transmit lists. We also save a
1170  * copy of the pointers since the transmit list fragment pointers are
1171  * physical addresses.
1172  */
1173 
1174 static void
1175 lge_start(ifp)
1176 	struct ifnet		*ifp;
1177 {
1178 	struct lge_softc	*sc;
1179 
1180 	sc = ifp->if_softc;
1181 	LGE_LOCK(sc);
1182 	lge_start_locked(ifp);
1183 	LGE_UNLOCK(sc);
1184 }
1185 
/*
 * Locked transmit routine: drain if_snd into free TX descriptors until
 * the ring is full, the chip's command FIFO is exhausted, or the queue
 * is empty. Caller must hold the driver lock.
 */
static void
lge_start_locked(ifp)
	struct ifnet		*ifp;
{
	struct lge_softc	*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;

	sc = ifp->if_softc;

	/* No point queueing frames while the link is down. */
	if (!sc->lge_link)
		return;

	idx = sc->lge_cdata.lge_tx_prod;

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	/* A NULL lge_mbuf marks a free descriptor slot. */
	while(sc->lge_ldata->lge_tx_list[idx].lge_mbuf == NULL) {
		/* Stop if the chip has no free TX command slots left. */
		if (CSR_READ_1(sc, LGE_TXCMDFREE_8BIT) == 0)
			break;

		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (lge_encap(sc, m_head, &idx)) {
			/* Descriptor full: put the frame back and stall. */
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	sc->lge_cdata.lge_tx_prod = idx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 * NOTE(review): this arms the watchdog even when nothing was
	 * queued this call — confirm that is intended.
	 */
	ifp->if_timer = 5;

	return;
}
1234 
/*
 * Public init entry point: acquire the driver lock and run the
 * locked initializer.
 */
static void
lge_init(xsc)
	void			*xsc;
{
	struct lge_softc	*sc;

	sc = xsc;
	LGE_LOCK(sc);
	lge_init_locked(sc);
	LGE_UNLOCK(sc);
}
1245 
1246 static void
1247 lge_init_locked(sc)
1248 	struct lge_softc	*sc;
1249 {
1250 	struct ifnet		*ifp = sc->lge_ifp;
1251 	struct mii_data		*mii;
1252 
1253 	LGE_LOCK_ASSERT(sc);
1254 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1255 		return;
1256 
1257 	/*
1258 	 * Cancel pending I/O and free all RX/TX buffers.
1259 	 */
1260 	lge_stop(sc);
1261 	lge_reset(sc);
1262 
1263 	mii = device_get_softc(sc->lge_miibus);
1264 
1265 	/* Set MAC address */
1266 	CSR_WRITE_4(sc, LGE_PAR0, *(u_int32_t *)(&IF_LLADDR(sc->lge_ifp)[0]));
1267 	CSR_WRITE_4(sc, LGE_PAR1, *(u_int32_t *)(&IF_LLADDR(sc->lge_ifp)[4]));
1268 
1269 	/* Init circular RX list. */
1270 	if (lge_list_rx_init(sc) == ENOBUFS) {
1271 		if_printf(ifp, "initialization failed: no "
1272 		    "memory for rx buffers\n");
1273 		lge_stop(sc);
1274 		return;
1275 	}
1276 
1277 	/*
1278 	 * Init tx descriptors.
1279 	 */
1280 	lge_list_tx_init(sc);
1281 
1282 	/* Set initial value for MODE1 register. */
1283 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_UCAST|
1284 	    LGE_MODE1_TX_CRC|LGE_MODE1_TXPAD|
1285 	    LGE_MODE1_RX_FLOWCTL|LGE_MODE1_SETRST_CTL0|
1286 	    LGE_MODE1_SETRST_CTL1|LGE_MODE1_SETRST_CTL2);
1287 
1288 	 /* If we want promiscuous mode, set the allframes bit. */
1289 	if (ifp->if_flags & IFF_PROMISC) {
1290 		CSR_WRITE_4(sc, LGE_MODE1,
1291 		    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_PROMISC);
1292 	} else {
1293 		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_PROMISC);
1294 	}
1295 
1296 	/*
1297 	 * Set the capture broadcast bit to capture broadcast frames.
1298 	 */
1299 	if (ifp->if_flags & IFF_BROADCAST) {
1300 		CSR_WRITE_4(sc, LGE_MODE1,
1301 		    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_BCAST);
1302 	} else {
1303 		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_BCAST);
1304 	}
1305 
1306 	/* Packet padding workaround? */
1307 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RMVPAD);
1308 
1309 	/* No error frames */
1310 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ERRPKTS);
1311 
1312 	/* Receive large frames */
1313 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_GIANTS);
1314 
1315 	/* Workaround: disable RX/TX flow control */
1316 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_TX_FLOWCTL);
1317 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_FLOWCTL);
1318 
1319 	/* Make sure to strip CRC from received frames */
1320 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_CRC);
1321 
1322 	/* Turn off magic packet mode */
1323 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_MPACK_ENB);
1324 
1325 	/* Turn off all VLAN stuff */
1326 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_VLAN_RX|LGE_MODE1_VLAN_TX|
1327 	    LGE_MODE1_VLAN_STRIP|LGE_MODE1_VLAN_INSERT);
1328 
1329 	/* Workarond: FIFO overflow */
1330 	CSR_WRITE_2(sc, LGE_RXFIFO_HIWAT, 0x3FFF);
1331 	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL1|LGE_IMR_RXFIFO_WAT);
1332 
1333 	/*
1334 	 * Load the multicast filter.
1335 	 */
1336 	lge_setmulti(sc);
1337 
1338 	/*
1339 	 * Enable hardware checksum validation for all received IPv4
1340 	 * packets, do not reject packets with bad checksums.
1341 	 */
1342 	CSR_WRITE_4(sc, LGE_MODE2, LGE_MODE2_RX_IPCSUM|
1343 	    LGE_MODE2_RX_TCPCSUM|LGE_MODE2_RX_UDPCSUM|
1344 	    LGE_MODE2_RX_ERRCSUM);
1345 
1346 	/*
1347 	 * Enable the delivery of PHY interrupts based on
1348 	 * link/speed/duplex status chalges.
1349 	 */
1350 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_GMIIPOLL);
1351 
1352 	/* Enable receiver and transmitter. */
1353 	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);
1354 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_ENB);
1355 
1356 	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_HI, 0);
1357 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_TX_ENB);
1358 
1359 	/*
1360 	 * Enable interrupts.
1361 	 */
1362 	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|
1363 	    LGE_IMR_SETRST_CTL1|LGE_IMR_INTR_ENB|LGE_INTRS);
1364 
1365 	lge_ifmedia_upd_locked(ifp);
1366 
1367 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1368 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1369 
1370 	callout_reset(&sc->lge_stat_callout, hz, lge_tick, sc);
1371 
1372 	return;
1373 }
1374 
1375 /*
1376  * Set media options.
1377  */
1378 static int
1379 lge_ifmedia_upd(ifp)
1380 	struct ifnet		*ifp;
1381 {
1382 	struct lge_softc	*sc;
1383 
1384 	sc = ifp->if_softc;
1385 	LGE_LOCK(sc);
1386 	lge_ifmedia_upd_locked(ifp);
1387 	LGE_UNLOCK(sc);
1388 
1389 	return(0);
1390 }
1391 
1392 static void
1393 lge_ifmedia_upd_locked(ifp)
1394 	struct ifnet		*ifp;
1395 {
1396 	struct lge_softc	*sc;
1397 	struct mii_data		*mii;
1398 
1399 	sc = ifp->if_softc;
1400 
1401 	LGE_LOCK_ASSERT(sc);
1402 	mii = device_get_softc(sc->lge_miibus);
1403 	sc->lge_link = 0;
1404 	if (mii->mii_instance) {
1405 		struct mii_softc	*miisc;
1406 		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
1407 		    miisc = LIST_NEXT(miisc, mii_list))
1408 			mii_phy_reset(miisc);
1409 	}
1410 	mii_mediachg(mii);
1411 }
1412 
1413 /*
1414  * Report current media status.
1415  */
1416 static void
1417 lge_ifmedia_sts(ifp, ifmr)
1418 	struct ifnet		*ifp;
1419 	struct ifmediareq	*ifmr;
1420 {
1421 	struct lge_softc	*sc;
1422 	struct mii_data		*mii;
1423 
1424 	sc = ifp->if_softc;
1425 
1426 	LGE_LOCK(sc);
1427 	mii = device_get_softc(sc->lge_miibus);
1428 	mii_pollstat(mii);
1429 	LGE_UNLOCK(sc);
1430 	ifmr->ifm_active = mii->mii_media_active;
1431 	ifmr->ifm_status = mii->mii_media_status;
1432 
1433 	return;
1434 }
1435 
/*
 * ifnet ioctl handler: MTU, interface flags (including incremental
 * promiscuous-mode toggling), multicast filter updates and media
 * requests; everything else is punted to ether_ioctl().
 */
static int
lge_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct lge_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			error = 0;

	switch(command) {
	case SIOCSIFMTU:
		/*
		 * NOTE(review): only the upper bound is checked here;
		 * confirm whether a lower bound (e.g. ETHERMIN) should
		 * also be enforced.
		 */
		LGE_LOCK(sc);
		if (ifr->ifr_mtu > LGE_JUMBO_MTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		LGE_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		LGE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC bit changed while running,
			 * flip the hardware filter bit directly instead
			 * of doing a full re-init.
			 */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->lge_if_flags & IFF_PROMISC)) {
				CSR_WRITE_4(sc, LGE_MODE1,
				    LGE_MODE1_SETRST_CTL1|
				    LGE_MODE1_RX_PROMISC);
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->lge_if_flags & IFF_PROMISC) {
				/* Write without SETRST clears the bit. */
				CSR_WRITE_4(sc, LGE_MODE1,
				    LGE_MODE1_RX_PROMISC);
			} else {
				/* Anything else: force a full re-init. */
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				lge_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				lge_stop(sc);
		}
		/* Remember flags so the next toggle can be diffed. */
		sc->lge_if_flags = ifp->if_flags;
		LGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		LGE_LOCK(sc);
		lge_setmulti(sc);
		LGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->lge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return(error);
}
1501 
/*
 * Transmit watchdog: fires when lge_start_locked()'s if_timer expires
 * without TX completion. Recover by fully stopping, resetting and
 * re-initializing the chip, then restart transmission if frames are
 * still queued.
 */
static void
lge_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct lge_softc	*sc;

	sc = ifp->if_softc;

	LGE_LOCK(sc);
	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	lge_stop(sc);
	lge_reset(sc);
	/* Clear RUNNING so lge_init_locked() doesn't short-circuit. */
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	lge_init_locked(sc);

	if (ifp->if_snd.ifq_head != NULL)
		lge_start_locked(ifp);
	LGE_UNLOCK(sc);

	return;
}
1525 
1526 /*
1527  * Stop the adapter and free any mbufs allocated to the
1528  * RX and TX lists.
1529  */
1530 static void
1531 lge_stop(sc)
1532 	struct lge_softc	*sc;
1533 {
1534 	register int		i;
1535 	struct ifnet		*ifp;
1536 
1537 	LGE_LOCK_ASSERT(sc);
1538 	ifp = sc->lge_ifp;
1539 	ifp->if_timer = 0;
1540 	callout_stop(&sc->lge_stat_callout);
1541 	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_INTR_ENB);
1542 
1543 	/* Disable receiver and transmitter. */
1544 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ENB|LGE_MODE1_TX_ENB);
1545 	sc->lge_link = 0;
1546 
1547 	/*
1548 	 * Free data in the RX lists.
1549 	 */
1550 	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
1551 		if (sc->lge_ldata->lge_rx_list[i].lge_mbuf != NULL) {
1552 			m_freem(sc->lge_ldata->lge_rx_list[i].lge_mbuf);
1553 			sc->lge_ldata->lge_rx_list[i].lge_mbuf = NULL;
1554 		}
1555 	}
1556 	bzero((char *)&sc->lge_ldata->lge_rx_list,
1557 		sizeof(sc->lge_ldata->lge_rx_list));
1558 
1559 	/*
1560 	 * Free the TX list buffers.
1561 	 */
1562 	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
1563 		if (sc->lge_ldata->lge_tx_list[i].lge_mbuf != NULL) {
1564 			m_freem(sc->lge_ldata->lge_tx_list[i].lge_mbuf);
1565 			sc->lge_ldata->lge_tx_list[i].lge_mbuf = NULL;
1566 		}
1567 	}
1568 
1569 	bzero((char *)&sc->lge_ldata->lge_tx_list,
1570 		sizeof(sc->lge_ldata->lge_tx_list));
1571 
1572 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1573 
1574 	return;
1575 }
1576 
1577 /*
1578  * Stop all chip I/O so that the kernel's probe routines don't
1579  * get confused by errant DMAs when rebooting.
1580  */
1581 static void
1582 lge_shutdown(dev)
1583 	device_t		dev;
1584 {
1585 	struct lge_softc	*sc;
1586 
1587 	sc = device_get_softc(dev);
1588 
1589 	LGE_LOCK(sc);
1590 	lge_reset(sc);
1591 	lge_stop(sc);
1592 	LGE_UNLOCK(sc);
1593 
1594 	return;
1595 }
1596