xref: /freebsd/sys/dev/gem/if_gem.c (revision 7660b554bc59a07be0431c17e0e33815818baa69)
1 /*-
2  * Copyright (C) 2001 Eduardo Horvath.
3  * Copyright (c) 2001-2003 Thomas Moestl
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  *	from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 /*
34  * Driver for Sun GEM ethernet controllers.
35  */
36 
37 #if 0
38 #define	GEM_DEBUG
39 #endif
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/bus.h>
44 #include <sys/callout.h>
45 #include <sys/endian.h>
46 #include <sys/mbuf.h>
47 #include <sys/malloc.h>
48 #include <sys/kernel.h>
49 #include <sys/socket.h>
50 #include <sys/sockio.h>
51 
52 #include <net/bpf.h>
53 #include <net/ethernet.h>
54 #include <net/if.h>
55 #include <net/if_arp.h>
56 #include <net/if_dl.h>
57 #include <net/if_media.h>
58 
59 #include <machine/bus.h>
60 
61 #include <dev/mii/mii.h>
62 #include <dev/mii/miivar.h>
63 
64 #include <dev/gem/if_gemreg.h>
65 #include <dev/gem/if_gemvar.h>
66 
67 #define TRIES	10000
68 
69 static void	gem_start(struct ifnet *);
70 static void	gem_stop(struct ifnet *, int);
71 static int	gem_ioctl(struct ifnet *, u_long, caddr_t);
72 static void	gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
73 static void	gem_rxdma_callback(void *, bus_dma_segment_t *, int,
74     bus_size_t, int);
75 static void	gem_txdma_callback(void *, bus_dma_segment_t *, int,
76     bus_size_t, int);
77 static void	gem_tick(void *);
78 static void	gem_watchdog(struct ifnet *);
79 static void	gem_init(void *);
80 static void	gem_init_regs(struct gem_softc *sc);
81 static int	gem_ringsize(int sz);
82 static int	gem_meminit(struct gem_softc *);
83 static int	gem_load_txmbuf(struct gem_softc *, struct mbuf *);
84 static void	gem_mifinit(struct gem_softc *);
85 static int	gem_bitwait(struct gem_softc *sc, bus_addr_t r,
86     u_int32_t clr, u_int32_t set);
87 static int	gem_reset_rx(struct gem_softc *);
88 static int	gem_reset_tx(struct gem_softc *);
89 static int	gem_disable_rx(struct gem_softc *);
90 static int	gem_disable_tx(struct gem_softc *);
91 static void	gem_rxdrain(struct gem_softc *);
92 static int	gem_add_rxbuf(struct gem_softc *, int);
93 static void	gem_setladrf(struct gem_softc *);
94 
95 struct mbuf	*gem_get(struct gem_softc *, int, int);
96 static void	gem_eint(struct gem_softc *, u_int);
97 static void	gem_rint(struct gem_softc *);
98 #if 0
99 static void	gem_rint_timeout(void *);
100 #endif
101 static void	gem_tint(struct gem_softc *);
102 #ifdef notyet
103 static void	gem_power(int, void *);
104 #endif
105 
106 devclass_t gem_devclass;
107 DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
108 MODULE_DEPEND(gem, miibus, 1, 1, 1);
109 
110 #ifdef GEM_DEBUG
111 #include <sys/ktr.h>
112 #define	KTR_GEM		KTR_CT2
113 #endif
114 
115 #define	GEM_NSEGS GEM_NTXDESC
116 
117 /*
118  * gem_attach:
119  *
120  *	Attach a Gem interface to the system.
121  */
int
gem_attach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;
	int i, error;
	u_int32_t v;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	gem_reset(sc);

	/* Parent DMA tag; the rx/tx/control tags below are derived from it. */
	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, GEM_NSEGS,
	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag);
	if (error)
		return (error);

	/* Tag for receive buffers: single segment, up to MAXBSIZE. */
	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE,
	    1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL,
	    &sc->sc_rdmatag);
	if (error)
		goto fail_ptag;

	/* Tag for transmit mbuf chains: up to GEM_NTXDESC segments. */
	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    GEM_TD_BUFSIZE, GEM_NTXDESC, BUS_SPACE_MAXSIZE_32BIT,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
	if (error)
		goto fail_rtag;

	/* Tag for the control data block (descriptor rings), page-aligned. */
	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex, &Giant, &sc->sc_cdmatag);
	if (error)
		goto fail_ttag;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) {
		device_printf(sc->sc_dev, "unable to allocate control data,"
		    " error = %d\n", error);
		goto fail_ctag;
	}

	/*
	 * sc_cddma is filled in by gem_cddma_callback(); a value of 0
	 * afterwards means the (possibly deferred) load did not complete.
	 */
	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev, "unable to load control data DMA "
		    "map, error = %d\n", error);
		goto fail_cmem;
	}

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		struct gem_txsoft *txs;

		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create tx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_txd;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create rx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_rxd;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}


	gem_mifinit(sc);

	/* Probe for a PHY; failure here aborts the whole attach. */
	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange,
	    gem_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_rxd;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	device_printf(sc->sc_dev, "Ethernet address:");
	for (i = 0; i < 6; i++)
		printf("%c%02x", i > 0 ? ':' : ' ', sc->sc_arpcom.ac_enaddr[i]);

	/* Get RX FIFO size; the register apparently counts 64-byte units. */
	sc->sc_rxfifosize = 64 *
	    bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_FIFO_SIZE);
	printf(", %uKB RX fifo", sc->sc_rxfifosize / 1024);

	/* Get TX FIFO size */
	v = bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_FIFO_SIZE);
	printf(", %uKB TX fifo\n", v / 16);

	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	ifp->if_unit = device_get_unit(sc->sc_dev);
	ifp->if_name = "gem";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_watchdog = gem_watchdog;
	ifp->if_init = gem_init;
	ifp->if_output = ether_output;
	ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN;
	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `phy number'
	 * mapping. We'll use this mapping in media change
	 * requests to determine which phy to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	     child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accomodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}

	/*
	 * Now select and activate the PHY we will use.
	 *
	 * The order of preference is External (MDI1),
	 * Internal (MDI0), Serial Link (no MII).
	 */
	if (sc->sc_phys[1]) {
#ifdef GEM_DEBUG
		printf("using external phy\n");
#endif
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
	} else {
#ifdef GEM_DEBUG
		printf("using internal phy\n");
#endif
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
	}
	bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG,
	    sc->sc_mif_config);
	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_arpcom.ac_enaddr);

	/*
	 * NOTE(review): "#if notyet" only works because `notyet' is
	 * undefined (evaluates to 0); "#ifdef notyet" is used elsewhere
	 * in this file -- confirm intent.
	 */
#if notyet
	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(gem_power, sc);
	if (sc->sc_powerhook == NULL)
		device_printf(sc->sc_dev, "WARNING: unable to establish power "
		    "hook\n");
#endif

	callout_init(&sc->sc_tick_ch, 0);
	callout_init(&sc->sc_rx_ch, 0);
	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_rxd:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_txd:
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
fail_cmem:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
	return (error);
}
355 
356 void
357 gem_detach(sc)
358 	struct gem_softc *sc;
359 {
360 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
361 	int i;
362 
363 	ether_ifdetach(ifp);
364 	gem_stop(ifp, 1);
365 	device_delete_child(sc->sc_dev, sc->sc_miibus);
366 
367 	for (i = 0; i < GEM_NRXDESC; i++) {
368 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
369 			bus_dmamap_destroy(sc->sc_rdmatag,
370 			    sc->sc_rxsoft[i].rxs_dmamap);
371 	}
372 	for (i = 0; i < GEM_TXQUEUELEN; i++) {
373 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
374 			bus_dmamap_destroy(sc->sc_tdmatag,
375 			    sc->sc_txsoft[i].txs_dmamap);
376 	}
377 	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
378 	GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE);
379 	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
380 	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
381 	    sc->sc_cddmamap);
382 	bus_dma_tag_destroy(sc->sc_cdmatag);
383 	bus_dma_tag_destroy(sc->sc_tdmatag);
384 	bus_dma_tag_destroy(sc->sc_rdmatag);
385 	bus_dma_tag_destroy(sc->sc_pdmatag);
386 }
387 
388 void
389 gem_suspend(sc)
390 	struct gem_softc *sc;
391 {
392 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
393 
394 	gem_stop(ifp, 0);
395 }
396 
397 void
398 gem_resume(sc)
399 	struct gem_softc *sc;
400 {
401 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
402 
403 	if (ifp->if_flags & IFF_UP)
404 		gem_init(ifp);
405 }
406 
407 static void
408 gem_cddma_callback(xsc, segs, nsegs, error)
409 	void *xsc;
410 	bus_dma_segment_t *segs;
411 	int nsegs;
412 	int error;
413 {
414 	struct gem_softc *sc = (struct gem_softc *)xsc;
415 
416 	if (error != 0)
417 		return;
418 	if (nsegs != 1) {
419 		/* can't happen... */
420 		panic("gem_cddma_callback: bad control buffer segment count");
421 	}
422 	sc->sc_cddma = segs[0].ds_addr;
423 }
424 
425 static void
426 gem_rxdma_callback(xsc, segs, nsegs, totsz, error)
427 	void *xsc;
428 	bus_dma_segment_t *segs;
429 	int nsegs;
430 	bus_size_t totsz;
431 	int error;
432 {
433 	struct gem_rxsoft *rxs = (struct gem_rxsoft *)xsc;
434 
435 	if (error != 0)
436 		return;
437 	KASSERT(nsegs == 1, ("gem_rxdma_callback: bad dma segment count"));
438 	rxs->rxs_paddr = segs[0].ds_addr;
439 }
440 
/*
 * gem_txdma_callback:
 *
 *	bus_dmamap_load_mbuf() callback: fill in the hardware transmit
 *	descriptors for a mapped packet.  If there are not enough free
 *	descriptors, txs->txs_ndescs is set to -1 so that the caller
 *	(gem_load_txmbuf) can detect the failure.
 */
static void
gem_txdma_callback(xsc, segs, nsegs, totsz, error)
	void *xsc;
	bus_dma_segment_t *segs;
	int nsegs;
	bus_size_t totsz;
	int error;
{
	struct gem_txdma *txd = (struct gem_txdma *)xsc;
	struct gem_softc *sc = txd->txd_sc;
	struct gem_txsoft *txs = txd->txd_txs;
	bus_size_t len = 0;		/* bytes written so far */
	uint64_t flags = 0;
	int seg, nexttx;

	if (error != 0)
		return;
	/*
	 * Ensure we have enough descriptors free to describe
	 * the packet.  Note, we always reserve one descriptor
	 * at the end of the ring as a termination point, to
	 * prevent wrap-around.
	 */
	if (nsegs > sc->sc_txfree - 1) {
		/* -1 signals "out of descriptors" to gem_load_txmbuf(). */
		txs->txs_ndescs = -1;
		return;
	}
	txs->txs_ndescs = nsegs;

	nexttx = txs->txs_firstdesc;
	/*
	 * Initialize the transmit descriptors.
	 */
	for (seg = 0; seg < nsegs;
	     seg++, nexttx = GEM_NEXTTX(nexttx)) {
#ifdef GEM_DEBUG
		CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len "
		    "%lx, addr %#lx (%#lx)",  seg, nexttx,
		    segs[seg].ds_len, segs[seg].ds_addr,
		    GEM_DMA_WRITE(sc, segs[seg].ds_addr));
#endif

		/* Skip empty segments; they get no descriptor. */
		if (segs[seg].ds_len == 0)
			continue;
		sc->sc_txdescs[nexttx].gd_addr =
		    GEM_DMA_WRITE(sc, segs[seg].ds_addr);
		KASSERT(segs[seg].ds_len < GEM_TD_BUFSIZE,
		    ("gem_txdma_callback: segment size too large!"));
		flags = segs[seg].ds_len & GEM_TD_BUFSIZE;
		/* len == 0 means no data descriptor written yet: first seg. */
		if (len == 0) {
#ifdef GEM_DEBUG
			CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, "
			    "tx %d", seg, nexttx);
#endif
			flags |= GEM_TD_START_OF_PACKET;
			/*
			 * Request a completion interrupt once per ~2/3 of
			 * the ring to bound TX reclaim latency.
			 */
			if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
				sc->sc_txwin = 0;
				flags |= GEM_TD_INTERRUPT_ME;
			}
		}
		/* Last byte of the packet lands in this segment. */
		if (len + segs[seg].ds_len == totsz) {
#ifdef GEM_DEBUG
			CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, "
			    "tx %d", seg, nexttx);
#endif
			flags |= GEM_TD_END_OF_PACKET;
		}
		sc->sc_txdescs[nexttx].gd_flags = GEM_DMA_WRITE(sc, flags);
		txs->txs_lastdesc = nexttx;
		len += segs[seg].ds_len;
	}
	KASSERT((flags & GEM_TD_END_OF_PACKET) != 0,
	    ("gem_txdma_callback: missed end of packet!"));
}
515 
516 static void
517 gem_tick(arg)
518 	void *arg;
519 {
520 	struct gem_softc *sc = arg;
521 	int s;
522 
523 	s = splnet();
524 	mii_tick(sc->sc_mii);
525 	splx(s);
526 
527 	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
528 }
529 
530 static int
531 gem_bitwait(sc, r, clr, set)
532 	struct gem_softc *sc;
533 	bus_addr_t r;
534 	u_int32_t clr;
535 	u_int32_t set;
536 {
537 	int i;
538 	u_int32_t reg;
539 
540 	for (i = TRIES; i--; DELAY(100)) {
541 		reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r);
542 		if ((r & clr) == 0 && (r & set) == set)
543 			return (1);
544 	}
545 	return (0);
546 }
547 
548 void
549 gem_reset(sc)
550 	struct gem_softc *sc;
551 {
552 	bus_space_tag_t t = sc->sc_bustag;
553 	bus_space_handle_t h = sc->sc_h;
554 	int s;
555 
556 	s = splnet();
557 #ifdef GEM_DEBUG
558 	CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev));
559 #endif
560 	gem_reset_rx(sc);
561 	gem_reset_tx(sc);
562 
563 	/* Do a full reset */
564 	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
565 	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
566 		device_printf(sc->sc_dev, "cannot reset device\n");
567 	splx(s);
568 }
569 
570 
571 /*
572  * gem_rxdrain:
573  *
574  *	Drain the receive queue.
575  */
576 static void
577 gem_rxdrain(sc)
578 	struct gem_softc *sc;
579 {
580 	struct gem_rxsoft *rxs;
581 	int i;
582 
583 	for (i = 0; i < GEM_NRXDESC; i++) {
584 		rxs = &sc->sc_rxsoft[i];
585 		if (rxs->rxs_mbuf != NULL) {
586 			bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
587 			    BUS_DMASYNC_POSTREAD);
588 			bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
589 			m_freem(rxs->rxs_mbuf);
590 			rxs->rxs_mbuf = NULL;
591 		}
592 	}
593 }
594 
595 /*
596  * Reset the whole thing.
597  */
598 static void
599 gem_stop(ifp, disable)
600 	struct ifnet *ifp;
601 	int disable;
602 {
603 	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
604 	struct gem_txsoft *txs;
605 
606 #ifdef GEM_DEBUG
607 	CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev));
608 #endif
609 
610 	callout_stop(&sc->sc_tick_ch);
611 
612 	/* XXX - Should we reset these instead? */
613 	gem_disable_tx(sc);
614 	gem_disable_rx(sc);
615 
616 	/*
617 	 * Release any queued transmit buffers.
618 	 */
619 	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
620 		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
621 		if (txs->txs_ndescs != 0) {
622 			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
623 			    BUS_DMASYNC_POSTWRITE);
624 			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
625 			if (txs->txs_mbuf != NULL) {
626 				m_freem(txs->txs_mbuf);
627 				txs->txs_mbuf = NULL;
628 			}
629 		}
630 		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
631 	}
632 
633 	if (disable)
634 		gem_rxdrain(sc);
635 
636 	/*
637 	 * Mark the interface down and cancel the watchdog timer.
638 	 */
639 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
640 	ifp->if_timer = 0;
641 }
642 
643 /*
644  * Reset the receiver
645  */
646 int
647 gem_reset_rx(sc)
648 	struct gem_softc *sc;
649 {
650 	bus_space_tag_t t = sc->sc_bustag;
651 	bus_space_handle_t h = sc->sc_h;
652 
653 	/*
654 	 * Resetting while DMA is in progress can cause a bus hang, so we
655 	 * disable DMA first.
656 	 */
657 	gem_disable_rx(sc);
658 	bus_space_write_4(t, h, GEM_RX_CONFIG, 0);
659 	/* Wait till it finishes */
660 	if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0))
661 		device_printf(sc->sc_dev, "cannot disable read dma\n");
662 
663 	/* Wait 5ms extra. */
664 	DELAY(5000);
665 
666 	/* Finally, reset the ERX */
667 	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX);
668 	/* Wait till it finishes */
669 	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
670 		device_printf(sc->sc_dev, "cannot reset receiver\n");
671 		return (1);
672 	}
673 	return (0);
674 }
675 
676 
677 /*
678  * Reset the transmitter
679  */
680 static int
681 gem_reset_tx(sc)
682 	struct gem_softc *sc;
683 {
684 	bus_space_tag_t t = sc->sc_bustag;
685 	bus_space_handle_t h = sc->sc_h;
686 	int i;
687 
688 	/*
689 	 * Resetting while DMA is in progress can cause a bus hang, so we
690 	 * disable DMA first.
691 	 */
692 	gem_disable_tx(sc);
693 	bus_space_write_4(t, h, GEM_TX_CONFIG, 0);
694 	/* Wait till it finishes */
695 	if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0))
696 		device_printf(sc->sc_dev, "cannot disable read dma\n");
697 
698 	/* Wait 5ms extra. */
699 	DELAY(5000);
700 
701 	/* Finally, reset the ETX */
702 	bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX);
703 	/* Wait till it finishes */
704 	for (i = TRIES; i--; DELAY(100))
705 		if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0)
706 			break;
707 	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
708 		device_printf(sc->sc_dev, "cannot reset receiver\n");
709 		return (1);
710 	}
711 	return (0);
712 }
713 
714 /*
715  * disable receiver.
716  */
717 static int
718 gem_disable_rx(sc)
719 	struct gem_softc *sc;
720 {
721 	bus_space_tag_t t = sc->sc_bustag;
722 	bus_space_handle_t h = sc->sc_h;
723 	u_int32_t cfg;
724 
725 	/* Flip the enable bit */
726 	cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
727 	cfg &= ~GEM_MAC_RX_ENABLE;
728 	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg);
729 
730 	/* Wait for it to finish */
731 	return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
732 }
733 
734 /*
735  * disable transmitter.
736  */
737 static int
738 gem_disable_tx(sc)
739 	struct gem_softc *sc;
740 {
741 	bus_space_tag_t t = sc->sc_bustag;
742 	bus_space_handle_t h = sc->sc_h;
743 	u_int32_t cfg;
744 
745 	/* Flip the enable bit */
746 	cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG);
747 	cfg &= ~GEM_MAC_TX_ENABLE;
748 	bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg);
749 
750 	/* Wait for it to finish */
751 	return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
752 }
753 
/*
 * Set up the transmit and receive descriptor rings and allocate
 * receive buffers.
 */
757 static int
758 gem_meminit(sc)
759 	struct gem_softc *sc;
760 {
761 	struct gem_rxsoft *rxs;
762 	int i, error;
763 
764 	/*
765 	 * Initialize the transmit descriptor ring.
766 	 */
767 	memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
768 	for (i = 0; i < GEM_NTXDESC; i++) {
769 		sc->sc_txdescs[i].gd_flags = 0;
770 		sc->sc_txdescs[i].gd_addr = 0;
771 	}
772 	sc->sc_txfree = GEM_MAXTXFREE;
773 	sc->sc_txnext = 0;
774 	sc->sc_txwin = 0;
775 
776 	/*
777 	 * Initialize the receive descriptor and receive job
778 	 * descriptor rings.
779 	 */
780 	for (i = 0; i < GEM_NRXDESC; i++) {
781 		rxs = &sc->sc_rxsoft[i];
782 		if (rxs->rxs_mbuf == NULL) {
783 			if ((error = gem_add_rxbuf(sc, i)) != 0) {
784 				device_printf(sc->sc_dev, "unable to "
785 				    "allocate or map rx buffer %d, error = "
786 				    "%d\n", i, error);
787 				/*
788 				 * XXX Should attempt to run with fewer receive
789 				 * XXX buffers instead of just failing.
790 				 */
791 				gem_rxdrain(sc);
792 				return (1);
793 			}
794 		} else
795 			GEM_INIT_RXDESC(sc, i);
796 	}
797 	sc->sc_rxptr = 0;
798 	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
799 	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);
800 
801 	return (0);
802 }
803 
804 static int
805 gem_ringsize(sz)
806 	int sz;
807 {
808 	int v = 0;
809 
810 	switch (sz) {
811 	case 32:
812 		v = GEM_RING_SZ_32;
813 		break;
814 	case 64:
815 		v = GEM_RING_SZ_64;
816 		break;
817 	case 128:
818 		v = GEM_RING_SZ_128;
819 		break;
820 	case 256:
821 		v = GEM_RING_SZ_256;
822 		break;
823 	case 512:
824 		v = GEM_RING_SZ_512;
825 		break;
826 	case 1024:
827 		v = GEM_RING_SZ_1024;
828 		break;
829 	case 2048:
830 		v = GEM_RING_SZ_2048;
831 		break;
832 	case 4096:
833 		v = GEM_RING_SZ_4096;
834 		break;
835 	case 8192:
836 		v = GEM_RING_SZ_8192;
837 		break;
838 	default:
839 		printf("gem: invalid Receive Descriptor ring size\n");
840 		break;
841 	}
842 	return (v);
843 }
844 
845 /*
846  * Initialization of interface; set up initialization block
847  * and transmit/receive descriptor rings.
848  */
static void
gem_init(xsc)
	void *xsc;
{
	struct gem_softc *sc = (struct gem_softc *)xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	int s;
	u_int32_t v;

	s = splnet();

#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_init: calling stop", device_get_name(sc->sc_dev));
#endif
	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	gem_stop(&sc->sc_arpcom.ac_if, 0);
	gem_reset(sc);
#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "%s: gem_init: restarting", device_get_name(sc->sc_dev));
#endif

	/* Re-initialize the MIF */
	gem_mifinit(sc);

	/* step 3. Setup data structures in host memory */
	gem_meminit(sc);

	/* step 4. TX MAC registers & counters */
	gem_init_regs(sc);
	/* XXX: VLAN code from NetBSD temporarily removed. */
	bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
            (ETHER_MAX_LEN + sizeof(struct ether_header)) | (0x2000<<16));

	/* step 5. RX MAC registers & counters */
	gem_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	/* NOTE: we use only 32-bit DMA addresses here. */
	bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));

	bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0);
	bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx",
	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
#endif

	/* step 8. Global Configuration & Interrupt Mask */
	/* Unmask (0-bits) the interrupts we intend to service. */
	bus_space_write_4(t, h, GEM_INTMASK,
		      ~(GEM_INTR_TX_INTME|
			GEM_INTR_TX_EMPTY|
			GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
			GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
			GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
			GEM_INTR_BERR));
	bus_space_write_4(t, h, GEM_MAC_RX_MASK,
			GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
	bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */
	bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
	bus_space_write_4(t, h, GEM_TX_CONFIG,
		v|GEM_TX_CONFIG_TXDMA_EN|
		((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size: four possible values */
	v = gem_ringsize(GEM_NRXDESC /*XXX*/);

	/* Enable DMA */
	bus_space_write_4(t, h, GEM_RX_CONFIG,
		v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
		(2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN|
		(0<<GEM_RX_CONFIG_CXM_START_SHFT));
	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_space_write_4(t, h, GEM_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    (   (sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, GEM_RX_BLANKING, (6<<12)|6);

	/* step 11. Configure Media */
	mii_mediachg(sc->sc_mii);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
	v |= GEM_MAC_RX_ENABLE;
	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* step 15.  Give the receiver a swift kick */
	bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4);

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	/*
	 * NOTE(review): sc_ifflags is presumably compared later (e.g. in
	 * the ioctl path) to detect flag changes -- confirm against
	 * gem_ioctl().
	 */
	sc->sc_ifflags = ifp->if_flags;
	splx(s);
}
968 
/*
 * gem_load_txmbuf:
 *
 *	Map mbuf chain m0 for transmission: take a free job descriptor,
 *	load the chain with bus_dmamap_load_mbuf() (which writes the
 *	hardware descriptors via gem_txdma_callback) and move the job to
 *	the dirty queue.  Returns 0 on success, -1 if no resources are
 *	available, or an errno from the DMA load.
 */
static int
gem_load_txmbuf(sc, m0)
	struct gem_softc *sc;
	struct mbuf *m0;
{
	struct gem_txdma txd;
	struct gem_txsoft *txs;
	int error;

	/* Get a work queue entry. */
	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
		/* Ran out of descriptors. */
		return (-1);
	}
	txd.txd_sc = sc;
	txd.txd_txs = txs;
	txs->txs_mbuf = m0;
	txs->txs_firstdesc = sc->sc_txnext;
	error = bus_dmamap_load_mbuf(sc->sc_tdmatag, txs->txs_dmamap, m0,
	    gem_txdma_callback, &txd, BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;
	/* The callback sets txs_ndescs to -1 when the ring was full. */
	if (txs->txs_ndescs == -1) {
		error = -1;
		goto fail;
	}

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
	    BUS_DMASYNC_PREWRITE);

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, "
	    "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc,
	    txs->txs_ndescs);
#endif
	/* Commit: move the job from the free queue to the dirty queue. */
	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

	/* Advance the ring state past the descriptors just consumed. */
	sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
	sc->sc_txfree -= txs->txs_ndescs;
	return (0);

fail:
#ifdef GEM_DEBUG
	CTR1(KTR_GEM, "gem_load_txmbuf failed (%d)", error);
#endif
	bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
	return (error);
}
1019 
/*
 * gem_init_regs:
 *
 *	Program chip registers: one-time MAC parameters (guarded by
 *	sc_inited), statistics counters, the station address and the
 *	MIF/XIF (PHY interface) configuration.
 */
static void
gem_init_regs(sc)
	struct gem_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t h = sc->sc_h;
	const u_char *laddr = sc->sc_arpcom.ac_enaddr;
	u_int32_t v;

	/* These regs are not cleared on reset */
	if (!sc->sc_inited) {

		/* Wooo.  Magic values. */
		bus_space_write_4(t, h, GEM_MAC_IPG0, 0);
		bus_space_write_4(t, h, GEM_MAC_IPG1, 8);
		bus_space_write_4(t, h, GEM_MAC_IPG2, 4);

		bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME,
		    ETHER_MAX_LEN | (0x2000<<16));

		bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4);
		bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* Dunno.... */
		bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088);
		/* Random seed built from the low-order station address bits. */
		bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED,
		    ((laddr[5]<<8)|laddr[4])&0x3ff);

		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR3, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR4, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR5, 0);

		/* MAC control addr set to 01:80:c2:00:00:01 */
		bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001);
		bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200);
		bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0);

		/* One-time setup done; skip this section on re-init. */
		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
#if 0
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
#else
	bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0);
#endif

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]);
	bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]);
	bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]);

	/*
	 * Enable MII outputs.  Enable GMII if there is a gigabit PHY.
	 */
	sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG);
	v = GEM_MAC_XIF_TX_MII_ENA;
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		v |= GEM_MAC_XIF_FDPLX_LED;
		if (sc->sc_flags & GEM_GIGABIT)
			v |= GEM_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v);
}
1110 
1111 static void
1112 gem_start(ifp)
1113 	struct ifnet *ifp;
1114 {
1115 	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
1116 	struct mbuf *m0 = NULL;
1117 	int firsttx, ntx = 0, ofree, txmfail;
1118 
1119 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
1120 		return;
1121 
1122 	/*
1123 	 * Remember the previous number of free descriptors and
1124 	 * the first descriptor we'll use.
1125 	 */
1126 	ofree = sc->sc_txfree;
1127 	firsttx = sc->sc_txnext;
1128 
1129 #ifdef GEM_DEBUG
1130 	CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d",
1131 	    device_get_name(sc->sc_dev), ofree, firsttx);
1132 #endif
1133 
1134 	/*
1135 	 * Loop through the send queue, setting up transmit descriptors
1136 	 * until we drain the queue, or use up all available transmit
1137 	 * descriptors.
1138 	 */
1139 	txmfail = 0;
1140 	do {
1141 		/*
1142 		 * Grab a packet off the queue.
1143 		 */
1144 		IF_DEQUEUE(&ifp->if_snd, m0);
1145 		if (m0 == NULL)
1146 			break;
1147 
1148 		txmfail = gem_load_txmbuf(sc, m0);
1149 		if (txmfail > 0) {
1150 			/* Drop the mbuf and complain. */
1151 			printf("gem_start: error %d while loading mbuf dma "
1152 			    "map\n", txmfail);
1153 			continue;
1154 		}
1155 		/* Not enough descriptors. */
1156 		if (txmfail == -1) {
1157 			if (sc->sc_txfree == GEM_MAXTXFREE)
1158 				panic("gem_start: mbuf chain too long!");
1159 			IF_PREPEND(&ifp->if_snd, m0);
1160 			break;
1161 		}
1162 
1163 		ntx++;
1164 		/* Kick the transmitter. */
1165 #ifdef GEM_DEBUG
1166 		CTR2(KTR_GEM, "%s: gem_start: kicking tx %d",
1167 		    device_get_name(sc->sc_dev), sc->sc_txnext);
1168 #endif
1169 		bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK,
1170 			sc->sc_txnext);
1171 
1172 		if (ifp->if_bpf != NULL)
1173 			bpf_mtap(ifp->if_bpf, m0);
1174 	} while (1);
1175 
1176 	if (txmfail == -1 || sc->sc_txfree == 0) {
1177 		/* No more slots left; notify upper layer. */
1178 		ifp->if_flags |= IFF_OACTIVE;
1179 	}
1180 
1181 	if (ntx > 0) {
1182 		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
1183 
1184 #ifdef GEM_DEBUG
1185 		CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
1186 		    device_get_name(sc->sc_dev), firsttx);
1187 #endif
1188 
1189 		/* Set a watchdog timer in case the chip flakes out. */
1190 		ifp->if_timer = 5;
1191 #ifdef GEM_DEBUG
1192 		CTR2(KTR_GEM, "%s: gem_start: watchdog %d",
1193 			device_get_name(sc->sc_dev), ifp->if_timer);
1194 #endif
1195 	}
1196 }
1197 
1198 /*
1199  * Transmit interrupt.
1200  */
1201 static void
1202 gem_tint(sc)
1203 	struct gem_softc *sc;
1204 {
1205 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1206 	bus_space_tag_t t = sc->sc_bustag;
1207 	bus_space_handle_t mac = sc->sc_h;
1208 	struct gem_txsoft *txs;
1209 	int txlast;
1210 	int progress = 0;
1211 
1212 
1213 #ifdef GEM_DEBUG
1214 	CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev));
1215 #endif
1216 
1217 	/*
1218 	 * Unload collision counters
1219 	 */
1220 	ifp->if_collisions +=
1221 		bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) +
1222 		bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) +
1223 		bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) +
1224 		bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT);
1225 
1226 	/*
1227 	 * then clear the hardware counters.
1228 	 */
1229 	bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0);
1230 	bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0);
1231 	bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0);
1232 	bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0);
1233 
1234 	/*
1235 	 * Go through our Tx list and free mbufs for those
1236 	 * frames that have been transmitted.
1237 	 */
1238 	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
1239 	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
1240 
1241 #ifdef GEM_DEBUG
1242 		if (ifp->if_flags & IFF_DEBUG) {
1243 			int i;
1244 			printf("    txsoft %p transmit chain:\n", txs);
1245 			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
1246 				printf("descriptor %d: ", i);
1247 				printf("gd_flags: 0x%016llx\t", (long long)
1248 					GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
1249 				printf("gd_addr: 0x%016llx\n", (long long)
1250 					GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
1251 				if (i == txs->txs_lastdesc)
1252 					break;
1253 			}
1254 		}
1255 #endif
1256 
1257 		/*
1258 		 * In theory, we could harveast some descriptors before
1259 		 * the ring is empty, but that's a bit complicated.
1260 		 *
1261 		 * GEM_TX_COMPLETION points to the last descriptor
1262 		 * processed +1.
1263 		 */
1264 		txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION);
1265 #ifdef GEM_DEBUG
1266 		CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, "
1267 		    "txs->txs_lastdesc = %d, txlast = %d",
1268 		    txs->txs_firstdesc, txs->txs_lastdesc, txlast);
1269 #endif
1270 		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
1271 			if ((txlast >= txs->txs_firstdesc) &&
1272 				(txlast <= txs->txs_lastdesc))
1273 				break;
1274 		} else {
1275 			/* Ick -- this command wraps */
1276 			if ((txlast >= txs->txs_firstdesc) ||
1277 				(txlast <= txs->txs_lastdesc))
1278 				break;
1279 		}
1280 
1281 #ifdef GEM_DEBUG
1282 		CTR0(KTR_GEM, "gem_tint: releasing a desc");
1283 #endif
1284 		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
1285 
1286 		sc->sc_txfree += txs->txs_ndescs;
1287 
1288 		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
1289 		    BUS_DMASYNC_POSTWRITE);
1290 		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
1291 		if (txs->txs_mbuf != NULL) {
1292 			m_freem(txs->txs_mbuf);
1293 			txs->txs_mbuf = NULL;
1294 		}
1295 
1296 		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
1297 
1298 		ifp->if_opackets++;
1299 		progress = 1;
1300 	}
1301 
1302 #ifdef GEM_DEBUG
1303 	CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x "
1304 		"GEM_TX_DATA_PTR %llx "
1305 		"GEM_TX_COMPLETION %x",
1306 		bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE),
1307 		((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h,
1308 			GEM_TX_DATA_PTR_HI) << 32) |
1309 			     bus_space_read_4(sc->sc_bustag, sc->sc_h,
1310 			GEM_TX_DATA_PTR_LO),
1311 		bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION));
1312 #endif
1313 
1314 	if (progress) {
1315 		if (sc->sc_txfree == GEM_NTXDESC - 1)
1316 			sc->sc_txwin = 0;
1317 
1318 		/* Freed some descriptors, so reset IFF_OACTIVE and restart. */
1319 		ifp->if_flags &= ~IFF_OACTIVE;
1320 		gem_start(ifp);
1321 
1322 		if (STAILQ_EMPTY(&sc->sc_txdirtyq))
1323 			ifp->if_timer = 0;
1324 	}
1325 
1326 #ifdef GEM_DEBUG
1327 	CTR2(KTR_GEM, "%s: gem_tint: watchdog %d",
1328 		device_get_name(sc->sc_dev), ifp->if_timer);
1329 #endif
1330 }
1331 
#if 0
/*
 * Re-poll the receive ring after a descriptor was observed still owned
 * by the chip (would be scheduled from the matching disabled code in
 * gem_rint).  Currently compiled out.
 */
static void
gem_rint_timeout(arg)
	void *arg;
{

	gem_rint((struct gem_softc *)arg);
}
#endif
1341 
1342 /*
1343  * Receive interrupt.
1344  */
1345 static void
1346 gem_rint(sc)
1347 	struct gem_softc *sc;
1348 {
1349 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1350 	bus_space_tag_t t = sc->sc_bustag;
1351 	bus_space_handle_t h = sc->sc_h;
1352 	struct gem_rxsoft *rxs;
1353 	struct mbuf *m;
1354 	u_int64_t rxstat;
1355 	u_int32_t rxcomp;
1356 	int i, len, progress = 0;
1357 
1358 	callout_stop(&sc->sc_rx_ch);
1359 #ifdef GEM_DEBUG
1360 	CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev));
1361 #endif
1362 
1363 	/*
1364 	 * Read the completion register once.  This limits
1365 	 * how long the following loop can execute.
1366 	 */
1367 	rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION);
1368 
1369 #ifdef GEM_DEBUG
1370 	CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d",
1371 	    sc->sc_rxptr, rxcomp);
1372 #endif
1373 	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
1374 	for (i = sc->sc_rxptr; i != rxcomp;
1375 	     i = GEM_NEXTRX(i)) {
1376 		rxs = &sc->sc_rxsoft[i];
1377 
1378 		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);
1379 
1380 		if (rxstat & GEM_RD_OWN) {
1381 #if 0 /* XXX: In case of emergency, re-enable this. */
1382 			/*
1383 			 * The descriptor is still marked as owned, although
1384 			 * it is supposed to have completed. This has been
1385 			 * observed on some machines. Just exiting here
1386 			 * might leave the packet sitting around until another
1387 			 * one arrives to trigger a new interrupt, which is
1388 			 * generally undesirable, so set up a timeout.
1389 			 */
1390 			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
1391 			    gem_rint_timeout, sc);
1392 #endif
1393 			break;
1394 		}
1395 
1396 		progress++;
1397 		ifp->if_ipackets++;
1398 
1399 		if (rxstat & GEM_RD_BAD_CRC) {
1400 			ifp->if_ierrors++;
1401 			device_printf(sc->sc_dev, "receive error: CRC error\n");
1402 			GEM_INIT_RXDESC(sc, i);
1403 			continue;
1404 		}
1405 
1406 #ifdef GEM_DEBUG
1407 		if (ifp->if_flags & IFF_DEBUG) {
1408 			printf("    rxsoft %p descriptor %d: ", rxs, i);
1409 			printf("gd_flags: 0x%016llx\t", (long long)
1410 				GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
1411 			printf("gd_addr: 0x%016llx\n", (long long)
1412 				GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
1413 		}
1414 #endif
1415 
1416 		/*
1417 		 * No errors; receive the packet.  Note the Gem
1418 		 * includes the CRC with every packet.
1419 		 */
1420 		len = GEM_RD_BUFLEN(rxstat);
1421 
1422 		/*
1423 		 * Allocate a new mbuf cluster.  If that fails, we are
1424 		 * out of memory, and must drop the packet and recycle
1425 		 * the buffer that's already attached to this descriptor.
1426 		 */
1427 		m = rxs->rxs_mbuf;
1428 		if (gem_add_rxbuf(sc, i) != 0) {
1429 			ifp->if_ierrors++;
1430 			GEM_INIT_RXDESC(sc, i);
1431 			continue;
1432 		}
1433 		m->m_data += 2; /* We're already off by two */
1434 
1435 		m->m_pkthdr.rcvif = ifp;
1436 		m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN;
1437 
1438 		/* Pass it on. */
1439 		(*ifp->if_input)(ifp, m);
1440 	}
1441 
1442 	if (progress) {
1443 		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
1444 		/* Update the receive pointer. */
1445 		if (i == sc->sc_rxptr) {
1446 			device_printf(sc->sc_dev, "rint: ring wrap\n");
1447 		}
1448 		sc->sc_rxptr = i;
1449 		bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i));
1450 	}
1451 
1452 #ifdef GEM_DEBUG
1453 	CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d",
1454 		sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION));
1455 #endif
1456 }
1457 
1458 
1459 /*
1460  * gem_add_rxbuf:
1461  *
1462  *	Add a receive buffer to the indicated descriptor.
1463  */
1464 static int
1465 gem_add_rxbuf(sc, idx)
1466 	struct gem_softc *sc;
1467 	int idx;
1468 {
1469 	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
1470 	struct mbuf *m;
1471 	int error;
1472 
1473 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1474 	if (m == NULL)
1475 		return (ENOBUFS);
1476 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
1477 
1478 #ifdef GEM_DEBUG
1479 	/* bzero the packet to check dma */
1480 	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
1481 #endif
1482 
1483 	if (rxs->rxs_mbuf != NULL) {
1484 		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
1485 		    BUS_DMASYNC_POSTREAD);
1486 		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
1487 	}
1488 
1489 	rxs->rxs_mbuf = m;
1490 
1491 	error = bus_dmamap_load_mbuf(sc->sc_rdmatag, rxs->rxs_dmamap,
1492 	    m, gem_rxdma_callback, rxs, BUS_DMA_NOWAIT);
1493 	if (error != 0 || rxs->rxs_paddr == 0) {
1494 		device_printf(sc->sc_dev, "can't load rx DMA map %d, error = "
1495 		    "%d\n", idx, error);
1496 		panic("gem_add_rxbuf");	/* XXX */
1497 	}
1498 
1499 	bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);
1500 
1501 	GEM_INIT_RXDESC(sc, idx);
1502 
1503 	return (0);
1504 }
1505 
1506 
1507 static void
1508 gem_eint(sc, status)
1509 	struct gem_softc *sc;
1510 	u_int status;
1511 {
1512 
1513 	if ((status & GEM_INTR_MIF) != 0) {
1514 		device_printf(sc->sc_dev, "XXXlink status changed\n");
1515 		return;
1516 	}
1517 
1518 	device_printf(sc->sc_dev, "status=%x\n", status);
1519 }
1520 
1521 
1522 void
1523 gem_intr(v)
1524 	void *v;
1525 {
1526 	struct gem_softc *sc = (struct gem_softc *)v;
1527 	bus_space_tag_t t = sc->sc_bustag;
1528 	bus_space_handle_t seb = sc->sc_h;
1529 	u_int32_t status;
1530 
1531 	status = bus_space_read_4(t, seb, GEM_STATUS);
1532 #ifdef GEM_DEBUG
1533 	CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x",
1534 		device_get_name(sc->sc_dev), (status>>19),
1535 		(u_int)status);
1536 #endif
1537 
1538 	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
1539 		gem_eint(sc, status);
1540 
1541 	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
1542 		gem_tint(sc);
1543 
1544 	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
1545 		gem_rint(sc);
1546 
1547 	/* We should eventually do more than just print out error stats. */
1548 	if (status & GEM_INTR_TX_MAC) {
1549 		int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS);
1550 		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
1551 			device_printf(sc->sc_dev, "MAC tx fault, status %x\n",
1552 			    txstat);
1553 		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
1554 			gem_init(sc);
1555 	}
1556 	if (status & GEM_INTR_RX_MAC) {
1557 		int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS);
1558 		if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
1559 			device_printf(sc->sc_dev, "MAC rx fault, status %x\n",
1560 			    rxstat);
1561 		if ((rxstat & GEM_MAC_RX_OVERFLOW) != 0)
1562 			gem_init(sc);
1563 	}
1564 }
1565 
1566 
1567 static void
1568 gem_watchdog(ifp)
1569 	struct ifnet *ifp;
1570 {
1571 	struct gem_softc *sc = ifp->if_softc;
1572 
1573 #ifdef GEM_DEBUG
1574 	CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
1575 		"GEM_MAC_RX_CONFIG %x",
1576 		bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG),
1577 		bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS),
1578 		bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG));
1579 	CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
1580 		"GEM_MAC_TX_CONFIG %x",
1581 		bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_CONFIG),
1582 		bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_STATUS),
1583 		bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_CONFIG));
1584 #endif
1585 
1586 	device_printf(sc->sc_dev, "device timeout\n");
1587 	++ifp->if_oerrors;
1588 
1589 	/* Try to get more packets going. */
1590 	gem_start(ifp);
1591 }
1592 
1593 /*
1594  * Initialize the MII Management Interface
1595  */
1596 static void
1597 gem_mifinit(sc)
1598 	struct gem_softc *sc;
1599 {
1600 	bus_space_tag_t t = sc->sc_bustag;
1601 	bus_space_handle_t mif = sc->sc_h;
1602 
1603 	/* Configure the MIF in frame mode */
1604 	sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
1605 	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
1606 	bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config);
1607 }
1608 
1609 /*
1610  * MII interface
1611  *
1612  * The GEM MII interface supports at least three different operating modes:
1613  *
1614  * Bitbang mode is implemented using data, clock and output enable registers.
1615  *
1616  * Frame mode is implemented by loading a complete frame into the frame
1617  * register and polling the valid bit for completion.
1618  *
1619  * Polling mode uses the frame register but completion is indicated by
1620  * an interrupt.
1621  *
1622  */
1623 int
1624 gem_mii_readreg(dev, phy, reg)
1625 	device_t dev;
1626 	int phy, reg;
1627 {
1628 	struct gem_softc *sc = device_get_softc(dev);
1629 	bus_space_tag_t t = sc->sc_bustag;
1630 	bus_space_handle_t mif = sc->sc_h;
1631 	int n;
1632 	u_int32_t v;
1633 
1634 #ifdef GEM_DEBUG_PHY
1635 	printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
1636 #endif
1637 
1638 #if 0
1639 	/* Select the desired PHY in the MIF configuration register */
1640 	v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
1641 	/* Clear PHY select bit */
1642 	v &= ~GEM_MIF_CONFIG_PHY_SEL;
1643 	if (phy == GEM_PHYAD_EXTERNAL)
1644 		/* Set PHY select bit to get at external device */
1645 		v |= GEM_MIF_CONFIG_PHY_SEL;
1646 	bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
1647 #endif
1648 
1649 	/* Construct the frame command */
1650 	v = (reg << GEM_MIF_REG_SHIFT)	| (phy << GEM_MIF_PHY_SHIFT) |
1651 		GEM_MIF_FRAME_READ;
1652 
1653 	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
1654 	for (n = 0; n < 100; n++) {
1655 		DELAY(1);
1656 		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
1657 		if (v & GEM_MIF_FRAME_TA0)
1658 			return (v & GEM_MIF_FRAME_DATA);
1659 	}
1660 
1661 	device_printf(sc->sc_dev, "mii_read timeout\n");
1662 	return (0);
1663 }
1664 
1665 int
1666 gem_mii_writereg(dev, phy, reg, val)
1667 	device_t dev;
1668 	int phy, reg, val;
1669 {
1670 	struct gem_softc *sc = device_get_softc(dev);
1671 	bus_space_tag_t t = sc->sc_bustag;
1672 	bus_space_handle_t mif = sc->sc_h;
1673 	int n;
1674 	u_int32_t v;
1675 
1676 #ifdef GEM_DEBUG_PHY
1677 	printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val);
1678 #endif
1679 
1680 #if 0
1681 	/* Select the desired PHY in the MIF configuration register */
1682 	v = bus_space_read_4(t, mif, GEM_MIF_CONFIG);
1683 	/* Clear PHY select bit */
1684 	v &= ~GEM_MIF_CONFIG_PHY_SEL;
1685 	if (phy == GEM_PHYAD_EXTERNAL)
1686 		/* Set PHY select bit to get at external device */
1687 		v |= GEM_MIF_CONFIG_PHY_SEL;
1688 	bus_space_write_4(t, mif, GEM_MIF_CONFIG, v);
1689 #endif
1690 	/* Construct the frame command */
1691 	v = GEM_MIF_FRAME_WRITE			|
1692 	    (phy << GEM_MIF_PHY_SHIFT)		|
1693 	    (reg << GEM_MIF_REG_SHIFT)		|
1694 	    (val & GEM_MIF_FRAME_DATA);
1695 
1696 	bus_space_write_4(t, mif, GEM_MIF_FRAME, v);
1697 	for (n = 0; n < 100; n++) {
1698 		DELAY(1);
1699 		v = bus_space_read_4(t, mif, GEM_MIF_FRAME);
1700 		if (v & GEM_MIF_FRAME_TA0)
1701 			return (1);
1702 	}
1703 
1704 	device_printf(sc->sc_dev, "mii_write timeout\n");
1705 	return (0);
1706 }
1707 
/*
 * MII status-change callback: reprogram the MAC transmit configuration
 * and XIF configuration to match the newly negotiated media (duplex,
 * GMII vs. MII, internal vs. external transceiver).
 */
void
gem_mii_statchg(dev)
	device_t dev;
{
	struct gem_softc *sc = device_get_softc(dev);
#ifdef GEM_DEBUG
	int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_h;
	u_int32_t v;

#ifdef GEM_DEBUG
	if (sc->sc_debug)
		printf("gem_mii_statchg: status change: phy = %d\n",
			sc->sc_phys[instance]);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0);
	DELAY(10000); /* reg must be cleared and delay before changing. */
	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
		GEM_MAC_TX_ENABLE;
	/* Full duplex: no need to watch carrier or back off on collision. */
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) {
		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	/* We should really calculate all this rather than rely on defaults */
	/*
	 * NOTE(review): the value read here is discarded -- `v' is
	 * overwritten on the very next line.  Either the read is dead code
	 * or it relies on a register-read side effect; confirm before
	 * removing.
	 */
	v = bus_space_read_4(t, mac, GEM_MAC_XIF_CONFIG);
	v = GEM_MAC_XIF_LINK_LED;
	v |= GEM_MAC_XIF_TX_MII_ENA;

	/* If an external transceiver is connected, enable its MII drivers */
	sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG);
	if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
		/* External MII needs echo disable if half duplex. */
		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
			/* turn on full duplex LED */
			v |= GEM_MAC_XIF_FDPLX_LED;
		else
	 		/* half duplex -- disable echo */
	 		v |= GEM_MAC_XIF_ECHO_DISABL;

		if (IFM_SUBTYPE(sc->sc_mii->mii_media_active) == IFM_1000_T)
			v |= GEM_MAC_XIF_GMII_MODE;
		else
			v &= ~GEM_MAC_XIF_GMII_MODE;
	} else {
		/* Internal MII needs buf enable */
		v |= GEM_MAC_XIF_MII_BUF_ENA;
	}
	bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v);
}
1763 
1764 int
1765 gem_mediachange(ifp)
1766 	struct ifnet *ifp;
1767 {
1768 	struct gem_softc *sc = ifp->if_softc;
1769 
1770 	/* XXX Add support for serial media. */
1771 
1772 	return (mii_mediachg(sc->sc_mii));
1773 }
1774 
1775 void
1776 gem_mediastatus(ifp, ifmr)
1777 	struct ifnet *ifp;
1778 	struct ifmediareq *ifmr;
1779 {
1780 	struct gem_softc *sc = ifp->if_softc;
1781 
1782 	if ((ifp->if_flags & IFF_UP) == 0)
1783 		return;
1784 
1785 	mii_pollstat(sc->sc_mii);
1786 	ifmr->ifm_active = sc->sc_mii->mii_media_active;
1787 	ifmr->ifm_status = sc->sc_mii->mii_media_status;
1788 }
1789 
1790 /*
1791  * Process an ioctl request.
1792  */
1793 static int
1794 gem_ioctl(ifp, cmd, data)
1795 	struct ifnet *ifp;
1796 	u_long cmd;
1797 	caddr_t data;
1798 {
1799 	struct gem_softc *sc = ifp->if_softc;
1800 	struct ifreq *ifr = (struct ifreq *)data;
1801 	int s, error = 0;
1802 
1803 	switch (cmd) {
1804 	case SIOCSIFADDR:
1805 	case SIOCGIFADDR:
1806 	case SIOCSIFMTU:
1807 		error = ether_ioctl(ifp, cmd, data);
1808 		break;
1809 	case SIOCSIFFLAGS:
1810 		if (ifp->if_flags & IFF_UP) {
1811 			if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC)
1812 				gem_setladrf(sc);
1813 			else
1814 				gem_init(sc);
1815 		} else {
1816 			if (ifp->if_flags & IFF_RUNNING)
1817 				gem_stop(ifp, 0);
1818 		}
1819 		sc->sc_ifflags = ifp->if_flags;
1820 		error = 0;
1821 		break;
1822 	case SIOCADDMULTI:
1823 	case SIOCDELMULTI:
1824 		gem_setladrf(sc);
1825 		error = 0;
1826 		break;
1827 	case SIOCGIFMEDIA:
1828 	case SIOCSIFMEDIA:
1829 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
1830 		break;
1831 	default:
1832 		error = ENOTTY;
1833 		break;
1834 	}
1835 
1836 	/* Try to get things going again */
1837 	if (ifp->if_flags & IFF_UP)
1838 		gem_start(ifp);
1839 	splx(s);
1840 	return (error);
1841 }
1842 
1843 /*
1844  * Set up the logical address filter.
1845  */
1846 static void
1847 gem_setladrf(sc)
1848 	struct gem_softc *sc;
1849 {
1850 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
1851 	struct ifmultiaddr *inm;
1852 	struct sockaddr_dl *sdl;
1853 	bus_space_tag_t t = sc->sc_bustag;
1854 	bus_space_handle_t h = sc->sc_h;
1855 	u_char *cp;
1856 	u_int32_t crc;
1857 	u_int32_t hash[16];
1858 	u_int32_t v;
1859 	int len;
1860 	int i;
1861 
1862 	/* Get current RX configuration */
1863 	v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG);
1864 
1865 	/*
1866 	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
1867 	 * and hash filter.  Depending on the case, the right bit will be
1868 	 * enabled.
1869 	 */
1870 	v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER|
1871 	    GEM_MAC_RX_PROMISC_GRP);
1872 
1873 	if ((ifp->if_flags & IFF_PROMISC) != 0) {
1874 		/* Turn on promiscuous mode */
1875 		v |= GEM_MAC_RX_PROMISCUOUS;
1876 		goto chipit;
1877 	}
1878 	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
1879 		hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
1880 		ifp->if_flags |= IFF_ALLMULTI;
1881 		v |= GEM_MAC_RX_PROMISC_GRP;
1882 		goto chipit;
1883 	}
1884 
1885 	/*
1886 	 * Set up multicast address filter by passing all multicast addresses
1887 	 * through a crc generator, and then using the high order 8 bits as an
1888 	 * index into the 256 bit logical address filter.  The high order 4
1889 	 * bits selects the word, while the other 4 bits select the bit within
1890 	 * the word (where bit 0 is the MSB).
1891 	 */
1892 
1893 	/* Clear hash table */
1894 	memset(hash, 0, sizeof(hash));
1895 
1896 	TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
1897 		if (inm->ifma_addr->sa_family != AF_LINK)
1898 			continue;
1899 		sdl = (struct sockaddr_dl *)inm->ifma_addr;
1900 		cp = LLADDR(sdl);
1901 		crc = 0xffffffff;
1902 		for (len = sdl->sdl_alen; --len >= 0;) {
1903 			int octet = *cp++;
1904 			int i;
1905 
1906 #define MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
1907 			for (i = 0; i < 8; i++) {
1908 				if ((crc & 1) ^ (octet & 1)) {
1909 					crc >>= 1;
1910 					crc ^= MC_POLY_LE;
1911 				} else {
1912 					crc >>= 1;
1913 				}
1914 				octet >>= 1;
1915 			}
1916 		}
1917 		/* Just want the 8 most significant bits. */
1918 		crc >>= 24;
1919 
1920 		/* Set the corresponding bit in the filter. */
1921 		hash[crc >> 4] |= 1 << (15 - (crc & 15));
1922 	}
1923 
1924 	v |= GEM_MAC_RX_HASH_FILTER;
1925 	ifp->if_flags &= ~IFF_ALLMULTI;
1926 
1927 	/* Now load the hash table into the chip (if we are using it) */
1928 	for (i = 0; i < 16; i++) {
1929 		bus_space_write_4(t, h,
1930 		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0),
1931 		    hash[i]);
1932 	}
1933 
1934 chipit:
1935 	bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v);
1936 }
1937