xref: /freebsd/sys/dev/gem/if_gem.c (revision e87137e1337501e1858be85f3c4b9bdb8cdd9fa4)
1aad970f1SDavid E. O'Brien /*-
242c1b001SThomas Moestl  * Copyright (C) 2001 Eduardo Horvath.
3305f2c06SThomas Moestl  * Copyright (c) 2001-2003 Thomas Moestl
442c1b001SThomas Moestl  * All rights reserved.
542c1b001SThomas Moestl  *
642c1b001SThomas Moestl  * Redistribution and use in source and binary forms, with or without
742c1b001SThomas Moestl  * modification, are permitted provided that the following conditions
842c1b001SThomas Moestl  * are met:
942c1b001SThomas Moestl  * 1. Redistributions of source code must retain the above copyright
1042c1b001SThomas Moestl  *    notice, this list of conditions and the following disclaimer.
1142c1b001SThomas Moestl  * 2. Redistributions in binary form must reproduce the above copyright
1242c1b001SThomas Moestl  *    notice, this list of conditions and the following disclaimer in the
1342c1b001SThomas Moestl  *    documentation and/or other materials provided with the distribution.
1442c1b001SThomas Moestl  *
1542c1b001SThomas Moestl  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
1642c1b001SThomas Moestl  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1742c1b001SThomas Moestl  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1842c1b001SThomas Moestl  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
1942c1b001SThomas Moestl  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2042c1b001SThomas Moestl  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2142c1b001SThomas Moestl  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2242c1b001SThomas Moestl  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2342c1b001SThomas Moestl  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2442c1b001SThomas Moestl  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2542c1b001SThomas Moestl  * SUCH DAMAGE.
2642c1b001SThomas Moestl  *
27336cca9eSBenno Rice  *	from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
2842c1b001SThomas Moestl  */
2942c1b001SThomas Moestl 
30aad970f1SDavid E. O'Brien #include <sys/cdefs.h>
31aad970f1SDavid E. O'Brien __FBSDID("$FreeBSD$");
32aad970f1SDavid E. O'Brien 
3342c1b001SThomas Moestl /*
3442c1b001SThomas Moestl  * Driver for Sun GEM ethernet controllers.
3542c1b001SThomas Moestl  */
3642c1b001SThomas Moestl 
3718100346SThomas Moestl #if 0
3842c1b001SThomas Moestl #define	GEM_DEBUG
3918100346SThomas Moestl #endif
4042c1b001SThomas Moestl 
41c3d5598aSMarius Strobl #if 0	/* XXX: In case of emergency, re-enable this. */
42c3d5598aSMarius Strobl #define	GEM_RINT_TIMEOUT
43c3d5598aSMarius Strobl #endif
44c3d5598aSMarius Strobl 
4542c1b001SThomas Moestl #include <sys/param.h>
4642c1b001SThomas Moestl #include <sys/systm.h>
4742c1b001SThomas Moestl #include <sys/bus.h>
4842c1b001SThomas Moestl #include <sys/callout.h>
49a30d4b32SMike Barcroft #include <sys/endian.h>
5042c1b001SThomas Moestl #include <sys/mbuf.h>
5142c1b001SThomas Moestl #include <sys/malloc.h>
5242c1b001SThomas Moestl #include <sys/kernel.h>
538cfaff7dSMarius Strobl #include <sys/lock.h>
54186f2b9eSPoul-Henning Kamp #include <sys/module.h>
558cfaff7dSMarius Strobl #include <sys/mutex.h>
5642c1b001SThomas Moestl #include <sys/socket.h>
5742c1b001SThomas Moestl #include <sys/sockio.h>
58e1bb13cdSPoul-Henning Kamp #include <sys/rman.h>
5942c1b001SThomas Moestl 
6008e0fdebSThomas Moestl #include <net/bpf.h>
6142c1b001SThomas Moestl #include <net/ethernet.h>
6242c1b001SThomas Moestl #include <net/if.h>
6342c1b001SThomas Moestl #include <net/if_arp.h>
6442c1b001SThomas Moestl #include <net/if_dl.h>
6542c1b001SThomas Moestl #include <net/if_media.h>
66fc74a9f9SBrooks Davis #include <net/if_types.h>
6700d12766SMarius Strobl #include <net/if_vlan_var.h>
6842c1b001SThomas Moestl 
6912fb0330SPyun YongHyeon #include <netinet/in.h>
7012fb0330SPyun YongHyeon #include <netinet/in_systm.h>
7112fb0330SPyun YongHyeon #include <netinet/ip.h>
7212fb0330SPyun YongHyeon #include <netinet/tcp.h>
7312fb0330SPyun YongHyeon #include <netinet/udp.h>
7412fb0330SPyun YongHyeon 
7542c1b001SThomas Moestl #include <machine/bus.h>
7642c1b001SThomas Moestl 
7742c1b001SThomas Moestl #include <dev/mii/mii.h>
7842c1b001SThomas Moestl #include <dev/mii/miivar.h>
7942c1b001SThomas Moestl 
80681f7d03SWarner Losh #include <dev/gem/if_gemreg.h>
81681f7d03SWarner Losh #include <dev/gem/if_gemvar.h>
8242c1b001SThomas Moestl 
8342c1b001SThomas Moestl #define TRIES	10000
/*
 * The GEM hardware supports basic TCP/UDP checksum offloading.  However,
 * the hardware does not compensate the checksum of UDP datagrams, so the
 * transmitted checksum can end up as 0x0.  As a safeguard, UDP checksum
 * offload is disabled by default.  It can be re-enabled by setting the
 * special link option link0 with ifconfig(8).
 */
9112fb0330SPyun YongHyeon #define	GEM_CSUM_FEATURES	(CSUM_TCP)
9242c1b001SThomas Moestl 
93e51a25f8SAlfred Perlstein static void	gem_start(struct ifnet *);
948cfaff7dSMarius Strobl static void	gem_start_locked(struct ifnet *);
95e51a25f8SAlfred Perlstein static void	gem_stop(struct ifnet *, int);
96e51a25f8SAlfred Perlstein static int	gem_ioctl(struct ifnet *, u_long, caddr_t);
97e51a25f8SAlfred Perlstein static void	gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
9812fb0330SPyun YongHyeon static __inline void gem_txcksum(struct gem_softc *, struct mbuf *, uint64_t *);
9912fb0330SPyun YongHyeon static __inline void gem_rxcksum(struct mbuf *, uint64_t);
100e51a25f8SAlfred Perlstein static void	gem_tick(void *);
1018cb37876SMarius Strobl static int	gem_watchdog(struct gem_softc *);
102e51a25f8SAlfred Perlstein static void	gem_init(void *);
1038cb37876SMarius Strobl static void	gem_init_locked(struct gem_softc *);
1048cb37876SMarius Strobl static void	gem_init_regs(struct gem_softc *);
105e51a25f8SAlfred Perlstein static int	gem_ringsize(int sz);
106e51a25f8SAlfred Perlstein static int	gem_meminit(struct gem_softc *);
10712fb0330SPyun YongHyeon static struct mbuf *gem_defrag(struct mbuf *, int, int);
10812fb0330SPyun YongHyeon static int	gem_load_txmbuf(struct gem_softc *, struct mbuf **);
109e51a25f8SAlfred Perlstein static void	gem_mifinit(struct gem_softc *);
1108cb37876SMarius Strobl static int	gem_bitwait(struct gem_softc *, bus_addr_t, u_int32_t,
1118cb37876SMarius Strobl     u_int32_t);
112e51a25f8SAlfred Perlstein static int	gem_reset_rx(struct gem_softc *);
113e51a25f8SAlfred Perlstein static int	gem_reset_tx(struct gem_softc *);
114e51a25f8SAlfred Perlstein static int	gem_disable_rx(struct gem_softc *);
115e51a25f8SAlfred Perlstein static int	gem_disable_tx(struct gem_softc *);
116e51a25f8SAlfred Perlstein static void	gem_rxdrain(struct gem_softc *);
117e51a25f8SAlfred Perlstein static int	gem_add_rxbuf(struct gem_softc *, int);
118e51a25f8SAlfred Perlstein static void	gem_setladrf(struct gem_softc *);
11942c1b001SThomas Moestl 
120e51a25f8SAlfred Perlstein struct mbuf	*gem_get(struct gem_softc *, int, int);
121e51a25f8SAlfred Perlstein static void	gem_eint(struct gem_softc *, u_int);
122e51a25f8SAlfred Perlstein static void	gem_rint(struct gem_softc *);
123c3d5598aSMarius Strobl #ifdef GEM_RINT_TIMEOUT
1240d80b9bdSThomas Moestl static void	gem_rint_timeout(void *);
12511e3f060SJake Burkholder #endif
126e51a25f8SAlfred Perlstein static void	gem_tint(struct gem_softc *);
12742c1b001SThomas Moestl #ifdef notyet
128e51a25f8SAlfred Perlstein static void	gem_power(int, void *);
12942c1b001SThomas Moestl #endif
13042c1b001SThomas Moestl 
13142c1b001SThomas Moestl devclass_t gem_devclass;
13242c1b001SThomas Moestl DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
13342c1b001SThomas Moestl MODULE_DEPEND(gem, miibus, 1, 1, 1);
13442c1b001SThomas Moestl 
13542c1b001SThomas Moestl #ifdef GEM_DEBUG
13642c1b001SThomas Moestl #include <sys/ktr.h>
13742c1b001SThomas Moestl #define	KTR_GEM		KTR_CT2
13842c1b001SThomas Moestl #endif
13942c1b001SThomas Moestl 
14018100346SThomas Moestl #define	GEM_NSEGS GEM_NTXDESC
14142c1b001SThomas Moestl 
14242c1b001SThomas Moestl /*
14342c1b001SThomas Moestl  * gem_attach:
14442c1b001SThomas Moestl  *
14542c1b001SThomas Moestl  *	Attach a Gem interface to the system.
14642c1b001SThomas Moestl  */
int
gem_attach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp;
	struct mii_softc *child;
	int i, error;
	u_int32_t v;

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		return (ENOSPC);

	/* Both callouts are protected by the driver mutex. */
	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
#ifdef GEM_RINT_TIMEOUT
	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
#endif

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	GEM_LOCK(sc);
	gem_stop(ifp, 0);
	gem_reset(sc);
	GEM_UNLOCK(sc);

	/* Parent DMA tag; the RX, TX and control-data tags derive from it. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->sc_pdmatag);
	if (error)
		goto fail_ifnet;

	/* RX buffer tag: one cluster-sized segment per buffer. */
	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
	if (error)
		goto fail_ptag;

	/* TX mbuf tag: up to GEM_NTXSEGS segments per packet. */
	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES,
	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
	if (error)
		goto fail_rtag;

	/* Control-data tag: one page-aligned segment for the rings. */
	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct gem_control_data), 1,
	    sizeof(struct gem_control_data), 0,
	    NULL, NULL, &sc->sc_cdmatag);
	if (error)
		goto fail_ttag;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
	    (void **)&sc->sc_control_data,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->sc_cddmamap))) {
		device_printf(sc->sc_dev, "unable to allocate control data,"
		    " error = %d\n", error);
		goto fail_ctag;
	}

	/* sc_cddma still being 0 after the load means the callback failed. */
	sc->sc_cddma = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct gem_control_data),
	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
		device_printf(sc->sc_dev, "unable to load control data DMA "
		    "map, error = %d\n", error);
		goto fail_cmem;
	}

	/*
	 * Initialize the transmit job descriptors.
	 */
	STAILQ_INIT(&sc->sc_txfreeq);
	STAILQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Create the transmit buffer DMA maps.
	 */
	error = ENOMEM;
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		struct gem_txsoft *txs;

		txs = &sc->sc_txsoft[i];
		txs->txs_mbuf = NULL;
		txs->txs_ndescs = 0;
		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &txs->txs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create tx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_txd;
		}
		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			device_printf(sc->sc_dev, "unable to create rx DMA map "
			    "%d, error = %d\n", i, error);
			goto fail_rxd;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	gem_mifinit(sc);

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange,
	    gem_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
		goto fail_rxd;
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Get RX FIFO size */
	sc->sc_rxfifosize = 64 *
	    bus_read_4(sc->sc_res[0], GEM_RX_FIFO_SIZE);

	/* Get TX FIFO size */
	v = bus_read_4(sc->sc_res[0], GEM_TX_FIFO_SIZE);
	device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
	    sc->sc_rxfifosize / 1024, v / 16);

	sc->sc_csum_features = GEM_CSUM_FEATURES;
	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = gem_start;
	ifp->if_ioctl = gem_ioctl;
	ifp->if_init = gem_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN);
	ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN;
	IFQ_SET_READY(&ifp->if_snd);
	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `phy number'
	 * mapping. We'll use this mapping in media change
	 * requests to determine which phy to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	     child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accomodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}

	/*
	 * Now select and activate the PHY we will use.
	 *
	 * The order of preference is External (MDI1),
	 * Internal (MDI0), Serial Link (no MII).
	 */
	if (sc->sc_phys[1]) {
#ifdef GEM_DEBUG
		printf("using external phy\n");
#endif
		sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL;
	} else {
#ifdef GEM_DEBUG
		printf("using internal phy\n");
#endif
		sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL;
	}
	bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG,
	    sc->sc_mif_config);
	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_enaddr);

#ifdef notyet
	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(gem_power, sc);
	if (sc->sc_powerhook == NULL)
		device_printf(sc->sc_dev, "WARNING: unable to establish power "
		    "hook\n");
#endif

	/*
	 * Tell the upper layer(s) we support long frames/checksum offloads.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
	ifp->if_hwassist |= sc->sc_csum_features;
	ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;

	return (0);

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_rxd:
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
fail_txd:
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
fail_cmem:
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
fail_ctag:
	bus_dma_tag_destroy(sc->sc_cdmatag);
fail_ttag:
	bus_dma_tag_destroy(sc->sc_tdmatag);
fail_rtag:
	bus_dma_tag_destroy(sc->sc_rdmatag);
fail_ptag:
	bus_dma_tag_destroy(sc->sc_pdmatag);
fail_ifnet:
	if_free(ifp);
	return (error);
}
39642c1b001SThomas Moestl 
/*
 * gem_detach: detach the interface, releasing everything that
 * gem_attach() allocated (callouts, ifnet, miibus child, DMA maps,
 * control data and DMA tags).
 */
void
gem_detach(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	GEM_LOCK(sc);
	gem_stop(ifp, 1);
	GEM_UNLOCK(sc);
	/* Drain the callouts; done after dropping the driver lock. */
	callout_drain(&sc->sc_tick_ch);
#ifdef GEM_RINT_TIMEOUT
	callout_drain(&sc->sc_rx_ch);
#endif
	ether_ifdetach(ifp);
	if_free(ifp);
	device_delete_child(sc->sc_dev, sc->sc_miibus);

	/* Destroy the per-buffer RX and TX DMA maps. */
	for (i = 0; i < GEM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_rdmatag,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
	for (i = 0; i < GEM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_tdmatag,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	/* Sync, unload and free the control data, then tear down the tags. */
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
	    sc->sc_cddmamap);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}
435cbbdf236SThomas Moestl 
436cbbdf236SThomas Moestl void
437cbbdf236SThomas Moestl gem_suspend(sc)
438cbbdf236SThomas Moestl 	struct gem_softc *sc;
439cbbdf236SThomas Moestl {
440fc74a9f9SBrooks Davis 	struct ifnet *ifp = sc->sc_ifp;
441cbbdf236SThomas Moestl 
4428cfaff7dSMarius Strobl 	GEM_LOCK(sc);
443cbbdf236SThomas Moestl 	gem_stop(ifp, 0);
4448cfaff7dSMarius Strobl 	GEM_UNLOCK(sc);
445cbbdf236SThomas Moestl }
446cbbdf236SThomas Moestl 
447cbbdf236SThomas Moestl void
448cbbdf236SThomas Moestl gem_resume(sc)
449cbbdf236SThomas Moestl 	struct gem_softc *sc;
450cbbdf236SThomas Moestl {
451fc74a9f9SBrooks Davis 	struct ifnet *ifp = sc->sc_ifp;
452cbbdf236SThomas Moestl 
4538cfaff7dSMarius Strobl 	GEM_LOCK(sc);
45400d12766SMarius Strobl 	/*
45500d12766SMarius Strobl 	 * On resume all registers have to be initialized again like
45600d12766SMarius Strobl 	 * after power-on.
45700d12766SMarius Strobl 	 */
45800d12766SMarius Strobl 	sc->sc_inited = 0;
459cbbdf236SThomas Moestl 	if (ifp->if_flags & IFF_UP)
4608cfaff7dSMarius Strobl 		gem_init_locked(sc);
4618cfaff7dSMarius Strobl 	GEM_UNLOCK(sc);
462cbbdf236SThomas Moestl }
463cbbdf236SThomas Moestl 
46412fb0330SPyun YongHyeon static __inline void
46512fb0330SPyun YongHyeon gem_txcksum(struct gem_softc *sc, struct mbuf *m, uint64_t *cflags)
46612fb0330SPyun YongHyeon {
46712fb0330SPyun YongHyeon 	struct ip *ip;
46812fb0330SPyun YongHyeon 	uint64_t offset, offset2;
46912fb0330SPyun YongHyeon 	char *p;
47012fb0330SPyun YongHyeon 
47112fb0330SPyun YongHyeon 	offset = sizeof(struct ip) + ETHER_HDR_LEN;
47212fb0330SPyun YongHyeon 	for(; m && m->m_len == 0; m = m->m_next)
47312fb0330SPyun YongHyeon 		;
47412fb0330SPyun YongHyeon 	if (m == NULL || m->m_len < ETHER_HDR_LEN) {
47512fb0330SPyun YongHyeon 		device_printf(sc->sc_dev, "%s: m_len < ETHER_HDR_LEN\n",
47612fb0330SPyun YongHyeon 		    __func__);
47712fb0330SPyun YongHyeon 		/* checksum will be corrupted */
47812fb0330SPyun YongHyeon 		goto sendit;
47912fb0330SPyun YongHyeon 	}
48012fb0330SPyun YongHyeon 	if (m->m_len < ETHER_HDR_LEN + sizeof(uint32_t)) {
48112fb0330SPyun YongHyeon 		if (m->m_len != ETHER_HDR_LEN) {
48212fb0330SPyun YongHyeon 			device_printf(sc->sc_dev,
48312fb0330SPyun YongHyeon 			    "%s: m_len != ETHER_HDR_LEN\n", __func__);
48412fb0330SPyun YongHyeon 			/* checksum will be corrupted */
48512fb0330SPyun YongHyeon 			goto sendit;
48612fb0330SPyun YongHyeon 		}
48712fb0330SPyun YongHyeon 		for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
48812fb0330SPyun YongHyeon 			;
48912fb0330SPyun YongHyeon 		if (m == NULL) {
49012fb0330SPyun YongHyeon 			/* checksum will be corrupted */
49112fb0330SPyun YongHyeon 			goto sendit;
49212fb0330SPyun YongHyeon 		}
49312fb0330SPyun YongHyeon 		ip = mtod(m, struct ip *);
49412fb0330SPyun YongHyeon 	} else {
49512fb0330SPyun YongHyeon 		p = mtod(m, uint8_t *);
49612fb0330SPyun YongHyeon 		p += ETHER_HDR_LEN;
49712fb0330SPyun YongHyeon 		ip = (struct ip *)p;
49812fb0330SPyun YongHyeon 	}
49912fb0330SPyun YongHyeon 	offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
50012fb0330SPyun YongHyeon 
50112fb0330SPyun YongHyeon sendit:
50212fb0330SPyun YongHyeon 	offset2 = m->m_pkthdr.csum_data;
50312fb0330SPyun YongHyeon 	*cflags = offset << GEM_TD_CXSUM_STARTSHFT;
50412fb0330SPyun YongHyeon 	*cflags |= ((offset + offset2) << GEM_TD_CXSUM_STUFFSHFT);
50512fb0330SPyun YongHyeon 	*cflags |= GEM_TD_CXSUM_ENABLE;
50612fb0330SPyun YongHyeon }
50712fb0330SPyun YongHyeon 
/*
 * gem_rxcksum: post-process the hardware-supplied RX checksum.
 *
 * When the received packet is an unfragmented IPv4 TCP or UDP datagram
 * with self-consistent lengths, convert the 16-bit sum carried in the
 * descriptor flags into csum_data and mark it CSUM_DATA_VALID for the
 * upper layers.  Anything else is left untouched and will be
 * checksummed in software.
 */
static __inline void
gem_rxcksum(struct mbuf *m, uint64_t flags)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	int32_t hlen, len, pktlen;
	uint16_t cksum, *opts;
	uint32_t temp32;

	pktlen = m->m_pkthdr.len;
	/* Need at least an Ethernet header and a minimal IP header. */
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	/* Only plain IPv4 is handled. */
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	/* Bail if the IP total length disagrees with what was received. */
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* can't handle fragmented packet */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((uint8_t *)ip + hlen);
		if (uh->uh_sum == 0)
			return; /* no checksum */
		break;
	default:
		return;
	}

	/* The descriptor carries a ones-complement sum in the low bits. */
	cksum = ~(flags & GEM_RD_CHECKSUM);
	/* checksum fixup for IP options */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (uint16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(uint16_t), opts++) {
			/* Ones-complement subtract: fold the borrow back in. */
			temp32 = cksum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			cksum = temp32 & 65535;
		}
	}
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = cksum;
}
56912fb0330SPyun YongHyeon 
57042c1b001SThomas Moestl static void
57142c1b001SThomas Moestl gem_cddma_callback(xsc, segs, nsegs, error)
57242c1b001SThomas Moestl 	void *xsc;
57342c1b001SThomas Moestl 	bus_dma_segment_t *segs;
57442c1b001SThomas Moestl 	int nsegs;
57542c1b001SThomas Moestl 	int error;
57642c1b001SThomas Moestl {
57742c1b001SThomas Moestl 	struct gem_softc *sc = (struct gem_softc *)xsc;
57842c1b001SThomas Moestl 
57942c1b001SThomas Moestl 	if (error != 0)
58042c1b001SThomas Moestl 		return;
58142c1b001SThomas Moestl 	if (nsegs != 1) {
58242c1b001SThomas Moestl 		/* can't happen... */
58342c1b001SThomas Moestl 		panic("gem_cddma_callback: bad control buffer segment count");
58442c1b001SThomas Moestl 	}
58542c1b001SThomas Moestl 	sc->sc_cddma = segs[0].ds_addr;
58642c1b001SThomas Moestl }
58742c1b001SThomas Moestl 
/*
 * gem_tick: per-second (hz) timer run with the driver lock held.
 * Harvests the MAC collision counters into if_collisions, runs the
 * MII tick and the transmit watchdog check, and re-arms itself.
 */
static void
gem_tick(arg)
	void *arg;
{
	struct gem_softc *sc = arg;
	struct ifnet *ifp;

	GEM_LOCK_ASSERT(sc, MA_OWNED);

	ifp = sc->sc_ifp;
	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_read_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT) +
	    bus_read_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT) +
	    bus_read_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT) +
	    bus_read_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0);

	mii_tick(sc->sc_mii);

	/*
	 * NOTE(review): EJUSTRETURN appears to mean the watchdog handled
	 * rescheduling (or reset the chip) itself — do not re-arm here.
	 */
	if (gem_watchdog(sc) == EJUSTRETURN)
		return;

	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}
62242c1b001SThomas Moestl 
62342c1b001SThomas Moestl static int
62442c1b001SThomas Moestl gem_bitwait(sc, r, clr, set)
62542c1b001SThomas Moestl 	struct gem_softc *sc;
62642c1b001SThomas Moestl 	bus_addr_t r;
62742c1b001SThomas Moestl 	u_int32_t clr;
62842c1b001SThomas Moestl 	u_int32_t set;
62942c1b001SThomas Moestl {
63042c1b001SThomas Moestl 	int i;
63142c1b001SThomas Moestl 	u_int32_t reg;
63242c1b001SThomas Moestl 
63342c1b001SThomas Moestl 	for (i = TRIES; i--; DELAY(100)) {
634e1bb13cdSPoul-Henning Kamp 		reg = bus_read_4(sc->sc_res[0], r);
635e87137e1SMarius Strobl 		if ((reg & clr) == 0 && (reg & set) == set)
63642c1b001SThomas Moestl 			return (1);
63742c1b001SThomas Moestl 	}
63842c1b001SThomas Moestl 	return (0);
63942c1b001SThomas Moestl }
64042c1b001SThomas Moestl 
64142c1b001SThomas Moestl void
64242c1b001SThomas Moestl gem_reset(sc)
64342c1b001SThomas Moestl 	struct gem_softc *sc;
64442c1b001SThomas Moestl {
64542c1b001SThomas Moestl 
64618100346SThomas Moestl #ifdef GEM_DEBUG
64712fb0330SPyun YongHyeon 	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
64818100346SThomas Moestl #endif
64942c1b001SThomas Moestl 	gem_reset_rx(sc);
65042c1b001SThomas Moestl 	gem_reset_tx(sc);
65142c1b001SThomas Moestl 
65242c1b001SThomas Moestl 	/* Do a full reset */
653e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
65442c1b001SThomas Moestl 	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
65542c1b001SThomas Moestl 		device_printf(sc->sc_dev, "cannot reset device\n");
65642c1b001SThomas Moestl }
65742c1b001SThomas Moestl 
65842c1b001SThomas Moestl 
65942c1b001SThomas Moestl /*
66042c1b001SThomas Moestl  * gem_rxdrain:
66142c1b001SThomas Moestl  *
66242c1b001SThomas Moestl  *	Drain the receive queue.
66342c1b001SThomas Moestl  */
66442c1b001SThomas Moestl static void
66542c1b001SThomas Moestl gem_rxdrain(sc)
66642c1b001SThomas Moestl 	struct gem_softc *sc;
66742c1b001SThomas Moestl {
66842c1b001SThomas Moestl 	struct gem_rxsoft *rxs;
66942c1b001SThomas Moestl 	int i;
67042c1b001SThomas Moestl 
67142c1b001SThomas Moestl 	for (i = 0; i < GEM_NRXDESC; i++) {
67242c1b001SThomas Moestl 		rxs = &sc->sc_rxsoft[i];
67342c1b001SThomas Moestl 		if (rxs->rxs_mbuf != NULL) {
674b2d59f42SThomas Moestl 			bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
675b2d59f42SThomas Moestl 			    BUS_DMASYNC_POSTREAD);
676305f2c06SThomas Moestl 			bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
67742c1b001SThomas Moestl 			m_freem(rxs->rxs_mbuf);
67842c1b001SThomas Moestl 			rxs->rxs_mbuf = NULL;
67942c1b001SThomas Moestl 		}
68042c1b001SThomas Moestl 	}
68142c1b001SThomas Moestl }
68242c1b001SThomas Moestl 
68342c1b001SThomas Moestl /*
68442c1b001SThomas Moestl  * Reset the whole thing.
68542c1b001SThomas Moestl  */
68642c1b001SThomas Moestl static void
68742c1b001SThomas Moestl gem_stop(ifp, disable)
68842c1b001SThomas Moestl 	struct ifnet *ifp;
68942c1b001SThomas Moestl 	int disable;
69042c1b001SThomas Moestl {
69142c1b001SThomas Moestl 	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
69242c1b001SThomas Moestl 	struct gem_txsoft *txs;
69342c1b001SThomas Moestl 
69418100346SThomas Moestl #ifdef GEM_DEBUG
69512fb0330SPyun YongHyeon 	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
69618100346SThomas Moestl #endif
69742c1b001SThomas Moestl 
69842c1b001SThomas Moestl 	callout_stop(&sc->sc_tick_ch);
6991f317bf9SMarius Strobl #ifdef GEM_RINT_TIMEOUT
7001f317bf9SMarius Strobl 	callout_stop(&sc->sc_rx_ch);
7011f317bf9SMarius Strobl #endif
70242c1b001SThomas Moestl 
70342c1b001SThomas Moestl 	/* XXX - Should we reset these instead? */
70442c1b001SThomas Moestl 	gem_disable_tx(sc);
70542c1b001SThomas Moestl 	gem_disable_rx(sc);
70642c1b001SThomas Moestl 
70742c1b001SThomas Moestl 	/*
70842c1b001SThomas Moestl 	 * Release any queued transmit buffers.
70942c1b001SThomas Moestl 	 */
71042c1b001SThomas Moestl 	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
71142c1b001SThomas Moestl 		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
71242c1b001SThomas Moestl 		if (txs->txs_ndescs != 0) {
713b2d59f42SThomas Moestl 			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
714b2d59f42SThomas Moestl 			    BUS_DMASYNC_POSTWRITE);
715305f2c06SThomas Moestl 			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
71642c1b001SThomas Moestl 			if (txs->txs_mbuf != NULL) {
71742c1b001SThomas Moestl 				m_freem(txs->txs_mbuf);
71842c1b001SThomas Moestl 				txs->txs_mbuf = NULL;
71942c1b001SThomas Moestl 			}
72042c1b001SThomas Moestl 		}
72142c1b001SThomas Moestl 		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
72242c1b001SThomas Moestl 	}
72342c1b001SThomas Moestl 
72442c1b001SThomas Moestl 	if (disable)
72542c1b001SThomas Moestl 		gem_rxdrain(sc);
72642c1b001SThomas Moestl 
72742c1b001SThomas Moestl 	/*
72842c1b001SThomas Moestl 	 * Mark the interface down and cancel the watchdog timer.
72942c1b001SThomas Moestl 	 */
73013f4c340SRobert Watson 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
7318cb37876SMarius Strobl 	sc->sc_wdog_timer = 0;
73242c1b001SThomas Moestl }
73342c1b001SThomas Moestl 
73442c1b001SThomas Moestl /*
73542c1b001SThomas Moestl  * Reset the receiver
73642c1b001SThomas Moestl  */
73742c1b001SThomas Moestl int
73842c1b001SThomas Moestl gem_reset_rx(sc)
73942c1b001SThomas Moestl 	struct gem_softc *sc;
74042c1b001SThomas Moestl {
74142c1b001SThomas Moestl 
74242c1b001SThomas Moestl 	/*
74342c1b001SThomas Moestl 	 * Resetting while DMA is in progress can cause a bus hang, so we
74442c1b001SThomas Moestl 	 * disable DMA first.
74542c1b001SThomas Moestl 	 */
74642c1b001SThomas Moestl 	gem_disable_rx(sc);
747e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_RX_CONFIG, 0);
74842c1b001SThomas Moestl 	/* Wait till it finishes */
74942c1b001SThomas Moestl 	if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0))
75042c1b001SThomas Moestl 		device_printf(sc->sc_dev, "cannot disable read dma\n");
75142c1b001SThomas Moestl 
75242c1b001SThomas Moestl 	/* Wait 5ms extra. */
75342c1b001SThomas Moestl 	DELAY(5000);
75442c1b001SThomas Moestl 
75542c1b001SThomas Moestl 	/* Finally, reset the ERX */
756e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX);
75742c1b001SThomas Moestl 	/* Wait till it finishes */
758e87137e1SMarius Strobl 	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) {
75942c1b001SThomas Moestl 		device_printf(sc->sc_dev, "cannot reset receiver\n");
76042c1b001SThomas Moestl 		return (1);
76142c1b001SThomas Moestl 	}
76242c1b001SThomas Moestl 	return (0);
76342c1b001SThomas Moestl }
76442c1b001SThomas Moestl 
76542c1b001SThomas Moestl 
76642c1b001SThomas Moestl /*
76742c1b001SThomas Moestl  * Reset the transmitter
76842c1b001SThomas Moestl  */
76942c1b001SThomas Moestl static int
77042c1b001SThomas Moestl gem_reset_tx(sc)
77142c1b001SThomas Moestl 	struct gem_softc *sc;
77242c1b001SThomas Moestl {
77342c1b001SThomas Moestl 	int i;
77442c1b001SThomas Moestl 
77542c1b001SThomas Moestl 	/*
77642c1b001SThomas Moestl 	 * Resetting while DMA is in progress can cause a bus hang, so we
77742c1b001SThomas Moestl 	 * disable DMA first.
77842c1b001SThomas Moestl 	 */
77942c1b001SThomas Moestl 	gem_disable_tx(sc);
780e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_TX_CONFIG, 0);
78142c1b001SThomas Moestl 	/* Wait till it finishes */
78242c1b001SThomas Moestl 	if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0))
78342c1b001SThomas Moestl 		device_printf(sc->sc_dev, "cannot disable read dma\n");
78442c1b001SThomas Moestl 
78542c1b001SThomas Moestl 	/* Wait 5ms extra. */
78642c1b001SThomas Moestl 	DELAY(5000);
78742c1b001SThomas Moestl 
78842c1b001SThomas Moestl 	/* Finally, reset the ETX */
789e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_TX);
79042c1b001SThomas Moestl 	/* Wait till it finishes */
79142c1b001SThomas Moestl 	for (i = TRIES; i--; DELAY(100))
792e1bb13cdSPoul-Henning Kamp 		if ((bus_read_4(sc->sc_res[0], GEM_RESET) & GEM_RESET_TX) == 0)
79342c1b001SThomas Moestl 			break;
79442c1b001SThomas Moestl 	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) {
79542c1b001SThomas Moestl 		device_printf(sc->sc_dev, "cannot reset receiver\n");
79642c1b001SThomas Moestl 		return (1);
79742c1b001SThomas Moestl 	}
79842c1b001SThomas Moestl 	return (0);
79942c1b001SThomas Moestl }
80042c1b001SThomas Moestl 
80142c1b001SThomas Moestl /*
80242c1b001SThomas Moestl  * disable receiver.
80342c1b001SThomas Moestl  */
80442c1b001SThomas Moestl static int
80542c1b001SThomas Moestl gem_disable_rx(sc)
80642c1b001SThomas Moestl 	struct gem_softc *sc;
80742c1b001SThomas Moestl {
80842c1b001SThomas Moestl 	u_int32_t cfg;
80942c1b001SThomas Moestl 
81042c1b001SThomas Moestl 	/* Flip the enable bit */
811e1bb13cdSPoul-Henning Kamp 	cfg = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
81242c1b001SThomas Moestl 	cfg &= ~GEM_MAC_RX_ENABLE;
813e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, cfg);
81442c1b001SThomas Moestl 
81542c1b001SThomas Moestl 	/* Wait for it to finish */
81642c1b001SThomas Moestl 	return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
81742c1b001SThomas Moestl }
81842c1b001SThomas Moestl 
81942c1b001SThomas Moestl /*
82042c1b001SThomas Moestl  * disable transmitter.
82142c1b001SThomas Moestl  */
82242c1b001SThomas Moestl static int
82342c1b001SThomas Moestl gem_disable_tx(sc)
82442c1b001SThomas Moestl 	struct gem_softc *sc;
82542c1b001SThomas Moestl {
82642c1b001SThomas Moestl 	u_int32_t cfg;
82742c1b001SThomas Moestl 
82842c1b001SThomas Moestl 	/* Flip the enable bit */
829e1bb13cdSPoul-Henning Kamp 	cfg = bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG);
83042c1b001SThomas Moestl 	cfg &= ~GEM_MAC_TX_ENABLE;
831e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, cfg);
83242c1b001SThomas Moestl 
83342c1b001SThomas Moestl 	/* Wait for it to finish */
83442c1b001SThomas Moestl 	return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
83542c1b001SThomas Moestl }
83642c1b001SThomas Moestl 
83742c1b001SThomas Moestl /*
83842c1b001SThomas Moestl  * Initialize interface.
83942c1b001SThomas Moestl  */
84042c1b001SThomas Moestl static int
84142c1b001SThomas Moestl gem_meminit(sc)
84242c1b001SThomas Moestl 	struct gem_softc *sc;
84342c1b001SThomas Moestl {
84442c1b001SThomas Moestl 	struct gem_rxsoft *rxs;
84542c1b001SThomas Moestl 	int i, error;
84642c1b001SThomas Moestl 
84742c1b001SThomas Moestl 	/*
84842c1b001SThomas Moestl 	 * Initialize the transmit descriptor ring.
84942c1b001SThomas Moestl 	 */
85042c1b001SThomas Moestl 	for (i = 0; i < GEM_NTXDESC; i++) {
85142c1b001SThomas Moestl 		sc->sc_txdescs[i].gd_flags = 0;
85242c1b001SThomas Moestl 		sc->sc_txdescs[i].gd_addr = 0;
85342c1b001SThomas Moestl 	}
854305f2c06SThomas Moestl 	sc->sc_txfree = GEM_MAXTXFREE;
85542c1b001SThomas Moestl 	sc->sc_txnext = 0;
856336cca9eSBenno Rice 	sc->sc_txwin = 0;
85742c1b001SThomas Moestl 
85842c1b001SThomas Moestl 	/*
85942c1b001SThomas Moestl 	 * Initialize the receive descriptor and receive job
86042c1b001SThomas Moestl 	 * descriptor rings.
86142c1b001SThomas Moestl 	 */
86242c1b001SThomas Moestl 	for (i = 0; i < GEM_NRXDESC; i++) {
86342c1b001SThomas Moestl 		rxs = &sc->sc_rxsoft[i];
86442c1b001SThomas Moestl 		if (rxs->rxs_mbuf == NULL) {
86542c1b001SThomas Moestl 			if ((error = gem_add_rxbuf(sc, i)) != 0) {
86642c1b001SThomas Moestl 				device_printf(sc->sc_dev, "unable to "
86742c1b001SThomas Moestl 				    "allocate or map rx buffer %d, error = "
86842c1b001SThomas Moestl 				    "%d\n", i, error);
86942c1b001SThomas Moestl 				/*
87042c1b001SThomas Moestl 				 * XXX Should attempt to run with fewer receive
87142c1b001SThomas Moestl 				 * XXX buffers instead of just failing.
87242c1b001SThomas Moestl 				 */
87342c1b001SThomas Moestl 				gem_rxdrain(sc);
87442c1b001SThomas Moestl 				return (1);
87542c1b001SThomas Moestl 			}
87642c1b001SThomas Moestl 		} else
87742c1b001SThomas Moestl 			GEM_INIT_RXDESC(sc, i);
87842c1b001SThomas Moestl 	}
87942c1b001SThomas Moestl 	sc->sc_rxptr = 0;
880b2d59f42SThomas Moestl 	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
881b2d59f42SThomas Moestl 	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);
88242c1b001SThomas Moestl 
88342c1b001SThomas Moestl 	return (0);
88442c1b001SThomas Moestl }
88542c1b001SThomas Moestl 
88642c1b001SThomas Moestl static int
88742c1b001SThomas Moestl gem_ringsize(sz)
88842c1b001SThomas Moestl 	int sz;
88942c1b001SThomas Moestl {
89042c1b001SThomas Moestl 	int v = 0;
89142c1b001SThomas Moestl 
89242c1b001SThomas Moestl 	switch (sz) {
89342c1b001SThomas Moestl 	case 32:
89442c1b001SThomas Moestl 		v = GEM_RING_SZ_32;
89542c1b001SThomas Moestl 		break;
89642c1b001SThomas Moestl 	case 64:
89742c1b001SThomas Moestl 		v = GEM_RING_SZ_64;
89842c1b001SThomas Moestl 		break;
89942c1b001SThomas Moestl 	case 128:
90042c1b001SThomas Moestl 		v = GEM_RING_SZ_128;
90142c1b001SThomas Moestl 		break;
90242c1b001SThomas Moestl 	case 256:
90342c1b001SThomas Moestl 		v = GEM_RING_SZ_256;
90442c1b001SThomas Moestl 		break;
90542c1b001SThomas Moestl 	case 512:
90642c1b001SThomas Moestl 		v = GEM_RING_SZ_512;
90742c1b001SThomas Moestl 		break;
90842c1b001SThomas Moestl 	case 1024:
90942c1b001SThomas Moestl 		v = GEM_RING_SZ_1024;
91042c1b001SThomas Moestl 		break;
91142c1b001SThomas Moestl 	case 2048:
91242c1b001SThomas Moestl 		v = GEM_RING_SZ_2048;
91342c1b001SThomas Moestl 		break;
91442c1b001SThomas Moestl 	case 4096:
91542c1b001SThomas Moestl 		v = GEM_RING_SZ_4096;
91642c1b001SThomas Moestl 		break;
91742c1b001SThomas Moestl 	case 8192:
91842c1b001SThomas Moestl 		v = GEM_RING_SZ_8192;
91942c1b001SThomas Moestl 		break;
92042c1b001SThomas Moestl 	default:
92142c1b001SThomas Moestl 		printf("gem: invalid Receive Descriptor ring size\n");
92242c1b001SThomas Moestl 		break;
92342c1b001SThomas Moestl 	}
92442c1b001SThomas Moestl 	return (v);
92542c1b001SThomas Moestl }
92642c1b001SThomas Moestl 
92742c1b001SThomas Moestl static void
92842c1b001SThomas Moestl gem_init(xsc)
92942c1b001SThomas Moestl 	void *xsc;
93042c1b001SThomas Moestl {
93142c1b001SThomas Moestl 	struct gem_softc *sc = (struct gem_softc *)xsc;
9328cfaff7dSMarius Strobl 
9338cfaff7dSMarius Strobl 	GEM_LOCK(sc);
9348cfaff7dSMarius Strobl 	gem_init_locked(sc);
9358cfaff7dSMarius Strobl 	GEM_UNLOCK(sc);
9368cfaff7dSMarius Strobl }
9378cfaff7dSMarius Strobl 
9388cfaff7dSMarius Strobl /*
9398cfaff7dSMarius Strobl  * Initialization of interface; set up initialization block
9408cfaff7dSMarius Strobl  * and transmit/receive descriptor rings.
9418cfaff7dSMarius Strobl  */
9428cfaff7dSMarius Strobl static void
9438cfaff7dSMarius Strobl gem_init_locked(sc)
9448cfaff7dSMarius Strobl 	struct gem_softc *sc;
9458cfaff7dSMarius Strobl {
946fc74a9f9SBrooks Davis 	struct ifnet *ifp = sc->sc_ifp;
94742c1b001SThomas Moestl 	u_int32_t v;
94842c1b001SThomas Moestl 
9498cfaff7dSMarius Strobl 	GEM_LOCK_ASSERT(sc, MA_OWNED);
95042c1b001SThomas Moestl 
95118100346SThomas Moestl #ifdef GEM_DEBUG
95212fb0330SPyun YongHyeon 	CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev),
95312fb0330SPyun YongHyeon 	    __func__);
95418100346SThomas Moestl #endif
95542c1b001SThomas Moestl 	/*
95642c1b001SThomas Moestl 	 * Initialization sequence. The numbered steps below correspond
95742c1b001SThomas Moestl 	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
95842c1b001SThomas Moestl 	 * Channel Engine manual (part of the PCIO manual).
95942c1b001SThomas Moestl 	 * See also the STP2002-STQ document from Sun Microsystems.
96042c1b001SThomas Moestl 	 */
96142c1b001SThomas Moestl 
96242c1b001SThomas Moestl 	/* step 1 & 2. Reset the Ethernet Channel */
963fc74a9f9SBrooks Davis 	gem_stop(sc->sc_ifp, 0);
96442c1b001SThomas Moestl 	gem_reset(sc);
96518100346SThomas Moestl #ifdef GEM_DEBUG
96612fb0330SPyun YongHyeon 	CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
96712fb0330SPyun YongHyeon 	    __func__);
96818100346SThomas Moestl #endif
96942c1b001SThomas Moestl 
97042c1b001SThomas Moestl 	/* Re-initialize the MIF */
97142c1b001SThomas Moestl 	gem_mifinit(sc);
97242c1b001SThomas Moestl 
97342c1b001SThomas Moestl 	/* step 3. Setup data structures in host memory */
97442c1b001SThomas Moestl 	gem_meminit(sc);
97542c1b001SThomas Moestl 
97642c1b001SThomas Moestl 	/* step 4. TX MAC registers & counters */
97742c1b001SThomas Moestl 	gem_init_regs(sc);
97842c1b001SThomas Moestl 
97942c1b001SThomas Moestl 	/* step 5. RX MAC registers & counters */
98042c1b001SThomas Moestl 	gem_setladrf(sc);
98142c1b001SThomas Moestl 
98242c1b001SThomas Moestl 	/* step 6 & 7. Program Descriptor Ring Base Addresses */
98342c1b001SThomas Moestl 	/* NOTE: we use only 32-bit DMA addresses here. */
984e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_HI, 0);
985e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));
98642c1b001SThomas Moestl 
987e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_HI, 0);
988e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
98918100346SThomas Moestl #ifdef GEM_DEBUG
99042c1b001SThomas Moestl 	CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx",
99142c1b001SThomas Moestl 	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
99218100346SThomas Moestl #endif
99342c1b001SThomas Moestl 
99442c1b001SThomas Moestl 	/* step 8. Global Configuration & Interrupt Mask */
995e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_INTMASK,
99642c1b001SThomas Moestl 		      ~(GEM_INTR_TX_INTME|
99742c1b001SThomas Moestl 			GEM_INTR_TX_EMPTY|
99842c1b001SThomas Moestl 			GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF|
99942c1b001SThomas Moestl 			GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS|
100042c1b001SThomas Moestl 			GEM_INTR_MAC_CONTROL|GEM_INTR_MIF|
100142c1b001SThomas Moestl 			GEM_INTR_BERR));
1002e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_RX_MASK,
1003336cca9eSBenno Rice 			GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT);
1004e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_TX_MASK, 0xffff); /* XXXX */
1005e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_MASK, 0); /* XXXX */
100642c1b001SThomas Moestl 
100742c1b001SThomas Moestl 	/* step 9. ETX Configuration: use mostly default values */
100842c1b001SThomas Moestl 
100942c1b001SThomas Moestl 	/* Enable DMA */
101042c1b001SThomas Moestl 	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
1011e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_TX_CONFIG,
101242c1b001SThomas Moestl 		v|GEM_TX_CONFIG_TXDMA_EN|
101342c1b001SThomas Moestl 		((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));
101442c1b001SThomas Moestl 
101542c1b001SThomas Moestl 	/* step 10. ERX Configuration */
101642c1b001SThomas Moestl 
101742c1b001SThomas Moestl 	/* Encode Receive Descriptor ring size: four possible values */
101842c1b001SThomas Moestl 	v = gem_ringsize(GEM_NRXDESC /*XXX*/);
101912fb0330SPyun YongHyeon 	/* Rx TCP/UDP checksum offset */
102012fb0330SPyun YongHyeon 	v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
102112fb0330SPyun YongHyeon 	    GEM_RX_CONFIG_CXM_START_SHFT);
102242c1b001SThomas Moestl 
102342c1b001SThomas Moestl 	/* Enable DMA */
1024e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
102542c1b001SThomas Moestl 		v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
102612fb0330SPyun YongHyeon 		(2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN);
102742c1b001SThomas Moestl 	/*
1028336cca9eSBenno Rice 	 * The following value is for an OFF Threshold of about 3/4 full
1029336cca9eSBenno Rice 	 * and an ON Threshold of 1/4 full.
103042c1b001SThomas Moestl 	 */
1031e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH,
1032336cca9eSBenno Rice 	    (3 * sc->sc_rxfifosize / 256) |
1033336cca9eSBenno Rice 	    (   (sc->sc_rxfifosize / 256) << 12));
1034e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_RX_BLANKING, (6<<12)|6);
103542c1b001SThomas Moestl 
103642c1b001SThomas Moestl 	/* step 11. Configure Media */
1037336cca9eSBenno Rice 	mii_mediachg(sc->sc_mii);
103842c1b001SThomas Moestl 
103942c1b001SThomas Moestl 	/* step 12. RX_MAC Configuration Register */
1040e1bb13cdSPoul-Henning Kamp 	v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
104112fb0330SPyun YongHyeon 	v |= GEM_MAC_RX_ENABLE | GEM_MAC_RX_STRIP_CRC;
1042e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
104342c1b001SThomas Moestl 
104442c1b001SThomas Moestl 	/* step 14. Issue Transmit Pending command */
104542c1b001SThomas Moestl 
104642c1b001SThomas Moestl 	/* step 15.  Give the reciever a swift kick */
1047e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC-4);
104842c1b001SThomas Moestl 
104942c1b001SThomas Moestl 	/* Start the one second timer. */
10508cb37876SMarius Strobl 	sc->sc_wdog_timer = 0;
105142c1b001SThomas Moestl 	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
105242c1b001SThomas Moestl 
105313f4c340SRobert Watson 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
105413f4c340SRobert Watson 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1055336cca9eSBenno Rice 	sc->sc_ifflags = ifp->if_flags;
105642c1b001SThomas Moestl }
105742c1b001SThomas Moestl 
105812fb0330SPyun YongHyeon /*
105912fb0330SPyun YongHyeon  * It's copy of ath_defrag(ath(4)).
106012fb0330SPyun YongHyeon  *
106112fb0330SPyun YongHyeon  * Defragment an mbuf chain, returning at most maxfrags separate
106212fb0330SPyun YongHyeon  * mbufs+clusters.  If this is not possible NULL is returned and
106312fb0330SPyun YongHyeon  * the original mbuf chain is left in it's present (potentially
106412fb0330SPyun YongHyeon  * modified) state.  We use two techniques: collapsing consecutive
106512fb0330SPyun YongHyeon  * mbufs and replacing consecutive mbufs by a cluster.
106612fb0330SPyun YongHyeon  */
106712fb0330SPyun YongHyeon static struct mbuf *
106812fb0330SPyun YongHyeon gem_defrag(m0, how, maxfrags)
106942c1b001SThomas Moestl 	struct mbuf *m0;
107012fb0330SPyun YongHyeon 	int how;
107112fb0330SPyun YongHyeon 	int maxfrags;
107242c1b001SThomas Moestl {
107312fb0330SPyun YongHyeon 	struct mbuf *m, *n, *n2, **prev;
107412fb0330SPyun YongHyeon 	u_int curfrags;
107512fb0330SPyun YongHyeon 
107612fb0330SPyun YongHyeon 	/*
107712fb0330SPyun YongHyeon 	 * Calculate the current number of frags.
107812fb0330SPyun YongHyeon 	 */
107912fb0330SPyun YongHyeon 	curfrags = 0;
108012fb0330SPyun YongHyeon 	for (m = m0; m != NULL; m = m->m_next)
108112fb0330SPyun YongHyeon 		curfrags++;
108212fb0330SPyun YongHyeon 	/*
108312fb0330SPyun YongHyeon 	 * First, try to collapse mbufs.  Note that we always collapse
108412fb0330SPyun YongHyeon 	 * towards the front so we don't need to deal with moving the
108512fb0330SPyun YongHyeon 	 * pkthdr.  This may be suboptimal if the first mbuf has much
108612fb0330SPyun YongHyeon 	 * less data than the following.
108712fb0330SPyun YongHyeon 	 */
108812fb0330SPyun YongHyeon 	m = m0;
108912fb0330SPyun YongHyeon again:
109012fb0330SPyun YongHyeon 	for (;;) {
109112fb0330SPyun YongHyeon 		n = m->m_next;
109212fb0330SPyun YongHyeon 		if (n == NULL)
109312fb0330SPyun YongHyeon 			break;
109412fb0330SPyun YongHyeon 		if ((m->m_flags & M_RDONLY) == 0 &&
109512fb0330SPyun YongHyeon 		    n->m_len < M_TRAILINGSPACE(m)) {
109612fb0330SPyun YongHyeon 			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
109712fb0330SPyun YongHyeon 				n->m_len);
109812fb0330SPyun YongHyeon 			m->m_len += n->m_len;
109912fb0330SPyun YongHyeon 			m->m_next = n->m_next;
110012fb0330SPyun YongHyeon 			m_free(n);
110112fb0330SPyun YongHyeon 			if (--curfrags <= maxfrags)
110212fb0330SPyun YongHyeon 				return (m0);
110312fb0330SPyun YongHyeon 		} else
110412fb0330SPyun YongHyeon 			m = n;
110512fb0330SPyun YongHyeon 	}
110612fb0330SPyun YongHyeon 	KASSERT(maxfrags > 1,
110712fb0330SPyun YongHyeon 		("maxfrags %u, but normal collapse failed", maxfrags));
110812fb0330SPyun YongHyeon 	/*
110912fb0330SPyun YongHyeon 	 * Collapse consecutive mbufs to a cluster.
111012fb0330SPyun YongHyeon 	 */
111112fb0330SPyun YongHyeon 	prev = &m0->m_next;		/* NB: not the first mbuf */
111212fb0330SPyun YongHyeon 	while ((n = *prev) != NULL) {
111312fb0330SPyun YongHyeon 		if ((n2 = n->m_next) != NULL &&
111412fb0330SPyun YongHyeon 		    n->m_len + n2->m_len < MCLBYTES) {
111512fb0330SPyun YongHyeon 			m = m_getcl(how, MT_DATA, 0);
111612fb0330SPyun YongHyeon 			if (m == NULL)
111712fb0330SPyun YongHyeon 				goto bad;
111812fb0330SPyun YongHyeon 			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
111912fb0330SPyun YongHyeon 			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
112012fb0330SPyun YongHyeon 				n2->m_len);
112112fb0330SPyun YongHyeon 			m->m_len = n->m_len + n2->m_len;
112212fb0330SPyun YongHyeon 			m->m_next = n2->m_next;
112312fb0330SPyun YongHyeon 			*prev = m;
112412fb0330SPyun YongHyeon 			m_free(n);
112512fb0330SPyun YongHyeon 			m_free(n2);
112612fb0330SPyun YongHyeon 			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
112712fb0330SPyun YongHyeon 				return m0;
112812fb0330SPyun YongHyeon 			/*
112912fb0330SPyun YongHyeon 			 * Still not there, try the normal collapse
113012fb0330SPyun YongHyeon 			 * again before we allocate another cluster.
113112fb0330SPyun YongHyeon 			 */
113212fb0330SPyun YongHyeon 			goto again;
113312fb0330SPyun YongHyeon 		}
113412fb0330SPyun YongHyeon 		prev = &n->m_next;
113512fb0330SPyun YongHyeon 	}
113612fb0330SPyun YongHyeon 	/*
113712fb0330SPyun YongHyeon 	 * No place where we can collapse to a cluster; punt.
113812fb0330SPyun YongHyeon 	 * This can occur if, for example, you request 2 frags
113912fb0330SPyun YongHyeon 	 * but the packet requires that both be clusters (we
114012fb0330SPyun YongHyeon 	 * never reallocate the first mbuf to avoid moving the
114112fb0330SPyun YongHyeon 	 * packet header).
114212fb0330SPyun YongHyeon 	 */
114312fb0330SPyun YongHyeon bad:
114412fb0330SPyun YongHyeon 	return (NULL);
114512fb0330SPyun YongHyeon }
114612fb0330SPyun YongHyeon 
114712fb0330SPyun YongHyeon static int
114812fb0330SPyun YongHyeon gem_load_txmbuf(sc, m_head)
114912fb0330SPyun YongHyeon 	struct gem_softc *sc;
115012fb0330SPyun YongHyeon 	struct mbuf **m_head;
115112fb0330SPyun YongHyeon {
115242c1b001SThomas Moestl 	struct gem_txsoft *txs;
115312fb0330SPyun YongHyeon 	bus_dma_segment_t txsegs[GEM_NTXSEGS];
115412fb0330SPyun YongHyeon 	struct mbuf *m;
115512fb0330SPyun YongHyeon 	uint64_t flags, cflags;
115612fb0330SPyun YongHyeon 	int error, nexttx, nsegs, seg;
115742c1b001SThomas Moestl 
115842c1b001SThomas Moestl 	/* Get a work queue entry. */
115942c1b001SThomas Moestl 	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
1160305f2c06SThomas Moestl 		/* Ran out of descriptors. */
116112fb0330SPyun YongHyeon 		return (ENOBUFS);
1162305f2c06SThomas Moestl 	}
116312fb0330SPyun YongHyeon 	error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
116412fb0330SPyun YongHyeon 	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
116512fb0330SPyun YongHyeon 	if (error == EFBIG) {
116612fb0330SPyun YongHyeon 		m = gem_defrag(*m_head, M_DONTWAIT, GEM_NTXSEGS);
116712fb0330SPyun YongHyeon 		if (m == NULL) {
116812fb0330SPyun YongHyeon 			m_freem(*m_head);
116912fb0330SPyun YongHyeon 			*m_head = NULL;
117012fb0330SPyun YongHyeon 			return (ENOBUFS);
117112fb0330SPyun YongHyeon 		}
117212fb0330SPyun YongHyeon 		*m_head = m;
117312fb0330SPyun YongHyeon 		error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
117412fb0330SPyun YongHyeon 		    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
117512fb0330SPyun YongHyeon 		if (error != 0) {
117612fb0330SPyun YongHyeon 			m_freem(*m_head);
117712fb0330SPyun YongHyeon 			*m_head = NULL;
117812fb0330SPyun YongHyeon 			return (error);
117912fb0330SPyun YongHyeon 		}
118012fb0330SPyun YongHyeon 	} else if (error != 0)
118112fb0330SPyun YongHyeon 		return (error);
118212fb0330SPyun YongHyeon 	if (nsegs == 0) {
118312fb0330SPyun YongHyeon 		m_freem(*m_head);
118412fb0330SPyun YongHyeon 		*m_head = NULL;
118512fb0330SPyun YongHyeon 		return (EIO);
118612fb0330SPyun YongHyeon 	}
118712fb0330SPyun YongHyeon 
118812fb0330SPyun YongHyeon 	/*
118912fb0330SPyun YongHyeon 	 * Ensure we have enough descriptors free to describe
119012fb0330SPyun YongHyeon 	 * the packet.  Note, we always reserve one descriptor
119112fb0330SPyun YongHyeon 	 * at the end of the ring as a termination point, to
119212fb0330SPyun YongHyeon 	 * prevent wrap-around.
119312fb0330SPyun YongHyeon 	 */
119412fb0330SPyun YongHyeon 	if (nsegs > sc->sc_txfree - 1) {
119512fb0330SPyun YongHyeon 		txs->txs_ndescs = 0;
119612fb0330SPyun YongHyeon 		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
119712fb0330SPyun YongHyeon 		return (ENOBUFS);
119812fb0330SPyun YongHyeon 	}
119912fb0330SPyun YongHyeon 
120012fb0330SPyun YongHyeon 	flags = cflags = 0;
120112fb0330SPyun YongHyeon 	if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0)
120212fb0330SPyun YongHyeon 		gem_txcksum(sc, *m_head, &cflags);
120312fb0330SPyun YongHyeon 
120412fb0330SPyun YongHyeon 	txs->txs_ndescs = nsegs;
1205305f2c06SThomas Moestl 	txs->txs_firstdesc = sc->sc_txnext;
120612fb0330SPyun YongHyeon 	nexttx = txs->txs_firstdesc;
120712fb0330SPyun YongHyeon 	for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) {
120812fb0330SPyun YongHyeon #ifdef	GEM_DEBUG
120912fb0330SPyun YongHyeon 		CTR6(KTR_GEM, "%s: mapping seg %d (txd %d), len "
121012fb0330SPyun YongHyeon 		    "%lx, addr %#lx (%#lx)", __func__, seg, nexttx,
121112fb0330SPyun YongHyeon 		    txsegs[seg].ds_len, txsegs[seg].ds_addr,
121212fb0330SPyun YongHyeon 		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr));
121312fb0330SPyun YongHyeon #endif
121412fb0330SPyun YongHyeon 		sc->sc_txdescs[nexttx].gd_addr =
121512fb0330SPyun YongHyeon 		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr);
121612fb0330SPyun YongHyeon 		KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE,
121712fb0330SPyun YongHyeon 		    ("%s: segment size too large!", __func__));
121812fb0330SPyun YongHyeon 		flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE;
121912fb0330SPyun YongHyeon 		sc->sc_txdescs[nexttx].gd_flags =
122012fb0330SPyun YongHyeon 		    GEM_DMA_WRITE(sc, flags | cflags);
122112fb0330SPyun YongHyeon 		txs->txs_lastdesc = nexttx;
122242c1b001SThomas Moestl 	}
1223305f2c06SThomas Moestl 
122412fb0330SPyun YongHyeon 	/* set EOP on the last descriptor */
122512fb0330SPyun YongHyeon #ifdef	GEM_DEBUG
122612fb0330SPyun YongHyeon 	CTR3(KTR_GEM, "%s: end of packet at seg %d, tx %d", __func__, seg,
122712fb0330SPyun YongHyeon 	    nexttx);
122812fb0330SPyun YongHyeon #endif
122912fb0330SPyun YongHyeon 	sc->sc_txdescs[txs->txs_lastdesc].gd_flags |=
123012fb0330SPyun YongHyeon 	    GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET);
123112fb0330SPyun YongHyeon 
123212fb0330SPyun YongHyeon 	/* Lastly set SOP on the first descriptor */
123312fb0330SPyun YongHyeon #ifdef	GEM_DEBUG
123412fb0330SPyun YongHyeon 	CTR3(KTR_GEM, "%s: start of packet at seg %d, tx %d", __func__, seg,
123512fb0330SPyun YongHyeon 	    nexttx);
123612fb0330SPyun YongHyeon #endif
123712fb0330SPyun YongHyeon 	if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
123812fb0330SPyun YongHyeon 		sc->sc_txwin = 0;
123912fb0330SPyun YongHyeon 		flags |= GEM_TD_INTERRUPT_ME;
124012fb0330SPyun YongHyeon 		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
124112fb0330SPyun YongHyeon 		    GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME |
124212fb0330SPyun YongHyeon 		    GEM_TD_START_OF_PACKET);
124312fb0330SPyun YongHyeon 	} else
124412fb0330SPyun YongHyeon 		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
124512fb0330SPyun YongHyeon 		    GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET);
124612fb0330SPyun YongHyeon 
124742c1b001SThomas Moestl 	/* Sync the DMA map. */
124812fb0330SPyun YongHyeon 	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, BUS_DMASYNC_PREWRITE);
1249305f2c06SThomas Moestl 
125018100346SThomas Moestl #ifdef GEM_DEBUG
125112fb0330SPyun YongHyeon 	CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
125212fb0330SPyun YongHyeon 	    __func__, txs->txs_firstdesc, txs->txs_lastdesc, txs->txs_ndescs);
125318100346SThomas Moestl #endif
125442c1b001SThomas Moestl 	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1255305f2c06SThomas Moestl 	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
125612fb0330SPyun YongHyeon 	txs->txs_mbuf = *m_head;
1257305f2c06SThomas Moestl 
1258305f2c06SThomas Moestl 	sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
1259305f2c06SThomas Moestl 	sc->sc_txfree -= txs->txs_ndescs;
126042c1b001SThomas Moestl 
126112fb0330SPyun YongHyeon 	return (0);
126242c1b001SThomas Moestl }
126342c1b001SThomas Moestl 
/*
 * gem_init_regs:
 *
 *	Load the chip's fixed MAC parameters.  On the first call also
 *	program the registers that are not cleared by a chip reset
 *	(inter-packet gaps, frame size limits, MAC defaults); every call
 *	zeroes the statistics counters, un-pauses the MAC, reloads the
 *	station address and configures the MII/GMII output drivers.
 *	Called with the softc lock held.
 */
static void
gem_init_regs(sc)
	struct gem_softc *sc;
{
	const u_char *laddr = IF_LLADDR(sc->sc_ifp);
	u_int32_t v;

	/* These regs are not cleared on reset */
	if (!sc->sc_inited) {

		/* Wooo.  Magic values. */
		bus_write_4(sc->sc_res[0], GEM_MAC_IPG0, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_IPG1, 8);
		bus_write_4(sc->sc_res[0], GEM_MAC_IPG2, 4);

		bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/*
		 * Max frame and max burst size: low half is the frame
		 * size limit (including a VLAN tag), upper half the
		 * burst size (0x2000).
		 */
		bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MAX_FRAME,
		    (ETHER_MAX_LEN + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) |
		    (0x2000 << 16));

		bus_write_4(sc->sc_res[0], GEM_MAC_PREAMBLE_LEN, 0x7);
		bus_write_4(sc->sc_res[0], GEM_MAC_JAM_SIZE, 0x4);
		bus_write_4(sc->sc_res[0], GEM_MAC_ATTEMPT_LIMIT, 0x10);
		/* Dunno.... */
		bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_TYPE, 0x8088);
		/* Seed the backoff generator from the station address. */
		bus_write_4(sc->sc_res[0], GEM_MAC_RANDOM_SEED,
		    ((laddr[5]<<8)|laddr[4])&0x3ff);

		/* Secondary MAC addr set to 0:0:0:0:0:0 */
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR3, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR4, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR5, 0);

		/* MAC control addr set to 01:80:c2:00:00:01 */
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR6, 0x0001);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR7, 0xc200);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR8, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER0, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER1, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER2, 0);

		bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK1_2, 0);
		bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK0, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_DEFER_TMR_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_PEAK_ATTEMPTS, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_FRAME_COUNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_LEN_ERR_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_ALIGN_ERR, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CRC_ERR_CNT, 0);
	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
#if 0
	bus_write_4(sc->sc_res[0], GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
#else
	bus_write_4(sc->sc_res[0], GEM_MAC_SEND_PAUSE_CMD, 0);
#endif

	/*
	 * Set the station address.
	 */
	bus_write_4(sc->sc_res[0], GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]);
	bus_write_4(sc->sc_res[0], GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]);
	bus_write_4(sc->sc_res[0], GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]);

	/*
	 * Enable MII outputs.  Enable GMII if there is a gigabit PHY.
	 */
	sc->sc_mif_config = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG);
	v = GEM_MAC_XIF_TX_MII_ENA;
	if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) {
		v |= GEM_MAC_XIF_FDPLX_LED;
		if (sc->sc_flags & GEM_GIGABIT)
			v |= GEM_MAC_XIF_GMII_MODE;
	}
	bus_write_4(sc->sc_res[0], GEM_MAC_XIF_CONFIG, v);
}
135342c1b001SThomas Moestl 
135442c1b001SThomas Moestl static void
135542c1b001SThomas Moestl gem_start(ifp)
135642c1b001SThomas Moestl 	struct ifnet *ifp;
135742c1b001SThomas Moestl {
135842c1b001SThomas Moestl 	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
13598cfaff7dSMarius Strobl 
13608cfaff7dSMarius Strobl 	GEM_LOCK(sc);
13618cfaff7dSMarius Strobl 	gem_start_locked(ifp);
13628cfaff7dSMarius Strobl 	GEM_UNLOCK(sc);
13638cfaff7dSMarius Strobl }
13648cfaff7dSMarius Strobl 
/*
 * gem_start_locked:
 *
 *	Dequeue packets from the interface send queue, map them into the
 *	transmit descriptor ring via gem_load_txmbuf() and kick the chip.
 *	Stops when the queue is empty, fewer than two descriptors remain,
 *	or a mapping fails (in which case the packet is prepended back to
 *	the queue and IFF_DRV_OACTIVE is set).  Called with the softc
 *	lock held.
 */
static void
gem_start_locked(ifp)
	struct ifnet *ifp;
{
	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
	struct mbuf *m;
	int firsttx, ntx = 0, txmfail;

	/* Do nothing unless running and not already flow-blocked. */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	firsttx = sc->sc_txnext;
#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d",
	    device_get_name(sc->sc_dev), __func__, sc->sc_txfree, firsttx);
#endif
	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		/*
		 * gem_load_txmbuf() may free and NULL out the mbuf
		 * (unrecoverable failure) or leave it intact (no room:
		 * put it back and stall the queue).
		 */
		txmfail = gem_load_txmbuf(sc, &m);
		if (txmfail != 0) {
			if (m == NULL)
				break;
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m);
			break;
		}
		ntx++;
		/*
		 * Kick the transmitter.
		 *
		 * NOTE(review): the descriptor ring is only synced with
		 * GEM_CDSYNC(..., PREWRITE) after this loop, i.e. after
		 * the chip has already been kicked here — looks like the
		 * sync should precede the kick; confirm against bus_dma(9)
		 * ordering requirements.
		 */
#ifdef	GEM_DEBUG
		CTR3(KTR_GEM, "%s: %s: kicking tx %d",
		    device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
#endif
		bus_write_4(sc->sc_res[0], GEM_TX_KICK,
			sc->sc_txnext);

		/* Tap a copy of the outgoing packet for BPF listeners. */
		BPF_MTAP(ifp, m);
	}

	if (ntx > 0) {
		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);

#ifdef GEM_DEBUG
		CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
		    device_get_name(sc->sc_dev), firsttx);
#endif

		/* Set a watchdog timer in case the chip flakes out. */
		sc->sc_wdog_timer = 5;
#ifdef GEM_DEBUG
		CTR3(KTR_GEM, "%s: %s: watchdog %d",
		    device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
#endif
	}
}
142242c1b001SThomas Moestl 
142342c1b001SThomas Moestl /*
142442c1b001SThomas Moestl  * Transmit interrupt.
142542c1b001SThomas Moestl  */
142642c1b001SThomas Moestl static void
142742c1b001SThomas Moestl gem_tint(sc)
142842c1b001SThomas Moestl 	struct gem_softc *sc;
142942c1b001SThomas Moestl {
1430fc74a9f9SBrooks Davis 	struct ifnet *ifp = sc->sc_ifp;
143142c1b001SThomas Moestl 	struct gem_txsoft *txs;
143242c1b001SThomas Moestl 	int txlast;
1433336cca9eSBenno Rice 	int progress = 0;
143442c1b001SThomas Moestl 
143542c1b001SThomas Moestl 
143618100346SThomas Moestl #ifdef GEM_DEBUG
143712fb0330SPyun YongHyeon 	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
143818100346SThomas Moestl #endif
143942c1b001SThomas Moestl 
144042c1b001SThomas Moestl 	/*
144142c1b001SThomas Moestl 	 * Go through our Tx list and free mbufs for those
144242c1b001SThomas Moestl 	 * frames that have been transmitted.
144342c1b001SThomas Moestl 	 */
1444b2d59f42SThomas Moestl 	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
144542c1b001SThomas Moestl 	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
144642c1b001SThomas Moestl 
144742c1b001SThomas Moestl #ifdef GEM_DEBUG
144842c1b001SThomas Moestl 		if (ifp->if_flags & IFF_DEBUG) {
144942c1b001SThomas Moestl 			int i;
145042c1b001SThomas Moestl 			printf("    txsoft %p transmit chain:\n", txs);
145142c1b001SThomas Moestl 			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
145242c1b001SThomas Moestl 				printf("descriptor %d: ", i);
145342c1b001SThomas Moestl 				printf("gd_flags: 0x%016llx\t", (long long)
145442c1b001SThomas Moestl 					GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
145542c1b001SThomas Moestl 				printf("gd_addr: 0x%016llx\n", (long long)
145642c1b001SThomas Moestl 					GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
145742c1b001SThomas Moestl 				if (i == txs->txs_lastdesc)
145842c1b001SThomas Moestl 					break;
145942c1b001SThomas Moestl 			}
146042c1b001SThomas Moestl 		}
146142c1b001SThomas Moestl #endif
146242c1b001SThomas Moestl 
146342c1b001SThomas Moestl 		/*
146442c1b001SThomas Moestl 		 * In theory, we could harveast some descriptors before
146542c1b001SThomas Moestl 		 * the ring is empty, but that's a bit complicated.
146642c1b001SThomas Moestl 		 *
146742c1b001SThomas Moestl 		 * GEM_TX_COMPLETION points to the last descriptor
146842c1b001SThomas Moestl 		 * processed +1.
146942c1b001SThomas Moestl 		 */
1470e1bb13cdSPoul-Henning Kamp 		txlast = bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION);
147118100346SThomas Moestl #ifdef GEM_DEBUG
147212fb0330SPyun YongHyeon 		CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, "
147342c1b001SThomas Moestl 		    "txs->txs_lastdesc = %d, txlast = %d",
147412fb0330SPyun YongHyeon 		    __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
147518100346SThomas Moestl #endif
147642c1b001SThomas Moestl 		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
147742c1b001SThomas Moestl 			if ((txlast >= txs->txs_firstdesc) &&
147842c1b001SThomas Moestl 				(txlast <= txs->txs_lastdesc))
147942c1b001SThomas Moestl 				break;
148042c1b001SThomas Moestl 		} else {
148142c1b001SThomas Moestl 			/* Ick -- this command wraps */
148242c1b001SThomas Moestl 			if ((txlast >= txs->txs_firstdesc) ||
148342c1b001SThomas Moestl 				(txlast <= txs->txs_lastdesc))
148442c1b001SThomas Moestl 				break;
148542c1b001SThomas Moestl 		}
148642c1b001SThomas Moestl 
148718100346SThomas Moestl #ifdef GEM_DEBUG
148812fb0330SPyun YongHyeon 		CTR1(KTR_GEM, "%s: releasing a desc", __func__);
148918100346SThomas Moestl #endif
149042c1b001SThomas Moestl 		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
149142c1b001SThomas Moestl 
149242c1b001SThomas Moestl 		sc->sc_txfree += txs->txs_ndescs;
149342c1b001SThomas Moestl 
1494305f2c06SThomas Moestl 		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
149542c1b001SThomas Moestl 		    BUS_DMASYNC_POSTWRITE);
1496305f2c06SThomas Moestl 		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
149742c1b001SThomas Moestl 		if (txs->txs_mbuf != NULL) {
149842c1b001SThomas Moestl 			m_freem(txs->txs_mbuf);
149942c1b001SThomas Moestl 			txs->txs_mbuf = NULL;
150042c1b001SThomas Moestl 		}
150142c1b001SThomas Moestl 
150242c1b001SThomas Moestl 		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
150342c1b001SThomas Moestl 
150442c1b001SThomas Moestl 		ifp->if_opackets++;
1505336cca9eSBenno Rice 		progress = 1;
150642c1b001SThomas Moestl 	}
150742c1b001SThomas Moestl 
150818100346SThomas Moestl #ifdef GEM_DEBUG
150912fb0330SPyun YongHyeon 	CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x "
151042c1b001SThomas Moestl 		"GEM_TX_DATA_PTR %llx "
151142c1b001SThomas Moestl 		"GEM_TX_COMPLETION %x",
151212fb0330SPyun YongHyeon 		__func__,
151312fb0330SPyun YongHyeon 		bus_space_read_4(sc->sc_res[0], sc->sc_h, GEM_TX_STATE_MACHINE),
151412fb0330SPyun YongHyeon 		((long long) bus_4(sc->sc_res[0],
151542c1b001SThomas Moestl 			GEM_TX_DATA_PTR_HI) << 32) |
1516e1bb13cdSPoul-Henning Kamp 			     bus_read_4(sc->sc_res[0],
151742c1b001SThomas Moestl 			GEM_TX_DATA_PTR_LO),
1518e1bb13cdSPoul-Henning Kamp 		bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION));
151918100346SThomas Moestl #endif
152042c1b001SThomas Moestl 
1521336cca9eSBenno Rice 	if (progress) {
1522336cca9eSBenno Rice 		if (sc->sc_txfree == GEM_NTXDESC - 1)
1523336cca9eSBenno Rice 			sc->sc_txwin = 0;
152442c1b001SThomas Moestl 
152513f4c340SRobert Watson 		/* Freed some descriptors, so reset IFF_DRV_OACTIVE and restart. */
152613f4c340SRobert Watson 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
15278cb37876SMarius Strobl 		sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;
152812fb0330SPyun YongHyeon 
152912fb0330SPyun YongHyeon 		if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
153012fb0330SPyun YongHyeon 		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
153112fb0330SPyun YongHyeon 			gem_start_locked(ifp);
1532336cca9eSBenno Rice 	}
153342c1b001SThomas Moestl 
153418100346SThomas Moestl #ifdef GEM_DEBUG
153512fb0330SPyun YongHyeon 	CTR3(KTR_GEM, "%s: %s: watchdog %d",
153612fb0330SPyun YongHyeon 	    device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
153718100346SThomas Moestl #endif
153842c1b001SThomas Moestl }
153942c1b001SThomas Moestl 
#ifdef GEM_RINT_TIMEOUT
/*
 * Deferred receive service, scheduled via callout by gem_rint() when a
 * completed descriptor was still marked owned by the hardware.  Runs
 * with the softc lock held and simply re-runs the RX handler.
 */
static void
gem_rint_timeout(arg)
	void *arg;
{
	struct gem_softc *sc = arg;

	GEM_LOCK_ASSERT(sc, MA_OWNED);
	gem_rint(sc);
}
#endif
15510d80b9bdSThomas Moestl 
/*
 * gem_rint:
 *
 *	Receive interrupt service.  Walks the receive ring from
 *	sc_rxptr up to the hardware completion index, replaces each
 *	filled buffer with a fresh mbuf cluster and passes the packet to
 *	the stack (dropping the softc lock around if_input).  Called
 *	with the softc lock held.
 */
static void
gem_rint(sc)
	struct gem_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	struct gem_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t rxstat;
	u_int32_t rxcomp;
	int i, len, progress = 0;

#ifdef GEM_RINT_TIMEOUT
	/* Cancel any pending deferred service; we are running now. */
	callout_stop(&sc->sc_rx_ch);
#endif
#ifdef GEM_DEBUG
	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
#endif

	/*
	 * Read the completion register once.  This limits
	 * how long the following loop can execute.
	 */
	rxcomp = bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION);

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: sc->rxptr %d, complete %d",
	    __func__, sc->sc_rxptr, rxcomp);
#endif
	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
	for (i = sc->sc_rxptr; i != rxcomp;
	     i = GEM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags);

		if (rxstat & GEM_RD_OWN) {
#ifdef GEM_RINT_TIMEOUT
			/*
			 * The descriptor is still marked as owned, although
			 * it is supposed to have completed. This has been
			 * observed on some machines. Just exiting here
			 * might leave the packet sitting around until another
			 * one arrives to trigger a new interrupt, which is
			 * generally undesirable, so set up a timeout.
			 */
			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
			    gem_rint_timeout, sc);
#endif
			break;
		}

		progress++;
		ifp->if_ipackets++;

		/* Bad frame: count it and recycle the buffer in place. */
		if (rxstat & GEM_RD_BAD_CRC) {
			ifp->if_ierrors++;
			device_printf(sc->sc_dev, "receive error: CRC error\n");
			GEM_INIT_RXDESC(sc, i);
			continue;
		}

#ifdef GEM_DEBUG
		if (ifp->if_flags & IFF_DEBUG) {
			printf("    rxsoft %p descriptor %d: ", rxs, i);
			printf("gd_flags: 0x%016llx\t", (long long)
				GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags));
			printf("gd_addr: 0x%016llx\n", (long long)
				GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr));
		}
#endif

		/*
		 * No errors; receive the packet.
		 */
		len = GEM_RD_BUFLEN(rxstat);

		/*
		 * Allocate a new mbuf cluster.  If that fails, we are
		 * out of memory, and must drop the packet and recycle
		 * the buffer that's already attached to this descriptor.
		 */
		m = rxs->rxs_mbuf;
		if (gem_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			GEM_INIT_RXDESC(sc, i);
			continue;
		}
		m->m_data += 2; /* We're already off by two */

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		/* Let the hardware-computed checksum stand in if enabled. */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
			gem_rxcksum(m, rxstat);

		/* Pass it on.  Drop the lock across the stack upcall. */
		GEM_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		GEM_LOCK(sc);
	}

	if (progress) {
		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
		/* Update the receive pointer. */
		if (i == sc->sc_rxptr) {
			device_printf(sc->sc_dev, "rint: ring wrap\n");
		}
		sc->sc_rxptr = i;
		/* Tell the chip the last descriptor it may fill. */
		bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_PREVRX(i));
	}

#ifdef GEM_DEBUG
	CTR3(KTR_GEM, "%s: done sc->rxptr %d, complete %d", __func__,
		sc->sc_rxptr, bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION));
#endif
}
167142c1b001SThomas Moestl 
167242c1b001SThomas Moestl 
167342c1b001SThomas Moestl /*
167442c1b001SThomas Moestl  * gem_add_rxbuf:
167542c1b001SThomas Moestl  *
167642c1b001SThomas Moestl  *	Add a receive buffer to the indicated descriptor.
167742c1b001SThomas Moestl  */
167842c1b001SThomas Moestl static int
167942c1b001SThomas Moestl gem_add_rxbuf(sc, idx)
168042c1b001SThomas Moestl 	struct gem_softc *sc;
168142c1b001SThomas Moestl 	int idx;
168242c1b001SThomas Moestl {
168342c1b001SThomas Moestl 	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
168442c1b001SThomas Moestl 	struct mbuf *m;
1685c3d5598aSMarius Strobl 	bus_dma_segment_t segs[1];
1686c3d5598aSMarius Strobl 	int error, nsegs;
168742c1b001SThomas Moestl 
1688a163d034SWarner Losh 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
168942c1b001SThomas Moestl 	if (m == NULL)
169042c1b001SThomas Moestl 		return (ENOBUFS);
1691305f2c06SThomas Moestl 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
169242c1b001SThomas Moestl 
169342c1b001SThomas Moestl #ifdef GEM_DEBUG
169442c1b001SThomas Moestl 	/* bzero the packet to check dma */
169542c1b001SThomas Moestl 	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
169642c1b001SThomas Moestl #endif
169742c1b001SThomas Moestl 
1698b2d59f42SThomas Moestl 	if (rxs->rxs_mbuf != NULL) {
1699b2d59f42SThomas Moestl 		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
1700b2d59f42SThomas Moestl 		    BUS_DMASYNC_POSTREAD);
1701305f2c06SThomas Moestl 		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
1702b2d59f42SThomas Moestl 	}
170342c1b001SThomas Moestl 
170442c1b001SThomas Moestl 	rxs->rxs_mbuf = m;
170542c1b001SThomas Moestl 
1706c3d5598aSMarius Strobl 	error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
1707c3d5598aSMarius Strobl 	    m, segs, &nsegs, BUS_DMA_NOWAIT);
1708c3d5598aSMarius Strobl 	/* If nsegs is wrong then the stack is corrupt. */
1709c3d5598aSMarius Strobl 	KASSERT(nsegs == 1, ("Too many segments returned!"));
1710c3d5598aSMarius Strobl 	if (error != 0) {
171142c1b001SThomas Moestl 		device_printf(sc->sc_dev, "can't load rx DMA map %d, error = "
171242c1b001SThomas Moestl 		    "%d\n", idx, error);
1713c3d5598aSMarius Strobl 		m_freem(m);
1714c3d5598aSMarius Strobl 		return (ENOBUFS);
171542c1b001SThomas Moestl 	}
1716c3d5598aSMarius Strobl 	rxs->rxs_paddr = segs[0].ds_addr;
171742c1b001SThomas Moestl 
1718305f2c06SThomas Moestl 	bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);
171942c1b001SThomas Moestl 
172042c1b001SThomas Moestl 	GEM_INIT_RXDESC(sc, idx);
172142c1b001SThomas Moestl 
172242c1b001SThomas Moestl 	return (0);
172342c1b001SThomas Moestl }
172442c1b001SThomas Moestl 
172542c1b001SThomas Moestl 
172642c1b001SThomas Moestl static void
172742c1b001SThomas Moestl gem_eint(sc, status)
172842c1b001SThomas Moestl 	struct gem_softc *sc;
172942c1b001SThomas Moestl 	u_int status;
173042c1b001SThomas Moestl {
173142c1b001SThomas Moestl 
173242c1b001SThomas Moestl 	if ((status & GEM_INTR_MIF) != 0) {
173342c1b001SThomas Moestl 		device_printf(sc->sc_dev, "XXXlink status changed\n");
173442c1b001SThomas Moestl 		return;
173542c1b001SThomas Moestl 	}
173642c1b001SThomas Moestl 
173742c1b001SThomas Moestl 	device_printf(sc->sc_dev, "status=%x\n", status);
173842c1b001SThomas Moestl }
173942c1b001SThomas Moestl 
174042c1b001SThomas Moestl 
/*
 * gem_intr:
 *
 *	Main interrupt handler.  Reads GEM_STATUS (which clears the
 *	latched bits) and dispatches to the error, transmit and receive
 *	service routines, then handles MAC-level TX/RX fault status.
 */
void
gem_intr(v)
	void *v;
{
	struct gem_softc *sc = (struct gem_softc *)v;
	u_int32_t status;

	GEM_LOCK(sc);
	status = bus_read_4(sc->sc_res[0], GEM_STATUS);
#ifdef GEM_DEBUG
	CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
		device_get_name(sc->sc_dev), __func__, (status>>19),
		(u_int)status);
#endif

	/* Bus or RX tag errors: just report for now. */
	if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0)
		gem_eint(sc, status);

	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
		gem_tint(sc);

	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
		gem_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & GEM_INTR_TX_MAC) {
		int txstat = bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS);
		if (txstat & ~GEM_MAC_TX_XMIT_DONE)
			device_printf(sc->sc_dev, "MAC tx fault, status %x\n",
			    txstat);
		/* Underrun/oversize faults require a full reinit. */
		if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
			gem_init_locked(sc);
	}
	if (status & GEM_INTR_RX_MAC) {
		int rxstat = bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS);
		/*
		 * On some chip revisions GEM_MAC_RX_OVERFLOW happen often
		 * due to a silicon bug so handle them silently.
		 */
		if (rxstat & GEM_MAC_RX_OVERFLOW)
			gem_init_locked(sc);
		else if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
			device_printf(sc->sc_dev, "MAC rx fault, status %x\n",
			    rxstat);
	}
	GEM_UNLOCK(sc);
}
178842c1b001SThomas Moestl 
17898cb37876SMarius Strobl static int
17908cb37876SMarius Strobl gem_watchdog(sc)
17918cb37876SMarius Strobl 	struct gem_softc *sc;
179242c1b001SThomas Moestl {
179342c1b001SThomas Moestl 
17948cb37876SMarius Strobl 	GEM_LOCK_ASSERT(sc, MA_OWNED);
17958cb37876SMarius Strobl 
179618100346SThomas Moestl #ifdef GEM_DEBUG
179712fb0330SPyun YongHyeon 	CTR4(KTR_GEM, "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
179812fb0330SPyun YongHyeon 		"GEM_MAC_RX_CONFIG %x", __func__,
1799e1bb13cdSPoul-Henning Kamp 		bus_read_4(sc->sc_res[0], GEM_RX_CONFIG),
1800e1bb13cdSPoul-Henning Kamp 		bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS),
1801e1bb13cdSPoul-Henning Kamp 		bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG));
180212fb0330SPyun YongHyeon 	CTR4(KTR_GEM, "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
180312fb0330SPyun YongHyeon 		"GEM_MAC_TX_CONFIG %x", __func__,
1804e1bb13cdSPoul-Henning Kamp 		bus_read_4(sc->sc_res[0], GEM_TX_CONFIG),
1805e1bb13cdSPoul-Henning Kamp 		bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS),
1806e1bb13cdSPoul-Henning Kamp 		bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG));
180718100346SThomas Moestl #endif
180842c1b001SThomas Moestl 
18098cb37876SMarius Strobl 	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
18108cb37876SMarius Strobl 		return (0);
18118cb37876SMarius Strobl 
181242c1b001SThomas Moestl 	device_printf(sc->sc_dev, "device timeout\n");
18138cb37876SMarius Strobl 	++sc->sc_ifp->if_oerrors;
181442c1b001SThomas Moestl 
181542c1b001SThomas Moestl 	/* Try to get more packets going. */
18168cfaff7dSMarius Strobl 	gem_init_locked(sc);
18178cb37876SMarius Strobl 	return (EJUSTRETURN);
181842c1b001SThomas Moestl }
181942c1b001SThomas Moestl 
182042c1b001SThomas Moestl /*
182142c1b001SThomas Moestl  * Initialize the MII Management Interface
182242c1b001SThomas Moestl  */
182342c1b001SThomas Moestl static void
182442c1b001SThomas Moestl gem_mifinit(sc)
182542c1b001SThomas Moestl 	struct gem_softc *sc;
182642c1b001SThomas Moestl {
182742c1b001SThomas Moestl 
182842c1b001SThomas Moestl 	/* Configure the MIF in frame mode */
1829e1bb13cdSPoul-Henning Kamp 	sc->sc_mif_config = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG);
183042c1b001SThomas Moestl 	sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA;
1831e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, sc->sc_mif_config);
183242c1b001SThomas Moestl }
183342c1b001SThomas Moestl 
183442c1b001SThomas Moestl /*
183542c1b001SThomas Moestl  * MII interface
183642c1b001SThomas Moestl  *
183742c1b001SThomas Moestl  * The GEM MII interface supports at least three different operating modes:
183842c1b001SThomas Moestl  *
183942c1b001SThomas Moestl  * Bitbang mode is implemented using data, clock and output enable registers.
184042c1b001SThomas Moestl  *
184142c1b001SThomas Moestl  * Frame mode is implemented by loading a complete frame into the frame
184242c1b001SThomas Moestl  * register and polling the valid bit for completion.
184342c1b001SThomas Moestl  *
184442c1b001SThomas Moestl  * Polling mode uses the frame register but completion is indicated by
184542c1b001SThomas Moestl  * an interrupt.
184642c1b001SThomas Moestl  *
184742c1b001SThomas Moestl  */
184842c1b001SThomas Moestl int
184942c1b001SThomas Moestl gem_mii_readreg(dev, phy, reg)
185042c1b001SThomas Moestl 	device_t dev;
185142c1b001SThomas Moestl 	int phy, reg;
185242c1b001SThomas Moestl {
185342c1b001SThomas Moestl 	struct gem_softc *sc = device_get_softc(dev);
185442c1b001SThomas Moestl 	int n;
185542c1b001SThomas Moestl 	u_int32_t v;
185642c1b001SThomas Moestl 
185742c1b001SThomas Moestl #ifdef GEM_DEBUG_PHY
185842c1b001SThomas Moestl 	printf("gem_mii_readreg: phy %d reg %d\n", phy, reg);
185942c1b001SThomas Moestl #endif
186042c1b001SThomas Moestl 
186142c1b001SThomas Moestl #if 0
186242c1b001SThomas Moestl 	/* Select the desired PHY in the MIF configuration register */
1863e1bb13cdSPoul-Henning Kamp 	v = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG);
186442c1b001SThomas Moestl 	/* Clear PHY select bit */
186542c1b001SThomas Moestl 	v &= ~GEM_MIF_CONFIG_PHY_SEL;
186642c1b001SThomas Moestl 	if (phy == GEM_PHYAD_EXTERNAL)
186742c1b001SThomas Moestl 		/* Set PHY select bit to get at external device */
186842c1b001SThomas Moestl 		v |= GEM_MIF_CONFIG_PHY_SEL;
1869e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, v);
187042c1b001SThomas Moestl #endif
187142c1b001SThomas Moestl 
187242c1b001SThomas Moestl 	/* Construct the frame command */
187342c1b001SThomas Moestl 	v = (reg << GEM_MIF_REG_SHIFT)	| (phy << GEM_MIF_PHY_SHIFT) |
187442c1b001SThomas Moestl 		GEM_MIF_FRAME_READ;
187542c1b001SThomas Moestl 
1876e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v);
187742c1b001SThomas Moestl 	for (n = 0; n < 100; n++) {
187842c1b001SThomas Moestl 		DELAY(1);
1879e1bb13cdSPoul-Henning Kamp 		v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME);
18801f317bf9SMarius Strobl 		if (v & GEM_MIF_FRAME_TA0)
188142c1b001SThomas Moestl 			return (v & GEM_MIF_FRAME_DATA);
188242c1b001SThomas Moestl 	}
188342c1b001SThomas Moestl 
188442c1b001SThomas Moestl 	device_printf(sc->sc_dev, "mii_read timeout\n");
188542c1b001SThomas Moestl 	return (0);
188642c1b001SThomas Moestl }
188742c1b001SThomas Moestl 
188842c1b001SThomas Moestl int
188942c1b001SThomas Moestl gem_mii_writereg(dev, phy, reg, val)
189042c1b001SThomas Moestl 	device_t dev;
189142c1b001SThomas Moestl 	int phy, reg, val;
189242c1b001SThomas Moestl {
189342c1b001SThomas Moestl 	struct gem_softc *sc = device_get_softc(dev);
189442c1b001SThomas Moestl 	int n;
189542c1b001SThomas Moestl 	u_int32_t v;
189642c1b001SThomas Moestl 
189742c1b001SThomas Moestl #ifdef GEM_DEBUG_PHY
189842c1b001SThomas Moestl 	printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val);
189942c1b001SThomas Moestl #endif
190042c1b001SThomas Moestl 
190142c1b001SThomas Moestl #if 0
190242c1b001SThomas Moestl 	/* Select the desired PHY in the MIF configuration register */
1903e1bb13cdSPoul-Henning Kamp 	v = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG);
190442c1b001SThomas Moestl 	/* Clear PHY select bit */
190542c1b001SThomas Moestl 	v &= ~GEM_MIF_CONFIG_PHY_SEL;
190642c1b001SThomas Moestl 	if (phy == GEM_PHYAD_EXTERNAL)
190742c1b001SThomas Moestl 		/* Set PHY select bit to get at external device */
190842c1b001SThomas Moestl 		v |= GEM_MIF_CONFIG_PHY_SEL;
1909e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, v);
191042c1b001SThomas Moestl #endif
191142c1b001SThomas Moestl 	/* Construct the frame command */
191242c1b001SThomas Moestl 	v = GEM_MIF_FRAME_WRITE			|
191342c1b001SThomas Moestl 	    (phy << GEM_MIF_PHY_SHIFT)		|
191442c1b001SThomas Moestl 	    (reg << GEM_MIF_REG_SHIFT)		|
191542c1b001SThomas Moestl 	    (val & GEM_MIF_FRAME_DATA);
191642c1b001SThomas Moestl 
1917e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v);
191842c1b001SThomas Moestl 	for (n = 0; n < 100; n++) {
191942c1b001SThomas Moestl 		DELAY(1);
1920e1bb13cdSPoul-Henning Kamp 		v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME);
19211f317bf9SMarius Strobl 		if (v & GEM_MIF_FRAME_TA0)
192242c1b001SThomas Moestl 			return (1);
192342c1b001SThomas Moestl 	}
192442c1b001SThomas Moestl 
192542c1b001SThomas Moestl 	device_printf(sc->sc_dev, "mii_write timeout\n");
192642c1b001SThomas Moestl 	return (0);
192742c1b001SThomas Moestl }
192842c1b001SThomas Moestl 
192942c1b001SThomas Moestl void
193042c1b001SThomas Moestl gem_mii_statchg(dev)
193142c1b001SThomas Moestl 	device_t dev;
193242c1b001SThomas Moestl {
193342c1b001SThomas Moestl 	struct gem_softc *sc = device_get_softc(dev);
193442c1b001SThomas Moestl #ifdef GEM_DEBUG
19358cfaff7dSMarius Strobl 	int instance;
193642c1b001SThomas Moestl #endif
193742c1b001SThomas Moestl 	u_int32_t v;
193842c1b001SThomas Moestl 
193942c1b001SThomas Moestl #ifdef GEM_DEBUG
19408cfaff7dSMarius Strobl 	instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
194142c1b001SThomas Moestl 	if (sc->sc_debug)
194242c1b001SThomas Moestl 		printf("gem_mii_statchg: status change: phy = %d\n",
194342c1b001SThomas Moestl 			sc->sc_phys[instance]);
194442c1b001SThomas Moestl #endif
194542c1b001SThomas Moestl 
194642c1b001SThomas Moestl 	/* Set tx full duplex options */
1947e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, 0);
194842c1b001SThomas Moestl 	DELAY(10000); /* reg must be cleared and delay before changing. */
194942c1b001SThomas Moestl 	v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT|
195042c1b001SThomas Moestl 		GEM_MAC_TX_ENABLE;
195142c1b001SThomas Moestl 	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) {
195242c1b001SThomas Moestl 		v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS;
195342c1b001SThomas Moestl 	}
1954e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, v);
195542c1b001SThomas Moestl 
195642c1b001SThomas Moestl 	/* XIF Configuration */
195742c1b001SThomas Moestl 	v = GEM_MAC_XIF_LINK_LED;
195842c1b001SThomas Moestl 	v |= GEM_MAC_XIF_TX_MII_ENA;
1959336cca9eSBenno Rice 
196042c1b001SThomas Moestl 	/* If an external transceiver is connected, enable its MII drivers */
1961e1bb13cdSPoul-Henning Kamp 	sc->sc_mif_config = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG);
196242c1b001SThomas Moestl 	if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) {
196342c1b001SThomas Moestl 		/* External MII needs echo disable if half duplex. */
196442c1b001SThomas Moestl 		if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
196542c1b001SThomas Moestl 			/* turn on full duplex LED */
196642c1b001SThomas Moestl 			v |= GEM_MAC_XIF_FDPLX_LED;
196742c1b001SThomas Moestl 		else
196842c1b001SThomas Moestl 	 		/* half duplex -- disable echo */
196942c1b001SThomas Moestl 	 		v |= GEM_MAC_XIF_ECHO_DISABL;
1970336cca9eSBenno Rice 
1971336cca9eSBenno Rice 		if (IFM_SUBTYPE(sc->sc_mii->mii_media_active) == IFM_1000_T)
1972336cca9eSBenno Rice 			v |= GEM_MAC_XIF_GMII_MODE;
1973336cca9eSBenno Rice 		else
1974336cca9eSBenno Rice 			v &= ~GEM_MAC_XIF_GMII_MODE;
197542c1b001SThomas Moestl 	} else {
197642c1b001SThomas Moestl 		/* Internal MII needs buf enable */
197742c1b001SThomas Moestl 		v |= GEM_MAC_XIF_MII_BUF_ENA;
197842c1b001SThomas Moestl 	}
1979e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_XIF_CONFIG, v);
198042c1b001SThomas Moestl }
198142c1b001SThomas Moestl 
198242c1b001SThomas Moestl int
198342c1b001SThomas Moestl gem_mediachange(ifp)
198442c1b001SThomas Moestl 	struct ifnet *ifp;
198542c1b001SThomas Moestl {
198642c1b001SThomas Moestl 	struct gem_softc *sc = ifp->if_softc;
19871f317bf9SMarius Strobl 	int error;
198842c1b001SThomas Moestl 
198942c1b001SThomas Moestl 	/* XXX Add support for serial media. */
199042c1b001SThomas Moestl 
19911f317bf9SMarius Strobl 	GEM_LOCK(sc);
19921f317bf9SMarius Strobl 	error = mii_mediachg(sc->sc_mii);
19931f317bf9SMarius Strobl 	GEM_UNLOCK(sc);
19941f317bf9SMarius Strobl 	return (error);
199542c1b001SThomas Moestl }
199642c1b001SThomas Moestl 
199742c1b001SThomas Moestl void
199842c1b001SThomas Moestl gem_mediastatus(ifp, ifmr)
199942c1b001SThomas Moestl 	struct ifnet *ifp;
200042c1b001SThomas Moestl 	struct ifmediareq *ifmr;
200142c1b001SThomas Moestl {
200242c1b001SThomas Moestl 	struct gem_softc *sc = ifp->if_softc;
200342c1b001SThomas Moestl 
20048cfaff7dSMarius Strobl 	GEM_LOCK(sc);
20058cfaff7dSMarius Strobl 	if ((ifp->if_flags & IFF_UP) == 0) {
20068cfaff7dSMarius Strobl 		GEM_UNLOCK(sc);
200742c1b001SThomas Moestl 		return;
20088cfaff7dSMarius Strobl 	}
200942c1b001SThomas Moestl 
201042c1b001SThomas Moestl 	mii_pollstat(sc->sc_mii);
201142c1b001SThomas Moestl 	ifmr->ifm_active = sc->sc_mii->mii_media_active;
201242c1b001SThomas Moestl 	ifmr->ifm_status = sc->sc_mii->mii_media_status;
20138cfaff7dSMarius Strobl 	GEM_UNLOCK(sc);
201442c1b001SThomas Moestl }
201542c1b001SThomas Moestl 
201642c1b001SThomas Moestl /*
201742c1b001SThomas Moestl  * Process an ioctl request.
201842c1b001SThomas Moestl  */
201942c1b001SThomas Moestl static int
202042c1b001SThomas Moestl gem_ioctl(ifp, cmd, data)
202142c1b001SThomas Moestl 	struct ifnet *ifp;
202242c1b001SThomas Moestl 	u_long cmd;
202342c1b001SThomas Moestl 	caddr_t data;
202442c1b001SThomas Moestl {
202542c1b001SThomas Moestl 	struct gem_softc *sc = ifp->if_softc;
202642c1b001SThomas Moestl 	struct ifreq *ifr = (struct ifreq *)data;
20278cfaff7dSMarius Strobl 	int error = 0;
20288cfaff7dSMarius Strobl 
202942c1b001SThomas Moestl 	switch (cmd) {
203042c1b001SThomas Moestl 	case SIOCSIFFLAGS:
20311f317bf9SMarius Strobl 		GEM_LOCK(sc);
203242c1b001SThomas Moestl 		if (ifp->if_flags & IFF_UP) {
2033336cca9eSBenno Rice 			if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC)
203442c1b001SThomas Moestl 				gem_setladrf(sc);
203542c1b001SThomas Moestl 			else
20368cfaff7dSMarius Strobl 				gem_init_locked(sc);
203742c1b001SThomas Moestl 		} else {
203813f4c340SRobert Watson 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
203942c1b001SThomas Moestl 				gem_stop(ifp, 0);
204042c1b001SThomas Moestl 		}
204112fb0330SPyun YongHyeon 		if ((ifp->if_flags & IFF_LINK0) != 0)
204212fb0330SPyun YongHyeon 			sc->sc_csum_features |= CSUM_UDP;
204312fb0330SPyun YongHyeon 		else
204412fb0330SPyun YongHyeon 			sc->sc_csum_features &= ~CSUM_UDP;
204512fb0330SPyun YongHyeon 		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
204612fb0330SPyun YongHyeon 			ifp->if_hwassist = sc->sc_csum_features;
2047336cca9eSBenno Rice 		sc->sc_ifflags = ifp->if_flags;
20481f317bf9SMarius Strobl 		GEM_UNLOCK(sc);
204942c1b001SThomas Moestl 		break;
205042c1b001SThomas Moestl 	case SIOCADDMULTI:
205142c1b001SThomas Moestl 	case SIOCDELMULTI:
20521f317bf9SMarius Strobl 		GEM_LOCK(sc);
205342c1b001SThomas Moestl 		gem_setladrf(sc);
20541f317bf9SMarius Strobl 		GEM_UNLOCK(sc);
205542c1b001SThomas Moestl 		break;
205642c1b001SThomas Moestl 	case SIOCGIFMEDIA:
205742c1b001SThomas Moestl 	case SIOCSIFMEDIA:
205842c1b001SThomas Moestl 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
205942c1b001SThomas Moestl 		break;
206012fb0330SPyun YongHyeon 	case SIOCSIFCAP:
206112fb0330SPyun YongHyeon 		GEM_LOCK(sc);
206212fb0330SPyun YongHyeon 		ifp->if_capenable = ifr->ifr_reqcap;
206312fb0330SPyun YongHyeon 		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
206412fb0330SPyun YongHyeon 			ifp->if_hwassist = sc->sc_csum_features;
206512fb0330SPyun YongHyeon 		else
206612fb0330SPyun YongHyeon 			ifp->if_hwassist = 0;
206712fb0330SPyun YongHyeon 		GEM_UNLOCK(sc);
206812fb0330SPyun YongHyeon 		break;
206942c1b001SThomas Moestl 	default:
20701f317bf9SMarius Strobl 		error = ether_ioctl(ifp, cmd, data);
207142c1b001SThomas Moestl 		break;
207242c1b001SThomas Moestl 	}
207342c1b001SThomas Moestl 
207442c1b001SThomas Moestl 	return (error);
207542c1b001SThomas Moestl }
207642c1b001SThomas Moestl 
207742c1b001SThomas Moestl /*
207842c1b001SThomas Moestl  * Set up the logical address filter.
207942c1b001SThomas Moestl  */
208042c1b001SThomas Moestl static void
208142c1b001SThomas Moestl gem_setladrf(sc)
208242c1b001SThomas Moestl 	struct gem_softc *sc;
208342c1b001SThomas Moestl {
2084fc74a9f9SBrooks Davis 	struct ifnet *ifp = sc->sc_ifp;
208542c1b001SThomas Moestl 	struct ifmultiaddr *inm;
208642c1b001SThomas Moestl 	u_int32_t crc;
208742c1b001SThomas Moestl 	u_int32_t hash[16];
208842c1b001SThomas Moestl 	u_int32_t v;
2089336cca9eSBenno Rice 	int i;
209042c1b001SThomas Moestl 
20918cfaff7dSMarius Strobl 	GEM_LOCK_ASSERT(sc, MA_OWNED);
20928cfaff7dSMarius Strobl 
209342c1b001SThomas Moestl 	/* Get current RX configuration */
2094e1bb13cdSPoul-Henning Kamp 	v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
209542c1b001SThomas Moestl 
2096336cca9eSBenno Rice 	/*
2097336cca9eSBenno Rice 	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
2098336cca9eSBenno Rice 	 * and hash filter.  Depending on the case, the right bit will be
2099336cca9eSBenno Rice 	 * enabled.
2100336cca9eSBenno Rice 	 */
2101336cca9eSBenno Rice 	v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER|
2102336cca9eSBenno Rice 	    GEM_MAC_RX_PROMISC_GRP);
2103336cca9eSBenno Rice 
210442c1b001SThomas Moestl 	if ((ifp->if_flags & IFF_PROMISC) != 0) {
2105336cca9eSBenno Rice 		/* Turn on promiscuous mode */
210642c1b001SThomas Moestl 		v |= GEM_MAC_RX_PROMISCUOUS;
210742c1b001SThomas Moestl 		goto chipit;
210842c1b001SThomas Moestl 	}
210942c1b001SThomas Moestl 	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
211042c1b001SThomas Moestl 		hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
211142c1b001SThomas Moestl 		ifp->if_flags |= IFF_ALLMULTI;
2112336cca9eSBenno Rice 		v |= GEM_MAC_RX_PROMISC_GRP;
211342c1b001SThomas Moestl 		goto chipit;
211442c1b001SThomas Moestl 	}
211542c1b001SThomas Moestl 
211642c1b001SThomas Moestl 	/*
211742c1b001SThomas Moestl 	 * Set up multicast address filter by passing all multicast addresses
2118336cca9eSBenno Rice 	 * through a crc generator, and then using the high order 8 bits as an
2119336cca9eSBenno Rice 	 * index into the 256 bit logical address filter.  The high order 4
2120336cca9eSBenno Rice 	 * bits selects the word, while the other 4 bits select the bit within
2121336cca9eSBenno Rice 	 * the word (where bit 0 is the MSB).
212242c1b001SThomas Moestl 	 */
212342c1b001SThomas Moestl 
2124336cca9eSBenno Rice 	/* Clear hash table */
2125336cca9eSBenno Rice 	memset(hash, 0, sizeof(hash));
2126336cca9eSBenno Rice 
212713b203d0SRobert Watson 	IF_ADDR_LOCK(ifp);
2128fc74a9f9SBrooks Davis 	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
212942c1b001SThomas Moestl 		if (inm->ifma_addr->sa_family != AF_LINK)
213042c1b001SThomas Moestl 			continue;
2131c240bd8cSMarius Strobl 		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
2132c240bd8cSMarius Strobl 		    inm->ifma_addr), ETHER_ADDR_LEN);
213342c1b001SThomas Moestl 
213442c1b001SThomas Moestl 		/* Just want the 8 most significant bits. */
213542c1b001SThomas Moestl 		crc >>= 24;
213642c1b001SThomas Moestl 
213742c1b001SThomas Moestl 		/* Set the corresponding bit in the filter. */
2138336cca9eSBenno Rice 		hash[crc >> 4] |= 1 << (15 - (crc & 15));
2139336cca9eSBenno Rice 	}
214013b203d0SRobert Watson 	IF_ADDR_UNLOCK(ifp);
2141336cca9eSBenno Rice 
2142336cca9eSBenno Rice 	v |= GEM_MAC_RX_HASH_FILTER;
2143336cca9eSBenno Rice 	ifp->if_flags &= ~IFF_ALLMULTI;
2144336cca9eSBenno Rice 
2145336cca9eSBenno Rice 	/* Now load the hash table into the chip (if we are using it) */
2146336cca9eSBenno Rice 	for (i = 0; i < 16; i++) {
2147e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0],
2148336cca9eSBenno Rice 		    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0),
2149336cca9eSBenno Rice 		    hash[i]);
215042c1b001SThomas Moestl 	}
215142c1b001SThomas Moestl 
215242c1b001SThomas Moestl chipit:
2153e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
215442c1b001SThomas Moestl }
2155