xref: /freebsd/sys/dev/gem/if_gem.c (revision 1ed3fed743ab7aa6d690d731f716d962f16faec1)
1aad970f1SDavid E. O'Brien /*-
242c1b001SThomas Moestl  * Copyright (C) 2001 Eduardo Horvath.
3305f2c06SThomas Moestl  * Copyright (c) 2001-2003 Thomas Moestl
442c1b001SThomas Moestl  * All rights reserved.
542c1b001SThomas Moestl  *
642c1b001SThomas Moestl  * Redistribution and use in source and binary forms, with or without
742c1b001SThomas Moestl  * modification, are permitted provided that the following conditions
842c1b001SThomas Moestl  * are met:
942c1b001SThomas Moestl  * 1. Redistributions of source code must retain the above copyright
1042c1b001SThomas Moestl  *    notice, this list of conditions and the following disclaimer.
1142c1b001SThomas Moestl  * 2. Redistributions in binary form must reproduce the above copyright
1242c1b001SThomas Moestl  *    notice, this list of conditions and the following disclaimer in the
1342c1b001SThomas Moestl  *    documentation and/or other materials provided with the distribution.
1442c1b001SThomas Moestl  *
1542c1b001SThomas Moestl  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR  ``AS IS'' AND
1642c1b001SThomas Moestl  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1742c1b001SThomas Moestl  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1842c1b001SThomas Moestl  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR  BE LIABLE
1942c1b001SThomas Moestl  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
2042c1b001SThomas Moestl  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
2142c1b001SThomas Moestl  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
2242c1b001SThomas Moestl  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
2342c1b001SThomas Moestl  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
2442c1b001SThomas Moestl  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
2542c1b001SThomas Moestl  * SUCH DAMAGE.
2642c1b001SThomas Moestl  *
27336cca9eSBenno Rice  *	from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp
2842c1b001SThomas Moestl  */
2942c1b001SThomas Moestl 
30aad970f1SDavid E. O'Brien #include <sys/cdefs.h>
31aad970f1SDavid E. O'Brien __FBSDID("$FreeBSD$");
32aad970f1SDavid E. O'Brien 
3342c1b001SThomas Moestl /*
341ed3fed7SMarius Strobl  * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers
3542c1b001SThomas Moestl  */
3642c1b001SThomas Moestl 
3718100346SThomas Moestl #if 0
3842c1b001SThomas Moestl #define	GEM_DEBUG
3918100346SThomas Moestl #endif
4042c1b001SThomas Moestl 
41c3d5598aSMarius Strobl #if 0	/* XXX: In case of emergency, re-enable this. */
42c3d5598aSMarius Strobl #define	GEM_RINT_TIMEOUT
43c3d5598aSMarius Strobl #endif
44c3d5598aSMarius Strobl 
4542c1b001SThomas Moestl #include <sys/param.h>
4642c1b001SThomas Moestl #include <sys/systm.h>
4742c1b001SThomas Moestl #include <sys/bus.h>
4842c1b001SThomas Moestl #include <sys/callout.h>
49a30d4b32SMike Barcroft #include <sys/endian.h>
5042c1b001SThomas Moestl #include <sys/mbuf.h>
5142c1b001SThomas Moestl #include <sys/malloc.h>
5242c1b001SThomas Moestl #include <sys/kernel.h>
538cfaff7dSMarius Strobl #include <sys/lock.h>
54186f2b9eSPoul-Henning Kamp #include <sys/module.h>
558cfaff7dSMarius Strobl #include <sys/mutex.h>
5642c1b001SThomas Moestl #include <sys/socket.h>
5742c1b001SThomas Moestl #include <sys/sockio.h>
58e1bb13cdSPoul-Henning Kamp #include <sys/rman.h>
5942c1b001SThomas Moestl 
6008e0fdebSThomas Moestl #include <net/bpf.h>
6142c1b001SThomas Moestl #include <net/ethernet.h>
6242c1b001SThomas Moestl #include <net/if.h>
6342c1b001SThomas Moestl #include <net/if_arp.h>
6442c1b001SThomas Moestl #include <net/if_dl.h>
6542c1b001SThomas Moestl #include <net/if_media.h>
66fc74a9f9SBrooks Davis #include <net/if_types.h>
6700d12766SMarius Strobl #include <net/if_vlan_var.h>
6842c1b001SThomas Moestl 
6912fb0330SPyun YongHyeon #include <netinet/in.h>
7012fb0330SPyun YongHyeon #include <netinet/in_systm.h>
7112fb0330SPyun YongHyeon #include <netinet/ip.h>
7212fb0330SPyun YongHyeon #include <netinet/tcp.h>
7312fb0330SPyun YongHyeon #include <netinet/udp.h>
7412fb0330SPyun YongHyeon 
7542c1b001SThomas Moestl #include <machine/bus.h>
7642c1b001SThomas Moestl 
7742c1b001SThomas Moestl #include <dev/mii/mii.h>
7842c1b001SThomas Moestl #include <dev/mii/miivar.h>
7942c1b001SThomas Moestl 
80681f7d03SWarner Losh #include <dev/gem/if_gemreg.h>
81681f7d03SWarner Losh #include <dev/gem/if_gemvar.h>
8242c1b001SThomas Moestl 
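/*
 * The descriptor ring sizes must be one of the power-of-two values between
 * 32 and 8192 that gem_ringsize() can encode for the TX/RX configuration
 * registers; the CTASSERTs below enforce this at compile time.
 */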
831ed3fed7SMarius Strobl CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192);
841ed3fed7SMarius Strobl CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192);
851ed3fed7SMarius Strobl 
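/*
 * gem_bitwait() polls its register once every 100us, so TRIES bounds such
 * waits at roughly one second.
 */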
8642c1b001SThomas Moestl #define TRIES	10000
871ed3fed7SMarius Strobl 
8812fb0330SPyun YongHyeon /*
8912fb0330SPyun YongHyeon  * The GEM hardware supports basic TCP/UDP checksum offloading. However,
9012fb0330SPyun YongHyeon  * the hardware doesn't compensate the checksum for UDP datagrams, which
9112fb0330SPyun YongHyeon  * can yield a checksum of 0x0. As a safeguard, UDP checksum offload is
9212fb0330SPyun YongHyeon  * disabled by default. It can be reactivated by setting the special
9312fb0330SPyun YongHyeon  * link option link0 with ifconfig(8).
9412fb0330SPyun YongHyeon  */
9512fb0330SPyun YongHyeon #define	GEM_CSUM_FEATURES	(CSUM_TCP)
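/*
 * For example, on an interface that attached as gem0 (a hypothetical unit
 * name), "ifconfig gem0 link0" would set the link0 option mentioned above
 * and thereby re-enable UDP checksum offloading.
 */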
9642c1b001SThomas Moestl 
97e51a25f8SAlfred Perlstein static void	gem_start(struct ifnet *);
988cfaff7dSMarius Strobl static void	gem_start_locked(struct ifnet *);
99e51a25f8SAlfred Perlstein static void	gem_stop(struct ifnet *, int);
100e51a25f8SAlfred Perlstein static int	gem_ioctl(struct ifnet *, u_long, caddr_t);
101e51a25f8SAlfred Perlstein static void	gem_cddma_callback(void *, bus_dma_segment_t *, int, int);
10212fb0330SPyun YongHyeon static __inline void gem_txcksum(struct gem_softc *, struct mbuf *, uint64_t *);
10312fb0330SPyun YongHyeon static __inline void gem_rxcksum(struct mbuf *, uint64_t);
104e51a25f8SAlfred Perlstein static void	gem_tick(void *);
1058cb37876SMarius Strobl static int	gem_watchdog(struct gem_softc *);
106e51a25f8SAlfred Perlstein static void	gem_init(void *);
1078cb37876SMarius Strobl static void	gem_init_locked(struct gem_softc *);
1088cb37876SMarius Strobl static void	gem_init_regs(struct gem_softc *);
1091ed3fed7SMarius Strobl static u_int	gem_ringsize(u_int);
110e51a25f8SAlfred Perlstein static int	gem_meminit(struct gem_softc *);
11112fb0330SPyun YongHyeon static struct mbuf *gem_defrag(struct mbuf *, int, int);
11212fb0330SPyun YongHyeon static int	gem_load_txmbuf(struct gem_softc *, struct mbuf **);
113e51a25f8SAlfred Perlstein static void	gem_mifinit(struct gem_softc *);
1148cb37876SMarius Strobl static int	gem_bitwait(struct gem_softc *, bus_addr_t, u_int32_t,
1158cb37876SMarius Strobl     u_int32_t);
1161ed3fed7SMarius Strobl static void	gem_reset(struct gem_softc *);
117e51a25f8SAlfred Perlstein static int	gem_reset_rx(struct gem_softc *);
1181ed3fed7SMarius Strobl static void	gem_reset_rxdma(struct gem_softc *sc);
119e51a25f8SAlfred Perlstein static int	gem_reset_tx(struct gem_softc *);
120e51a25f8SAlfred Perlstein static int	gem_disable_rx(struct gem_softc *);
121e51a25f8SAlfred Perlstein static int	gem_disable_tx(struct gem_softc *);
122e51a25f8SAlfred Perlstein static void	gem_rxdrain(struct gem_softc *);
123e51a25f8SAlfred Perlstein static int	gem_add_rxbuf(struct gem_softc *, int);
124e51a25f8SAlfred Perlstein static void	gem_setladrf(struct gem_softc *);
12542c1b001SThomas Moestl 
126e51a25f8SAlfred Perlstein struct mbuf	*gem_get(struct gem_softc *, int, int);
127e51a25f8SAlfred Perlstein static void	gem_eint(struct gem_softc *, u_int);
128e51a25f8SAlfred Perlstein static void	gem_rint(struct gem_softc *);
129c3d5598aSMarius Strobl #ifdef GEM_RINT_TIMEOUT
1300d80b9bdSThomas Moestl static void	gem_rint_timeout(void *);
13111e3f060SJake Burkholder #endif
132e51a25f8SAlfred Perlstein static void	gem_tint(struct gem_softc *);
13342c1b001SThomas Moestl 
13442c1b001SThomas Moestl devclass_t gem_devclass;
13542c1b001SThomas Moestl DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0);
13642c1b001SThomas Moestl MODULE_DEPEND(gem, miibus, 1, 1, 1);
13742c1b001SThomas Moestl 
13842c1b001SThomas Moestl #ifdef GEM_DEBUG
13942c1b001SThomas Moestl #include <sys/ktr.h>
14042c1b001SThomas Moestl #define	KTR_GEM		KTR_CT2
14142c1b001SThomas Moestl #endif
14242c1b001SThomas Moestl 
14318100346SThomas Moestl #define	GEM_NSEGS GEM_NTXDESC
14442c1b001SThomas Moestl 
14542c1b001SThomas Moestl /*
14642c1b001SThomas Moestl  * gem_attach:
14742c1b001SThomas Moestl  *
14842c1b001SThomas Moestl  *	Attach a Gem interface to the system.
14942c1b001SThomas Moestl  */
15042c1b001SThomas Moestl int
15142c1b001SThomas Moestl gem_attach(sc)
15242c1b001SThomas Moestl 	struct gem_softc *sc;
15342c1b001SThomas Moestl {
154fc74a9f9SBrooks Davis 	struct ifnet *ifp;
15542c1b001SThomas Moestl 	int i, error;
156336cca9eSBenno Rice 	u_int32_t v;
15742c1b001SThomas Moestl 
158fc74a9f9SBrooks Davis 	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
159fc74a9f9SBrooks Davis 	if (ifp == NULL)
160fc74a9f9SBrooks Davis 		return (ENOSPC);
161fc74a9f9SBrooks Davis 
1621f317bf9SMarius Strobl 	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
1631f317bf9SMarius Strobl #ifdef GEM_RINT_TIMEOUT
1641f317bf9SMarius Strobl 	callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0);
1651f317bf9SMarius Strobl #endif
1661f317bf9SMarius Strobl 
16742c1b001SThomas Moestl 	/* Make sure the chip is stopped. */
16842c1b001SThomas Moestl 	ifp->if_softc = sc;
16942c1b001SThomas Moestl 	gem_reset(sc);
17042c1b001SThomas Moestl 
171378f231eSJohn-Mark Gurney 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
172378f231eSJohn-Mark Gurney 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
17312fb0330SPyun YongHyeon 	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
174378f231eSJohn-Mark Gurney 	    &sc->sc_pdmatag);
17542c1b001SThomas Moestl 	if (error)
176fc74a9f9SBrooks Davis 		goto fail_ifnet;
17742c1b001SThomas Moestl 
17842c1b001SThomas Moestl 	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
17912fb0330SPyun YongHyeon 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
18012fb0330SPyun YongHyeon 	    1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
18142c1b001SThomas Moestl 	if (error)
182305f2c06SThomas Moestl 		goto fail_ptag;
183305f2c06SThomas Moestl 
184305f2c06SThomas Moestl 	error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0,
18512fb0330SPyun YongHyeon 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
18612fb0330SPyun YongHyeon 	    MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES,
187f6b1c44dSScott Long 	    BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag);
188305f2c06SThomas Moestl 	if (error)
189305f2c06SThomas Moestl 		goto fail_rtag;
19042c1b001SThomas Moestl 
19142c1b001SThomas Moestl 	error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0,
19212fb0330SPyun YongHyeon 	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
19342c1b001SThomas Moestl 	    sizeof(struct gem_control_data), 1,
19412fb0330SPyun YongHyeon 	    sizeof(struct gem_control_data), 0,
19512fb0330SPyun YongHyeon 	    NULL, NULL, &sc->sc_cdmatag);
19642c1b001SThomas Moestl 	if (error)
197305f2c06SThomas Moestl 		goto fail_ttag;
19842c1b001SThomas Moestl 
19942c1b001SThomas Moestl 	/*
20042c1b001SThomas Moestl 	 * Allocate the control data structures, and create and load the
20142c1b001SThomas Moestl 	 * DMA map for it.
20242c1b001SThomas Moestl 	 */
20342c1b001SThomas Moestl 	if ((error = bus_dmamem_alloc(sc->sc_cdmatag,
20412fb0330SPyun YongHyeon 	    (void **)&sc->sc_control_data,
20512fb0330SPyun YongHyeon 	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
20612fb0330SPyun YongHyeon 	    &sc->sc_cddmamap))) {
20742c1b001SThomas Moestl 		device_printf(sc->sc_dev, "unable to allocate control data,"
20842c1b001SThomas Moestl 		    " error = %d\n", error);
209305f2c06SThomas Moestl 		goto fail_ctag;
21042c1b001SThomas Moestl 	}
21142c1b001SThomas Moestl 
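	/*
	 * sc_cddma is cleared first so that a failed or deferred callback,
	 * which would leave it at 0, can be detected after the load below.
	 */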
21242c1b001SThomas Moestl 	sc->sc_cddma = 0;
21342c1b001SThomas Moestl 	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap,
21442c1b001SThomas Moestl 	    sc->sc_control_data, sizeof(struct gem_control_data),
21542c1b001SThomas Moestl 	    gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) {
21642c1b001SThomas Moestl 		device_printf(sc->sc_dev, "unable to load control data DMA "
21742c1b001SThomas Moestl 		    "map, error = %d\n", error);
218305f2c06SThomas Moestl 		goto fail_cmem;
21942c1b001SThomas Moestl 	}
22042c1b001SThomas Moestl 
22142c1b001SThomas Moestl 	/*
22242c1b001SThomas Moestl 	 * Initialize the transmit job descriptors.
22342c1b001SThomas Moestl 	 */
22442c1b001SThomas Moestl 	STAILQ_INIT(&sc->sc_txfreeq);
22542c1b001SThomas Moestl 	STAILQ_INIT(&sc->sc_txdirtyq);
22642c1b001SThomas Moestl 
22742c1b001SThomas Moestl 	/*
22842c1b001SThomas Moestl 	 * Create the transmit buffer DMA maps.
22942c1b001SThomas Moestl 	 */
23042c1b001SThomas Moestl 	error = ENOMEM;
23142c1b001SThomas Moestl 	for (i = 0; i < GEM_TXQUEUELEN; i++) {
23242c1b001SThomas Moestl 		struct gem_txsoft *txs;
23342c1b001SThomas Moestl 
23442c1b001SThomas Moestl 		txs = &sc->sc_txsoft[i];
23542c1b001SThomas Moestl 		txs->txs_mbuf = NULL;
23642c1b001SThomas Moestl 		txs->txs_ndescs = 0;
237305f2c06SThomas Moestl 		if ((error = bus_dmamap_create(sc->sc_tdmatag, 0,
23842c1b001SThomas Moestl 		    &txs->txs_dmamap)) != 0) {
23942c1b001SThomas Moestl 			device_printf(sc->sc_dev, "unable to create tx DMA map "
24042c1b001SThomas Moestl 			    "%d, error = %d\n", i, error);
241305f2c06SThomas Moestl 			goto fail_txd;
24242c1b001SThomas Moestl 		}
24342c1b001SThomas Moestl 		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
24442c1b001SThomas Moestl 	}
24542c1b001SThomas Moestl 
24642c1b001SThomas Moestl 	/*
24742c1b001SThomas Moestl 	 * Create the receive buffer DMA maps.
24842c1b001SThomas Moestl 	 */
24942c1b001SThomas Moestl 	for (i = 0; i < GEM_NRXDESC; i++) {
250305f2c06SThomas Moestl 		if ((error = bus_dmamap_create(sc->sc_rdmatag, 0,
25142c1b001SThomas Moestl 		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
25242c1b001SThomas Moestl 			device_printf(sc->sc_dev, "unable to create rx DMA map "
25342c1b001SThomas Moestl 			    "%d, error = %d\n", i, error);
254305f2c06SThomas Moestl 			goto fail_rxd;
25542c1b001SThomas Moestl 		}
25642c1b001SThomas Moestl 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
25742c1b001SThomas Moestl 	}
25842c1b001SThomas Moestl 
2591ed3fed7SMarius Strobl 	/* Bad things will happen when touching this register on ERI. */
2601ed3fed7SMarius Strobl 	if (sc->sc_variant != GEM_SUN_ERI)
2611ed3fed7SMarius Strobl 		bus_write_4(sc->sc_res[0], GEM_MII_DATAPATH_MODE,
2621ed3fed7SMarius Strobl 		    GEM_MII_DATAPATH_MII);
2631ed3fed7SMarius Strobl 
26442c1b001SThomas Moestl 	gem_mifinit(sc);
26542c1b001SThomas Moestl 
2661ed3fed7SMarius Strobl 	/*
2671ed3fed7SMarius Strobl 	 * Look for an external PHY.
2681ed3fed7SMarius Strobl 	 */
2691ed3fed7SMarius Strobl 	error = ENXIO;
2701ed3fed7SMarius Strobl 	v = bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG);
2711ed3fed7SMarius Strobl 	if ((v & GEM_MIF_CONFIG_MDI1) != 0) {
2721ed3fed7SMarius Strobl 		v |= GEM_MIF_CONFIG_PHY_SEL;
2731ed3fed7SMarius Strobl 		bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, v);
2741ed3fed7SMarius Strobl 		switch (sc->sc_variant) {
2751ed3fed7SMarius Strobl 		case GEM_SUN_ERI:
2761ed3fed7SMarius Strobl 			sc->sc_phyad = GEM_PHYAD_EXTERNAL;
2771ed3fed7SMarius Strobl 			break;
2781ed3fed7SMarius Strobl 		default:
2791ed3fed7SMarius Strobl 			sc->sc_phyad = -1;
2801ed3fed7SMarius Strobl 			break;
2811ed3fed7SMarius Strobl 		}
2821ed3fed7SMarius Strobl 		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
2831ed3fed7SMarius Strobl 		    gem_mediachange, gem_mediastatus);
2841ed3fed7SMarius Strobl 	}
2851ed3fed7SMarius Strobl 
2861ed3fed7SMarius Strobl 	/*
2871ed3fed7SMarius Strobl 	 * Fall back on an internal PHY if no external PHY was found.
2881ed3fed7SMarius Strobl 	 */
2891ed3fed7SMarius Strobl 	if (error != 0 && (v & GEM_MIF_CONFIG_MDI0) != 0) {
2901ed3fed7SMarius Strobl 		v &= ~GEM_MIF_CONFIG_PHY_SEL;
2911ed3fed7SMarius Strobl 		bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, v);
2921ed3fed7SMarius Strobl 		switch (sc->sc_variant) {
2931ed3fed7SMarius Strobl 		case GEM_SUN_ERI:
2941ed3fed7SMarius Strobl 		case GEM_APPLE_K2_GMAC:
2951ed3fed7SMarius Strobl 			sc->sc_phyad = GEM_PHYAD_INTERNAL;
2961ed3fed7SMarius Strobl 			break;
2971ed3fed7SMarius Strobl 		case GEM_APPLE_GMAC:
2981ed3fed7SMarius Strobl 			sc->sc_phyad = GEM_PHYAD_EXTERNAL;
2991ed3fed7SMarius Strobl 			break;
3001ed3fed7SMarius Strobl 		default:
3011ed3fed7SMarius Strobl 			sc->sc_phyad = -1;
3021ed3fed7SMarius Strobl 			break;
3031ed3fed7SMarius Strobl 		}
3041ed3fed7SMarius Strobl 		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
3051ed3fed7SMarius Strobl 		    gem_mediachange, gem_mediastatus);
3061ed3fed7SMarius Strobl 	}
3071ed3fed7SMarius Strobl 
3081ed3fed7SMarius Strobl 	/*
3091ed3fed7SMarius Strobl 	 * Try the external PCS SERDES if we didn't find any PHYs.
3101ed3fed7SMarius Strobl 	 */
3111ed3fed7SMarius Strobl 	if (error != 0 && sc->sc_variant == GEM_SUN_GEM) {
3121ed3fed7SMarius Strobl 		bus_write_4(sc->sc_res[0], GEM_MII_DATAPATH_MODE,
3131ed3fed7SMarius Strobl 		    GEM_MII_DATAPATH_SERDES);
3141ed3fed7SMarius Strobl 		bus_write_4(sc->sc_res[0], GEM_MII_SLINK_CONTROL,
3151ed3fed7SMarius Strobl 		    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
3161ed3fed7SMarius Strobl 		bus_write_4(sc->sc_res[0], GEM_MII_CONFIG,
3171ed3fed7SMarius Strobl 		    GEM_MII_CONFIG_ENABLE);
3181ed3fed7SMarius Strobl 		sc->sc_flags |= GEM_SERDES;
3191ed3fed7SMarius Strobl 		sc->sc_phyad = GEM_PHYAD_EXTERNAL;
3201ed3fed7SMarius Strobl 		error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus,
3211ed3fed7SMarius Strobl 		    gem_mediachange, gem_mediastatus);
3221ed3fed7SMarius Strobl 	}
3231ed3fed7SMarius Strobl 
3241ed3fed7SMarius Strobl 	if (error != 0) {
3251ed3fed7SMarius Strobl 		device_printf(sc->sc_dev, "PHY probe failed: %d\n", error);
326305f2c06SThomas Moestl 		goto fail_rxd;
32742c1b001SThomas Moestl 	}
32842c1b001SThomas Moestl 	sc->sc_mii = device_get_softc(sc->sc_miibus);
32942c1b001SThomas Moestl 
33042c1b001SThomas Moestl 	/*
33142c1b001SThomas Moestl 	 * From this point forward, the attachment cannot fail.  A failure
33242c1b001SThomas Moestl 	 * before this point releases all resources that may have been
33342c1b001SThomas Moestl 	 * allocated.
33442c1b001SThomas Moestl 	 */
33542c1b001SThomas Moestl 
336336cca9eSBenno Rice 	/* Get RX FIFO size */
337336cca9eSBenno Rice 	sc->sc_rxfifosize = 64 *
338e1bb13cdSPoul-Henning Kamp 	    bus_read_4(sc->sc_res[0], GEM_RX_FIFO_SIZE);
339336cca9eSBenno Rice 
340336cca9eSBenno Rice 	/* Get TX FIFO size */
341e1bb13cdSPoul-Henning Kamp 	v = bus_read_4(sc->sc_res[0], GEM_TX_FIFO_SIZE);
3423a5aee5aSThomas Moestl 	device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n",
3433a5aee5aSThomas Moestl 	    sc->sc_rxfifosize / 1024, v / 16);
34442c1b001SThomas Moestl 
34512fb0330SPyun YongHyeon 	sc->sc_csum_features = GEM_CSUM_FEATURES;
34642c1b001SThomas Moestl 	/* Initialize ifnet structure. */
34742c1b001SThomas Moestl 	ifp->if_softc = sc;
3489bf40edeSBrooks Davis 	if_initname(ifp, device_get_name(sc->sc_dev),
3499bf40edeSBrooks Davis 	    device_get_unit(sc->sc_dev));
3508cfaff7dSMarius Strobl 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
35142c1b001SThomas Moestl 	ifp->if_start = gem_start;
35242c1b001SThomas Moestl 	ifp->if_ioctl = gem_ioctl;
35342c1b001SThomas Moestl 	ifp->if_init = gem_init;
35412fb0330SPyun YongHyeon 	IFQ_SET_MAXLEN(&ifp->if_snd, GEM_TXQUEUELEN);
35512fb0330SPyun YongHyeon 	ifp->if_snd.ifq_drv_maxlen = GEM_TXQUEUELEN;
35612fb0330SPyun YongHyeon 	IFQ_SET_READY(&ifp->if_snd);
35742c1b001SThomas Moestl 
35842c1b001SThomas Moestl 	/* Attach the interface. */
359fc74a9f9SBrooks Davis 	ether_ifattach(ifp, sc->sc_enaddr);
36042c1b001SThomas Moestl 
36100d12766SMarius Strobl 	/*
36212fb0330SPyun YongHyeon 	 * Tell the upper layer(s) we support long frames/checksum offloads.
36300d12766SMarius Strobl 	 */
36400d12766SMarius Strobl 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
36512fb0330SPyun YongHyeon 	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
36612fb0330SPyun YongHyeon 	ifp->if_hwassist |= sc->sc_csum_features;
36712fb0330SPyun YongHyeon 	ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
36800d12766SMarius Strobl 
36942c1b001SThomas Moestl 	return (0);
37042c1b001SThomas Moestl 
37142c1b001SThomas Moestl 	/*
37242c1b001SThomas Moestl 	 * Free any resources we've allocated during the failed attach
37342c1b001SThomas Moestl 	 * attempt.  Do this in reverse order and fall through.
37442c1b001SThomas Moestl 	 */
375305f2c06SThomas Moestl fail_rxd:
37642c1b001SThomas Moestl 	for (i = 0; i < GEM_NRXDESC; i++) {
37742c1b001SThomas Moestl 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
378305f2c06SThomas Moestl 			bus_dmamap_destroy(sc->sc_rdmatag,
37942c1b001SThomas Moestl 			    sc->sc_rxsoft[i].rxs_dmamap);
38042c1b001SThomas Moestl 	}
381305f2c06SThomas Moestl fail_txd:
38242c1b001SThomas Moestl 	for (i = 0; i < GEM_TXQUEUELEN; i++) {
38342c1b001SThomas Moestl 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
384305f2c06SThomas Moestl 			bus_dmamap_destroy(sc->sc_tdmatag,
38542c1b001SThomas Moestl 			    sc->sc_txsoft[i].txs_dmamap);
38642c1b001SThomas Moestl 	}
387305f2c06SThomas Moestl 	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
388305f2c06SThomas Moestl fail_cmem:
38942c1b001SThomas Moestl 	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
39042c1b001SThomas Moestl 	    sc->sc_cddmamap);
391305f2c06SThomas Moestl fail_ctag:
39242c1b001SThomas Moestl 	bus_dma_tag_destroy(sc->sc_cdmatag);
393305f2c06SThomas Moestl fail_ttag:
394305f2c06SThomas Moestl 	bus_dma_tag_destroy(sc->sc_tdmatag);
395305f2c06SThomas Moestl fail_rtag:
396305f2c06SThomas Moestl 	bus_dma_tag_destroy(sc->sc_rdmatag);
397305f2c06SThomas Moestl fail_ptag:
39842c1b001SThomas Moestl 	bus_dma_tag_destroy(sc->sc_pdmatag);
399fc74a9f9SBrooks Davis fail_ifnet:
400fc74a9f9SBrooks Davis 	if_free(ifp);
40142c1b001SThomas Moestl 	return (error);
40242c1b001SThomas Moestl }
40342c1b001SThomas Moestl 
404cbbdf236SThomas Moestl void
405cbbdf236SThomas Moestl gem_detach(sc)
406cbbdf236SThomas Moestl 	struct gem_softc *sc;
407cbbdf236SThomas Moestl {
408fc74a9f9SBrooks Davis 	struct ifnet *ifp = sc->sc_ifp;
409cbbdf236SThomas Moestl 	int i;
410cbbdf236SThomas Moestl 
4118cfaff7dSMarius Strobl 	GEM_LOCK(sc);
41225bd46d0SBrooks Davis 	gem_stop(ifp, 1);
4138cfaff7dSMarius Strobl 	GEM_UNLOCK(sc);
4141f317bf9SMarius Strobl 	callout_drain(&sc->sc_tick_ch);
4151f317bf9SMarius Strobl #ifdef GEM_RINT_TIMEOUT
4161f317bf9SMarius Strobl 	callout_drain(&sc->sc_rx_ch);
4171f317bf9SMarius Strobl #endif
418cbbdf236SThomas Moestl 	ether_ifdetach(ifp);
419fc74a9f9SBrooks Davis 	if_free(ifp);
420cbbdf236SThomas Moestl 	device_delete_child(sc->sc_dev, sc->sc_miibus);
421cbbdf236SThomas Moestl 
422cbbdf236SThomas Moestl 	for (i = 0; i < GEM_NRXDESC; i++) {
423cbbdf236SThomas Moestl 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
424cbbdf236SThomas Moestl 			bus_dmamap_destroy(sc->sc_rdmatag,
425cbbdf236SThomas Moestl 			    sc->sc_rxsoft[i].rxs_dmamap);
426cbbdf236SThomas Moestl 	}
427cbbdf236SThomas Moestl 	for (i = 0; i < GEM_TXQUEUELEN; i++) {
428cbbdf236SThomas Moestl 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
429cbbdf236SThomas Moestl 			bus_dmamap_destroy(sc->sc_tdmatag,
430cbbdf236SThomas Moestl 			    sc->sc_txsoft[i].txs_dmamap);
431cbbdf236SThomas Moestl 	}
432b2d59f42SThomas Moestl 	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
433b2d59f42SThomas Moestl 	GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE);
434cbbdf236SThomas Moestl 	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap);
435cbbdf236SThomas Moestl 	bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data,
436cbbdf236SThomas Moestl 	    sc->sc_cddmamap);
437cbbdf236SThomas Moestl 	bus_dma_tag_destroy(sc->sc_cdmatag);
438cbbdf236SThomas Moestl 	bus_dma_tag_destroy(sc->sc_tdmatag);
439cbbdf236SThomas Moestl 	bus_dma_tag_destroy(sc->sc_rdmatag);
440cbbdf236SThomas Moestl 	bus_dma_tag_destroy(sc->sc_pdmatag);
441cbbdf236SThomas Moestl }
442cbbdf236SThomas Moestl 
443cbbdf236SThomas Moestl void
444cbbdf236SThomas Moestl gem_suspend(sc)
445cbbdf236SThomas Moestl 	struct gem_softc *sc;
446cbbdf236SThomas Moestl {
447fc74a9f9SBrooks Davis 	struct ifnet *ifp = sc->sc_ifp;
448cbbdf236SThomas Moestl 
4498cfaff7dSMarius Strobl 	GEM_LOCK(sc);
450cbbdf236SThomas Moestl 	gem_stop(ifp, 0);
4518cfaff7dSMarius Strobl 	GEM_UNLOCK(sc);
452cbbdf236SThomas Moestl }
453cbbdf236SThomas Moestl 
454cbbdf236SThomas Moestl void
455cbbdf236SThomas Moestl gem_resume(sc)
456cbbdf236SThomas Moestl 	struct gem_softc *sc;
457cbbdf236SThomas Moestl {
458fc74a9f9SBrooks Davis 	struct ifnet *ifp = sc->sc_ifp;
459cbbdf236SThomas Moestl 
4608cfaff7dSMarius Strobl 	GEM_LOCK(sc);
46100d12766SMarius Strobl 	/*
46200d12766SMarius Strobl 	 * On resume all registers have to be initialized again, just as
46300d12766SMarius Strobl 	 * after a power-on.
46400d12766SMarius Strobl 	 */
4651ed3fed7SMarius Strobl 	sc->sc_flags &= ~GEM_INITED;
466cbbdf236SThomas Moestl 	if (ifp->if_flags & IFF_UP)
4678cfaff7dSMarius Strobl 		gem_init_locked(sc);
4688cfaff7dSMarius Strobl 	GEM_UNLOCK(sc);
469cbbdf236SThomas Moestl }
470cbbdf236SThomas Moestl 
47112fb0330SPyun YongHyeon static __inline void
47212fb0330SPyun YongHyeon gem_txcksum(struct gem_softc *sc, struct mbuf *m, uint64_t *cflags)
47312fb0330SPyun YongHyeon {
47412fb0330SPyun YongHyeon 	struct ip *ip;
47512fb0330SPyun YongHyeon 	uint64_t offset, offset2;
47612fb0330SPyun YongHyeon 	char *p;
47712fb0330SPyun YongHyeon 
47812fb0330SPyun YongHyeon 	offset = sizeof(struct ip) + ETHER_HDR_LEN;
47912fb0330SPyun YongHyeon 	for(; m && m->m_len == 0; m = m->m_next)
48012fb0330SPyun YongHyeon 		;
48112fb0330SPyun YongHyeon 	if (m == NULL || m->m_len < ETHER_HDR_LEN) {
48212fb0330SPyun YongHyeon 		device_printf(sc->sc_dev, "%s: m_len < ETHER_HDR_LEN\n",
48312fb0330SPyun YongHyeon 		    __func__);
48412fb0330SPyun YongHyeon 		/* checksum will be corrupted */
48512fb0330SPyun YongHyeon 		goto sendit;
48612fb0330SPyun YongHyeon 	}
48712fb0330SPyun YongHyeon 	if (m->m_len < ETHER_HDR_LEN + sizeof(uint32_t)) {
48812fb0330SPyun YongHyeon 		if (m->m_len != ETHER_HDR_LEN) {
48912fb0330SPyun YongHyeon 			device_printf(sc->sc_dev,
49012fb0330SPyun YongHyeon 			    "%s: m_len != ETHER_HDR_LEN\n", __func__);
49112fb0330SPyun YongHyeon 			/* checksum will be corrupted */
49212fb0330SPyun YongHyeon 			goto sendit;
49312fb0330SPyun YongHyeon 		}
49412fb0330SPyun YongHyeon 		for(m = m->m_next; m && m->m_len == 0; m = m->m_next)
49512fb0330SPyun YongHyeon 			;
49612fb0330SPyun YongHyeon 		if (m == NULL) {
49712fb0330SPyun YongHyeon 			/* checksum will be corrupted */
49812fb0330SPyun YongHyeon 			goto sendit;
49912fb0330SPyun YongHyeon 		}
50012fb0330SPyun YongHyeon 		ip = mtod(m, struct ip *);
50112fb0330SPyun YongHyeon 	} else {
50212fb0330SPyun YongHyeon 		p = mtod(m, uint8_t *);
50312fb0330SPyun YongHyeon 		p += ETHER_HDR_LEN;
50412fb0330SPyun YongHyeon 		ip = (struct ip *)p;
50512fb0330SPyun YongHyeon 	}
50612fb0330SPyun YongHyeon 	offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
50712fb0330SPyun YongHyeon 
50812fb0330SPyun YongHyeon sendit:
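	/*
	 * The TX descriptor takes two offsets, both relative to the start of
	 * the frame: where checksumming starts (the beginning of the TCP/UDP
	 * header) and where the computed checksum is to be stuffed.  csum_data
	 * holds the latter relative to the start of the L4 header, hence the
	 * addition below.
	 */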
50912fb0330SPyun YongHyeon 	offset2 = m->m_pkthdr.csum_data;
51012fb0330SPyun YongHyeon 	*cflags = offset << GEM_TD_CXSUM_STARTSHFT;
51112fb0330SPyun YongHyeon 	*cflags |= ((offset + offset2) << GEM_TD_CXSUM_STUFFSHFT);
51212fb0330SPyun YongHyeon 	*cflags |= GEM_TD_CXSUM_ENABLE;
51312fb0330SPyun YongHyeon }
51412fb0330SPyun YongHyeon 
51512fb0330SPyun YongHyeon static __inline void
51612fb0330SPyun YongHyeon gem_rxcksum(struct mbuf *m, uint64_t flags)
51712fb0330SPyun YongHyeon {
51812fb0330SPyun YongHyeon 	struct ether_header *eh;
51912fb0330SPyun YongHyeon 	struct ip *ip;
52012fb0330SPyun YongHyeon 	struct udphdr *uh;
52112fb0330SPyun YongHyeon 	int32_t hlen, len, pktlen;
52212fb0330SPyun YongHyeon 	uint16_t cksum, *opts;
52312fb0330SPyun YongHyeon 	uint32_t temp32;
52412fb0330SPyun YongHyeon 
52512fb0330SPyun YongHyeon 	pktlen = m->m_pkthdr.len;
52612fb0330SPyun YongHyeon 	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
52712fb0330SPyun YongHyeon 		return;
52812fb0330SPyun YongHyeon 	eh = mtod(m, struct ether_header *);
52912fb0330SPyun YongHyeon 	if (eh->ether_type != htons(ETHERTYPE_IP))
53012fb0330SPyun YongHyeon 		return;
53112fb0330SPyun YongHyeon 	ip = (struct ip *)(eh + 1);
53212fb0330SPyun YongHyeon 	if (ip->ip_v != IPVERSION)
53312fb0330SPyun YongHyeon 		return;
53412fb0330SPyun YongHyeon 
53512fb0330SPyun YongHyeon 	hlen = ip->ip_hl << 2;
53612fb0330SPyun YongHyeon 	pktlen -= sizeof(struct ether_header);
53712fb0330SPyun YongHyeon 	if (hlen < sizeof(struct ip))
53812fb0330SPyun YongHyeon 		return;
53912fb0330SPyun YongHyeon 	if (ntohs(ip->ip_len) < hlen)
54012fb0330SPyun YongHyeon 		return;
54112fb0330SPyun YongHyeon 	if (ntohs(ip->ip_len) != pktlen)
54212fb0330SPyun YongHyeon 		return;
54312fb0330SPyun YongHyeon 	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
54412fb0330SPyun YongHyeon 		return;	/* can't handle fragmented packet */
54512fb0330SPyun YongHyeon 
54612fb0330SPyun YongHyeon 	switch (ip->ip_p) {
54712fb0330SPyun YongHyeon 	case IPPROTO_TCP:
54812fb0330SPyun YongHyeon 		if (pktlen < (hlen + sizeof(struct tcphdr)))
54912fb0330SPyun YongHyeon 			return;
55012fb0330SPyun YongHyeon 		break;
55112fb0330SPyun YongHyeon 	case IPPROTO_UDP:
55212fb0330SPyun YongHyeon 		if (pktlen < (hlen + sizeof(struct udphdr)))
55312fb0330SPyun YongHyeon 			return;
55412fb0330SPyun YongHyeon 		uh = (struct udphdr *)((uint8_t *)ip + hlen);
55512fb0330SPyun YongHyeon 		if (uh->uh_sum == 0)
55612fb0330SPyun YongHyeon 			return; /* no checksum */
55712fb0330SPyun YongHyeon 		break;
55812fb0330SPyun YongHyeon 	default:
55912fb0330SPyun YongHyeon 		return;
56012fb0330SPyun YongHyeon 	}
56112fb0330SPyun YongHyeon 
56212fb0330SPyun YongHyeon 	cksum = ~(flags & GEM_RD_CHECKSUM);
56312fb0330SPyun YongHyeon 	/* checksum fixup for IP options */
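	/*
	 * The hardware sums from the fixed offset programmed into GEM_RX_CONFIG
	 * (ETHER_HDR_LEN + sizeof(struct ip)), so any IP option words it has
	 * included must be backed out of the one's complement sum below.
	 */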
56412fb0330SPyun YongHyeon 	len = hlen - sizeof(struct ip);
56512fb0330SPyun YongHyeon 	if (len > 0) {
56612fb0330SPyun YongHyeon 		opts = (uint16_t *)(ip + 1);
56712fb0330SPyun YongHyeon 		for (; len > 0; len -= sizeof(uint16_t), opts++) {
56812fb0330SPyun YongHyeon 			temp32 = cksum - *opts;
56912fb0330SPyun YongHyeon 			temp32 = (temp32 >> 16) + (temp32 & 65535);
57012fb0330SPyun YongHyeon 			cksum = temp32 & 65535;
57112fb0330SPyun YongHyeon 		}
57212fb0330SPyun YongHyeon 	}
57312fb0330SPyun YongHyeon 	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
57412fb0330SPyun YongHyeon 	m->m_pkthdr.csum_data = cksum;
57512fb0330SPyun YongHyeon }
57612fb0330SPyun YongHyeon 
57742c1b001SThomas Moestl static void
57842c1b001SThomas Moestl gem_cddma_callback(xsc, segs, nsegs, error)
57942c1b001SThomas Moestl 	void *xsc;
58042c1b001SThomas Moestl 	bus_dma_segment_t *segs;
58142c1b001SThomas Moestl 	int nsegs;
58242c1b001SThomas Moestl 	int error;
58342c1b001SThomas Moestl {
58442c1b001SThomas Moestl 	struct gem_softc *sc = (struct gem_softc *)xsc;
58542c1b001SThomas Moestl 
58642c1b001SThomas Moestl 	if (error != 0)
58742c1b001SThomas Moestl 		return;
58842c1b001SThomas Moestl 	if (nsegs != 1) {
58942c1b001SThomas Moestl 		/* can't happen... */
5901ed3fed7SMarius Strobl 		panic("%s: bad control buffer segment count", __func__);
59142c1b001SThomas Moestl 	}
59242c1b001SThomas Moestl 	sc->sc_cddma = segs[0].ds_addr;
59342c1b001SThomas Moestl }
59442c1b001SThomas Moestl 
59542c1b001SThomas Moestl static void
59642c1b001SThomas Moestl gem_tick(arg)
59742c1b001SThomas Moestl 	void *arg;
59842c1b001SThomas Moestl {
59942c1b001SThomas Moestl 	struct gem_softc *sc = arg;
60012fb0330SPyun YongHyeon 	struct ifnet *ifp;
60142c1b001SThomas Moestl 
6021f317bf9SMarius Strobl 	GEM_LOCK_ASSERT(sc, MA_OWNED);
60312fb0330SPyun YongHyeon 
60412fb0330SPyun YongHyeon 	ifp = sc->sc_ifp;
60512fb0330SPyun YongHyeon 	/*
60612fb0330SPyun YongHyeon 	 * Unload collision counters
60712fb0330SPyun YongHyeon 	 */
60812fb0330SPyun YongHyeon 	ifp->if_collisions +=
60912fb0330SPyun YongHyeon 	    bus_read_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT) +
61012fb0330SPyun YongHyeon 	    bus_read_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT) +
61112fb0330SPyun YongHyeon 	    bus_read_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT) +
61212fb0330SPyun YongHyeon 	    bus_read_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT);
61312fb0330SPyun YongHyeon 
61412fb0330SPyun YongHyeon 	/*
61512fb0330SPyun YongHyeon 	 * then clear the hardware counters.
61612fb0330SPyun YongHyeon 	 */
61712fb0330SPyun YongHyeon 	bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0);
61812fb0330SPyun YongHyeon 	bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0);
61912fb0330SPyun YongHyeon 	bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0);
62012fb0330SPyun YongHyeon 	bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0);
62112fb0330SPyun YongHyeon 
62242c1b001SThomas Moestl 	mii_tick(sc->sc_mii);
62342c1b001SThomas Moestl 
6248cb37876SMarius Strobl 	if (gem_watchdog(sc) == EJUSTRETURN)
6258cb37876SMarius Strobl 		return;
6268cb37876SMarius Strobl 
62742c1b001SThomas Moestl 	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
62842c1b001SThomas Moestl }
62942c1b001SThomas Moestl 
63042c1b001SThomas Moestl static int
63142c1b001SThomas Moestl gem_bitwait(sc, r, clr, set)
63242c1b001SThomas Moestl 	struct gem_softc *sc;
63342c1b001SThomas Moestl 	bus_addr_t r;
63442c1b001SThomas Moestl 	u_int32_t clr;
63542c1b001SThomas Moestl 	u_int32_t set;
63642c1b001SThomas Moestl {
63742c1b001SThomas Moestl 	int i;
63842c1b001SThomas Moestl 	u_int32_t reg;
63942c1b001SThomas Moestl 
64042c1b001SThomas Moestl 	for (i = TRIES; i--; DELAY(100)) {
641e1bb13cdSPoul-Henning Kamp 		reg = bus_read_4(sc->sc_res[0], r);
642e87137e1SMarius Strobl 		if ((reg & clr) == 0 && (reg & set) == set)
64342c1b001SThomas Moestl 			return (1);
64442c1b001SThomas Moestl 	}
64542c1b001SThomas Moestl 	return (0);
64642c1b001SThomas Moestl }
64742c1b001SThomas Moestl 
6481ed3fed7SMarius Strobl static void
64942c1b001SThomas Moestl gem_reset(sc)
65042c1b001SThomas Moestl 	struct gem_softc *sc;
65142c1b001SThomas Moestl {
65242c1b001SThomas Moestl 
65318100346SThomas Moestl #ifdef GEM_DEBUG
65412fb0330SPyun YongHyeon 	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
65518100346SThomas Moestl #endif
65642c1b001SThomas Moestl 	gem_reset_rx(sc);
65742c1b001SThomas Moestl 	gem_reset_tx(sc);
65842c1b001SThomas Moestl 
65942c1b001SThomas Moestl 	/* Do a full reset */
660e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX | GEM_RESET_TX);
6611ed3fed7SMarius Strobl 	bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
66242c1b001SThomas Moestl 	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0))
66342c1b001SThomas Moestl 		device_printf(sc->sc_dev, "cannot reset device\n");
66442c1b001SThomas Moestl }
66542c1b001SThomas Moestl 
66642c1b001SThomas Moestl /*
66742c1b001SThomas Moestl  * gem_rxdrain:
66842c1b001SThomas Moestl  *
66942c1b001SThomas Moestl  *	Drain the receive queue.
67042c1b001SThomas Moestl  */
67142c1b001SThomas Moestl static void
67242c1b001SThomas Moestl gem_rxdrain(sc)
67342c1b001SThomas Moestl 	struct gem_softc *sc;
67442c1b001SThomas Moestl {
67542c1b001SThomas Moestl 	struct gem_rxsoft *rxs;
67642c1b001SThomas Moestl 	int i;
67742c1b001SThomas Moestl 
67842c1b001SThomas Moestl 	for (i = 0; i < GEM_NRXDESC; i++) {
67942c1b001SThomas Moestl 		rxs = &sc->sc_rxsoft[i];
68042c1b001SThomas Moestl 		if (rxs->rxs_mbuf != NULL) {
681b2d59f42SThomas Moestl 			bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
682b2d59f42SThomas Moestl 			    BUS_DMASYNC_POSTREAD);
683305f2c06SThomas Moestl 			bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
68442c1b001SThomas Moestl 			m_freem(rxs->rxs_mbuf);
68542c1b001SThomas Moestl 			rxs->rxs_mbuf = NULL;
68642c1b001SThomas Moestl 		}
68742c1b001SThomas Moestl 	}
68842c1b001SThomas Moestl }
68942c1b001SThomas Moestl 
69042c1b001SThomas Moestl /*
69142c1b001SThomas Moestl  * Reset the whole thing.
69242c1b001SThomas Moestl  */
69342c1b001SThomas Moestl static void
69442c1b001SThomas Moestl gem_stop(ifp, disable)
69542c1b001SThomas Moestl 	struct ifnet *ifp;
69642c1b001SThomas Moestl 	int disable;
69742c1b001SThomas Moestl {
69842c1b001SThomas Moestl 	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
69942c1b001SThomas Moestl 	struct gem_txsoft *txs;
70042c1b001SThomas Moestl 
70118100346SThomas Moestl #ifdef GEM_DEBUG
70212fb0330SPyun YongHyeon 	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
70318100346SThomas Moestl #endif
70442c1b001SThomas Moestl 
70542c1b001SThomas Moestl 	callout_stop(&sc->sc_tick_ch);
7061f317bf9SMarius Strobl #ifdef GEM_RINT_TIMEOUT
7071f317bf9SMarius Strobl 	callout_stop(&sc->sc_rx_ch);
7081f317bf9SMarius Strobl #endif
70942c1b001SThomas Moestl 
71042c1b001SThomas Moestl 	/* XXX - Should we reset these instead? */
71142c1b001SThomas Moestl 	gem_disable_tx(sc);
71242c1b001SThomas Moestl 	gem_disable_rx(sc);
71342c1b001SThomas Moestl 
71442c1b001SThomas Moestl 	/*
71542c1b001SThomas Moestl 	 * Release any queued transmit buffers.
71642c1b001SThomas Moestl 	 */
71742c1b001SThomas Moestl 	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
71842c1b001SThomas Moestl 		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
71942c1b001SThomas Moestl 		if (txs->txs_ndescs != 0) {
720b2d59f42SThomas Moestl 			bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
721b2d59f42SThomas Moestl 			    BUS_DMASYNC_POSTWRITE);
722305f2c06SThomas Moestl 			bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
72342c1b001SThomas Moestl 			if (txs->txs_mbuf != NULL) {
72442c1b001SThomas Moestl 				m_freem(txs->txs_mbuf);
72542c1b001SThomas Moestl 				txs->txs_mbuf = NULL;
72642c1b001SThomas Moestl 			}
72742c1b001SThomas Moestl 		}
72842c1b001SThomas Moestl 		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
72942c1b001SThomas Moestl 	}
73042c1b001SThomas Moestl 
73142c1b001SThomas Moestl 	if (disable)
73242c1b001SThomas Moestl 		gem_rxdrain(sc);
73342c1b001SThomas Moestl 
73442c1b001SThomas Moestl 	/*
73542c1b001SThomas Moestl 	 * Mark the interface down and cancel the watchdog timer.
73642c1b001SThomas Moestl 	 */
73713f4c340SRobert Watson 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
7381ed3fed7SMarius Strobl 	sc->sc_flags &= ~GEM_LINK;
7398cb37876SMarius Strobl 	sc->sc_wdog_timer = 0;
74042c1b001SThomas Moestl }
74142c1b001SThomas Moestl 
74242c1b001SThomas Moestl /*
74342c1b001SThomas Moestl  * Reset the receiver
74442c1b001SThomas Moestl  */
7451ed3fed7SMarius Strobl static int
74642c1b001SThomas Moestl gem_reset_rx(sc)
74742c1b001SThomas Moestl 	struct gem_softc *sc;
74842c1b001SThomas Moestl {
74942c1b001SThomas Moestl 
75042c1b001SThomas Moestl 	/*
75142c1b001SThomas Moestl 	 * Resetting while DMA is in progress can cause a bus hang, so we
75242c1b001SThomas Moestl 	 * disable DMA first.
75342c1b001SThomas Moestl 	 */
75442c1b001SThomas Moestl 	gem_disable_rx(sc);
755e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_RX_CONFIG, 0);
7561ed3fed7SMarius Strobl 	bus_barrier(sc->sc_res[0], GEM_RX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
7571ed3fed7SMarius Strobl 	if (!gem_bitwait(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0))
7581ed3fed7SMarius Strobl 		device_printf(sc->sc_dev, "cannot disable RX DMA\n");
75942c1b001SThomas Moestl 
76042c1b001SThomas Moestl 	/* Finally, reset the ERX */
761e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_RX);
7621ed3fed7SMarius Strobl 	bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
7631ed3fed7SMarius Strobl 	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) {
76442c1b001SThomas Moestl 		device_printf(sc->sc_dev, "cannot reset receiver\n");
76542c1b001SThomas Moestl 		return (1);
76642c1b001SThomas Moestl 	}
76742c1b001SThomas Moestl 	return (0);
76842c1b001SThomas Moestl }
76942c1b001SThomas Moestl 
7701ed3fed7SMarius Strobl /*
7711ed3fed7SMarius Strobl  * Reset the receiver DMA engine.
7721ed3fed7SMarius Strobl  *
7731ed3fed7SMarius Strobl  * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW,
7741ed3fed7SMarius Strobl  * etc. in order to reset only the receiver DMA engine and not do a full
7751ed3fed7SMarius Strobl  * reset, which among other things also downs the link and clears the FIFOs.
7761ed3fed7SMarius Strobl  */
7771ed3fed7SMarius Strobl static void
7781ed3fed7SMarius Strobl gem_reset_rxdma(struct gem_softc *sc)
7791ed3fed7SMarius Strobl {
7801ed3fed7SMarius Strobl 	int i;
7811ed3fed7SMarius Strobl 
7821ed3fed7SMarius Strobl 	if (gem_reset_rx(sc) != 0)
7831ed3fed7SMarius Strobl 		return (gem_init_locked(sc));
7841ed3fed7SMarius Strobl 	for (i = 0; i < GEM_NRXDESC; i++)
7851ed3fed7SMarius Strobl 		if (sc->sc_rxsoft[i].rxs_mbuf != NULL)
7861ed3fed7SMarius Strobl 			GEM_UPDATE_RXDESC(sc, i);
7871ed3fed7SMarius Strobl 	sc->sc_rxptr = 0;
7881ed3fed7SMarius Strobl 	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
7891ed3fed7SMarius Strobl 	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);
7901ed3fed7SMarius Strobl 
7911ed3fed7SMarius Strobl 	/* NOTE: we use only 32-bit DMA addresses here. */
7921ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_HI, 0);
7931ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
7941ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC - 4);
7951ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
7961ed3fed7SMarius Strobl 	    gem_ringsize(GEM_NRXDESC /*XXX*/) |
7971ed3fed7SMarius Strobl 	    ((ETHER_HDR_LEN + sizeof(struct ip)) <<
7981ed3fed7SMarius Strobl 	    GEM_RX_CONFIG_CXM_START_SHFT) |
7991ed3fed7SMarius Strobl 	    (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) |
8001ed3fed7SMarius Strobl 	    (2 << GEM_RX_CONFIG_FBOFF_SHFT));
8011ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_RX_BLANKING,
8021ed3fed7SMarius Strobl 	    (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);
8031ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH,
8041ed3fed7SMarius Strobl 	    (3 * sc->sc_rxfifosize / 256) | ((sc->sc_rxfifosize / 256) << 12));
8051ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
8061ed3fed7SMarius Strobl 	    bus_read_4(sc->sc_res[0], GEM_RX_CONFIG) | GEM_RX_CONFIG_RXDMA_EN);
8071ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_MAC_RX_MASK,
8081ed3fed7SMarius Strobl 	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
8091ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG,
8101ed3fed7SMarius Strobl 	    bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG) | GEM_MAC_RX_ENABLE);
8111ed3fed7SMarius Strobl }
81242c1b001SThomas Moestl 
81342c1b001SThomas Moestl /*
81442c1b001SThomas Moestl  * Reset the transmitter
81542c1b001SThomas Moestl  */
81642c1b001SThomas Moestl static int
81742c1b001SThomas Moestl gem_reset_tx(sc)
81842c1b001SThomas Moestl 	struct gem_softc *sc;
81942c1b001SThomas Moestl {
82042c1b001SThomas Moestl 
82142c1b001SThomas Moestl 	/*
82242c1b001SThomas Moestl 	 * Resetting while DMA is in progress can cause a bus hang, so we
82342c1b001SThomas Moestl 	 * disable DMA first.
82442c1b001SThomas Moestl 	 */
82542c1b001SThomas Moestl 	gem_disable_tx(sc);
826e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_TX_CONFIG, 0);
8271ed3fed7SMarius Strobl 	bus_barrier(sc->sc_res[0], GEM_TX_CONFIG, 4, BUS_SPACE_BARRIER_WRITE);
8281ed3fed7SMarius Strobl 	if (!gem_bitwait(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0))
8291ed3fed7SMarius Strobl 		device_printf(sc->sc_dev, "cannot disable TX DMA\n");
83042c1b001SThomas Moestl 
83142c1b001SThomas Moestl 	/* Finally, reset the ETX */
832e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_RESET, GEM_RESET_TX);
8331ed3fed7SMarius Strobl 	bus_barrier(sc->sc_res[0], GEM_RESET, 4, BUS_SPACE_BARRIER_WRITE);
8341ed3fed7SMarius Strobl 	if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) {
8351ed3fed7SMarius Strobl 		device_printf(sc->sc_dev, "cannot reset transmitter\n");
83642c1b001SThomas Moestl 		return (1);
83742c1b001SThomas Moestl 	}
83842c1b001SThomas Moestl 	return (0);
83942c1b001SThomas Moestl }
84042c1b001SThomas Moestl 
84142c1b001SThomas Moestl /*
84242c1b001SThomas Moestl  * disable receiver.
84342c1b001SThomas Moestl  */
84442c1b001SThomas Moestl static int
84542c1b001SThomas Moestl gem_disable_rx(sc)
84642c1b001SThomas Moestl 	struct gem_softc *sc;
84742c1b001SThomas Moestl {
84842c1b001SThomas Moestl 	u_int32_t cfg;
84942c1b001SThomas Moestl 
85042c1b001SThomas Moestl 	/* Flip the enable bit */
851e1bb13cdSPoul-Henning Kamp 	cfg = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
85242c1b001SThomas Moestl 	cfg &= ~GEM_MAC_RX_ENABLE;
853e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, cfg);
8541ed3fed7SMarius Strobl 	bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
8551ed3fed7SMarius Strobl 	   BUS_SPACE_BARRIER_WRITE);
85642c1b001SThomas Moestl 	return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0));
85742c1b001SThomas Moestl }
85842c1b001SThomas Moestl 
85942c1b001SThomas Moestl /*
86042c1b001SThomas Moestl  * disable transmitter.
86142c1b001SThomas Moestl  */
86242c1b001SThomas Moestl static int
86342c1b001SThomas Moestl gem_disable_tx(sc)
86442c1b001SThomas Moestl 	struct gem_softc *sc;
86542c1b001SThomas Moestl {
86642c1b001SThomas Moestl 	u_int32_t cfg;
86742c1b001SThomas Moestl 
86842c1b001SThomas Moestl 	/* Flip the enable bit */
869e1bb13cdSPoul-Henning Kamp 	cfg = bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG);
87042c1b001SThomas Moestl 	cfg &= ~GEM_MAC_TX_ENABLE;
871e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, cfg);
8721ed3fed7SMarius Strobl 	bus_barrier(sc->sc_res[0], GEM_MAC_TX_CONFIG, 4,
8731ed3fed7SMarius Strobl 	    BUS_SPACE_BARRIER_WRITE);
87442c1b001SThomas Moestl 	return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0));
87542c1b001SThomas Moestl }
87642c1b001SThomas Moestl 
87742c1b001SThomas Moestl /*
87842c1b001SThomas Moestl  * Initialize interface.
87942c1b001SThomas Moestl  */
88042c1b001SThomas Moestl static int
88142c1b001SThomas Moestl gem_meminit(sc)
88242c1b001SThomas Moestl 	struct gem_softc *sc;
88342c1b001SThomas Moestl {
88442c1b001SThomas Moestl 	struct gem_rxsoft *rxs;
88542c1b001SThomas Moestl 	int i, error;
88642c1b001SThomas Moestl 
88742c1b001SThomas Moestl 	/*
88842c1b001SThomas Moestl 	 * Initialize the transmit descriptor ring.
88942c1b001SThomas Moestl 	 */
89042c1b001SThomas Moestl 	for (i = 0; i < GEM_NTXDESC; i++) {
89142c1b001SThomas Moestl 		sc->sc_txdescs[i].gd_flags = 0;
89242c1b001SThomas Moestl 		sc->sc_txdescs[i].gd_addr = 0;
89342c1b001SThomas Moestl 	}
894305f2c06SThomas Moestl 	sc->sc_txfree = GEM_MAXTXFREE;
89542c1b001SThomas Moestl 	sc->sc_txnext = 0;
896336cca9eSBenno Rice 	sc->sc_txwin = 0;
89742c1b001SThomas Moestl 
89842c1b001SThomas Moestl 	/*
89942c1b001SThomas Moestl 	 * Initialize the receive descriptor and receive job
90042c1b001SThomas Moestl 	 * descriptor rings.
90142c1b001SThomas Moestl 	 */
90242c1b001SThomas Moestl 	for (i = 0; i < GEM_NRXDESC; i++) {
90342c1b001SThomas Moestl 		rxs = &sc->sc_rxsoft[i];
90442c1b001SThomas Moestl 		if (rxs->rxs_mbuf == NULL) {
90542c1b001SThomas Moestl 			if ((error = gem_add_rxbuf(sc, i)) != 0) {
90642c1b001SThomas Moestl 				device_printf(sc->sc_dev, "unable to "
90742c1b001SThomas Moestl 				    "allocate or map rx buffer %d, error = "
90842c1b001SThomas Moestl 				    "%d\n", i, error);
90942c1b001SThomas Moestl 				/*
91042c1b001SThomas Moestl 				 * XXX Should attempt to run with fewer receive
91142c1b001SThomas Moestl 				 * XXX buffers instead of just failing.
91242c1b001SThomas Moestl 				 */
91342c1b001SThomas Moestl 				gem_rxdrain(sc);
91442c1b001SThomas Moestl 				return (1);
91542c1b001SThomas Moestl 			}
91642c1b001SThomas Moestl 		} else
91742c1b001SThomas Moestl 			GEM_INIT_RXDESC(sc, i);
91842c1b001SThomas Moestl 	}
91942c1b001SThomas Moestl 	sc->sc_rxptr = 0;
920b2d59f42SThomas Moestl 	GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
921b2d59f42SThomas Moestl 	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD);
92242c1b001SThomas Moestl 
92342c1b001SThomas Moestl 	return (0);
92442c1b001SThomas Moestl }
92542c1b001SThomas Moestl 
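/*
 * Translate a descriptor ring size in entries into the GEM_RING_SZ_*
 * encoding expected by the TX/RX configuration registers.
 */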
9261ed3fed7SMarius Strobl static u_int
92742c1b001SThomas Moestl gem_ringsize(sz)
9281ed3fed7SMarius Strobl 	u_int sz;
92942c1b001SThomas Moestl {
93042c1b001SThomas Moestl 
93142c1b001SThomas Moestl 	switch (sz) {
93242c1b001SThomas Moestl 	case 32:
9331ed3fed7SMarius Strobl 		return (GEM_RING_SZ_32);
93442c1b001SThomas Moestl 	case 64:
9351ed3fed7SMarius Strobl 		return (GEM_RING_SZ_64);
93642c1b001SThomas Moestl 	case 128:
9371ed3fed7SMarius Strobl 		return (GEM_RING_SZ_128);
93842c1b001SThomas Moestl 	case 256:
9391ed3fed7SMarius Strobl 		return (GEM_RING_SZ_256);
94042c1b001SThomas Moestl 	case 512:
9411ed3fed7SMarius Strobl 		return (GEM_RING_SZ_512);
94242c1b001SThomas Moestl 	case 1024:
9431ed3fed7SMarius Strobl 		return (GEM_RING_SZ_1024);
94442c1b001SThomas Moestl 	case 2048:
9451ed3fed7SMarius Strobl 		return (GEM_RING_SZ_2048);
94642c1b001SThomas Moestl 	case 4096:
9471ed3fed7SMarius Strobl 		return (GEM_RING_SZ_4096);
94842c1b001SThomas Moestl 	case 8192:
9491ed3fed7SMarius Strobl 		return (GEM_RING_SZ_8192);
95042c1b001SThomas Moestl 	default:
9511ed3fed7SMarius Strobl 		printf("%s: invalid ring size %d\n", __func__, sz);
9521ed3fed7SMarius Strobl 		return (GEM_RING_SZ_32);
95342c1b001SThomas Moestl 	}
95442c1b001SThomas Moestl }
95542c1b001SThomas Moestl 
95642c1b001SThomas Moestl static void
95742c1b001SThomas Moestl gem_init(xsc)
95842c1b001SThomas Moestl 	void *xsc;
95942c1b001SThomas Moestl {
96042c1b001SThomas Moestl 	struct gem_softc *sc = (struct gem_softc *)xsc;
9618cfaff7dSMarius Strobl 
9628cfaff7dSMarius Strobl 	GEM_LOCK(sc);
9638cfaff7dSMarius Strobl 	gem_init_locked(sc);
9648cfaff7dSMarius Strobl 	GEM_UNLOCK(sc);
9658cfaff7dSMarius Strobl }
9668cfaff7dSMarius Strobl 
9678cfaff7dSMarius Strobl /*
9688cfaff7dSMarius Strobl  * Initialization of interface; set up initialization block
9698cfaff7dSMarius Strobl  * and transmit/receive descriptor rings.
9708cfaff7dSMarius Strobl  */
9718cfaff7dSMarius Strobl static void
9728cfaff7dSMarius Strobl gem_init_locked(sc)
9738cfaff7dSMarius Strobl 	struct gem_softc *sc;
9748cfaff7dSMarius Strobl {
975fc74a9f9SBrooks Davis 	struct ifnet *ifp = sc->sc_ifp;
97642c1b001SThomas Moestl 	u_int32_t v;
97742c1b001SThomas Moestl 
9788cfaff7dSMarius Strobl 	GEM_LOCK_ASSERT(sc, MA_OWNED);
97942c1b001SThomas Moestl 
98018100346SThomas Moestl #ifdef GEM_DEBUG
98112fb0330SPyun YongHyeon 	CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev),
98212fb0330SPyun YongHyeon 	    __func__);
98318100346SThomas Moestl #endif
98442c1b001SThomas Moestl 	/*
98542c1b001SThomas Moestl 	 * Initialization sequence. The numbered steps below correspond
98642c1b001SThomas Moestl 	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
98742c1b001SThomas Moestl 	 * Channel Engine manual (part of the PCIO manual).
98842c1b001SThomas Moestl 	 * See also the STP2002-STQ document from Sun Microsystems.
98942c1b001SThomas Moestl 	 */
99042c1b001SThomas Moestl 
99142c1b001SThomas Moestl 	/* step 1 & 2. Reset the Ethernet Channel */
992fc74a9f9SBrooks Davis 	gem_stop(sc->sc_ifp, 0);
99342c1b001SThomas Moestl 	gem_reset(sc);
99418100346SThomas Moestl #ifdef GEM_DEBUG
99512fb0330SPyun YongHyeon 	CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev),
99612fb0330SPyun YongHyeon 	    __func__);
99718100346SThomas Moestl #endif
99842c1b001SThomas Moestl 
99942c1b001SThomas Moestl 	/* Re-initialize the MIF */
100042c1b001SThomas Moestl 	gem_mifinit(sc);
100142c1b001SThomas Moestl 
100242c1b001SThomas Moestl 	/* step 3. Setup data structures in host memory */
10031ed3fed7SMarius Strobl 	if (gem_meminit(sc) != 0)
10041ed3fed7SMarius Strobl 		return;
100542c1b001SThomas Moestl 
100642c1b001SThomas Moestl 	/* step 4. TX MAC registers & counters */
100742c1b001SThomas Moestl 	gem_init_regs(sc);
100842c1b001SThomas Moestl 
100942c1b001SThomas Moestl 	/* step 5. RX MAC registers & counters */
101042c1b001SThomas Moestl 	gem_setladrf(sc);
101142c1b001SThomas Moestl 
101242c1b001SThomas Moestl 	/* step 6 & 7. Program Descriptor Ring Base Addresses */
101342c1b001SThomas Moestl 	/* NOTE: we use only 32-bit DMA addresses here. */
1014e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_HI, 0);
1015e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0));
101642c1b001SThomas Moestl 
1017e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_HI, 0);
1018e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0));
101918100346SThomas Moestl #ifdef GEM_DEBUG
102042c1b001SThomas Moestl 	CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx",
102142c1b001SThomas Moestl 	    GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma);
102218100346SThomas Moestl #endif
102342c1b001SThomas Moestl 
102442c1b001SThomas Moestl 	/* step 8. Global Configuration & Interrupt Mask */
1025e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_INTMASK,
10261ed3fed7SMarius Strobl 	    ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE |
10271ed3fed7SMarius Strobl 	    GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR |
10281ed3fed7SMarius Strobl 	    GEM_INTR_BERR
10291ed3fed7SMarius Strobl #ifdef GEM_DEBUG
10301ed3fed7SMarius Strobl 	    | GEM_INTR_PCS | GEM_INTR_MIF
10311ed3fed7SMarius Strobl #endif
10321ed3fed7SMarius Strobl 	    ));
1033e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_RX_MASK,
1034336cca9eSBenno Rice 	    GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT);
10351ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_MAC_TX_MASK,
10361ed3fed7SMarius Strobl 	    GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP);
10371ed3fed7SMarius Strobl #ifdef GEM_DEBUG
10381ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_MASK,
10391ed3fed7SMarius Strobl 	    ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME));
10401ed3fed7SMarius Strobl #else
10411ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_MASK,
10421ed3fed7SMarius Strobl 	    GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME);
10431ed3fed7SMarius Strobl #endif
104442c1b001SThomas Moestl 
104542c1b001SThomas Moestl 	/* step 9. ETX Configuration: use mostly default values */
104642c1b001SThomas Moestl 
104742c1b001SThomas Moestl 	/* Enable DMA */
104842c1b001SThomas Moestl 	v = gem_ringsize(GEM_NTXDESC /*XXX*/);
1049e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_TX_CONFIG,
105042c1b001SThomas Moestl 		v|GEM_TX_CONFIG_TXDMA_EN|
105142c1b001SThomas Moestl 		((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH));
105242c1b001SThomas Moestl 
105342c1b001SThomas Moestl 	/* step 10. ERX Configuration */
105442c1b001SThomas Moestl 
10551ed3fed7SMarius Strobl 	/* Encode Receive Descriptor ring size. */
105642c1b001SThomas Moestl 	v = gem_ringsize(GEM_NRXDESC /*XXX*/);
105712fb0330SPyun YongHyeon 	/* Rx TCP/UDP checksum offset */
105812fb0330SPyun YongHyeon 	v |= ((ETHER_HDR_LEN + sizeof(struct ip)) <<
105912fb0330SPyun YongHyeon 	    GEM_RX_CONFIG_CXM_START_SHFT);
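	/*
	 * Note: gem_rxcksum() assumes this fixed start offset when it backs
	 * IP option words out of the hardware checksum.
	 */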
106042c1b001SThomas Moestl 
106142c1b001SThomas Moestl 	/* Enable DMA */
1062e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_RX_CONFIG,
106342c1b001SThomas Moestl 		v|(GEM_THRSH_1024<<GEM_RX_CONFIG_FIFO_THRS_SHIFT)|
106412fb0330SPyun YongHyeon 		(2<<GEM_RX_CONFIG_FBOFF_SHFT)|GEM_RX_CONFIG_RXDMA_EN);
10651ed3fed7SMarius Strobl 
10661ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_RX_BLANKING,
10671ed3fed7SMarius Strobl 	    (6 << GEM_RX_BLANKING_TIME_SHIFT) | 6);
10681ed3fed7SMarius Strobl 
106942c1b001SThomas Moestl 	/*
1070336cca9eSBenno Rice 	 * The following value is for an OFF Threshold of about 3/4 full
1071336cca9eSBenno Rice 	 * and an ON Threshold of 1/4 full.
107242c1b001SThomas Moestl 	 */
1073e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_RX_PAUSE_THRESH,
1074336cca9eSBenno Rice 	    (3 * sc->sc_rxfifosize / 256) |
1075336cca9eSBenno Rice 	    (   (sc->sc_rxfifosize / 256) << 12));
107642c1b001SThomas Moestl 
107742c1b001SThomas Moestl 	/* step 11. Configure Media */
107842c1b001SThomas Moestl 
107942c1b001SThomas Moestl 	/* step 12. RX_MAC Configuration Register */
1080e1bb13cdSPoul-Henning Kamp 	v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
10811ed3fed7SMarius Strobl 	v |= GEM_MAC_RX_STRIP_CRC;
10821ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, 0);
10831ed3fed7SMarius Strobl 	bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
10841ed3fed7SMarius Strobl 	    BUS_SPACE_BARRIER_WRITE);
10851ed3fed7SMarius Strobl 	if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
10861ed3fed7SMarius Strobl 		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
1087e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
108842c1b001SThomas Moestl 
108942c1b001SThomas Moestl 	/* step 14. Issue Transmit Pending command */
109042c1b001SThomas Moestl 
109142c1b001SThomas Moestl 	/* step 15. Give the receiver a swift kick */
1092e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_RX_KICK, GEM_NRXDESC-4);
109342c1b001SThomas Moestl 
109413f4c340SRobert Watson 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
109513f4c340SRobert Watson 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1096336cca9eSBenno Rice 	sc->sc_ifflags = ifp->if_flags;
10971ed3fed7SMarius Strobl 
10981ed3fed7SMarius Strobl 	sc->sc_flags &= ~GEM_LINK;
10991ed3fed7SMarius Strobl 	mii_mediachg(sc->sc_mii);
11001ed3fed7SMarius Strobl 
11011ed3fed7SMarius Strobl 	/* Start the one second timer. */
11021ed3fed7SMarius Strobl 	sc->sc_wdog_timer = 0;
11031ed3fed7SMarius Strobl 	callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
110442c1b001SThomas Moestl }
110542c1b001SThomas Moestl 
110612fb0330SPyun YongHyeon /*
110712fb0330SPyun YongHyeon  * This is a copy of ath_defrag() from ath(4).
110812fb0330SPyun YongHyeon  *
110912fb0330SPyun YongHyeon  * Defragment an mbuf chain, returning at most maxfrags separate
111012fb0330SPyun YongHyeon  * mbufs+clusters.  If this is not possible, NULL is returned and
111112fb0330SPyun YongHyeon  * the original mbuf chain is left in its present (potentially
111212fb0330SPyun YongHyeon  * modified) state.  We use two techniques: collapsing consecutive
111312fb0330SPyun YongHyeon  * mbufs and replacing consecutive mbufs by a cluster.
111412fb0330SPyun YongHyeon  */
111512fb0330SPyun YongHyeon static struct mbuf *
111612fb0330SPyun YongHyeon gem_defrag(m0, how, maxfrags)
111742c1b001SThomas Moestl 	struct mbuf *m0;
111812fb0330SPyun YongHyeon 	int how;
111912fb0330SPyun YongHyeon 	int maxfrags;
112042c1b001SThomas Moestl {
112112fb0330SPyun YongHyeon 	struct mbuf *m, *n, *n2, **prev;
112212fb0330SPyun YongHyeon 	u_int curfrags;
112312fb0330SPyun YongHyeon 
112412fb0330SPyun YongHyeon 	/*
112512fb0330SPyun YongHyeon 	 * Calculate the current number of frags.
112612fb0330SPyun YongHyeon 	 */
112712fb0330SPyun YongHyeon 	curfrags = 0;
112812fb0330SPyun YongHyeon 	for (m = m0; m != NULL; m = m->m_next)
112912fb0330SPyun YongHyeon 		curfrags++;
113012fb0330SPyun YongHyeon 	/*
113112fb0330SPyun YongHyeon 	 * First, try to collapse mbufs.  Note that we always collapse
113212fb0330SPyun YongHyeon 	 * towards the front so we don't need to deal with moving the
113312fb0330SPyun YongHyeon 	 * pkthdr.  This may be suboptimal if the first mbuf has much
113412fb0330SPyun YongHyeon 	 * less data than the following.
113512fb0330SPyun YongHyeon 	 */
113612fb0330SPyun YongHyeon 	m = m0;
113712fb0330SPyun YongHyeon again:
113812fb0330SPyun YongHyeon 	for (;;) {
113912fb0330SPyun YongHyeon 		n = m->m_next;
114012fb0330SPyun YongHyeon 		if (n == NULL)
114112fb0330SPyun YongHyeon 			break;
114212fb0330SPyun YongHyeon 		if ((m->m_flags & M_RDONLY) == 0 &&
114312fb0330SPyun YongHyeon 		    n->m_len < M_TRAILINGSPACE(m)) {
114412fb0330SPyun YongHyeon 			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
114512fb0330SPyun YongHyeon 				n->m_len);
114612fb0330SPyun YongHyeon 			m->m_len += n->m_len;
114712fb0330SPyun YongHyeon 			m->m_next = n->m_next;
114812fb0330SPyun YongHyeon 			m_free(n);
114912fb0330SPyun YongHyeon 			if (--curfrags <= maxfrags)
115012fb0330SPyun YongHyeon 				return (m0);
115112fb0330SPyun YongHyeon 		} else
115212fb0330SPyun YongHyeon 			m = n;
115312fb0330SPyun YongHyeon 	}
115412fb0330SPyun YongHyeon 	KASSERT(maxfrags > 1,
115512fb0330SPyun YongHyeon 		("maxfrags %u, but normal collapse failed", maxfrags));
115612fb0330SPyun YongHyeon 	/*
115712fb0330SPyun YongHyeon 	 * Collapse consecutive mbufs to a cluster.
115812fb0330SPyun YongHyeon 	 */
115912fb0330SPyun YongHyeon 	prev = &m0->m_next;		/* NB: not the first mbuf */
116012fb0330SPyun YongHyeon 	while ((n = *prev) != NULL) {
116112fb0330SPyun YongHyeon 		if ((n2 = n->m_next) != NULL &&
116212fb0330SPyun YongHyeon 		    n->m_len + n2->m_len < MCLBYTES) {
116312fb0330SPyun YongHyeon 			m = m_getcl(how, MT_DATA, 0);
116412fb0330SPyun YongHyeon 			if (m == NULL)
116512fb0330SPyun YongHyeon 				goto bad;
116612fb0330SPyun YongHyeon 			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
116712fb0330SPyun YongHyeon 			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
116812fb0330SPyun YongHyeon 				n2->m_len);
116912fb0330SPyun YongHyeon 			m->m_len = n->m_len + n2->m_len;
117012fb0330SPyun YongHyeon 			m->m_next = n2->m_next;
117112fb0330SPyun YongHyeon 			*prev = m;
117212fb0330SPyun YongHyeon 			m_free(n);
117312fb0330SPyun YongHyeon 			m_free(n2);
117412fb0330SPyun YongHyeon 			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
117512fb0330SPyun YongHyeon 				return (m0);
117612fb0330SPyun YongHyeon 			/*
117712fb0330SPyun YongHyeon 			 * Still not there, try the normal collapse
117812fb0330SPyun YongHyeon 			 * again before we allocate another cluster.
117912fb0330SPyun YongHyeon 			 */
118012fb0330SPyun YongHyeon 			goto again;
118112fb0330SPyun YongHyeon 		}
118212fb0330SPyun YongHyeon 		prev = &n->m_next;
118312fb0330SPyun YongHyeon 	}
118412fb0330SPyun YongHyeon 	/*
118512fb0330SPyun YongHyeon 	 * No place where we can collapse to a cluster; punt.
118612fb0330SPyun YongHyeon 	 * This can occur if, for example, you request 2 frags
118712fb0330SPyun YongHyeon 	 * but the packet requires that both be clusters (we
118812fb0330SPyun YongHyeon 	 * never reallocate the first mbuf to avoid moving the
118912fb0330SPyun YongHyeon 	 * packet header).
119012fb0330SPyun YongHyeon 	 */
119112fb0330SPyun YongHyeon bad:
119212fb0330SPyun YongHyeon 	return (NULL);
119312fb0330SPyun YongHyeon }
119412fb0330SPyun YongHyeon 
119512fb0330SPyun YongHyeon static int
119612fb0330SPyun YongHyeon gem_load_txmbuf(sc, m_head)
119712fb0330SPyun YongHyeon 	struct gem_softc *sc;
119812fb0330SPyun YongHyeon 	struct mbuf **m_head;
119912fb0330SPyun YongHyeon {
120042c1b001SThomas Moestl 	struct gem_txsoft *txs;
120112fb0330SPyun YongHyeon 	bus_dma_segment_t txsegs[GEM_NTXSEGS];
120212fb0330SPyun YongHyeon 	struct mbuf *m;
120312fb0330SPyun YongHyeon 	uint64_t flags, cflags;
120412fb0330SPyun YongHyeon 	int error, nexttx, nsegs, seg;
120542c1b001SThomas Moestl 
120642c1b001SThomas Moestl 	/* Get a work queue entry. */
120742c1b001SThomas Moestl 	if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) {
1208305f2c06SThomas Moestl 		/* Ran out of descriptors. */
120912fb0330SPyun YongHyeon 		return (ENOBUFS);
1210305f2c06SThomas Moestl 	}
121112fb0330SPyun YongHyeon 	error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
121212fb0330SPyun YongHyeon 	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
121312fb0330SPyun YongHyeon 	if (error == EFBIG) {
121412fb0330SPyun YongHyeon 		m = gem_defrag(*m_head, M_DONTWAIT, GEM_NTXSEGS);
121512fb0330SPyun YongHyeon 		if (m == NULL) {
121612fb0330SPyun YongHyeon 			m_freem(*m_head);
121712fb0330SPyun YongHyeon 			*m_head = NULL;
121812fb0330SPyun YongHyeon 			return (ENOBUFS);
121912fb0330SPyun YongHyeon 		}
122012fb0330SPyun YongHyeon 		*m_head = m;
122112fb0330SPyun YongHyeon 		error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
122212fb0330SPyun YongHyeon 		    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
122312fb0330SPyun YongHyeon 		if (error != 0) {
122412fb0330SPyun YongHyeon 			m_freem(*m_head);
122512fb0330SPyun YongHyeon 			*m_head = NULL;
122612fb0330SPyun YongHyeon 			return (error);
122712fb0330SPyun YongHyeon 		}
122812fb0330SPyun YongHyeon 	} else if (error != 0)
122912fb0330SPyun YongHyeon 		return (error);
123012fb0330SPyun YongHyeon 	if (nsegs == 0) {
123112fb0330SPyun YongHyeon 		m_freem(*m_head);
123212fb0330SPyun YongHyeon 		*m_head = NULL;
123312fb0330SPyun YongHyeon 		return (EIO);
123412fb0330SPyun YongHyeon 	}
123512fb0330SPyun YongHyeon 
123612fb0330SPyun YongHyeon 	/*
123712fb0330SPyun YongHyeon 	 * Ensure we have enough descriptors free to describe
123812fb0330SPyun YongHyeon 	 * the packet.  Note, we always reserve one descriptor
123912fb0330SPyun YongHyeon 	 * at the end of the ring as a termination point, to
124012fb0330SPyun YongHyeon 	 * prevent wrap-around.
124112fb0330SPyun YongHyeon 	 */
124212fb0330SPyun YongHyeon 	if (nsegs > sc->sc_txfree - 1) {
124312fb0330SPyun YongHyeon 		txs->txs_ndescs = 0;
124412fb0330SPyun YongHyeon 		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
124512fb0330SPyun YongHyeon 		return (ENOBUFS);
124612fb0330SPyun YongHyeon 	}
124712fb0330SPyun YongHyeon 
124812fb0330SPyun YongHyeon 	flags = cflags = 0;
124912fb0330SPyun YongHyeon 	if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0)
125012fb0330SPyun YongHyeon 		gem_txcksum(sc, *m_head, &cflags);
125112fb0330SPyun YongHyeon 
125212fb0330SPyun YongHyeon 	txs->txs_ndescs = nsegs;
1253305f2c06SThomas Moestl 	txs->txs_firstdesc = sc->sc_txnext;
125412fb0330SPyun YongHyeon 	nexttx = txs->txs_firstdesc;
125512fb0330SPyun YongHyeon 	for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) {
125612fb0330SPyun YongHyeon #ifdef GEM_DEBUG
125712fb0330SPyun YongHyeon 		CTR6(KTR_GEM, "%s: mapping seg %d (txd %d), len "
125812fb0330SPyun YongHyeon 		    "%lx, addr %#lx (%#lx)", __func__, seg, nexttx,
125912fb0330SPyun YongHyeon 		    txsegs[seg].ds_len, txsegs[seg].ds_addr,
126012fb0330SPyun YongHyeon 		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr));
126112fb0330SPyun YongHyeon #endif
126212fb0330SPyun YongHyeon 		sc->sc_txdescs[nexttx].gd_addr =
126312fb0330SPyun YongHyeon 		    GEM_DMA_WRITE(sc, txsegs[seg].ds_addr);
126412fb0330SPyun YongHyeon 		KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE,
126512fb0330SPyun YongHyeon 		    ("%s: segment size too large!", __func__));
126612fb0330SPyun YongHyeon 		flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE;
126712fb0330SPyun YongHyeon 		sc->sc_txdescs[nexttx].gd_flags =
126812fb0330SPyun YongHyeon 		    GEM_DMA_WRITE(sc, flags | cflags);
126912fb0330SPyun YongHyeon 		txs->txs_lastdesc = nexttx;
127042c1b001SThomas Moestl 	}
1271305f2c06SThomas Moestl 
127212fb0330SPyun YongHyeon 	/* set EOP on the last descriptor */
127312fb0330SPyun YongHyeon #ifdef GEM_DEBUG
127412fb0330SPyun YongHyeon 	CTR3(KTR_GEM, "%s: end of packet at seg %d, tx %d", __func__, seg,
127512fb0330SPyun YongHyeon 	    nexttx);
127612fb0330SPyun YongHyeon #endif
127712fb0330SPyun YongHyeon 	sc->sc_txdescs[txs->txs_lastdesc].gd_flags |=
127812fb0330SPyun YongHyeon 	    GEM_DMA_WRITE(sc, GEM_TD_END_OF_PACKET);
127912fb0330SPyun YongHyeon 
128012fb0330SPyun YongHyeon 	/* Lastly set SOP on the first descriptor */
128112fb0330SPyun YongHyeon #ifdef GEM_DEBUG
128212fb0330SPyun YongHyeon 	CTR3(KTR_GEM, "%s: start of packet at seg %d, tx %d", __func__, seg,
128312fb0330SPyun YongHyeon 	    nexttx);
128412fb0330SPyun YongHyeon #endif
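	/*
	 * Request a completion interrupt (GEM_TD_INTERRUPT_ME) once every
	 * GEM_NTXSEGS * 2 / 3 queued packets so that used TX descriptors
	 * are reclaimed in a timely fashion; sc_txwin counts the packets
	 * queued since the last such request and is also cleared once the
	 * ring drains completely.
	 */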
128512fb0330SPyun YongHyeon 	if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) {
128612fb0330SPyun YongHyeon 		sc->sc_txwin = 0;
128712fb0330SPyun YongHyeon 		flags |= GEM_TD_INTERRUPT_ME;
128812fb0330SPyun YongHyeon 		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
128912fb0330SPyun YongHyeon 		    GEM_DMA_WRITE(sc, GEM_TD_INTERRUPT_ME |
129012fb0330SPyun YongHyeon 		    GEM_TD_START_OF_PACKET);
129112fb0330SPyun YongHyeon 	} else
129212fb0330SPyun YongHyeon 		sc->sc_txdescs[txs->txs_firstdesc].gd_flags |=
129312fb0330SPyun YongHyeon 		    GEM_DMA_WRITE(sc, GEM_TD_START_OF_PACKET);
129412fb0330SPyun YongHyeon 
129542c1b001SThomas Moestl 	/* Sync the DMA map. */
129612fb0330SPyun YongHyeon 	bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, BUS_DMASYNC_PREWRITE);
1297305f2c06SThomas Moestl 
129818100346SThomas Moestl #ifdef GEM_DEBUG
129912fb0330SPyun YongHyeon 	CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d",
130012fb0330SPyun YongHyeon 	    __func__, txs->txs_firstdesc, txs->txs_lastdesc, txs->txs_ndescs);
130118100346SThomas Moestl #endif
130242c1b001SThomas Moestl 	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
1303305f2c06SThomas Moestl 	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
130412fb0330SPyun YongHyeon 	txs->txs_mbuf = *m_head;
1305305f2c06SThomas Moestl 
1306305f2c06SThomas Moestl 	sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc);
1307305f2c06SThomas Moestl 	sc->sc_txfree -= txs->txs_ndescs;
130842c1b001SThomas Moestl 
130912fb0330SPyun YongHyeon 	return (0);
131042c1b001SThomas Moestl }
131142c1b001SThomas Moestl 
131242c1b001SThomas Moestl static void
131342c1b001SThomas Moestl gem_init_regs(sc)
131442c1b001SThomas Moestl 	struct gem_softc *sc;
131542c1b001SThomas Moestl {
13164a0d6638SRuslan Ermilov 	const u_char *laddr = IF_LLADDR(sc->sc_ifp);
131742c1b001SThomas Moestl 
131842c1b001SThomas Moestl 	/* These regs are not cleared on reset */
13191ed3fed7SMarius Strobl 	if ((sc->sc_flags & GEM_INITED) == 0) {
132042c1b001SThomas Moestl 		/* Wooo.  Magic values. */
1321e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_IPG0, 0);
1322e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_IPG1, 8);
1323e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_IPG2, 4);
132442c1b001SThomas Moestl 
1325e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
132642c1b001SThomas Moestl 		/* Max frame and max burst size */
1327e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_MAC_MAX_FRAME,
13281ed3fed7SMarius Strobl 		    (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16));
1329336cca9eSBenno Rice 
1330e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_PREAMBLE_LEN, 0x7);
1331e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_JAM_SIZE, 0x4);
1332e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_ATTEMPT_LIMIT, 0x10);
133342c1b001SThomas Moestl 		/* Dunno.... */
1334e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_TYPE, 0x8088);
1335e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_RANDOM_SEED,
1336336cca9eSBenno Rice 		    ((laddr[5]<<8)|laddr[4])&0x3ff);
1337336cca9eSBenno Rice 
133842c1b001SThomas Moestl 		/* Secondary MAC addr set to 0:0:0:0:0:0 */
1339e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR3, 0);
1340e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR4, 0);
1341e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR5, 0);
1342336cca9eSBenno Rice 
1343336cca9eSBenno Rice 		/* MAC control addr set to 01:80:c2:00:00:01 */
1344e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR6, 0x0001);
1345e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR7, 0xc200);
1346e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR8, 0x0180);
134742c1b001SThomas Moestl 
134842c1b001SThomas Moestl 		/* MAC filter addr set to 0:0:0:0:0:0 */
1349e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER0, 0);
1350e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER1, 0);
1351e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_ADDR_FILTER2, 0);
135242c1b001SThomas Moestl 
1353e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK1_2, 0);
1354e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_MAC_ADR_FLT_MASK0, 0);
135542c1b001SThomas Moestl 
13561ed3fed7SMarius Strobl 		sc->sc_flags |= GEM_INITED;
135742c1b001SThomas Moestl 	}
135842c1b001SThomas Moestl 
135942c1b001SThomas Moestl 	/* Counters need to be zeroed */
1360e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_NORM_COLL_CNT, 0);
1361e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_FIRST_COLL_CNT, 0);
1362e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_EXCESS_COLL_CNT, 0);
1363e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_LATE_COLL_CNT, 0);
1364e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_DEFER_TMR_CNT, 0);
1365e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_PEAK_ATTEMPTS, 0);
1366e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_RX_FRAME_COUNT, 0);
1367e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_RX_LEN_ERR_CNT, 0);
1368e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_RX_ALIGN_ERR, 0);
1369e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CRC_ERR_CNT, 0);
1370e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CODE_VIOL, 0);
137142c1b001SThomas Moestl 
13721ed3fed7SMarius Strobl 	/* Set XOFF PAUSE time. */
1373e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_SEND_PAUSE_CMD, 0x1BF0);
13741ed3fed7SMarius Strobl 
13751ed3fed7SMarius Strobl 	/*
13761ed3fed7SMarius Strobl 	 * Set the internal arbitration to "infinite" bursts of the
13771ed3fed7SMarius Strobl 	 * maximum length of 31 * 64 bytes so DMA transfers aren't
13781ed3fed7SMarius Strobl 	 * split up into cache-line-sized chunks. This greatly improves
13791ed3fed7SMarius Strobl 	 * RX performance in particular.
13801ed3fed7SMarius Strobl 	 * Enable silicon bug workarounds for the Apple variants.
13811ed3fed7SMarius Strobl 	 */
13821ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_CONFIG,
13831ed3fed7SMarius Strobl 	    GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT |
13841ed3fed7SMarius Strobl 	    GEM_CONFIG_BURST_INF | (GEM_IS_APPLE(sc) ?
13851ed3fed7SMarius Strobl 	    GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0));
138642c1b001SThomas Moestl 
138742c1b001SThomas Moestl 	/*
138842c1b001SThomas Moestl 	 * Set the station address.
138942c1b001SThomas Moestl 	 */
1390e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]);
1391e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]);
1392e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]);
1393336cca9eSBenno Rice 
13941ed3fed7SMarius Strobl 	/* Enable MII outputs. */
13951ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_MAC_XIF_CONFIG, GEM_MAC_XIF_TX_MII_ENA);
139642c1b001SThomas Moestl }
139742c1b001SThomas Moestl 
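/*
 * gem_start() is a thin wrapper that takes the driver lock around
 * gem_start_locked(), which does the actual transmit queue processing.
 */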
139842c1b001SThomas Moestl static void
139942c1b001SThomas Moestl gem_start(ifp)
140042c1b001SThomas Moestl 	struct ifnet *ifp;
140142c1b001SThomas Moestl {
140242c1b001SThomas Moestl 	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
14038cfaff7dSMarius Strobl 
14048cfaff7dSMarius Strobl 	GEM_LOCK(sc);
14058cfaff7dSMarius Strobl 	gem_start_locked(ifp);
14068cfaff7dSMarius Strobl 	GEM_UNLOCK(sc);
14078cfaff7dSMarius Strobl }
14088cfaff7dSMarius Strobl 
14098cfaff7dSMarius Strobl static void
14108cfaff7dSMarius Strobl gem_start_locked(ifp)
14118cfaff7dSMarius Strobl 	struct ifnet *ifp;
14128cfaff7dSMarius Strobl {
14138cfaff7dSMarius Strobl 	struct gem_softc *sc = (struct gem_softc *)ifp->if_softc;
141412fb0330SPyun YongHyeon 	struct mbuf *m;
14151ed3fed7SMarius Strobl 	int ntx = 0;
141642c1b001SThomas Moestl 
141713f4c340SRobert Watson 	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
14181ed3fed7SMarius Strobl 	    IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0)
141942c1b001SThomas Moestl 		return;
142042c1b001SThomas Moestl 
142118100346SThomas Moestl #ifdef GEM_DEBUG
142212fb0330SPyun YongHyeon 	CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d",
14231ed3fed7SMarius Strobl 	    device_get_name(sc->sc_dev), __func__, sc->sc_txfree,
14241ed3fed7SMarius Strobl 	    sc->sc_txnext);
142518100346SThomas Moestl #endif
142612fb0330SPyun YongHyeon 	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) {
142712fb0330SPyun YongHyeon 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
142812fb0330SPyun YongHyeon 		if (m == NULL)
142942c1b001SThomas Moestl 			break;
14301ed3fed7SMarius Strobl 		if (gem_load_txmbuf(sc, &m) != 0) {
143112fb0330SPyun YongHyeon 			if (m == NULL)
143212fb0330SPyun YongHyeon 				break;
143312fb0330SPyun YongHyeon 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
143412fb0330SPyun YongHyeon 			IFQ_DRV_PREPEND(&ifp->if_snd, m);
143542c1b001SThomas Moestl 			break;
143642c1b001SThomas Moestl 		}
143718100346SThomas Moestl 		ntx++;
1438305f2c06SThomas Moestl 		/* Kick the transmitter. */
143918100346SThomas Moestl #ifdef GEM_DEBUG
144012fb0330SPyun YongHyeon 		CTR3(KTR_GEM, "%s: %s: kicking tx %d",
144112fb0330SPyun YongHyeon 		    device_get_name(sc->sc_dev), __func__, sc->sc_txnext);
144218100346SThomas Moestl #endif
14431ed3fed7SMarius Strobl 		GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
1444e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0], GEM_TX_KICK,
144542c1b001SThomas Moestl 			sc->sc_txnext);
144642c1b001SThomas Moestl 
144712fb0330SPyun YongHyeon 		BPF_MTAP(ifp, m);
1448305f2c06SThomas Moestl 	}
1449305f2c06SThomas Moestl 
1450305f2c06SThomas Moestl 	if (ntx > 0) {
145118100346SThomas Moestl #ifdef GEM_DEBUG
1452305f2c06SThomas Moestl 		CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d",
14531ed3fed7SMarius Strobl 		    device_get_name(sc->sc_dev), sc->sc_txnext);
145418100346SThomas Moestl #endif
1455305f2c06SThomas Moestl 
145642c1b001SThomas Moestl 		/* Set a watchdog timer in case the chip flakes out. */
14578cb37876SMarius Strobl 		sc->sc_wdog_timer = 5;
145818100346SThomas Moestl #ifdef GEM_DEBUG
145912fb0330SPyun YongHyeon 		CTR3(KTR_GEM, "%s: %s: watchdog %d",
146012fb0330SPyun YongHyeon 		    device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
146118100346SThomas Moestl #endif
146242c1b001SThomas Moestl 	}
146342c1b001SThomas Moestl }
146442c1b001SThomas Moestl 
146542c1b001SThomas Moestl /*
146642c1b001SThomas Moestl  * Transmit interrupt.
146742c1b001SThomas Moestl  */
146842c1b001SThomas Moestl static void
146942c1b001SThomas Moestl gem_tint(sc)
147042c1b001SThomas Moestl 	struct gem_softc *sc;
147142c1b001SThomas Moestl {
1472fc74a9f9SBrooks Davis 	struct ifnet *ifp = sc->sc_ifp;
147342c1b001SThomas Moestl 	struct gem_txsoft *txs;
147442c1b001SThomas Moestl 	int txlast;
1475336cca9eSBenno Rice 	int progress = 0;
147642c1b001SThomas Moestl 
147718100346SThomas Moestl #ifdef GEM_DEBUG
147812fb0330SPyun YongHyeon 	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
147918100346SThomas Moestl #endif
148042c1b001SThomas Moestl 
148142c1b001SThomas Moestl 	/*
148242c1b001SThomas Moestl 	 * Go through our Tx list and free mbufs for those
148342c1b001SThomas Moestl 	 * frames that have been transmitted.
148442c1b001SThomas Moestl 	 */
1485b2d59f42SThomas Moestl 	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
148642c1b001SThomas Moestl 	while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
148742c1b001SThomas Moestl 
148842c1b001SThomas Moestl #ifdef GEM_DEBUG
148942c1b001SThomas Moestl 		if (ifp->if_flags & IFF_DEBUG) {
149042c1b001SThomas Moestl 			int i;
149142c1b001SThomas Moestl 			printf("    txsoft %p transmit chain:\n", txs);
149242c1b001SThomas Moestl 			for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) {
149342c1b001SThomas Moestl 				printf("descriptor %d: ", i);
149442c1b001SThomas Moestl 				printf("gd_flags: 0x%016llx\t", (long long)
149542c1b001SThomas Moestl 					GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags));
149642c1b001SThomas Moestl 				printf("gd_addr: 0x%016llx\n", (long long)
149742c1b001SThomas Moestl 					GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr));
149842c1b001SThomas Moestl 				if (i == txs->txs_lastdesc)
149942c1b001SThomas Moestl 					break;
150042c1b001SThomas Moestl 			}
150142c1b001SThomas Moestl 		}
150242c1b001SThomas Moestl #endif
150342c1b001SThomas Moestl 
150442c1b001SThomas Moestl 		/*
15051ed3fed7SMarius Strobl 		 * In theory, we could harvest some descriptors before
150642c1b001SThomas Moestl 		 * the ring is empty, but that's a bit complicated.
150742c1b001SThomas Moestl 		 *
150842c1b001SThomas Moestl 		 * GEM_TX_COMPLETION points to the last descriptor
150942c1b001SThomas Moestl 		 * processed +1.
151042c1b001SThomas Moestl 		 */
1511e1bb13cdSPoul-Henning Kamp 		txlast = bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION);
151218100346SThomas Moestl #ifdef GEM_DEBUG
151312fb0330SPyun YongHyeon 		CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, "
151442c1b001SThomas Moestl 		    "txs->txs_lastdesc = %d, txlast = %d",
151512fb0330SPyun YongHyeon 		    __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast);
151618100346SThomas Moestl #endif
151742c1b001SThomas Moestl 		if (txs->txs_firstdesc <= txs->txs_lastdesc) {
151842c1b001SThomas Moestl 			if ((txlast >= txs->txs_firstdesc) &&
151942c1b001SThomas Moestl 				(txlast <= txs->txs_lastdesc))
152042c1b001SThomas Moestl 				break;
152142c1b001SThomas Moestl 		} else {
152242c1b001SThomas Moestl 			/* Ick -- this command wraps */
152342c1b001SThomas Moestl 			if ((txlast >= txs->txs_firstdesc) ||
152442c1b001SThomas Moestl 				(txlast <= txs->txs_lastdesc))
152542c1b001SThomas Moestl 				break;
152642c1b001SThomas Moestl 		}
152742c1b001SThomas Moestl 
152818100346SThomas Moestl #ifdef GEM_DEBUG
152912fb0330SPyun YongHyeon 		CTR1(KTR_GEM, "%s: releasing a desc", __func__);
153018100346SThomas Moestl #endif
153142c1b001SThomas Moestl 		STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
153242c1b001SThomas Moestl 
153342c1b001SThomas Moestl 		sc->sc_txfree += txs->txs_ndescs;
153442c1b001SThomas Moestl 
1535305f2c06SThomas Moestl 		bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap,
153642c1b001SThomas Moestl 		    BUS_DMASYNC_POSTWRITE);
1537305f2c06SThomas Moestl 		bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap);
153842c1b001SThomas Moestl 		if (txs->txs_mbuf != NULL) {
153942c1b001SThomas Moestl 			m_freem(txs->txs_mbuf);
154042c1b001SThomas Moestl 			txs->txs_mbuf = NULL;
154142c1b001SThomas Moestl 		}
154242c1b001SThomas Moestl 
154342c1b001SThomas Moestl 		STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
154442c1b001SThomas Moestl 
154542c1b001SThomas Moestl 		ifp->if_opackets++;
1546336cca9eSBenno Rice 		progress = 1;
154742c1b001SThomas Moestl 	}
154842c1b001SThomas Moestl 
154918100346SThomas Moestl #ifdef GEM_DEBUG
155012fb0330SPyun YongHyeon 	CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x "
155142c1b001SThomas Moestl 	    "GEM_TX_DATA_PTR %llx "
155242c1b001SThomas Moestl 	    "GEM_TX_COMPLETION %x",
155312fb0330SPyun YongHyeon 	    __func__,
15541ed3fed7SMarius Strobl 	    bus_read_4(sc->sc_res[0], GEM_TX_STATE_MACHINE),
15551ed3fed7SMarius Strobl 	    ((long long) bus_read_4(sc->sc_res[0], GEM_TX_DATA_PTR_HI) << 32) |
15561ed3fed7SMarius Strobl 	    bus_read_4(sc->sc_res[0], GEM_TX_DATA_PTR_LO),
1557e1bb13cdSPoul-Henning Kamp 	    bus_read_4(sc->sc_res[0], GEM_TX_COMPLETION));
155818100346SThomas Moestl #endif
155942c1b001SThomas Moestl 
1560336cca9eSBenno Rice 	if (progress) {
1561336cca9eSBenno Rice 		if (sc->sc_txfree == GEM_NTXDESC - 1)
1562336cca9eSBenno Rice 			sc->sc_txwin = 0;
156342c1b001SThomas Moestl 
156413f4c340SRobert Watson 		/* Freed some descriptors, so reset IFF_DRV_OACTIVE and restart. */
156513f4c340SRobert Watson 		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
15668cb37876SMarius Strobl 		sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5;
156712fb0330SPyun YongHyeon 
156812fb0330SPyun YongHyeon 		if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
156912fb0330SPyun YongHyeon 		    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
157012fb0330SPyun YongHyeon 			gem_start_locked(ifp);
1571336cca9eSBenno Rice 	}
157242c1b001SThomas Moestl 
157318100346SThomas Moestl #ifdef GEM_DEBUG
157412fb0330SPyun YongHyeon 	CTR3(KTR_GEM, "%s: %s: watchdog %d",
157512fb0330SPyun YongHyeon 	    device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer);
157618100346SThomas Moestl #endif
157742c1b001SThomas Moestl }
157842c1b001SThomas Moestl 
1579c3d5598aSMarius Strobl #ifdef GEM_RINT_TIMEOUT
15800d80b9bdSThomas Moestl static void
15810d80b9bdSThomas Moestl gem_rint_timeout(arg)
15820d80b9bdSThomas Moestl 	void *arg;
15830d80b9bdSThomas Moestl {
15848cfaff7dSMarius Strobl 	struct gem_softc *sc = (struct gem_softc *)arg;
15850d80b9bdSThomas Moestl 
15861f317bf9SMarius Strobl 	GEM_LOCK_ASSERT(sc, MA_OWNED);
15878cfaff7dSMarius Strobl 	gem_rint(sc);
15880d80b9bdSThomas Moestl }
158911e3f060SJake Burkholder #endif
15900d80b9bdSThomas Moestl 
159142c1b001SThomas Moestl /*
159242c1b001SThomas Moestl  * Receive interrupt.
159342c1b001SThomas Moestl  */
159442c1b001SThomas Moestl static void
159542c1b001SThomas Moestl gem_rint(sc)
159642c1b001SThomas Moestl 	struct gem_softc *sc;
159742c1b001SThomas Moestl {
1598fc74a9f9SBrooks Davis 	struct ifnet *ifp = sc->sc_ifp;
159942c1b001SThomas Moestl 	struct mbuf *m;
160042c1b001SThomas Moestl 	u_int64_t rxstat;
1601336cca9eSBenno Rice 	u_int32_t rxcomp;
160242c1b001SThomas Moestl 
1603c3d5598aSMarius Strobl #ifdef GEM_RINT_TIMEOUT
16040d80b9bdSThomas Moestl 	callout_stop(&sc->sc_rx_ch);
1605c3d5598aSMarius Strobl #endif
160618100346SThomas Moestl #ifdef GEM_DEBUG
160712fb0330SPyun YongHyeon 	CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__);
160818100346SThomas Moestl #endif
1609336cca9eSBenno Rice 
1610336cca9eSBenno Rice 	/*
1611336cca9eSBenno Rice 	 * Read the completion register once.  This limits
1612336cca9eSBenno Rice 	 * how long the following loop can execute.
1613336cca9eSBenno Rice 	 */
1614e1bb13cdSPoul-Henning Kamp 	rxcomp = bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION);
1615336cca9eSBenno Rice 
161618100346SThomas Moestl #ifdef GEM_DEBUG
161712fb0330SPyun YongHyeon 	CTR3(KTR_GEM, "%s: sc->rxptr %d, complete %d",
161812fb0330SPyun YongHyeon 	    __func__, sc->sc_rxptr, rxcomp);
161918100346SThomas Moestl #endif
1620b2d59f42SThomas Moestl 	GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD);
16211ed3fed7SMarius Strobl 	for (; sc->sc_rxptr != rxcomp;) {
16221ed3fed7SMarius Strobl 		m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf;
16231ed3fed7SMarius Strobl 		rxstat = GEM_DMA_READ(sc,
16241ed3fed7SMarius Strobl 		    sc->sc_rxdescs[sc->sc_rxptr].gd_flags);
162542c1b001SThomas Moestl 
162642c1b001SThomas Moestl 		if (rxstat & GEM_RD_OWN) {
1627c3d5598aSMarius Strobl #ifdef GEM_RINT_TIMEOUT
162842c1b001SThomas Moestl 			/*
16290d80b9bdSThomas Moestl 			 * The descriptor is still marked as owned, although
16300d80b9bdSThomas Moestl 			 * it is supposed to have completed. This has been
16310d80b9bdSThomas Moestl 			 * observed on some machines. Just exiting here
16320d80b9bdSThomas Moestl 			 * might leave the packet sitting around until another
16330d80b9bdSThomas Moestl 			 * one arrives to trigger a new interrupt, which is
16340d80b9bdSThomas Moestl 			 * generally undesirable, so set up a timeout.
163542c1b001SThomas Moestl 			 */
16360d80b9bdSThomas Moestl 			callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS,
16370d80b9bdSThomas Moestl 			    gem_rint_timeout, sc);
1638336cca9eSBenno Rice #endif
16391ed3fed7SMarius Strobl 			m = NULL;
16401ed3fed7SMarius Strobl 			goto kickit;
164142c1b001SThomas Moestl 		}
164242c1b001SThomas Moestl 
164342c1b001SThomas Moestl 		if (rxstat & GEM_RD_BAD_CRC) {
1644336cca9eSBenno Rice 			ifp->if_ierrors++;
164542c1b001SThomas Moestl 			device_printf(sc->sc_dev, "receive error: CRC error\n");
16461ed3fed7SMarius Strobl 			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
16471ed3fed7SMarius Strobl 			m = NULL;
16481ed3fed7SMarius Strobl 			goto kickit;
164942c1b001SThomas Moestl 		}
165042c1b001SThomas Moestl 
165142c1b001SThomas Moestl #ifdef GEM_DEBUG
165242c1b001SThomas Moestl 		if (ifp->if_flags & IFF_DEBUG) {
16531ed3fed7SMarius Strobl 			printf("    rxsoft %p descriptor %d: ",
16541ed3fed7SMarius Strobl 			    &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr);
165542c1b001SThomas Moestl 			printf("gd_flags: 0x%016llx\t", (long long)
16561ed3fed7SMarius Strobl 			    GEM_DMA_READ(sc, sc->sc_rxdescs[
16571ed3fed7SMarius Strobl 			    sc->sc_rxptr].gd_flags));
165842c1b001SThomas Moestl 			printf("gd_addr: 0x%016llx\n", (long long)
16591ed3fed7SMarius Strobl 			    GEM_DMA_READ(sc, sc->sc_rxdescs[
16601ed3fed7SMarius Strobl 			    sc->sc_rxptr].gd_addr));
166142c1b001SThomas Moestl 		}
166242c1b001SThomas Moestl #endif
166342c1b001SThomas Moestl 
166442c1b001SThomas Moestl 		/*
166542c1b001SThomas Moestl 		 * Allocate a new mbuf cluster.  If that fails, we are
166642c1b001SThomas Moestl 		 * out of memory, and must drop the packet and recycle
166742c1b001SThomas Moestl 		 * the buffer that's already attached to this descriptor.
166842c1b001SThomas Moestl 		 */
16691ed3fed7SMarius Strobl 		if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) {
167042c1b001SThomas Moestl 			ifp->if_ierrors++;
16711ed3fed7SMarius Strobl 			GEM_INIT_RXDESC(sc, sc->sc_rxptr);
16721ed3fed7SMarius Strobl 			m = NULL;
16731ed3fed7SMarius Strobl 		}
16741ed3fed7SMarius Strobl 
16751ed3fed7SMarius Strobl kickit:
16761ed3fed7SMarius Strobl 		/*
16771ed3fed7SMarius Strobl 		 * Update the RX kick register. This register has to point
16781ed3fed7SMarius Strobl 		 * to the descriptor after the last valid one (before the
16791ed3fed7SMarius Strobl 		 * current batch) and must be incremented in multiples of
16801ed3fed7SMarius Strobl 		 * 4 (because the DMA engine fetches/updates descriptors
16811ed3fed7SMarius Strobl 		 * in batches of 4).
16821ed3fed7SMarius Strobl 		 */
16831ed3fed7SMarius Strobl 		sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr);
16841ed3fed7SMarius Strobl 		if ((sc->sc_rxptr % 4) == 0) {
16851ed3fed7SMarius Strobl 			GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE);
16861ed3fed7SMarius Strobl 			bus_write_4(sc->sc_res[0], GEM_RX_KICK,
16871ed3fed7SMarius Strobl 			    (sc->sc_rxptr + GEM_NRXDESC - 4) &
16881ed3fed7SMarius Strobl 			    GEM_NRXDESC_MASK);
16891ed3fed7SMarius Strobl 		}
16901ed3fed7SMarius Strobl 
16911ed3fed7SMarius Strobl 		if (m == NULL) {
16921ed3fed7SMarius Strobl 			if (rxstat & GEM_RD_OWN)
16931ed3fed7SMarius Strobl 				break;
169442c1b001SThomas Moestl 			continue;
169542c1b001SThomas Moestl 		}
169642c1b001SThomas Moestl 
16971ed3fed7SMarius Strobl 		ifp->if_ipackets++;
16981ed3fed7SMarius Strobl 		m->m_data += 2; /* We're already off by two */
169942c1b001SThomas Moestl 		m->m_pkthdr.rcvif = ifp;
17001ed3fed7SMarius Strobl 		m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat);
170112fb0330SPyun YongHyeon 
170212fb0330SPyun YongHyeon 		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
170312fb0330SPyun YongHyeon 			gem_rxcksum(m, rxstat);
170442c1b001SThomas Moestl 
170542c1b001SThomas Moestl 		/* Pass it on. */
17068cfaff7dSMarius Strobl 		GEM_UNLOCK(sc);
1707673d9191SSam Leffler 		(*ifp->if_input)(ifp, m);
17088cfaff7dSMarius Strobl 		GEM_LOCK(sc);
170942c1b001SThomas Moestl 	}
171042c1b001SThomas Moestl 
171118100346SThomas Moestl #ifdef GEM_DEBUG
171212fb0330SPyun YongHyeon 	CTR3(KTR_GEM, "%s: done sc->rxptr %d, complete %d", __func__,
1713e1bb13cdSPoul-Henning Kamp 		sc->sc_rxptr, bus_read_4(sc->sc_res[0], GEM_RX_COMPLETION));
171418100346SThomas Moestl #endif
171542c1b001SThomas Moestl }
171642c1b001SThomas Moestl 
171742c1b001SThomas Moestl /*
171842c1b001SThomas Moestl  * gem_add_rxbuf:
171942c1b001SThomas Moestl  *
172042c1b001SThomas Moestl  *	Add a receive buffer to the indicated descriptor.
172142c1b001SThomas Moestl  */
172242c1b001SThomas Moestl static int
172342c1b001SThomas Moestl gem_add_rxbuf(sc, idx)
172442c1b001SThomas Moestl 	struct gem_softc *sc;
172542c1b001SThomas Moestl 	int idx;
172642c1b001SThomas Moestl {
172742c1b001SThomas Moestl 	struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx];
172842c1b001SThomas Moestl 	struct mbuf *m;
1729c3d5598aSMarius Strobl 	bus_dma_segment_t segs[1];
1730c3d5598aSMarius Strobl 	int error, nsegs;
173142c1b001SThomas Moestl 
1732a163d034SWarner Losh 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
173342c1b001SThomas Moestl 	if (m == NULL)
173442c1b001SThomas Moestl 		return (ENOBUFS);
1735305f2c06SThomas Moestl 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
173642c1b001SThomas Moestl 
173742c1b001SThomas Moestl #ifdef GEM_DEBUG
173842c1b001SThomas Moestl 	/* bzero the packet to check dma */
173942c1b001SThomas Moestl 	memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size);
174042c1b001SThomas Moestl #endif
174142c1b001SThomas Moestl 
1742b2d59f42SThomas Moestl 	if (rxs->rxs_mbuf != NULL) {
1743b2d59f42SThomas Moestl 		bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap,
1744b2d59f42SThomas Moestl 		    BUS_DMASYNC_POSTREAD);
1745305f2c06SThomas Moestl 		bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap);
1746b2d59f42SThomas Moestl 	}
174742c1b001SThomas Moestl 
1748c3d5598aSMarius Strobl 	error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap,
1749c3d5598aSMarius Strobl 	    m, segs, &nsegs, BUS_DMA_NOWAIT);
1750c3d5598aSMarius Strobl 	/* If nsegs is wrong then the stack is corrupt. */
1751c3d5598aSMarius Strobl 	KASSERT(nsegs == 1, ("Too many segments returned!"));
1752c3d5598aSMarius Strobl 	if (error != 0) {
175342c1b001SThomas Moestl 		device_printf(sc->sc_dev, "can't load rx DMA map %d, error = "
175442c1b001SThomas Moestl 		    "%d\n", idx, error);
1755c3d5598aSMarius Strobl 		m_freem(m);
17561ed3fed7SMarius Strobl 		return (error);
175742c1b001SThomas Moestl 	}
17581ed3fed7SMarius Strobl 	rxs->rxs_mbuf = m;
1759c3d5598aSMarius Strobl 	rxs->rxs_paddr = segs[0].ds_addr;
176042c1b001SThomas Moestl 
1761305f2c06SThomas Moestl 	bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD);
176242c1b001SThomas Moestl 
176342c1b001SThomas Moestl 	GEM_INIT_RXDESC(sc, idx);
176442c1b001SThomas Moestl 
176542c1b001SThomas Moestl 	return (0);
176642c1b001SThomas Moestl }
176742c1b001SThomas Moestl 
176842c1b001SThomas Moestl static void
176942c1b001SThomas Moestl gem_eint(sc, status)
177042c1b001SThomas Moestl 	struct gem_softc *sc;
177142c1b001SThomas Moestl 	u_int status;
177242c1b001SThomas Moestl {
177342c1b001SThomas Moestl 
17741ed3fed7SMarius Strobl 	sc->sc_ifp->if_ierrors++;
17751ed3fed7SMarius Strobl 	if ((status & GEM_INTR_RX_TAG_ERR) != 0) {
17761ed3fed7SMarius Strobl 		gem_reset_rxdma(sc);
177742c1b001SThomas Moestl 		return;
177842c1b001SThomas Moestl 	}
177942c1b001SThomas Moestl 
17801ed3fed7SMarius Strobl 	device_printf(sc->sc_dev, "%s: status=%x\n", __func__, status);
178142c1b001SThomas Moestl }
178242c1b001SThomas Moestl 
178342c1b001SThomas Moestl void
178442c1b001SThomas Moestl gem_intr(v)
178542c1b001SThomas Moestl 	void *v;
178642c1b001SThomas Moestl {
178742c1b001SThomas Moestl 	struct gem_softc *sc = (struct gem_softc *)v;
17881ed3fed7SMarius Strobl 	uint32_t status, status2;
178942c1b001SThomas Moestl 
17908cfaff7dSMarius Strobl 	GEM_LOCK(sc);
1791e1bb13cdSPoul-Henning Kamp 	status = bus_read_4(sc->sc_res[0], GEM_STATUS);
17921ed3fed7SMarius Strobl 
179318100346SThomas Moestl #ifdef GEM_DEBUG
179412fb0330SPyun YongHyeon 	CTR4(KTR_GEM, "%s: %s: cplt %x, status %x",
179512fb0330SPyun YongHyeon 		device_get_name(sc->sc_dev), __func__, (status>>19),
179642c1b001SThomas Moestl 		(u_int)status);
17971ed3fed7SMarius Strobl 
17981ed3fed7SMarius Strobl 	/*
17991ed3fed7SMarius Strobl 	 * PCS interrupts must be cleared, otherwise no traffic is passed!
18001ed3fed7SMarius Strobl 	 */
18011ed3fed7SMarius Strobl 	if ((status & GEM_INTR_PCS) != 0) {
18021ed3fed7SMarius Strobl 		status2 = bus_read_4(sc->sc_res[0], GEM_MII_INTERRUP_STATUS) |
18031ed3fed7SMarius Strobl 		    bus_read_4(sc->sc_res[0], GEM_MII_INTERRUP_STATUS);
18041ed3fed7SMarius Strobl 		if ((status2 & GEM_MII_INTERRUP_LINK) != 0)
18051ed3fed7SMarius Strobl 			device_printf(sc->sc_dev,
18061ed3fed7SMarius Strobl 			    "%s: PCS link status changed\n", __func__);
18071ed3fed7SMarius Strobl 	}
18081ed3fed7SMarius Strobl 	if ((status & GEM_MAC_CONTROL_STATUS) != 0) {
18091ed3fed7SMarius Strobl 		status2 = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_STATUS);
18101ed3fed7SMarius Strobl 		if ((status2 & GEM_MAC_PAUSED) != 0)
18111ed3fed7SMarius Strobl 			device_printf(sc->sc_dev,
18121ed3fed7SMarius Strobl 			    "%s: PAUSE received (PAUSE time %d slots)\n",
18131ed3fed7SMarius Strobl 			    __func__, GEM_MAC_PAUSE_TIME(status2));
18141ed3fed7SMarius Strobl 		if ((status2 & GEM_MAC_PAUSE) != 0)
18151ed3fed7SMarius Strobl 			device_printf(sc->sc_dev,
18161ed3fed7SMarius Strobl 			    "%s: transited to PAUSE state\n", __func__);
18171ed3fed7SMarius Strobl 		if ((status2 & GEM_MAC_RESUME) != 0)
18181ed3fed7SMarius Strobl 			device_printf(sc->sc_dev,
18191ed3fed7SMarius Strobl 			    "%s: transited to non-PAUSE state\n", __func__);
18201ed3fed7SMarius Strobl 	}
18211ed3fed7SMarius Strobl 	if ((status & GEM_INTR_MIF) != 0)
18221ed3fed7SMarius Strobl 		device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__);
182318100346SThomas Moestl #endif
182442c1b001SThomas Moestl 
18251ed3fed7SMarius Strobl 	if ((status &
18261ed3fed7SMarius Strobl 	    (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0)
182742c1b001SThomas Moestl 		gem_eint(sc, status);
182842c1b001SThomas Moestl 
182942c1b001SThomas Moestl 	if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0)
183042c1b001SThomas Moestl 		gem_rint(sc);
183142c1b001SThomas Moestl 
18321ed3fed7SMarius Strobl 	if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0)
18331ed3fed7SMarius Strobl 		gem_tint(sc);
18341ed3fed7SMarius Strobl 
183542c1b001SThomas Moestl 	if (status & GEM_INTR_TX_MAC) {
18361ed3fed7SMarius Strobl 		status2 = bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS);
18371ed3fed7SMarius Strobl 		if (status2 & ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP))
1838336cca9eSBenno Rice 			device_printf(sc->sc_dev, "MAC tx fault, status %x\n",
18391ed3fed7SMarius Strobl 			    status2);
18401ed3fed7SMarius Strobl 		if (status2 & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG))
18418cfaff7dSMarius Strobl 			gem_init_locked(sc);
184242c1b001SThomas Moestl 	}
184342c1b001SThomas Moestl 	if (status & GEM_INTR_RX_MAC) {
18441ed3fed7SMarius Strobl 		status2 = bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS);
184500d12766SMarius Strobl 		/*
18461ed3fed7SMarius Strobl 		 * At least with GEM_SUN_GEM and some GEM_SUN_ERI
18471ed3fed7SMarius Strobl 		 * revisions GEM_MAC_RX_OVERFLOW happens often due to a
18481ed3fed7SMarius Strobl 		 * silicon bug, so handle it silently. Moreover, it's
18491ed3fed7SMarius Strobl 		 * likely that the receiver has hung so we reset it.
185000d12766SMarius Strobl 		 */
18511ed3fed7SMarius Strobl 		if (status2 & GEM_MAC_RX_OVERFLOW) {
18521ed3fed7SMarius Strobl 			sc->sc_ifp->if_ierrors++;
18531ed3fed7SMarius Strobl 			gem_reset_rxdma(sc);
18541ed3fed7SMarius Strobl 		} else if (status2 & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT))
1855336cca9eSBenno Rice 			device_printf(sc->sc_dev, "MAC rx fault, status %x\n",
18561ed3fed7SMarius Strobl 			    status2);
185742c1b001SThomas Moestl 	}
18588cfaff7dSMarius Strobl 	GEM_UNLOCK(sc);
185942c1b001SThomas Moestl }
186042c1b001SThomas Moestl 
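/*
 * Watchdog for pending transmissions: return 0 as long as the timer is
 * not armed or has not yet expired; otherwise log a device timeout,
 * reinitialize the chip to get things going again and return EJUSTRETURN.
 */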
18618cb37876SMarius Strobl static int
18628cb37876SMarius Strobl gem_watchdog(sc)
18638cb37876SMarius Strobl 	struct gem_softc *sc;
186442c1b001SThomas Moestl {
186542c1b001SThomas Moestl 
18668cb37876SMarius Strobl 	GEM_LOCK_ASSERT(sc, MA_OWNED);
18678cb37876SMarius Strobl 
186818100346SThomas Moestl #ifdef GEM_DEBUG
186912fb0330SPyun YongHyeon 	CTR4(KTR_GEM, "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x "
187012fb0330SPyun YongHyeon 		"GEM_MAC_RX_CONFIG %x", __func__,
1871e1bb13cdSPoul-Henning Kamp 		bus_read_4(sc->sc_res[0], GEM_RX_CONFIG),
1872e1bb13cdSPoul-Henning Kamp 		bus_read_4(sc->sc_res[0], GEM_MAC_RX_STATUS),
1873e1bb13cdSPoul-Henning Kamp 		bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG));
187412fb0330SPyun YongHyeon 	CTR4(KTR_GEM, "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x "
187512fb0330SPyun YongHyeon 		"GEM_MAC_TX_CONFIG %x", __func__,
1876e1bb13cdSPoul-Henning Kamp 		bus_read_4(sc->sc_res[0], GEM_TX_CONFIG),
1877e1bb13cdSPoul-Henning Kamp 		bus_read_4(sc->sc_res[0], GEM_MAC_TX_STATUS),
1878e1bb13cdSPoul-Henning Kamp 		bus_read_4(sc->sc_res[0], GEM_MAC_TX_CONFIG));
187918100346SThomas Moestl #endif
188042c1b001SThomas Moestl 
18818cb37876SMarius Strobl 	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
18828cb37876SMarius Strobl 		return (0);
18838cb37876SMarius Strobl 
18841ed3fed7SMarius Strobl 	if ((sc->sc_flags & GEM_LINK) != 0)
188542c1b001SThomas Moestl 		device_printf(sc->sc_dev, "device timeout\n");
18861ed3fed7SMarius Strobl 	else if (bootverbose)
18871ed3fed7SMarius Strobl 		device_printf(sc->sc_dev, "device timeout (no link)\n");
18888cb37876SMarius Strobl 	++sc->sc_ifp->if_oerrors;
188942c1b001SThomas Moestl 
189042c1b001SThomas Moestl 	/* Try to get more packets going. */
18918cfaff7dSMarius Strobl 	gem_init_locked(sc);
18928cb37876SMarius Strobl 	return (EJUSTRETURN);
189342c1b001SThomas Moestl }
189442c1b001SThomas Moestl 
189542c1b001SThomas Moestl /*
189642c1b001SThomas Moestl  * Initialize the MII Management Interface
189742c1b001SThomas Moestl  */
189842c1b001SThomas Moestl static void
189942c1b001SThomas Moestl gem_mifinit(sc)
190042c1b001SThomas Moestl 	struct gem_softc *sc;
190142c1b001SThomas Moestl {
190242c1b001SThomas Moestl 
190342c1b001SThomas Moestl 	/* Configure the MIF in frame mode */
19041ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_MIF_CONFIG, bus_read_4(sc->sc_res[0],
19051ed3fed7SMarius Strobl 	    GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA);
190642c1b001SThomas Moestl }
190742c1b001SThomas Moestl 
190842c1b001SThomas Moestl /*
190942c1b001SThomas Moestl  * MII interface
191042c1b001SThomas Moestl  *
191142c1b001SThomas Moestl  * The GEM MII interface supports at least three different operating modes:
191242c1b001SThomas Moestl  *
191342c1b001SThomas Moestl  * Bitbang mode is implemented using data, clock and output enable registers.
191442c1b001SThomas Moestl  *
191542c1b001SThomas Moestl  * Frame mode is implemented by loading a complete frame into the frame
191642c1b001SThomas Moestl  * register and polling the valid bit for completion.
191742c1b001SThomas Moestl  *
191842c1b001SThomas Moestl  * Polling mode uses the frame register but completion is indicated by
191942c1b001SThomas Moestl  * an interrupt.
192042c1b001SThomas Moestl  *
192142c1b001SThomas Moestl  */
192242c1b001SThomas Moestl int
192342c1b001SThomas Moestl gem_mii_readreg(dev, phy, reg)
192442c1b001SThomas Moestl 	device_t dev;
192542c1b001SThomas Moestl 	int phy, reg;
192642c1b001SThomas Moestl {
192742c1b001SThomas Moestl 	struct gem_softc *sc = device_get_softc(dev);
192842c1b001SThomas Moestl 	int n;
192942c1b001SThomas Moestl 	u_int32_t v;
193042c1b001SThomas Moestl 
193142c1b001SThomas Moestl #ifdef GEM_DEBUG_PHY
19321ed3fed7SMarius Strobl 	printf("%s: phy %d reg %d\n", __func__, phy, reg);
193342c1b001SThomas Moestl #endif
193442c1b001SThomas Moestl 
19351ed3fed7SMarius Strobl 	if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
19361ed3fed7SMarius Strobl 		return (0);
19371ed3fed7SMarius Strobl 
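	/*
	 * In SERDES mode there is no external PHY to talk to; map the
	 * generic MII registers onto the corresponding on-chip PCS
	 * registers instead.
	 */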
19381ed3fed7SMarius Strobl 	if ((sc->sc_flags & GEM_SERDES) != 0) {
19391ed3fed7SMarius Strobl 		switch (reg) {
19401ed3fed7SMarius Strobl 		case MII_BMCR:
19411ed3fed7SMarius Strobl 			reg = GEM_MII_CONTROL;
19421ed3fed7SMarius Strobl 			break;
19431ed3fed7SMarius Strobl 		case MII_BMSR:
19441ed3fed7SMarius Strobl 			reg = GEM_MII_STATUS;
19451ed3fed7SMarius Strobl 			break;
19461ed3fed7SMarius Strobl 		case MII_PHYIDR1:
19471ed3fed7SMarius Strobl 		case MII_PHYIDR2:
19481ed3fed7SMarius Strobl 			return (0);
19491ed3fed7SMarius Strobl 		case MII_ANAR:
19501ed3fed7SMarius Strobl 			reg = GEM_MII_ANAR;
19511ed3fed7SMarius Strobl 			break;
19521ed3fed7SMarius Strobl 		case MII_ANLPAR:
19531ed3fed7SMarius Strobl 			reg = GEM_MII_ANLPAR;
19541ed3fed7SMarius Strobl 			break;
19551ed3fed7SMarius Strobl 		case MII_EXTSR:
19561ed3fed7SMarius Strobl 			return (EXTSR_1000XFDX | EXTSR_1000XHDX);
19571ed3fed7SMarius Strobl 		default:
19581ed3fed7SMarius Strobl 			device_printf(sc->sc_dev,
19591ed3fed7SMarius Strobl 			    "%s: unhandled register %d\n", __func__, reg);
19601ed3fed7SMarius Strobl 			return (0);
19611ed3fed7SMarius Strobl 		}
19621ed3fed7SMarius Strobl 		return (bus_read_4(sc->sc_res[0], reg));
19631ed3fed7SMarius Strobl 	}
196442c1b001SThomas Moestl 
196542c1b001SThomas Moestl 	/* Construct the frame command */
19661ed3fed7SMarius Strobl 	v = GEM_MIF_FRAME_READ |
19671ed3fed7SMarius Strobl 	    (phy << GEM_MIF_PHY_SHIFT) |
19681ed3fed7SMarius Strobl 	    (reg << GEM_MIF_REG_SHIFT);
196942c1b001SThomas Moestl 
1970e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v);
197142c1b001SThomas Moestl 	for (n = 0; n < 100; n++) {
197242c1b001SThomas Moestl 		DELAY(1);
1973e1bb13cdSPoul-Henning Kamp 		v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME);
19741f317bf9SMarius Strobl 		if (v & GEM_MIF_FRAME_TA0)
197542c1b001SThomas Moestl 			return (v & GEM_MIF_FRAME_DATA);
197642c1b001SThomas Moestl 	}
197742c1b001SThomas Moestl 
197842c1b001SThomas Moestl 	device_printf(sc->sc_dev, "mii_read timeout\n");
197942c1b001SThomas Moestl 	return (0);
198042c1b001SThomas Moestl }
198142c1b001SThomas Moestl 
198242c1b001SThomas Moestl int
198342c1b001SThomas Moestl gem_mii_writereg(dev, phy, reg, val)
198442c1b001SThomas Moestl 	device_t dev;
198542c1b001SThomas Moestl 	int phy, reg, val;
198642c1b001SThomas Moestl {
198742c1b001SThomas Moestl 	struct gem_softc *sc = device_get_softc(dev);
198842c1b001SThomas Moestl 	int n;
198942c1b001SThomas Moestl 	u_int32_t v;
199042c1b001SThomas Moestl 
199142c1b001SThomas Moestl #ifdef GEM_DEBUG_PHY
19921ed3fed7SMarius Strobl 	printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val);
199342c1b001SThomas Moestl #endif
199442c1b001SThomas Moestl 
19951ed3fed7SMarius Strobl 	if (sc->sc_phyad != -1 && phy != sc->sc_phyad)
19961ed3fed7SMarius Strobl 		return (0);
19971ed3fed7SMarius Strobl 
19981ed3fed7SMarius Strobl 	if ((sc->sc_flags & GEM_SERDES) != 0) {
19991ed3fed7SMarius Strobl 		switch (reg) {
20001ed3fed7SMarius Strobl 		case MII_BMCR:
20011ed3fed7SMarius Strobl 			reg = GEM_MII_CONTROL;
20021ed3fed7SMarius Strobl 			break;
20031ed3fed7SMarius Strobl 		case MII_BMSR:
20041ed3fed7SMarius Strobl 			reg = GEM_MII_STATUS;
20051ed3fed7SMarius Strobl 			break;
20061ed3fed7SMarius Strobl 		case MII_ANAR:
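			/*
			 * Changing the advertised capabilities requires
			 * reprogramming the PCS: disable it, update the
			 * advertisement register, reconfigure the serial
			 * link and enable it again.
			 */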
20071ed3fed7SMarius Strobl 			bus_write_4(sc->sc_res[0], GEM_MII_CONFIG, 0);
20081ed3fed7SMarius Strobl 			bus_barrier(sc->sc_res[0], GEM_MII_CONFIG, 4,
20091ed3fed7SMarius Strobl 			    BUS_SPACE_BARRIER_WRITE);
20101ed3fed7SMarius Strobl 			bus_write_4(sc->sc_res[0], GEM_MII_ANAR, val);
20111ed3fed7SMarius Strobl 			bus_write_4(sc->sc_res[0], GEM_MII_SLINK_CONTROL,
20121ed3fed7SMarius Strobl 			    GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D);
20131ed3fed7SMarius Strobl 			bus_write_4(sc->sc_res[0], GEM_MII_CONFIG,
20141ed3fed7SMarius Strobl 			    GEM_MII_CONFIG_ENABLE);
20151ed3fed7SMarius Strobl 			return (0);
20161ed3fed7SMarius Strobl 		case MII_ANLPAR:
20171ed3fed7SMarius Strobl 			reg = GEM_MII_ANLPAR;
20181ed3fed7SMarius Strobl 			break;
20191ed3fed7SMarius Strobl 		default:
20201ed3fed7SMarius Strobl 			device_printf(sc->sc_dev,
20211ed3fed7SMarius Strobl 			    "%s: unhandled register %d\n", __func__, reg);
20221ed3fed7SMarius Strobl 			return (0);
20231ed3fed7SMarius Strobl 		}
20241ed3fed7SMarius Strobl 		bus_write_4(sc->sc_res[0], reg, val);
20251ed3fed7SMarius Strobl 		return (0);
20261ed3fed7SMarius Strobl 	}
20271ed3fed7SMarius Strobl 
202842c1b001SThomas Moestl 	/* Construct the frame command */
202942c1b001SThomas Moestl 	v = GEM_MIF_FRAME_WRITE |
203042c1b001SThomas Moestl 	    (phy << GEM_MIF_PHY_SHIFT) |
203142c1b001SThomas Moestl 	    (reg << GEM_MIF_REG_SHIFT) |
203242c1b001SThomas Moestl 	    (val & GEM_MIF_FRAME_DATA);
203342c1b001SThomas Moestl 
2034e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MIF_FRAME, v);
203542c1b001SThomas Moestl 	for (n = 0; n < 100; n++) {
203642c1b001SThomas Moestl 		DELAY(1);
2037e1bb13cdSPoul-Henning Kamp 		v = bus_read_4(sc->sc_res[0], GEM_MIF_FRAME);
20381f317bf9SMarius Strobl 		if (v & GEM_MIF_FRAME_TA0)
203942c1b001SThomas Moestl 			return (1);
204042c1b001SThomas Moestl 	}
204142c1b001SThomas Moestl 
204242c1b001SThomas Moestl 	device_printf(sc->sc_dev, "mii_write timeout\n");
204342c1b001SThomas Moestl 	return (0);
204442c1b001SThomas Moestl }
204542c1b001SThomas Moestl 
204642c1b001SThomas Moestl void
204742c1b001SThomas Moestl gem_mii_statchg(dev)
204842c1b001SThomas Moestl 	device_t dev;
204942c1b001SThomas Moestl {
205042c1b001SThomas Moestl 	struct gem_softc *sc = device_get_softc(dev);
20511ed3fed7SMarius Strobl 	int gigabit;
20521ed3fed7SMarius Strobl 	uint32_t rxcfg, txcfg, v;
205342c1b001SThomas Moestl 
205442c1b001SThomas Moestl #ifdef GEM_DEBUG
20551ed3fed7SMarius Strobl 	if ((sc->sc_ifflags & IFF_DEBUG) != 0)
20561ed3fed7SMarius Strobl 		device_printf(sc->sc_dev, "%s: status change: PHY = %d\n",
20571ed3fed7SMarius Strobl 		    __func__, sc->sc_phyad);
205842c1b001SThomas Moestl #endif
205942c1b001SThomas Moestl 
20601ed3fed7SMarius Strobl 	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
20611ed3fed7SMarius Strobl 	    IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
20621ed3fed7SMarius Strobl 		sc->sc_flags |= GEM_LINK;
20631ed3fed7SMarius Strobl 	else
20641ed3fed7SMarius Strobl 		sc->sc_flags &= ~GEM_LINK;
20651ed3fed7SMarius Strobl 
20661ed3fed7SMarius Strobl 	switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
20671ed3fed7SMarius Strobl 	case IFM_1000_SX:
20681ed3fed7SMarius Strobl 	case IFM_1000_LX:
20691ed3fed7SMarius Strobl 	case IFM_1000_CX:
20701ed3fed7SMarius Strobl 	case IFM_1000_T:
20711ed3fed7SMarius Strobl 		gigabit = 1;
20721ed3fed7SMarius Strobl 		break;
20731ed3fed7SMarius Strobl 	default:
20741ed3fed7SMarius Strobl 		gigabit = 0;
207542c1b001SThomas Moestl 	}
20761ed3fed7SMarius Strobl 
20771ed3fed7SMarius Strobl 	/*
20781ed3fed7SMarius Strobl 	 * The configuration done here corresponds to the steps F) and
20791ed3fed7SMarius Strobl 	 * G) and as far as enabling of RX and TX MAC goes also step H)
20801ed3fed7SMarius Strobl 	 * of the initialization sequence outlined in section 3.2.1 of
20811ed3fed7SMarius Strobl 	 * the GEM Gigabit Ethernet ASIC Specification.
20821ed3fed7SMarius Strobl 	 */
20831ed3fed7SMarius Strobl 
20841ed3fed7SMarius Strobl 	rxcfg = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
20851ed3fed7SMarius Strobl 	rxcfg &= ~(GEM_MAC_RX_CARR_EXTEND | GEM_MAC_RX_ENABLE);
20861ed3fed7SMarius Strobl 	txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT;
20871ed3fed7SMarius Strobl 	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
20881ed3fed7SMarius Strobl 		txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS;
20891ed3fed7SMarius Strobl 	else if (gigabit != 0) {
20901ed3fed7SMarius Strobl 		rxcfg |= GEM_MAC_RX_CARR_EXTEND;
20911ed3fed7SMarius Strobl 		txcfg |= GEM_MAC_TX_CARR_EXTEND;
20921ed3fed7SMarius Strobl 	}
20931ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, 0);
20941ed3fed7SMarius Strobl 	bus_barrier(sc->sc_res[0], GEM_MAC_TX_CONFIG, 4,
20951ed3fed7SMarius Strobl 	    BUS_SPACE_BARRIER_WRITE);
20961ed3fed7SMarius Strobl 	if (!gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0))
20971ed3fed7SMarius Strobl 		device_printf(sc->sc_dev, "cannot disable TX MAC\n");
20981ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG, txcfg);
20991ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, 0);
21001ed3fed7SMarius Strobl 	bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
21011ed3fed7SMarius Strobl 	    BUS_SPACE_BARRIER_WRITE);
21021ed3fed7SMarius Strobl 	if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0))
21031ed3fed7SMarius Strobl 		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
21041ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, rxcfg);
21051ed3fed7SMarius Strobl 
21061ed3fed7SMarius Strobl 	v = bus_read_4(sc->sc_res[0], GEM_MAC_CONTROL_CONFIG) &
21071ed3fed7SMarius Strobl 	    ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE);
21081ed3fed7SMarius Strobl #ifdef notyet
21091ed3fed7SMarius Strobl 	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
21101ed3fed7SMarius Strobl 		v |= GEM_MAC_CC_RX_PAUSE;
21111ed3fed7SMarius Strobl 	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
21121ed3fed7SMarius Strobl 		v |= GEM_MAC_CC_TX_PAUSE;
21131ed3fed7SMarius Strobl #endif
21141ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_MAC_CONTROL_CONFIG, v);
21151ed3fed7SMarius Strobl 
21161ed3fed7SMarius Strobl 	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 &&
21171ed3fed7SMarius Strobl 	    gigabit != 0)
21181ed3fed7SMarius Strobl 		bus_write_4(sc->sc_res[0], GEM_MAC_SLOT_TIME,
21191ed3fed7SMarius Strobl 		    GEM_MAC_SLOT_TIME_CARR_EXTEND);
21201ed3fed7SMarius Strobl 	else
21211ed3fed7SMarius Strobl 		bus_write_4(sc->sc_res[0], GEM_MAC_SLOT_TIME,
21221ed3fed7SMarius Strobl 		    GEM_MAC_SLOT_TIME_NORMAL);
212342c1b001SThomas Moestl 
212442c1b001SThomas Moestl 	/* XIF Configuration */
212542c1b001SThomas Moestl 	v = GEM_MAC_XIF_LINK_LED;
212642c1b001SThomas Moestl 	v |= GEM_MAC_XIF_TX_MII_ENA;
21271ed3fed7SMarius Strobl 	if ((sc->sc_flags & GEM_SERDES) == 0) {
21281ed3fed7SMarius Strobl 		if ((bus_read_4(sc->sc_res[0], GEM_MIF_CONFIG) &
21291ed3fed7SMarius Strobl 		    GEM_MIF_CONFIG_PHY_SEL) != 0 &&
21301ed3fed7SMarius Strobl 		    (IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0)
213142c1b001SThomas Moestl 			/* External MII needs echo disable if half duplex. */
213242c1b001SThomas Moestl 			v |= GEM_MAC_XIF_ECHO_DISABL;
2133336cca9eSBenno Rice 		else
21341ed3fed7SMarius Strobl 			/*
21351ed3fed7SMarius Strobl 			 * Internal MII needs buffer enable.
21361ed3fed7SMarius Strobl 			 * XXX buffer enable only makes sense for an
21371ed3fed7SMarius Strobl 			 * external PHY.
21381ed3fed7SMarius Strobl 			 */
213942c1b001SThomas Moestl 			v |= GEM_MAC_XIF_MII_BUF_ENA;
214042c1b001SThomas Moestl 	}
21411ed3fed7SMarius Strobl 	if (gigabit != 0)
21421ed3fed7SMarius Strobl 		v |= GEM_MAC_XIF_GMII_MODE;
21431ed3fed7SMarius Strobl 	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
21441ed3fed7SMarius Strobl 		v |= GEM_MAC_XIF_FDPLX_LED;
2145e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_XIF_CONFIG, v);
21461ed3fed7SMarius Strobl 
21471ed3fed7SMarius Strobl 	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
21481ed3fed7SMarius Strobl 	    (sc->sc_flags & GEM_LINK) != 0) {
21491ed3fed7SMarius Strobl 		bus_write_4(sc->sc_res[0], GEM_MAC_TX_CONFIG,
21501ed3fed7SMarius Strobl 		    txcfg | GEM_MAC_TX_ENABLE);
21511ed3fed7SMarius Strobl 		bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG,
21521ed3fed7SMarius Strobl 		    rxcfg | GEM_MAC_RX_ENABLE);
21531ed3fed7SMarius Strobl 	}
215442c1b001SThomas Moestl }
215542c1b001SThomas Moestl 
215642c1b001SThomas Moestl int
215742c1b001SThomas Moestl gem_mediachange(struct ifnet *ifp)
215942c1b001SThomas Moestl {
216042c1b001SThomas Moestl 	struct gem_softc *sc = ifp->if_softc;
21611f317bf9SMarius Strobl 	int error;
216242c1b001SThomas Moestl 
216342c1b001SThomas Moestl 	/* XXX Add support for serial media. */
216442c1b001SThomas Moestl 
21651f317bf9SMarius Strobl 	GEM_LOCK(sc);
21661f317bf9SMarius Strobl 	error = mii_mediachg(sc->sc_mii);
21671f317bf9SMarius Strobl 	GEM_UNLOCK(sc);
21681f317bf9SMarius Strobl 	return (error);
216942c1b001SThomas Moestl }
217042c1b001SThomas Moestl 
217142c1b001SThomas Moestl void
217242c1b001SThomas Moestl gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
217542c1b001SThomas Moestl {
217642c1b001SThomas Moestl 	struct gem_softc *sc = ifp->if_softc;
217742c1b001SThomas Moestl 
21788cfaff7dSMarius Strobl 	GEM_LOCK(sc);
21798cfaff7dSMarius Strobl 	if ((ifp->if_flags & IFF_UP) == 0) {
21808cfaff7dSMarius Strobl 		GEM_UNLOCK(sc);
218142c1b001SThomas Moestl 		return;
21828cfaff7dSMarius Strobl 	}
218342c1b001SThomas Moestl 
218442c1b001SThomas Moestl 	mii_pollstat(sc->sc_mii);
218542c1b001SThomas Moestl 	ifmr->ifm_active = sc->sc_mii->mii_media_active;
218642c1b001SThomas Moestl 	ifmr->ifm_status = sc->sc_mii->mii_media_status;
21878cfaff7dSMarius Strobl 	GEM_UNLOCK(sc);
218842c1b001SThomas Moestl }
218942c1b001SThomas Moestl 
219042c1b001SThomas Moestl /*
219142c1b001SThomas Moestl  * Process an ioctl request.
219242c1b001SThomas Moestl  */
219342c1b001SThomas Moestl static int
219442c1b001SThomas Moestl gem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
219842c1b001SThomas Moestl {
219942c1b001SThomas Moestl 	struct gem_softc *sc = ifp->if_softc;
220042c1b001SThomas Moestl 	struct ifreq *ifr = (struct ifreq *)data;
22018cfaff7dSMarius Strobl 	int error = 0;
22028cfaff7dSMarius Strobl 
220342c1b001SThomas Moestl 	switch (cmd) {
220442c1b001SThomas Moestl 	case SIOCSIFFLAGS:
22051f317bf9SMarius Strobl 		GEM_LOCK(sc);
220642c1b001SThomas Moestl 		if (ifp->if_flags & IFF_UP) {
22071ed3fed7SMarius Strobl 			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
22081ed3fed7SMarius Strobl 			    ((ifp->if_flags ^ sc->sc_ifflags) &
22091ed3fed7SMarius Strobl 			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
221042c1b001SThomas Moestl 				gem_setladrf(sc);
221142c1b001SThomas Moestl 			else
22128cfaff7dSMarius Strobl 				gem_init_locked(sc);
221342c1b001SThomas Moestl 		} else {
221413f4c340SRobert Watson 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
221542c1b001SThomas Moestl 				gem_stop(ifp, 0);
221642c1b001SThomas Moestl 		}
221712fb0330SPyun YongHyeon 		if ((ifp->if_flags & IFF_LINK0) != 0)
221812fb0330SPyun YongHyeon 			sc->sc_csum_features |= CSUM_UDP;
221912fb0330SPyun YongHyeon 		else
222012fb0330SPyun YongHyeon 			sc->sc_csum_features &= ~CSUM_UDP;
222112fb0330SPyun YongHyeon 		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
222212fb0330SPyun YongHyeon 			ifp->if_hwassist = sc->sc_csum_features;
2223336cca9eSBenno Rice 		sc->sc_ifflags = ifp->if_flags;
22241f317bf9SMarius Strobl 		GEM_UNLOCK(sc);
222542c1b001SThomas Moestl 		break;
222642c1b001SThomas Moestl 	case SIOCADDMULTI:
222742c1b001SThomas Moestl 	case SIOCDELMULTI:
22281f317bf9SMarius Strobl 		GEM_LOCK(sc);
222942c1b001SThomas Moestl 		gem_setladrf(sc);
22301f317bf9SMarius Strobl 		GEM_UNLOCK(sc);
223142c1b001SThomas Moestl 		break;
223242c1b001SThomas Moestl 	case SIOCGIFMEDIA:
223342c1b001SThomas Moestl 	case SIOCSIFMEDIA:
223442c1b001SThomas Moestl 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
223542c1b001SThomas Moestl 		break;
223612fb0330SPyun YongHyeon 	case SIOCSIFCAP:
223712fb0330SPyun YongHyeon 		GEM_LOCK(sc);
223812fb0330SPyun YongHyeon 		ifp->if_capenable = ifr->ifr_reqcap;
223912fb0330SPyun YongHyeon 		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
224012fb0330SPyun YongHyeon 			ifp->if_hwassist = sc->sc_csum_features;
224112fb0330SPyun YongHyeon 		else
224212fb0330SPyun YongHyeon 			ifp->if_hwassist = 0;
224312fb0330SPyun YongHyeon 		GEM_UNLOCK(sc);
224412fb0330SPyun YongHyeon 		break;
224542c1b001SThomas Moestl 	default:
22461f317bf9SMarius Strobl 		error = ether_ioctl(ifp, cmd, data);
224742c1b001SThomas Moestl 		break;
224842c1b001SThomas Moestl 	}
224942c1b001SThomas Moestl 
225042c1b001SThomas Moestl 	return (error);
225142c1b001SThomas Moestl }
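/*
 * Usage note (not from the original source): the IFF_LINK0 handling in
 * gem_ioctl() above means that UDP transmit checksum offload is only added
 * to sc_csum_features while the link0 flag is set on the interface, e.g.
 * with something like "ifconfig gem0 link0", and is removed again when the
 * flag is cleared with "ifconfig gem0 -link0".  The interface name gem0 is
 * only an example.
 */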
225242c1b001SThomas Moestl 
225342c1b001SThomas Moestl /*
225442c1b001SThomas Moestl  * Set up the logical address filter.
225542c1b001SThomas Moestl  */
225642c1b001SThomas Moestl static void
225742c1b001SThomas Moestl gem_setladrf(struct gem_softc *sc)
225942c1b001SThomas Moestl {
2260fc74a9f9SBrooks Davis 	struct ifnet *ifp = sc->sc_ifp;
226142c1b001SThomas Moestl 	struct ifmultiaddr *inm;
226242c1b001SThomas Moestl 	uint32_t crc;
226342c1b001SThomas Moestl 	uint32_t hash[16];
226442c1b001SThomas Moestl 	uint32_t v;
2265336cca9eSBenno Rice 	int i;
226642c1b001SThomas Moestl 
22678cfaff7dSMarius Strobl 	GEM_LOCK_ASSERT(sc, MA_OWNED);
22688cfaff7dSMarius Strobl 
226942c1b001SThomas Moestl 	/* Get current RX configuration */
2270e1bb13cdSPoul-Henning Kamp 	v = bus_read_4(sc->sc_res[0], GEM_MAC_RX_CONFIG);
227142c1b001SThomas Moestl 
2272336cca9eSBenno Rice 	/*
2273336cca9eSBenno Rice 	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
2274336cca9eSBenno Rice 	 * and the hash filter.  Depending on the case, the appropriate bit
2275336cca9eSBenno Rice 	 * will be re-enabled below.
2276336cca9eSBenno Rice 	 */
2277336cca9eSBenno Rice 	v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_HASH_FILTER |
2278336cca9eSBenno Rice 	    GEM_MAC_RX_PROMISC_GRP);
2279336cca9eSBenno Rice 
22801ed3fed7SMarius Strobl 	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
22811ed3fed7SMarius Strobl 	bus_barrier(sc->sc_res[0], GEM_MAC_RX_CONFIG, 4,
22821ed3fed7SMarius Strobl 	    BUS_SPACE_BARRIER_WRITE);
22831ed3fed7SMarius Strobl 	if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER, 0))
22841ed3fed7SMarius Strobl 		device_printf(sc->sc_dev, "cannot disable RX hash filter\n");
22851ed3fed7SMarius Strobl 
228642c1b001SThomas Moestl 	if ((ifp->if_flags & IFF_PROMISC) != 0) {
228742c1b001SThomas Moestl 		v |= GEM_MAC_RX_PROMISCUOUS;
228842c1b001SThomas Moestl 		goto chipit;
228942c1b001SThomas Moestl 	}
229042c1b001SThomas Moestl 	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
2291336cca9eSBenno Rice 		v |= GEM_MAC_RX_PROMISC_GRP;
229242c1b001SThomas Moestl 		goto chipit;
229342c1b001SThomas Moestl 	}
229442c1b001SThomas Moestl 
229542c1b001SThomas Moestl 	/*
229642c1b001SThomas Moestl 	 * Set up the multicast address filter by passing all multicast
2297336cca9eSBenno Rice 	 * addresses through a CRC generator and then using the high-order
2298336cca9eSBenno Rice 	 * 8 bits as an index into the 256-bit logical address filter.  The
2299336cca9eSBenno Rice 	 * high-order 4 bits select the word, while the other 4 bits select
2300336cca9eSBenno Rice 	 * the bit within the word (where bit 0 is the MSB).
230142c1b001SThomas Moestl 	 */
230242c1b001SThomas Moestl 
2303336cca9eSBenno Rice 	/* Clear hash table */
2304336cca9eSBenno Rice 	memset(hash, 0, sizeof(hash));
2305336cca9eSBenno Rice 
230613b203d0SRobert Watson 	IF_ADDR_LOCK(ifp);
2307fc74a9f9SBrooks Davis 	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
230842c1b001SThomas Moestl 		if (inm->ifma_addr->sa_family != AF_LINK)
230942c1b001SThomas Moestl 			continue;
2310c240bd8cSMarius Strobl 		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
2311c240bd8cSMarius Strobl 		    inm->ifma_addr), ETHER_ADDR_LEN);
231242c1b001SThomas Moestl 
231342c1b001SThomas Moestl 		/* Just want the 8 most significant bits. */
231442c1b001SThomas Moestl 		crc >>= 24;
231542c1b001SThomas Moestl 
231642c1b001SThomas Moestl 		/* Set the corresponding bit in the filter. */
2317336cca9eSBenno Rice 		hash[crc >> 4] |= 1 << (15 - (crc & 15));
2318336cca9eSBenno Rice 	}
231913b203d0SRobert Watson 	IF_ADDR_UNLOCK(ifp);
2320336cca9eSBenno Rice 
2321336cca9eSBenno Rice 	v |= GEM_MAC_RX_HASH_FILTER;
2322336cca9eSBenno Rice 
2323336cca9eSBenno Rice 	/* Now load the hash table into the chip (if we are using it) */
2324336cca9eSBenno Rice 	for (i = 0; i < 16; i++) {
2325e1bb13cdSPoul-Henning Kamp 		bus_write_4(sc->sc_res[0],
2326336cca9eSBenno Rice 	    GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0),
2327336cca9eSBenno Rice 		    hash[i]);
232842c1b001SThomas Moestl 	}
232942c1b001SThomas Moestl 
233042c1b001SThomas Moestl chipit:
2331e1bb13cdSPoul-Henning Kamp 	bus_write_4(sc->sc_res[0], GEM_MAC_RX_CONFIG, v);
233242c1b001SThomas Moestl }
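/*
 * Illustrative sketch, not part of the original driver: the helper below
 * (the name gem_mchash_bit is made up for this example) shows how the hash
 * table position used by gem_setladrf() above is derived from a multicast
 * address, using the same ether_crc32_le() routine as the code above.
 */
static __unused void
gem_mchash_bit(const uint8_t *enaddr, u_int *word, u_int *bit)
{
	uint32_t crc;

	/* CRC the address and keep only the 8 most significant bits. */
	crc = ether_crc32_le(enaddr, ETHER_ADDR_LEN) >> 24;
	/* The upper 4 bits select one of the 16 hash registers... */
	*word = crc >> 4;
	/* ...the lower 4 bits select the bit within it (bit 0 is the MSB). */
	*bit = 15 - (crc & 15);
}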
2333