1aad970f1SDavid E. O'Brien /*- 2718cf2ccSPedro F. Giffuni * SPDX-License-Identifier: BSD-2-Clause-NetBSD 3718cf2ccSPedro F. Giffuni * 442c1b001SThomas Moestl * Copyright (C) 2001 Eduardo Horvath. 5305f2c06SThomas Moestl * Copyright (c) 2001-2003 Thomas Moestl 62a79fd39SMarius Strobl * Copyright (c) 2007 Marius Strobl <marius@FreeBSD.org> 742c1b001SThomas Moestl * All rights reserved. 842c1b001SThomas Moestl * 942c1b001SThomas Moestl * Redistribution and use in source and binary forms, with or without 1042c1b001SThomas Moestl * modification, are permitted provided that the following conditions 1142c1b001SThomas Moestl * are met: 1242c1b001SThomas Moestl * 1. Redistributions of source code must retain the above copyright 1342c1b001SThomas Moestl * notice, this list of conditions and the following disclaimer. 1442c1b001SThomas Moestl * 2. Redistributions in binary form must reproduce the above copyright 1542c1b001SThomas Moestl * notice, this list of conditions and the following disclaimer in the 1642c1b001SThomas Moestl * documentation and/or other materials provided with the distribution. 1742c1b001SThomas Moestl * 1842c1b001SThomas Moestl * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND 1942c1b001SThomas Moestl * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 2042c1b001SThomas Moestl * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 2142c1b001SThomas Moestl * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE 2242c1b001SThomas Moestl * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 2342c1b001SThomas Moestl * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2442c1b001SThomas Moestl * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 2542c1b001SThomas Moestl * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2642c1b001SThomas Moestl * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 2742c1b001SThomas Moestl * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 2842c1b001SThomas Moestl * SUCH DAMAGE. 2942c1b001SThomas Moestl * 30336cca9eSBenno Rice * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp 3142c1b001SThomas Moestl */ 3242c1b001SThomas Moestl 33aad970f1SDavid E. O'Brien #include <sys/cdefs.h> 34aad970f1SDavid E. O'Brien __FBSDID("$FreeBSD$"); 35aad970f1SDavid E. O'Brien 3642c1b001SThomas Moestl /* 371ed3fed7SMarius Strobl * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers 3842c1b001SThomas Moestl */ 3942c1b001SThomas Moestl 4018100346SThomas Moestl #if 0 4142c1b001SThomas Moestl #define GEM_DEBUG 4218100346SThomas Moestl #endif 4342c1b001SThomas Moestl 44c3d5598aSMarius Strobl #if 0 /* XXX: In case of emergency, re-enable this. 
*/ 45c3d5598aSMarius Strobl #define GEM_RINT_TIMEOUT 46c3d5598aSMarius Strobl #endif 47c3d5598aSMarius Strobl 4842c1b001SThomas Moestl #include <sys/param.h> 4942c1b001SThomas Moestl #include <sys/systm.h> 5042c1b001SThomas Moestl #include <sys/bus.h> 5142c1b001SThomas Moestl #include <sys/callout.h> 52a30d4b32SMike Barcroft #include <sys/endian.h> 5342c1b001SThomas Moestl #include <sys/mbuf.h> 5442c1b001SThomas Moestl #include <sys/malloc.h> 5542c1b001SThomas Moestl #include <sys/kernel.h> 568cfaff7dSMarius Strobl #include <sys/lock.h> 57186f2b9eSPoul-Henning Kamp #include <sys/module.h> 588cfaff7dSMarius Strobl #include <sys/mutex.h> 5942c1b001SThomas Moestl #include <sys/socket.h> 6042c1b001SThomas Moestl #include <sys/sockio.h> 61e1bb13cdSPoul-Henning Kamp #include <sys/rman.h> 6242c1b001SThomas Moestl 6308e0fdebSThomas Moestl #include <net/bpf.h> 6442c1b001SThomas Moestl #include <net/ethernet.h> 6542c1b001SThomas Moestl #include <net/if.h> 6676039bc8SGleb Smirnoff #include <net/if_var.h> 6742c1b001SThomas Moestl #include <net/if_arp.h> 6842c1b001SThomas Moestl #include <net/if_dl.h> 6942c1b001SThomas Moestl #include <net/if_media.h> 70fc74a9f9SBrooks Davis #include <net/if_types.h> 7100d12766SMarius Strobl #include <net/if_vlan_var.h> 7242c1b001SThomas Moestl 7312fb0330SPyun YongHyeon #include <netinet/in.h> 7412fb0330SPyun YongHyeon #include <netinet/in_systm.h> 7512fb0330SPyun YongHyeon #include <netinet/ip.h> 7612fb0330SPyun YongHyeon #include <netinet/tcp.h> 7712fb0330SPyun YongHyeon #include <netinet/udp.h> 7812fb0330SPyun YongHyeon 7942c1b001SThomas Moestl #include <machine/bus.h> 8042c1b001SThomas Moestl 8142c1b001SThomas Moestl #include <dev/mii/mii.h> 8242c1b001SThomas Moestl #include <dev/mii/miivar.h> 8342c1b001SThomas Moestl 84681f7d03SWarner Losh #include <dev/gem/if_gemreg.h> 85681f7d03SWarner Losh #include <dev/gem/if_gemvar.h> 8642c1b001SThomas Moestl 871ed3fed7SMarius Strobl CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192); 881ed3fed7SMarius Strobl CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192); 891ed3fed7SMarius Strobl 909ba2b298SMarius Strobl #define GEM_TRIES 10000 911ed3fed7SMarius Strobl 9212fb0330SPyun YongHyeon /* 9378d22f42SMarius Strobl * The hardware supports basic TCP/UDP checksum offloading. However, 9412fb0330SPyun YongHyeon * the hardware doesn't compensate the checksum for UDP datagram which 9512fb0330SPyun YongHyeon * can yield to 0x0. As a safe guard, UDP checksum offload is disabled 9612fb0330SPyun YongHyeon * by default. It can be reactivated by setting special link option 9712fb0330SPyun YongHyeon * link0 with ifconfig(8). 
9812fb0330SPyun YongHyeon */ 9912fb0330SPyun YongHyeon #define GEM_CSUM_FEATURES (CSUM_TCP) 10042c1b001SThomas Moestl 1012a79fd39SMarius Strobl static int gem_add_rxbuf(struct gem_softc *sc, int idx); 102*8defc88cSMarius Strobl static int gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr, 103*8defc88cSMarius Strobl uint32_t set); 1042a79fd39SMarius Strobl static void gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, 1052a79fd39SMarius Strobl int nsegs, int error); 1062a79fd39SMarius Strobl static int gem_disable_rx(struct gem_softc *sc); 1072a79fd39SMarius Strobl static int gem_disable_tx(struct gem_softc *sc); 1082a79fd39SMarius Strobl static void gem_eint(struct gem_softc *sc, u_int status); 1092a79fd39SMarius Strobl static void gem_init(void *xsc); 1102a79fd39SMarius Strobl static void gem_init_locked(struct gem_softc *sc); 1112a79fd39SMarius Strobl static void gem_init_regs(struct gem_softc *sc); 1129f012efbSJustin Hibbits static int gem_ioctl(if_t ifp, u_long cmd, caddr_t data); 1132a79fd39SMarius Strobl static int gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head); 1142a79fd39SMarius Strobl static int gem_meminit(struct gem_softc *sc); 1152a79fd39SMarius Strobl static void gem_mifinit(struct gem_softc *sc); 1162a79fd39SMarius Strobl static void gem_reset(struct gem_softc *sc); 1172a79fd39SMarius Strobl static int gem_reset_rx(struct gem_softc *sc); 1181ed3fed7SMarius Strobl static void gem_reset_rxdma(struct gem_softc *sc); 1192a79fd39SMarius Strobl static int gem_reset_tx(struct gem_softc *sc); 1202a79fd39SMarius Strobl static u_int gem_ringsize(u_int sz); 1212a79fd39SMarius Strobl static void gem_rint(struct gem_softc *sc); 122c3d5598aSMarius Strobl #ifdef GEM_RINT_TIMEOUT 1232a79fd39SMarius Strobl static void gem_rint_timeout(void *arg); 12411e3f060SJake Burkholder #endif 1259ba2b298SMarius Strobl static inline void gem_rxcksum(struct mbuf *m, uint64_t flags); 1262a79fd39SMarius Strobl static void gem_rxdrain(struct gem_softc *sc); 1275ed0b954SMarius Strobl static void gem_setladrf(struct gem_softc *sc); 1289f012efbSJustin Hibbits static void gem_start(if_t ifp); 1299f012efbSJustin Hibbits static void gem_start_locked(if_t ifp); 1309f012efbSJustin Hibbits static void gem_stop(if_t ifp, int disable); 1312a79fd39SMarius Strobl static void gem_tick(void *arg); 1322a79fd39SMarius Strobl static void gem_tint(struct gem_softc *sc); 1339ba2b298SMarius Strobl static inline void gem_txkick(struct gem_softc *sc); 1342a79fd39SMarius Strobl static int gem_watchdog(struct gem_softc *sc); 13542c1b001SThomas Moestl 1363e38757dSJohn Baldwin DRIVER_MODULE(miibus, gem, miibus_driver, 0, 0); 13742c1b001SThomas Moestl MODULE_DEPEND(gem, miibus, 1, 1, 1); 13842c1b001SThomas Moestl 13942c1b001SThomas Moestl #ifdef GEM_DEBUG 14042c1b001SThomas Moestl #include <sys/ktr.h> 141651aa2d8SAttilio Rao #define KTR_GEM KTR_SPARE2 14242c1b001SThomas Moestl #endif 14342c1b001SThomas Moestl 14442c1b001SThomas Moestl int 1452a79fd39SMarius Strobl gem_attach(struct gem_softc *sc) 14642c1b001SThomas Moestl { 1472a79fd39SMarius Strobl struct gem_txsoft *txs; 1489f012efbSJustin Hibbits if_t ifp; 1498e5d93dbSMarius Strobl int error, i, phy; 1502a79fd39SMarius Strobl uint32_t v; 15142c1b001SThomas Moestl 1529ba2b298SMarius Strobl if (bootverbose) 1539ba2b298SMarius Strobl device_printf(sc->sc_dev, "flags=0x%x\n", sc->sc_flags); 1549ba2b298SMarius Strobl 1559ba2b298SMarius Strobl /* Set up ifnet structure. 
*/ 156fc74a9f9SBrooks Davis ifp = sc->sc_ifp = if_alloc(IFT_ETHER); 157fc74a9f9SBrooks Davis if (ifp == NULL) 158fc74a9f9SBrooks Davis return (ENOSPC); 1599ba2b298SMarius Strobl sc->sc_csum_features = GEM_CSUM_FEATURES; 1609f012efbSJustin Hibbits if_setsoftc(ifp, sc); 1619ba2b298SMarius Strobl if_initname(ifp, device_get_name(sc->sc_dev), 1629ba2b298SMarius Strobl device_get_unit(sc->sc_dev)); 1639f012efbSJustin Hibbits if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 1649f012efbSJustin Hibbits if_setstartfn(ifp, gem_start); 1659f012efbSJustin Hibbits if_setioctlfn(ifp, gem_ioctl); 1669f012efbSJustin Hibbits if_setinitfn(ifp, gem_init); 1679f012efbSJustin Hibbits if_setsendqlen(ifp, GEM_TXQUEUELEN); 1689f012efbSJustin Hibbits if_setsendqready(ifp); 169fc74a9f9SBrooks Davis 1701f317bf9SMarius Strobl callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0); 1711f317bf9SMarius Strobl #ifdef GEM_RINT_TIMEOUT 1721f317bf9SMarius Strobl callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0); 1731f317bf9SMarius Strobl #endif 1741f317bf9SMarius Strobl 17542c1b001SThomas Moestl /* Make sure the chip is stopped. */ 17642c1b001SThomas Moestl gem_reset(sc); 17742c1b001SThomas Moestl 178378f231eSJohn-Mark Gurney error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, 179378f231eSJohn-Mark Gurney BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1802a79fd39SMarius Strobl BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, 1812a79fd39SMarius Strobl NULL, &sc->sc_pdmatag); 1829ba2b298SMarius Strobl if (error != 0) 183fc74a9f9SBrooks Davis goto fail_ifnet; 18442c1b001SThomas Moestl 18542c1b001SThomas Moestl error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 18612fb0330SPyun YongHyeon BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 18712fb0330SPyun YongHyeon 1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag); 1889ba2b298SMarius Strobl if (error != 0) 189305f2c06SThomas Moestl goto fail_ptag; 190305f2c06SThomas Moestl 191305f2c06SThomas Moestl error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, 19212fb0330SPyun YongHyeon BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 19312fb0330SPyun YongHyeon MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES, 194f6b1c44dSScott Long BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag); 1959ba2b298SMarius Strobl if (error != 0) 196305f2c06SThomas Moestl goto fail_rtag; 19742c1b001SThomas Moestl 19842c1b001SThomas Moestl error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0, 19912fb0330SPyun YongHyeon BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 20042c1b001SThomas Moestl sizeof(struct gem_control_data), 1, 20112fb0330SPyun YongHyeon sizeof(struct gem_control_data), 0, 20212fb0330SPyun YongHyeon NULL, NULL, &sc->sc_cdmatag); 2039ba2b298SMarius Strobl if (error != 0) 204305f2c06SThomas Moestl goto fail_ttag; 20542c1b001SThomas Moestl 20642c1b001SThomas Moestl /* 2072a79fd39SMarius Strobl * Allocate the control data structures, create and load the 20842c1b001SThomas Moestl * DMA map for it. 
20942c1b001SThomas Moestl */ 21042c1b001SThomas Moestl if ((error = bus_dmamem_alloc(sc->sc_cdmatag, 21112fb0330SPyun YongHyeon (void **)&sc->sc_control_data, 21212fb0330SPyun YongHyeon BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2139ba2b298SMarius Strobl &sc->sc_cddmamap)) != 0) { 2142a79fd39SMarius Strobl device_printf(sc->sc_dev, 2152a79fd39SMarius Strobl "unable to allocate control data, error = %d\n", error); 216305f2c06SThomas Moestl goto fail_ctag; 21742c1b001SThomas Moestl } 21842c1b001SThomas Moestl 21942c1b001SThomas Moestl sc->sc_cddma = 0; 22042c1b001SThomas Moestl if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap, 22142c1b001SThomas Moestl sc->sc_control_data, sizeof(struct gem_control_data), 22242c1b001SThomas Moestl gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) { 2232a79fd39SMarius Strobl device_printf(sc->sc_dev, 2242a79fd39SMarius Strobl "unable to load control data DMA map, error = %d\n", 2252a79fd39SMarius Strobl error); 226305f2c06SThomas Moestl goto fail_cmem; 22742c1b001SThomas Moestl } 22842c1b001SThomas Moestl 22942c1b001SThomas Moestl /* 23042c1b001SThomas Moestl * Initialize the transmit job descriptors. 23142c1b001SThomas Moestl */ 23242c1b001SThomas Moestl STAILQ_INIT(&sc->sc_txfreeq); 23342c1b001SThomas Moestl STAILQ_INIT(&sc->sc_txdirtyq); 23442c1b001SThomas Moestl 23542c1b001SThomas Moestl /* 23642c1b001SThomas Moestl * Create the transmit buffer DMA maps. 23742c1b001SThomas Moestl */ 23842c1b001SThomas Moestl error = ENOMEM; 23942c1b001SThomas Moestl for (i = 0; i < GEM_TXQUEUELEN; i++) { 24042c1b001SThomas Moestl txs = &sc->sc_txsoft[i]; 24142c1b001SThomas Moestl txs->txs_mbuf = NULL; 24242c1b001SThomas Moestl txs->txs_ndescs = 0; 243305f2c06SThomas Moestl if ((error = bus_dmamap_create(sc->sc_tdmatag, 0, 24442c1b001SThomas Moestl &txs->txs_dmamap)) != 0) { 2452a79fd39SMarius Strobl device_printf(sc->sc_dev, 2462a79fd39SMarius Strobl "unable to create TX DMA map %d, error = %d\n", 2472a79fd39SMarius Strobl i, error); 248305f2c06SThomas Moestl goto fail_txd; 24942c1b001SThomas Moestl } 25042c1b001SThomas Moestl STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 25142c1b001SThomas Moestl } 25242c1b001SThomas Moestl 25342c1b001SThomas Moestl /* 25442c1b001SThomas Moestl * Create the receive buffer DMA maps. 25542c1b001SThomas Moestl */ 25642c1b001SThomas Moestl for (i = 0; i < GEM_NRXDESC; i++) { 257305f2c06SThomas Moestl if ((error = bus_dmamap_create(sc->sc_rdmatag, 0, 25842c1b001SThomas Moestl &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 2592a79fd39SMarius Strobl device_printf(sc->sc_dev, 2602a79fd39SMarius Strobl "unable to create RX DMA map %d, error = %d\n", 2612a79fd39SMarius Strobl i, error); 262305f2c06SThomas Moestl goto fail_rxd; 26342c1b001SThomas Moestl } 26442c1b001SThomas Moestl sc->sc_rxsoft[i].rxs_mbuf = NULL; 26542c1b001SThomas Moestl } 26642c1b001SThomas Moestl 26765f2c0ffSMarius Strobl /* Bypass probing PHYs if we already know for sure to use a SERDES. */ 26865f2c0ffSMarius Strobl if ((sc->sc_flags & GEM_SERDES) != 0) 26965f2c0ffSMarius Strobl goto serdes; 27065f2c0ffSMarius Strobl 271*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_MII); 272*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_MII_DATAPATH_MODE, 4, 27365f2c0ffSMarius Strobl BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 2741ed3fed7SMarius Strobl 27542c1b001SThomas Moestl gem_mifinit(sc); 27642c1b001SThomas Moestl 2771ed3fed7SMarius Strobl /* 2781ed3fed7SMarius Strobl * Look for an external PHY. 
2791ed3fed7SMarius Strobl */ 2801ed3fed7SMarius Strobl error = ENXIO; 281*8defc88cSMarius Strobl v = GEM_READ_4(sc, GEM_MIF_CONFIG); 2821ed3fed7SMarius Strobl if ((v & GEM_MIF_CONFIG_MDI1) != 0) { 2831ed3fed7SMarius Strobl v |= GEM_MIF_CONFIG_PHY_SEL; 284*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MIF_CONFIG, v); 285*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_MIF_CONFIG, 4, 28665f2c0ffSMarius Strobl BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 2878e5d93dbSMarius Strobl error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, 288*8defc88cSMarius Strobl gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, 289*8defc88cSMarius Strobl MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE); 2901ed3fed7SMarius Strobl } 2911ed3fed7SMarius Strobl 2921ed3fed7SMarius Strobl /* 2931ed3fed7SMarius Strobl * Fall back on an internal PHY if no external PHY was found. 2949e48f1e7SMarius Strobl * Note that with Apple (K2) GMACs GEM_MIF_CONFIG_MDI0 can't be 2959e48f1e7SMarius Strobl * trusted when the firmware has powered down the chip. 2961ed3fed7SMarius Strobl */ 2979e48f1e7SMarius Strobl if (error != 0 && 2989e48f1e7SMarius Strobl ((v & GEM_MIF_CONFIG_MDI0) != 0 || GEM_IS_APPLE(sc))) { 2991ed3fed7SMarius Strobl v &= ~GEM_MIF_CONFIG_PHY_SEL; 300*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MIF_CONFIG, v); 301*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_MIF_CONFIG, 4, 30265f2c0ffSMarius Strobl BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 3031ed3fed7SMarius Strobl switch (sc->sc_variant) { 3041ed3fed7SMarius Strobl case GEM_APPLE_K2_GMAC: 3058e5d93dbSMarius Strobl phy = GEM_PHYAD_INTERNAL; 3061ed3fed7SMarius Strobl break; 3071ed3fed7SMarius Strobl case GEM_APPLE_GMAC: 3088e5d93dbSMarius Strobl phy = GEM_PHYAD_EXTERNAL; 3091ed3fed7SMarius Strobl break; 3101ed3fed7SMarius Strobl default: 3118e5d93dbSMarius Strobl phy = MII_PHY_ANY; 3121ed3fed7SMarius Strobl break; 3131ed3fed7SMarius Strobl } 3148e5d93dbSMarius Strobl error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, 3158e5d93dbSMarius Strobl gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, phy, 3169a68cbd3SMarius Strobl MII_OFFSET_ANY, MIIF_DOPAUSE); 3171ed3fed7SMarius Strobl } 3181ed3fed7SMarius Strobl 3191ed3fed7SMarius Strobl /* 3201ed3fed7SMarius Strobl * Try the external PCS SERDES if we didn't find any PHYs. 
3211ed3fed7SMarius Strobl */ 3221ed3fed7SMarius Strobl if (error != 0 && sc->sc_variant == GEM_SUN_GEM) { 32365f2c0ffSMarius Strobl serdes: 324*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MII_DATAPATH_MODE, 3251ed3fed7SMarius Strobl GEM_MII_DATAPATH_SERDES); 326*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_MII_DATAPATH_MODE, 4, 32765f2c0ffSMarius Strobl BUS_SPACE_BARRIER_WRITE); 328*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MII_SLINK_CONTROL, 3291ed3fed7SMarius Strobl GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D); 330*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_MII_SLINK_CONTROL, 4, 33165f2c0ffSMarius Strobl BUS_SPACE_BARRIER_WRITE); 332*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE); 333*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_MII_CONFIG, 4, 33465f2c0ffSMarius Strobl BUS_SPACE_BARRIER_WRITE); 3351ed3fed7SMarius Strobl sc->sc_flags |= GEM_SERDES; 3368e5d93dbSMarius Strobl error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, 3378e5d93dbSMarius Strobl gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, 3389a68cbd3SMarius Strobl GEM_PHYAD_EXTERNAL, MII_OFFSET_ANY, MIIF_DOPAUSE); 3391ed3fed7SMarius Strobl } 3401ed3fed7SMarius Strobl if (error != 0) { 3418e5d93dbSMarius Strobl device_printf(sc->sc_dev, "attaching PHYs failed\n"); 342305f2c06SThomas Moestl goto fail_rxd; 34342c1b001SThomas Moestl } 34442c1b001SThomas Moestl sc->sc_mii = device_get_softc(sc->sc_miibus); 34542c1b001SThomas Moestl 34642c1b001SThomas Moestl /* 34742c1b001SThomas Moestl * From this point forward, the attachment cannot fail. A failure 34842c1b001SThomas Moestl * before this point releases all resources that may have been 34942c1b001SThomas Moestl * allocated. 35042c1b001SThomas Moestl */ 35142c1b001SThomas Moestl 352801772ecSMarius Strobl /* Get RX FIFO size. */ 353336cca9eSBenno Rice sc->sc_rxfifosize = 64 * 354*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_RX_FIFO_SIZE); 355336cca9eSBenno Rice 356801772ecSMarius Strobl /* Get TX FIFO size. */ 357*8defc88cSMarius Strobl v = GEM_READ_4(sc, GEM_TX_FIFO_SIZE); 3583a5aee5aSThomas Moestl device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n", 3593a5aee5aSThomas Moestl sc->sc_rxfifosize / 1024, v / 16); 36042c1b001SThomas Moestl 36142c1b001SThomas Moestl /* Attach the interface. */ 362fc74a9f9SBrooks Davis ether_ifattach(ifp, sc->sc_enaddr); 36342c1b001SThomas Moestl 36400d12766SMarius Strobl /* 36512fb0330SPyun YongHyeon * Tell the upper layer(s) we support long frames/checksum offloads. 36600d12766SMarius Strobl */ 3679f012efbSJustin Hibbits if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 3689f012efbSJustin Hibbits if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM, 0); 3699f012efbSJustin Hibbits if_sethwassistbits(ifp, sc->sc_csum_features, 0); 3709f012efbSJustin Hibbits if_setcapenablebit(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM, 0); 37100d12766SMarius Strobl 37242c1b001SThomas Moestl return (0); 37342c1b001SThomas Moestl 37442c1b001SThomas Moestl /* 37542c1b001SThomas Moestl * Free any resources we've allocated during the failed attach 37642c1b001SThomas Moestl * attempt. Do this in reverse order and fall through. 
37742c1b001SThomas Moestl */ 378305f2c06SThomas Moestl fail_rxd: 3792a79fd39SMarius Strobl for (i = 0; i < GEM_NRXDESC; i++) 38042c1b001SThomas Moestl if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 381305f2c06SThomas Moestl bus_dmamap_destroy(sc->sc_rdmatag, 38242c1b001SThomas Moestl sc->sc_rxsoft[i].rxs_dmamap); 383305f2c06SThomas Moestl fail_txd: 3842a79fd39SMarius Strobl for (i = 0; i < GEM_TXQUEUELEN; i++) 38542c1b001SThomas Moestl if (sc->sc_txsoft[i].txs_dmamap != NULL) 386305f2c06SThomas Moestl bus_dmamap_destroy(sc->sc_tdmatag, 38742c1b001SThomas Moestl sc->sc_txsoft[i].txs_dmamap); 388305f2c06SThomas Moestl bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 389305f2c06SThomas Moestl fail_cmem: 39042c1b001SThomas Moestl bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 39142c1b001SThomas Moestl sc->sc_cddmamap); 392305f2c06SThomas Moestl fail_ctag: 39342c1b001SThomas Moestl bus_dma_tag_destroy(sc->sc_cdmatag); 394305f2c06SThomas Moestl fail_ttag: 395305f2c06SThomas Moestl bus_dma_tag_destroy(sc->sc_tdmatag); 396305f2c06SThomas Moestl fail_rtag: 397305f2c06SThomas Moestl bus_dma_tag_destroy(sc->sc_rdmatag); 398305f2c06SThomas Moestl fail_ptag: 39942c1b001SThomas Moestl bus_dma_tag_destroy(sc->sc_pdmatag); 400fc74a9f9SBrooks Davis fail_ifnet: 401fc74a9f9SBrooks Davis if_free(ifp); 40242c1b001SThomas Moestl return (error); 40342c1b001SThomas Moestl } 40442c1b001SThomas Moestl 405cbbdf236SThomas Moestl void 4062a79fd39SMarius Strobl gem_detach(struct gem_softc *sc) 407cbbdf236SThomas Moestl { 4089f012efbSJustin Hibbits if_t ifp = sc->sc_ifp; 409cbbdf236SThomas Moestl int i; 410cbbdf236SThomas Moestl 411b3a1f860SMarius Strobl ether_ifdetach(ifp); 4128cfaff7dSMarius Strobl GEM_LOCK(sc); 41325bd46d0SBrooks Davis gem_stop(ifp, 1); 4148cfaff7dSMarius Strobl GEM_UNLOCK(sc); 4151f317bf9SMarius Strobl callout_drain(&sc->sc_tick_ch); 4161f317bf9SMarius Strobl #ifdef GEM_RINT_TIMEOUT 4171f317bf9SMarius Strobl callout_drain(&sc->sc_rx_ch); 4181f317bf9SMarius Strobl #endif 419fc74a9f9SBrooks Davis if_free(ifp); 420cbbdf236SThomas Moestl device_delete_child(sc->sc_dev, sc->sc_miibus); 421cbbdf236SThomas Moestl 4222a79fd39SMarius Strobl for (i = 0; i < GEM_NRXDESC; i++) 423cbbdf236SThomas Moestl if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 424cbbdf236SThomas Moestl bus_dmamap_destroy(sc->sc_rdmatag, 425cbbdf236SThomas Moestl sc->sc_rxsoft[i].rxs_dmamap); 4262a79fd39SMarius Strobl for (i = 0; i < GEM_TXQUEUELEN; i++) 427cbbdf236SThomas Moestl if (sc->sc_txsoft[i].txs_dmamap != NULL) 428cbbdf236SThomas Moestl bus_dmamap_destroy(sc->sc_tdmatag, 429cbbdf236SThomas Moestl sc->sc_txsoft[i].txs_dmamap); 430ccb1212aSMarius Strobl GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 431cbbdf236SThomas Moestl bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); 432cbbdf236SThomas Moestl bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, 433cbbdf236SThomas Moestl sc->sc_cddmamap); 434cbbdf236SThomas Moestl bus_dma_tag_destroy(sc->sc_cdmatag); 435cbbdf236SThomas Moestl bus_dma_tag_destroy(sc->sc_tdmatag); 436cbbdf236SThomas Moestl bus_dma_tag_destroy(sc->sc_rdmatag); 437cbbdf236SThomas Moestl bus_dma_tag_destroy(sc->sc_pdmatag); 438cbbdf236SThomas Moestl } 439cbbdf236SThomas Moestl 440cbbdf236SThomas Moestl void 4412a79fd39SMarius Strobl gem_suspend(struct gem_softc *sc) 442cbbdf236SThomas Moestl { 4439f012efbSJustin Hibbits if_t ifp = sc->sc_ifp; 444cbbdf236SThomas Moestl 4458cfaff7dSMarius Strobl GEM_LOCK(sc); 446cbbdf236SThomas Moestl gem_stop(ifp, 0); 4478cfaff7dSMarius Strobl GEM_UNLOCK(sc); 
448cbbdf236SThomas Moestl } 449cbbdf236SThomas Moestl 450cbbdf236SThomas Moestl void 4512a79fd39SMarius Strobl gem_resume(struct gem_softc *sc) 452cbbdf236SThomas Moestl { 4539f012efbSJustin Hibbits if_t ifp = sc->sc_ifp; 454cbbdf236SThomas Moestl 4558cfaff7dSMarius Strobl GEM_LOCK(sc); 45600d12766SMarius Strobl /* 45700d12766SMarius Strobl * On resume all registers have to be initialized again like 45800d12766SMarius Strobl * after power-on. 45900d12766SMarius Strobl */ 4601ed3fed7SMarius Strobl sc->sc_flags &= ~GEM_INITED; 4619f012efbSJustin Hibbits if (if_getflags(ifp) & IFF_UP) 4628cfaff7dSMarius Strobl gem_init_locked(sc); 4638cfaff7dSMarius Strobl GEM_UNLOCK(sc); 464cbbdf236SThomas Moestl } 465cbbdf236SThomas Moestl 4669ba2b298SMarius Strobl static inline void 46712fb0330SPyun YongHyeon gem_rxcksum(struct mbuf *m, uint64_t flags) 46812fb0330SPyun YongHyeon { 46912fb0330SPyun YongHyeon struct ether_header *eh; 47012fb0330SPyun YongHyeon struct ip *ip; 47112fb0330SPyun YongHyeon struct udphdr *uh; 4722a79fd39SMarius Strobl uint16_t *opts; 47312fb0330SPyun YongHyeon int32_t hlen, len, pktlen; 47412fb0330SPyun YongHyeon uint32_t temp32; 4752a79fd39SMarius Strobl uint16_t cksum; 47612fb0330SPyun YongHyeon 47712fb0330SPyun YongHyeon pktlen = m->m_pkthdr.len; 47812fb0330SPyun YongHyeon if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) 47912fb0330SPyun YongHyeon return; 48012fb0330SPyun YongHyeon eh = mtod(m, struct ether_header *); 48112fb0330SPyun YongHyeon if (eh->ether_type != htons(ETHERTYPE_IP)) 48212fb0330SPyun YongHyeon return; 48312fb0330SPyun YongHyeon ip = (struct ip *)(eh + 1); 48412fb0330SPyun YongHyeon if (ip->ip_v != IPVERSION) 48512fb0330SPyun YongHyeon return; 48612fb0330SPyun YongHyeon 48712fb0330SPyun YongHyeon hlen = ip->ip_hl << 2; 48812fb0330SPyun YongHyeon pktlen -= sizeof(struct ether_header); 48912fb0330SPyun YongHyeon if (hlen < sizeof(struct ip)) 49012fb0330SPyun YongHyeon return; 49112fb0330SPyun YongHyeon if (ntohs(ip->ip_len) < hlen) 49212fb0330SPyun YongHyeon return; 49312fb0330SPyun YongHyeon if (ntohs(ip->ip_len) != pktlen) 49412fb0330SPyun YongHyeon return; 49512fb0330SPyun YongHyeon if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) 4962a79fd39SMarius Strobl return; /* Cannot handle fragmented packet. 
*/ 49712fb0330SPyun YongHyeon 49812fb0330SPyun YongHyeon switch (ip->ip_p) { 49912fb0330SPyun YongHyeon case IPPROTO_TCP: 50012fb0330SPyun YongHyeon if (pktlen < (hlen + sizeof(struct tcphdr))) 50112fb0330SPyun YongHyeon return; 50212fb0330SPyun YongHyeon break; 50312fb0330SPyun YongHyeon case IPPROTO_UDP: 50412fb0330SPyun YongHyeon if (pktlen < (hlen + sizeof(struct udphdr))) 50512fb0330SPyun YongHyeon return; 50612fb0330SPyun YongHyeon uh = (struct udphdr *)((uint8_t *)ip + hlen); 50712fb0330SPyun YongHyeon if (uh->uh_sum == 0) 50812fb0330SPyun YongHyeon return; /* no checksum */ 50912fb0330SPyun YongHyeon break; 51012fb0330SPyun YongHyeon default: 51112fb0330SPyun YongHyeon return; 51212fb0330SPyun YongHyeon } 51312fb0330SPyun YongHyeon 51412fb0330SPyun YongHyeon cksum = ~(flags & GEM_RD_CHECKSUM); 51512fb0330SPyun YongHyeon /* checksum fixup for IP options */ 51612fb0330SPyun YongHyeon len = hlen - sizeof(struct ip); 51712fb0330SPyun YongHyeon if (len > 0) { 51812fb0330SPyun YongHyeon opts = (uint16_t *)(ip + 1); 51912fb0330SPyun YongHyeon for (; len > 0; len -= sizeof(uint16_t), opts++) { 52012fb0330SPyun YongHyeon temp32 = cksum - *opts; 52112fb0330SPyun YongHyeon temp32 = (temp32 >> 16) + (temp32 & 65535); 52212fb0330SPyun YongHyeon cksum = temp32 & 65535; 52312fb0330SPyun YongHyeon } 52412fb0330SPyun YongHyeon } 52512fb0330SPyun YongHyeon m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; 52612fb0330SPyun YongHyeon m->m_pkthdr.csum_data = cksum; 52712fb0330SPyun YongHyeon } 52812fb0330SPyun YongHyeon 52942c1b001SThomas Moestl static void 5302a79fd39SMarius Strobl gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) 53142c1b001SThomas Moestl { 5322a79fd39SMarius Strobl struct gem_softc *sc = xsc; 53342c1b001SThomas Moestl 53442c1b001SThomas Moestl if (error != 0) 53542c1b001SThomas Moestl return; 5362a79fd39SMarius Strobl if (nsegs != 1) 5371ed3fed7SMarius Strobl panic("%s: bad control buffer segment count", __func__); 53842c1b001SThomas Moestl sc->sc_cddma = segs[0].ds_addr; 53942c1b001SThomas Moestl } 54042c1b001SThomas Moestl 54142c1b001SThomas Moestl static void 5422a79fd39SMarius Strobl gem_tick(void *arg) 54342c1b001SThomas Moestl { 54442c1b001SThomas Moestl struct gem_softc *sc = arg; 5459f012efbSJustin Hibbits if_t ifp = sc->sc_ifp; 54678d22f42SMarius Strobl uint32_t v; 54742c1b001SThomas Moestl 5481f317bf9SMarius Strobl GEM_LOCK_ASSERT(sc, MA_OWNED); 54912fb0330SPyun YongHyeon 55012fb0330SPyun YongHyeon /* 55178d22f42SMarius Strobl * Unload collision and error counters. 55212fb0330SPyun YongHyeon */ 5538da56a6fSGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 554*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_MAC_NORM_COLL_CNT) + 555*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_MAC_FIRST_COLL_CNT)); 556*8defc88cSMarius Strobl v = GEM_READ_4(sc, GEM_MAC_EXCESS_COLL_CNT) + 557*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_MAC_LATE_COLL_CNT); 5588da56a6fSGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_COLLISIONS, v); 5598da56a6fSGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_OERRORS, v); 5608da56a6fSGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_IERRORS, 561*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_MAC_RX_LEN_ERR_CNT) + 562*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_MAC_RX_ALIGN_ERR) + 563*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_MAC_RX_CRC_ERR_CNT) + 564*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_MAC_RX_CODE_VIOL)); 56512fb0330SPyun YongHyeon 56612fb0330SPyun YongHyeon /* 567801772ecSMarius Strobl * Then clear the hardware counters. 
56812fb0330SPyun YongHyeon */ 569*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0); 570*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0); 571*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0); 572*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0); 573*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0); 574*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0); 575*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0); 576*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0); 57712fb0330SPyun YongHyeon 57842c1b001SThomas Moestl mii_tick(sc->sc_mii); 57942c1b001SThomas Moestl 5808cb37876SMarius Strobl if (gem_watchdog(sc) == EJUSTRETURN) 5818cb37876SMarius Strobl return; 5828cb37876SMarius Strobl 58342c1b001SThomas Moestl callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 58442c1b001SThomas Moestl } 58542c1b001SThomas Moestl 58642c1b001SThomas Moestl static int 587*8defc88cSMarius Strobl gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set) 58842c1b001SThomas Moestl { 58942c1b001SThomas Moestl int i; 5902a79fd39SMarius Strobl uint32_t reg; 59142c1b001SThomas Moestl 5929ba2b298SMarius Strobl for (i = GEM_TRIES; i--; DELAY(100)) { 593*8defc88cSMarius Strobl reg = GEM_READ_4(sc, r); 594e87137e1SMarius Strobl if ((reg & clr) == 0 && (reg & set) == set) 59542c1b001SThomas Moestl return (1); 59642c1b001SThomas Moestl } 59742c1b001SThomas Moestl return (0); 59842c1b001SThomas Moestl } 59942c1b001SThomas Moestl 6001ed3fed7SMarius Strobl static void 6019ba2b298SMarius Strobl gem_reset(struct gem_softc *sc) 60242c1b001SThomas Moestl { 60342c1b001SThomas Moestl 60418100346SThomas Moestl #ifdef GEM_DEBUG 60512fb0330SPyun YongHyeon CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 60618100346SThomas Moestl #endif 60742c1b001SThomas Moestl gem_reset_rx(sc); 60842c1b001SThomas Moestl gem_reset_tx(sc); 60942c1b001SThomas Moestl 6102a79fd39SMarius Strobl /* Do a full reset. 
*/ 611*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX); 612*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_RESET, 4, 613ccb1212aSMarius Strobl BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 614*8defc88cSMarius Strobl if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) 61542c1b001SThomas Moestl device_printf(sc->sc_dev, "cannot reset device\n"); 61642c1b001SThomas Moestl } 61742c1b001SThomas Moestl 61842c1b001SThomas Moestl static void 6192a79fd39SMarius Strobl gem_rxdrain(struct gem_softc *sc) 62042c1b001SThomas Moestl { 62142c1b001SThomas Moestl struct gem_rxsoft *rxs; 62242c1b001SThomas Moestl int i; 62342c1b001SThomas Moestl 62442c1b001SThomas Moestl for (i = 0; i < GEM_NRXDESC; i++) { 62542c1b001SThomas Moestl rxs = &sc->sc_rxsoft[i]; 62642c1b001SThomas Moestl if (rxs->rxs_mbuf != NULL) { 627b2d59f42SThomas Moestl bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 628b2d59f42SThomas Moestl BUS_DMASYNC_POSTREAD); 629305f2c06SThomas Moestl bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 63042c1b001SThomas Moestl m_freem(rxs->rxs_mbuf); 63142c1b001SThomas Moestl rxs->rxs_mbuf = NULL; 63242c1b001SThomas Moestl } 63342c1b001SThomas Moestl } 63442c1b001SThomas Moestl } 63542c1b001SThomas Moestl 63642c1b001SThomas Moestl static void 6379f012efbSJustin Hibbits gem_stop(if_t ifp, int disable) 63842c1b001SThomas Moestl { 6399f012efbSJustin Hibbits struct gem_softc *sc = if_getsoftc(ifp); 64042c1b001SThomas Moestl struct gem_txsoft *txs; 64142c1b001SThomas Moestl 64218100346SThomas Moestl #ifdef GEM_DEBUG 64312fb0330SPyun YongHyeon CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 64418100346SThomas Moestl #endif 64542c1b001SThomas Moestl 64642c1b001SThomas Moestl callout_stop(&sc->sc_tick_ch); 6471f317bf9SMarius Strobl #ifdef GEM_RINT_TIMEOUT 6481f317bf9SMarius Strobl callout_stop(&sc->sc_rx_ch); 6491f317bf9SMarius Strobl #endif 65042c1b001SThomas Moestl 6519ba2b298SMarius Strobl gem_reset_tx(sc); 6529ba2b298SMarius Strobl gem_reset_rx(sc); 65342c1b001SThomas Moestl 65442c1b001SThomas Moestl /* 65542c1b001SThomas Moestl * Release any queued transmit buffers. 65642c1b001SThomas Moestl */ 65742c1b001SThomas Moestl while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 65842c1b001SThomas Moestl STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 65942c1b001SThomas Moestl if (txs->txs_ndescs != 0) { 660b2d59f42SThomas Moestl bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 661b2d59f42SThomas Moestl BUS_DMASYNC_POSTWRITE); 662305f2c06SThomas Moestl bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 66342c1b001SThomas Moestl if (txs->txs_mbuf != NULL) { 66442c1b001SThomas Moestl m_freem(txs->txs_mbuf); 66542c1b001SThomas Moestl txs->txs_mbuf = NULL; 66642c1b001SThomas Moestl } 66742c1b001SThomas Moestl } 66842c1b001SThomas Moestl STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 66942c1b001SThomas Moestl } 67042c1b001SThomas Moestl 67142c1b001SThomas Moestl if (disable) 67242c1b001SThomas Moestl gem_rxdrain(sc); 67342c1b001SThomas Moestl 67442c1b001SThomas Moestl /* 67542c1b001SThomas Moestl * Mark the interface down and cancel the watchdog timer. 
67642c1b001SThomas Moestl */ 6779f012efbSJustin Hibbits if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); 6781ed3fed7SMarius Strobl sc->sc_flags &= ~GEM_LINK; 6798cb37876SMarius Strobl sc->sc_wdog_timer = 0; 68042c1b001SThomas Moestl } 68142c1b001SThomas Moestl 6821ed3fed7SMarius Strobl static int 6832a79fd39SMarius Strobl gem_reset_rx(struct gem_softc *sc) 68442c1b001SThomas Moestl { 68542c1b001SThomas Moestl 68642c1b001SThomas Moestl /* 68742c1b001SThomas Moestl * Resetting while DMA is in progress can cause a bus hang, so we 68842c1b001SThomas Moestl * disable DMA first. 68942c1b001SThomas Moestl */ 690c0e3e9d4SMarius Strobl (void)gem_disable_rx(sc); 691*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_RX_CONFIG, 0); 692*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_RX_CONFIG, 4, 693ccb1212aSMarius Strobl BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 694*8defc88cSMarius Strobl if (!gem_bitwait(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0)) 6951ed3fed7SMarius Strobl device_printf(sc->sc_dev, "cannot disable RX DMA\n"); 69642c1b001SThomas Moestl 6979a68cbd3SMarius Strobl /* Wait 5ms extra. */ 6989a68cbd3SMarius Strobl DELAY(5000); 6999a68cbd3SMarius Strobl 700c0e3e9d4SMarius Strobl /* Reset the ERX. */ 701*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_RESET, GEM_RESET_RX); 702*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_RESET, 4, 703ccb1212aSMarius Strobl BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 704*8defc88cSMarius Strobl if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) { 70542c1b001SThomas Moestl device_printf(sc->sc_dev, "cannot reset receiver\n"); 70642c1b001SThomas Moestl return (1); 70742c1b001SThomas Moestl } 708c0e3e9d4SMarius Strobl 709c0e3e9d4SMarius Strobl /* Finally, reset RX MAC. */ 710*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_RXRESET, 1); 711*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_MAC_RXRESET, 4, 712c0e3e9d4SMarius Strobl BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 713*8defc88cSMarius Strobl if (!gem_bitwait(sc, GEM_MAC_RXRESET, 1, 0)) { 714c0e3e9d4SMarius Strobl device_printf(sc->sc_dev, "cannot reset RX MAC\n"); 715c0e3e9d4SMarius Strobl return (1); 716c0e3e9d4SMarius Strobl } 717c0e3e9d4SMarius Strobl 71842c1b001SThomas Moestl return (0); 71942c1b001SThomas Moestl } 72042c1b001SThomas Moestl 7211ed3fed7SMarius Strobl /* 7221ed3fed7SMarius Strobl * Reset the receiver DMA engine. 7231ed3fed7SMarius Strobl * 7241ed3fed7SMarius Strobl * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW 7251ed3fed7SMarius Strobl * etc in order to reset the receiver DMA engine only and not do a full 7261ed3fed7SMarius Strobl * reset which amongst others also downs the link and clears the FIFOs. 7271ed3fed7SMarius Strobl */ 7281ed3fed7SMarius Strobl static void 7291ed3fed7SMarius Strobl gem_reset_rxdma(struct gem_softc *sc) 7301ed3fed7SMarius Strobl { 7311ed3fed7SMarius Strobl int i; 7321ed3fed7SMarius Strobl 73383242185SPyun YongHyeon if (gem_reset_rx(sc) != 0) { 7349f012efbSJustin Hibbits if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_RUNNING); 7351ed3fed7SMarius Strobl return (gem_init_locked(sc)); 73683242185SPyun YongHyeon } 7371ed3fed7SMarius Strobl for (i = 0; i < GEM_NRXDESC; i++) 7381ed3fed7SMarius Strobl if (sc->sc_rxsoft[i].rxs_mbuf != NULL) 7391ed3fed7SMarius Strobl GEM_UPDATE_RXDESC(sc, i); 7401ed3fed7SMarius Strobl sc->sc_rxptr = 0; 7419ba2b298SMarius Strobl GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 7421ed3fed7SMarius Strobl 7431ed3fed7SMarius Strobl /* NOTE: we use only 32-bit DMA addresses here. 
*/ 744*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0); 745*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 746*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4); 747*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_RX_CONFIG, 7481ed3fed7SMarius Strobl gem_ringsize(GEM_NRXDESC /* XXX */) | 7491ed3fed7SMarius Strobl ((ETHER_HDR_LEN + sizeof(struct ip)) << 7501ed3fed7SMarius Strobl GEM_RX_CONFIG_CXM_START_SHFT) | 7511ed3fed7SMarius Strobl (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) | 7529ba2b298SMarius Strobl (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT)); 753*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_RX_BLANKING, 7549ba2b298SMarius Strobl ((6 * (sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1) << 7559ba2b298SMarius Strobl GEM_RX_BLANKING_TIME_SHIFT) | 6); 756*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_RX_PAUSE_THRESH, 7572a79fd39SMarius Strobl (3 * sc->sc_rxfifosize / 256) | 7582a79fd39SMarius Strobl ((sc->sc_rxfifosize / 256) << 12)); 759*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_RX_CONFIG, 760*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_RX_CONFIG) | GEM_RX_CONFIG_RXDMA_EN); 761*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_RX_MASK, 7621ed3fed7SMarius Strobl GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT); 7635ed0b954SMarius Strobl /* 7645ed0b954SMarius Strobl * Clear the RX filter and reprogram it. This will also set the 7655ed0b954SMarius Strobl * current RX MAC configuration and enable it. 7665ed0b954SMarius Strobl */ 7675ed0b954SMarius Strobl gem_setladrf(sc); 7681ed3fed7SMarius Strobl } 76942c1b001SThomas Moestl 77042c1b001SThomas Moestl static int 7712a79fd39SMarius Strobl gem_reset_tx(struct gem_softc *sc) 77242c1b001SThomas Moestl { 77342c1b001SThomas Moestl 77442c1b001SThomas Moestl /* 77542c1b001SThomas Moestl * Resetting while DMA is in progress can cause a bus hang, so we 77642c1b001SThomas Moestl * disable DMA first. 77742c1b001SThomas Moestl */ 778c0e3e9d4SMarius Strobl (void)gem_disable_tx(sc); 779*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_TX_CONFIG, 0); 780*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_TX_CONFIG, 4, 781ccb1212aSMarius Strobl BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 782*8defc88cSMarius Strobl if (!gem_bitwait(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0)) 7831ed3fed7SMarius Strobl device_printf(sc->sc_dev, "cannot disable TX DMA\n"); 78442c1b001SThomas Moestl 7859a68cbd3SMarius Strobl /* Wait 5ms extra. */ 7869a68cbd3SMarius Strobl DELAY(5000); 7879a68cbd3SMarius Strobl 788801772ecSMarius Strobl /* Finally, reset the ETX. 
*/ 789*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_RESET, GEM_RESET_TX); 790*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_RESET, 4, 791ccb1212aSMarius Strobl BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 792*8defc88cSMarius Strobl if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) { 7931ed3fed7SMarius Strobl device_printf(sc->sc_dev, "cannot reset transmitter\n"); 79442c1b001SThomas Moestl return (1); 79542c1b001SThomas Moestl } 79642c1b001SThomas Moestl return (0); 79742c1b001SThomas Moestl } 79842c1b001SThomas Moestl 79942c1b001SThomas Moestl static int 8002a79fd39SMarius Strobl gem_disable_rx(struct gem_softc *sc) 80142c1b001SThomas Moestl { 80242c1b001SThomas Moestl 803*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, 804*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_MAC_RX_CONFIG) & ~GEM_MAC_RX_ENABLE); 805*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_MAC_RX_CONFIG, 4, 806ccb1212aSMarius Strobl BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 807*8defc88cSMarius Strobl if (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)) 808c0e3e9d4SMarius Strobl return (1); 809c0e3e9d4SMarius Strobl device_printf(sc->sc_dev, "cannot disable RX MAC\n"); 810c0e3e9d4SMarius Strobl return (0); 81142c1b001SThomas Moestl } 81242c1b001SThomas Moestl 81342c1b001SThomas Moestl static int 8142a79fd39SMarius Strobl gem_disable_tx(struct gem_softc *sc) 81542c1b001SThomas Moestl { 81642c1b001SThomas Moestl 817*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG, 818*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_MAC_TX_CONFIG) & ~GEM_MAC_TX_ENABLE); 819*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_MAC_TX_CONFIG, 4, 820ccb1212aSMarius Strobl BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 821*8defc88cSMarius Strobl if (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)) 822c0e3e9d4SMarius Strobl return (1); 823c0e3e9d4SMarius Strobl device_printf(sc->sc_dev, "cannot disable TX MAC\n"); 824c0e3e9d4SMarius Strobl return (0); 82542c1b001SThomas Moestl } 82642c1b001SThomas Moestl 82742c1b001SThomas Moestl static int 8289ba2b298SMarius Strobl gem_meminit(struct gem_softc *sc) 82942c1b001SThomas Moestl { 83042c1b001SThomas Moestl struct gem_rxsoft *rxs; 8312a79fd39SMarius Strobl int error, i; 83242c1b001SThomas Moestl 8339ba2b298SMarius Strobl GEM_LOCK_ASSERT(sc, MA_OWNED); 8349ba2b298SMarius Strobl 83542c1b001SThomas Moestl /* 83642c1b001SThomas Moestl * Initialize the transmit descriptor ring. 83742c1b001SThomas Moestl */ 83842c1b001SThomas Moestl for (i = 0; i < GEM_NTXDESC; i++) { 83942c1b001SThomas Moestl sc->sc_txdescs[i].gd_flags = 0; 84042c1b001SThomas Moestl sc->sc_txdescs[i].gd_addr = 0; 84142c1b001SThomas Moestl } 842305f2c06SThomas Moestl sc->sc_txfree = GEM_MAXTXFREE; 84342c1b001SThomas Moestl sc->sc_txnext = 0; 844336cca9eSBenno Rice sc->sc_txwin = 0; 84542c1b001SThomas Moestl 84642c1b001SThomas Moestl /* 84742c1b001SThomas Moestl * Initialize the receive descriptor and receive job 84842c1b001SThomas Moestl * descriptor rings. 
84942c1b001SThomas Moestl */ 85042c1b001SThomas Moestl for (i = 0; i < GEM_NRXDESC; i++) { 85142c1b001SThomas Moestl rxs = &sc->sc_rxsoft[i]; 85242c1b001SThomas Moestl if (rxs->rxs_mbuf == NULL) { 85342c1b001SThomas Moestl if ((error = gem_add_rxbuf(sc, i)) != 0) { 8542a79fd39SMarius Strobl device_printf(sc->sc_dev, 8552a79fd39SMarius Strobl "unable to allocate or map RX buffer %d, " 8562a79fd39SMarius Strobl "error = %d\n", i, error); 85742c1b001SThomas Moestl /* 8582a79fd39SMarius Strobl * XXX we should attempt to run with fewer 8592a79fd39SMarius Strobl * receive buffers instead of just failing. 86042c1b001SThomas Moestl */ 86142c1b001SThomas Moestl gem_rxdrain(sc); 86242c1b001SThomas Moestl return (1); 86342c1b001SThomas Moestl } 86442c1b001SThomas Moestl } else 86542c1b001SThomas Moestl GEM_INIT_RXDESC(sc, i); 86642c1b001SThomas Moestl } 86742c1b001SThomas Moestl sc->sc_rxptr = 0; 8689ba2b298SMarius Strobl 8699ba2b298SMarius Strobl GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 87042c1b001SThomas Moestl 87142c1b001SThomas Moestl return (0); 87242c1b001SThomas Moestl } 87342c1b001SThomas Moestl 8741ed3fed7SMarius Strobl static u_int 8752a79fd39SMarius Strobl gem_ringsize(u_int sz) 87642c1b001SThomas Moestl { 87742c1b001SThomas Moestl 87842c1b001SThomas Moestl switch (sz) { 87942c1b001SThomas Moestl case 32: 8801ed3fed7SMarius Strobl return (GEM_RING_SZ_32); 88142c1b001SThomas Moestl case 64: 8821ed3fed7SMarius Strobl return (GEM_RING_SZ_64); 88342c1b001SThomas Moestl case 128: 8841ed3fed7SMarius Strobl return (GEM_RING_SZ_128); 88542c1b001SThomas Moestl case 256: 8861ed3fed7SMarius Strobl return (GEM_RING_SZ_256); 88742c1b001SThomas Moestl case 512: 8881ed3fed7SMarius Strobl return (GEM_RING_SZ_512); 88942c1b001SThomas Moestl case 1024: 8901ed3fed7SMarius Strobl return (GEM_RING_SZ_1024); 89142c1b001SThomas Moestl case 2048: 8921ed3fed7SMarius Strobl return (GEM_RING_SZ_2048); 89342c1b001SThomas Moestl case 4096: 8941ed3fed7SMarius Strobl return (GEM_RING_SZ_4096); 89542c1b001SThomas Moestl case 8192: 8961ed3fed7SMarius Strobl return (GEM_RING_SZ_8192); 89742c1b001SThomas Moestl default: 8981ed3fed7SMarius Strobl printf("%s: invalid ring size %d\n", __func__, sz); 8991ed3fed7SMarius Strobl return (GEM_RING_SZ_32); 90042c1b001SThomas Moestl } 90142c1b001SThomas Moestl } 90242c1b001SThomas Moestl 90342c1b001SThomas Moestl static void 9042a79fd39SMarius Strobl gem_init(void *xsc) 90542c1b001SThomas Moestl { 9062a79fd39SMarius Strobl struct gem_softc *sc = xsc; 9078cfaff7dSMarius Strobl 9088cfaff7dSMarius Strobl GEM_LOCK(sc); 9098cfaff7dSMarius Strobl gem_init_locked(sc); 9108cfaff7dSMarius Strobl GEM_UNLOCK(sc); 9118cfaff7dSMarius Strobl } 9128cfaff7dSMarius Strobl 9138cfaff7dSMarius Strobl /* 9148cfaff7dSMarius Strobl * Initialization of interface; set up initialization block 9158cfaff7dSMarius Strobl * and transmit/receive descriptor rings. 
9168cfaff7dSMarius Strobl */ 9178cfaff7dSMarius Strobl static void 9182a79fd39SMarius Strobl gem_init_locked(struct gem_softc *sc) 9198cfaff7dSMarius Strobl { 9209f012efbSJustin Hibbits if_t ifp = sc->sc_ifp; 9212a79fd39SMarius Strobl uint32_t v; 92242c1b001SThomas Moestl 9238cfaff7dSMarius Strobl GEM_LOCK_ASSERT(sc, MA_OWNED); 92442c1b001SThomas Moestl 9259f012efbSJustin Hibbits if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 92683242185SPyun YongHyeon return; 92783242185SPyun YongHyeon 92818100346SThomas Moestl #ifdef GEM_DEBUG 92912fb0330SPyun YongHyeon CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev), 93012fb0330SPyun YongHyeon __func__); 93118100346SThomas Moestl #endif 93242c1b001SThomas Moestl /* 93342c1b001SThomas Moestl * Initialization sequence. The numbered steps below correspond 93442c1b001SThomas Moestl * to the sequence outlined in section 6.3.5.1 in the Ethernet 93542c1b001SThomas Moestl * Channel Engine manual (part of the PCIO manual). 93642c1b001SThomas Moestl * See also the STP2002-STQ document from Sun Microsystems. 93742c1b001SThomas Moestl */ 93842c1b001SThomas Moestl 9392a79fd39SMarius Strobl /* step 1 & 2. Reset the Ethernet Channel. */ 940ccb1212aSMarius Strobl gem_stop(ifp, 0); 94142c1b001SThomas Moestl gem_reset(sc); 94218100346SThomas Moestl #ifdef GEM_DEBUG 94312fb0330SPyun YongHyeon CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev), 94412fb0330SPyun YongHyeon __func__); 94518100346SThomas Moestl #endif 94642c1b001SThomas Moestl 94765f2c0ffSMarius Strobl if ((sc->sc_flags & GEM_SERDES) == 0) 9482a79fd39SMarius Strobl /* Re-initialize the MIF. */ 94942c1b001SThomas Moestl gem_mifinit(sc); 95042c1b001SThomas Moestl 9512a79fd39SMarius Strobl /* step 3. Setup data structures in host memory. */ 9521ed3fed7SMarius Strobl if (gem_meminit(sc) != 0) 9531ed3fed7SMarius Strobl return; 95442c1b001SThomas Moestl 95542c1b001SThomas Moestl /* step 4. TX MAC registers & counters */ 95642c1b001SThomas Moestl gem_init_regs(sc); 95742c1b001SThomas Moestl 95842c1b001SThomas Moestl /* step 5. RX MAC registers & counters */ 95942c1b001SThomas Moestl 9602a79fd39SMarius Strobl /* step 6 & 7. Program Descriptor Ring Base Addresses. */ 96142c1b001SThomas Moestl /* NOTE: we use only 32-bit DMA addresses here. */ 962*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_TX_RING_PTR_HI, 0); 963*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0)); 96442c1b001SThomas Moestl 965*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0); 966*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); 96718100346SThomas Moestl #ifdef GEM_DEBUG 9682a79fd39SMarius Strobl CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx", 96942c1b001SThomas Moestl GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma); 97018100346SThomas Moestl #endif 97142c1b001SThomas Moestl 97242c1b001SThomas Moestl /* step 8. Global Configuration & Interrupt Mask */ 9739ba2b298SMarius Strobl 9749ba2b298SMarius Strobl /* 9759ba2b298SMarius Strobl * Set the internal arbitration to "infinite" bursts of the 9769ba2b298SMarius Strobl * maximum length of 31 * 64 bytes so DMA transfers aren't 9779ba2b298SMarius Strobl * split up in cache line size chunks. This greatly improves 9789ba2b298SMarius Strobl * RX performance. 9799ba2b298SMarius Strobl * Enable silicon bug workarounds for the Apple variants. 
9809ba2b298SMarius Strobl */ 981*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_CONFIG, 9829ba2b298SMarius Strobl GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT | 983*8defc88cSMarius Strobl GEM_CONFIG_BURST_INF | (GEM_IS_APPLE(sc) ? 9849ba2b298SMarius Strobl GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0)); 9859ba2b298SMarius Strobl 986*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_INTMASK, 9871ed3fed7SMarius Strobl ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE | 9881ed3fed7SMarius Strobl GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | 9891ed3fed7SMarius Strobl GEM_INTR_BERR 9901ed3fed7SMarius Strobl #ifdef GEM_DEBUG 9911ed3fed7SMarius Strobl | GEM_INTR_PCS | GEM_INTR_MIF 9921ed3fed7SMarius Strobl #endif 9931ed3fed7SMarius Strobl )); 994*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_RX_MASK, 995336cca9eSBenno Rice GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT); 996*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_TX_MASK, 9979ba2b298SMarius Strobl GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP | 9989ba2b298SMarius Strobl GEM_MAC_TX_PEAK_EXP); 9991ed3fed7SMarius Strobl #ifdef GEM_DEBUG 1000*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_CONTROL_MASK, 10011ed3fed7SMarius Strobl ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME)); 10021ed3fed7SMarius Strobl #else 1003*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_CONTROL_MASK, 10041ed3fed7SMarius Strobl GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME); 10051ed3fed7SMarius Strobl #endif 100642c1b001SThomas Moestl 10072a79fd39SMarius Strobl /* step 9. ETX Configuration: use mostly default values. */ 100842c1b001SThomas Moestl 10092a79fd39SMarius Strobl /* Enable DMA. */ 10109ba2b298SMarius Strobl v = gem_ringsize(GEM_NTXDESC); 10119ba2b298SMarius Strobl /* Set TX FIFO threshold and enable DMA. */ 1012*8defc88cSMarius Strobl v |= (0x4ff << 10) & GEM_TX_CONFIG_TXFIFO_TH; 1013*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN); 101442c1b001SThomas Moestl 101542c1b001SThomas Moestl /* step 10. ERX Configuration */ 101642c1b001SThomas Moestl 10171ed3fed7SMarius Strobl /* Encode Receive Descriptor ring size. */ 101842c1b001SThomas Moestl v = gem_ringsize(GEM_NRXDESC /* XXX */); 10192a79fd39SMarius Strobl /* RX TCP/UDP checksum offset */ 102012fb0330SPyun YongHyeon v |= ((ETHER_HDR_LEN + sizeof(struct ip)) << 102112fb0330SPyun YongHyeon GEM_RX_CONFIG_CXM_START_SHFT); 10229ba2b298SMarius Strobl /* Set RX FIFO threshold, set first byte offset and enable DMA. */ 1023*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_RX_CONFIG, 102442c1b001SThomas Moestl v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) | 10259ba2b298SMarius Strobl (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT) | 10269ba2b298SMarius Strobl GEM_RX_CONFIG_RXDMA_EN); 10271ed3fed7SMarius Strobl 1028*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_RX_BLANKING, 10299ba2b298SMarius Strobl ((6 * (sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1) << 10309ba2b298SMarius Strobl GEM_RX_BLANKING_TIME_SHIFT) | 6); 10311ed3fed7SMarius Strobl 103242c1b001SThomas Moestl /* 1033336cca9eSBenno Rice * The following value is for an OFF Threshold of about 3/4 full 1034336cca9eSBenno Rice * and an ON Threshold of 1/4 full. 103542c1b001SThomas Moestl */ 1036*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_RX_PAUSE_THRESH, 1037336cca9eSBenno Rice (3 * sc->sc_rxfifosize / 256) | 1038336cca9eSBenno Rice ((sc->sc_rxfifosize / 256) << 12)); 103942c1b001SThomas Moestl 10402a79fd39SMarius Strobl /* step 11. Configure Media. */ 104142c1b001SThomas Moestl 104242c1b001SThomas Moestl /* step 12. 
RX_MAC Configuration Register */ 1043*8defc88cSMarius Strobl v = GEM_READ_4(sc, GEM_MAC_RX_CONFIG); 10445ed0b954SMarius Strobl v &= ~GEM_MAC_RX_ENABLE; 10455ed0b954SMarius Strobl v |= GEM_MAC_RX_STRIP_CRC; 10465ed0b954SMarius Strobl sc->sc_mac_rxcfg = v; 10475ed0b954SMarius Strobl /* 10485ed0b954SMarius Strobl * Clear the RX filter and reprogram it. This will also set the 10495ed0b954SMarius Strobl * current RX MAC configuration and enable it. 10505ed0b954SMarius Strobl */ 10515ed0b954SMarius Strobl gem_setladrf(sc); 105242c1b001SThomas Moestl 1053ccb1212aSMarius Strobl /* step 13. TX_MAC Configuration Register */ 1054*8defc88cSMarius Strobl v = GEM_READ_4(sc, GEM_MAC_TX_CONFIG); 1055ccb1212aSMarius Strobl v |= GEM_MAC_TX_ENABLE; 1056c0e3e9d4SMarius Strobl (void)gem_disable_tx(sc); 1057*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG, v); 1058ccb1212aSMarius Strobl 10592a79fd39SMarius Strobl /* step 14. Issue Transmit Pending command. */ 106042c1b001SThomas Moestl 1061af5ac863SMarius Strobl /* step 15. Give the receiver a swift kick. */ 1062*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4); 106342c1b001SThomas Moestl 10649f012efbSJustin Hibbits if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); 10659f012efbSJustin Hibbits if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 10661ed3fed7SMarius Strobl 10671ed3fed7SMarius Strobl mii_mediachg(sc->sc_mii); 10681ed3fed7SMarius Strobl 10691ed3fed7SMarius Strobl /* Start the one second timer. */ 10701ed3fed7SMarius Strobl sc->sc_wdog_timer = 0; 10711ed3fed7SMarius Strobl callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); 107242c1b001SThomas Moestl } 107342c1b001SThomas Moestl 107412fb0330SPyun YongHyeon static int 10752a79fd39SMarius Strobl gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head) 107612fb0330SPyun YongHyeon { 107712fb0330SPyun YongHyeon bus_dma_segment_t txsegs[GEM_NTXSEGS]; 10782a79fd39SMarius Strobl struct gem_txsoft *txs; 1079ccb1212aSMarius Strobl struct ip *ip; 108012fb0330SPyun YongHyeon struct mbuf *m; 10812a79fd39SMarius Strobl uint64_t cflags, flags; 1082ccb1212aSMarius Strobl int error, nexttx, nsegs, offset, seg; 108342c1b001SThomas Moestl 10849ba2b298SMarius Strobl GEM_LOCK_ASSERT(sc, MA_OWNED); 10859ba2b298SMarius Strobl 108642c1b001SThomas Moestl /* Get a work queue entry. */ 108742c1b001SThomas Moestl if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) { 1088305f2c06SThomas Moestl /* Ran out of descriptors. 
*/ 108912fb0330SPyun YongHyeon return (ENOBUFS); 1090305f2c06SThomas Moestl } 1091ccb1212aSMarius Strobl 1092ccb1212aSMarius Strobl cflags = 0; 1093ccb1212aSMarius Strobl if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) { 1094ccb1212aSMarius Strobl if (M_WRITABLE(*m_head) == 0) { 1095c6499eccSGleb Smirnoff m = m_dup(*m_head, M_NOWAIT); 1096ccb1212aSMarius Strobl m_freem(*m_head); 1097ccb1212aSMarius Strobl *m_head = m; 1098ccb1212aSMarius Strobl if (m == NULL) 1099ccb1212aSMarius Strobl return (ENOBUFS); 1100ccb1212aSMarius Strobl } 1101ccb1212aSMarius Strobl offset = sizeof(struct ether_header); 1102ccb1212aSMarius Strobl m = m_pullup(*m_head, offset + sizeof(struct ip)); 1103ccb1212aSMarius Strobl if (m == NULL) { 1104ccb1212aSMarius Strobl *m_head = NULL; 1105ccb1212aSMarius Strobl return (ENOBUFS); 1106ccb1212aSMarius Strobl } 1107ccb1212aSMarius Strobl ip = (struct ip *)(mtod(m, caddr_t) + offset); 1108ccb1212aSMarius Strobl offset += (ip->ip_hl << 2); 1109ccb1212aSMarius Strobl cflags = offset << GEM_TD_CXSUM_STARTSHFT | 1110ccb1212aSMarius Strobl ((offset + m->m_pkthdr.csum_data) << 1111ccb1212aSMarius Strobl GEM_TD_CXSUM_STUFFSHFT) | GEM_TD_CXSUM_ENABLE; 1112ccb1212aSMarius Strobl *m_head = m; 1113ccb1212aSMarius Strobl } 1114ccb1212aSMarius Strobl 111512fb0330SPyun YongHyeon error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap, 111612fb0330SPyun YongHyeon *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 111712fb0330SPyun YongHyeon if (error == EFBIG) { 1118c6499eccSGleb Smirnoff m = m_collapse(*m_head, M_NOWAIT, GEM_NTXSEGS); 111912fb0330SPyun YongHyeon if (m == NULL) { 112012fb0330SPyun YongHyeon m_freem(*m_head); 112112fb0330SPyun YongHyeon *m_head = NULL; 112212fb0330SPyun YongHyeon return (ENOBUFS); 112312fb0330SPyun YongHyeon } 112412fb0330SPyun YongHyeon *m_head = m; 11252a79fd39SMarius Strobl error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, 11262a79fd39SMarius Strobl txs->txs_dmamap, *m_head, txsegs, &nsegs, 11272a79fd39SMarius Strobl BUS_DMA_NOWAIT); 112812fb0330SPyun YongHyeon if (error != 0) { 112912fb0330SPyun YongHyeon m_freem(*m_head); 113012fb0330SPyun YongHyeon *m_head = NULL; 113112fb0330SPyun YongHyeon return (error); 113212fb0330SPyun YongHyeon } 113312fb0330SPyun YongHyeon } else if (error != 0) 113412fb0330SPyun YongHyeon return (error); 1135801772ecSMarius Strobl /* If nsegs is wrong then the stack is corrupt. */ 1136801772ecSMarius Strobl KASSERT(nsegs <= GEM_NTXSEGS, 1137801772ecSMarius Strobl ("%s: too many DMA segments (%d)", __func__, nsegs)); 113812fb0330SPyun YongHyeon if (nsegs == 0) { 113912fb0330SPyun YongHyeon m_freem(*m_head); 114012fb0330SPyun YongHyeon *m_head = NULL; 114112fb0330SPyun YongHyeon return (EIO); 114212fb0330SPyun YongHyeon } 114312fb0330SPyun YongHyeon 114412fb0330SPyun YongHyeon /* 114512fb0330SPyun YongHyeon * Ensure we have enough descriptors free to describe 114612fb0330SPyun YongHyeon * the packet. Note, we always reserve one descriptor 11472a79fd39SMarius Strobl * at the end of the ring as a termination point, in 11482a79fd39SMarius Strobl * order to prevent wrap-around. 
114912fb0330SPyun YongHyeon */ 115012fb0330SPyun YongHyeon if (nsegs > sc->sc_txfree - 1) { 115112fb0330SPyun YongHyeon txs->txs_ndescs = 0; 115212fb0330SPyun YongHyeon bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 115312fb0330SPyun YongHyeon return (ENOBUFS); 115412fb0330SPyun YongHyeon } 115512fb0330SPyun YongHyeon 115612fb0330SPyun YongHyeon txs->txs_ndescs = nsegs; 1157305f2c06SThomas Moestl txs->txs_firstdesc = sc->sc_txnext; 115812fb0330SPyun YongHyeon nexttx = txs->txs_firstdesc; 115912fb0330SPyun YongHyeon for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) { 116012fb0330SPyun YongHyeon #ifdef GEM_DEBUG 11612a79fd39SMarius Strobl CTR6(KTR_GEM, 11622a79fd39SMarius Strobl "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)", 11632a79fd39SMarius Strobl __func__, seg, nexttx, txsegs[seg].ds_len, 1164*8defc88cSMarius Strobl txsegs[seg].ds_addr, htole64(txsegs[seg].ds_addr)); 116512fb0330SPyun YongHyeon #endif 1166*8defc88cSMarius Strobl sc->sc_txdescs[nexttx].gd_addr = htole64(txsegs[seg].ds_addr); 116712fb0330SPyun YongHyeon KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE, 116812fb0330SPyun YongHyeon ("%s: segment size too large!", __func__)); 116912fb0330SPyun YongHyeon flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE; 1170*8defc88cSMarius Strobl sc->sc_txdescs[nexttx].gd_flags = htole64(flags | cflags); 117112fb0330SPyun YongHyeon txs->txs_lastdesc = nexttx; 117242c1b001SThomas Moestl } 1173305f2c06SThomas Moestl 11742a79fd39SMarius Strobl /* Set EOP on the last descriptor. */ 117512fb0330SPyun YongHyeon #ifdef GEM_DEBUG 11762a79fd39SMarius Strobl CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d", 11772a79fd39SMarius Strobl __func__, seg, nexttx); 117812fb0330SPyun YongHyeon #endif 117912fb0330SPyun YongHyeon sc->sc_txdescs[txs->txs_lastdesc].gd_flags |= 1180*8defc88cSMarius Strobl htole64(GEM_TD_END_OF_PACKET); 118112fb0330SPyun YongHyeon 11822a79fd39SMarius Strobl /* Lastly set SOP on the first descriptor. */ 118312fb0330SPyun YongHyeon #ifdef GEM_DEBUG 11842a79fd39SMarius Strobl CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d", 11852a79fd39SMarius Strobl __func__, seg, nexttx); 118612fb0330SPyun YongHyeon #endif 118712fb0330SPyun YongHyeon if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) { 118812fb0330SPyun YongHyeon sc->sc_txwin = 0; 118912fb0330SPyun YongHyeon sc->sc_txdescs[txs->txs_firstdesc].gd_flags |= 1190*8defc88cSMarius Strobl htole64(GEM_TD_INTERRUPT_ME | GEM_TD_START_OF_PACKET); 119112fb0330SPyun YongHyeon } else 119212fb0330SPyun YongHyeon sc->sc_txdescs[txs->txs_firstdesc].gd_flags |= 1193*8defc88cSMarius Strobl htole64(GEM_TD_START_OF_PACKET); 119412fb0330SPyun YongHyeon 119542c1b001SThomas Moestl /* Sync the DMA map. 
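* (BUS_DMASYNC_PREWRITE flushes the mbuf data to memory so the controller
* reads a consistent frame once the descriptors are handed over via the
* TX kick register.)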
*/ 11962a79fd39SMarius Strobl bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 11972a79fd39SMarius Strobl BUS_DMASYNC_PREWRITE); 1198305f2c06SThomas Moestl 119918100346SThomas Moestl #ifdef GEM_DEBUG 120012fb0330SPyun YongHyeon CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d", 12012a79fd39SMarius Strobl __func__, txs->txs_firstdesc, txs->txs_lastdesc, 12022a79fd39SMarius Strobl txs->txs_ndescs); 120318100346SThomas Moestl #endif 120442c1b001SThomas Moestl STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); 1205305f2c06SThomas Moestl STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); 120612fb0330SPyun YongHyeon txs->txs_mbuf = *m_head; 1207305f2c06SThomas Moestl 1208305f2c06SThomas Moestl sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc); 1209305f2c06SThomas Moestl sc->sc_txfree -= txs->txs_ndescs; 121042c1b001SThomas Moestl 121112fb0330SPyun YongHyeon return (0); 121242c1b001SThomas Moestl } 121342c1b001SThomas Moestl 121442c1b001SThomas Moestl static void 12152a79fd39SMarius Strobl gem_init_regs(struct gem_softc *sc) 121642c1b001SThomas Moestl { 12179f012efbSJustin Hibbits const u_char *laddr = if_getlladdr(sc->sc_ifp); 121842c1b001SThomas Moestl 12199ba2b298SMarius Strobl GEM_LOCK_ASSERT(sc, MA_OWNED); 12209ba2b298SMarius Strobl 12212a79fd39SMarius Strobl /* These registers are not cleared on reset. */ 12221ed3fed7SMarius Strobl if ((sc->sc_flags & GEM_INITED) == 0) { 12232a79fd39SMarius Strobl /* magic values */ 1224*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_IPG0, 0); 1225*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_IPG1, 8); 1226*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_IPG2, 4); 122742c1b001SThomas Moestl 12289ba2b298SMarius Strobl /* min frame length */ 1229*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN); 12309ba2b298SMarius Strobl /* max frame length and max burst size */ 1231*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_MAC_MAX_FRAME, 12321ed3fed7SMarius Strobl (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16)); 1233336cca9eSBenno Rice 12349ba2b298SMarius Strobl /* more magic values */ 1235*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_PREAMBLE_LEN, 0x7); 1236*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_JAM_SIZE, 0x4); 1237*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_ATTEMPT_LIMIT, 0x10); 1238*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_CONTROL_TYPE, 0x8808); 12399ba2b298SMarius Strobl 12409ba2b298SMarius Strobl /* random number seed */ 1241*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_RANDOM_SEED, 1242336cca9eSBenno Rice ((laddr[5] << 8) | laddr[4]) & 0x3ff); 1243336cca9eSBenno Rice 12442a79fd39SMarius Strobl /* secondary MAC address: 0:0:0:0:0:0 */ 1245*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_ADDR3, 0); 1246*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_ADDR4, 0); 1247*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_ADDR5, 0); 1248336cca9eSBenno Rice 12492a79fd39SMarius Strobl /* MAC control address: 01:80:c2:00:00:01 */ 1250*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_ADDR6, 0x0001); 1251*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_ADDR7, 0xc200); 1252*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_ADDR8, 0x0180); 125342c1b001SThomas Moestl 12542a79fd39SMarius Strobl /* MAC filter address: 0:0:0:0:0:0 */ 1255*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_ADDR_FILTER0, 0); 1256*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_ADDR_FILTER1, 0); 1257*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_ADDR_FILTER2, 0); 1258*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK1_2, 0); 
1259*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK0, 0); 126042c1b001SThomas Moestl 12611ed3fed7SMarius Strobl sc->sc_flags |= GEM_INITED; 126242c1b001SThomas Moestl } 126342c1b001SThomas Moestl 12642a79fd39SMarius Strobl /* Counters need to be zeroed. */ 1265*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0); 1266*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0); 1267*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0); 1268*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0); 1269*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_DEFER_TMR_CNT, 0); 1270*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_PEAK_ATTEMPTS, 0); 1271*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_RX_FRAME_COUNT, 0); 1272*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0); 1273*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0); 1274*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0); 1275*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0); 127642c1b001SThomas Moestl 12771ed3fed7SMarius Strobl /* Set XOFF PAUSE time. */ 1278*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0); 12791ed3fed7SMarius Strobl 12802a79fd39SMarius Strobl /* Set the station address. */ 1281*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]); 1282*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]); 1283*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_ADDR2, (laddr[0] << 8) | laddr[1]); 1284336cca9eSBenno Rice 12851ed3fed7SMarius Strobl /* Enable MII outputs. */ 1286*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_XIF_CONFIG, GEM_MAC_XIF_TX_MII_ENA); 128742c1b001SThomas Moestl } 128842c1b001SThomas Moestl 128942c1b001SThomas Moestl static void 12909f012efbSJustin Hibbits gem_start(if_t ifp) 129142c1b001SThomas Moestl { 12929f012efbSJustin Hibbits struct gem_softc *sc = if_getsoftc(ifp); 12938cfaff7dSMarius Strobl 12948cfaff7dSMarius Strobl GEM_LOCK(sc); 12958cfaff7dSMarius Strobl gem_start_locked(ifp); 12968cfaff7dSMarius Strobl GEM_UNLOCK(sc); 12978cfaff7dSMarius Strobl } 12988cfaff7dSMarius Strobl 12999ba2b298SMarius Strobl static inline void 13009ba2b298SMarius Strobl gem_txkick(struct gem_softc *sc) 13019ba2b298SMarius Strobl { 13029ba2b298SMarius Strobl 13039ba2b298SMarius Strobl /* 13049ba2b298SMarius Strobl * Update the TX kick register. This register has to point to the 13059ba2b298SMarius Strobl * descriptor after the last valid one and for optimum performance 13069ba2b298SMarius Strobl * should be incremented in multiples of 4 (the DMA engine fetches/ 13079ba2b298SMarius Strobl * updates descriptors in batches of 4). 
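* As an illustration: with sc_txnext == 12 the write below tells the DMA
* engine that descriptors up to index 11 are valid; gem_start_locked()
* therefore kicks whenever sc_txnext has advanced to a multiple of 4 and
* once more at the end of a burst for any leftover descriptors.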
13089ba2b298SMarius Strobl */ 13099ba2b298SMarius Strobl #ifdef GEM_DEBUG 13109ba2b298SMarius Strobl CTR3(KTR_GEM, "%s: %s: kicking TX %d", 13119ba2b298SMarius Strobl device_get_name(sc->sc_dev), __func__, sc->sc_txnext); 13129ba2b298SMarius Strobl #endif 13139ba2b298SMarius Strobl GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1314*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_TX_KICK, sc->sc_txnext); 13159ba2b298SMarius Strobl } 13169ba2b298SMarius Strobl 13178cfaff7dSMarius Strobl static void 13189f012efbSJustin Hibbits gem_start_locked(if_t ifp) 13198cfaff7dSMarius Strobl { 13209f012efbSJustin Hibbits struct gem_softc *sc = if_getsoftc(ifp); 132112fb0330SPyun YongHyeon struct mbuf *m; 13229ba2b298SMarius Strobl int kicked, ntx; 13239ba2b298SMarius Strobl 13249ba2b298SMarius Strobl GEM_LOCK_ASSERT(sc, MA_OWNED); 132542c1b001SThomas Moestl 13269f012efbSJustin Hibbits if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 13271ed3fed7SMarius Strobl IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0) 132842c1b001SThomas Moestl return; 132942c1b001SThomas Moestl 133018100346SThomas Moestl #ifdef GEM_DEBUG 133112fb0330SPyun YongHyeon CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d", 13321ed3fed7SMarius Strobl device_get_name(sc->sc_dev), __func__, sc->sc_txfree, 13331ed3fed7SMarius Strobl sc->sc_txnext); 133418100346SThomas Moestl #endif 13352a79fd39SMarius Strobl ntx = 0; 13369ba2b298SMarius Strobl kicked = 0; 13379f012efbSJustin Hibbits for (; !if_sendq_empty(ifp) && sc->sc_txfree > 1;) { 13389f012efbSJustin Hibbits m = if_dequeue(ifp); 133912fb0330SPyun YongHyeon if (m == NULL) 134042c1b001SThomas Moestl break; 13411ed3fed7SMarius Strobl if (gem_load_txmbuf(sc, &m) != 0) { 134212fb0330SPyun YongHyeon if (m == NULL) 134312fb0330SPyun YongHyeon break; 13449f012efbSJustin Hibbits if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 13459f012efbSJustin Hibbits if_sendq_prepend(ifp, m); 134642c1b001SThomas Moestl break; 134742c1b001SThomas Moestl } 13489ba2b298SMarius Strobl if ((sc->sc_txnext % 4) == 0) { 13499ba2b298SMarius Strobl gem_txkick(sc); 13509ba2b298SMarius Strobl kicked = 1; 13519ba2b298SMarius Strobl } else 13529ba2b298SMarius Strobl kicked = 0; 135318100346SThomas Moestl ntx++; 135412fb0330SPyun YongHyeon BPF_MTAP(ifp, m); 1355305f2c06SThomas Moestl } 1356305f2c06SThomas Moestl 1357305f2c06SThomas Moestl if (ntx > 0) { 13589ba2b298SMarius Strobl if (kicked == 0) 13599ba2b298SMarius Strobl gem_txkick(sc); 136018100346SThomas Moestl #ifdef GEM_DEBUG 1361305f2c06SThomas Moestl CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d", 13621ed3fed7SMarius Strobl device_get_name(sc->sc_dev), sc->sc_txnext); 136318100346SThomas Moestl #endif 1364305f2c06SThomas Moestl 136542c1b001SThomas Moestl /* Set a watchdog timer in case the chip flakes out. 
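* (gem_tint() zeroes sc_wdog_timer once the dirty queue drains; otherwise
* gem_watchdog() counts it down on every tick and reinitializes the chip
* when it expires.)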
*/ 13668cb37876SMarius Strobl sc->sc_wdog_timer = 5; 136718100346SThomas Moestl #ifdef GEM_DEBUG 136812fb0330SPyun YongHyeon CTR3(KTR_GEM, "%s: %s: watchdog %d", 13692a79fd39SMarius Strobl device_get_name(sc->sc_dev), __func__, 13702a79fd39SMarius Strobl sc->sc_wdog_timer); 137118100346SThomas Moestl #endif 137242c1b001SThomas Moestl } 137342c1b001SThomas Moestl } 137442c1b001SThomas Moestl 137542c1b001SThomas Moestl static void 13762a79fd39SMarius Strobl gem_tint(struct gem_softc *sc) 137742c1b001SThomas Moestl { 13789f012efbSJustin Hibbits if_t ifp = sc->sc_ifp; 137942c1b001SThomas Moestl struct gem_txsoft *txs; 13809ba2b298SMarius Strobl int progress; 13819ba2b298SMarius Strobl uint32_t txlast; 138218100346SThomas Moestl #ifdef GEM_DEBUG 13832a79fd39SMarius Strobl int i; 13842a79fd39SMarius Strobl 13859ba2b298SMarius Strobl GEM_LOCK_ASSERT(sc, MA_OWNED); 13869ba2b298SMarius Strobl 138712fb0330SPyun YongHyeon CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 138818100346SThomas Moestl #endif 138942c1b001SThomas Moestl 139042c1b001SThomas Moestl /* 13912a79fd39SMarius Strobl * Go through our TX list and free mbufs for those 139242c1b001SThomas Moestl * frames that have been transmitted. 139342c1b001SThomas Moestl */ 13942a79fd39SMarius Strobl progress = 0; 1395b2d59f42SThomas Moestl GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); 139642c1b001SThomas Moestl while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 139742c1b001SThomas Moestl #ifdef GEM_DEBUG 13989f012efbSJustin Hibbits if ((if_getflags(ifp) & IFF_DEBUG) != 0) { 139942c1b001SThomas Moestl printf(" txsoft %p transmit chain:\n", txs); 140042c1b001SThomas Moestl for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) { 140142c1b001SThomas Moestl printf("descriptor %d: ", i); 14022a79fd39SMarius Strobl printf("gd_flags: 0x%016llx\t", 1403*8defc88cSMarius Strobl (long long)le64toh( 14042a79fd39SMarius Strobl sc->sc_txdescs[i].gd_flags)); 14052a79fd39SMarius Strobl printf("gd_addr: 0x%016llx\n", 1406*8defc88cSMarius Strobl (long long)le64toh( 14072a79fd39SMarius Strobl sc->sc_txdescs[i].gd_addr)); 140842c1b001SThomas Moestl if (i == txs->txs_lastdesc) 140942c1b001SThomas Moestl break; 141042c1b001SThomas Moestl } 141142c1b001SThomas Moestl } 141242c1b001SThomas Moestl #endif 141342c1b001SThomas Moestl 141442c1b001SThomas Moestl /* 14151ed3fed7SMarius Strobl * In theory, we could harvest some descriptors before 141642c1b001SThomas Moestl * the ring is empty, but that's a bit complicated. 141742c1b001SThomas Moestl * 141842c1b001SThomas Moestl * GEM_TX_COMPLETION points to the last descriptor 141942c1b001SThomas Moestl * processed + 1. 142042c1b001SThomas Moestl */ 1421*8defc88cSMarius Strobl txlast = GEM_READ_4(sc, GEM_TX_COMPLETION); 142218100346SThomas Moestl #ifdef GEM_DEBUG 142312fb0330SPyun YongHyeon CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, " 142442c1b001SThomas Moestl "txs->txs_lastdesc = %d, txlast = %d", 142512fb0330SPyun YongHyeon __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast); 142618100346SThomas Moestl #endif 142742c1b001SThomas Moestl if (txs->txs_firstdesc <= txs->txs_lastdesc) { 142842c1b001SThomas Moestl if ((txlast >= txs->txs_firstdesc) && 142942c1b001SThomas Moestl (txlast <= txs->txs_lastdesc)) 143042c1b001SThomas Moestl break; 143142c1b001SThomas Moestl } else { 14322a79fd39SMarius Strobl /* Ick -- this command wraps. 
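* (Worked example, assuming a 256-entry ring purely for illustration:
* with firstdesc == 254 and lastdesc == 1 the packet is still in flight
* for txlast values 254, 255, 0 and 1, which is exactly the wrapped
* range matched below.)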
*/ 143342c1b001SThomas Moestl if ((txlast >= txs->txs_firstdesc) || 143442c1b001SThomas Moestl (txlast <= txs->txs_lastdesc)) 143542c1b001SThomas Moestl break; 143642c1b001SThomas Moestl } 143742c1b001SThomas Moestl 143818100346SThomas Moestl #ifdef GEM_DEBUG 14392a79fd39SMarius Strobl CTR1(KTR_GEM, "%s: releasing a descriptor", __func__); 144018100346SThomas Moestl #endif 144142c1b001SThomas Moestl STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); 144242c1b001SThomas Moestl 144342c1b001SThomas Moestl sc->sc_txfree += txs->txs_ndescs; 144442c1b001SThomas Moestl 1445305f2c06SThomas Moestl bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, 144642c1b001SThomas Moestl BUS_DMASYNC_POSTWRITE); 1447305f2c06SThomas Moestl bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); 144842c1b001SThomas Moestl if (txs->txs_mbuf != NULL) { 144942c1b001SThomas Moestl m_freem(txs->txs_mbuf); 145042c1b001SThomas Moestl txs->txs_mbuf = NULL; 145142c1b001SThomas Moestl } 145242c1b001SThomas Moestl 145342c1b001SThomas Moestl STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 145442c1b001SThomas Moestl 14558da56a6fSGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1456336cca9eSBenno Rice progress = 1; 145742c1b001SThomas Moestl } 145842c1b001SThomas Moestl 145918100346SThomas Moestl #ifdef GEM_DEBUG 14602a79fd39SMarius Strobl CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x GEM_TX_DATA_PTR %llx " 146142c1b001SThomas Moestl "GEM_TX_COMPLETION %x", 1462*8defc88cSMarius Strobl __func__, GEM_READ_4(sc, GEM_TX_STATE_MACHINE), 1463*8defc88cSMarius Strobl ((long long)GEM_READ_4(sc, GEM_TX_DATA_PTR_HI) << 32) | 1464*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_TX_DATA_PTR_LO), 1465*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_TX_COMPLETION)); 146618100346SThomas Moestl #endif 146742c1b001SThomas Moestl 1468336cca9eSBenno Rice if (progress) { 1469336cca9eSBenno Rice if (sc->sc_txfree == GEM_NTXDESC - 1) 1470336cca9eSBenno Rice sc->sc_txwin = 0; 147142c1b001SThomas Moestl 14722a79fd39SMarius Strobl /* 14732a79fd39SMarius Strobl * We freed some descriptors, so reset IFF_DRV_OACTIVE 14742a79fd39SMarius Strobl * and restart. 
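* (The watchdog is also disarmed below once the dirty queue is empty, so
* a fully drained ring cannot trigger a spurious timeout.)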
14752a79fd39SMarius Strobl */ 14769f012efbSJustin Hibbits if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 14779ba2b298SMarius Strobl if (STAILQ_EMPTY(&sc->sc_txdirtyq)) 14789ba2b298SMarius Strobl sc->sc_wdog_timer = 0; 147912fb0330SPyun YongHyeon gem_start_locked(ifp); 1480336cca9eSBenno Rice } 148142c1b001SThomas Moestl 148218100346SThomas Moestl #ifdef GEM_DEBUG 148312fb0330SPyun YongHyeon CTR3(KTR_GEM, "%s: %s: watchdog %d", 148412fb0330SPyun YongHyeon device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer); 148518100346SThomas Moestl #endif 148642c1b001SThomas Moestl } 148742c1b001SThomas Moestl 1488c3d5598aSMarius Strobl #ifdef GEM_RINT_TIMEOUT 14890d80b9bdSThomas Moestl static void 14902a79fd39SMarius Strobl gem_rint_timeout(void *arg) 14910d80b9bdSThomas Moestl { 14922a79fd39SMarius Strobl struct gem_softc *sc = arg; 14930d80b9bdSThomas Moestl 14941f317bf9SMarius Strobl GEM_LOCK_ASSERT(sc, MA_OWNED); 14959ba2b298SMarius Strobl 14968cfaff7dSMarius Strobl gem_rint(sc); 14970d80b9bdSThomas Moestl } 149811e3f060SJake Burkholder #endif 14990d80b9bdSThomas Moestl 150042c1b001SThomas Moestl static void 15012a79fd39SMarius Strobl gem_rint(struct gem_softc *sc) 150242c1b001SThomas Moestl { 15039f012efbSJustin Hibbits if_t ifp = sc->sc_ifp; 150442c1b001SThomas Moestl struct mbuf *m; 15052a79fd39SMarius Strobl uint64_t rxstat; 15062a79fd39SMarius Strobl uint32_t rxcomp; 150742c1b001SThomas Moestl 15089ba2b298SMarius Strobl GEM_LOCK_ASSERT(sc, MA_OWNED); 15099ba2b298SMarius Strobl 1510c3d5598aSMarius Strobl #ifdef GEM_RINT_TIMEOUT 15110d80b9bdSThomas Moestl callout_stop(&sc->sc_rx_ch); 1512c3d5598aSMarius Strobl #endif 151318100346SThomas Moestl #ifdef GEM_DEBUG 151412fb0330SPyun YongHyeon CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); 151518100346SThomas Moestl #endif 1516336cca9eSBenno Rice 1517336cca9eSBenno Rice /* 1518336cca9eSBenno Rice * Read the completion register once. This limits 1519336cca9eSBenno Rice * how long the following loop can execute. 1520336cca9eSBenno Rice */ 1521*8defc88cSMarius Strobl rxcomp = GEM_READ_4(sc, GEM_RX_COMPLETION); 152218100346SThomas Moestl #ifdef GEM_DEBUG 15239ba2b298SMarius Strobl CTR3(KTR_GEM, "%s: sc->sc_rxptr %d, complete %d", 152412fb0330SPyun YongHyeon __func__, sc->sc_rxptr, rxcomp); 152518100346SThomas Moestl #endif 15269ba2b298SMarius Strobl GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 15271ed3fed7SMarius Strobl for (; sc->sc_rxptr != rxcomp;) { 15281ed3fed7SMarius Strobl m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf; 1529*8defc88cSMarius Strobl rxstat = le64toh(sc->sc_rxdescs[sc->sc_rxptr].gd_flags); 153042c1b001SThomas Moestl 153142c1b001SThomas Moestl if (rxstat & GEM_RD_OWN) { 1532c3d5598aSMarius Strobl #ifdef GEM_RINT_TIMEOUT 153342c1b001SThomas Moestl /* 15340d80b9bdSThomas Moestl * The descriptor is still marked as owned, although 15350d80b9bdSThomas Moestl * it is supposed to have completed. This has been 15360d80b9bdSThomas Moestl * observed on some machines. Just exiting here 15370d80b9bdSThomas Moestl * might leave the packet sitting around until another 15380d80b9bdSThomas Moestl * one arrives to trigger a new interrupt, which is 15390d80b9bdSThomas Moestl * generally undesirable, so set up a timeout. 
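* (gem_rint_timeout() simply re-runs gem_rint() under the driver lock
* once GEM_RXOWN_TICKS have elapsed.)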
154042c1b001SThomas Moestl */ 15410d80b9bdSThomas Moestl callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS, 15420d80b9bdSThomas Moestl gem_rint_timeout, sc); 1543336cca9eSBenno Rice #endif 15441ed3fed7SMarius Strobl m = NULL; 15451ed3fed7SMarius Strobl goto kickit; 154642c1b001SThomas Moestl } 154742c1b001SThomas Moestl 154842c1b001SThomas Moestl if (rxstat & GEM_RD_BAD_CRC) { 15498da56a6fSGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 155042c1b001SThomas Moestl device_printf(sc->sc_dev, "receive error: CRC error\n"); 15511ed3fed7SMarius Strobl GEM_INIT_RXDESC(sc, sc->sc_rxptr); 15521ed3fed7SMarius Strobl m = NULL; 15531ed3fed7SMarius Strobl goto kickit; 155442c1b001SThomas Moestl } 155542c1b001SThomas Moestl 155642c1b001SThomas Moestl #ifdef GEM_DEBUG 15579f012efbSJustin Hibbits if ((if_getflags(ifp) & IFF_DEBUG) != 0) { 15581ed3fed7SMarius Strobl printf(" rxsoft %p descriptor %d: ", 15591ed3fed7SMarius Strobl &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr); 15602a79fd39SMarius Strobl printf("gd_flags: 0x%016llx\t", 1561*8defc88cSMarius Strobl (long long)le64toh( 15622a79fd39SMarius Strobl sc->sc_rxdescs[sc->sc_rxptr].gd_flags)); 15632a79fd39SMarius Strobl printf("gd_addr: 0x%016llx\n", 1564*8defc88cSMarius Strobl (long long)le64toh( 15652a79fd39SMarius Strobl sc->sc_rxdescs[sc->sc_rxptr].gd_addr)); 156642c1b001SThomas Moestl } 156742c1b001SThomas Moestl #endif 156842c1b001SThomas Moestl 156942c1b001SThomas Moestl /* 157042c1b001SThomas Moestl * Allocate a new mbuf cluster. If that fails, we are 157142c1b001SThomas Moestl * out of memory, and must drop the packet and recycle 157242c1b001SThomas Moestl * the buffer that's already attached to this descriptor. 157342c1b001SThomas Moestl */ 15741ed3fed7SMarius Strobl if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) { 15758da56a6fSGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 15761ed3fed7SMarius Strobl GEM_INIT_RXDESC(sc, sc->sc_rxptr); 15771ed3fed7SMarius Strobl m = NULL; 15781ed3fed7SMarius Strobl } 15791ed3fed7SMarius Strobl 15801ed3fed7SMarius Strobl kickit: 15811ed3fed7SMarius Strobl /* 15821ed3fed7SMarius Strobl * Update the RX kick register. This register has to point 15831ed3fed7SMarius Strobl * to the descriptor after the last valid one (before the 15849ba2b298SMarius Strobl * current batch) and for optimum performance should be 15859ba2b298SMarius Strobl * incremented in multiples of 4 (the DMA engine fetches/ 15869ba2b298SMarius Strobl * updates descriptors in batches of 4). 
15871ed3fed7SMarius Strobl */ 15881ed3fed7SMarius Strobl sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr); 15891ed3fed7SMarius Strobl if ((sc->sc_rxptr % 4) == 0) { 1590ccb1212aSMarius Strobl GEM_CDSYNC(sc, 1591ccb1212aSMarius Strobl BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1592*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_RX_KICK, 15931ed3fed7SMarius Strobl (sc->sc_rxptr + GEM_NRXDESC - 4) & 15941ed3fed7SMarius Strobl GEM_NRXDESC_MASK); 15951ed3fed7SMarius Strobl } 15961ed3fed7SMarius Strobl 15971ed3fed7SMarius Strobl if (m == NULL) { 15981ed3fed7SMarius Strobl if (rxstat & GEM_RD_OWN) 15991ed3fed7SMarius Strobl break; 160042c1b001SThomas Moestl continue; 160142c1b001SThomas Moestl } 160242c1b001SThomas Moestl 16038da56a6fSGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 16049ba2b298SMarius Strobl m->m_data += ETHER_ALIGN; /* first byte offset */ 160542c1b001SThomas Moestl m->m_pkthdr.rcvif = ifp; 16061ed3fed7SMarius Strobl m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat); 160712fb0330SPyun YongHyeon 16089f012efbSJustin Hibbits if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) 160912fb0330SPyun YongHyeon gem_rxcksum(m, rxstat); 161042c1b001SThomas Moestl 161142c1b001SThomas Moestl /* Pass it on. */ 16128cfaff7dSMarius Strobl GEM_UNLOCK(sc); 16139f012efbSJustin Hibbits if_input(ifp, m); 16148cfaff7dSMarius Strobl GEM_LOCK(sc); 161542c1b001SThomas Moestl } 161642c1b001SThomas Moestl 161718100346SThomas Moestl #ifdef GEM_DEBUG 16189ba2b298SMarius Strobl CTR3(KTR_GEM, "%s: done sc->sc_rxptr %d, complete %d", __func__, 1619*8defc88cSMarius Strobl sc->sc_rxptr, GEM_READ_4(sc, GEM_RX_COMPLETION)); 162018100346SThomas Moestl #endif 162142c1b001SThomas Moestl } 162242c1b001SThomas Moestl 162342c1b001SThomas Moestl static int 16242a79fd39SMarius Strobl gem_add_rxbuf(struct gem_softc *sc, int idx) 162542c1b001SThomas Moestl { 162642c1b001SThomas Moestl struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx]; 162742c1b001SThomas Moestl struct mbuf *m; 1628c3d5598aSMarius Strobl bus_dma_segment_t segs[1]; 1629c3d5598aSMarius Strobl int error, nsegs; 163042c1b001SThomas Moestl 16319ba2b298SMarius Strobl GEM_LOCK_ASSERT(sc, MA_OWNED); 16329ba2b298SMarius Strobl 1633c6499eccSGleb Smirnoff m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 163442c1b001SThomas Moestl if (m == NULL) 163542c1b001SThomas Moestl return (ENOBUFS); 1636305f2c06SThomas Moestl m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 163742c1b001SThomas Moestl 163842c1b001SThomas Moestl #ifdef GEM_DEBUG 16392a79fd39SMarius Strobl /* Bzero the packet to check DMA. */ 164042c1b001SThomas Moestl memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size); 164142c1b001SThomas Moestl #endif 164242c1b001SThomas Moestl 1643b2d59f42SThomas Moestl if (rxs->rxs_mbuf != NULL) { 1644b2d59f42SThomas Moestl bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 1645b2d59f42SThomas Moestl BUS_DMASYNC_POSTREAD); 1646305f2c06SThomas Moestl bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); 1647b2d59f42SThomas Moestl } 164842c1b001SThomas Moestl 1649c3d5598aSMarius Strobl error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap, 1650c3d5598aSMarius Strobl m, segs, &nsegs, BUS_DMA_NOWAIT); 1651c3d5598aSMarius Strobl if (error != 0) { 16522a79fd39SMarius Strobl device_printf(sc->sc_dev, 16532a79fd39SMarius Strobl "cannot load RS DMA map %d, error = %d\n", idx, error); 1654c3d5598aSMarius Strobl m_freem(m); 16551ed3fed7SMarius Strobl return (error); 165642c1b001SThomas Moestl } 16572a79fd39SMarius Strobl /* If nsegs is wrong then the stack is corrupt. 
*/ 1658801772ecSMarius Strobl KASSERT(nsegs == 1, 1659801772ecSMarius Strobl ("%s: too many DMA segments (%d)", __func__, nsegs)); 16601ed3fed7SMarius Strobl rxs->rxs_mbuf = m; 1661c3d5598aSMarius Strobl rxs->rxs_paddr = segs[0].ds_addr; 166242c1b001SThomas Moestl 16632a79fd39SMarius Strobl bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, 16642a79fd39SMarius Strobl BUS_DMASYNC_PREREAD); 166542c1b001SThomas Moestl 166642c1b001SThomas Moestl GEM_INIT_RXDESC(sc, idx); 166742c1b001SThomas Moestl 166842c1b001SThomas Moestl return (0); 166942c1b001SThomas Moestl } 167042c1b001SThomas Moestl 167142c1b001SThomas Moestl static void 16722a79fd39SMarius Strobl gem_eint(struct gem_softc *sc, u_int status) 167342c1b001SThomas Moestl { 167442c1b001SThomas Moestl 16758da56a6fSGleb Smirnoff if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1); 16761ed3fed7SMarius Strobl if ((status & GEM_INTR_RX_TAG_ERR) != 0) { 16771ed3fed7SMarius Strobl gem_reset_rxdma(sc); 167842c1b001SThomas Moestl return; 167942c1b001SThomas Moestl } 168042c1b001SThomas Moestl 16819ba2b298SMarius Strobl device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status); 16829ba2b298SMarius Strobl if ((status & GEM_INTR_BERR) != 0) { 1683*8defc88cSMarius Strobl printf(", PCI bus error 0x%x", 1684*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_PCI_ERROR_STATUS)); 16859ba2b298SMarius Strobl } 1686*8defc88cSMarius Strobl printf("\n"); 168742c1b001SThomas Moestl } 168842c1b001SThomas Moestl 168942c1b001SThomas Moestl void 16902a79fd39SMarius Strobl gem_intr(void *v) 169142c1b001SThomas Moestl { 16922a79fd39SMarius Strobl struct gem_softc *sc = v; 16931ed3fed7SMarius Strobl uint32_t status, status2; 169442c1b001SThomas Moestl 16958cfaff7dSMarius Strobl GEM_LOCK(sc); 1696*8defc88cSMarius Strobl status = GEM_READ_4(sc, GEM_STATUS); 16971ed3fed7SMarius Strobl 169818100346SThomas Moestl #ifdef GEM_DEBUG 169912fb0330SPyun YongHyeon CTR4(KTR_GEM, "%s: %s: cplt %x, status %x", 17009ba2b298SMarius Strobl device_get_name(sc->sc_dev), __func__, 17019ba2b298SMarius Strobl (status >> GEM_STATUS_TX_COMPLETION_SHFT), (u_int)status); 17021ed3fed7SMarius Strobl 17031ed3fed7SMarius Strobl /* 17041ed3fed7SMarius Strobl * PCS interrupts must be cleared, otherwise no traffic is passed! 
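* (The PCS interrupt status register is read twice below and the results
* ORed together, presumably because the latched bits are clear-on-read
* and a second event could arrive between the two accesses.)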
17051ed3fed7SMarius Strobl */ 17061ed3fed7SMarius Strobl if ((status & GEM_INTR_PCS) != 0) { 17072a79fd39SMarius Strobl status2 = 1708*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_MII_INTERRUP_STATUS) | 1709*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_MII_INTERRUP_STATUS); 17101ed3fed7SMarius Strobl if ((status2 & GEM_MII_INTERRUP_LINK) != 0) 17111ed3fed7SMarius Strobl device_printf(sc->sc_dev, 17121ed3fed7SMarius Strobl "%s: PCS link status changed\n", __func__); 17131ed3fed7SMarius Strobl } 17141ed3fed7SMarius Strobl if ((status & GEM_MAC_CONTROL_STATUS) != 0) { 1715*8defc88cSMarius Strobl status2 = GEM_READ_4(sc, GEM_MAC_CONTROL_STATUS); 17161ed3fed7SMarius Strobl if ((status2 & GEM_MAC_PAUSED) != 0) 17171ed3fed7SMarius Strobl device_printf(sc->sc_dev, 17181ed3fed7SMarius Strobl "%s: PAUSE received (PAUSE time %d slots)\n", 17191ed3fed7SMarius Strobl __func__, GEM_MAC_PAUSE_TIME(status2)); 17201ed3fed7SMarius Strobl if ((status2 & GEM_MAC_PAUSE) != 0) 17211ed3fed7SMarius Strobl device_printf(sc->sc_dev, 17221ed3fed7SMarius Strobl "%s: transited to PAUSE state\n", __func__); 17231ed3fed7SMarius Strobl if ((status2 & GEM_MAC_RESUME) != 0) 17241ed3fed7SMarius Strobl device_printf(sc->sc_dev, 17251ed3fed7SMarius Strobl "%s: transited to non-PAUSE state\n", __func__); 17261ed3fed7SMarius Strobl } 17271ed3fed7SMarius Strobl if ((status & GEM_INTR_MIF) != 0) 17281ed3fed7SMarius Strobl device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__); 172918100346SThomas Moestl #endif 173042c1b001SThomas Moestl 17319ba2b298SMarius Strobl if (__predict_false(status & 17321ed3fed7SMarius Strobl (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0) 173342c1b001SThomas Moestl gem_eint(sc, status); 173442c1b001SThomas Moestl 173542c1b001SThomas Moestl if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) 173642c1b001SThomas Moestl gem_rint(sc); 173742c1b001SThomas Moestl 17381ed3fed7SMarius Strobl if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) 17391ed3fed7SMarius Strobl gem_tint(sc); 17401ed3fed7SMarius Strobl 17419ba2b298SMarius Strobl if (__predict_false((status & GEM_INTR_TX_MAC) != 0)) { 1742*8defc88cSMarius Strobl status2 = GEM_READ_4(sc, GEM_MAC_TX_STATUS); 17432a79fd39SMarius Strobl if ((status2 & 17449ba2b298SMarius Strobl ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP | 17459ba2b298SMarius Strobl GEM_MAC_TX_PEAK_EXP)) != 0) 17462a79fd39SMarius Strobl device_printf(sc->sc_dev, 17472a79fd39SMarius Strobl "MAC TX fault, status %x\n", status2); 17482a79fd39SMarius Strobl if ((status2 & 17499ba2b298SMarius Strobl (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0) { 17508da56a6fSGleb Smirnoff if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1); 17519f012efbSJustin Hibbits if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_RUNNING); 17528cfaff7dSMarius Strobl gem_init_locked(sc); 175342c1b001SThomas Moestl } 17549ba2b298SMarius Strobl } 17559ba2b298SMarius Strobl if (__predict_false((status & GEM_INTR_RX_MAC) != 0)) { 1756*8defc88cSMarius Strobl status2 = GEM_READ_4(sc, GEM_MAC_RX_STATUS); 175700d12766SMarius Strobl /* 1758*8defc88cSMarius Strobl * At least with GEM_SUN_GEM revisions GEM_MAC_RX_OVERFLOW 1759*8defc88cSMarius Strobl * happen often due to a silicon bug so handle them silently. 1760*8defc88cSMarius Strobl * Moreover, it's likely that the receiver has hung so we 1761*8defc88cSMarius Strobl * reset it. 
176200d12766SMarius Strobl */ 17632a79fd39SMarius Strobl if ((status2 & GEM_MAC_RX_OVERFLOW) != 0) { 17648da56a6fSGleb Smirnoff if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1); 17651ed3fed7SMarius Strobl gem_reset_rxdma(sc); 17662a79fd39SMarius Strobl } else if ((status2 & 17672a79fd39SMarius Strobl ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) != 0) 17682a79fd39SMarius Strobl device_printf(sc->sc_dev, 17692a79fd39SMarius Strobl "MAC RX fault, status %x\n", status2); 177042c1b001SThomas Moestl } 17718cfaff7dSMarius Strobl GEM_UNLOCK(sc); 177242c1b001SThomas Moestl } 177342c1b001SThomas Moestl 17748cb37876SMarius Strobl static int 17752a79fd39SMarius Strobl gem_watchdog(struct gem_softc *sc) 177642c1b001SThomas Moestl { 17779f012efbSJustin Hibbits if_t ifp = sc->sc_ifp; 177842c1b001SThomas Moestl 17798cb37876SMarius Strobl GEM_LOCK_ASSERT(sc, MA_OWNED); 17808cb37876SMarius Strobl 178118100346SThomas Moestl #ifdef GEM_DEBUG 17822a79fd39SMarius Strobl CTR4(KTR_GEM, 17832a79fd39SMarius Strobl "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x GEM_MAC_RX_CONFIG %x", 1784*8defc88cSMarius Strobl __func__, GEM_READ_4(sc, GEM_RX_CONFIG), 1785*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_MAC_RX_STATUS), 1786*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_MAC_RX_CONFIG)); 17872a79fd39SMarius Strobl CTR4(KTR_GEM, 17882a79fd39SMarius Strobl "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x GEM_MAC_TX_CONFIG %x", 1789*8defc88cSMarius Strobl __func__, GEM_READ_4(sc, GEM_TX_CONFIG), 1790*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_MAC_TX_STATUS), 1791*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_MAC_TX_CONFIG)); 179218100346SThomas Moestl #endif 179342c1b001SThomas Moestl 17948cb37876SMarius Strobl if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) 17958cb37876SMarius Strobl return (0); 17968cb37876SMarius Strobl 17971ed3fed7SMarius Strobl if ((sc->sc_flags & GEM_LINK) != 0) 179842c1b001SThomas Moestl device_printf(sc->sc_dev, "device timeout\n"); 17991ed3fed7SMarius Strobl else if (bootverbose) 18001ed3fed7SMarius Strobl device_printf(sc->sc_dev, "device timeout (no link)\n"); 18018da56a6fSGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 180242c1b001SThomas Moestl 180342c1b001SThomas Moestl /* Try to get more packets going. */ 18049f012efbSJustin Hibbits if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 18058cfaff7dSMarius Strobl gem_init_locked(sc); 1806ccb1212aSMarius Strobl gem_start_locked(ifp); 18078cb37876SMarius Strobl return (EJUSTRETURN); 180842c1b001SThomas Moestl } 180942c1b001SThomas Moestl 181042c1b001SThomas Moestl static void 18112a79fd39SMarius Strobl gem_mifinit(struct gem_softc *sc) 181242c1b001SThomas Moestl { 181342c1b001SThomas Moestl 1814801772ecSMarius Strobl /* Configure the MIF in frame mode. */ 1815*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MIF_CONFIG, 1816*8defc88cSMarius Strobl GEM_READ_4(sc, GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA); 1817*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_MIF_CONFIG, 4, 181865f2c0ffSMarius Strobl BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 181942c1b001SThomas Moestl } 182042c1b001SThomas Moestl 182142c1b001SThomas Moestl /* 182242c1b001SThomas Moestl * MII interface 182342c1b001SThomas Moestl * 182478d22f42SMarius Strobl * The MII interface supports at least three different operating modes: 182542c1b001SThomas Moestl * 182642c1b001SThomas Moestl * Bitbang mode is implemented using data, clock and output enable registers. 
182742c1b001SThomas Moestl * 182842c1b001SThomas Moestl * Frame mode is implemented by loading a complete frame into the frame 182942c1b001SThomas Moestl * register and polling the valid bit for completion. 183042c1b001SThomas Moestl * 183142c1b001SThomas Moestl * Polling mode uses the frame register but completion is indicated by 183242c1b001SThomas Moestl * an interrupt. 183342c1b001SThomas Moestl * 183442c1b001SThomas Moestl */ 183542c1b001SThomas Moestl int 18362a79fd39SMarius Strobl gem_mii_readreg(device_t dev, int phy, int reg) 183742c1b001SThomas Moestl { 18382a79fd39SMarius Strobl struct gem_softc *sc; 183942c1b001SThomas Moestl int n; 18402a79fd39SMarius Strobl uint32_t v; 184142c1b001SThomas Moestl 184242c1b001SThomas Moestl #ifdef GEM_DEBUG_PHY 18431ed3fed7SMarius Strobl printf("%s: phy %d reg %d\n", __func__, phy, reg); 184442c1b001SThomas Moestl #endif 184542c1b001SThomas Moestl 18462a79fd39SMarius Strobl sc = device_get_softc(dev); 18471ed3fed7SMarius Strobl if ((sc->sc_flags & GEM_SERDES) != 0) { 18481ed3fed7SMarius Strobl switch (reg) { 18491ed3fed7SMarius Strobl case MII_BMCR: 18501ed3fed7SMarius Strobl reg = GEM_MII_CONTROL; 18511ed3fed7SMarius Strobl break; 18521ed3fed7SMarius Strobl case MII_BMSR: 18531ed3fed7SMarius Strobl reg = GEM_MII_STATUS; 18541ed3fed7SMarius Strobl break; 18551ed3fed7SMarius Strobl case MII_PHYIDR1: 18561ed3fed7SMarius Strobl case MII_PHYIDR2: 18571ed3fed7SMarius Strobl return (0); 18581ed3fed7SMarius Strobl case MII_ANAR: 18591ed3fed7SMarius Strobl reg = GEM_MII_ANAR; 18601ed3fed7SMarius Strobl break; 18611ed3fed7SMarius Strobl case MII_ANLPAR: 18621ed3fed7SMarius Strobl reg = GEM_MII_ANLPAR; 18631ed3fed7SMarius Strobl break; 18641ed3fed7SMarius Strobl case MII_EXTSR: 18651ed3fed7SMarius Strobl return (EXTSR_1000XFDX | EXTSR_1000XHDX); 18661ed3fed7SMarius Strobl default: 18671ed3fed7SMarius Strobl device_printf(sc->sc_dev, 18681ed3fed7SMarius Strobl "%s: unhandled register %d\n", __func__, reg); 18691ed3fed7SMarius Strobl return (0); 18701ed3fed7SMarius Strobl } 1871*8defc88cSMarius Strobl return (GEM_READ_4(sc, reg)); 18721ed3fed7SMarius Strobl } 187342c1b001SThomas Moestl 18742a79fd39SMarius Strobl /* Construct the frame command. 
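* (The frame word packs the read opcode with the PHY and register
* numbers; the loop below polls GEM_MIF_FRAME_TA0 for completion and
* returns the GEM_MIF_FRAME_DATA field of the same register.)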
*/ 18751ed3fed7SMarius Strobl v = GEM_MIF_FRAME_READ | 18761ed3fed7SMarius Strobl (phy << GEM_MIF_PHY_SHIFT) | 18771ed3fed7SMarius Strobl (reg << GEM_MIF_REG_SHIFT); 187842c1b001SThomas Moestl 1879*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MIF_FRAME, v); 1880*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_MIF_FRAME, 4, 1881ccb1212aSMarius Strobl BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 188242c1b001SThomas Moestl for (n = 0; n < 100; n++) { 188342c1b001SThomas Moestl DELAY(1); 1884*8defc88cSMarius Strobl v = GEM_READ_4(sc, GEM_MIF_FRAME); 18851f317bf9SMarius Strobl if (v & GEM_MIF_FRAME_TA0) 188642c1b001SThomas Moestl return (v & GEM_MIF_FRAME_DATA); 188742c1b001SThomas Moestl } 188842c1b001SThomas Moestl 18892a79fd39SMarius Strobl device_printf(sc->sc_dev, "%s: timed out\n", __func__); 189042c1b001SThomas Moestl return (0); 189142c1b001SThomas Moestl } 189242c1b001SThomas Moestl 189342c1b001SThomas Moestl int 18942a79fd39SMarius Strobl gem_mii_writereg(device_t dev, int phy, int reg, int val) 189542c1b001SThomas Moestl { 18962a79fd39SMarius Strobl struct gem_softc *sc; 189742c1b001SThomas Moestl int n; 18982a79fd39SMarius Strobl uint32_t v; 189942c1b001SThomas Moestl 190042c1b001SThomas Moestl #ifdef GEM_DEBUG_PHY 19011ed3fed7SMarius Strobl printf("%s: phy %d reg %d val %x\n", phy, reg, val, __func__); 190242c1b001SThomas Moestl #endif 190342c1b001SThomas Moestl 19042a79fd39SMarius Strobl sc = device_get_softc(dev); 19051ed3fed7SMarius Strobl if ((sc->sc_flags & GEM_SERDES) != 0) { 19061ed3fed7SMarius Strobl switch (reg) { 19071ed3fed7SMarius Strobl case MII_BMSR: 19081ed3fed7SMarius Strobl reg = GEM_MII_STATUS; 19091ed3fed7SMarius Strobl break; 1910ccb1212aSMarius Strobl case MII_BMCR: 1911ccb1212aSMarius Strobl reg = GEM_MII_CONTROL; 1912ccb1212aSMarius Strobl if ((val & GEM_MII_CONTROL_RESET) == 0) 1913ccb1212aSMarius Strobl break; 1914*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MII_CONTROL, val); 1915*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_MII_CONTROL, 4, 1916ccb1212aSMarius Strobl BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 1917*8defc88cSMarius Strobl if (!gem_bitwait(sc, GEM_MII_CONTROL, 1918ccb1212aSMarius Strobl GEM_MII_CONTROL_RESET, 0)) 1919ccb1212aSMarius Strobl device_printf(sc->sc_dev, 1920ccb1212aSMarius Strobl "cannot reset PCS\n"); 1921ccb1212aSMarius Strobl /* FALLTHROUGH */ 19221ed3fed7SMarius Strobl case MII_ANAR: 1923*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MII_CONFIG, 0); 1924*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_MII_CONFIG, 4, 19251ed3fed7SMarius Strobl BUS_SPACE_BARRIER_WRITE); 1926*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MII_ANAR, val); 1927*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_MII_ANAR, 4, 192865f2c0ffSMarius Strobl BUS_SPACE_BARRIER_WRITE); 1929*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MII_SLINK_CONTROL, 19301ed3fed7SMarius Strobl GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D); 1931*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_MII_SLINK_CONTROL, 4, 193265f2c0ffSMarius Strobl BUS_SPACE_BARRIER_WRITE); 1933*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MII_CONFIG, 19341ed3fed7SMarius Strobl GEM_MII_CONFIG_ENABLE); 1935*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_MII_CONFIG, 4, 193665f2c0ffSMarius Strobl BUS_SPACE_BARRIER_WRITE); 19371ed3fed7SMarius Strobl return (0); 19381ed3fed7SMarius Strobl case MII_ANLPAR: 19391ed3fed7SMarius Strobl reg = GEM_MII_ANLPAR; 19401ed3fed7SMarius Strobl break; 19411ed3fed7SMarius Strobl default: 19421ed3fed7SMarius Strobl device_printf(sc->sc_dev, 19431ed3fed7SMarius Strobl "%s: unhandled register 
%d\n", __func__, reg);
19441ed3fed7SMarius Strobl return (0);
19451ed3fed7SMarius Strobl }
1946*8defc88cSMarius Strobl GEM_WRITE_4(sc, reg, val);
1947*8defc88cSMarius Strobl GEM_BARRIER(sc, reg, 4,
194865f2c0ffSMarius Strobl BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
19491ed3fed7SMarius Strobl return (0);
19501ed3fed7SMarius Strobl }
19511ed3fed7SMarius Strobl
19522a79fd39SMarius Strobl /* Construct the frame command. */
195342c1b001SThomas Moestl v = GEM_MIF_FRAME_WRITE |
195442c1b001SThomas Moestl (phy << GEM_MIF_PHY_SHIFT) |
195542c1b001SThomas Moestl (reg << GEM_MIF_REG_SHIFT) |
195642c1b001SThomas Moestl (val & GEM_MIF_FRAME_DATA);
195742c1b001SThomas Moestl
1958*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MIF_FRAME, v);
1959*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_MIF_FRAME, 4,
1960ccb1212aSMarius Strobl BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
196142c1b001SThomas Moestl for (n = 0; n < 100; n++) {
196242c1b001SThomas Moestl DELAY(1);
1963*8defc88cSMarius Strobl v = GEM_READ_4(sc, GEM_MIF_FRAME);
19641f317bf9SMarius Strobl if (v & GEM_MIF_FRAME_TA0)
196542c1b001SThomas Moestl return (1);
196642c1b001SThomas Moestl }
196742c1b001SThomas Moestl
19682a79fd39SMarius Strobl device_printf(sc->sc_dev, "%s: timed out\n", __func__);
196942c1b001SThomas Moestl return (0);
197042c1b001SThomas Moestl }
197142c1b001SThomas Moestl
197242c1b001SThomas Moestl void
19732a79fd39SMarius Strobl gem_mii_statchg(device_t dev)
197442c1b001SThomas Moestl {
19752a79fd39SMarius Strobl struct gem_softc *sc;
19761ed3fed7SMarius Strobl int gigabit;
19771ed3fed7SMarius Strobl uint32_t rxcfg, txcfg, v;
197842c1b001SThomas Moestl
19792a79fd39SMarius Strobl sc = device_get_softc(dev);
19802a79fd39SMarius Strobl
19819ba2b298SMarius Strobl GEM_LOCK_ASSERT(sc, MA_OWNED);
19829ba2b298SMarius Strobl
198342c1b001SThomas Moestl #ifdef GEM_DEBUG
19849f012efbSJustin Hibbits if ((if_getflags(sc->sc_ifp) & IFF_DEBUG) != 0)
19858e5d93dbSMarius Strobl device_printf(sc->sc_dev, "%s: status change\n", __func__);
198642c1b001SThomas Moestl #endif
198742c1b001SThomas Moestl
19881ed3fed7SMarius Strobl if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
19891ed3fed7SMarius Strobl IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
19901ed3fed7SMarius Strobl sc->sc_flags |= GEM_LINK;
19911ed3fed7SMarius Strobl else
19921ed3fed7SMarius Strobl sc->sc_flags &= ~GEM_LINK;
19931ed3fed7SMarius Strobl
19941ed3fed7SMarius Strobl switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) {
19951ed3fed7SMarius Strobl case IFM_1000_SX:
19961ed3fed7SMarius Strobl case IFM_1000_LX:
19971ed3fed7SMarius Strobl case IFM_1000_CX:
19981ed3fed7SMarius Strobl case IFM_1000_T:
19991ed3fed7SMarius Strobl gigabit = 1;
20001ed3fed7SMarius Strobl break;
20011ed3fed7SMarius Strobl default:
20021ed3fed7SMarius Strobl gigabit = 0;
200342c1b001SThomas Moestl }
20041ed3fed7SMarius Strobl
20051ed3fed7SMarius Strobl /*
20061ed3fed7SMarius Strobl * The configuration done here corresponds to the steps F) and
20071ed3fed7SMarius Strobl * G) and as far as enabling of RX and TX MAC goes also step H)
20081ed3fed7SMarius Strobl * of the initialization sequence outlined in section 3.2.1 of
20091ed3fed7SMarius Strobl * the GEM Gigabit Ethernet ASIC Specification.
20101ed3fed7SMarius Strobl */ 20111ed3fed7SMarius Strobl 2012c0e3e9d4SMarius Strobl rxcfg = sc->sc_mac_rxcfg; 2013c0e3e9d4SMarius Strobl rxcfg &= ~GEM_MAC_RX_CARR_EXTEND; 20141ed3fed7SMarius Strobl txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT; 20151ed3fed7SMarius Strobl if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) 20161ed3fed7SMarius Strobl txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS; 20171ed3fed7SMarius Strobl else if (gigabit != 0) { 20181ed3fed7SMarius Strobl rxcfg |= GEM_MAC_RX_CARR_EXTEND; 20191ed3fed7SMarius Strobl txcfg |= GEM_MAC_TX_CARR_EXTEND; 20201ed3fed7SMarius Strobl } 2021c0e3e9d4SMarius Strobl (void)gem_disable_tx(sc); 2022*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG, txcfg); 2023c0e3e9d4SMarius Strobl (void)gem_disable_rx(sc); 2024*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, rxcfg); 20251ed3fed7SMarius Strobl 2026*8defc88cSMarius Strobl v = GEM_READ_4(sc, GEM_MAC_CONTROL_CONFIG) & 20271ed3fed7SMarius Strobl ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE); 20282a79fd39SMarius Strobl if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 20292a79fd39SMarius Strobl IFM_ETH_RXPAUSE) != 0) 20301ed3fed7SMarius Strobl v |= GEM_MAC_CC_RX_PAUSE; 20312a79fd39SMarius Strobl if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 20322a79fd39SMarius Strobl IFM_ETH_TXPAUSE) != 0) 20331ed3fed7SMarius Strobl v |= GEM_MAC_CC_TX_PAUSE; 2034*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_CONTROL_CONFIG, v); 20351ed3fed7SMarius Strobl 20361ed3fed7SMarius Strobl if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 && 20371ed3fed7SMarius Strobl gigabit != 0) 2038*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_SLOT_TIME, 20391ed3fed7SMarius Strobl GEM_MAC_SLOT_TIME_CARR_EXTEND); 20401ed3fed7SMarius Strobl else 2041*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_SLOT_TIME, 20421ed3fed7SMarius Strobl GEM_MAC_SLOT_TIME_NORMAL); 204342c1b001SThomas Moestl 204442c1b001SThomas Moestl /* XIF Configuration */ 204542c1b001SThomas Moestl v = GEM_MAC_XIF_LINK_LED; 204642c1b001SThomas Moestl v |= GEM_MAC_XIF_TX_MII_ENA; 20471ed3fed7SMarius Strobl if ((sc->sc_flags & GEM_SERDES) == 0) { 2048*8defc88cSMarius Strobl if ((GEM_READ_4(sc, GEM_MIF_CONFIG) & 204978d22f42SMarius Strobl GEM_MIF_CONFIG_PHY_SEL) != 0) { 205042c1b001SThomas Moestl /* External MII needs echo disable if half duplex. */ 205178d22f42SMarius Strobl if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & 205278d22f42SMarius Strobl IFM_FDX) == 0) 205342c1b001SThomas Moestl v |= GEM_MAC_XIF_ECHO_DISABL; 205478d22f42SMarius Strobl } else 20551ed3fed7SMarius Strobl /* 20561ed3fed7SMarius Strobl * Internal MII needs buffer enable. 20571ed3fed7SMarius Strobl * XXX buffer enable makes only sense for an 20581ed3fed7SMarius Strobl * external PHY. 
20591ed3fed7SMarius Strobl */ 206042c1b001SThomas Moestl v |= GEM_MAC_XIF_MII_BUF_ENA; 206142c1b001SThomas Moestl } 20621ed3fed7SMarius Strobl if (gigabit != 0) 20631ed3fed7SMarius Strobl v |= GEM_MAC_XIF_GMII_MODE; 20641ed3fed7SMarius Strobl if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) 20651ed3fed7SMarius Strobl v |= GEM_MAC_XIF_FDPLX_LED; 2066*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_XIF_CONFIG, v); 20671ed3fed7SMarius Strobl 2068c0e3e9d4SMarius Strobl sc->sc_mac_rxcfg = rxcfg; 20699f012efbSJustin Hibbits if ((if_getdrvflags(sc->sc_ifp) & IFF_DRV_RUNNING) != 0 && 20701ed3fed7SMarius Strobl (sc->sc_flags & GEM_LINK) != 0) { 2071*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG, 20721ed3fed7SMarius Strobl txcfg | GEM_MAC_TX_ENABLE); 2073*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, 20741ed3fed7SMarius Strobl rxcfg | GEM_MAC_RX_ENABLE); 20751ed3fed7SMarius Strobl } 207642c1b001SThomas Moestl } 207742c1b001SThomas Moestl 207842c1b001SThomas Moestl int 20799f012efbSJustin Hibbits gem_mediachange(if_t ifp) 208042c1b001SThomas Moestl { 20819f012efbSJustin Hibbits struct gem_softc *sc = if_getsoftc(ifp); 20821f317bf9SMarius Strobl int error; 208342c1b001SThomas Moestl 20842a79fd39SMarius Strobl /* XXX add support for serial media. */ 208542c1b001SThomas Moestl 20861f317bf9SMarius Strobl GEM_LOCK(sc); 20871f317bf9SMarius Strobl error = mii_mediachg(sc->sc_mii); 20881f317bf9SMarius Strobl GEM_UNLOCK(sc); 20891f317bf9SMarius Strobl return (error); 209042c1b001SThomas Moestl } 209142c1b001SThomas Moestl 209242c1b001SThomas Moestl void 20939f012efbSJustin Hibbits gem_mediastatus(if_t ifp, struct ifmediareq *ifmr) 209442c1b001SThomas Moestl { 20959f012efbSJustin Hibbits struct gem_softc *sc = if_getsoftc(ifp); 209642c1b001SThomas Moestl 20978cfaff7dSMarius Strobl GEM_LOCK(sc); 20989f012efbSJustin Hibbits if ((if_getflags(ifp) & IFF_UP) == 0) { 20998cfaff7dSMarius Strobl GEM_UNLOCK(sc); 210042c1b001SThomas Moestl return; 21018cfaff7dSMarius Strobl } 210242c1b001SThomas Moestl 210342c1b001SThomas Moestl mii_pollstat(sc->sc_mii); 210442c1b001SThomas Moestl ifmr->ifm_active = sc->sc_mii->mii_media_active; 210542c1b001SThomas Moestl ifmr->ifm_status = sc->sc_mii->mii_media_status; 21068cfaff7dSMarius Strobl GEM_UNLOCK(sc); 210742c1b001SThomas Moestl } 210842c1b001SThomas Moestl 210942c1b001SThomas Moestl static int 21109f012efbSJustin Hibbits gem_ioctl(if_t ifp, u_long cmd, caddr_t data) 211142c1b001SThomas Moestl { 21129f012efbSJustin Hibbits struct gem_softc *sc = if_getsoftc(ifp); 211342c1b001SThomas Moestl struct ifreq *ifr = (struct ifreq *)data; 21142a79fd39SMarius Strobl int error; 21158cfaff7dSMarius Strobl 21162a79fd39SMarius Strobl error = 0; 211742c1b001SThomas Moestl switch (cmd) { 211842c1b001SThomas Moestl case SIOCSIFFLAGS: 21191f317bf9SMarius Strobl GEM_LOCK(sc); 21209f012efbSJustin Hibbits if ((if_getflags(ifp) & IFF_UP) != 0) { 21219f012efbSJustin Hibbits if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 && 21229f012efbSJustin Hibbits ((if_getflags(ifp) ^ sc->sc_ifflags) & 21231ed3fed7SMarius Strobl (IFF_ALLMULTI | IFF_PROMISC)) != 0) 21245ed0b954SMarius Strobl gem_setladrf(sc); 212542c1b001SThomas Moestl else 21268cfaff7dSMarius Strobl gem_init_locked(sc); 21279f012efbSJustin Hibbits } else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 212842c1b001SThomas Moestl gem_stop(ifp, 0); 21299f012efbSJustin Hibbits if ((if_getflags(ifp) & IFF_LINK0) != 0) 213012fb0330SPyun YongHyeon sc->sc_csum_features |= CSUM_UDP; 213112fb0330SPyun YongHyeon 
else 213212fb0330SPyun YongHyeon sc->sc_csum_features &= ~CSUM_UDP; 21339f012efbSJustin Hibbits if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) 21349f012efbSJustin Hibbits if_sethwassist(ifp, sc->sc_csum_features); 21359f012efbSJustin Hibbits sc->sc_ifflags = if_getflags(ifp); 21361f317bf9SMarius Strobl GEM_UNLOCK(sc); 213742c1b001SThomas Moestl break; 213842c1b001SThomas Moestl case SIOCADDMULTI: 213942c1b001SThomas Moestl case SIOCDELMULTI: 21401f317bf9SMarius Strobl GEM_LOCK(sc); 21419f012efbSJustin Hibbits if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 21425ed0b954SMarius Strobl gem_setladrf(sc); 21431f317bf9SMarius Strobl GEM_UNLOCK(sc); 214442c1b001SThomas Moestl break; 214542c1b001SThomas Moestl case SIOCGIFMEDIA: 214642c1b001SThomas Moestl case SIOCSIFMEDIA: 214742c1b001SThomas Moestl error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd); 214842c1b001SThomas Moestl break; 214912fb0330SPyun YongHyeon case SIOCSIFCAP: 215012fb0330SPyun YongHyeon GEM_LOCK(sc); 21519f012efbSJustin Hibbits if_setcapenable(ifp, ifr->ifr_reqcap); 21529f012efbSJustin Hibbits if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) 21539f012efbSJustin Hibbits if_sethwassist(ifp, sc->sc_csum_features); 215412fb0330SPyun YongHyeon else 21559f012efbSJustin Hibbits if_sethwassist(ifp, 0); 215612fb0330SPyun YongHyeon GEM_UNLOCK(sc); 215712fb0330SPyun YongHyeon break; 215842c1b001SThomas Moestl default: 21591f317bf9SMarius Strobl error = ether_ioctl(ifp, cmd, data); 216042c1b001SThomas Moestl break; 216142c1b001SThomas Moestl } 216242c1b001SThomas Moestl 216342c1b001SThomas Moestl return (error); 216442c1b001SThomas Moestl } 216542c1b001SThomas Moestl 216699e76377SGleb Smirnoff static u_int 216799e76377SGleb Smirnoff gem_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 216899e76377SGleb Smirnoff { 216999e76377SGleb Smirnoff uint32_t crc, *hash = arg; 217099e76377SGleb Smirnoff 217199e76377SGleb Smirnoff crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN); 217299e76377SGleb Smirnoff /* We just want the 8 most significant bits. */ 217399e76377SGleb Smirnoff crc >>= 24; 217499e76377SGleb Smirnoff /* Set the corresponding bit in the filter. */ 217599e76377SGleb Smirnoff hash[crc >> 4] |= 1 << (15 - (crc & 15)); 217699e76377SGleb Smirnoff 217799e76377SGleb Smirnoff return (1); 217899e76377SGleb Smirnoff } 217999e76377SGleb Smirnoff 218042c1b001SThomas Moestl static void 21815ed0b954SMarius Strobl gem_setladrf(struct gem_softc *sc) 218242c1b001SThomas Moestl { 21839f012efbSJustin Hibbits if_t ifp = sc->sc_ifp; 2184336cca9eSBenno Rice int i; 21852a79fd39SMarius Strobl uint32_t hash[16]; 218699e76377SGleb Smirnoff uint32_t v; 218742c1b001SThomas Moestl 21888cfaff7dSMarius Strobl GEM_LOCK_ASSERT(sc, MA_OWNED); 21898cfaff7dSMarius Strobl 2190336cca9eSBenno Rice /* 2191c0e3e9d4SMarius Strobl * Turn off the RX MAC and the hash filter as required by the Sun GEM 2192c0e3e9d4SMarius Strobl * programming restrictions. 
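* (gem_bitwait() below confirms that both GEM_MAC_RX_HASH_FILTER and
* GEM_MAC_RX_ENABLE have actually cleared before the filter is rewritten;
* the RX MAC is re-enabled again at the "chipit" label.)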
2193336cca9eSBenno Rice */ 21942b2f3c09SMarius Strobl v = sc->sc_mac_rxcfg & ~GEM_MAC_RX_HASH_FILTER; 2195*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, v); 2196*8defc88cSMarius Strobl GEM_BARRIER(sc, GEM_MAC_RX_CONFIG, 4, 2197ccb1212aSMarius Strobl BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 2198*8defc88cSMarius Strobl if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER | 2199c0e3e9d4SMarius Strobl GEM_MAC_RX_ENABLE, 0)) 2200c0e3e9d4SMarius Strobl device_printf(sc->sc_dev, 2201c0e3e9d4SMarius Strobl "cannot disable RX MAC or hash filter\n"); 22021ed3fed7SMarius Strobl 2203c0e3e9d4SMarius Strobl v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_PROMISC_GRP); 22049f012efbSJustin Hibbits if ((if_getflags(ifp) & IFF_PROMISC) != 0) { 220542c1b001SThomas Moestl v |= GEM_MAC_RX_PROMISCUOUS; 220642c1b001SThomas Moestl goto chipit; 220742c1b001SThomas Moestl } 22089f012efbSJustin Hibbits if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) { 2209336cca9eSBenno Rice v |= GEM_MAC_RX_PROMISC_GRP; 221042c1b001SThomas Moestl goto chipit; 221142c1b001SThomas Moestl } 221242c1b001SThomas Moestl 221342c1b001SThomas Moestl /* 22142a79fd39SMarius Strobl * Set up multicast address filter by passing all multicast 22152a79fd39SMarius Strobl * addresses through a crc generator, and then using the high 22162a79fd39SMarius Strobl * order 8 bits as an index into the 256 bit logical address 22172a79fd39SMarius Strobl * filter. The high order 4 bits selects the word, while the 22182a79fd39SMarius Strobl * other 4 bits select the bit within the word (where bit 0 22192a79fd39SMarius Strobl * is the MSB). 222042c1b001SThomas Moestl */ 222142c1b001SThomas Moestl 2222336cca9eSBenno Rice memset(hash, 0, sizeof(hash)); 222399e76377SGleb Smirnoff if_foreach_llmaddr(ifp, gem_hash_maddr, hash); 2224336cca9eSBenno Rice 2225336cca9eSBenno Rice v |= GEM_MAC_RX_HASH_FILTER; 2226336cca9eSBenno Rice 22272a79fd39SMarius Strobl /* Now load the hash table into the chip (if we are using it). */ 22282a79fd39SMarius Strobl for (i = 0; i < 16; i++) 2229*8defc88cSMarius Strobl GEM_WRITE_4(sc, 2230336cca9eSBenno Rice GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0), 2231336cca9eSBenno Rice hash[i]); 223242c1b001SThomas Moestl 223342c1b001SThomas Moestl chipit: 2234c0e3e9d4SMarius Strobl sc->sc_mac_rxcfg = v; 2235*8defc88cSMarius Strobl GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, v | GEM_MAC_RX_ENABLE); 223642c1b001SThomas Moestl } 2237