/*-
 * Copyright (c) 2020 Michael J Karels
 * Copyright (c) 2016, 2020 Jared McNeill <jmcneill@invisible.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * RPi4 (BCM 2711) Gigabit Ethernet ("GENET") controller
 *
 * This driver is derived in large part from bcmgenet.c from NetBSD by
 * Jared McNeill.  Parts of the structure and other common code in
 * this driver have been copied from if_awg.c for the Allwinner EMAC,
 * also by Jared McNeill.
 */

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/taskqueue.h>
#include <sys/gpio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#define	__BIT(_x)	(1 << (_x))
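/*
 * __BIT() is the NetBSD-style bit macro; it is defined before including
 * if_genetreg.h since the NetBSD-derived register definitions (and the
 * ring-enable and filter code below) use it.
 */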
#include "if_genetreg.h"

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_fdt.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

#include "syscon_if.h"
#include "miibus_if.h"
#include "gpio_if.h"

#define	RD4(sc, reg)		bus_read_4((sc)->res[_RES_MAC], (reg))
#define	WR4(sc, reg, val)	bus_write_4((sc)->res[_RES_MAC], (reg), (val))

#define	GEN_LOCK(sc)		mtx_lock(&(sc)->mtx)
#define	GEN_UNLOCK(sc)		mtx_unlock(&(sc)->mtx)
#define	GEN_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->mtx, MA_OWNED)
#define	GEN_ASSERT_UNLOCKED(sc)	mtx_assert(&(sc)->mtx, MA_NOTOWNED)

#define	TX_DESC_COUNT		GENET_DMA_DESC_COUNT
#define	RX_DESC_COUNT		GENET_DMA_DESC_COUNT

#define	TX_NEXT(n, count)	(((n) + 1) & ((count) - 1))
#define	RX_NEXT(n, count)	(((n) + 1) & ((count) - 1))
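/*
 * TX_NEXT()/RX_NEXT() advance a ring index with wraparound; they assume the
 * descriptor counts are powers of two so the wrap is a simple mask.
 */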

#define	TX_MAX_SEGS		20

static SYSCTL_NODE(_hw, OID_AUTO, genet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "genet driver parameters");

/* Maximum number of mbufs to pass per call to if_input */
static int gen_rx_batch = 16 /* RX_BATCH_DEFAULT */;
SYSCTL_INT(_hw_genet, OID_AUTO, rx_batch, CTLFLAG_RDTUN,
    &gen_rx_batch, 0, "max mbufs per call to if_input");

TUNABLE_INT("hw.gen.rx_batch", &gen_rx_batch);	/* old name/interface */
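/*
 * Because of CTLFLAG_RDTUN, the batch size can only be changed as a
 * boot-time tunable, under either name, e.g. hw.genet.rx_batch=32 in
 * loader.conf(5).
 */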

/*
 * Transmitting packets with only an Ethernet header in the first mbuf
 * fails.  Examples include reflected ICMPv6 packets, e.g. echo replies;
 * forwarded IPv6/TCP packets; and forwarded IPv4/TCP packets that use NAT
 * with IPFW.  Pulling up the sizes of ether_header + ip6_hdr + icmp6_hdr
 * seems to work for both ICMPv6 and TCP over IPv6, as well as the IPv4/TCP
 * case.
 */
static int gen_tx_hdr_min = 56;		/* ether_header + ip6_hdr + icmp6_hdr */
SYSCTL_INT(_hw_genet, OID_AUTO, tx_hdr_min, CTLFLAG_RW,
    &gen_tx_hdr_min, 0,
    "minimum length to pull up for packets with only an Ethernet header");

static struct ofw_compat_data compat_data[] = {
	{ "brcm,genet-v1",		1 },
	{ "brcm,genet-v2",		2 },
	{ "brcm,genet-v3",		3 },
	{ "brcm,genet-v4",		4 },
	{ "brcm,genet-v5",		5 },
	{ "brcm,bcm2711-genet-v5",	5 },
	{ NULL,				0 }
};

enum {
	_RES_MAC,		/* what to call this? */
	_RES_IRQ1,
	_RES_IRQ2,
	_RES_NITEMS
};

static struct resource_spec gen_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },
	{ -1, 0 }
};
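/* The gen_spec[] entries correspond, in order, to the _RES_* indices above. */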

/* structure per ring entry */
struct gen_ring_ent {
	bus_dmamap_t		map;
	struct mbuf		*mbuf;
};

struct tx_queue {
	int			hwindex;	/* hardware index */
	int			nentries;
	u_int			queued;		/* or avail? */
	u_int			cur;
	u_int			next;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct rx_queue {
	int			hwindex;	/* hardware index */
	int			nentries;
	u_int			cur;
	u_int			prod_idx;
	u_int			cons_idx;
	struct gen_ring_ent	*entries;
};

struct gen_softc {
	struct resource		*res[_RES_NITEMS];
	struct mtx		mtx;
	if_t			ifp;
	device_t		dev;
	device_t		miibus;
	mii_contype_t		phy_mode;

	struct callout		stat_ch;
	struct task		link_task;
	void			*ih;
	void			*ih2;
	int			type;
	int			if_flags;
	int			link;
	bus_dma_tag_t		tx_buf_tag;
	/*
	 * The genet chip has multiple queues for transmit and receive.
	 * This driver uses only one (queue 16, the default), but the code
	 * is structured for multiple rings.  The additional rings would be
	 * used for different priorities.
	 */
#define DEF_TXQUEUE	0
#define NTXQUEUE	1
	struct tx_queue		tx_queue[NTXQUEUE];
	struct gen_ring_ent	tx_ring_ent[TX_DESC_COUNT];	/* ring entries */

	bus_dma_tag_t		rx_buf_tag;
#define DEF_RXQUEUE	0
#define NRXQUEUE	1
	struct rx_queue		rx_queue[NRXQUEUE];
	struct gen_ring_ent	rx_ring_ent[RX_DESC_COUNT];	/* ring entries */
};

static void gen_init(void *softc);
static void gen_start(if_t ifp);
static void gen_destroy(struct gen_softc *sc);
static int gen_encap(struct gen_softc *sc, struct mbuf **mp);
static int gen_parse_tx(struct mbuf *m, int csum_flags);
static int gen_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int gen_get_phy_mode(device_t dev);
static bool gen_get_eaddr(device_t dev, struct ether_addr *eaddr);
static void gen_set_enaddr(struct gen_softc *sc);
static void gen_setup_rxfilter(struct gen_softc *sc);
static void gen_reset(struct gen_softc *sc);
static void gen_enable(struct gen_softc *sc);
static void gen_dma_disable(struct gen_softc *sc);
static int gen_bus_dma_init(struct gen_softc *sc);
static void gen_bus_dma_teardown(struct gen_softc *sc);
static void gen_enable_intr(struct gen_softc *sc);
static void gen_init_txrings(struct gen_softc *sc);
static void gen_init_rxrings(struct gen_softc *sc);
static void gen_intr(void *softc);
static int gen_rxintr(struct gen_softc *sc, struct rx_queue *q);
static void gen_txintr(struct gen_softc *sc, struct tx_queue *q);
static void gen_intr2(void *softc);
static int gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index);
static int gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
    struct mbuf *m);
static void gen_link_task(void *arg, int pending);
static void gen_media_status(if_t ifp, struct ifmediareq *ifmr);
static int gen_media_change(if_t ifp);
static void gen_tick(void *softc);

static int
gen_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "RPi4 Gigabit Ethernet");
	return (BUS_PROBE_DEFAULT);
}

static int
gen_attach(device_t dev)
{
	struct ether_addr eaddr;
	struct gen_softc *sc;
	int major, minor, error, mii_flags;
	bool eaddr_found;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	if (bus_alloc_resources(dev, gen_spec, sc->res) != 0) {
		device_printf(dev, "cannot allocate resources for device\n");
		error = ENXIO;
		goto fail;
	}

	major = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MAJOR) >> REV_MAJOR_SHIFT;
	if (major != REV_MAJOR_V5) {
		device_printf(dev, "version %d is not supported\n", major);
		error = ENXIO;
		goto fail;
	}
	minor = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MINOR) >> REV_MINOR_SHIFT;
	device_printf(dev, "GENET version 5.%d phy 0x%04x\n", minor,
	    RD4(sc, GENET_SYS_REV_CTRL) & REV_PHY);

	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);
	TASK_INIT(&sc->link_task, 0, gen_link_task, sc);

	error = gen_get_phy_mode(dev);
	if (error != 0)
		goto fail;

	bzero(&eaddr, sizeof(eaddr));
	eaddr_found = gen_get_eaddr(dev, &eaddr);

	/* reset core */
	gen_reset(sc);

	gen_dma_disable(sc);

	/* Setup DMA */
	error = gen_bus_dma_init(sc);
	if (error != 0) {
		device_printf(dev, "cannot setup bus dma\n");
		goto fail;
	}

	/* Setup ethernet interface */
	sc->ifp = if_alloc(IFT_ETHER);
	if_setsoftc(sc->ifp, sc);
	if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(sc->ifp, gen_start);
	if_setioctlfn(sc->ifp, gen_ioctl);
	if_setinitfn(sc->ifp, gen_init);
	if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1);
	if_setsendqready(sc->ifp);
#define GEN_CSUM_FEATURES	(CSUM_UDP | CSUM_TCP)
	if_sethwassist(sc->ifp, GEN_CSUM_FEATURES);
	if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM |
	    IFCAP_HWCSUM_IPV6);
	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));

	/* Install interrupt handlers */
	error = bus_setup_intr(dev, sc->res[_RES_IRQ1],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr, sc, &sc->ih);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler1\n");
		goto fail;
	}

	error = bus_setup_intr(dev, sc->res[_RES_IRQ2],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr2, sc, &sc->ih2);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler2\n");
		goto fail;
	}

	/* Attach MII driver */
	mii_flags = 0;
	switch (sc->phy_mode)
	{
	case MII_CONTYPE_RGMII_ID:
		mii_flags |= MIIF_RX_DELAY | MIIF_TX_DELAY;
		break;
	case MII_CONTYPE_RGMII_RXID:
		mii_flags |= MIIF_RX_DELAY;
		break;
	case MII_CONTYPE_RGMII_TXID:
		mii_flags |= MIIF_TX_DELAY;
		break;
	default:
		break;
	}
	error = mii_attach(dev, &sc->miibus, sc->ifp, gen_media_change,
	    gen_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    mii_flags);
	if (error != 0) {
		device_printf(dev, "cannot attach PHY\n");
		goto fail;
	}

	/* If address was not found, create one based on the hostid and name. */
	if (eaddr_found == 0)
		ether_gen_addr(sc->ifp, &eaddr);
	/* Attach ethernet interface */
	ether_ifattach(sc->ifp, eaddr.octet);

fail:
	if (error)
		gen_destroy(sc);
	return (error);
}

/* Free resources after failed attach.  This is not a complete detach. */
static void
gen_destroy(struct gen_softc *sc)
{

	if (sc->miibus) {	/* can't happen */
		device_delete_child(sc->dev, sc->miibus);
		sc->miibus = NULL;
	}
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ1], sc->ih);
	bus_teardown_intr(sc->dev, sc->res[_RES_IRQ2], sc->ih2);
	gen_bus_dma_teardown(sc);
	callout_drain(&sc->stat_ch);
	if (mtx_initialized(&sc->mtx))
		mtx_destroy(&sc->mtx);
	bus_release_resources(sc->dev, gen_spec, sc->res);
	if (sc->ifp != NULL) {
		if_free(sc->ifp);
		sc->ifp = NULL;
	}
}

static int
gen_get_phy_mode(device_t dev)
{
	struct gen_softc *sc;
	phandle_t node;
	mii_contype_t type;
	int error = 0;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);
	type = mii_fdt_get_contype(node);

	switch (type) {
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		sc->phy_mode = type;
		break;
	default:
		device_printf(dev, "unknown phy-mode '%s'\n",
		    mii_fdt_contype_to_name(type));
		error = ENXIO;
		break;
	}

	return (error);
}

static bool
gen_get_eaddr(device_t dev, struct ether_addr *eaddr)
{
	struct gen_softc *sc;
	uint32_t maclo, machi, val;
	phandle_t node;

	sc = device_get_softc(dev);

	node = ofw_bus_get_node(dev);
	if (OF_getprop(node, "mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "local-mac-address", eaddr->octet,
	    ETHER_ADDR_LEN) != -1 ||
	    OF_getprop(node, "address", eaddr->octet, ETHER_ADDR_LEN) != -1)
		return (true);

	device_printf(dev, "No Ethernet address found in fdt!\n");
	maclo = machi = 0;
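	/*
	 * Fall back to whatever address the firmware may have left in the
	 * UMAC registers, but only if the controller has not been reset
	 * since it was programmed (RBUF_FLUSH_RESET still clear).
	 */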

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	if ((val & GENET_SYS_RBUF_FLUSH_RESET) == 0) {
		maclo = htobe32(RD4(sc, GENET_UMAC_MAC0));
		machi = htobe16(RD4(sc, GENET_UMAC_MAC1) & 0xffff);
	}

	if (maclo == 0 && machi == 0) {
		if (bootverbose)
			device_printf(dev,
			    "No Ethernet address found in controller\n");
		return (false);
	} else {
		eaddr->octet[0] = maclo & 0xff;
		eaddr->octet[1] = (maclo >> 8) & 0xff;
		eaddr->octet[2] = (maclo >> 16) & 0xff;
		eaddr->octet[3] = (maclo >> 24) & 0xff;
		eaddr->octet[4] = machi & 0xff;
		eaddr->octet[5] = (machi >> 8) & 0xff;
		return (true);
	}
}

static void
gen_reset(struct gen_softc *sc)
{
	uint32_t val;

	val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL);
	val |= GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	val &= ~GENET_SYS_RBUF_FLUSH_RESET;
	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val);
	DELAY(10);

	WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0);
	DELAY(10);

	WR4(sc, GENET_UMAC_CMD, 0);
	WR4(sc, GENET_UMAC_CMD,
	    GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET);
	DELAY(10);
	WR4(sc, GENET_UMAC_CMD, 0);

	WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT |
	    GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX);
	WR4(sc, GENET_UMAC_MIB_CTRL, 0);
}

static void
gen_enable(struct gen_softc *sc)
{
	u_int val;

	WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536);

	val = RD4(sc, GENET_RBUF_CTRL);
	val |= GENET_RBUF_ALIGN_2B;
	WR4(sc, GENET_RBUF_CTRL, val);

	WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1);

	/* Enable transmitter and receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val |= GENET_UMAC_CMD_TXEN;
	val |= GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Enable interrupts */
	gen_enable_intr(sc);
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

static void
gen_disable_intr(struct gen_softc *sc)
{
	/* Disable interrupts */
	WR4(sc, GENET_INTRL2_CPU_SET_MASK, 0xffffffff);
	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK, 0xffffffff);
}

static void
gen_disable(struct gen_softc *sc)
{
	uint32_t val;

	/* Stop receiver */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_RXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Stop transmitter */
	val = RD4(sc, GENET_UMAC_CMD);
	val &= ~GENET_UMAC_CMD_TXEN;
	WR4(sc, GENET_UMAC_CMD, val);

	/* Disable Interrupt */
	gen_disable_intr(sc);
}

static void
gen_enable_offload(struct gen_softc *sc)
{
	uint32_t check_ctrl, buf_ctrl;
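	/*
	 * The hardware exchanges checksum offload information through a
	 * status block prepended to each packet buffer; the *_64B_EN bits
	 * below enable that status block on the RX and TX paths.
	 */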

	check_ctrl = RD4(sc, GENET_RBUF_CHECK_CTRL);
	buf_ctrl = RD4(sc, GENET_RBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & IFCAP_RXCSUM) != 0) {
		check_ctrl |= GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl |= GENET_RBUF_64B_EN;
	} else {
		check_ctrl &= ~GENET_RBUF_CHECK_CTRL_EN;
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	}
	WR4(sc, GENET_RBUF_CHECK_CTRL, check_ctrl);
	WR4(sc, GENET_RBUF_CTRL, buf_ctrl);

	buf_ctrl = RD4(sc, GENET_TBUF_CTRL);
	if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
	    0)
		buf_ctrl |= GENET_RBUF_64B_EN;
	else
		buf_ctrl &= ~GENET_RBUF_64B_EN;
	WR4(sc, GENET_TBUF_CTRL, buf_ctrl);
}

static void
gen_dma_disable(struct gen_softc *sc)
{
	int val;

	val = RD4(sc, GENET_TX_DMA_CTRL);
	val &= ~GENET_TX_DMA_CTRL_EN;
	val &= ~GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_TX_DMA_CTRL, val);

	val = RD4(sc, GENET_RX_DMA_CTRL);
	val &= ~GENET_RX_DMA_CTRL_EN;
	val &= ~GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static int
gen_bus_dma_init(struct gen_softc *sc)
{
	device_t dev = sc->dev;
	int i, error;
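	/*
	 * Both tags restrict buffers to 40-bit bus addresses, the apparent
	 * reach of the GENET DMA engine; the descriptor address registers
	 * are programmed as LO/HI pairs in gen_encap() and gen_mapbuf_rx().
	 */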

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, TX_MAX_SEGS,	/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX buffer tag\n");
		return (error);
	}

	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->tx_buf_tag, 0,
		    &sc->tx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create TX buffer map\n");
			return (error);
		}
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    4, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_40BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx_buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX buffer tag\n");
		return (error);
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->rx_buf_tag, 0,
		    &sc->rx_ring_ent[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer map\n");
			return (error);
		}
	}
	return (0);
}

static void
gen_bus_dma_teardown(struct gen_softc *sc)
{
	int i, error;

	if (sc->tx_buf_tag != NULL) {
		for (i = 0; i < TX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->tx_buf_tag,
			    sc->tx_ring_ent[i].map);
			sc->tx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->tx_buf_tag);
		sc->tx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}

	if (sc->rx_buf_tag != NULL) {
		for (i = 0; i < RX_DESC_COUNT; i++) {
			error = bus_dmamap_destroy(sc->rx_buf_tag,
			    sc->rx_ring_ent[i].map);
			sc->rx_ring_ent[i].map = NULL;
			if (error)
				device_printf(sc->dev,
				    "%s: bus_dmamap_destroy failed: %d\n",
				    __func__, error);
		}
		error = bus_dma_tag_destroy(sc->rx_buf_tag);
		sc->rx_buf_tag = NULL;
		if (error)
			device_printf(sc->dev,
			    "%s: bus_dma_tag_destroy failed: %d\n", __func__,
			    error);
	}
}

static void
gen_enable_intr(struct gen_softc *sc)
{

	WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK,
	    GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
}

/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_txring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct tx_queue *q;
	uint32_t val;

	q = &sc->tx_queue[queue];
	q->entries = &sc->tx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;

	/* TX ring */

	q->queued = 0;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_TX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_TX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0);
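	/* END_ADDR is the offset of the last 32-bit word of the ring area. */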
	WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid),
	    TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), 1);
	WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0);

	WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* Enable transmit DMA */
	val = RD4(sc, GENET_TX_DMA_CTRL);
	val |= GENET_TX_DMA_CTRL_EN;
	val |= GENET_TX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_TX_DMA_CTRL, val);
}

/*
 * "queue" is the software queue index (0-4); "qid" is the hardware index
 * (0-16).  "base" is the starting index in the ring array.
 */
static void
gen_init_rxring(struct gen_softc *sc, int queue, int qid, int base,
    int nentries)
{
	struct rx_queue *q;
	uint32_t val;
	int i;

	q = &sc->rx_queue[queue];
	q->entries = &sc->rx_ring_ent[base];
	q->hwindex = qid;
	q->nentries = nentries;
	q->cons_idx = q->prod_idx = 0;

	WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08);

	WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), 0);
	WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid),
	    (nentries << GENET_RX_DMA_RING_BUF_SIZE_DESC_SHIFT) |
	    (MCLBYTES & GENET_RX_DMA_RING_BUF_SIZE_BUF_LEN_MASK));
	WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid),
	    RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1);
	WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0);
	WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid),
	    (5 << GENET_RX_DMA_XON_XOFF_THRES_LO_SHIFT) | (RX_DESC_COUNT >> 4));
	WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0);
	WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0);

	WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid));	/* enable */

	/* fill ring */
	for (i = 0; i < RX_DESC_COUNT; i++)
		gen_newbuf_rx(sc, &sc->rx_queue[DEF_RXQUEUE], i);

	/* Enable receive DMA */
	val = RD4(sc, GENET_RX_DMA_CTRL);
	val |= GENET_RX_DMA_CTRL_EN;
	val |= GENET_RX_DMA_CTRL_RBUF_EN(qid);
	WR4(sc, GENET_RX_DMA_CTRL, val);
}

static void
gen_init_txrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_txring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->tx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_txring(sc, DEF_TXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    TX_DESC_COUNT);
	sc->tx_queue[DEF_TXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}

static void
gen_init_rxrings(struct gen_softc *sc)
{
	int base = 0;
#ifdef PRI_RINGS
	int i;

	/* init priority rings */
	for (i = 0; i < PRI_RINGS; i++) {
		gen_init_rxring(sc, i, i, base, TX_DESC_PRICOUNT);
		sc->rx_queue[i].queue = i;
		base += TX_DESC_PRICOUNT;
		dma_ring_conf |= 1 << i;
		dma_control |= DMA_RENABLE(i);
	}
#endif

	/* init GENET_DMA_DEFAULT_QUEUE (16) */
	gen_init_rxring(sc, DEF_RXQUEUE, GENET_DMA_DEFAULT_QUEUE, base,
	    RX_DESC_COUNT);
	sc->rx_queue[DEF_RXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE;
}

static void
gen_stop(struct gen_softc *sc)
{
	int i;
	struct gen_ring_ent *ent;

	GEN_ASSERT_LOCKED(sc);

	callout_stop(&sc->stat_ch);
	if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
	gen_reset(sc);
	gen_disable(sc);
	gen_dma_disable(sc);

	/* Clear the tx/rx ring buffer */
	for (i = 0; i < TX_DESC_COUNT; i++) {
		ent = &sc->tx_ring_ent[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->tx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
		}
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		ent = &sc->rx_ring_ent[i];
		if (ent->mbuf != NULL) {
			bus_dmamap_sync(sc->rx_buf_tag, ent->map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->rx_buf_tag, ent->map);
			m_freem(ent->mbuf);
			ent->mbuf = NULL;
		}
	}
}

static void
gen_init_locked(struct gen_softc *sc)
{
	struct mii_data *mii;
	if_t ifp;

	mii = device_get_softc(sc->miibus);
	ifp = sc->ifp;

	GEN_ASSERT_LOCKED(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	switch (sc->phy_mode)
	{
	case MII_CONTYPE_RGMII:
	case MII_CONTYPE_RGMII_ID:
	case MII_CONTYPE_RGMII_RXID:
	case MII_CONTYPE_RGMII_TXID:
		WR4(sc, GENET_SYS_PORT_CTRL, GENET_SYS_PORT_MODE_EXT_GPHY);
		break;
	default:
		WR4(sc, GENET_SYS_PORT_CTRL, 0);
	}

	gen_set_enaddr(sc);

	/* Setup RX filter */
	gen_setup_rxfilter(sc);

	gen_init_txrings(sc);
	gen_init_rxrings(sc);
	gen_enable(sc);
	gen_enable_offload(sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii_mediachg(mii);
	callout_reset(&sc->stat_ch, hz, gen_tick, sc);
}

static void
gen_init(void *softc)
{
	struct gen_softc *sc;

	sc = softc;
	GEN_LOCK(sc);
	gen_init_locked(sc);
	GEN_UNLOCK(sc);
}

static uint8_t ether_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

static void
gen_setup_rxfilter_mdf(struct gen_softc *sc, u_int n, const uint8_t *ea)
{
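	/*
	 * Each MDF (perfect-match) filter slot is split across two
	 * registers: the two high-order bytes of the address go in ADDR0,
	 * the remaining four bytes in ADDR1.
	 */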
	uint32_t addr0 = (ea[0] << 8) | ea[1];
	uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5];

	WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0);
	WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1);
}

static u_int
gen_setup_multi(void *arg, struct sockaddr_dl *sdl, u_int count)
{
	struct gen_softc *sc = arg;

	/* "count + 2" to account for unicast and broadcast */
	gen_setup_rxfilter_mdf(sc, count + 2, LLADDR(sdl));
	return (1);		/* increment to count */
}

static void
gen_setup_rxfilter(struct gen_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t cmd, mdf_ctrl;
	u_int n;

	GEN_ASSERT_LOCKED(sc);

	cmd = RD4(sc, GENET_UMAC_CMD);

	/*
	 * Count the required number of hardware filters.  We need one
	 * for each multicast address, plus one for our own address and
	 * the broadcast address.
	 */
	n = if_llmaddr_count(ifp) + 2;

	if (n > GENET_MAX_MDF_FILTER)
		if_setflagbits(ifp, IFF_ALLMULTI, 0);
	else
		if_setflagbits(ifp, 0, IFF_ALLMULTI);

	if ((if_getflags(ifp) & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
		cmd |= GENET_UMAC_CMD_PROMISC;
		mdf_ctrl = 0;
	} else {
		cmd &= ~GENET_UMAC_CMD_PROMISC;
		gen_setup_rxfilter_mdf(sc, 0, ether_broadcastaddr);
		gen_setup_rxfilter_mdf(sc, 1, if_getlladdr(ifp));
		(void) if_foreach_llmaddr(ifp, gen_setup_multi, sc);
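		/*
		 * Enable filter slots 0..n-1; the computed mask sets the
		 * top n of the GENET_MAX_MDF_FILTER enable bits.
		 */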
		mdf_ctrl = (__BIT(GENET_MAX_MDF_FILTER) - 1) &~
		    (__BIT(GENET_MAX_MDF_FILTER - n) - 1);
	}

	WR4(sc, GENET_UMAC_CMD, cmd);
	WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl);
}

static void
gen_set_enaddr(struct gen_softc *sc)
{
	uint8_t *enaddr;
	uint32_t val;
	if_t ifp;

	GEN_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	/* Write our unicast address */
	enaddr = if_getlladdr(ifp);
	/* Write hardware address */
	val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) |
	    (enaddr[0] << 24);
	WR4(sc, GENET_UMAC_MAC0, val);
	val = enaddr[5] | (enaddr[4] << 8);
	WR4(sc, GENET_UMAC_MAC1, val);
}

static void
gen_start_locked(struct gen_softc *sc)
{
	struct mbuf *m;
	if_t ifp;
	int err;

	GEN_ASSERT_LOCKED(sc);

	if (!sc->link)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	while (true) {
		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		err = gen_encap(sc, &m);
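		/*
		 * On ENOBUFS the TX ring is full: mark the queue OACTIVE and
		 * requeue the packet.  Otherwise, if gen_encap() freed the
		 * mbuf it is counted as an output error; if it returned the
		 * mbuf, it is requeued.
		 */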
		if (err != 0) {
			if (err == ENOBUFS)
				if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			else if (m == NULL)
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			if (m != NULL)
				if_sendq_prepend(ifp, m);
			break;
		}
		bpf_mtap_if(ifp, m);
	}
}

static void
gen_start(if_t ifp)
{
	struct gen_softc *sc;

	sc = if_getsoftc(ifp);

	GEN_LOCK(sc);
	gen_start_locked(sc);
	GEN_UNLOCK(sc);
}

/* Test for any delayed checksum */
#define CSUM_DELAY_ANY	(CSUM_TCP | CSUM_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP)

static int
gen_encap(struct gen_softc *sc, struct mbuf **mp)
{
	bus_dmamap_t map;
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, first, i, index, offset;
	uint32_t csuminfo, length_status, csum_flags = 0, csumdata;
	struct mbuf *m;
	struct statusblock *sb = NULL;
	struct tx_queue *q;
	struct gen_ring_ent *ent;
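	/*
	 * If TX checksum offload is enabled, a status block describing the
	 * checksum request is prepended to the packet below, then stripped
	 * again after the DMA mapping so that requeueing and BPF see the
	 * original packet.
	 */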

	GEN_ASSERT_LOCKED(sc);

	q = &sc->tx_queue[DEF_TXQUEUE];
	if (q->queued == q->nentries) {
		/* tx_queue is full */
		return (ENOBUFS);
	}

	m = *mp;

	/*
	 * Don't attempt to send packets with only an Ethernet header in the
	 * first mbuf; see the comment above gen_tx_hdr_min.
	 */
108351cefda1SMike Karels if (m->m_len == sizeof(struct ether_header)) {
1084184291b0SMike Karels m = m_pullup(m, MIN(m->m_pkthdr.len, gen_tx_hdr_min));
108551cefda1SMike Karels if (m == NULL) {
1086b7459fb0SJustin Hibbits if (if_getflags(sc->ifp) & IFF_DEBUG)
108751cefda1SMike Karels device_printf(sc->dev,
1088184291b0SMike Karels "header pullup fail\n");
108951cefda1SMike Karels *mp = NULL;
109051cefda1SMike Karels return (ENOMEM);
109151cefda1SMike Karels }
109251cefda1SMike Karels }
10932bdcf623SMike Karels
10942cd0c529SMike Karels if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) !=
10952cd0c529SMike Karels 0) {
10962cd0c529SMike Karels csum_flags = m->m_pkthdr.csum_flags;
10972cd0c529SMike Karels csumdata = m->m_pkthdr.csum_data;
10982cd0c529SMike Karels M_PREPEND(m, sizeof(struct statusblock), M_NOWAIT);
10992cd0c529SMike Karels if (m == NULL) {
1100b7459fb0SJustin Hibbits if (if_getflags(sc->ifp) & IFF_DEBUG)
11012cd0c529SMike Karels device_printf(sc->dev, "prepend fail\n");
11022cd0c529SMike Karels *mp = NULL;
11032cd0c529SMike Karels return (ENOMEM);
11042cd0c529SMike Karels }
11052cd0c529SMike Karels offset = gen_parse_tx(m, csum_flags);
11062cd0c529SMike Karels sb = mtod(m, struct statusblock *);
11070add2a52SMike Karels if ((csum_flags & CSUM_DELAY_ANY) != 0) {
11082cd0c529SMike Karels csuminfo = (offset << TXCSUM_OFF_SHIFT) |
11092cd0c529SMike Karels (offset + csumdata);
11102cd0c529SMike Karels csuminfo |= TXCSUM_LEN_VALID;
11110add2a52SMike Karels if (csum_flags & (CSUM_UDP | CSUM_IP6_UDP))
11122cd0c529SMike Karels csuminfo |= TXCSUM_UDP;
11132cd0c529SMike Karels sb->txcsuminfo = csuminfo;
11142cd0c529SMike Karels } else
11152cd0c529SMike Karels sb->txcsuminfo = 0;
11162cd0c529SMike Karels }
11172cd0c529SMike Karels
11182cd0c529SMike Karels *mp = m;
11192cd0c529SMike Karels
11202cd0c529SMike Karels cur = first = q->cur;
11212cd0c529SMike Karels ent = &q->entries[cur];
11222cd0c529SMike Karels map = ent->map;
11232cd0c529SMike Karels error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m, segs,
11242cd0c529SMike Karels &nsegs, BUS_DMA_NOWAIT);
11252cd0c529SMike Karels if (error == EFBIG) {
11262cd0c529SMike Karels m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
11272cd0c529SMike Karels if (m == NULL) {
11282cd0c529SMike Karels device_printf(sc->dev,
11292cd0c529SMike Karels "gen_encap: m_collapse failed\n");
11302cd0c529SMike Karels m_freem(*mp);
11312cd0c529SMike Karels *mp = NULL;
11322cd0c529SMike Karels return (ENOMEM);
11332cd0c529SMike Karels }
11342cd0c529SMike Karels *mp = m;
11352cd0c529SMike Karels error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m,
11362cd0c529SMike Karels segs, &nsegs, BUS_DMA_NOWAIT);
11372cd0c529SMike Karels if (error != 0) {
11382cd0c529SMike Karels m_freem(*mp);
11392cd0c529SMike Karels *mp = NULL;
11402cd0c529SMike Karels }
11412cd0c529SMike Karels }
11422cd0c529SMike Karels if (error != 0) {
11432cd0c529SMike Karels device_printf(sc->dev,
11442cd0c529SMike Karels "gen_encap: bus_dmamap_load_mbuf_sg failed\n");
11452cd0c529SMike Karels return (error);
11462cd0c529SMike Karels }
11472cd0c529SMike Karels if (nsegs == 0) {
11482cd0c529SMike Karels m_freem(*mp);
11492cd0c529SMike Karels *mp = NULL;
11502cd0c529SMike Karels return (EIO);
11512cd0c529SMike Karels }
11522cd0c529SMike Karels
11532cd0c529SMike Karels /* Remove statusblock after mapping, before possible requeue or bpf. */
11542cd0c529SMike Karels if (sb != NULL) {
11552cd0c529SMike Karels m->m_data += sizeof(struct statusblock);
11562cd0c529SMike Karels m->m_len -= sizeof(struct statusblock);
11572cd0c529SMike Karels m->m_pkthdr.len -= sizeof(struct statusblock);
11582cd0c529SMike Karels }
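	/*
	 * Make sure the ring has room for every segment of this packet;
	 * if not, back out the DMA mapping so the caller can requeue.
	 */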
11592cd0c529SMike Karels if (q->queued + nsegs > q->nentries) {
11602cd0c529SMike Karels bus_dmamap_unload(sc->tx_buf_tag, map);
11612cd0c529SMike Karels return (ENOBUFS);
11622cd0c529SMike Karels }
11632cd0c529SMike Karels
11642cd0c529SMike Karels bus_dmamap_sync(sc->tx_buf_tag, map, BUS_DMASYNC_PREWRITE);
11652cd0c529SMike Karels
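	/*
	 * Fill one descriptor per DMA segment, writing each directly via
	 * register accesses.  The first segment carries SOP (and the
	 * checksum flag when offload was requested); the last carries EOP.
	 */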
11662cd0c529SMike Karels index = q->prod_idx & (q->nentries - 1);
11672cd0c529SMike Karels for (i = 0; i < nsegs; i++) {
11682cd0c529SMike Karels ent = &q->entries[cur];
11692cd0c529SMike Karels length_status = GENET_TX_DESC_STATUS_QTAG_MASK;
11702cd0c529SMike Karels if (i == 0) {
11712cd0c529SMike Karels length_status |= GENET_TX_DESC_STATUS_SOP |
11722cd0c529SMike Karels GENET_TX_DESC_STATUS_CRC;
11730add2a52SMike Karels if ((csum_flags & CSUM_DELAY_ANY) != 0)
11742cd0c529SMike Karels length_status |= GENET_TX_DESC_STATUS_CKSUM;
11752cd0c529SMike Karels }
11762cd0c529SMike Karels if (i == nsegs - 1)
11772cd0c529SMike Karels length_status |= GENET_TX_DESC_STATUS_EOP;
11782cd0c529SMike Karels
11792cd0c529SMike Karels length_status |= segs[i].ds_len <<
11802cd0c529SMike Karels GENET_TX_DESC_STATUS_BUFLEN_SHIFT;
11812cd0c529SMike Karels
11822cd0c529SMike Karels WR4(sc, GENET_TX_DESC_ADDRESS_LO(index),
11832cd0c529SMike Karels (uint32_t)segs[i].ds_addr);
11842cd0c529SMike Karels WR4(sc, GENET_TX_DESC_ADDRESS_HI(index),
11852cd0c529SMike Karels (uint32_t)(segs[i].ds_addr >> 32));
11862cd0c529SMike Karels WR4(sc, GENET_TX_DESC_STATUS(index), length_status);
11872cd0c529SMike Karels
11882cd0c529SMike Karels ++q->queued;
11892cd0c529SMike Karels cur = TX_NEXT(cur, q->nentries);
11902cd0c529SMike Karels index = TX_NEXT(index, q->nentries);
11912cd0c529SMike Karels }
11922cd0c529SMike Karels
11932cd0c529SMike Karels q->prod_idx += nsegs;
11942cd0c529SMike Karels q->prod_idx &= GENET_TX_DMA_PROD_CONS_MASK;
11952cd0c529SMike Karels /* We probably don't need to write the producer index on every iteration. */
11962cd0c529SMike Karels if (nsegs != 0)
11972cd0c529SMike Karels WR4(sc, GENET_TX_DMA_PROD_INDEX(q->hwindex), q->prod_idx);
11982cd0c529SMike Karels q->cur = cur;
11992cd0c529SMike Karels
12002cd0c529SMike Karels /* Store the mbuf in the first ring entry; gen_txintr() frees it. */
12012cd0c529SMike Karels q->entries[first].mbuf = m;
12022cd0c529SMike Karels
12032cd0c529SMike Karels return (0);
12042cd0c529SMike Karels }
12052cd0c529SMike Karels
12062cd0c529SMike Karels /*
12072cd0c529SMike Karels * Parse a packet to find the offset of the transport header for checksum
12082cd0c529SMike Karels * offload. Ensure that the link and network headers are contiguous with
12092cd0c529SMike Karels * the status block, or transmission fails.
12102cd0c529SMike Karels */
12112cd0c529SMike Karels static int
12122cd0c529SMike Karels gen_parse_tx(struct mbuf *m, int csum_flags)
12132cd0c529SMike Karels {
12142cd0c529SMike Karels int offset, off_in_m;
12150add2a52SMike Karels bool copy = false, shift = false;
12162cd0c529SMike Karels u_char *p, *copy_p = NULL;
12172cd0c529SMike Karels struct mbuf *m0 = m;
12182cd0c529SMike Karels uint16_t ether_type;
12192cd0c529SMike Karels
12202cd0c529SMike Karels if (m->m_len == sizeof(struct statusblock)) {
12212cd0c529SMike Karels /* M_PREPEND placed statusblock at end; move to beginning */
12222cd0c529SMike Karels m->m_data = m->m_pktdat;
12232cd0c529SMike Karels copy_p = mtodo(m, sizeof(struct statusblock));
12242cd0c529SMike Karels m = m->m_next;
12252cd0c529SMike Karels off_in_m = 0;
12262cd0c529SMike Karels p = mtod(m, u_char *);
12270add2a52SMike Karels copy = true;
12282cd0c529SMike Karels } else {
12290add2a52SMike Karels /*
12300add2a52SMike Karels * If the statusblock is not at the beginning of the mbuf (likely),
12310add2a52SMike Karels * remember to move the mbuf contents down before copying the
12320add2a52SMike Karels * headers in after them.
12330add2a52SMike Karels */
12340add2a52SMike Karels if ((m->m_flags & M_EXT) == 0 && m->m_data != m->m_pktdat)
12350add2a52SMike Karels shift = true;
12362cd0c529SMike Karels p = mtodo(m, sizeof(struct statusblock));
12372cd0c529SMike Karels off_in_m = sizeof(struct statusblock);
12382cd0c529SMike Karels }
12392cd0c529SMike Karels
12400add2a52SMike Karels /*
12410add2a52SMike Karels * If the headers need to be made contiguous with the statusblock, copy them.
12420add2a52SMike Karels * If copying to the internal mbuf data area, and the status block
12430add2a52SMike Karels * is not at the beginning of that area, shift the status block (which
12440add2a52SMike Karels * is empty) and following data.
12450add2a52SMike Karels */
12462cd0c529SMike Karels #define COPY(size) { \
12472cd0c529SMike Karels int hsize = size; \
12480add2a52SMike Karels if (copy) { \
12490add2a52SMike Karels if (shift) { \
12500add2a52SMike Karels u_char *p0; \
12510add2a52SMike Karels shift = false; \
12520add2a52SMike Karels p0 = mtodo(m0, sizeof(struct statusblock)); \
12530add2a52SMike Karels m0->m_data = m0->m_pktdat; \
12540add2a52SMike Karels bcopy(p0, mtodo(m0, sizeof(struct statusblock)),\
12550add2a52SMike Karels m0->m_len - sizeof(struct statusblock)); \
12561de9aa4dSMike Karels copy_p = mtodo(m0, m0->m_len); \
12570add2a52SMike Karels } \
12582cd0c529SMike Karels bcopy(p, copy_p, hsize); \
12592cd0c529SMike Karels m0->m_len += hsize; \
12602cd0c529SMike Karels m->m_len -= hsize; \
12612cd0c529SMike Karels m->m_data += hsize; \
12622cd0c529SMike Karels } \
12630add2a52SMike Karels copy_p += hsize; \
12642cd0c529SMike Karels }
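/*
 * COPY(size) advances copy_p by size; while "copy" is set it also moves
 * the next size bytes of header from the current mbuf into the first
 * mbuf, immediately after the statusblock, shifting the first mbuf's
 * existing contents down first if "shift" is still pending.
 */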
12652cd0c529SMike Karels
12662cd0c529SMike Karels KASSERT((sizeof(struct statusblock) + sizeof(struct ether_vlan_header) +
12672cd0c529SMike Karels sizeof(struct ip6_hdr) <= MLEN), ("%s: mbuf too small", __func__));
12682cd0c529SMike Karels
12692cd0c529SMike Karels if (((struct ether_header *)p)->ether_type == htons(ETHERTYPE_VLAN)) {
12702cd0c529SMike Karels offset = sizeof(struct ether_vlan_header);
12712cd0c529SMike Karels ether_type = ntohs(((struct ether_vlan_header *)p)->evl_proto);
12722cd0c529SMike Karels COPY(sizeof(struct ether_vlan_header));
12732cd0c529SMike Karels if (m->m_len == off_in_m + sizeof(struct ether_vlan_header)) {
12742cd0c529SMike Karels m = m->m_next;
12752cd0c529SMike Karels off_in_m = 0;
12762cd0c529SMike Karels p = mtod(m, u_char *);
12770add2a52SMike Karels copy = true;
12782cd0c529SMike Karels } else {
12792cd0c529SMike Karels off_in_m += sizeof(struct ether_vlan_header);
12802cd0c529SMike Karels p += sizeof(struct ether_vlan_header);
12812cd0c529SMike Karels }
12822cd0c529SMike Karels } else {
12832cd0c529SMike Karels offset = sizeof(struct ether_header);
12842cd0c529SMike Karels ether_type = ntohs(((struct ether_header *)p)->ether_type);
12852cd0c529SMike Karels COPY(sizeof(struct ether_header));
12862cd0c529SMike Karels if (m->m_len == off_in_m + sizeof(struct ether_header)) {
12872cd0c529SMike Karels m = m->m_next;
12882cd0c529SMike Karels off_in_m = 0;
12892cd0c529SMike Karels p = mtod(m, u_char *);
12900add2a52SMike Karels copy = true;
12912cd0c529SMike Karels } else {
12922cd0c529SMike Karels off_in_m += sizeof(struct ether_header);
12932cd0c529SMike Karels p += sizeof(struct ether_header);
12942cd0c529SMike Karels }
12952cd0c529SMike Karels }
12962cd0c529SMike Karels if (ether_type == ETHERTYPE_IP) {
12972cd0c529SMike Karels COPY(((struct ip *)p)->ip_hl << 2);
12982cd0c529SMike Karels offset += ((struct ip *)p)->ip_hl << 2;
12992cd0c529SMike Karels } else if (ether_type == ETHERTYPE_IPV6) {
13002cd0c529SMike Karels COPY(sizeof(struct ip6_hdr));
13012cd0c529SMike Karels offset += sizeof(struct ip6_hdr);
13022cd0c529SMike Karels } else {
13032cd0c529SMike Karels /*
1304234c1463SMike Karels * It is unknown whether most other cases require copying a header;
1305234c1463SMike Karels * ARP works without it. However, Wake-on-LAN packets sent
1306234c1463SMike Karels * by wake(8) via BPF need something like this.
13072cd0c529SMike Karels */
1308234c1463SMike Karels COPY(MIN(gen_tx_hdr_min, m->m_len));
1309234c1463SMike Karels offset += MIN(gen_tx_hdr_min, m->m_len);
13102cd0c529SMike Karels }
13112cd0c529SMike Karels return (offset);
13122cd0c529SMike Karels #undef COPY
13132cd0c529SMike Karels }
13142cd0c529SMike Karels
13152cd0c529SMike Karels static void
13162cd0c529SMike Karels gen_intr(void *arg)
13172cd0c529SMike Karels {
13182cd0c529SMike Karels struct gen_softc *sc = arg;
13192cd0c529SMike Karels uint32_t val;
13202cd0c529SMike Karels
13212cd0c529SMike Karels GEN_LOCK(sc);
13222cd0c529SMike Karels
13232cd0c529SMike Karels val = RD4(sc, GENET_INTRL2_CPU_STAT);
13242cd0c529SMike Karels val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK);
13252cd0c529SMike Karels WR4(sc, GENET_INTRL2_CPU_CLEAR, val);
13262cd0c529SMike Karels
13272cd0c529SMike Karels if (val & GENET_IRQ_RXDMA_DONE)
13282cd0c529SMike Karels gen_rxintr(sc, &sc->rx_queue[DEF_RXQUEUE]);
13292cd0c529SMike Karels
13302cd0c529SMike Karels if (val & GENET_IRQ_TXDMA_DONE) {
13312cd0c529SMike Karels gen_txintr(sc, &sc->tx_queue[DEF_TXQUEUE]);
13322cd0c529SMike Karels if (!if_sendq_empty(sc->ifp))
13332cd0c529SMike Karels gen_start_locked(sc);
13342cd0c529SMike Karels }
13352cd0c529SMike Karels
13362cd0c529SMike Karels GEN_UNLOCK(sc);
13372cd0c529SMike Karels }
13382cd0c529SMike Karels
13392cd0c529SMike Karels static int
13402cd0c529SMike Karels gen_rxintr(struct gen_softc *sc, struct rx_queue *q)
13412cd0c529SMike Karels {
13422cd0c529SMike Karels if_t ifp;
13432cd0c529SMike Karels struct mbuf *m, *mh, *mt;
13442cd0c529SMike Karels struct statusblock *sb = NULL;
13452cd0c529SMike Karels int error, index, len, cnt, npkt, n;
13462cd0c529SMike Karels uint32_t status, prod_idx, total;
13472cd0c529SMike Karels
13482cd0c529SMike Karels ifp = sc->ifp;
13492cd0c529SMike Karels mh = mt = NULL;
13502cd0c529SMike Karels cnt = 0;
13512cd0c529SMike Karels npkt = 0;
13522cd0c529SMike Karels
13532cd0c529SMike Karels prod_idx = RD4(sc, GENET_RX_DMA_PROD_INDEX(q->hwindex)) &
13542cd0c529SMike Karels GENET_RX_DMA_PROD_CONS_MASK;
13552cd0c529SMike Karels total = (prod_idx - q->cons_idx) & GENET_RX_DMA_PROD_CONS_MASK;
13562cd0c529SMike Karels
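	/*
	 * The producer and consumer indices are free-running counters
	 * masked with GENET_RX_DMA_PROD_CONS_MASK; "total" is the number
	 * of completed receive descriptors waiting to be processed.
	 */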
13572cd0c529SMike Karels index = q->cons_idx & (RX_DESC_COUNT - 1);
13582cd0c529SMike Karels for (n = 0; n < total; n++) {
13592cd0c529SMike Karels bus_dmamap_sync(sc->rx_buf_tag, q->entries[index].map,
13602cd0c529SMike Karels BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
13612cd0c529SMike Karels bus_dmamap_unload(sc->rx_buf_tag, q->entries[index].map);
13622cd0c529SMike Karels
13632cd0c529SMike Karels m = q->entries[index].mbuf;
13642cd0c529SMike Karels
13652cd0c529SMike Karels if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
13662cd0c529SMike Karels sb = mtod(m, struct statusblock *);
13672cd0c529SMike Karels status = sb->status_buflen;
13682cd0c529SMike Karels } else
13692cd0c529SMike Karels status = RD4(sc, GENET_RX_DESC_STATUS(index));
13702cd0c529SMike Karels
13712cd0c529SMike Karels len = (status & GENET_RX_DESC_STATUS_BUFLEN_MASK) >>
13722cd0c529SMike Karels GENET_RX_DESC_STATUS_BUFLEN_SHIFT;
13732cd0c529SMike Karels
13742cd0c529SMike Karels /* check for errors */
13752cd0c529SMike Karels if ((status &
13762cd0c529SMike Karels (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP |
13772cd0c529SMike Karels GENET_RX_DESC_STATUS_RX_ERROR)) !=
13782cd0c529SMike Karels (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP)) {
1379b7459fb0SJustin Hibbits if (if_getflags(ifp) & IFF_DEBUG)
13802cd0c529SMike Karels device_printf(sc->dev,
13812cd0c529SMike Karels "error/frag %x csum %x\n", status,
13822cd0c529SMike Karels sb->rxcsum);
13832cd0c529SMike Karels if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
13842cd0c529SMike Karels continue;
13852cd0c529SMike Karels }
13862cd0c529SMike Karels
13872cd0c529SMike Karels error = gen_newbuf_rx(sc, q, index);
13882cd0c529SMike Karels if (error != 0) {
13892cd0c529SMike Karels if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1390b7459fb0SJustin Hibbits if (if_getflags(ifp) & IFF_DEBUG)
13912cd0c529SMike Karels device_printf(sc->dev, "gen_newbuf_rx %d\n",
13922cd0c529SMike Karels error);
13932cd0c529SMike Karels /* reuse previous mbuf */
13942cd0c529SMike Karels (void) gen_mapbuf_rx(sc, q, index, m);
13952cd0c529SMike Karels continue;
13962cd0c529SMike Karels }
13972cd0c529SMike Karels
13982cd0c529SMike Karels if (sb != NULL) {
13992cd0c529SMike Karels if (status & GENET_RX_DESC_STATUS_CKSUM_OK) {
14002cd0c529SMike Karels /* L4 checksum checked; not sure about L3. */
14012cd0c529SMike Karels m->m_pkthdr.csum_flags = CSUM_DATA_VALID |
14022cd0c529SMike Karels CSUM_PSEUDO_HDR;
14032cd0c529SMike Karels m->m_pkthdr.csum_data = 0xffff;
14042cd0c529SMike Karels }
14052cd0c529SMike Karels m->m_data += sizeof(struct statusblock);
14062cd0c529SMike Karels m->m_len -= sizeof(struct statusblock);
14072cd0c529SMike Karels len -= sizeof(struct statusblock);
14082cd0c529SMike Karels }
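		/* Skip the ETHER_ALIGN pad bytes that precede the frame data. */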
14092cd0c529SMike Karels if (len > ETHER_ALIGN) {
14102cd0c529SMike Karels m_adj(m, ETHER_ALIGN);
14112cd0c529SMike Karels len -= ETHER_ALIGN;
14122cd0c529SMike Karels }
14132cd0c529SMike Karels
14142cd0c529SMike Karels m->m_pkthdr.rcvif = ifp;
14152cd0c529SMike Karels m->m_pkthdr.len = len;
14162cd0c529SMike Karels m->m_len = len;
14172cd0c529SMike Karels if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
14182cd0c529SMike Karels
14192cd0c529SMike Karels m->m_nextpkt = NULL;
14202cd0c529SMike Karels if (mh == NULL)
14212cd0c529SMike Karels mh = m;
14222cd0c529SMike Karels else
14232cd0c529SMike Karels mt->m_nextpkt = m;
14242cd0c529SMike Karels mt = m;
14252cd0c529SMike Karels ++cnt;
14262cd0c529SMike Karels ++npkt;
14272cd0c529SMike Karels
14282cd0c529SMike Karels index = RX_NEXT(index, q->nentries);
14292cd0c529SMike Karels
14302cd0c529SMike Karels q->cons_idx = (q->cons_idx + 1) & GENET_RX_DMA_PROD_CONS_MASK;
14312cd0c529SMike Karels WR4(sc, GENET_RX_DMA_CONS_INDEX(q->hwindex), q->cons_idx);
14322cd0c529SMike Karels
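		/*
		 * Hand completed packets to the stack in batches of
		 * gen_rx_batch, dropping the driver lock around if_input().
		 */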
14332cd0c529SMike Karels if (cnt == gen_rx_batch) {
14342cd0c529SMike Karels GEN_UNLOCK(sc);
14352cd0c529SMike Karels if_input(ifp, mh);
14362cd0c529SMike Karels GEN_LOCK(sc);
14372cd0c529SMike Karels mh = mt = NULL;
14382cd0c529SMike Karels cnt = 0;
14392cd0c529SMike Karels }
14402cd0c529SMike Karels }
14412cd0c529SMike Karels
14422cd0c529SMike Karels if (mh != NULL) {
14432cd0c529SMike Karels GEN_UNLOCK(sc);
14442cd0c529SMike Karels if_input(ifp, mh);
14452cd0c529SMike Karels GEN_LOCK(sc);
14462cd0c529SMike Karels }
14472cd0c529SMike Karels
14482cd0c529SMike Karels return (npkt);
14492cd0c529SMike Karels }
14502cd0c529SMike Karels
14512cd0c529SMike Karels static void
14522cd0c529SMike Karels gen_txintr(struct gen_softc *sc, struct tx_queue *q)
14532cd0c529SMike Karels {
14542cd0c529SMike Karels uint32_t cons_idx, total;
14552cd0c529SMike Karels struct gen_ring_ent *ent;
14562cd0c529SMike Karels if_t ifp;
14572cd0c529SMike Karels int i, prog;
14582cd0c529SMike Karels
14592cd0c529SMike Karels GEN_ASSERT_LOCKED(sc);
14602cd0c529SMike Karels
14612cd0c529SMike Karels ifp = sc->ifp;
14622cd0c529SMike Karels
14632cd0c529SMike Karels cons_idx = RD4(sc, GENET_TX_DMA_CONS_INDEX(q->hwindex)) &
14642cd0c529SMike Karels GENET_TX_DMA_PROD_CONS_MASK;
14652cd0c529SMike Karels total = (cons_idx - q->cons_idx) & GENET_TX_DMA_PROD_CONS_MASK;
14662cd0c529SMike Karels
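	/*
	 * Reclaim transmitted descriptors: unload each DMA map and free
	 * the associated mbuf, stopping when the hardware consumer index
	 * is reached or the ring is empty.
	 */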
14672cd0c529SMike Karels prog = 0;
14682cd0c529SMike Karels for (i = q->next; q->queued > 0 && total > 0;
14692cd0c529SMike Karels i = TX_NEXT(i, q->nentries), total--) {
14702cd0c529SMike Karels /* XXX check for errors */
14712cd0c529SMike Karels
14722cd0c529SMike Karels ent = &q->entries[i];
14732cd0c529SMike Karels if (ent->mbuf != NULL) {
14742cd0c529SMike Karels bus_dmamap_sync(sc->tx_buf_tag, ent->map,
14752cd0c529SMike Karels BUS_DMASYNC_POSTWRITE);
14762cd0c529SMike Karels bus_dmamap_unload(sc->tx_buf_tag, ent->map);
14772cd0c529SMike Karels m_freem(ent->mbuf);
14782cd0c529SMike Karels ent->mbuf = NULL;
14792cd0c529SMike Karels if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
14802cd0c529SMike Karels }
14812cd0c529SMike Karels
14822cd0c529SMike Karels prog++;
14832cd0c529SMike Karels --q->queued;
14842cd0c529SMike Karels }
14852cd0c529SMike Karels
14862cd0c529SMike Karels if (prog > 0) {
14872cd0c529SMike Karels q->next = i;
14882cd0c529SMike Karels if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
14892cd0c529SMike Karels }
14902cd0c529SMike Karels
14912cd0c529SMike Karels q->cons_idx = cons_idx;
14922cd0c529SMike Karels }
14932cd0c529SMike Karels
14942cd0c529SMike Karels static void
14952cd0c529SMike Karels gen_intr2(void *arg)
14962cd0c529SMike Karels {
14972cd0c529SMike Karels struct gen_softc *sc = arg;
14982cd0c529SMike Karels
14992cd0c529SMike Karels device_printf(sc->dev, "gen_intr2\n");
15002cd0c529SMike Karels }
15012cd0c529SMike Karels
15022cd0c529SMike Karels static int
15032cd0c529SMike Karels gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index)
15042cd0c529SMike Karels {
15052cd0c529SMike Karels struct mbuf *m;
15062cd0c529SMike Karels
15072cd0c529SMike Karels m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
15082cd0c529SMike Karels if (m == NULL)
15092cd0c529SMike Karels return (ENOBUFS);
15102cd0c529SMike Karels
15112cd0c529SMike Karels m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
15122cd0c529SMike Karels m_adj(m, ETHER_ALIGN);
15132cd0c529SMike Karels
15142cd0c529SMike Karels return (gen_mapbuf_rx(sc, q, index, m));
15152cd0c529SMike Karels }
15162cd0c529SMike Karels
15172cd0c529SMike Karels static int
15182cd0c529SMike Karels gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index,
15192cd0c529SMike Karels struct mbuf *m)
15202cd0c529SMike Karels {
15212cd0c529SMike Karels bus_dma_segment_t seg;
15222cd0c529SMike Karels bus_dmamap_t map;
15232cd0c529SMike Karels int nsegs;
15242cd0c529SMike Karels
15252cd0c529SMike Karels map = q->entries[index].map;
15262cd0c529SMike Karels if (bus_dmamap_load_mbuf_sg(sc->rx_buf_tag, map, m, &seg, &nsegs,
15272cd0c529SMike Karels BUS_DMA_NOWAIT) != 0) {
15282cd0c529SMike Karels m_freem(m);
15292cd0c529SMike Karels return (ENOBUFS);
15302cd0c529SMike Karels }
15312cd0c529SMike Karels
15322cd0c529SMike Karels bus_dmamap_sync(sc->rx_buf_tag, map, BUS_DMASYNC_PREREAD);
15332cd0c529SMike Karels
15342cd0c529SMike Karels q->entries[index].mbuf = m;
15352cd0c529SMike Karels WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)seg.ds_addr);
15362cd0c529SMike Karels WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(seg.ds_addr >> 32));
15372cd0c529SMike Karels
15382cd0c529SMike Karels return (0);
15392cd0c529SMike Karels }
15402cd0c529SMike Karels
15412cd0c529SMike Karels static int
15422cd0c529SMike Karels gen_ioctl(if_t ifp, u_long cmd, caddr_t data)
15432cd0c529SMike Karels {
15442cd0c529SMike Karels struct gen_softc *sc;
15452cd0c529SMike Karels struct mii_data *mii;
15462cd0c529SMike Karels struct ifreq *ifr;
15472cd0c529SMike Karels int flags, enable, error;
15482cd0c529SMike Karels
15492cd0c529SMike Karels sc = if_getsoftc(ifp);
15502cd0c529SMike Karels mii = device_get_softc(sc->miibus);
15512cd0c529SMike Karels ifr = (struct ifreq *)data;
15522cd0c529SMike Karels error = 0;
15532cd0c529SMike Karels
15542cd0c529SMike Karels switch (cmd) {
15552cd0c529SMike Karels case SIOCSIFFLAGS:
15562cd0c529SMike Karels GEN_LOCK(sc);
15572cd0c529SMike Karels if (if_getflags(ifp) & IFF_UP) {
15582cd0c529SMike Karels if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
15592cd0c529SMike Karels flags = if_getflags(ifp) ^ sc->if_flags;
15602cd0c529SMike Karels if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
15612cd0c529SMike Karels gen_setup_rxfilter(sc);
15622cd0c529SMike Karels } else
15632cd0c529SMike Karels gen_init_locked(sc);
15642cd0c529SMike Karels } else {
15652cd0c529SMike Karels if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
15668f45652bSMike Karels gen_stop(sc);
15672cd0c529SMike Karels }
15682cd0c529SMike Karels sc->if_flags = if_getflags(ifp);
15692cd0c529SMike Karels GEN_UNLOCK(sc);
15702cd0c529SMike Karels break;
15712cd0c529SMike Karels
15722cd0c529SMike Karels case SIOCADDMULTI:
15732cd0c529SMike Karels case SIOCDELMULTI:
15742cd0c529SMike Karels if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
15752cd0c529SMike Karels GEN_LOCK(sc);
15762cd0c529SMike Karels gen_setup_rxfilter(sc);
15772cd0c529SMike Karels GEN_UNLOCK(sc);
15782cd0c529SMike Karels }
15792cd0c529SMike Karels break;
15802cd0c529SMike Karels
15812cd0c529SMike Karels case SIOCSIFMEDIA:
15822cd0c529SMike Karels case SIOCGIFMEDIA:
15832cd0c529SMike Karels error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
15842cd0c529SMike Karels break;
15852cd0c529SMike Karels
15862cd0c529SMike Karels case SIOCSIFCAP:
15872cd0c529SMike Karels enable = if_getcapenable(ifp);
15882cd0c529SMike Karels flags = ifr->ifr_reqcap ^ enable;
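		/* Toggle only the checksum capabilities whose requested state changed. */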
15892cd0c529SMike Karels if (flags & IFCAP_RXCSUM)
15902cd0c529SMike Karels enable ^= IFCAP_RXCSUM;
15912cd0c529SMike Karels if (flags & IFCAP_RXCSUM_IPV6)
15922cd0c529SMike Karels enable ^= IFCAP_RXCSUM_IPV6;
15932cd0c529SMike Karels if (flags & IFCAP_TXCSUM)
15942cd0c529SMike Karels enable ^= IFCAP_TXCSUM;
15952cd0c529SMike Karels if (flags & IFCAP_TXCSUM_IPV6)
15962cd0c529SMike Karels enable ^= IFCAP_TXCSUM_IPV6;
15972cd0c529SMike Karels if (enable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6))
15982cd0c529SMike Karels if_sethwassist(ifp, GEN_CSUM_FEATURES);
15992cd0c529SMike Karels else
16002cd0c529SMike Karels if_sethwassist(ifp, 0);
16012cd0c529SMike Karels if_setcapenable(ifp, enable);
16022cd0c529SMike Karels if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
16032cd0c529SMike Karels gen_enable_offload(sc);
16042cd0c529SMike Karels break;
16052cd0c529SMike Karels
16062cd0c529SMike Karels default:
16072cd0c529SMike Karels error = ether_ioctl(ifp, cmd, data);
16082cd0c529SMike Karels break;
16092cd0c529SMike Karels }
16102cd0c529SMike Karels return (error);
16112cd0c529SMike Karels }
16122cd0c529SMike Karels
16132cd0c529SMike Karels static void
16142cd0c529SMike Karels gen_tick(void *softc)
16152cd0c529SMike Karels {
16162cd0c529SMike Karels struct gen_softc *sc;
16172cd0c529SMike Karels struct mii_data *mii;
16182cd0c529SMike Karels if_t ifp;
16192cd0c529SMike Karels int link;
16202cd0c529SMike Karels
16212cd0c529SMike Karels sc = softc;
16222cd0c529SMike Karels ifp = sc->ifp;
16232cd0c529SMike Karels mii = device_get_softc(sc->miibus);
16242cd0c529SMike Karels
16252cd0c529SMike Karels GEN_ASSERT_LOCKED(sc);
16262cd0c529SMike Karels
16272cd0c529SMike Karels if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
16282cd0c529SMike Karels return;
16292cd0c529SMike Karels
16302cd0c529SMike Karels link = sc->link;
16312cd0c529SMike Karels mii_tick(mii);
16322cd0c529SMike Karels if (sc->link && !link)
16332cd0c529SMike Karels gen_start_locked(sc);
16342cd0c529SMike Karels
16352cd0c529SMike Karels callout_reset(&sc->stat_ch, hz, gen_tick, sc);
16362cd0c529SMike Karels }
16372cd0c529SMike Karels
16382cd0c529SMike Karels #define MII_BUSY_RETRY 1000
16392cd0c529SMike Karels
16402cd0c529SMike Karels static int
16412cd0c529SMike Karels gen_miibus_readreg(device_t dev, int phy, int reg)
16422cd0c529SMike Karels {
16432cd0c529SMike Karels struct gen_softc *sc;
16442cd0c529SMike Karels int retry, val;
16452cd0c529SMike Karels
16462cd0c529SMike Karels sc = device_get_softc(dev);
16472cd0c529SMike Karels val = 0;
16482cd0c529SMike Karels
16492cd0c529SMike Karels WR4(sc, GENET_MDIO_CMD, GENET_MDIO_READ |
16502cd0c529SMike Karels (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT));
16512cd0c529SMike Karels val = RD4(sc, GENET_MDIO_CMD);
16522cd0c529SMike Karels WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
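	/* Poll for completion of the MDIO read, up to MII_BUSY_RETRY times. */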
16532cd0c529SMike Karels for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
16542cd0c529SMike Karels if (((val = RD4(sc, GENET_MDIO_CMD)) &
16552cd0c529SMike Karels GENET_MDIO_START_BUSY) == 0) {
16562cd0c529SMike Karels if (val & GENET_MDIO_READ_FAILED)
16572cd0c529SMike Karels return (0); /* -1? */
16582cd0c529SMike Karels val &= GENET_MDIO_VAL_MASK;
16592cd0c529SMike Karels break;
16602cd0c529SMike Karels }
16612cd0c529SMike Karels DELAY(10);
16622cd0c529SMike Karels }
16632cd0c529SMike Karels
16642cd0c529SMike Karels if (retry == 0)
16652cd0c529SMike Karels device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
16662cd0c529SMike Karels phy, reg);
16672cd0c529SMike Karels
16682cd0c529SMike Karels return (val);
16692cd0c529SMike Karels }
16702cd0c529SMike Karels
16712cd0c529SMike Karels static int
16722cd0c529SMike Karels gen_miibus_writereg(device_t dev, int phy, int reg, int val)
16732cd0c529SMike Karels {
16742cd0c529SMike Karels struct gen_softc *sc;
16752cd0c529SMike Karels int retry;
16762cd0c529SMike Karels
16772cd0c529SMike Karels sc = device_get_softc(dev);
16782cd0c529SMike Karels
16792cd0c529SMike Karels WR4(sc, GENET_MDIO_CMD, GENET_MDIO_WRITE |
16802cd0c529SMike Karels (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT) |
16812cd0c529SMike Karels (val & GENET_MDIO_VAL_MASK));
16822cd0c529SMike Karels val = RD4(sc, GENET_MDIO_CMD);
16832cd0c529SMike Karels WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY);
16842cd0c529SMike Karels for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
16852cd0c529SMike Karels val = RD4(sc, GENET_MDIO_CMD);
16862cd0c529SMike Karels if ((val & GENET_MDIO_START_BUSY) == 0)
16872cd0c529SMike Karels break;
16882cd0c529SMike Karels DELAY(10);
16892cd0c529SMike Karels }
16902cd0c529SMike Karels if (retry == 0)
16912cd0c529SMike Karels device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
16922cd0c529SMike Karels phy, reg);
16932cd0c529SMike Karels
16942cd0c529SMike Karels return (0);
16952cd0c529SMike Karels }
16962cd0c529SMike Karels
16972cd0c529SMike Karels static void
16982cd0c529SMike Karels gen_update_link_locked(struct gen_softc *sc)
16992cd0c529SMike Karels {
17002cd0c529SMike Karels struct mii_data *mii;
17012cd0c529SMike Karels uint32_t val;
17022cd0c529SMike Karels u_int speed;
17032cd0c529SMike Karels
17042cd0c529SMike Karels GEN_ASSERT_LOCKED(sc);
17052cd0c529SMike Karels
17062cd0c529SMike Karels if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
17072cd0c529SMike Karels return;
17082cd0c529SMike Karels mii = device_get_softc(sc->miibus);
17092cd0c529SMike Karels
17102cd0c529SMike Karels if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
17112cd0c529SMike Karels (IFM_ACTIVE | IFM_AVALID)) {
17122cd0c529SMike Karels switch (IFM_SUBTYPE(mii->mii_media_active)) {
17132cd0c529SMike Karels case IFM_1000_T:
17142cd0c529SMike Karels case IFM_1000_SX:
17152cd0c529SMike Karels speed = GENET_UMAC_CMD_SPEED_1000;
17162cd0c529SMike Karels sc->link = 1;
17172cd0c529SMike Karels break;
17182cd0c529SMike Karels case IFM_100_TX:
17192cd0c529SMike Karels speed = GENET_UMAC_CMD_SPEED_100;
17202cd0c529SMike Karels sc->link = 1;
17212cd0c529SMike Karels break;
17222cd0c529SMike Karels case IFM_10_T:
17232cd0c529SMike Karels speed = GENET_UMAC_CMD_SPEED_10;
17242cd0c529SMike Karels sc->link = 1;
17252cd0c529SMike Karels break;
17262cd0c529SMike Karels default:
17272cd0c529SMike Karels sc->link = 0;
17282cd0c529SMike Karels break;
17292cd0c529SMike Karels }
17302cd0c529SMike Karels } else
17312cd0c529SMike Karels sc->link = 0;
17322cd0c529SMike Karels
17332cd0c529SMike Karels if (sc->link == 0)
17342cd0c529SMike Karels return;
17352cd0c529SMike Karels
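	/*
	 * With a link up, program the RGMII out-of-band control register
	 * and set the negotiated speed in the UniMAC command register.
	 */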
17362cd0c529SMike Karels val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL);
17372cd0c529SMike Karels val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE;
17382cd0c529SMike Karels val |= GENET_EXT_RGMII_OOB_RGMII_LINK;
17392cd0c529SMike Karels val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN;
17402cd0c529SMike Karels if (sc->phy_mode == MII_CONTYPE_RGMII)
17412cd0c529SMike Karels val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
1742349eddbdSMike Karels else
1743349eddbdSMike Karels val &= ~GENET_EXT_RGMII_OOB_ID_MODE_DISABLE;
17442cd0c529SMike Karels WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val);
17452cd0c529SMike Karels
17462cd0c529SMike Karels val = RD4(sc, GENET_UMAC_CMD);
17472cd0c529SMike Karels val &= ~GENET_UMAC_CMD_SPEED;
17482cd0c529SMike Karels val |= speed;
17492cd0c529SMike Karels WR4(sc, GENET_UMAC_CMD, val);
17502cd0c529SMike Karels }
17512cd0c529SMike Karels
17522cd0c529SMike Karels static void
17532cd0c529SMike Karels gen_link_task(void *arg, int pending)
17542cd0c529SMike Karels {
17552cd0c529SMike Karels struct gen_softc *sc;
17562cd0c529SMike Karels
17572cd0c529SMike Karels sc = arg;
17582cd0c529SMike Karels
17592cd0c529SMike Karels GEN_LOCK(sc);
17602cd0c529SMike Karels gen_update_link_locked(sc);
17612cd0c529SMike Karels GEN_UNLOCK(sc);
17622cd0c529SMike Karels }
17632cd0c529SMike Karels
17642cd0c529SMike Karels static void
17652cd0c529SMike Karels gen_miibus_statchg(device_t dev)
17662cd0c529SMike Karels {
17672cd0c529SMike Karels struct gen_softc *sc;
17682cd0c529SMike Karels
17692cd0c529SMike Karels sc = device_get_softc(dev);
17702cd0c529SMike Karels
17712cd0c529SMike Karels taskqueue_enqueue(taskqueue_swi, &sc->link_task);
17722cd0c529SMike Karels }
17732cd0c529SMike Karels
17742cd0c529SMike Karels static void
17752cd0c529SMike Karels gen_media_status(if_t ifp, struct ifmediareq *ifmr)
17762cd0c529SMike Karels {
17772cd0c529SMike Karels struct gen_softc *sc;
17782cd0c529SMike Karels struct mii_data *mii;
17792cd0c529SMike Karels
17802cd0c529SMike Karels sc = if_getsoftc(ifp);
17812cd0c529SMike Karels mii = device_get_softc(sc->miibus);
17822cd0c529SMike Karels
17832cd0c529SMike Karels GEN_LOCK(sc);
17842cd0c529SMike Karels mii_pollstat(mii);
17852cd0c529SMike Karels ifmr->ifm_active = mii->mii_media_active;
17862cd0c529SMike Karels ifmr->ifm_status = mii->mii_media_status;
17872cd0c529SMike Karels GEN_UNLOCK(sc);
17882cd0c529SMike Karels }
17892cd0c529SMike Karels
17902cd0c529SMike Karels static int
17912cd0c529SMike Karels gen_media_change(if_t ifp)
17922cd0c529SMike Karels {
17932cd0c529SMike Karels struct gen_softc *sc;
17942cd0c529SMike Karels struct mii_data *mii;
17952cd0c529SMike Karels int error;
17962cd0c529SMike Karels
17972cd0c529SMike Karels sc = if_getsoftc(ifp);
17982cd0c529SMike Karels mii = device_get_softc(sc->miibus);
17992cd0c529SMike Karels
18002cd0c529SMike Karels GEN_LOCK(sc);
18012cd0c529SMike Karels error = mii_mediachg(mii);
18022cd0c529SMike Karels GEN_UNLOCK(sc);
18032cd0c529SMike Karels
18042cd0c529SMike Karels return (error);
18052cd0c529SMike Karels }
18062cd0c529SMike Karels
18072cd0c529SMike Karels static device_method_t gen_methods[] = {
18082cd0c529SMike Karels /* Device interface */
18092cd0c529SMike Karels DEVMETHOD(device_probe, gen_probe),
18102cd0c529SMike Karels DEVMETHOD(device_attach, gen_attach),
18112cd0c529SMike Karels
18122cd0c529SMike Karels /* MII interface */
18132cd0c529SMike Karels DEVMETHOD(miibus_readreg, gen_miibus_readreg),
18142cd0c529SMike Karels DEVMETHOD(miibus_writereg, gen_miibus_writereg),
18152cd0c529SMike Karels DEVMETHOD(miibus_statchg, gen_miibus_statchg),
18162cd0c529SMike Karels
18172cd0c529SMike Karels DEVMETHOD_END
18182cd0c529SMike Karels };
18192cd0c529SMike Karels
18202cd0c529SMike Karels static driver_t gen_driver = {
18212cd0c529SMike Karels "genet",
18222cd0c529SMike Karels gen_methods,
18232cd0c529SMike Karels sizeof(struct gen_softc),
18242cd0c529SMike Karels };
18252cd0c529SMike Karels
182682d4dc06SJohn Baldwin DRIVER_MODULE(genet, simplebus, gen_driver, 0, 0);
18273e38757dSJohn Baldwin DRIVER_MODULE(miibus, genet, miibus_driver, 0, 0);
18282cd0c529SMike Karels MODULE_DEPEND(genet, ether, 1, 1, 1);
18292cd0c529SMike Karels MODULE_DEPEND(genet, miibus, 1, 1, 1);