/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski
 * Copyright (C) 2006-2007 Semihalf, Piotr Kruszynski
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Freescale integrated Three-Speed Ethernet Controller (TSEC) driver.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/tsec/if_tsec.h>
#include <dev/tsec/if_tsecreg.h>

static int  tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag,
    bus_dmamap_t *dmap, bus_size_t dsize, void **vaddr, void *raddr,
    const char *dname);
static void tsec_dma_ctl(struct tsec_softc *sc, int state);
static void tsec_encap(if_t ifp, struct tsec_softc *sc,
    struct mbuf *m0, uint16_t fcb_flags, int *start_tx);
static void tsec_free_dma(struct tsec_softc *sc);
static void tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap,
    void *vaddr);
static int  tsec_ifmedia_upd(if_t ifp);
static void tsec_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr);
static int  tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mbufp, uint32_t *paddr);
static void tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs,
    int nseg, int error);
static void tsec_intrs_ctl(struct tsec_softc *sc, int state);
static void tsec_init(void *xsc);
static void tsec_init_locked(struct tsec_softc *sc);
static int  tsec_ioctl(if_t ifp, u_long command, caddr_t data);
static void tsec_reset_mac(struct tsec_softc *sc);
static void tsec_setfilter(struct tsec_softc *sc);
static void tsec_set_mac_address(struct tsec_softc *sc);
static void tsec_start(if_t ifp);
static void tsec_start_locked(if_t ifp);
static void tsec_stop(struct tsec_softc *sc);
static void tsec_tick(void *arg);
static void tsec_watchdog(struct tsec_softc *sc);
static void tsec_add_sysctls(struct tsec_softc *sc);
static int  tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS);
static int  tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS);
static void tsec_set_rxic(struct tsec_softc *sc);
static void tsec_set_txic(struct tsec_softc *sc);
static int  tsec_receive_intr_locked(struct tsec_softc *sc, int count);
static void tsec_transmit_intr_locked(struct tsec_softc *sc);
static void tsec_error_intr_locked(struct tsec_softc *sc, int count);
static void tsec_offload_setup(struct tsec_softc *sc);
static void tsec_offload_process_frame(struct tsec_softc *sc,
    struct mbuf *m);
static void tsec_setup_multicast(struct tsec_softc *sc);
static int  tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu);

DRIVER_MODULE(miibus, tsec, miibus_driver, 0, 0);
MODULE_DEPEND(tsec, ether, 1, 1, 1);
MODULE_DEPEND(tsec, miibus, 1, 1, 1);
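
/*
 * The MII management lock is global rather than per-softc because the MDIO
 * management interface may be shared between TSEC instances; PHY register
 * accesses are serialized through it (see the TSEC_PHY_LOCK users below and
 * the initialization in tsec_attach()).
 */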
struct mtx tsec_phy_mtx;

int
tsec_attach(struct tsec_softc *sc)
{
        uint8_t hwaddr[ETHER_ADDR_LEN];
        if_t ifp;
        int error = 0;
        int i;

        /* Initialize global (because potentially shared) MII lock */
        if (!mtx_initialized(&tsec_phy_mtx))
                mtx_init(&tsec_phy_mtx, "tsec mii", NULL, MTX_DEF);

        /* Reset all TSEC counters */
        TSEC_TX_RX_COUNTERS_INIT(sc);

        /* Stop DMA engine if enabled by firmware */
        tsec_dma_ctl(sc, 0);

        /* Reset MAC */
        tsec_reset_mac(sc);

        /* Disable interrupts for now */
        tsec_intrs_ctl(sc, 0);

        /* Configure defaults for interrupt coalescing */
        sc->rx_ic_time = 768;
        sc->rx_ic_count = 16;
        sc->tx_ic_time = 768;
        sc->tx_ic_count = 16;
        tsec_set_rxic(sc);
        tsec_set_txic(sc);
        tsec_add_sysctls(sc);
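
        /*
         * Each descriptor ring is kept in one physically contiguous chunk of
         * bus_dmamem_alloc()ed memory: tsec_alloc_dma_desc() creates the tag,
         * allocates the buffer and loads the map, handing back both the
         * kernel virtual address of the ring and the bus address that is
         * later programmed into TBASE/RBASE by tsec_init_locked().
         */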
        /* Allocate a busdma tag and DMA safe memory for TX descriptors. */
        error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_tx_dtag,
            &sc->tsec_tx_dmap, sizeof(*sc->tsec_tx_vaddr) * TSEC_TX_NUM_DESC,
            (void **)&sc->tsec_tx_vaddr, &sc->tsec_tx_raddr, "TX");

        if (error) {
                tsec_detach(sc);
                return (ENXIO);
        }

        /* Allocate a busdma tag and DMA safe memory for RX descriptors. */
        error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_rx_dtag,
            &sc->tsec_rx_dmap, sizeof(*sc->tsec_rx_vaddr) * TSEC_RX_NUM_DESC,
            (void **)&sc->tsec_rx_vaddr, &sc->tsec_rx_raddr, "RX");
        if (error) {
                tsec_detach(sc);
                return (ENXIO);
        }

        /* Allocate a busdma tag for TX mbufs. */
        error = bus_dma_tag_create(NULL,        /* parent */
            TSEC_TXBUFFER_ALIGNMENT, 0,         /* alignment, boundary */
            BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filtfunc, filtfuncarg */
            MCLBYTES * (TSEC_TX_NUM_DESC - 1),  /* maxsize */
            TSEC_TX_MAX_DMA_SEGS,               /* nsegments */
            MCLBYTES, 0,                        /* maxsegsz, flags */
            NULL, NULL,                         /* lockfunc, lockfuncarg */
            &sc->tsec_tx_mtag);                 /* dmat */
        if (error) {
                device_printf(sc->dev, "failed to allocate busdma tag "
                    "(tx mbufs)\n");
                tsec_detach(sc);
                return (ENXIO);
        }
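
        /*
         * Note that a single packet may occupy up to TSEC_TX_MAX_DMA_SEGS
         * TX descriptors; tsec_encap() falls back to m_defrag() when an
         * mbuf chain maps to more segments than that.
         */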

        /* Allocate a busdma tag for RX mbufs. */
        error = bus_dma_tag_create(NULL,        /* parent */
            TSEC_RXBUFFER_ALIGNMENT, 0,         /* alignment, boundary */
            BUS_SPACE_MAXADDR_32BIT,            /* lowaddr */
            BUS_SPACE_MAXADDR,                  /* highaddr */
            NULL, NULL,                         /* filtfunc, filtfuncarg */
            MCLBYTES,                           /* maxsize */
            1,                                  /* nsegments */
            MCLBYTES, 0,                        /* maxsegsz, flags */
            NULL, NULL,                         /* lockfunc, lockfuncarg */
            &sc->tsec_rx_mtag);                 /* dmat */
        if (error) {
                device_printf(sc->dev, "failed to allocate busdma tag "
                    "(rx mbufs)\n");
                tsec_detach(sc);
                return (ENXIO);
        }

        /* Create TX busdma maps */
        for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
                error = bus_dmamap_create(sc->tsec_tx_mtag, 0,
                    &sc->tx_bufmap[i].map);
                if (error) {
                        device_printf(sc->dev, "failed to init TX ring\n");
                        tsec_detach(sc);
                        return (ENXIO);
                }
                sc->tx_bufmap[i].map_initialized = 1;
        }

        /* Create RX busdma maps and zero mbuf handlers */
        for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
                error = bus_dmamap_create(sc->tsec_rx_mtag, 0,
                    &sc->rx_data[i].map);
                if (error) {
                        device_printf(sc->dev, "failed to init RX ring\n");
                        tsec_detach(sc);
                        return (ENXIO);
                }
                sc->rx_data[i].mbuf = NULL;
        }

        /* Create mbufs for RX buffers */
        for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
                error = tsec_new_rxbuf(sc->tsec_rx_mtag, sc->rx_data[i].map,
                    &sc->rx_data[i].mbuf, &sc->rx_data[i].paddr);
                if (error) {
                        device_printf(sc->dev, "can't load rx DMA map %d, "
                            "error = %d\n", i, error);
                        tsec_detach(sc);
                        return (error);
                }
        }

        /* Create network interface for upper layers */
        ifp = sc->tsec_ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(sc->dev, "if_alloc() failed\n");
                tsec_detach(sc);
                return (ENOMEM);
        }

        if_setsoftc(ifp, sc);
        if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
        if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
        if_setinitfn(ifp, tsec_init);
        if_setstartfn(ifp, tsec_start);
        if_setioctlfn(ifp, tsec_ioctl);

        if_setsendqlen(ifp, TSEC_TX_NUM_DESC - 1);
        if_setsendqready(ifp);

        if_setcapabilities(ifp, IFCAP_VLAN_MTU);
        if (sc->is_etsec)
                if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);

        if_setcapenable(ifp, if_getcapabilities(ifp));

#ifdef DEVICE_POLLING
        /* Advertise that polling is supported */
        if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif

        /* Attach PHY(s) */
        error = mii_attach(sc->dev, &sc->tsec_miibus, ifp, tsec_ifmedia_upd,
            tsec_ifmedia_sts, BMSR_DEFCAPMASK, sc->phyaddr, MII_OFFSET_ANY,
            0);
        if (error) {
                device_printf(sc->dev, "attaching PHYs failed\n");
                if_free(ifp);
                sc->tsec_ifp = NULL;
                tsec_detach(sc);
                return (error);
        }
        sc->tsec_mii = device_get_softc(sc->tsec_miibus);

        /* Set MAC address */
        tsec_get_hwaddr(sc, hwaddr);
        ether_ifattach(ifp, hwaddr);

        return (0);
}

int
tsec_detach(struct tsec_softc *sc)
{

        if (sc->tsec_ifp != NULL) {
#ifdef DEVICE_POLLING
                if (if_getcapenable(sc->tsec_ifp) & IFCAP_POLLING)
                        ether_poll_deregister(sc->tsec_ifp);
#endif

                /* Stop TSEC controller and free TX queue */
                if (sc->sc_rres)
                        tsec_shutdown(sc->dev);

                /* Detach network interface */
                ether_ifdetach(sc->tsec_ifp);
                if_free(sc->tsec_ifp);
                sc->tsec_ifp = NULL;
        }

        /* Free DMA resources */
        tsec_free_dma(sc);

        return (0);
}

int
tsec_shutdown(device_t dev)
{
        struct tsec_softc *sc;

        sc = device_get_softc(dev);

        TSEC_GLOBAL_LOCK(sc);
        tsec_stop(sc);
        TSEC_GLOBAL_UNLOCK(sc);
        return (0);
}

int
tsec_suspend(device_t dev)
{

        /* TODO not implemented! */
        return (0);
}

int
tsec_resume(device_t dev)
{

        /* TODO not implemented! */
        return (0);
}

static void
tsec_init(void *xsc)
{
        struct tsec_softc *sc = xsc;

        TSEC_GLOBAL_LOCK(sc);
        tsec_init_locked(sc);
        TSEC_GLOBAL_UNLOCK(sc);
}

static int
tsec_mii_wait(struct tsec_softc *sc, uint32_t flags)
{
        int timeout;

        /*
         * The status indicators are not set immediately after a command.
         * Discard the first value.
         */
        TSEC_PHY_READ(sc, TSEC_REG_MIIMIND);

        timeout = TSEC_READ_RETRY;
        while ((TSEC_PHY_READ(sc, TSEC_REG_MIIMIND) & flags) && --timeout)
                DELAY(TSEC_READ_DELAY);

        return (timeout == 0);
}

static void
tsec_init_locked(struct tsec_softc *sc)
{
        struct tsec_desc *tx_desc = sc->tsec_tx_vaddr;
        struct tsec_desc *rx_desc = sc->tsec_rx_vaddr;
        if_t ifp = sc->tsec_ifp;
        uint32_t val, i;
        int timeout;

        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
                return;

        TSEC_GLOBAL_LOCK_ASSERT(sc);
        tsec_stop(sc);

        /*
         * These steps are according to the MPC8555E PowerQUICCIII RM:
         * 14.7 Initialization/Application Information
         */

        /* Step 1: soft reset MAC */
        tsec_reset_mac(sc);

        /* Step 2: Initialize MACCFG2 */
        TSEC_WRITE(sc, TSEC_REG_MACCFG2,
            TSEC_MACCFG2_FULLDUPLEX |   /* Full Duplex = 1 */
            TSEC_MACCFG2_PADCRC |       /* PAD/CRC append */
            TSEC_MACCFG2_GMII |         /* I/F Mode bit */
            TSEC_MACCFG2_PRECNT         /* Preamble count = 7 */
        );

        /* Step 3: Initialize ECNTRL
         * While the documentation states that R100M is ignored if RPM is
         * not set, it does seem to be needed to get the orange boxes to
         * work (which have a Marvell 88E1111 PHY). Go figure.
         */

        /*
         * XXX kludge - use circumstantial evidence to program ECNTRL
         * correctly. Ideally we need some board information to guide
         * us here.
         */
        i = TSEC_READ(sc, TSEC_REG_ID2);
        val = (i & 0xffff)
            ? (TSEC_ECNTRL_TBIM | TSEC_ECNTRL_SGMIIM)   /* Sumatra */
            : TSEC_ECNTRL_R100M;                        /* Orange + CDS */
        TSEC_WRITE(sc, TSEC_REG_ECNTRL, TSEC_ECNTRL_STEN | val);

        /* Step 4: Initialize MAC station address */
        tsec_set_mac_address(sc);

        /*
         * Step 5: Assign a Physical address to the TBI so as to not conflict
         * with the external PHY physical address
         */
        TSEC_WRITE(sc, TSEC_REG_TBIPA, 5);

        TSEC_PHY_LOCK(sc);

        /* Step 6: Reset the management interface */
        TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_RESETMGMT);

        /* Step 7: Setup the MII Mgmt clock speed */
        TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_CLKDIV28);

        /* Step 8: Read MII Mgmt indicator register and check for Busy = 0 */
        timeout = tsec_mii_wait(sc, TSEC_MIIMIND_BUSY);

        TSEC_PHY_UNLOCK(sc);
        if (timeout) {
                if_printf(ifp, "tsec_init_locked(): Mgmt busy timeout\n");
                return;
        }

        /* Step 9: Setup the MII Mgmt */
        mii_mediachg(sc->tsec_mii);

        /* Step 10: Clear IEVENT register */
        TSEC_WRITE(sc, TSEC_REG_IEVENT, 0xffffffff);
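        /*
         * IEVENT bits are write-one-to-clear, so writing all ones here (and
         * writing the value read back in tsec_poll()) acknowledges every
         * event the controller may have latched so far.
         */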

        /* Step 11: Enable interrupts */
#ifdef DEVICE_POLLING
        /*
         * ...only if polling is not turned on. Disable interrupts explicitly
         * if polling is enabled.
         */
        if (if_getcapenable(ifp) & IFCAP_POLLING)
                tsec_intrs_ctl(sc, 0);
        else
#endif /* DEVICE_POLLING */
                tsec_intrs_ctl(sc, 1);

        /* Step 12: Initialize IADDRn */
        TSEC_WRITE(sc, TSEC_REG_IADDR0, 0);
        TSEC_WRITE(sc, TSEC_REG_IADDR1, 0);
        TSEC_WRITE(sc, TSEC_REG_IADDR2, 0);
        TSEC_WRITE(sc, TSEC_REG_IADDR3, 0);
        TSEC_WRITE(sc, TSEC_REG_IADDR4, 0);
        TSEC_WRITE(sc, TSEC_REG_IADDR5, 0);
        TSEC_WRITE(sc, TSEC_REG_IADDR6, 0);
        TSEC_WRITE(sc, TSEC_REG_IADDR7, 0);

        /* Step 13: Initialize GADDRn */
        TSEC_WRITE(sc, TSEC_REG_GADDR0, 0);
        TSEC_WRITE(sc, TSEC_REG_GADDR1, 0);
        TSEC_WRITE(sc, TSEC_REG_GADDR2, 0);
        TSEC_WRITE(sc, TSEC_REG_GADDR3, 0);
        TSEC_WRITE(sc, TSEC_REG_GADDR4, 0);
        TSEC_WRITE(sc, TSEC_REG_GADDR5, 0);
        TSEC_WRITE(sc, TSEC_REG_GADDR6, 0);
        TSEC_WRITE(sc, TSEC_REG_GADDR7, 0);

        /* Step 14: Initialize RCTRL */
        TSEC_WRITE(sc, TSEC_REG_RCTRL, 0);

        /* Step 15: Initialize DMACTRL */
        tsec_dma_ctl(sc, 1);

        /* Step 16: Initialize FIFO_PAUSE_CTRL */
        TSEC_WRITE(sc, TSEC_REG_FIFO_PAUSE_CTRL, TSEC_FIFO_PAUSE_CTRL_EN);

        /*
         * Step 17: Initialize transmit/receive descriptor rings.
         * Initialize TBASE and RBASE.
         */
        TSEC_WRITE(sc, TSEC_REG_TBASE, sc->tsec_tx_raddr);
        TSEC_WRITE(sc, TSEC_REG_RBASE, sc->tsec_rx_raddr);
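
        /*
         * The W (wrap) bit set on the final descriptor below tells the
         * controller that it is the last entry of the ring, so it wraps
         * back to TBASE/RBASE instead of advancing past the end.
         */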
        for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
                tx_desc[i].bufptr = 0;
                tx_desc[i].length = 0;
                tx_desc[i].flags = ((i == TSEC_TX_NUM_DESC - 1) ?
                    TSEC_TXBD_W : 0);
        }
        bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
                rx_desc[i].bufptr = sc->rx_data[i].paddr;
                rx_desc[i].length = 0;
                rx_desc[i].flags = TSEC_RXBD_E | TSEC_RXBD_I |
                    ((i == TSEC_RX_NUM_DESC - 1) ? TSEC_RXBD_W : 0);
        }
        bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        /* Step 18: Initialize the maximum receive buffer length */
        TSEC_WRITE(sc, TSEC_REG_MRBLR, MCLBYTES);

        /* Step 19: Configure ethernet frame sizes */
        TSEC_WRITE(sc, TSEC_REG_MINFLR, TSEC_MIN_FRAME_SIZE);
        tsec_set_mtu(sc, if_getmtu(ifp));

        /* Step 20: Enable Rx and RxBD data snooping */
        TSEC_WRITE(sc, TSEC_REG_ATTR, TSEC_ATTR_RDSEN | TSEC_ATTR_RBDSEN);
        TSEC_WRITE(sc, TSEC_REG_ATTRELI, 0);

        /* Step 21: Reset collision counters in hardware */
        TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
        TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
        TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
        TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
        TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);

        /* Step 22: Mask all CAM interrupts */
        TSEC_WRITE(sc, TSEC_REG_MON_CAM1, 0xffffffff);
        TSEC_WRITE(sc, TSEC_REG_MON_CAM2, 0xffffffff);

        /* Step 23: Enable Rx and Tx */
        val = TSEC_READ(sc, TSEC_REG_MACCFG1);
        val |= (TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
        TSEC_WRITE(sc, TSEC_REG_MACCFG1, val);

        /* Step 24: Reset TSEC counters for Tx and Rx rings */
        TSEC_TX_RX_COUNTERS_INIT(sc);

        /* Step 25: Setup TCP/IP Off-Load engine */
        if (sc->is_etsec)
                tsec_offload_setup(sc);

        /* Step 26: Setup multicast filters */
        tsec_setup_multicast(sc);

        /* Step 27: Activate network interface */
        if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
        if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
        sc->tsec_if_flags = if_getflags(ifp);
        sc->tsec_watchdog = 0;

        /* Schedule watchdog timeout */
        callout_reset(&sc->tsec_callout, hz, tsec_tick, sc);
}

static void
tsec_set_mac_address(struct tsec_softc *sc)
{
        uint32_t macbuf[2] = { 0, 0 };
        char *macbufp, *curmac;
        int i;

        TSEC_GLOBAL_LOCK_ASSERT(sc);

        KASSERT((ETHER_ADDR_LEN <= sizeof(macbuf)),
("tsec_set_mac_address: (%d <= %zd", ETHER_ADDR_LEN, 57864f90c9dSRafal Jaworowski sizeof(macbuf))); 57967196661SRafal Jaworowski 58067196661SRafal Jaworowski macbufp = (char *)macbuf; 58147842ecfSJustin Hibbits curmac = (char *)if_getlladdr(sc->tsec_ifp); 58267196661SRafal Jaworowski 58367196661SRafal Jaworowski /* Correct order of MAC address bytes */ 58467196661SRafal Jaworowski for (i = 1; i <= ETHER_ADDR_LEN; i++) 58567196661SRafal Jaworowski macbufp[ETHER_ADDR_LEN-i] = curmac[i-1]; 58667196661SRafal Jaworowski 58767196661SRafal Jaworowski /* Initialize MAC station address MACSTNADDR2 and MACSTNADDR1 */ 58867196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MACSTNADDR2, macbuf[1]); 58967196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MACSTNADDR1, macbuf[0]); 59067196661SRafal Jaworowski } 59167196661SRafal Jaworowski 59267196661SRafal Jaworowski /* 59367196661SRafal Jaworowski * DMA control function, if argument state is: 59467196661SRafal Jaworowski * 0 - DMA engine will be disabled 59567196661SRafal Jaworowski * 1 - DMA engine will be enabled 59667196661SRafal Jaworowski */ 59767196661SRafal Jaworowski static void 59867196661SRafal Jaworowski tsec_dma_ctl(struct tsec_softc *sc, int state) 59967196661SRafal Jaworowski { 60067196661SRafal Jaworowski device_t dev; 60164f90c9dSRafal Jaworowski uint32_t dma_flags, timeout; 60267196661SRafal Jaworowski 60367196661SRafal Jaworowski dev = sc->dev; 60467196661SRafal Jaworowski 60567196661SRafal Jaworowski dma_flags = TSEC_READ(sc, TSEC_REG_DMACTRL); 60667196661SRafal Jaworowski 60767196661SRafal Jaworowski switch (state) { 60867196661SRafal Jaworowski case 0: 60967196661SRafal Jaworowski /* Temporarily clear stop graceful stop bits. */ 61067196661SRafal Jaworowski tsec_dma_ctl(sc, 1000); 61167196661SRafal Jaworowski 61267196661SRafal Jaworowski /* Set it again */ 61367196661SRafal Jaworowski dma_flags |= (TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS); 61467196661SRafal Jaworowski break; 61567196661SRafal Jaworowski case 1000: 61667196661SRafal Jaworowski case 1: 61767196661SRafal Jaworowski /* Set write with response (WWR), wait (WOP) and snoop bits */ 61867196661SRafal Jaworowski dma_flags |= (TSEC_DMACTRL_TDSEN | TSEC_DMACTRL_TBDSEN | 61967196661SRafal Jaworowski DMACTRL_WWR | DMACTRL_WOP); 62067196661SRafal Jaworowski 62167196661SRafal Jaworowski /* Clear graceful stop bits */ 62267196661SRafal Jaworowski dma_flags &= ~(TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS); 62367196661SRafal Jaworowski break; 62467196661SRafal Jaworowski default: 62567196661SRafal Jaworowski device_printf(dev, "tsec_dma_ctl(): unknown state value: %d\n", 62667196661SRafal Jaworowski state); 62767196661SRafal Jaworowski } 62867196661SRafal Jaworowski 62967196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_DMACTRL, dma_flags); 63067196661SRafal Jaworowski 63167196661SRafal Jaworowski switch (state) { 63267196661SRafal Jaworowski case 0: 63367196661SRafal Jaworowski /* Wait for DMA stop */ 63467196661SRafal Jaworowski timeout = TSEC_READ_RETRY; 63567196661SRafal Jaworowski while (--timeout && (!(TSEC_READ(sc, TSEC_REG_IEVENT) & 63667196661SRafal Jaworowski (TSEC_IEVENT_GRSC | TSEC_IEVENT_GTSC)))) 63767196661SRafal Jaworowski DELAY(TSEC_READ_DELAY); 63867196661SRafal Jaworowski 63967196661SRafal Jaworowski if (timeout == 0) 64067196661SRafal Jaworowski device_printf(dev, "tsec_dma_ctl(): timeout!\n"); 64167196661SRafal Jaworowski break; 64267196661SRafal Jaworowski case 1: 64367196661SRafal Jaworowski /* Restart transmission function */ 64467196661SRafal Jaworowski TSEC_WRITE(sc, 
                TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
        }
}

/*
 * Interrupt control function, if argument state is:
 * 0 - all TSEC interrupts will be masked
 * 1 - all TSEC interrupts will be unmasked
 */
static void
tsec_intrs_ctl(struct tsec_softc *sc, int state)
{
        device_t dev;

        dev = sc->dev;

        switch (state) {
        case 0:
                TSEC_WRITE(sc, TSEC_REG_IMASK, 0);
                break;
        case 1:
                TSEC_WRITE(sc, TSEC_REG_IMASK, TSEC_IMASK_BREN |
                    TSEC_IMASK_RXCEN | TSEC_IMASK_BSYEN | TSEC_IMASK_EBERREN |
                    TSEC_IMASK_BTEN | TSEC_IMASK_TXEEN | TSEC_IMASK_TXBEN |
                    TSEC_IMASK_TXFEN | TSEC_IMASK_XFUNEN | TSEC_IMASK_RXFEN);
                break;
        default:
                device_printf(dev, "tsec_intrs_ctl(): unknown state value: %d\n",
                    state);
        }
}

static void
tsec_reset_mac(struct tsec_softc *sc)
{
        uint32_t maccfg1_flags;

        /* Set soft reset bit */
        maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
        maccfg1_flags |= TSEC_MACCFG1_SOFT_RESET;
        TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);

        /* Clear soft reset bit */
        maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
        maccfg1_flags &= ~TSEC_MACCFG1_SOFT_RESET;
        TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);
}

static void
tsec_watchdog(struct tsec_softc *sc)
{
        if_t ifp;

        TSEC_GLOBAL_LOCK_ASSERT(sc);

        if (sc->tsec_watchdog == 0 || --sc->tsec_watchdog > 0)
                return;

        ifp = sc->tsec_ifp;
        if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
        if_printf(ifp, "watchdog timeout\n");

        tsec_stop(sc);
        tsec_init_locked(sc);
}

static void
tsec_start(if_t ifp)
{
        struct tsec_softc *sc = if_getsoftc(ifp);

        TSEC_TRANSMIT_LOCK(sc);
        tsec_start_locked(ifp);
        TSEC_TRANSMIT_UNLOCK(sc);
}

static void
tsec_start_locked(if_t ifp)
{
        struct tsec_softc *sc;
        struct mbuf *m0;
        struct tsec_tx_fcb *tx_fcb;
        int csum_flags;
        int start_tx;
        uint16_t fcb_flags;

        sc = if_getsoftc(ifp);
        start_tx = 0;

        TSEC_TRANSMIT_LOCK_ASSERT(sc);

        if (sc->tsec_link == 0)
                return;

        bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        for (;;) {
                if (TSEC_FREE_TX_DESC(sc) < TSEC_TX_MAX_DMA_SEGS) {
                        /* No free descriptors */
                        if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
                        break;
                }

                /* Get packet from the queue */
                m0 = if_dequeue(ifp);
                if (m0 == NULL)
                        break;

                /* Insert TCP/IP Off-load frame control block */
                fcb_flags = 0;
                csum_flags = m0->m_pkthdr.csum_flags;
                if (csum_flags) {
                        M_PREPEND(m0, sizeof(struct tsec_tx_fcb), M_NOWAIT);
                        if (m0 == NULL)
                                break;

                        if (csum_flags & CSUM_IP)
                                fcb_flags |= TSEC_TX_FCB_IP4 |
                                    TSEC_TX_FCB_CSUM_IP;

                        if (csum_flags & CSUM_TCP)
                                fcb_flags |= TSEC_TX_FCB_TCP |
                                    TSEC_TX_FCB_CSUM_TCP_UDP;

                        if (csum_flags & CSUM_UDP)
                                fcb_flags |= TSEC_TX_FCB_UDP |
                                    TSEC_TX_FCB_CSUM_TCP_UDP;

                        tx_fcb = mtod(m0, struct tsec_tx_fcb *);
                        tx_fcb->flags = fcb_flags;
                        tx_fcb->l3_offset = ETHER_HDR_LEN;
                        tx_fcb->l4_offset = sizeof(struct ip);
                }

                tsec_encap(ifp, sc, m0, fcb_flags, &start_tx);
        }
        bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        if (start_tx) {
                /* Enable transmitter and watchdog timer */
                TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
                sc->tsec_watchdog = 5;
        }
}
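
/*
 * Map a single outgoing mbuf chain into the TX ring.  A non-zero fcb_flags
 * means an off-load frame control block has already been prepended by
 * tsec_start_locked(), so the first descriptor gets the TOE bit.  *start_tx
 * is set once a packet has been queued, telling the caller to kick the
 * transmitter.
 */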
static void
tsec_encap(if_t ifp, struct tsec_softc *sc, struct mbuf *m0,
    uint16_t fcb_flags, int *start_tx)
{
        bus_dma_segment_t segs[TSEC_TX_MAX_DMA_SEGS];
        int error, i, nsegs;
        struct tsec_bufmap *tx_bufmap;
        uint32_t tx_idx;
        uint16_t flags;

        TSEC_TRANSMIT_LOCK_ASSERT(sc);

        tx_idx = sc->tx_idx_head;
        tx_bufmap = &sc->tx_bufmap[tx_idx];

        /* Create mapping in DMA memory */
        error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag, tx_bufmap->map, m0,
            segs, &nsegs, BUS_DMA_NOWAIT);
        if (error == EFBIG) {
                /* Too many segments!  Defrag and try again. */
                struct mbuf *m = m_defrag(m0, M_NOWAIT);

                if (m == NULL) {
                        m_freem(m0);
                        return;
                }
                m0 = m;
                error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag,
                    tx_bufmap->map, m0, segs, &nsegs, BUS_DMA_NOWAIT);
        }
        if (error != 0) {
                /* Give up. */
                m_freem(m0);
                return;
        }

        bus_dmamap_sync(sc->tsec_tx_mtag, tx_bufmap->map,
            BUS_DMASYNC_PREWRITE);
        tx_bufmap->mbuf = m0;
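
        /*
         * The head and tail ring indices wrap using a bitwise AND with
         * (TSEC_TX_NUM_DESC - 1), which relies on the TX ring size being a
         * power of two.
         */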
        /*
         * Fill in the TX descriptors back to front so that READY bit in first
         * descriptor is set last.
         */
        tx_idx = (tx_idx + (uint32_t)nsegs) & (TSEC_TX_NUM_DESC - 1);
        sc->tx_idx_head = tx_idx;
        flags = TSEC_TXBD_L | TSEC_TXBD_I | TSEC_TXBD_R | TSEC_TXBD_TC;
        for (i = nsegs - 1; i >= 0; i--) {
                struct tsec_desc *tx_desc;

                tx_idx = (tx_idx - 1) & (TSEC_TX_NUM_DESC - 1);
                tx_desc = &sc->tsec_tx_vaddr[tx_idx];
                tx_desc->length = segs[i].ds_len;
                tx_desc->bufptr = segs[i].ds_addr;

                if (i == 0) {
                        wmb();

                        if (fcb_flags != 0)
                                flags |= TSEC_TXBD_TOE;
                }

                /*
                 * Set flags:
                 * - wrap
                 * - checksum
                 * - ready to send
                 * - transmit the CRC sequence after the last data byte
                 * - interrupt after the last buffer
                 */
                tx_desc->flags = (tx_idx == (TSEC_TX_NUM_DESC - 1) ?
                    TSEC_TXBD_W : 0) | flags;

                flags &= ~(TSEC_TXBD_L | TSEC_TXBD_I);
        }

        BPF_MTAP(ifp, m0);
        *start_tx = 1;
}

static void
tsec_setfilter(struct tsec_softc *sc)
{
        if_t ifp;
        uint32_t flags;

        ifp = sc->tsec_ifp;
        flags = TSEC_READ(sc, TSEC_REG_RCTRL);

        /* Promiscuous mode */
        if (if_getflags(ifp) & IFF_PROMISC)
                flags |= TSEC_RCTRL_PROM;
        else
                flags &= ~TSEC_RCTRL_PROM;

        TSEC_WRITE(sc, TSEC_REG_RCTRL, flags);
}
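
/*
 * Polling support: when IFCAP_POLLING is enabled the interrupt sources are
 * masked (see tsec_ioctl()) and tsec_poll() performs the equivalent work:
 * error handling on POLL_AND_CHECK_STATUS, TX completion, and then RX
 * processing after TSEC_GLOBAL_TO_RECEIVE_LOCK() narrows the lock to the
 * receive side.
 */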
#ifdef DEVICE_POLLING
static poll_handler_t tsec_poll;

static int
tsec_poll(if_t ifp, enum poll_cmd cmd, int count)
{
        uint32_t ie;
        struct tsec_softc *sc = if_getsoftc(ifp);
        int rx_npkts;

        rx_npkts = 0;

        TSEC_GLOBAL_LOCK(sc);
        if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
                TSEC_GLOBAL_UNLOCK(sc);
                return (rx_npkts);
        }

        if (cmd == POLL_AND_CHECK_STATUS) {
                tsec_error_intr_locked(sc, count);

                /* Clear all events reported */
                ie = TSEC_READ(sc, TSEC_REG_IEVENT);
                TSEC_WRITE(sc, TSEC_REG_IEVENT, ie);
        }

        tsec_transmit_intr_locked(sc);

        TSEC_GLOBAL_TO_RECEIVE_LOCK(sc);

        rx_npkts = tsec_receive_intr_locked(sc, count);

        TSEC_RECEIVE_UNLOCK(sc);

        return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static int
tsec_ioctl(if_t ifp, u_long command, caddr_t data)
{
        struct tsec_softc *sc = if_getsoftc(ifp);
        struct ifreq *ifr = (struct ifreq *)data;
        int mask, error = 0;

        switch (command) {
        case SIOCSIFMTU:
                TSEC_GLOBAL_LOCK(sc);
                if (tsec_set_mtu(sc, ifr->ifr_mtu))
                        if_setmtu(ifp, ifr->ifr_mtu);
                else
                        error = EINVAL;
                TSEC_GLOBAL_UNLOCK(sc);
                break;
        case SIOCSIFFLAGS:
                TSEC_GLOBAL_LOCK(sc);
                if (if_getflags(ifp) & IFF_UP) {
                        if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
                                if ((sc->tsec_if_flags ^ if_getflags(ifp)) &
                                    IFF_PROMISC)
                                        tsec_setfilter(sc);

                                if ((sc->tsec_if_flags ^ if_getflags(ifp)) &
                                    IFF_ALLMULTI)
                                        tsec_setup_multicast(sc);
                        } else
                                tsec_init_locked(sc);
                } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
                        tsec_stop(sc);

                sc->tsec_if_flags = if_getflags(ifp);
                TSEC_GLOBAL_UNLOCK(sc);
                break;
        case SIOCADDMULTI:
        case SIOCDELMULTI:
                if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
                        TSEC_GLOBAL_LOCK(sc);
                        tsec_setup_multicast(sc);
                        TSEC_GLOBAL_UNLOCK(sc);
                }
        case SIOCGIFMEDIA:
        case SIOCSIFMEDIA:
                error = ifmedia_ioctl(ifp, ifr, &sc->tsec_mii->mii_media,
                    command);
                break;
        case SIOCSIFCAP:
                mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
                if ((mask & IFCAP_HWCSUM) && sc->is_etsec) {
                        TSEC_GLOBAL_LOCK(sc);
                        if_setcapenablebit(ifp, 0, IFCAP_HWCSUM);
                        if_setcapenablebit(ifp, IFCAP_HWCSUM &
                            ifr->ifr_reqcap, 0);
                        tsec_offload_setup(sc);
                        TSEC_GLOBAL_UNLOCK(sc);
                }
#ifdef DEVICE_POLLING
                if (mask & IFCAP_POLLING) {
                        if (ifr->ifr_reqcap & IFCAP_POLLING) {
                                error = ether_poll_register(tsec_poll, ifp);
                                if (error)
                                        return (error);

                                TSEC_GLOBAL_LOCK(sc);
                                /* Disable interrupts */
                                tsec_intrs_ctl(sc, 0);
                                if_setcapenablebit(ifp, IFCAP_POLLING, 0);
                                TSEC_GLOBAL_UNLOCK(sc);
                        } else {
                                error = ether_poll_deregister(ifp);
                                TSEC_GLOBAL_LOCK(sc);
                                /* Enable interrupts */
                                tsec_intrs_ctl(sc, 1);
                                if_setcapenablebit(ifp, 0, IFCAP_POLLING);
                                TSEC_GLOBAL_UNLOCK(sc);
                        }
                }
#endif
                break;

        default:
                error = ether_ioctl(ifp, command, data);
        }

        /* Flush buffers if not empty */
        if (if_getflags(ifp) & IFF_UP)
                tsec_start(ifp);
        return (error);
}

static int
tsec_ifmedia_upd(if_t ifp)
{
        struct tsec_softc *sc = if_getsoftc(ifp);
        struct mii_data *mii;

        TSEC_TRANSMIT_LOCK(sc);

        mii = sc->tsec_mii;
        mii_mediachg(mii);

        TSEC_TRANSMIT_UNLOCK(sc);
        return (0);
}

static void
tsec_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
        struct tsec_softc *sc = if_getsoftc(ifp);
        struct mii_data *mii;

        TSEC_TRANSMIT_LOCK(sc);

        mii = sc->tsec_mii;
        mii_pollstat(mii);

        ifmr->ifm_active = mii->mii_media_active;
        ifmr->ifm_status = mii->mii_media_status;

        TSEC_TRANSMIT_UNLOCK(sc);
}
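
/*
 * Allocate and map a fresh receive cluster.  On success the previous
 * mapping (if any) is unloaded, *mbufp points to the new mbuf and *paddr
 * holds its bus address, ready to be written into an RX descriptor.
 */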
static int
tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    uint32_t *paddr)
{
        struct mbuf *new_mbuf;
        bus_dma_segment_t seg[1];
        int error, nsegs;

        KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

        new_mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MCLBYTES);
        if (new_mbuf == NULL)
                return (ENOBUFS);
        new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

        if (*mbufp) {
                bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(tag, map);
        }

        error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
            BUS_DMA_NOWAIT);
        KASSERT(nsegs == 1, ("Too many segments returned!"));
        if (nsegs != 1 || error)
                panic("tsec_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);

#if 0
        if (error) {
                printf("tsec: bus_dmamap_load_mbuf_sg() returned: %d!\n",
                    error);
                m_freem(new_mbuf);
                return (ENOBUFS);
        }
#endif

#if 0
        KASSERT(((seg->ds_addr) & (TSEC_RXBUFFER_ALIGNMENT-1)) == 0,
            ("Wrong alignment of RX buffer!"));
#endif
        bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

        (*mbufp) = new_mbuf;
        (*paddr) = seg->ds_addr;
        return (0);
}

static void
tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        u_int32_t *paddr;

        KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
        paddr = arg;
        *paddr = segs->ds_addr;
}
*/ 111267196661SRafal Jaworowski error = bus_dma_tag_create(NULL, /* parent */ 111367196661SRafal Jaworowski PAGE_SIZE, 0, /* alignment, boundary */ 111467196661SRafal Jaworowski BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 111567196661SRafal Jaworowski BUS_SPACE_MAXADDR, /* highaddr */ 111667196661SRafal Jaworowski NULL, NULL, /* filtfunc, filtfuncarg */ 111767196661SRafal Jaworowski dsize, 1, /* maxsize, nsegments */ 111867196661SRafal Jaworowski dsize, 0, /* maxsegsz, flags */ 111967196661SRafal Jaworowski NULL, NULL, /* lockfunc, lockfuncarg */ 112067196661SRafal Jaworowski dtag); /* dmat */ 112167196661SRafal Jaworowski 112267196661SRafal Jaworowski if (error) { 112364f90c9dSRafal Jaworowski device_printf(dev, "failed to allocate busdma %s tag\n", 112464f90c9dSRafal Jaworowski dname); 112567196661SRafal Jaworowski (*vaddr) = NULL; 112667196661SRafal Jaworowski return (ENXIO); 112767196661SRafal Jaworowski } 112867196661SRafal Jaworowski 112967196661SRafal Jaworowski error = bus_dmamem_alloc(*dtag, vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 113067196661SRafal Jaworowski dmap); 113167196661SRafal Jaworowski if (error) { 113267196661SRafal Jaworowski device_printf(dev, "failed to allocate %s DMA safe memory\n", 113367196661SRafal Jaworowski dname); 113467196661SRafal Jaworowski bus_dma_tag_destroy(*dtag); 113567196661SRafal Jaworowski (*vaddr) = NULL; 113667196661SRafal Jaworowski return (ENXIO); 113767196661SRafal Jaworowski } 113867196661SRafal Jaworowski 113964f90c9dSRafal Jaworowski error = bus_dmamap_load(*dtag, *dmap, *vaddr, dsize, 114064f90c9dSRafal Jaworowski tsec_map_dma_addr, raddr, BUS_DMA_NOWAIT); 114167196661SRafal Jaworowski if (error) { 114264f90c9dSRafal Jaworowski device_printf(dev, "cannot get address of the %s " 114364f90c9dSRafal Jaworowski "descriptors\n", dname); 114467196661SRafal Jaworowski bus_dmamem_free(*dtag, *vaddr, *dmap); 114567196661SRafal Jaworowski bus_dma_tag_destroy(*dtag); 114667196661SRafal Jaworowski (*vaddr) = NULL; 114767196661SRafal Jaworowski return (ENXIO); 114867196661SRafal Jaworowski } 114967196661SRafal Jaworowski 115067196661SRafal Jaworowski return (0); 115167196661SRafal Jaworowski } 115267196661SRafal Jaworowski 115367196661SRafal Jaworowski static void 115467196661SRafal Jaworowski tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr) 115567196661SRafal Jaworowski { 115667196661SRafal Jaworowski 115767196661SRafal Jaworowski if (vaddr == NULL) 115867196661SRafal Jaworowski return; 115967196661SRafal Jaworowski 116067196661SRafal Jaworowski /* Unmap descriptors from DMA memory */ 116164f90c9dSRafal Jaworowski bus_dmamap_sync(dtag, dmap, BUS_DMASYNC_POSTREAD | 116264f90c9dSRafal Jaworowski BUS_DMASYNC_POSTWRITE); 116367196661SRafal Jaworowski bus_dmamap_unload(dtag, dmap); 116467196661SRafal Jaworowski 116567196661SRafal Jaworowski /* Free descriptors memory */ 116667196661SRafal Jaworowski bus_dmamem_free(dtag, vaddr, dmap); 116767196661SRafal Jaworowski 116867196661SRafal Jaworowski /* Destroy descriptors tag */ 116967196661SRafal Jaworowski bus_dma_tag_destroy(dtag); 117067196661SRafal Jaworowski } 117167196661SRafal Jaworowski 117267196661SRafal Jaworowski static void 117367196661SRafal Jaworowski tsec_free_dma(struct tsec_softc *sc) 117467196661SRafal Jaworowski { 117567196661SRafal Jaworowski int i; 117667196661SRafal Jaworowski 117767196661SRafal Jaworowski /* Free TX maps */ 117867196661SRafal Jaworowski for (i = 0; i < TSEC_TX_NUM_DESC; i++) 11792c0dbbcbSJustin Hibbits if (sc->tx_bufmap[i].map_initialized) 
118064f90c9dSRafal Jaworowski bus_dmamap_destroy(sc->tsec_tx_mtag, 11812c0dbbcbSJustin Hibbits sc->tx_bufmap[i].map); 118264f90c9dSRafal Jaworowski /* Destroy tag for TX mbufs */ 118367196661SRafal Jaworowski bus_dma_tag_destroy(sc->tsec_tx_mtag); 118467196661SRafal Jaworowski 118567196661SRafal Jaworowski /* Free RX mbufs and maps */ 118667196661SRafal Jaworowski for (i = 0; i < TSEC_RX_NUM_DESC; i++) { 118767196661SRafal Jaworowski if (sc->rx_data[i].mbuf) { 118867196661SRafal Jaworowski /* Unload buffer from DMA */ 118967196661SRafal Jaworowski bus_dmamap_sync(sc->tsec_rx_mtag, sc->rx_data[i].map, 119067196661SRafal Jaworowski BUS_DMASYNC_POSTREAD); 119164f90c9dSRafal Jaworowski bus_dmamap_unload(sc->tsec_rx_mtag, 119264f90c9dSRafal Jaworowski sc->rx_data[i].map); 119367196661SRafal Jaworowski 119467196661SRafal Jaworowski /* Free buffer */ 119567196661SRafal Jaworowski m_freem(sc->rx_data[i].mbuf); 119667196661SRafal Jaworowski } 119767196661SRafal Jaworowski /* Destroy map for this buffer */ 119867196661SRafal Jaworowski if (sc->rx_data[i].map != NULL) 119967196661SRafal Jaworowski bus_dmamap_destroy(sc->tsec_rx_mtag, 120067196661SRafal Jaworowski sc->rx_data[i].map); 120167196661SRafal Jaworowski } 120264f90c9dSRafal Jaworowski /* Destroy tag for RX mbufs */ 120367196661SRafal Jaworowski bus_dma_tag_destroy(sc->tsec_rx_mtag); 120467196661SRafal Jaworowski 120567196661SRafal Jaworowski /* Unload TX/RX descriptors */ 120667196661SRafal Jaworowski tsec_free_dma_desc(sc->tsec_tx_dtag, sc->tsec_tx_dmap, 120767196661SRafal Jaworowski sc->tsec_tx_vaddr); 120867196661SRafal Jaworowski tsec_free_dma_desc(sc->tsec_rx_dtag, sc->tsec_rx_dmap, 120967196661SRafal Jaworowski sc->tsec_rx_vaddr); 121067196661SRafal Jaworowski } 121167196661SRafal Jaworowski 121267196661SRafal Jaworowski static void 121367196661SRafal Jaworowski tsec_stop(struct tsec_softc *sc) 121467196661SRafal Jaworowski { 121547842ecfSJustin Hibbits if_t ifp; 121667196661SRafal Jaworowski uint32_t tmpval; 121767196661SRafal Jaworowski 121867196661SRafal Jaworowski TSEC_GLOBAL_LOCK_ASSERT(sc); 121967196661SRafal Jaworowski 122067196661SRafal Jaworowski ifp = sc->tsec_ifp; 122167196661SRafal Jaworowski 122267196661SRafal Jaworowski /* Disable interface and watchdog timer */ 122364f90c9dSRafal Jaworowski callout_stop(&sc->tsec_callout); 122447842ecfSJustin Hibbits if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); 12255432bd9fSRafal Jaworowski sc->tsec_watchdog = 0; 122667196661SRafal Jaworowski 122767196661SRafal Jaworowski /* Disable all interrupts and stop DMA */ 122867196661SRafal Jaworowski tsec_intrs_ctl(sc, 0); 122967196661SRafal Jaworowski tsec_dma_ctl(sc, 0); 123067196661SRafal Jaworowski 123167196661SRafal Jaworowski /* Remove pending data from TX queue */ 12322c0dbbcbSJustin Hibbits while (sc->tx_idx_tail != sc->tx_idx_head) { 12332c0dbbcbSJustin Hibbits bus_dmamap_sync(sc->tsec_tx_mtag, 12342c0dbbcbSJustin Hibbits sc->tx_bufmap[sc->tx_idx_tail].map, 1235bd37530eSRafal Jaworowski BUS_DMASYNC_POSTWRITE); 12362c0dbbcbSJustin Hibbits bus_dmamap_unload(sc->tsec_tx_mtag, 12372c0dbbcbSJustin Hibbits sc->tx_bufmap[sc->tx_idx_tail].map); 12382c0dbbcbSJustin Hibbits m_freem(sc->tx_bufmap[sc->tx_idx_tail].mbuf); 12392c0dbbcbSJustin Hibbits sc->tx_idx_tail = (sc->tx_idx_tail + 1) 12402c0dbbcbSJustin Hibbits & (TSEC_TX_NUM_DESC - 1); 124167196661SRafal Jaworowski } 124267196661SRafal Jaworowski 1243bd37530eSRafal Jaworowski /* Disable RX and TX */ 124467196661SRafal Jaworowski tmpval = TSEC_READ(sc, TSEC_REG_MACCFG1); 
124567196661SRafal Jaworowski tmpval &= ~(TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN); 124667196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MACCFG1, tmpval); 124767196661SRafal Jaworowski DELAY(10); 124867196661SRafal Jaworowski } 124967196661SRafal Jaworowski 1250bd37530eSRafal Jaworowski static void 1251bd37530eSRafal Jaworowski tsec_tick(void *arg) 125267196661SRafal Jaworowski { 125367196661SRafal Jaworowski struct tsec_softc *sc = arg; 125447842ecfSJustin Hibbits if_t ifp; 1255bd37530eSRafal Jaworowski int link; 1256bd37530eSRafal Jaworowski 1257bd37530eSRafal Jaworowski TSEC_GLOBAL_LOCK(sc); 1258bd37530eSRafal Jaworowski 1259bd37530eSRafal Jaworowski tsec_watchdog(sc); 1260bd37530eSRafal Jaworowski 1261bd37530eSRafal Jaworowski ifp = sc->tsec_ifp; 1262bd37530eSRafal Jaworowski link = sc->tsec_link; 1263bd37530eSRafal Jaworowski 1264bd37530eSRafal Jaworowski mii_tick(sc->tsec_mii); 1265bd37530eSRafal Jaworowski 1266bd37530eSRafal Jaworowski if (link == 0 && sc->tsec_link == 1 && 126747842ecfSJustin Hibbits (!if_sendq_empty(ifp))) 1268bd37530eSRafal Jaworowski tsec_start_locked(ifp); 1269bd37530eSRafal Jaworowski 1270bd37530eSRafal Jaworowski /* Schedule another timeout one second from now. */ 1271bd37530eSRafal Jaworowski callout_reset(&sc->tsec_callout, hz, tsec_tick, sc); 1272bd37530eSRafal Jaworowski 1273bd37530eSRafal Jaworowski TSEC_GLOBAL_UNLOCK(sc); 1274bd37530eSRafal Jaworowski } 1275bd37530eSRafal Jaworowski 1276bd37530eSRafal Jaworowski /* 1277bd37530eSRafal Jaworowski * This is the core RX routine. It replenishes mbufs in the descriptor ring and 1278bd37530eSRafal Jaworowski * sends data that has been DMA'd into host memory up to the upper layer. 1279bd37530eSRafal Jaworowski * 1280bd37530eSRafal Jaworowski * Loops at most count times if count > 0, or until done if count < 0.
1281bd37530eSRafal Jaworowski */ 12821abcdbd1SAttilio Rao static int 1283bd37530eSRafal Jaworowski tsec_receive_intr_locked(struct tsec_softc *sc, int count) 1284bd37530eSRafal Jaworowski { 128567196661SRafal Jaworowski struct tsec_desc *rx_desc; 128647842ecfSJustin Hibbits if_t ifp; 128767196661SRafal Jaworowski struct rx_data_type *rx_data; 128867196661SRafal Jaworowski struct mbuf *m; 128967196661SRafal Jaworowski uint32_t i; 12901abcdbd1SAttilio Rao int c, rx_npkts; 129167196661SRafal Jaworowski uint16_t flags; 1292bd37530eSRafal Jaworowski 1293bd37530eSRafal Jaworowski TSEC_RECEIVE_LOCK_ASSERT(sc); 129467196661SRafal Jaworowski 129567196661SRafal Jaworowski ifp = sc->tsec_ifp; 129667196661SRafal Jaworowski rx_data = sc->rx_data; 12971abcdbd1SAttilio Rao rx_npkts = 0; 129867196661SRafal Jaworowski 1299bd37530eSRafal Jaworowski bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, 1300bd37530eSRafal Jaworowski BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 130167196661SRafal Jaworowski 1302bd37530eSRafal Jaworowski for (c = 0; ; c++) { 1303bd37530eSRafal Jaworowski if (count >= 0 && count-- == 0) 1304bd37530eSRafal Jaworowski break; 130567196661SRafal Jaworowski 130667196661SRafal Jaworowski rx_desc = TSEC_GET_CUR_RX_DESC(sc); 130767196661SRafal Jaworowski flags = rx_desc->flags; 130867196661SRafal Jaworowski 130967196661SRafal Jaworowski /* Check if there is anything to receive */ 1310bd37530eSRafal Jaworowski if ((flags & TSEC_RXBD_E) || (c >= TSEC_RX_NUM_DESC)) { 131167196661SRafal Jaworowski /* 131267196661SRafal Jaworowski * Avoid generating another interrupt 131367196661SRafal Jaworowski */ 131467196661SRafal Jaworowski if (flags & TSEC_RXBD_E) 131567196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_IEVENT, 131667196661SRafal Jaworowski TSEC_IEVENT_RXB | TSEC_IEVENT_RXF); 131767196661SRafal Jaworowski /* 131867196661SRafal Jaworowski * We didn't consume current descriptor and have to 131967196661SRafal Jaworowski * return it to the queue 132067196661SRafal Jaworowski */ 132167196661SRafal Jaworowski TSEC_BACK_CUR_RX_DESC(sc); 132267196661SRafal Jaworowski break; 132367196661SRafal Jaworowski } 132467196661SRafal Jaworowski 132567196661SRafal Jaworowski if (flags & (TSEC_RXBD_LG | TSEC_RXBD_SH | TSEC_RXBD_NO | 132667196661SRafal Jaworowski TSEC_RXBD_CR | TSEC_RXBD_OV | TSEC_RXBD_TR)) { 132767196661SRafal Jaworowski rx_desc->length = 0; 1328bd37530eSRafal Jaworowski rx_desc->flags = (rx_desc->flags & 1329bd37530eSRafal Jaworowski ~TSEC_RXBD_ZEROONINIT) | TSEC_RXBD_E | TSEC_RXBD_I; 1330bd37530eSRafal Jaworowski 1331bd37530eSRafal Jaworowski if (sc->frame != NULL) { 1332bd37530eSRafal Jaworowski m_free(sc->frame); 1333bd37530eSRafal Jaworowski sc->frame = NULL; 1334bd37530eSRafal Jaworowski } 1335bd37530eSRafal Jaworowski 133667196661SRafal Jaworowski continue; 133767196661SRafal Jaworowski } 133867196661SRafal Jaworowski 133967196661SRafal Jaworowski /* Ok... 
process frame */ 134067196661SRafal Jaworowski i = TSEC_GET_CUR_RX_DESC_CNT(sc); 134167196661SRafal Jaworowski m = rx_data[i].mbuf; 1342bd37530eSRafal Jaworowski m->m_len = rx_desc->length; 1343bd37530eSRafal Jaworowski 1344bd37530eSRafal Jaworowski if (sc->frame != NULL) { 1345bd37530eSRafal Jaworowski if ((flags & TSEC_RXBD_L) != 0) 1346bd37530eSRafal Jaworowski m->m_len -= m_length(sc->frame, NULL); 1347bd37530eSRafal Jaworowski 1348bd37530eSRafal Jaworowski m->m_flags &= ~M_PKTHDR; 1349bd37530eSRafal Jaworowski m_cat(sc->frame, m); 1350bd37530eSRafal Jaworowski } else { 1351bd37530eSRafal Jaworowski sc->frame = m; 1352bd37530eSRafal Jaworowski } 1353bd37530eSRafal Jaworowski 1354bd37530eSRafal Jaworowski m = NULL; 1355bd37530eSRafal Jaworowski 1356bd37530eSRafal Jaworowski if ((flags & TSEC_RXBD_L) != 0) { 1357bd37530eSRafal Jaworowski m = sc->frame; 1358bd37530eSRafal Jaworowski sc->frame = NULL; 1359bd37530eSRafal Jaworowski } 136067196661SRafal Jaworowski 136167196661SRafal Jaworowski if (tsec_new_rxbuf(sc->tsec_rx_mtag, rx_data[i].map, 136267196661SRafal Jaworowski &rx_data[i].mbuf, &rx_data[i].paddr)) { 13632c0dbbcbSJustin Hibbits if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1364ab160495SRafal Jaworowski /* 1365ab160495SRafal Jaworowski * We ran out of mbufs; didn't consume current 1366ab160495SRafal Jaworowski * descriptor and have to return it to the queue. 1367ab160495SRafal Jaworowski */ 1368ab160495SRafal Jaworowski TSEC_BACK_CUR_RX_DESC(sc); 1369ab160495SRafal Jaworowski break; 137067196661SRafal Jaworowski } 1371bd37530eSRafal Jaworowski 1372bd37530eSRafal Jaworowski /* Attach new buffer to descriptor and clear flags */ 137367196661SRafal Jaworowski rx_desc->bufptr = rx_data[i].paddr; 137467196661SRafal Jaworowski rx_desc->length = 0; 137567196661SRafal Jaworowski rx_desc->flags = (rx_desc->flags & ~TSEC_RXBD_ZEROONINIT) | 137667196661SRafal Jaworowski TSEC_RXBD_E | TSEC_RXBD_I; 137767196661SRafal Jaworowski 1378bd37530eSRafal Jaworowski if (m != NULL) { 137967196661SRafal Jaworowski m->m_pkthdr.rcvif = ifp; 138067196661SRafal Jaworowski 1381bd37530eSRafal Jaworowski m_fixhdr(m); 1382bd37530eSRafal Jaworowski m_adj(m, -ETHER_CRC_LEN); 138367196661SRafal Jaworowski 1384bd37530eSRafal Jaworowski if (sc->is_etsec) 1385bd37530eSRafal Jaworowski tsec_offload_process_frame(sc, m); 138667196661SRafal Jaworowski 138767196661SRafal Jaworowski TSEC_RECEIVE_UNLOCK(sc); 138847842ecfSJustin Hibbits if_input(ifp, m); 1389bd37530eSRafal Jaworowski TSEC_RECEIVE_LOCK(sc); 13901abcdbd1SAttilio Rao rx_npkts++; 1391bd37530eSRafal Jaworowski } 1392bd37530eSRafal Jaworowski } 139367196661SRafal Jaworowski 1394bd37530eSRafal Jaworowski bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, 1395bd37530eSRafal Jaworowski BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1396371bf7ccSRafal Jaworowski 1397371bf7ccSRafal Jaworowski /* 1398371bf7ccSRafal Jaworowski * Make sure TSEC receiver is not halted. 1399371bf7ccSRafal Jaworowski * 1400371bf7ccSRafal Jaworowski * Various conditions can stop the TSEC receiver, but not all are 1401371bf7ccSRafal Jaworowski * signaled and handled by error interrupt, so make sure the receiver 1402371bf7ccSRafal Jaworowski * is running. Writing to TSEC_REG_RSTAT restarts the receiver when 1403371bf7ccSRafal Jaworowski * halted, and is harmless if already running. 
1404371bf7ccSRafal Jaworowski */ 1405371bf7ccSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_RSTAT, TSEC_RSTAT_QHLT); 14061abcdbd1SAttilio Rao return (rx_npkts); 140767196661SRafal Jaworowski } 140867196661SRafal Jaworowski 1409321e12c8SRafal Jaworowski void 1410bd37530eSRafal Jaworowski tsec_receive_intr(void *arg) 141167196661SRafal Jaworowski { 141267196661SRafal Jaworowski struct tsec_softc *sc = arg; 1413bd37530eSRafal Jaworowski 1414bd37530eSRafal Jaworowski TSEC_RECEIVE_LOCK(sc); 1415bd37530eSRafal Jaworowski 1416bd37530eSRafal Jaworowski #ifdef DEVICE_POLLING 1417*d8b78838SJustin Hibbits if (if_getcapenable(sc->tsec_ifp) & IFCAP_POLLING) { 1418bd37530eSRafal Jaworowski TSEC_RECEIVE_UNLOCK(sc); 1419bd37530eSRafal Jaworowski return; 1420bd37530eSRafal Jaworowski } 1421bd37530eSRafal Jaworowski #endif 1422bd37530eSRafal Jaworowski 1423bd37530eSRafal Jaworowski /* Confirm the interrupt was received by driver */ 1424bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXB | TSEC_IEVENT_RXF); 1425bd37530eSRafal Jaworowski tsec_receive_intr_locked(sc, -1); 1426bd37530eSRafal Jaworowski 1427bd37530eSRafal Jaworowski TSEC_RECEIVE_UNLOCK(sc); 1428bd37530eSRafal Jaworowski } 1429bd37530eSRafal Jaworowski 1430bd37530eSRafal Jaworowski static void 1431bd37530eSRafal Jaworowski tsec_transmit_intr_locked(struct tsec_softc *sc) 1432bd37530eSRafal Jaworowski { 143347842ecfSJustin Hibbits if_t ifp; 14342c0dbbcbSJustin Hibbits uint32_t tx_idx; 143567196661SRafal Jaworowski 1436bd37530eSRafal Jaworowski TSEC_TRANSMIT_LOCK_ASSERT(sc); 1437bd37530eSRafal Jaworowski 143867196661SRafal Jaworowski ifp = sc->tsec_ifp; 143967196661SRafal Jaworowski 144067196661SRafal Jaworowski /* Update collision statistics */ 1441c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_COLLISIONS, TSEC_READ(sc, TSEC_REG_MON_TNCL)); 144267196661SRafal Jaworowski 144367196661SRafal Jaworowski /* Reset collision counters in hardware */ 144467196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0); 144567196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0); 144667196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0); 144767196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0); 144867196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0); 144967196661SRafal Jaworowski 1450321e12c8SRafal Jaworowski bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, 1451321e12c8SRafal Jaworowski BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 145267196661SRafal Jaworowski 14532c0dbbcbSJustin Hibbits tx_idx = sc->tx_idx_tail; 14542c0dbbcbSJustin Hibbits while (tx_idx != sc->tx_idx_head) { 14552c0dbbcbSJustin Hibbits struct tsec_desc *tx_desc; 14562c0dbbcbSJustin Hibbits struct tsec_bufmap *tx_bufmap; 14572c0dbbcbSJustin Hibbits 14582c0dbbcbSJustin Hibbits tx_desc = &sc->tsec_tx_vaddr[tx_idx]; 145967196661SRafal Jaworowski if (tx_desc->flags & TSEC_TXBD_R) { 146067196661SRafal Jaworowski break; 146167196661SRafal Jaworowski } 146267196661SRafal Jaworowski 14632c0dbbcbSJustin Hibbits tx_bufmap = &sc->tx_bufmap[tx_idx]; 14642c0dbbcbSJustin Hibbits tx_idx = (tx_idx + 1) & (TSEC_TX_NUM_DESC - 1); 14652c0dbbcbSJustin Hibbits if (tx_bufmap->mbuf == NULL) 146667196661SRafal Jaworowski continue; 146767196661SRafal Jaworowski 146867196661SRafal Jaworowski /* 146967196661SRafal Jaworowski * This is the last buf in this packet, so unmap and free it. 
147067196661SRafal Jaworowski */ 14712c0dbbcbSJustin Hibbits bus_dmamap_sync(sc->tsec_tx_mtag, tx_bufmap->map, 147264f90c9dSRafal Jaworowski BUS_DMASYNC_POSTWRITE); 14732c0dbbcbSJustin Hibbits bus_dmamap_unload(sc->tsec_tx_mtag, tx_bufmap->map); 14742c0dbbcbSJustin Hibbits m_freem(tx_bufmap->mbuf); 14752c0dbbcbSJustin Hibbits tx_bufmap->mbuf = NULL; 147667196661SRafal Jaworowski 1477c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 147867196661SRafal Jaworowski } 14792c0dbbcbSJustin Hibbits sc->tx_idx_tail = tx_idx; 1480bd37530eSRafal Jaworowski bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, 1481bd37530eSRafal Jaworowski BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 148267196661SRafal Jaworowski 148347842ecfSJustin Hibbits if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 148467196661SRafal Jaworowski tsec_start_locked(ifp); 148567196661SRafal Jaworowski 14862c0dbbcbSJustin Hibbits if (sc->tx_idx_tail == sc->tx_idx_head) 14875432bd9fSRafal Jaworowski sc->tsec_watchdog = 0; 148867196661SRafal Jaworowski } 148967196661SRafal Jaworowski 1490321e12c8SRafal Jaworowski void 1491bd37530eSRafal Jaworowski tsec_transmit_intr(void *arg) 149267196661SRafal Jaworowski { 149367196661SRafal Jaworowski struct tsec_softc *sc = arg; 1494bd37530eSRafal Jaworowski 1495bd37530eSRafal Jaworowski TSEC_TRANSMIT_LOCK(sc); 1496bd37530eSRafal Jaworowski 1497bd37530eSRafal Jaworowski #ifdef DEVICE_POLLING 1498*d8b78838SJustin Hibbits if (if_getcapenable(sc->tsec_ifp) & IFCAP_POLLING) { 1499bd37530eSRafal Jaworowski TSEC_TRANSMIT_UNLOCK(sc); 1500bd37530eSRafal Jaworowski return; 1501bd37530eSRafal Jaworowski } 1502bd37530eSRafal Jaworowski #endif 1503bd37530eSRafal Jaworowski /* Confirm the interrupt was received by driver */ 1504bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_TXB | TSEC_IEVENT_TXF); 1505bd37530eSRafal Jaworowski tsec_transmit_intr_locked(sc); 1506bd37530eSRafal Jaworowski 1507bd37530eSRafal Jaworowski TSEC_TRANSMIT_UNLOCK(sc); 1508bd37530eSRafal Jaworowski } 1509bd37530eSRafal Jaworowski 1510bd37530eSRafal Jaworowski static void 1511bd37530eSRafal Jaworowski tsec_error_intr_locked(struct tsec_softc *sc, int count) 1512bd37530eSRafal Jaworowski { 151347842ecfSJustin Hibbits if_t ifp; 151467196661SRafal Jaworowski uint32_t eflags; 151567196661SRafal Jaworowski 1516bd37530eSRafal Jaworowski TSEC_GLOBAL_LOCK_ASSERT(sc); 1517bd37530eSRafal Jaworowski 151867196661SRafal Jaworowski ifp = sc->tsec_ifp; 151967196661SRafal Jaworowski 152067196661SRafal Jaworowski eflags = TSEC_READ(sc, TSEC_REG_IEVENT); 152167196661SRafal Jaworowski 152267196661SRafal Jaworowski /* Clear events bits in hardware */ 152367196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXC | TSEC_IEVENT_BSY | 152467196661SRafal Jaworowski TSEC_IEVENT_EBERR | TSEC_IEVENT_MSRO | TSEC_IEVENT_BABT | 152567196661SRafal Jaworowski TSEC_IEVENT_TXC | TSEC_IEVENT_TXE | TSEC_IEVENT_LC | 152667196661SRafal Jaworowski TSEC_IEVENT_CRL | TSEC_IEVENT_XFUN); 152767196661SRafal Jaworowski 152867196661SRafal Jaworowski /* Check transmitter errors */ 152967196661SRafal Jaworowski if (eflags & TSEC_IEVENT_TXE) { 1530c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 153167196661SRafal Jaworowski 153267196661SRafal Jaworowski if (eflags & TSEC_IEVENT_LC) 1533c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); 153467196661SRafal Jaworowski 153567196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT); 153667196661SRafal Jaworowski } 153767196661SRafal Jaworowski 
15382c0dbbcbSJustin Hibbits /* Check for discarded frame due to a lack of buffers */ 153967196661SRafal Jaworowski if (eflags & TSEC_IEVENT_BSY) { 1540c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 154167196661SRafal Jaworowski } 1542bd37530eSRafal Jaworowski 154347842ecfSJustin Hibbits if (if_getflags(ifp) & IFF_DEBUG) 1544bd37530eSRafal Jaworowski if_printf(ifp, "tsec_error_intr(): event flags: 0x%x\n", 1545bd37530eSRafal Jaworowski eflags); 1546bd37530eSRafal Jaworowski 1547bd37530eSRafal Jaworowski if (eflags & TSEC_IEVENT_EBERR) { 1548bd37530eSRafal Jaworowski if_printf(ifp, "System bus error occurred during " 1549bd37530eSRafal Jaworowski "DMA transaction (flags: 0x%x)\n", eflags); 1550bd37530eSRafal Jaworowski tsec_init_locked(sc); 1551bd37530eSRafal Jaworowski } 1552bd37530eSRafal Jaworowski 1553bd37530eSRafal Jaworowski if (eflags & TSEC_IEVENT_BABT) 1554c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1555bd37530eSRafal Jaworowski 155667196661SRafal Jaworowski if (eflags & TSEC_IEVENT_BABR) 1557c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 155867196661SRafal Jaworowski } 155967196661SRafal Jaworowski 1560bd37530eSRafal Jaworowski void 1561bd37530eSRafal Jaworowski tsec_error_intr(void *arg) 156267196661SRafal Jaworowski { 1563bd37530eSRafal Jaworowski struct tsec_softc *sc = arg; 156467196661SRafal Jaworowski 1565772619e1SRafal Jaworowski TSEC_GLOBAL_LOCK(sc); 1566bd37530eSRafal Jaworowski tsec_error_intr_locked(sc, -1); 1567772619e1SRafal Jaworowski TSEC_GLOBAL_UNLOCK(sc); 156867196661SRafal Jaworowski } 156967196661SRafal Jaworowski 1570321e12c8SRafal Jaworowski int 157167196661SRafal Jaworowski tsec_miibus_readreg(device_t dev, int phy, int reg) 157267196661SRafal Jaworowski { 157367196661SRafal Jaworowski struct tsec_softc *sc; 157488011b59SJustin Hibbits int timeout; 1575629aa519SNathan Whitehorn int rv; 157667196661SRafal Jaworowski 1577aa15e881SRafal Jaworowski sc = device_get_softc(dev); 157867196661SRafal Jaworowski 1579629aa519SNathan Whitehorn TSEC_PHY_LOCK(); 1580629aa519SNathan Whitehorn TSEC_PHY_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg); 1581629aa519SNathan Whitehorn TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCOM, 0); 1582629aa519SNathan Whitehorn TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCOM, TSEC_MIIMCOM_READCYCLE); 158367196661SRafal Jaworowski 158488011b59SJustin Hibbits timeout = tsec_mii_wait(sc, TSEC_MIIMIND_NOTVALID | TSEC_MIIMIND_BUSY); 158588011b59SJustin Hibbits rv = TSEC_PHY_READ(sc, TSEC_REG_MIIMSTAT); 158688011b59SJustin Hibbits TSEC_PHY_UNLOCK(); 158767196661SRafal Jaworowski 158872b58db8SJustin Hibbits if (timeout) 158967196661SRafal Jaworowski device_printf(dev, "Timeout while reading from PHY!\n"); 159067196661SRafal Jaworowski 1591629aa519SNathan Whitehorn return (rv); 159267196661SRafal Jaworowski } 159367196661SRafal Jaworowski 1594661ee6eeSRafal Jaworowski int 159567196661SRafal Jaworowski tsec_miibus_writereg(device_t dev, int phy, int reg, int value) 159667196661SRafal Jaworowski { 159767196661SRafal Jaworowski struct tsec_softc *sc; 159888011b59SJustin Hibbits int timeout; 159967196661SRafal Jaworowski 1600aa15e881SRafal Jaworowski sc = device_get_softc(dev); 160167196661SRafal Jaworowski 1602629aa519SNathan Whitehorn TSEC_PHY_LOCK(); 1603629aa519SNathan Whitehorn TSEC_PHY_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg); 1604629aa519SNathan Whitehorn TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCON, value); 160588011b59SJustin Hibbits timeout = tsec_mii_wait(sc, TSEC_MIIMIND_BUSY); 1606629aa519SNathan Whitehorn
TSEC_PHY_UNLOCK(); 160767196661SRafal Jaworowski 160872b58db8SJustin Hibbits if (timeout) 160967196661SRafal Jaworowski device_printf(dev, "Timeout while writing to PHY!\n"); 1610661ee6eeSRafal Jaworowski 1611661ee6eeSRafal Jaworowski return (0); 161267196661SRafal Jaworowski } 161367196661SRafal Jaworowski 1614321e12c8SRafal Jaworowski void 161567196661SRafal Jaworowski tsec_miibus_statchg(device_t dev) 161667196661SRafal Jaworowski { 161767196661SRafal Jaworowski struct tsec_softc *sc; 161867196661SRafal Jaworowski struct mii_data *mii; 161967196661SRafal Jaworowski uint32_t ecntrl, id, tmp; 162067196661SRafal Jaworowski int link; 162167196661SRafal Jaworowski 162267196661SRafal Jaworowski sc = device_get_softc(dev); 162367196661SRafal Jaworowski mii = sc->tsec_mii; 162467196661SRafal Jaworowski link = ((mii->mii_media_status & IFM_ACTIVE) ? 1 : 0); 162567196661SRafal Jaworowski 162667196661SRafal Jaworowski tmp = TSEC_READ(sc, TSEC_REG_MACCFG2) & ~TSEC_MACCFG2_IF; 162767196661SRafal Jaworowski 162867196661SRafal Jaworowski if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) 162967196661SRafal Jaworowski tmp |= TSEC_MACCFG2_FULLDUPLEX; 163067196661SRafal Jaworowski else 163167196661SRafal Jaworowski tmp &= ~TSEC_MACCFG2_FULLDUPLEX; 163267196661SRafal Jaworowski 163367196661SRafal Jaworowski switch (IFM_SUBTYPE(mii->mii_media_active)) { 163467196661SRafal Jaworowski case IFM_1000_T: 163567196661SRafal Jaworowski case IFM_1000_SX: 163667196661SRafal Jaworowski tmp |= TSEC_MACCFG2_GMII; 163767196661SRafal Jaworowski sc->tsec_link = link; 163867196661SRafal Jaworowski break; 163967196661SRafal Jaworowski case IFM_100_TX: 164067196661SRafal Jaworowski case IFM_10_T: 164167196661SRafal Jaworowski tmp |= TSEC_MACCFG2_MII; 164267196661SRafal Jaworowski sc->tsec_link = link; 164367196661SRafal Jaworowski break; 164467196661SRafal Jaworowski case IFM_NONE: 164567196661SRafal Jaworowski if (link) 164664f90c9dSRafal Jaworowski device_printf(dev, "No speed selected but link " 164764f90c9dSRafal Jaworowski "active!\n"); 164867196661SRafal Jaworowski sc->tsec_link = 0; 164967196661SRafal Jaworowski return; 165067196661SRafal Jaworowski default: 165167196661SRafal Jaworowski sc->tsec_link = 0; 165267196661SRafal Jaworowski device_printf(dev, "Unknown speed (%d), link %s!\n", 165367196661SRafal Jaworowski IFM_SUBTYPE(mii->mii_media_active), 165467196661SRafal Jaworowski ((link) ? "up" : "down")); 165567196661SRafal Jaworowski return; 165667196661SRafal Jaworowski } 165767196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MACCFG2, tmp); 165867196661SRafal Jaworowski 165967196661SRafal Jaworowski /* XXX kludge - use circumstantial evidence for reduced mode. */ 166067196661SRafal Jaworowski id = TSEC_READ(sc, TSEC_REG_ID2); 166167196661SRafal Jaworowski if (id & 0xffff) { 166267196661SRafal Jaworowski ecntrl = TSEC_READ(sc, TSEC_REG_ECNTRL) & ~TSEC_ECNTRL_R100M; 166367196661SRafal Jaworowski ecntrl |= (tmp & TSEC_MACCFG2_MII) ? 
TSEC_ECNTRL_R100M : 0; 166467196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_ECNTRL, ecntrl); 166567196661SRafal Jaworowski } 166667196661SRafal Jaworowski } 1667bd37530eSRafal Jaworowski 1668bd37530eSRafal Jaworowski static void 1669bd37530eSRafal Jaworowski tsec_add_sysctls(struct tsec_softc *sc) 1670bd37530eSRafal Jaworowski { 1671bd37530eSRafal Jaworowski struct sysctl_ctx_list *ctx; 1672bd37530eSRafal Jaworowski struct sysctl_oid_list *children; 1673bd37530eSRafal Jaworowski struct sysctl_oid *tree; 1674bd37530eSRafal Jaworowski 1675bd37530eSRafal Jaworowski ctx = device_get_sysctl_ctx(sc->dev); 1676bd37530eSRafal Jaworowski children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 1677bd37530eSRafal Jaworowski tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal", 16787029da5cSPawel Biernacki CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "TSEC Interrupts coalescing"); 1679bd37530eSRafal Jaworowski children = SYSCTL_CHILDREN(tree); 1680bd37530eSRafal Jaworowski 1681bd37530eSRafal Jaworowski SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time", 16827029da5cSPawel Biernacki CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, TSEC_IC_RX, 16837029da5cSPawel Biernacki tsec_sysctl_ic_time, "I", "IC RX time threshold (0-65535)"); 1684bd37530eSRafal Jaworowski SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_count", 16857029da5cSPawel Biernacki CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, TSEC_IC_RX, 16867029da5cSPawel Biernacki tsec_sysctl_ic_count, "I", "IC RX frame count threshold (0-255)"); 1687bd37530eSRafal Jaworowski 1688bd37530eSRafal Jaworowski SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time", 16897029da5cSPawel Biernacki CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, TSEC_IC_TX, 16907029da5cSPawel Biernacki tsec_sysctl_ic_time, "I", "IC TX time threshold (0-65535)"); 1691bd37530eSRafal Jaworowski SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_count", 16927029da5cSPawel Biernacki CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, TSEC_IC_TX, 16937029da5cSPawel Biernacki tsec_sysctl_ic_count, "I", "IC TX frame count threshold (0-255)"); 1694bd37530eSRafal Jaworowski } 1695bd37530eSRafal Jaworowski 1696bd37530eSRafal Jaworowski /* 1697bd37530eSRafal Jaworowski * With Interrupt Coalescing (IC) active, a transmit/receive frame 1698bd37530eSRafal Jaworowski * interrupt is raised either upon: 1699bd37530eSRafal Jaworowski * 1700bd37530eSRafal Jaworowski * - threshold-defined period of time elapsed, or 1701bd37530eSRafal Jaworowski * - threshold-defined number of frames is received/transmitted, 1702bd37530eSRafal Jaworowski * whichever occurs first. 
1703bd37530eSRafal Jaworowski * 1704bd37530eSRafal Jaworowski * The following sysctls regulate IC behaviour (for TX/RX separately): 1705bd37530eSRafal Jaworowski * 1706bd37530eSRafal Jaworowski * dev.tsec.<unit>.int_coal.rx_time 1707bd37530eSRafal Jaworowski * dev.tsec.<unit>.int_coal.rx_count 1708bd37530eSRafal Jaworowski * dev.tsec.<unit>.int_coal.tx_time 1709bd37530eSRafal Jaworowski * dev.tsec.<unit>.int_coal.tx_count 1710bd37530eSRafal Jaworowski * 1711bd37530eSRafal Jaworowski * Values: 1712bd37530eSRafal Jaworowski * 1713bd37530eSRafal Jaworowski * - 0 for either time or count disables IC on the given TX/RX path 1714bd37530eSRafal Jaworowski * 1715bd37530eSRafal Jaworowski * - count: 1-255 (expresses frame count number; note that value of 1 is 1716bd37530eSRafal Jaworowski * effectively IC off) 1717bd37530eSRafal Jaworowski * 1718bd37530eSRafal Jaworowski * - time: 1-65535 (value corresponds to a real time period and is 1719bd37530eSRafal Jaworowski * expressed in units equivalent to 64 TSEC interface clocks, i.e. one timer 1720bd37530eSRafal Jaworowski * threshold unit is 25.6 us, 2.56 us, or 512 ns, corresponding to 10 Mbps, 1721bd37530eSRafal Jaworowski * 100 Mbps, or 1 Gbps, respectively). For detailed discussion consult the 1722bd37530eSRafal Jaworowski * TSEC reference manual. 1723bd37530eSRafal Jaworowski */ 1724bd37530eSRafal Jaworowski static int 1725bd37530eSRafal Jaworowski tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS) 1726bd37530eSRafal Jaworowski { 1727bd37530eSRafal Jaworowski int error; 1728bd37530eSRafal Jaworowski uint32_t time; 1729bd37530eSRafal Jaworowski struct tsec_softc *sc = (struct tsec_softc *)arg1; 1730bd37530eSRafal Jaworowski 1731bd37530eSRafal Jaworowski time = (arg2 == TSEC_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time; 1732bd37530eSRafal Jaworowski 1733bd37530eSRafal Jaworowski error = sysctl_handle_int(oidp, &time, 0, req); 1734bd37530eSRafal Jaworowski if (error != 0) 1735bd37530eSRafal Jaworowski return (error); 1736bd37530eSRafal Jaworowski 1737bd37530eSRafal Jaworowski if (time > 65535) 1738bd37530eSRafal Jaworowski return (EINVAL); 1739bd37530eSRafal Jaworowski 1740bd37530eSRafal Jaworowski TSEC_IC_LOCK(sc); 1741bd37530eSRafal Jaworowski if (arg2 == TSEC_IC_RX) { 1742bd37530eSRafal Jaworowski sc->rx_ic_time = time; 1743bd37530eSRafal Jaworowski tsec_set_rxic(sc); 1744bd37530eSRafal Jaworowski } else { 1745bd37530eSRafal Jaworowski sc->tx_ic_time = time; 1746bd37530eSRafal Jaworowski tsec_set_txic(sc); 1747bd37530eSRafal Jaworowski } 1748bd37530eSRafal Jaworowski TSEC_IC_UNLOCK(sc); 1749bd37530eSRafal Jaworowski 1750bd37530eSRafal Jaworowski return (0); 1751bd37530eSRafal Jaworowski } 1752bd37530eSRafal Jaworowski 1753bd37530eSRafal Jaworowski static int 1754bd37530eSRafal Jaworowski tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS) 1755bd37530eSRafal Jaworowski { 1756bd37530eSRafal Jaworowski int error; 1757bd37530eSRafal Jaworowski uint32_t count; 1758bd37530eSRafal Jaworowski struct tsec_softc *sc = (struct tsec_softc *)arg1; 1759bd37530eSRafal Jaworowski 1760bd37530eSRafal Jaworowski count = (arg2 == TSEC_IC_RX) ?
sc->rx_ic_count : sc->tx_ic_count; 1761bd37530eSRafal Jaworowski 1762bd37530eSRafal Jaworowski error = sysctl_handle_int(oidp, &count, 0, req); 1763bd37530eSRafal Jaworowski if (error != 0) 1764bd37530eSRafal Jaworowski return (error); 1765bd37530eSRafal Jaworowski 1766bd37530eSRafal Jaworowski if (count > 255) 1767bd37530eSRafal Jaworowski return (EINVAL); 1768bd37530eSRafal Jaworowski 1769bd37530eSRafal Jaworowski TSEC_IC_LOCK(sc); 1770bd37530eSRafal Jaworowski if (arg2 == TSEC_IC_RX) { 1771bd37530eSRafal Jaworowski sc->rx_ic_count = count; 1772bd37530eSRafal Jaworowski tsec_set_rxic(sc); 1773bd37530eSRafal Jaworowski } else { 1774bd37530eSRafal Jaworowski sc->tx_ic_count = count; 1775bd37530eSRafal Jaworowski tsec_set_txic(sc); 1776bd37530eSRafal Jaworowski } 1777bd37530eSRafal Jaworowski TSEC_IC_UNLOCK(sc); 1778bd37530eSRafal Jaworowski 1779bd37530eSRafal Jaworowski return (0); 1780bd37530eSRafal Jaworowski } 1781bd37530eSRafal Jaworowski 1782bd37530eSRafal Jaworowski static void 1783bd37530eSRafal Jaworowski tsec_set_rxic(struct tsec_softc *sc) 1784bd37530eSRafal Jaworowski { 1785bd37530eSRafal Jaworowski uint32_t rxic_val; 1786bd37530eSRafal Jaworowski 1787bd37530eSRafal Jaworowski if (sc->rx_ic_count == 0 || sc->rx_ic_time == 0) 1788bd37530eSRafal Jaworowski /* Disable RX IC */ 1789bd37530eSRafal Jaworowski rxic_val = 0; 1790bd37530eSRafal Jaworowski else { 1791bd37530eSRafal Jaworowski rxic_val = 0x80000000; 1792bd37530eSRafal Jaworowski rxic_val |= (sc->rx_ic_count << 21); 1793bd37530eSRafal Jaworowski rxic_val |= sc->rx_ic_time; 1794bd37530eSRafal Jaworowski } 1795bd37530eSRafal Jaworowski 1796bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_RXIC, rxic_val); 1797bd37530eSRafal Jaworowski } 1798bd37530eSRafal Jaworowski 1799bd37530eSRafal Jaworowski static void 1800bd37530eSRafal Jaworowski tsec_set_txic(struct tsec_softc *sc) 1801bd37530eSRafal Jaworowski { 1802bd37530eSRafal Jaworowski uint32_t txic_val; 1803bd37530eSRafal Jaworowski 1804bd37530eSRafal Jaworowski if (sc->tx_ic_count == 0 || sc->tx_ic_time == 0) 1805bd37530eSRafal Jaworowski /* Disable TX IC */ 1806bd37530eSRafal Jaworowski txic_val = 0; 1807bd37530eSRafal Jaworowski else { 1808bd37530eSRafal Jaworowski txic_val = 0x80000000; 1809bd37530eSRafal Jaworowski txic_val |= (sc->tx_ic_count << 21); 1810bd37530eSRafal Jaworowski txic_val |= sc->tx_ic_time; 1811bd37530eSRafal Jaworowski } 1812bd37530eSRafal Jaworowski 1813bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_TXIC, txic_val); 1814bd37530eSRafal Jaworowski } 1815bd37530eSRafal Jaworowski 1816bd37530eSRafal Jaworowski static void 1817bd37530eSRafal Jaworowski tsec_offload_setup(struct tsec_softc *sc) 1818bd37530eSRafal Jaworowski { 181947842ecfSJustin Hibbits if_t ifp = sc->tsec_ifp; 1820bd37530eSRafal Jaworowski uint32_t reg; 1821bd37530eSRafal Jaworowski 1822bd37530eSRafal Jaworowski TSEC_GLOBAL_LOCK_ASSERT(sc); 1823bd37530eSRafal Jaworowski 1824bd37530eSRafal Jaworowski reg = TSEC_READ(sc, TSEC_REG_TCTRL); 1825bd37530eSRafal Jaworowski reg |= TSEC_TCTRL_IPCSEN | TSEC_TCTRL_TUCSEN; 1826bd37530eSRafal Jaworowski 182747842ecfSJustin Hibbits if (if_getcapenable(ifp) & IFCAP_TXCSUM) 182847842ecfSJustin Hibbits if_sethwassist(ifp, TSEC_CHECKSUM_FEATURES); 1829bd37530eSRafal Jaworowski else 183047842ecfSJustin Hibbits if_sethwassist(ifp, 0); 1831bd37530eSRafal Jaworowski 1832bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_TCTRL, reg); 1833bd37530eSRafal Jaworowski 1834bd37530eSRafal Jaworowski reg = TSEC_READ(sc, TSEC_REG_RCTRL); 1835bd37530eSRafal Jaworowski reg 
&= ~(TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN | TSEC_RCTRL_PRSDEP); 1836bd37530eSRafal Jaworowski reg |= TSEC_RCTRL_PRSDEP_PARSE_L2 | TSEC_RCTRL_VLEX; 1837bd37530eSRafal Jaworowski 183847842ecfSJustin Hibbits if (if_getcapenable(ifp) & IFCAP_RXCSUM) 1839bd37530eSRafal Jaworowski reg |= TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN | 1840bd37530eSRafal Jaworowski TSEC_RCTRL_PRSDEP_PARSE_L234; 1841bd37530eSRafal Jaworowski 1842bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_RCTRL, reg); 1843bd37530eSRafal Jaworowski } 1844bd37530eSRafal Jaworowski 1845bd37530eSRafal Jaworowski static void 1846bd37530eSRafal Jaworowski tsec_offload_process_frame(struct tsec_softc *sc, struct mbuf *m) 1847bd37530eSRafal Jaworowski { 1848bd37530eSRafal Jaworowski struct tsec_rx_fcb rx_fcb; 1849bd37530eSRafal Jaworowski int csum_flags = 0; 1850bd37530eSRafal Jaworowski int protocol, flags; 1851bd37530eSRafal Jaworowski 1852bd37530eSRafal Jaworowski TSEC_RECEIVE_LOCK_ASSERT(sc); 1853bd37530eSRafal Jaworowski 1854bd37530eSRafal Jaworowski m_copydata(m, 0, sizeof(struct tsec_rx_fcb), (caddr_t)(&rx_fcb)); 1855bd37530eSRafal Jaworowski flags = rx_fcb.flags; 1856bd37530eSRafal Jaworowski protocol = rx_fcb.protocol; 1857bd37530eSRafal Jaworowski 1858bd37530eSRafal Jaworowski if (TSEC_RX_FCB_IP_CSUM_CHECKED(flags)) { 1859bd37530eSRafal Jaworowski csum_flags |= CSUM_IP_CHECKED; 1860bd37530eSRafal Jaworowski 1861bd37530eSRafal Jaworowski if ((flags & TSEC_RX_FCB_IP_CSUM_ERROR) == 0) 1862bd37530eSRafal Jaworowski csum_flags |= CSUM_IP_VALID; 1863bd37530eSRafal Jaworowski } 1864bd37530eSRafal Jaworowski 1865bd37530eSRafal Jaworowski if ((protocol == IPPROTO_TCP || protocol == IPPROTO_UDP) && 1866bd37530eSRafal Jaworowski TSEC_RX_FCB_TCP_UDP_CSUM_CHECKED(flags) && 1867bd37530eSRafal Jaworowski (flags & TSEC_RX_FCB_TCP_UDP_CSUM_ERROR) == 0) { 1868bd37530eSRafal Jaworowski csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1869bd37530eSRafal Jaworowski m->m_pkthdr.csum_data = 0xFFFF; 1870bd37530eSRafal Jaworowski } 1871bd37530eSRafal Jaworowski 1872bd37530eSRafal Jaworowski m->m_pkthdr.csum_flags = csum_flags; 1873bd37530eSRafal Jaworowski 1874bd37530eSRafal Jaworowski if (flags & TSEC_RX_FCB_VLAN) { 1875bd37530eSRafal Jaworowski m->m_pkthdr.ether_vtag = rx_fcb.vlan; 1876bd37530eSRafal Jaworowski m->m_flags |= M_VLANTAG; 1877bd37530eSRafal Jaworowski } 1878bd37530eSRafal Jaworowski 1879bd37530eSRafal Jaworowski m_adj(m, sizeof(struct tsec_rx_fcb)); 1880bd37530eSRafal Jaworowski } 1881bd37530eSRafal Jaworowski 18825c973840SGleb Smirnoff static u_int 18835c973840SGleb Smirnoff tsec_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 18845c973840SGleb Smirnoff { 18855c973840SGleb Smirnoff uint32_t h, *hashtable = arg; 18865c973840SGleb Smirnoff 18875c973840SGleb Smirnoff h = (ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 24) & 0xFF; 18885c973840SGleb Smirnoff hashtable[(h >> 5)] |= 1 << (0x1F - (h & 0x1F)); 18895c973840SGleb Smirnoff 18905c973840SGleb Smirnoff return (1); 18915c973840SGleb Smirnoff } 18925c973840SGleb Smirnoff 1893bd37530eSRafal Jaworowski static void 1894bd37530eSRafal Jaworowski tsec_setup_multicast(struct tsec_softc *sc) 1895bd37530eSRafal Jaworowski { 1896bd37530eSRafal Jaworowski uint32_t hashtable[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; 189747842ecfSJustin Hibbits if_t ifp = sc->tsec_ifp; 1898bd37530eSRafal Jaworowski int i; 1899bd37530eSRafal Jaworowski 1900bd37530eSRafal Jaworowski TSEC_GLOBAL_LOCK_ASSERT(sc); 1901bd37530eSRafal Jaworowski 190247842ecfSJustin Hibbits if (if_getflags(ifp) & IFF_ALLMULTI) { 
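		/*
		 * IFF_ALLMULTI: accept every multicast frame by setting all
		 * bits in the 256-bit group address hash table (the eight
		 * 32-bit TSEC_REG_GADDR(i) registers).
		 */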
1903bd37530eSRafal Jaworowski for (i = 0; i < 8; i++) 1904bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_GADDR(i), 0xFFFFFFFF); 1905bd37530eSRafal Jaworowski 1906bd37530eSRafal Jaworowski return; 1907bd37530eSRafal Jaworowski } 1908bd37530eSRafal Jaworowski 19095c973840SGleb Smirnoff if_foreach_llmaddr(ifp, tsec_hash_maddr, &hashtable); 1910bd37530eSRafal Jaworowski 1911bd37530eSRafal Jaworowski for (i = 0; i < 8; i++) 1912bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_GADDR(i), hashtable[i]); 1913bd37530eSRafal Jaworowski } 1914bd37530eSRafal Jaworowski 1915bd37530eSRafal Jaworowski static int 1916bd37530eSRafal Jaworowski tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu) 1917bd37530eSRafal Jaworowski { 1918bd37530eSRafal Jaworowski 1919bd37530eSRafal Jaworowski mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN; 1920bd37530eSRafal Jaworowski 1921bd37530eSRafal Jaworowski TSEC_GLOBAL_LOCK_ASSERT(sc); 1922bd37530eSRafal Jaworowski 1923bd37530eSRafal Jaworowski if (mtu >= TSEC_MIN_FRAME_SIZE && mtu <= TSEC_MAX_FRAME_SIZE) { 1924bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MAXFRM, mtu); 1925bd37530eSRafal Jaworowski return (mtu); 1926bd37530eSRafal Jaworowski } 1927bd37530eSRafal Jaworowski 1928bd37530eSRafal Jaworowski return (0); 1929bd37530eSRafal Jaworowski } 1930
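/*
 * Illustrative sketch, not part of the driver: a standalone, hypothetical
 * helper mirroring how tsec_set_rxic() and tsec_set_txic() above pack the
 * sysctl-controlled coalescing thresholds into the RXIC/TXIC register value
 * (enable bit 31, frame-count threshold shifted to bit 21, timer threshold
 * in the low 16 bits). The function name and the example values are
 * assumptions for illustration only, and the block is compiled out.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t
tsec_ic_pack_example(uint32_t frame_count, uint32_t timer_units)
{

	/* A zero count or zero time threshold disables coalescing. */
	if (frame_count == 0 || timer_units == 0)
		return (0);

	/* Enable bit | frame-count threshold (1-255) | timer threshold. */
	return (0x80000000 | (frame_count << 21) | timer_units);
}

int
main(void)
{

	/*
	 * Interrupt after 16 frames or 16 timer units, whichever comes
	 * first; at 1 Gbps one unit is 64 interface clocks (512 ns), so
	 * 16 units is roughly 8.2 us.
	 */
	printf("RXIC/TXIC value: 0x%08x\n", tsec_ic_pack_example(16, 16));
	return (0);
}
#endif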