/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski
 * Copyright (C) 2006-2007 Semihalf, Piotr Kruszynski
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Freescale integrated Three-Speed Ethernet Controller (TSEC) driver.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/tsec/if_tsec.h>
#include <dev/tsec/if_tsecreg.h>

static int	tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag,
    bus_dmamap_t *dmap, bus_size_t dsize, void **vaddr, void *raddr,
    const char *dname);
static void	tsec_dma_ctl(struct tsec_softc *sc, int state);
static void	tsec_encap(struct ifnet *ifp, struct tsec_softc *sc,
    struct mbuf *m0, uint16_t fcb_flags, int *start_tx);
static void	tsec_free_dma(struct tsec_softc *sc);
static void	tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr);
static int	tsec_ifmedia_upd(struct ifnet *ifp);
static void	tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int	tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mbufp, uint32_t *paddr);
static void	tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs,
    int nseg, int error);
static void	tsec_intrs_ctl(struct tsec_softc *sc, int state);
static void	tsec_init(void *xsc);
static void	tsec_init_locked(struct tsec_softc *sc);
static int	tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void	tsec_reset_mac(struct tsec_softc *sc);
static void	tsec_setfilter(struct tsec_softc *sc);
static void	tsec_set_mac_address(struct tsec_softc *sc);
static void	tsec_start(struct ifnet *ifp);
static void	tsec_start_locked(struct ifnet *ifp);
static void	tsec_stop(struct tsec_softc *sc);
static void	tsec_tick(void *arg);
static void	tsec_watchdog(struct tsec_softc *sc);
static void	tsec_add_sysctls(struct tsec_softc *sc);
static int	tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS);
static int	tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS);
static void	tsec_set_rxic(struct tsec_softc *sc);
static void	tsec_set_txic(struct tsec_softc *sc);
static int	tsec_receive_intr_locked(struct tsec_softc *sc, int count);
static void	tsec_transmit_intr_locked(struct tsec_softc *sc);
static void	tsec_error_intr_locked(struct tsec_softc *sc, int count);
static void	tsec_offload_setup(struct tsec_softc *sc);
static void	tsec_offload_process_frame(struct tsec_softc *sc,
    struct mbuf *m);
static void	tsec_setup_multicast(struct tsec_softc *sc);
static int	tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu);

devclass_t tsec_devclass;
DRIVER_MODULE(miibus, tsec, miibus_driver, 0, 0);
MODULE_DEPEND(tsec, ether, 1, 1, 1);
MODULE_DEPEND(tsec, miibus, 1, 1, 1);

struct mtx tsec_phy_mtx;

int
tsec_attach(struct tsec_softc *sc)
{
	uint8_t hwaddr[ETHER_ADDR_LEN];
	struct ifnet *ifp;
	int error = 0;
	int i;

	/* Initialize global (because potentially shared) MII lock */
	if (!mtx_initialized(&tsec_phy_mtx))
		mtx_init(&tsec_phy_mtx, "tsec mii", NULL, MTX_DEF);

	/* Reset all TSEC counters */
	TSEC_TX_RX_COUNTERS_INIT(sc);

	/* Stop DMA engine if enabled by firmware */
	tsec_dma_ctl(sc, 0);

	/* Reset MAC */
	tsec_reset_mac(sc);

	/* Disable interrupts for now */
	tsec_intrs_ctl(sc, 0);

	/* Configure defaults for interrupt coalescing */
	sc->rx_ic_time = 768;
	sc->rx_ic_count = 16;
	sc->tx_ic_time = 768;
	sc->tx_ic_count = 16;
	tsec_set_rxic(sc);
	tsec_set_txic(sc);
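	/*
	 * The interrupt coalescing defaults above are pushed to the hardware
	 * by tsec_set_rxic()/tsec_set_txic(); tsec_add_sysctls() below then
	 * exposes these values through the tsec_sysctl_ic_time() and
	 * tsec_sysctl_ic_count() handlers.
	 */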
	tsec_add_sysctls(sc);

	/* Allocate a busdma tag and DMA safe memory for TX descriptors. */
	error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_tx_dtag,
	    &sc->tsec_tx_dmap, sizeof(*sc->tsec_tx_vaddr) * TSEC_TX_NUM_DESC,
	    (void **)&sc->tsec_tx_vaddr, &sc->tsec_tx_raddr, "TX");

	if (error) {
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Allocate a busdma tag and DMA safe memory for RX descriptors. */
	error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_rx_dtag,
	    &sc->tsec_rx_dmap, sizeof(*sc->tsec_rx_vaddr) * TSEC_RX_NUM_DESC,
	    (void **)&sc->tsec_rx_vaddr, &sc->tsec_rx_raddr, "RX");
	if (error) {
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Allocate a busdma tag for TX mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    TSEC_TXBUFFER_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES * (TSEC_TX_NUM_DESC - 1),	/* maxsize */
	    TSEC_TX_MAX_DMA_SEGS,		/* nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->tsec_tx_mtag);			/* dmat */
	if (error) {
		device_printf(sc->dev, "failed to allocate busdma tag "
		    "(tx mbufs)\n");
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Allocate a busdma tag for RX mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    TSEC_RXBUFFER_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES,				/* maxsize */
	    1,					/* nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->tsec_rx_mtag);			/* dmat */
	if (error) {
		device_printf(sc->dev, "failed to allocate busdma tag "
		    "(rx mbufs)\n");
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Create TX busdma maps */
	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
		error = bus_dmamap_create(sc->tsec_tx_mtag, 0,
		    &sc->tx_bufmap[i].map);
		if (error) {
			device_printf(sc->dev, "failed to init TX ring\n");
			tsec_detach(sc);
			return (ENXIO);
		}
		sc->tx_bufmap[i].map_initialized = 1;
	}

	/* Create RX busdma maps and zero mbuf handlers */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		error = bus_dmamap_create(sc->tsec_rx_mtag, 0,
		    &sc->rx_data[i].map);
		if (error) {
			device_printf(sc->dev, "failed to init RX ring\n");
			tsec_detach(sc);
			return (ENXIO);
		}
		sc->rx_data[i].mbuf = NULL;
	}

	/* Create mbufs for RX buffers */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		error = tsec_new_rxbuf(sc->tsec_rx_mtag, sc->rx_data[i].map,
		    &sc->rx_data[i].mbuf, &sc->rx_data[i].paddr);
		if (error) {
			device_printf(sc->dev, "can't load rx DMA map %d, "
			    "error = %d\n", i, error);
			tsec_detach(sc);
			return (error);
		}
	}

	/* Create network interface for upper layers */
	ifp = sc->tsec_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc->dev, "if_alloc() failed\n");
		tsec_detach(sc);
		return (ENOMEM);
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_init = tsec_init;
	ifp->if_start = tsec_start;
	ifp->if_ioctl = tsec_ioctl;

	IFQ_SET_MAXLEN(&ifp->if_snd, TSEC_TX_NUM_DESC - 1);
	ifp->if_snd.ifq_drv_maxlen = TSEC_TX_NUM_DESC - 1;
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
	if (sc->is_etsec)
		ifp->if_capabilities |= IFCAP_HWCSUM;

	ifp->if_capenable = ifp->if_capabilities;

#ifdef DEVICE_POLLING
	/* Advertise that polling is supported */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Attach PHY(s) */
	error = mii_attach(sc->dev, &sc->tsec_miibus, ifp, tsec_ifmedia_upd,
	    tsec_ifmedia_sts, BMSR_DEFCAPMASK, sc->phyaddr, MII_OFFSET_ANY,
	    0);
	if (error) {
		device_printf(sc->dev, "attaching PHYs failed\n");
		if_free(ifp);
		sc->tsec_ifp = NULL;
		tsec_detach(sc);
		return (error);
	}
	sc->tsec_mii = device_get_softc(sc->tsec_miibus);

	/* Set MAC address */
	tsec_get_hwaddr(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);

	return (0);
}

int
tsec_detach(struct tsec_softc *sc)
{

	if (sc->tsec_ifp != NULL) {
#ifdef DEVICE_POLLING
		if (sc->tsec_ifp->if_capenable & IFCAP_POLLING)
			ether_poll_deregister(sc->tsec_ifp);
#endif

		/* Stop TSEC controller and free TX queue */
		if (sc->sc_rres)
			tsec_shutdown(sc->dev);

		/* Detach network interface */
		ether_ifdetach(sc->tsec_ifp);
		if_free(sc->tsec_ifp);
		sc->tsec_ifp = NULL;
	}

	/* Free DMA resources */
	tsec_free_dma(sc);

	return (0);
}

int
tsec_shutdown(device_t dev)
{
	struct tsec_softc *sc;

	sc = device_get_softc(dev);

	TSEC_GLOBAL_LOCK(sc);
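	/* tsec_stop() below expects the global lock to be held. */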
	tsec_stop(sc);
	TSEC_GLOBAL_UNLOCK(sc);
	return (0);
}

int
tsec_suspend(device_t dev)
{

	/* TODO not implemented! */
	return (0);
}

int
tsec_resume(device_t dev)
{

	/* TODO not implemented! */
	return (0);
}

static void
tsec_init(void *xsc)
{
	struct tsec_softc *sc = xsc;

	TSEC_GLOBAL_LOCK(sc);
	tsec_init_locked(sc);
	TSEC_GLOBAL_UNLOCK(sc);
}

static int
tsec_mii_wait(struct tsec_softc *sc, uint32_t flags)
{
	int timeout;

	/*
	 * The status indicators are not set immediately after a command.
	 * Discard the first value.
	 */
	TSEC_PHY_READ(sc, TSEC_REG_MIIMIND);

	timeout = TSEC_READ_RETRY;
	while ((TSEC_PHY_READ(sc, TSEC_REG_MIIMIND) & flags) && --timeout)
		DELAY(TSEC_READ_DELAY);

	return (timeout == 0);
}

static void
tsec_init_locked(struct tsec_softc *sc)
{
	struct tsec_desc *tx_desc = sc->tsec_tx_vaddr;
	struct tsec_desc *rx_desc = sc->tsec_rx_vaddr;
	struct ifnet *ifp = sc->tsec_ifp;
	uint32_t val, i;
	int timeout;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	TSEC_GLOBAL_LOCK_ASSERT(sc);
	tsec_stop(sc);

	/*
	 * These steps are according to the MPC8555E PowerQUICCIII RM:
	 * 14.7 Initialization/Application Information
	 */

	/* Step 1: soft reset MAC */
	tsec_reset_mac(sc);

	/* Step 2: Initialize MACCFG2 */
	TSEC_WRITE(sc, TSEC_REG_MACCFG2,
	    TSEC_MACCFG2_FULLDUPLEX |	/* Full Duplex = 1 */
	    TSEC_MACCFG2_PADCRC |	/* PAD/CRC append */
	    TSEC_MACCFG2_GMII |		/* I/F Mode bit */
	    TSEC_MACCFG2_PRECNT		/* Preamble count = 7 */
	);

	/* Step 3: Initialize ECNTRL
	 * While the documentation states that R100M is ignored if RPM is
	 * not set, it does seem to be needed to get the orange boxes to
	 * work (which have a Marvell 88E1111 PHY). Go figure.
	 */

	/*
	 * XXX kludge - use circumstantial evidence to program ECNTRL
	 * correctly. Ideally we need some board information to guide
	 * us here.
	 */
	i = TSEC_READ(sc, TSEC_REG_ID2);
	val = (i & 0xffff)
	    ? (TSEC_ECNTRL_TBIM | TSEC_ECNTRL_SGMIIM)	/* Sumatra */
	    : TSEC_ECNTRL_R100M;			/* Orange + CDS */
	TSEC_WRITE(sc, TSEC_REG_ECNTRL, TSEC_ECNTRL_STEN | val);

	/* Step 4: Initialize MAC station address */
	tsec_set_mac_address(sc);

	/*
	 * Step 5: Assign a Physical address to the TBI so as to not conflict
	 * with the external PHY physical address
	 */
	TSEC_WRITE(sc, TSEC_REG_TBIPA, 5);

	TSEC_PHY_LOCK(sc);

	/* Step 6: Reset the management interface */
	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_RESETMGMT);

	/* Step 7: Setup the MII Mgmt clock speed */
	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_CLKDIV28);

	/* Step 8: Read MII Mgmt indicator register and check for Busy = 0 */
	timeout = tsec_mii_wait(sc, TSEC_MIIMIND_BUSY);

	TSEC_PHY_UNLOCK(sc);
	if (timeout) {
		if_printf(ifp, "tsec_init_locked(): Mgmt busy timeout\n");
		return;
	}

	/* Step 9: Setup the MII Mgmt */
	mii_mediachg(sc->tsec_mii);

	/* Step 10: Clear IEVENT register */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, 0xffffffff);

	/* Step 11: Enable interrupts */
#ifdef DEVICE_POLLING
	/*
	 * ...only if polling is not turned on. Disable interrupts explicitly
	 * if polling is enabled.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		tsec_intrs_ctl(sc, 0);
	else
#endif /* DEVICE_POLLING */
	tsec_intrs_ctl(sc, 1);

	/* Step 12: Initialize IADDRn */
	TSEC_WRITE(sc, TSEC_REG_IADDR0, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR1, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR2, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR3, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR4, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR5, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR6, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR7, 0);

	/* Step 13: Initialize GADDRn */
	TSEC_WRITE(sc, TSEC_REG_GADDR0, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR1, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR2, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR3, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR4, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR5, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR6, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR7, 0);

	/* Step 14: Initialize RCTRL */
	TSEC_WRITE(sc, TSEC_REG_RCTRL, 0);

	/* Step 15: Initialize DMACTRL */
	tsec_dma_ctl(sc, 1);

	/* Step 16: Initialize FIFO_PAUSE_CTRL */
	TSEC_WRITE(sc, TSEC_REG_FIFO_PAUSE_CTRL, TSEC_FIFO_PAUSE_CTRL_EN);

	/*
	 * Step 17: Initialize transmit/receive descriptor rings.
	 * Initialize TBASE and RBASE.
	 */
	TSEC_WRITE(sc, TSEC_REG_TBASE, sc->tsec_tx_raddr);
	TSEC_WRITE(sc, TSEC_REG_RBASE, sc->tsec_rx_raddr);

	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
		tx_desc[i].bufptr = 0;
		tx_desc[i].length = 0;
		tx_desc[i].flags = ((i == TSEC_TX_NUM_DESC - 1) ?
		    TSEC_TXBD_W : 0);
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		rx_desc[i].bufptr = sc->rx_data[i].paddr;
		rx_desc[i].length = 0;
		rx_desc[i].flags = TSEC_RXBD_E | TSEC_RXBD_I |
		    ((i == TSEC_RX_NUM_DESC - 1) ? TSEC_RXBD_W : 0);
	}
	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Step 18: Initialize the maximum receive buffer length */
	TSEC_WRITE(sc, TSEC_REG_MRBLR, MCLBYTES);

	/* Step 19: Configure ethernet frame sizes */
	TSEC_WRITE(sc, TSEC_REG_MINFLR, TSEC_MIN_FRAME_SIZE);
	tsec_set_mtu(sc, ifp->if_mtu);

	/* Step 20: Enable Rx and RxBD data snooping */
	TSEC_WRITE(sc, TSEC_REG_ATTR, TSEC_ATTR_RDSEN | TSEC_ATTR_RBDSEN);
	TSEC_WRITE(sc, TSEC_REG_ATTRELI, 0);

	/* Step 21: Reset collision counters in hardware */
	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);

	/* Step 22: Mask all CAM interrupts */
	TSEC_WRITE(sc, TSEC_REG_MON_CAM1, 0xffffffff);
	TSEC_WRITE(sc, TSEC_REG_MON_CAM2, 0xffffffff);

	/* Step 23: Enable Rx and Tx */
	val = TSEC_READ(sc, TSEC_REG_MACCFG1);
	val |= (TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, val);

	/* Step 24: Reset TSEC counters for Tx and Rx rings */
	TSEC_TX_RX_COUNTERS_INIT(sc);

	/* Step 25: Setup TCP/IP Off-Load engine */
	if (sc->is_etsec)
		tsec_offload_setup(sc);

	/* Step 26: Setup multicast filters */
	tsec_setup_multicast(sc);

	/* Step 27: Activate network interface */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->tsec_if_flags = ifp->if_flags;
	sc->tsec_watchdog = 0;

	/* Schedule watchdog timeout */
	callout_reset(&sc->tsec_callout, hz, tsec_tick, sc);
}

static void
tsec_set_mac_address(struct tsec_softc *sc)
{
	uint32_t macbuf[2] = { 0, 0 };
	char *macbufp, *curmac;
	int i;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	KASSERT((ETHER_ADDR_LEN <= sizeof(macbuf)),
	    ("tsec_set_mac_address: (%d <= %zd)", ETHER_ADDR_LEN,
	    sizeof(macbuf)));

	macbufp = (char *)macbuf;
	curmac = (char *)IF_LLADDR(sc->tsec_ifp);

	/* Correct order of MAC address bytes */
	for (i = 1; i <= ETHER_ADDR_LEN; i++)
		macbufp[ETHER_ADDR_LEN-i] = curmac[i-1];

	/* Initialize MAC station address MACSTNADDR2 and MACSTNADDR1 */
	TSEC_WRITE(sc, TSEC_REG_MACSTNADDR2, macbuf[1]);
	TSEC_WRITE(sc, TSEC_REG_MACSTNADDR1, macbuf[0]);
}

/*
 * DMA control function, if argument state is:
 * 0 - DMA engine will be disabled
 * 1 - DMA engine will be enabled
 */
static void
tsec_dma_ctl(struct tsec_softc *sc, int state)
{
	device_t dev;
	uint32_t dma_flags, timeout;

	dev = sc->dev;

	dma_flags = TSEC_READ(sc, TSEC_REG_DMACTRL);

	switch (state) {
	case 0:
		/* Temporarily clear the graceful stop bits. */
		tsec_dma_ctl(sc, 1000);

		/* Set it again */
		dma_flags |= (TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
		break;
	/*
	 * The internal '1000' state shares the setup below with state 1 but
	 * matches neither case of the second switch, so it only reprograms
	 * DMACTRL (clearing the graceful stop bits) without waiting for an
	 * event or touching TSTAT.
	 */
	case 1000:
	case 1:
		/* Set write with response (WWR), wait (WOP) and snoop bits */
		dma_flags |= (TSEC_DMACTRL_TDSEN | TSEC_DMACTRL_TBDSEN |
		    DMACTRL_WWR | DMACTRL_WOP);

		/* Clear graceful stop bits */
		dma_flags &= ~(TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
		break;
	default:
		device_printf(dev, "tsec_dma_ctl(): unknown state value: %d\n",
		    state);
	}

	TSEC_WRITE(sc, TSEC_REG_DMACTRL, dma_flags);

	switch (state) {
	case 0:
		/* Wait for DMA stop */
		timeout = TSEC_READ_RETRY;
		while (--timeout && (!(TSEC_READ(sc, TSEC_REG_IEVENT) &
		    (TSEC_IEVENT_GRSC | TSEC_IEVENT_GTSC))))
			DELAY(TSEC_READ_DELAY);

		if (timeout == 0)
			device_printf(dev, "tsec_dma_ctl(): timeout!\n");
		break;
	case 1:
		/* Restart transmission function */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
	}
}

/*
 * Interrupts control function, if argument state is:
 * 0 - all TSEC interrupts will be masked
 * 1 - all TSEC interrupts will be unmasked
 */
static void
tsec_intrs_ctl(struct tsec_softc *sc, int state)
{
	device_t dev;

	dev = sc->dev;

	switch (state) {
	case 0:
		TSEC_WRITE(sc, TSEC_REG_IMASK, 0);
		break;
	case 1:
		TSEC_WRITE(sc, TSEC_REG_IMASK, TSEC_IMASK_BREN |
		    TSEC_IMASK_RXCEN | TSEC_IMASK_BSYEN | TSEC_IMASK_EBERREN |
		    TSEC_IMASK_BTEN | TSEC_IMASK_TXEEN | TSEC_IMASK_TXBEN |
		    TSEC_IMASK_TXFEN | TSEC_IMASK_XFUNEN | TSEC_IMASK_RXFEN);
		break;
	default:
		device_printf(dev, "tsec_intrs_ctl(): unknown state value: %d\n",
		    state);
	}
}

static void
tsec_reset_mac(struct tsec_softc *sc)
{
	uint32_t maccfg1_flags;

	/* Set soft reset bit */
	maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
	maccfg1_flags |= TSEC_MACCFG1_SOFT_RESET;
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);

	/* Clear soft reset bit */
	maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
	maccfg1_flags &= ~TSEC_MACCFG1_SOFT_RESET;
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);
}

static void
tsec_watchdog(struct tsec_softc *sc)
{
	struct ifnet *ifp;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	if (sc->tsec_watchdog == 0 || --sc->tsec_watchdog > 0)
		return;

	ifp = sc->tsec_ifp;
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_printf(ifp, "watchdog timeout\n");

	tsec_stop(sc);
	tsec_init_locked(sc);
}

static void
tsec_start(struct ifnet *ifp)
{
	struct tsec_softc *sc = ifp->if_softc;

	TSEC_TRANSMIT_LOCK(sc);

	tsec_start_locked(ifp);
	TSEC_TRANSMIT_UNLOCK(sc);
}

static void
tsec_start_locked(struct ifnet *ifp)
{
	struct tsec_softc *sc;
	struct mbuf *m0;
	struct tsec_tx_fcb *tx_fcb;
	int csum_flags;
	int start_tx;
	uint16_t fcb_flags;

	sc = ifp->if_softc;
	start_tx = 0;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	if (sc->tsec_link == 0)
		return;

	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		if (TSEC_FREE_TX_DESC(sc) < TSEC_TX_MAX_DMA_SEGS) {
			/* No free descriptors */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/* Get packet from the queue */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/* Insert TCP/IP Off-load frame control block */
		fcb_flags = 0;
		csum_flags = m0->m_pkthdr.csum_flags;
		if (csum_flags) {
			M_PREPEND(m0, sizeof(struct tsec_tx_fcb), M_NOWAIT);
			if (m0 == NULL)
				break;

			if (csum_flags & CSUM_IP)
				fcb_flags |= TSEC_TX_FCB_IP4 |
				    TSEC_TX_FCB_CSUM_IP;

			if (csum_flags & CSUM_TCP)
				fcb_flags |= TSEC_TX_FCB_TCP |
				    TSEC_TX_FCB_CSUM_TCP_UDP;

			if (csum_flags & CSUM_UDP)
				fcb_flags |= TSEC_TX_FCB_UDP |
				    TSEC_TX_FCB_CSUM_TCP_UDP;

			tx_fcb = mtod(m0, struct tsec_tx_fcb *);
			tx_fcb->flags = fcb_flags;
			tx_fcb->l3_offset = ETHER_HDR_LEN;
			tx_fcb->l4_offset = sizeof(struct ip);
		}

		tsec_encap(ifp, sc, m0, fcb_flags, &start_tx);
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (start_tx) {
		/* Enable transmitter and watchdog timer */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
		sc->tsec_watchdog = 5;
	}
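	/*
	 * Writing TSEC_TSTAT_THLT above clears the transmit halt condition so
	 * the controller resumes processing TX descriptors; the watchdog
	 * counter armed here is decremented by tsec_watchdog() and forces a
	 * stop/reinit cycle if it ever expires.
	 */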
}

static void
tsec_encap(struct ifnet *ifp, struct tsec_softc *sc, struct mbuf *m0,
    uint16_t fcb_flags, int *start_tx)
{
	bus_dma_segment_t segs[TSEC_TX_MAX_DMA_SEGS];
	int error, i, nsegs;
	struct tsec_bufmap *tx_bufmap;
	uint32_t tx_idx;
	uint16_t flags;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	tx_idx = sc->tx_idx_head;
	tx_bufmap = &sc->tx_bufmap[tx_idx];

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag, tx_bufmap->map, m0,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments! Defrag and try again. */
		struct mbuf *m = m_defrag(m0, M_NOWAIT);

		if (m == NULL) {
			m_freem(m0);
			return;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag,
		    tx_bufmap->map, m0, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	if (error != 0) {
		/* Give up. */
		m_freem(m0);
		return;
	}

	bus_dmamap_sync(sc->tsec_tx_mtag, tx_bufmap->map,
	    BUS_DMASYNC_PREWRITE);
	tx_bufmap->mbuf = m0;

	/*
	 * Fill in the TX descriptors back to front so that READY bit in first
	 * descriptor is set last.
	 */
	tx_idx = (tx_idx + (uint32_t)nsegs) & (TSEC_TX_NUM_DESC - 1);
	sc->tx_idx_head = tx_idx;
	flags = TSEC_TXBD_L | TSEC_TXBD_I | TSEC_TXBD_R | TSEC_TXBD_TC;
	for (i = nsegs - 1; i >= 0; i--) {
		struct tsec_desc *tx_desc;

		tx_idx = (tx_idx - 1) & (TSEC_TX_NUM_DESC - 1);
		tx_desc = &sc->tsec_tx_vaddr[tx_idx];
		tx_desc->length = segs[i].ds_len;
		tx_desc->bufptr = segs[i].ds_addr;

		if (i == 0) {
			wmb();

			if (fcb_flags != 0)
				flags |= TSEC_TXBD_TOE;
		}

		/*
		 * Set flags:
		 *   - wrap
		 *   - checksum
		 *   - ready to send
		 *   - transmit the CRC sequence after the last data byte
		 *   - interrupt after the last buffer
		 */
		tx_desc->flags = (tx_idx == (TSEC_TX_NUM_DESC - 1) ?
		    TSEC_TXBD_W : 0) | flags;

		flags &= ~(TSEC_TXBD_L | TSEC_TXBD_I);
	}

	BPF_MTAP(ifp, m0);
	*start_tx = 1;
}

static void
tsec_setfilter(struct tsec_softc *sc)
{
	struct ifnet *ifp;
	uint32_t flags;

	ifp = sc->tsec_ifp;
	flags = TSEC_READ(sc, TSEC_REG_RCTRL);

	/* Promiscuous mode */
	if (ifp->if_flags & IFF_PROMISC)
		flags |= TSEC_RCTRL_PROM;
	else
		flags &= ~TSEC_RCTRL_PROM;

	TSEC_WRITE(sc, TSEC_REG_RCTRL, flags);
}

#ifdef DEVICE_POLLING
static poll_handler_t tsec_poll;

static int
tsec_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	uint32_t ie;
	struct tsec_softc *sc = ifp->if_softc;
	int rx_npkts;

	rx_npkts = 0;

	TSEC_GLOBAL_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		TSEC_GLOBAL_UNLOCK(sc);
		return (rx_npkts);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		tsec_error_intr_locked(sc, count);

		/* Clear all events reported */
		ie = TSEC_READ(sc, TSEC_REG_IEVENT);
		TSEC_WRITE(sc, TSEC_REG_IEVENT, ie);
	}

	tsec_transmit_intr_locked(sc);

	TSEC_GLOBAL_TO_RECEIVE_LOCK(sc);

	rx_npkts = tsec_receive_intr_locked(sc, count);

	TSEC_RECEIVE_UNLOCK(sc);

	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static int
tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error = 0;

	switch (command) {
	case SIOCSIFMTU:
		TSEC_GLOBAL_LOCK(sc);
		if (tsec_set_mtu(sc, ifr->ifr_mtu))
			ifp->if_mtu = ifr->ifr_mtu;
		else
			error = EINVAL;
		TSEC_GLOBAL_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		TSEC_GLOBAL_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((sc->tsec_if_flags ^ ifp->if_flags) &
				    IFF_PROMISC)
					tsec_setfilter(sc);

				if ((sc->tsec_if_flags ^ ifp->if_flags) &
				    IFF_ALLMULTI)
					tsec_setup_multicast(sc);
			} else
				tsec_init_locked(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			tsec_stop(sc);

		sc->tsec_if_flags = ifp->if_flags;
		TSEC_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			TSEC_GLOBAL_LOCK(sc);
			tsec_setup_multicast(sc);
			TSEC_GLOBAL_UNLOCK(sc);
		}
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->tsec_mii->mii_media,
		    command);
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if ((mask & IFCAP_HWCSUM) && sc->is_etsec) {
			TSEC_GLOBAL_LOCK(sc);
			ifp->if_capenable &= ~IFCAP_HWCSUM;
			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
			tsec_offload_setup(sc);
			TSEC_GLOBAL_UNLOCK(sc);
		}
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(tsec_poll, ifp);
				if (error)
					return (error);

				TSEC_GLOBAL_LOCK(sc);
				/* Disable interrupts */
				tsec_intrs_ctl(sc, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				TSEC_GLOBAL_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				TSEC_GLOBAL_LOCK(sc);
				/* Enable interrupts */
				tsec_intrs_ctl(sc, 1);
				ifp->if_capenable &= ~IFCAP_POLLING;
				TSEC_GLOBAL_UNLOCK(sc);
			}
		}
#endif
		break;

	default:
		error = ether_ioctl(ifp, command, data);
	}

	/* Flush buffers if not empty */
	if (ifp->if_flags & IFF_UP)
		tsec_start(ifp);
	return (error);
}

static int
tsec_ifmedia_upd(struct ifnet *ifp)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	TSEC_TRANSMIT_LOCK(sc);

	mii = sc->tsec_mii;
	mii_mediachg(mii);

	TSEC_TRANSMIT_UNLOCK(sc);
	return (0);
}

static void
tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	TSEC_TRANSMIT_LOCK(sc);

	mii = sc->tsec_mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	TSEC_TRANSMIT_UNLOCK(sc);
}

static int
tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    uint32_t *paddr)
{
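	/*
	 * Allocate and DMA-map a fresh receive cluster: the previously loaded
	 * buffer (if any) is synced and unloaded from the map first, and the
	 * bus address of the new cluster's single segment is returned through
	 * *paddr.
	 */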
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	int error, nsegs;

	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

	new_mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MCLBYTES);
	if (new_mbuf == NULL)
		return (ENOBUFS);
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
	    BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (nsegs != 1 || error)
		panic("tsec_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);

#if 0
	if (error) {
		printf("tsec: bus_dmamap_load_mbuf_sg() returned: %d!\n",
		    error);
		m_freem(new_mbuf);
		return (ENOBUFS);
	}
#endif

#if 0
	KASSERT(((seg->ds_addr) & (TSEC_RXBUFFER_ALIGNMENT-1)) == 0,
	    ("Wrong alignment of RX buffer!"));
#endif
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;
	return (0);
}

static void
tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;
	*paddr = segs->ds_addr;
}

static int
tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag, bus_dmamap_t *dmap,
    bus_size_t dsize, void **vaddr, void *raddr, const char *dname)
{
	int error;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    PAGE_SIZE, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    dsize, 1,				/* maxsize, nsegments */
	    dsize, 0,				/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    dtag);				/* dmat */

	if (error) {
		device_printf(dev, "failed to allocate busdma %s tag\n",
		    dname);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	error = bus_dmamem_alloc(*dtag, vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    dmap);
	if (error) {
		device_printf(dev, "failed to allocate %s DMA safe memory\n",
		    dname);
		bus_dma_tag_destroy(*dtag);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	error = bus_dmamap_load(*dtag, *dmap, *vaddr, dsize,
	    tsec_map_dma_addr, raddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev, "cannot get address of the %s "
		    "descriptors\n", dname);
		bus_dmamem_free(*dtag, *vaddr, *dmap);
		bus_dma_tag_destroy(*dtag);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	return (0);
}

static void
tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr)
{

	if (vaddr == NULL)
		return;

	/* Unmap descriptors from DMA memory */
	bus_dmamap_sync(dtag, dmap, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dtag, dmap);

	/* Free descriptors memory */
	bus_dmamem_free(dtag, vaddr, dmap);

	/* Destroy descriptors tag */
	bus_dma_tag_destroy(dtag);
}

static void
tsec_free_dma(struct tsec_softc *sc)
{
	int i;

	/* Free TX maps */
	for (i = 0; i < TSEC_TX_NUM_DESC; i++)
		if (sc->tx_bufmap[i].map_initialized)
			bus_dmamap_destroy(sc->tsec_tx_mtag,
			    sc->tx_bufmap[i].map);
	/* Destroy tag for TX mbufs */
	bus_dma_tag_destroy(sc->tsec_tx_mtag);

	/* Free RX mbufs and maps */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		if (sc->rx_data[i].mbuf) {
			/* Unload buffer from DMA */
			bus_dmamap_sync(sc->tsec_rx_mtag, sc->rx_data[i].map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->tsec_rx_mtag,
			    sc->rx_data[i].map);

			/* Free buffer */
			m_freem(sc->rx_data[i].mbuf);
		}
		/* Destroy map for this buffer */
		if (sc->rx_data[i].map != NULL)
			bus_dmamap_destroy(sc->tsec_rx_mtag,
			    sc->rx_data[i].map);
	}
	/* Destroy tag for RX mbufs */
	bus_dma_tag_destroy(sc->tsec_rx_mtag);

	/* Unload TX/RX descriptors */
	tsec_free_dma_desc(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    sc->tsec_tx_vaddr);
	tsec_free_dma_desc(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    sc->tsec_rx_vaddr);
}

static void
tsec_stop(struct tsec_softc *sc)
{
	struct ifnet *ifp;
	uint32_t tmpval;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	/* Disable interface and watchdog timer */
	callout_stop(&sc->tsec_callout);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->tsec_watchdog = 0;

	/* Disable all interrupts and stop DMA */
	tsec_intrs_ctl(sc, 0);
	tsec_dma_ctl(sc, 0);

	/* Remove pending data from TX queue */
	while (sc->tx_idx_tail != sc->tx_idx_head) {
		bus_dmamap_sync(sc->tsec_tx_mtag,
		    sc->tx_bufmap[sc->tx_idx_tail].map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tsec_tx_mtag,
		    sc->tx_bufmap[sc->tx_idx_tail].map);
		m_freem(sc->tx_bufmap[sc->tx_idx_tail].mbuf);
		sc->tx_idx_tail = (sc->tx_idx_tail + 1)
		    & (TSEC_TX_NUM_DESC - 1);
	}

	/* Disable RX and TX */
	tmpval = TSEC_READ(sc,
TSEC_REG_MACCFG1); 124767196661SRafal Jaworowski tmpval &= ~(TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN); 124867196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MACCFG1, tmpval); 124967196661SRafal Jaworowski DELAY(10); 125067196661SRafal Jaworowski } 125167196661SRafal Jaworowski 1252bd37530eSRafal Jaworowski static void 1253bd37530eSRafal Jaworowski tsec_tick(void *arg) 125467196661SRafal Jaworowski { 125567196661SRafal Jaworowski struct tsec_softc *sc = arg; 1256bd37530eSRafal Jaworowski struct ifnet *ifp; 1257bd37530eSRafal Jaworowski int link; 1258bd37530eSRafal Jaworowski 1259bd37530eSRafal Jaworowski TSEC_GLOBAL_LOCK(sc); 1260bd37530eSRafal Jaworowski 1261bd37530eSRafal Jaworowski tsec_watchdog(sc); 1262bd37530eSRafal Jaworowski 1263bd37530eSRafal Jaworowski ifp = sc->tsec_ifp; 1264bd37530eSRafal Jaworowski link = sc->tsec_link; 1265bd37530eSRafal Jaworowski 1266bd37530eSRafal Jaworowski mii_tick(sc->tsec_mii); 1267bd37530eSRafal Jaworowski 1268bd37530eSRafal Jaworowski if (link == 0 && sc->tsec_link == 1 && 1269bd37530eSRafal Jaworowski (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))) 1270bd37530eSRafal Jaworowski tsec_start_locked(ifp); 1271bd37530eSRafal Jaworowski 1272bd37530eSRafal Jaworowski /* Schedule another timeout one second from now. */ 1273bd37530eSRafal Jaworowski callout_reset(&sc->tsec_callout, hz, tsec_tick, sc); 1274bd37530eSRafal Jaworowski 1275bd37530eSRafal Jaworowski TSEC_GLOBAL_UNLOCK(sc); 1276bd37530eSRafal Jaworowski } 1277bd37530eSRafal Jaworowski 1278bd37530eSRafal Jaworowski /* 1279bd37530eSRafal Jaworowski * This is the core RX routine. It replenishes mbufs in the descriptor and 1280bd37530eSRafal Jaworowski * sends data which have been dma'ed into host memory to upper layer. 1281bd37530eSRafal Jaworowski * 1282bd37530eSRafal Jaworowski * Loops at most count times if count is > 0, or until done if count < 0. 
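 *
 * The interrupt path below calls tsec_receive_intr_locked(sc, -1) to drain
 * everything that is ready; a polling handler (tsec_poll, registered when
 * IFCAP_POLLING is enabled) would instead pass its budget as count and use
 * the return value, i.e. the number of frames handed to if_input(), as its
 * rx_npkts result.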
1283bd37530eSRafal Jaworowski */ 12841abcdbd1SAttilio Rao static int 1285bd37530eSRafal Jaworowski tsec_receive_intr_locked(struct tsec_softc *sc, int count) 1286bd37530eSRafal Jaworowski { 128767196661SRafal Jaworowski struct tsec_desc *rx_desc; 128867196661SRafal Jaworowski struct ifnet *ifp; 128967196661SRafal Jaworowski struct rx_data_type *rx_data; 129067196661SRafal Jaworowski struct mbuf *m; 129167196661SRafal Jaworowski uint32_t i; 12921abcdbd1SAttilio Rao int c, rx_npkts; 129367196661SRafal Jaworowski uint16_t flags; 1294bd37530eSRafal Jaworowski 1295bd37530eSRafal Jaworowski TSEC_RECEIVE_LOCK_ASSERT(sc); 129667196661SRafal Jaworowski 129767196661SRafal Jaworowski ifp = sc->tsec_ifp; 129867196661SRafal Jaworowski rx_data = sc->rx_data; 12991abcdbd1SAttilio Rao rx_npkts = 0; 130067196661SRafal Jaworowski 1301bd37530eSRafal Jaworowski bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, 1302bd37530eSRafal Jaworowski BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 130367196661SRafal Jaworowski 1304bd37530eSRafal Jaworowski for (c = 0; ; c++) { 1305bd37530eSRafal Jaworowski if (count >= 0 && count-- == 0) 1306bd37530eSRafal Jaworowski break; 130767196661SRafal Jaworowski 130867196661SRafal Jaworowski rx_desc = TSEC_GET_CUR_RX_DESC(sc); 130967196661SRafal Jaworowski flags = rx_desc->flags; 131067196661SRafal Jaworowski 131167196661SRafal Jaworowski /* Check if there is anything to receive */ 1312bd37530eSRafal Jaworowski if ((flags & TSEC_RXBD_E) || (c >= TSEC_RX_NUM_DESC)) { 131367196661SRafal Jaworowski /* 131467196661SRafal Jaworowski * Avoid generating another interrupt 131567196661SRafal Jaworowski */ 131667196661SRafal Jaworowski if (flags & TSEC_RXBD_E) 131767196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_IEVENT, 131867196661SRafal Jaworowski TSEC_IEVENT_RXB | TSEC_IEVENT_RXF); 131967196661SRafal Jaworowski /* 132067196661SRafal Jaworowski * We didn't consume current descriptor and have to 132167196661SRafal Jaworowski * return it to the queue 132267196661SRafal Jaworowski */ 132367196661SRafal Jaworowski TSEC_BACK_CUR_RX_DESC(sc); 132467196661SRafal Jaworowski break; 132567196661SRafal Jaworowski } 132667196661SRafal Jaworowski 132767196661SRafal Jaworowski if (flags & (TSEC_RXBD_LG | TSEC_RXBD_SH | TSEC_RXBD_NO | 132867196661SRafal Jaworowski TSEC_RXBD_CR | TSEC_RXBD_OV | TSEC_RXBD_TR)) { 132967196661SRafal Jaworowski rx_desc->length = 0; 1330bd37530eSRafal Jaworowski rx_desc->flags = (rx_desc->flags & 1331bd37530eSRafal Jaworowski ~TSEC_RXBD_ZEROONINIT) | TSEC_RXBD_E | TSEC_RXBD_I; 1332bd37530eSRafal Jaworowski 1333bd37530eSRafal Jaworowski if (sc->frame != NULL) { 1334bd37530eSRafal Jaworowski m_free(sc->frame); 1335bd37530eSRafal Jaworowski sc->frame = NULL; 1336bd37530eSRafal Jaworowski } 1337bd37530eSRafal Jaworowski 133867196661SRafal Jaworowski continue; 133967196661SRafal Jaworowski } 134067196661SRafal Jaworowski 134167196661SRafal Jaworowski /* Ok... 
process frame */ 134267196661SRafal Jaworowski i = TSEC_GET_CUR_RX_DESC_CNT(sc); 134367196661SRafal Jaworowski m = rx_data[i].mbuf; 1344bd37530eSRafal Jaworowski m->m_len = rx_desc->length; 1345bd37530eSRafal Jaworowski 1346bd37530eSRafal Jaworowski if (sc->frame != NULL) { 1347bd37530eSRafal Jaworowski if ((flags & TSEC_RXBD_L) != 0) 1348bd37530eSRafal Jaworowski m->m_len -= m_length(sc->frame, NULL); 1349bd37530eSRafal Jaworowski 1350bd37530eSRafal Jaworowski m->m_flags &= ~M_PKTHDR; 1351bd37530eSRafal Jaworowski m_cat(sc->frame, m); 1352bd37530eSRafal Jaworowski } else { 1353bd37530eSRafal Jaworowski sc->frame = m; 1354bd37530eSRafal Jaworowski } 1355bd37530eSRafal Jaworowski 1356bd37530eSRafal Jaworowski m = NULL; 1357bd37530eSRafal Jaworowski 1358bd37530eSRafal Jaworowski if ((flags & TSEC_RXBD_L) != 0) { 1359bd37530eSRafal Jaworowski m = sc->frame; 1360bd37530eSRafal Jaworowski sc->frame = NULL; 1361bd37530eSRafal Jaworowski } 136267196661SRafal Jaworowski 136367196661SRafal Jaworowski if (tsec_new_rxbuf(sc->tsec_rx_mtag, rx_data[i].map, 136467196661SRafal Jaworowski &rx_data[i].mbuf, &rx_data[i].paddr)) { 13652c0dbbcbSJustin Hibbits if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1366ab160495SRafal Jaworowski /* 1367ab160495SRafal Jaworowski * We ran out of mbufs; didn't consume current 1368ab160495SRafal Jaworowski * descriptor and have to return it to the queue. 1369ab160495SRafal Jaworowski */ 1370ab160495SRafal Jaworowski TSEC_BACK_CUR_RX_DESC(sc); 1371ab160495SRafal Jaworowski break; 137267196661SRafal Jaworowski } 1373bd37530eSRafal Jaworowski 1374bd37530eSRafal Jaworowski /* Attach new buffer to descriptor and clear flags */ 137567196661SRafal Jaworowski rx_desc->bufptr = rx_data[i].paddr; 137667196661SRafal Jaworowski rx_desc->length = 0; 137767196661SRafal Jaworowski rx_desc->flags = (rx_desc->flags & ~TSEC_RXBD_ZEROONINIT) | 137867196661SRafal Jaworowski TSEC_RXBD_E | TSEC_RXBD_I; 137967196661SRafal Jaworowski 1380bd37530eSRafal Jaworowski if (m != NULL) { 138167196661SRafal Jaworowski m->m_pkthdr.rcvif = ifp; 138267196661SRafal Jaworowski 1383bd37530eSRafal Jaworowski m_fixhdr(m); 1384bd37530eSRafal Jaworowski m_adj(m, -ETHER_CRC_LEN); 138567196661SRafal Jaworowski 1386bd37530eSRafal Jaworowski if (sc->is_etsec) 1387bd37530eSRafal Jaworowski tsec_offload_process_frame(sc, m); 138867196661SRafal Jaworowski 138967196661SRafal Jaworowski TSEC_RECEIVE_UNLOCK(sc); 1390bd37530eSRafal Jaworowski (*ifp->if_input)(ifp, m); 1391bd37530eSRafal Jaworowski TSEC_RECEIVE_LOCK(sc); 13921abcdbd1SAttilio Rao rx_npkts++; 1393bd37530eSRafal Jaworowski } 1394bd37530eSRafal Jaworowski } 139567196661SRafal Jaworowski 1396bd37530eSRafal Jaworowski bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, 1397bd37530eSRafal Jaworowski BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1398371bf7ccSRafal Jaworowski 1399371bf7ccSRafal Jaworowski /* 1400371bf7ccSRafal Jaworowski * Make sure TSEC receiver is not halted. 1401371bf7ccSRafal Jaworowski * 1402371bf7ccSRafal Jaworowski * Various conditions can stop the TSEC receiver, but not all are 1403371bf7ccSRafal Jaworowski * signaled and handled by error interrupt, so make sure the receiver 1404371bf7ccSRafal Jaworowski * is running. Writing to TSEC_REG_RSTAT restarts the receiver when 1405371bf7ccSRafal Jaworowski * halted, and is harmless if already running. 
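 * The transmit side is handled analogously: tsec_error_intr_locked() below
 * rewrites TSEC_REG_TSTAT with TSEC_TSTAT_THLT after a TX error is
 * signaled, restarting a halted transmitter.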
1406371bf7ccSRafal Jaworowski */ 1407371bf7ccSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_RSTAT, TSEC_RSTAT_QHLT); 14081abcdbd1SAttilio Rao return (rx_npkts); 140967196661SRafal Jaworowski } 141067196661SRafal Jaworowski 1411321e12c8SRafal Jaworowski void 1412bd37530eSRafal Jaworowski tsec_receive_intr(void *arg) 141367196661SRafal Jaworowski { 141467196661SRafal Jaworowski struct tsec_softc *sc = arg; 1415bd37530eSRafal Jaworowski 1416bd37530eSRafal Jaworowski TSEC_RECEIVE_LOCK(sc); 1417bd37530eSRafal Jaworowski 1418bd37530eSRafal Jaworowski #ifdef DEVICE_POLLING 1419bd37530eSRafal Jaworowski if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) { 1420bd37530eSRafal Jaworowski TSEC_RECEIVE_UNLOCK(sc); 1421bd37530eSRafal Jaworowski return; 1422bd37530eSRafal Jaworowski } 1423bd37530eSRafal Jaworowski #endif 1424bd37530eSRafal Jaworowski 1425bd37530eSRafal Jaworowski /* Confirm the interrupt was received by driver */ 1426bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXB | TSEC_IEVENT_RXF); 1427bd37530eSRafal Jaworowski tsec_receive_intr_locked(sc, -1); 1428bd37530eSRafal Jaworowski 1429bd37530eSRafal Jaworowski TSEC_RECEIVE_UNLOCK(sc); 1430bd37530eSRafal Jaworowski } 1431bd37530eSRafal Jaworowski 1432bd37530eSRafal Jaworowski static void 1433bd37530eSRafal Jaworowski tsec_transmit_intr_locked(struct tsec_softc *sc) 1434bd37530eSRafal Jaworowski { 143567196661SRafal Jaworowski struct ifnet *ifp; 14362c0dbbcbSJustin Hibbits uint32_t tx_idx; 143767196661SRafal Jaworowski 1438bd37530eSRafal Jaworowski TSEC_TRANSMIT_LOCK_ASSERT(sc); 1439bd37530eSRafal Jaworowski 144067196661SRafal Jaworowski ifp = sc->tsec_ifp; 144167196661SRafal Jaworowski 144267196661SRafal Jaworowski /* Update collision statistics */ 1443c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_COLLISIONS, TSEC_READ(sc, TSEC_REG_MON_TNCL)); 144467196661SRafal Jaworowski 144567196661SRafal Jaworowski /* Reset collision counters in hardware */ 144667196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0); 144767196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0); 144867196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0); 144967196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0); 145067196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0); 145167196661SRafal Jaworowski 1452321e12c8SRafal Jaworowski bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, 1453321e12c8SRafal Jaworowski BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 145467196661SRafal Jaworowski 14552c0dbbcbSJustin Hibbits tx_idx = sc->tx_idx_tail; 14562c0dbbcbSJustin Hibbits while (tx_idx != sc->tx_idx_head) { 14572c0dbbcbSJustin Hibbits struct tsec_desc *tx_desc; 14582c0dbbcbSJustin Hibbits struct tsec_bufmap *tx_bufmap; 14592c0dbbcbSJustin Hibbits 14602c0dbbcbSJustin Hibbits tx_desc = &sc->tsec_tx_vaddr[tx_idx]; 146167196661SRafal Jaworowski if (tx_desc->flags & TSEC_TXBD_R) { 146267196661SRafal Jaworowski break; 146367196661SRafal Jaworowski } 146467196661SRafal Jaworowski 14652c0dbbcbSJustin Hibbits tx_bufmap = &sc->tx_bufmap[tx_idx]; 14662c0dbbcbSJustin Hibbits tx_idx = (tx_idx + 1) & (TSEC_TX_NUM_DESC - 1); 14672c0dbbcbSJustin Hibbits if (tx_bufmap->mbuf == NULL) 146867196661SRafal Jaworowski continue; 146967196661SRafal Jaworowski 147067196661SRafal Jaworowski /* 147167196661SRafal Jaworowski * This is the last buf in this packet, so unmap and free it. 
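 * Ring indices wrap with (tx_idx + 1) & (TSEC_TX_NUM_DESC - 1), which
 * relies on TSEC_TX_NUM_DESC being a power of two; bufmaps without an
 * mbuf pointer (earlier segments of a multi-fragment packet) were already
 * skipped above.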
147267196661SRafal Jaworowski */ 14732c0dbbcbSJustin Hibbits bus_dmamap_sync(sc->tsec_tx_mtag, tx_bufmap->map, 147464f90c9dSRafal Jaworowski BUS_DMASYNC_POSTWRITE); 14752c0dbbcbSJustin Hibbits bus_dmamap_unload(sc->tsec_tx_mtag, tx_bufmap->map); 14762c0dbbcbSJustin Hibbits m_freem(tx_bufmap->mbuf); 14772c0dbbcbSJustin Hibbits tx_bufmap->mbuf = NULL; 147867196661SRafal Jaworowski 1479c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 148067196661SRafal Jaworowski } 14812c0dbbcbSJustin Hibbits sc->tx_idx_tail = tx_idx; 1482bd37530eSRafal Jaworowski bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, 1483bd37530eSRafal Jaworowski BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 148467196661SRafal Jaworowski 148567196661SRafal Jaworowski ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 148667196661SRafal Jaworowski tsec_start_locked(ifp); 148767196661SRafal Jaworowski 14882c0dbbcbSJustin Hibbits if (sc->tx_idx_tail == sc->tx_idx_head) 14895432bd9fSRafal Jaworowski sc->tsec_watchdog = 0; 149067196661SRafal Jaworowski } 149167196661SRafal Jaworowski 1492321e12c8SRafal Jaworowski void 1493bd37530eSRafal Jaworowski tsec_transmit_intr(void *arg) 149467196661SRafal Jaworowski { 149567196661SRafal Jaworowski struct tsec_softc *sc = arg; 1496bd37530eSRafal Jaworowski 1497bd37530eSRafal Jaworowski TSEC_TRANSMIT_LOCK(sc); 1498bd37530eSRafal Jaworowski 1499bd37530eSRafal Jaworowski #ifdef DEVICE_POLLING 1500bd37530eSRafal Jaworowski if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) { 1501bd37530eSRafal Jaworowski TSEC_TRANSMIT_UNLOCK(sc); 1502bd37530eSRafal Jaworowski return; 1503bd37530eSRafal Jaworowski } 1504bd37530eSRafal Jaworowski #endif 1505bd37530eSRafal Jaworowski /* Confirm the interrupt was received by driver */ 1506bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_TXB | TSEC_IEVENT_TXF); 1507bd37530eSRafal Jaworowski tsec_transmit_intr_locked(sc); 1508bd37530eSRafal Jaworowski 1509bd37530eSRafal Jaworowski TSEC_TRANSMIT_UNLOCK(sc); 1510bd37530eSRafal Jaworowski } 1511bd37530eSRafal Jaworowski 1512bd37530eSRafal Jaworowski static void 1513bd37530eSRafal Jaworowski tsec_error_intr_locked(struct tsec_softc *sc, int count) 1514bd37530eSRafal Jaworowski { 151567196661SRafal Jaworowski struct ifnet *ifp; 151667196661SRafal Jaworowski uint32_t eflags; 151767196661SRafal Jaworowski 1518bd37530eSRafal Jaworowski TSEC_GLOBAL_LOCK_ASSERT(sc); 1519bd37530eSRafal Jaworowski 152067196661SRafal Jaworowski ifp = sc->tsec_ifp; 152167196661SRafal Jaworowski 152267196661SRafal Jaworowski eflags = TSEC_READ(sc, TSEC_REG_IEVENT); 152367196661SRafal Jaworowski 152467196661SRafal Jaworowski /* Clear events bits in hardware */ 152567196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXC | TSEC_IEVENT_BSY | 152667196661SRafal Jaworowski TSEC_IEVENT_EBERR | TSEC_IEVENT_MSRO | TSEC_IEVENT_BABT | 152767196661SRafal Jaworowski TSEC_IEVENT_TXC | TSEC_IEVENT_TXE | TSEC_IEVENT_LC | 152867196661SRafal Jaworowski TSEC_IEVENT_CRL | TSEC_IEVENT_XFUN); 152967196661SRafal Jaworowski 153067196661SRafal Jaworowski /* Check transmitter errors */ 153167196661SRafal Jaworowski if (eflags & TSEC_IEVENT_TXE) { 1532c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 153367196661SRafal Jaworowski 153467196661SRafal Jaworowski if (eflags & TSEC_IEVENT_LC) 1535c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); 153667196661SRafal Jaworowski 153767196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT); 153867196661SRafal Jaworowski } 153967196661SRafal 
Jaworowski 15402c0dbbcbSJustin Hibbits /* Check for discarded frame due to a lack of buffers */ 154167196661SRafal Jaworowski if (eflags & TSEC_IEVENT_BSY) { 1542c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 154367196661SRafal Jaworowski } 1544bd37530eSRafal Jaworowski 1545bd37530eSRafal Jaworowski if (ifp->if_flags & IFF_DEBUG) 1546bd37530eSRafal Jaworowski if_printf(ifp, "tsec_error_intr(): event flags: 0x%x\n", 1547bd37530eSRafal Jaworowski eflags); 1548bd37530eSRafal Jaworowski 1549bd37530eSRafal Jaworowski if (eflags & TSEC_IEVENT_EBERR) { 1550bd37530eSRafal Jaworowski if_printf(ifp, "System bus error occurred during " 1551bd37530eSRafal Jaworowski "DMA transaction (flags: 0x%x)\n", eflags); 1552bd37530eSRafal Jaworowski tsec_init_locked(sc); 1553bd37530eSRafal Jaworowski } 1554bd37530eSRafal Jaworowski 1555bd37530eSRafal Jaworowski if (eflags & TSEC_IEVENT_BABT) 1556c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1557bd37530eSRafal Jaworowski 155867196661SRafal Jaworowski if (eflags & TSEC_IEVENT_BABR) 1559c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 156067196661SRafal Jaworowski } 156167196661SRafal Jaworowski 1562bd37530eSRafal Jaworowski void 1563bd37530eSRafal Jaworowski tsec_error_intr(void *arg) 156467196661SRafal Jaworowski { 1565bd37530eSRafal Jaworowski struct tsec_softc *sc = arg; 156667196661SRafal Jaworowski 1567772619e1SRafal Jaworowski TSEC_GLOBAL_LOCK(sc); 1568bd37530eSRafal Jaworowski tsec_error_intr_locked(sc, -1); 1569772619e1SRafal Jaworowski TSEC_GLOBAL_UNLOCK(sc); 157067196661SRafal Jaworowski } 157167196661SRafal Jaworowski 1572321e12c8SRafal Jaworowski int 157367196661SRafal Jaworowski tsec_miibus_readreg(device_t dev, int phy, int reg) 157467196661SRafal Jaworowski { 157567196661SRafal Jaworowski struct tsec_softc *sc; 157688011b59SJustin Hibbits int timeout; 1577629aa519SNathan Whitehorn int rv; 157867196661SRafal Jaworowski 1579aa15e881SRafal Jaworowski sc = device_get_softc(dev); 158067196661SRafal Jaworowski 1581629aa519SNathan Whitehorn TSEC_PHY_LOCK(); 1582629aa519SNathan Whitehorn TSEC_PHY_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg); 1583629aa519SNathan Whitehorn TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCOM, 0); 1584629aa519SNathan Whitehorn TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCOM, TSEC_MIIMCOM_READCYCLE); 158567196661SRafal Jaworowski 158688011b59SJustin Hibbits timeout = tsec_mii_wait(sc, TSEC_MIIMIND_NOTVALID | TSEC_MIIMIND_BUSY); 158788011b59SJustin Hibbits rv = TSEC_PHY_READ(sc, TSEC_REG_MIIMSTAT); 158888011b59SJustin Hibbits TSEC_PHY_UNLOCK(); 158967196661SRafal Jaworowski 159072b58db8SJustin Hibbits if (timeout) 159167196661SRafal Jaworowski device_printf(dev, "Timeout while reading from PHY!\n"); 159267196661SRafal Jaworowski 1593629aa519SNathan Whitehorn return (rv); 159467196661SRafal Jaworowski } 159567196661SRafal Jaworowski 1596661ee6eeSRafal Jaworowski int 159767196661SRafal Jaworowski tsec_miibus_writereg(device_t dev, int phy, int reg, int value) 159867196661SRafal Jaworowski { 159967196661SRafal Jaworowski struct tsec_softc *sc; 160088011b59SJustin Hibbits int timeout; 160167196661SRafal Jaworowski 1602aa15e881SRafal Jaworowski sc = device_get_softc(dev); 160367196661SRafal Jaworowski 1604629aa519SNathan Whitehorn TSEC_PHY_LOCK(); 1605629aa519SNathan Whitehorn TSEC_PHY_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg); 1606629aa519SNathan Whitehorn TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCON, value); 160788011b59SJustin Hibbits timeout = tsec_mii_wait(sc, TSEC_MIIMIND_BUSY); 1608629aa519SNathan
Whitehorn TSEC_PHY_UNLOCK(); 160967196661SRafal Jaworowski 161072b58db8SJustin Hibbits if (timeout) 161167196661SRafal Jaworowski device_printf(dev, "Timeout while writing to PHY!\n"); 1612661ee6eeSRafal Jaworowski 1613661ee6eeSRafal Jaworowski return (0); 161467196661SRafal Jaworowski } 161567196661SRafal Jaworowski 1616321e12c8SRafal Jaworowski void 161767196661SRafal Jaworowski tsec_miibus_statchg(device_t dev) 161867196661SRafal Jaworowski { 161967196661SRafal Jaworowski struct tsec_softc *sc; 162067196661SRafal Jaworowski struct mii_data *mii; 162167196661SRafal Jaworowski uint32_t ecntrl, id, tmp; 162267196661SRafal Jaworowski int link; 162367196661SRafal Jaworowski 162467196661SRafal Jaworowski sc = device_get_softc(dev); 162567196661SRafal Jaworowski mii = sc->tsec_mii; 162667196661SRafal Jaworowski link = ((mii->mii_media_status & IFM_ACTIVE) ? 1 : 0); 162767196661SRafal Jaworowski 162867196661SRafal Jaworowski tmp = TSEC_READ(sc, TSEC_REG_MACCFG2) & ~TSEC_MACCFG2_IF; 162967196661SRafal Jaworowski 163067196661SRafal Jaworowski if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) 163167196661SRafal Jaworowski tmp |= TSEC_MACCFG2_FULLDUPLEX; 163267196661SRafal Jaworowski else 163367196661SRafal Jaworowski tmp &= ~TSEC_MACCFG2_FULLDUPLEX; 163467196661SRafal Jaworowski 163567196661SRafal Jaworowski switch (IFM_SUBTYPE(mii->mii_media_active)) { 163667196661SRafal Jaworowski case IFM_1000_T: 163767196661SRafal Jaworowski case IFM_1000_SX: 163867196661SRafal Jaworowski tmp |= TSEC_MACCFG2_GMII; 163967196661SRafal Jaworowski sc->tsec_link = link; 164067196661SRafal Jaworowski break; 164167196661SRafal Jaworowski case IFM_100_TX: 164267196661SRafal Jaworowski case IFM_10_T: 164367196661SRafal Jaworowski tmp |= TSEC_MACCFG2_MII; 164467196661SRafal Jaworowski sc->tsec_link = link; 164567196661SRafal Jaworowski break; 164667196661SRafal Jaworowski case IFM_NONE: 164767196661SRafal Jaworowski if (link) 164864f90c9dSRafal Jaworowski device_printf(dev, "No speed selected but link " 164964f90c9dSRafal Jaworowski "active!\n"); 165067196661SRafal Jaworowski sc->tsec_link = 0; 165167196661SRafal Jaworowski return; 165267196661SRafal Jaworowski default: 165367196661SRafal Jaworowski sc->tsec_link = 0; 165467196661SRafal Jaworowski device_printf(dev, "Unknown speed (%d), link %s!\n", 165567196661SRafal Jaworowski IFM_SUBTYPE(mii->mii_media_active), 165667196661SRafal Jaworowski ((link) ? "up" : "down")); 165767196661SRafal Jaworowski return; 165867196661SRafal Jaworowski } 165967196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MACCFG2, tmp); 166067196661SRafal Jaworowski 166167196661SRafal Jaworowski /* XXX kludge - use circumstantial evidence for reduced mode. */ 166267196661SRafal Jaworowski id = TSEC_READ(sc, TSEC_REG_ID2); 166367196661SRafal Jaworowski if (id & 0xffff) { 166467196661SRafal Jaworowski ecntrl = TSEC_READ(sc, TSEC_REG_ECNTRL) & ~TSEC_ECNTRL_R100M; 166567196661SRafal Jaworowski ecntrl |= (tmp & TSEC_MACCFG2_MII) ? 
TSEC_ECNTRL_R100M : 0; 166667196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_ECNTRL, ecntrl); 166767196661SRafal Jaworowski } 166867196661SRafal Jaworowski } 1669bd37530eSRafal Jaworowski 1670bd37530eSRafal Jaworowski static void 1671bd37530eSRafal Jaworowski tsec_add_sysctls(struct tsec_softc *sc) 1672bd37530eSRafal Jaworowski { 1673bd37530eSRafal Jaworowski struct sysctl_ctx_list *ctx; 1674bd37530eSRafal Jaworowski struct sysctl_oid_list *children; 1675bd37530eSRafal Jaworowski struct sysctl_oid *tree; 1676bd37530eSRafal Jaworowski 1677bd37530eSRafal Jaworowski ctx = device_get_sysctl_ctx(sc->dev); 1678bd37530eSRafal Jaworowski children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 1679bd37530eSRafal Jaworowski tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal", 16807029da5cSPawel Biernacki CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "TSEC Interrupts coalescing"); 1681bd37530eSRafal Jaworowski children = SYSCTL_CHILDREN(tree); 1682bd37530eSRafal Jaworowski 1683bd37530eSRafal Jaworowski SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time", 16847029da5cSPawel Biernacki CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, TSEC_IC_RX, 16857029da5cSPawel Biernacki tsec_sysctl_ic_time, "I", "IC RX time threshold (0-65535)"); 1686bd37530eSRafal Jaworowski SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_count", 16877029da5cSPawel Biernacki CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, TSEC_IC_RX, 16887029da5cSPawel Biernacki tsec_sysctl_ic_count, "I", "IC RX frame count threshold (0-255)"); 1689bd37530eSRafal Jaworowski 1690bd37530eSRafal Jaworowski SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time", 16917029da5cSPawel Biernacki CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, TSEC_IC_TX, 16927029da5cSPawel Biernacki tsec_sysctl_ic_time, "I", "IC TX time threshold (0-65535)"); 1693bd37530eSRafal Jaworowski SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_count", 16947029da5cSPawel Biernacki CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, TSEC_IC_TX, 16957029da5cSPawel Biernacki tsec_sysctl_ic_count, "I", "IC TX frame count threshold (0-255)"); 1696bd37530eSRafal Jaworowski } 1697bd37530eSRafal Jaworowski 1698bd37530eSRafal Jaworowski /* 1699bd37530eSRafal Jaworowski * With Interrupt Coalescing (IC) active, a transmit/receive frame 1700bd37530eSRafal Jaworowski * interrupt is raised either upon: 1701bd37530eSRafal Jaworowski * 1702bd37530eSRafal Jaworowski * - threshold-defined period of time elapsed, or 1703bd37530eSRafal Jaworowski * - threshold-defined number of frames is received/transmitted, 1704bd37530eSRafal Jaworowski * whichever occurs first. 
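 *
 * For example, with rx_count = 16 and rx_time = 100, an RX interrupt is
 * raised after 16 received frames, or once 100 threshold units (i.e.
 * 100 * 64 interface clocks) have elapsed, whichever happens first.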
1705bd37530eSRafal Jaworowski * 1706bd37530eSRafal Jaworowski * The following sysctls regulate IC behaviour (for TX/RX separately): 1707bd37530eSRafal Jaworowski * 1708bd37530eSRafal Jaworowski * dev.tsec.<unit>.int_coal.rx_time 1709bd37530eSRafal Jaworowski * dev.tsec.<unit>.int_coal.rx_count 1710bd37530eSRafal Jaworowski * dev.tsec.<unit>.int_coal.tx_time 1711bd37530eSRafal Jaworowski * dev.tsec.<unit>.int_coal.tx_count 1712bd37530eSRafal Jaworowski * 1713bd37530eSRafal Jaworowski * Values: 1714bd37530eSRafal Jaworowski * 1715bd37530eSRafal Jaworowski * - 0 for either time or count disables IC on the given TX/RX path 1716bd37530eSRafal Jaworowski * 1717bd37530eSRafal Jaworowski * - count: 1-255 (expresses frame count number; note that value of 1 is 1718bd37530eSRafal Jaworowski * effectively IC off) 1719bd37530eSRafal Jaworowski * 1720bd37530eSRafal Jaworowski * - time: 1-65535 (value corresponds to a real time period and is 1721bd37530eSRafal Jaworowski * expressed in units equivalent to 64 TSEC interface clocks, i.e. one timer 1722bd37530eSRafal Jaworowski * threshold unit is 26.5 us, 2.56 us, or 512 ns, corresponding to 10 Mbps, 1723bd37530eSRafal Jaworowski * 100 Mbps, or 1Gbps, respectively. For detailed discussion consult the 1724bd37530eSRafal Jaworowski * TSEC reference manual. 1725bd37530eSRafal Jaworowski */ 1726bd37530eSRafal Jaworowski static int 1727bd37530eSRafal Jaworowski tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS) 1728bd37530eSRafal Jaworowski { 1729bd37530eSRafal Jaworowski int error; 1730bd37530eSRafal Jaworowski uint32_t time; 1731bd37530eSRafal Jaworowski struct tsec_softc *sc = (struct tsec_softc *)arg1; 1732bd37530eSRafal Jaworowski 1733bd37530eSRafal Jaworowski time = (arg2 == TSEC_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time; 1734bd37530eSRafal Jaworowski 1735bd37530eSRafal Jaworowski error = sysctl_handle_int(oidp, &time, 0, req); 1736bd37530eSRafal Jaworowski if (error != 0) 1737bd37530eSRafal Jaworowski return (error); 1738bd37530eSRafal Jaworowski 1739bd37530eSRafal Jaworowski if (time > 65535) 1740bd37530eSRafal Jaworowski return (EINVAL); 1741bd37530eSRafal Jaworowski 1742bd37530eSRafal Jaworowski TSEC_IC_LOCK(sc); 1743bd37530eSRafal Jaworowski if (arg2 == TSEC_IC_RX) { 1744bd37530eSRafal Jaworowski sc->rx_ic_time = time; 1745bd37530eSRafal Jaworowski tsec_set_rxic(sc); 1746bd37530eSRafal Jaworowski } else { 1747bd37530eSRafal Jaworowski sc->tx_ic_time = time; 1748bd37530eSRafal Jaworowski tsec_set_txic(sc); 1749bd37530eSRafal Jaworowski } 1750bd37530eSRafal Jaworowski TSEC_IC_UNLOCK(sc); 1751bd37530eSRafal Jaworowski 1752bd37530eSRafal Jaworowski return (0); 1753bd37530eSRafal Jaworowski } 1754bd37530eSRafal Jaworowski 1755bd37530eSRafal Jaworowski static int 1756bd37530eSRafal Jaworowski tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS) 1757bd37530eSRafal Jaworowski { 1758bd37530eSRafal Jaworowski int error; 1759bd37530eSRafal Jaworowski uint32_t count; 1760bd37530eSRafal Jaworowski struct tsec_softc *sc = (struct tsec_softc *)arg1; 1761bd37530eSRafal Jaworowski 1762bd37530eSRafal Jaworowski count = (arg2 == TSEC_IC_RX) ? 
sc->rx_ic_count : sc->tx_ic_count; 1763bd37530eSRafal Jaworowski 1764bd37530eSRafal Jaworowski error = sysctl_handle_int(oidp, &count, 0, req); 1765bd37530eSRafal Jaworowski if (error != 0) 1766bd37530eSRafal Jaworowski return (error); 1767bd37530eSRafal Jaworowski 1768bd37530eSRafal Jaworowski if (count > 255) 1769bd37530eSRafal Jaworowski return (EINVAL); 1770bd37530eSRafal Jaworowski 1771bd37530eSRafal Jaworowski TSEC_IC_LOCK(sc); 1772bd37530eSRafal Jaworowski if (arg2 == TSEC_IC_RX) { 1773bd37530eSRafal Jaworowski sc->rx_ic_count = count; 1774bd37530eSRafal Jaworowski tsec_set_rxic(sc); 1775bd37530eSRafal Jaworowski } else { 1776bd37530eSRafal Jaworowski sc->tx_ic_count = count; 1777bd37530eSRafal Jaworowski tsec_set_txic(sc); 1778bd37530eSRafal Jaworowski } 1779bd37530eSRafal Jaworowski TSEC_IC_UNLOCK(sc); 1780bd37530eSRafal Jaworowski 1781bd37530eSRafal Jaworowski return (0); 1782bd37530eSRafal Jaworowski } 1783bd37530eSRafal Jaworowski 1784bd37530eSRafal Jaworowski static void 1785bd37530eSRafal Jaworowski tsec_set_rxic(struct tsec_softc *sc) 1786bd37530eSRafal Jaworowski { 1787bd37530eSRafal Jaworowski uint32_t rxic_val; 1788bd37530eSRafal Jaworowski 1789bd37530eSRafal Jaworowski if (sc->rx_ic_count == 0 || sc->rx_ic_time == 0) 1790bd37530eSRafal Jaworowski /* Disable RX IC */ 1791bd37530eSRafal Jaworowski rxic_val = 0; 1792bd37530eSRafal Jaworowski else { 1793bd37530eSRafal Jaworowski rxic_val = 0x80000000; 1794bd37530eSRafal Jaworowski rxic_val |= (sc->rx_ic_count << 21); 1795bd37530eSRafal Jaworowski rxic_val |= sc->rx_ic_time; 1796bd37530eSRafal Jaworowski } 1797bd37530eSRafal Jaworowski 1798bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_RXIC, rxic_val); 1799bd37530eSRafal Jaworowski } 1800bd37530eSRafal Jaworowski 1801bd37530eSRafal Jaworowski static void 1802bd37530eSRafal Jaworowski tsec_set_txic(struct tsec_softc *sc) 1803bd37530eSRafal Jaworowski { 1804bd37530eSRafal Jaworowski uint32_t txic_val; 1805bd37530eSRafal Jaworowski 1806bd37530eSRafal Jaworowski if (sc->tx_ic_count == 0 || sc->tx_ic_time == 0) 1807bd37530eSRafal Jaworowski /* Disable TX IC */ 1808bd37530eSRafal Jaworowski txic_val = 0; 1809bd37530eSRafal Jaworowski else { 1810bd37530eSRafal Jaworowski txic_val = 0x80000000; 1811bd37530eSRafal Jaworowski txic_val |= (sc->tx_ic_count << 21); 1812bd37530eSRafal Jaworowski txic_val |= sc->tx_ic_time; 1813bd37530eSRafal Jaworowski } 1814bd37530eSRafal Jaworowski 1815bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_TXIC, txic_val); 1816bd37530eSRafal Jaworowski } 1817bd37530eSRafal Jaworowski 1818bd37530eSRafal Jaworowski static void 1819bd37530eSRafal Jaworowski tsec_offload_setup(struct tsec_softc *sc) 1820bd37530eSRafal Jaworowski { 1821bd37530eSRafal Jaworowski struct ifnet *ifp = sc->tsec_ifp; 1822bd37530eSRafal Jaworowski uint32_t reg; 1823bd37530eSRafal Jaworowski 1824bd37530eSRafal Jaworowski TSEC_GLOBAL_LOCK_ASSERT(sc); 1825bd37530eSRafal Jaworowski 1826bd37530eSRafal Jaworowski reg = TSEC_READ(sc, TSEC_REG_TCTRL); 1827bd37530eSRafal Jaworowski reg |= TSEC_TCTRL_IPCSEN | TSEC_TCTRL_TUCSEN; 1828bd37530eSRafal Jaworowski 1829bd37530eSRafal Jaworowski if (ifp->if_capenable & IFCAP_TXCSUM) 1830bd37530eSRafal Jaworowski ifp->if_hwassist = TSEC_CHECKSUM_FEATURES; 1831bd37530eSRafal Jaworowski else 1832bd37530eSRafal Jaworowski ifp->if_hwassist = 0; 1833bd37530eSRafal Jaworowski 1834bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_TCTRL, reg); 1835bd37530eSRafal Jaworowski 1836bd37530eSRafal Jaworowski reg = TSEC_READ(sc, TSEC_REG_RCTRL); 1837bd37530eSRafal 
Jaworowski reg &= ~(TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN | TSEC_RCTRL_PRSDEP); 1838bd37530eSRafal Jaworowski reg |= TSEC_RCTRL_PRSDEP_PARSE_L2 | TSEC_RCTRL_VLEX; 1839bd37530eSRafal Jaworowski 1840bd37530eSRafal Jaworowski if (ifp->if_capenable & IFCAP_RXCSUM) 1841bd37530eSRafal Jaworowski reg |= TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN | 1842bd37530eSRafal Jaworowski TSEC_RCTRL_PRSDEP_PARSE_L234; 1843bd37530eSRafal Jaworowski 1844bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_RCTRL, reg); 1845bd37530eSRafal Jaworowski } 1846bd37530eSRafal Jaworowski 1847bd37530eSRafal Jaworowski static void 1848bd37530eSRafal Jaworowski tsec_offload_process_frame(struct tsec_softc *sc, struct mbuf *m) 1849bd37530eSRafal Jaworowski { 1850bd37530eSRafal Jaworowski struct tsec_rx_fcb rx_fcb; 1851bd37530eSRafal Jaworowski int csum_flags = 0; 1852bd37530eSRafal Jaworowski int protocol, flags; 1853bd37530eSRafal Jaworowski 1854bd37530eSRafal Jaworowski TSEC_RECEIVE_LOCK_ASSERT(sc); 1855bd37530eSRafal Jaworowski 1856bd37530eSRafal Jaworowski m_copydata(m, 0, sizeof(struct tsec_rx_fcb), (caddr_t)(&rx_fcb)); 1857bd37530eSRafal Jaworowski flags = rx_fcb.flags; 1858bd37530eSRafal Jaworowski protocol = rx_fcb.protocol; 1859bd37530eSRafal Jaworowski 1860bd37530eSRafal Jaworowski if (TSEC_RX_FCB_IP_CSUM_CHECKED(flags)) { 1861bd37530eSRafal Jaworowski csum_flags |= CSUM_IP_CHECKED; 1862bd37530eSRafal Jaworowski 1863bd37530eSRafal Jaworowski if ((flags & TSEC_RX_FCB_IP_CSUM_ERROR) == 0) 1864bd37530eSRafal Jaworowski csum_flags |= CSUM_IP_VALID; 1865bd37530eSRafal Jaworowski } 1866bd37530eSRafal Jaworowski 1867bd37530eSRafal Jaworowski if ((protocol == IPPROTO_TCP || protocol == IPPROTO_UDP) && 1868bd37530eSRafal Jaworowski TSEC_RX_FCB_TCP_UDP_CSUM_CHECKED(flags) && 1869bd37530eSRafal Jaworowski (flags & TSEC_RX_FCB_TCP_UDP_CSUM_ERROR) == 0) { 1870bd37530eSRafal Jaworowski csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1871bd37530eSRafal Jaworowski m->m_pkthdr.csum_data = 0xFFFF; 1872bd37530eSRafal Jaworowski } 1873bd37530eSRafal Jaworowski 1874bd37530eSRafal Jaworowski m->m_pkthdr.csum_flags = csum_flags; 1875bd37530eSRafal Jaworowski 1876bd37530eSRafal Jaworowski if (flags & TSEC_RX_FCB_VLAN) { 1877bd37530eSRafal Jaworowski m->m_pkthdr.ether_vtag = rx_fcb.vlan; 1878bd37530eSRafal Jaworowski m->m_flags |= M_VLANTAG; 1879bd37530eSRafal Jaworowski } 1880bd37530eSRafal Jaworowski 1881bd37530eSRafal Jaworowski m_adj(m, sizeof(struct tsec_rx_fcb)); 1882bd37530eSRafal Jaworowski } 1883bd37530eSRafal Jaworowski 18845c973840SGleb Smirnoff static u_int 18855c973840SGleb Smirnoff tsec_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 18865c973840SGleb Smirnoff { 18875c973840SGleb Smirnoff uint32_t h, *hashtable = arg; 18885c973840SGleb Smirnoff 18895c973840SGleb Smirnoff h = (ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 24) & 0xFF; 18905c973840SGleb Smirnoff hashtable[(h >> 5)] |= 1 << (0x1F - (h & 0x1F)); 18915c973840SGleb Smirnoff 18925c973840SGleb Smirnoff return (1); 18935c973840SGleb Smirnoff } 18945c973840SGleb Smirnoff 1895bd37530eSRafal Jaworowski static void 1896bd37530eSRafal Jaworowski tsec_setup_multicast(struct tsec_softc *sc) 1897bd37530eSRafal Jaworowski { 1898bd37530eSRafal Jaworowski uint32_t hashtable[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; 1899bd37530eSRafal Jaworowski struct ifnet *ifp = sc->tsec_ifp; 1900bd37530eSRafal Jaworowski int i; 1901bd37530eSRafal Jaworowski 1902bd37530eSRafal Jaworowski TSEC_GLOBAL_LOCK_ASSERT(sc); 1903bd37530eSRafal Jaworowski 1904bd37530eSRafal Jaworowski if (ifp->if_flags & 
IFF_ALLMULTI) { 1905bd37530eSRafal Jaworowski for (i = 0; i < 8; i++) 1906bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_GADDR(i), 0xFFFFFFFF); 1907bd37530eSRafal Jaworowski 1908bd37530eSRafal Jaworowski return; 1909bd37530eSRafal Jaworowski } 1910bd37530eSRafal Jaworowski 19115c973840SGleb Smirnoff if_foreach_llmaddr(ifp, tsec_hash_maddr, &hashtable); 1912bd37530eSRafal Jaworowski 1913bd37530eSRafal Jaworowski for (i = 0; i < 8; i++) 1914bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_GADDR(i), hashtable[i]); 1915bd37530eSRafal Jaworowski } 1916bd37530eSRafal Jaworowski 1917bd37530eSRafal Jaworowski static int 1918bd37530eSRafal Jaworowski tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu) 1919bd37530eSRafal Jaworowski { 1920bd37530eSRafal Jaworowski 1921bd37530eSRafal Jaworowski mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN; 1922bd37530eSRafal Jaworowski 1923bd37530eSRafal Jaworowski TSEC_GLOBAL_LOCK_ASSERT(sc); 1924bd37530eSRafal Jaworowski 1925bd37530eSRafal Jaworowski if (mtu >= TSEC_MIN_FRAME_SIZE && mtu <= TSEC_MAX_FRAME_SIZE) { 1926bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MAXFRM, mtu); 1927bd37530eSRafal Jaworowski return (mtu); 1928bd37530eSRafal Jaworowski } 1929bd37530eSRafal Jaworowski 1930bd37530eSRafal Jaworowski return (0); 1931bd37530eSRafal Jaworowski } 1932
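/*
 * A minimal usage sketch for tsec_set_mtu() above, assuming a
 * SIOCSIFMTU-style caller that already holds the global lock: the function
 * returns the programmed frame length on success and 0 when the resulting
 * frame size falls outside TSEC_MIN_FRAME_SIZE..TSEC_MAX_FRAME_SIZE.  The
 * "error" variable is hypothetical.
 */
#if 0
	TSEC_GLOBAL_LOCK(sc);
	if (tsec_set_mtu(sc, ifr->ifr_mtu) != 0)
		ifp->if_mtu = ifr->ifr_mtu;	/* TSEC_REG_MAXFRM reprogrammed */
	else
		error = EINVAL;			/* frame size out of range */
	TSEC_GLOBAL_UNLOCK(sc);
#endif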