/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski
 * Copyright (C) 2006-2007 Semihalf, Piotr Kruszynski
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Freescale integrated Three-Speed Ethernet Controller (TSEC) driver.
 */
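
/*
 * Locking overview (derived from the code in this file): the transmit and
 * receive paths use separate locks (TSEC_TRANSMIT_LOCK/TSEC_RECEIVE_LOCK),
 * while TSEC_GLOBAL_LOCK is used on paths that touch shared state (attach,
 * init, stop, ioctl) and appears to acquire both.  A separate global
 * tsec_phy_mtx serializes MII management accesses, since the MDIO
 * interface may be shared between TSEC instances.
 */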
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/tsec/if_tsec.h>
#include <dev/tsec/if_tsecreg.h>

static int	tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag,
    bus_dmamap_t *dmap, bus_size_t dsize, void **vaddr, void *raddr,
    const char *dname);
static void	tsec_dma_ctl(struct tsec_softc *sc, int state);
static void	tsec_encap(struct ifnet *ifp, struct tsec_softc *sc,
    struct mbuf *m0, uint16_t fcb_flags, int *start_tx);
static void	tsec_free_dma(struct tsec_softc *sc);
static void	tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap,
    void *vaddr);
static int	tsec_ifmedia_upd(struct ifnet *ifp);
static void	tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int	tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mbufp, uint32_t *paddr);
static void	tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs,
    int nseg, int error);
static void	tsec_intrs_ctl(struct tsec_softc *sc, int state);
static void	tsec_init(void *xsc);
static void	tsec_init_locked(struct tsec_softc *sc);
static int	tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void	tsec_reset_mac(struct tsec_softc *sc);
static void	tsec_setfilter(struct tsec_softc *sc);
static void	tsec_set_mac_address(struct tsec_softc *sc);
static void	tsec_start(struct ifnet *ifp);
static void	tsec_start_locked(struct ifnet *ifp);
static void	tsec_stop(struct tsec_softc *sc);
static void	tsec_tick(void *arg);
static void	tsec_watchdog(struct tsec_softc *sc);
static void	tsec_add_sysctls(struct tsec_softc *sc);
static int	tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS);
static int	tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS);
static void	tsec_set_rxic(struct tsec_softc *sc);
static void	tsec_set_txic(struct tsec_softc *sc);
static int	tsec_receive_intr_locked(struct tsec_softc *sc, int count);
static void	tsec_transmit_intr_locked(struct tsec_softc *sc);
static void	tsec_error_intr_locked(struct tsec_softc *sc, int count);
static void	tsec_offload_setup(struct tsec_softc *sc);
static void	tsec_offload_process_frame(struct tsec_softc *sc,
    struct mbuf *m);
static void	tsec_setup_multicast(struct tsec_softc *sc);
static int	tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu);

devclass_t tsec_devclass;
DRIVER_MODULE(miibus, tsec, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(tsec, ether, 1, 1, 1);
MODULE_DEPEND(tsec, miibus, 1, 1, 1);

struct mtx tsec_phy_mtx;

int
tsec_attach(struct tsec_softc *sc)
{
	uint8_t hwaddr[ETHER_ADDR_LEN];
	struct ifnet *ifp;
	int error = 0;
	int i;

	/* Initialize global (because potentially shared) MII lock */
	if (!mtx_initialized(&tsec_phy_mtx))
		mtx_init(&tsec_phy_mtx, "tsec mii", NULL, MTX_DEF);

	/* Reset all TSEC counters */
	TSEC_TX_RX_COUNTERS_INIT(sc);

	/* Stop DMA engine if enabled by firmware */
	tsec_dma_ctl(sc, 0);

	/* Reset MAC */
	tsec_reset_mac(sc);

	/* Disable interrupts for now */
	tsec_intrs_ctl(sc, 0);

	/* Configure defaults for interrupt coalescing */
	sc->rx_ic_time = 768;
	sc->rx_ic_count = 16;
	sc->tx_ic_time = 768;
	sc->tx_ic_count = 16;
	tsec_set_rxic(sc);
	tsec_set_txic(sc);
	tsec_add_sysctls(sc);

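	/*
	 * Each descriptor ring below is a single, physically contiguous,
	 * DMA safe allocation (see tsec_alloc_dma_desc()); the physical
	 * addresses returned in tsec_tx_raddr/tsec_rx_raddr are programmed
	 * into TBASE/RBASE later, in tsec_init_locked().
	 */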
	/* Allocate a busdma tag and DMA safe memory for TX descriptors. */
	error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_tx_dtag,
	    &sc->tsec_tx_dmap, sizeof(*sc->tsec_tx_vaddr) * TSEC_TX_NUM_DESC,
	    (void **)&sc->tsec_tx_vaddr, &sc->tsec_tx_raddr, "TX");

	if (error) {
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Allocate a busdma tag and DMA safe memory for RX descriptors. */
	error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_rx_dtag,
	    &sc->tsec_rx_dmap, sizeof(*sc->tsec_rx_vaddr) * TSEC_RX_NUM_DESC,
	    (void **)&sc->tsec_rx_vaddr, &sc->tsec_rx_raddr, "RX");
	if (error) {
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Allocate a busdma tag for TX mbufs. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    TSEC_TXBUFFER_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES * (TSEC_TX_NUM_DESC - 1),	/* maxsize */
	    TSEC_TX_MAX_DMA_SEGS,		/* nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->tsec_tx_mtag);			/* dmat */
	if (error) {
		device_printf(sc->dev, "failed to allocate busdma tag "
		    "(tx mbufs)\n");
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Allocate a busdma tag for RX mbufs. */
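	/*
	 * Unlike the TX tag above, which allows up to TSEC_TX_MAX_DMA_SEGS
	 * segments per packet, every RX buffer is a single MCLBYTES cluster
	 * mapped as exactly one segment.
	 */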
	error = bus_dma_tag_create(NULL,	/* parent */
	    TSEC_RXBUFFER_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES,				/* maxsize */
	    1,					/* nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->tsec_rx_mtag);			/* dmat */
	if (error) {
		device_printf(sc->dev, "failed to allocate busdma tag "
		    "(rx mbufs)\n");
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Create TX busdma maps */
	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
		error = bus_dmamap_create(sc->tsec_tx_mtag, 0,
		    &sc->tx_bufmap[i].map);
		if (error) {
			device_printf(sc->dev, "failed to init TX ring\n");
			tsec_detach(sc);
			return (ENXIO);
		}
		sc->tx_bufmap[i].map_initialized = 1;
	}

	/* Create RX busdma maps and zero mbuf handlers */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		error = bus_dmamap_create(sc->tsec_rx_mtag, 0,
		    &sc->rx_data[i].map);
		if (error) {
			device_printf(sc->dev, "failed to init RX ring\n");
			tsec_detach(sc);
			return (ENXIO);
		}
		sc->rx_data[i].mbuf = NULL;
	}

	/* Create mbufs for RX buffers */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		error = tsec_new_rxbuf(sc->tsec_rx_mtag, sc->rx_data[i].map,
		    &sc->rx_data[i].mbuf, &sc->rx_data[i].paddr);
		if (error) {
			device_printf(sc->dev, "can't load rx DMA map %d, "
			    "error = %d\n", i, error);
			tsec_detach(sc);
			return (error);
		}
	}

	/* Create network interface for upper layers */
	ifp = sc->tsec_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc->dev, "if_alloc() failed\n");
		tsec_detach(sc);
		return (ENOMEM);
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_init = tsec_init;
	ifp->if_start = tsec_start;
	ifp->if_ioctl = tsec_ioctl;

	IFQ_SET_MAXLEN(&ifp->if_snd, TSEC_TX_NUM_DESC - 1);
	ifp->if_snd.ifq_drv_maxlen = TSEC_TX_NUM_DESC - 1;
	IFQ_SET_READY(&ifp->if_snd);

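	/*
	 * The send queue is sized to TSEC_TX_NUM_DESC - 1 entries, and
	 * hardware checksum offload is only advertised when sc->is_etsec is
	 * set, i.e. on the enhanced (eTSEC) controller that understands the
	 * frame control blocks used for offloading (see tsec_encap() and
	 * tsec_offload_setup()).
	 */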
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	if (sc->is_etsec)
		ifp->if_capabilities |= IFCAP_HWCSUM;

	ifp->if_capenable = ifp->if_capabilities;

#ifdef DEVICE_POLLING
	/* Advertise that polling is supported */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Attach PHY(s) */
	error = mii_attach(sc->dev, &sc->tsec_miibus, ifp, tsec_ifmedia_upd,
	    tsec_ifmedia_sts, BMSR_DEFCAPMASK, sc->phyaddr, MII_OFFSET_ANY,
	    0);
	if (error) {
		device_printf(sc->dev, "attaching PHYs failed\n");
		if_free(ifp);
		sc->tsec_ifp = NULL;
		tsec_detach(sc);
		return (error);
	}
	sc->tsec_mii = device_get_softc(sc->tsec_miibus);

	/* Set MAC address */
	tsec_get_hwaddr(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);

	return (0);
}

int
tsec_detach(struct tsec_softc *sc)
{

	if (sc->tsec_ifp != NULL) {
#ifdef DEVICE_POLLING
		if (sc->tsec_ifp->if_capenable & IFCAP_POLLING)
			ether_poll_deregister(sc->tsec_ifp);
#endif

		/* Stop TSEC controller and free TX queue */
		if (sc->sc_rres)
			tsec_shutdown(sc->dev);

		/* Detach network interface */
		ether_ifdetach(sc->tsec_ifp);
		if_free(sc->tsec_ifp);
		sc->tsec_ifp = NULL;
	}

	/* Free DMA resources */
	tsec_free_dma(sc);

	return (0);
}

int
tsec_shutdown(device_t dev)
{
	struct tsec_softc *sc;

	sc = device_get_softc(dev);

	TSEC_GLOBAL_LOCK(sc);
	tsec_stop(sc);
	TSEC_GLOBAL_UNLOCK(sc);
	return (0);
}

int
tsec_suspend(device_t dev)
{

	/* TODO not implemented! */
	return (0);
}

int
tsec_resume(device_t dev)
{

	/* TODO not implemented! */
	return (0);
}

static void
tsec_init(void *xsc)
{
	struct tsec_softc *sc = xsc;

	TSEC_GLOBAL_LOCK(sc);
	tsec_init_locked(sc);
	TSEC_GLOBAL_UNLOCK(sc);
}

static int
tsec_mii_wait(struct tsec_softc *sc, uint32_t flags)
{
	int timeout;

	/*
	 * The status indicators are not set immediately after a command.
	 * Discard the first value.
	 */
	TSEC_PHY_READ(sc, TSEC_REG_MIIMIND);

	timeout = TSEC_READ_RETRY;
	while ((TSEC_PHY_READ(sc, TSEC_REG_MIIMIND) & flags) && --timeout)
		DELAY(TSEC_READ_DELAY);

	return (timeout == 0);
}

static void
tsec_init_locked(struct tsec_softc *sc)
{
	struct tsec_desc *tx_desc = sc->tsec_tx_vaddr;
	struct tsec_desc *rx_desc = sc->tsec_rx_vaddr;
	struct ifnet *ifp = sc->tsec_ifp;
	uint32_t val, i;
	int timeout;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	TSEC_GLOBAL_LOCK_ASSERT(sc);
	tsec_stop(sc);

	/*
	 * These steps are according to the MPC8555E PowerQUICCIII RM:
	 * 14.7 Initialization/Application Information
	 */

	/* Step 1: soft reset MAC */
	tsec_reset_mac(sc);

	/* Step 2: Initialize MACCFG2 */
	TSEC_WRITE(sc, TSEC_REG_MACCFG2,
	    TSEC_MACCFG2_FULLDUPLEX |	/* Full Duplex = 1 */
	    TSEC_MACCFG2_PADCRC |	/* PAD/CRC append */
	    TSEC_MACCFG2_GMII |		/* I/F Mode bit */
	    TSEC_MACCFG2_PRECNT		/* Preamble count = 7 */
	);

	/* Step 3: Initialize ECNTRL
	 * While the documentation states that R100M is ignored if RPM is
	 * not set, it does seem to be needed to get the orange boxes to
	 * work (which have a Marvell 88E1111 PHY). Go figure.
	 */

	/*
	 * XXX kludge - use circumstantial evidence to program ECNTRL
	 * correctly. Ideally we need some board information to guide
	 * us here.
	 */
	i = TSEC_READ(sc, TSEC_REG_ID2);
	val = (i & 0xffff)
	    ? (TSEC_ECNTRL_TBIM | TSEC_ECNTRL_SGMIIM)	/* Sumatra */
	    : TSEC_ECNTRL_R100M;			/* Orange + CDS */
	TSEC_WRITE(sc, TSEC_REG_ECNTRL, TSEC_ECNTRL_STEN | val);

	/* Step 4: Initialize MAC station address */
	tsec_set_mac_address(sc);

	/*
	 * Step 5: Assign a Physical address to the TBI so as to not conflict
	 * with the external PHY physical address
	 */
	TSEC_WRITE(sc, TSEC_REG_TBIPA, 5);

	TSEC_PHY_LOCK(sc);

	/* Step 6: Reset the management interface */
	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_RESETMGMT);

	/* Step 7: Setup the MII Mgmt clock speed */
	TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_CLKDIV28);

	/* Step 8: Read MII Mgmt indicator register and check for Busy = 0 */
	timeout = tsec_mii_wait(sc, TSEC_MIIMIND_BUSY);

	TSEC_PHY_UNLOCK(sc);
	if (timeout) {
		if_printf(ifp, "tsec_init_locked(): Mgmt busy timeout\n");
		return;
	}

	/* Step 9: Setup the MII Mgmt */
	mii_mediachg(sc->tsec_mii);

	/* Step 10: Clear IEVENT register */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, 0xffffffff);

	/* Step 11: Enable interrupts */
#ifdef DEVICE_POLLING
	/*
	 * ...only if polling is not turned on. Disable interrupts explicitly
	 * if polling is enabled.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		tsec_intrs_ctl(sc, 0);
	else
#endif /* DEVICE_POLLING */
	tsec_intrs_ctl(sc, 1);

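	/*
	 * Steps 12 and 13 clear the individual and group address hash
	 * tables (IADDR0-7, GADDR0-7); the group hash is presumably
	 * repopulated with the multicast filter by tsec_setup_multicast()
	 * in step 26.
	 */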
	/* Step 12: Initialize IADDRn */
	TSEC_WRITE(sc, TSEC_REG_IADDR0, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR1, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR2, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR3, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR4, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR5, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR6, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR7, 0);

	/* Step 13: Initialize GADDRn */
	TSEC_WRITE(sc, TSEC_REG_GADDR0, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR1, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR2, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR3, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR4, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR5, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR6, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR7, 0);

	/* Step 14: Initialize RCTRL */
	TSEC_WRITE(sc, TSEC_REG_RCTRL, 0);

	/* Step 15: Initialize DMACTRL */
	tsec_dma_ctl(sc, 1);

	/* Step 16: Initialize FIFO_PAUSE_CTRL */
	TSEC_WRITE(sc, TSEC_REG_FIFO_PAUSE_CTRL, TSEC_FIFO_PAUSE_CTRL_EN);

	/*
	 * Step 17: Initialize transmit/receive descriptor rings.
	 * Initialize TBASE and RBASE.
	 */
	TSEC_WRITE(sc, TSEC_REG_TBASE, sc->tsec_tx_raddr);
	TSEC_WRITE(sc, TSEC_REG_RBASE, sc->tsec_rx_raddr);

	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
		tx_desc[i].bufptr = 0;
		tx_desc[i].length = 0;
		tx_desc[i].flags = ((i == TSEC_TX_NUM_DESC - 1) ?
		    TSEC_TXBD_W : 0);
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		rx_desc[i].bufptr = sc->rx_data[i].paddr;
		rx_desc[i].length = 0;
		rx_desc[i].flags = TSEC_RXBD_E | TSEC_RXBD_I |
		    ((i == TSEC_RX_NUM_DESC - 1) ? TSEC_RXBD_W : 0);
	}
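	/*
	 * Note the ring layout above: only the last descriptor of each ring
	 * carries the wrap (W) bit, so the controller cycles back to
	 * TBASE/RBASE.  RX descriptors start out empty (E, hardware owned)
	 * with per-buffer interrupts (I) enabled, while TX descriptors start
	 * out owned by software.
	 */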
	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Step 18: Initialize the maximum receive buffer length */
	TSEC_WRITE(sc, TSEC_REG_MRBLR, MCLBYTES);

	/* Step 19: Configure ethernet frame sizes */
	TSEC_WRITE(sc, TSEC_REG_MINFLR, TSEC_MIN_FRAME_SIZE);
	tsec_set_mtu(sc, ifp->if_mtu);

	/* Step 20: Enable Rx and RxBD data snooping */
	TSEC_WRITE(sc, TSEC_REG_ATTR, TSEC_ATTR_RDSEN | TSEC_ATTR_RBDSEN);
	TSEC_WRITE(sc, TSEC_REG_ATTRELI, 0);

	/* Step 21: Reset collision counters in hardware */
	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);

	/* Step 22: Mask all CAM interrupts */
	TSEC_WRITE(sc, TSEC_REG_MON_CAM1, 0xffffffff);
	TSEC_WRITE(sc, TSEC_REG_MON_CAM2, 0xffffffff);

	/* Step 23: Enable Rx and Tx */
	val = TSEC_READ(sc, TSEC_REG_MACCFG1);
	val |= (TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, val);

	/* Step 24: Reset TSEC counters for Tx and Rx rings */
	TSEC_TX_RX_COUNTERS_INIT(sc);

	/* Step 25: Setup TCP/IP Off-Load engine */
	if (sc->is_etsec)
		tsec_offload_setup(sc);

	/* Step 26: Setup multicast filters */
	tsec_setup_multicast(sc);

	/* Step 27: Activate network interface */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->tsec_if_flags = ifp->if_flags;
	sc->tsec_watchdog = 0;

	/* Schedule watchdog timeout */
	callout_reset(&sc->tsec_callout, hz, tsec_tick, sc);
}

static void
tsec_set_mac_address(struct tsec_softc *sc)
{
	uint32_t macbuf[2] = { 0, 0 };
	char *macbufp, *curmac;
	int i;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	KASSERT((ETHER_ADDR_LEN <= sizeof(macbuf)),
	    ("tsec_set_mac_address: (%d <= %zd)", ETHER_ADDR_LEN,
	    sizeof(macbuf)));

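	/*
	 * The station address registers expect the address in reversed byte
	 * order, hence the copy loop below instead of a plain memcpy() of
	 * the interface lladdr.
	 */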
<= %zd", ETHER_ADDR_LEN, 58164f90c9dSRafal Jaworowski sizeof(macbuf))); 58267196661SRafal Jaworowski 58367196661SRafal Jaworowski macbufp = (char *)macbuf; 58467196661SRafal Jaworowski curmac = (char *)IF_LLADDR(sc->tsec_ifp); 58567196661SRafal Jaworowski 58667196661SRafal Jaworowski /* Correct order of MAC address bytes */ 58767196661SRafal Jaworowski for (i = 1; i <= ETHER_ADDR_LEN; i++) 58867196661SRafal Jaworowski macbufp[ETHER_ADDR_LEN-i] = curmac[i-1]; 58967196661SRafal Jaworowski 59067196661SRafal Jaworowski /* Initialize MAC station address MACSTNADDR2 and MACSTNADDR1 */ 59167196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MACSTNADDR2, macbuf[1]); 59267196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MACSTNADDR1, macbuf[0]); 59367196661SRafal Jaworowski } 59467196661SRafal Jaworowski 59567196661SRafal Jaworowski /* 59667196661SRafal Jaworowski * DMA control function, if argument state is: 59767196661SRafal Jaworowski * 0 - DMA engine will be disabled 59867196661SRafal Jaworowski * 1 - DMA engine will be enabled 59967196661SRafal Jaworowski */ 60067196661SRafal Jaworowski static void 60167196661SRafal Jaworowski tsec_dma_ctl(struct tsec_softc *sc, int state) 60267196661SRafal Jaworowski { 60367196661SRafal Jaworowski device_t dev; 60464f90c9dSRafal Jaworowski uint32_t dma_flags, timeout; 60567196661SRafal Jaworowski 60667196661SRafal Jaworowski dev = sc->dev; 60767196661SRafal Jaworowski 60867196661SRafal Jaworowski dma_flags = TSEC_READ(sc, TSEC_REG_DMACTRL); 60967196661SRafal Jaworowski 61067196661SRafal Jaworowski switch (state) { 61167196661SRafal Jaworowski case 0: 61267196661SRafal Jaworowski /* Temporarily clear stop graceful stop bits. */ 61367196661SRafal Jaworowski tsec_dma_ctl(sc, 1000); 61467196661SRafal Jaworowski 61567196661SRafal Jaworowski /* Set it again */ 61667196661SRafal Jaworowski dma_flags |= (TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS); 61767196661SRafal Jaworowski break; 61867196661SRafal Jaworowski case 1000: 61967196661SRafal Jaworowski case 1: 62067196661SRafal Jaworowski /* Set write with response (WWR), wait (WOP) and snoop bits */ 62167196661SRafal Jaworowski dma_flags |= (TSEC_DMACTRL_TDSEN | TSEC_DMACTRL_TBDSEN | 62267196661SRafal Jaworowski DMACTRL_WWR | DMACTRL_WOP); 62367196661SRafal Jaworowski 62467196661SRafal Jaworowski /* Clear graceful stop bits */ 62567196661SRafal Jaworowski dma_flags &= ~(TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS); 62667196661SRafal Jaworowski break; 62767196661SRafal Jaworowski default: 62867196661SRafal Jaworowski device_printf(dev, "tsec_dma_ctl(): unknown state value: %d\n", 62967196661SRafal Jaworowski state); 63067196661SRafal Jaworowski } 63167196661SRafal Jaworowski 63267196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_DMACTRL, dma_flags); 63367196661SRafal Jaworowski 63467196661SRafal Jaworowski switch (state) { 63567196661SRafal Jaworowski case 0: 63667196661SRafal Jaworowski /* Wait for DMA stop */ 63767196661SRafal Jaworowski timeout = TSEC_READ_RETRY; 63867196661SRafal Jaworowski while (--timeout && (!(TSEC_READ(sc, TSEC_REG_IEVENT) & 63967196661SRafal Jaworowski (TSEC_IEVENT_GRSC | TSEC_IEVENT_GTSC)))) 64067196661SRafal Jaworowski DELAY(TSEC_READ_DELAY); 64167196661SRafal Jaworowski 64267196661SRafal Jaworowski if (timeout == 0) 64367196661SRafal Jaworowski device_printf(dev, "tsec_dma_ctl(): timeout!\n"); 64467196661SRafal Jaworowski break; 64567196661SRafal Jaworowski case 1: 64667196661SRafal Jaworowski /* Restart transmission function */ 64767196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT); 
/*
 * Interrupts control function, if argument state is:
 * 0 - all TSEC interrupts will be masked
 * 1 - all TSEC interrupts will be unmasked
 */
static void
tsec_intrs_ctl(struct tsec_softc *sc, int state)
{
	device_t dev;

	dev = sc->dev;

	switch (state) {
	case 0:
		TSEC_WRITE(sc, TSEC_REG_IMASK, 0);
		break;
	case 1:
		TSEC_WRITE(sc, TSEC_REG_IMASK, TSEC_IMASK_BREN |
		    TSEC_IMASK_RXCEN | TSEC_IMASK_BSYEN | TSEC_IMASK_EBERREN |
		    TSEC_IMASK_BTEN | TSEC_IMASK_TXEEN | TSEC_IMASK_TXBEN |
		    TSEC_IMASK_TXFEN | TSEC_IMASK_XFUNEN | TSEC_IMASK_RXFEN);
		break;
	default:
		device_printf(dev, "tsec_intrs_ctl(): unknown state value: %d\n",
		    state);
	}
}

static void
tsec_reset_mac(struct tsec_softc *sc)
{
	uint32_t maccfg1_flags;

	/* Set soft reset bit */
	maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
	maccfg1_flags |= TSEC_MACCFG1_SOFT_RESET;
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);

	/* Clear soft reset bit */
	maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1);
	maccfg1_flags &= ~TSEC_MACCFG1_SOFT_RESET;
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags);
}

static void
tsec_watchdog(struct tsec_softc *sc)
{
	struct ifnet *ifp;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	if (sc->tsec_watchdog == 0 || --sc->tsec_watchdog > 0)
		return;

	ifp = sc->tsec_ifp;
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_printf(ifp, "watchdog timeout\n");

	tsec_stop(sc);
	tsec_init_locked(sc);
}

static void
tsec_start(struct ifnet *ifp)
{
	struct tsec_softc *sc = ifp->if_softc;

	TSEC_TRANSMIT_LOCK(sc);
	tsec_start_locked(ifp);
	TSEC_TRANSMIT_UNLOCK(sc);
}

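/*
 * When checksum offloading is requested for a packet, tsec_start_locked()
 * prepends a TX frame control block (struct tsec_tx_fcb) describing the
 * L3/L4 offsets and the requested checksums; tsec_encap() then marks the
 * first descriptor of that packet with TSEC_TXBD_TOE so the controller
 * interprets the prepended FCB.
 */
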
static void
tsec_start_locked(struct ifnet *ifp)
{
	struct tsec_softc *sc;
	struct mbuf *m0;
	struct tsec_tx_fcb *tx_fcb;
	int csum_flags;
	int start_tx;
	uint16_t fcb_flags;

	sc = ifp->if_softc;
	start_tx = 0;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	if (sc->tsec_link == 0)
		return;

	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {

		if (TSEC_FREE_TX_DESC(sc) < TSEC_TX_MAX_DMA_SEGS) {
			/* No free descriptors */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/* Get packet from the queue */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		/* Insert TCP/IP Off-load frame control block */
		fcb_flags = 0;
		csum_flags = m0->m_pkthdr.csum_flags;
		if (csum_flags) {
			M_PREPEND(m0, sizeof(struct tsec_tx_fcb), M_NOWAIT);
			if (m0 == NULL)
				break;

			if (csum_flags & CSUM_IP)
				fcb_flags |= TSEC_TX_FCB_IP4 |
				    TSEC_TX_FCB_CSUM_IP;

			if (csum_flags & CSUM_TCP)
				fcb_flags |= TSEC_TX_FCB_TCP |
				    TSEC_TX_FCB_CSUM_TCP_UDP;

			if (csum_flags & CSUM_UDP)
				fcb_flags |= TSEC_TX_FCB_UDP |
				    TSEC_TX_FCB_CSUM_TCP_UDP;

			tx_fcb = mtod(m0, struct tsec_tx_fcb *);
			tx_fcb->flags = fcb_flags;
			tx_fcb->l3_offset = ETHER_HDR_LEN;
			tx_fcb->l4_offset = sizeof(struct ip);
		}

		tsec_encap(ifp, sc, m0, fcb_flags, &start_tx);
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (start_tx) {
		/* Enable transmitter and watchdog timer */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
		sc->tsec_watchdog = 5;
	}
}

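/*
 * The TX ring is managed as a power-of-two circular buffer: tx_idx_head and
 * tx_idx_tail advance with a (TSEC_TX_NUM_DESC - 1) mask, and the free
 * descriptor test in tsec_start_locked() presumably reduces to something
 * like
 *
 *	free = (tx_idx_tail - tx_idx_head - 1) & (TSEC_TX_NUM_DESC - 1);
 *
 * (TSEC_FREE_TX_DESC() itself is defined in if_tsec.h, not shown in this
 * excerpt.)  tsec_encap() fills the descriptors back to front so that the
 * READY bit of the first descriptor becomes visible last, after the wmb()
 * barrier.
 */
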
static void
tsec_encap(struct ifnet *ifp, struct tsec_softc *sc, struct mbuf *m0,
    uint16_t fcb_flags, int *start_tx)
{
	bus_dma_segment_t segs[TSEC_TX_MAX_DMA_SEGS];
	int error, i, nsegs;
	struct tsec_bufmap *tx_bufmap;
	uint32_t tx_idx;
	uint16_t flags;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	tx_idx = sc->tx_idx_head;
	tx_bufmap = &sc->tx_bufmap[tx_idx];

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag, tx_bufmap->map, m0,
	    segs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments!  Defrag and try again. */
		struct mbuf *m = m_defrag(m0, M_NOWAIT);

		if (m == NULL) {
			m_freem(m0);
			return;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag,
		    tx_bufmap->map, m0, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	if (error != 0) {
		/* Give up. */
		m_freem(m0);
		return;
	}

	bus_dmamap_sync(sc->tsec_tx_mtag, tx_bufmap->map,
	    BUS_DMASYNC_PREWRITE);
	tx_bufmap->mbuf = m0;

	/*
	 * Fill in the TX descriptors back to front so that READY bit in first
	 * descriptor is set last.
	 */
	tx_idx = (tx_idx + (uint32_t)nsegs) & (TSEC_TX_NUM_DESC - 1);
	sc->tx_idx_head = tx_idx;
	flags = TSEC_TXBD_L | TSEC_TXBD_I | TSEC_TXBD_R | TSEC_TXBD_TC;
	for (i = nsegs - 1; i >= 0; i--) {
		struct tsec_desc *tx_desc;

		tx_idx = (tx_idx - 1) & (TSEC_TX_NUM_DESC - 1);
		tx_desc = &sc->tsec_tx_vaddr[tx_idx];
		tx_desc->length = segs[i].ds_len;
		tx_desc->bufptr = segs[i].ds_addr;

		if (i == 0) {
			wmb();

			if (fcb_flags != 0)
				flags |= TSEC_TXBD_TOE;
		}

		/*
		 * Set flags:
		 *   - wrap
		 *   - checksum
		 *   - ready to send
		 *   - transmit the CRC sequence after the last data byte
		 *   - interrupt after the last buffer
		 */
		tx_desc->flags = (tx_idx == (TSEC_TX_NUM_DESC - 1) ?
		    TSEC_TXBD_W : 0) | flags;

		flags &= ~(TSEC_TXBD_L | TSEC_TXBD_I);
	}

	BPF_MTAP(ifp, m0);
	*start_tx = 1;
}

static void
tsec_setfilter(struct tsec_softc *sc)
{
	struct ifnet *ifp;
	uint32_t flags;

	ifp = sc->tsec_ifp;
	flags = TSEC_READ(sc, TSEC_REG_RCTRL);

	/* Promiscuous mode */
	if (ifp->if_flags & IFF_PROMISC)
		flags |= TSEC_RCTRL_PROM;
	else
		flags &= ~TSEC_RCTRL_PROM;

	TSEC_WRITE(sc, TSEC_REG_RCTRL, flags);
}

#ifdef DEVICE_POLLING
static poll_handler_t tsec_poll;

static int
tsec_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	uint32_t ie;
	struct tsec_softc *sc = ifp->if_softc;
	int rx_npkts;

	rx_npkts = 0;

	TSEC_GLOBAL_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		TSEC_GLOBAL_UNLOCK(sc);
		return (rx_npkts);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		tsec_error_intr_locked(sc, count);
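		/*
		 * Error handling (above) and the IEVENT acknowledgement
		 * (below) only run on POLL_AND_CHECK_STATUS cycles, which
		 * the polling framework issues on a fraction of the polls,
		 * keeping the common POLL_ONLY path cheap.
		 */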

		/* Clear all events reported */
		ie = TSEC_READ(sc, TSEC_REG_IEVENT);
		TSEC_WRITE(sc, TSEC_REG_IEVENT, ie);
	}

	tsec_transmit_intr_locked(sc);

	TSEC_GLOBAL_TO_RECEIVE_LOCK(sc);

	rx_npkts = tsec_receive_intr_locked(sc, count);

	TSEC_RECEIVE_UNLOCK(sc);

	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static int
tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int mask, error = 0;

	switch (command) {
	case SIOCSIFMTU:
		TSEC_GLOBAL_LOCK(sc);
		if (tsec_set_mtu(sc, ifr->ifr_mtu))
			ifp->if_mtu = ifr->ifr_mtu;
		else
			error = EINVAL;
		TSEC_GLOBAL_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		TSEC_GLOBAL_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((sc->tsec_if_flags ^ ifp->if_flags) &
				    IFF_PROMISC)
					tsec_setfilter(sc);

				if ((sc->tsec_if_flags ^ ifp->if_flags) &
				    IFF_ALLMULTI)
					tsec_setup_multicast(sc);
			} else
				tsec_init_locked(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			tsec_stop(sc);

		sc->tsec_if_flags = ifp->if_flags;
		TSEC_GLOBAL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			TSEC_GLOBAL_LOCK(sc);
			tsec_setup_multicast(sc);
			TSEC_GLOBAL_UNLOCK(sc);
		}
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->tsec_mii->mii_media,
		    command);
		break;
	case SIOCSIFCAP:
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
		if ((mask & IFCAP_HWCSUM) && sc->is_etsec) {
			TSEC_GLOBAL_LOCK(sc);
			ifp->if_capenable &= ~IFCAP_HWCSUM;
			ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap;
			tsec_offload_setup(sc);
			TSEC_GLOBAL_UNLOCK(sc);
		}
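		/*
		 * Toggling IFCAP_POLLING below switches between
		 * interrupt-driven operation and the kernel polling
		 * framework; TSEC interrupts are masked while the poll
		 * handler is registered and unmasked again on
		 * deregistration.
		 */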
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(tsec_poll, ifp);
				if (error)
					return (error);

				TSEC_GLOBAL_LOCK(sc);
				/* Disable interrupts */
				tsec_intrs_ctl(sc, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				TSEC_GLOBAL_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				TSEC_GLOBAL_LOCK(sc);
				/* Enable interrupts */
				tsec_intrs_ctl(sc, 1);
				ifp->if_capenable &= ~IFCAP_POLLING;
				TSEC_GLOBAL_UNLOCK(sc);
			}
		}
#endif
		break;

	default:
		error = ether_ioctl(ifp, command, data);
	}

	/* Flush buffers if not empty */
	if (ifp->if_flags & IFF_UP)
		tsec_start(ifp);
	return (error);
}

static int
tsec_ifmedia_upd(struct ifnet *ifp)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	TSEC_TRANSMIT_LOCK(sc);

	mii = sc->tsec_mii;
	mii_mediachg(mii);

	TSEC_TRANSMIT_UNLOCK(sc);
	return (0);
}

static void
tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct tsec_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	TSEC_TRANSMIT_LOCK(sc);

	mii = sc->tsec_mii;
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	TSEC_TRANSMIT_UNLOCK(sc);
}

static int
tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
    uint32_t *paddr)
{
	struct mbuf *new_mbuf;
	bus_dma_segment_t seg[1];
	int error, nsegs;

	KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));

	new_mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MCLBYTES);
	if (new_mbuf == NULL)
		return (ENOBUFS);
	new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;

	if (*mbufp) {
		bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, map);
	}

	error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs,
	    BUS_DMA_NOWAIT);
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (nsegs != 1 || error)
		panic("tsec_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error);

#if 0
	if (error) {
		printf("tsec: bus_dmamap_load_mbuf_sg() returned: %d!\n",
		    error);
		m_freem(new_mbuf);
		return (ENOBUFS);
	}
#endif

#if 0
	KASSERT(((seg->ds_addr) & (TSEC_RXBUFFER_ALIGNMENT-1)) == 0,
	    ("Wrong alignment of RX buffer!"));
#endif
	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);

	(*mbufp) = new_mbuf;
	(*paddr) = seg->ds_addr;
	return (0);
}

static void
tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *paddr;

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	paddr = arg;
	*paddr = segs->ds_addr;
}

static int
tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag, bus_dmamap_t *dmap,
    bus_size_t dsize, void **vaddr, void *raddr, const char *dname)
{
	int error;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
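	/*
	 * The descriptor area is requested as a single segment with
	 * PAGE_SIZE alignment, so the whole ring is physically contiguous
	 * and TBASE/RBASE can be programmed with a single base address.
	 */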
*/ 111667196661SRafal Jaworowski error = bus_dma_tag_create(NULL, /* parent */ 111767196661SRafal Jaworowski PAGE_SIZE, 0, /* alignment, boundary */ 111867196661SRafal Jaworowski BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 111967196661SRafal Jaworowski BUS_SPACE_MAXADDR, /* highaddr */ 112067196661SRafal Jaworowski NULL, NULL, /* filtfunc, filtfuncarg */ 112167196661SRafal Jaworowski dsize, 1, /* maxsize, nsegments */ 112267196661SRafal Jaworowski dsize, 0, /* maxsegsz, flags */ 112367196661SRafal Jaworowski NULL, NULL, /* lockfunc, lockfuncarg */ 112467196661SRafal Jaworowski dtag); /* dmat */ 112567196661SRafal Jaworowski 112667196661SRafal Jaworowski if (error) { 112764f90c9dSRafal Jaworowski device_printf(dev, "failed to allocate busdma %s tag\n", 112864f90c9dSRafal Jaworowski dname); 112967196661SRafal Jaworowski (*vaddr) = NULL; 113067196661SRafal Jaworowski return (ENXIO); 113167196661SRafal Jaworowski } 113267196661SRafal Jaworowski 113367196661SRafal Jaworowski error = bus_dmamem_alloc(*dtag, vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 113467196661SRafal Jaworowski dmap); 113567196661SRafal Jaworowski if (error) { 113667196661SRafal Jaworowski device_printf(dev, "failed to allocate %s DMA safe memory\n", 113767196661SRafal Jaworowski dname); 113867196661SRafal Jaworowski bus_dma_tag_destroy(*dtag); 113967196661SRafal Jaworowski (*vaddr) = NULL; 114067196661SRafal Jaworowski return (ENXIO); 114167196661SRafal Jaworowski } 114267196661SRafal Jaworowski 114364f90c9dSRafal Jaworowski error = bus_dmamap_load(*dtag, *dmap, *vaddr, dsize, 114464f90c9dSRafal Jaworowski tsec_map_dma_addr, raddr, BUS_DMA_NOWAIT); 114567196661SRafal Jaworowski if (error) { 114664f90c9dSRafal Jaworowski device_printf(dev, "cannot get address of the %s " 114764f90c9dSRafal Jaworowski "descriptors\n", dname); 114867196661SRafal Jaworowski bus_dmamem_free(*dtag, *vaddr, *dmap); 114967196661SRafal Jaworowski bus_dma_tag_destroy(*dtag); 115067196661SRafal Jaworowski (*vaddr) = NULL; 115167196661SRafal Jaworowski return (ENXIO); 115267196661SRafal Jaworowski } 115367196661SRafal Jaworowski 115467196661SRafal Jaworowski return (0); 115567196661SRafal Jaworowski } 115667196661SRafal Jaworowski 115767196661SRafal Jaworowski static void 115867196661SRafal Jaworowski tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr) 115967196661SRafal Jaworowski { 116067196661SRafal Jaworowski 116167196661SRafal Jaworowski if (vaddr == NULL) 116267196661SRafal Jaworowski return; 116367196661SRafal Jaworowski 116467196661SRafal Jaworowski /* Unmap descriptors from DMA memory */ 116564f90c9dSRafal Jaworowski bus_dmamap_sync(dtag, dmap, BUS_DMASYNC_POSTREAD | 116664f90c9dSRafal Jaworowski BUS_DMASYNC_POSTWRITE); 116767196661SRafal Jaworowski bus_dmamap_unload(dtag, dmap); 116867196661SRafal Jaworowski 116967196661SRafal Jaworowski /* Free descriptors memory */ 117067196661SRafal Jaworowski bus_dmamem_free(dtag, vaddr, dmap); 117167196661SRafal Jaworowski 117267196661SRafal Jaworowski /* Destroy descriptors tag */ 117367196661SRafal Jaworowski bus_dma_tag_destroy(dtag); 117467196661SRafal Jaworowski } 117567196661SRafal Jaworowski 117667196661SRafal Jaworowski static void 117767196661SRafal Jaworowski tsec_free_dma(struct tsec_softc *sc) 117867196661SRafal Jaworowski { 117967196661SRafal Jaworowski int i; 118067196661SRafal Jaworowski 118167196661SRafal Jaworowski /* Free TX maps */ 118267196661SRafal Jaworowski for (i = 0; i < TSEC_TX_NUM_DESC; i++) 11832c0dbbcbSJustin Hibbits if (sc->tx_bufmap[i].map_initialized) 
118464f90c9dSRafal Jaworowski bus_dmamap_destroy(sc->tsec_tx_mtag, 11852c0dbbcbSJustin Hibbits sc->tx_bufmap[i].map); 118664f90c9dSRafal Jaworowski /* Destroy tag for TX mbufs */ 118767196661SRafal Jaworowski bus_dma_tag_destroy(sc->tsec_tx_mtag); 118867196661SRafal Jaworowski 118967196661SRafal Jaworowski /* Free RX mbufs and maps */ 119067196661SRafal Jaworowski for (i = 0; i < TSEC_RX_NUM_DESC; i++) { 119167196661SRafal Jaworowski if (sc->rx_data[i].mbuf) { 119267196661SRafal Jaworowski /* Unload buffer from DMA */ 119367196661SRafal Jaworowski bus_dmamap_sync(sc->tsec_rx_mtag, sc->rx_data[i].map, 119467196661SRafal Jaworowski BUS_DMASYNC_POSTREAD); 119564f90c9dSRafal Jaworowski bus_dmamap_unload(sc->tsec_rx_mtag, 119664f90c9dSRafal Jaworowski sc->rx_data[i].map); 119767196661SRafal Jaworowski 119867196661SRafal Jaworowski /* Free buffer */ 119967196661SRafal Jaworowski m_freem(sc->rx_data[i].mbuf); 120067196661SRafal Jaworowski } 120167196661SRafal Jaworowski /* Destroy map for this buffer */ 120267196661SRafal Jaworowski if (sc->rx_data[i].map != NULL) 120367196661SRafal Jaworowski bus_dmamap_destroy(sc->tsec_rx_mtag, 120467196661SRafal Jaworowski sc->rx_data[i].map); 120567196661SRafal Jaworowski } 120664f90c9dSRafal Jaworowski /* Destroy tag for RX mbufs */ 120767196661SRafal Jaworowski bus_dma_tag_destroy(sc->tsec_rx_mtag); 120867196661SRafal Jaworowski 120967196661SRafal Jaworowski /* Unload TX/RX descriptors */ 121067196661SRafal Jaworowski tsec_free_dma_desc(sc->tsec_tx_dtag, sc->tsec_tx_dmap, 121167196661SRafal Jaworowski sc->tsec_tx_vaddr); 121267196661SRafal Jaworowski tsec_free_dma_desc(sc->tsec_rx_dtag, sc->tsec_rx_dmap, 121367196661SRafal Jaworowski sc->tsec_rx_vaddr); 121467196661SRafal Jaworowski } 121567196661SRafal Jaworowski 121667196661SRafal Jaworowski static void 121767196661SRafal Jaworowski tsec_stop(struct tsec_softc *sc) 121867196661SRafal Jaworowski { 121967196661SRafal Jaworowski struct ifnet *ifp; 122067196661SRafal Jaworowski uint32_t tmpval; 122167196661SRafal Jaworowski 122267196661SRafal Jaworowski TSEC_GLOBAL_LOCK_ASSERT(sc); 122367196661SRafal Jaworowski 122467196661SRafal Jaworowski ifp = sc->tsec_ifp; 122567196661SRafal Jaworowski 122667196661SRafal Jaworowski /* Disable interface and watchdog timer */ 122764f90c9dSRafal Jaworowski callout_stop(&sc->tsec_callout); 122867196661SRafal Jaworowski ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 12295432bd9fSRafal Jaworowski sc->tsec_watchdog = 0; 123067196661SRafal Jaworowski 123167196661SRafal Jaworowski /* Disable all interrupts and stop DMA */ 123267196661SRafal Jaworowski tsec_intrs_ctl(sc, 0); 123367196661SRafal Jaworowski tsec_dma_ctl(sc, 0); 123467196661SRafal Jaworowski 123567196661SRafal Jaworowski /* Remove pending data from TX queue */ 12362c0dbbcbSJustin Hibbits while (sc->tx_idx_tail != sc->tx_idx_head) { 12372c0dbbcbSJustin Hibbits bus_dmamap_sync(sc->tsec_tx_mtag, 12382c0dbbcbSJustin Hibbits sc->tx_bufmap[sc->tx_idx_tail].map, 1239bd37530eSRafal Jaworowski BUS_DMASYNC_POSTWRITE); 12402c0dbbcbSJustin Hibbits bus_dmamap_unload(sc->tsec_tx_mtag, 12412c0dbbcbSJustin Hibbits sc->tx_bufmap[sc->tx_idx_tail].map); 12422c0dbbcbSJustin Hibbits m_freem(sc->tx_bufmap[sc->tx_idx_tail].mbuf); 12432c0dbbcbSJustin Hibbits sc->tx_idx_tail = (sc->tx_idx_tail + 1) 12442c0dbbcbSJustin Hibbits & (TSEC_TX_NUM_DESC - 1); 124567196661SRafal Jaworowski } 124667196661SRafal Jaworowski 1247bd37530eSRafal Jaworowski /* Disable RX and TX */ 124867196661SRafal Jaworowski tmpval = TSEC_READ(sc, 
TSEC_REG_MACCFG1); 124967196661SRafal Jaworowski tmpval &= ~(TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN); 125067196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MACCFG1, tmpval); 125167196661SRafal Jaworowski DELAY(10); 125267196661SRafal Jaworowski } 125367196661SRafal Jaworowski 1254bd37530eSRafal Jaworowski static void 1255bd37530eSRafal Jaworowski tsec_tick(void *arg) 125667196661SRafal Jaworowski { 125767196661SRafal Jaworowski struct tsec_softc *sc = arg; 1258bd37530eSRafal Jaworowski struct ifnet *ifp; 1259bd37530eSRafal Jaworowski int link; 1260bd37530eSRafal Jaworowski 1261bd37530eSRafal Jaworowski TSEC_GLOBAL_LOCK(sc); 1262bd37530eSRafal Jaworowski 1263bd37530eSRafal Jaworowski tsec_watchdog(sc); 1264bd37530eSRafal Jaworowski 1265bd37530eSRafal Jaworowski ifp = sc->tsec_ifp; 1266bd37530eSRafal Jaworowski link = sc->tsec_link; 1267bd37530eSRafal Jaworowski 1268bd37530eSRafal Jaworowski mii_tick(sc->tsec_mii); 1269bd37530eSRafal Jaworowski 1270bd37530eSRafal Jaworowski if (link == 0 && sc->tsec_link == 1 && 1271bd37530eSRafal Jaworowski (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))) 1272bd37530eSRafal Jaworowski tsec_start_locked(ifp); 1273bd37530eSRafal Jaworowski 1274bd37530eSRafal Jaworowski /* Schedule another timeout one second from now. */ 1275bd37530eSRafal Jaworowski callout_reset(&sc->tsec_callout, hz, tsec_tick, sc); 1276bd37530eSRafal Jaworowski 1277bd37530eSRafal Jaworowski TSEC_GLOBAL_UNLOCK(sc); 1278bd37530eSRafal Jaworowski } 1279bd37530eSRafal Jaworowski 1280bd37530eSRafal Jaworowski /* 1281bd37530eSRafal Jaworowski * This is the core RX routine. It replenishes mbufs in the descriptor ring and 1282bd37530eSRafal Jaworowski * passes data that has been DMA'ed into host memory up to the upper layer. 1283bd37530eSRafal Jaworowski * 1284bd37530eSRafal Jaworowski * Loops at most count times if count is >= 0, or until done if count < 0.
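 *
 * The interrupt handler below (tsec_receive_intr()) calls this routine with
 * count == -1 to drain the ring completely; a non-negative count bounds the
 * number of descriptors processed per call, for callers such as the optional
 * DEVICE_POLLING path.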
1285bd37530eSRafal Jaworowski */ 12861abcdbd1SAttilio Rao static int 1287bd37530eSRafal Jaworowski tsec_receive_intr_locked(struct tsec_softc *sc, int count) 1288bd37530eSRafal Jaworowski { 128967196661SRafal Jaworowski struct tsec_desc *rx_desc; 129067196661SRafal Jaworowski struct ifnet *ifp; 129167196661SRafal Jaworowski struct rx_data_type *rx_data; 129267196661SRafal Jaworowski struct mbuf *m; 129367196661SRafal Jaworowski uint32_t i; 12941abcdbd1SAttilio Rao int c, rx_npkts; 129567196661SRafal Jaworowski uint16_t flags; 1296bd37530eSRafal Jaworowski 1297bd37530eSRafal Jaworowski TSEC_RECEIVE_LOCK_ASSERT(sc); 129867196661SRafal Jaworowski 129967196661SRafal Jaworowski ifp = sc->tsec_ifp; 130067196661SRafal Jaworowski rx_data = sc->rx_data; 13011abcdbd1SAttilio Rao rx_npkts = 0; 130267196661SRafal Jaworowski 1303bd37530eSRafal Jaworowski bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, 1304bd37530eSRafal Jaworowski BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 130567196661SRafal Jaworowski 1306bd37530eSRafal Jaworowski for (c = 0; ; c++) { 1307bd37530eSRafal Jaworowski if (count >= 0 && count-- == 0) 1308bd37530eSRafal Jaworowski break; 130967196661SRafal Jaworowski 131067196661SRafal Jaworowski rx_desc = TSEC_GET_CUR_RX_DESC(sc); 131167196661SRafal Jaworowski flags = rx_desc->flags; 131267196661SRafal Jaworowski 131367196661SRafal Jaworowski /* Check if there is anything to receive */ 1314bd37530eSRafal Jaworowski if ((flags & TSEC_RXBD_E) || (c >= TSEC_RX_NUM_DESC)) { 131567196661SRafal Jaworowski /* 131667196661SRafal Jaworowski * Avoid generating another interrupt 131767196661SRafal Jaworowski */ 131867196661SRafal Jaworowski if (flags & TSEC_RXBD_E) 131967196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_IEVENT, 132067196661SRafal Jaworowski TSEC_IEVENT_RXB | TSEC_IEVENT_RXF); 132167196661SRafal Jaworowski /* 132267196661SRafal Jaworowski * We didn't consume current descriptor and have to 132367196661SRafal Jaworowski * return it to the queue 132467196661SRafal Jaworowski */ 132567196661SRafal Jaworowski TSEC_BACK_CUR_RX_DESC(sc); 132667196661SRafal Jaworowski break; 132767196661SRafal Jaworowski } 132867196661SRafal Jaworowski 132967196661SRafal Jaworowski if (flags & (TSEC_RXBD_LG | TSEC_RXBD_SH | TSEC_RXBD_NO | 133067196661SRafal Jaworowski TSEC_RXBD_CR | TSEC_RXBD_OV | TSEC_RXBD_TR)) { 1331321e12c8SRafal Jaworowski 133267196661SRafal Jaworowski rx_desc->length = 0; 1333bd37530eSRafal Jaworowski rx_desc->flags = (rx_desc->flags & 1334bd37530eSRafal Jaworowski ~TSEC_RXBD_ZEROONINIT) | TSEC_RXBD_E | TSEC_RXBD_I; 1335bd37530eSRafal Jaworowski 1336bd37530eSRafal Jaworowski if (sc->frame != NULL) { 1337bd37530eSRafal Jaworowski m_free(sc->frame); 1338bd37530eSRafal Jaworowski sc->frame = NULL; 1339bd37530eSRafal Jaworowski } 1340bd37530eSRafal Jaworowski 134167196661SRafal Jaworowski continue; 134267196661SRafal Jaworowski } 134367196661SRafal Jaworowski 134467196661SRafal Jaworowski /* Ok... 
process frame */ 134567196661SRafal Jaworowski i = TSEC_GET_CUR_RX_DESC_CNT(sc); 134667196661SRafal Jaworowski m = rx_data[i].mbuf; 1347bd37530eSRafal Jaworowski m->m_len = rx_desc->length; 1348bd37530eSRafal Jaworowski 1349bd37530eSRafal Jaworowski if (sc->frame != NULL) { 1350bd37530eSRafal Jaworowski if ((flags & TSEC_RXBD_L) != 0) 1351bd37530eSRafal Jaworowski m->m_len -= m_length(sc->frame, NULL); 1352bd37530eSRafal Jaworowski 1353bd37530eSRafal Jaworowski m->m_flags &= ~M_PKTHDR; 1354bd37530eSRafal Jaworowski m_cat(sc->frame, m); 1355bd37530eSRafal Jaworowski } else { 1356bd37530eSRafal Jaworowski sc->frame = m; 1357bd37530eSRafal Jaworowski } 1358bd37530eSRafal Jaworowski 1359bd37530eSRafal Jaworowski m = NULL; 1360bd37530eSRafal Jaworowski 1361bd37530eSRafal Jaworowski if ((flags & TSEC_RXBD_L) != 0) { 1362bd37530eSRafal Jaworowski m = sc->frame; 1363bd37530eSRafal Jaworowski sc->frame = NULL; 1364bd37530eSRafal Jaworowski } 136567196661SRafal Jaworowski 136667196661SRafal Jaworowski if (tsec_new_rxbuf(sc->tsec_rx_mtag, rx_data[i].map, 136767196661SRafal Jaworowski &rx_data[i].mbuf, &rx_data[i].paddr)) { 13682c0dbbcbSJustin Hibbits if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1369ab160495SRafal Jaworowski /* 1370ab160495SRafal Jaworowski * We ran out of mbufs; didn't consume current 1371ab160495SRafal Jaworowski * descriptor and have to return it to the queue. 1372ab160495SRafal Jaworowski */ 1373ab160495SRafal Jaworowski TSEC_BACK_CUR_RX_DESC(sc); 1374ab160495SRafal Jaworowski break; 137567196661SRafal Jaworowski } 1376bd37530eSRafal Jaworowski 1377bd37530eSRafal Jaworowski /* Attach new buffer to descriptor and clear flags */ 137867196661SRafal Jaworowski rx_desc->bufptr = rx_data[i].paddr; 137967196661SRafal Jaworowski rx_desc->length = 0; 138067196661SRafal Jaworowski rx_desc->flags = (rx_desc->flags & ~TSEC_RXBD_ZEROONINIT) | 138167196661SRafal Jaworowski TSEC_RXBD_E | TSEC_RXBD_I; 138267196661SRafal Jaworowski 1383bd37530eSRafal Jaworowski if (m != NULL) { 138467196661SRafal Jaworowski m->m_pkthdr.rcvif = ifp; 138567196661SRafal Jaworowski 1386bd37530eSRafal Jaworowski m_fixhdr(m); 1387bd37530eSRafal Jaworowski m_adj(m, -ETHER_CRC_LEN); 138867196661SRafal Jaworowski 1389bd37530eSRafal Jaworowski if (sc->is_etsec) 1390bd37530eSRafal Jaworowski tsec_offload_process_frame(sc, m); 139167196661SRafal Jaworowski 139267196661SRafal Jaworowski TSEC_RECEIVE_UNLOCK(sc); 1393bd37530eSRafal Jaworowski (*ifp->if_input)(ifp, m); 1394bd37530eSRafal Jaworowski TSEC_RECEIVE_LOCK(sc); 13951abcdbd1SAttilio Rao rx_npkts++; 1396bd37530eSRafal Jaworowski } 1397bd37530eSRafal Jaworowski } 139867196661SRafal Jaworowski 1399bd37530eSRafal Jaworowski bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, 1400bd37530eSRafal Jaworowski BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1401371bf7ccSRafal Jaworowski 1402371bf7ccSRafal Jaworowski /* 1403371bf7ccSRafal Jaworowski * Make sure TSEC receiver is not halted. 1404371bf7ccSRafal Jaworowski * 1405371bf7ccSRafal Jaworowski * Various conditions can stop the TSEC receiver, but not all are 1406371bf7ccSRafal Jaworowski * signaled and handled by error interrupt, so make sure the receiver 1407371bf7ccSRafal Jaworowski * is running. Writing to TSEC_REG_RSTAT restarts the receiver when 1408371bf7ccSRafal Jaworowski * halted, and is harmless if already running. 
1409371bf7ccSRafal Jaworowski */ 1410371bf7ccSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_RSTAT, TSEC_RSTAT_QHLT); 14111abcdbd1SAttilio Rao return (rx_npkts); 141267196661SRafal Jaworowski } 141367196661SRafal Jaworowski 1414321e12c8SRafal Jaworowski void 1415bd37530eSRafal Jaworowski tsec_receive_intr(void *arg) 141667196661SRafal Jaworowski { 141767196661SRafal Jaworowski struct tsec_softc *sc = arg; 1418bd37530eSRafal Jaworowski 1419bd37530eSRafal Jaworowski TSEC_RECEIVE_LOCK(sc); 1420bd37530eSRafal Jaworowski 1421bd37530eSRafal Jaworowski #ifdef DEVICE_POLLING 1422bd37530eSRafal Jaworowski if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) { 1423bd37530eSRafal Jaworowski TSEC_RECEIVE_UNLOCK(sc); 1424bd37530eSRafal Jaworowski return; 1425bd37530eSRafal Jaworowski } 1426bd37530eSRafal Jaworowski #endif 1427bd37530eSRafal Jaworowski 1428bd37530eSRafal Jaworowski /* Confirm the interrupt was received by driver */ 1429bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXB | TSEC_IEVENT_RXF); 1430bd37530eSRafal Jaworowski tsec_receive_intr_locked(sc, -1); 1431bd37530eSRafal Jaworowski 1432bd37530eSRafal Jaworowski TSEC_RECEIVE_UNLOCK(sc); 1433bd37530eSRafal Jaworowski } 1434bd37530eSRafal Jaworowski 1435bd37530eSRafal Jaworowski static void 1436bd37530eSRafal Jaworowski tsec_transmit_intr_locked(struct tsec_softc *sc) 1437bd37530eSRafal Jaworowski { 143867196661SRafal Jaworowski struct ifnet *ifp; 14392c0dbbcbSJustin Hibbits uint32_t tx_idx; 144067196661SRafal Jaworowski 1441bd37530eSRafal Jaworowski TSEC_TRANSMIT_LOCK_ASSERT(sc); 1442bd37530eSRafal Jaworowski 144367196661SRafal Jaworowski ifp = sc->tsec_ifp; 144467196661SRafal Jaworowski 144567196661SRafal Jaworowski /* Update collision statistics */ 1446c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_COLLISIONS, TSEC_READ(sc, TSEC_REG_MON_TNCL)); 144767196661SRafal Jaworowski 144867196661SRafal Jaworowski /* Reset collision counters in hardware */ 144967196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0); 145067196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0); 145167196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0); 145267196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0); 145367196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0); 145467196661SRafal Jaworowski 1455321e12c8SRafal Jaworowski bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, 1456321e12c8SRafal Jaworowski BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 145767196661SRafal Jaworowski 14582c0dbbcbSJustin Hibbits tx_idx = sc->tx_idx_tail; 14592c0dbbcbSJustin Hibbits while (tx_idx != sc->tx_idx_head) { 14602c0dbbcbSJustin Hibbits struct tsec_desc *tx_desc; 14612c0dbbcbSJustin Hibbits struct tsec_bufmap *tx_bufmap; 14622c0dbbcbSJustin Hibbits 14632c0dbbcbSJustin Hibbits tx_desc = &sc->tsec_tx_vaddr[tx_idx]; 146467196661SRafal Jaworowski if (tx_desc->flags & TSEC_TXBD_R) { 146567196661SRafal Jaworowski break; 146667196661SRafal Jaworowski } 146767196661SRafal Jaworowski 14682c0dbbcbSJustin Hibbits tx_bufmap = &sc->tx_bufmap[tx_idx]; 14692c0dbbcbSJustin Hibbits tx_idx = (tx_idx + 1) & (TSEC_TX_NUM_DESC - 1); 14702c0dbbcbSJustin Hibbits if (tx_bufmap->mbuf == NULL) 147167196661SRafal Jaworowski continue; 147267196661SRafal Jaworowski 147367196661SRafal Jaworowski /* 147467196661SRafal Jaworowski * This is the last buf in this packet, so unmap and free it. 
147567196661SRafal Jaworowski */ 14762c0dbbcbSJustin Hibbits bus_dmamap_sync(sc->tsec_tx_mtag, tx_bufmap->map, 147764f90c9dSRafal Jaworowski BUS_DMASYNC_POSTWRITE); 14782c0dbbcbSJustin Hibbits bus_dmamap_unload(sc->tsec_tx_mtag, tx_bufmap->map); 14792c0dbbcbSJustin Hibbits m_freem(tx_bufmap->mbuf); 14802c0dbbcbSJustin Hibbits tx_bufmap->mbuf = NULL; 148167196661SRafal Jaworowski 1482c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 148367196661SRafal Jaworowski } 14842c0dbbcbSJustin Hibbits sc->tx_idx_tail = tx_idx; 1485bd37530eSRafal Jaworowski bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, 1486bd37530eSRafal Jaworowski BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 148767196661SRafal Jaworowski 148867196661SRafal Jaworowski ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 148967196661SRafal Jaworowski tsec_start_locked(ifp); 149067196661SRafal Jaworowski 14912c0dbbcbSJustin Hibbits if (sc->tx_idx_tail == sc->tx_idx_head) 14925432bd9fSRafal Jaworowski sc->tsec_watchdog = 0; 149367196661SRafal Jaworowski } 149467196661SRafal Jaworowski 1495321e12c8SRafal Jaworowski void 1496bd37530eSRafal Jaworowski tsec_transmit_intr(void *arg) 149767196661SRafal Jaworowski { 149867196661SRafal Jaworowski struct tsec_softc *sc = arg; 1499bd37530eSRafal Jaworowski 1500bd37530eSRafal Jaworowski TSEC_TRANSMIT_LOCK(sc); 1501bd37530eSRafal Jaworowski 1502bd37530eSRafal Jaworowski #ifdef DEVICE_POLLING 1503bd37530eSRafal Jaworowski if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) { 1504bd37530eSRafal Jaworowski TSEC_TRANSMIT_UNLOCK(sc); 1505bd37530eSRafal Jaworowski return; 1506bd37530eSRafal Jaworowski } 1507bd37530eSRafal Jaworowski #endif 1508bd37530eSRafal Jaworowski /* Confirm the interrupt was received by driver */ 1509bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_TXB | TSEC_IEVENT_TXF); 1510bd37530eSRafal Jaworowski tsec_transmit_intr_locked(sc); 1511bd37530eSRafal Jaworowski 1512bd37530eSRafal Jaworowski TSEC_TRANSMIT_UNLOCK(sc); 1513bd37530eSRafal Jaworowski } 1514bd37530eSRafal Jaworowski 1515bd37530eSRafal Jaworowski static void 1516bd37530eSRafal Jaworowski tsec_error_intr_locked(struct tsec_softc *sc, int count) 1517bd37530eSRafal Jaworowski { 151867196661SRafal Jaworowski struct ifnet *ifp; 151967196661SRafal Jaworowski uint32_t eflags; 152067196661SRafal Jaworowski 1521bd37530eSRafal Jaworowski TSEC_GLOBAL_LOCK_ASSERT(sc); 1522bd37530eSRafal Jaworowski 152367196661SRafal Jaworowski ifp = sc->tsec_ifp; 152467196661SRafal Jaworowski 152567196661SRafal Jaworowski eflags = TSEC_READ(sc, TSEC_REG_IEVENT); 152667196661SRafal Jaworowski 152767196661SRafal Jaworowski /* Clear events bits in hardware */ 152867196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXC | TSEC_IEVENT_BSY | 152967196661SRafal Jaworowski TSEC_IEVENT_EBERR | TSEC_IEVENT_MSRO | TSEC_IEVENT_BABT | 153067196661SRafal Jaworowski TSEC_IEVENT_TXC | TSEC_IEVENT_TXE | TSEC_IEVENT_LC | 153167196661SRafal Jaworowski TSEC_IEVENT_CRL | TSEC_IEVENT_XFUN); 153267196661SRafal Jaworowski 153367196661SRafal Jaworowski /* Check transmitter errors */ 153467196661SRafal Jaworowski if (eflags & TSEC_IEVENT_TXE) { 1535c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 153667196661SRafal Jaworowski 153767196661SRafal Jaworowski if (eflags & TSEC_IEVENT_LC) 1538c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); 153967196661SRafal Jaworowski 154067196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT); 154167196661SRafal Jaworowski } 154267196661SRafal 
Jaworowski 15432c0dbbcbSJustin Hibbits /* Check for discarded frame due to a lack of buffers */ 154467196661SRafal Jaworowski if (eflags & TSEC_IEVENT_BSY) { 1545c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 154667196661SRafal Jaworowski } 1547bd37530eSRafal Jaworowski 1548bd37530eSRafal Jaworowski if (ifp->if_flags & IFF_DEBUG) 1549bd37530eSRafal Jaworowski if_printf(ifp, "tsec_error_intr(): event flags: 0x%x\n", 1550bd37530eSRafal Jaworowski eflags); 1551bd37530eSRafal Jaworowski 1552bd37530eSRafal Jaworowski if (eflags & TSEC_IEVENT_EBERR) { 1553bd37530eSRafal Jaworowski if_printf(ifp, "System bus error occurred during " 1554bd37530eSRafal Jaworowski "DMA transaction (flags: 0x%x)\n", eflags); 1555bd37530eSRafal Jaworowski tsec_init_locked(sc); 1556bd37530eSRafal Jaworowski } 1557bd37530eSRafal Jaworowski 1558bd37530eSRafal Jaworowski if (eflags & TSEC_IEVENT_BABT) 1559c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1560bd37530eSRafal Jaworowski 156167196661SRafal Jaworowski if (eflags & TSEC_IEVENT_BABR) 1562c8dfaf38SGleb Smirnoff if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 156367196661SRafal Jaworowski } 156467196661SRafal Jaworowski 1565bd37530eSRafal Jaworowski void 1566bd37530eSRafal Jaworowski tsec_error_intr(void *arg) 156767196661SRafal Jaworowski { 1568bd37530eSRafal Jaworowski struct tsec_softc *sc = arg; 156967196661SRafal Jaworowski 1570772619e1SRafal Jaworowski TSEC_GLOBAL_LOCK(sc); 1571bd37530eSRafal Jaworowski tsec_error_intr_locked(sc, -1); 1572772619e1SRafal Jaworowski TSEC_GLOBAL_UNLOCK(sc); 157367196661SRafal Jaworowski } 157467196661SRafal Jaworowski 1575321e12c8SRafal Jaworowski int 157667196661SRafal Jaworowski tsec_miibus_readreg(device_t dev, int phy, int reg) 157767196661SRafal Jaworowski { 157867196661SRafal Jaworowski struct tsec_softc *sc; 157988011b59SJustin Hibbits int timeout; 1580629aa519SNathan Whitehorn int rv; 158167196661SRafal Jaworowski 1582aa15e881SRafal Jaworowski sc = device_get_softc(dev); 158367196661SRafal Jaworowski 1584629aa519SNathan Whitehorn TSEC_PHY_LOCK(); 1585629aa519SNathan Whitehorn TSEC_PHY_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg); 1586629aa519SNathan Whitehorn TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCOM, 0); 1587629aa519SNathan Whitehorn TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCOM, TSEC_MIIMCOM_READCYCLE); 158867196661SRafal Jaworowski 158988011b59SJustin Hibbits timeout = tsec_mii_wait(sc, TSEC_MIIMIND_NOTVALID | TSEC_MIIMIND_BUSY); 159088011b59SJustin Hibbits rv = TSEC_PHY_READ(sc, TSEC_REG_MIIMSTAT); 159188011b59SJustin Hibbits TSEC_PHY_UNLOCK(); 159267196661SRafal Jaworowski 159372b58db8SJustin Hibbits if (timeout) 159467196661SRafal Jaworowski device_printf(dev, "Timeout while reading from PHY!\n"); 159567196661SRafal Jaworowski 1596629aa519SNathan Whitehorn return (rv); 159767196661SRafal Jaworowski } 159867196661SRafal Jaworowski 1599661ee6eeSRafal Jaworowski int 160067196661SRafal Jaworowski tsec_miibus_writereg(device_t dev, int phy, int reg, int value) 160167196661SRafal Jaworowski { 160267196661SRafal Jaworowski struct tsec_softc *sc; 160388011b59SJustin Hibbits int timeout; 160467196661SRafal Jaworowski 1605aa15e881SRafal Jaworowski sc = device_get_softc(dev); 160667196661SRafal Jaworowski 1607629aa519SNathan Whitehorn TSEC_PHY_LOCK(); 1608629aa519SNathan Whitehorn TSEC_PHY_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg); 1609629aa519SNathan Whitehorn TSEC_PHY_WRITE(sc, TSEC_REG_MIIMCON, value); 161088011b59SJustin Hibbits timeout = tsec_mii_wait(sc, TSEC_MIIMIND_BUSY); 1611629aa519SNathan 
Whitehorn TSEC_PHY_UNLOCK(); 161267196661SRafal Jaworowski 161372b58db8SJustin Hibbits if (timeout) 161467196661SRafal Jaworowski device_printf(dev, "Timeout while writing to PHY!\n"); 1615661ee6eeSRafal Jaworowski 1616661ee6eeSRafal Jaworowski return (0); 161767196661SRafal Jaworowski } 161867196661SRafal Jaworowski 1619321e12c8SRafal Jaworowski void 162067196661SRafal Jaworowski tsec_miibus_statchg(device_t dev) 162167196661SRafal Jaworowski { 162267196661SRafal Jaworowski struct tsec_softc *sc; 162367196661SRafal Jaworowski struct mii_data *mii; 162467196661SRafal Jaworowski uint32_t ecntrl, id, tmp; 162567196661SRafal Jaworowski int link; 162667196661SRafal Jaworowski 162767196661SRafal Jaworowski sc = device_get_softc(dev); 162867196661SRafal Jaworowski mii = sc->tsec_mii; 162967196661SRafal Jaworowski link = ((mii->mii_media_status & IFM_ACTIVE) ? 1 : 0); 163067196661SRafal Jaworowski 163167196661SRafal Jaworowski tmp = TSEC_READ(sc, TSEC_REG_MACCFG2) & ~TSEC_MACCFG2_IF; 163267196661SRafal Jaworowski 163367196661SRafal Jaworowski if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) 163467196661SRafal Jaworowski tmp |= TSEC_MACCFG2_FULLDUPLEX; 163567196661SRafal Jaworowski else 163667196661SRafal Jaworowski tmp &= ~TSEC_MACCFG2_FULLDUPLEX; 163767196661SRafal Jaworowski 163867196661SRafal Jaworowski switch (IFM_SUBTYPE(mii->mii_media_active)) { 163967196661SRafal Jaworowski case IFM_1000_T: 164067196661SRafal Jaworowski case IFM_1000_SX: 164167196661SRafal Jaworowski tmp |= TSEC_MACCFG2_GMII; 164267196661SRafal Jaworowski sc->tsec_link = link; 164367196661SRafal Jaworowski break; 164467196661SRafal Jaworowski case IFM_100_TX: 164567196661SRafal Jaworowski case IFM_10_T: 164667196661SRafal Jaworowski tmp |= TSEC_MACCFG2_MII; 164767196661SRafal Jaworowski sc->tsec_link = link; 164867196661SRafal Jaworowski break; 164967196661SRafal Jaworowski case IFM_NONE: 165067196661SRafal Jaworowski if (link) 165164f90c9dSRafal Jaworowski device_printf(dev, "No speed selected but link " 165264f90c9dSRafal Jaworowski "active!\n"); 165367196661SRafal Jaworowski sc->tsec_link = 0; 165467196661SRafal Jaworowski return; 165567196661SRafal Jaworowski default: 165667196661SRafal Jaworowski sc->tsec_link = 0; 165767196661SRafal Jaworowski device_printf(dev, "Unknown speed (%d), link %s!\n", 165867196661SRafal Jaworowski IFM_SUBTYPE(mii->mii_media_active), 165967196661SRafal Jaworowski ((link) ? "up" : "down")); 166067196661SRafal Jaworowski return; 166167196661SRafal Jaworowski } 166267196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MACCFG2, tmp); 166367196661SRafal Jaworowski 166467196661SRafal Jaworowski /* XXX kludge - use circumstantial evidence for reduced mode. */ 166567196661SRafal Jaworowski id = TSEC_READ(sc, TSEC_REG_ID2); 166667196661SRafal Jaworowski if (id & 0xffff) { 166767196661SRafal Jaworowski ecntrl = TSEC_READ(sc, TSEC_REG_ECNTRL) & ~TSEC_ECNTRL_R100M; 166867196661SRafal Jaworowski ecntrl |= (tmp & TSEC_MACCFG2_MII) ? 
TSEC_ECNTRL_R100M : 0; 166967196661SRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_ECNTRL, ecntrl); 167067196661SRafal Jaworowski } 167167196661SRafal Jaworowski } 1672bd37530eSRafal Jaworowski 1673bd37530eSRafal Jaworowski static void 1674bd37530eSRafal Jaworowski tsec_add_sysctls(struct tsec_softc *sc) 1675bd37530eSRafal Jaworowski { 1676bd37530eSRafal Jaworowski struct sysctl_ctx_list *ctx; 1677bd37530eSRafal Jaworowski struct sysctl_oid_list *children; 1678bd37530eSRafal Jaworowski struct sysctl_oid *tree; 1679bd37530eSRafal Jaworowski 1680bd37530eSRafal Jaworowski ctx = device_get_sysctl_ctx(sc->dev); 1681bd37530eSRafal Jaworowski children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 1682bd37530eSRafal Jaworowski tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal", 1683*7029da5cSPawel Biernacki CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "TSEC Interrupts coalescing"); 1684bd37530eSRafal Jaworowski children = SYSCTL_CHILDREN(tree); 1685bd37530eSRafal Jaworowski 1686bd37530eSRafal Jaworowski SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time", 1687*7029da5cSPawel Biernacki CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, TSEC_IC_RX, 1688*7029da5cSPawel Biernacki tsec_sysctl_ic_time, "I", "IC RX time threshold (0-65535)"); 1689bd37530eSRafal Jaworowski SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_count", 1690*7029da5cSPawel Biernacki CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, TSEC_IC_RX, 1691*7029da5cSPawel Biernacki tsec_sysctl_ic_count, "I", "IC RX frame count threshold (0-255)"); 1692bd37530eSRafal Jaworowski 1693bd37530eSRafal Jaworowski SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time", 1694*7029da5cSPawel Biernacki CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, TSEC_IC_TX, 1695*7029da5cSPawel Biernacki tsec_sysctl_ic_time, "I", "IC TX time threshold (0-65535)"); 1696bd37530eSRafal Jaworowski SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_count", 1697*7029da5cSPawel Biernacki CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, TSEC_IC_TX, 1698*7029da5cSPawel Biernacki tsec_sysctl_ic_count, "I", "IC TX frame count threshold (0-255)"); 1699bd37530eSRafal Jaworowski } 1700bd37530eSRafal Jaworowski 1701bd37530eSRafal Jaworowski /* 1702bd37530eSRafal Jaworowski * With Interrupt Coalescing (IC) active, a transmit/receive frame 1703bd37530eSRafal Jaworowski * interrupt is raised either upon: 1704bd37530eSRafal Jaworowski * 1705bd37530eSRafal Jaworowski * - threshold-defined period of time elapsed, or 1706bd37530eSRafal Jaworowski * - threshold-defined number of frames is received/transmitted, 1707bd37530eSRafal Jaworowski * whichever occurs first. 
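 *
 * For example (figures chosen for illustration): with rx_count = 16 and
 * rx_time = 100, at 1 Gbps an RX interrupt is raised after 16 received
 * frames, or once 100 * 512 ns = 51.2 us elapse, whichever happens first.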
1708bd37530eSRafal Jaworowski * 1709bd37530eSRafal Jaworowski * The following sysctls regulate IC behaviour (for TX/RX separately): 1710bd37530eSRafal Jaworowski * 1711bd37530eSRafal Jaworowski * dev.tsec.<unit>.int_coal.rx_time 1712bd37530eSRafal Jaworowski * dev.tsec.<unit>.int_coal.rx_count 1713bd37530eSRafal Jaworowski * dev.tsec.<unit>.int_coal.tx_time 1714bd37530eSRafal Jaworowski * dev.tsec.<unit>.int_coal.tx_count 1715bd37530eSRafal Jaworowski * 1716bd37530eSRafal Jaworowski * Values: 1717bd37530eSRafal Jaworowski * 1718bd37530eSRafal Jaworowski * - 0 for either time or count disables IC on the given TX/RX path 1719bd37530eSRafal Jaworowski * 1720bd37530eSRafal Jaworowski * - count: 1-255 (expresses the frame count; note that a value of 1 is 1721bd37530eSRafal Jaworowski * effectively IC off) 1722bd37530eSRafal Jaworowski * 1723bd37530eSRafal Jaworowski * - time: 1-65535 (value corresponds to a real time period and is 1724bd37530eSRafal Jaworowski * expressed in units equivalent to 64 TSEC interface clocks, i.e. one timer 1725bd37530eSRafal Jaworowski * threshold unit is 25.6 us, 2.56 us, or 512 ns, corresponding to 10 Mbps, 1726bd37530eSRafal Jaworowski * 100 Mbps, or 1 Gbps, respectively). For a detailed discussion consult the 1727bd37530eSRafal Jaworowski * TSEC reference manual. 1728bd37530eSRafal Jaworowski */ 1729bd37530eSRafal Jaworowski static int 1730bd37530eSRafal Jaworowski tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS) 1731bd37530eSRafal Jaworowski { 1732bd37530eSRafal Jaworowski int error; 1733bd37530eSRafal Jaworowski uint32_t time; 1734bd37530eSRafal Jaworowski struct tsec_softc *sc = (struct tsec_softc *)arg1; 1735bd37530eSRafal Jaworowski 1736bd37530eSRafal Jaworowski time = (arg2 == TSEC_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time; 1737bd37530eSRafal Jaworowski 1738bd37530eSRafal Jaworowski error = sysctl_handle_int(oidp, &time, 0, req); 1739bd37530eSRafal Jaworowski if (error != 0) 1740bd37530eSRafal Jaworowski return (error); 1741bd37530eSRafal Jaworowski 1742bd37530eSRafal Jaworowski if (time > 65535) 1743bd37530eSRafal Jaworowski return (EINVAL); 1744bd37530eSRafal Jaworowski 1745bd37530eSRafal Jaworowski TSEC_IC_LOCK(sc); 1746bd37530eSRafal Jaworowski if (arg2 == TSEC_IC_RX) { 1747bd37530eSRafal Jaworowski sc->rx_ic_time = time; 1748bd37530eSRafal Jaworowski tsec_set_rxic(sc); 1749bd37530eSRafal Jaworowski } else { 1750bd37530eSRafal Jaworowski sc->tx_ic_time = time; 1751bd37530eSRafal Jaworowski tsec_set_txic(sc); 1752bd37530eSRafal Jaworowski } 1753bd37530eSRafal Jaworowski TSEC_IC_UNLOCK(sc); 1754bd37530eSRafal Jaworowski 1755bd37530eSRafal Jaworowski return (0); 1756bd37530eSRafal Jaworowski } 1757bd37530eSRafal Jaworowski 1758bd37530eSRafal Jaworowski static int 1759bd37530eSRafal Jaworowski tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS) 1760bd37530eSRafal Jaworowski { 1761bd37530eSRafal Jaworowski int error; 1762bd37530eSRafal Jaworowski uint32_t count; 1763bd37530eSRafal Jaworowski struct tsec_softc *sc = (struct tsec_softc *)arg1; 1764bd37530eSRafal Jaworowski 1765bd37530eSRafal Jaworowski count = (arg2 == TSEC_IC_RX) ? 
sc->rx_ic_count : sc->tx_ic_count; 1766bd37530eSRafal Jaworowski 1767bd37530eSRafal Jaworowski error = sysctl_handle_int(oidp, &count, 0, req); 1768bd37530eSRafal Jaworowski if (error != 0) 1769bd37530eSRafal Jaworowski return (error); 1770bd37530eSRafal Jaworowski 1771bd37530eSRafal Jaworowski if (count > 255) 1772bd37530eSRafal Jaworowski return (EINVAL); 1773bd37530eSRafal Jaworowski 1774bd37530eSRafal Jaworowski TSEC_IC_LOCK(sc); 1775bd37530eSRafal Jaworowski if (arg2 == TSEC_IC_RX) { 1776bd37530eSRafal Jaworowski sc->rx_ic_count = count; 1777bd37530eSRafal Jaworowski tsec_set_rxic(sc); 1778bd37530eSRafal Jaworowski } else { 1779bd37530eSRafal Jaworowski sc->tx_ic_count = count; 1780bd37530eSRafal Jaworowski tsec_set_txic(sc); 1781bd37530eSRafal Jaworowski } 1782bd37530eSRafal Jaworowski TSEC_IC_UNLOCK(sc); 1783bd37530eSRafal Jaworowski 1784bd37530eSRafal Jaworowski return (0); 1785bd37530eSRafal Jaworowski } 1786bd37530eSRafal Jaworowski 1787bd37530eSRafal Jaworowski static void 1788bd37530eSRafal Jaworowski tsec_set_rxic(struct tsec_softc *sc) 1789bd37530eSRafal Jaworowski { 1790bd37530eSRafal Jaworowski uint32_t rxic_val; 1791bd37530eSRafal Jaworowski 1792bd37530eSRafal Jaworowski if (sc->rx_ic_count == 0 || sc->rx_ic_time == 0) 1793bd37530eSRafal Jaworowski /* Disable RX IC */ 1794bd37530eSRafal Jaworowski rxic_val = 0; 1795bd37530eSRafal Jaworowski else { 1796bd37530eSRafal Jaworowski rxic_val = 0x80000000; 1797bd37530eSRafal Jaworowski rxic_val |= (sc->rx_ic_count << 21); 1798bd37530eSRafal Jaworowski rxic_val |= sc->rx_ic_time; 1799bd37530eSRafal Jaworowski } 1800bd37530eSRafal Jaworowski 1801bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_RXIC, rxic_val); 1802bd37530eSRafal Jaworowski } 1803bd37530eSRafal Jaworowski 1804bd37530eSRafal Jaworowski static void 1805bd37530eSRafal Jaworowski tsec_set_txic(struct tsec_softc *sc) 1806bd37530eSRafal Jaworowski { 1807bd37530eSRafal Jaworowski uint32_t txic_val; 1808bd37530eSRafal Jaworowski 1809bd37530eSRafal Jaworowski if (sc->tx_ic_count == 0 || sc->tx_ic_time == 0) 1810bd37530eSRafal Jaworowski /* Disable TX IC */ 1811bd37530eSRafal Jaworowski txic_val = 0; 1812bd37530eSRafal Jaworowski else { 1813bd37530eSRafal Jaworowski txic_val = 0x80000000; 1814bd37530eSRafal Jaworowski txic_val |= (sc->tx_ic_count << 21); 1815bd37530eSRafal Jaworowski txic_val |= sc->tx_ic_time; 1816bd37530eSRafal Jaworowski } 1817bd37530eSRafal Jaworowski 1818bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_TXIC, txic_val); 1819bd37530eSRafal Jaworowski } 1820bd37530eSRafal Jaworowski 1821bd37530eSRafal Jaworowski static void 1822bd37530eSRafal Jaworowski tsec_offload_setup(struct tsec_softc *sc) 1823bd37530eSRafal Jaworowski { 1824bd37530eSRafal Jaworowski struct ifnet *ifp = sc->tsec_ifp; 1825bd37530eSRafal Jaworowski uint32_t reg; 1826bd37530eSRafal Jaworowski 1827bd37530eSRafal Jaworowski TSEC_GLOBAL_LOCK_ASSERT(sc); 1828bd37530eSRafal Jaworowski 1829bd37530eSRafal Jaworowski reg = TSEC_READ(sc, TSEC_REG_TCTRL); 1830bd37530eSRafal Jaworowski reg |= TSEC_TCTRL_IPCSEN | TSEC_TCTRL_TUCSEN; 1831bd37530eSRafal Jaworowski 1832bd37530eSRafal Jaworowski if (ifp->if_capenable & IFCAP_TXCSUM) 1833bd37530eSRafal Jaworowski ifp->if_hwassist = TSEC_CHECKSUM_FEATURES; 1834bd37530eSRafal Jaworowski else 1835bd37530eSRafal Jaworowski ifp->if_hwassist = 0; 1836bd37530eSRafal Jaworowski 1837bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_TCTRL, reg); 1838bd37530eSRafal Jaworowski 1839bd37530eSRafal Jaworowski reg = TSEC_READ(sc, TSEC_REG_RCTRL); 1840bd37530eSRafal 
Jaworowski reg &= ~(TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN | TSEC_RCTRL_PRSDEP); 1841bd37530eSRafal Jaworowski reg |= TSEC_RCTRL_PRSDEP_PARSE_L2 | TSEC_RCTRL_VLEX; 1842bd37530eSRafal Jaworowski 1843bd37530eSRafal Jaworowski if (ifp->if_capenable & IFCAP_RXCSUM) 1844bd37530eSRafal Jaworowski reg |= TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN | 1845bd37530eSRafal Jaworowski TSEC_RCTRL_PRSDEP_PARSE_L234; 1846bd37530eSRafal Jaworowski 1847bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_RCTRL, reg); 1848bd37530eSRafal Jaworowski } 1849bd37530eSRafal Jaworowski 1850bd37530eSRafal Jaworowski 1851bd37530eSRafal Jaworowski static void 1852bd37530eSRafal Jaworowski tsec_offload_process_frame(struct tsec_softc *sc, struct mbuf *m) 1853bd37530eSRafal Jaworowski { 1854bd37530eSRafal Jaworowski struct tsec_rx_fcb rx_fcb; 1855bd37530eSRafal Jaworowski int csum_flags = 0; 1856bd37530eSRafal Jaworowski int protocol, flags; 1857bd37530eSRafal Jaworowski 1858bd37530eSRafal Jaworowski TSEC_RECEIVE_LOCK_ASSERT(sc); 1859bd37530eSRafal Jaworowski 1860bd37530eSRafal Jaworowski m_copydata(m, 0, sizeof(struct tsec_rx_fcb), (caddr_t)(&rx_fcb)); 1861bd37530eSRafal Jaworowski flags = rx_fcb.flags; 1862bd37530eSRafal Jaworowski protocol = rx_fcb.protocol; 1863bd37530eSRafal Jaworowski 1864bd37530eSRafal Jaworowski if (TSEC_RX_FCB_IP_CSUM_CHECKED(flags)) { 1865bd37530eSRafal Jaworowski csum_flags |= CSUM_IP_CHECKED; 1866bd37530eSRafal Jaworowski 1867bd37530eSRafal Jaworowski if ((flags & TSEC_RX_FCB_IP_CSUM_ERROR) == 0) 1868bd37530eSRafal Jaworowski csum_flags |= CSUM_IP_VALID; 1869bd37530eSRafal Jaworowski } 1870bd37530eSRafal Jaworowski 1871bd37530eSRafal Jaworowski if ((protocol == IPPROTO_TCP || protocol == IPPROTO_UDP) && 1872bd37530eSRafal Jaworowski TSEC_RX_FCB_TCP_UDP_CSUM_CHECKED(flags) && 1873bd37530eSRafal Jaworowski (flags & TSEC_RX_FCB_TCP_UDP_CSUM_ERROR) == 0) { 1874bd37530eSRafal Jaworowski 1875bd37530eSRafal Jaworowski csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1876bd37530eSRafal Jaworowski m->m_pkthdr.csum_data = 0xFFFF; 1877bd37530eSRafal Jaworowski } 1878bd37530eSRafal Jaworowski 1879bd37530eSRafal Jaworowski m->m_pkthdr.csum_flags = csum_flags; 1880bd37530eSRafal Jaworowski 1881bd37530eSRafal Jaworowski if (flags & TSEC_RX_FCB_VLAN) { 1882bd37530eSRafal Jaworowski m->m_pkthdr.ether_vtag = rx_fcb.vlan; 1883bd37530eSRafal Jaworowski m->m_flags |= M_VLANTAG; 1884bd37530eSRafal Jaworowski } 1885bd37530eSRafal Jaworowski 1886bd37530eSRafal Jaworowski m_adj(m, sizeof(struct tsec_rx_fcb)); 1887bd37530eSRafal Jaworowski } 1888bd37530eSRafal Jaworowski 18895c973840SGleb Smirnoff static u_int 18905c973840SGleb Smirnoff tsec_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 18915c973840SGleb Smirnoff { 18925c973840SGleb Smirnoff uint32_t h, *hashtable = arg; 18935c973840SGleb Smirnoff 18945c973840SGleb Smirnoff h = (ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 24) & 0xFF; 18955c973840SGleb Smirnoff hashtable[(h >> 5)] |= 1 << (0x1F - (h & 0x1F)); 18965c973840SGleb Smirnoff 18975c973840SGleb Smirnoff return (1); 18985c973840SGleb Smirnoff } 18995c973840SGleb Smirnoff 1900bd37530eSRafal Jaworowski static void 1901bd37530eSRafal Jaworowski tsec_setup_multicast(struct tsec_softc *sc) 1902bd37530eSRafal Jaworowski { 1903bd37530eSRafal Jaworowski uint32_t hashtable[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; 1904bd37530eSRafal Jaworowski struct ifnet *ifp = sc->tsec_ifp; 1905bd37530eSRafal Jaworowski int i; 1906bd37530eSRafal Jaworowski 1907bd37530eSRafal Jaworowski TSEC_GLOBAL_LOCK_ASSERT(sc); 1908bd37530eSRafal 
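/*
 * The eight TSEC_REG_GADDR registers together hold a 256-bit group hash
 * table. tsec_hash_maddr() above uses the top 8 bits of the big-endian
 * CRC32 of each multicast address as the bit index: for example, a hash of
 * 0x25 sets bit (0x1F - 0x05), i.e. bit 26, of hashtable[1], which is then
 * written to GADDR(1) below.
 */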
Jaworowski 1909bd37530eSRafal Jaworowski if (ifp->if_flags & IFF_ALLMULTI) { 1910bd37530eSRafal Jaworowski for (i = 0; i < 8; i++) 1911bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_GADDR(i), 0xFFFFFFFF); 1912bd37530eSRafal Jaworowski 1913bd37530eSRafal Jaworowski return; 1914bd37530eSRafal Jaworowski } 1915bd37530eSRafal Jaworowski 19165c973840SGleb Smirnoff if_foreach_llmaddr(ifp, tsec_hash_maddr, &hashtable); 1917bd37530eSRafal Jaworowski 1918bd37530eSRafal Jaworowski for (i = 0; i < 8; i++) 1919bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_GADDR(i), hashtable[i]); 1920bd37530eSRafal Jaworowski } 1921bd37530eSRafal Jaworowski 1922bd37530eSRafal Jaworowski static int 1923bd37530eSRafal Jaworowski tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu) 1924bd37530eSRafal Jaworowski { 1925bd37530eSRafal Jaworowski 1926bd37530eSRafal Jaworowski mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN; 1927bd37530eSRafal Jaworowski 1928bd37530eSRafal Jaworowski TSEC_GLOBAL_LOCK_ASSERT(sc); 1929bd37530eSRafal Jaworowski 1930bd37530eSRafal Jaworowski if (mtu >= TSEC_MIN_FRAME_SIZE && mtu <= TSEC_MAX_FRAME_SIZE) { 1931bd37530eSRafal Jaworowski TSEC_WRITE(sc, TSEC_REG_MAXFRM, mtu); 1932bd37530eSRafal Jaworowski return (mtu); 1933bd37530eSRafal Jaworowski } 1934bd37530eSRafal Jaworowski 1935bd37530eSRafal Jaworowski return (0); 1936bd37530eSRafal Jaworowski } 1937
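/*
 * Worked example for tsec_set_mtu() above (using the standard Ethernet
 * constants): for the default MTU of 1500, TSEC_REG_MAXFRM is programmed
 * with 1500 + ETHER_HDR_LEN (14) + ETHER_VLAN_ENCAP_LEN (4) +
 * ETHER_CRC_LEN (4) = 1522, provided the result lies within
 * [TSEC_MIN_FRAME_SIZE, TSEC_MAX_FRAME_SIZE]; otherwise tsec_set_mtu()
 * returns 0.
 */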