1*4bf8ce03SAdrian Chadd /*- 2*4bf8ce03SAdrian Chadd * SPDX-License-Identifier: BSD-2-Clause 3*4bf8ce03SAdrian Chadd * 4*4bf8ce03SAdrian Chadd * Copyright (c) 2019, 2020, 2023-2025 Kevin Lo <kevlo@openbsd.org> 5*4bf8ce03SAdrian Chadd * Copyright (c) 2025 Adrian Chadd <adrian@FreeBSD.org> 6*4bf8ce03SAdrian Chadd * 7*4bf8ce03SAdrian Chadd * Hardware programming portions from Realtek Semiconductor. 8*4bf8ce03SAdrian Chadd * 9*4bf8ce03SAdrian Chadd * Permission to use, copy, modify, and distribute this software for any 10*4bf8ce03SAdrian Chadd * purpose with or without fee is hereby granted, provided that the above 11*4bf8ce03SAdrian Chadd * copyright notice and this permission notice appear in all copies. 12*4bf8ce03SAdrian Chadd * 13*4bf8ce03SAdrian Chadd * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 14*4bf8ce03SAdrian Chadd * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 15*4bf8ce03SAdrian Chadd * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 16*4bf8ce03SAdrian Chadd * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 17*4bf8ce03SAdrian Chadd * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 18*4bf8ce03SAdrian Chadd * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 19*4bf8ce03SAdrian Chadd * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 20*4bf8ce03SAdrian Chadd */ 21*4bf8ce03SAdrian Chadd 22*4bf8ce03SAdrian Chadd /* $OpenBSD: if_rge.c,v 1.38 2025/09/19 00:41:14 kevlo Exp $ */ 23*4bf8ce03SAdrian Chadd 24*4bf8ce03SAdrian Chadd #include <sys/param.h> 25*4bf8ce03SAdrian Chadd #include <sys/systm.h> 26*4bf8ce03SAdrian Chadd #include <sys/sockio.h> 27*4bf8ce03SAdrian Chadd #include <sys/mbuf.h> 28*4bf8ce03SAdrian Chadd #include <sys/malloc.h> 29*4bf8ce03SAdrian Chadd #include <sys/endian.h> 30*4bf8ce03SAdrian Chadd #include <sys/socket.h> 31*4bf8ce03SAdrian Chadd #include <net/if.h> 32*4bf8ce03SAdrian Chadd #include <net/if_media.h> 33*4bf8ce03SAdrian Chadd #include <sys/queue.h> 34*4bf8ce03SAdrian Chadd #include <sys/taskqueue.h> 35*4bf8ce03SAdrian Chadd #include <sys/bus.h> 36*4bf8ce03SAdrian Chadd #include <sys/module.h> 37*4bf8ce03SAdrian Chadd #include <sys/rman.h> 38*4bf8ce03SAdrian Chadd #include <sys/kernel.h> 39*4bf8ce03SAdrian Chadd 40*4bf8ce03SAdrian Chadd #include <netinet/in.h> 41*4bf8ce03SAdrian Chadd #include <netinet/if_ether.h> 42*4bf8ce03SAdrian Chadd 43*4bf8ce03SAdrian Chadd #include <net/bpf.h> 44*4bf8ce03SAdrian Chadd #include <net/ethernet.h> 45*4bf8ce03SAdrian Chadd #include <net/if.h> 46*4bf8ce03SAdrian Chadd #include <net/if_var.h> 47*4bf8ce03SAdrian Chadd #include <net/if_arp.h> 48*4bf8ce03SAdrian Chadd #include <net/if_dl.h> 49*4bf8ce03SAdrian Chadd #include <net/if_media.h> 50*4bf8ce03SAdrian Chadd #include <net/if_types.h> 51*4bf8ce03SAdrian Chadd #include <net/if_vlan_var.h> 52*4bf8ce03SAdrian Chadd 53*4bf8ce03SAdrian Chadd #include <machine/bus.h> 54*4bf8ce03SAdrian Chadd #include <machine/resource.h> 55*4bf8ce03SAdrian Chadd 56*4bf8ce03SAdrian Chadd #include <dev/mii/mii.h> 57*4bf8ce03SAdrian Chadd 58*4bf8ce03SAdrian Chadd #include <dev/pci/pcivar.h> 59*4bf8ce03SAdrian Chadd #include <dev/pci/pcireg.h> 60*4bf8ce03SAdrian Chadd 61*4bf8ce03SAdrian Chadd #include "if_rge_vendor.h" 62*4bf8ce03SAdrian Chadd #include "if_rgereg.h" 63*4bf8ce03SAdrian Chadd #include "if_rgevar.h" 64*4bf8ce03SAdrian Chadd #include "if_rge_hw.h" 65*4bf8ce03SAdrian Chadd #include "if_rge_microcode.h" 66*4bf8ce03SAdrian Chadd #include "if_rge_debug.h" 67*4bf8ce03SAdrian Chadd #include "if_rge_sysctl.h" 68*4bf8ce03SAdrian Chadd #include "if_rge_stats.h" 69*4bf8ce03SAdrian Chadd 70*4bf8ce03SAdrian Chadd #define RGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 71*4bf8ce03SAdrian Chadd 72*4bf8ce03SAdrian Chadd static int rge_attach(device_t); 73*4bf8ce03SAdrian Chadd static int rge_detach(device_t); 74*4bf8ce03SAdrian Chadd 75*4bf8ce03SAdrian Chadd #if 0 76*4bf8ce03SAdrian Chadd int rge_activate(struct device *, int); 77*4bf8ce03SAdrian Chadd #endif 78*4bf8ce03SAdrian Chadd static void rge_intr_msi(void *); 79*4bf8ce03SAdrian Chadd static int rge_ioctl(struct ifnet *, u_long, caddr_t); 80*4bf8ce03SAdrian Chadd static int rge_transmit_if(if_t, struct mbuf *); 81*4bf8ce03SAdrian Chadd static void rge_qflush_if(if_t); 82*4bf8ce03SAdrian Chadd static void rge_init_if(void *); 83*4bf8ce03SAdrian Chadd static void rge_init_locked(struct rge_softc *); 84*4bf8ce03SAdrian Chadd static void rge_stop_locked(struct rge_softc *); 85*4bf8ce03SAdrian Chadd static int rge_ifmedia_upd(if_t); 86*4bf8ce03SAdrian Chadd static void rge_ifmedia_sts(if_t, struct ifmediareq *); 87*4bf8ce03SAdrian Chadd static int rge_allocmem(struct rge_softc *); 88*4bf8ce03SAdrian Chadd static int rge_alloc_stats_mem(struct rge_softc *); 89*4bf8ce03SAdrian Chadd static int rge_freemem(struct rge_softc *); 90*4bf8ce03SAdrian Chadd static int rge_free_stats_mem(struct rge_softc *); 91*4bf8ce03SAdrian Chadd static int rge_newbuf(struct rge_queues *); 92*4bf8ce03SAdrian Chadd static void rge_rx_list_init(struct rge_queues *); 93*4bf8ce03SAdrian Chadd static void rge_tx_list_init(struct rge_queues *); 94*4bf8ce03SAdrian Chadd static void rge_fill_rx_ring(struct rge_queues *); 95*4bf8ce03SAdrian Chadd static int rge_rxeof(struct rge_queues *, struct mbufq *); 96*4bf8ce03SAdrian Chadd static int rge_txeof(struct rge_queues *); 97*4bf8ce03SAdrian Chadd static void rge_iff_locked(struct rge_softc *); 98*4bf8ce03SAdrian Chadd static void rge_add_media_types(struct rge_softc *); 99*4bf8ce03SAdrian Chadd static void rge_tx_task(void *, int); 100*4bf8ce03SAdrian Chadd static void rge_txq_flush_mbufs(struct rge_softc *sc); 101*4bf8ce03SAdrian Chadd static void rge_tick(void *); 102*4bf8ce03SAdrian Chadd static void rge_link_state(struct rge_softc *); 103*4bf8ce03SAdrian Chadd #if 0 104*4bf8ce03SAdrian Chadd #ifndef SMALL_KERNEL 105*4bf8ce03SAdrian Chadd int rge_wol(struct ifnet *, int); 106*4bf8ce03SAdrian Chadd void rge_wol_power(struct rge_softc *); 107*4bf8ce03SAdrian Chadd #endif 108*4bf8ce03SAdrian Chadd #endif 109*4bf8ce03SAdrian Chadd 110*4bf8ce03SAdrian Chadd struct rge_matchid { 111*4bf8ce03SAdrian Chadd uint16_t vendor; 112*4bf8ce03SAdrian Chadd uint16_t device; 113*4bf8ce03SAdrian Chadd const char *name; 114*4bf8ce03SAdrian Chadd }; 115*4bf8ce03SAdrian Chadd 116*4bf8ce03SAdrian Chadd const struct rge_matchid rge_devices[] = { 117*4bf8ce03SAdrian Chadd { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000, "Killer E3000" }, 118*4bf8ce03SAdrian Chadd { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8125, "RTL8125" }, 119*4bf8ce03SAdrian Chadd { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8126, "RTL8126", }, 120*4bf8ce03SAdrian Chadd { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8127, "RTL8127" }, 121*4bf8ce03SAdrian Chadd { 0, 0, NULL } 122*4bf8ce03SAdrian Chadd }; 123*4bf8ce03SAdrian Chadd 124*4bf8ce03SAdrian Chadd static int 125*4bf8ce03SAdrian Chadd rge_probe(device_t dev) 126*4bf8ce03SAdrian Chadd { 127*4bf8ce03SAdrian Chadd uint16_t vendor, device; 128*4bf8ce03SAdrian Chadd const struct rge_matchid *ri; 129*4bf8ce03SAdrian Chadd 130*4bf8ce03SAdrian Chadd vendor = pci_get_vendor(dev); 131*4bf8ce03SAdrian Chadd device = pci_get_device(dev); 132*4bf8ce03SAdrian Chadd 133*4bf8ce03SAdrian Chadd for (ri = rge_devices; ri->name != NULL; ri++) { 134*4bf8ce03SAdrian Chadd if ((vendor == ri->vendor) && (device == ri->device)) { 135*4bf8ce03SAdrian Chadd device_set_desc(dev, ri->name); 136*4bf8ce03SAdrian Chadd return (BUS_PROBE_DEFAULT); 137*4bf8ce03SAdrian Chadd } 138*4bf8ce03SAdrian Chadd } 139*4bf8ce03SAdrian Chadd 140*4bf8ce03SAdrian Chadd return (ENXIO); 141*4bf8ce03SAdrian Chadd } 142*4bf8ce03SAdrian Chadd 143*4bf8ce03SAdrian Chadd static void 144*4bf8ce03SAdrian Chadd rge_attach_if(struct rge_softc *sc, const char *eaddr) 145*4bf8ce03SAdrian Chadd { 146*4bf8ce03SAdrian Chadd if_initname(sc->sc_ifp, device_get_name(sc->sc_dev), 147*4bf8ce03SAdrian Chadd device_get_unit(sc->sc_dev)); 148*4bf8ce03SAdrian Chadd if_setdev(sc->sc_ifp, sc->sc_dev); 149*4bf8ce03SAdrian Chadd if_setinitfn(sc->sc_ifp, rge_init_if); 150*4bf8ce03SAdrian Chadd if_setsoftc(sc->sc_ifp, sc); 151*4bf8ce03SAdrian Chadd if_setflags(sc->sc_ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 152*4bf8ce03SAdrian Chadd if_setioctlfn(sc->sc_ifp, rge_ioctl); 153*4bf8ce03SAdrian Chadd if_settransmitfn(sc->sc_ifp, rge_transmit_if); 154*4bf8ce03SAdrian Chadd if_setqflushfn(sc->sc_ifp, rge_qflush_if); 155*4bf8ce03SAdrian Chadd 156*4bf8ce03SAdrian Chadd /* Set offload as appropriate */ 157*4bf8ce03SAdrian Chadd if_sethwassist(sc->sc_ifp, CSUM_IP | CSUM_TCP | CSUM_UDP); 158*4bf8ce03SAdrian Chadd if_setcapabilities(sc->sc_ifp, IFCAP_HWCSUM); 159*4bf8ce03SAdrian Chadd if_setcapenable(sc->sc_ifp, if_getcapabilities(sc->sc_ifp)); 160*4bf8ce03SAdrian Chadd 161*4bf8ce03SAdrian Chadd /* TODO: set WOL */ 162*4bf8ce03SAdrian Chadd 163*4bf8ce03SAdrian Chadd /* Attach interface */ 164*4bf8ce03SAdrian Chadd ether_ifattach(sc->sc_ifp, eaddr); 165*4bf8ce03SAdrian Chadd sc->sc_ether_attached = true; 166*4bf8ce03SAdrian Chadd 167*4bf8ce03SAdrian Chadd /* post ether_ifattach() bits */ 168*4bf8ce03SAdrian Chadd 169*4bf8ce03SAdrian Chadd /* VLAN capabilities */ 170*4bf8ce03SAdrian Chadd if_setcapabilitiesbit(sc->sc_ifp, IFCAP_VLAN_MTU | 171*4bf8ce03SAdrian Chadd IFCAP_VLAN_HWTAGGING, 0); 172*4bf8ce03SAdrian Chadd if_setcapabilitiesbit(sc->sc_ifp, IFCAP_VLAN_HWCSUM, 0); 173*4bf8ce03SAdrian Chadd if_setcapenable(sc->sc_ifp, if_getcapabilities(sc->sc_ifp)); 174*4bf8ce03SAdrian Chadd 175*4bf8ce03SAdrian Chadd if_setifheaderlen(sc->sc_ifp, sizeof(struct ether_vlan_header)); 176*4bf8ce03SAdrian Chadd 177*4bf8ce03SAdrian Chadd /* TODO: is this needed for iftransmit? */ 178*4bf8ce03SAdrian Chadd if_setsendqlen(sc->sc_ifp, RGE_TX_LIST_CNT - 1); 179*4bf8ce03SAdrian Chadd if_setsendqready(sc->sc_ifp); 180*4bf8ce03SAdrian Chadd } 181*4bf8ce03SAdrian Chadd 182*4bf8ce03SAdrian Chadd static int 183*4bf8ce03SAdrian Chadd rge_attach(device_t dev) 184*4bf8ce03SAdrian Chadd { 185*4bf8ce03SAdrian Chadd uint8_t eaddr[ETHER_ADDR_LEN]; 186*4bf8ce03SAdrian Chadd struct rge_softc *sc; 187*4bf8ce03SAdrian Chadd struct rge_queues *q; 188*4bf8ce03SAdrian Chadd uint32_t hwrev, reg; 189*4bf8ce03SAdrian Chadd int i, rid; 190*4bf8ce03SAdrian Chadd int error; 191*4bf8ce03SAdrian Chadd int msic; 192*4bf8ce03SAdrian Chadd 193*4bf8ce03SAdrian Chadd sc = device_get_softc(dev); 194*4bf8ce03SAdrian Chadd sc->sc_dev = dev; 195*4bf8ce03SAdrian Chadd sc->sc_ifp = if_gethandle(IFT_ETHER); 196*4bf8ce03SAdrian Chadd mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 197*4bf8ce03SAdrian Chadd MTX_DEF); 198*4bf8ce03SAdrian Chadd 199*4bf8ce03SAdrian Chadd /* Enable bus mastering */ 200*4bf8ce03SAdrian Chadd pci_enable_busmaster(dev); 201*4bf8ce03SAdrian Chadd 202*4bf8ce03SAdrian Chadd /* 203*4bf8ce03SAdrian Chadd * Map control/status registers. 204*4bf8ce03SAdrian Chadd */ 205*4bf8ce03SAdrian Chadd 206*4bf8ce03SAdrian Chadd /* 207*4bf8ce03SAdrian Chadd * The openbsd driver (and my E3000 NIC) handle registering three 208*4bf8ce03SAdrian Chadd * kinds of BARs - a 64 bit MMIO BAR, a 32 bit MMIO BAR, and then 209*4bf8ce03SAdrian Chadd * a legacy IO port BAR. 210*4bf8ce03SAdrian Chadd * 211*4bf8ce03SAdrian Chadd * To simplify bring-up, I'm going to request resources for the first 212*4bf8ce03SAdrian Chadd * MMIO BAR (BAR2) which should be a 32 bit BAR. 213*4bf8ce03SAdrian Chadd */ 214*4bf8ce03SAdrian Chadd rid = PCIR_BAR(2); 215*4bf8ce03SAdrian Chadd sc->sc_bres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 216*4bf8ce03SAdrian Chadd RF_ACTIVE); 217*4bf8ce03SAdrian Chadd if (sc->sc_bres == NULL) { 218*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, 219*4bf8ce03SAdrian Chadd "Unable to allocate bus resource: memory\n"); 220*4bf8ce03SAdrian Chadd goto fail; 221*4bf8ce03SAdrian Chadd } 222*4bf8ce03SAdrian Chadd sc->rge_bhandle = rman_get_bushandle(sc->sc_bres); 223*4bf8ce03SAdrian Chadd sc->rge_btag = rman_get_bustag(sc->sc_bres); 224*4bf8ce03SAdrian Chadd sc->rge_bsize = rman_get_size(sc->sc_bres); 225*4bf8ce03SAdrian Chadd 226*4bf8ce03SAdrian Chadd q = malloc(sizeof(struct rge_queues), M_DEVBUF, M_NOWAIT | M_ZERO); 227*4bf8ce03SAdrian Chadd if (q == NULL) { 228*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, "Unable to malloc rge_queues memory\n"); 229*4bf8ce03SAdrian Chadd goto fail; 230*4bf8ce03SAdrian Chadd } 231*4bf8ce03SAdrian Chadd q->q_sc = sc; 232*4bf8ce03SAdrian Chadd q->q_index = 0; 233*4bf8ce03SAdrian Chadd 234*4bf8ce03SAdrian Chadd sc->sc_queues = q; 235*4bf8ce03SAdrian Chadd sc->sc_nqueues = 1; 236*4bf8ce03SAdrian Chadd 237*4bf8ce03SAdrian Chadd /* Check if PCIe */ 238*4bf8ce03SAdrian Chadd if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) { 239*4bf8ce03SAdrian Chadd sc->rge_flags |= RGE_FLAG_PCIE; 240*4bf8ce03SAdrian Chadd sc->sc_expcap = reg; 241*4bf8ce03SAdrian Chadd } 242*4bf8ce03SAdrian Chadd 243*4bf8ce03SAdrian Chadd /* Allocate MSI */ 244*4bf8ce03SAdrian Chadd msic = pci_msi_count(dev); 245*4bf8ce03SAdrian Chadd if (msic == 0) { 246*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, "%s: only MSI interrupts supported\n", 247*4bf8ce03SAdrian Chadd __func__); 248*4bf8ce03SAdrian Chadd goto fail; 249*4bf8ce03SAdrian Chadd } 250*4bf8ce03SAdrian Chadd 251*4bf8ce03SAdrian Chadd msic = RGE_MSI_MESSAGES; 252*4bf8ce03SAdrian Chadd if (pci_alloc_msi(dev, &msic) != 0) { 253*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, "%s: failed to allocate MSI\n", 254*4bf8ce03SAdrian Chadd __func__); 255*4bf8ce03SAdrian Chadd goto fail; 256*4bf8ce03SAdrian Chadd } 257*4bf8ce03SAdrian Chadd 258*4bf8ce03SAdrian Chadd sc->rge_flags |= RGE_FLAG_MSI; 259*4bf8ce03SAdrian Chadd 260*4bf8ce03SAdrian Chadd /* We need at least one MSI */ 261*4bf8ce03SAdrian Chadd if (msic < RGE_MSI_MESSAGES) { 262*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, "%s: didn't allocate enough MSI\n", 263*4bf8ce03SAdrian Chadd __func__); 264*4bf8ce03SAdrian Chadd goto fail; 265*4bf8ce03SAdrian Chadd } 266*4bf8ce03SAdrian Chadd 267*4bf8ce03SAdrian Chadd /* 268*4bf8ce03SAdrian Chadd * Allocate interrupt entries. 269*4bf8ce03SAdrian Chadd */ 270*4bf8ce03SAdrian Chadd for (i = 0, rid = 1; i < RGE_MSI_MESSAGES; i++, rid++) { 271*4bf8ce03SAdrian Chadd sc->sc_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, 272*4bf8ce03SAdrian Chadd &rid, RF_ACTIVE); 273*4bf8ce03SAdrian Chadd if (sc->sc_irq[i] == NULL) { 274*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, "%s: couldn't allocate MSI %d", 275*4bf8ce03SAdrian Chadd __func__, rid); 276*4bf8ce03SAdrian Chadd goto fail; 277*4bf8ce03SAdrian Chadd } 278*4bf8ce03SAdrian Chadd } 279*4bf8ce03SAdrian Chadd 280*4bf8ce03SAdrian Chadd /* Hook interrupts */ 281*4bf8ce03SAdrian Chadd for (i = 0; i < RGE_MSI_MESSAGES; i++) { 282*4bf8ce03SAdrian Chadd error = bus_setup_intr(dev, sc->sc_irq[i], 283*4bf8ce03SAdrian Chadd INTR_TYPE_NET | INTR_MPSAFE, NULL, rge_intr_msi, 284*4bf8ce03SAdrian Chadd sc, &sc->sc_ih[i]); 285*4bf8ce03SAdrian Chadd if (error != 0) { 286*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, 287*4bf8ce03SAdrian Chadd "%s: couldn't setup intr %d (error %d)", __func__, 288*4bf8ce03SAdrian Chadd i, error); 289*4bf8ce03SAdrian Chadd goto fail; 290*4bf8ce03SAdrian Chadd } 291*4bf8ce03SAdrian Chadd } 292*4bf8ce03SAdrian Chadd 293*4bf8ce03SAdrian Chadd /* Allocate top level bus DMA tag */ 294*4bf8ce03SAdrian Chadd error = bus_dma_tag_create(bus_get_dma_tag(dev), 295*4bf8ce03SAdrian Chadd 1, /* alignment */ 296*4bf8ce03SAdrian Chadd 0, /* boundary */ 297*4bf8ce03SAdrian Chadd BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 298*4bf8ce03SAdrian Chadd NULL, NULL, /* filter (unused) */ 299*4bf8ce03SAdrian Chadd BUS_SPACE_MAXADDR, /* maxsize */ 300*4bf8ce03SAdrian Chadd BUS_SPACE_UNRESTRICTED, /* nsegments */ 301*4bf8ce03SAdrian Chadd BUS_SPACE_MAXADDR, /* maxsegsize */ 302*4bf8ce03SAdrian Chadd 0, /* flags */ 303*4bf8ce03SAdrian Chadd NULL, NULL, /* lockfunc, lockarg */ 304*4bf8ce03SAdrian Chadd &sc->sc_dmat); 305*4bf8ce03SAdrian Chadd if (error) { 306*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, 307*4bf8ce03SAdrian Chadd "couldn't allocate device DMA tag (error %d)\n", error); 308*4bf8ce03SAdrian Chadd goto fail; 309*4bf8ce03SAdrian Chadd } 310*4bf8ce03SAdrian Chadd 311*4bf8ce03SAdrian Chadd /* Allocate TX/RX descriptor and buffer tags */ 312*4bf8ce03SAdrian Chadd error = bus_dma_tag_create(sc->sc_dmat, 313*4bf8ce03SAdrian Chadd RGE_ALIGN, /* alignment */ 314*4bf8ce03SAdrian Chadd 0, /* boundary */ 315*4bf8ce03SAdrian Chadd BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 316*4bf8ce03SAdrian Chadd NULL, NULL, /* filter (unused) */ 317*4bf8ce03SAdrian Chadd RGE_TX_LIST_SZ, /* maxsize */ 318*4bf8ce03SAdrian Chadd 1, /* nsegments */ 319*4bf8ce03SAdrian Chadd RGE_TX_LIST_SZ, /* maxsegsize */ 320*4bf8ce03SAdrian Chadd 0, /* flags */ 321*4bf8ce03SAdrian Chadd NULL, NULL, /* lockfunc, lockarg */ 322*4bf8ce03SAdrian Chadd &sc->sc_dmat_tx_desc); 323*4bf8ce03SAdrian Chadd if (error) { 324*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, 325*4bf8ce03SAdrian Chadd "couldn't allocate device TX descriptor " 326*4bf8ce03SAdrian Chadd "DMA tag (error %d)\n", error); 327*4bf8ce03SAdrian Chadd goto fail; 328*4bf8ce03SAdrian Chadd } 329*4bf8ce03SAdrian Chadd 330*4bf8ce03SAdrian Chadd error = bus_dma_tag_create(sc->sc_dmat, 331*4bf8ce03SAdrian Chadd 1, /* alignment */ 332*4bf8ce03SAdrian Chadd 0, /* boundary */ 333*4bf8ce03SAdrian Chadd BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 334*4bf8ce03SAdrian Chadd NULL, NULL, /* filter (unused) */ 335*4bf8ce03SAdrian Chadd RGE_JUMBO_FRAMELEN, /* maxsize */ 336*4bf8ce03SAdrian Chadd RGE_TX_NSEGS, /* nsegments */ 337*4bf8ce03SAdrian Chadd RGE_JUMBO_FRAMELEN, /* maxsegsize */ 338*4bf8ce03SAdrian Chadd 0, /* flags */ 339*4bf8ce03SAdrian Chadd NULL, NULL, /* lockfunc, lockarg */ 340*4bf8ce03SAdrian Chadd &sc->sc_dmat_tx_buf); 341*4bf8ce03SAdrian Chadd if (error) { 342*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, 343*4bf8ce03SAdrian Chadd "couldn't allocate device TX buffer DMA tag (error %d)\n", 344*4bf8ce03SAdrian Chadd error); 345*4bf8ce03SAdrian Chadd goto fail; 346*4bf8ce03SAdrian Chadd } 347*4bf8ce03SAdrian Chadd 348*4bf8ce03SAdrian Chadd error = bus_dma_tag_create(sc->sc_dmat, 349*4bf8ce03SAdrian Chadd RGE_ALIGN, /* alignment */ 350*4bf8ce03SAdrian Chadd 0, /* boundary */ 351*4bf8ce03SAdrian Chadd BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 352*4bf8ce03SAdrian Chadd NULL, NULL, /* filter (unused) */ 353*4bf8ce03SAdrian Chadd RGE_RX_LIST_SZ, /* maxsize */ 354*4bf8ce03SAdrian Chadd 1, /* nsegments */ 355*4bf8ce03SAdrian Chadd RGE_RX_LIST_SZ, /* maxsegsize */ 356*4bf8ce03SAdrian Chadd 0, /* flags */ 357*4bf8ce03SAdrian Chadd NULL, NULL, /* lockfunc, lockarg */ 358*4bf8ce03SAdrian Chadd &sc->sc_dmat_rx_desc); 359*4bf8ce03SAdrian Chadd if (error) { 360*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, 361*4bf8ce03SAdrian Chadd "couldn't allocate device RX descriptor " 362*4bf8ce03SAdrian Chadd "DMA tag (error %d)\n", error); 363*4bf8ce03SAdrian Chadd goto fail; 364*4bf8ce03SAdrian Chadd } 365*4bf8ce03SAdrian Chadd 366*4bf8ce03SAdrian Chadd error = bus_dma_tag_create(sc->sc_dmat, 367*4bf8ce03SAdrian Chadd 1, /* alignment */ 368*4bf8ce03SAdrian Chadd 0, /* boundary */ 369*4bf8ce03SAdrian Chadd BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 370*4bf8ce03SAdrian Chadd NULL, NULL, /* filter (unused) */ 371*4bf8ce03SAdrian Chadd MCLBYTES, /* maxsize */ 372*4bf8ce03SAdrian Chadd 1, /* nsegments */ 373*4bf8ce03SAdrian Chadd MCLBYTES, /* maxsegsize */ 374*4bf8ce03SAdrian Chadd 0, /* flags */ 375*4bf8ce03SAdrian Chadd NULL, NULL, /* lockfunc, lockarg */ 376*4bf8ce03SAdrian Chadd &sc->sc_dmat_rx_buf); 377*4bf8ce03SAdrian Chadd if (error) { 378*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, 379*4bf8ce03SAdrian Chadd "couldn't allocate device RX buffer DMA tag (error %d)\n", 380*4bf8ce03SAdrian Chadd error); 381*4bf8ce03SAdrian Chadd goto fail; 382*4bf8ce03SAdrian Chadd } 383*4bf8ce03SAdrian Chadd 384*4bf8ce03SAdrian Chadd error = bus_dma_tag_create(sc->sc_dmat, 385*4bf8ce03SAdrian Chadd RGE_STATS_ALIGNMENT, /* alignment */ 386*4bf8ce03SAdrian Chadd 0, /* boundary */ 387*4bf8ce03SAdrian Chadd BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 388*4bf8ce03SAdrian Chadd NULL, NULL, /* filter (unused) */ 389*4bf8ce03SAdrian Chadd RGE_STATS_BUF_SIZE, /* maxsize */ 390*4bf8ce03SAdrian Chadd 1, /* nsegments */ 391*4bf8ce03SAdrian Chadd RGE_STATS_BUF_SIZE, /* maxsegsize */ 392*4bf8ce03SAdrian Chadd 0, /* flags */ 393*4bf8ce03SAdrian Chadd NULL, NULL, /* lockfunc, lockarg */ 394*4bf8ce03SAdrian Chadd &sc->sc_dmat_stats_buf); 395*4bf8ce03SAdrian Chadd if (error) { 396*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, 397*4bf8ce03SAdrian Chadd "couldn't allocate device RX buffer DMA tag (error %d)\n", 398*4bf8ce03SAdrian Chadd error); 399*4bf8ce03SAdrian Chadd goto fail; 400*4bf8ce03SAdrian Chadd } 401*4bf8ce03SAdrian Chadd 402*4bf8ce03SAdrian Chadd 403*4bf8ce03SAdrian Chadd /* Attach sysctl nodes */ 404*4bf8ce03SAdrian Chadd rge_sysctl_attach(sc); 405*4bf8ce03SAdrian Chadd 406*4bf8ce03SAdrian Chadd /* Determine hardware revision */ 407*4bf8ce03SAdrian Chadd hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV; 408*4bf8ce03SAdrian Chadd switch (hwrev) { 409*4bf8ce03SAdrian Chadd case 0x60900000: 410*4bf8ce03SAdrian Chadd sc->rge_type = MAC_R25; 411*4bf8ce03SAdrian Chadd // device_printf(dev, "RTL8125\n"); 412*4bf8ce03SAdrian Chadd break; 413*4bf8ce03SAdrian Chadd case 0x64100000: 414*4bf8ce03SAdrian Chadd sc->rge_type = MAC_R25B; 415*4bf8ce03SAdrian Chadd // device_printf(dev, "RTL8125B\n"); 416*4bf8ce03SAdrian Chadd break; 417*4bf8ce03SAdrian Chadd case 0x64900000: 418*4bf8ce03SAdrian Chadd sc->rge_type = MAC_R26; 419*4bf8ce03SAdrian Chadd // device_printf(dev, "RTL8126\n"); 420*4bf8ce03SAdrian Chadd break; 421*4bf8ce03SAdrian Chadd case 0x68800000: 422*4bf8ce03SAdrian Chadd sc->rge_type = MAC_R25D; 423*4bf8ce03SAdrian Chadd // device_printf(dev, "RTL8125D\n"); 424*4bf8ce03SAdrian Chadd break; 425*4bf8ce03SAdrian Chadd case 0x6c900000: 426*4bf8ce03SAdrian Chadd sc->rge_type = MAC_R27; 427*4bf8ce03SAdrian Chadd // device_printf(dev, "RTL8127\n"); 428*4bf8ce03SAdrian Chadd break; 429*4bf8ce03SAdrian Chadd default: 430*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, "unknown version 0x%08x\n", hwrev); 431*4bf8ce03SAdrian Chadd goto fail; 432*4bf8ce03SAdrian Chadd } 433*4bf8ce03SAdrian Chadd 434*4bf8ce03SAdrian Chadd rge_config_imtype(sc, RGE_IMTYPE_SIM); 435*4bf8ce03SAdrian Chadd 436*4bf8ce03SAdrian Chadd /* TODO: disable ASPM/ECPM? */ 437*4bf8ce03SAdrian Chadd 438*4bf8ce03SAdrian Chadd #if 0 439*4bf8ce03SAdrian Chadd /* 440*4bf8ce03SAdrian Chadd * PCI Express check. 441*4bf8ce03SAdrian Chadd */ 442*4bf8ce03SAdrian Chadd if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS, 443*4bf8ce03SAdrian Chadd &offset, NULL)) { 444*4bf8ce03SAdrian Chadd /* Disable PCIe ASPM and ECPM. */ 445*4bf8ce03SAdrian Chadd reg = pci_conf_read(pa->pa_pc, pa->pa_tag, 446*4bf8ce03SAdrian Chadd offset + PCI_PCIE_LCSR); 447*4bf8ce03SAdrian Chadd reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1 | 448*4bf8ce03SAdrian Chadd PCI_PCIE_LCSR_ECPM); 449*4bf8ce03SAdrian Chadd pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR, 450*4bf8ce03SAdrian Chadd reg); 451*4bf8ce03SAdrian Chadd } 452*4bf8ce03SAdrian Chadd #endif 453*4bf8ce03SAdrian Chadd 454*4bf8ce03SAdrian Chadd RGE_LOCK(sc); 455*4bf8ce03SAdrian Chadd if (rge_chipinit(sc)) { 456*4bf8ce03SAdrian Chadd RGE_UNLOCK(sc); 457*4bf8ce03SAdrian Chadd goto fail; 458*4bf8ce03SAdrian Chadd } 459*4bf8ce03SAdrian Chadd 460*4bf8ce03SAdrian Chadd rge_get_macaddr(sc, eaddr); 461*4bf8ce03SAdrian Chadd RGE_UNLOCK(sc); 462*4bf8ce03SAdrian Chadd 463*4bf8ce03SAdrian Chadd if (rge_allocmem(sc)) 464*4bf8ce03SAdrian Chadd goto fail; 465*4bf8ce03SAdrian Chadd if (rge_alloc_stats_mem(sc)) 466*4bf8ce03SAdrian Chadd goto fail; 467*4bf8ce03SAdrian Chadd 468*4bf8ce03SAdrian Chadd /* Initialize ifmedia structures. */ 469*4bf8ce03SAdrian Chadd ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd, 470*4bf8ce03SAdrian Chadd rge_ifmedia_sts); 471*4bf8ce03SAdrian Chadd rge_add_media_types(sc); 472*4bf8ce03SAdrian Chadd ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL); 473*4bf8ce03SAdrian Chadd ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO); 474*4bf8ce03SAdrian Chadd sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media; 475*4bf8ce03SAdrian Chadd 476*4bf8ce03SAdrian Chadd rge_attach_if(sc, eaddr); 477*4bf8ce03SAdrian Chadd 478*4bf8ce03SAdrian Chadd /* 479*4bf8ce03SAdrian Chadd * TODO: technically should be per txq but we only support 480*4bf8ce03SAdrian Chadd * one TXQ at the moment. 481*4bf8ce03SAdrian Chadd */ 482*4bf8ce03SAdrian Chadd mbufq_init(&sc->sc_txq, RGE_TX_LIST_CNT); 483*4bf8ce03SAdrian Chadd 484*4bf8ce03SAdrian Chadd snprintf(sc->sc_tq_name, sizeof(sc->sc_tq_name), 485*4bf8ce03SAdrian Chadd "%s taskq", device_get_nameunit(sc->sc_dev)); 486*4bf8ce03SAdrian Chadd snprintf(sc->sc_tq_thr_name, sizeof(sc->sc_tq_thr_name), 487*4bf8ce03SAdrian Chadd "%s taskq thread", device_get_nameunit(sc->sc_dev)); 488*4bf8ce03SAdrian Chadd 489*4bf8ce03SAdrian Chadd sc->sc_tq = taskqueue_create(sc->sc_tq_name, M_NOWAIT, 490*4bf8ce03SAdrian Chadd taskqueue_thread_enqueue, &sc->sc_tq); 491*4bf8ce03SAdrian Chadd taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s", 492*4bf8ce03SAdrian Chadd sc->sc_tq_thr_name); 493*4bf8ce03SAdrian Chadd 494*4bf8ce03SAdrian Chadd TASK_INIT(&sc->sc_tx_task, 0, rge_tx_task, sc); 495*4bf8ce03SAdrian Chadd 496*4bf8ce03SAdrian Chadd callout_init_mtx(&sc->sc_timeout, &sc->sc_mtx, 0); 497*4bf8ce03SAdrian Chadd 498*4bf8ce03SAdrian Chadd return (0); 499*4bf8ce03SAdrian Chadd fail: 500*4bf8ce03SAdrian Chadd rge_detach(dev); 501*4bf8ce03SAdrian Chadd return (ENXIO); 502*4bf8ce03SAdrian Chadd } 503*4bf8ce03SAdrian Chadd 504*4bf8ce03SAdrian Chadd /** 505*4bf8ce03SAdrian Chadd * @brief flush the mbufq queue 506*4bf8ce03SAdrian Chadd * 507*4bf8ce03SAdrian Chadd * Again this should likely be per-TXQ. 508*4bf8ce03SAdrian Chadd * 509*4bf8ce03SAdrian Chadd * This should be called with the driver lock held. 510*4bf8ce03SAdrian Chadd */ 511*4bf8ce03SAdrian Chadd static void 512*4bf8ce03SAdrian Chadd rge_txq_flush_mbufs(struct rge_softc *sc) 513*4bf8ce03SAdrian Chadd { 514*4bf8ce03SAdrian Chadd struct mbuf *m; 515*4bf8ce03SAdrian Chadd int ntx = 0; 516*4bf8ce03SAdrian Chadd 517*4bf8ce03SAdrian Chadd RGE_ASSERT_LOCKED(sc); 518*4bf8ce03SAdrian Chadd 519*4bf8ce03SAdrian Chadd while ((m = mbufq_dequeue(&sc->sc_txq)) != NULL) { 520*4bf8ce03SAdrian Chadd m_freem(m); 521*4bf8ce03SAdrian Chadd ntx++; 522*4bf8ce03SAdrian Chadd } 523*4bf8ce03SAdrian Chadd 524*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_XMIT, "%s: %d frames flushed\n", __func__, 525*4bf8ce03SAdrian Chadd ntx); 526*4bf8ce03SAdrian Chadd } 527*4bf8ce03SAdrian Chadd 528*4bf8ce03SAdrian Chadd static int 529*4bf8ce03SAdrian Chadd rge_detach(device_t dev) 530*4bf8ce03SAdrian Chadd { 531*4bf8ce03SAdrian Chadd struct rge_softc *sc = device_get_softc(dev); 532*4bf8ce03SAdrian Chadd int i, rid; 533*4bf8ce03SAdrian Chadd 534*4bf8ce03SAdrian Chadd /* global flag, detaching */ 535*4bf8ce03SAdrian Chadd RGE_LOCK(sc); 536*4bf8ce03SAdrian Chadd sc->sc_stopped = true; 537*4bf8ce03SAdrian Chadd sc->sc_detaching = true; 538*4bf8ce03SAdrian Chadd RGE_UNLOCK(sc); 539*4bf8ce03SAdrian Chadd 540*4bf8ce03SAdrian Chadd /* stop/drain network interface */ 541*4bf8ce03SAdrian Chadd callout_drain(&sc->sc_timeout); 542*4bf8ce03SAdrian Chadd 543*4bf8ce03SAdrian Chadd /* Make sure TX task isn't running */ 544*4bf8ce03SAdrian Chadd if (sc->sc_tq != NULL) { 545*4bf8ce03SAdrian Chadd while (taskqueue_cancel(sc->sc_tq, &sc->sc_tx_task, NULL) != 0) 546*4bf8ce03SAdrian Chadd taskqueue_drain(sc->sc_tq, &sc->sc_tx_task); 547*4bf8ce03SAdrian Chadd } 548*4bf8ce03SAdrian Chadd 549*4bf8ce03SAdrian Chadd RGE_LOCK(sc); 550*4bf8ce03SAdrian Chadd callout_stop(&sc->sc_timeout); 551*4bf8ce03SAdrian Chadd 552*4bf8ce03SAdrian Chadd /* stop NIC / DMA */ 553*4bf8ce03SAdrian Chadd rge_stop_locked(sc); 554*4bf8ce03SAdrian Chadd 555*4bf8ce03SAdrian Chadd /* TODO: wait for completion */ 556*4bf8ce03SAdrian Chadd 557*4bf8ce03SAdrian Chadd /* Free pending TX mbufs */ 558*4bf8ce03SAdrian Chadd rge_txq_flush_mbufs(sc); 559*4bf8ce03SAdrian Chadd 560*4bf8ce03SAdrian Chadd RGE_UNLOCK(sc); 561*4bf8ce03SAdrian Chadd 562*4bf8ce03SAdrian Chadd /* Free taskqueue */ 563*4bf8ce03SAdrian Chadd if (sc->sc_tq != NULL) { 564*4bf8ce03SAdrian Chadd taskqueue_free(sc->sc_tq); 565*4bf8ce03SAdrian Chadd sc->sc_tq = NULL; 566*4bf8ce03SAdrian Chadd } 567*4bf8ce03SAdrian Chadd 568*4bf8ce03SAdrian Chadd /* Free descriptor memory */ 569*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: freemem\n", __func__); 570*4bf8ce03SAdrian Chadd rge_freemem(sc); 571*4bf8ce03SAdrian Chadd rge_free_stats_mem(sc); 572*4bf8ce03SAdrian Chadd 573*4bf8ce03SAdrian Chadd if (sc->sc_ifp) { 574*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: ifdetach/if_free\n", 575*4bf8ce03SAdrian Chadd __func__); 576*4bf8ce03SAdrian Chadd if (sc->sc_ether_attached) 577*4bf8ce03SAdrian Chadd ether_ifdetach(sc->sc_ifp); 578*4bf8ce03SAdrian Chadd if_free(sc->sc_ifp); 579*4bf8ce03SAdrian Chadd } 580*4bf8ce03SAdrian Chadd 581*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: sc_dmat_tx_desc\n", __func__); 582*4bf8ce03SAdrian Chadd if (sc->sc_dmat_tx_desc) 583*4bf8ce03SAdrian Chadd bus_dma_tag_destroy(sc->sc_dmat_tx_desc); 584*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: sc_dmat_tx_buf\n", __func__); 585*4bf8ce03SAdrian Chadd if (sc->sc_dmat_tx_buf) 586*4bf8ce03SAdrian Chadd bus_dma_tag_destroy(sc->sc_dmat_tx_buf); 587*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: sc_dmat_rx_desc\n", __func__); 588*4bf8ce03SAdrian Chadd if (sc->sc_dmat_rx_desc) 589*4bf8ce03SAdrian Chadd bus_dma_tag_destroy(sc->sc_dmat_rx_desc); 590*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: sc_dmat_rx_buf\n", __func__); 591*4bf8ce03SAdrian Chadd if (sc->sc_dmat_rx_buf) 592*4bf8ce03SAdrian Chadd bus_dma_tag_destroy(sc->sc_dmat_rx_buf); 593*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: sc_dmat_stats_buf\n", __func__); 594*4bf8ce03SAdrian Chadd if (sc->sc_dmat_stats_buf) 595*4bf8ce03SAdrian Chadd bus_dma_tag_destroy(sc->sc_dmat_stats_buf); 596*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: sc_dmat\n", __func__); 597*4bf8ce03SAdrian Chadd if (sc->sc_dmat) 598*4bf8ce03SAdrian Chadd bus_dma_tag_destroy(sc->sc_dmat); 599*4bf8ce03SAdrian Chadd 600*4bf8ce03SAdrian Chadd /* Teardown interrupts */ 601*4bf8ce03SAdrian Chadd for (i = 0; i < RGE_MSI_MESSAGES; i++) { 602*4bf8ce03SAdrian Chadd if (sc->sc_ih[i] != NULL) { 603*4bf8ce03SAdrian Chadd bus_teardown_intr(sc->sc_dev, sc->sc_irq[i], 604*4bf8ce03SAdrian Chadd sc->sc_ih[i]); 605*4bf8ce03SAdrian Chadd sc->sc_ih[i] = NULL; 606*4bf8ce03SAdrian Chadd } 607*4bf8ce03SAdrian Chadd } 608*4bf8ce03SAdrian Chadd 609*4bf8ce03SAdrian Chadd /* Free interrupt resources */ 610*4bf8ce03SAdrian Chadd for (i = 0, rid = 1; i < RGE_MSI_MESSAGES; i++, rid++) { 611*4bf8ce03SAdrian Chadd if (sc->sc_irq[i] != NULL) { 612*4bf8ce03SAdrian Chadd bus_release_resource(sc->sc_dev, SYS_RES_IRQ, 613*4bf8ce03SAdrian Chadd rid, sc->sc_irq[i]); 614*4bf8ce03SAdrian Chadd sc->sc_irq[i] = NULL; 615*4bf8ce03SAdrian Chadd } 616*4bf8ce03SAdrian Chadd } 617*4bf8ce03SAdrian Chadd 618*4bf8ce03SAdrian Chadd /* Free MSI allocation */ 619*4bf8ce03SAdrian Chadd if (sc->rge_flags & RGE_FLAG_MSI) 620*4bf8ce03SAdrian Chadd pci_release_msi(dev); 621*4bf8ce03SAdrian Chadd 622*4bf8ce03SAdrian Chadd if (sc->sc_bres) { 623*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: release mmio\n", 624*4bf8ce03SAdrian Chadd __func__); 625*4bf8ce03SAdrian Chadd bus_release_resource(dev, SYS_RES_MEMORY, 626*4bf8ce03SAdrian Chadd rman_get_rid(sc->sc_bres), sc->sc_bres); 627*4bf8ce03SAdrian Chadd sc->sc_bres = NULL; 628*4bf8ce03SAdrian Chadd } 629*4bf8ce03SAdrian Chadd 630*4bf8ce03SAdrian Chadd if (sc->sc_queues) { 631*4bf8ce03SAdrian Chadd free(sc->sc_queues, M_DEVBUF); 632*4bf8ce03SAdrian Chadd sc->sc_queues = NULL; 633*4bf8ce03SAdrian Chadd } 634*4bf8ce03SAdrian Chadd 635*4bf8ce03SAdrian Chadd mtx_destroy(&sc->sc_mtx); 636*4bf8ce03SAdrian Chadd 637*4bf8ce03SAdrian Chadd return (0); 638*4bf8ce03SAdrian Chadd } 639*4bf8ce03SAdrian Chadd 640*4bf8ce03SAdrian Chadd #if 0 641*4bf8ce03SAdrian Chadd 642*4bf8ce03SAdrian Chadd int 643*4bf8ce03SAdrian Chadd rge_activate(struct device *self, int act) 644*4bf8ce03SAdrian Chadd { 645*4bf8ce03SAdrian Chadd #ifndef SMALL_KERNEL 646*4bf8ce03SAdrian Chadd struct rge_softc *sc = (struct rge_softc *)self; 647*4bf8ce03SAdrian Chadd #endif 648*4bf8ce03SAdrian Chadd 649*4bf8ce03SAdrian Chadd switch (act) { 650*4bf8ce03SAdrian Chadd case DVACT_POWERDOWN: 651*4bf8ce03SAdrian Chadd #ifndef SMALL_KERNEL 652*4bf8ce03SAdrian Chadd rge_wol_power(sc); 653*4bf8ce03SAdrian Chadd #endif 654*4bf8ce03SAdrian Chadd break; 655*4bf8ce03SAdrian Chadd } 656*4bf8ce03SAdrian Chadd return (0); 657*4bf8ce03SAdrian Chadd } 658*4bf8ce03SAdrian Chadd #endif 659*4bf8ce03SAdrian Chadd 660*4bf8ce03SAdrian Chadd static void 661*4bf8ce03SAdrian Chadd rge_intr_msi(void *arg) 662*4bf8ce03SAdrian Chadd { 663*4bf8ce03SAdrian Chadd struct mbufq rx_mq; 664*4bf8ce03SAdrian Chadd struct epoch_tracker et; 665*4bf8ce03SAdrian Chadd struct mbuf *m; 666*4bf8ce03SAdrian Chadd struct rge_softc *sc = arg; 667*4bf8ce03SAdrian Chadd struct rge_queues *q = sc->sc_queues; 668*4bf8ce03SAdrian Chadd uint32_t status; 669*4bf8ce03SAdrian Chadd int claimed = 0, rv; 670*4bf8ce03SAdrian Chadd 671*4bf8ce03SAdrian Chadd sc->sc_drv_stats.intr_cnt++; 672*4bf8ce03SAdrian Chadd 673*4bf8ce03SAdrian Chadd mbufq_init(&rx_mq, RGE_RX_LIST_CNT); 674*4bf8ce03SAdrian Chadd 675*4bf8ce03SAdrian Chadd if ((if_getdrvflags(sc->sc_ifp) & IFF_DRV_RUNNING) == 0) 676*4bf8ce03SAdrian Chadd return; 677*4bf8ce03SAdrian Chadd 678*4bf8ce03SAdrian Chadd RGE_LOCK(sc); 679*4bf8ce03SAdrian Chadd 680*4bf8ce03SAdrian Chadd if (sc->sc_suspended || sc->sc_stopped || sc->sc_detaching) { 681*4bf8ce03SAdrian Chadd RGE_UNLOCK(sc); 682*4bf8ce03SAdrian Chadd return; 683*4bf8ce03SAdrian Chadd } 684*4bf8ce03SAdrian Chadd 685*4bf8ce03SAdrian Chadd /* Disable interrupts. */ 686*4bf8ce03SAdrian Chadd RGE_WRITE_4(sc, RGE_IMR, 0); 687*4bf8ce03SAdrian Chadd 688*4bf8ce03SAdrian Chadd if (!(sc->rge_flags & RGE_FLAG_MSI)) { 689*4bf8ce03SAdrian Chadd if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0) 690*4bf8ce03SAdrian Chadd goto done; 691*4bf8ce03SAdrian Chadd } 692*4bf8ce03SAdrian Chadd 693*4bf8ce03SAdrian Chadd status = RGE_READ_4(sc, RGE_ISR); 694*4bf8ce03SAdrian Chadd if (status) 695*4bf8ce03SAdrian Chadd RGE_WRITE_4(sc, RGE_ISR, status); 696*4bf8ce03SAdrian Chadd 697*4bf8ce03SAdrian Chadd if (status & RGE_ISR_PCS_TIMEOUT) 698*4bf8ce03SAdrian Chadd claimed = 1; 699*4bf8ce03SAdrian Chadd 700*4bf8ce03SAdrian Chadd rv = 0; 701*4bf8ce03SAdrian Chadd if (status & sc->rge_intrs) { 702*4bf8ce03SAdrian Chadd 703*4bf8ce03SAdrian Chadd (void) q; 704*4bf8ce03SAdrian Chadd rv |= rge_rxeof(q, &rx_mq); 705*4bf8ce03SAdrian Chadd rv |= rge_txeof(q); 706*4bf8ce03SAdrian Chadd 707*4bf8ce03SAdrian Chadd if (status & RGE_ISR_SYSTEM_ERR) { 708*4bf8ce03SAdrian Chadd sc->sc_drv_stats.intr_system_err_cnt++; 709*4bf8ce03SAdrian Chadd rge_init_locked(sc); 710*4bf8ce03SAdrian Chadd } 711*4bf8ce03SAdrian Chadd claimed = 1; 712*4bf8ce03SAdrian Chadd } 713*4bf8ce03SAdrian Chadd 714*4bf8ce03SAdrian Chadd if (sc->rge_timerintr) { 715*4bf8ce03SAdrian Chadd if (!rv) { 716*4bf8ce03SAdrian Chadd /* 717*4bf8ce03SAdrian Chadd * Nothing needs to be processed, fallback 718*4bf8ce03SAdrian Chadd * to use TX/RX interrupts. 719*4bf8ce03SAdrian Chadd */ 720*4bf8ce03SAdrian Chadd rge_setup_intr(sc, RGE_IMTYPE_NONE); 721*4bf8ce03SAdrian Chadd 722*4bf8ce03SAdrian Chadd /* 723*4bf8ce03SAdrian Chadd * Recollect, mainly to avoid the possible 724*4bf8ce03SAdrian Chadd * race introduced by changing interrupt 725*4bf8ce03SAdrian Chadd * masks. 726*4bf8ce03SAdrian Chadd */ 727*4bf8ce03SAdrian Chadd rge_rxeof(q, &rx_mq); 728*4bf8ce03SAdrian Chadd rge_txeof(q); 729*4bf8ce03SAdrian Chadd } else 730*4bf8ce03SAdrian Chadd RGE_WRITE_4(sc, RGE_TIMERCNT, 1); 731*4bf8ce03SAdrian Chadd } else if (rv) { 732*4bf8ce03SAdrian Chadd /* 733*4bf8ce03SAdrian Chadd * Assume that using simulated interrupt moderation 734*4bf8ce03SAdrian Chadd * (hardware timer based) could reduce the interrupt 735*4bf8ce03SAdrian Chadd * rate. 736*4bf8ce03SAdrian Chadd */ 737*4bf8ce03SAdrian Chadd rge_setup_intr(sc, RGE_IMTYPE_SIM); 738*4bf8ce03SAdrian Chadd } 739*4bf8ce03SAdrian Chadd 740*4bf8ce03SAdrian Chadd RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs); 741*4bf8ce03SAdrian Chadd 742*4bf8ce03SAdrian Chadd done: 743*4bf8ce03SAdrian Chadd RGE_UNLOCK(sc); 744*4bf8ce03SAdrian Chadd 745*4bf8ce03SAdrian Chadd NET_EPOCH_ENTER(et); 746*4bf8ce03SAdrian Chadd /* Handle any RX frames, outside of the driver lock */ 747*4bf8ce03SAdrian Chadd while ((m = mbufq_dequeue(&rx_mq)) != NULL) { 748*4bf8ce03SAdrian Chadd sc->sc_drv_stats.recv_input_cnt++; 749*4bf8ce03SAdrian Chadd if_input(sc->sc_ifp, m); 750*4bf8ce03SAdrian Chadd } 751*4bf8ce03SAdrian Chadd NET_EPOCH_EXIT(et); 752*4bf8ce03SAdrian Chadd 753*4bf8ce03SAdrian Chadd (void) claimed; 754*4bf8ce03SAdrian Chadd } 755*4bf8ce03SAdrian Chadd 756*4bf8ce03SAdrian Chadd static inline void 757*4bf8ce03SAdrian Chadd rge_tx_list_sync(struct rge_softc *sc, struct rge_queues *q, 758*4bf8ce03SAdrian Chadd unsigned int idx, unsigned int len, int ops) 759*4bf8ce03SAdrian Chadd { 760*4bf8ce03SAdrian Chadd bus_dmamap_sync(sc->sc_dmat_tx_desc, q->q_tx.rge_tx_list_map, ops); 761*4bf8ce03SAdrian Chadd } 762*4bf8ce03SAdrian Chadd 763*4bf8ce03SAdrian Chadd /** 764*4bf8ce03SAdrian Chadd * @brief Queue the given mbuf at the given TX slot index for transmit. 765*4bf8ce03SAdrian Chadd * 766*4bf8ce03SAdrian Chadd * If the frame couldn't be enqueued then 0 is returned. 767*4bf8ce03SAdrian Chadd * The caller needs to handle that and free/re-queue the mbuf as required. 768*4bf8ce03SAdrian Chadd * 769*4bf8ce03SAdrian Chadd * Note that this doesn't actually kick-start the transmit itself; 770*4bf8ce03SAdrian Chadd * see rge_txstart() for the register to poke to start transmit. 771*4bf8ce03SAdrian Chadd * 772*4bf8ce03SAdrian Chadd * This must be called with the driver lock held. 773*4bf8ce03SAdrian Chadd * 774*4bf8ce03SAdrian Chadd * @param sc driver softc 775*4bf8ce03SAdrian Chadd * @param q TX queue ring 776*4bf8ce03SAdrian Chadd * @param m mbuf to enqueue 777*4bf8ce03SAdrian Chadd * @returns if the mbuf is enqueued, it's consumed here and the number of 778*4bf8ce03SAdrian Chadd * TX descriptors used is returned; if there's no space then 0 is 779*4bf8ce03SAdrian Chadd * returned; if the mbuf couldn't be defragged and the caller 780*4bf8ce03SAdrian Chadd * should free it then -1 is returned. 781*4bf8ce03SAdrian Chadd */ 782*4bf8ce03SAdrian Chadd static int 783*4bf8ce03SAdrian Chadd rge_encap(struct rge_softc *sc, struct rge_queues *q, struct mbuf *m, int idx) 784*4bf8ce03SAdrian Chadd { 785*4bf8ce03SAdrian Chadd struct rge_tx_desc *d = NULL; 786*4bf8ce03SAdrian Chadd struct rge_txq *txq; 787*4bf8ce03SAdrian Chadd bus_dmamap_t txmap; 788*4bf8ce03SAdrian Chadd uint32_t cmdsts, cflags = 0; 789*4bf8ce03SAdrian Chadd int cur, error, i; 790*4bf8ce03SAdrian Chadd bus_dma_segment_t seg[RGE_TX_NSEGS]; 791*4bf8ce03SAdrian Chadd int nsegs; 792*4bf8ce03SAdrian Chadd 793*4bf8ce03SAdrian Chadd RGE_ASSERT_LOCKED(sc); 794*4bf8ce03SAdrian Chadd 795*4bf8ce03SAdrian Chadd txq = &q->q_tx.rge_txq[idx]; 796*4bf8ce03SAdrian Chadd txmap = txq->txq_dmamap; 797*4bf8ce03SAdrian Chadd 798*4bf8ce03SAdrian Chadd sc->sc_drv_stats.tx_encap_cnt++; 799*4bf8ce03SAdrian Chadd 800*4bf8ce03SAdrian Chadd nsegs = RGE_TX_NSEGS; 801*4bf8ce03SAdrian Chadd error = bus_dmamap_load_mbuf_sg(sc->sc_dmat_tx_buf, txmap, m, 802*4bf8ce03SAdrian Chadd seg, &nsegs, BUS_DMA_NOWAIT); 803*4bf8ce03SAdrian Chadd 804*4bf8ce03SAdrian Chadd switch (error) { 805*4bf8ce03SAdrian Chadd case 0: 806*4bf8ce03SAdrian Chadd break; 807*4bf8ce03SAdrian Chadd case EFBIG: /* mbuf chain is too fragmented */ 808*4bf8ce03SAdrian Chadd sc->sc_drv_stats.tx_encap_refrag_cnt++; 809*4bf8ce03SAdrian Chadd nsegs = RGE_TX_NSEGS; 810*4bf8ce03SAdrian Chadd if (m_defrag(m, M_NOWAIT) == 0 && 811*4bf8ce03SAdrian Chadd bus_dmamap_load_mbuf_sg(sc->sc_dmat_tx_buf, txmap, m, 812*4bf8ce03SAdrian Chadd seg, &nsegs, BUS_DMA_NOWAIT) == 0) 813*4bf8ce03SAdrian Chadd break; 814*4bf8ce03SAdrian Chadd /* FALLTHROUGH */ 815*4bf8ce03SAdrian Chadd default: 816*4bf8ce03SAdrian Chadd sc->sc_drv_stats.tx_encap_err_toofrag++; 817*4bf8ce03SAdrian Chadd return (-1); 818*4bf8ce03SAdrian Chadd } 819*4bf8ce03SAdrian Chadd 820*4bf8ce03SAdrian Chadd bus_dmamap_sync(sc->sc_dmat_tx_buf, txmap, BUS_DMASYNC_PREWRITE); 821*4bf8ce03SAdrian Chadd 822*4bf8ce03SAdrian Chadd /* 823*4bf8ce03SAdrian Chadd * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested. 824*4bf8ce03SAdrian Chadd * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not 825*4bf8ce03SAdrian Chadd * take affect. 826*4bf8ce03SAdrian Chadd */ 827*4bf8ce03SAdrian Chadd if ((m->m_pkthdr.csum_flags & RGE_CSUM_FEATURES) != 0) { 828*4bf8ce03SAdrian Chadd cflags |= RGE_TDEXTSTS_IPCSUM; 829*4bf8ce03SAdrian Chadd sc->sc_drv_stats.tx_offload_ip_csum_set++; 830*4bf8ce03SAdrian Chadd if (m->m_pkthdr.csum_flags & CSUM_TCP) { 831*4bf8ce03SAdrian Chadd sc->sc_drv_stats.tx_offload_tcp_csum_set++; 832*4bf8ce03SAdrian Chadd cflags |= RGE_TDEXTSTS_TCPCSUM; 833*4bf8ce03SAdrian Chadd } 834*4bf8ce03SAdrian Chadd if (m->m_pkthdr.csum_flags & CSUM_UDP) { 835*4bf8ce03SAdrian Chadd sc->sc_drv_stats.tx_offload_udp_csum_set++; 836*4bf8ce03SAdrian Chadd cflags |= RGE_TDEXTSTS_UDPCSUM; 837*4bf8ce03SAdrian Chadd } 838*4bf8ce03SAdrian Chadd } 839*4bf8ce03SAdrian Chadd 840*4bf8ce03SAdrian Chadd /* Set up hardware VLAN tagging */ 841*4bf8ce03SAdrian Chadd if (m->m_flags & M_VLANTAG) { 842*4bf8ce03SAdrian Chadd sc->sc_drv_stats.tx_offload_vlan_tag_set++; 843*4bf8ce03SAdrian Chadd cflags |= htole16(m->m_pkthdr.ether_vtag) | RGE_TDEXTSTS_VTAG; 844*4bf8ce03SAdrian Chadd } 845*4bf8ce03SAdrian Chadd 846*4bf8ce03SAdrian Chadd cur = idx; 847*4bf8ce03SAdrian Chadd for (i = 1; i < nsegs; i++) { 848*4bf8ce03SAdrian Chadd cur = RGE_NEXT_TX_DESC(cur); 849*4bf8ce03SAdrian Chadd 850*4bf8ce03SAdrian Chadd cmdsts = RGE_TDCMDSTS_OWN; 851*4bf8ce03SAdrian Chadd cmdsts |= seg[i].ds_len; 852*4bf8ce03SAdrian Chadd 853*4bf8ce03SAdrian Chadd if (cur == RGE_TX_LIST_CNT - 1) 854*4bf8ce03SAdrian Chadd cmdsts |= RGE_TDCMDSTS_EOR; 855*4bf8ce03SAdrian Chadd if (i == nsegs - 1) 856*4bf8ce03SAdrian Chadd cmdsts |= RGE_TDCMDSTS_EOF; 857*4bf8ce03SAdrian Chadd 858*4bf8ce03SAdrian Chadd /* 859*4bf8ce03SAdrian Chadd * Note: vendor driver puts wmb() after opts2/extsts, 860*4bf8ce03SAdrian Chadd * before opts1/status. 861*4bf8ce03SAdrian Chadd * 862*4bf8ce03SAdrian Chadd * See the other place I have this comment for more 863*4bf8ce03SAdrian Chadd * information. 864*4bf8ce03SAdrian Chadd */ 865*4bf8ce03SAdrian Chadd d = &q->q_tx.rge_tx_list[cur]; 866*4bf8ce03SAdrian Chadd d->rge_addr = htole64(seg[i].ds_addr); 867*4bf8ce03SAdrian Chadd d->rge_extsts = htole32(cflags); 868*4bf8ce03SAdrian Chadd wmb(); 869*4bf8ce03SAdrian Chadd d->rge_cmdsts = htole32(cmdsts); 870*4bf8ce03SAdrian Chadd } 871*4bf8ce03SAdrian Chadd 872*4bf8ce03SAdrian Chadd /* Update info of TX queue and descriptors. */ 873*4bf8ce03SAdrian Chadd txq->txq_mbuf = m; 874*4bf8ce03SAdrian Chadd txq->txq_descidx = cur; 875*4bf8ce03SAdrian Chadd 876*4bf8ce03SAdrian Chadd cmdsts = RGE_TDCMDSTS_SOF; 877*4bf8ce03SAdrian Chadd cmdsts |= seg[0].ds_len; 878*4bf8ce03SAdrian Chadd 879*4bf8ce03SAdrian Chadd if (idx == RGE_TX_LIST_CNT - 1) 880*4bf8ce03SAdrian Chadd cmdsts |= RGE_TDCMDSTS_EOR; 881*4bf8ce03SAdrian Chadd if (nsegs == 1) 882*4bf8ce03SAdrian Chadd cmdsts |= RGE_TDCMDSTS_EOF; 883*4bf8ce03SAdrian Chadd 884*4bf8ce03SAdrian Chadd /* 885*4bf8ce03SAdrian Chadd * Note: vendor driver puts wmb() after opts2/extsts, 886*4bf8ce03SAdrian Chadd * before opts1/status. 887*4bf8ce03SAdrian Chadd * 888*4bf8ce03SAdrian Chadd * It does this: 889*4bf8ce03SAdrian Chadd * - set rge_addr 890*4bf8ce03SAdrian Chadd * - set extsts 891*4bf8ce03SAdrian Chadd * - wmb 892*4bf8ce03SAdrian Chadd * - set status - at this point it's owned by the hardware 893*4bf8ce03SAdrian Chadd * 894*4bf8ce03SAdrian Chadd */ 895*4bf8ce03SAdrian Chadd d = &q->q_tx.rge_tx_list[idx]; 896*4bf8ce03SAdrian Chadd d->rge_addr = htole64(seg[0].ds_addr); 897*4bf8ce03SAdrian Chadd d->rge_extsts = htole32(cflags); 898*4bf8ce03SAdrian Chadd wmb(); 899*4bf8ce03SAdrian Chadd d->rge_cmdsts = htole32(cmdsts); 900*4bf8ce03SAdrian Chadd wmb(); 901*4bf8ce03SAdrian Chadd 902*4bf8ce03SAdrian Chadd if (cur >= idx) { 903*4bf8ce03SAdrian Chadd rge_tx_list_sync(sc, q, idx, nsegs, 904*4bf8ce03SAdrian Chadd BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 905*4bf8ce03SAdrian Chadd } else { 906*4bf8ce03SAdrian Chadd rge_tx_list_sync(sc, q, idx, RGE_TX_LIST_CNT - idx, 907*4bf8ce03SAdrian Chadd BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 908*4bf8ce03SAdrian Chadd rge_tx_list_sync(sc, q, 0, cur + 1, 909*4bf8ce03SAdrian Chadd BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 910*4bf8ce03SAdrian Chadd } 911*4bf8ce03SAdrian Chadd 912*4bf8ce03SAdrian Chadd /* Transfer ownership of packet to the chip. */ 913*4bf8ce03SAdrian Chadd cmdsts |= RGE_TDCMDSTS_OWN; 914*4bf8ce03SAdrian Chadd rge_tx_list_sync(sc, q, idx, 1, BUS_DMASYNC_POSTWRITE); 915*4bf8ce03SAdrian Chadd d->rge_cmdsts = htole32(cmdsts); 916*4bf8ce03SAdrian Chadd rge_tx_list_sync(sc, q, idx, 1, BUS_DMASYNC_PREWRITE); 917*4bf8ce03SAdrian Chadd wmb(); 918*4bf8ce03SAdrian Chadd 919*4bf8ce03SAdrian Chadd return (nsegs); 920*4bf8ce03SAdrian Chadd } 921*4bf8ce03SAdrian Chadd 922*4bf8ce03SAdrian Chadd static int 923*4bf8ce03SAdrian Chadd rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 924*4bf8ce03SAdrian Chadd { 925*4bf8ce03SAdrian Chadd struct rge_softc *sc = if_getsoftc(ifp); 926*4bf8ce03SAdrian Chadd struct ifreq *ifr = (struct ifreq *)data; 927*4bf8ce03SAdrian Chadd int error = 0; 928*4bf8ce03SAdrian Chadd 929*4bf8ce03SAdrian Chadd switch (cmd) { 930*4bf8ce03SAdrian Chadd case SIOCSIFMTU: 931*4bf8ce03SAdrian Chadd /* Note: no hardware reinit is required */ 932*4bf8ce03SAdrian Chadd if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > RGE_JUMBO_MTU) { 933*4bf8ce03SAdrian Chadd error = EINVAL; 934*4bf8ce03SAdrian Chadd break; 935*4bf8ce03SAdrian Chadd } 936*4bf8ce03SAdrian Chadd if (if_getmtu(ifp) != ifr->ifr_mtu) 937*4bf8ce03SAdrian Chadd if_setmtu(ifp, ifr->ifr_mtu); 938*4bf8ce03SAdrian Chadd 939*4bf8ce03SAdrian Chadd VLAN_CAPABILITIES(ifp); 940*4bf8ce03SAdrian Chadd break; 941*4bf8ce03SAdrian Chadd 942*4bf8ce03SAdrian Chadd case SIOCSIFFLAGS: 943*4bf8ce03SAdrian Chadd RGE_LOCK(sc); 944*4bf8ce03SAdrian Chadd if ((if_getflags(ifp) & IFF_UP) != 0) { 945*4bf8ce03SAdrian Chadd if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { 946*4bf8ce03SAdrian Chadd /* 947*4bf8ce03SAdrian Chadd * TODO: handle promisc/iffmulti changing 948*4bf8ce03SAdrian Chadd * without reprogramming everything. 949*4bf8ce03SAdrian Chadd */ 950*4bf8ce03SAdrian Chadd rge_init_locked(sc); 951*4bf8ce03SAdrian Chadd } else { 952*4bf8ce03SAdrian Chadd /* Reinit promisc/multi just in case */ 953*4bf8ce03SAdrian Chadd rge_iff_locked(sc); 954*4bf8ce03SAdrian Chadd } 955*4bf8ce03SAdrian Chadd } else { 956*4bf8ce03SAdrian Chadd if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 957*4bf8ce03SAdrian Chadd rge_stop_locked(sc); 958*4bf8ce03SAdrian Chadd } 959*4bf8ce03SAdrian Chadd } 960*4bf8ce03SAdrian Chadd RGE_UNLOCK(sc); 961*4bf8ce03SAdrian Chadd break; 962*4bf8ce03SAdrian Chadd case SIOCADDMULTI: 963*4bf8ce03SAdrian Chadd case SIOCDELMULTI: 964*4bf8ce03SAdrian Chadd RGE_LOCK(sc); 965*4bf8ce03SAdrian Chadd if ((if_getflags(ifp) & IFF_DRV_RUNNING) != 0) { 966*4bf8ce03SAdrian Chadd rge_iff_locked(sc); 967*4bf8ce03SAdrian Chadd } 968*4bf8ce03SAdrian Chadd RGE_UNLOCK(sc); 969*4bf8ce03SAdrian Chadd break; 970*4bf8ce03SAdrian Chadd case SIOCGIFMEDIA: 971*4bf8ce03SAdrian Chadd case SIOCSIFMEDIA: 972*4bf8ce03SAdrian Chadd error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); 973*4bf8ce03SAdrian Chadd break; 974*4bf8ce03SAdrian Chadd case SIOCSIFCAP: 975*4bf8ce03SAdrian Chadd { 976*4bf8ce03SAdrian Chadd int mask; 977*4bf8ce03SAdrian Chadd bool reinit = false; 978*4bf8ce03SAdrian Chadd 979*4bf8ce03SAdrian Chadd /* Get the mask of changed bits */ 980*4bf8ce03SAdrian Chadd mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 981*4bf8ce03SAdrian Chadd 982*4bf8ce03SAdrian Chadd /* 983*4bf8ce03SAdrian Chadd * Locked so we don't have a narrow window where frames 984*4bf8ce03SAdrian Chadd * are being processed with the updated flags but the 985*4bf8ce03SAdrian Chadd * hardware configuration hasn't yet changed. 986*4bf8ce03SAdrian Chadd */ 987*4bf8ce03SAdrian Chadd RGE_LOCK(sc); 988*4bf8ce03SAdrian Chadd 989*4bf8ce03SAdrian Chadd if ((mask & IFCAP_TXCSUM) != 0 && 990*4bf8ce03SAdrian Chadd (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) { 991*4bf8ce03SAdrian Chadd if_togglecapenable(ifp, IFCAP_TXCSUM); 992*4bf8ce03SAdrian Chadd if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) 993*4bf8ce03SAdrian Chadd if_sethwassistbits(ifp, RGE_CSUM_FEATURES, 0); 994*4bf8ce03SAdrian Chadd else 995*4bf8ce03SAdrian Chadd if_sethwassistbits(ifp, 0, RGE_CSUM_FEATURES); 996*4bf8ce03SAdrian Chadd reinit = 1; 997*4bf8ce03SAdrian Chadd } 998*4bf8ce03SAdrian Chadd 999*4bf8ce03SAdrian Chadd if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 1000*4bf8ce03SAdrian Chadd (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) { 1001*4bf8ce03SAdrian Chadd if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); 1002*4bf8ce03SAdrian Chadd reinit = 1; 1003*4bf8ce03SAdrian Chadd } 1004*4bf8ce03SAdrian Chadd 1005*4bf8ce03SAdrian Chadd /* TODO: WOL */ 1006*4bf8ce03SAdrian Chadd 1007*4bf8ce03SAdrian Chadd if ((mask & IFCAP_RXCSUM) != 0 && 1008*4bf8ce03SAdrian Chadd (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) { 1009*4bf8ce03SAdrian Chadd if_togglecapenable(ifp, IFCAP_RXCSUM); 1010*4bf8ce03SAdrian Chadd reinit = 1; 1011*4bf8ce03SAdrian Chadd } 1012*4bf8ce03SAdrian Chadd 1013*4bf8ce03SAdrian Chadd if (reinit && if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1014*4bf8ce03SAdrian Chadd if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1015*4bf8ce03SAdrian Chadd rge_init_locked(sc); 1016*4bf8ce03SAdrian Chadd } 1017*4bf8ce03SAdrian Chadd 1018*4bf8ce03SAdrian Chadd RGE_UNLOCK(sc); 1019*4bf8ce03SAdrian Chadd VLAN_CAPABILITIES(ifp); 1020*4bf8ce03SAdrian Chadd } 1021*4bf8ce03SAdrian Chadd break; 1022*4bf8ce03SAdrian Chadd default: 1023*4bf8ce03SAdrian Chadd error = ether_ioctl(ifp, cmd, data); 1024*4bf8ce03SAdrian Chadd break; 1025*4bf8ce03SAdrian Chadd } 1026*4bf8ce03SAdrian Chadd 1027*4bf8ce03SAdrian Chadd return (error); 1028*4bf8ce03SAdrian Chadd } 1029*4bf8ce03SAdrian Chadd 1030*4bf8ce03SAdrian Chadd static void 1031*4bf8ce03SAdrian Chadd rge_qflush_if(if_t ifp) 1032*4bf8ce03SAdrian Chadd { 1033*4bf8ce03SAdrian Chadd struct rge_softc *sc = if_getsoftc(ifp); 1034*4bf8ce03SAdrian Chadd 1035*4bf8ce03SAdrian Chadd /* TODO: this should iterate over the TXQs */ 1036*4bf8ce03SAdrian Chadd RGE_LOCK(sc); 1037*4bf8ce03SAdrian Chadd rge_txq_flush_mbufs(sc); 1038*4bf8ce03SAdrian Chadd RGE_UNLOCK(sc); 1039*4bf8ce03SAdrian Chadd } 1040*4bf8ce03SAdrian Chadd 1041*4bf8ce03SAdrian Chadd /** 1042*4bf8ce03SAdrian Chadd * @brief Transmit the given frame to the hardware. 1043*4bf8ce03SAdrian Chadd * 1044*4bf8ce03SAdrian Chadd * This routine is called by the network stack to send 1045*4bf8ce03SAdrian Chadd * a frame to the device. 1046*4bf8ce03SAdrian Chadd * 1047*4bf8ce03SAdrian Chadd * For now we simply direct dispatch this frame to the 1048*4bf8ce03SAdrian Chadd * hardware (and thus avoid maintaining our own internal 1049*4bf8ce03SAdrian Chadd * queue) 1050*4bf8ce03SAdrian Chadd */ 1051*4bf8ce03SAdrian Chadd static int 1052*4bf8ce03SAdrian Chadd rge_transmit_if(if_t ifp, struct mbuf *m) 1053*4bf8ce03SAdrian Chadd { 1054*4bf8ce03SAdrian Chadd struct rge_softc *sc = if_getsoftc(ifp); 1055*4bf8ce03SAdrian Chadd int ret; 1056*4bf8ce03SAdrian Chadd 1057*4bf8ce03SAdrian Chadd sc->sc_drv_stats.transmit_call_cnt++; 1058*4bf8ce03SAdrian Chadd 1059*4bf8ce03SAdrian Chadd RGE_LOCK(sc); 1060*4bf8ce03SAdrian Chadd if (sc->sc_stopped == true) { 1061*4bf8ce03SAdrian Chadd sc->sc_drv_stats.transmit_stopped_cnt++; 1062*4bf8ce03SAdrian Chadd RGE_UNLOCK(sc); 1063*4bf8ce03SAdrian Chadd return (ENETDOWN); /* TODO: better error? */ 1064*4bf8ce03SAdrian Chadd } 1065*4bf8ce03SAdrian Chadd 1066*4bf8ce03SAdrian Chadd /* XXX again should be a per-TXQ thing */ 1067*4bf8ce03SAdrian Chadd ret = mbufq_enqueue(&sc->sc_txq, m); 1068*4bf8ce03SAdrian Chadd if (ret != 0) { 1069*4bf8ce03SAdrian Chadd sc->sc_drv_stats.transmit_full_cnt++; 1070*4bf8ce03SAdrian Chadd RGE_UNLOCK(sc); 1071*4bf8ce03SAdrian Chadd return (ret); 1072*4bf8ce03SAdrian Chadd } 1073*4bf8ce03SAdrian Chadd RGE_UNLOCK(sc); 1074*4bf8ce03SAdrian Chadd 1075*4bf8ce03SAdrian Chadd /* mbuf is owned by the driver, schedule transmit */ 1076*4bf8ce03SAdrian Chadd taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task); 1077*4bf8ce03SAdrian Chadd sc->sc_drv_stats.transmit_queued_cnt++; 1078*4bf8ce03SAdrian Chadd 1079*4bf8ce03SAdrian Chadd return (0); 1080*4bf8ce03SAdrian Chadd } 1081*4bf8ce03SAdrian Chadd 1082*4bf8ce03SAdrian Chadd static void 1083*4bf8ce03SAdrian Chadd rge_init_if(void *xsc) 1084*4bf8ce03SAdrian Chadd { 1085*4bf8ce03SAdrian Chadd struct rge_softc *sc = xsc; 1086*4bf8ce03SAdrian Chadd 1087*4bf8ce03SAdrian Chadd RGE_LOCK(sc); 1088*4bf8ce03SAdrian Chadd rge_init_locked(sc); 1089*4bf8ce03SAdrian Chadd RGE_UNLOCK(sc); 1090*4bf8ce03SAdrian Chadd } 1091*4bf8ce03SAdrian Chadd 1092*4bf8ce03SAdrian Chadd static void 1093*4bf8ce03SAdrian Chadd rge_init_locked(struct rge_softc *sc) 1094*4bf8ce03SAdrian Chadd { 1095*4bf8ce03SAdrian Chadd struct rge_queues *q = sc->sc_queues; 1096*4bf8ce03SAdrian Chadd uint32_t rxconf, val; 1097*4bf8ce03SAdrian Chadd int i, num_miti; 1098*4bf8ce03SAdrian Chadd 1099*4bf8ce03SAdrian Chadd RGE_ASSERT_LOCKED(sc); 1100*4bf8ce03SAdrian Chadd 1101*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_INIT, "%s: called!\n", __func__); 1102*4bf8ce03SAdrian Chadd 1103*4bf8ce03SAdrian Chadd /* Don't double-init the hardware */ 1104*4bf8ce03SAdrian Chadd if ((if_getdrvflags(sc->sc_ifp) & IFF_DRV_RUNNING) != 0) { 1105*4bf8ce03SAdrian Chadd /* 1106*4bf8ce03SAdrian Chadd * Note: I'm leaving this disabled by default; however 1107*4bf8ce03SAdrian Chadd * I'm leaving it in here so I can figure out what's 1108*4bf8ce03SAdrian Chadd * causing this to be initialised both from the ioctl 1109*4bf8ce03SAdrian Chadd * API and if_init() API. 1110*4bf8ce03SAdrian Chadd */ 1111*4bf8ce03SAdrian Chadd // RGE_PRINT_ERROR(sc, "%s: called whilst running?\n", __func__); 1112*4bf8ce03SAdrian Chadd return; 1113*4bf8ce03SAdrian Chadd } 1114*4bf8ce03SAdrian Chadd 1115*4bf8ce03SAdrian Chadd /* 1116*4bf8ce03SAdrian Chadd * Bring the hardware down so we know it's in a good known 1117*4bf8ce03SAdrian Chadd * state before we bring it up in a good known state. 1118*4bf8ce03SAdrian Chadd */ 1119*4bf8ce03SAdrian Chadd rge_stop_locked(sc); 1120*4bf8ce03SAdrian Chadd 1121*4bf8ce03SAdrian Chadd /* Set MAC address. */ 1122*4bf8ce03SAdrian Chadd rge_set_macaddr(sc, if_getlladdr(sc->sc_ifp)); 1123*4bf8ce03SAdrian Chadd 1124*4bf8ce03SAdrian Chadd /* Initialize RX and TX descriptors lists. */ 1125*4bf8ce03SAdrian Chadd rge_rx_list_init(q); 1126*4bf8ce03SAdrian Chadd rge_tx_list_init(q); 1127*4bf8ce03SAdrian Chadd 1128*4bf8ce03SAdrian Chadd if (rge_chipinit(sc)) { 1129*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, "%s: ERROR: chip init fail!\n", __func__); 1130*4bf8ce03SAdrian Chadd return; 1131*4bf8ce03SAdrian Chadd } 1132*4bf8ce03SAdrian Chadd 1133*4bf8ce03SAdrian Chadd if (rge_phy_config(sc)) 1134*4bf8ce03SAdrian Chadd return; 1135*4bf8ce03SAdrian Chadd 1136*4bf8ce03SAdrian Chadd RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG); 1137*4bf8ce03SAdrian Chadd 1138*4bf8ce03SAdrian Chadd RGE_CLRBIT_1(sc, 0xf1, 0x80); 1139*4bf8ce03SAdrian Chadd rge_disable_aspm_clkreq(sc); 1140*4bf8ce03SAdrian Chadd RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER, 1141*4bf8ce03SAdrian Chadd RGE_JUMBO_MTU + ETHER_HDR_LEN + 32); 1142*4bf8ce03SAdrian Chadd 1143*4bf8ce03SAdrian Chadd /* Load the addresses of the RX and TX lists into the chip. */ 1144*4bf8ce03SAdrian Chadd RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO, 1145*4bf8ce03SAdrian Chadd RGE_ADDR_LO(q->q_rx.rge_rx_list_paddr)); 1146*4bf8ce03SAdrian Chadd RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI, 1147*4bf8ce03SAdrian Chadd RGE_ADDR_HI(q->q_rx.rge_rx_list_paddr)); 1148*4bf8ce03SAdrian Chadd RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO, 1149*4bf8ce03SAdrian Chadd RGE_ADDR_LO(q->q_tx.rge_tx_list_paddr)); 1150*4bf8ce03SAdrian Chadd RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI, 1151*4bf8ce03SAdrian Chadd RGE_ADDR_HI(q->q_tx.rge_tx_list_paddr)); 1152*4bf8ce03SAdrian Chadd 1153*4bf8ce03SAdrian Chadd /* Set the initial RX and TX configurations. */ 1154*4bf8ce03SAdrian Chadd if (sc->rge_type == MAC_R25) 1155*4bf8ce03SAdrian Chadd rxconf = RGE_RXCFG_CONFIG; 1156*4bf8ce03SAdrian Chadd else if (sc->rge_type == MAC_R25B) 1157*4bf8ce03SAdrian Chadd rxconf = RGE_RXCFG_CONFIG_8125B; 1158*4bf8ce03SAdrian Chadd else if (sc->rge_type == MAC_R25D) 1159*4bf8ce03SAdrian Chadd rxconf = RGE_RXCFG_CONFIG_8125D; 1160*4bf8ce03SAdrian Chadd else 1161*4bf8ce03SAdrian Chadd rxconf = RGE_RXCFG_CONFIG_8126; 1162*4bf8ce03SAdrian Chadd RGE_WRITE_4(sc, RGE_RXCFG, rxconf); 1163*4bf8ce03SAdrian Chadd RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG); 1164*4bf8ce03SAdrian Chadd 1165*4bf8ce03SAdrian Chadd val = rge_read_csi(sc, 0x70c) & ~0x3f000000; 1166*4bf8ce03SAdrian Chadd rge_write_csi(sc, 0x70c, val | 0x27000000); 1167*4bf8ce03SAdrian Chadd 1168*4bf8ce03SAdrian Chadd if (sc->rge_type == MAC_R26 || sc->rge_type == MAC_R27) { 1169*4bf8ce03SAdrian Chadd /* Disable L1 timeout. */ 1170*4bf8ce03SAdrian Chadd val = rge_read_csi(sc, 0x890) & ~0x00000001; 1171*4bf8ce03SAdrian Chadd rge_write_csi(sc, 0x890, val); 1172*4bf8ce03SAdrian Chadd } else if (sc->rge_type != MAC_R25D) 1173*4bf8ce03SAdrian Chadd RGE_WRITE_2(sc, 0x0382, 0x221b); 1174*4bf8ce03SAdrian Chadd 1175*4bf8ce03SAdrian Chadd RGE_WRITE_1(sc, RGE_RSS_CTRL, 0); 1176*4bf8ce03SAdrian Chadd 1177*4bf8ce03SAdrian Chadd val = RGE_READ_2(sc, RGE_RXQUEUE_CTRL) & ~0x001c; 1178*4bf8ce03SAdrian Chadd RGE_WRITE_2(sc, RGE_RXQUEUE_CTRL, val | (fls(sc->sc_nqueues) - 1) << 2); 1179*4bf8ce03SAdrian Chadd 1180*4bf8ce03SAdrian Chadd RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN); 1181*4bf8ce03SAdrian Chadd 1182*4bf8ce03SAdrian Chadd rge_write_mac_ocp(sc, 0xc140, 0xffff); 1183*4bf8ce03SAdrian Chadd rge_write_mac_ocp(sc, 0xc142, 0xffff); 1184*4bf8ce03SAdrian Chadd 1185*4bf8ce03SAdrian Chadd RGE_MAC_SETBIT(sc, 0xeb58, 0x0001); 1186*4bf8ce03SAdrian Chadd 1187*4bf8ce03SAdrian Chadd if (sc->rge_type == MAC_R26 || sc->rge_type == MAC_R27) { 1188*4bf8ce03SAdrian Chadd RGE_CLRBIT_1(sc, 0xd8, 0x02); 1189*4bf8ce03SAdrian Chadd if (sc->rge_type == MAC_R27) { 1190*4bf8ce03SAdrian Chadd RGE_CLRBIT_1(sc, 0x20e4, 0x04); 1191*4bf8ce03SAdrian Chadd RGE_MAC_CLRBIT(sc, 0xe00c, 0x1000); 1192*4bf8ce03SAdrian Chadd RGE_MAC_CLRBIT(sc, 0xc0c2, 0x0040); 1193*4bf8ce03SAdrian Chadd } 1194*4bf8ce03SAdrian Chadd } 1195*4bf8ce03SAdrian Chadd 1196*4bf8ce03SAdrian Chadd val = rge_read_mac_ocp(sc, 0xe614); 1197*4bf8ce03SAdrian Chadd val &= (sc->rge_type == MAC_R27) ? ~0x0f00 : ~0x0700; 1198*4bf8ce03SAdrian Chadd if (sc->rge_type == MAC_R25 || sc->rge_type == MAC_R25D) 1199*4bf8ce03SAdrian Chadd rge_write_mac_ocp(sc, 0xe614, val | 0x0300); 1200*4bf8ce03SAdrian Chadd else if (sc->rge_type == MAC_R25B) 1201*4bf8ce03SAdrian Chadd rge_write_mac_ocp(sc, 0xe614, val | 0x0200); 1202*4bf8ce03SAdrian Chadd else if (sc->rge_type == MAC_R26) 1203*4bf8ce03SAdrian Chadd rge_write_mac_ocp(sc, 0xe614, val | 0x0300); 1204*4bf8ce03SAdrian Chadd else 1205*4bf8ce03SAdrian Chadd rge_write_mac_ocp(sc, 0xe614, val | 0x0f00); 1206*4bf8ce03SAdrian Chadd 1207*4bf8ce03SAdrian Chadd val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0c00; 1208*4bf8ce03SAdrian Chadd rge_write_mac_ocp(sc, 0xe63e, val | 1209*4bf8ce03SAdrian Chadd ((fls(sc->sc_nqueues) - 1) & 0x03) << 10); 1210*4bf8ce03SAdrian Chadd 1211*4bf8ce03SAdrian Chadd val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030; 1212*4bf8ce03SAdrian Chadd rge_write_mac_ocp(sc, 0xe63e, val | 0x0020); 1213*4bf8ce03SAdrian Chadd 1214*4bf8ce03SAdrian Chadd RGE_MAC_CLRBIT(sc, 0xc0b4, 0x0001); 1215*4bf8ce03SAdrian Chadd RGE_MAC_SETBIT(sc, 0xc0b4, 0x0001); 1216*4bf8ce03SAdrian Chadd 1217*4bf8ce03SAdrian Chadd RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c); 1218*4bf8ce03SAdrian Chadd 1219*4bf8ce03SAdrian Chadd val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff; 1220*4bf8ce03SAdrian Chadd rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033); 1221*4bf8ce03SAdrian Chadd 1222*4bf8ce03SAdrian Chadd val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0; 1223*4bf8ce03SAdrian Chadd rge_write_mac_ocp(sc, 0xeb50, val | 0x0040); 1224*4bf8ce03SAdrian Chadd 1225*4bf8ce03SAdrian Chadd RGE_MAC_CLRBIT(sc, 0xe056, 0x00f0); 1226*4bf8ce03SAdrian Chadd 1227*4bf8ce03SAdrian Chadd RGE_WRITE_1(sc, RGE_TDFNR, 0x10); 1228*4bf8ce03SAdrian Chadd 1229*4bf8ce03SAdrian Chadd RGE_MAC_CLRBIT(sc, 0xe040, 0x1000); 1230*4bf8ce03SAdrian Chadd 1231*4bf8ce03SAdrian Chadd val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003; 1232*4bf8ce03SAdrian Chadd rge_write_mac_ocp(sc, 0xea1c, val | 0x0001); 1233*4bf8ce03SAdrian Chadd 1234*4bf8ce03SAdrian Chadd if (sc->rge_type == MAC_R25D) 1235*4bf8ce03SAdrian Chadd rge_write_mac_ocp(sc, 0xe0c0, 0x4403); 1236*4bf8ce03SAdrian Chadd else 1237*4bf8ce03SAdrian Chadd rge_write_mac_ocp(sc, 0xe0c0, 0x4000); 1238*4bf8ce03SAdrian Chadd 1239*4bf8ce03SAdrian Chadd RGE_MAC_SETBIT(sc, 0xe052, 0x0060); 1240*4bf8ce03SAdrian Chadd RGE_MAC_CLRBIT(sc, 0xe052, 0x0088); 1241*4bf8ce03SAdrian Chadd 1242*4bf8ce03SAdrian Chadd val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff; 1243*4bf8ce03SAdrian Chadd rge_write_mac_ocp(sc, 0xd430, val | 0x045f); 1244*4bf8ce03SAdrian Chadd 1245*4bf8ce03SAdrian Chadd RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN | RGE_DLLPR_TX_10M_PS_EN); 1246*4bf8ce03SAdrian Chadd 1247*4bf8ce03SAdrian Chadd if (sc->rge_type == MAC_R25) 1248*4bf8ce03SAdrian Chadd RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01); 1249*4bf8ce03SAdrian Chadd 1250*4bf8ce03SAdrian Chadd if (sc->rge_type != MAC_R25D) { 1251*4bf8ce03SAdrian Chadd /* Disable EEE plus. */ 1252*4bf8ce03SAdrian Chadd RGE_MAC_CLRBIT(sc, 0xe080, 0x0002); 1253*4bf8ce03SAdrian Chadd } 1254*4bf8ce03SAdrian Chadd 1255*4bf8ce03SAdrian Chadd if (sc->rge_type == MAC_R26 || sc->rge_type == MAC_R27) 1256*4bf8ce03SAdrian Chadd RGE_MAC_CLRBIT(sc, 0xea1c, 0x0304); 1257*4bf8ce03SAdrian Chadd else 1258*4bf8ce03SAdrian Chadd RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004); 1259*4bf8ce03SAdrian Chadd 1260*4bf8ce03SAdrian Chadd /* Clear tcam entries. */ 1261*4bf8ce03SAdrian Chadd RGE_MAC_SETBIT(sc, 0xeb54, 0x0001); 1262*4bf8ce03SAdrian Chadd DELAY(1); 1263*4bf8ce03SAdrian Chadd RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001); 1264*4bf8ce03SAdrian Chadd 1265*4bf8ce03SAdrian Chadd RGE_CLRBIT_2(sc, 0x1880, 0x0030); 1266*4bf8ce03SAdrian Chadd 1267*4bf8ce03SAdrian Chadd if (sc->rge_type == MAC_R27) { 1268*4bf8ce03SAdrian Chadd val = rge_read_mac_ocp(sc, 0xd40c) & ~0xe038; 1269*4bf8ce03SAdrian Chadd rge_write_phy_ocp(sc, 0xd40c, val | 0x8020); 1270*4bf8ce03SAdrian Chadd } 1271*4bf8ce03SAdrian Chadd 1272*4bf8ce03SAdrian Chadd /* Config interrupt type. */ 1273*4bf8ce03SAdrian Chadd if (sc->rge_type == MAC_R27) 1274*4bf8ce03SAdrian Chadd RGE_CLRBIT_1(sc, RGE_INT_CFG0, RGE_INT_CFG0_AVOID_MISS_INTR); 1275*4bf8ce03SAdrian Chadd else if (sc->rge_type != MAC_R25) 1276*4bf8ce03SAdrian Chadd RGE_CLRBIT_1(sc, RGE_INT_CFG0, RGE_INT_CFG0_EN); 1277*4bf8ce03SAdrian Chadd 1278*4bf8ce03SAdrian Chadd /* Clear timer interrupts. */ 1279*4bf8ce03SAdrian Chadd RGE_WRITE_4(sc, RGE_TIMERINT0, 0); 1280*4bf8ce03SAdrian Chadd RGE_WRITE_4(sc, RGE_TIMERINT1, 0); 1281*4bf8ce03SAdrian Chadd RGE_WRITE_4(sc, RGE_TIMERINT2, 0); 1282*4bf8ce03SAdrian Chadd RGE_WRITE_4(sc, RGE_TIMERINT3, 0); 1283*4bf8ce03SAdrian Chadd 1284*4bf8ce03SAdrian Chadd num_miti = 1285*4bf8ce03SAdrian Chadd (sc->rge_type == MAC_R25B || sc->rge_type == MAC_R26) ? 32 : 64; 1286*4bf8ce03SAdrian Chadd /* Clear interrupt moderation timer. */ 1287*4bf8ce03SAdrian Chadd for (i = 0; i < num_miti; i++) 1288*4bf8ce03SAdrian Chadd RGE_WRITE_4(sc, RGE_INTMITI(i), 0); 1289*4bf8ce03SAdrian Chadd 1290*4bf8ce03SAdrian Chadd if (sc->rge_type == MAC_R26) { 1291*4bf8ce03SAdrian Chadd RGE_CLRBIT_1(sc, RGE_INT_CFG0, 1292*4bf8ce03SAdrian Chadd RGE_INT_CFG0_TIMEOUT_BYPASS | RGE_INT_CFG0_RDU_BYPASS_8126 | 1293*4bf8ce03SAdrian Chadd RGE_INT_CFG0_MITIGATION_BYPASS); 1294*4bf8ce03SAdrian Chadd RGE_WRITE_2(sc, RGE_INT_CFG1, 0); 1295*4bf8ce03SAdrian Chadd } 1296*4bf8ce03SAdrian Chadd 1297*4bf8ce03SAdrian Chadd RGE_MAC_SETBIT(sc, 0xc0ac, 0x1f80); 1298*4bf8ce03SAdrian Chadd 1299*4bf8ce03SAdrian Chadd rge_write_mac_ocp(sc, 0xe098, 0xc302); 1300*4bf8ce03SAdrian Chadd 1301*4bf8ce03SAdrian Chadd RGE_MAC_CLRBIT(sc, 0xe032, 0x0003); 1302*4bf8ce03SAdrian Chadd val = rge_read_csi(sc, 0x98) & ~0x0000ff00; 1303*4bf8ce03SAdrian Chadd rge_write_csi(sc, 0x98, val); 1304*4bf8ce03SAdrian Chadd 1305*4bf8ce03SAdrian Chadd if (sc->rge_type == MAC_R25D) { 1306*4bf8ce03SAdrian Chadd val = rge_read_mac_ocp(sc, 0xe092) & ~0x00ff; 1307*4bf8ce03SAdrian Chadd rge_write_mac_ocp(sc, 0xe092, val | 0x0008); 1308*4bf8ce03SAdrian Chadd } else 1309*4bf8ce03SAdrian Chadd RGE_MAC_CLRBIT(sc, 0xe092, 0x00ff); 1310*4bf8ce03SAdrian Chadd 1311*4bf8ce03SAdrian Chadd /* Enable/disable HW VLAN tagging based on enabled capability */ 1312*4bf8ce03SAdrian Chadd if ((if_getcapabilities(sc->sc_ifp) & IFCAP_VLAN_HWTAGGING) != 0) 1313*4bf8ce03SAdrian Chadd RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP); 1314*4bf8ce03SAdrian Chadd else 1315*4bf8ce03SAdrian Chadd RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP); 1316*4bf8ce03SAdrian Chadd 1317*4bf8ce03SAdrian Chadd /* Enable/disable RX checksum based on enabled capability */ 1318*4bf8ce03SAdrian Chadd if ((if_getcapenable(sc->sc_ifp) & IFCAP_RXCSUM) != 0) 1319*4bf8ce03SAdrian Chadd RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM); 1320*4bf8ce03SAdrian Chadd else 1321*4bf8ce03SAdrian Chadd RGE_CLRBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM); 1322*4bf8ce03SAdrian Chadd RGE_READ_2(sc, RGE_CPLUSCMD); 1323*4bf8ce03SAdrian Chadd 1324*4bf8ce03SAdrian Chadd /* Set Maximum frame size. */ 1325*4bf8ce03SAdrian Chadd RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN); 1326*4bf8ce03SAdrian Chadd 1327*4bf8ce03SAdrian Chadd /* Disable RXDV gate. */ 1328*4bf8ce03SAdrian Chadd RGE_CLRBIT_1(sc, RGE_PPSW, 0x08); 1329*4bf8ce03SAdrian Chadd DELAY(2000); 1330*4bf8ce03SAdrian Chadd 1331*4bf8ce03SAdrian Chadd /* Program promiscuous mode and multicast filters. */ 1332*4bf8ce03SAdrian Chadd rge_iff_locked(sc); 1333*4bf8ce03SAdrian Chadd 1334*4bf8ce03SAdrian Chadd if (sc->rge_type == MAC_R27) 1335*4bf8ce03SAdrian Chadd RGE_CLRBIT_1(sc, RGE_RADMFIFO_PROTECT, 0x2001); 1336*4bf8ce03SAdrian Chadd 1337*4bf8ce03SAdrian Chadd rge_disable_aspm_clkreq(sc); 1338*4bf8ce03SAdrian Chadd 1339*4bf8ce03SAdrian Chadd RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG); 1340*4bf8ce03SAdrian Chadd DELAY(10); 1341*4bf8ce03SAdrian Chadd 1342*4bf8ce03SAdrian Chadd rge_ifmedia_upd(sc->sc_ifp); 1343*4bf8ce03SAdrian Chadd 1344*4bf8ce03SAdrian Chadd /* Enable transmit and receive. */ 1345*4bf8ce03SAdrian Chadd RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB); 1346*4bf8ce03SAdrian Chadd 1347*4bf8ce03SAdrian Chadd /* Enable interrupts. */ 1348*4bf8ce03SAdrian Chadd rge_setup_intr(sc, RGE_IMTYPE_SIM); 1349*4bf8ce03SAdrian Chadd 1350*4bf8ce03SAdrian Chadd if_setdrvflagbits(sc->sc_ifp, IFF_DRV_RUNNING, 0); 1351*4bf8ce03SAdrian Chadd if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_OACTIVE); 1352*4bf8ce03SAdrian Chadd 1353*4bf8ce03SAdrian Chadd callout_reset(&sc->sc_timeout, hz, rge_tick, sc); 1354*4bf8ce03SAdrian Chadd 1355*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_INIT, "%s: init completed!\n", __func__); 1356*4bf8ce03SAdrian Chadd 1357*4bf8ce03SAdrian Chadd /* Unblock transmit when we release the lock */ 1358*4bf8ce03SAdrian Chadd sc->sc_stopped = false; 1359*4bf8ce03SAdrian Chadd } 1360*4bf8ce03SAdrian Chadd 1361*4bf8ce03SAdrian Chadd /* 1362*4bf8ce03SAdrian Chadd * @brief Stop the adapter and free any mbufs allocated to the RX and TX lists. 1363*4bf8ce03SAdrian Chadd * 1364*4bf8ce03SAdrian Chadd * Must be called with the driver lock held. 1365*4bf8ce03SAdrian Chadd */ 1366*4bf8ce03SAdrian Chadd void 1367*4bf8ce03SAdrian Chadd rge_stop_locked(struct rge_softc *sc) 1368*4bf8ce03SAdrian Chadd { 1369*4bf8ce03SAdrian Chadd struct rge_queues *q = sc->sc_queues; 1370*4bf8ce03SAdrian Chadd int i; 1371*4bf8ce03SAdrian Chadd 1372*4bf8ce03SAdrian Chadd RGE_ASSERT_LOCKED(sc); 1373*4bf8ce03SAdrian Chadd 1374*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_INIT, "%s: called!\n", __func__); 1375*4bf8ce03SAdrian Chadd 1376*4bf8ce03SAdrian Chadd callout_stop(&sc->sc_timeout); 1377*4bf8ce03SAdrian Chadd 1378*4bf8ce03SAdrian Chadd /* Stop pending TX submissions */ 1379*4bf8ce03SAdrian Chadd sc->sc_stopped = true; 1380*4bf8ce03SAdrian Chadd 1381*4bf8ce03SAdrian Chadd if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_RUNNING); 1382*4bf8ce03SAdrian Chadd sc->rge_timerintr = 0; 1383*4bf8ce03SAdrian Chadd sc->sc_watchdog = 0; 1384*4bf8ce03SAdrian Chadd 1385*4bf8ce03SAdrian Chadd RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV | 1386*4bf8ce03SAdrian Chadd RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT | 1387*4bf8ce03SAdrian Chadd RGE_RXCFG_ERRPKT); 1388*4bf8ce03SAdrian Chadd 1389*4bf8ce03SAdrian Chadd rge_hw_reset(sc); 1390*4bf8ce03SAdrian Chadd 1391*4bf8ce03SAdrian Chadd RGE_MAC_CLRBIT(sc, 0xc0ac, 0x1f80); 1392*4bf8ce03SAdrian Chadd 1393*4bf8ce03SAdrian Chadd if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_OACTIVE); 1394*4bf8ce03SAdrian Chadd 1395*4bf8ce03SAdrian Chadd if (q->q_rx.rge_head != NULL) { 1396*4bf8ce03SAdrian Chadd m_freem(q->q_rx.rge_head); 1397*4bf8ce03SAdrian Chadd q->q_rx.rge_head = NULL; 1398*4bf8ce03SAdrian Chadd q->q_rx.rge_tail = &q->q_rx.rge_head; 1399*4bf8ce03SAdrian Chadd } 1400*4bf8ce03SAdrian Chadd 1401*4bf8ce03SAdrian Chadd /* Free the TX list buffers. */ 1402*4bf8ce03SAdrian Chadd for (i = 0; i < RGE_TX_LIST_CNT; i++) { 1403*4bf8ce03SAdrian Chadd if (q->q_tx.rge_txq[i].txq_mbuf != NULL) { 1404*4bf8ce03SAdrian Chadd bus_dmamap_unload(sc->sc_dmat_tx_buf, 1405*4bf8ce03SAdrian Chadd q->q_tx.rge_txq[i].txq_dmamap); 1406*4bf8ce03SAdrian Chadd m_freem(q->q_tx.rge_txq[i].txq_mbuf); 1407*4bf8ce03SAdrian Chadd q->q_tx.rge_txq[i].txq_mbuf = NULL; 1408*4bf8ce03SAdrian Chadd } 1409*4bf8ce03SAdrian Chadd } 1410*4bf8ce03SAdrian Chadd 1411*4bf8ce03SAdrian Chadd /* Free the RX list buffers. */ 1412*4bf8ce03SAdrian Chadd for (i = 0; i < RGE_RX_LIST_CNT; i++) { 1413*4bf8ce03SAdrian Chadd if (q->q_rx.rge_rxq[i].rxq_mbuf != NULL) { 1414*4bf8ce03SAdrian Chadd bus_dmamap_unload(sc->sc_dmat_rx_buf, 1415*4bf8ce03SAdrian Chadd q->q_rx.rge_rxq[i].rxq_dmamap); 1416*4bf8ce03SAdrian Chadd m_freem(q->q_rx.rge_rxq[i].rxq_mbuf); 1417*4bf8ce03SAdrian Chadd q->q_rx.rge_rxq[i].rxq_mbuf = NULL; 1418*4bf8ce03SAdrian Chadd } 1419*4bf8ce03SAdrian Chadd } 1420*4bf8ce03SAdrian Chadd 1421*4bf8ce03SAdrian Chadd /* Free pending TX frames */ 1422*4bf8ce03SAdrian Chadd /* TODO: should be per TX queue */ 1423*4bf8ce03SAdrian Chadd rge_txq_flush_mbufs(sc); 1424*4bf8ce03SAdrian Chadd } 1425*4bf8ce03SAdrian Chadd 1426*4bf8ce03SAdrian Chadd /* 1427*4bf8ce03SAdrian Chadd * Set media options. 1428*4bf8ce03SAdrian Chadd */ 1429*4bf8ce03SAdrian Chadd static int 1430*4bf8ce03SAdrian Chadd rge_ifmedia_upd(if_t ifp) 1431*4bf8ce03SAdrian Chadd { 1432*4bf8ce03SAdrian Chadd struct rge_softc *sc = if_getsoftc(ifp); 1433*4bf8ce03SAdrian Chadd struct ifmedia *ifm = &sc->sc_media; 1434*4bf8ce03SAdrian Chadd int anar, gig, val; 1435*4bf8ce03SAdrian Chadd 1436*4bf8ce03SAdrian Chadd if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1437*4bf8ce03SAdrian Chadd return (EINVAL); 1438*4bf8ce03SAdrian Chadd 1439*4bf8ce03SAdrian Chadd /* Disable Gigabit Lite. */ 1440*4bf8ce03SAdrian Chadd RGE_PHY_CLRBIT(sc, 0xa428, 0x0200); 1441*4bf8ce03SAdrian Chadd RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001); 1442*4bf8ce03SAdrian Chadd if (sc->rge_type == MAC_R26 || sc->rge_type == MAC_R27) 1443*4bf8ce03SAdrian Chadd RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0007); 1444*4bf8ce03SAdrian Chadd 1445*4bf8ce03SAdrian Chadd val = rge_read_phy_ocp(sc, 0xa5d4); 1446*4bf8ce03SAdrian Chadd switch (sc->rge_type) { 1447*4bf8ce03SAdrian Chadd case MAC_R27: 1448*4bf8ce03SAdrian Chadd val &= ~RGE_ADV_10000TFDX; 1449*4bf8ce03SAdrian Chadd /* fallthrough */ 1450*4bf8ce03SAdrian Chadd case MAC_R26: 1451*4bf8ce03SAdrian Chadd val &= ~RGE_ADV_5000TFDX; 1452*4bf8ce03SAdrian Chadd /* fallthrough */ 1453*4bf8ce03SAdrian Chadd default: 1454*4bf8ce03SAdrian Chadd val &= ~RGE_ADV_2500TFDX; 1455*4bf8ce03SAdrian Chadd break; 1456*4bf8ce03SAdrian Chadd } 1457*4bf8ce03SAdrian Chadd 1458*4bf8ce03SAdrian Chadd anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10; 1459*4bf8ce03SAdrian Chadd gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX; 1460*4bf8ce03SAdrian Chadd 1461*4bf8ce03SAdrian Chadd switch (IFM_SUBTYPE(ifm->ifm_media)) { 1462*4bf8ce03SAdrian Chadd case IFM_AUTO: 1463*4bf8ce03SAdrian Chadd val |= RGE_ADV_2500TFDX; 1464*4bf8ce03SAdrian Chadd if (sc->rge_type == MAC_R26) 1465*4bf8ce03SAdrian Chadd val |= RGE_ADV_5000TFDX; 1466*4bf8ce03SAdrian Chadd else if (sc->rge_type == MAC_R27) 1467*4bf8ce03SAdrian Chadd val |= RGE_ADV_5000TFDX | RGE_ADV_10000TFDX; 1468*4bf8ce03SAdrian Chadd break; 1469*4bf8ce03SAdrian Chadd case IFM_10G_T: 1470*4bf8ce03SAdrian Chadd val |= RGE_ADV_10000TFDX; 1471*4bf8ce03SAdrian Chadd if_setbaudrate(ifp, IF_Gbps(10)); 1472*4bf8ce03SAdrian Chadd break; 1473*4bf8ce03SAdrian Chadd case IFM_5000_T: 1474*4bf8ce03SAdrian Chadd val |= RGE_ADV_5000TFDX; 1475*4bf8ce03SAdrian Chadd if_setbaudrate(ifp, IF_Gbps(5)); 1476*4bf8ce03SAdrian Chadd break; 1477*4bf8ce03SAdrian Chadd case IFM_2500_T: 1478*4bf8ce03SAdrian Chadd val |= RGE_ADV_2500TFDX; 1479*4bf8ce03SAdrian Chadd if_setbaudrate(ifp, IF_Mbps(2500)); 1480*4bf8ce03SAdrian Chadd break; 1481*4bf8ce03SAdrian Chadd case IFM_1000_T: 1482*4bf8ce03SAdrian Chadd if_setbaudrate(ifp, IF_Gbps(1)); 1483*4bf8ce03SAdrian Chadd break; 1484*4bf8ce03SAdrian Chadd case IFM_100_TX: 1485*4bf8ce03SAdrian Chadd gig = rge_read_phy(sc, 0, MII_100T2CR) & 1486*4bf8ce03SAdrian Chadd ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX); 1487*4bf8ce03SAdrian Chadd anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ? 1488*4bf8ce03SAdrian Chadd ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 : 1489*4bf8ce03SAdrian Chadd ANAR_TX | ANAR_10_FD | ANAR_10; 1490*4bf8ce03SAdrian Chadd if_setbaudrate(ifp, IF_Mbps(100)); 1491*4bf8ce03SAdrian Chadd break; 1492*4bf8ce03SAdrian Chadd case IFM_10_T: 1493*4bf8ce03SAdrian Chadd gig = rge_read_phy(sc, 0, MII_100T2CR) & 1494*4bf8ce03SAdrian Chadd ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX); 1495*4bf8ce03SAdrian Chadd anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ? 1496*4bf8ce03SAdrian Chadd ANAR_10_FD | ANAR_10 : ANAR_10; 1497*4bf8ce03SAdrian Chadd if_setbaudrate(ifp, IF_Mbps(10)); 1498*4bf8ce03SAdrian Chadd break; 1499*4bf8ce03SAdrian Chadd default: 1500*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, "unsupported media type\n"); 1501*4bf8ce03SAdrian Chadd return (EINVAL); 1502*4bf8ce03SAdrian Chadd } 1503*4bf8ce03SAdrian Chadd 1504*4bf8ce03SAdrian Chadd rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC); 1505*4bf8ce03SAdrian Chadd rge_write_phy(sc, 0, MII_100T2CR, gig); 1506*4bf8ce03SAdrian Chadd rge_write_phy_ocp(sc, 0xa5d4, val); 1507*4bf8ce03SAdrian Chadd rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN | 1508*4bf8ce03SAdrian Chadd BMCR_STARTNEG); 1509*4bf8ce03SAdrian Chadd 1510*4bf8ce03SAdrian Chadd return (0); 1511*4bf8ce03SAdrian Chadd } 1512*4bf8ce03SAdrian Chadd 1513*4bf8ce03SAdrian Chadd /* 1514*4bf8ce03SAdrian Chadd * Report current media status. 1515*4bf8ce03SAdrian Chadd */ 1516*4bf8ce03SAdrian Chadd static void 1517*4bf8ce03SAdrian Chadd rge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) 1518*4bf8ce03SAdrian Chadd { 1519*4bf8ce03SAdrian Chadd struct rge_softc *sc = if_getsoftc(ifp); 1520*4bf8ce03SAdrian Chadd uint16_t status = 0; 1521*4bf8ce03SAdrian Chadd 1522*4bf8ce03SAdrian Chadd ifmr->ifm_status = IFM_AVALID; 1523*4bf8ce03SAdrian Chadd ifmr->ifm_active = IFM_ETHER; 1524*4bf8ce03SAdrian Chadd 1525*4bf8ce03SAdrian Chadd if (rge_get_link_status(sc)) { 1526*4bf8ce03SAdrian Chadd ifmr->ifm_status |= IFM_ACTIVE; 1527*4bf8ce03SAdrian Chadd 1528*4bf8ce03SAdrian Chadd status = RGE_READ_2(sc, RGE_PHYSTAT); 1529*4bf8ce03SAdrian Chadd if ((status & RGE_PHYSTAT_FDX) || 1530*4bf8ce03SAdrian Chadd (status & (RGE_PHYSTAT_1000MBPS | RGE_PHYSTAT_2500MBPS | 1531*4bf8ce03SAdrian Chadd RGE_PHYSTAT_5000MBPS | RGE_PHYSTAT_10000MBPS))) 1532*4bf8ce03SAdrian Chadd ifmr->ifm_active |= IFM_FDX; 1533*4bf8ce03SAdrian Chadd else 1534*4bf8ce03SAdrian Chadd ifmr->ifm_active |= IFM_HDX; 1535*4bf8ce03SAdrian Chadd 1536*4bf8ce03SAdrian Chadd if (status & RGE_PHYSTAT_10MBPS) 1537*4bf8ce03SAdrian Chadd ifmr->ifm_active |= IFM_10_T; 1538*4bf8ce03SAdrian Chadd else if (status & RGE_PHYSTAT_100MBPS) 1539*4bf8ce03SAdrian Chadd ifmr->ifm_active |= IFM_100_TX; 1540*4bf8ce03SAdrian Chadd else if (status & RGE_PHYSTAT_1000MBPS) 1541*4bf8ce03SAdrian Chadd ifmr->ifm_active |= IFM_1000_T; 1542*4bf8ce03SAdrian Chadd else if (status & RGE_PHYSTAT_2500MBPS) 1543*4bf8ce03SAdrian Chadd ifmr->ifm_active |= IFM_2500_T; 1544*4bf8ce03SAdrian Chadd else if (status & RGE_PHYSTAT_5000MBPS) 1545*4bf8ce03SAdrian Chadd ifmr->ifm_active |= IFM_5000_T; 1546*4bf8ce03SAdrian Chadd else if (status & RGE_PHYSTAT_5000MBPS) 1547*4bf8ce03SAdrian Chadd ifmr->ifm_active |= IFM_5000_T; 1548*4bf8ce03SAdrian Chadd else if (status & RGE_PHYSTAT_10000MBPS) 1549*4bf8ce03SAdrian Chadd ifmr->ifm_active |= IFM_10G_T; 1550*4bf8ce03SAdrian Chadd } 1551*4bf8ce03SAdrian Chadd } 1552*4bf8ce03SAdrian Chadd 1553*4bf8ce03SAdrian Chadd /** 1554*4bf8ce03SAdrian Chadd * @brief callback to load/populate a single physical address 1555*4bf8ce03SAdrian Chadd */ 1556*4bf8ce03SAdrian Chadd static void 1557*4bf8ce03SAdrian Chadd rge_dma_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1558*4bf8ce03SAdrian Chadd { 1559*4bf8ce03SAdrian Chadd bus_addr_t *paddr = (bus_addr_t *) arg; 1560*4bf8ce03SAdrian Chadd 1561*4bf8ce03SAdrian Chadd *paddr = 0; 1562*4bf8ce03SAdrian Chadd 1563*4bf8ce03SAdrian Chadd if (error) { 1564*4bf8ce03SAdrian Chadd printf("%s: error! (%d)\n", __func__, error); 1565*4bf8ce03SAdrian Chadd *paddr = 0; 1566*4bf8ce03SAdrian Chadd return; 1567*4bf8ce03SAdrian Chadd } 1568*4bf8ce03SAdrian Chadd 1569*4bf8ce03SAdrian Chadd if (nsegs != 1) { 1570*4bf8ce03SAdrian Chadd printf("%s: too many segs (got %d)\n", __func__, nsegs); 1571*4bf8ce03SAdrian Chadd *paddr = 0; 1572*4bf8ce03SAdrian Chadd return; 1573*4bf8ce03SAdrian Chadd } 1574*4bf8ce03SAdrian Chadd 1575*4bf8ce03SAdrian Chadd *paddr = segs[0].ds_addr; 1576*4bf8ce03SAdrian Chadd } 1577*4bf8ce03SAdrian Chadd 1578*4bf8ce03SAdrian Chadd /** 1579*4bf8ce03SAdrian Chadd * @brief Allocate memory for RX/TX rings. 1580*4bf8ce03SAdrian Chadd * 1581*4bf8ce03SAdrian Chadd * Called with the driver lock NOT held. 1582*4bf8ce03SAdrian Chadd */ 1583*4bf8ce03SAdrian Chadd static int 1584*4bf8ce03SAdrian Chadd rge_allocmem(struct rge_softc *sc) 1585*4bf8ce03SAdrian Chadd { 1586*4bf8ce03SAdrian Chadd struct rge_queues *q = sc->sc_queues; 1587*4bf8ce03SAdrian Chadd int error; 1588*4bf8ce03SAdrian Chadd int i; 1589*4bf8ce03SAdrian Chadd 1590*4bf8ce03SAdrian Chadd RGE_ASSERT_UNLOCKED(sc); 1591*4bf8ce03SAdrian Chadd 1592*4bf8ce03SAdrian Chadd /* Allocate DMA'able memory for the TX ring. */ 1593*4bf8ce03SAdrian Chadd error = bus_dmamem_alloc(sc->sc_dmat_tx_desc, 1594*4bf8ce03SAdrian Chadd (void **) &q->q_tx.rge_tx_list, 1595*4bf8ce03SAdrian Chadd BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1596*4bf8ce03SAdrian Chadd &q->q_tx.rge_tx_list_map); 1597*4bf8ce03SAdrian Chadd if (error) { 1598*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, "%s: error (alloc tx_list.map) (%d)\n", 1599*4bf8ce03SAdrian Chadd __func__, error); 1600*4bf8ce03SAdrian Chadd goto error; 1601*4bf8ce03SAdrian Chadd } 1602*4bf8ce03SAdrian Chadd 1603*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: tx_list=%p\n", __func__, 1604*4bf8ce03SAdrian Chadd q->q_tx.rge_tx_list); 1605*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: tx_list_map=%p\n", __func__, 1606*4bf8ce03SAdrian Chadd q->q_tx.rge_tx_list_map); 1607*4bf8ce03SAdrian Chadd 1608*4bf8ce03SAdrian Chadd /* Load the map for the TX ring. */ 1609*4bf8ce03SAdrian Chadd error = bus_dmamap_load(sc->sc_dmat_tx_desc, 1610*4bf8ce03SAdrian Chadd q->q_tx.rge_tx_list_map, 1611*4bf8ce03SAdrian Chadd q->q_tx.rge_tx_list, 1612*4bf8ce03SAdrian Chadd RGE_TX_LIST_SZ, 1613*4bf8ce03SAdrian Chadd rge_dma_load_cb, 1614*4bf8ce03SAdrian Chadd (void *) &q->q_tx.rge_tx_list_paddr, 1615*4bf8ce03SAdrian Chadd BUS_DMA_NOWAIT); 1616*4bf8ce03SAdrian Chadd 1617*4bf8ce03SAdrian Chadd if ((error != 0) || (q->q_tx.rge_tx_list_paddr == 0)) { 1618*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, "%s: error (load tx_list.map) (%d)\n", 1619*4bf8ce03SAdrian Chadd __func__, error); 1620*4bf8ce03SAdrian Chadd goto error; 1621*4bf8ce03SAdrian Chadd } 1622*4bf8ce03SAdrian Chadd 1623*4bf8ce03SAdrian Chadd /* Create DMA maps for TX buffers. */ 1624*4bf8ce03SAdrian Chadd for (i = 0; i < RGE_TX_LIST_CNT; i++) { 1625*4bf8ce03SAdrian Chadd error = bus_dmamap_create(sc->sc_dmat_tx_buf, 1626*4bf8ce03SAdrian Chadd 0, &q->q_tx.rge_txq[i].txq_dmamap); 1627*4bf8ce03SAdrian Chadd if (error) { 1628*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, 1629*4bf8ce03SAdrian Chadd "can't create DMA map for TX (%d)\n", error); 1630*4bf8ce03SAdrian Chadd goto error; 1631*4bf8ce03SAdrian Chadd } 1632*4bf8ce03SAdrian Chadd } 1633*4bf8ce03SAdrian Chadd 1634*4bf8ce03SAdrian Chadd /* Allocate DMA'able memory for the RX ring. */ 1635*4bf8ce03SAdrian Chadd error = bus_dmamem_alloc(sc->sc_dmat_rx_desc, 1636*4bf8ce03SAdrian Chadd (void **) &q->q_rx.rge_rx_list, 1637*4bf8ce03SAdrian Chadd BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1638*4bf8ce03SAdrian Chadd &q->q_rx.rge_rx_list_map); 1639*4bf8ce03SAdrian Chadd if (error) { 1640*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, "%s: error (alloc rx_list.map) (%d)\n", 1641*4bf8ce03SAdrian Chadd __func__, error); 1642*4bf8ce03SAdrian Chadd goto error; 1643*4bf8ce03SAdrian Chadd } 1644*4bf8ce03SAdrian Chadd 1645*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_INIT, "%s: rx_list=%p\n", __func__, 1646*4bf8ce03SAdrian Chadd q->q_rx.rge_rx_list); 1647*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_INIT, "%s: rx_list_map=%p\n", __func__, 1648*4bf8ce03SAdrian Chadd q->q_rx.rge_rx_list_map); 1649*4bf8ce03SAdrian Chadd 1650*4bf8ce03SAdrian Chadd /* Load the map for the RX ring. */ 1651*4bf8ce03SAdrian Chadd error = bus_dmamap_load(sc->sc_dmat_rx_desc, 1652*4bf8ce03SAdrian Chadd q->q_rx.rge_rx_list_map, 1653*4bf8ce03SAdrian Chadd q->q_rx.rge_rx_list, 1654*4bf8ce03SAdrian Chadd RGE_RX_LIST_SZ, 1655*4bf8ce03SAdrian Chadd rge_dma_load_cb, 1656*4bf8ce03SAdrian Chadd (void *) &q->q_rx.rge_rx_list_paddr, 1657*4bf8ce03SAdrian Chadd BUS_DMA_NOWAIT); 1658*4bf8ce03SAdrian Chadd 1659*4bf8ce03SAdrian Chadd if ((error != 0) || (q->q_rx.rge_rx_list_paddr == 0)) { 1660*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, "%s: error (load rx_list.map) (%d)\n", 1661*4bf8ce03SAdrian Chadd __func__, error); 1662*4bf8ce03SAdrian Chadd goto error; 1663*4bf8ce03SAdrian Chadd } 1664*4bf8ce03SAdrian Chadd 1665*4bf8ce03SAdrian Chadd /* Create DMA maps for RX buffers. */ 1666*4bf8ce03SAdrian Chadd for (i = 0; i < RGE_RX_LIST_CNT; i++) { 1667*4bf8ce03SAdrian Chadd error = bus_dmamap_create(sc->sc_dmat_rx_buf, 1668*4bf8ce03SAdrian Chadd 0, &q->q_rx.rge_rxq[i].rxq_dmamap); 1669*4bf8ce03SAdrian Chadd if (error) { 1670*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, 1671*4bf8ce03SAdrian Chadd "can't create DMA map for RX (%d)\n", error); 1672*4bf8ce03SAdrian Chadd goto error; 1673*4bf8ce03SAdrian Chadd } 1674*4bf8ce03SAdrian Chadd } 1675*4bf8ce03SAdrian Chadd 1676*4bf8ce03SAdrian Chadd return (0); 1677*4bf8ce03SAdrian Chadd error: 1678*4bf8ce03SAdrian Chadd 1679*4bf8ce03SAdrian Chadd rge_freemem(sc); 1680*4bf8ce03SAdrian Chadd 1681*4bf8ce03SAdrian Chadd return (error); 1682*4bf8ce03SAdrian Chadd } 1683*4bf8ce03SAdrian Chadd 1684*4bf8ce03SAdrian Chadd /** 1685*4bf8ce03SAdrian Chadd * @brief Allocate memory for MAC stats. 1686*4bf8ce03SAdrian Chadd * 1687*4bf8ce03SAdrian Chadd * Called with the driver lock NOT held. 1688*4bf8ce03SAdrian Chadd */ 1689*4bf8ce03SAdrian Chadd static int 1690*4bf8ce03SAdrian Chadd rge_alloc_stats_mem(struct rge_softc *sc) 1691*4bf8ce03SAdrian Chadd { 1692*4bf8ce03SAdrian Chadd struct rge_mac_stats *ss = &sc->sc_mac_stats; 1693*4bf8ce03SAdrian Chadd int error; 1694*4bf8ce03SAdrian Chadd 1695*4bf8ce03SAdrian Chadd RGE_ASSERT_UNLOCKED(sc); 1696*4bf8ce03SAdrian Chadd 1697*4bf8ce03SAdrian Chadd /* Allocate DMA'able memory for the stats buffer. */ 1698*4bf8ce03SAdrian Chadd error = bus_dmamem_alloc(sc->sc_dmat_stats_buf, 1699*4bf8ce03SAdrian Chadd (void **) &ss->stats, BUS_DMA_WAITOK | BUS_DMA_ZERO, 1700*4bf8ce03SAdrian Chadd &ss->map); 1701*4bf8ce03SAdrian Chadd if (error) { 1702*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, "%s: error (alloc stats) (%d)\n", 1703*4bf8ce03SAdrian Chadd __func__, error); 1704*4bf8ce03SAdrian Chadd goto error; 1705*4bf8ce03SAdrian Chadd } 1706*4bf8ce03SAdrian Chadd 1707*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: stats=%p\n", __func__, ss->stats); 1708*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: map=%p\n", __func__, ss->map); 1709*4bf8ce03SAdrian Chadd 1710*4bf8ce03SAdrian Chadd /* Load the map for the TX ring. */ 1711*4bf8ce03SAdrian Chadd error = bus_dmamap_load(sc->sc_dmat_stats_buf, 1712*4bf8ce03SAdrian Chadd ss->map, 1713*4bf8ce03SAdrian Chadd ss->stats, 1714*4bf8ce03SAdrian Chadd RGE_STATS_BUF_SIZE, 1715*4bf8ce03SAdrian Chadd rge_dma_load_cb, 1716*4bf8ce03SAdrian Chadd (void *) &ss->paddr, 1717*4bf8ce03SAdrian Chadd BUS_DMA_NOWAIT); 1718*4bf8ce03SAdrian Chadd 1719*4bf8ce03SAdrian Chadd if ((error != 0) || (ss->paddr == 0)) { 1720*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, "%s: error (load stats.map) (%d)\n", 1721*4bf8ce03SAdrian Chadd __func__, error); 1722*4bf8ce03SAdrian Chadd if (error == 0) 1723*4bf8ce03SAdrian Chadd error = ENXIO; 1724*4bf8ce03SAdrian Chadd goto error; 1725*4bf8ce03SAdrian Chadd } 1726*4bf8ce03SAdrian Chadd 1727*4bf8ce03SAdrian Chadd return (0); 1728*4bf8ce03SAdrian Chadd 1729*4bf8ce03SAdrian Chadd error: 1730*4bf8ce03SAdrian Chadd rge_free_stats_mem(sc); 1731*4bf8ce03SAdrian Chadd 1732*4bf8ce03SAdrian Chadd return (error); 1733*4bf8ce03SAdrian Chadd } 1734*4bf8ce03SAdrian Chadd 1735*4bf8ce03SAdrian Chadd 1736*4bf8ce03SAdrian Chadd /** 1737*4bf8ce03SAdrian Chadd * @brief Free the TX/RX DMA buffers and mbufs. 1738*4bf8ce03SAdrian Chadd * 1739*4bf8ce03SAdrian Chadd * Called with the driver lock NOT held. 1740*4bf8ce03SAdrian Chadd */ 1741*4bf8ce03SAdrian Chadd static int 1742*4bf8ce03SAdrian Chadd rge_freemem(struct rge_softc *sc) 1743*4bf8ce03SAdrian Chadd { 1744*4bf8ce03SAdrian Chadd struct rge_queues *q = sc->sc_queues; 1745*4bf8ce03SAdrian Chadd int i; 1746*4bf8ce03SAdrian Chadd 1747*4bf8ce03SAdrian Chadd RGE_ASSERT_UNLOCKED(sc); 1748*4bf8ce03SAdrian Chadd 1749*4bf8ce03SAdrian Chadd /* TX desc */ 1750*4bf8ce03SAdrian Chadd bus_dmamap_unload(sc->sc_dmat_tx_desc, q->q_tx.rge_tx_list_map); 1751*4bf8ce03SAdrian Chadd if (q->q_tx.rge_tx_list != NULL) 1752*4bf8ce03SAdrian Chadd bus_dmamem_free(sc->sc_dmat_tx_desc, q->q_tx.rge_tx_list, 1753*4bf8ce03SAdrian Chadd q->q_tx.rge_tx_list_map); 1754*4bf8ce03SAdrian Chadd memset(&q->q_tx, 0, sizeof(q->q_tx)); 1755*4bf8ce03SAdrian Chadd 1756*4bf8ce03SAdrian Chadd /* TX buf */ 1757*4bf8ce03SAdrian Chadd for (i = 0; i < RGE_TX_LIST_CNT; i++) { 1758*4bf8ce03SAdrian Chadd struct rge_txq *tx = &q->q_tx.rge_txq[i]; 1759*4bf8ce03SAdrian Chadd 1760*4bf8ce03SAdrian Chadd /* unmap/free mbuf if it's still alloc'ed and mapped */ 1761*4bf8ce03SAdrian Chadd if (tx->txq_mbuf != NULL) { 1762*4bf8ce03SAdrian Chadd static bool do_warning = false; 1763*4bf8ce03SAdrian Chadd 1764*4bf8ce03SAdrian Chadd if (do_warning == false) { 1765*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, 1766*4bf8ce03SAdrian Chadd "%s: TX mbuf should've been freed!\n", 1767*4bf8ce03SAdrian Chadd __func__); 1768*4bf8ce03SAdrian Chadd do_warning = true; 1769*4bf8ce03SAdrian Chadd } 1770*4bf8ce03SAdrian Chadd if (tx->txq_dmamap != NULL) { 1771*4bf8ce03SAdrian Chadd bus_dmamap_sync(sc->sc_dmat_tx_buf, 1772*4bf8ce03SAdrian Chadd tx->txq_dmamap, BUS_DMASYNC_POSTREAD); 1773*4bf8ce03SAdrian Chadd bus_dmamap_unload(sc->sc_dmat_tx_buf, 1774*4bf8ce03SAdrian Chadd tx->txq_dmamap); 1775*4bf8ce03SAdrian Chadd } 1776*4bf8ce03SAdrian Chadd m_free(tx->txq_mbuf); 1777*4bf8ce03SAdrian Chadd tx->txq_mbuf = NULL; 1778*4bf8ce03SAdrian Chadd } 1779*4bf8ce03SAdrian Chadd 1780*4bf8ce03SAdrian Chadd /* Destroy the dmamap if it's allocated */ 1781*4bf8ce03SAdrian Chadd if (tx->txq_dmamap != NULL) { 1782*4bf8ce03SAdrian Chadd bus_dmamap_destroy(sc->sc_dmat_tx_buf, tx->txq_dmamap); 1783*4bf8ce03SAdrian Chadd tx->txq_dmamap = NULL; 1784*4bf8ce03SAdrian Chadd } 1785*4bf8ce03SAdrian Chadd } 1786*4bf8ce03SAdrian Chadd 1787*4bf8ce03SAdrian Chadd /* RX desc */ 1788*4bf8ce03SAdrian Chadd bus_dmamap_unload(sc->sc_dmat_rx_desc, q->q_rx.rge_rx_list_map); 1789*4bf8ce03SAdrian Chadd if (q->q_rx.rge_rx_list != 0) 1790*4bf8ce03SAdrian Chadd bus_dmamem_free(sc->sc_dmat_rx_desc, q->q_rx.rge_rx_list, 1791*4bf8ce03SAdrian Chadd q->q_rx.rge_rx_list_map); 1792*4bf8ce03SAdrian Chadd memset(&q->q_rx, 0, sizeof(q->q_tx)); 1793*4bf8ce03SAdrian Chadd 1794*4bf8ce03SAdrian Chadd /* RX buf */ 1795*4bf8ce03SAdrian Chadd for (i = 0; i < RGE_RX_LIST_CNT; i++) { 1796*4bf8ce03SAdrian Chadd struct rge_rxq *rx = &q->q_rx.rge_rxq[i]; 1797*4bf8ce03SAdrian Chadd 1798*4bf8ce03SAdrian Chadd /* unmap/free mbuf if it's still alloc'ed and mapped */ 1799*4bf8ce03SAdrian Chadd if (rx->rxq_mbuf != NULL) { 1800*4bf8ce03SAdrian Chadd if (rx->rxq_dmamap != NULL) { 1801*4bf8ce03SAdrian Chadd bus_dmamap_sync(sc->sc_dmat_rx_buf, 1802*4bf8ce03SAdrian Chadd rx->rxq_dmamap, BUS_DMASYNC_POSTREAD); 1803*4bf8ce03SAdrian Chadd bus_dmamap_unload(sc->sc_dmat_rx_buf, 1804*4bf8ce03SAdrian Chadd rx->rxq_dmamap); 1805*4bf8ce03SAdrian Chadd } 1806*4bf8ce03SAdrian Chadd m_free(rx->rxq_mbuf); 1807*4bf8ce03SAdrian Chadd rx->rxq_mbuf = NULL; 1808*4bf8ce03SAdrian Chadd } 1809*4bf8ce03SAdrian Chadd 1810*4bf8ce03SAdrian Chadd /* Destroy the dmamap if it's allocated */ 1811*4bf8ce03SAdrian Chadd if (rx->rxq_dmamap != NULL) { 1812*4bf8ce03SAdrian Chadd bus_dmamap_destroy(sc->sc_dmat_rx_buf, rx->rxq_dmamap); 1813*4bf8ce03SAdrian Chadd rx->rxq_dmamap = NULL; 1814*4bf8ce03SAdrian Chadd } 1815*4bf8ce03SAdrian Chadd } 1816*4bf8ce03SAdrian Chadd 1817*4bf8ce03SAdrian Chadd return (0); 1818*4bf8ce03SAdrian Chadd } 1819*4bf8ce03SAdrian Chadd 1820*4bf8ce03SAdrian Chadd /** 1821*4bf8ce03SAdrian Chadd * @brief Free the stats memory. 1822*4bf8ce03SAdrian Chadd * 1823*4bf8ce03SAdrian Chadd * Called with the driver lock NOT held. 1824*4bf8ce03SAdrian Chadd */ 1825*4bf8ce03SAdrian Chadd static int 1826*4bf8ce03SAdrian Chadd rge_free_stats_mem(struct rge_softc *sc) 1827*4bf8ce03SAdrian Chadd { 1828*4bf8ce03SAdrian Chadd struct rge_mac_stats *ss = &sc->sc_mac_stats; 1829*4bf8ce03SAdrian Chadd 1830*4bf8ce03SAdrian Chadd RGE_ASSERT_UNLOCKED(sc); 1831*4bf8ce03SAdrian Chadd 1832*4bf8ce03SAdrian Chadd bus_dmamap_unload(sc->sc_dmat_stats_buf, ss->map); 1833*4bf8ce03SAdrian Chadd if (ss->stats != NULL) 1834*4bf8ce03SAdrian Chadd bus_dmamem_free(sc->sc_dmat_stats_buf, ss->stats, ss->map); 1835*4bf8ce03SAdrian Chadd memset(ss, 0, sizeof(*ss)); 1836*4bf8ce03SAdrian Chadd return (0); 1837*4bf8ce03SAdrian Chadd } 1838*4bf8ce03SAdrian Chadd 1839*4bf8ce03SAdrian Chadd static uint32_t 1840*4bf8ce03SAdrian Chadd rx_ring_space(struct rge_queues *q) 1841*4bf8ce03SAdrian Chadd { 1842*4bf8ce03SAdrian Chadd uint32_t prod, cons; 1843*4bf8ce03SAdrian Chadd uint32_t ret; 1844*4bf8ce03SAdrian Chadd 1845*4bf8ce03SAdrian Chadd RGE_ASSERT_LOCKED(q->q_sc); 1846*4bf8ce03SAdrian Chadd 1847*4bf8ce03SAdrian Chadd prod = q->q_rx.rge_rxq_prodidx; 1848*4bf8ce03SAdrian Chadd cons = q->q_rx.rge_rxq_considx; 1849*4bf8ce03SAdrian Chadd 1850*4bf8ce03SAdrian Chadd ret = (cons + RGE_RX_LIST_CNT - prod - 1) % RGE_RX_LIST_CNT + 1; 1851*4bf8ce03SAdrian Chadd 1852*4bf8ce03SAdrian Chadd if (ret > RGE_RX_LIST_CNT) 1853*4bf8ce03SAdrian Chadd return RGE_RX_LIST_CNT; 1854*4bf8ce03SAdrian Chadd 1855*4bf8ce03SAdrian Chadd return (ret); 1856*4bf8ce03SAdrian Chadd } 1857*4bf8ce03SAdrian Chadd 1858*4bf8ce03SAdrian Chadd /* 1859*4bf8ce03SAdrian Chadd * Initialize the RX descriptor and attach an mbuf cluster at the given offset. 1860*4bf8ce03SAdrian Chadd * 1861*4bf8ce03SAdrian Chadd * Note: this relies on the rxr ring buffer abstraction to not 1862*4bf8ce03SAdrian Chadd * over-fill the RX ring. For FreeBSD we'll need to use the 1863*4bf8ce03SAdrian Chadd * prod/cons RX indexes to know how much RX ring space to 1864*4bf8ce03SAdrian Chadd * populate. 1865*4bf8ce03SAdrian Chadd * 1866*4bf8ce03SAdrian Chadd * This routine will increment the producer index if successful. 1867*4bf8ce03SAdrian Chadd * 1868*4bf8ce03SAdrian Chadd * This must be called with the driver lock held. 1869*4bf8ce03SAdrian Chadd */ 1870*4bf8ce03SAdrian Chadd static int 1871*4bf8ce03SAdrian Chadd rge_newbuf(struct rge_queues *q) 1872*4bf8ce03SAdrian Chadd { 1873*4bf8ce03SAdrian Chadd struct rge_softc *sc = q->q_sc; 1874*4bf8ce03SAdrian Chadd struct mbuf *m; 1875*4bf8ce03SAdrian Chadd struct rge_rx_desc *r; 1876*4bf8ce03SAdrian Chadd struct rge_rxq *rxq; 1877*4bf8ce03SAdrian Chadd bus_dmamap_t rxmap; 1878*4bf8ce03SAdrian Chadd bus_dma_segment_t seg[1]; 1879*4bf8ce03SAdrian Chadd uint32_t cmdsts; 1880*4bf8ce03SAdrian Chadd int nsegs; 1881*4bf8ce03SAdrian Chadd uint32_t idx; 1882*4bf8ce03SAdrian Chadd 1883*4bf8ce03SAdrian Chadd RGE_ASSERT_LOCKED(q->q_sc); 1884*4bf8ce03SAdrian Chadd 1885*4bf8ce03SAdrian Chadd /* 1886*4bf8ce03SAdrian Chadd * Verify we have enough space in the ring; error out 1887*4bf8ce03SAdrian Chadd * if we do not. 1888*4bf8ce03SAdrian Chadd */ 1889*4bf8ce03SAdrian Chadd if (rx_ring_space(q) == 0) 1890*4bf8ce03SAdrian Chadd return (ENOBUFS); 1891*4bf8ce03SAdrian Chadd 1892*4bf8ce03SAdrian Chadd idx = q->q_rx.rge_rxq_prodidx; 1893*4bf8ce03SAdrian Chadd rxq = &q->q_rx.rge_rxq[idx]; 1894*4bf8ce03SAdrian Chadd rxmap = rxq->rxq_dmamap; 1895*4bf8ce03SAdrian Chadd 1896*4bf8ce03SAdrian Chadd /* 1897*4bf8ce03SAdrian Chadd * If we already have an mbuf here then something messed up; 1898*4bf8ce03SAdrian Chadd * exit out as the hardware may be DMAing to it. 1899*4bf8ce03SAdrian Chadd */ 1900*4bf8ce03SAdrian Chadd if (rxq->rxq_mbuf != NULL) { 1901*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, 1902*4bf8ce03SAdrian Chadd "%s: RX ring slot %d already has an mbuf?\n", __func__, 1903*4bf8ce03SAdrian Chadd idx); 1904*4bf8ce03SAdrian Chadd return (ENOBUFS); 1905*4bf8ce03SAdrian Chadd } 1906*4bf8ce03SAdrian Chadd 1907*4bf8ce03SAdrian Chadd /* Allocate single buffer backed mbuf of MCLBYTES */ 1908*4bf8ce03SAdrian Chadd m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1909*4bf8ce03SAdrian Chadd if (m == NULL) 1910*4bf8ce03SAdrian Chadd return (ENOBUFS); 1911*4bf8ce03SAdrian Chadd 1912*4bf8ce03SAdrian Chadd m->m_len = m->m_pkthdr.len = MCLBYTES; 1913*4bf8ce03SAdrian Chadd 1914*4bf8ce03SAdrian Chadd nsegs = 1; 1915*4bf8ce03SAdrian Chadd if (bus_dmamap_load_mbuf_sg(sc->sc_dmat_rx_buf, rxmap, m, seg, &nsegs, 1916*4bf8ce03SAdrian Chadd BUS_DMA_NOWAIT)) { 1917*4bf8ce03SAdrian Chadd m_freem(m); 1918*4bf8ce03SAdrian Chadd return (ENOBUFS); 1919*4bf8ce03SAdrian Chadd } 1920*4bf8ce03SAdrian Chadd 1921*4bf8ce03SAdrian Chadd /* 1922*4bf8ce03SAdrian Chadd * Make sure any changes made to the buffer have been flushed to host 1923*4bf8ce03SAdrian Chadd * memory. 1924*4bf8ce03SAdrian Chadd */ 1925*4bf8ce03SAdrian Chadd bus_dmamap_sync(sc->sc_dmat_rx_buf, rxmap, 1926*4bf8ce03SAdrian Chadd BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1927*4bf8ce03SAdrian Chadd 1928*4bf8ce03SAdrian Chadd /* 1929*4bf8ce03SAdrian Chadd * Map the segment into RX descriptors. Note that this 1930*4bf8ce03SAdrian Chadd * only currently supports a single segment per mbuf; 1931*4bf8ce03SAdrian Chadd * the call to load_mbuf_sg above specified a single segment. 1932*4bf8ce03SAdrian Chadd */ 1933*4bf8ce03SAdrian Chadd r = &q->q_rx.rge_rx_list[idx]; 1934*4bf8ce03SAdrian Chadd 1935*4bf8ce03SAdrian Chadd rxq->rxq_mbuf = m; 1936*4bf8ce03SAdrian Chadd 1937*4bf8ce03SAdrian Chadd cmdsts = seg[0].ds_len; /* XXX how big is this field in the descriptor? */ 1938*4bf8ce03SAdrian Chadd if (idx == RGE_RX_LIST_CNT - 1) 1939*4bf8ce03SAdrian Chadd cmdsts |= RGE_RDCMDSTS_EOR; 1940*4bf8ce03SAdrian Chadd 1941*4bf8ce03SAdrian Chadd /* 1942*4bf8ce03SAdrian Chadd * Configure the DMA pointer and config, but don't hand 1943*4bf8ce03SAdrian Chadd * it yet to the hardware. 1944*4bf8ce03SAdrian Chadd */ 1945*4bf8ce03SAdrian Chadd r->hi_qword1.rx_qword4.rge_cmdsts = htole32(cmdsts); 1946*4bf8ce03SAdrian Chadd r->hi_qword1.rx_qword4.rge_extsts = htole32(0); 1947*4bf8ce03SAdrian Chadd r->hi_qword0.rge_addr = htole64(seg[0].ds_addr); 1948*4bf8ce03SAdrian Chadd wmb(); 1949*4bf8ce03SAdrian Chadd 1950*4bf8ce03SAdrian Chadd /* 1951*4bf8ce03SAdrian Chadd * Mark the specific descriptor slot as "this descriptor is now 1952*4bf8ce03SAdrian Chadd * owned by the hardware", which when the hardware next sees 1953*4bf8ce03SAdrian Chadd * this, it'll continue RX DMA. 1954*4bf8ce03SAdrian Chadd */ 1955*4bf8ce03SAdrian Chadd cmdsts |= RGE_RDCMDSTS_OWN; 1956*4bf8ce03SAdrian Chadd r->hi_qword1.rx_qword4.rge_cmdsts = htole32(cmdsts); 1957*4bf8ce03SAdrian Chadd wmb(); 1958*4bf8ce03SAdrian Chadd 1959*4bf8ce03SAdrian Chadd /* 1960*4bf8ce03SAdrian Chadd * At this point the hope is the whole ring is now updated and 1961*4bf8ce03SAdrian Chadd * consistent; if the hardware was waiting for a descriptor to be 1962*4bf8ce03SAdrian Chadd * ready to write into then it should be ready here. 1963*4bf8ce03SAdrian Chadd */ 1964*4bf8ce03SAdrian Chadd 1965*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_RECV_DESC, 1966*4bf8ce03SAdrian Chadd "%s: [%d]: m=%p, m_data=%p, m_len=%ju, phys=0x%jx len %ju, " 1967*4bf8ce03SAdrian Chadd "desc=0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 1968*4bf8ce03SAdrian Chadd __func__, 1969*4bf8ce03SAdrian Chadd idx, 1970*4bf8ce03SAdrian Chadd m, 1971*4bf8ce03SAdrian Chadd m->m_data, 1972*4bf8ce03SAdrian Chadd (uintmax_t) m->m_len, 1973*4bf8ce03SAdrian Chadd (uintmax_t) seg[0].ds_addr, 1974*4bf8ce03SAdrian Chadd (uintmax_t) seg[0].ds_len, 1975*4bf8ce03SAdrian Chadd ((uint32_t *) r)[0], 1976*4bf8ce03SAdrian Chadd ((uint32_t *) r)[1], 1977*4bf8ce03SAdrian Chadd ((uint32_t *) r)[2], 1978*4bf8ce03SAdrian Chadd ((uint32_t *) r)[3], 1979*4bf8ce03SAdrian Chadd ((uint32_t *) r)[4], 1980*4bf8ce03SAdrian Chadd ((uint32_t *) r)[5], 1981*4bf8ce03SAdrian Chadd ((uint32_t *) r)[6], 1982*4bf8ce03SAdrian Chadd ((uint32_t *) r)[7]); 1983*4bf8ce03SAdrian Chadd 1984*4bf8ce03SAdrian Chadd q->q_rx.rge_rxq_prodidx = RGE_NEXT_RX_DESC(idx); 1985*4bf8ce03SAdrian Chadd 1986*4bf8ce03SAdrian Chadd return (0); 1987*4bf8ce03SAdrian Chadd } 1988*4bf8ce03SAdrian Chadd 1989*4bf8ce03SAdrian Chadd static void 1990*4bf8ce03SAdrian Chadd rge_rx_list_init(struct rge_queues *q) 1991*4bf8ce03SAdrian Chadd { 1992*4bf8ce03SAdrian Chadd memset(q->q_rx.rge_rx_list, 0, RGE_RX_LIST_SZ); 1993*4bf8ce03SAdrian Chadd 1994*4bf8ce03SAdrian Chadd RGE_ASSERT_LOCKED(q->q_sc); 1995*4bf8ce03SAdrian Chadd 1996*4bf8ce03SAdrian Chadd q->q_rx.rge_rxq_prodidx = q->q_rx.rge_rxq_considx = 0; 1997*4bf8ce03SAdrian Chadd q->q_rx.rge_head = NULL; 1998*4bf8ce03SAdrian Chadd q->q_rx.rge_tail = &q->q_rx.rge_head; 1999*4bf8ce03SAdrian Chadd 2000*4bf8ce03SAdrian Chadd RGE_DPRINTF(q->q_sc, RGE_DEBUG_SETUP, "%s: rx_list=%p\n", __func__, 2001*4bf8ce03SAdrian Chadd q->q_rx.rge_rx_list); 2002*4bf8ce03SAdrian Chadd 2003*4bf8ce03SAdrian Chadd rge_fill_rx_ring(q); 2004*4bf8ce03SAdrian Chadd } 2005*4bf8ce03SAdrian Chadd 2006*4bf8ce03SAdrian Chadd /** 2007*4bf8ce03SAdrian Chadd * @brief Fill / refill the RX ring as needed. 2008*4bf8ce03SAdrian Chadd * 2009*4bf8ce03SAdrian Chadd * Refill the RX ring with one less than the total descriptors needed. 2010*4bf8ce03SAdrian Chadd * This makes the check in rge_rxeof() easier - it can just check 2011*4bf8ce03SAdrian Chadd * descriptors from cons -> prod and bail once it hits prod. 2012*4bf8ce03SAdrian Chadd * If the whole ring is filled then cons == prod, and that shortcut 2013*4bf8ce03SAdrian Chadd * fails. 2014*4bf8ce03SAdrian Chadd * 2015*4bf8ce03SAdrian Chadd * This must be called with the driver lock held. 2016*4bf8ce03SAdrian Chadd */ 2017*4bf8ce03SAdrian Chadd static void 2018*4bf8ce03SAdrian Chadd rge_fill_rx_ring(struct rge_queues *q) 2019*4bf8ce03SAdrian Chadd { 2020*4bf8ce03SAdrian Chadd struct rge_softc *sc = q->q_sc; 2021*4bf8ce03SAdrian Chadd uint32_t count, i, prod, cons; 2022*4bf8ce03SAdrian Chadd 2023*4bf8ce03SAdrian Chadd RGE_ASSERT_LOCKED(q->q_sc); 2024*4bf8ce03SAdrian Chadd 2025*4bf8ce03SAdrian Chadd prod = q->q_rx.rge_rxq_prodidx; 2026*4bf8ce03SAdrian Chadd cons = q->q_rx.rge_rxq_considx; 2027*4bf8ce03SAdrian Chadd count = rx_ring_space(q); 2028*4bf8ce03SAdrian Chadd 2029*4bf8ce03SAdrian Chadd /* Fill to count-1; bail if we don't have the space */ 2030*4bf8ce03SAdrian Chadd if (count <= 1) 2031*4bf8ce03SAdrian Chadd return; 2032*4bf8ce03SAdrian Chadd count--; 2033*4bf8ce03SAdrian Chadd 2034*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_RECV_DESC, "%s: prod=%u, cons=%u, space=%u\n", 2035*4bf8ce03SAdrian Chadd __func__, prod, cons, count); 2036*4bf8ce03SAdrian Chadd 2037*4bf8ce03SAdrian Chadd /* Make sure device->host changes are visible */ 2038*4bf8ce03SAdrian Chadd bus_dmamap_sync(sc->sc_dmat_rx_desc, q->q_rx.rge_rx_list_map, 2039*4bf8ce03SAdrian Chadd BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2040*4bf8ce03SAdrian Chadd 2041*4bf8ce03SAdrian Chadd for (i = 0; i < count; i++) { 2042*4bf8ce03SAdrian Chadd if (rge_newbuf(q)) 2043*4bf8ce03SAdrian Chadd break; 2044*4bf8ce03SAdrian Chadd } 2045*4bf8ce03SAdrian Chadd 2046*4bf8ce03SAdrian Chadd /* Make changes visible to the device */ 2047*4bf8ce03SAdrian Chadd bus_dmamap_sync(sc->sc_dmat_rx_desc, q->q_rx.rge_rx_list_map, 2048*4bf8ce03SAdrian Chadd BUS_DMASYNC_PREWRITE); 2049*4bf8ce03SAdrian Chadd } 2050*4bf8ce03SAdrian Chadd 2051*4bf8ce03SAdrian Chadd static void 2052*4bf8ce03SAdrian Chadd rge_tx_list_init(struct rge_queues *q) 2053*4bf8ce03SAdrian Chadd { 2054*4bf8ce03SAdrian Chadd struct rge_softc *sc = q->q_sc; 2055*4bf8ce03SAdrian Chadd struct rge_tx_desc *d; 2056*4bf8ce03SAdrian Chadd int i; 2057*4bf8ce03SAdrian Chadd 2058*4bf8ce03SAdrian Chadd RGE_ASSERT_LOCKED(q->q_sc); 2059*4bf8ce03SAdrian Chadd 2060*4bf8ce03SAdrian Chadd memset(q->q_tx.rge_tx_list, 0, RGE_TX_LIST_SZ); 2061*4bf8ce03SAdrian Chadd 2062*4bf8ce03SAdrian Chadd for (i = 0; i < RGE_TX_LIST_CNT; i++) 2063*4bf8ce03SAdrian Chadd q->q_tx.rge_txq[i].txq_mbuf = NULL; 2064*4bf8ce03SAdrian Chadd 2065*4bf8ce03SAdrian Chadd d = &q->q_tx.rge_tx_list[RGE_TX_LIST_CNT - 1]; 2066*4bf8ce03SAdrian Chadd d->rge_cmdsts = htole32(RGE_TDCMDSTS_EOR); 2067*4bf8ce03SAdrian Chadd 2068*4bf8ce03SAdrian Chadd bus_dmamap_sync(sc->sc_dmat_tx_desc, q->q_tx.rge_tx_list_map, 2069*4bf8ce03SAdrian Chadd BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2070*4bf8ce03SAdrian Chadd wmb(); 2071*4bf8ce03SAdrian Chadd 2072*4bf8ce03SAdrian Chadd q->q_tx.rge_txq_prodidx = q->q_tx.rge_txq_considx = 0; 2073*4bf8ce03SAdrian Chadd 2074*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: rx_list=%p\n", __func__, 2075*4bf8ce03SAdrian Chadd q->q_tx.rge_tx_list); 2076*4bf8ce03SAdrian Chadd } 2077*4bf8ce03SAdrian Chadd 2078*4bf8ce03SAdrian Chadd int 2079*4bf8ce03SAdrian Chadd rge_rxeof(struct rge_queues *q, struct mbufq *mq) 2080*4bf8ce03SAdrian Chadd { 2081*4bf8ce03SAdrian Chadd struct rge_softc *sc = q->q_sc; 2082*4bf8ce03SAdrian Chadd struct mbuf *m; 2083*4bf8ce03SAdrian Chadd struct rge_rx_desc *cur_rx; 2084*4bf8ce03SAdrian Chadd struct rge_rxq *rxq; 2085*4bf8ce03SAdrian Chadd uint32_t rxstat, extsts; 2086*4bf8ce03SAdrian Chadd int i, mlen, rx = 0; 2087*4bf8ce03SAdrian Chadd int cons, prod; 2088*4bf8ce03SAdrian Chadd int maxpkt = 16; /* XXX TODO: make this a tunable */ 2089*4bf8ce03SAdrian Chadd bool check_hwcsum; 2090*4bf8ce03SAdrian Chadd 2091*4bf8ce03SAdrian Chadd check_hwcsum = ((if_getcapenable(sc->sc_ifp) & IFCAP_RXCSUM) != 0); 2092*4bf8ce03SAdrian Chadd 2093*4bf8ce03SAdrian Chadd RGE_ASSERT_LOCKED(sc); 2094*4bf8ce03SAdrian Chadd 2095*4bf8ce03SAdrian Chadd sc->sc_drv_stats.rxeof_cnt++; 2096*4bf8ce03SAdrian Chadd 2097*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_INTR, "%s; called\n", __func__); 2098*4bf8ce03SAdrian Chadd 2099*4bf8ce03SAdrian Chadd /* Note: if_re is POSTREAD/WRITE, rge is only POSTWRITE */ 2100*4bf8ce03SAdrian Chadd bus_dmamap_sync(sc->sc_dmat_rx_desc, q->q_rx.rge_rx_list_map, 2101*4bf8ce03SAdrian Chadd BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2102*4bf8ce03SAdrian Chadd 2103*4bf8ce03SAdrian Chadd prod = q->q_rx.rge_rxq_prodidx; 2104*4bf8ce03SAdrian Chadd 2105*4bf8ce03SAdrian Chadd /* 2106*4bf8ce03SAdrian Chadd * Loop around until we've run out of active descriptors to check 2107*4bf8ce03SAdrian Chadd * or maxpkt has been reached. 2108*4bf8ce03SAdrian Chadd */ 2109*4bf8ce03SAdrian Chadd for (i = cons = q->q_rx.rge_rxq_considx; 2110*4bf8ce03SAdrian Chadd maxpkt > 0 && i != prod; 2111*4bf8ce03SAdrian Chadd i = RGE_NEXT_RX_DESC(i)) { 2112*4bf8ce03SAdrian Chadd /* break out of loop if we're not running */ 2113*4bf8ce03SAdrian Chadd if ((if_getdrvflags(sc->sc_ifp) & IFF_DRV_RUNNING) == 0) 2114*4bf8ce03SAdrian Chadd break; 2115*4bf8ce03SAdrian Chadd 2116*4bf8ce03SAdrian Chadd /* get the current rx descriptor to check descriptor status */ 2117*4bf8ce03SAdrian Chadd cur_rx = &q->q_rx.rge_rx_list[i]; 2118*4bf8ce03SAdrian Chadd rxstat = le32toh(cur_rx->hi_qword1.rx_qword4.rge_cmdsts); 2119*4bf8ce03SAdrian Chadd if ((rxstat & RGE_RDCMDSTS_OWN) != 0) { 2120*4bf8ce03SAdrian Chadd break; 2121*4bf8ce03SAdrian Chadd } 2122*4bf8ce03SAdrian Chadd 2123*4bf8ce03SAdrian Chadd /* Ensure everything else has been DMAed */ 2124*4bf8ce03SAdrian Chadd rmb(); 2125*4bf8ce03SAdrian Chadd 2126*4bf8ce03SAdrian Chadd /* Get the current rx buffer, sync */ 2127*4bf8ce03SAdrian Chadd rxq = &q->q_rx.rge_rxq[i]; 2128*4bf8ce03SAdrian Chadd 2129*4bf8ce03SAdrian Chadd /* Ensure any device updates are now visible in host memory */ 2130*4bf8ce03SAdrian Chadd bus_dmamap_sync(sc->sc_dmat_rx_buf, rxq->rxq_dmamap, 2131*4bf8ce03SAdrian Chadd BUS_DMASYNC_POSTREAD); 2132*4bf8ce03SAdrian Chadd 2133*4bf8ce03SAdrian Chadd /* Unload the DMA map, we are done with it here */ 2134*4bf8ce03SAdrian Chadd bus_dmamap_unload(sc->sc_dmat_rx_buf, rxq->rxq_dmamap); 2135*4bf8ce03SAdrian Chadd m = rxq->rxq_mbuf; 2136*4bf8ce03SAdrian Chadd rxq->rxq_mbuf = NULL; 2137*4bf8ce03SAdrian Chadd 2138*4bf8ce03SAdrian Chadd rx = 1; 2139*4bf8ce03SAdrian Chadd 2140*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_RECV_DESC, 2141*4bf8ce03SAdrian Chadd "%s: RX: [%d]: m=%p, m_data=%p, m_len=%ju, " 2142*4bf8ce03SAdrian Chadd "desc=0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 2143*4bf8ce03SAdrian Chadd __func__, 2144*4bf8ce03SAdrian Chadd i, 2145*4bf8ce03SAdrian Chadd m, 2146*4bf8ce03SAdrian Chadd m->m_data, 2147*4bf8ce03SAdrian Chadd (uintmax_t) m->m_len, 2148*4bf8ce03SAdrian Chadd ((uint32_t *) cur_rx)[0], 2149*4bf8ce03SAdrian Chadd ((uint32_t *) cur_rx)[1], 2150*4bf8ce03SAdrian Chadd ((uint32_t *) cur_rx)[2], 2151*4bf8ce03SAdrian Chadd ((uint32_t *) cur_rx)[3], 2152*4bf8ce03SAdrian Chadd ((uint32_t *) cur_rx)[4], 2153*4bf8ce03SAdrian Chadd ((uint32_t *) cur_rx)[5], 2154*4bf8ce03SAdrian Chadd ((uint32_t *) cur_rx)[6], 2155*4bf8ce03SAdrian Chadd ((uint32_t *) cur_rx)[7]); 2156*4bf8ce03SAdrian Chadd 2157*4bf8ce03SAdrian Chadd if ((rxstat & RGE_RDCMDSTS_SOF) != 0) { 2158*4bf8ce03SAdrian Chadd if (q->q_rx.rge_head != NULL) { 2159*4bf8ce03SAdrian Chadd sc->sc_drv_stats.rx_desc_err_multidesc++; 2160*4bf8ce03SAdrian Chadd if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 2161*4bf8ce03SAdrian Chadd 1); 2162*4bf8ce03SAdrian Chadd m_freem(q->q_rx.rge_head); 2163*4bf8ce03SAdrian Chadd q->q_rx.rge_tail = &q->q_rx.rge_head; 2164*4bf8ce03SAdrian Chadd } 2165*4bf8ce03SAdrian Chadd 2166*4bf8ce03SAdrian Chadd m->m_pkthdr.len = 0; 2167*4bf8ce03SAdrian Chadd } else if (q->q_rx.rge_head == NULL) { 2168*4bf8ce03SAdrian Chadd m_freem(m); 2169*4bf8ce03SAdrian Chadd continue; 2170*4bf8ce03SAdrian Chadd } else 2171*4bf8ce03SAdrian Chadd m->m_flags &= ~M_PKTHDR; 2172*4bf8ce03SAdrian Chadd 2173*4bf8ce03SAdrian Chadd *q->q_rx.rge_tail = m; 2174*4bf8ce03SAdrian Chadd q->q_rx.rge_tail = &m->m_next; 2175*4bf8ce03SAdrian Chadd 2176*4bf8ce03SAdrian Chadd mlen = rxstat & RGE_RDCMDSTS_FRAGLEN; 2177*4bf8ce03SAdrian Chadd m->m_len = mlen; 2178*4bf8ce03SAdrian Chadd 2179*4bf8ce03SAdrian Chadd m = q->q_rx.rge_head; 2180*4bf8ce03SAdrian Chadd m->m_pkthdr.len += mlen; 2181*4bf8ce03SAdrian Chadd 2182*4bf8ce03SAdrian Chadd /* Ethernet CRC error */ 2183*4bf8ce03SAdrian Chadd if (rxstat & RGE_RDCMDSTS_RXERRSUM) { 2184*4bf8ce03SAdrian Chadd sc->sc_drv_stats.rx_ether_csum_err++; 2185*4bf8ce03SAdrian Chadd if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1); 2186*4bf8ce03SAdrian Chadd m_freem(m); 2187*4bf8ce03SAdrian Chadd q->q_rx.rge_head = NULL; 2188*4bf8ce03SAdrian Chadd q->q_rx.rge_tail = &q->q_rx.rge_head; 2189*4bf8ce03SAdrian Chadd continue; 2190*4bf8ce03SAdrian Chadd } 2191*4bf8ce03SAdrian Chadd 2192*4bf8ce03SAdrian Chadd /* 2193*4bf8ce03SAdrian Chadd * This mbuf is part of a multi-descriptor frame, 2194*4bf8ce03SAdrian Chadd * so count it towards that. 2195*4bf8ce03SAdrian Chadd * 2196*4bf8ce03SAdrian Chadd * Yes, this means we won't be counting the 2197*4bf8ce03SAdrian Chadd * final descriptor/mbuf as part of a multi-descriptor 2198*4bf8ce03SAdrian Chadd * frame; if someone wishes to do that then it 2199*4bf8ce03SAdrian Chadd * shouldn't be too hard to add. 2200*4bf8ce03SAdrian Chadd */ 2201*4bf8ce03SAdrian Chadd if ((rxstat & RGE_RDCMDSTS_EOF) == 0) { 2202*4bf8ce03SAdrian Chadd sc->sc_drv_stats.rx_desc_jumbo_frag++; 2203*4bf8ce03SAdrian Chadd continue; 2204*4bf8ce03SAdrian Chadd } 2205*4bf8ce03SAdrian Chadd 2206*4bf8ce03SAdrian Chadd q->q_rx.rge_head = NULL; 2207*4bf8ce03SAdrian Chadd q->q_rx.rge_tail = &q->q_rx.rge_head; 2208*4bf8ce03SAdrian Chadd 2209*4bf8ce03SAdrian Chadd m_adj(m, -ETHER_CRC_LEN); 2210*4bf8ce03SAdrian Chadd m->m_pkthdr.rcvif = sc->sc_ifp; 2211*4bf8ce03SAdrian Chadd if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1); 2212*4bf8ce03SAdrian Chadd 2213*4bf8ce03SAdrian Chadd extsts = le32toh(cur_rx->hi_qword1.rx_qword4.rge_extsts); 2214*4bf8ce03SAdrian Chadd 2215*4bf8ce03SAdrian Chadd /* Check IP header checksum. */ 2216*4bf8ce03SAdrian Chadd if (check_hwcsum) { 2217*4bf8ce03SAdrian Chadd /* Does it exist for IPv4? */ 2218*4bf8ce03SAdrian Chadd if (extsts & RGE_RDEXTSTS_IPV4) { 2219*4bf8ce03SAdrian Chadd sc->sc_drv_stats.rx_offload_csum_ipv4_exists++; 2220*4bf8ce03SAdrian Chadd m->m_pkthdr.csum_flags |= 2221*4bf8ce03SAdrian Chadd CSUM_IP_CHECKED; 2222*4bf8ce03SAdrian Chadd } 2223*4bf8ce03SAdrian Chadd /* XXX IPv6 checksum check? */ 2224*4bf8ce03SAdrian Chadd 2225*4bf8ce03SAdrian Chadd if (((extsts & RGE_RDEXTSTS_IPCSUMERR) == 0) 2226*4bf8ce03SAdrian Chadd && ((extsts & RGE_RDEXTSTS_IPV4) != 0)) { 2227*4bf8ce03SAdrian Chadd sc->sc_drv_stats.rx_offload_csum_ipv4_valid++; 2228*4bf8ce03SAdrian Chadd m->m_pkthdr.csum_flags |= 2229*4bf8ce03SAdrian Chadd CSUM_IP_VALID; 2230*4bf8ce03SAdrian Chadd } 2231*4bf8ce03SAdrian Chadd 2232*4bf8ce03SAdrian Chadd /* Check TCP/UDP checksum. */ 2233*4bf8ce03SAdrian Chadd if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) && 2234*4bf8ce03SAdrian Chadd (extsts & RGE_RDEXTSTS_TCPPKT)) { 2235*4bf8ce03SAdrian Chadd sc->sc_drv_stats.rx_offload_csum_tcp_exists++; 2236*4bf8ce03SAdrian Chadd if ((extsts & RGE_RDEXTSTS_TCPCSUMERR) == 0) { 2237*4bf8ce03SAdrian Chadd sc->sc_drv_stats.rx_offload_csum_tcp_valid++; 2238*4bf8ce03SAdrian Chadd /* TCP checksum OK */ 2239*4bf8ce03SAdrian Chadd m->m_pkthdr.csum_flags |= 2240*4bf8ce03SAdrian Chadd CSUM_DATA_VALID|CSUM_PSEUDO_HDR; 2241*4bf8ce03SAdrian Chadd m->m_pkthdr.csum_data = 0xffff; 2242*4bf8ce03SAdrian Chadd } 2243*4bf8ce03SAdrian Chadd } 2244*4bf8ce03SAdrian Chadd 2245*4bf8ce03SAdrian Chadd if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) && 2246*4bf8ce03SAdrian Chadd (extsts & RGE_RDEXTSTS_UDPPKT)) { 2247*4bf8ce03SAdrian Chadd sc->sc_drv_stats.rx_offload_csum_udp_exists++; 2248*4bf8ce03SAdrian Chadd if ((extsts & RGE_RDEXTSTS_UDPCSUMERR) == 0) { 2249*4bf8ce03SAdrian Chadd sc->sc_drv_stats.rx_offload_csum_udp_valid++; 2250*4bf8ce03SAdrian Chadd /* UDP checksum OK */ 2251*4bf8ce03SAdrian Chadd m->m_pkthdr.csum_flags |= 2252*4bf8ce03SAdrian Chadd CSUM_DATA_VALID|CSUM_PSEUDO_HDR; 2253*4bf8ce03SAdrian Chadd m->m_pkthdr.csum_data = 0xffff; 2254*4bf8ce03SAdrian Chadd } 2255*4bf8ce03SAdrian Chadd } 2256*4bf8ce03SAdrian Chadd } 2257*4bf8ce03SAdrian Chadd 2258*4bf8ce03SAdrian Chadd if (extsts & RGE_RDEXTSTS_VTAG) { 2259*4bf8ce03SAdrian Chadd sc->sc_drv_stats.rx_offload_vlan_tag++; 2260*4bf8ce03SAdrian Chadd m->m_pkthdr.ether_vtag = 2261*4bf8ce03SAdrian Chadd ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK); 2262*4bf8ce03SAdrian Chadd m->m_flags |= M_VLANTAG; 2263*4bf8ce03SAdrian Chadd } 2264*4bf8ce03SAdrian Chadd 2265*4bf8ce03SAdrian Chadd mbufq_enqueue(mq, m); 2266*4bf8ce03SAdrian Chadd 2267*4bf8ce03SAdrian Chadd maxpkt--; 2268*4bf8ce03SAdrian Chadd } 2269*4bf8ce03SAdrian Chadd 2270*4bf8ce03SAdrian Chadd if (!rx) 2271*4bf8ce03SAdrian Chadd return (0); 2272*4bf8ce03SAdrian Chadd 2273*4bf8ce03SAdrian Chadd /* 2274*4bf8ce03SAdrian Chadd * Make sure any device updates to the descriptor ring are 2275*4bf8ce03SAdrian Chadd * visible to the host before we continue. 2276*4bf8ce03SAdrian Chadd */ 2277*4bf8ce03SAdrian Chadd bus_dmamap_sync(sc->sc_dmat_rx_desc, q->q_rx.rge_rx_list_map, 2278*4bf8ce03SAdrian Chadd BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2279*4bf8ce03SAdrian Chadd wmb(); 2280*4bf8ce03SAdrian Chadd 2281*4bf8ce03SAdrian Chadd /* Update the consumer index, refill the RX ring */ 2282*4bf8ce03SAdrian Chadd q->q_rx.rge_rxq_considx = i; 2283*4bf8ce03SAdrian Chadd rge_fill_rx_ring(q); 2284*4bf8ce03SAdrian Chadd 2285*4bf8ce03SAdrian Chadd return (1); 2286*4bf8ce03SAdrian Chadd } 2287*4bf8ce03SAdrian Chadd 2288*4bf8ce03SAdrian Chadd int 2289*4bf8ce03SAdrian Chadd rge_txeof(struct rge_queues *q) 2290*4bf8ce03SAdrian Chadd { 2291*4bf8ce03SAdrian Chadd struct rge_softc *sc = q->q_sc; 2292*4bf8ce03SAdrian Chadd struct ifnet *ifp = sc->sc_ifp; 2293*4bf8ce03SAdrian Chadd struct rge_txq *txq; 2294*4bf8ce03SAdrian Chadd uint32_t txstat; 2295*4bf8ce03SAdrian Chadd int cons, prod, cur, idx; 2296*4bf8ce03SAdrian Chadd int free = 0, ntx = 0; 2297*4bf8ce03SAdrian Chadd int pktlen; 2298*4bf8ce03SAdrian Chadd bool is_mcast; 2299*4bf8ce03SAdrian Chadd 2300*4bf8ce03SAdrian Chadd RGE_ASSERT_LOCKED(sc); 2301*4bf8ce03SAdrian Chadd 2302*4bf8ce03SAdrian Chadd sc->sc_drv_stats.txeof_cnt++; 2303*4bf8ce03SAdrian Chadd 2304*4bf8ce03SAdrian Chadd prod = q->q_tx.rge_txq_prodidx; 2305*4bf8ce03SAdrian Chadd cons = q->q_tx.rge_txq_considx; 2306*4bf8ce03SAdrian Chadd 2307*4bf8ce03SAdrian Chadd idx = cons; 2308*4bf8ce03SAdrian Chadd while (idx != prod) { 2309*4bf8ce03SAdrian Chadd txq = &q->q_tx.rge_txq[idx]; 2310*4bf8ce03SAdrian Chadd cur = txq->txq_descidx; 2311*4bf8ce03SAdrian Chadd 2312*4bf8ce03SAdrian Chadd rge_tx_list_sync(sc, q, cur, 1, BUS_DMASYNC_POSTREAD); 2313*4bf8ce03SAdrian Chadd txstat = q->q_tx.rge_tx_list[cur].rge_cmdsts; 2314*4bf8ce03SAdrian Chadd rge_tx_list_sync(sc, q, cur, 1, BUS_DMASYNC_PREREAD); 2315*4bf8ce03SAdrian Chadd if ((txstat & htole32(RGE_TDCMDSTS_OWN)) != 0) { 2316*4bf8ce03SAdrian Chadd free = 2; 2317*4bf8ce03SAdrian Chadd break; 2318*4bf8ce03SAdrian Chadd } 2319*4bf8ce03SAdrian Chadd 2320*4bf8ce03SAdrian Chadd bus_dmamap_sync(sc->sc_dmat_tx_buf, txq->txq_dmamap, 2321*4bf8ce03SAdrian Chadd BUS_DMASYNC_POSTWRITE); 2322*4bf8ce03SAdrian Chadd bus_dmamap_unload(sc->sc_dmat_tx_buf, txq->txq_dmamap); 2323*4bf8ce03SAdrian Chadd pktlen = txq->txq_mbuf->m_pkthdr.len; 2324*4bf8ce03SAdrian Chadd is_mcast = ((txq->txq_mbuf->m_flags & M_MCAST) != 0); 2325*4bf8ce03SAdrian Chadd m_freem(txq->txq_mbuf); 2326*4bf8ce03SAdrian Chadd txq->txq_mbuf = NULL; 2327*4bf8ce03SAdrian Chadd ntx++; 2328*4bf8ce03SAdrian Chadd 2329*4bf8ce03SAdrian Chadd if ((txstat & 2330*4bf8ce03SAdrian Chadd htole32(RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL)) != 0) 2331*4bf8ce03SAdrian Chadd if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); 2332*4bf8ce03SAdrian Chadd if ((txstat & htole32(RGE_TDCMDSTS_TXERR)) != 0) 2333*4bf8ce03SAdrian Chadd if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2334*4bf8ce03SAdrian Chadd else { 2335*4bf8ce03SAdrian Chadd if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 2336*4bf8ce03SAdrian Chadd if_inc_counter(ifp, IFCOUNTER_OBYTES, pktlen); 2337*4bf8ce03SAdrian Chadd if (is_mcast) 2338*4bf8ce03SAdrian Chadd if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); 2339*4bf8ce03SAdrian Chadd 2340*4bf8ce03SAdrian Chadd } 2341*4bf8ce03SAdrian Chadd 2342*4bf8ce03SAdrian Chadd idx = RGE_NEXT_TX_DESC(cur); 2343*4bf8ce03SAdrian Chadd free = 1; 2344*4bf8ce03SAdrian Chadd } 2345*4bf8ce03SAdrian Chadd 2346*4bf8ce03SAdrian Chadd /* If we didn't complete any TX descriptors then return 0 */ 2347*4bf8ce03SAdrian Chadd if (free == 0) 2348*4bf8ce03SAdrian Chadd return (0); 2349*4bf8ce03SAdrian Chadd 2350*4bf8ce03SAdrian Chadd if (idx >= cons) { 2351*4bf8ce03SAdrian Chadd rge_tx_list_sync(sc, q, cons, idx - cons, 2352*4bf8ce03SAdrian Chadd BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2353*4bf8ce03SAdrian Chadd } else { 2354*4bf8ce03SAdrian Chadd rge_tx_list_sync(sc, q, cons, RGE_TX_LIST_CNT - cons, 2355*4bf8ce03SAdrian Chadd BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2356*4bf8ce03SAdrian Chadd rge_tx_list_sync(sc, q, 0, idx, 2357*4bf8ce03SAdrian Chadd BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2358*4bf8ce03SAdrian Chadd } 2359*4bf8ce03SAdrian Chadd 2360*4bf8ce03SAdrian Chadd q->q_tx.rge_txq_considx = idx; 2361*4bf8ce03SAdrian Chadd 2362*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_XMIT, 2363*4bf8ce03SAdrian Chadd "%s: handled %d frames; prod=%d, cons=%d\n", __func__, 2364*4bf8ce03SAdrian Chadd ntx, q->q_tx.rge_txq_prodidx, q->q_tx.rge_txq_considx); 2365*4bf8ce03SAdrian Chadd 2366*4bf8ce03SAdrian Chadd /* 2367*4bf8ce03SAdrian Chadd * We processed the ring and hit a descriptor that was still 2368*4bf8ce03SAdrian Chadd * owned by the hardware, so there's still pending work. 2369*4bf8ce03SAdrian Chadd * 2370*4bf8ce03SAdrian Chadd * If we got to the end of the ring and there's no further 2371*4bf8ce03SAdrian Chadd * frames owned by the hardware then we can quieten the 2372*4bf8ce03SAdrian Chadd * watchdog. 2373*4bf8ce03SAdrian Chadd */ 2374*4bf8ce03SAdrian Chadd if (free == 2) 2375*4bf8ce03SAdrian Chadd sc->sc_watchdog = 5; 2376*4bf8ce03SAdrian Chadd else 2377*4bf8ce03SAdrian Chadd sc->sc_watchdog = 0; 2378*4bf8ce03SAdrian Chadd 2379*4bf8ce03SAdrian Chadd /* 2380*4bf8ce03SAdrian Chadd * Kick-start the transmit task just in case we have 2381*4bf8ce03SAdrian Chadd * more frames available. 2382*4bf8ce03SAdrian Chadd */ 2383*4bf8ce03SAdrian Chadd taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task); 2384*4bf8ce03SAdrian Chadd 2385*4bf8ce03SAdrian Chadd return (1); 2386*4bf8ce03SAdrian Chadd } 2387*4bf8ce03SAdrian Chadd 2388*4bf8ce03SAdrian Chadd static u_int 2389*4bf8ce03SAdrian Chadd rge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 2390*4bf8ce03SAdrian Chadd { 2391*4bf8ce03SAdrian Chadd uint32_t crc, *hashes = arg; 2392*4bf8ce03SAdrian Chadd 2393*4bf8ce03SAdrian Chadd // XXX TODO: validate this does addrlo? */ 2394*4bf8ce03SAdrian Chadd crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26; 2395*4bf8ce03SAdrian Chadd crc &= 0x3f; 2396*4bf8ce03SAdrian Chadd 2397*4bf8ce03SAdrian Chadd if (crc < 32) 2398*4bf8ce03SAdrian Chadd hashes[0] |= (1 << crc); 2399*4bf8ce03SAdrian Chadd else 2400*4bf8ce03SAdrian Chadd hashes[1] |= (1 << (crc - 32)); 2401*4bf8ce03SAdrian Chadd 2402*4bf8ce03SAdrian Chadd return (1); 2403*4bf8ce03SAdrian Chadd } 2404*4bf8ce03SAdrian Chadd 2405*4bf8ce03SAdrian Chadd /** 2406*4bf8ce03SAdrian Chadd * @brief Configure the RX filter and multicast filter. 2407*4bf8ce03SAdrian Chadd * 2408*4bf8ce03SAdrian Chadd * This must be called with the driver lock held. 2409*4bf8ce03SAdrian Chadd */ 2410*4bf8ce03SAdrian Chadd static void 2411*4bf8ce03SAdrian Chadd rge_iff_locked(struct rge_softc *sc) 2412*4bf8ce03SAdrian Chadd { 2413*4bf8ce03SAdrian Chadd uint32_t hashes[2]; 2414*4bf8ce03SAdrian Chadd uint32_t rxfilt; 2415*4bf8ce03SAdrian Chadd 2416*4bf8ce03SAdrian Chadd RGE_ASSERT_LOCKED(sc); 2417*4bf8ce03SAdrian Chadd 2418*4bf8ce03SAdrian Chadd rxfilt = RGE_READ_4(sc, RGE_RXCFG); 2419*4bf8ce03SAdrian Chadd rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI); 2420*4bf8ce03SAdrian Chadd 2421*4bf8ce03SAdrian Chadd /* 2422*4bf8ce03SAdrian Chadd * Always accept frames destined to our station address. 2423*4bf8ce03SAdrian Chadd * Always accept broadcast frames. 2424*4bf8ce03SAdrian Chadd */ 2425*4bf8ce03SAdrian Chadd rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD; 2426*4bf8ce03SAdrian Chadd 2427*4bf8ce03SAdrian Chadd if ((if_getflags(sc->sc_ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 2428*4bf8ce03SAdrian Chadd rxfilt |= RGE_RXCFG_MULTI; 2429*4bf8ce03SAdrian Chadd if ((if_getflags(sc->sc_ifp) & IFF_PROMISC) != 0) 2430*4bf8ce03SAdrian Chadd rxfilt |= RGE_RXCFG_ALLPHYS; 2431*4bf8ce03SAdrian Chadd hashes[0] = hashes[1] = 0xffffffff; 2432*4bf8ce03SAdrian Chadd } else { 2433*4bf8ce03SAdrian Chadd rxfilt |= RGE_RXCFG_MULTI; 2434*4bf8ce03SAdrian Chadd /* Program new filter. */ 2435*4bf8ce03SAdrian Chadd memset(hashes, 0, sizeof(hashes)); 2436*4bf8ce03SAdrian Chadd if_foreach_llmaddr(sc->sc_ifp, rge_hash_maddr, &hashes); 2437*4bf8ce03SAdrian Chadd } 2438*4bf8ce03SAdrian Chadd 2439*4bf8ce03SAdrian Chadd RGE_WRITE_4(sc, RGE_RXCFG, rxfilt); 2440*4bf8ce03SAdrian Chadd RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1])); 2441*4bf8ce03SAdrian Chadd RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0])); 2442*4bf8ce03SAdrian Chadd } 2443*4bf8ce03SAdrian Chadd 2444*4bf8ce03SAdrian Chadd static void 2445*4bf8ce03SAdrian Chadd rge_add_media_types(struct rge_softc *sc) 2446*4bf8ce03SAdrian Chadd { 2447*4bf8ce03SAdrian Chadd ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL); 2448*4bf8ce03SAdrian Chadd ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); 2449*4bf8ce03SAdrian Chadd ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL); 2450*4bf8ce03SAdrian Chadd ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); 2451*4bf8ce03SAdrian Chadd ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL); 2452*4bf8ce03SAdrian Chadd ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2453*4bf8ce03SAdrian Chadd ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL); 2454*4bf8ce03SAdrian Chadd ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL); 2455*4bf8ce03SAdrian Chadd 2456*4bf8ce03SAdrian Chadd if (sc->rge_type == MAC_R26) { 2457*4bf8ce03SAdrian Chadd ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T, 0, NULL); 2458*4bf8ce03SAdrian Chadd ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T | IFM_FDX, 2459*4bf8ce03SAdrian Chadd 0, NULL); 2460*4bf8ce03SAdrian Chadd } else if (sc->rge_type == MAC_R27) { 2461*4bf8ce03SAdrian Chadd ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_T, 0, NULL); 2462*4bf8ce03SAdrian Chadd ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_T | IFM_FDX, 2463*4bf8ce03SAdrian Chadd 0, NULL); 2464*4bf8ce03SAdrian Chadd } 2465*4bf8ce03SAdrian Chadd } 2466*4bf8ce03SAdrian Chadd 2467*4bf8ce03SAdrian Chadd /** 2468*4bf8ce03SAdrian Chadd * @brief Deferred packet dequeue and submit. 2469*4bf8ce03SAdrian Chadd */ 2470*4bf8ce03SAdrian Chadd static void 2471*4bf8ce03SAdrian Chadd rge_tx_task(void *arg, int npending) 2472*4bf8ce03SAdrian Chadd { 2473*4bf8ce03SAdrian Chadd struct rge_softc *sc = (struct rge_softc *) arg; 2474*4bf8ce03SAdrian Chadd /* Note: for now, one queue */ 2475*4bf8ce03SAdrian Chadd struct rge_queues *q = sc->sc_queues; 2476*4bf8ce03SAdrian Chadd struct mbuf *m; 2477*4bf8ce03SAdrian Chadd int ntx = 0; 2478*4bf8ce03SAdrian Chadd int idx, free, used; 2479*4bf8ce03SAdrian Chadd 2480*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_XMIT, "%s: running\n", __func__); 2481*4bf8ce03SAdrian Chadd 2482*4bf8ce03SAdrian Chadd RGE_LOCK(sc); 2483*4bf8ce03SAdrian Chadd sc->sc_drv_stats.tx_task_cnt++; 2484*4bf8ce03SAdrian Chadd 2485*4bf8ce03SAdrian Chadd if (sc->sc_stopped == true) { 2486*4bf8ce03SAdrian Chadd sc->sc_watchdog = 0; 2487*4bf8ce03SAdrian Chadd RGE_UNLOCK(sc); 2488*4bf8ce03SAdrian Chadd return; 2489*4bf8ce03SAdrian Chadd } 2490*4bf8ce03SAdrian Chadd 2491*4bf8ce03SAdrian Chadd /* Calculate free space. */ 2492*4bf8ce03SAdrian Chadd idx = q->q_tx.rge_txq_prodidx; 2493*4bf8ce03SAdrian Chadd free = q->q_tx.rge_txq_considx; 2494*4bf8ce03SAdrian Chadd if (free <= idx) 2495*4bf8ce03SAdrian Chadd free += RGE_TX_LIST_CNT; 2496*4bf8ce03SAdrian Chadd free -= idx; 2497*4bf8ce03SAdrian Chadd 2498*4bf8ce03SAdrian Chadd for (;;) { 2499*4bf8ce03SAdrian Chadd if (free < RGE_TX_NSEGS + 2) { 2500*4bf8ce03SAdrian Chadd break; 2501*4bf8ce03SAdrian Chadd } 2502*4bf8ce03SAdrian Chadd 2503*4bf8ce03SAdrian Chadd /* Dequeue */ 2504*4bf8ce03SAdrian Chadd m = mbufq_dequeue(&sc->sc_txq); 2505*4bf8ce03SAdrian Chadd if (m == NULL) 2506*4bf8ce03SAdrian Chadd break; 2507*4bf8ce03SAdrian Chadd 2508*4bf8ce03SAdrian Chadd /* Attempt to encap */ 2509*4bf8ce03SAdrian Chadd used = rge_encap(sc, q, m, idx); 2510*4bf8ce03SAdrian Chadd if (used < 0) { 2511*4bf8ce03SAdrian Chadd if_inc_counter(sc->sc_ifp, IFCOUNTER_OQDROPS, 1); 2512*4bf8ce03SAdrian Chadd m_freem(m); 2513*4bf8ce03SAdrian Chadd continue; 2514*4bf8ce03SAdrian Chadd } else if (used == 0) { 2515*4bf8ce03SAdrian Chadd mbufq_prepend(&sc->sc_txq, m); 2516*4bf8ce03SAdrian Chadd break; 2517*4bf8ce03SAdrian Chadd } 2518*4bf8ce03SAdrian Chadd 2519*4bf8ce03SAdrian Chadd /* 2520*4bf8ce03SAdrian Chadd * Note: mbuf is now owned by the tx ring, but we hold the 2521*4bf8ce03SAdrian Chadd * lock so it's safe to pass it up here to be copied without 2522*4bf8ce03SAdrian Chadd * worrying the TX task will run and dequeue/free it before 2523*4bf8ce03SAdrian Chadd * we get a shot at it. 2524*4bf8ce03SAdrian Chadd */ 2525*4bf8ce03SAdrian Chadd ETHER_BPF_MTAP(sc->sc_ifp, m); 2526*4bf8ce03SAdrian Chadd 2527*4bf8ce03SAdrian Chadd /* Update free/idx pointers */ 2528*4bf8ce03SAdrian Chadd free -= used; 2529*4bf8ce03SAdrian Chadd idx += used; 2530*4bf8ce03SAdrian Chadd if (idx >= RGE_TX_LIST_CNT) 2531*4bf8ce03SAdrian Chadd idx -= RGE_TX_LIST_CNT; 2532*4bf8ce03SAdrian Chadd 2533*4bf8ce03SAdrian Chadd ntx++; 2534*4bf8ce03SAdrian Chadd } 2535*4bf8ce03SAdrian Chadd 2536*4bf8ce03SAdrian Chadd /* Ok, did we queue anything? If so, poke the hardware */ 2537*4bf8ce03SAdrian Chadd if (ntx > 0) { 2538*4bf8ce03SAdrian Chadd q->q_tx.rge_txq_prodidx = idx; 2539*4bf8ce03SAdrian Chadd sc->sc_watchdog = 5; 2540*4bf8ce03SAdrian Chadd RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START); 2541*4bf8ce03SAdrian Chadd } 2542*4bf8ce03SAdrian Chadd 2543*4bf8ce03SAdrian Chadd RGE_DPRINTF(sc, RGE_DEBUG_XMIT, 2544*4bf8ce03SAdrian Chadd "%s: handled %d frames; prod=%d, cons=%d\n", __func__, 2545*4bf8ce03SAdrian Chadd ntx, q->q_tx.rge_txq_prodidx, q->q_tx.rge_txq_considx); 2546*4bf8ce03SAdrian Chadd 2547*4bf8ce03SAdrian Chadd RGE_UNLOCK(sc); 2548*4bf8ce03SAdrian Chadd } 2549*4bf8ce03SAdrian Chadd 2550*4bf8ce03SAdrian Chadd /** 2551*4bf8ce03SAdrian Chadd * @brief Called by the sc_timeout callout. 2552*4bf8ce03SAdrian Chadd * 2553*4bf8ce03SAdrian Chadd * This is called by the callout code with the driver lock held. 2554*4bf8ce03SAdrian Chadd */ 2555*4bf8ce03SAdrian Chadd void 2556*4bf8ce03SAdrian Chadd rge_tick(void *arg) 2557*4bf8ce03SAdrian Chadd { 2558*4bf8ce03SAdrian Chadd struct rge_softc *sc = arg; 2559*4bf8ce03SAdrian Chadd 2560*4bf8ce03SAdrian Chadd RGE_ASSERT_LOCKED(sc); 2561*4bf8ce03SAdrian Chadd 2562*4bf8ce03SAdrian Chadd rge_link_state(sc); 2563*4bf8ce03SAdrian Chadd 2564*4bf8ce03SAdrian Chadd /* 2565*4bf8ce03SAdrian Chadd * Since we don't have any other place yet to trigger/test this, 2566*4bf8ce03SAdrian Chadd * let's do it here every second and just bite the driver 2567*4bf8ce03SAdrian Chadd * blocking for a little bit whilst it happens. 2568*4bf8ce03SAdrian Chadd */ 2569*4bf8ce03SAdrian Chadd if ((if_getdrvflags(sc->sc_ifp) & IFF_DRV_RUNNING) != 0) 2570*4bf8ce03SAdrian Chadd rge_hw_mac_stats_fetch(sc, &sc->sc_mac_stats.lcl_stats); 2571*4bf8ce03SAdrian Chadd 2572*4bf8ce03SAdrian Chadd /* 2573*4bf8ce03SAdrian Chadd * Handle the TX watchdog. 2574*4bf8ce03SAdrian Chadd */ 2575*4bf8ce03SAdrian Chadd if (sc->sc_watchdog > 0) { 2576*4bf8ce03SAdrian Chadd sc->sc_watchdog--; 2577*4bf8ce03SAdrian Chadd if (sc->sc_watchdog == 0) { 2578*4bf8ce03SAdrian Chadd RGE_PRINT_ERROR(sc, "TX timeout (watchdog)\n"); 2579*4bf8ce03SAdrian Chadd rge_init_locked(sc); 2580*4bf8ce03SAdrian Chadd sc->sc_drv_stats.tx_watchdog_timeout_cnt++; 2581*4bf8ce03SAdrian Chadd } 2582*4bf8ce03SAdrian Chadd } 2583*4bf8ce03SAdrian Chadd 2584*4bf8ce03SAdrian Chadd callout_reset(&sc->sc_timeout, hz, rge_tick, sc); 2585*4bf8ce03SAdrian Chadd } 2586*4bf8ce03SAdrian Chadd 2587*4bf8ce03SAdrian Chadd /** 2588*4bf8ce03SAdrian Chadd * @brief process a link state change. 2589*4bf8ce03SAdrian Chadd * 2590*4bf8ce03SAdrian Chadd * Must be called with the driver lock held. 2591*4bf8ce03SAdrian Chadd */ 2592*4bf8ce03SAdrian Chadd void 2593*4bf8ce03SAdrian Chadd rge_link_state(struct rge_softc *sc) 2594*4bf8ce03SAdrian Chadd { 2595*4bf8ce03SAdrian Chadd int link = LINK_STATE_DOWN; 2596*4bf8ce03SAdrian Chadd 2597*4bf8ce03SAdrian Chadd RGE_ASSERT_LOCKED(sc); 2598*4bf8ce03SAdrian Chadd 2599*4bf8ce03SAdrian Chadd if (rge_get_link_status(sc)) 2600*4bf8ce03SAdrian Chadd link = LINK_STATE_UP; 2601*4bf8ce03SAdrian Chadd 2602*4bf8ce03SAdrian Chadd if (if_getlinkstate(sc->sc_ifp) != link) { 2603*4bf8ce03SAdrian Chadd sc->sc_drv_stats.link_state_change_cnt++; 2604*4bf8ce03SAdrian Chadd if_link_state_change(sc->sc_ifp, link); 2605*4bf8ce03SAdrian Chadd } 2606*4bf8ce03SAdrian Chadd } 2607*4bf8ce03SAdrian Chadd 2608*4bf8ce03SAdrian Chadd /** 2609*4bf8ce03SAdrian Chadd * @brief Suspend 2610*4bf8ce03SAdrian Chadd */ 2611*4bf8ce03SAdrian Chadd static int 2612*4bf8ce03SAdrian Chadd rge_suspend(device_t dev) 2613*4bf8ce03SAdrian Chadd { 2614*4bf8ce03SAdrian Chadd struct rge_softc *sc = device_get_softc(dev); 2615*4bf8ce03SAdrian Chadd 2616*4bf8ce03SAdrian Chadd RGE_LOCK(sc); 2617*4bf8ce03SAdrian Chadd rge_stop_locked(sc); 2618*4bf8ce03SAdrian Chadd /* TODO: wake on lan */ 2619*4bf8ce03SAdrian Chadd sc->sc_suspended = true; 2620*4bf8ce03SAdrian Chadd RGE_UNLOCK(sc); 2621*4bf8ce03SAdrian Chadd 2622*4bf8ce03SAdrian Chadd return (0); 2623*4bf8ce03SAdrian Chadd } 2624*4bf8ce03SAdrian Chadd 2625*4bf8ce03SAdrian Chadd /** 2626*4bf8ce03SAdrian Chadd * @brief Resume 2627*4bf8ce03SAdrian Chadd */ 2628*4bf8ce03SAdrian Chadd static int 2629*4bf8ce03SAdrian Chadd rge_resume(device_t dev) 2630*4bf8ce03SAdrian Chadd { 2631*4bf8ce03SAdrian Chadd struct rge_softc *sc = device_get_softc(dev); 2632*4bf8ce03SAdrian Chadd 2633*4bf8ce03SAdrian Chadd RGE_LOCK(sc); 2634*4bf8ce03SAdrian Chadd /* TODO: wake on lan */ 2635*4bf8ce03SAdrian Chadd 2636*4bf8ce03SAdrian Chadd /* reinit if required */ 2637*4bf8ce03SAdrian Chadd if (if_getflags(sc->sc_ifp) & IFF_UP) 2638*4bf8ce03SAdrian Chadd rge_init_locked(sc); 2639*4bf8ce03SAdrian Chadd 2640*4bf8ce03SAdrian Chadd sc->sc_suspended = false; 2641*4bf8ce03SAdrian Chadd 2642*4bf8ce03SAdrian Chadd RGE_UNLOCK(sc); 2643*4bf8ce03SAdrian Chadd 2644*4bf8ce03SAdrian Chadd return (0); 2645*4bf8ce03SAdrian Chadd } 2646*4bf8ce03SAdrian Chadd 2647*4bf8ce03SAdrian Chadd /** 2648*4bf8ce03SAdrian Chadd * @brief Shutdown the driver during shutdown 2649*4bf8ce03SAdrian Chadd */ 2650*4bf8ce03SAdrian Chadd static int 2651*4bf8ce03SAdrian Chadd rge_shutdown(device_t dev) 2652*4bf8ce03SAdrian Chadd { 2653*4bf8ce03SAdrian Chadd struct rge_softc *sc = device_get_softc(dev); 2654*4bf8ce03SAdrian Chadd 2655*4bf8ce03SAdrian Chadd RGE_LOCK(sc); 2656*4bf8ce03SAdrian Chadd rge_stop_locked(sc); 2657*4bf8ce03SAdrian Chadd RGE_UNLOCK(sc); 2658*4bf8ce03SAdrian Chadd 2659*4bf8ce03SAdrian Chadd return (0); 2660*4bf8ce03SAdrian Chadd } 2661*4bf8ce03SAdrian Chadd 2662*4bf8ce03SAdrian Chadd static device_method_t rge_methods[] = { 2663*4bf8ce03SAdrian Chadd DEVMETHOD(device_probe, rge_probe), 2664*4bf8ce03SAdrian Chadd DEVMETHOD(device_attach, rge_attach), 2665*4bf8ce03SAdrian Chadd DEVMETHOD(device_detach, rge_detach), 2666*4bf8ce03SAdrian Chadd 2667*4bf8ce03SAdrian Chadd DEVMETHOD(device_suspend, rge_suspend), 2668*4bf8ce03SAdrian Chadd DEVMETHOD(device_resume, rge_resume), 2669*4bf8ce03SAdrian Chadd DEVMETHOD(device_shutdown, rge_shutdown), 2670*4bf8ce03SAdrian Chadd 2671*4bf8ce03SAdrian Chadd DEVMETHOD_END 2672*4bf8ce03SAdrian Chadd }; 2673*4bf8ce03SAdrian Chadd 2674*4bf8ce03SAdrian Chadd static driver_t rge_driver = { 2675*4bf8ce03SAdrian Chadd "rge", 2676*4bf8ce03SAdrian Chadd rge_methods, 2677*4bf8ce03SAdrian Chadd sizeof(struct rge_softc) 2678*4bf8ce03SAdrian Chadd }; 2679*4bf8ce03SAdrian Chadd 2680*4bf8ce03SAdrian Chadd MODULE_DEPEND(rge, pci, 1, 1, 1); 2681*4bf8ce03SAdrian Chadd MODULE_DEPEND(rge, ether, 1, 1, 1); 2682*4bf8ce03SAdrian Chadd 2683*4bf8ce03SAdrian Chadd DRIVER_MODULE_ORDERED(rge, pci, rge_driver, NULL, NULL, SI_ORDER_ANY); 2684