/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019, 2020, 2023-2025 Kevin Lo <kevlo@openbsd.org>
 * Copyright (c) 2025 Adrian Chadd <adrian@FreeBSD.org>
 *
 * Hardware programming portions from Realtek Semiconductor.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*	$OpenBSD: if_rge.c,v 1.38 2025/09/19 00:41:14 kevlo Exp $	*/

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/endian.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_media.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/bus.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/kernel.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include "if_rge_vendor.h"
#include "if_rgereg.h"
#include "if_rgevar.h"
#include "if_rge_hw.h"
#include "if_rge_microcode.h"
#include "if_rge_debug.h"
#include "if_rge_sysctl.h"
#include "if_rge_stats.h"

#define	RGE_CSUM_FEATURES		(CSUM_IP | CSUM_TCP | CSUM_UDP)

static int		rge_attach(device_t);
static int		rge_detach(device_t);

#if 0
int		rge_activate(struct device *, int);
#endif
static void	rge_intr_msi(void *);
static int	rge_ioctl(struct ifnet *, u_long, caddr_t);
static int	rge_transmit_if(if_t, struct mbuf *);
static void	rge_qflush_if(if_t);
static void	rge_init_if(void *);
static void	rge_init_locked(struct rge_softc *);
static void	rge_stop_locked(struct rge_softc *);
static int	rge_ifmedia_upd(if_t);
static void	rge_ifmedia_sts(if_t, struct ifmediareq *);
static int	rge_allocmem(struct rge_softc *);
static int	rge_alloc_stats_mem(struct rge_softc *);
static int	rge_freemem(struct rge_softc *);
static int	rge_free_stats_mem(struct rge_softc *);
static int	rge_newbuf(struct rge_queues *);
static void	rge_rx_list_init(struct rge_queues *);
static void	rge_tx_list_init(struct rge_queues *);
static void	rge_fill_rx_ring(struct rge_queues *);
static int	rge_rxeof(struct rge_queues *, struct mbufq *);
static int	rge_txeof(struct rge_queues *);
static void	rge_iff_locked(struct rge_softc *);
static void	rge_add_media_types(struct rge_softc *);
static void	rge_tx_task(void *, int);
static void	rge_txq_flush_mbufs(struct rge_softc *sc);
static void	rge_tick(void *);
static void	rge_link_state(struct rge_softc *);
#if 0
#ifndef SMALL_KERNEL
int		rge_wol(struct ifnet *, int);
void		rge_wol_power(struct rge_softc *);
#endif
#endif

struct rge_matchid {
	uint16_t vendor;
	uint16_t device;
	const char *name;
};

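/*
 * PCI devices recognised by this driver.  The table is terminated by an
 * all-zero sentinel entry; rge_probe() walks it to match vendor/device IDs.
 */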
const struct rge_matchid rge_devices[] = {
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_E3000, "Killer E3000" },
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8125, "RTL8125" },
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8126, "RTL8126" },
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RTL8127, "RTL8127" },
	{ 0, 0, NULL }
};

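/**
 * @brief Probe for a supported Realtek ethernet controller.
 *
 * Matches the PCI vendor/device ID against the rge_devices table and
 * sets the device description on a match.
 */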
static int
rge_probe(device_t dev)
{
	uint16_t vendor, device;
	const struct rge_matchid *ri;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);

	for (ri = rge_devices; ri->name != NULL; ri++) {
		if ((vendor == ri->vendor) && (device == ri->device)) {
			device_set_desc(dev, ri->name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

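/**
 * @brief Set up and attach the network interface.
 *
 * Configures the ifnet methods, checksum/VLAN capabilities and send
 * queue, then calls ether_ifattach() with the given MAC address.
 */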
static void
rge_attach_if(struct rge_softc *sc, const char *eaddr)
{
	if_initname(sc->sc_ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	if_setdev(sc->sc_ifp, sc->sc_dev);
	if_setinitfn(sc->sc_ifp, rge_init_if);
	if_setsoftc(sc->sc_ifp, sc);
	if_setflags(sc->sc_ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(sc->sc_ifp, rge_ioctl);
	if_settransmitfn(sc->sc_ifp, rge_transmit_if);
	if_setqflushfn(sc->sc_ifp, rge_qflush_if);

	/* Set offload as appropriate */
	if_sethwassist(sc->sc_ifp, CSUM_IP | CSUM_TCP | CSUM_UDP);
	if_setcapabilities(sc->sc_ifp, IFCAP_HWCSUM);
	if_setcapenable(sc->sc_ifp, if_getcapabilities(sc->sc_ifp));

	/* TODO: set WOL */

	/* Attach interface */
	ether_ifattach(sc->sc_ifp, eaddr);
	sc->sc_ether_attached = true;

	/* post ether_ifattach() bits */

	/* VLAN capabilities */
	if_setcapabilitiesbit(sc->sc_ifp, IFCAP_VLAN_MTU |
	    IFCAP_VLAN_HWTAGGING, 0);
	if_setcapabilitiesbit(sc->sc_ifp, IFCAP_VLAN_HWCSUM, 0);
	if_setcapenable(sc->sc_ifp, if_getcapabilities(sc->sc_ifp));

	if_setifheaderlen(sc->sc_ifp, sizeof(struct ether_vlan_header));

	/* TODO: is this needed for iftransmit? */
	if_setsendqlen(sc->sc_ifp, RGE_TX_LIST_CNT - 1);
	if_setsendqready(sc->sc_ifp);
}

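/**
 * @brief Attach the device: map registers, set up interrupts, DMA and ifnet.
 *
 * On any failure this falls through to rge_detach() to unwind whatever
 * was allocated so far, and returns ENXIO.
 */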
static int
rge_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct rge_softc *sc;
	struct rge_queues *q;
	uint32_t hwrev, reg;
	int i, rid;
	int error;
	int msic;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_ifp = if_gethandle(IFT_ETHER);
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Map control/status registers.
	 */

	/*
	 * The openbsd driver (and my E3000 NIC) handle registering three
	 * kinds of BARs - a 64 bit MMIO BAR, a 32 bit MMIO BAR, and then
	 * a legacy IO port BAR.
	 *
	 * To simplify bring-up, I'm going to request resources for the first
	 * MMIO BAR (BAR2) which should be a 32 bit BAR.
	 */
	rid = PCIR_BAR(2);
	sc->sc_bres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_bres == NULL) {
		RGE_PRINT_ERROR(sc,
		    "Unable to allocate bus resource: memory\n");
		goto fail;
	}
	sc->rge_bhandle = rman_get_bushandle(sc->sc_bres);
	sc->rge_btag = rman_get_bustag(sc->sc_bres);
	sc->rge_bsize = rman_get_size(sc->sc_bres);

	q = malloc(sizeof(struct rge_queues), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (q == NULL) {
		RGE_PRINT_ERROR(sc, "Unable to malloc rge_queues memory\n");
		goto fail;
	}
	q->q_sc = sc;
	q->q_index = 0;

	sc->sc_queues = q;
	sc->sc_nqueues = 1;

	/* Check if PCIe */
	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
		sc->rge_flags |= RGE_FLAG_PCIE;
		sc->sc_expcap = reg;
	}

	/* Allocate MSI */
	msic = pci_msi_count(dev);
	if (msic == 0) {
		RGE_PRINT_ERROR(sc, "%s: only MSI interrupts supported\n",
		    __func__);
		goto fail;
	}

	msic = RGE_MSI_MESSAGES;
	if (pci_alloc_msi(dev, &msic) != 0) {
		RGE_PRINT_ERROR(sc, "%s: failed to allocate MSI\n",
		    __func__);
		goto fail;
	}

	sc->rge_flags |= RGE_FLAG_MSI;

	/* We need at least one MSI */
	if (msic < RGE_MSI_MESSAGES) {
		RGE_PRINT_ERROR(sc, "%s: didn't allocate enough MSI\n",
		    __func__);
		goto fail;
	}

	/*
	 * Allocate interrupt entries.
	 */
	for (i = 0, rid = 1; i < RGE_MSI_MESSAGES; i++, rid++) {
		sc->sc_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &rid, RF_ACTIVE);
		if (sc->sc_irq[i] == NULL) {
			RGE_PRINT_ERROR(sc, "%s: couldn't allocate MSI %d\n",
			    __func__, rid);
			goto fail;
		}
	}

	/* Hook interrupts */
	for (i = 0; i < RGE_MSI_MESSAGES; i++) {
		error = bus_setup_intr(dev, sc->sc_irq[i],
		    INTR_TYPE_NET | INTR_MPSAFE, NULL, rge_intr_msi,
		    sc, &sc->sc_ih[i]);
		if (error != 0) {
			RGE_PRINT_ERROR(sc,
			    "%s: couldn't setup intr %d (error %d)\n", __func__,
			    i, error);
			goto fail;
		}
	}

	/* Allocate top level bus DMA tag */
	error = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, /* alignment */
	    0, /* boundary */
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, /* filter (unused) */
	    BUS_SPACE_MAXADDR, /* maxsize */
	    BUS_SPACE_UNRESTRICTED, /* nsegments */
	    BUS_SPACE_MAXADDR, /* maxsegsize */
	    0, /* flags */
	    NULL, NULL, /* lockfunc, lockarg */
	    &sc->sc_dmat);
	if (error) {
		RGE_PRINT_ERROR(sc,
		    "couldn't allocate device DMA tag (error %d)\n", error);
		goto fail;
	}

	/* Allocate TX/RX descriptor and buffer tags */
	error = bus_dma_tag_create(sc->sc_dmat,
	    RGE_ALIGN, /* alignment */
	    0, /* boundary */
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, /* filter (unused) */
	    RGE_TX_LIST_SZ, /* maxsize */
	    1, /* nsegments */
	    RGE_TX_LIST_SZ, /* maxsegsize */
	    0, /* flags */
	    NULL, NULL, /* lockfunc, lockarg */
	    &sc->sc_dmat_tx_desc);
	if (error) {
		RGE_PRINT_ERROR(sc,
		    "couldn't allocate device TX descriptor "
		    "DMA tag (error %d)\n", error);
		goto fail;
	}

	error = bus_dma_tag_create(sc->sc_dmat,
	    1, /* alignment */
	    0, /* boundary */
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, /* filter (unused) */
	    RGE_JUMBO_FRAMELEN, /* maxsize */
	    RGE_TX_NSEGS, /* nsegments */
	    RGE_JUMBO_FRAMELEN, /* maxsegsize */
	    0, /* flags */
	    NULL, NULL, /* lockfunc, lockarg */
	    &sc->sc_dmat_tx_buf);
	if (error) {
		RGE_PRINT_ERROR(sc,
		    "couldn't allocate device TX buffer DMA tag (error %d)\n",
		    error);
		goto fail;
	}

	error = bus_dma_tag_create(sc->sc_dmat,
	    RGE_ALIGN, /* alignment */
	    0, /* boundary */
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, /* filter (unused) */
	    RGE_RX_LIST_SZ, /* maxsize */
	    1, /* nsegments */
	    RGE_RX_LIST_SZ, /* maxsegsize */
	    0, /* flags */
	    NULL, NULL, /* lockfunc, lockarg */
	    &sc->sc_dmat_rx_desc);
	if (error) {
		RGE_PRINT_ERROR(sc,
		    "couldn't allocate device RX descriptor "
		    "DMA tag (error %d)\n", error);
		goto fail;
	}

	error = bus_dma_tag_create(sc->sc_dmat,
	    1, /* alignment */
	    0, /* boundary */
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, /* filter (unused) */
	    MCLBYTES, /* maxsize */
	    1, /* nsegments */
	    MCLBYTES, /* maxsegsize */
	    0, /* flags */
	    NULL, NULL, /* lockfunc, lockarg */
	    &sc->sc_dmat_rx_buf);
	if (error) {
		RGE_PRINT_ERROR(sc,
		    "couldn't allocate device RX buffer DMA tag (error %d)\n",
		    error);
		goto fail;
	}

	error = bus_dma_tag_create(sc->sc_dmat,
	    RGE_STATS_ALIGNMENT, /* alignment */
	    0, /* boundary */
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, /* filter (unused) */
	    RGE_STATS_BUF_SIZE, /* maxsize */
	    1, /* nsegments */
	    RGE_STATS_BUF_SIZE, /* maxsegsize */
	    0, /* flags */
	    NULL, NULL, /* lockfunc, lockarg */
	    &sc->sc_dmat_stats_buf);
	if (error) {
		RGE_PRINT_ERROR(sc,
		    "couldn't allocate device stats buffer DMA tag (error %d)\n",
		    error);
		goto fail;
	}


	/* Attach sysctl nodes */
	rge_sysctl_attach(sc);

	/* Determine hardware revision */
	hwrev = RGE_READ_4(sc, RGE_TXCFG) & RGE_TXCFG_HWREV;
	switch (hwrev) {
	case 0x60900000:
		sc->rge_type = MAC_R25;
//		device_printf(dev, "RTL8125\n");
		break;
	case 0x64100000:
		sc->rge_type = MAC_R25B;
//		device_printf(dev, "RTL8125B\n");
		break;
	case 0x64900000:
		sc->rge_type = MAC_R26;
//		device_printf(dev, "RTL8126\n");
		break;
	case 0x68800000:
		sc->rge_type = MAC_R25D;
//		device_printf(dev, "RTL8125D\n");
		break;
	case 0x6c900000:
		sc->rge_type = MAC_R27;
//		device_printf(dev, "RTL8127\n");
		break;
	default:
		RGE_PRINT_ERROR(sc, "unknown version 0x%08x\n", hwrev);
		goto fail;
	}

	rge_config_imtype(sc, RGE_IMTYPE_SIM);

	/* TODO: disable ASPM/ECPM? */

#if 0
	/*
	 * PCI Express check.
	 */
	if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_PCIEXPRESS,
	    &offset, NULL)) {
		/* Disable PCIe ASPM and ECPM. */
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    offset + PCI_PCIE_LCSR);
		reg &= ~(PCI_PCIE_LCSR_ASPM_L0S | PCI_PCIE_LCSR_ASPM_L1 |
		    PCI_PCIE_LCSR_ECPM);
		pci_conf_write(pa->pa_pc, pa->pa_tag, offset + PCI_PCIE_LCSR,
		    reg);
	}
#endif

	RGE_LOCK(sc);
	if (rge_chipinit(sc)) {
		RGE_UNLOCK(sc);
		goto fail;
	}

	rge_get_macaddr(sc, eaddr);
	RGE_UNLOCK(sc);

	if (rge_allocmem(sc))
		goto fail;
	if (rge_alloc_stats_mem(sc))
		goto fail;

	/* Initialize ifmedia structures. */
	ifmedia_init(&sc->sc_media, IFM_IMASK, rge_ifmedia_upd,
	    rge_ifmedia_sts);
	rge_add_media_types(sc);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
	sc->sc_media.ifm_media = sc->sc_media.ifm_cur->ifm_media;

	rge_attach_if(sc, eaddr);

	/*
	 * TODO: technically should be per txq but we only support
	 * one TXQ at the moment.
	 */
	mbufq_init(&sc->sc_txq, RGE_TX_LIST_CNT);

	snprintf(sc->sc_tq_name, sizeof(sc->sc_tq_name),
	    "%s taskq", device_get_nameunit(sc->sc_dev));
	snprintf(sc->sc_tq_thr_name, sizeof(sc->sc_tq_thr_name),
	    "%s taskq thread", device_get_nameunit(sc->sc_dev));

	sc->sc_tq = taskqueue_create(sc->sc_tq_name, M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s",
	    sc->sc_tq_thr_name);

	TASK_INIT(&sc->sc_tx_task, 0, rge_tx_task, sc);

	callout_init_mtx(&sc->sc_timeout, &sc->sc_mtx, 0);

	return (0);
fail:
	rge_detach(dev);
	return (ENXIO);
}

/**
 * @brief flush the mbufq queue
 *
 * Again this should likely be per-TXQ.
 *
 * This should be called with the driver lock held.
 */
static void
rge_txq_flush_mbufs(struct rge_softc *sc)
{
	struct mbuf *m;
	int ntx = 0;

	RGE_ASSERT_LOCKED(sc);

	while ((m = mbufq_dequeue(&sc->sc_txq)) != NULL) {
		m_freem(m);
		ntx++;
	}

	RGE_DPRINTF(sc, RGE_DEBUG_XMIT, "%s: %d frames flushed\n", __func__,
	    ntx);
}

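/**
 * @brief Detach the device and free all resources.
 *
 * Also used as the error unwind path from rge_attach(), so each
 * teardown step checks whether the resource was actually allocated.
 */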
static int
rge_detach(device_t dev)
{
	struct rge_softc *sc = device_get_softc(dev);
	int i, rid;

	/* global flag, detaching */
	RGE_LOCK(sc);
	sc->sc_stopped = true;
	sc->sc_detaching = true;
	RGE_UNLOCK(sc);

	/* stop/drain network interface */
	callout_drain(&sc->sc_timeout);

	/* Make sure TX task isn't running */
	if (sc->sc_tq != NULL) {
		while (taskqueue_cancel(sc->sc_tq, &sc->sc_tx_task, NULL) != 0)
			taskqueue_drain(sc->sc_tq, &sc->sc_tx_task);
	}

	RGE_LOCK(sc);
	callout_stop(&sc->sc_timeout);

	/* stop NIC / DMA */
	rge_stop_locked(sc);

	/* TODO: wait for completion */

	/* Free pending TX mbufs */
	rge_txq_flush_mbufs(sc);

	RGE_UNLOCK(sc);

	/* Free taskqueue */
	if (sc->sc_tq != NULL) {
		taskqueue_free(sc->sc_tq);
		sc->sc_tq = NULL;
	}

	/* Free descriptor memory */
	RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: freemem\n", __func__);
	rge_freemem(sc);
	rge_free_stats_mem(sc);

	if (sc->sc_ifp) {
		RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: ifdetach/if_free\n",
		    __func__);
		if (sc->sc_ether_attached)
			ether_ifdetach(sc->sc_ifp);
		if_free(sc->sc_ifp);
	}

	RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: sc_dmat_tx_desc\n", __func__);
	if (sc->sc_dmat_tx_desc)
		bus_dma_tag_destroy(sc->sc_dmat_tx_desc);
	RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: sc_dmat_tx_buf\n", __func__);
	if (sc->sc_dmat_tx_buf)
		bus_dma_tag_destroy(sc->sc_dmat_tx_buf);
	RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: sc_dmat_rx_desc\n", __func__);
	if (sc->sc_dmat_rx_desc)
		bus_dma_tag_destroy(sc->sc_dmat_rx_desc);
	RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: sc_dmat_rx_buf\n", __func__);
	if (sc->sc_dmat_rx_buf)
		bus_dma_tag_destroy(sc->sc_dmat_rx_buf);
	RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: sc_dmat_stats_buf\n", __func__);
	if (sc->sc_dmat_stats_buf)
		bus_dma_tag_destroy(sc->sc_dmat_stats_buf);
	RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: sc_dmat\n", __func__);
	if (sc->sc_dmat)
		bus_dma_tag_destroy(sc->sc_dmat);

	/* Teardown interrupts */
	for (i = 0; i < RGE_MSI_MESSAGES; i++) {
		if (sc->sc_ih[i] != NULL) {
			bus_teardown_intr(sc->sc_dev, sc->sc_irq[i],
			    sc->sc_ih[i]);
			sc->sc_ih[i] = NULL;
		}
	}

	/* Free interrupt resources */
	for (i = 0, rid = 1; i < RGE_MSI_MESSAGES; i++, rid++) {
		if (sc->sc_irq[i] != NULL) {
			bus_release_resource(sc->sc_dev, SYS_RES_IRQ,
			    rid, sc->sc_irq[i]);
			sc->sc_irq[i] = NULL;
		}
	}

	/* Free MSI allocation */
	if (sc->rge_flags & RGE_FLAG_MSI)
		pci_release_msi(dev);

	if (sc->sc_bres) {
		RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: release mmio\n",
		    __func__);
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->sc_bres), sc->sc_bres);
		sc->sc_bres = NULL;
	}

	if (sc->sc_queues) {
		free(sc->sc_queues, M_DEVBUF);
		sc->sc_queues = NULL;
	}

	mtx_destroy(&sc->sc_mtx);

	return (0);
}

#if 0

int
rge_activate(struct device *self, int act)
{
#ifndef SMALL_KERNEL
	struct rge_softc *sc = (struct rge_softc *)self;
#endif

	switch (act) {
	case DVACT_POWERDOWN:
#ifndef SMALL_KERNEL
		rge_wol_power(sc);
#endif
		break;
	}
	return (0);
}
#endif

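/**
 * @brief MSI interrupt handler.
 *
 * Reads and acknowledges the interrupt status, completes TX/RX work and
 * re-arms the interrupt mask.  Received frames are gathered on a local
 * mbuf queue and only handed to the network stack once the driver lock
 * has been dropped.
 */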
static void
rge_intr_msi(void *arg)
{
	struct mbufq rx_mq;
	struct epoch_tracker et;
	struct mbuf *m;
	struct rge_softc *sc = arg;
	struct rge_queues *q = sc->sc_queues;
	uint32_t status;
	int claimed = 0, rv;

	sc->sc_drv_stats.intr_cnt++;

	mbufq_init(&rx_mq, RGE_RX_LIST_CNT);

	if ((if_getdrvflags(sc->sc_ifp) & IFF_DRV_RUNNING) == 0)
		return;

	RGE_LOCK(sc);

	if (sc->sc_suspended || sc->sc_stopped || sc->sc_detaching) {
		RGE_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	RGE_WRITE_4(sc, RGE_IMR, 0);

	if (!(sc->rge_flags & RGE_FLAG_MSI)) {
		if ((RGE_READ_4(sc, RGE_ISR) & sc->rge_intrs) == 0)
			goto done;
	}

	status = RGE_READ_4(sc, RGE_ISR);
	if (status)
		RGE_WRITE_4(sc, RGE_ISR, status);

	if (status & RGE_ISR_PCS_TIMEOUT)
		claimed = 1;

	rv = 0;
	if (status & sc->rge_intrs) {

		(void) q;
		rv |= rge_rxeof(q, &rx_mq);
		rv |= rge_txeof(q);

		if (status & RGE_ISR_SYSTEM_ERR) {
			sc->sc_drv_stats.intr_system_err_cnt++;
			rge_init_locked(sc);
		}
		claimed = 1;
	}

	if (sc->rge_timerintr) {
		if (!rv) {
			/*
			 * Nothing needs to be processed, fallback
			 * to use TX/RX interrupts.
			 */
			rge_setup_intr(sc, RGE_IMTYPE_NONE);

			/*
			 * Recollect, mainly to avoid the possible
			 * race introduced by changing interrupt
			 * masks.
			 */
			rge_rxeof(q, &rx_mq);
			rge_txeof(q);
		} else
			RGE_WRITE_4(sc, RGE_TIMERCNT, 1);
	} else if (rv) {
		/*
		 * Assume that using simulated interrupt moderation
		 * (hardware timer based) could reduce the interrupt
		 * rate.
		 */
		rge_setup_intr(sc, RGE_IMTYPE_SIM);
	}

	RGE_WRITE_4(sc, RGE_IMR, sc->rge_intrs);

done:
	RGE_UNLOCK(sc);

	NET_EPOCH_ENTER(et);
	/* Handle any RX frames, outside of the driver lock */
	while ((m = mbufq_dequeue(&rx_mq)) != NULL) {
		sc->sc_drv_stats.recv_input_cnt++;
		if_input(sc->sc_ifp, m);
	}
	NET_EPOCH_EXIT(et);

	(void) claimed;
}

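/*
 * Sync the TX descriptor ring for the given queue.  The idx/len arguments
 * describe the descriptor range of interest, but for now the whole
 * descriptor map is synced regardless.
 */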
static inline void
rge_tx_list_sync(struct rge_softc *sc, struct rge_queues *q,
    unsigned int idx, unsigned int len, int ops)
{
	bus_dmamap_sync(sc->sc_dmat_tx_desc, q->q_tx.rge_tx_list_map, ops);
}

/**
 * @brief Queue the given mbuf at the given TX slot index for transmit.
 *
 * If the frame couldn't be enqueued then 0 is returned.
 * The caller needs to handle that and free/re-queue the mbuf as required.
 *
 * Note that this doesn't actually kick-start the transmit itself;
 * see rge_txstart() for the register to poke to start transmit.
 *
 * This must be called with the driver lock held.
 *
 * @param sc	driver softc
 * @param q	TX queue ring
 * @param m	mbuf to enqueue
 * @returns	if the mbuf is enqueued, it's consumed here and the number of
 * 		TX descriptors used is returned; if there's no space then 0 is
 *		returned; if the mbuf couldn't be defragged and the caller
 *		should free it then -1 is returned.
 */
static int
rge_encap(struct rge_softc *sc, struct rge_queues *q, struct mbuf *m, int idx)
{
	struct rge_tx_desc *d = NULL;
	struct rge_txq *txq;
	bus_dmamap_t txmap;
	uint32_t cmdsts, cflags = 0;
	int cur, error, i;
	bus_dma_segment_t seg[RGE_TX_NSEGS];
	int nsegs;

	RGE_ASSERT_LOCKED(sc);

	txq = &q->q_tx.rge_txq[idx];
	txmap = txq->txq_dmamap;

	sc->sc_drv_stats.tx_encap_cnt++;

	nsegs = RGE_TX_NSEGS;
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat_tx_buf, txmap, m,
	    seg, &nsegs, BUS_DMA_NOWAIT);

	switch (error) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		sc->sc_drv_stats.tx_encap_refrag_cnt++;
		nsegs = RGE_TX_NSEGS;
		/* FreeBSD's m_defrag() returns the new chain, or NULL. */
		if ((m = m_defrag(m, M_NOWAIT)) != NULL &&
		    bus_dmamap_load_mbuf_sg(sc->sc_dmat_tx_buf, txmap, m,
		    seg, &nsegs, BUS_DMA_NOWAIT) == 0)
			break;
		/* FALLTHROUGH */
	default:
		sc->sc_drv_stats.tx_encap_err_toofrag++;
		return (-1);
	}

	bus_dmamap_sync(sc->sc_dmat_tx_buf, txmap, BUS_DMASYNC_PREWRITE);

	/*
	 * Set RGE_TDEXTSTS_IPCSUM if any checksum offloading is requested.
	 * Otherwise, RGE_TDEXTSTS_TCPCSUM / RGE_TDEXTSTS_UDPCSUM does not
	 * take effect.
	 */
	if ((m->m_pkthdr.csum_flags & RGE_CSUM_FEATURES) != 0) {
		cflags |= RGE_TDEXTSTS_IPCSUM;
		sc->sc_drv_stats.tx_offload_ip_csum_set++;
		if (m->m_pkthdr.csum_flags & CSUM_TCP) {
			sc->sc_drv_stats.tx_offload_tcp_csum_set++;
			cflags |= RGE_TDEXTSTS_TCPCSUM;
		}
		if (m->m_pkthdr.csum_flags & CSUM_UDP) {
			sc->sc_drv_stats.tx_offload_udp_csum_set++;
			cflags |= RGE_TDEXTSTS_UDPCSUM;
		}
	}

	/* Set up hardware VLAN tagging */
	if (m->m_flags & M_VLANTAG) {
		sc->sc_drv_stats.tx_offload_vlan_tag_set++;
		cflags |= htole16(m->m_pkthdr.ether_vtag) | RGE_TDEXTSTS_VTAG;
	}

	cur = idx;
	for (i = 1; i < nsegs; i++) {
		cur = RGE_NEXT_TX_DESC(cur);

		cmdsts = RGE_TDCMDSTS_OWN;
		cmdsts |= seg[i].ds_len;

		if (cur == RGE_TX_LIST_CNT - 1)
			cmdsts |= RGE_TDCMDSTS_EOR;
		if (i == nsegs - 1)
			cmdsts |= RGE_TDCMDSTS_EOF;

		/*
		 * Note: vendor driver puts wmb() after opts2/extsts,
		 * before opts1/status.
		 *
		 * See the other place I have this comment for more
		 * information.
		 */
		d = &q->q_tx.rge_tx_list[cur];
		d->rge_addr = htole64(seg[i].ds_addr);
		d->rge_extsts = htole32(cflags);
		wmb();
		d->rge_cmdsts = htole32(cmdsts);
	}

	/* Update info of TX queue and descriptors. */
	txq->txq_mbuf = m;
	txq->txq_descidx = cur;

	cmdsts = RGE_TDCMDSTS_SOF;
	cmdsts |= seg[0].ds_len;

	if (idx == RGE_TX_LIST_CNT - 1)
		cmdsts |= RGE_TDCMDSTS_EOR;
	if (nsegs == 1)
		cmdsts |= RGE_TDCMDSTS_EOF;

	/*
	 * Note: vendor driver puts wmb() after opts2/extsts,
	 * before opts1/status.
	 *
	 * It does this:
	 * - set rge_addr
	 * - set extsts
	 * - wmb
	 * - set status - at this point it's owned by the hardware
	 *
	 */
	d = &q->q_tx.rge_tx_list[idx];
	d->rge_addr = htole64(seg[0].ds_addr);
	d->rge_extsts = htole32(cflags);
	wmb();
	d->rge_cmdsts = htole32(cmdsts);
	wmb();

	if (cur >= idx) {
		rge_tx_list_sync(sc, q, idx, nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	} else {
		rge_tx_list_sync(sc, q, idx, RGE_TX_LIST_CNT - idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		rge_tx_list_sync(sc, q, 0, cur + 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	/* Transfer ownership of packet to the chip. */
	cmdsts |= RGE_TDCMDSTS_OWN;
	rge_tx_list_sync(sc, q, idx, 1, BUS_DMASYNC_POSTWRITE);
	d->rge_cmdsts = htole32(cmdsts);
	rge_tx_list_sync(sc, q, idx, 1, BUS_DMASYNC_PREWRITE);
	wmb();

	return (nsegs);
}

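/**
 * @brief Handle interface ioctls (MTU, flags, multicast, media, capabilities).
 *
 * Anything not handled here is passed through to ether_ioctl().
 */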
static int
rge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct rge_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		/* Note: no hardware reinit is required */
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > RGE_JUMBO_MTU) {
			error = EINVAL;
			break;
		}
		if (if_getmtu(ifp) != ifr->ifr_mtu)
			if_setmtu(ifp, ifr->ifr_mtu);

		VLAN_CAPABILITIES(ifp);
		break;

	case SIOCSIFFLAGS:
		RGE_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
				/*
				 * TODO: handle promisc/iffmulti changing
				 * without reprogramming everything.
				 */
				rge_init_locked(sc);
			} else {
				/* Reinit promisc/multi just in case */
				rge_iff_locked(sc);
			}
		} else {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				rge_stop_locked(sc);
			}
		}
		RGE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		RGE_LOCK(sc);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			rge_iff_locked(sc);
		}
		RGE_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;
	case SIOCSIFCAP:
		{
			int mask;
			bool reinit = false;

			/* Get the mask of changed bits */
			mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);

			/*
			 * Locked so we don't have a narrow window where frames
			 * are being processed with the updated flags but the
			 * hardware configuration hasn't yet changed.
			 */
			RGE_LOCK(sc);

			if ((mask & IFCAP_TXCSUM) != 0 &&
			    (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
				if_togglecapenable(ifp, IFCAP_TXCSUM);
				if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
					if_sethwassistbits(ifp, RGE_CSUM_FEATURES, 0);
				else
					if_sethwassistbits(ifp, 0, RGE_CSUM_FEATURES);
				reinit = true;
			}

			if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
			    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
				if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
				reinit = true;
			}

			/* TODO: WOL */

			if ((mask & IFCAP_RXCSUM) != 0 &&
			    (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) {
				if_togglecapenable(ifp, IFCAP_RXCSUM);
				reinit = true;
			}

			if (reinit && if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				rge_init_locked(sc);
			}

			RGE_UNLOCK(sc);
			VLAN_CAPABILITIES(ifp);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

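/**
 * @brief if_qflush method - drop any frames queued for transmit.
 */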
static void
rge_qflush_if(if_t ifp)
{
	struct rge_softc *sc = if_getsoftc(ifp);

	/* TODO: this should iterate over the TXQs */
	RGE_LOCK(sc);
	rge_txq_flush_mbufs(sc);
	RGE_UNLOCK(sc);
}

/**
 * @brief Transmit the given frame to the hardware.
 *
 * This routine is called by the network stack to send
 * a frame to the device.
 *
 * For now the frame is placed on a single driver-wide mbuf
 * queue and the TX task is scheduled to dispatch it to the
 * hardware.
 */
static int
rge_transmit_if(if_t ifp, struct mbuf *m)
{
	struct rge_softc *sc = if_getsoftc(ifp);
	int ret;

	sc->sc_drv_stats.transmit_call_cnt++;

	RGE_LOCK(sc);
	if (sc->sc_stopped == true) {
		sc->sc_drv_stats.transmit_stopped_cnt++;
		RGE_UNLOCK(sc);
		return (ENETDOWN);	/* TODO: better error? */
	}

	/* XXX again should be a per-TXQ thing */
	ret = mbufq_enqueue(&sc->sc_txq, m);
	if (ret != 0) {
		sc->sc_drv_stats.transmit_full_cnt++;
		RGE_UNLOCK(sc);
		return (ret);
	}
	RGE_UNLOCK(sc);

	/* mbuf is owned by the driver, schedule transmit */
	taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
	sc->sc_drv_stats.transmit_queued_cnt++;

	return (0);
}

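/**
 * @brief if_init method - bring the interface up with the driver lock held.
 */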
static void
rge_init_if(void *xsc)
{
	struct rge_softc *sc = xsc;

	RGE_LOCK(sc);
	rge_init_locked(sc);
	RGE_UNLOCK(sc);
}

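/**
 * @brief Initialise and start the hardware.
 *
 * Stops the chip, then reprograms the MAC address, descriptor ring
 * addresses and the RX/TX configuration.  Must be called with the
 * driver lock held.
 */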
static void
rge_init_locked(struct rge_softc *sc)
{
	struct rge_queues *q = sc->sc_queues;
	uint32_t rxconf, val;
	int i, num_miti;

	RGE_ASSERT_LOCKED(sc);

	RGE_DPRINTF(sc, RGE_DEBUG_INIT, "%s: called!\n", __func__);

	/* Don't double-init the hardware */
	if ((if_getdrvflags(sc->sc_ifp) & IFF_DRV_RUNNING) != 0) {
		/*
		 * Note: I'm leaving this disabled by default; however
		 * I'm leaving it in here so I can figure out what's
		 * causing this to be initialised both from the ioctl
		 * API and if_init() API.
		 */
//		RGE_PRINT_ERROR(sc, "%s: called whilst running?\n", __func__);
		return;
	}

	/*
	 * Bring the hardware down so we know it's in a good known
	 * state before we bring it up in a good known state.
	 */
	rge_stop_locked(sc);

	/* Set MAC address. */
	rge_set_macaddr(sc, if_getlladdr(sc->sc_ifp));

	/* Initialize RX and TX descriptors lists. */
	rge_rx_list_init(q);
	rge_tx_list_init(q);

	if (rge_chipinit(sc)) {
		RGE_PRINT_ERROR(sc, "%s: ERROR: chip init fail!\n", __func__);
		return;
	}

	if (rge_phy_config(sc))
		return;

	RGE_SETBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);

	RGE_CLRBIT_1(sc, 0xf1, 0x80);
	rge_disable_aspm_clkreq(sc);
	RGE_WRITE_2(sc, RGE_EEE_TXIDLE_TIMER,
	    RGE_JUMBO_MTU + ETHER_HDR_LEN + 32);

	/* Load the addresses of the RX and TX lists into the chip. */
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_LO,
	    RGE_ADDR_LO(q->q_rx.rge_rx_list_paddr));
	RGE_WRITE_4(sc, RGE_RXDESC_ADDR_HI,
	    RGE_ADDR_HI(q->q_rx.rge_rx_list_paddr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_LO,
	    RGE_ADDR_LO(q->q_tx.rge_tx_list_paddr));
	RGE_WRITE_4(sc, RGE_TXDESC_ADDR_HI,
	    RGE_ADDR_HI(q->q_tx.rge_tx_list_paddr));

	/* Set the initial RX and TX configurations. */
	if (sc->rge_type == MAC_R25)
		rxconf = RGE_RXCFG_CONFIG;
	else if (sc->rge_type == MAC_R25B)
		rxconf = RGE_RXCFG_CONFIG_8125B;
	else if (sc->rge_type == MAC_R25D)
		rxconf = RGE_RXCFG_CONFIG_8125D;
	else
		rxconf = RGE_RXCFG_CONFIG_8126;
	RGE_WRITE_4(sc, RGE_RXCFG, rxconf);
	RGE_WRITE_4(sc, RGE_TXCFG, RGE_TXCFG_CONFIG);

	val = rge_read_csi(sc, 0x70c) & ~0x3f000000;
	rge_write_csi(sc, 0x70c, val | 0x27000000);

	if (sc->rge_type == MAC_R26 || sc->rge_type == MAC_R27) {
		/* Disable L1 timeout. */
		val = rge_read_csi(sc, 0x890) & ~0x00000001;
		rge_write_csi(sc, 0x890, val);
	} else if (sc->rge_type != MAC_R25D)
		RGE_WRITE_2(sc, 0x0382, 0x221b);

	RGE_WRITE_1(sc, RGE_RSS_CTRL, 0);

	val = RGE_READ_2(sc, RGE_RXQUEUE_CTRL) & ~0x001c;
	RGE_WRITE_2(sc, RGE_RXQUEUE_CTRL, val | (fls(sc->sc_nqueues) - 1) << 2);

	RGE_CLRBIT_1(sc, RGE_CFG1, RGE_CFG1_SPEED_DOWN);

	rge_write_mac_ocp(sc, 0xc140, 0xffff);
	rge_write_mac_ocp(sc, 0xc142, 0xffff);

	RGE_MAC_SETBIT(sc, 0xeb58, 0x0001);

	if (sc->rge_type == MAC_R26 || sc->rge_type == MAC_R27) {
		RGE_CLRBIT_1(sc, 0xd8, 0x02);
		if (sc->rge_type == MAC_R27) {
			RGE_CLRBIT_1(sc, 0x20e4, 0x04);
			RGE_MAC_CLRBIT(sc, 0xe00c, 0x1000);
11924bf8ce03SAdrian Chadd 			RGE_MAC_CLRBIT(sc, 0xc0c2, 0x0040);
11934bf8ce03SAdrian Chadd 		}
11944bf8ce03SAdrian Chadd 	}
11954bf8ce03SAdrian Chadd 
11964bf8ce03SAdrian Chadd 	val = rge_read_mac_ocp(sc, 0xe614);
11974bf8ce03SAdrian Chadd 	val &= (sc->rge_type == MAC_R27) ? ~0x0f00 : ~0x0700;
11984bf8ce03SAdrian Chadd 	if (sc->rge_type == MAC_R25 || sc->rge_type == MAC_R25D)
11994bf8ce03SAdrian Chadd 		rge_write_mac_ocp(sc, 0xe614, val | 0x0300);
12004bf8ce03SAdrian Chadd 	else if (sc->rge_type == MAC_R25B)
12014bf8ce03SAdrian Chadd 		rge_write_mac_ocp(sc, 0xe614, val | 0x0200);
12024bf8ce03SAdrian Chadd 	else if (sc->rge_type == MAC_R26)
12034bf8ce03SAdrian Chadd 		rge_write_mac_ocp(sc, 0xe614, val | 0x0300);
12044bf8ce03SAdrian Chadd 	else
12054bf8ce03SAdrian Chadd 		rge_write_mac_ocp(sc, 0xe614, val | 0x0f00);
12064bf8ce03SAdrian Chadd 
12074bf8ce03SAdrian Chadd 	val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0c00;
12084bf8ce03SAdrian Chadd 	rge_write_mac_ocp(sc, 0xe63e, val |
12094bf8ce03SAdrian Chadd 	    ((fls(sc->sc_nqueues) - 1) & 0x03) << 10);
12104bf8ce03SAdrian Chadd 
12114bf8ce03SAdrian Chadd 	val = rge_read_mac_ocp(sc, 0xe63e) & ~0x0030;
12124bf8ce03SAdrian Chadd 	rge_write_mac_ocp(sc, 0xe63e, val | 0x0020);
12134bf8ce03SAdrian Chadd 
12144bf8ce03SAdrian Chadd 	RGE_MAC_CLRBIT(sc, 0xc0b4, 0x0001);
12154bf8ce03SAdrian Chadd 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x0001);
12164bf8ce03SAdrian Chadd 
12174bf8ce03SAdrian Chadd 	RGE_MAC_SETBIT(sc, 0xc0b4, 0x000c);
12184bf8ce03SAdrian Chadd 
12194bf8ce03SAdrian Chadd 	val = rge_read_mac_ocp(sc, 0xeb6a) & ~0x00ff;
12204bf8ce03SAdrian Chadd 	rge_write_mac_ocp(sc, 0xeb6a, val | 0x0033);
12214bf8ce03SAdrian Chadd 
12224bf8ce03SAdrian Chadd 	val = rge_read_mac_ocp(sc, 0xeb50) & ~0x03e0;
12234bf8ce03SAdrian Chadd 	rge_write_mac_ocp(sc, 0xeb50, val | 0x0040);
12244bf8ce03SAdrian Chadd 
12254bf8ce03SAdrian Chadd 	RGE_MAC_CLRBIT(sc, 0xe056, 0x00f0);
12264bf8ce03SAdrian Chadd 
12274bf8ce03SAdrian Chadd 	RGE_WRITE_1(sc, RGE_TDFNR, 0x10);
12284bf8ce03SAdrian Chadd 
12294bf8ce03SAdrian Chadd 	RGE_MAC_CLRBIT(sc, 0xe040, 0x1000);
12304bf8ce03SAdrian Chadd 
12314bf8ce03SAdrian Chadd 	val = rge_read_mac_ocp(sc, 0xea1c) & ~0x0003;
12324bf8ce03SAdrian Chadd 	rge_write_mac_ocp(sc, 0xea1c, val | 0x0001);
12334bf8ce03SAdrian Chadd 
12344bf8ce03SAdrian Chadd 	if (sc->rge_type == MAC_R25D)
12354bf8ce03SAdrian Chadd 		rge_write_mac_ocp(sc, 0xe0c0, 0x4403);
12364bf8ce03SAdrian Chadd 	else
12374bf8ce03SAdrian Chadd 		rge_write_mac_ocp(sc, 0xe0c0, 0x4000);
12384bf8ce03SAdrian Chadd 
12394bf8ce03SAdrian Chadd 	RGE_MAC_SETBIT(sc, 0xe052, 0x0060);
12404bf8ce03SAdrian Chadd 	RGE_MAC_CLRBIT(sc, 0xe052, 0x0088);
12414bf8ce03SAdrian Chadd 
12424bf8ce03SAdrian Chadd 	val = rge_read_mac_ocp(sc, 0xd430) & ~0x0fff;
12434bf8ce03SAdrian Chadd 	rge_write_mac_ocp(sc, 0xd430, val | 0x045f);
12444bf8ce03SAdrian Chadd 
12454bf8ce03SAdrian Chadd 	RGE_SETBIT_1(sc, RGE_DLLPR, RGE_DLLPR_PFM_EN | RGE_DLLPR_TX_10M_PS_EN);
12464bf8ce03SAdrian Chadd 
12474bf8ce03SAdrian Chadd 	if (sc->rge_type == MAC_R25)
12484bf8ce03SAdrian Chadd 		RGE_SETBIT_1(sc, RGE_MCUCMD, 0x01);
12494bf8ce03SAdrian Chadd 
12504bf8ce03SAdrian Chadd 	if (sc->rge_type != MAC_R25D) {
12514bf8ce03SAdrian Chadd 		/* Disable EEE plus. */
12524bf8ce03SAdrian Chadd 		RGE_MAC_CLRBIT(sc, 0xe080, 0x0002);
12534bf8ce03SAdrian Chadd 	}
12544bf8ce03SAdrian Chadd 
12554bf8ce03SAdrian Chadd 	if (sc->rge_type == MAC_R26 || sc->rge_type == MAC_R27)
12564bf8ce03SAdrian Chadd 		RGE_MAC_CLRBIT(sc, 0xea1c, 0x0304);
12574bf8ce03SAdrian Chadd 	else
12584bf8ce03SAdrian Chadd 		RGE_MAC_CLRBIT(sc, 0xea1c, 0x0004);
12594bf8ce03SAdrian Chadd 
12604bf8ce03SAdrian Chadd 	/* Clear tcam entries. */
12614bf8ce03SAdrian Chadd 	RGE_MAC_SETBIT(sc, 0xeb54, 0x0001);
12624bf8ce03SAdrian Chadd 	DELAY(1);
12634bf8ce03SAdrian Chadd 	RGE_MAC_CLRBIT(sc, 0xeb54, 0x0001);
12644bf8ce03SAdrian Chadd 
12654bf8ce03SAdrian Chadd 	RGE_CLRBIT_2(sc, 0x1880, 0x0030);
12664bf8ce03SAdrian Chadd 
12674bf8ce03SAdrian Chadd 	if (sc->rge_type == MAC_R27) {
12684bf8ce03SAdrian Chadd 		val = rge_read_mac_ocp(sc, 0xd40c) & ~0xe038;
12694bf8ce03SAdrian Chadd 		rge_write_mac_ocp(sc, 0xd40c, val | 0x8020);
12704bf8ce03SAdrian Chadd 	}
12714bf8ce03SAdrian Chadd 
12724bf8ce03SAdrian Chadd 	/* Config interrupt type. */
12734bf8ce03SAdrian Chadd 	if (sc->rge_type == MAC_R27)
12744bf8ce03SAdrian Chadd 		RGE_CLRBIT_1(sc, RGE_INT_CFG0, RGE_INT_CFG0_AVOID_MISS_INTR);
12754bf8ce03SAdrian Chadd 	else if (sc->rge_type != MAC_R25)
12764bf8ce03SAdrian Chadd 		RGE_CLRBIT_1(sc, RGE_INT_CFG0, RGE_INT_CFG0_EN);
12774bf8ce03SAdrian Chadd 
12784bf8ce03SAdrian Chadd 	/* Clear timer interrupts. */
12794bf8ce03SAdrian Chadd 	RGE_WRITE_4(sc, RGE_TIMERINT0, 0);
12804bf8ce03SAdrian Chadd 	RGE_WRITE_4(sc, RGE_TIMERINT1, 0);
12814bf8ce03SAdrian Chadd 	RGE_WRITE_4(sc, RGE_TIMERINT2, 0);
12824bf8ce03SAdrian Chadd 	RGE_WRITE_4(sc, RGE_TIMERINT3, 0);
12834bf8ce03SAdrian Chadd 
12844bf8ce03SAdrian Chadd 	num_miti =
12854bf8ce03SAdrian Chadd 	    (sc->rge_type == MAC_R25B || sc->rge_type == MAC_R26) ? 32 : 64;
12864bf8ce03SAdrian Chadd 	/* Clear interrupt moderation timer. */
12874bf8ce03SAdrian Chadd 	for (i = 0; i < num_miti; i++)
12884bf8ce03SAdrian Chadd 		RGE_WRITE_4(sc, RGE_INTMITI(i), 0);
12894bf8ce03SAdrian Chadd 
12904bf8ce03SAdrian Chadd 	if (sc->rge_type == MAC_R26) {
12914bf8ce03SAdrian Chadd 		RGE_CLRBIT_1(sc, RGE_INT_CFG0,
12924bf8ce03SAdrian Chadd 		    RGE_INT_CFG0_TIMEOUT_BYPASS | RGE_INT_CFG0_RDU_BYPASS_8126 |
12934bf8ce03SAdrian Chadd 		    RGE_INT_CFG0_MITIGATION_BYPASS);
12944bf8ce03SAdrian Chadd 		RGE_WRITE_2(sc, RGE_INT_CFG1, 0);
12954bf8ce03SAdrian Chadd 	}
12964bf8ce03SAdrian Chadd 
12974bf8ce03SAdrian Chadd 	RGE_MAC_SETBIT(sc, 0xc0ac, 0x1f80);
12984bf8ce03SAdrian Chadd 
12994bf8ce03SAdrian Chadd 	rge_write_mac_ocp(sc, 0xe098, 0xc302);
13004bf8ce03SAdrian Chadd 
13014bf8ce03SAdrian Chadd 	RGE_MAC_CLRBIT(sc, 0xe032, 0x0003);
13024bf8ce03SAdrian Chadd 	val = rge_read_csi(sc, 0x98) & ~0x0000ff00;
13034bf8ce03SAdrian Chadd 	rge_write_csi(sc, 0x98, val);
13044bf8ce03SAdrian Chadd 
13054bf8ce03SAdrian Chadd 	if (sc->rge_type == MAC_R25D) {
13064bf8ce03SAdrian Chadd 		val = rge_read_mac_ocp(sc, 0xe092) & ~0x00ff;
13074bf8ce03SAdrian Chadd 		rge_write_mac_ocp(sc, 0xe092, val | 0x0008);
13084bf8ce03SAdrian Chadd 	} else
13094bf8ce03SAdrian Chadd 		RGE_MAC_CLRBIT(sc, 0xe092, 0x00ff);
13104bf8ce03SAdrian Chadd 
13114bf8ce03SAdrian Chadd 	/* Enable/disable HW VLAN tagging based on enabled capability */
13124bf8ce03SAdrian Chadd 	if ((if_getcapenable(sc->sc_ifp) & IFCAP_VLAN_HWTAGGING) != 0)
13134bf8ce03SAdrian Chadd 		RGE_SETBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
13144bf8ce03SAdrian Chadd 	else
13154bf8ce03SAdrian Chadd 		RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_VLANSTRIP);
13164bf8ce03SAdrian Chadd 
13174bf8ce03SAdrian Chadd 	/* Enable/disable RX checksum based on enabled capability */
13184bf8ce03SAdrian Chadd 	if ((if_getcapenable(sc->sc_ifp) & IFCAP_RXCSUM) != 0)
13194bf8ce03SAdrian Chadd 		RGE_SETBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
13204bf8ce03SAdrian Chadd 	else
13214bf8ce03SAdrian Chadd 		RGE_CLRBIT_2(sc, RGE_CPLUSCMD, RGE_CPLUSCMD_RXCSUM);
13224bf8ce03SAdrian Chadd 	RGE_READ_2(sc, RGE_CPLUSCMD);
13234bf8ce03SAdrian Chadd 
13244bf8ce03SAdrian Chadd 	/* Set Maximum frame size. */
13254bf8ce03SAdrian Chadd 	RGE_WRITE_2(sc, RGE_RXMAXSIZE, RGE_JUMBO_FRAMELEN);
13264bf8ce03SAdrian Chadd 
13274bf8ce03SAdrian Chadd 	/* Disable RXDV gate. */
13284bf8ce03SAdrian Chadd 	RGE_CLRBIT_1(sc, RGE_PPSW, 0x08);
13294bf8ce03SAdrian Chadd 	DELAY(2000);
13304bf8ce03SAdrian Chadd 
13314bf8ce03SAdrian Chadd 	/* Program promiscuous mode and multicast filters. */
13324bf8ce03SAdrian Chadd 	rge_iff_locked(sc);
13334bf8ce03SAdrian Chadd 
13344bf8ce03SAdrian Chadd 	if (sc->rge_type == MAC_R27)
13354bf8ce03SAdrian Chadd 		RGE_CLRBIT_1(sc, RGE_RADMFIFO_PROTECT, 0x2001);
13364bf8ce03SAdrian Chadd 
13374bf8ce03SAdrian Chadd 	rge_disable_aspm_clkreq(sc);
13384bf8ce03SAdrian Chadd 
13394bf8ce03SAdrian Chadd 	RGE_CLRBIT_1(sc, RGE_EECMD, RGE_EECMD_WRITECFG);
13404bf8ce03SAdrian Chadd 	DELAY(10);
13414bf8ce03SAdrian Chadd 
13424bf8ce03SAdrian Chadd 	rge_ifmedia_upd(sc->sc_ifp);
13434bf8ce03SAdrian Chadd 
13444bf8ce03SAdrian Chadd 	/* Enable transmit and receive. */
13454bf8ce03SAdrian Chadd 	RGE_WRITE_1(sc, RGE_CMD, RGE_CMD_TXENB | RGE_CMD_RXENB);
13464bf8ce03SAdrian Chadd 
13474bf8ce03SAdrian Chadd 	/* Enable interrupts. */
13484bf8ce03SAdrian Chadd 	rge_setup_intr(sc, RGE_IMTYPE_SIM);
13494bf8ce03SAdrian Chadd 
13504bf8ce03SAdrian Chadd 	if_setdrvflagbits(sc->sc_ifp, IFF_DRV_RUNNING, 0);
13514bf8ce03SAdrian Chadd 	if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_OACTIVE);
13524bf8ce03SAdrian Chadd 
13534bf8ce03SAdrian Chadd 	callout_reset(&sc->sc_timeout, hz, rge_tick, sc);
13544bf8ce03SAdrian Chadd 
13554bf8ce03SAdrian Chadd 	RGE_DPRINTF(sc, RGE_DEBUG_INIT, "%s: init completed!\n", __func__);
13564bf8ce03SAdrian Chadd 
13574bf8ce03SAdrian Chadd 	/* Unblock transmit when we release the lock */
13584bf8ce03SAdrian Chadd 	sc->sc_stopped = false;
13594bf8ce03SAdrian Chadd }
13604bf8ce03SAdrian Chadd 
13614bf8ce03SAdrian Chadd /**
13624bf8ce03SAdrian Chadd  * @brief Stop the adapter and free any mbufs allocated to the RX and TX lists.
13634bf8ce03SAdrian Chadd  *
13644bf8ce03SAdrian Chadd  * Must be called with the driver lock held.
13654bf8ce03SAdrian Chadd  */
13664bf8ce03SAdrian Chadd void
13674bf8ce03SAdrian Chadd rge_stop_locked(struct rge_softc *sc)
13684bf8ce03SAdrian Chadd {
13694bf8ce03SAdrian Chadd 	struct rge_queues *q = sc->sc_queues;
13704bf8ce03SAdrian Chadd 	int i;
13714bf8ce03SAdrian Chadd 
13724bf8ce03SAdrian Chadd 	RGE_ASSERT_LOCKED(sc);
13734bf8ce03SAdrian Chadd 
13744bf8ce03SAdrian Chadd 	RGE_DPRINTF(sc, RGE_DEBUG_INIT, "%s: called!\n", __func__);
13754bf8ce03SAdrian Chadd 
13764bf8ce03SAdrian Chadd 	callout_stop(&sc->sc_timeout);
13774bf8ce03SAdrian Chadd 
13784bf8ce03SAdrian Chadd 	/* Stop pending TX submissions */
13794bf8ce03SAdrian Chadd 	sc->sc_stopped = true;
13804bf8ce03SAdrian Chadd 
13814bf8ce03SAdrian Chadd 	if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_RUNNING);
13824bf8ce03SAdrian Chadd 	sc->rge_timerintr = 0;
13834bf8ce03SAdrian Chadd 	sc->sc_watchdog = 0;
13844bf8ce03SAdrian Chadd 
13854bf8ce03SAdrian Chadd 	RGE_CLRBIT_4(sc, RGE_RXCFG, RGE_RXCFG_ALLPHYS | RGE_RXCFG_INDIV |
13864bf8ce03SAdrian Chadd 	    RGE_RXCFG_MULTI | RGE_RXCFG_BROAD | RGE_RXCFG_RUNT |
13874bf8ce03SAdrian Chadd 	    RGE_RXCFG_ERRPKT);
13884bf8ce03SAdrian Chadd 
13894bf8ce03SAdrian Chadd 	rge_hw_reset(sc);
13904bf8ce03SAdrian Chadd 
13914bf8ce03SAdrian Chadd 	RGE_MAC_CLRBIT(sc, 0xc0ac, 0x1f80);
13924bf8ce03SAdrian Chadd 
13934bf8ce03SAdrian Chadd 	if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_OACTIVE);
13944bf8ce03SAdrian Chadd 
13954bf8ce03SAdrian Chadd 	if (q->q_rx.rge_head != NULL) {
13964bf8ce03SAdrian Chadd 		m_freem(q->q_rx.rge_head);
13974bf8ce03SAdrian Chadd 		q->q_rx.rge_head = NULL;
13984bf8ce03SAdrian Chadd 		q->q_rx.rge_tail = &q->q_rx.rge_head;
13994bf8ce03SAdrian Chadd 	}
14004bf8ce03SAdrian Chadd 
14014bf8ce03SAdrian Chadd 	/* Free the TX list buffers. */
14024bf8ce03SAdrian Chadd 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
14034bf8ce03SAdrian Chadd 		if (q->q_tx.rge_txq[i].txq_mbuf != NULL) {
14044bf8ce03SAdrian Chadd 			bus_dmamap_unload(sc->sc_dmat_tx_buf,
14054bf8ce03SAdrian Chadd 			    q->q_tx.rge_txq[i].txq_dmamap);
14064bf8ce03SAdrian Chadd 			m_freem(q->q_tx.rge_txq[i].txq_mbuf);
14074bf8ce03SAdrian Chadd 			q->q_tx.rge_txq[i].txq_mbuf = NULL;
14084bf8ce03SAdrian Chadd 		}
14094bf8ce03SAdrian Chadd 	}
14104bf8ce03SAdrian Chadd 
14114bf8ce03SAdrian Chadd 	/* Free the RX list buffers. */
14124bf8ce03SAdrian Chadd 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
14134bf8ce03SAdrian Chadd 		if (q->q_rx.rge_rxq[i].rxq_mbuf != NULL) {
14144bf8ce03SAdrian Chadd 			bus_dmamap_unload(sc->sc_dmat_rx_buf,
14154bf8ce03SAdrian Chadd 			    q->q_rx.rge_rxq[i].rxq_dmamap);
14164bf8ce03SAdrian Chadd 			m_freem(q->q_rx.rge_rxq[i].rxq_mbuf);
14174bf8ce03SAdrian Chadd 			q->q_rx.rge_rxq[i].rxq_mbuf = NULL;
14184bf8ce03SAdrian Chadd 		}
14194bf8ce03SAdrian Chadd 	}
14204bf8ce03SAdrian Chadd 
14214bf8ce03SAdrian Chadd 	/* Free pending TX frames */
14224bf8ce03SAdrian Chadd 	/* TODO: should be per TX queue */
14234bf8ce03SAdrian Chadd 	rge_txq_flush_mbufs(sc);
14244bf8ce03SAdrian Chadd }
14254bf8ce03SAdrian Chadd 
14264bf8ce03SAdrian Chadd /*
14274bf8ce03SAdrian Chadd  * Set media options.
14284bf8ce03SAdrian Chadd  */
14294bf8ce03SAdrian Chadd static int
14304bf8ce03SAdrian Chadd rge_ifmedia_upd(if_t ifp)
14314bf8ce03SAdrian Chadd {
14324bf8ce03SAdrian Chadd 	struct rge_softc *sc = if_getsoftc(ifp);
14334bf8ce03SAdrian Chadd 	struct ifmedia *ifm = &sc->sc_media;
14344bf8ce03SAdrian Chadd 	int anar, gig, val;
14354bf8ce03SAdrian Chadd 
14364bf8ce03SAdrian Chadd 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
14374bf8ce03SAdrian Chadd 		return (EINVAL);
14384bf8ce03SAdrian Chadd 
14394bf8ce03SAdrian Chadd 	/* Disable Gigabit Lite. */
14404bf8ce03SAdrian Chadd 	RGE_PHY_CLRBIT(sc, 0xa428, 0x0200);
14414bf8ce03SAdrian Chadd 	RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0001);
14424bf8ce03SAdrian Chadd 	if (sc->rge_type == MAC_R26 || sc->rge_type == MAC_R27)
14434bf8ce03SAdrian Chadd 		RGE_PHY_CLRBIT(sc, 0xa5ea, 0x0007);
14444bf8ce03SAdrian Chadd 
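	/*
	 * PHY OCP register 0xa5d4 holds the 2.5/5/10GBASE-T advertisement
	 * bits (RGE_ADV_*): clear every multi-gig rate this MAC supports,
	 * then re-advertise the ones implied by the selected media below.
	 */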
14454bf8ce03SAdrian Chadd 	val = rge_read_phy_ocp(sc, 0xa5d4);
14464bf8ce03SAdrian Chadd 	switch (sc->rge_type) {
14474bf8ce03SAdrian Chadd 	case MAC_R27:
14484bf8ce03SAdrian Chadd 		val &= ~RGE_ADV_10000TFDX;
14494bf8ce03SAdrian Chadd 		/* fallthrough */
14504bf8ce03SAdrian Chadd 	case MAC_R26:
14514bf8ce03SAdrian Chadd 		val &= ~RGE_ADV_5000TFDX;
14524bf8ce03SAdrian Chadd 		/* fallthrough */
14534bf8ce03SAdrian Chadd 	default:
14544bf8ce03SAdrian Chadd 		val &= ~RGE_ADV_2500TFDX;
14554bf8ce03SAdrian Chadd 		break;
14564bf8ce03SAdrian Chadd 	}
14574bf8ce03SAdrian Chadd 
14584bf8ce03SAdrian Chadd 	anar = ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10;
14594bf8ce03SAdrian Chadd 	gig = GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX;
14604bf8ce03SAdrian Chadd 
14614bf8ce03SAdrian Chadd 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
14624bf8ce03SAdrian Chadd 	case IFM_AUTO:
14634bf8ce03SAdrian Chadd 		val |= RGE_ADV_2500TFDX;
14644bf8ce03SAdrian Chadd 		if (sc->rge_type == MAC_R26)
14654bf8ce03SAdrian Chadd 			val |= RGE_ADV_5000TFDX;
14664bf8ce03SAdrian Chadd 		else if (sc->rge_type == MAC_R27)
14674bf8ce03SAdrian Chadd 			val |= RGE_ADV_5000TFDX | RGE_ADV_10000TFDX;
14684bf8ce03SAdrian Chadd 		break;
14694bf8ce03SAdrian Chadd 	case IFM_10G_T:
14704bf8ce03SAdrian Chadd 		val |= RGE_ADV_10000TFDX;
14714bf8ce03SAdrian Chadd 		if_setbaudrate(ifp, IF_Gbps(10));
14724bf8ce03SAdrian Chadd 		break;
14734bf8ce03SAdrian Chadd 	case IFM_5000_T:
14744bf8ce03SAdrian Chadd 		val |= RGE_ADV_5000TFDX;
14754bf8ce03SAdrian Chadd 		if_setbaudrate(ifp, IF_Gbps(5));
14764bf8ce03SAdrian Chadd 		break;
14774bf8ce03SAdrian Chadd 	case IFM_2500_T:
14784bf8ce03SAdrian Chadd 		val |= RGE_ADV_2500TFDX;
14794bf8ce03SAdrian Chadd 		if_setbaudrate(ifp, IF_Mbps(2500));
14804bf8ce03SAdrian Chadd 		break;
14814bf8ce03SAdrian Chadd 	case IFM_1000_T:
14824bf8ce03SAdrian Chadd 		if_setbaudrate(ifp, IF_Gbps(1));
14834bf8ce03SAdrian Chadd 		break;
14844bf8ce03SAdrian Chadd 	case IFM_100_TX:
14854bf8ce03SAdrian Chadd 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
14864bf8ce03SAdrian Chadd 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
14874bf8ce03SAdrian Chadd 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
14884bf8ce03SAdrian Chadd 		    ANAR_TX | ANAR_TX_FD | ANAR_10_FD | ANAR_10 :
14894bf8ce03SAdrian Chadd 		    ANAR_TX | ANAR_10_FD | ANAR_10;
14904bf8ce03SAdrian Chadd 		if_setbaudrate(ifp, IF_Mbps(100));
14914bf8ce03SAdrian Chadd 		break;
14924bf8ce03SAdrian Chadd 	case IFM_10_T:
14934bf8ce03SAdrian Chadd 		gig = rge_read_phy(sc, 0, MII_100T2CR) &
14944bf8ce03SAdrian Chadd 		    ~(GTCR_ADV_1000TFDX | GTCR_ADV_1000THDX);
14954bf8ce03SAdrian Chadd 		anar = ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) ?
14964bf8ce03SAdrian Chadd 		    ANAR_10_FD | ANAR_10 : ANAR_10;
14974bf8ce03SAdrian Chadd 		if_setbaudrate(ifp, IF_Mbps(10));
14984bf8ce03SAdrian Chadd 		break;
14994bf8ce03SAdrian Chadd 	default:
15004bf8ce03SAdrian Chadd 		RGE_PRINT_ERROR(sc, "unsupported media type\n");
15014bf8ce03SAdrian Chadd 		return (EINVAL);
15024bf8ce03SAdrian Chadd 	}
15034bf8ce03SAdrian Chadd 
15044bf8ce03SAdrian Chadd 	rge_write_phy(sc, 0, MII_ANAR, anar | ANAR_PAUSE_ASYM | ANAR_FC);
15054bf8ce03SAdrian Chadd 	rge_write_phy(sc, 0, MII_100T2CR, gig);
15064bf8ce03SAdrian Chadd 	rge_write_phy_ocp(sc, 0xa5d4, val);
15074bf8ce03SAdrian Chadd 	rge_write_phy(sc, 0, MII_BMCR, BMCR_RESET | BMCR_AUTOEN |
15084bf8ce03SAdrian Chadd 	    BMCR_STARTNEG);
15094bf8ce03SAdrian Chadd 
15104bf8ce03SAdrian Chadd 	return (0);
15114bf8ce03SAdrian Chadd }
15124bf8ce03SAdrian Chadd 
15134bf8ce03SAdrian Chadd /*
15144bf8ce03SAdrian Chadd  * Report current media status.
15154bf8ce03SAdrian Chadd  */
15164bf8ce03SAdrian Chadd static void
15174bf8ce03SAdrian Chadd rge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
15184bf8ce03SAdrian Chadd {
15194bf8ce03SAdrian Chadd 	struct rge_softc *sc = if_getsoftc(ifp);
15204bf8ce03SAdrian Chadd 	uint16_t status = 0;
15214bf8ce03SAdrian Chadd 
15224bf8ce03SAdrian Chadd 	ifmr->ifm_status = IFM_AVALID;
15234bf8ce03SAdrian Chadd 	ifmr->ifm_active = IFM_ETHER;
15244bf8ce03SAdrian Chadd 
15254bf8ce03SAdrian Chadd 	if (rge_get_link_status(sc)) {
15264bf8ce03SAdrian Chadd 		ifmr->ifm_status |= IFM_ACTIVE;
15274bf8ce03SAdrian Chadd 
15284bf8ce03SAdrian Chadd 		status = RGE_READ_2(sc, RGE_PHYSTAT);
15294bf8ce03SAdrian Chadd 		if ((status & RGE_PHYSTAT_FDX) ||
15304bf8ce03SAdrian Chadd 		    (status & (RGE_PHYSTAT_1000MBPS | RGE_PHYSTAT_2500MBPS |
15314bf8ce03SAdrian Chadd 		    RGE_PHYSTAT_5000MBPS | RGE_PHYSTAT_10000MBPS)))
15324bf8ce03SAdrian Chadd 			ifmr->ifm_active |= IFM_FDX;
15334bf8ce03SAdrian Chadd 		else
15344bf8ce03SAdrian Chadd 			ifmr->ifm_active |= IFM_HDX;
15354bf8ce03SAdrian Chadd 
15364bf8ce03SAdrian Chadd 		if (status & RGE_PHYSTAT_10MBPS)
15374bf8ce03SAdrian Chadd 			ifmr->ifm_active |= IFM_10_T;
15384bf8ce03SAdrian Chadd 		else if (status & RGE_PHYSTAT_100MBPS)
15394bf8ce03SAdrian Chadd 			ifmr->ifm_active |= IFM_100_TX;
15404bf8ce03SAdrian Chadd 		else if (status & RGE_PHYSTAT_1000MBPS)
15414bf8ce03SAdrian Chadd 			ifmr->ifm_active |= IFM_1000_T;
15424bf8ce03SAdrian Chadd 		else if (status & RGE_PHYSTAT_2500MBPS)
15434bf8ce03SAdrian Chadd 			ifmr->ifm_active |= IFM_2500_T;
15444bf8ce03SAdrian Chadd 		else if (status & RGE_PHYSTAT_5000MBPS)
15454bf8ce03SAdrian Chadd 			ifmr->ifm_active |= IFM_5000_T;
15484bf8ce03SAdrian Chadd 		else if (status & RGE_PHYSTAT_10000MBPS)
15494bf8ce03SAdrian Chadd 			ifmr->ifm_active |= IFM_10G_T;
15504bf8ce03SAdrian Chadd 	}
15514bf8ce03SAdrian Chadd }
15524bf8ce03SAdrian Chadd 
15534bf8ce03SAdrian Chadd /**
15544bf8ce03SAdrian Chadd  * @brief callback to load/populate a single physical address
15554bf8ce03SAdrian Chadd  */
15564bf8ce03SAdrian Chadd static void
15574bf8ce03SAdrian Chadd rge_dma_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
15584bf8ce03SAdrian Chadd {
15594bf8ce03SAdrian Chadd 	bus_addr_t *paddr = (bus_addr_t *) arg;
15604bf8ce03SAdrian Chadd 
15614bf8ce03SAdrian Chadd 	*paddr = 0;
15624bf8ce03SAdrian Chadd 
15634bf8ce03SAdrian Chadd 	if (error) {
15644bf8ce03SAdrian Chadd 		printf("%s: error! (%d)\n", __func__, error);
15654bf8ce03SAdrian Chadd 		*paddr = 0;
15664bf8ce03SAdrian Chadd 		return;
15674bf8ce03SAdrian Chadd 	}
15684bf8ce03SAdrian Chadd 
15694bf8ce03SAdrian Chadd 	if (nsegs != 1) {
15704bf8ce03SAdrian Chadd 		printf("%s: too many segs (got %d)\n", __func__, nsegs);
15714bf8ce03SAdrian Chadd 		*paddr = 0;
15724bf8ce03SAdrian Chadd 		return;
15734bf8ce03SAdrian Chadd 	}
15744bf8ce03SAdrian Chadd 
15754bf8ce03SAdrian Chadd 	*paddr = segs[0].ds_addr;
15764bf8ce03SAdrian Chadd }
15774bf8ce03SAdrian Chadd 
15784bf8ce03SAdrian Chadd /**
15794bf8ce03SAdrian Chadd  * @brief Allocate memory for RX/TX rings.
15804bf8ce03SAdrian Chadd  *
15814bf8ce03SAdrian Chadd  * Called with the driver lock NOT held.
15824bf8ce03SAdrian Chadd  */
15834bf8ce03SAdrian Chadd static int
15844bf8ce03SAdrian Chadd rge_allocmem(struct rge_softc *sc)
15854bf8ce03SAdrian Chadd {
15864bf8ce03SAdrian Chadd 	struct rge_queues *q = sc->sc_queues;
15874bf8ce03SAdrian Chadd 	int error;
15884bf8ce03SAdrian Chadd 	int i;
15894bf8ce03SAdrian Chadd 
15904bf8ce03SAdrian Chadd 	RGE_ASSERT_UNLOCKED(sc);
15914bf8ce03SAdrian Chadd 
15924bf8ce03SAdrian Chadd 	/* Allocate DMA'able memory for the TX ring. */
15934bf8ce03SAdrian Chadd 	error = bus_dmamem_alloc(sc->sc_dmat_tx_desc,
15944bf8ce03SAdrian Chadd 	    (void **) &q->q_tx.rge_tx_list,
15954bf8ce03SAdrian Chadd 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
15964bf8ce03SAdrian Chadd 	    &q->q_tx.rge_tx_list_map);
15974bf8ce03SAdrian Chadd 	if (error) {
15984bf8ce03SAdrian Chadd 		RGE_PRINT_ERROR(sc, "%s: error (alloc tx_list.map) (%d)\n",
15994bf8ce03SAdrian Chadd 		    __func__, error);
16004bf8ce03SAdrian Chadd 		goto error;
16014bf8ce03SAdrian Chadd 	}
16024bf8ce03SAdrian Chadd 
16034bf8ce03SAdrian Chadd 	RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: tx_list=%p\n", __func__,
16044bf8ce03SAdrian Chadd 	    q->q_tx.rge_tx_list);
16054bf8ce03SAdrian Chadd 	RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: tx_list_map=%p\n", __func__,
16064bf8ce03SAdrian Chadd 	    q->q_tx.rge_tx_list_map);
16074bf8ce03SAdrian Chadd 
16084bf8ce03SAdrian Chadd 	/* Load the map for the TX ring. */
16094bf8ce03SAdrian Chadd 	error = bus_dmamap_load(sc->sc_dmat_tx_desc,
16104bf8ce03SAdrian Chadd 	    q->q_tx.rge_tx_list_map,
16114bf8ce03SAdrian Chadd 	    q->q_tx.rge_tx_list,
16124bf8ce03SAdrian Chadd 	    RGE_TX_LIST_SZ,
16134bf8ce03SAdrian Chadd 	    rge_dma_load_cb,
16144bf8ce03SAdrian Chadd 	    (void *) &q->q_tx.rge_tx_list_paddr,
16154bf8ce03SAdrian Chadd 	    BUS_DMA_NOWAIT);
16164bf8ce03SAdrian Chadd 
16174bf8ce03SAdrian Chadd 	if ((error != 0) || (q->q_tx.rge_tx_list_paddr == 0)) {
16184bf8ce03SAdrian Chadd 		RGE_PRINT_ERROR(sc, "%s: error (load tx_list.map) (%d)\n",
16194bf8ce03SAdrian Chadd 		    __func__, error);
		if (error == 0)
			error = ENXIO;
16204bf8ce03SAdrian Chadd 		goto error;
16214bf8ce03SAdrian Chadd 	}
16224bf8ce03SAdrian Chadd 
16234bf8ce03SAdrian Chadd 	/* Create DMA maps for TX buffers. */
16244bf8ce03SAdrian Chadd 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
16254bf8ce03SAdrian Chadd 		error = bus_dmamap_create(sc->sc_dmat_tx_buf,
16264bf8ce03SAdrian Chadd 		    0, &q->q_tx.rge_txq[i].txq_dmamap);
16274bf8ce03SAdrian Chadd 		if (error) {
16284bf8ce03SAdrian Chadd 			RGE_PRINT_ERROR(sc,
16294bf8ce03SAdrian Chadd 			    "can't create DMA map for TX (%d)\n", error);
16304bf8ce03SAdrian Chadd 			goto error;
16314bf8ce03SAdrian Chadd 		}
16324bf8ce03SAdrian Chadd 	}
16334bf8ce03SAdrian Chadd 
16344bf8ce03SAdrian Chadd 	/* Allocate DMA'able memory for the RX ring. */
16354bf8ce03SAdrian Chadd 	error = bus_dmamem_alloc(sc->sc_dmat_rx_desc,
16364bf8ce03SAdrian Chadd 	    (void **) &q->q_rx.rge_rx_list,
16374bf8ce03SAdrian Chadd 	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
16384bf8ce03SAdrian Chadd 	    &q->q_rx.rge_rx_list_map);
16394bf8ce03SAdrian Chadd 	if (error) {
16404bf8ce03SAdrian Chadd 		RGE_PRINT_ERROR(sc, "%s: error (alloc rx_list.map) (%d)\n",
16414bf8ce03SAdrian Chadd 		    __func__, error);
16424bf8ce03SAdrian Chadd 		goto error;
16434bf8ce03SAdrian Chadd 	}
16444bf8ce03SAdrian Chadd 
16454bf8ce03SAdrian Chadd 	RGE_DPRINTF(sc, RGE_DEBUG_INIT, "%s: rx_list=%p\n", __func__,
16464bf8ce03SAdrian Chadd 	    q->q_rx.rge_rx_list);
16474bf8ce03SAdrian Chadd 	RGE_DPRINTF(sc, RGE_DEBUG_INIT, "%s: rx_list_map=%p\n", __func__,
16484bf8ce03SAdrian Chadd 	    q->q_rx.rge_rx_list_map);
16494bf8ce03SAdrian Chadd 
16504bf8ce03SAdrian Chadd 	/* Load the map for the RX ring. */
16514bf8ce03SAdrian Chadd 	error = bus_dmamap_load(sc->sc_dmat_rx_desc,
16524bf8ce03SAdrian Chadd 	    q->q_rx.rge_rx_list_map,
16534bf8ce03SAdrian Chadd 	    q->q_rx.rge_rx_list,
16544bf8ce03SAdrian Chadd 	    RGE_RX_LIST_SZ,
16554bf8ce03SAdrian Chadd 	    rge_dma_load_cb,
16564bf8ce03SAdrian Chadd 	    (void *) &q->q_rx.rge_rx_list_paddr,
16574bf8ce03SAdrian Chadd 	    BUS_DMA_NOWAIT);
16584bf8ce03SAdrian Chadd 
16594bf8ce03SAdrian Chadd 	if ((error != 0) || (q->q_rx.rge_rx_list_paddr == 0)) {
16604bf8ce03SAdrian Chadd 		RGE_PRINT_ERROR(sc, "%s: error (load rx_list.map) (%d)\n",
16614bf8ce03SAdrian Chadd 		    __func__, error);
		if (error == 0)
			error = ENXIO;
16624bf8ce03SAdrian Chadd 		goto error;
16634bf8ce03SAdrian Chadd 	}
16644bf8ce03SAdrian Chadd 
16654bf8ce03SAdrian Chadd 	/* Create DMA maps for RX buffers. */
16664bf8ce03SAdrian Chadd 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
16674bf8ce03SAdrian Chadd 		error = bus_dmamap_create(sc->sc_dmat_rx_buf,
16684bf8ce03SAdrian Chadd 		    0, &q->q_rx.rge_rxq[i].rxq_dmamap);
16694bf8ce03SAdrian Chadd 		if (error) {
16704bf8ce03SAdrian Chadd 			RGE_PRINT_ERROR(sc,
16714bf8ce03SAdrian Chadd 			    "can't create DMA map for RX (%d)\n", error);
16724bf8ce03SAdrian Chadd 			goto error;
16734bf8ce03SAdrian Chadd 		}
16744bf8ce03SAdrian Chadd 	}
16754bf8ce03SAdrian Chadd 
16764bf8ce03SAdrian Chadd 	return (0);
16774bf8ce03SAdrian Chadd error:
16784bf8ce03SAdrian Chadd 
16794bf8ce03SAdrian Chadd 	rge_freemem(sc);
16804bf8ce03SAdrian Chadd 
16814bf8ce03SAdrian Chadd 	return (error);
16824bf8ce03SAdrian Chadd }
16834bf8ce03SAdrian Chadd 
16844bf8ce03SAdrian Chadd /**
16854bf8ce03SAdrian Chadd  * @brief Allocate memory for MAC stats.
16864bf8ce03SAdrian Chadd  *
16874bf8ce03SAdrian Chadd  * Called with the driver lock NOT held.
16884bf8ce03SAdrian Chadd  */
16894bf8ce03SAdrian Chadd static int
16904bf8ce03SAdrian Chadd rge_alloc_stats_mem(struct rge_softc *sc)
16914bf8ce03SAdrian Chadd {
16924bf8ce03SAdrian Chadd 	struct rge_mac_stats *ss = &sc->sc_mac_stats;
16934bf8ce03SAdrian Chadd 	int error;
16944bf8ce03SAdrian Chadd 
16954bf8ce03SAdrian Chadd 	RGE_ASSERT_UNLOCKED(sc);
16964bf8ce03SAdrian Chadd 
16974bf8ce03SAdrian Chadd 	/* Allocate DMA'able memory for the stats buffer. */
16984bf8ce03SAdrian Chadd 	error = bus_dmamem_alloc(sc->sc_dmat_stats_buf,
16994bf8ce03SAdrian Chadd 	    (void **) &ss->stats, BUS_DMA_WAITOK | BUS_DMA_ZERO,
17004bf8ce03SAdrian Chadd 	    &ss->map);
17014bf8ce03SAdrian Chadd 	if (error) {
17024bf8ce03SAdrian Chadd 		RGE_PRINT_ERROR(sc, "%s: error (alloc stats) (%d)\n",
17034bf8ce03SAdrian Chadd 		    __func__, error);
17044bf8ce03SAdrian Chadd 		goto error;
17054bf8ce03SAdrian Chadd 	}
17064bf8ce03SAdrian Chadd 
17074bf8ce03SAdrian Chadd 	RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: stats=%p\n", __func__, ss->stats);
17084bf8ce03SAdrian Chadd 	RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: map=%p\n", __func__, ss->map);
17094bf8ce03SAdrian Chadd 
17104bf8ce03SAdrian Chadd 	/* Load the map for the stats buffer. */
17114bf8ce03SAdrian Chadd 	error = bus_dmamap_load(sc->sc_dmat_stats_buf,
17124bf8ce03SAdrian Chadd 	    ss->map,
17134bf8ce03SAdrian Chadd 	    ss->stats,
17144bf8ce03SAdrian Chadd 	    RGE_STATS_BUF_SIZE,
17154bf8ce03SAdrian Chadd 	    rge_dma_load_cb,
17164bf8ce03SAdrian Chadd 	    (void *) &ss->paddr,
17174bf8ce03SAdrian Chadd 	    BUS_DMA_NOWAIT);
17184bf8ce03SAdrian Chadd 
17194bf8ce03SAdrian Chadd 	if ((error != 0) || (ss->paddr == 0)) {
17204bf8ce03SAdrian Chadd 		RGE_PRINT_ERROR(sc, "%s: error (load stats.map) (%d)\n",
17214bf8ce03SAdrian Chadd 		    __func__, error);
17224bf8ce03SAdrian Chadd 		if (error == 0)
17234bf8ce03SAdrian Chadd 			error = ENXIO;
17244bf8ce03SAdrian Chadd 		goto error;
17254bf8ce03SAdrian Chadd 	}
17264bf8ce03SAdrian Chadd 
17274bf8ce03SAdrian Chadd 	return (0);
17284bf8ce03SAdrian Chadd 
17294bf8ce03SAdrian Chadd error:
17304bf8ce03SAdrian Chadd 	rge_free_stats_mem(sc);
17314bf8ce03SAdrian Chadd 
17324bf8ce03SAdrian Chadd 	return (error);
17334bf8ce03SAdrian Chadd }
17344bf8ce03SAdrian Chadd 
17364bf8ce03SAdrian Chadd /**
17374bf8ce03SAdrian Chadd  * @brief Free the TX/RX DMA buffers and mbufs.
17384bf8ce03SAdrian Chadd  *
17394bf8ce03SAdrian Chadd  * Called with the driver lock NOT held.
17404bf8ce03SAdrian Chadd  */
17414bf8ce03SAdrian Chadd static int
17424bf8ce03SAdrian Chadd rge_freemem(struct rge_softc *sc)
17434bf8ce03SAdrian Chadd {
17444bf8ce03SAdrian Chadd 	struct rge_queues *q = sc->sc_queues;
17454bf8ce03SAdrian Chadd 	int i;
17464bf8ce03SAdrian Chadd 
17474bf8ce03SAdrian Chadd 	RGE_ASSERT_UNLOCKED(sc);
17484bf8ce03SAdrian Chadd 
17494bf8ce03SAdrian Chadd 	/* TX desc */
17504bf8ce03SAdrian Chadd 	bus_dmamap_unload(sc->sc_dmat_tx_desc, q->q_tx.rge_tx_list_map);
17514bf8ce03SAdrian Chadd 	if (q->q_tx.rge_tx_list != NULL)
17524bf8ce03SAdrian Chadd 		bus_dmamem_free(sc->sc_dmat_tx_desc, q->q_tx.rge_tx_list,
17534bf8ce03SAdrian Chadd 		    q->q_tx.rge_tx_list_map);
17554bf8ce03SAdrian Chadd 
17564bf8ce03SAdrian Chadd 	/* TX buf */
17574bf8ce03SAdrian Chadd 	for (i = 0; i < RGE_TX_LIST_CNT; i++) {
17584bf8ce03SAdrian Chadd 		struct rge_txq *tx = &q->q_tx.rge_txq[i];
17594bf8ce03SAdrian Chadd 
17604bf8ce03SAdrian Chadd 		/* unmap/free mbuf if it's still alloc'ed and mapped */
17614bf8ce03SAdrian Chadd 		if (tx->txq_mbuf != NULL) {
17624bf8ce03SAdrian Chadd 			static bool do_warning = false;
17634bf8ce03SAdrian Chadd 
17644bf8ce03SAdrian Chadd 			if (do_warning == false) {
17654bf8ce03SAdrian Chadd 				RGE_PRINT_ERROR(sc,
17664bf8ce03SAdrian Chadd 				    "%s: TX mbuf should've been freed!\n",
17674bf8ce03SAdrian Chadd 				    __func__);
17684bf8ce03SAdrian Chadd 				do_warning = true;
17694bf8ce03SAdrian Chadd 			}
17704bf8ce03SAdrian Chadd 			if (tx->txq_dmamap != NULL) {
17714bf8ce03SAdrian Chadd 				bus_dmamap_sync(sc->sc_dmat_tx_buf,
17724bf8ce03SAdrian Chadd 				    tx->txq_dmamap, BUS_DMASYNC_POSTREAD);
17734bf8ce03SAdrian Chadd 				bus_dmamap_unload(sc->sc_dmat_tx_buf,
17744bf8ce03SAdrian Chadd 				    tx->txq_dmamap);
17754bf8ce03SAdrian Chadd 			}
17764bf8ce03SAdrian Chadd 			m_free(tx->txq_mbuf);
17774bf8ce03SAdrian Chadd 			tx->txq_mbuf = NULL;
17784bf8ce03SAdrian Chadd 		}
17794bf8ce03SAdrian Chadd 
17804bf8ce03SAdrian Chadd 		/* Destroy the dmamap if it's allocated */
17814bf8ce03SAdrian Chadd 		if (tx->txq_dmamap != NULL) {
17824bf8ce03SAdrian Chadd 			bus_dmamap_destroy(sc->sc_dmat_tx_buf, tx->txq_dmamap);
17834bf8ce03SAdrian Chadd 			tx->txq_dmamap = NULL;
17844bf8ce03SAdrian Chadd 		}
17854bf8ce03SAdrian Chadd 	}
	memset(&q->q_tx, 0, sizeof(q->q_tx));
17864bf8ce03SAdrian Chadd 
17874bf8ce03SAdrian Chadd 	/* RX desc */
17884bf8ce03SAdrian Chadd 	bus_dmamap_unload(sc->sc_dmat_rx_desc, q->q_rx.rge_rx_list_map);
17894bf8ce03SAdrian Chadd 	if (q->q_rx.rge_rx_list != NULL)
17904bf8ce03SAdrian Chadd 		bus_dmamem_free(sc->sc_dmat_rx_desc, q->q_rx.rge_rx_list,
17914bf8ce03SAdrian Chadd 		    q->q_rx.rge_rx_list_map);
17934bf8ce03SAdrian Chadd 
17944bf8ce03SAdrian Chadd 	/* RX buf */
17954bf8ce03SAdrian Chadd 	for (i = 0; i < RGE_RX_LIST_CNT; i++) {
17964bf8ce03SAdrian Chadd 		struct rge_rxq *rx = &q->q_rx.rge_rxq[i];
17974bf8ce03SAdrian Chadd 
17984bf8ce03SAdrian Chadd 		/* unmap/free mbuf if it's still alloc'ed and mapped */
17994bf8ce03SAdrian Chadd 		if (rx->rxq_mbuf != NULL) {
18004bf8ce03SAdrian Chadd 			if (rx->rxq_dmamap != NULL) {
18014bf8ce03SAdrian Chadd 				bus_dmamap_sync(sc->sc_dmat_rx_buf,
18024bf8ce03SAdrian Chadd 				    rx->rxq_dmamap, BUS_DMASYNC_POSTREAD);
18034bf8ce03SAdrian Chadd 				bus_dmamap_unload(sc->sc_dmat_rx_buf,
18044bf8ce03SAdrian Chadd 				    rx->rxq_dmamap);
18054bf8ce03SAdrian Chadd 			}
18064bf8ce03SAdrian Chadd 			m_free(rx->rxq_mbuf);
18074bf8ce03SAdrian Chadd 			rx->rxq_mbuf = NULL;
18084bf8ce03SAdrian Chadd 		}
18094bf8ce03SAdrian Chadd 
18104bf8ce03SAdrian Chadd 		/* Destroy the dmamap if it's allocated */
18114bf8ce03SAdrian Chadd 		if (rx->rxq_dmamap != NULL) {
18124bf8ce03SAdrian Chadd 			bus_dmamap_destroy(sc->sc_dmat_rx_buf, rx->rxq_dmamap);
18134bf8ce03SAdrian Chadd 			rx->rxq_dmamap = NULL;
18144bf8ce03SAdrian Chadd 		}
18154bf8ce03SAdrian Chadd 	}
	memset(&q->q_rx, 0, sizeof(q->q_rx));
18164bf8ce03SAdrian Chadd 
18174bf8ce03SAdrian Chadd 	return (0);
18184bf8ce03SAdrian Chadd }
18194bf8ce03SAdrian Chadd 
18204bf8ce03SAdrian Chadd /**
18214bf8ce03SAdrian Chadd  * @brief Free the stats memory.
18224bf8ce03SAdrian Chadd  *
18234bf8ce03SAdrian Chadd  * Called with the driver lock NOT held.
18244bf8ce03SAdrian Chadd  */
18254bf8ce03SAdrian Chadd static int
18264bf8ce03SAdrian Chadd rge_free_stats_mem(struct rge_softc *sc)
18274bf8ce03SAdrian Chadd {
18284bf8ce03SAdrian Chadd 	struct rge_mac_stats *ss = &sc->sc_mac_stats;
18294bf8ce03SAdrian Chadd 
18304bf8ce03SAdrian Chadd 	RGE_ASSERT_UNLOCKED(sc);
18314bf8ce03SAdrian Chadd 
18324bf8ce03SAdrian Chadd 	bus_dmamap_unload(sc->sc_dmat_stats_buf, ss->map);
18334bf8ce03SAdrian Chadd 	if (ss->stats != NULL)
18344bf8ce03SAdrian Chadd 		bus_dmamem_free(sc->sc_dmat_stats_buf, ss->stats, ss->map);
18354bf8ce03SAdrian Chadd 	memset(ss, 0, sizeof(*ss));
18364bf8ce03SAdrian Chadd 	return (0);
18374bf8ce03SAdrian Chadd }
18384bf8ce03SAdrian Chadd 
18394bf8ce03SAdrian Chadd static uint32_t
18404bf8ce03SAdrian Chadd rx_ring_space(struct rge_queues *q)
18414bf8ce03SAdrian Chadd {
18424bf8ce03SAdrian Chadd 	uint32_t prod, cons;
18434bf8ce03SAdrian Chadd 	uint32_t ret;
18444bf8ce03SAdrian Chadd 
18454bf8ce03SAdrian Chadd 	RGE_ASSERT_LOCKED(q->q_sc);
18464bf8ce03SAdrian Chadd 
18474bf8ce03SAdrian Chadd 	prod = q->q_rx.rge_rxq_prodidx;
18484bf8ce03SAdrian Chadd 	cons = q->q_rx.rge_rxq_considx;
18494bf8ce03SAdrian Chadd 
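	/*
	 * Free slots are RGE_RX_LIST_CNT minus the filled span from cons to
	 * prod (modulo the ring size).  The "- 1 ... + 1" keeps the result
	 * in the range 1..RGE_RX_LIST_CNT, so an empty ring (prod == cons)
	 * reports the full count rather than zero.
	 */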
18504bf8ce03SAdrian Chadd 	ret = (cons + RGE_RX_LIST_CNT - prod - 1) % RGE_RX_LIST_CNT + 1;
18514bf8ce03SAdrian Chadd 
18524bf8ce03SAdrian Chadd 	if (ret > RGE_RX_LIST_CNT)
18534bf8ce03SAdrian Chadd 		return (RGE_RX_LIST_CNT);
18544bf8ce03SAdrian Chadd 
18554bf8ce03SAdrian Chadd 	return (ret);
18564bf8ce03SAdrian Chadd }
18574bf8ce03SAdrian Chadd 
18584bf8ce03SAdrian Chadd /*
18594bf8ce03SAdrian Chadd  * Initialize the next free RX descriptor and attach an mbuf cluster to it.
18604bf8ce03SAdrian Chadd  *
18614bf8ce03SAdrian Chadd  * Note: the OpenBSD version relies on the rxr ring buffer abstraction to
18624bf8ce03SAdrian Chadd  * avoid over-filling the RX ring.  This FreeBSD port instead uses the
18634bf8ce03SAdrian Chadd  * prod/cons RX indexes (see rx_ring_space()) to decide how much RX ring
18644bf8ce03SAdrian Chadd  * space to populate.
18654bf8ce03SAdrian Chadd  *
18664bf8ce03SAdrian Chadd  * This routine will increment the producer index if successful.
18674bf8ce03SAdrian Chadd  *
18684bf8ce03SAdrian Chadd  * This must be called with the driver lock held.
18694bf8ce03SAdrian Chadd  */
18704bf8ce03SAdrian Chadd static int
18714bf8ce03SAdrian Chadd rge_newbuf(struct rge_queues *q)
18724bf8ce03SAdrian Chadd {
18734bf8ce03SAdrian Chadd 	struct rge_softc *sc = q->q_sc;
18744bf8ce03SAdrian Chadd 	struct mbuf *m;
18754bf8ce03SAdrian Chadd 	struct rge_rx_desc *r;
18764bf8ce03SAdrian Chadd 	struct rge_rxq *rxq;
18774bf8ce03SAdrian Chadd 	bus_dmamap_t rxmap;
18784bf8ce03SAdrian Chadd 	bus_dma_segment_t seg[1];
18794bf8ce03SAdrian Chadd 	uint32_t cmdsts;
18804bf8ce03SAdrian Chadd 	int nsegs;
18814bf8ce03SAdrian Chadd 	uint32_t idx;
18824bf8ce03SAdrian Chadd 
18834bf8ce03SAdrian Chadd 	RGE_ASSERT_LOCKED(q->q_sc);
18844bf8ce03SAdrian Chadd 
18854bf8ce03SAdrian Chadd 	/*
18864bf8ce03SAdrian Chadd 	 * Verify we have enough space in the ring; error out
18874bf8ce03SAdrian Chadd 	 * if we do not.
18884bf8ce03SAdrian Chadd 	 */
18894bf8ce03SAdrian Chadd 	if (rx_ring_space(q) == 0)
18904bf8ce03SAdrian Chadd 		return (ENOBUFS);
18914bf8ce03SAdrian Chadd 
18924bf8ce03SAdrian Chadd 	idx = q->q_rx.rge_rxq_prodidx;
18934bf8ce03SAdrian Chadd 	rxq = &q->q_rx.rge_rxq[idx];
18944bf8ce03SAdrian Chadd 	rxmap = rxq->rxq_dmamap;
18954bf8ce03SAdrian Chadd 
18964bf8ce03SAdrian Chadd 	/*
18974bf8ce03SAdrian Chadd 	 * If we already have an mbuf here then something messed up;
18984bf8ce03SAdrian Chadd 	 * exit out as the hardware may be DMAing to it.
18994bf8ce03SAdrian Chadd 	 */
19004bf8ce03SAdrian Chadd 	if (rxq->rxq_mbuf != NULL) {
19014bf8ce03SAdrian Chadd 		RGE_PRINT_ERROR(sc,
19024bf8ce03SAdrian Chadd 		    "%s: RX ring slot %d already has an mbuf?\n", __func__,
19034bf8ce03SAdrian Chadd 		    idx);
19044bf8ce03SAdrian Chadd 		return (ENOBUFS);
19054bf8ce03SAdrian Chadd 	}
19064bf8ce03SAdrian Chadd 
19074bf8ce03SAdrian Chadd 	/* Allocate single buffer backed mbuf of MCLBYTES */
19084bf8ce03SAdrian Chadd 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
19094bf8ce03SAdrian Chadd 	if (m == NULL)
19104bf8ce03SAdrian Chadd 		return (ENOBUFS);
19114bf8ce03SAdrian Chadd 
19124bf8ce03SAdrian Chadd 	m->m_len = m->m_pkthdr.len = MCLBYTES;
19134bf8ce03SAdrian Chadd 
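	/*
	 * Request a single-segment mapping; frames larger than one cluster
	 * span multiple descriptors and are reassembled in rge_rxeof().
	 */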
19144bf8ce03SAdrian Chadd 	nsegs = 1;
19154bf8ce03SAdrian Chadd 	if (bus_dmamap_load_mbuf_sg(sc->sc_dmat_rx_buf, rxmap, m, seg, &nsegs,
19164bf8ce03SAdrian Chadd 	    BUS_DMA_NOWAIT)) {
19174bf8ce03SAdrian Chadd 		m_freem(m);
19184bf8ce03SAdrian Chadd 		return (ENOBUFS);
19194bf8ce03SAdrian Chadd 	}
19204bf8ce03SAdrian Chadd 
19214bf8ce03SAdrian Chadd 	/*
19224bf8ce03SAdrian Chadd 	 * Make sure any changes made to the buffer have been flushed to host
19234bf8ce03SAdrian Chadd 	 * memory.
19244bf8ce03SAdrian Chadd 	 */
19254bf8ce03SAdrian Chadd 	bus_dmamap_sync(sc->sc_dmat_rx_buf, rxmap,
19264bf8ce03SAdrian Chadd 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
19274bf8ce03SAdrian Chadd 
19284bf8ce03SAdrian Chadd 	/*
19294bf8ce03SAdrian Chadd 	 * Map the segment into RX descriptors.  Note that this
19304bf8ce03SAdrian Chadd 	 * only currently supports a single segment per mbuf;
19314bf8ce03SAdrian Chadd 	 * the call to load_mbuf_sg above specified a single segment.
19324bf8ce03SAdrian Chadd 	 */
19334bf8ce03SAdrian Chadd 	r = &q->q_rx.rge_rx_list[idx];
19344bf8ce03SAdrian Chadd 
19354bf8ce03SAdrian Chadd 	rxq->rxq_mbuf = m;
19364bf8ce03SAdrian Chadd 
19374bf8ce03SAdrian Chadd 	cmdsts = seg[0].ds_len; /* XXX how big is this field in the descriptor? */
19384bf8ce03SAdrian Chadd 	if (idx == RGE_RX_LIST_CNT - 1)
19394bf8ce03SAdrian Chadd 		cmdsts |= RGE_RDCMDSTS_EOR;
19404bf8ce03SAdrian Chadd 
19414bf8ce03SAdrian Chadd 	/*
19424bf8ce03SAdrian Chadd 	 * Configure the DMA pointer and config, but don't hand
19434bf8ce03SAdrian Chadd 	 * it yet to the hardware.
19444bf8ce03SAdrian Chadd 	 */
19454bf8ce03SAdrian Chadd 	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(cmdsts);
19464bf8ce03SAdrian Chadd 	r->hi_qword1.rx_qword4.rge_extsts = htole32(0);
19474bf8ce03SAdrian Chadd 	r->hi_qword0.rge_addr = htole64(seg[0].ds_addr);
19484bf8ce03SAdrian Chadd 	wmb();
19494bf8ce03SAdrian Chadd 
19504bf8ce03SAdrian Chadd 	/*
19514bf8ce03SAdrian Chadd 	 * Mark the specific descriptor slot as "this descriptor is now
19524bf8ce03SAdrian Chadd 	 * owned by the hardware", which when the hardware next sees
19534bf8ce03SAdrian Chadd 	 * this, it'll continue RX DMA.
19544bf8ce03SAdrian Chadd 	 */
19554bf8ce03SAdrian Chadd 	cmdsts |= RGE_RDCMDSTS_OWN;
19564bf8ce03SAdrian Chadd 	r->hi_qword1.rx_qword4.rge_cmdsts = htole32(cmdsts);
19574bf8ce03SAdrian Chadd 	wmb();
19584bf8ce03SAdrian Chadd 
19594bf8ce03SAdrian Chadd 	/*
19604bf8ce03SAdrian Chadd 	 * At this point the hope is the whole ring is now updated and
19614bf8ce03SAdrian Chadd 	 * consistent; if the hardware was waiting for a descriptor to be
19624bf8ce03SAdrian Chadd 	 * ready to write into then it should be ready here.
19634bf8ce03SAdrian Chadd 	 */
19644bf8ce03SAdrian Chadd 
19654bf8ce03SAdrian Chadd 	RGE_DPRINTF(sc, RGE_DEBUG_RECV_DESC,
19664bf8ce03SAdrian Chadd 	    "%s: [%d]: m=%p, m_data=%p, m_len=%ju, phys=0x%jx len %ju, "
19674bf8ce03SAdrian Chadd 	    "desc=0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
19684bf8ce03SAdrian Chadd 	    __func__,
19694bf8ce03SAdrian Chadd 	    idx,
19704bf8ce03SAdrian Chadd 	    m,
19714bf8ce03SAdrian Chadd 	    m->m_data,
19724bf8ce03SAdrian Chadd 	    (uintmax_t) m->m_len,
19734bf8ce03SAdrian Chadd 	    (uintmax_t) seg[0].ds_addr,
19744bf8ce03SAdrian Chadd 	    (uintmax_t) seg[0].ds_len,
19754bf8ce03SAdrian Chadd 	    ((uint32_t *) r)[0],
19764bf8ce03SAdrian Chadd 	    ((uint32_t *) r)[1],
19774bf8ce03SAdrian Chadd 	    ((uint32_t *) r)[2],
19784bf8ce03SAdrian Chadd 	    ((uint32_t *) r)[3],
19794bf8ce03SAdrian Chadd 	    ((uint32_t *) r)[4],
19804bf8ce03SAdrian Chadd 	    ((uint32_t *) r)[5],
19814bf8ce03SAdrian Chadd 	    ((uint32_t *) r)[6],
19824bf8ce03SAdrian Chadd 	    ((uint32_t *) r)[7]);
19834bf8ce03SAdrian Chadd 
19844bf8ce03SAdrian Chadd 	q->q_rx.rge_rxq_prodidx = RGE_NEXT_RX_DESC(idx);
19854bf8ce03SAdrian Chadd 
19864bf8ce03SAdrian Chadd 	return (0);
19874bf8ce03SAdrian Chadd }
19884bf8ce03SAdrian Chadd 
19894bf8ce03SAdrian Chadd static void
19904bf8ce03SAdrian Chadd rge_rx_list_init(struct rge_queues *q)
19914bf8ce03SAdrian Chadd {
19924bf8ce03SAdrian Chadd 	memset(q->q_rx.rge_rx_list, 0, RGE_RX_LIST_SZ);
19934bf8ce03SAdrian Chadd 
19944bf8ce03SAdrian Chadd 	RGE_ASSERT_LOCKED(q->q_sc);
19954bf8ce03SAdrian Chadd 
19964bf8ce03SAdrian Chadd 	q->q_rx.rge_rxq_prodidx = q->q_rx.rge_rxq_considx = 0;
19974bf8ce03SAdrian Chadd 	q->q_rx.rge_head = NULL;
19984bf8ce03SAdrian Chadd 	q->q_rx.rge_tail = &q->q_rx.rge_head;
19994bf8ce03SAdrian Chadd 
20004bf8ce03SAdrian Chadd 	RGE_DPRINTF(q->q_sc, RGE_DEBUG_SETUP, "%s: rx_list=%p\n", __func__,
20014bf8ce03SAdrian Chadd 	    q->q_rx.rge_rx_list);
20024bf8ce03SAdrian Chadd 
20034bf8ce03SAdrian Chadd 	rge_fill_rx_ring(q);
20044bf8ce03SAdrian Chadd }
20054bf8ce03SAdrian Chadd 
20064bf8ce03SAdrian Chadd /**
20074bf8ce03SAdrian Chadd  * @brief Fill / refill the RX ring as needed.
20084bf8ce03SAdrian Chadd  *
20094bf8ce03SAdrian Chadd  * Refill the RX ring with one less than the total descriptors needed.
20104bf8ce03SAdrian Chadd  * This makes the check in rge_rxeof() easier - it can just check
20114bf8ce03SAdrian Chadd  * descriptors from cons -> prod and bail once it hits prod.
20124bf8ce03SAdrian Chadd  * If the whole ring is filled then cons == prod, and that shortcut
20134bf8ce03SAdrian Chadd  * fails.
20144bf8ce03SAdrian Chadd  *
20154bf8ce03SAdrian Chadd  * This must be called with the driver lock held.
20164bf8ce03SAdrian Chadd  */
20174bf8ce03SAdrian Chadd static void
20184bf8ce03SAdrian Chadd rge_fill_rx_ring(struct rge_queues *q)
20194bf8ce03SAdrian Chadd {
20204bf8ce03SAdrian Chadd 	struct rge_softc *sc = q->q_sc;
20214bf8ce03SAdrian Chadd 	uint32_t count, i, prod, cons;
20224bf8ce03SAdrian Chadd 
20234bf8ce03SAdrian Chadd 	RGE_ASSERT_LOCKED(q->q_sc);
20244bf8ce03SAdrian Chadd 
20254bf8ce03SAdrian Chadd 	prod = q->q_rx.rge_rxq_prodidx;
20264bf8ce03SAdrian Chadd 	cons = q->q_rx.rge_rxq_considx;
20274bf8ce03SAdrian Chadd 	count = rx_ring_space(q);
20284bf8ce03SAdrian Chadd 
20294bf8ce03SAdrian Chadd 	/* Fill to count-1; bail if we don't have the space */
20304bf8ce03SAdrian Chadd 	if (count <= 1)
20314bf8ce03SAdrian Chadd 		return;
20324bf8ce03SAdrian Chadd 	count--;
20334bf8ce03SAdrian Chadd 
20344bf8ce03SAdrian Chadd 	RGE_DPRINTF(sc, RGE_DEBUG_RECV_DESC, "%s: prod=%u, cons=%u, space=%u\n",
20354bf8ce03SAdrian Chadd 	    __func__, prod, cons, count);
20364bf8ce03SAdrian Chadd 
20374bf8ce03SAdrian Chadd 	/* Make sure device->host changes are visible */
20384bf8ce03SAdrian Chadd 	bus_dmamap_sync(sc->sc_dmat_rx_desc, q->q_rx.rge_rx_list_map,
20394bf8ce03SAdrian Chadd 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
20404bf8ce03SAdrian Chadd 
20414bf8ce03SAdrian Chadd 	for (i = 0; i < count; i++) {
20424bf8ce03SAdrian Chadd 		if (rge_newbuf(q))
20434bf8ce03SAdrian Chadd 			break;
20444bf8ce03SAdrian Chadd 	}
20454bf8ce03SAdrian Chadd 
20464bf8ce03SAdrian Chadd 	/* Make changes visible to the device */
20474bf8ce03SAdrian Chadd 	bus_dmamap_sync(sc->sc_dmat_rx_desc, q->q_rx.rge_rx_list_map,
20484bf8ce03SAdrian Chadd 	    BUS_DMASYNC_PREWRITE);
20494bf8ce03SAdrian Chadd }
20504bf8ce03SAdrian Chadd 
20514bf8ce03SAdrian Chadd static void
20524bf8ce03SAdrian Chadd rge_tx_list_init(struct rge_queues *q)
20534bf8ce03SAdrian Chadd {
20544bf8ce03SAdrian Chadd 	struct rge_softc *sc = q->q_sc;
20554bf8ce03SAdrian Chadd 	struct rge_tx_desc *d;
20564bf8ce03SAdrian Chadd 	int i;
20574bf8ce03SAdrian Chadd 
20584bf8ce03SAdrian Chadd 	RGE_ASSERT_LOCKED(q->q_sc);
20594bf8ce03SAdrian Chadd 
20604bf8ce03SAdrian Chadd 	memset(q->q_tx.rge_tx_list, 0, RGE_TX_LIST_SZ);
20614bf8ce03SAdrian Chadd 
20624bf8ce03SAdrian Chadd 	for (i = 0; i < RGE_TX_LIST_CNT; i++)
20634bf8ce03SAdrian Chadd 		q->q_tx.rge_txq[i].txq_mbuf = NULL;
20644bf8ce03SAdrian Chadd 
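	/* Mark the last descriptor end-of-ring so the hardware wraps back. */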
20654bf8ce03SAdrian Chadd 	d = &q->q_tx.rge_tx_list[RGE_TX_LIST_CNT - 1];
20664bf8ce03SAdrian Chadd 	d->rge_cmdsts = htole32(RGE_TDCMDSTS_EOR);
20674bf8ce03SAdrian Chadd 
20684bf8ce03SAdrian Chadd 	bus_dmamap_sync(sc->sc_dmat_tx_desc, q->q_tx.rge_tx_list_map,
20694bf8ce03SAdrian Chadd 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
20704bf8ce03SAdrian Chadd 	wmb();
20714bf8ce03SAdrian Chadd 
20724bf8ce03SAdrian Chadd 	q->q_tx.rge_txq_prodidx = q->q_tx.rge_txq_considx = 0;
20734bf8ce03SAdrian Chadd 
20744bf8ce03SAdrian Chadd 	RGE_DPRINTF(sc, RGE_DEBUG_SETUP, "%s: tx_list=%p\n", __func__,
20754bf8ce03SAdrian Chadd 	    q->q_tx.rge_tx_list);
20764bf8ce03SAdrian Chadd }
20774bf8ce03SAdrian Chadd 
20784bf8ce03SAdrian Chadd int
20794bf8ce03SAdrian Chadd rge_rxeof(struct rge_queues *q, struct mbufq *mq)
20804bf8ce03SAdrian Chadd {
20814bf8ce03SAdrian Chadd 	struct rge_softc *sc = q->q_sc;
20824bf8ce03SAdrian Chadd 	struct mbuf *m;
20834bf8ce03SAdrian Chadd 	struct rge_rx_desc *cur_rx;
20844bf8ce03SAdrian Chadd 	struct rge_rxq *rxq;
20854bf8ce03SAdrian Chadd 	uint32_t rxstat, extsts;
20864bf8ce03SAdrian Chadd 	int i, mlen, rx = 0;
20874bf8ce03SAdrian Chadd 	int cons, prod;
20884bf8ce03SAdrian Chadd 	int maxpkt = 16; /* XXX TODO: make this a tunable */
20894bf8ce03SAdrian Chadd 	bool check_hwcsum;
20904bf8ce03SAdrian Chadd 
20914bf8ce03SAdrian Chadd 	check_hwcsum = ((if_getcapenable(sc->sc_ifp) & IFCAP_RXCSUM) != 0);
20924bf8ce03SAdrian Chadd 
20934bf8ce03SAdrian Chadd 	RGE_ASSERT_LOCKED(sc);
20944bf8ce03SAdrian Chadd 
20954bf8ce03SAdrian Chadd 	sc->sc_drv_stats.rxeof_cnt++;
20964bf8ce03SAdrian Chadd 
20974bf8ce03SAdrian Chadd 	RGE_DPRINTF(sc, RGE_DEBUG_INTR, "%s: called\n", __func__);
20984bf8ce03SAdrian Chadd 
20994bf8ce03SAdrian Chadd 	/* Note: if_re is POSTREAD/WRITE, rge is only POSTWRITE */
21004bf8ce03SAdrian Chadd 	bus_dmamap_sync(sc->sc_dmat_rx_desc, q->q_rx.rge_rx_list_map,
21014bf8ce03SAdrian Chadd 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
21024bf8ce03SAdrian Chadd 
21034bf8ce03SAdrian Chadd 	prod = q->q_rx.rge_rxq_prodidx;
21044bf8ce03SAdrian Chadd 
21054bf8ce03SAdrian Chadd 	/*
21064bf8ce03SAdrian Chadd 	 * Loop around until we've run out of active descriptors to check
21074bf8ce03SAdrian Chadd 	 * or maxpkt has been reached.
21084bf8ce03SAdrian Chadd 	 */
21094bf8ce03SAdrian Chadd 	for (i = cons = q->q_rx.rge_rxq_considx;
21104bf8ce03SAdrian Chadd 	    maxpkt > 0 && i != prod;
21114bf8ce03SAdrian Chadd 	    i = RGE_NEXT_RX_DESC(i)) {
21124bf8ce03SAdrian Chadd 		/* break out of loop if we're not running */
21134bf8ce03SAdrian Chadd 		if ((if_getdrvflags(sc->sc_ifp) & IFF_DRV_RUNNING) == 0)
21144bf8ce03SAdrian Chadd 			break;
21154bf8ce03SAdrian Chadd 
21164bf8ce03SAdrian Chadd 		/* get the current rx descriptor to check descriptor status */
21174bf8ce03SAdrian Chadd 		cur_rx = &q->q_rx.rge_rx_list[i];
21184bf8ce03SAdrian Chadd 		rxstat = le32toh(cur_rx->hi_qword1.rx_qword4.rge_cmdsts);
21194bf8ce03SAdrian Chadd 		if ((rxstat & RGE_RDCMDSTS_OWN) != 0) {
21204bf8ce03SAdrian Chadd 			break;
21214bf8ce03SAdrian Chadd 		}
21224bf8ce03SAdrian Chadd 
21234bf8ce03SAdrian Chadd 		/* Ensure everything else has been DMAed */
21244bf8ce03SAdrian Chadd 		rmb();
21254bf8ce03SAdrian Chadd 
21264bf8ce03SAdrian Chadd 		/* Get the current rx buffer, sync */
21274bf8ce03SAdrian Chadd 		rxq = &q->q_rx.rge_rxq[i];
21284bf8ce03SAdrian Chadd 
21294bf8ce03SAdrian Chadd 		/* Ensure any device updates are now visible in host memory */
21304bf8ce03SAdrian Chadd 		bus_dmamap_sync(sc->sc_dmat_rx_buf, rxq->rxq_dmamap,
21314bf8ce03SAdrian Chadd 		    BUS_DMASYNC_POSTREAD);
21324bf8ce03SAdrian Chadd 
21334bf8ce03SAdrian Chadd 		/* Unload the DMA map, we are done with it here */
21344bf8ce03SAdrian Chadd 		bus_dmamap_unload(sc->sc_dmat_rx_buf, rxq->rxq_dmamap);
21354bf8ce03SAdrian Chadd 		m = rxq->rxq_mbuf;
21364bf8ce03SAdrian Chadd 		rxq->rxq_mbuf = NULL;
21374bf8ce03SAdrian Chadd 
21384bf8ce03SAdrian Chadd 		rx = 1;
21394bf8ce03SAdrian Chadd 
21404bf8ce03SAdrian Chadd 		RGE_DPRINTF(sc, RGE_DEBUG_RECV_DESC,
21414bf8ce03SAdrian Chadd 		    "%s: RX: [%d]: m=%p, m_data=%p, m_len=%ju, "
21424bf8ce03SAdrian Chadd 		    "desc=0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
21434bf8ce03SAdrian Chadd 		    __func__,
21444bf8ce03SAdrian Chadd 		    i,
21454bf8ce03SAdrian Chadd 		    m,
21464bf8ce03SAdrian Chadd 		    m->m_data,
21474bf8ce03SAdrian Chadd 		    (uintmax_t) m->m_len,
21484bf8ce03SAdrian Chadd 		    ((uint32_t *) cur_rx)[0],
21494bf8ce03SAdrian Chadd 		    ((uint32_t *) cur_rx)[1],
21504bf8ce03SAdrian Chadd 		    ((uint32_t *) cur_rx)[2],
21514bf8ce03SAdrian Chadd 		    ((uint32_t *) cur_rx)[3],
21524bf8ce03SAdrian Chadd 		    ((uint32_t *) cur_rx)[4],
21534bf8ce03SAdrian Chadd 		    ((uint32_t *) cur_rx)[5],
21544bf8ce03SAdrian Chadd 		    ((uint32_t *) cur_rx)[6],
21554bf8ce03SAdrian Chadd 		    ((uint32_t *) cur_rx)[7]);
21564bf8ce03SAdrian Chadd 
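		/*
		 * A frame may span several descriptors: SOF starts a new
		 * chain on rge_head/rge_tail, later fragments are appended,
		 * and the assembled packet is only handed up once EOF is
		 * seen.  A new SOF while a chain is pending discards the
		 * half-built frame as an error.
		 */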
21574bf8ce03SAdrian Chadd 		if ((rxstat & RGE_RDCMDSTS_SOF) != 0) {
21584bf8ce03SAdrian Chadd 			if (q->q_rx.rge_head != NULL) {
21594bf8ce03SAdrian Chadd 				sc->sc_drv_stats.rx_desc_err_multidesc++;
21604bf8ce03SAdrian Chadd 				if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS,
21614bf8ce03SAdrian Chadd 				    1);
21624bf8ce03SAdrian Chadd 				m_freem(q->q_rx.rge_head);
21634bf8ce03SAdrian Chadd 				q->q_rx.rge_tail = &q->q_rx.rge_head;
21644bf8ce03SAdrian Chadd 			}
21654bf8ce03SAdrian Chadd 
21664bf8ce03SAdrian Chadd 			m->m_pkthdr.len = 0;
21674bf8ce03SAdrian Chadd 		} else if (q->q_rx.rge_head == NULL) {
21684bf8ce03SAdrian Chadd 			m_freem(m);
21694bf8ce03SAdrian Chadd 			continue;
21704bf8ce03SAdrian Chadd 		} else
21714bf8ce03SAdrian Chadd 			m->m_flags &= ~M_PKTHDR;
21724bf8ce03SAdrian Chadd 
21734bf8ce03SAdrian Chadd 		*q->q_rx.rge_tail = m;
21744bf8ce03SAdrian Chadd 		q->q_rx.rge_tail = &m->m_next;
21754bf8ce03SAdrian Chadd 
21764bf8ce03SAdrian Chadd 		mlen = rxstat & RGE_RDCMDSTS_FRAGLEN;
21774bf8ce03SAdrian Chadd 		m->m_len = mlen;
21784bf8ce03SAdrian Chadd 
21794bf8ce03SAdrian Chadd 		m = q->q_rx.rge_head;
21804bf8ce03SAdrian Chadd 		m->m_pkthdr.len += mlen;
21814bf8ce03SAdrian Chadd 
21824bf8ce03SAdrian Chadd 		/* Ethernet CRC error */
21834bf8ce03SAdrian Chadd 		if (rxstat & RGE_RDCMDSTS_RXERRSUM) {
21844bf8ce03SAdrian Chadd 			sc->sc_drv_stats.rx_ether_csum_err++;
21854bf8ce03SAdrian Chadd 			if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
21864bf8ce03SAdrian Chadd 			m_freem(m);
21874bf8ce03SAdrian Chadd 			q->q_rx.rge_head = NULL;
21884bf8ce03SAdrian Chadd 			q->q_rx.rge_tail = &q->q_rx.rge_head;
21894bf8ce03SAdrian Chadd 			continue;
21904bf8ce03SAdrian Chadd 		}
21914bf8ce03SAdrian Chadd 
21924bf8ce03SAdrian Chadd 		/*
21934bf8ce03SAdrian Chadd 		 * This mbuf is part of a multi-descriptor frame,
21944bf8ce03SAdrian Chadd 		 * so count it towards that.
21954bf8ce03SAdrian Chadd 		 *
21964bf8ce03SAdrian Chadd 		 * Yes, this means we won't be counting the
21974bf8ce03SAdrian Chadd 		 * final descriptor/mbuf as part of a multi-descriptor
21984bf8ce03SAdrian Chadd 		 * frame; if someone wishes to do that then it
21994bf8ce03SAdrian Chadd 		 * shouldn't be too hard to add.
22004bf8ce03SAdrian Chadd 		 */
22014bf8ce03SAdrian Chadd 		if ((rxstat & RGE_RDCMDSTS_EOF) == 0) {
22024bf8ce03SAdrian Chadd 			sc->sc_drv_stats.rx_desc_jumbo_frag++;
22034bf8ce03SAdrian Chadd 			continue;
22044bf8ce03SAdrian Chadd 		}
22054bf8ce03SAdrian Chadd 
22064bf8ce03SAdrian Chadd 		q->q_rx.rge_head = NULL;
22074bf8ce03SAdrian Chadd 		q->q_rx.rge_tail = &q->q_rx.rge_head;
22084bf8ce03SAdrian Chadd 
22094bf8ce03SAdrian Chadd 		m_adj(m, -ETHER_CRC_LEN);
22104bf8ce03SAdrian Chadd 		m->m_pkthdr.rcvif = sc->sc_ifp;
22114bf8ce03SAdrian Chadd 		if_inc_counter(sc->sc_ifp, IFCOUNTER_IPACKETS, 1);
22124bf8ce03SAdrian Chadd 
22134bf8ce03SAdrian Chadd 		extsts = le32toh(cur_rx->hi_qword1.rx_qword4.rge_extsts);
22144bf8ce03SAdrian Chadd 
22154bf8ce03SAdrian Chadd 		/* Check IP header checksum. */
22164bf8ce03SAdrian Chadd 		if (check_hwcsum) {
22174bf8ce03SAdrian Chadd 			/* Does it exist for IPv4? */
22184bf8ce03SAdrian Chadd 			if (extsts & RGE_RDEXTSTS_IPV4) {
22194bf8ce03SAdrian Chadd 				sc->sc_drv_stats.rx_offload_csum_ipv4_exists++;
22204bf8ce03SAdrian Chadd 				m->m_pkthdr.csum_flags |=
22214bf8ce03SAdrian Chadd 				    CSUM_IP_CHECKED;
22224bf8ce03SAdrian Chadd 			}
22234bf8ce03SAdrian Chadd 			/* XXX IPv6 checksum check? */
22244bf8ce03SAdrian Chadd 
22254bf8ce03SAdrian Chadd 			if (((extsts & RGE_RDEXTSTS_IPCSUMERR) == 0)
22264bf8ce03SAdrian Chadd 			    && ((extsts & RGE_RDEXTSTS_IPV4) != 0)) {
22274bf8ce03SAdrian Chadd 				sc->sc_drv_stats.rx_offload_csum_ipv4_valid++;
22284bf8ce03SAdrian Chadd 				m->m_pkthdr.csum_flags |=
22294bf8ce03SAdrian Chadd 				    CSUM_IP_VALID;
22304bf8ce03SAdrian Chadd 			}
22314bf8ce03SAdrian Chadd 
22324bf8ce03SAdrian Chadd 			/* Check TCP/UDP checksum. */
22334bf8ce03SAdrian Chadd 			if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
22344bf8ce03SAdrian Chadd 			    (extsts & RGE_RDEXTSTS_TCPPKT)) {
22354bf8ce03SAdrian Chadd 				sc->sc_drv_stats.rx_offload_csum_tcp_exists++;
22364bf8ce03SAdrian Chadd 				if ((extsts & RGE_RDEXTSTS_TCPCSUMERR) == 0) {
22374bf8ce03SAdrian Chadd 					sc->sc_drv_stats.rx_offload_csum_tcp_valid++;
22384bf8ce03SAdrian Chadd 					/* TCP checksum OK */
22394bf8ce03SAdrian Chadd 					m->m_pkthdr.csum_flags |=
22404bf8ce03SAdrian Chadd 					    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
22414bf8ce03SAdrian Chadd 					m->m_pkthdr.csum_data = 0xffff;
22424bf8ce03SAdrian Chadd 				}
22434bf8ce03SAdrian Chadd 			}
22444bf8ce03SAdrian Chadd 
22454bf8ce03SAdrian Chadd 			if ((extsts & (RGE_RDEXTSTS_IPV4 | RGE_RDEXTSTS_IPV6)) &&
22464bf8ce03SAdrian Chadd 			    (extsts & RGE_RDEXTSTS_UDPPKT)) {
22474bf8ce03SAdrian Chadd 				sc->sc_drv_stats.rx_offload_csum_udp_exists++;
22484bf8ce03SAdrian Chadd 				if ((extsts & RGE_RDEXTSTS_UDPCSUMERR) == 0) {
22494bf8ce03SAdrian Chadd 					sc->sc_drv_stats.rx_offload_csum_udp_valid++;
22504bf8ce03SAdrian Chadd 					/* UDP checksum OK */
22514bf8ce03SAdrian Chadd 					m->m_pkthdr.csum_flags |=
22524bf8ce03SAdrian Chadd 					    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
22534bf8ce03SAdrian Chadd 					m->m_pkthdr.csum_data = 0xffff;
22544bf8ce03SAdrian Chadd 				}
22554bf8ce03SAdrian Chadd 			}
22564bf8ce03SAdrian Chadd 		}
22574bf8ce03SAdrian Chadd 
22584bf8ce03SAdrian Chadd 		if (extsts & RGE_RDEXTSTS_VTAG) {
22594bf8ce03SAdrian Chadd 			sc->sc_drv_stats.rx_offload_vlan_tag++;
22604bf8ce03SAdrian Chadd 			m->m_pkthdr.ether_vtag =
22614bf8ce03SAdrian Chadd 			    ntohs(extsts & RGE_RDEXTSTS_VLAN_MASK);
22624bf8ce03SAdrian Chadd 			m->m_flags |= M_VLANTAG;
22634bf8ce03SAdrian Chadd 		}
22644bf8ce03SAdrian Chadd 
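		/*
		 * Stash the completed frame on the caller-supplied mbufq;
		 * the caller is responsible for delivering it to the
		 * network stack.
		 */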
22654bf8ce03SAdrian Chadd 		mbufq_enqueue(mq, m);
22664bf8ce03SAdrian Chadd 
22674bf8ce03SAdrian Chadd 		maxpkt--;
22684bf8ce03SAdrian Chadd 	}
22694bf8ce03SAdrian Chadd 
22704bf8ce03SAdrian Chadd 	if (!rx)
22714bf8ce03SAdrian Chadd 		return (0);
22724bf8ce03SAdrian Chadd 
22734bf8ce03SAdrian Chadd 	/*
22744bf8ce03SAdrian Chadd 	 * Make sure our updates to the descriptor ring are visible
22754bf8ce03SAdrian Chadd 	 * to the device before we hand descriptors back and refill.
22764bf8ce03SAdrian Chadd 	 */
22774bf8ce03SAdrian Chadd 	bus_dmamap_sync(sc->sc_dmat_rx_desc, q->q_rx.rge_rx_list_map,
22784bf8ce03SAdrian Chadd 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
22794bf8ce03SAdrian Chadd 	wmb();
22804bf8ce03SAdrian Chadd 
22814bf8ce03SAdrian Chadd 	/* Update the consumer index, refill the RX ring */
22824bf8ce03SAdrian Chadd 	q->q_rx.rge_rxq_considx = i;
22834bf8ce03SAdrian Chadd 	rge_fill_rx_ring(q);
22844bf8ce03SAdrian Chadd 
22854bf8ce03SAdrian Chadd 	return (1);
22864bf8ce03SAdrian Chadd }
22874bf8ce03SAdrian Chadd 
22884bf8ce03SAdrian Chadd int
22894bf8ce03SAdrian Chadd rge_txeof(struct rge_queues *q)
22904bf8ce03SAdrian Chadd {
22914bf8ce03SAdrian Chadd 	struct rge_softc *sc = q->q_sc;
22924bf8ce03SAdrian Chadd 	struct ifnet *ifp = sc->sc_ifp;
22934bf8ce03SAdrian Chadd 	struct rge_txq *txq;
22944bf8ce03SAdrian Chadd 	uint32_t txstat;
22954bf8ce03SAdrian Chadd 	int cons, prod, cur, idx;
22964bf8ce03SAdrian Chadd 	int free = 0, ntx = 0;
22974bf8ce03SAdrian Chadd 	int pktlen;
22984bf8ce03SAdrian Chadd 	bool is_mcast;
22994bf8ce03SAdrian Chadd 
23004bf8ce03SAdrian Chadd 	RGE_ASSERT_LOCKED(sc);
23014bf8ce03SAdrian Chadd 
23024bf8ce03SAdrian Chadd 	sc->sc_drv_stats.txeof_cnt++;
23034bf8ce03SAdrian Chadd 
23044bf8ce03SAdrian Chadd 	prod = q->q_tx.rge_txq_prodidx;
23054bf8ce03SAdrian Chadd 	cons = q->q_tx.rge_txq_considx;
23064bf8ce03SAdrian Chadd 
23074bf8ce03SAdrian Chadd 	idx = cons;
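	/*
	 * Walk the ring from the consumer index towards the producer,
	 * reclaiming descriptors the hardware has finished with.
	 */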
23084bf8ce03SAdrian Chadd 	while (idx != prod) {
23094bf8ce03SAdrian Chadd 		txq = &q->q_tx.rge_txq[idx];
23104bf8ce03SAdrian Chadd 		cur = txq->txq_descidx;
23114bf8ce03SAdrian Chadd 
23124bf8ce03SAdrian Chadd 		rge_tx_list_sync(sc, q, cur, 1, BUS_DMASYNC_POSTREAD);
23134bf8ce03SAdrian Chadd 		txstat = q->q_tx.rge_tx_list[cur].rge_cmdsts;
23144bf8ce03SAdrian Chadd 		rge_tx_list_sync(sc, q, cur, 1, BUS_DMASYNC_PREREAD);
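		/*
		 * Stop at the first descriptor the hardware still owns;
		 * free == 2 tells the code below that completions are
		 * still outstanding, so the watchdog stays armed.
		 */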
23154bf8ce03SAdrian Chadd 		if ((txstat & htole32(RGE_TDCMDSTS_OWN)) != 0) {
23164bf8ce03SAdrian Chadd 			free = 2;
23174bf8ce03SAdrian Chadd 			break;
23184bf8ce03SAdrian Chadd 		}
23194bf8ce03SAdrian Chadd 
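		/*
		 * This descriptor is done: sync and unload the DMA map,
		 * note the packet length and multicast flag for the
		 * counters, then free the transmitted mbuf.
		 */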
23204bf8ce03SAdrian Chadd 		bus_dmamap_sync(sc->sc_dmat_tx_buf, txq->txq_dmamap,
23214bf8ce03SAdrian Chadd 		    BUS_DMASYNC_POSTWRITE);
23224bf8ce03SAdrian Chadd 		bus_dmamap_unload(sc->sc_dmat_tx_buf, txq->txq_dmamap);
23234bf8ce03SAdrian Chadd 		pktlen = txq->txq_mbuf->m_pkthdr.len;
23244bf8ce03SAdrian Chadd 		is_mcast = ((txq->txq_mbuf->m_flags & M_MCAST) != 0);
23254bf8ce03SAdrian Chadd 		m_freem(txq->txq_mbuf);
23264bf8ce03SAdrian Chadd 		txq->txq_mbuf = NULL;
23274bf8ce03SAdrian Chadd 		ntx++;
23284bf8ce03SAdrian Chadd 
23294bf8ce03SAdrian Chadd 		if ((txstat &
23304bf8ce03SAdrian Chadd 		    htole32(RGE_TDCMDSTS_EXCESSCOLL | RGE_TDCMDSTS_COLL)) != 0)
23314bf8ce03SAdrian Chadd 			if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
23324bf8ce03SAdrian Chadd 		if ((txstat & htole32(RGE_TDCMDSTS_TXERR)) != 0) {
23334bf8ce03SAdrian Chadd 			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
23344bf8ce03SAdrian Chadd 		} else {
23354bf8ce03SAdrian Chadd 			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
23364bf8ce03SAdrian Chadd 			if_inc_counter(ifp, IFCOUNTER_OBYTES, pktlen);
23374bf8ce03SAdrian Chadd 			if (is_mcast)
23384bf8ce03SAdrian Chadd 				if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
23404bf8ce03SAdrian Chadd 		}
23414bf8ce03SAdrian Chadd 
23424bf8ce03SAdrian Chadd 		idx = RGE_NEXT_TX_DESC(cur);
23434bf8ce03SAdrian Chadd 		free = 1;
23444bf8ce03SAdrian Chadd 	}
23454bf8ce03SAdrian Chadd 
23464bf8ce03SAdrian Chadd 	/* If nothing was processed at all (ring was empty), return 0 */
23474bf8ce03SAdrian Chadd 	if (free == 0)
23484bf8ce03SAdrian Chadd 		return (0);
23494bf8ce03SAdrian Chadd 
23504bf8ce03SAdrian Chadd 	if (idx >= cons) {
23514bf8ce03SAdrian Chadd 		rge_tx_list_sync(sc, q, cons, idx - cons,
23524bf8ce03SAdrian Chadd 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
23534bf8ce03SAdrian Chadd 	} else {
23544bf8ce03SAdrian Chadd 		rge_tx_list_sync(sc, q, cons, RGE_TX_LIST_CNT - cons,
23554bf8ce03SAdrian Chadd 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
23564bf8ce03SAdrian Chadd 		rge_tx_list_sync(sc, q, 0, idx,
23574bf8ce03SAdrian Chadd 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
23584bf8ce03SAdrian Chadd 	}
23594bf8ce03SAdrian Chadd 
23604bf8ce03SAdrian Chadd 	q->q_tx.rge_txq_considx = idx;
23614bf8ce03SAdrian Chadd 
23624bf8ce03SAdrian Chadd 	RGE_DPRINTF(sc, RGE_DEBUG_XMIT,
23634bf8ce03SAdrian Chadd 	    "%s: handled %d frames; prod=%d, cons=%d\n", __func__,
23644bf8ce03SAdrian Chadd 	    ntx, q->q_tx.rge_txq_prodidx, q->q_tx.rge_txq_considx);
23654bf8ce03SAdrian Chadd 
23664bf8ce03SAdrian Chadd 	/*
23674bf8ce03SAdrian Chadd 	 * If we stopped at a descriptor that is still owned by the
23684bf8ce03SAdrian Chadd 	 * hardware, there is still pending work, so keep the
23694bf8ce03SAdrian Chadd 	 * watchdog armed.
23704bf8ce03SAdrian Chadd 	 *
23714bf8ce03SAdrian Chadd 	 * If we caught up with the producer and no further frames
23724bf8ce03SAdrian Chadd 	 * are owned by the hardware, we can quieten the watchdog.
23734bf8ce03SAdrian Chadd 	 */
23744bf8ce03SAdrian Chadd 	if (free == 2)
23754bf8ce03SAdrian Chadd 		sc->sc_watchdog = 5;
23764bf8ce03SAdrian Chadd 	else
23774bf8ce03SAdrian Chadd 		sc->sc_watchdog = 0;
23784bf8ce03SAdrian Chadd 
23794bf8ce03SAdrian Chadd 	/*
23804bf8ce03SAdrian Chadd 	 * Kick-start the transmit task just in case we have
23814bf8ce03SAdrian Chadd 	 * more frames available.
23824bf8ce03SAdrian Chadd 	 */
23834bf8ce03SAdrian Chadd 	taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task);
23844bf8ce03SAdrian Chadd 
23854bf8ce03SAdrian Chadd 	return (1);
23864bf8ce03SAdrian Chadd }
23874bf8ce03SAdrian Chadd 
23884bf8ce03SAdrian Chadd static u_int
23894bf8ce03SAdrian Chadd rge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
23904bf8ce03SAdrian Chadd {
23914bf8ce03SAdrian Chadd 	uint32_t crc, *hashes = arg;
23924bf8ce03SAdrian Chadd 
23934bf8ce03SAdrian Chadd 	/* XXX TODO: validate this does addrlo? */
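	/*
	 * Take the top 6 bits of the big-endian CRC32 of the address
	 * as the index into the 64-bit multicast hash filter.
	 */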
23944bf8ce03SAdrian Chadd 	crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
23954bf8ce03SAdrian Chadd 	crc &= 0x3f;
23964bf8ce03SAdrian Chadd 
23974bf8ce03SAdrian Chadd 	if (crc < 32)
23984bf8ce03SAdrian Chadd 		hashes[0] |= (1 << crc);
23994bf8ce03SAdrian Chadd 	else
24004bf8ce03SAdrian Chadd 		hashes[1] |= (1 << (crc - 32));
24014bf8ce03SAdrian Chadd 
24024bf8ce03SAdrian Chadd 	return (1);
24034bf8ce03SAdrian Chadd }
24044bf8ce03SAdrian Chadd 
24054bf8ce03SAdrian Chadd /**
24064bf8ce03SAdrian Chadd  * @brief Configure the RX filter and multicast filter.
24074bf8ce03SAdrian Chadd  *
24084bf8ce03SAdrian Chadd  * This must be called with the driver lock held.
24094bf8ce03SAdrian Chadd  */
24104bf8ce03SAdrian Chadd static void
24114bf8ce03SAdrian Chadd rge_iff_locked(struct rge_softc *sc)
24124bf8ce03SAdrian Chadd {
24134bf8ce03SAdrian Chadd 	uint32_t hashes[2];
24144bf8ce03SAdrian Chadd 	uint32_t rxfilt;
24154bf8ce03SAdrian Chadd 
24164bf8ce03SAdrian Chadd 	RGE_ASSERT_LOCKED(sc);
24174bf8ce03SAdrian Chadd 
24184bf8ce03SAdrian Chadd 	rxfilt = RGE_READ_4(sc, RGE_RXCFG);
24194bf8ce03SAdrian Chadd 	rxfilt &= ~(RGE_RXCFG_ALLPHYS | RGE_RXCFG_MULTI);
24204bf8ce03SAdrian Chadd 
24214bf8ce03SAdrian Chadd 	/*
24224bf8ce03SAdrian Chadd 	 * Always accept frames destined to our station address.
24234bf8ce03SAdrian Chadd 	 * Always accept broadcast frames.
24244bf8ce03SAdrian Chadd 	 */
24254bf8ce03SAdrian Chadd 	rxfilt |= RGE_RXCFG_INDIV | RGE_RXCFG_BROAD;
24264bf8ce03SAdrian Chadd 
24274bf8ce03SAdrian Chadd 	if ((if_getflags(sc->sc_ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
24284bf8ce03SAdrian Chadd 		rxfilt |= RGE_RXCFG_MULTI;
24294bf8ce03SAdrian Chadd 		if ((if_getflags(sc->sc_ifp) & IFF_PROMISC) != 0)
24304bf8ce03SAdrian Chadd 			rxfilt |= RGE_RXCFG_ALLPHYS;
24314bf8ce03SAdrian Chadd 		hashes[0] = hashes[1] = 0xffffffff;
24324bf8ce03SAdrian Chadd 	} else {
24334bf8ce03SAdrian Chadd 		rxfilt |= RGE_RXCFG_MULTI;
24344bf8ce03SAdrian Chadd 		/* Program new filter. */
24354bf8ce03SAdrian Chadd 		memset(hashes, 0, sizeof(hashes));
24364bf8ce03SAdrian Chadd 		if_foreach_llmaddr(sc->sc_ifp, rge_hash_maddr, &hashes);
24374bf8ce03SAdrian Chadd 	}
24384bf8ce03SAdrian Chadd 
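	/*
	 * Note the hash words are written byte-swapped, with hashes[1]
	 * going into RGE_MAR0 and hashes[0] into RGE_MAR4.
	 */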
24394bf8ce03SAdrian Chadd 	RGE_WRITE_4(sc, RGE_RXCFG, rxfilt);
24404bf8ce03SAdrian Chadd 	RGE_WRITE_4(sc, RGE_MAR0, bswap32(hashes[1]));
24414bf8ce03SAdrian Chadd 	RGE_WRITE_4(sc, RGE_MAR4, bswap32(hashes[0]));
24424bf8ce03SAdrian Chadd }
24434bf8ce03SAdrian Chadd 
24444bf8ce03SAdrian Chadd static void
24454bf8ce03SAdrian Chadd rge_add_media_types(struct rge_softc *sc)
24464bf8ce03SAdrian Chadd {
24474bf8ce03SAdrian Chadd 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL);
24484bf8ce03SAdrian Chadd 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
24494bf8ce03SAdrian Chadd 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL);
24504bf8ce03SAdrian Chadd 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
24514bf8ce03SAdrian Chadd 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T, 0, NULL);
24524bf8ce03SAdrian Chadd 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
24534bf8ce03SAdrian Chadd 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
24544bf8ce03SAdrian Chadd 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
24554bf8ce03SAdrian Chadd 
24564bf8ce03SAdrian Chadd 	if (sc->rge_type == MAC_R26) {
24574bf8ce03SAdrian Chadd 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T, 0, NULL);
24584bf8ce03SAdrian Chadd 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T | IFM_FDX,
24594bf8ce03SAdrian Chadd 		    0, NULL);
24604bf8ce03SAdrian Chadd 	} else if (sc->rge_type == MAC_R27) {
24614bf8ce03SAdrian Chadd 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_T, 0, NULL);
24624bf8ce03SAdrian Chadd 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_T | IFM_FDX,
24634bf8ce03SAdrian Chadd 		    0, NULL);
24644bf8ce03SAdrian Chadd 	}
24654bf8ce03SAdrian Chadd }
24664bf8ce03SAdrian Chadd 
24674bf8ce03SAdrian Chadd /**
24684bf8ce03SAdrian Chadd  * @brief Deferred packet dequeue and submit.
24694bf8ce03SAdrian Chadd  */
24704bf8ce03SAdrian Chadd static void
24714bf8ce03SAdrian Chadd rge_tx_task(void *arg, int npending)
24724bf8ce03SAdrian Chadd {
24734bf8ce03SAdrian Chadd 	struct rge_softc *sc = (struct rge_softc *) arg;
24744bf8ce03SAdrian Chadd 	/* Note: for now, one queue */
24754bf8ce03SAdrian Chadd 	struct rge_queues *q = sc->sc_queues;
24764bf8ce03SAdrian Chadd 	struct mbuf *m;
24774bf8ce03SAdrian Chadd 	int ntx = 0;
24784bf8ce03SAdrian Chadd 	int idx, free, used;
24794bf8ce03SAdrian Chadd 
24804bf8ce03SAdrian Chadd 	RGE_DPRINTF(sc, RGE_DEBUG_XMIT, "%s: running\n", __func__);
24814bf8ce03SAdrian Chadd 
24824bf8ce03SAdrian Chadd 	RGE_LOCK(sc);
24834bf8ce03SAdrian Chadd 	sc->sc_drv_stats.tx_task_cnt++;
24844bf8ce03SAdrian Chadd 
24854bf8ce03SAdrian Chadd 	if (sc->sc_stopped == true) {
24864bf8ce03SAdrian Chadd 		sc->sc_watchdog = 0;
24874bf8ce03SAdrian Chadd 		RGE_UNLOCK(sc);
24884bf8ce03SAdrian Chadd 		return;
24894bf8ce03SAdrian Chadd 	}
24904bf8ce03SAdrian Chadd 
24914bf8ce03SAdrian Chadd 	/* Calculate free space. */
24924bf8ce03SAdrian Chadd 	idx = q->q_tx.rge_txq_prodidx;
24934bf8ce03SAdrian Chadd 	free = q->q_tx.rge_txq_considx;
24944bf8ce03SAdrian Chadd 	if (free <= idx)
24954bf8ce03SAdrian Chadd 		free += RGE_TX_LIST_CNT;
24964bf8ce03SAdrian Chadd 	free -= idx;
24974bf8ce03SAdrian Chadd 
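	/*
	 * Dequeue and encapsulate frames while there is room for a
	 * worst-case (RGE_TX_NSEGS segments plus slack) encapsulation.
	 */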
24984bf8ce03SAdrian Chadd 	for (;;) {
24994bf8ce03SAdrian Chadd 		if (free < RGE_TX_NSEGS + 2) {
25004bf8ce03SAdrian Chadd 			break;
25014bf8ce03SAdrian Chadd 		}
25024bf8ce03SAdrian Chadd 
25034bf8ce03SAdrian Chadd 		/* Dequeue */
25044bf8ce03SAdrian Chadd 		m = mbufq_dequeue(&sc->sc_txq);
25054bf8ce03SAdrian Chadd 		if (m == NULL)
25064bf8ce03SAdrian Chadd 			break;
25074bf8ce03SAdrian Chadd 
25084bf8ce03SAdrian Chadd 		/* Attempt to encap */
25094bf8ce03SAdrian Chadd 		used = rge_encap(sc, q, m, idx);
25104bf8ce03SAdrian Chadd 		if (used < 0) {
25114bf8ce03SAdrian Chadd 			if_inc_counter(sc->sc_ifp, IFCOUNTER_OQDROPS, 1);
25124bf8ce03SAdrian Chadd 			m_freem(m);
25134bf8ce03SAdrian Chadd 			continue;
25144bf8ce03SAdrian Chadd 		} else if (used == 0) {
25154bf8ce03SAdrian Chadd 			mbufq_prepend(&sc->sc_txq, m);
25164bf8ce03SAdrian Chadd 			break;
25174bf8ce03SAdrian Chadd 		}
25184bf8ce03SAdrian Chadd 
25194bf8ce03SAdrian Chadd 		/*
25204bf8ce03SAdrian Chadd 		 * Note: the mbuf is now owned by the TX ring, but we hold
25214bf8ce03SAdrian Chadd 		 * the driver lock, so it's safe to hand it to BPF here to
25224bf8ce03SAdrian Chadd 		 * be copied without worrying that TX completion will run
25234bf8ce03SAdrian Chadd 		 * and free it before we get a shot at it.
25244bf8ce03SAdrian Chadd 		 */
25254bf8ce03SAdrian Chadd 		ETHER_BPF_MTAP(sc->sc_ifp, m);
25264bf8ce03SAdrian Chadd 
25274bf8ce03SAdrian Chadd 		/* Update free/idx pointers */
25284bf8ce03SAdrian Chadd 		free -= used;
25294bf8ce03SAdrian Chadd 		idx += used;
25304bf8ce03SAdrian Chadd 		if (idx >= RGE_TX_LIST_CNT)
25314bf8ce03SAdrian Chadd 			idx -= RGE_TX_LIST_CNT;
25324bf8ce03SAdrian Chadd 
25334bf8ce03SAdrian Chadd 		ntx++;
25344bf8ce03SAdrian Chadd 	}
25354bf8ce03SAdrian Chadd 
25364bf8ce03SAdrian Chadd 	/* Ok, did we queue anything? If so, poke the hardware */
25374bf8ce03SAdrian Chadd 	if (ntx > 0) {
25384bf8ce03SAdrian Chadd 		q->q_tx.rge_txq_prodidx = idx;
25394bf8ce03SAdrian Chadd 		sc->sc_watchdog = 5;
25404bf8ce03SAdrian Chadd 		RGE_WRITE_2(sc, RGE_TXSTART, RGE_TXSTART_START);
25414bf8ce03SAdrian Chadd 	}
25424bf8ce03SAdrian Chadd 
25434bf8ce03SAdrian Chadd 	RGE_DPRINTF(sc, RGE_DEBUG_XMIT,
25444bf8ce03SAdrian Chadd 	    "%s: handled %d frames; prod=%d, cons=%d\n", __func__,
25454bf8ce03SAdrian Chadd 	    ntx, q->q_tx.rge_txq_prodidx, q->q_tx.rge_txq_considx);
25464bf8ce03SAdrian Chadd 
25474bf8ce03SAdrian Chadd 	RGE_UNLOCK(sc);
25484bf8ce03SAdrian Chadd }
25494bf8ce03SAdrian Chadd 
25504bf8ce03SAdrian Chadd /**
25514bf8ce03SAdrian Chadd  * @brief Called by the sc_timeout callout.
25524bf8ce03SAdrian Chadd  *
25534bf8ce03SAdrian Chadd  * This is called by the callout code with the driver lock held.
25544bf8ce03SAdrian Chadd  */
25554bf8ce03SAdrian Chadd void
25564bf8ce03SAdrian Chadd rge_tick(void *arg)
25574bf8ce03SAdrian Chadd {
25584bf8ce03SAdrian Chadd 	struct rge_softc *sc = arg;
25594bf8ce03SAdrian Chadd 
25604bf8ce03SAdrian Chadd 	RGE_ASSERT_LOCKED(sc);
25614bf8ce03SAdrian Chadd 
25624bf8ce03SAdrian Chadd 	rge_link_state(sc);
25634bf8ce03SAdrian Chadd 
25644bf8ce03SAdrian Chadd 	/*
25654bf8ce03SAdrian Chadd 	 * There is no better place yet to trigger/test this, so fetch
25664bf8ce03SAdrian Chadd 	 * the MAC statistics here every second and accept that the
25674bf8ce03SAdrian Chadd 	 * driver blocks for a little while whilst it happens.
25684bf8ce03SAdrian Chadd 	 */
25694bf8ce03SAdrian Chadd 	if ((if_getdrvflags(sc->sc_ifp) & IFF_DRV_RUNNING) != 0)
25704bf8ce03SAdrian Chadd 		rge_hw_mac_stats_fetch(sc, &sc->sc_mac_stats.lcl_stats);
25714bf8ce03SAdrian Chadd 
25724bf8ce03SAdrian Chadd 	/*
25734bf8ce03SAdrian Chadd 	 * Handle the TX watchdog.
25744bf8ce03SAdrian Chadd 	 */
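	/*
	 * The TX paths re-arm the counter while work is outstanding;
	 * if it reaches zero here, TX completions have stalled, so
	 * log the timeout and reinitialise the chip.
	 */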
25754bf8ce03SAdrian Chadd 	if (sc->sc_watchdog > 0) {
25764bf8ce03SAdrian Chadd 		sc->sc_watchdog--;
25774bf8ce03SAdrian Chadd 		if (sc->sc_watchdog == 0) {
25784bf8ce03SAdrian Chadd 			RGE_PRINT_ERROR(sc, "TX timeout (watchdog)\n");
25794bf8ce03SAdrian Chadd 			rge_init_locked(sc);
25804bf8ce03SAdrian Chadd 			sc->sc_drv_stats.tx_watchdog_timeout_cnt++;
25814bf8ce03SAdrian Chadd 		}
25824bf8ce03SAdrian Chadd 	}
25834bf8ce03SAdrian Chadd 
25844bf8ce03SAdrian Chadd 	callout_reset(&sc->sc_timeout, hz, rge_tick, sc);
25854bf8ce03SAdrian Chadd }
25864bf8ce03SAdrian Chadd 
25874bf8ce03SAdrian Chadd /**
25884bf8ce03SAdrian Chadd  * @brief Process a link state change.
25894bf8ce03SAdrian Chadd  *
25904bf8ce03SAdrian Chadd  * Must be called with the driver lock held.
25914bf8ce03SAdrian Chadd  */
25924bf8ce03SAdrian Chadd void
25934bf8ce03SAdrian Chadd rge_link_state(struct rge_softc *sc)
25944bf8ce03SAdrian Chadd {
25954bf8ce03SAdrian Chadd 	int link = LINK_STATE_DOWN;
25964bf8ce03SAdrian Chadd 
25974bf8ce03SAdrian Chadd 	RGE_ASSERT_LOCKED(sc);
25984bf8ce03SAdrian Chadd 
25994bf8ce03SAdrian Chadd 	if (rge_get_link_status(sc))
26004bf8ce03SAdrian Chadd 		link = LINK_STATE_UP;
26014bf8ce03SAdrian Chadd 
26024bf8ce03SAdrian Chadd 	if (if_getlinkstate(sc->sc_ifp) != link) {
26034bf8ce03SAdrian Chadd 		sc->sc_drv_stats.link_state_change_cnt++;
26044bf8ce03SAdrian Chadd 		if_link_state_change(sc->sc_ifp, link);
26054bf8ce03SAdrian Chadd 	}
26064bf8ce03SAdrian Chadd }
26074bf8ce03SAdrian Chadd 
26084bf8ce03SAdrian Chadd /**
26094bf8ce03SAdrian Chadd  * @brief Suspend
26104bf8ce03SAdrian Chadd  */
26114bf8ce03SAdrian Chadd static int
26124bf8ce03SAdrian Chadd rge_suspend(device_t dev)
26134bf8ce03SAdrian Chadd {
26144bf8ce03SAdrian Chadd 	struct rge_softc *sc = device_get_softc(dev);
26154bf8ce03SAdrian Chadd 
26164bf8ce03SAdrian Chadd 	RGE_LOCK(sc);
26174bf8ce03SAdrian Chadd 	rge_stop_locked(sc);
26184bf8ce03SAdrian Chadd 	/* TODO: wake on lan */
26194bf8ce03SAdrian Chadd 	sc->sc_suspended = true;
26204bf8ce03SAdrian Chadd 	RGE_UNLOCK(sc);
26214bf8ce03SAdrian Chadd 
26224bf8ce03SAdrian Chadd 	return (0);
26234bf8ce03SAdrian Chadd }
26244bf8ce03SAdrian Chadd 
26254bf8ce03SAdrian Chadd /**
26264bf8ce03SAdrian Chadd  * @brief Resume
26274bf8ce03SAdrian Chadd  */
26284bf8ce03SAdrian Chadd static int
26294bf8ce03SAdrian Chadd rge_resume(device_t dev)
26304bf8ce03SAdrian Chadd {
26314bf8ce03SAdrian Chadd 	struct rge_softc *sc = device_get_softc(dev);
26324bf8ce03SAdrian Chadd 
26334bf8ce03SAdrian Chadd 	RGE_LOCK(sc);
26344bf8ce03SAdrian Chadd 	/* TODO: wake on lan */
26354bf8ce03SAdrian Chadd 
26364bf8ce03SAdrian Chadd 	/* reinit if required */
26374bf8ce03SAdrian Chadd 	if (if_getflags(sc->sc_ifp) & IFF_UP)
26384bf8ce03SAdrian Chadd 		rge_init_locked(sc);
26394bf8ce03SAdrian Chadd 
26404bf8ce03SAdrian Chadd 	sc->sc_suspended = false;
26414bf8ce03SAdrian Chadd 
26424bf8ce03SAdrian Chadd 	RGE_UNLOCK(sc);
26434bf8ce03SAdrian Chadd 
26444bf8ce03SAdrian Chadd 	return (0);
26454bf8ce03SAdrian Chadd }
26464bf8ce03SAdrian Chadd 
26474bf8ce03SAdrian Chadd /**
26484bf8ce03SAdrian Chadd  * @brief Shut down the driver at system shutdown time.
26494bf8ce03SAdrian Chadd  */
26504bf8ce03SAdrian Chadd static int
26514bf8ce03SAdrian Chadd rge_shutdown(device_t dev)
26524bf8ce03SAdrian Chadd {
26534bf8ce03SAdrian Chadd 	struct rge_softc *sc = device_get_softc(dev);
26544bf8ce03SAdrian Chadd 
26554bf8ce03SAdrian Chadd 	RGE_LOCK(sc);
26564bf8ce03SAdrian Chadd 	rge_stop_locked(sc);
26574bf8ce03SAdrian Chadd 	RGE_UNLOCK(sc);
26584bf8ce03SAdrian Chadd 
26594bf8ce03SAdrian Chadd 	return (0);
26604bf8ce03SAdrian Chadd }
26614bf8ce03SAdrian Chadd 
26624bf8ce03SAdrian Chadd static device_method_t rge_methods[] = {
26634bf8ce03SAdrian Chadd 	DEVMETHOD(device_probe,			rge_probe),
26644bf8ce03SAdrian Chadd 	DEVMETHOD(device_attach,		rge_attach),
26654bf8ce03SAdrian Chadd 	DEVMETHOD(device_detach,		rge_detach),
26664bf8ce03SAdrian Chadd 
26674bf8ce03SAdrian Chadd 	DEVMETHOD(device_suspend,		rge_suspend),
26684bf8ce03SAdrian Chadd 	DEVMETHOD(device_resume,		rge_resume),
26694bf8ce03SAdrian Chadd 	DEVMETHOD(device_shutdown,		rge_shutdown),
26704bf8ce03SAdrian Chadd 
26714bf8ce03SAdrian Chadd 	DEVMETHOD_END
26724bf8ce03SAdrian Chadd };
26734bf8ce03SAdrian Chadd 
26744bf8ce03SAdrian Chadd static driver_t rge_driver = {
26754bf8ce03SAdrian Chadd 	"rge",
26764bf8ce03SAdrian Chadd 	rge_methods,
26774bf8ce03SAdrian Chadd 	sizeof(struct rge_softc)
26784bf8ce03SAdrian Chadd };
26794bf8ce03SAdrian Chadd 
26804bf8ce03SAdrian Chadd MODULE_DEPEND(rge, pci, 1, 1, 1);
26814bf8ce03SAdrian Chadd MODULE_DEPEND(rge, ether, 1, 1, 1);
26824bf8ce03SAdrian Chadd 
26834bf8ce03SAdrian Chadd DRIVER_MODULE_ORDERED(rge, pci, rge_driver, NULL, NULL, SI_ORDER_ANY);
2684*cce603cfSOlivier Cochard MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, rge, rge_devices,
2685*cce603cfSOlivier Cochard     nitems(rge_devices) - 1);
2686