/*-
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: gemvar.h,v 1.8 2002/05/15 02:36:12 matt Exp
 *
 * $FreeBSD$
 */

#ifndef	_IF_GEMVAR_H
#define	_IF_GEMVAR_H

#include <sys/queue.h>
#include <sys/callout.h>

/*
 * Misc. definitions for the Sun ``Gem'' Ethernet controller family driver.
 */

/*
 * Transmit descriptor list size.  This is arbitrary, but allocate
 * enough descriptors for 64 pending transmissions and 16 segments
 * per packet.  This limit is not actually enforced (packets with more
 * segments can be sent, depending on the busdma backend); it is however
 * used as an estimate for the tx window size.
 */
#define	GEM_NTXSEGS		16

#define	GEM_TXQUEUELEN		64
#define	GEM_NTXDESC		(GEM_TXQUEUELEN * GEM_NTXSEGS)
#define	GEM_MAXTXFREE		(GEM_NTXDESC - 1)
#define	GEM_NTXDESC_MASK	(GEM_NTXDESC - 1)
#define	GEM_NEXTTX(x)		((x + 1) & GEM_NTXDESC_MASK)

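/*
 * Example (informational sketch only): because GEM_NTXDESC is a power
 * of two (64 * 16 == 1024), GEM_NTXDESC_MASK is 0x3ff and GEM_NEXTTX()
 * advances a ring index with a single AND instead of a modulo,
 * wrapping back to the start of the ring:
 *
 *	GEM_NEXTTX(0)    == 1
 *	GEM_NEXTTX(1023) == 0
 *
 * The Rx ring macros below wrap the same way over 128 entries.
 */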

/*
 * Receive descriptor list size.  We have one Rx buffer per incoming
 * packet, so this logic is a little simpler.
 */
#define	GEM_NRXDESC		128
#define	GEM_NRXDESC_MASK	(GEM_NRXDESC - 1)
#define	GEM_PREVRX(x)		((x - 1) & GEM_NRXDESC_MASK)
#define	GEM_NEXTRX(x)		((x + 1) & GEM_NRXDESC_MASK)

/*
 * How many ticks to wait before retrying an RX descriptor that is
 * still owned by the hardware.
 */
#define	GEM_RXOWN_TICKS		(hz / 50)

/*
 * Control structures are DMA'd to the GEM chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several
 * things easier.
 */
struct gem_control_data {
	/*
	 * The transmit descriptors.
	 */
	struct gem_desc gcd_txdescs[GEM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	struct gem_desc gcd_rxdescs[GEM_NRXDESC];
};

#define	GEM_CDOFF(x)		offsetof(struct gem_control_data, x)
#define	GEM_CDTXOFF(x)		GEM_CDOFF(gcd_txdescs[(x)])
#define	GEM_CDRXOFF(x)		GEM_CDOFF(gcd_rxdescs[(x)])

/*
 * Software state for transmit job mbufs (may be elements of mbuf chains).
 */
struct gem_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndescs;			/* number of descriptors */
	STAILQ_ENTRY(gem_txsoft) txs_q;
};

STAILQ_HEAD(gem_txsq, gem_txsoft);

/* Argument structure for busdma callback */
struct gem_txdma {
	struct gem_softc *txd_sc;
	struct gem_txsoft *txd_txs;
};

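/*
 * Illustrative sketch only (simplified; the names "sc" and "txs" are
 * hypothetical locals, not part of this header): a pending transmission
 * is typically tracked by a gem_txsoft taken from the free queue,
 * loaded through its DMA map and then moved to the dirty queue, e.g.
 *
 *	struct gem_txsoft *txs = STAILQ_FIRST(&sc->sc_txfreeq);
 *
 *	if (txs != NULL) {
 *		STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
 *		... load the mbuf chain into txs_dmamap and fill in
 *		    txs_firstdesc/txs_lastdesc/txs_ndescs ...
 *		STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
 *	}
 *
 * The descriptors themselves live in the single control-data clump
 * above: GEM_CDTXOFF(x) is simply x * sizeof(struct gem_desc) from the
 * start of the clump, and the Rx descriptors (GEM_CDRXOFF()) follow
 * the whole Tx ring.
 */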

/*
 * Software state for receive jobs.
 */
struct gem_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
	bus_addr_t rxs_paddr;		/* physical address of the segment */
};

/*
 * Software state per device.
 */
struct gem_softc {
	struct ifnet	*sc_ifp;
	device_t	sc_miibus;
	struct mii_data	*sc_mii;	/* MII media control */
	device_t	sc_dev;		/* generic device information */
	u_char		sc_enaddr[6];
	struct callout	sc_tick_ch;	/* tick callout */
	struct callout	sc_rx_ch;	/* delayed rx callout */
	int		sc_wdog_timer;	/* watchdog timer */

	/* The following bus handles are to be provided by the bus front-end */
	void		*sc_ih;
	struct resource	*sc_res[2];
	bus_dma_tag_t	sc_pdmatag;	/* parent bus dma tag */
	bus_dma_tag_t	sc_rdmatag;	/* RX bus dma tag */
	bus_dma_tag_t	sc_tdmatag;	/* TX bus dma tag */
	bus_dma_tag_t	sc_cdmatag;	/* control data bus dma tag */
	bus_dmamap_t	sc_dmamap;	/* bus dma handle */

	int		sc_phys[2];	/* MII instance -> PHY map */

	int		sc_mif_config;	/* selected MII reg setting */

	int		sc_pci;		/* XXX -- PCI buses are little-endian. */
	u_int		sc_variant;	/* which GEM are we dealing with? */
#define	GEM_UNKNOWN		0	/* don't know */
#define	GEM_SUN_GEM		1	/* Sun GEM variant */
#define	GEM_APPLE_GMAC		2	/* Apple GMAC variant */

	u_int		sc_flags;	/* misc. flags */
#define	GEM_GIGABIT		0x0001	/* has a gigabit PHY */

	/*
	 * Ring buffer DMA stuff.
	 */
	bus_dma_segment_t sc_cdseg;	/* control data memory */
	int		sc_cdnseg;	/* number of segments */
	bus_dmamap_t	sc_cddmamap;	/* control data DMA map */
	bus_addr_t	sc_cddma;

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct gem_txsoft sc_txsoft[GEM_TXQUEUELEN];
	struct gem_rxsoft sc_rxsoft[GEM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct gem_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->gcd_txdescs
#define	sc_rxdescs	sc_control_data->gcd_rxdescs

	int		sc_txfree;	/* number of free Tx descriptors */
	int		sc_txnext;	/* next ready Tx descriptor */
	int		sc_txwin;	/* Tx descriptors since last Tx int */

	struct gem_txsq	sc_txfreeq;	/* free Tx descsofts */
	struct gem_txsq	sc_txdirtyq;	/* dirty Tx descsofts */

	int		sc_rxptr;	/* next ready RX descriptor/descsoft */
	int		sc_rxfifosize;	/* Rx FIFO size (bytes) */

	/* ========== */
	int		sc_inited;
	int		sc_debug;
	int		sc_ifflags;

	struct mtx	sc_mtx;
};

#define	GEM_DMA_READ(sc, v)	(((sc)->sc_pci) ? le64toh(v) : be64toh(v))
#define	GEM_DMA_WRITE(sc, v)	(((sc)->sc_pci) ? htole64(v) : htobe64(v))

#define	GEM_CDTXADDR(sc, x)	((sc)->sc_cddma + GEM_CDTXOFF((x)))
#define	GEM_CDRXADDR(sc, x)	((sc)->sc_cddma + GEM_CDRXOFF((x)))

#define	GEM_CDSYNC(sc, ops)						\
	bus_dmamap_sync((sc)->sc_cdmatag, (sc)->sc_cddmamap, (ops));

#define	GEM_INIT_RXDESC(sc, x)						\
do {									\
	struct gem_rxsoft *__rxs = &sc->sc_rxsoft[(x)];			\
	struct gem_desc *__rxd = &sc->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->gd_addr =						\
	    GEM_DMA_WRITE((sc), __rxs->rxs_paddr);			\
	__rxd->gd_flags =						\
	    GEM_DMA_WRITE((sc),						\
	    (((__m->m_ext.ext_size) << GEM_RD_BUFSHIFT)			\
	    & GEM_RD_BUFSIZE) | GEM_RD_OWN);				\
} while (0)

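/*
 * Sketch of the intended usage (simplified, "idx" is a hypothetical
 * ring index): GEM_INIT_RXDESC() points descriptor x back at its
 * preloaded mbuf cluster and returns it to the chip by setting
 * GEM_RD_OWN; GEM_DMA_WRITE() stores the fields in little- or
 * big-endian byte order depending on whether the controller sits
 * behind a PCI bus (sc_pci).  A caller recycling a received buffer
 * would do something like
 *
 *	GEM_INIT_RXDESC(sc, idx);
 *	GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 *
 * so that the updated descriptor is flushed before the hardware is
 * told to look at it again.
 */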

#define	GEM_LOCK_INIT(_sc, _name)					\
	mtx_init(&(_sc)->sc_mtx, _name, MTX_NETWORK_LOCK, MTX_DEF)
#define	GEM_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
#define	GEM_UNLOCK(_sc)			mtx_unlock(&(_sc)->sc_mtx)
#define	GEM_LOCK_ASSERT(_sc, _what)	mtx_assert(&(_sc)->sc_mtx, (_what))
#define	GEM_LOCK_DESTROY(_sc)		mtx_destroy(&(_sc)->sc_mtx)

#ifdef _KERNEL
extern devclass_t gem_devclass;

int	gem_attach(struct gem_softc *);
void	gem_detach(struct gem_softc *);
void	gem_suspend(struct gem_softc *);
void	gem_resume(struct gem_softc *);
void	gem_intr(void *);

int	gem_mediachange(struct ifnet *);
void	gem_mediastatus(struct ifnet *, struct ifmediareq *);

void	gem_reset(struct gem_softc *);

/* MII methods & callbacks */
int	gem_mii_readreg(device_t, int, int);
int	gem_mii_writereg(device_t, int, int, int);
void	gem_mii_statchg(device_t);

#endif /* _KERNEL */

#endif