/*
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
2542c1b001SThomas Moestl * 2642c1b001SThomas Moestl * from: NetBSD: gemvar.h,v 1.5 2001/10/18 15:19:22 thorpej Exp 2742c1b001SThomas Moestl * 2842c1b001SThomas Moestl * $FreeBSD$ 2942c1b001SThomas Moestl */ 3042c1b001SThomas Moestl 3142c1b001SThomas Moestl #ifndef _IF_GEMVAR_H 3242c1b001SThomas Moestl #define _IF_GEMVAR_H 3342c1b001SThomas Moestl 3442c1b001SThomas Moestl 3542c1b001SThomas Moestl #include <sys/queue.h> 3642c1b001SThomas Moestl #include <sys/callout.h> 3742c1b001SThomas Moestl 3842c1b001SThomas Moestl /* 3942c1b001SThomas Moestl * Misc. definitions for the Sun ``Gem'' Ethernet controller family driver. 4042c1b001SThomas Moestl */ 4142c1b001SThomas Moestl 4242c1b001SThomas Moestl /* 4342c1b001SThomas Moestl * Transmit descriptor list size. This is arbitrary, but allocate 4442c1b001SThomas Moestl * enough descriptors for 64 pending transmissions and 16 segments 4542c1b001SThomas Moestl * per packet. 4642c1b001SThomas Moestl */ 4742c1b001SThomas Moestl #define GEM_NTXSEGS 16 4842c1b001SThomas Moestl 4942c1b001SThomas Moestl #define GEM_TXQUEUELEN 64 5042c1b001SThomas Moestl #define GEM_NTXDESC (GEM_TXQUEUELEN * GEM_NTXSEGS) 5142c1b001SThomas Moestl #define GEM_NTXDESC_MASK (GEM_NTXDESC - 1) 5242c1b001SThomas Moestl #define GEM_NEXTTX(x) ((x + 1) & GEM_NTXDESC_MASK) 5342c1b001SThomas Moestl 5442c1b001SThomas Moestl /* 5542c1b001SThomas Moestl * Receive descriptor list size. We have one Rx buffer per incoming 5642c1b001SThomas Moestl * packet, so this logic is a little simpler. 5742c1b001SThomas Moestl */ 5842c1b001SThomas Moestl #define GEM_NRXDESC 128 5942c1b001SThomas Moestl #define GEM_NRXDESC_MASK (GEM_NRXDESC - 1) 6042c1b001SThomas Moestl #define GEM_NEXTRX(x) ((x + 1) & GEM_NRXDESC_MASK) 6142c1b001SThomas Moestl 6242c1b001SThomas Moestl /* 630d80b9bdSThomas Moestl * How many ticks to wait until to retry on a RX descriptor that is still owned 640d80b9bdSThomas Moestl * by the hardware. 
 */
#define	GEM_RXOWN_TICKS	(hz / 50)

/*
 * Control structures are DMA'd to the GEM chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct gem_control_data {
	/*
	 * The transmit descriptors.
	 */
	struct gem_desc gcd_txdescs[GEM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	struct gem_desc gcd_rxdescs[GEM_NRXDESC];
};

/* Byte offset of member (x) within the control-data clump. */
#define	GEM_CDOFF(x)	offsetof(struct gem_control_data, x)
#define	GEM_CDTXOFF(x)	GEM_CDOFF(gcd_txdescs[(x)])
#define	GEM_CDRXOFF(x)	GEM_CDOFF(gcd_rxdescs[(x)])

/*
 * Software state for transmit job mbufs (may be elements of mbuf chains).
 */
struct gem_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndescs;			/* number of descriptors */
	STAILQ_ENTRY(gem_txsoft) txs_q;	/* queue linkage (sc_txfreeq/sc_txdirtyq) */
};

STAILQ_HEAD(gem_txsq, gem_txsoft);

/* Argument structure for busdma callback */
struct gem_txdma {
	struct gem_softc *txd_sc;	/* owning softc */
	int txd_nexttx;			/* next TX descriptor index to use */
	int txd_lasttx;			/* last TX descriptor index used */
	int txd_nsegs;			/* number of DMA segments mapped */
	int txd_flags;			/* GTXD_* flags below */
#define	GTXD_FIRST	1		/* presumably: first segment of a packet — verify */
#define	GTXD_LAST	2		/* presumably: last segment of a packet — verify */
	int txd_error;			/* error reported by the callback */
};

/* Transmit job descriptor */
struct gem_txjob {
	int txj_nexttx;			/* next TX descriptor index */
	int txj_lasttx;			/* last TX descriptor index */
	int txj_nsegs;			/* number of segments in the job */
	STAILQ_HEAD(, gem_txsoft) txj_txsq;	/* txsofts belonging to this job */
};

/*
 * Software state for receive jobs.
12542c1b001SThomas Moestl */ 12642c1b001SThomas Moestl struct gem_rxsoft { 12742c1b001SThomas Moestl struct mbuf *rxs_mbuf; /* head of our mbuf chain */ 12842c1b001SThomas Moestl bus_dmamap_t rxs_dmamap; /* our DMA map */ 12942c1b001SThomas Moestl bus_addr_t rxs_paddr; /* physical address of the segment */ 13042c1b001SThomas Moestl }; 13142c1b001SThomas Moestl 13242c1b001SThomas Moestl /* 13342c1b001SThomas Moestl * Software state per device. 13442c1b001SThomas Moestl */ 13542c1b001SThomas Moestl struct gem_softc { 13642c1b001SThomas Moestl struct arpcom sc_arpcom; /* arp common data */ 13742c1b001SThomas Moestl device_t sc_miibus; 13842c1b001SThomas Moestl struct mii_data *sc_mii; /* MII media control */ 13942c1b001SThomas Moestl device_t sc_dev; /* generic device information */ 14042c1b001SThomas Moestl struct callout sc_tick_ch; /* tick callout */ 1410d80b9bdSThomas Moestl struct callout sc_rx_ch; /* delayed rx callout */ 14242c1b001SThomas Moestl 14342c1b001SThomas Moestl /* The following bus handles are to be provided by the bus front-end */ 14442c1b001SThomas Moestl bus_space_tag_t sc_bustag; /* bus tag */ 14542c1b001SThomas Moestl bus_dma_tag_t sc_pdmatag; /* parent bus dma tag */ 14642c1b001SThomas Moestl bus_dma_tag_t sc_dmatag; /* bus dma tag */ 14742c1b001SThomas Moestl bus_dma_tag_t sc_cdmatag; /* control data bus dma tag */ 14842c1b001SThomas Moestl bus_dmamap_t sc_dmamap; /* bus dma handle */ 14942c1b001SThomas Moestl bus_space_handle_t sc_h; /* bus space handle for all regs */ 15042c1b001SThomas Moestl 15142c1b001SThomas Moestl int sc_phys[2]; /* MII instance -> PHY map */ 15242c1b001SThomas Moestl 15342c1b001SThomas Moestl int sc_mif_config; /* Selected MII reg setting */ 15442c1b001SThomas Moestl 15542c1b001SThomas Moestl int sc_pci; /* XXXXX -- PCI buses are LE. */ 15642c1b001SThomas Moestl 15742c1b001SThomas Moestl /* 15842c1b001SThomas Moestl * Ring buffer DMA stuff. 
15942c1b001SThomas Moestl */ 16042c1b001SThomas Moestl bus_dma_segment_t sc_cdseg; /* control data memory */ 16142c1b001SThomas Moestl int sc_cdnseg; /* number of segments */ 16242c1b001SThomas Moestl bus_dmamap_t sc_cddmamap; /* control data DMA map */ 16342c1b001SThomas Moestl bus_addr_t sc_cddma; 16442c1b001SThomas Moestl 16542c1b001SThomas Moestl /* 16642c1b001SThomas Moestl * Software state for transmit and receive descriptors. 16742c1b001SThomas Moestl */ 16842c1b001SThomas Moestl struct gem_txsoft sc_txsoft[GEM_TXQUEUELEN]; 16942c1b001SThomas Moestl struct gem_rxsoft sc_rxsoft[GEM_NRXDESC]; 17042c1b001SThomas Moestl 17142c1b001SThomas Moestl /* 17242c1b001SThomas Moestl * Control data structures. 17342c1b001SThomas Moestl */ 17442c1b001SThomas Moestl struct gem_control_data *sc_control_data; 17542c1b001SThomas Moestl #define sc_txdescs sc_control_data->gcd_txdescs 17642c1b001SThomas Moestl #define sc_rxdescs sc_control_data->gcd_rxdescs 17742c1b001SThomas Moestl 17842c1b001SThomas Moestl int sc_txfree; /* number of free Tx descriptors */ 17942c1b001SThomas Moestl int sc_txnext; /* next ready Tx descriptor */ 18042c1b001SThomas Moestl 18142c1b001SThomas Moestl struct gem_txsq sc_txfreeq; /* free Tx descsofts */ 18242c1b001SThomas Moestl struct gem_txsq sc_txdirtyq; /* dirty Tx descsofts */ 18342c1b001SThomas Moestl 18442c1b001SThomas Moestl int sc_rxptr; /* next ready RX descriptor/descsoft */ 18542c1b001SThomas Moestl 18642c1b001SThomas Moestl /* ========== */ 18742c1b001SThomas Moestl int sc_inited; 18842c1b001SThomas Moestl int sc_debug; 18942c1b001SThomas Moestl int sc_flags; 19042c1b001SThomas Moestl 19142c1b001SThomas Moestl /* Special hardware hooks */ 192e51a25f8SAlfred Perlstein void (*sc_hwreset)(struct gem_softc *); 193e51a25f8SAlfred Perlstein void (*sc_hwinit)(struct gem_softc *); 19442c1b001SThomas Moestl }; 19542c1b001SThomas Moestl 19642c1b001SThomas Moestl #define GEM_DMA_READ(sc, v) (((sc)->sc_pci) ? 
le64toh(v) : be64toh(v)) 19742c1b001SThomas Moestl #define GEM_DMA_WRITE(sc, v) (((sc)->sc_pci) ? htole64(v) : htobe64(v)) 19842c1b001SThomas Moestl 19942c1b001SThomas Moestl #define GEM_CDTXADDR(sc, x) ((sc)->sc_cddma + GEM_CDTXOFF((x))) 20042c1b001SThomas Moestl #define GEM_CDRXADDR(sc, x) ((sc)->sc_cddma + GEM_CDRXOFF((x))) 20142c1b001SThomas Moestl 20242c1b001SThomas Moestl #define GEM_CDSPADDR(sc) ((sc)->sc_cddma + GEM_CDSPOFF) 20342c1b001SThomas Moestl 20442c1b001SThomas Moestl #define GEM_CDTXSYNC(sc, x, n, ops) \ 20542c1b001SThomas Moestl bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap, (ops)); \ 20642c1b001SThomas Moestl 20742c1b001SThomas Moestl #define GEM_CDRXSYNC(sc, x, ops) \ 20842c1b001SThomas Moestl bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap, (ops)) 20942c1b001SThomas Moestl 21042c1b001SThomas Moestl #define GEM_CDSPSYNC(sc, ops) \ 21142c1b001SThomas Moestl bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap, (ops)) 21242c1b001SThomas Moestl 21342c1b001SThomas Moestl #define GEM_INIT_RXDESC(sc, x) \ 21442c1b001SThomas Moestl do { \ 21542c1b001SThomas Moestl struct gem_rxsoft *__rxs = &sc->sc_rxsoft[(x)]; \ 21642c1b001SThomas Moestl struct gem_desc *__rxd = &sc->sc_rxdescs[(x)]; \ 21742c1b001SThomas Moestl struct mbuf *__m = __rxs->rxs_mbuf; \ 21842c1b001SThomas Moestl \ 21942c1b001SThomas Moestl __m->m_data = __m->m_ext.ext_buf; \ 22042c1b001SThomas Moestl __rxd->gd_addr = \ 22142c1b001SThomas Moestl GEM_DMA_WRITE((sc), __rxs->rxs_paddr); \ 22242c1b001SThomas Moestl __rxd->gd_flags = \ 22342c1b001SThomas Moestl GEM_DMA_WRITE((sc), \ 22442c1b001SThomas Moestl (((__m->m_ext.ext_size)<<GEM_RD_BUFSHIFT) \ 22542c1b001SThomas Moestl & GEM_RD_BUFSIZE) | GEM_RD_OWN); \ 22642c1b001SThomas Moestl GEM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \ 22742c1b001SThomas Moestl } while (0) 22842c1b001SThomas Moestl 22942c1b001SThomas Moestl #ifdef _KERNEL 23042c1b001SThomas Moestl extern devclass_t gem_devclass; 23142c1b001SThomas Moestl 
/* Driver entry points, shared by the bus front-ends */
int	gem_attach(struct gem_softc *);
int	gem_detach(struct gem_softc *);
void	gem_intr(void *);

/* ifmedia entry points */
int	gem_mediachange(struct ifnet *);
void	gem_mediastatus(struct ifnet *, struct ifmediareq *);

void	gem_reset(struct gem_softc *);

/* MII methods & callbacks */
int	gem_mii_readreg(device_t, int, int);
int	gem_mii_writereg(device_t, int, int, int);
void	gem_mii_statchg(device_t);

#endif /* _KERNEL */


#endif /* _IF_GEMVAR_H */