/*-
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: gemvar.h,v 1.8 2002/05/15 02:36:12 matt Exp
 *
 * $FreeBSD$
 */

#ifndef _IF_GEMVAR_H
#define _IF_GEMVAR_H

#include <sys/queue.h>
#include <sys/callout.h>

/*
 * Transmit descriptor list size.  This is arbitrary, but allocate
 * enough descriptors for 64 pending transmissions and 16 segments
 * per packet.  This limit is not actually enforced (packets with
 * more segments can be sent, depending on the busdma backend); it
 * is however used as an estimate for the TX window size.
 */
#define GEM_NTXSEGS		16

#define GEM_TXQUEUELEN		64
#define GEM_NTXDESC		(GEM_TXQUEUELEN * GEM_NTXSEGS)
#define GEM_MAXTXFREE		(GEM_NTXDESC - 1)
#define GEM_NTXDESC_MASK	(GEM_NTXDESC - 1)
#define GEM_NEXTTX(x)		((x + 1) & GEM_NTXDESC_MASK)

/*
 * Receive descriptor list size.  We have one RX buffer per incoming
 * packet, so this logic is a little simpler.
 */
#define GEM_NRXDESC		256
#define GEM_NRXDESC_MASK	(GEM_NRXDESC - 1)
#define GEM_NEXTRX(x)		((x + 1) & GEM_NRXDESC_MASK)
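
/*
 * Usage sketch (illustrative only, not part of the driver API): because
 * GEM_NTXDESC and GEM_NRXDESC are powers of two, the GEM_NEXTTX() and
 * GEM_NEXTRX() macros wrap a ring index with a cheap mask instead of a
 * modulo.  Advancing a hypothetical TX index "nexttx" over "nsegs"
 * descriptors (both names are made up for this sketch) would look like:
 *
 *	for (seg = 0; seg < nsegs; seg++) {
 *		(fill the descriptor at index nexttx here)
 *		nexttx = GEM_NEXTTX(nexttx);
 *	}
 *
 * i.e. each step computes ((nexttx + 1) & GEM_NTXDESC_MASK) and wraps
 * back to 0 after descriptor GEM_NTXDESC - 1.
 */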

/*
 * How many ticks to wait before retrying an RX descriptor that is
 * still owned by the hardware.
 */
#define GEM_RXOWN_TICKS		(hz / 50)

/*
 * Control structures are DMA'd to the GEM chip.  We allocate them
 * in a single clump that maps to a single DMA segment to make
 * several things easier.
 */
struct gem_control_data {
	struct gem_desc gcd_txdescs[GEM_NTXDESC];	/* TX descriptors */
	struct gem_desc gcd_rxdescs[GEM_NRXDESC];	/* RX descriptors */
};

#define GEM_CDOFF(x)	offsetof(struct gem_control_data, x)
#define GEM_CDTXOFF(x)	GEM_CDOFF(gcd_txdescs[(x)])
#define GEM_CDRXOFF(x)	GEM_CDOFF(gcd_rxdescs[(x)])

/*
 * software state for transmit job mbufs (may be elements of mbuf chains)
 */
struct gem_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndescs;			/* number of descriptors */
	STAILQ_ENTRY(gem_txsoft) txs_q;
};

STAILQ_HEAD(gem_txsq, gem_txsoft);

/*
 * software state for receive jobs
 */
struct gem_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
	bus_addr_t rxs_paddr;		/* physical address of the segment */
};

/*
 * software state per device
 */
struct gem_softc {
	struct ifnet	*sc_ifp;
	struct mtx	sc_mtx;
	device_t	sc_miibus;
	struct mii_data	*sc_mii;	/* MII media control */
	device_t	sc_dev;		/* generic device information */
	u_char		sc_enaddr[ETHER_ADDR_LEN];
	struct callout	sc_tick_ch;	/* tick callout */
	struct callout	sc_rx_ch;	/* delayed RX callout */
	int		sc_wdog_timer;	/* watchdog timer */

	void		*sc_ih;
	struct resource	*sc_res[2];
	bus_dma_tag_t	sc_pdmatag;	/* parent bus DMA tag */
	bus_dma_tag_t	sc_rdmatag;	/* RX bus DMA tag */
	bus_dma_tag_t	sc_tdmatag;	/* TX bus DMA tag */
	bus_dma_tag_t	sc_cdmatag;	/* control data bus DMA tag */
	bus_dmamap_t	sc_dmamap;	/* bus DMA handle */

	int		sc_phyad;	/* PHY to use or -1 for any */

	u_int		sc_variant;
#define GEM_UNKNOWN		0	/* don't know */
#define GEM_SUN_GEM		1	/* Sun GEM */
#define GEM_SUN_ERI		2	/* Sun ERI */
#define GEM_APPLE_GMAC		3	/* Apple GMAC */
#define GEM_APPLE_K2_GMAC	4	/* Apple K2 GMAC */

#define GEM_IS_APPLE(sc)						\
	((sc)->sc_variant == GEM_APPLE_GMAC ||				\
	(sc)->sc_variant == GEM_APPLE_K2_GMAC)

	u_int		sc_flags;
#define GEM_INITED	(1 << 0)	/* reset persistent regs init'ed */
#define GEM_LINK	(1 << 1)	/* link is up */
#define GEM_PCI		(1 << 2)	/* PCI busses are little-endian */
#define GEM_SERDES	(1 << 3)	/* use the SERDES */

	/*
	 * ring buffer DMA stuff
	 */
	bus_dma_segment_t sc_cdseg;	/* control data memory */
	int		sc_cdnseg;	/* number of segments */
	bus_dmamap_t	sc_cddmamap;	/* control data DMA map */
	bus_addr_t	sc_cddma;

	/*
	 * software state for transmit and receive descriptors
	 */
	struct gem_txsoft sc_txsoft[GEM_TXQUEUELEN];
	struct gem_rxsoft sc_rxsoft[GEM_NRXDESC];

	/*
	 * control data structures
	 */
	struct gem_control_data *sc_control_data;
#define sc_txdescs	sc_control_data->gcd_txdescs
#define sc_rxdescs	sc_control_data->gcd_rxdescs

	int		sc_txfree;	/* number of free TX descriptors */
	int		sc_txnext;	/* next ready TX descriptor */
	int		sc_txwin;	/* TX desc. since last TX intr. */

	struct gem_txsq	sc_txfreeq;	/* free TX descsofts */
	struct gem_txsq	sc_txdirtyq;	/* dirty TX descsofts */

	int		sc_rxptr;	/* next ready RX desc./descsoft */
	int		sc_rxfifosize;	/* RX FIFO size (bytes) */

	int		sc_ifflags;
	int		sc_csum_features;
};
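
/*
 * Usage sketch (hypothetical code, not a definition from this header):
 * the per-packet gem_txsoft structures cycle between the two STAILQs in
 * the softc.  A transmit path would pull an entry off sc_txfreeq, note
 * which descriptors it used and park it on sc_txdirtyq until the TX
 * completion interrupt hands it back; "nsegs" below is assumed to be
 * the segment count obtained from bus_dmamap_load_mbuf_sg(9):
 *
 *	struct gem_txsoft *txs;
 *
 *	txs = STAILQ_FIRST(&sc->sc_txfreeq);
 *	if (txs == NULL)
 *		return (ENOBUFS);
 *	STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
 *	txs->txs_firstdesc = sc->sc_txnext;
 *	txs->txs_ndescs = nsegs;
 *	STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);
 */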
*/ 168 169 struct gem_txsq sc_txfreeq; /* free TX descsofts */ 170 struct gem_txsq sc_txdirtyq; /* dirty TX descsofts */ 171 172 int sc_rxptr; /* next ready RX desc./descsoft */ 173 int sc_rxfifosize; /* RX FIFO size (bytes) */ 174 175 int sc_ifflags; 176 int sc_csum_features; 177 }; 178 179 /* XXX this should be handled by bus_dma(9). */ 180 #define GEM_DMA_READ(sc, v) \ 181 ((((sc)->sc_flags & GEM_PCI) != 0) ? le64toh(v) : be64toh(v)) 182 #define GEM_DMA_WRITE(sc, v) \ 183 ((((sc)->sc_flags & GEM_PCI) != 0) ? htole64(v) : htobe64(v)) 184 185 #define GEM_CDTXADDR(sc, x) ((sc)->sc_cddma + GEM_CDTXOFF((x))) 186 #define GEM_CDRXADDR(sc, x) ((sc)->sc_cddma + GEM_CDRXOFF((x))) 187 188 #define GEM_CDSYNC(sc, ops) \ 189 bus_dmamap_sync((sc)->sc_cdmatag, (sc)->sc_cddmamap, (ops)); 190 191 #define GEM_INIT_RXDESC(sc, x) \ 192 do { \ 193 struct gem_rxsoft *__rxs = &sc->sc_rxsoft[(x)]; \ 194 struct gem_desc *__rxd = &sc->sc_rxdescs[(x)]; \ 195 struct mbuf *__m = __rxs->rxs_mbuf; \ 196 \ 197 __m->m_data = __m->m_ext.ext_buf; \ 198 __rxd->gd_addr = \ 199 GEM_DMA_WRITE((sc), __rxs->rxs_paddr); \ 200 __rxd->gd_flags = \ 201 GEM_DMA_WRITE((sc), \ 202 (((__m->m_ext.ext_size) << GEM_RD_BUFSHIFT) \ 203 & GEM_RD_BUFSIZE) | GEM_RD_OWN); \ 204 } while (0) 205 206 #define GEM_UPDATE_RXDESC(sc, x) \ 207 do { \ 208 struct gem_rxsoft *__rxs = &sc->sc_rxsoft[(x)]; \ 209 struct gem_desc *__rxd = &sc->sc_rxdescs[(x)]; \ 210 struct mbuf *__m = __rxs->rxs_mbuf; \ 211 \ 212 __rxd->gd_flags = \ 213 GEM_DMA_WRITE((sc), \ 214 (((__m->m_ext.ext_size) << GEM_RD_BUFSHIFT) \ 215 & GEM_RD_BUFSIZE) | GEM_RD_OWN); \ 216 } while (0) 217 218 #define GEM_LOCK_INIT(_sc, _name) \ 219 mtx_init(&(_sc)->sc_mtx, _name, MTX_NETWORK_LOCK, MTX_DEF) 220 #define GEM_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) 221 #define GEM_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) 222 #define GEM_LOCK_ASSERT(_sc, _what) mtx_assert(&(_sc)->sc_mtx, (_what)) 223 #define GEM_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_mtx) 224 225 #ifdef _KERNEL 226 extern devclass_t gem_devclass; 227 228 int gem_attach(struct gem_softc *sc); 229 void gem_detach(struct gem_softc *sc); 230 void gem_intr(void *v); 231 void gem_resume(struct gem_softc *sc); 232 void gem_suspend(struct gem_softc *sc); 233 234 int gem_mediachange(struct ifnet *ifp); 235 void gem_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr); 236 237 /* MII methods & callbacks */ 238 int gem_mii_readreg(device_t dev, int phy, int reg); 239 void gem_mii_statchg(device_t dev); 240 int gem_mii_writereg(device_t dev, int phy, int reg, int val); 241 242 #endif /* _KERNEL */ 243 244 #endif 245