/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: gemvar.h,v 1.8 2002/05/15 02:36:12 matt Exp
 */

#ifndef	_IF_GEMVAR_H
#define	_IF_GEMVAR_H

#include <sys/queue.h>
#include <sys/callout.h>

/*
 * Transmit descriptor ring size - this is arbitrary, but allocate
 * enough descriptors for 64 pending transmissions and 16 segments
 * per packet.  This limit is not actually enforced (packets with
 * more segments can be sent, depending on the busdma backend); it
 * is, however, used as an estimate for the TX window size.
 */
#define	GEM_NTXSEGS		16

#define	GEM_TXQUEUELEN		64
#define	GEM_NTXDESC		(GEM_TXQUEUELEN * GEM_NTXSEGS)
#define	GEM_MAXTXFREE		(GEM_NTXDESC - 1)
#define	GEM_NTXDESC_MASK	(GEM_NTXDESC - 1)
#define	GEM_NEXTTX(x)		((x + 1) & GEM_NTXDESC_MASK)

/*
 * Receive descriptor ring size - we have one RX buffer per incoming
 * packet, so this logic is a little simpler.
 */
#define	GEM_NRXDESC		256
#define	GEM_NRXDESC_MASK	(GEM_NRXDESC - 1)
#define	GEM_NEXTRX(x)		((x + 1) & GEM_NRXDESC_MASK)

/*
 * How many ticks to wait before retrying an RX descriptor that is
 * still owned by the hardware.
 */
#define	GEM_RXOWN_TICKS		(hz / 50)
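
/*
 * Illustrative sketch, not part of the driver: both ring sizes above are
 * powers of two (GEM_NTXDESC is 64 * 16 = 1024, GEM_NRXDESC is 256), so
 * GEM_NEXTTX() and GEM_NEXTRX() can wrap a ring index with a cheap AND
 * mask instead of a modulo, e.g.:
 *
 *	u_int txnext;
 *
 *	txnext = GEM_NTXDESC - 1;	(last slot, 1023)
 *	txnext = GEM_NEXTTX(txnext);	((1023 + 1) & 1023 == 0, wrapped)
 */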

/*
 * Control structures are DMA'd to the chip.  We allocate them
 * in a single clump that maps to a single DMA segment to make
 * several things easier.
 */
struct gem_control_data {
	struct gem_desc gcd_txdescs[GEM_NTXDESC];	/* TX descriptors */
	struct gem_desc gcd_rxdescs[GEM_NRXDESC];	/* RX descriptors */
};

#define	GEM_CDOFF(x)		offsetof(struct gem_control_data, x)
#define	GEM_CDTXOFF(x)		GEM_CDOFF(gcd_txdescs[(x)])
#define	GEM_CDRXOFF(x)		GEM_CDOFF(gcd_rxdescs[(x)])

/*
 * software state for transmit job mbufs (may be elements of mbuf chains)
 */
struct gem_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	u_int txs_firstdesc;		/* first descriptor in packet */
	u_int txs_lastdesc;		/* last descriptor in packet */
	u_int txs_ndescs;		/* number of descriptors */
	STAILQ_ENTRY(gem_txsoft) txs_q;
};

STAILQ_HEAD(gem_txsq, gem_txsoft);

/*
 * software state for receive jobs
 */
struct gem_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
	bus_addr_t rxs_paddr;		/* physical address of the segment */
};

/*
 * software state per device
 */
struct gem_softc {
	if_t		sc_ifp;
	struct mtx	sc_mtx;
	device_t	sc_miibus;
	struct mii_data	*sc_mii;	/* MII media control */
	device_t	sc_dev;		/* generic device information */
	u_char		sc_enaddr[ETHER_ADDR_LEN];
	struct callout	sc_tick_ch;	/* tick callout */
	struct callout	sc_rx_ch;	/* delayed RX callout */
	u_int		sc_wdog_timer;	/* watchdog timer */

	void		*sc_ih;
	struct resource *sc_res[2];
#define	GEM_RES_INTR	0
#define	GEM_RES_MEM	1

	bus_dma_tag_t	sc_pdmatag;	/* parent bus DMA tag */
	bus_dma_tag_t	sc_rdmatag;	/* RX bus DMA tag */
	bus_dma_tag_t	sc_tdmatag;	/* TX bus DMA tag */
	bus_dma_tag_t	sc_cdmatag;	/* control data bus DMA tag */
	bus_dmamap_t	sc_dmamap;	/* bus DMA handle */

	u_int		sc_variant;
#define	GEM_UNKNOWN		0	/* don't know */
#define	GEM_SUN_GEM		1	/* Sun GEM */
#define	GEM_APPLE_GMAC		2	/* Apple GMAC */
#define	GEM_APPLE_K2_GMAC	3	/* Apple K2 GMAC */

#define	GEM_IS_APPLE(sc)						\
	((sc)->sc_variant == GEM_APPLE_GMAC ||				\
	(sc)->sc_variant == GEM_APPLE_K2_GMAC)

	u_int		sc_flags;
#define	GEM_INITED	(1 << 0)	/* reset persistent regs init'ed */
#define	GEM_LINK	(1 << 1)	/* link is up */
#define	GEM_PCI66	(1 << 2)	/* PCI bus runs at 66MHz */
#define	GEM_SERDES	(1 << 3)	/* use the SERDES */

	/*
	 * ring buffer DMA stuff
	 */
	bus_dmamap_t	sc_cddmamap;	/* control data DMA map */
	bus_addr_t	sc_cddma;

	/*
	 * software state for transmit and receive descriptors
	 */
	struct gem_txsoft sc_txsoft[GEM_TXQUEUELEN];
	struct gem_rxsoft sc_rxsoft[GEM_NRXDESC];

	/*
	 * control data structures
	 */
	struct gem_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->gcd_txdescs
#define	sc_rxdescs	sc_control_data->gcd_rxdescs

	u_int		sc_txfree;	/* number of free TX descriptors */
	u_int		sc_txnext;	/* next ready TX descriptor */
	u_int		sc_txwin;	/* TX desc. since last TX intr. */

	struct gem_txsq	sc_txfreeq;	/* free TX descsofts */
	struct gem_txsq	sc_txdirtyq;	/* dirty TX descsofts */

	u_int		sc_rxptr;	/* next ready RX descriptor/state */
	u_int		sc_rxfifosize;	/* RX FIFO size (bytes) */

	uint32_t	sc_mac_rxcfg;	/* RX MAC conf. excluding GEM_MAC_RX_ENABLE */

	int		sc_ifflags;
	u_long		sc_csum_features;
};

#define	GEM_BARRIER(sc, offs, len, flags)				\
	bus_barrier((sc)->sc_res[GEM_RES_MEM], (offs), (len), (flags))

#define	GEM_READ_N(n, sc, offs)						\
	bus_read_ ## n((sc)->sc_res[GEM_RES_MEM], (offs))
#define	GEM_READ_1(sc, offs)						\
	GEM_READ_N(1, (sc), (offs))
#define	GEM_READ_2(sc, offs)						\
	GEM_READ_N(2, (sc), (offs))
#define	GEM_READ_4(sc, offs)						\
	GEM_READ_N(4, (sc), (offs))

#define	GEM_WRITE_N(n, sc, offs, v)					\
	bus_write_ ## n((sc)->sc_res[GEM_RES_MEM], (offs), (v))
#define	GEM_WRITE_1(sc, offs, v)					\
	GEM_WRITE_N(1, (sc), (offs), (v))
#define	GEM_WRITE_2(sc, offs, v)					\
	GEM_WRITE_N(2, (sc), (offs), (v))
#define	GEM_WRITE_4(sc, offs, v)					\
	GEM_WRITE_N(4, (sc), (offs), (v))
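
/*
 * Illustrative usage sketch, not part of the driver proper; the register
 * offset GEM_MAC_RX_CONFIG and the GEM_MAC_RX_ENABLE bit are assumed to
 * come from if_gemreg.h.  A typical read-modify-write of a chip register
 * through the accessors above, with a barrier to order the accesses:
 *
 *	uint32_t v;
 *
 *	v = GEM_READ_4(sc, GEM_MAC_RX_CONFIG);
 *	GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, v | GEM_MAC_RX_ENABLE);
 *	GEM_BARRIER(sc, GEM_MAC_RX_CONFIG, 4,
 *	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
 */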

#define	GEM_CDTXADDR(sc, x)	((sc)->sc_cddma + GEM_CDTXOFF((x)))
#define	GEM_CDRXADDR(sc, x)	((sc)->sc_cddma + GEM_CDRXOFF((x)))

#define	GEM_CDSYNC(sc, ops)						\
	bus_dmamap_sync((sc)->sc_cdmatag, (sc)->sc_cddmamap, (ops));

#define	GEM_INIT_RXDESC(sc, x)						\
do {									\
	struct gem_rxsoft *__rxs = &sc->sc_rxsoft[(x)];			\
	struct gem_desc *__rxd = &sc->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->gd_addr = htole64(__rxs->rxs_paddr);			\
	__rxd->gd_flags = htole64((((__m->m_ext.ext_size) <<		\
	    GEM_RD_BUFSHIFT) & GEM_RD_BUFSIZE) | GEM_RD_OWN);		\
} while (0)

#define	GEM_UPDATE_RXDESC(sc, x)					\
do {									\
	struct gem_rxsoft *__rxs = &sc->sc_rxsoft[(x)];			\
	struct gem_desc *__rxd = &sc->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__rxd->gd_flags = htole64((((__m->m_ext.ext_size) <<		\
	    GEM_RD_BUFSHIFT) & GEM_RD_BUFSIZE) | GEM_RD_OWN);		\
} while (0)

#define	GEM_LOCK_INIT(_sc, _name)					\
	mtx_init(&(_sc)->sc_mtx, _name, MTX_NETWORK_LOCK, MTX_DEF)
#define	GEM_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	GEM_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	GEM_LOCK_ASSERT(_sc, _what)	mtx_assert(&(_sc)->sc_mtx, (_what))
#define	GEM_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_mtx)

#ifdef _KERNEL
int	gem_attach(struct gem_softc *sc);
void	gem_detach(struct gem_softc *sc);
void	gem_intr(void *v);
void	gem_resume(struct gem_softc *sc);
void	gem_suspend(struct gem_softc *sc);

int	gem_mediachange(if_t ifp);
void	gem_mediastatus(if_t ifp, struct ifmediareq *ifmr);

/* MII methods & callbacks */
int	gem_mii_readreg(device_t dev, int phy, int reg);
void	gem_mii_statchg(device_t dev);
int	gem_mii_writereg(device_t dev, int phy, int reg, int val);

#endif /* _KERNEL */

#endif /* _IF_GEMVAR_H */