/*
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: NetBSD: gemvar.h,v 1.5 2001/10/18 15:19:22 thorpej Exp
 *
 * $FreeBSD$
 */

#ifndef	_IF_GEMVAR_H
#define	_IF_GEMVAR_H

#include <sys/queue.h>
#include <sys/callout.h>

/*
 * Misc. definitions for the Sun ``Gem'' Ethernet controller family driver.
 */

/*
 * Transmit descriptor list size.  This is arbitrary, but allocate
 * enough descriptors for 64 pending transmissions and 16 segments
 * per packet.
 */
#define	GEM_NTXSEGS		16

#define	GEM_TXQUEUELEN		64
#define	GEM_NTXDESC		(GEM_TXQUEUELEN * GEM_NTXSEGS)
#define	GEM_NTXDESC_MASK	(GEM_NTXDESC - 1)
#define	GEM_NEXTTX(x)		(((x) + 1) & GEM_NTXDESC_MASK)

/*
 * Receive descriptor list size.  We have one Rx buffer per incoming
 * packet, so this logic is a little simpler.
 */
#define	GEM_NRXDESC		128
#define	GEM_NRXDESC_MASK	(GEM_NRXDESC - 1)
#define	GEM_NEXTRX(x)		(((x) + 1) & GEM_NRXDESC_MASK)

/*
 * How many ticks to wait before retrying an RX descriptor that is
 * still owned by the hardware.
 */
#define	GEM_RXOWN_TICKS		(hz / 50)

/*
 * Control structures are DMA'd to the GEM chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct gem_control_data {
	/*
	 * The transmit descriptors.
	 */
	struct gem_desc gcd_txdescs[GEM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	struct gem_desc gcd_rxdescs[GEM_NRXDESC];
};

#define	GEM_CDOFF(x)		offsetof(struct gem_control_data, x)
#define	GEM_CDTXOFF(x)		GEM_CDOFF(gcd_txdescs[(x)])
#define	GEM_CDRXOFF(x)		GEM_CDOFF(gcd_rxdescs[(x)])
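/*
 * Illustrative sketch only, not part of the original header: how the ring
 * index and offset macros above compose.  GEM_NTXDESC is a power of two,
 * so GEM_NEXTTX() can wrap the index with a simple mask, and GEM_CDTXOFF()
 * turns an index into a byte offset within the single control-data clump.
 * The helper name gem_example_next_txdesc_offset is hypothetical.
 */
#if 0
static __inline bus_size_t
gem_example_next_txdesc_offset(int idx)
{
	/* Advance to the next Tx descriptor, wrapping at GEM_NTXDESC. */
	idx = GEM_NEXTTX(idx);

	/*
	 * Byte offset of that descriptor inside struct gem_control_data;
	 * the GEM_CDTXADDR() macro defined further below adds this offset
	 * to the clump's bus address.
	 */
	return (GEM_CDTXOFF(idx));
}
#endif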
/*
 * Software state for transmit job mbufs (may be elements of mbuf chains).
 */
struct gem_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndescs;			/* number of descriptors */
	STAILQ_ENTRY(gem_txsoft) txs_q;
};

STAILQ_HEAD(gem_txsq, gem_txsoft);

/* Argument structure for busdma callback */
struct gem_txdma {
	struct gem_softc *txd_sc;
	int txd_nexttx;
	int txd_lasttx;
	int txd_nsegs;
	int txd_flags;
#define	GTXD_FIRST	1
#define	GTXD_LAST	2
	int txd_error;
};

/* Transmit job descriptor */
struct gem_txjob {
	int txj_nexttx;
	int txj_lasttx;
	int txj_nsegs;
	STAILQ_HEAD(, gem_txsoft) txj_txsq;
};

/*
 * Software state for receive jobs.
 */
struct gem_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
	bus_addr_t rxs_paddr;		/* physical address of the segment */
};

/*
 * Software state per device.
 */
struct gem_softc {
	struct arpcom sc_arpcom;	/* arp common data */
	device_t sc_miibus;
	struct mii_data *sc_mii;	/* MII media control */
	device_t sc_dev;		/* generic device information */
	struct callout sc_tick_ch;	/* tick callout */
	struct callout sc_rx_ch;	/* delayed rx callout */

	/* The following bus handles are to be provided by the bus front-end */
	bus_space_tag_t sc_bustag;	/* bus tag */
	bus_dma_tag_t sc_pdmatag;	/* parent bus dma tag */
	bus_dma_tag_t sc_dmatag;	/* bus dma tag */
	bus_dma_tag_t sc_cdmatag;	/* control data bus dma tag */
	bus_dmamap_t sc_dmamap;		/* bus dma handle */
	bus_space_handle_t sc_h;	/* bus space handle for all regs */

	int sc_phys[2];			/* MII instance -> PHY map */

	int sc_mif_config;		/* Selected MII reg setting */

	int sc_pci;			/* XXXXX -- PCI buses are LE. */

	/*
	 * Ring buffer DMA stuff.
	 */
	bus_dma_segment_t sc_cdseg;	/* control data memory */
	int sc_cdnseg;			/* number of segments */
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_addr_t sc_cddma;

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct gem_txsoft sc_txsoft[GEM_TXQUEUELEN];
	struct gem_rxsoft sc_rxsoft[GEM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct gem_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->gcd_txdescs
#define	sc_rxdescs	sc_control_data->gcd_rxdescs

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	struct gem_txsq sc_txfreeq;	/* free Tx descsofts */
	struct gem_txsq sc_txdirtyq;	/* dirty Tx descsofts */

	int sc_rxptr;			/* next ready RX descriptor/descsoft */

	/* ========== */
	int sc_inited;
	int sc_debug;
	int sc_flags;

	/* Special hardware hooks */
	void (*sc_hwreset)(struct gem_softc *);
	void (*sc_hwinit)(struct gem_softc *);
};

#define	GEM_DMA_READ(sc, v)	(((sc)->sc_pci) ? le64toh(v) : be64toh(v))
#define	GEM_DMA_WRITE(sc, v)	(((sc)->sc_pci) ? htole64(v) : htobe64(v))
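/*
 * Illustrative sketch only, not part of the original header: the intended
 * use of the byte-order macros above.  Descriptor words are little-endian
 * on the PCI variants of the chip (sc_pci != 0) and big-endian otherwise,
 * so 64-bit values go through GEM_DMA_WRITE()/GEM_DMA_READ() on their way
 * to and from the descriptor ring.  The helper name
 * gem_example_write_txdesc and the use of uint64_t here are illustrative;
 * gd_addr and gd_flags are the struct gem_desc fields also used by
 * GEM_INIT_RXDESC() below.
 */
#if 0
static __inline void
gem_example_write_txdesc(struct gem_softc *sc, int idx, bus_addr_t pa,
    uint64_t flags)
{
	/* Both descriptor words are stored in the chip's byte order. */
	sc->sc_txdescs[idx].gd_addr = GEM_DMA_WRITE(sc, pa);
	sc->sc_txdescs[idx].gd_flags = GEM_DMA_WRITE(sc, flags);
}
#endif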
#define	GEM_CDTXADDR(sc, x)	((sc)->sc_cddma + GEM_CDTXOFF((x)))
#define	GEM_CDRXADDR(sc, x)	((sc)->sc_cddma + GEM_CDRXOFF((x)))

#define	GEM_CDSPADDR(sc)	((sc)->sc_cddma + GEM_CDSPOFF)

#define	GEM_CDTXSYNC(sc, x, n, ops)					\
	bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap, (ops))

#define	GEM_CDRXSYNC(sc, x, ops)					\
	bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap, (ops))

#define	GEM_CDSPSYNC(sc, ops)						\
	bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap, (ops))

#define	GEM_INIT_RXDESC(sc, x)						\
do {									\
	struct gem_rxsoft *__rxs = &sc->sc_rxsoft[(x)];			\
	struct gem_desc *__rxd = &sc->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->gd_addr =						\
	    GEM_DMA_WRITE((sc), __rxs->rxs_paddr);			\
	__rxd->gd_flags =						\
	    GEM_DMA_WRITE((sc),						\
		(((__m->m_ext.ext_size) << GEM_RD_BUFSHIFT)		\
		& GEM_RD_BUFSIZE) | GEM_RD_OWN);			\
	GEM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); \
} while (0)

#ifdef _KERNEL
extern devclass_t gem_devclass;

int	gem_attach(struct gem_softc *);
int	gem_detach(struct gem_softc *);
void	gem_intr(void *);

int	gem_mediachange(struct ifnet *);
void	gem_mediastatus(struct ifnet *, struct ifmediareq *);

void	gem_reset(struct gem_softc *);

/* MII methods & callbacks */
int	gem_mii_readreg(device_t, int, int);
int	gem_mii_writereg(device_t, int, int, int);
void	gem_mii_statchg(device_t);

#endif /* _KERNEL */

#endif /* _IF_GEMVAR_H */
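/*
 * Illustrative sketch only, not part of the original header (and compiled
 * out, so it is harmless outside the include guard): a hypothetical helper
 * showing how GEM_INIT_RXDESC() and GEM_NEXTRX() are meant to compose when
 * a receive buffer is handed back to the chip after its packet has been
 * processed.
 */
#if 0
static __inline void
gem_example_rx_recycle(struct gem_softc *sc, int idx)
{
	/*
	 * GEM_INIT_RXDESC() rewrites descriptor 'idx' with the buffer's
	 * bus address, marks it GEM_RD_OWN so the hardware owns it again
	 * and syncs the control data so the chip sees the update.
	 */
	GEM_INIT_RXDESC(sc, idx);

	/* Advance the software ring pointer, wrapping with GEM_NEXTRX(). */
	sc->sc_rxptr = GEM_NEXTRX(idx);
}
#endif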