/*	$OpenBSD: if_rtwn.c,v 1.6 2015/08/28 00:03:53 deraadt Exp $	*/

/*-
 * Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2015 Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2016 Andriy Voskoboinyk <avos@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/bus.h>
#include <sys/endian.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_media.h>

#include <net80211/ieee80211_var.h>

#include <dev/rtwn/if_rtwnreg.h>
#include <dev/rtwn/if_rtwnvar.h>
#include <dev/rtwn/if_rtwn_debug.h>
#include <dev/rtwn/if_rtwn_rx.h>
#include <dev/rtwn/if_rtwn_task.h>
#include <dev/rtwn/if_rtwn_tx.h>

#include <dev/rtwn/pci/rtwn_pci_var.h>
#include <dev/rtwn/pci/rtwn_pci_rx.h>

/*
 * bus_dmamap_load() callback: record the physical address of a
 * single-segment DMA mapping.
 */
void
rtwn_pci_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
    int error)
{

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

/*
 * (Re)initialize an Rx descriptor for the given buffer and return
 * ownership of it to the hardware.
 */
void
rtwn_pci_setup_rx_desc(struct rtwn_pci_softc *pc,
    struct rtwn_rx_stat_pci *desc, bus_addr_t addr, size_t len, int idx)
{

	memset(desc, 0, sizeof(*desc));
	desc->rxdw0 = htole32(SM(RTWN_RXDW0_PKTLEN, len) |
	    ((idx == RTWN_PCI_RX_LIST_COUNT - 1) ? RTWN_RXDW0_EOR : 0));
	desc->rxbufaddr = htole32(addr);
	bus_space_barrier(pc->pc_st, pc->pc_sh, 0, pc->pc_mapsize,
	    BUS_SPACE_BARRIER_WRITE);
	desc->rxdw0 |= htole32(RTWN_RXDW0_OWN);
}

/*
 * Process one received frame (Rx descriptor 'desc_idx') and pass it
 * up to net80211.
 */
static void
rtwn_pci_rx_frame(struct rtwn_softc *sc, struct rtwn_rx_stat_pci *rx_desc,
    int desc_idx)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	struct rtwn_rx_ring *ring = &pc->rx_ring;
	struct rtwn_rx_data *rx_data = &ring->rx_data[desc_idx];
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	uint32_t rxdw0;
	struct mbuf *m, *m1;
	int infosz, pktlen, shift, error;

	/* Dump Rx descriptor. */
	RTWN_DPRINTF(sc, RTWN_DEBUG_RECV_DESC,
	    "%s: dw: 0 %08X, 1 %08X, 2 %08X, 3 %08X, 4 %08X, tsfl %08X, "
	    "addr: %08X (64: %08X)\n",
	    __func__, le32toh(rx_desc->rxdw0), le32toh(rx_desc->rxdw1),
	    le32toh(rx_desc->rxdw2), le32toh(rx_desc->rxdw3),
	    le32toh(rx_desc->rxdw4), le32toh(rx_desc->tsf_low),
	    le32toh(rx_desc->rxbufaddr), le32toh(rx_desc->rxbufaddr64));

	rxdw0 = le32toh(rx_desc->rxdw0);
	if (__predict_false(rxdw0 & (RTWN_RXDW0_CRCERR | RTWN_RXDW0_ICVERR))) {
		/*
		 * This should not happen since we setup our Rx filter
		 * to not receive these frames.
		 */
		RTWN_DPRINTF(sc, RTWN_DEBUG_RECV,
		    "%s: RX flags error (%s)\n", __func__,
		    rxdw0 & RTWN_RXDW0_CRCERR ? "CRC" : "ICV");
		goto fail;
	}

	pktlen = MS(rxdw0, RTWN_RXDW0_PKTLEN);
	if (__predict_false(pktlen < sizeof(struct ieee80211_frame_ack) ||
	    pktlen > MJUMPAGESIZE)) {
		RTWN_DPRINTF(sc, RTWN_DEBUG_RECV,
		    "%s: frame is too short/long: %d\n", __func__, pktlen);
		goto fail;
	}

	infosz = MS(rxdw0, RTWN_RXDW0_INFOSZ) * 8;
	shift = MS(rxdw0, RTWN_RXDW0_SHIFT);

	m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
	if (__predict_false(m1 == NULL)) {
		device_printf(sc->sc_dev, "%s: could not allocate RX mbuf\n",
		    __func__);
		goto fail;
	}
	bus_dmamap_sync(ring->data_dmat, rx_data->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(ring->data_dmat, rx_data->map);

	error = bus_dmamap_load(ring->data_dmat, rx_data->map, mtod(m1, void *),
	    MJUMPAGESIZE, rtwn_pci_dma_map_addr, &rx_data->paddr, 0);
	if (error != 0) {
		m_freem(m1);

		error = bus_dmamap_load(ring->data_dmat, rx_data->map,
		    mtod(rx_data->m, void *), MJUMPAGESIZE,
		    rtwn_pci_dma_map_addr, &rx_data->paddr, BUS_DMA_NOWAIT);
		if (error != 0)
			panic("%s: could not load old RX mbuf",
			    device_get_name(sc->sc_dev));

		/* Physical address may have changed. */
		rtwn_pci_setup_rx_desc(pc, rx_desc, rx_data->paddr,
		    MJUMPAGESIZE, desc_idx);
		goto fail;
	}

	/* Finalize mbuf. */
	m = rx_data->m;
	rx_data->m = m1;
	m->m_pkthdr.len = m->m_len = pktlen + infosz + shift;

	ni = rtwn_rx_common(sc, m, rx_desc);

	RTWN_DPRINTF(sc, RTWN_DEBUG_RECV,
	    "%s: Rx frame len %d, infosz %d, shift %d\n",
	    __func__, pktlen, infosz, shift);

	/* Update RX descriptor. */
	rtwn_pci_setup_rx_desc(pc, rx_desc, rx_data->paddr, MJUMPAGESIZE,
	    desc_idx);

	/* Send the frame to the 802.11 layer. */
	RTWN_UNLOCK(sc);
	if (ni != NULL) {
		(void)ieee80211_input_mimo(ni, m);
		/* Node is no longer needed. */
		ieee80211_free_node(ni);
	} else
		(void)ieee80211_input_mimo_all(ic, m);

	RTWN_LOCK(sc);

	return;

fail:
	counter_u64_add(ic->ic_ierrors, 1);
}

/*
 * Reclaim completed Tx descriptors on queue 'qid' and complete
 * their mbufs.
 */
static void
rtwn_pci_tx_done(struct rtwn_softc *sc, int qid)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	struct rtwn_tx_ring *ring = &pc->tx_ring[qid];
	struct rtwn_tx_desc_common *desc;
	struct rtwn_tx_data *data;

	RTWN_DPRINTF(sc, RTWN_DEBUG_INTR, "%s: qid %d, last %d, cur %d\n",
	    __func__, qid, ring->last, ring->cur);

	bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTREAD);

	while (ring->last != ring->cur) {
		data = &ring->tx_data[ring->last];
		desc = (struct rtwn_tx_desc_common *)
		    ((uint8_t *)ring->desc + sc->txdesc_len * ring->last);

		KASSERT(data->m != NULL, ("no mbuf"));

		if (desc->flags0 & RTWN_FLAGS0_OWN)
			break;

		/* Unmap and free mbuf. */
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);

		if (data->ni != NULL) {	/* not a beacon frame */
			ieee80211_tx_complete(data->ni, data->m, 0);

			data->ni = NULL;
			ring->queued--;
			KASSERT(ring->queued >= 0,
			    ("ring->queued (qid %d) underflow!\n", qid));
		} else
			m_freem(data->m);

		data->m = NULL;
		ring->last = (ring->last + 1) % RTWN_PCI_TX_LIST_COUNT;
#ifndef D4054
		if (ring->queued > 0)
			sc->sc_tx_timer = 5;
		else
			sc->sc_tx_timer = 0;
#endif
	}

	if ((sc->qfullmsk & (1 << qid)) != 0 &&
	    ring->queued < (RTWN_PCI_TX_LIST_COUNT - 1)) {
		sc->qfullmsk &= ~(1 << qid);
		rtwn_start(sc);
	}

#ifdef IEEE80211_SUPPORT_SUPERG
	/*
	 * If the TX active queue drops below a certain
	 * threshold, ensure we age fast-frames out so they're
	 * transmitted.
	 */
	if (sc->sc_ratectl != RTWN_RATECTL_NET80211 && ring->queued <= 1) {
		/*
		 * XXX TODO: just make this a callout timer schedule
		 * so we can flush the FF staging queue if we're
		 * approaching idle.
		 */
		rtwn_cmd_sleepable(sc, NULL, 0, rtwn_ff_flush_all);
	}
#endif
}

/*
 * Process all Rx descriptors that the hardware has handed back
 * to the driver.
 */
static void
rtwn_pci_rx_done(struct rtwn_softc *sc)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	struct rtwn_rx_ring *ring = &pc->rx_ring;

	bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTREAD);

	for (;;) {
		struct rtwn_rx_stat_pci *rx_desc = &ring->desc[ring->cur];

		if (le32toh(rx_desc->rxdw0) & RTWN_RXDW0_OWN)
			break;

		rtwn_pci_rx_frame(sc, rx_desc, ring->cur);

		if (!(sc->sc_flags & RTWN_RUNNING))
			return;

		ring->cur = (ring->cur + 1) % RTWN_PCI_RX_LIST_COUNT;
	}
}

/* PCI interrupt handler. */
void
rtwn_pci_intr(void *arg)
{
	struct rtwn_softc *sc = arg;
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	int i, status, tx_rings;

	RTWN_LOCK(sc);
	status = rtwn_classify_intr(sc, &tx_rings, 0);
	RTWN_DPRINTF(sc, RTWN_DEBUG_INTR, "%s: status %08X, tx_rings %08X\n",
	    __func__, status, tx_rings);
	if (status == 0 && tx_rings == 0)
		goto unlock;

	if (status & RTWN_PCI_INTR_RX) {
		rtwn_pci_rx_done(sc);
		if (!(sc->sc_flags & RTWN_RUNNING))
			goto unlock;
	}

	if (tx_rings != 0)
		for (i = 0; i < RTWN_PCI_NTXQUEUES; i++)
			if (tx_rings & (1 << i))
				rtwn_pci_tx_done(sc, i);

	if (sc->sc_flags & RTWN_RUNNING)
		rtwn_pci_enable_intr(pc);
unlock:
	RTWN_UNLOCK(sc);
}