xref: /freebsd/sys/dev/rtwn/pci/rtwn_pci_rx.c (revision 22cf89c938886d14f5796fc49f9f020c23ea8eaf)
/*	$OpenBSD: if_rtwn.c,v 1.6 2015/08/28 00:03:53 deraadt Exp $	*/

/*-
 * Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2015 Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2016 Andriy Voskoboinyk <avos@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/epoch.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_media.h>

#include <net80211/ieee80211_var.h>

#include <dev/rtwn/if_rtwnreg.h>
#include <dev/rtwn/if_rtwnvar.h>
#include <dev/rtwn/if_rtwn_debug.h>
#include <dev/rtwn/if_rtwn_rx.h>
#include <dev/rtwn/if_rtwn_task.h>
#include <dev/rtwn/if_rtwn_tx.h>

#include <dev/rtwn/pci/rtwn_pci_var.h>
#include <dev/rtwn/pci/rtwn_pci_rx.h>

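/*
 * bus_dmamap_load() callback: record the physical address of the (single)
 * DMA segment in the caller-supplied bus_addr_t.
 */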
void
rtwn_pci_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
    int error)
{

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

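/*
 * (Re)initialize an Rx descriptor: record the buffer DMA address and length,
 * tag the last entry in the ring with the end-of-ring bit and, after a write
 * barrier, hand ownership of the descriptor back to the hardware.
 */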
void
rtwn_pci_setup_rx_desc(struct rtwn_pci_softc *pc,
    struct rtwn_rx_stat_pci *desc, bus_addr_t addr, size_t len, int idx)
{

	memset(desc, 0, sizeof(*desc));
	desc->rxdw0 = htole32(SM(RTWN_RXDW0_PKTLEN, len) |
		((idx == RTWN_PCI_RX_LIST_COUNT - 1) ? RTWN_RXDW0_EOR : 0));
	desc->rxbufaddr = htole32(addr);
	bus_space_barrier(pc->pc_st, pc->pc_sh, 0, pc->pc_mapsize,
	    BUS_SPACE_BARRIER_WRITE);
	desc->rxdw0 |= htole32(RTWN_RXDW0_OWN);
}

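/*
 * Process one received frame: validate the Rx descriptor, replace the
 * current mbuf with a freshly allocated one and pass the completed frame
 * up to net80211.
 */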
static void
rtwn_pci_rx_frame(struct rtwn_pci_softc *pc)
{
	struct epoch_tracker et;
	struct rtwn_softc *sc = &pc->pc_sc;
	struct rtwn_rx_ring *ring = &pc->rx_ring;
	struct rtwn_rx_stat_pci *rx_desc = &ring->desc[ring->cur];
	struct rtwn_rx_data *rx_data = &ring->rx_data[ring->cur];
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	uint32_t rxdw0;
	struct mbuf *m, *m1;
	int infosz, pktlen, shift, error;

	/* Dump Rx descriptor. */
	RTWN_DPRINTF(sc, RTWN_DEBUG_RECV_DESC,
	    "%s: dw: 0 %08X, 1 %08X, 2 %08X, 3 %08X, 4 %08X, tsfl %08X, "
	    "addr: %08X (64: %08X)\n",
	    __func__, le32toh(rx_desc->rxdw0), le32toh(rx_desc->rxdw1),
	    le32toh(rx_desc->rxdw2), le32toh(rx_desc->rxdw3),
	    le32toh(rx_desc->rxdw4), le32toh(rx_desc->tsf_low),
	    le32toh(rx_desc->rxbufaddr), le32toh(rx_desc->rxbufaddr64));

	rxdw0 = le32toh(rx_desc->rxdw0);
	if (__predict_false(rxdw0 & (RTWN_RXDW0_CRCERR | RTWN_RXDW0_ICVERR))) {
		/*
		 * This should not happen since we set up our Rx filter
		 * to not receive these frames.
		 */
		RTWN_DPRINTF(sc, RTWN_DEBUG_RECV,
		    "%s: RX flags error (%s)\n", __func__,
		    rxdw0 & RTWN_RXDW0_CRCERR ? "CRC" : "ICV");
		goto fail;
	}

	pktlen = MS(rxdw0, RTWN_RXDW0_PKTLEN);
	if (__predict_false(pktlen < sizeof(struct ieee80211_frame_ack) ||
	    pktlen > MJUMPAGESIZE)) {
		RTWN_DPRINTF(sc, RTWN_DEBUG_RECV,
		    "%s: frame is too short/long: %d\n", __func__, pktlen);
		goto fail;
	}

	infosz = MS(rxdw0, RTWN_RXDW0_INFOSZ) * 8;
	shift = MS(rxdw0, RTWN_RXDW0_SHIFT);

	m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
	if (__predict_false(m1 == NULL)) {
		device_printf(sc->sc_dev, "%s: could not allocate RX mbuf\n",
		    __func__);
		goto fail;
	}
	bus_dmamap_sync(ring->data_dmat, rx_data->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(ring->data_dmat, rx_data->map);

	error = bus_dmamap_load(ring->data_dmat, rx_data->map, mtod(m1, void *),
	    MJUMPAGESIZE, rtwn_pci_dma_map_addr, &rx_data->paddr, 0);
	if (error != 0) {
		m_freem(m1);

		error = bus_dmamap_load(ring->data_dmat, rx_data->map,
		    mtod(rx_data->m, void *), MJUMPAGESIZE,
		    rtwn_pci_dma_map_addr, &rx_data->paddr, BUS_DMA_NOWAIT);
		if (error != 0)
			panic("%s: could not load old RX mbuf",
			    device_get_name(sc->sc_dev));

		goto fail;
	}

	/* Finalize mbuf. */
	m = rx_data->m;
	rx_data->m = m1;
	m->m_pkthdr.len = m->m_len = pktlen + infosz + shift;

	ni = rtwn_rx_common(sc, m, rx_desc);

	RTWN_DPRINTF(sc, RTWN_DEBUG_RECV,
	    "%s: Rx frame len %d, infosz %d, shift %d\n",
	    __func__, pktlen, infosz, shift);

	/* Send the frame to the 802.11 layer. */
	RTWN_UNLOCK(sc);

	NET_EPOCH_ENTER(et);
	if (ni != NULL) {
		(void)ieee80211_input_mimo(ni, m);
		/* Node is no longer needed. */
		ieee80211_free_node(ni);
	} else
		(void)ieee80211_input_mimo_all(ic, m);
	NET_EPOCH_EXIT(et);

	RTWN_LOCK(sc);

	return;

fail:
	counter_u64_add(ic->ic_ierrors, 1);
}

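/*
 * Copy the current Rx descriptor (and, for short report/control frames,
 * the payload as well) into pc_rx_buf so the bus-independent handlers can
 * parse it; returns the number of bytes copied.
 */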
static int
rtwn_pci_rx_buf_copy(struct rtwn_pci_softc *pc)
{
	struct rtwn_rx_ring *ring = &pc->rx_ring;
	struct rtwn_rx_stat_pci *rx_desc = &ring->desc[ring->cur];
	struct rtwn_rx_data *rx_data = &ring->rx_data[ring->cur];
	uint32_t rxdw0;
	int desc_size, pktlen;

	/*
	 * NB: tx_report() / c2h_report() expect to see a USB Rx
	 * descriptor - the same as the PCIe one, but without the
	 * rxbufaddr* fields.
	 */
	desc_size = sizeof(struct rtwn_rx_stat_common);
	KASSERT(sizeof(pc->pc_rx_buf) >= desc_size,
	    ("adjust size for PCIe Rx buffer!"));

	memcpy(pc->pc_rx_buf, rx_desc, desc_size);

	rxdw0 = le32toh(rx_desc->rxdw0);
	pktlen = MS(rxdw0, RTWN_RXDW0_PKTLEN);

	if (pktlen > sizeof(pc->pc_rx_buf) - desc_size) {
		/* Looks like an ordinary Rx frame. */
		return (desc_size);
	}

	bus_dmamap_sync(ring->data_dmat, rx_data->map, BUS_DMASYNC_POSTREAD);
	memcpy(pc->pc_rx_buf + desc_size, mtod(rx_data->m, void *), pktlen);

	return (desc_size + pktlen);
}

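/*
 * Feed a hardware Tx report to the common rate control handler.
 */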
static void
rtwn_pci_tx_report(struct rtwn_pci_softc *pc, int len)
{
	struct rtwn_softc *sc = &pc->pc_sc;

	if (sc->sc_ratectl != RTWN_RATECTL_NET80211) {
		/* shouldn't happen */
		device_printf(sc->sc_dev,
		    "%s called while ratectl = %d!\n",
		    __func__, sc->sc_ratectl);
		return;
	}

	RTWN_NT_LOCK(sc);
	rtwn_handle_tx_report(sc, pc->pc_rx_buf, len);
	RTWN_NT_UNLOCK(sc);

#ifdef IEEE80211_SUPPORT_SUPERG
	/*
	 * NB: this will be executed only when the 'report' bit is set.
	 */
	if (sc->sc_tx_n_active > 0 && --sc->sc_tx_n_active <= 1)
		rtwn_cmd_sleepable(sc, NULL, 0, rtwn_ff_flush_all);
#endif
}

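/*
 * Forward a C2H (chip-to-host) event buffer to the common handler.
 */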
static void
rtwn_pci_c2h_report(struct rtwn_pci_softc *pc, int len)
{
	rtwn_handle_c2h_report(&pc->pc_sc, pc->pc_rx_buf, len);
}

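/*
 * Reclaim descriptors completed by the hardware on the given Tx queue:
 * unload their DMA maps, complete or free the mbufs and restart
 * transmission if the queue had been marked full.
 */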
static void
rtwn_pci_tx_done(struct rtwn_softc *sc, int qid)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	struct rtwn_tx_ring *ring = &pc->tx_ring[qid];
	struct rtwn_tx_desc_common *desc;
	struct rtwn_tx_data *data;

	RTWN_DPRINTF(sc, RTWN_DEBUG_INTR, "%s: qid %d, last %d, cur %d\n",
	    __func__, qid, ring->last, ring->cur);

	bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (ring->last != ring->cur) {
		data = &ring->tx_data[ring->last];
		desc = (struct rtwn_tx_desc_common *)
		    ((uint8_t *)ring->desc + sc->txdesc_len * ring->last);

		KASSERT(data->m != NULL, ("no mbuf"));

		if (desc->flags0 & RTWN_FLAGS0_OWN)
			break;

		/* Unmap and free mbuf. */
		bus_dmamap_sync(ring->data_dmat, data->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->data_dmat, data->map);

		if (data->ni != NULL) {	/* not a beacon frame */
			ieee80211_tx_complete(data->ni, data->m, 0);

			data->ni = NULL;
			ring->queued--;
			KASSERT(ring->queued >= 0,
			    ("ring->queued (qid %d) underflow!\n", qid));
		} else
			m_freem(data->m);

		data->m = NULL;
		ring->last = (ring->last + 1) % RTWN_PCI_TX_LIST_COUNT;
#ifndef D4054
		if (ring->queued > 0)
			sc->sc_tx_timer = 5;
		else
			sc->sc_tx_timer = 0;
#endif
	}

	if ((sc->qfullmsk & (1 << qid)) != 0 &&
	    ring->queued < (RTWN_PCI_TX_LIST_COUNT - 1)) {
		sc->qfullmsk &= ~(1 << qid);
		rtwn_start(sc);
	}

#ifdef IEEE80211_SUPPORT_SUPERG
	/*
	 * If the TX active queue drops below a certain
	 * threshold, ensure we age fast-frames out so they're
	 * transmitted.
	 */
	if (sc->sc_ratectl != RTWN_RATECTL_NET80211 && ring->queued <= 1) {
		/*
		 * XXX TODO: just make this a callout timer schedule
		 * so we can flush the FF staging queue if we're
		 * approaching idle.
		 */
		rtwn_cmd_sleepable(sc, NULL, 0, rtwn_ff_flush_all);
	}
#endif
}

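/*
 * Drain the Rx ring: classify every descriptor handed back by the hardware
 * as a data frame, Tx report or C2H event, dispatch it accordingly and
 * return the descriptor to the device.
 */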
static void
rtwn_pci_rx_done(struct rtwn_softc *sc)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	struct rtwn_rx_ring *ring = &pc->rx_ring;
	struct rtwn_rx_stat_pci *rx_desc;
	struct rtwn_rx_data *rx_data;
	int len;

	bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTREAD);

	for (;;) {
		rx_desc = &ring->desc[ring->cur];
		rx_data = &ring->rx_data[ring->cur];

		if (le32toh(rx_desc->rxdw0) & RTWN_RXDW0_OWN)
			break;

		len = rtwn_pci_rx_buf_copy(pc);

		switch (rtwn_classify_intr(sc, pc->pc_rx_buf, len)) {
		case RTWN_RX_DATA:
			rtwn_pci_rx_frame(pc);
			break;
		case RTWN_RX_TX_REPORT:
			rtwn_pci_tx_report(pc, len);
			break;
		case RTWN_RX_OTHER:
			rtwn_pci_c2h_report(pc, len);
			break;
		default:
			/* NOTREACHED */
			KASSERT(0, ("unknown Rx classification code"));
			break;
		}

		/* Update / reset RX descriptor (and set OWN bit). */
		rtwn_pci_setup_rx_desc(pc, rx_desc, rx_data->paddr,
		    MJUMPAGESIZE, ring->cur);

		if (!(sc->sc_flags & RTWN_RUNNING))
			return;

		/* NB: device can reuse current descriptor. */
		bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
		    BUS_DMASYNC_POSTREAD);

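		/*
		 * Advance to the next entry only if the device still owns
		 * this descriptor; if it has already been filled again,
		 * re-examine it on the next loop iteration.
		 */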
		if (le32toh(rx_desc->rxdw0) & RTWN_RXDW0_OWN)
			ring->cur = (ring->cur + 1) % RTWN_PCI_RX_LIST_COUNT;
	}
}

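/*
 * Interrupt handler: fetch the interrupt status, service Rx and Tx
 * completions and re-enable interrupts while the device is running.
 */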
void
rtwn_pci_intr(void *arg)
{
	struct rtwn_softc *sc = arg;
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	int i, status, tx_rings;

	RTWN_LOCK(sc);
	status = rtwn_pci_get_intr_status(pc, &tx_rings);
	RTWN_DPRINTF(sc, RTWN_DEBUG_INTR, "%s: status %08X, tx_rings %08X\n",
	    __func__, status, tx_rings);
	if (status == 0 && tx_rings == 0)
		goto unlock;

	if (status & (RTWN_PCI_INTR_RX | RTWN_PCI_INTR_TX_REPORT)) {
		rtwn_pci_rx_done(sc);
		if (!(sc->sc_flags & RTWN_RUNNING))
			goto unlock;
	}

	if (tx_rings != 0)
		for (i = 0; i < RTWN_PCI_NTXQUEUES; i++)
			if (tx_rings & (1 << i))
				rtwn_pci_tx_done(sc, i);

	if (sc->sc_flags & RTWN_RUNNING)
		rtwn_pci_enable_intr(pc);
unlock:
	RTWN_UNLOCK(sc);
}
404