1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
5 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer,
13 * without modification.
14 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
15 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
16 * redistribution must be conditioned upon including a substantially
17 * similar Disclaimer requirement for further binary redistribution.
18 *
19 * NO WARRANTY
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
23 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
24 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
25 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
28 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGES.
31 */
32
33 #include <sys/cdefs.h>
34 /*
35 * Driver for the Atheros Wireless LAN controller.
36 *
37 * This software is derived from work of Atsushi Onoe; his contribution
38 * is greatly appreciated.
39 */
40
41 #include "opt_inet.h"
42 #include "opt_ath.h"
43 #include "opt_wlan.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/sysctl.h>
48 #include <sys/mbuf.h>
49 #include <sys/malloc.h>
50 #include <sys/lock.h>
51 #include <sys/mutex.h>
52 #include <sys/kernel.h>
53 #include <sys/socket.h>
54 #include <sys/sockio.h>
55 #include <sys/errno.h>
56 #include <sys/callout.h>
57 #include <sys/bus.h>
58 #include <sys/endian.h>
59 #include <sys/kthread.h>
60 #include <sys/taskqueue.h>
61 #include <sys/priv.h>
62 #include <sys/ktr.h>
63
64 #include <machine/bus.h>
65
66 #include <net/if.h>
67 #include <net/if_var.h>
68 #include <net/if_dl.h>
69 #include <net/if_media.h>
70 #include <net/if_types.h>
71 #include <net/if_arp.h>
72 #include <net/ethernet.h>
73 #include <net/if_llc.h>
74
75 #include <net80211/ieee80211_var.h>
76 #include <net80211/ieee80211_regdomain.h>
77 #ifdef IEEE80211_SUPPORT_SUPERG
78 #include <net80211/ieee80211_superg.h>
79 #endif
80 #ifdef IEEE80211_SUPPORT_TDMA
81 #include <net80211/ieee80211_tdma.h>
82 #endif
83 #include <net80211/ieee80211_ht.h>
84
85 #include <net/bpf.h>
86
87 #ifdef INET
88 #include <netinet/in.h>
89 #include <netinet/if_ether.h>
90 #endif
91
92 #include <dev/ath/if_athvar.h>
93 #include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */
94 #include <dev/ath/ath_hal/ah_diagcodes.h>
95
96 #include <dev/ath/if_ath_debug.h>
97
98 #ifdef ATH_TX99_DIAG
99 #include <dev/ath/ath_tx99/ath_tx99.h>
100 #endif
101
102 #include <dev/ath/if_ath_misc.h>
103 #include <dev/ath/if_ath_tx.h>
104 #include <dev/ath/if_ath_tx_ht.h>
105
106 #ifdef ATH_DEBUG_ALQ
107 #include <dev/ath/if_ath_alq.h>
108 #endif
109
110 /*
111 * How many retries to perform in software
112 */
113 #define SWMAX_RETRIES 10
114
115 /*
116 * What queue to throw the non-QoS TID traffic into
117 */
118 #define ATH_NONQOS_TID_AC WME_AC_VO
119
120 #if 0
121 static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
122 #endif
123 static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
124 int tid);
125 static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
126 int tid);
127 static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
128 struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
129 static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
130 struct ieee80211_node *ni, struct mbuf *m0, int *tid);
131 static struct ath_buf *
132 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
133 struct ath_tid *tid, struct ath_buf *bf);
134
135 #ifdef ATH_DEBUG_ALQ
136 void
137 ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
138 {
139 struct ath_buf *bf;
140 int i, n;
141 const char *ds;
142
143 /* XXX we should skip out early if debugging isn't enabled! */
144 bf = bf_first;
145
146 while (bf != NULL) {
147 /* XXX should ensure bf_nseg > 0! */
148 if (bf->bf_nseg == 0)
149 break;
150 n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
151 for (i = 0, ds = (const char *) bf->bf_desc;
152 i < n;
153 i++, ds += sc->sc_tx_desclen) {
154 if_ath_alq_post(&sc->sc_alq,
155 ATH_ALQ_EDMA_TXDESC,
156 sc->sc_tx_desclen,
157 ds);
158 }
159 bf = bf->bf_next;
160 }
161 }
162 #endif /* ATH_DEBUG_ALQ */
163
164 /*
165 * Whether to use the 11n rate scenario functions or not
166 */
167 static inline int
168 ath_tx_is_11n(struct ath_softc *sc)
169 {
170 return ((sc->sc_ah->ah_magic == 0x20065416) ||
171 (sc->sc_ah->ah_magic == 0x19741014));
172 }
173
174 /*
175 * Obtain the current TID from the given frame.
176 *
177 * Non-QoS frames get mapped to a TID so frames consistently
178 * go on a sensible queue.
179 */
180 static int
181 ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
182 {
183 const struct ieee80211_frame *wh;
184
185 wh = mtod(m0, const struct ieee80211_frame *);
186
187 /* Non-QoS: map frame to a TID queue for software queueing */
188 if (! IEEE80211_QOS_HAS_SEQ(wh))
189 return (WME_AC_TO_TID(M_WME_GETAC(m0)));
190
191 /* QoS - fetch the TID from the header, ignore mbuf WME */
192 return (ieee80211_gettid(wh));
193 }
194
195 static void
196 ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
197 {
198 struct ieee80211_frame *wh;
199
200 wh = mtod(bf->bf_m, struct ieee80211_frame *);
201 /* Only update/resync if needed */
202 if (bf->bf_state.bfs_isretried == 0) {
203 wh->i_fc[1] |= IEEE80211_FC1_RETRY;
204 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
205 BUS_DMASYNC_PREWRITE);
206 }
207 bf->bf_state.bfs_isretried = 1;
208 bf->bf_state.bfs_retries ++;
209 }
210
211 /*
212 * Determine what the correct AC queue for the given frame
213 * should be.
214 *
215 * For QoS frames, obey the TID. That way things like
216 * management frames that are related to a given TID
217 * are thus serialised with the rest of the TID traffic,
218 * regardless of net80211 overriding priority.
219 *
220 * For non-QoS frames, return the mbuf WME priority.
221 *
222 * This has implications that higher priority non-QoS traffic
223 * may end up being scheduled before other non-QoS traffic,
224 * leading to out-of-sequence packets being emitted.
225 *
226 * (It'd be nice to log/count this so we can see if it
227 * really is a problem.)
228 *
229 * TODO: maybe we should throw multicast traffic, QoS or
230 * otherwise, into a separate TX queue?
231 */
232 static int
233 ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
234 {
235 const struct ieee80211_frame *wh;
236
237 wh = mtod(m0, const struct ieee80211_frame *);
238
239 /*
240 * QoS data frame (sequence number or otherwise) -
241 * return hardware queue mapping for the underlying
242 * TID.
243 */
244 if (IEEE80211_QOS_HAS_SEQ(wh))
245 return TID_TO_WME_AC(ieee80211_gettid(wh));
246
247 /*
248 * Otherwise - return mbuf QoS pri.
249 */
250 return (M_WME_GETAC(m0));
251 }
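
/*
 * Illustrative sketch (not driver code): how a transmit path might use
 * ath_tx_gettid() and ath_tx_getac() together.  The local names here
 * (tid, ac, txq) are hypothetical; sc_ac2q[] is the driver's existing
 * AC to hardware queue map.
 */
#if 0
	int tid = ath_tx_gettid(sc, m0);	/* software TID to queue on */
	int ac = ath_tx_getac(sc, m0);		/* WME access category */
	struct ath_txq *txq = sc->sc_ac2q[ac];	/* backing hardware TXQ */
#endif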
252
253 void
254 ath_txfrag_cleanup(struct ath_softc *sc,
255 ath_bufhead *frags, struct ieee80211_node *ni)
256 {
257 struct ath_buf *bf, *next;
258
259 ATH_TXBUF_LOCK_ASSERT(sc);
260
261 TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
262 /* NB: bf assumed clean */
263 TAILQ_REMOVE(frags, bf, bf_list);
264 ath_returnbuf_head(sc, bf);
265 ieee80211_node_decref(ni);
266 }
267 }
268
269 /*
270 * Setup xmit of a fragmented frame. Allocate a buffer
271 * for each frag and bump the node reference count to
272 * reflect the held reference to be setup by ath_tx_start.
273 */
274 int
275 ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
276 struct mbuf *m0, struct ieee80211_node *ni)
277 {
278 struct mbuf *m;
279 struct ath_buf *bf;
280
281 ATH_TXBUF_LOCK(sc);
282 for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
283 /* XXX non-management? */
284 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
285 if (bf == NULL) { /* out of buffers, cleanup */
286 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n",
287 __func__);
288 ath_txfrag_cleanup(sc, frags, ni);
289 break;
290 }
291 (void) ieee80211_ref_node(ni);
292 TAILQ_INSERT_TAIL(frags, bf, bf_list);
293 }
294 ATH_TXBUF_UNLOCK(sc);
295
296 return !TAILQ_EMPTY(frags);
297 }
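
/*
 * Illustrative sketch (not driver code): the expected pairing of
 * ath_txfrag_setup() with a caller's fragment handling.  The frags
 * list, m0/ni names and the error handling here are schematic only.
 */
#if 0
	ath_bufhead frags;

	TAILQ_INIT(&frags);
	if ((m0->m_flags & M_FRAG) &&
	    !ath_txfrag_setup(sc, &frags, m0, ni)) {
		/* Out of ath_bufs; drop the whole fragment chain. */
		ieee80211_free_mbuf(m0);
		return (ENOBUFS);
	}
	/* If a later fragment fails, ath_txfrag_cleanup() releases the bufs. */
#endif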
298
299 static int
300 ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
301 {
302 struct mbuf *m;
303 int error;
304
305 /*
306 * Load the DMA map so any coalescing is done. This
307 * also calculates the number of descriptors we need.
308 */
309 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
310 bf->bf_segs, &bf->bf_nseg,
311 BUS_DMA_NOWAIT);
312 if (error == EFBIG) {
313 /* XXX packet needs too many descriptors; force bf_nseg past ATH_MAX_SCATTER so the m_collapse() path below runs */
314 bf->bf_nseg = ATH_MAX_SCATTER + 1;
315 } else if (error != 0) {
316 sc->sc_stats.ast_tx_busdma++;
317 ieee80211_free_mbuf(m0);
318 return error;
319 }
320 /*
321 * Discard null packets and check for packets that
322 * require too many TX descriptors. We try to convert
323 * the latter to a cluster.
324 */
325 if (bf->bf_nseg > ATH_MAX_SCATTER) { /* too many desc's, linearize */
326 sc->sc_stats.ast_tx_linear++;
327 m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER);
328 if (m == NULL) {
329 ieee80211_free_mbuf(m0);
330 sc->sc_stats.ast_tx_nombuf++;
331 return ENOMEM;
332 }
333 m0 = m;
334 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
335 bf->bf_segs, &bf->bf_nseg,
336 BUS_DMA_NOWAIT);
337 if (error != 0) {
338 sc->sc_stats.ast_tx_busdma++;
339 ieee80211_free_mbuf(m0);
340 return error;
341 }
342 KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,
343 ("too many segments after defrag; nseg %u", bf->bf_nseg));
344 } else if (bf->bf_nseg == 0) { /* null packet, discard */
345 sc->sc_stats.ast_tx_nodata++;
346 ieee80211_free_mbuf(m0);
347 return EIO;
348 }
349 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
350 __func__, m0, m0->m_pkthdr.len);
351 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
352 bf->bf_m = m0;
353
354 return 0;
355 }
356
357 /*
358 * Chain together segments+descriptors for a frame - 11n or otherwise.
359 *
360 * For aggregates, this is called on each frame in the aggregate.
361 */
362 static void
363 ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
364 struct ath_buf *bf, bool is_aggr, int is_first_subframe,
365 int is_last_subframe)
366 {
367 struct ath_hal *ah = sc->sc_ah;
368 char *ds;
369 int i, bp, dsp;
370 HAL_DMA_ADDR bufAddrList[4];
371 uint32_t segLenList[4];
372 int numTxMaps = 1;
373 int isFirstDesc = 1;
374
375 /*
376 * XXX There's txdma and txdma_mgmt; the descriptor
377 * sizes must match.
378 */
379 struct ath_descdma *dd = &sc->sc_txdma;
380
381 /*
382 * Fill in the remainder of the descriptor info.
383 */
384
385 /*
386 * We need the number of TX data pointers in each descriptor.
387 * EDMA and later chips support 4 TX buffers per descriptor;
388 * previous chips just support one.
389 */
390 numTxMaps = sc->sc_tx_nmaps;
391
392 /*
393 * For EDMA and later chips ensure the TX map is fully populated
394 * before advancing to the next descriptor.
395 */
396 ds = (char *) bf->bf_desc;
397 bp = dsp = 0;
398 bzero(bufAddrList, sizeof(bufAddrList));
399 bzero(segLenList, sizeof(segLenList));
400 for (i = 0; i < bf->bf_nseg; i++) {
401 bufAddrList[bp] = bf->bf_segs[i].ds_addr;
402 segLenList[bp] = bf->bf_segs[i].ds_len;
403 bp++;
404
405 /*
406 * Go to the next segment if this isn't the last segment
407 * and there's space in the current TX map.
408 */
409 if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
410 continue;
411
412 /*
413 * Last segment or we're out of buffer pointers.
414 */
415 bp = 0;
416
417 if (i == bf->bf_nseg - 1)
418 ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);
419 else
420 ath_hal_settxdesclink(ah, (struct ath_desc *) ds,
421 bf->bf_daddr + dd->dd_descsize * (dsp + 1));
422
423 /*
424 * XXX This assumes that bfs_txq is the actual destination
425 * hardware queue at this point. It may not have been
426 * assigned, it may actually be pointing to the multicast
427 * software TXQ id. These must be fixed!
428 */
429 ath_hal_filltxdesc(ah, (struct ath_desc *) ds
430 , bufAddrList
431 , segLenList
432 , bf->bf_descid /* XXX desc id */
433 , bf->bf_state.bfs_tx_queue
434 , isFirstDesc /* first segment */
435 , i == bf->bf_nseg - 1 /* last segment */
436 , (struct ath_desc *) ds0 /* first descriptor */
437 );
438
439 /*
440 * Make sure the 11n aggregate fields are cleared.
441 *
442 * XXX TODO: this doesn't need to be called for
443 * aggregate frames; as it'll be called on all
444 * sub-frames. Since the descriptors are in
445 * non-cacheable memory, this leads to some
446 * rather slow writes on MIPS/ARM platforms.
447 */
448 if (ath_tx_is_11n(sc))
449 ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);
450
451 /*
452 * If 11n is enabled, set it up as if it's an aggregate
453 * frame.
454 */
455 if (is_last_subframe) {
456 ath_hal_set11n_aggr_last(sc->sc_ah,
457 (struct ath_desc *) ds);
458 } else if (is_aggr) {
459 /*
460 * This clears the aggrlen field; so
461 * the caller needs to call set_aggr_first()!
462 *
463 * XXX TODO: don't call this for the first
464 * descriptor in the first frame in an
465 * aggregate!
466 */
467 ath_hal_set11n_aggr_middle(sc->sc_ah,
468 (struct ath_desc *) ds,
469 bf->bf_state.bfs_ndelim);
470 }
471 isFirstDesc = 0;
472 bf->bf_lastds = (struct ath_desc *) ds;
473
474 /*
475 * Don't forget to skip to the next descriptor.
476 */
477 ds += sc->sc_tx_desclen;
478 dsp++;
479
480 /*
481 * .. and don't forget to blank these out!
482 */
483 bzero(bufAddrList, sizeof(bufAddrList));
484 bzero(segLenList, sizeof(segLenList));
485 }
486 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
487 }
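
/*
 * Illustrative example (assumed numbers, not driver state): with
 * bf_nseg = 6 and sc_tx_nmaps = 4 (an EDMA chip), the loop above fills
 * two descriptors - the first carries segments 0..3, the second
 * segments 4..5 - and links the first to the second via
 * ath_hal_settxdesclink().  With sc_tx_nmaps = 1 (pre-EDMA chips)
 * every segment gets its own descriptor in the chain.
 */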
488
489 /*
490 * Set the rate control fields in the given descriptor based on
491 * the bf_state fields and node state.
492 *
493 * The bfs fields should already be set with the relevant rate
494 * control information, including whether MRR is to be enabled.
495 *
496 * Since the FreeBSD HAL currently sets up the first TX rate
497 * in ath_hal_setuptxdesc(), this will setup the MRR
498 * conditionally for the pre-11n chips, and call ath_buf_set_rate
499 * unconditionally for 11n chips. These require the 11n rate
500 * scenario to be set if MCS rates are enabled, so it's easier
501 * to just always call it. The caller can then only set rates 2, 3
502 * and 4 if multi-rate retry is needed.
503 */
504 static void
505 ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
506 struct ath_buf *bf)
507 {
508 struct ath_rc_series *rc = bf->bf_state.bfs_rc;
509
510 /* If mrr is disabled, blank tries 1, 2, 3 */
511 if (! bf->bf_state.bfs_ismrr)
512 rc[1].tries = rc[2].tries = rc[3].tries = 0;
513
514 #if 0
515 /*
516 * If NOACK is set, just set ntries=1.
517 */
518 else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
519 rc[1].tries = rc[2].tries = rc[3].tries = 0;
520 rc[0].tries = 1;
521 }
522 #endif
523
524 /*
525 * Always call - that way a retried descriptor will
526 * have the MRR fields overwritten.
527 *
528 * XXX TODO: see if this is really needed - setting up
529 * the first descriptor should set the MRR fields to 0
530 * for us anyway.
531 */
532 if (ath_tx_is_11n(sc)) {
533 ath_buf_set_rate(sc, ni, bf);
534 } else {
535 ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
536 , rc[1].ratecode, rc[1].tries
537 , rc[2].ratecode, rc[2].tries
538 , rc[3].ratecode, rc[3].tries
539 );
540 }
541 }
542
543 /*
544 * Setup segments+descriptors for an 11n aggregate.
545 * bf_first is the first buffer in the aggregate.
546 * The descriptor list must already have been linked together using
547 * bf->bf_next.
548 */
549 static void
550 ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
551 {
552 struct ath_buf *bf, *bf_prev = NULL;
553 struct ath_desc *ds0 = bf_first->bf_desc;
554
555 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
556 __func__, bf_first->bf_state.bfs_nframes,
557 bf_first->bf_state.bfs_al);
558
559 bf = bf_first;
560
561 if (bf->bf_state.bfs_txrate0 == 0)
562 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",
563 __func__, bf, 0);
564 if (bf->bf_state.bfs_rc[0].ratecode == 0)
565 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",
566 __func__, bf, 0);
567
568 /*
569 * Setup all descriptors of all subframes - this will
570 * call ath_hal_set11n_aggr_middle() on every frame.
571 */
572 while (bf != NULL) {
573 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
574 "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
575 __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
576 SEQNO(bf->bf_state.bfs_seqno));
577
578 /*
579 * Setup the initial fields for the first descriptor - all
580 * the non-11n specific stuff.
581 */
582 ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
583 , bf->bf_state.bfs_pktlen /* packet length */
584 , bf->bf_state.bfs_hdrlen /* header length */
585 , bf->bf_state.bfs_atype /* Atheros packet type */
586 , bf->bf_state.bfs_txpower /* txpower */
587 , bf->bf_state.bfs_txrate0
588 , bf->bf_state.bfs_try0 /* series 0 rate/tries */
589 , bf->bf_state.bfs_keyix /* key cache index */
590 , bf->bf_state.bfs_txantenna /* antenna mode */
591 , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ /* flags */
592 , bf->bf_state.bfs_ctsrate /* rts/cts rate */
593 , bf->bf_state.bfs_ctsduration /* rts/cts duration */
594 );
595
596 /*
597 * First descriptor? Setup the rate control and initial
598 * aggregate header information.
599 */
600 if (bf == bf_first) {
601 /*
602 * setup first desc with rate and aggr info
603 */
604 ath_tx_set_ratectrl(sc, bf->bf_node, bf);
605 }
606
607 /*
608 * Setup the descriptors for a multi-descriptor frame.
609 * This is both aggregate and non-aggregate aware.
610 */
611 ath_tx_chaindesclist(sc, ds0, bf,
612 1, /* is_aggr */
613 !! (bf == bf_first), /* is_first_subframe */
614 !! (bf->bf_next == NULL) /* is_last_subframe */
615 );
616
617 if (bf == bf_first) {
618 /*
619 * Initialise the first 11n aggregate with the
620 * aggregate length and aggregate enable bits.
621 */
622 ath_hal_set11n_aggr_first(sc->sc_ah,
623 ds0,
624 bf->bf_state.bfs_al,
625 bf->bf_state.bfs_ndelim);
626 }
627
628 /*
629 * Link the last descriptor of the previous frame
630 * to the beginning descriptor of this frame.
631 */
632 if (bf_prev != NULL)
633 ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
634 bf->bf_daddr);
635
636 /* Save a copy so we can link the next descriptor in */
637 bf_prev = bf;
638 bf = bf->bf_next;
639 }
640
641 /*
642 * Set the first descriptor bf_lastds field to point to
643 * the last descriptor in the last subframe, that's where
644 * the status update will occur.
645 */
646 bf_first->bf_lastds = bf_prev->bf_lastds;
647
648 /*
649 * And bf_last in the first descriptor points to the end of
650 * the aggregate list.
651 */
652 bf_first->bf_last = bf_prev;
653
654 /*
655 * For non-AR9300 NICs, which require the rate control
656 * in the final descriptor - let's set that up now.
657 *
658 * This is because the filltxdesc() HAL call doesn't
659 * populate the last segment with rate control information
660 * if firstSeg is also true. For non-aggregate frames
661 * that is fine, as the first frame already has rate control
662 * info. But if the last frame in an aggregate has one
663 * descriptor, both firstseg and lastseg will be true and
664 * the rate info isn't copied.
665 *
666 * This is inefficient on MIPS/ARM platforms that have
667 * non-cacheable memory for TX descriptors, but we'll just
668 * make do for now.
669 *
670 * As to why the rate table is stashed in the last descriptor
671 * rather than the first descriptor? Because proctxdesc()
672 * is called on the final descriptor in an MPDU or A-MPDU -
673 * ie, the one that gets updated by the hardware upon
674 * completion. That way proctxdesc() doesn't need to know
675 * about the first _and_ last TX descriptor.
676 */
677 ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);
678
679 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
680 }
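
/*
 * Illustrative layout (hypothetical three-subframe aggregate A, B, C):
 * after ath_tx_setds_11n() runs, A's first descriptor carries the rate
 * control state and the "aggregate first" fields; B's descriptors are
 * marked "aggregate middle"; C's descriptors are marked "aggregate
 * last" and C's final descriptor also receives the rate table via
 * ath_hal_setuplasttxdesc().  A->bf_lastds then points at C's final
 * descriptor and A->bf_last points at C.
 */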
681
682 /*
683 * Hand-off a frame to the multicast TX queue.
684 *
685 * This is a software TXQ which will be appended to the CAB queue
686 * during the beacon setup code.
687 *
688 * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
689 * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated
690 * with the actual hardware txq, or all of this will fall apart.
691 *
692 * XXX It may not be a bad idea to just stuff the QCU ID into bf_state
693 * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated
694 * correctly.
695 */
696 static void
697 ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
698 struct ath_buf *bf)
699 {
700 ATH_TX_LOCK_ASSERT(sc);
701
702 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
703 ("%s: busy status 0x%x", __func__, bf->bf_flags));
704
705 /*
706 * Ensure that the tx queue is the cabq, so things get
707 * mapped correctly.
708 */
709 if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
710 DPRINTF(sc, ATH_DEBUG_XMIT,
711 "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
712 __func__, bf, bf->bf_state.bfs_tx_queue,
713 txq->axq_qnum);
714 }
715
716 ATH_TXQ_LOCK(txq);
717 if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
718 struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
719 struct ieee80211_frame *wh;
720
721 /* mark previous frame */
722 wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
723 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
724 bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
725 BUS_DMASYNC_PREWRITE);
726
727 /* link descriptor */
728 ath_hal_settxdesclink(sc->sc_ah,
729 bf_last->bf_lastds,
730 bf->bf_daddr);
731 }
732 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
733 ATH_TXQ_UNLOCK(txq);
734 }
735
736 /*
737 * Hand-off packet to a hardware queue.
738 */
739 static void
740 ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
741 struct ath_buf *bf)
742 {
743 struct ath_hal *ah = sc->sc_ah;
744 struct ath_buf *bf_first;
745
746 /*
747 * Insert the frame on the outbound list and pass it on
748 * to the hardware. Multicast frames buffered for power
749 * save stations and transmit from the CAB queue are stored
750 * on a s/w only queue and loaded on to the CAB queue in
751 * the SWBA handler since frames only go out on DTIM and
752 * to avoid possible races.
753 */
754 ATH_TX_LOCK_ASSERT(sc);
755 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
756 ("%s: busy status 0x%x", __func__, bf->bf_flags));
757 KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
758 ("ath_tx_handoff_hw called for mcast queue"));
759
760 /*
761 * XXX We should instead just verify that sc_txstart_cnt
762 * or ath_txproc_cnt > 0. That would mean that
763 * the reset is going to be waiting for us to complete.
764 */
765 if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) {
766 device_printf(sc->sc_dev,
767 "%s: TX dispatch without holding txcount/txstart refcnt!\n",
768 __func__);
769 }
770
771 /*
772 * XXX .. this is going to cause the hardware to get upset;
773 * so we really should find some way to drop or queue
774 * things.
775 */
776
777 ATH_TXQ_LOCK(txq);
778
779 /*
780 * XXX TODO: if there's a holdingbf, then
781 * ATH_TXQ_PUTRUNNING should be clear.
782 *
783 * If there is a holdingbf and the list is empty,
784 * then axq_link should be pointing to the holdingbf.
785 *
786 * Otherwise it should point to the last descriptor
787 * in the last ath_buf.
788 *
789 * In any case, we should really ensure that we
790 * update the previous descriptor link pointer to
791 * this descriptor, regardless of all of the above state.
792 *
793 * For now this is captured by having axq_link point
794 * to either the holdingbf (if the TXQ list is empty)
795 * or the end of the list (if the TXQ list isn't empty.)
796 * I'd rather just kill axq_link here and do it as above.
797 */
798
799 /*
800 * Append the frame to the TX queue.
801 */
802 ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
803 ATH_KTR(sc, ATH_KTR_TX, 3,
804 "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
805 "depth=%d",
806 txq->axq_qnum,
807 bf,
808 txq->axq_depth);
809
810 /*
811 * If there's a link pointer, update it.
812 *
813 * XXX we should replace this with the above logic, just
814 * to kill axq_link with fire.
815 */
816 if (txq->axq_link != NULL) {
817 *txq->axq_link = bf->bf_daddr;
818 DPRINTF(sc, ATH_DEBUG_XMIT,
819 "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
820 txq->axq_qnum, txq->axq_link,
821 (caddr_t)bf->bf_daddr, bf->bf_desc,
822 txq->axq_depth);
823 ATH_KTR(sc, ATH_KTR_TX, 5,
824 "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
825 "lastds=%d",
826 txq->axq_qnum, txq->axq_link,
827 (caddr_t)bf->bf_daddr, bf->bf_desc,
828 bf->bf_lastds);
829 }
830
831 /*
832 * If we've not pushed anything into the hardware yet,
833 * push the head of the queue into the TxDP.
834 *
835 * Once we've started DMA, there's no guarantee that
836 * updating the TxDP with a new value will actually work.
837 * So we just don't do that - if we hit the end of the list,
838 * we keep that buffer around (the "holding buffer") and
839 * re-start DMA by updating the link pointer of _that_
840 * descriptor and then restart DMA.
841 */
842 if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
843 bf_first = TAILQ_FIRST(&txq->axq_q);
844 txq->axq_flags |= ATH_TXQ_PUTRUNNING;
845 ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
846 DPRINTF(sc, ATH_DEBUG_XMIT,
847 "%s: TXDP[%u] = %p (%p) depth %d\n",
848 __func__, txq->axq_qnum,
849 (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
850 txq->axq_depth);
851 ATH_KTR(sc, ATH_KTR_TX, 5,
852 "ath_tx_handoff: TXDP[%u] = %p (%p) "
853 "lastds=%p depth %d",
854 txq->axq_qnum,
855 (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
856 bf_first->bf_lastds,
857 txq->axq_depth);
858 }
859
860 /*
861 * Ensure that the bf TXQ matches this TXQ, so later
862 * checking and holding buffer manipulation is sane.
863 */
864 if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
865 DPRINTF(sc, ATH_DEBUG_XMIT,
866 "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
867 __func__, bf, bf->bf_state.bfs_tx_queue,
868 txq->axq_qnum);
869 }
870
871 /*
872 * Track aggregate queue depth.
873 */
874 if (bf->bf_state.bfs_aggr)
875 txq->axq_aggr_depth++;
876
877 /*
878 * Update the link pointer.
879 */
880 ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);
881
882 /*
883 * Start DMA.
884 *
885 * If we wrote a TxDP above, DMA will start from here.
886 *
887 * If DMA is running, it'll do nothing.
888 *
889 * If the DMA engine hit the end of the QCU list (ie LINK=NULL,
890 * or VEOL) then it stops after the last descriptor it transmitted.
891 * We then append a new frame by updating the link pointer
892 * in that descriptor and then kick TxE here; it will re-read
893 * that last descriptor and find the new descriptor to transmit.
894 *
895 * This is why we keep the holding descriptor around.
896 */
897 ath_hal_txstart(ah, txq->axq_qnum);
898 ATH_TXQ_UNLOCK(txq);
899 ATH_KTR(sc, ATH_KTR_TX, 1,
900 "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
901 }
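
/*
 * Summary of the sequence above (descriptive only): for an idle queue
 * the first handoff writes TxDP via ath_hal_puttxbuf() and sets
 * ATH_TXQ_PUTRUNNING; subsequent handoffs only patch the previous
 * descriptor's link pointer (*txq->axq_link = bf->bf_daddr) and re-kick
 * the queue with ath_hal_txstart(), which makes the DMA engine re-read
 * the last descriptor it processed and pick up the newly linked frame.
 */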
902
903 /*
904 * Restart TX DMA for the given TXQ.
905 *
906 * This must be called whether the queue is empty or not.
907 */
908 static void
909 ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
910 {
911 struct ath_buf *bf, *bf_last;
912
913 ATH_TXQ_LOCK_ASSERT(txq);
914
915 /* XXX make this ATH_TXQ_FIRST */
916 bf = TAILQ_FIRST(&txq->axq_q);
917 bf_last = ATH_TXQ_LAST(txq, axq_q_s);
918
919 if (bf == NULL)
920 return;
921
922 DPRINTF(sc, ATH_DEBUG_RESET,
923 "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n",
924 __func__,
925 txq->axq_qnum,
926 bf,
927 bf_last,
928 (uint32_t) bf->bf_daddr);
929
930 #ifdef ATH_DEBUG
931 if (sc->sc_debug & ATH_DEBUG_RESET)
932 ath_tx_dump(sc, txq);
933 #endif
934
935 /*
936 * This is called from a restart, so DMA is known to be
937 * completely stopped.
938 */
939 KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
940 ("%s: Q%d: called with PUTRUNNING=1\n",
941 __func__,
942 txq->axq_qnum));
943
944 ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
945 txq->axq_flags |= ATH_TXQ_PUTRUNNING;
946
947 ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
948 &txq->axq_link);
949 ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
950 }
951
952 /*
953 * Hand off a packet to the hardware (or mcast queue.)
954 *
955 * The relevant hardware txq should be locked.
956 */
957 static void
958 ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
959 struct ath_buf *bf)
960 {
961 ATH_TX_LOCK_ASSERT(sc);
962
963 #ifdef ATH_DEBUG_ALQ
964 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
965 ath_tx_alq_post(sc, bf);
966 #endif
967
968 if (txq->axq_qnum == ATH_TXQ_SWQ)
969 ath_tx_handoff_mcast(sc, txq, bf);
970 else
971 ath_tx_handoff_hw(sc, txq, bf);
972 }
973
974 static int
975 ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
976 struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
977 int *keyix)
978 {
979 DPRINTF(sc, ATH_DEBUG_XMIT,
980 "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n",
981 __func__,
982 *hdrlen,
983 *pktlen,
984 isfrag,
985 iswep,
986 m0);
987
988 if (iswep) {
989 const struct ieee80211_cipher *cip;
990 struct ieee80211_key *k;
991
992 /*
993 * Construct the 802.11 header+trailer for an encrypted
994 * frame. The only reason this can fail is because of an
995 * unknown or unsupported cipher/key type.
996 */
997 k = ieee80211_crypto_encap(ni, m0);
998 if (k == NULL) {
999 /*
1000 * This can happen when the key is yanked after the
1001 * frame was queued. Just discard the frame; the
1002 * 802.11 layer counts failures and provides
1003 * debugging/diagnostics.
1004 */
1005 return (0);
1006 }
1007 /*
1008 * Adjust the packet + header lengths for the crypto
1009 * additions and calculate the h/w key index. When
1010 * a s/w mic is done the frame will have had any mic
1011 * added to it prior to entry so m0->m_pkthdr.len will
1012 * account for it. Otherwise we need to add it to the
1013 * packet length.
1014 */
1015 cip = k->wk_cipher;
1016 (*hdrlen) += cip->ic_header;
1017 (*pktlen) += cip->ic_header + cip->ic_trailer;
1018 /* NB: frags always have any TKIP MIC done in s/w */
1019 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
1020 (*pktlen) += cip->ic_miclen;
1021 (*keyix) = k->wk_keyix;
1022 } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
1023 /*
1024 * Use station key cache slot, if assigned.
1025 */
1026 (*keyix) = ni->ni_ucastkey.wk_keyix;
1027 if ((*keyix) == IEEE80211_KEYIX_NONE)
1028 (*keyix) = HAL_TXKEYIX_INVALID;
1029 } else
1030 (*keyix) = HAL_TXKEYIX_INVALID;
1031
1032 return (1);
1033 }
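
/*
 * Worked example (cipher sizes here are illustrative, not taken from
 * net80211): for a cipher with an 8 byte header, an 8 byte trailer and
 * an 8 byte MIC done in hardware, a 100 byte frame with a 24 byte
 * 802.11 header would be adjusted by the code above to
 *
 *	hdrlen = 24 + 8 = 32
 *	pktlen = 100 + 8 + 8 + 8 = 124
 *
 * Fragments, and keys with IEEE80211_KEY_SWMIC set, skip the MIC
 * addition since the software MIC is already counted in m_pkthdr.len.
 */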
1034
1035 /*
1036 * Calculate whether interoperability protection is required for
1037 * this frame.
1038 *
1039 * This requires the rate control information be filled in,
1040 * as the protection requirement depends upon the current
1041 * operating mode / PHY.
1042 */
1043 static void
1044 ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
1045 {
1046 struct ieee80211_frame *wh;
1047 uint8_t rix;
1048 uint16_t flags;
1049 int shortPreamble;
1050 const HAL_RATE_TABLE *rt = sc->sc_currates;
1051 struct ieee80211com *ic = &sc->sc_ic;
1052
1053 flags = bf->bf_state.bfs_txflags;
1054 rix = bf->bf_state.bfs_rc[0].rix;
1055 shortPreamble = bf->bf_state.bfs_shpream;
1056 wh = mtod(bf->bf_m, struct ieee80211_frame *);
1057
1058 /* Disable frame protection for TOA probe frames */
1059 if (bf->bf_flags & ATH_BUF_TOA_PROBE) {
1060 /* XXX count */
1061 flags &= ~(HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA);
1062 bf->bf_state.bfs_doprot = 0;
1063 goto finish;
1064 }
1065
1066 /*
1067 * If 802.11g protection is enabled, determine whether
1068 * to use RTS/CTS or just CTS. Note that this is only
1069 * done for OFDM unicast frames.
1070 */
1071 if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
1072 rt->info[rix].phy == IEEE80211_T_OFDM &&
1073 (flags & HAL_TXDESC_NOACK) == 0) {
1074 bf->bf_state.bfs_doprot = 1;
1075 /* XXX fragments must use CCK rates w/ protection */
1076 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
1077 flags |= HAL_TXDESC_RTSENA;
1078 } else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
1079 flags |= HAL_TXDESC_CTSENA;
1080 }
1081 /*
1082 * For frags it would be desirable to use the
1083 * highest CCK rate for RTS/CTS. But stations
1084 * farther away may detect it at a lower CCK rate
1085 * so use the configured protection rate instead
1086 * (for now).
1087 */
1088 sc->sc_stats.ast_tx_protect++;
1089 }
1090
1091 /*
1092 * If 11n protection is enabled and it's a HT frame,
1093 * enable RTS.
1094 *
1095 * XXX ic_htprotmode or ic_curhtprotmode?
1096 * XXX should ic_htprotmode only matter if ic_curhtprotmode
1097 * XXX indicates it's not a HT pure environment?
1098 */
1099 if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
1100 rt->info[rix].phy == IEEE80211_T_HT &&
1101 (flags & HAL_TXDESC_NOACK) == 0) {
1102 flags |= HAL_TXDESC_RTSENA;
1103 sc->sc_stats.ast_tx_htprotect++;
1104 }
1105
1106 finish:
1107 bf->bf_state.bfs_txflags = flags;
1108 }
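
/*
 * Summary of the decisions above (descriptive only):
 *
 *	TOA probe frame			-> no RTS/CTS protection
 *	OFDM, F_USEPROT, ACK expected	-> RTS/CTS or CTS-to-self,
 *					   per ic_protmode
 *	HT rate, htprotmode RTSCTS	-> RTS
 *	everything else			-> flags left untouched
 */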
1109
1110 /*
1111 * Update the frame duration given the currently selected rate.
1112 *
1113 * This also updates the frame duration value, so it will require
1114 * a DMA flush.
1115 */
1116 static void
1117 ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
1118 {
1119 struct ieee80211_frame *wh;
1120 uint8_t rix;
1121 uint16_t flags;
1122 int shortPreamble;
1123 struct ath_hal *ah = sc->sc_ah;
1124 const HAL_RATE_TABLE *rt = sc->sc_currates;
1125 int isfrag = bf->bf_m->m_flags & M_FRAG;
1126
1127 flags = bf->bf_state.bfs_txflags;
1128 rix = bf->bf_state.bfs_rc[0].rix;
1129 shortPreamble = bf->bf_state.bfs_shpream;
1130 wh = mtod(bf->bf_m, struct ieee80211_frame *);
1131
1132 /*
1133 * Calculate duration. This logically belongs in the 802.11
1134 * layer but it lacks sufficient information to calculate it.
1135 */
1136 if ((flags & HAL_TXDESC_NOACK) == 0 && !IEEE80211_IS_CTL(wh)) {
1137 u_int16_t dur;
1138 if (shortPreamble)
1139 dur = rt->info[rix].spAckDuration;
1140 else
1141 dur = rt->info[rix].lpAckDuration;
1142 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
1143 dur += dur; /* additional SIFS+ACK */
1144 /*
1145 * Include the size of next fragment so NAV is
1146 * updated properly. The last fragment uses only
1147 * the ACK duration
1148 *
1149 * XXX TODO: ensure that the rate lookup for each
1150 * fragment is the same as the rate used by the
1151 * first fragment!
1152 */
1153 dur += ath_hal_computetxtime(ah,
1154 rt,
1155 bf->bf_nextfraglen,
1156 rix, shortPreamble,
1157 AH_TRUE);
1158 }
1159 if (isfrag) {
1160 /*
1161 * Force hardware to use computed duration for next
1162 * fragment by disabling multi-rate retry which updates
1163 * duration based on the multi-rate duration table.
1164 */
1165 bf->bf_state.bfs_ismrr = 0;
1166 bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
1167 /* XXX update bfs_rc[0].try? */
1168 }
1169
1170 /* Update the duration field itself */
1171 *(u_int16_t *)wh->i_dur = htole16(dur);
1172 }
1173 }
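
/*
 * Illustrative example (symbolic): for an intermediate fragment the
 * duration written into i_dur by the code above works out to
 *
 *	dur = 2 * AckDuration[rix]		(SIFS + ACK, twice)
 *	    + txtime(bf_nextfraglen, rix)	(the following fragment)
 *
 * while the final fragment, and any unfragmented frame, carries just
 * the single ACK duration.
 */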
1174
1175 static uint8_t
1176 ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
1177 int cix, int shortPreamble)
1178 {
1179 uint8_t ctsrate;
1180
1181 /*
1182 * CTS transmit rate is derived from the transmit rate
1183 * by looking in the h/w rate table. We must also factor
1184 * in whether or not a short preamble is to be used.
1185 */
1186 /* NB: cix is set above where RTS/CTS is enabled */
1187 KASSERT(cix != 0xff, ("cix not setup"));
1188 ctsrate = rt->info[cix].rateCode;
1189
1190 /* XXX this should only matter for legacy rates */
1191 if (shortPreamble)
1192 ctsrate |= rt->info[cix].shortPreamble;
1193
1194 return (ctsrate);
1195 }
1196
1197 /*
1198 * Calculate the RTS/CTS duration for legacy frames.
1199 */
1200 static int
1201 ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
1202 int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
1203 int flags)
1204 {
1205 int ctsduration = 0;
1206
1207 /* This mustn't be called for HT modes */
1208 if (rt->info[cix].phy == IEEE80211_T_HT) {
1209 printf("%s: HT rate where it shouldn't be (0x%x)\n",
1210 __func__, rt->info[cix].rateCode);
1211 return (-1);
1212 }
1213
1214 /*
1215 * Compute the transmit duration based on the frame
1216 * size and the size of an ACK frame. We call into the
1217 * HAL to do the computation since it depends on the
1218 * characteristics of the actual PHY being used.
1219 *
1220 * NB: CTS is assumed the same size as an ACK so we can
1221 * use the precalculated ACK durations.
1222 */
1223 if (shortPreamble) {
1224 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
1225 ctsduration += rt->info[cix].spAckDuration;
1226 ctsduration += ath_hal_computetxtime(ah,
1227 rt, pktlen, rix, AH_TRUE, AH_TRUE);
1228 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
1229 ctsduration += rt->info[rix].spAckDuration;
1230 } else {
1231 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
1232 ctsduration += rt->info[cix].lpAckDuration;
1233 ctsduration += ath_hal_computetxtime(ah,
1234 rt, pktlen, rix, AH_FALSE, AH_TRUE);
1235 if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
1236 ctsduration += rt->info[rix].lpAckDuration;
1237 }
1238
1239 return (ctsduration);
1240 }
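
/*
 * Illustrative composition (symbolic, mirroring the code above): with
 * RTS enabled and ACKs expected, the short preamble case is
 *
 *	ctsduration = spAckDuration[cix]	(SIFS + CTS)
 *		    + txtime(pktlen, rix)	(the protected frame)
 *		    + spAckDuration[rix]	(SIFS + ACK)
 *
 * CTS-to-self drops the first term; NOACK frames drop the last.
 */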
1241
1242 /*
1243 * Update the given ath_buf with updated rts/cts setup and duration
1244 * values.
1245 *
1246 * To support rate lookups for each software retry, the rts/cts rate
1247 * and cts duration must be re-calculated.
1248 *
1249 * This function assumes the RTS/CTS flags have been set as needed;
1250 * mrr has been disabled; and the rate control lookup has been done.
1251 *
1252 * XXX TODO: MRR need only be disabled for the pre-11n NICs.
1253 * XXX The 11n NICs support per-rate RTS/CTS configuration.
1254 */
1255 static void
1256 ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
1257 {
1258 uint16_t ctsduration = 0;
1259 uint8_t ctsrate = 0;
1260 uint8_t rix = bf->bf_state.bfs_rc[0].rix;
1261 uint8_t cix = 0;
1262 const HAL_RATE_TABLE *rt = sc->sc_currates;
1263
1264 /*
1265 * No RTS/CTS enabled? Don't bother.
1266 */
1267 if ((bf->bf_state.bfs_txflags &
1268 (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
1269 /* XXX is this really needed? */
1270 bf->bf_state.bfs_ctsrate = 0;
1271 bf->bf_state.bfs_ctsduration = 0;
1272 return;
1273 }
1274
1275 /*
1276 * If protection is enabled, use the protection rix control
1277 * rate. Otherwise use the rate0 control rate.
1278 */
1279 if (bf->bf_state.bfs_doprot)
1280 rix = sc->sc_protrix;
1281 else
1282 rix = bf->bf_state.bfs_rc[0].rix;
1283
1284 /*
1285 * If the raw path has hard-coded ctsrate0 to something,
1286 * use it.
1287 */
1288 if (bf->bf_state.bfs_ctsrate0 != 0)
1289 cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
1290 else
1291 /* Control rate from above */
1292 cix = rt->info[rix].controlRate;
1293
1294 /* Calculate the rtscts rate for the given cix */
1295 ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
1296 bf->bf_state.bfs_shpream);
1297
1298 /* The 11n chipsets do ctsduration calculations for you */
1299 if (! ath_tx_is_11n(sc))
1300 ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
1301 bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
1302 rt, bf->bf_state.bfs_txflags);
1303
1304 /* Squirrel away in ath_buf */
1305 bf->bf_state.bfs_ctsrate = ctsrate;
1306 bf->bf_state.bfs_ctsduration = ctsduration;
1307
1308 /*
1309 * Must disable multi-rate retry when using RTS/CTS.
1310 */
1311 if (!sc->sc_mrrprot) {
1312 bf->bf_state.bfs_ismrr = 0;
1313 bf->bf_state.bfs_try0 =
1314 bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */
1315 }
1316 }
1317
1318 /*
1319 * Setup the descriptor chain for a normal or fast-frame
1320 * frame.
1321 *
1322 * XXX TODO: extend to include the destination hardware QCU ID.
1323 * Make sure that is correct. Make sure that when being added
1324 * to the mcastq, the CABQ QCUID is set or things will get a bit
1325 * odd.
1326 */
1327 static void
1328 ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
1329 {
1330 struct ath_desc *ds = bf->bf_desc;
1331 struct ath_hal *ah = sc->sc_ah;
1332
1333 if (bf->bf_state.bfs_txrate0 == 0)
1334 DPRINTF(sc, ATH_DEBUG_XMIT,
1335 "%s: bf=%p, txrate0=%d\n", __func__, bf, 0);
1336
1337 ath_hal_setuptxdesc(ah, ds
1338 , bf->bf_state.bfs_pktlen /* packet length */
1339 , bf->bf_state.bfs_hdrlen /* header length */
1340 , bf->bf_state.bfs_atype /* Atheros packet type */
1341 , bf->bf_state.bfs_txpower /* txpower */
1342 , bf->bf_state.bfs_txrate0
1343 , bf->bf_state.bfs_try0 /* series 0 rate/tries */
1344 , bf->bf_state.bfs_keyix /* key cache index */
1345 , bf->bf_state.bfs_txantenna /* antenna mode */
1346 , bf->bf_state.bfs_txflags /* flags */
1347 , bf->bf_state.bfs_ctsrate /* rts/cts rate */
1348 , bf->bf_state.bfs_ctsduration /* rts/cts duration */
1349 );
1350
1351 /*
1352 * This will be overridden when the descriptor chain is written.
1353 */
1354 bf->bf_lastds = ds;
1355 bf->bf_last = bf;
1356
1357 /* Set rate control and descriptor chain for this frame */
1358 ath_tx_set_ratectrl(sc, bf->bf_node, bf);
1359 ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);
1360 }
1361
1362 /*
1363 * Do a rate lookup.
1364 *
1365 * This performs a rate lookup for the given ath_buf only if it's required.
1366 * Non-data frames and raw frames don't require it.
1367 *
1368 * This populates the primary and MRR entries; MRR values are
1369 * then disabled later on if something requires it (eg RTS/CTS on
1370 * pre-11n chipsets).
1371 *
1372 * This needs to be done before the RTS/CTS fields are calculated
1373 * as they may depend upon the rate chosen.
1374 */
1375 static void
1376 ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf, int tid,
1377 int pktlen, int is_aggr)
1378 {
1379 uint8_t rate, rix;
1380 int try0;
1381 int maxdur; /* Note: unused for now */
1382 int maxpktlen;
1383
1384 if (! bf->bf_state.bfs_doratelookup)
1385 return;
1386
1387 /* Get rid of any previous state */
1388 bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1389
1390 ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
1391 ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
1392 pktlen, tid, is_aggr, &rix, &try0, &rate, &maxdur, &maxpktlen);
1393
1394 /* In case MRR is disabled, make sure rc[0] is setup correctly */
1395 bf->bf_state.bfs_rc[0].rix = rix;
1396 bf->bf_state.bfs_rc[0].ratecode = rate;
1397 bf->bf_state.bfs_rc[0].tries = try0;
1398
1399 if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
1400 ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
1401 is_aggr, bf->bf_state.bfs_rc);
1402 ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));
1403
1404 sc->sc_txrix = rix; /* for LED blinking */
1405 sc->sc_lastdatarix = rix; /* for fast frames */
1406 bf->bf_state.bfs_try0 = try0;
1407 bf->bf_state.bfs_txrate0 = rate;
1408 bf->bf_state.bfs_rc_maxpktlen = maxpktlen;
1409 }
1410
1411 /*
1412 * Update the CLRDMASK bit in the ath_buf if it needs to be set.
1413 */
1414 static void
1415 ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
1416 struct ath_buf *bf)
1417 {
1418 struct ath_node *an = ATH_NODE(bf->bf_node);
1419
1420 ATH_TX_LOCK_ASSERT(sc);
1421
1422 if (an->clrdmask == 1) {
1423 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
1424 an->clrdmask = 0;
1425 }
1426 }
1427
1428 /*
1429 * Return whether this frame should be software queued or
1430 * direct dispatched.
1431 *
1432 * When doing powersave, BAR frames should be queued but other management
1433 * frames should be directly sent.
1434 *
1435 * When not doing powersave, stick BAR frames into the hardware queue
1436 * so it goes out even though the queue is paused.
1437 *
1438 * For now, management frames are also software queued by default.
1439 */
1440 static int
1441 ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,
1442 struct mbuf *m0, int *queue_to_head)
1443 {
1444 struct ieee80211_node *ni = &an->an_node;
1445 struct ieee80211_frame *wh;
1446 uint8_t type, subtype;
1447
1448 wh = mtod(m0, struct ieee80211_frame *);
1449 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1450 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1451
1452 (*queue_to_head) = 0;
1453
1454 /* If it's not in powersave - direct-dispatch BAR */
1455 if ((ATH_NODE(ni)->an_is_powersave == 0)
1456 && type == IEEE80211_FC0_TYPE_CTL &&
1457 subtype == IEEE80211_FC0_SUBTYPE_BAR) {
1458 DPRINTF(sc, ATH_DEBUG_SW_TX,
1459 "%s: BAR: TX'ing direct\n", __func__);
1460 return (0);
1461 } else if ((ATH_NODE(ni)->an_is_powersave == 1)
1462 && type == IEEE80211_FC0_TYPE_CTL &&
1463 subtype == IEEE80211_FC0_SUBTYPE_BAR) {
1464 /* BAR TX whilst asleep; queue */
1465 DPRINTF(sc, ATH_DEBUG_SW_TX,
1466 "%s: swq: TX'ing\n", __func__);
1467 (*queue_to_head) = 1;
1468 return (1);
1469 } else if ((ATH_NODE(ni)->an_is_powersave == 1)
1470 && (type == IEEE80211_FC0_TYPE_MGT ||
1471 type == IEEE80211_FC0_TYPE_CTL)) {
1472 /*
1473 * Other control/mgmt frame; bypass software queuing
1474 * for now!
1475 */
1476 DPRINTF(sc, ATH_DEBUG_XMIT,
1477 "%s: %6D: Node is asleep; sending mgmt "
1478 "(type=%d, subtype=%d)\n",
1479 __func__, ni->ni_macaddr, ":", type, subtype);
1480 return (0);
1481 } else {
1482 return (1);
1483 }
1484 }
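
/*
 * Summary of the decision above (descriptive table):
 *
 *	frame type		node awake	node asleep
 *	BAR			direct		swq (head of queue)
 *	other mgmt/ctrl		swq		direct
 *	data			swq		swq
 */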
1485
1486 /*
1487 * Transmit the given frame to the hardware.
1488 *
1489 * The frame must already be setup; rate control must already have
1490 * been done.
1491 *
1492 * XXX since the TXQ lock is being held here (and I dislike holding
1493 * it for this long when not doing software aggregation), later on
1494 * break this function into "setup_normal" and "xmit_normal". The
1495 * lock only needs to be held for the ath_tx_handoff call.
1496 *
1497 * XXX we don't update the leak count here - if we're doing
1498 * direct frame dispatch, we need to be able to do it without
1499 * decrementing the leak count (eg multicast queue frames.)
1500 */
1501 static void
1502 ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
1503 struct ath_buf *bf)
1504 {
1505 struct ath_node *an = ATH_NODE(bf->bf_node);
1506 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
1507
1508 ATH_TX_LOCK_ASSERT(sc);
1509
1510 /*
1511 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does
1512 * set a completion handler however it doesn't (yet) properly
1513 * handle the strict ordering requirements needed for normal,
1514 * non-aggregate session frames.
1515 *
1516 * Once this is implemented, only set CLRDMASK like this for
1517 * frames that must go out - eg management/raw frames.
1518 */
1519 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
1520
1521 /* Setup the descriptor before handoff */
1522 ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen, false);
1523 ath_tx_calc_duration(sc, bf);
1524 ath_tx_calc_protection(sc, bf);
1525 ath_tx_set_rtscts(sc, bf);
1526 ath_tx_rate_fill_rcflags(sc, bf);
1527 ath_tx_setds(sc, bf);
1528
1529 /* Track per-TID hardware queue depth correctly */
1530 tid->hwq_depth++;
1531
1532 /* Assign the completion handler */
1533 bf->bf_comp = ath_tx_normal_comp;
1534
1535 /* Hand off to hardware */
1536 ath_tx_handoff(sc, txq, bf);
1537 }
1538
1539 /*
1540 * Do the basic frame setup stuff that's required before the frame
1541 * is added to a software queue.
1542 *
1543 * All frames get mostly the same treatment and it's done once.
1544 * Retransmits fiddle with things like the rate control setup,
1545 * setting the retransmit bit in the packet; doing relevant DMA/bus
1546 * syncing and relinking it (back) into the hardware TX queue.
1547 *
1548 * Note that this may cause the mbuf to be reallocated, so
1549 * m0 may not be valid.
1550 */
1551 static int
1552 ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
1553 struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
1554 {
1555 struct ieee80211vap *vap = ni->ni_vap;
1556 struct ieee80211com *ic = &sc->sc_ic;
1557 int error, iswep, ismcast, isfrag, ismrr;
1558 int keyix, hdrlen, pktlen, try0 = 0;
1559 u_int8_t rix = 0, txrate = 0;
1560 struct ath_desc *ds;
1561 struct ieee80211_frame *wh;
1562 u_int subtype, flags;
1563 HAL_PKT_TYPE atype;
1564 const HAL_RATE_TABLE *rt;
1565 HAL_BOOL shortPreamble;
1566 struct ath_node *an;
1567
1568 /* XXX TODO: this pri is only used for non-QoS check, right? */
1569 u_int pri;
1570
1571 /*
1572 * To ensure that both sequence numbers and the CCMP PN handling
1573 * is "correct", make sure that the relevant TID queue is locked.
1574 * Otherwise the CCMP PN and seqno may appear out of order, causing
1575 * re-ordered frames to have out of order CCMP PN's, resulting
1576 * in many, many frame drops.
1577 */
1578 ATH_TX_LOCK_ASSERT(sc);
1579
1580 wh = mtod(m0, struct ieee80211_frame *);
1581 iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
1582 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1583 isfrag = m0->m_flags & M_FRAG;
1584 hdrlen = ieee80211_anyhdrsize(wh);
1585 /*
1586 * Packet length must not include any
1587 * pad bytes; deduct them here.
1588 */
1589 pktlen = m0->m_pkthdr.len - (hdrlen & 3);
1590
1591 /* Handle encryption twiddling if needed */
1592 if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
1593 &pktlen, &keyix)) {
1594 ieee80211_free_mbuf(m0);
1595 return EIO;
1596 }
1597
1598 /* packet header may have moved, reset our local pointer */
1599 wh = mtod(m0, struct ieee80211_frame *);
1600
1601 pktlen += IEEE80211_CRC_LEN;
1602
1603 /*
1604 * Load the DMA map so any coalescing is done. This
1605 * also calculates the number of descriptors we need.
1606 */
1607 error = ath_tx_dmasetup(sc, bf, m0);
1608 if (error != 0)
1609 return error;
1610 KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
1611 bf->bf_node = ni; /* NB: held reference */
1612 m0 = bf->bf_m; /* NB: may have changed */
1613 wh = mtod(m0, struct ieee80211_frame *);
1614
1615 /* setup descriptors */
1616 ds = bf->bf_desc;
1617 rt = sc->sc_currates;
1618 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
1619
1620 /*
1621 * NB: the 802.11 layer marks whether or not we should
1622 * use short preamble based on the current mode and
1623 * negotiated parameters.
1624 */
1625 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
1626 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
1627 shortPreamble = AH_TRUE;
1628 sc->sc_stats.ast_tx_shortpre++;
1629 } else {
1630 shortPreamble = AH_FALSE;
1631 }
1632
1633 an = ATH_NODE(ni);
1634 //flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */
1635 flags = 0;
1636 ismrr = 0; /* default no multi-rate retry*/
1637
1638 pri = ath_tx_getac(sc, m0); /* honor classification */
1639 /* XXX use txparams instead of fixed values */
1640 /*
1641 * Calculate Atheros packet type from IEEE80211 packet header,
1642 * setup for rate calculations, and select h/w transmit queue.
1643 */
1644 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
1645 case IEEE80211_FC0_TYPE_MGT:
1646 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1647 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
1648 atype = HAL_PKT_TYPE_BEACON;
1649 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
1650 atype = HAL_PKT_TYPE_PROBE_RESP;
1651 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
1652 atype = HAL_PKT_TYPE_ATIM;
1653 else
1654 atype = HAL_PKT_TYPE_NORMAL; /* XXX */
1655 rix = an->an_mgmtrix;
1656 txrate = rt->info[rix].rateCode;
1657 if (shortPreamble)
1658 txrate |= rt->info[rix].shortPreamble;
1659 try0 = ATH_TXMGTTRY;
1660 flags |= HAL_TXDESC_INTREQ; /* force interrupt */
1661 break;
1662 case IEEE80211_FC0_TYPE_CTL:
1663 atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */
1664 rix = an->an_mgmtrix;
1665 txrate = rt->info[rix].rateCode;
1666 if (shortPreamble)
1667 txrate |= rt->info[rix].shortPreamble;
1668 try0 = ATH_TXMGTTRY;
1669 flags |= HAL_TXDESC_INTREQ; /* force interrupt */
1670 break;
1671 case IEEE80211_FC0_TYPE_DATA:
1672 atype = HAL_PKT_TYPE_NORMAL; /* default */
1673 /*
1674 * Data frames: multicast frames go out at a fixed rate,
1675 * EAPOL frames use the mgmt frame rate; otherwise consult
1676 * the rate control module for the rate to use.
1677 */
1678 if (ismcast) {
1679 rix = an->an_mcastrix;
1680 txrate = rt->info[rix].rateCode;
1681 if (shortPreamble)
1682 txrate |= rt->info[rix].shortPreamble;
1683 try0 = 1;
1684 } else if (m0->m_flags & M_EAPOL) {
1685 /* XXX? maybe always use long preamble? */
1686 rix = an->an_mgmtrix;
1687 txrate = rt->info[rix].rateCode;
1688 if (shortPreamble)
1689 txrate |= rt->info[rix].shortPreamble;
1690 try0 = ATH_TXMAXTRY; /* XXX?too many? */
1691 } else {
1692 /*
1693 * Do rate lookup on each TX, rather than using
1694 * the hard-coded TX information decided here.
1695 */
1696 ismrr = 1;
1697 bf->bf_state.bfs_doratelookup = 1;
1698 }
1699
1700 /*
1701 * Check whether to set NOACK for this WME category or not.
1702 */
1703 if (ieee80211_wme_vap_ac_is_noack(vap, pri))
1704 flags |= HAL_TXDESC_NOACK;
1705 break;
1706 default:
1707 device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
1708 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
1709 /* XXX statistic */
1710 /* XXX free tx dmamap */
1711 ieee80211_free_mbuf(m0);
1712 return EIO;
1713 }
1714
1715 /*
1716 * There are two known scenarios where the frame AC doesn't match
1717 * what the destination TXQ is.
1718 *
1719 * + non-QoS frames (eg management?) that the net80211 stack has
1720 * assigned a higher AC to, but since it's a non-QoS TID, it's
1721 * being thrown into TID 16. TID 16 gets the AC_BE queue.
1722 * It's quite possible that management frames should just be
1723 * direct dispatched to hardware rather than go via the software
1724 * queue; that should be investigated in the future. There are
1725 * some specific scenarios where this doesn't make sense, mostly
1726 * surrounding ADDBA request/response - hence why that is special
1727 * cased.
1728 *
1729 * + Multicast frames going into the VAP mcast queue. That shows up
1730 * as "TXQ 11".
1731 *
1732 * This driver should eventually support separate TID and TXQ locking,
1733 * allowing for arbitrary AC frames to appear on arbitrary software
1734 * queues, being queued to the "correct" hardware queue when needed.
1735 */
1736 #if 0
1737 if (txq != sc->sc_ac2q[pri]) {
1738 DPRINTF(sc, ATH_DEBUG_XMIT,
1739 "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
1740 __func__,
1741 txq,
1742 txq->axq_qnum,
1743 pri,
1744 sc->sc_ac2q[pri],
1745 sc->sc_ac2q[pri]->axq_qnum);
1746 }
1747 #endif
1748
1749 /*
1750 * Calculate miscellaneous flags.
1751 */
1752 if (ismcast) {
1753 flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */
1754 } else if (pktlen > vap->iv_rtsthreshold &&
1755 (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
1756 flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */
1757 sc->sc_stats.ast_tx_rts++;
1758 }
1759 if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */
1760 sc->sc_stats.ast_tx_noack++;
1761 #ifdef IEEE80211_SUPPORT_TDMA
1762 if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
1763 DPRINTF(sc, ATH_DEBUG_TDMA,
1764 "%s: discard frame, ACK required w/ TDMA\n", __func__);
1765 sc->sc_stats.ast_tdma_ack++;
1766 /* XXX free tx dmamap */
1767 ieee80211_free_mbuf(m0);
1768 return EIO;
1769 }
1770 #endif
1771
1772 /*
1773 * If it's a frame to do location reporting on,
1774 * communicate it to the HAL.
1775 */
1776 if (ieee80211_get_toa_params(m0, NULL)) {
1777 device_printf(sc->sc_dev,
1778 "%s: setting TX positioning bit\n", __func__);
1779 flags |= HAL_TXDESC_POS;
1780
1781 /*
1782 * Note: The hardware reports timestamps for
1783 * each of the RX'ed packets as part of the packet
1784 * exchange. So this means things like RTS/CTS
1785 * exchanges, as well as the final ACK.
1786 *
1787 * So, if you send a RTS-protected NULL data frame,
1788 * you'll get an RX report for the RTS response, then
1789 * an RX report for the NULL frame, and then the TX
1790 * completion at the end.
1791 *
1792 * NOTE: it doesn't work right for CCK frames;
1793 * there's no channel info data provided unless
1794 * it's OFDM or HT. Will have to dig into it.
1795 */
1796 flags &= ~(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA);
1797 bf->bf_flags |= ATH_BUF_TOA_PROBE;
1798 }
1799
1800 #if 0
1801 /*
1802 * Placeholder: if you want to transmit with the azimuth
1803 * timestamp in the end of the payload, here's where you
1804 * should set the TXDESC field.
1805 */
1806 flags |= HAL_TXDESC_HWTS;
1807 #endif
1808
1809 /*
1810 * Determine if a tx interrupt should be generated for
1811 * this descriptor. We take a tx interrupt to reap
1812 * descriptors when the h/w hits an EOL condition or
1813 * when the descriptor is specifically marked to generate
1814 * an interrupt. We periodically mark descriptors in this
1815 * way to ensure timely replenishing of the supply needed
1816 * for sending frames. Deferring interrupts reduces system
1817 * load and potentially allows more concurrent work to be
1818 * done, but if done too aggressively it can cause senders to
1819 * back up.
1820 *
1821 * NB: use >= to deal with sc_txintrperiod changing
1822 * dynamically through sysctl.
1823 */
1824 if (flags & HAL_TXDESC_INTREQ) {
1825 txq->axq_intrcnt = 0;
1826 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
1827 flags |= HAL_TXDESC_INTREQ;
1828 txq->axq_intrcnt = 0;
1829 }
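/*
 * Illustrative example: with sc_txintrperiod set to (say) 5 via
 * sysctl, roughly every fifth frame queued to this TXQ without a
 * forced INTREQ gets one here, so completed descriptors are reaped
 * at least that often and the ath_buf supply keeps getting
 * replenished.
 */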
1830
1831 /* This point forward is actual TX bits */
1832
1833 /*
1834 * At this point we are committed to sending the frame
1835 * and we don't need to look at m_nextpkt; clear it in
1836 * case this frame is part of frag chain.
1837 */
1838 m0->m_nextpkt = NULL;
1839
1840 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
1841 ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
1842 sc->sc_hwmap[rix].ieeerate, -1);
1843
1844 if (ieee80211_radiotap_active_vap(vap)) {
1845 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
1846 if (iswep)
1847 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
1848 if (isfrag)
1849 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
1850 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
1851 sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
1852 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
1853
1854 ieee80211_radiotap_tx(vap, m0);
1855 }
1856
1857 /* Blank the legacy rate array */
1858 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1859
1860 /*
1861 * ath_buf_set_rate needs at least one rate/try to setup
1862 * the rate scenario.
1863 */
1864 bf->bf_state.bfs_rc[0].rix = rix;
1865 bf->bf_state.bfs_rc[0].tries = try0;
1866 bf->bf_state.bfs_rc[0].ratecode = txrate;
1867
1868 /* Store the decided rate index values away */
1869 bf->bf_state.bfs_pktlen = pktlen;
1870 bf->bf_state.bfs_hdrlen = hdrlen;
1871 bf->bf_state.bfs_atype = atype;
1872 bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni);
1873 bf->bf_state.bfs_txrate0 = txrate;
1874 bf->bf_state.bfs_try0 = try0;
1875 bf->bf_state.bfs_keyix = keyix;
1876 bf->bf_state.bfs_txantenna = sc->sc_txantenna;
1877 bf->bf_state.bfs_txflags = flags;
1878 bf->bf_state.bfs_shpream = shortPreamble;
1879
1880 /* XXX this should be done in ath_tx_setrate() */
1881 bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */
1882 bf->bf_state.bfs_ctsrate = 0; /* calculated later */
1883 bf->bf_state.bfs_ctsduration = 0;
1884 bf->bf_state.bfs_ismrr = ismrr;
1885
1886 return 0;
1887 }
1888
1889 /*
1890 * Queue a frame to the hardware or software queue.
1891 *
1892 * This can be called by the net80211 code.
1893 *
1894 * XXX what about locking? Or, push the seqno assign into the
1895 * XXX aggregate scheduler so its serialised?
1896 *
1897 * XXX When sending management frames via ath_raw_xmit(),
1898 * should CLRDMASK be set unconditionally?
1899 */
1900 int
1901 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
1902 struct ath_buf *bf, struct mbuf *m0)
1903 {
1904 struct ieee80211vap *vap = ni->ni_vap;
1905 struct ath_vap *avp = ATH_VAP(vap);
1906 int r = 0;
1907 u_int pri;
1908 int tid;
1909 struct ath_txq *txq;
1910 int ismcast;
1911 const struct ieee80211_frame *wh;
1912 int is_ampdu, is_ampdu_tx, is_ampdu_pending;
1913 ieee80211_seq seqno;
1914 uint8_t type, subtype;
1915 int queue_to_head;
1916
1917 ATH_TX_LOCK_ASSERT(sc);
1918
1919 /*
1920 * Determine the target hardware queue.
1921 *
1922 * For multicast frames, the txq gets overridden appropriately
1923 * depending upon the state of PS. If powersave is enabled
1924 * then they get added to the cabq for later transmit.
1925 *
1926 * The "fun" issue here is that group addressed frames should
1927 * have the sequence number from a different pool, rather than
1928 * the per-TID pool. That means that even QoS group addressed
1929 * frames will have a sequence number from that global value,
1930 * which means if we transmit different group addressed frames
1931 * at different traffic priorities, the sequence numbers will
1932 * all be out of whack. So - chances are, the right thing
1933 * to do here is to always put group addressed frames into the BE
1934 * queue, and ignore the TID for queue selection.
1935 *
1936 * For any other frame, we do a TID/QoS lookup inside the frame
1937 * to see what the TID should be. If it's a non-QoS frame, the
1938 * AC and TID are overridden. The TID/TXQ code assumes the
1939 * TID is on a predictable hardware TXQ, so we don't support
1940 * having a node TID queued to multiple hardware TXQs.
1941 * This may change in the future but would require some locking
1942 * fudgery.
1943 */
1944 pri = ath_tx_getac(sc, m0);
1945 tid = ath_tx_gettid(sc, m0);
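/*
 * Note: pri selects the destination hardware queue (via sc_ac2q[]
 * below) while tid selects the per-node software TID state.
 * Non-QoS frames are assigned the non-QoS TID (16), which is
 * serviced through the best-effort queue.
 */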
1946
1947 txq = sc->sc_ac2q[pri];
1948 wh = mtod(m0, struct ieee80211_frame *);
1949 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1950 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1951 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1952
1953 /*
1954 * Enforce how deep the multicast queue can grow.
1955 *
1956 * XXX duplicated in ath_raw_xmit().
1957 */
1958 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1959 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
1960 > sc->sc_txq_mcastq_maxdepth) {
1961 sc->sc_stats.ast_tx_mcastq_overflow++;
1962 m_freem(m0);
1963 return (ENOBUFS);
1964 }
1965 }
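/*
 * (axq_depth is the count of frames on the legacy DMA list and
 * fifo.axq_depth the count already pushed into the TX FIFO on
 * EDMA chips, so their sum is taken here as the total CABQ
 * occupancy - an assumption based on how the two counters are
 * summed throughout this file.)
 */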
1966
1967 /*
1968 * Enforce how deep the unicast queue can grow.
1969 *
1970 * If the node is in power save then we don't want
1971 * the software queue to grow too deep, or a node may
1972 * end up consuming all of the ath_buf entries.
1973 *
1974 * For now, only do this for DATA frames.
1975 *
1976 * We will want to cap how many management/control
1977 * frames get punted to the software queue so it doesn't
1978 * fill up. But the correct solution isn't yet obvious.
1979 * In any case, this check should at least let frames pass
1980 * that we are direct-dispatching.
1981 *
1982 * XXX TODO: duplicate this to the raw xmit path!
1983 */
1984 if (type == IEEE80211_FC0_TYPE_DATA &&
1985 ATH_NODE(ni)->an_is_powersave &&
1986 ATH_NODE(ni)->an_swq_depth >
1987 sc->sc_txq_node_psq_maxdepth) {
1988 sc->sc_stats.ast_tx_node_psq_overflow++;
1989 m_freem(m0);
1990 return (ENOBUFS);
1991 }
1992
1993 /* A-MPDU TX */
1994 is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
1995 is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
1996 is_ampdu = is_ampdu_tx | is_ampdu_pending;
1997
1998 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
1999 __func__, tid, pri, is_ampdu);
2000
2001 /* Set local packet state, used to queue packets to hardware */
2002 bf->bf_state.bfs_tid = tid;
2003 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
2004 bf->bf_state.bfs_pri = pri;
2005
2006 #if 1
2007 /*
2008 * When servicing one or more stations in power-save mode,
2009 * or if there is some mcast data waiting on the mcast
2010 * queue (to prevent out of order delivery), multicast frames
2011 * must be buffered until after the beacon.
2012 *
2013 * TODO: we should lock the mcastq before we check the length.
2014 */
2015 if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
2016 txq = &avp->av_mcastq;
2017 /*
2018 * Mark the frame as eventually belonging on the CAB
2019 * queue, so the descriptor setup functions will
2020 * correctly initialise the descriptor 'qcuId' field.
2021 */
2022 bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
2023 }
2024 #endif
2025
2026 /* Do the generic frame setup */
2027 /* XXX should just bzero the bf_state? */
2028 bf->bf_state.bfs_dobaw = 0;
2029
2030 /* A-MPDU TX? Manually set sequence number */
2031 /*
2032 * Don't do it whilst pending; the net80211 layer still
2033 * assigns them.
2034 *
2035 * Don't assign A-MPDU sequence numbers to group address
2036 * frames; they come from a different sequence number space.
2037 */
2038 if (is_ampdu_tx && (! IEEE80211_IS_MULTICAST(wh->i_addr1))) {
2039 /*
2040 * Always call; this function will
2041 * handle making sure that null data frames
2042 * and group-addressed frames don't get a sequence number
2043 * from the current TID and thus mess with the BAW.
2044 */
2045 seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);
2046
2047 /*
2048 * Don't add QoS NULL frames and group-addressed frames
2049 * to the BAW.
2050 */
2051 if (IEEE80211_QOS_HAS_SEQ(wh) &&
2052 (! IEEE80211_IS_MULTICAST(wh->i_addr1)) &&
2053 (! IEEE80211_IS_QOS_NULL(wh))) {
2054 bf->bf_state.bfs_dobaw = 1;
2055 }
2056 }
2057
2058 /*
2059 * If needed, the sequence number has been assigned.
2060 * Squirrel it away somewhere easy to get to.
2061 */
2062 bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;
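/*
 * (bfs_seqno therefore holds the sequence number in the same bit
 * position it occupies in the on-air i_seq field; the SEQNO()
 * users elsewhere in this file shift it back down to the plain
 * 0..4095 value.)
 */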
2063
2064 /* Is ampdu pending? fetch the seqno and print it out */
2065 if (is_ampdu_pending)
2066 DPRINTF(sc, ATH_DEBUG_SW_TX,
2067 "%s: tid %d: ampdu pending, seqno %d\n",
2068 __func__, tid, M_SEQNO_GET(m0));
2069
2070 /* This also sets up the DMA map; crypto; frame parameters, etc */
2071 r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
2072
2073 if (r != 0)
2074 goto done;
2075
2076 /* At this point m0 could have changed! */
2077 m0 = bf->bf_m;
2078
2079 #if 1
2080 /*
2081 * If it's a multicast frame, do a direct-dispatch to the
2082 * destination hardware queue. Don't bother software
2083 * queuing it.
2084 */
2085 /*
2086 * If it's a BAR frame, do a direct dispatch to the
2087 * destination hardware queue. Don't bother software
2088 * queuing it, as the TID will now be paused.
2089 * Sending a BAR frame can occur from the net80211 txa timer
2090 * (ie, retries) or from the ath txtask (completion call.)
2091 * It queues directly to hardware because the TID is paused
2092 * at this point (and won't be unpaused until the BAR has
2093 * either been TXed successfully or max retries has been
2094 * reached.)
2095 */
2096 /*
2097 * Until things are better debugged - if this node is asleep
2098 * and we're sending it a non-BAR frame, direct dispatch it.
2099 * Why? Because we need to figure out what's actually being
2100 * sent - eg, during reassociation/reauthentication after
2101 * the node (last) disappeared whilst asleep, the driver should
2102 * have unpaused/unsleep'ed the node. So until that is
2103 * sorted out, use this workaround.
2104 */
2105 if (txq == &avp->av_mcastq) {
2106 DPRINTF(sc, ATH_DEBUG_SW_TX,
2107 "%s: bf=%p: mcastq: TX'ing\n", __func__, bf);
2108 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2109 ath_tx_xmit_normal(sc, txq, bf);
2110 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2111 &queue_to_head)) {
2112 ath_tx_swq(sc, ni, txq, queue_to_head, bf);
2113 } else {
2114 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2115 ath_tx_xmit_normal(sc, txq, bf);
2116 }
2117 #else
2118 /*
2119 * For now, since there's no software queue,
2120 * direct-dispatch to the hardware.
2121 */
2122 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2123 /*
2124 * Update the current leak count if
2125 * we're leaking frames; and set the
2126 * MORE flag as appropriate.
2127 */
2128 ath_tx_leak_count_update(sc, tid, bf);
2129 ath_tx_xmit_normal(sc, txq, bf);
2130 #endif
2131 done:
2132 return 0;
2133 }
2134
2135 static int
2136 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
2137 struct ath_buf *bf, struct mbuf *m0,
2138 const struct ieee80211_bpf_params *params)
2139 {
2140 struct ieee80211com *ic = &sc->sc_ic;
2141 struct ieee80211vap *vap = ni->ni_vap;
2142 int error, ismcast, ismrr;
2143 int keyix, hdrlen, pktlen, try0, txantenna;
2144 u_int8_t rix, txrate;
2145 struct ieee80211_frame *wh;
2146 u_int flags;
2147 HAL_PKT_TYPE atype;
2148 const HAL_RATE_TABLE *rt;
2149 struct ath_desc *ds;
2150 u_int pri;
2151 int o_tid = -1;
2152 int do_override;
2153 uint8_t type, subtype;
2154 int queue_to_head;
2155 struct ath_node *an = ATH_NODE(ni);
2156
2157 ATH_TX_LOCK_ASSERT(sc);
2158
2159 wh = mtod(m0, struct ieee80211_frame *);
2160 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
2161 hdrlen = ieee80211_anyhdrsize(wh);
2162 /*
2163 * Packet length must not include any
2164 * pad bytes; deduct them here.
2165 */
2166 /* XXX honor IEEE80211_BPF_DATAPAD */
2167 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;
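/*
 * Worked example (illustrative): a QoS data header is 26 bytes,
 * so (hdrlen & 3) == 2 deducts the two alignment pad bytes that
 * may follow the header, and IEEE80211_CRC_LEN adds the 4-byte
 * FCS that the hardware appends on air.
 */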
2168
2169 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2170 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2171
2172 ATH_KTR(sc, ATH_KTR_TX, 2,
2173 "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf);
2174
2175 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n",
2176 __func__, ismcast);
2177
2178 pri = params->ibp_pri & 3;
2179 /* Override pri if the frame isn't a QoS one */
2180 if (! IEEE80211_QOS_HAS_SEQ(wh))
2181 pri = ath_tx_getac(sc, m0);
2182
2183 /* XXX If it's an ADDBA, override the correct queue */
2184 do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid);
2185
2186 /* Map ADDBA to the correct priority */
2187 if (do_override) {
2188 #if 1
2189 DPRINTF(sc, ATH_DEBUG_XMIT,
2190 "%s: overriding tid %d pri %d -> %d\n",
2191 __func__, o_tid, pri, TID_TO_WME_AC(o_tid));
2192 #endif
2193 pri = TID_TO_WME_AC(o_tid);
2194 }
2195
2196 /*
2197 * "pri" is the hardware queue to transmit on.
2198 *
2199 * Look at the description in ath_tx_start() to understand
2200 * what needs to be "fixed" here so we just use the TID
2201 * for QoS frames.
2202 */
2203
2204 /* Handle encryption twiddling if needed */
2205 if (! ath_tx_tag_crypto(sc, ni,
2206 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
2207 &hdrlen, &pktlen, &keyix)) {
2208 ieee80211_free_mbuf(m0);
2209 return EIO;
2210 }
2211 /* packet header may have moved, reset our local pointer */
2212 wh = mtod(m0, struct ieee80211_frame *);
2213
2214 /* Do the generic frame setup */
2215 /* XXX should just bzero the bf_state? */
2216 bf->bf_state.bfs_dobaw = 0;
2217
2218 error = ath_tx_dmasetup(sc, bf, m0);
2219 if (error != 0)
2220 return error;
2221 m0 = bf->bf_m; /* NB: may have changed */
2222 wh = mtod(m0, struct ieee80211_frame *);
2223 KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
2224 bf->bf_node = ni; /* NB: held reference */
2225
2226 /* Always enable CLRDMASK for raw frames for now.. */
2227 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */
2228 flags |= HAL_TXDESC_INTREQ; /* force interrupt */
2229 if (params->ibp_flags & IEEE80211_BPF_RTS)
2230 flags |= HAL_TXDESC_RTSENA;
2231 else if (params->ibp_flags & IEEE80211_BPF_CTS) {
2232 /* XXX assume 11g/11n protection? */
2233 bf->bf_state.bfs_doprot = 1;
2234 flags |= HAL_TXDESC_CTSENA;
2235 }
2236 /* XXX leave ismcast to injector? */
2237 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
2238 flags |= HAL_TXDESC_NOACK;
2239
2240 rt = sc->sc_currates;
2241 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
2242
2243 /* Fetch first rate information */
2244 rix = ath_tx_findrix(sc, params->ibp_rate0);
2245 try0 = params->ibp_try0;
2246
2247 /*
2248 * Override EAPOL rate as appropriate.
2249 */
2250 if (m0->m_flags & M_EAPOL) {
2251 /* XXX? maybe always use long preamble? */
2252 rix = an->an_mgmtrix;
2253 try0 = ATH_TXMAXTRY; /* XXX?too many? */
2254 }
2255
2256 /*
2257 * If it's a frame to do location reporting on,
2258 * communicate it to the HAL.
2259 */
2260 if (ieee80211_get_toa_params(m0, NULL)) {
2261 device_printf(sc->sc_dev,
2262 "%s: setting TX positioning bit\n", __func__);
2263 flags |= HAL_TXDESC_POS;
2264 flags &= ~(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA);
2265 bf->bf_flags |= ATH_BUF_TOA_PROBE;
2266 }
2267
2268 txrate = rt->info[rix].rateCode;
2269 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
2270 txrate |= rt->info[rix].shortPreamble;
2271 sc->sc_txrix = rix;
2272 ismrr = (params->ibp_try1 != 0);
2273 txantenna = params->ibp_pri >> 2;
2274 if (txantenna == 0) /* XXX? */
2275 txantenna = sc->sc_txantenna;
2276
2277 /*
2278 * Since ctsrate is fixed, store it away for later
2279 * use when the descriptor fields are being set.
2280 */
2281 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA))
2282 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate;
2283
2284 /*
2285 * NB: we mark all packets as type PSPOLL so the h/w won't
2286 * set the sequence number, duration, etc.
2287 */
2288 atype = HAL_PKT_TYPE_PSPOLL;
2289
2290 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
2291 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
2292 sc->sc_hwmap[rix].ieeerate, -1);
2293
2294 if (ieee80211_radiotap_active_vap(vap)) {
2295 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
2296 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
2297 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2298 if (m0->m_flags & M_FRAG)
2299 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
2300 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
2301 sc->sc_tx_th.wt_txpower = MIN(params->ibp_power,
2302 ieee80211_get_node_txpower(ni));
2303 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
2304
2305 ieee80211_radiotap_tx(vap, m0);
2306 }
2307
2308 /*
2309 * Formulate first tx descriptor with tx controls.
2310 */
2311 ds = bf->bf_desc;
2312 /* XXX check return value? */
2313
2314 /* Store the decided rate index values away */
2315 bf->bf_state.bfs_pktlen = pktlen;
2316 bf->bf_state.bfs_hdrlen = hdrlen;
2317 bf->bf_state.bfs_atype = atype;
2318 bf->bf_state.bfs_txpower = MIN(params->ibp_power,
2319 ieee80211_get_node_txpower(ni));
2320 bf->bf_state.bfs_txrate0 = txrate;
2321 bf->bf_state.bfs_try0 = try0;
2322 bf->bf_state.bfs_keyix = keyix;
2323 bf->bf_state.bfs_txantenna = txantenna;
2324 bf->bf_state.bfs_txflags = flags;
2325 bf->bf_state.bfs_shpream =
2326 !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE);
2327
2328 /* Set local packet state, used to queue packets to hardware */
2329 bf->bf_state.bfs_tid = WME_AC_TO_TID(pri);
2330 bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;
2331 bf->bf_state.bfs_pri = pri;
2332
2333 /* XXX this should be done in ath_tx_setrate() */
2334 bf->bf_state.bfs_ctsrate = 0;
2335 bf->bf_state.bfs_ctsduration = 0;
2336 bf->bf_state.bfs_ismrr = ismrr;
2337
2338 /* Blank the legacy rate array */
2339 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
2340
2341 bf->bf_state.bfs_rc[0].rix = rix;
2342 bf->bf_state.bfs_rc[0].tries = try0;
2343 bf->bf_state.bfs_rc[0].ratecode = txrate;
2344
2345 if (ismrr) {
2346 int rix;
2347
2348 rix = ath_tx_findrix(sc, params->ibp_rate1);
2349 bf->bf_state.bfs_rc[1].rix = rix;
2350 bf->bf_state.bfs_rc[1].tries = params->ibp_try1;
2351
2352 rix = ath_tx_findrix(sc, params->ibp_rate2);
2353 bf->bf_state.bfs_rc[2].rix = rix;
2354 bf->bf_state.bfs_rc[2].tries = params->ibp_try2;
2355
2356 rix = ath_tx_findrix(sc, params->ibp_rate3);
2357 bf->bf_state.bfs_rc[3].rix = rix;
2358 bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
2359 }
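/*
 * Illustrative example (hypothetical values, not defaults): an
 * injector asking for rate0/try0 = 54M/4 and rate1..3/try1..3 =
 * 36M/2, 24M/2, 12M/2 ends up with a four-entry multi-rate retry
 * schedule in bfs_rc[]; with ibp_try1 == 0, ismrr stays clear and
 * only bfs_rc[0] is used.
 */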
2360 /*
2361 * All the required rate control decisions have been made;
2362 * fill in the rc flags.
2363 */
2364 ath_tx_rate_fill_rcflags(sc, bf);
2365
2366 /* NB: no buffered multicast in power save support */
2367
2368 /*
2369 * If we're overriding the ADDBA destination, dump directly
2370 * into the hardware queue, right after any pending
2371 * frames to that node are.
2372 */
2373 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",
2374 __func__, do_override);
2375
2376 #if 1
2377 /*
2378 * Put addba frames in the right place in the right TID/HWQ.
2379 */
2380 if (do_override) {
2381 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2382 /*
2383 * XXX if it's addba frames, should we be leaking
2384 * them out via the frame leak method?
2385 * XXX for now let's not risk it; but we may wish
2386 * to investigate this later.
2387 */
2388 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2389 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2390 &queue_to_head)) {
2391 /* Queue to software queue */
2392 ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);
2393 } else {
2394 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2395 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2396 }
2397 #else
2398 /* Direct-dispatch to the hardware */
2399 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2400 /*
2401 * Update the current leak count if
2402 * we're leaking frames; and set the
2403 * MORE flag as appropriate.
2404 */
2405 ath_tx_leak_count_update(sc, tid, bf);
2406 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2407 #endif
2408 return 0;
2409 }
2410
2411 /*
2412 * Send a raw frame.
2413 *
2414 * This can be called by net80211.
2415 */
2416 int
2417 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
2418 const struct ieee80211_bpf_params *params)
2419 {
2420 struct ieee80211com *ic = ni->ni_ic;
2421 struct ath_softc *sc = ic->ic_softc;
2422 struct ath_buf *bf;
2423 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
2424 int error = 0;
2425
2426 ATH_PCU_LOCK(sc);
2427 if (sc->sc_inreset_cnt > 0) {
2428 DPRINTF(sc, ATH_DEBUG_XMIT,
2429 "%s: sc_inreset_cnt > 0; bailing\n", __func__);
2430 error = EIO;
2431 ATH_PCU_UNLOCK(sc);
2432 goto badbad;
2433 }
2434 sc->sc_txstart_cnt++;
2435 ATH_PCU_UNLOCK(sc);
2436
2437 /* Wake the hardware up already */
2438 ATH_LOCK(sc);
2439 ath_power_set_power_state(sc, HAL_PM_AWAKE);
2440 ATH_UNLOCK(sc);
2441
2442 ATH_TX_LOCK(sc);
2443
2444 if (!sc->sc_running || sc->sc_invalid) {
2445 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, r/i: %d/%d",
2446 __func__, sc->sc_running, sc->sc_invalid);
2447 m_freem(m);
2448 error = ENETDOWN;
2449 goto bad;
2450 }
2451
2452 /*
2453 * Enforce how deep the multicast queue can grow.
2454 *
2455 * XXX duplicated in ath_tx_start().
2456 */
2457 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2458 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
2459 > sc->sc_txq_mcastq_maxdepth) {
2460 sc->sc_stats.ast_tx_mcastq_overflow++;
2461 error = ENOBUFS;
2462 }
2463
2464 if (error != 0) {
2465 m_freem(m);
2466 goto bad;
2467 }
2468 }
2469
2470 /*
2471 * Grab a TX buffer and associated resources.
2472 */
2473 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
2474 if (bf == NULL) {
2475 sc->sc_stats.ast_tx_nobuf++;
2476 m_freem(m);
2477 error = ENOBUFS;
2478 goto bad;
2479 }
2480 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n",
2481 m, params, bf);
2482
2483 if (params == NULL) {
2484 /*
2485 * Legacy path; interpret frame contents to decide
2486 * precisely how to send the frame.
2487 */
2488 if (ath_tx_start(sc, ni, bf, m)) {
2489 error = EIO; /* XXX */
2490 goto bad2;
2491 }
2492 } else {
2493 /*
2494 * Caller supplied explicit parameters to use in
2495 * sending the frame.
2496 */
2497 if (ath_tx_raw_start(sc, ni, bf, m, params)) {
2498 error = EIO; /* XXX */
2499 goto bad2;
2500 }
2501 }
2502 sc->sc_wd_timer = 5;
2503 sc->sc_stats.ast_tx_raw++;
2504
2505 /*
2506 * Update the TIM - if there's anything queued to the
2507 * software queue and power save is enabled, we should
2508 * set the TIM.
2509 */
2510 ath_tx_update_tim(sc, ni, 1);
2511
2512 ATH_TX_UNLOCK(sc);
2513
2514 ATH_PCU_LOCK(sc);
2515 sc->sc_txstart_cnt--;
2516 ATH_PCU_UNLOCK(sc);
2517
2518 /* Put the hardware back to sleep if required */
2519 ATH_LOCK(sc);
2520 ath_power_restore_power_state(sc);
2521 ATH_UNLOCK(sc);
2522
2523 return 0;
2524
2525 bad2:
2526 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, "
2527 "bf=%p",
2528 m,
2529 params,
2530 bf);
2531 ATH_TXBUF_LOCK(sc);
2532 ath_returnbuf_head(sc, bf);
2533 ATH_TXBUF_UNLOCK(sc);
2534
2535 bad:
2536 ATH_TX_UNLOCK(sc);
2537
2538 ATH_PCU_LOCK(sc);
2539 sc->sc_txstart_cnt--;
2540 ATH_PCU_UNLOCK(sc);
2541
2542 /* Put the hardware back to sleep if required */
2543 ATH_LOCK(sc);
2544 ath_power_restore_power_state(sc);
2545 ATH_UNLOCK(sc);
2546
2547 badbad:
2548 ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p",
2549 m, params);
2550 sc->sc_stats.ast_tx_raw_fail++;
2551
2552 return error;
2553 }
2554
2555 /* Some helper functions */
2556
2557 /*
2558 * ADDBA (and potentially others) need to be placed in the same
2559 * hardware queue as the TID/node it's relating to. This is so
2560 * it goes out after any pending non-aggregate frames to the
2561 * same node/TID.
2562 *
2563 * If this isn't done, the ADDBA can go out before the frames
2564 * queued in hardware. Even though these frames have sequence
2565 * numbers -earlier- than the ADDBA (and no frames with sequence
2566 * numbers after the ADDBA should be pending!) they'll arrive
2567 * after the ADDBA - and the receiving end will simply drop
2568 * them as being out of the BAW.
2569 *
2570 * The frames can't be appended to the TID software queue - it'll
2571 * never be sent out. So these frames have to be directly
2572 * dispatched to the hardware, rather than queued in software.
2573 * So if this function returns true, the TXQ has to be
2574 * overridden and it has to be directly dispatched.
2575 *
2576 * It's a dirty hack, but someone's gotta do it.
2577 */
2578
2579 /*
2580 * Return an alternate TID for ADDBA request frames.
2581 *
2582 * Yes, this likely should be done in the net80211 layer.
2583 */
2584 static int
2585 ath_tx_action_frame_override_queue(struct ath_softc *sc,
2586 struct ieee80211_node *ni,
2587 struct mbuf *m0, int *tid)
2588 {
2589 struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *);
2590 struct ieee80211_action_ba_addbarequest *ia;
2591 uint8_t *frm;
2592 uint16_t baparamset;
2593
2594 /* Not action frame? Bail */
2595 if (! IEEE80211_IS_MGMT_ACTION(wh))
2596 return 0;
2597
2598 /* XXX Not needed for frames we send? */
2599 #if 0
2600 /* Correct length? */
2601 if (! ieee80211_parse_action(ni, m))
2602 return 0;
2603 #endif
2604
2605 /* Extract out action frame */
2606 frm = (u_int8_t *)&wh[1];
2607 ia = (struct ieee80211_action_ba_addbarequest *) frm;
2608
2609 /* Not ADDBA? Bail */
2610 if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA)
2611 return 0;
2612 if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST)
2613 return 0;
2614
2615 /* Extract TID, return it */
2616 baparamset = le16toh(ia->rq_baparamset);
2617 *tid = (int) _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_TID);
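/*
 * (The TID lives in bits 2..5 of the BA parameter set in the
 * net80211/802.11 layout, so an ADDBA request for TID 5 carries
 * 5 << 2 in that field - example value for illustration only.)
 */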
2618
2619 return 1;
2620 }
2621
2622 /* Per-node software queue operations */
2623
2624 /*
2625 * Add the current packet to the given BAW.
2626 * It is assumed that the current packet
2627 *
2628 * + fits inside the BAW;
2629 * + already has had a sequence number allocated.
2630 *
2631 * Since the BAW status may be modified by both the ath task and
2632 * the net80211/ifnet contexts, the TID must be locked.
2633 */
2634 void
2635 ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
2636 struct ath_tid *tid, struct ath_buf *bf)
2637 {
2638 int index, cindex;
2639 struct ieee80211_tx_ampdu *tap;
2640
2641 ATH_TX_LOCK_ASSERT(sc);
2642
2643 if (bf->bf_state.bfs_isretried)
2644 return;
2645
2646 tap = ath_tx_get_tx_tid(an, tid->tid);
2647
2648 if (! bf->bf_state.bfs_dobaw) {
2649 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2650 "%s: dobaw=0, seqno=%d, window %d:%d\n",
2651 __func__, SEQNO(bf->bf_state.bfs_seqno),
2652 tap->txa_start, tap->txa_wnd);
2653 }
2654
2655 if (bf->bf_state.bfs_addedbaw)
2656 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2657 "%s: re-added? tid=%d, seqno %d; window %d:%d; "
2658 "baw head=%d tail=%d\n",
2659 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2660 tap->txa_start, tap->txa_wnd, tid->baw_head,
2661 tid->baw_tail);
2662
2663 /*
2664 * Verify that the given sequence number is not outside of the
2665 * BAW. Complain loudly if that's the case.
2666 */
2667 if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
2668 SEQNO(bf->bf_state.bfs_seqno))) {
2669 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2670 "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; "
2671 "baw head=%d tail=%d\n",
2672 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2673 tap->txa_start, tap->txa_wnd, tid->baw_head,
2674 tid->baw_tail);
2675 }
2676
2677 /*
2678 * ni->ni_txseqs[] is the currently allocated seqno.
2679 * the txa state contains the current baw start.
2680 */
2681 index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));
2682 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2683 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2684 "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d "
2685 "baw head=%d tail=%d\n",
2686 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2687 tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head,
2688 tid->baw_tail);
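/*
 * Worked example (illustrative): if txa_start is 100 and this
 * frame's seqno is 103, index is 3 slots past the BAW left edge;
 * with baw_head at (say) 10, the frame lands in
 * tid->tx_buf[13] (modulo ATH_TID_MAX_BUFS).
 */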
2689
2690 #if 0
2691 assert(tid->tx_buf[cindex] == NULL);
2692 #endif
2693 if (tid->tx_buf[cindex] != NULL) {
2694 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2695 "%s: ba packet dup (index=%d, cindex=%d, "
2696 "head=%d, tail=%d)\n",
2697 __func__, index, cindex, tid->baw_head, tid->baw_tail);
2698 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2699 "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n",
2700 __func__,
2701 tid->tx_buf[cindex],
2702 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno),
2703 bf,
2704 SEQNO(bf->bf_state.bfs_seqno)
2705 );
2706 }
2707 tid->tx_buf[cindex] = bf;
2708
2709 if (index >= ((tid->baw_tail - tid->baw_head) &
2710 (ATH_TID_MAX_BUFS - 1))) {
2711 tid->baw_tail = cindex;
2712 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
2713 }
2714 }
2715
2716 /*
2717 * Flip the BAW buffer entry over from the existing one to the new one.
2718 *
2719 * When software retransmitting a (sub-)frame, it is entirely possible that
2720 * the frame ath_buf is marked as BUSY and can't be immediately reused.
2721 * In that instance the buffer is cloned and the new buffer is used for
2722 * retransmit. We thus need to update the ath_buf slot in the BAW buf
2723 * tracking array to maintain consistency.
2724 */
2725 static void
2726 ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an,
2727 struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf)
2728 {
2729 int index, cindex;
2730 struct ieee80211_tx_ampdu *tap;
2731 int seqno = SEQNO(old_bf->bf_state.bfs_seqno);
2732
2733 ATH_TX_LOCK_ASSERT(sc);
2734
2735 tap = ath_tx_get_tx_tid(an, tid->tid);
2736 index = ATH_BA_INDEX(tap->txa_start, seqno);
2737 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2738
2739 /*
2740 * Just warn for now; if it happens then we should find out
2741 * about it. It's highly likely the aggregation session will
2742 * soon hang.
2743 */
2744 if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {
2745 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2746 "%s: retransmitted buffer"
2747 " has mismatching seqno's, BA session may hang.\n",
2748 __func__);
2749 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2750 "%s: old seqno=%d, new_seqno=%d\n", __func__,
2751 old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno);
2752 }
2753
2754 if (tid->tx_buf[cindex] != old_bf) {
2755 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2756 "%s: ath_buf pointer incorrect; "
2757 " has m BA session may hang.\n", __func__);
2758 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2759 "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf);
2760 }
2761
2762 tid->tx_buf[cindex] = new_bf;
2763 }
2764
2765 /*
2766 * seq_start - left edge of BAW
2767 * seq_next - current/next sequence number to allocate
2768 *
2769 * Since the BAW status may be modified by both the ath task and
2770 * the net80211/ifnet contexts, the TID must be locked.
2771 */
2772 static void
2773 ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
2774 struct ath_tid *tid, const struct ath_buf *bf)
2775 {
2776 int index, cindex;
2777 struct ieee80211_tx_ampdu *tap;
2778 int seqno = SEQNO(bf->bf_state.bfs_seqno);
2779
2780 ATH_TX_LOCK_ASSERT(sc);
2781
2782 tap = ath_tx_get_tx_tid(an, tid->tid);
2783 index = ATH_BA_INDEX(tap->txa_start, seqno);
2784 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2785
2786 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2787 "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, "
2788 "baw head=%d, tail=%d\n",
2789 __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
2790 cindex, tid->baw_head, tid->baw_tail);
2791
2792 /*
2793 * If this occurs then we have a big problem - something else
2794 * has slid tap->txa_start along without updating the BAW
2795 * tracking start/end pointers. Thus the TX BAW state is now
2796 * completely busted.
2797 *
2798 * But for now, since I haven't yet fixed TDMA and buffer cloning,
2799 * it's quite possible that a cloned buffer is making its way
2800 * here and causing it to fire off. Disable TDMA for now.
2801 */
2802 if (tid->tx_buf[cindex] != bf) {
2803 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2804 "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
2805 __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
2806 tid->tx_buf[cindex],
2807 (tid->tx_buf[cindex] != NULL) ?
2808 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1);
2809 }
2810
2811 tid->tx_buf[cindex] = NULL;
2812
2813 while (tid->baw_head != tid->baw_tail &&
2814 !tid->tx_buf[tid->baw_head]) {
2815 INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
2816 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
2817 }
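/*
 * Illustrative example: if the three slots starting at baw_head
 * have all completed (their tx_buf[] entries are NULL), the loop
 * above advances baw_head by three and slides txa_start - the BAW
 * left edge - forward by the same amount, making room for new
 * sequence numbers.
 */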
2818 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2819 "%s: tid=%d: baw is now %d:%d, baw head=%d\n",
2820 __func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head);
2821 }
2822
2823 static void
2824 ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,
2825 struct ath_buf *bf)
2826 {
2827 struct ieee80211_frame *wh;
2828
2829 ATH_TX_LOCK_ASSERT(sc);
2830
2831 if (tid->an->an_leak_count > 0) {
2832 wh = mtod(bf->bf_m, struct ieee80211_frame *);
2833
2834 /*
2835 * Update MORE based on the software/net80211 queue states.
2836 */
2837 if ((tid->an->an_stack_psq > 0)
2838 || (tid->an->an_swq_depth > 0))
2839 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
2840 else
2841 wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;
2842
2843 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
2844 "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n",
2845 __func__,
2846 tid->an->an_node.ni_macaddr,
2847 ":",
2848 tid->an->an_leak_count,
2849 tid->an->an_stack_psq,
2850 tid->an->an_swq_depth,
2851 !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));
2852
2853 /*
2854 * Re-sync the underlying buffer.
2855 */
2856 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
2857 BUS_DMASYNC_PREWRITE);
2858
2859 tid->an->an_leak_count --;
2860 }
2861 }
2862
2863 static int
2864 ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)
2865 {
2866
2867 ATH_TX_LOCK_ASSERT(sc);
2868
2869 if (tid->an->an_leak_count > 0) {
2870 return (1);
2871 }
2872 if (tid->paused)
2873 return (0);
2874 return (1);
2875 }
2876
2877 /*
2878 * Mark the current node/TID as ready to TX.
2879 *
2880 * This is done to make it easy for the software scheduler to
2881 * find which nodes have data to send.
2882 *
2883 * The TXQ lock must be held.
2884 */
2885 void
2886 ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
2887 {
2888 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2889
2890 ATH_TX_LOCK_ASSERT(sc);
2891
2892 /*
2893 * If we are leaking out a frame to this destination
2894 * for PS-POLL, ensure that we allow scheduling to
2895 * occur.
2896 */
2897 if (! ath_tx_tid_can_tx_or_sched(sc, tid))
2898 return; /* paused, can't schedule yet */
2899
2900 if (tid->sched)
2901 return; /* already scheduled */
2902
2903 tid->sched = 1;
2904
2905 #if 0
2906 /*
2907 * If this is a sleeping node we're leaking to, give
2908 * it a higher priority. This is so bad for QoS it hurts.
2909 */
2910 if (tid->an->an_leak_count) {
2911 TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
2912 } else {
2913 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2914 }
2915 #endif
2916
2917 /*
2918 * We can't do the above - it'll confuse the TXQ software
2919 * scheduler which will keep checking the _head_ TID
2920 * in the list to see if it has traffic. If we queue
2921 * a TID to the head of the list and it doesn't transmit,
2922 * we'll check it again.
2923 *
2924 * So, get the rest of this leaking frames support working
2925 * and reliable first and _then_ optimise it so they're
2926 * pushed out in front of any other pending software
2927 * queued nodes.
2928 */
2929 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2930 }
2931
2932 /*
2933 * Mark the current node as no longer needing to be polled for
2934 * TX packets.
2935 *
2936 * The TXQ lock must be held.
2937 */
2938 static void
2939 ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)
2940 {
2941 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2942
2943 ATH_TX_LOCK_ASSERT(sc);
2944
2945 if (tid->sched == 0)
2946 return;
2947
2948 tid->sched = 0;
2949 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
2950 }
2951
2952 /*
2953 * Assign a sequence number manually to the given frame.
2954 *
2955 * This should only be called for A-MPDU TX frames.
2956 *
2957 * Note: for group addressed frames, the sequence number
2958 * should be from NONQOS_TID, and net80211 should have
2959 * already assigned it for us.
2960 */
2961 static ieee80211_seq
2962 ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
2963 struct ath_buf *bf, struct mbuf *m0)
2964 {
2965 struct ieee80211_frame *wh;
2966 int tid;
2967 ieee80211_seq seqno;
2968 uint8_t subtype;
2969
2970 wh = mtod(m0, struct ieee80211_frame *);
2971 tid = ieee80211_gettid(wh);
2972
2973 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, qos has seq=%d\n",
2974 __func__, tid, IEEE80211_QOS_HAS_SEQ(wh));
2975
2976 /* XXX Is it a control frame? Ignore */
2977
2978 /* Does the packet require a sequence number? */
2979 if (! IEEE80211_QOS_HAS_SEQ(wh))
2980 return -1;
2981
2982 ATH_TX_LOCK_ASSERT(sc);
2983
2984 /*
2985 * Is it a QOS NULL Data frame? Give it a sequence number from
2986 * the default TID (IEEE80211_NONQOS_TID.)
2987 *
2988 * The RX path of everything I've looked at doesn't include the NULL
2989 * data frame sequence number in the aggregation state updates, so
2990 * assigning it a sequence number there will cause a BAW hole on the
2991 * RX side.
2992 */
2993 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2994 if (IEEE80211_IS_QOS_NULL(wh)) {
2995 /* XXX no locking for this TID? This is a bit of a problem. */
2996 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
2997 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
2998 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2999 /*
3000 * group addressed frames get a sequence number from
3001 * a different sequence number space.
3002 */
3003 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
3004 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
3005 } else {
3006 /* Manually assign sequence number */
3007 seqno = ni->ni_txseqs[tid];
3008 INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE);
3009 }
3010 *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
3011 M_SEQNO_SET(m0, seqno);
3012
3013 /* Return so caller can do something with it if needed */
3014 DPRINTF(sc, ATH_DEBUG_SW_TX,
3015 "%s: -> subtype=0x%x, tid=%d, seqno=%d\n",
3016 __func__, subtype, tid, seqno);
3017 return seqno;
3018 }
3019
3020 /*
3021 * Attempt to direct dispatch an aggregate frame to hardware.
3022 * If the frame is out of BAW, queue.
3023 * Otherwise, schedule it as a single frame.
3024 */
3025 static void
3026 ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an,
3027 struct ath_txq *txq, struct ath_buf *bf)
3028 {
3029 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
3030 struct ieee80211_tx_ampdu *tap;
3031
3032 ATH_TX_LOCK_ASSERT(sc);
3033
3034 tap = ath_tx_get_tx_tid(an, tid->tid);
3035
3036 /* paused? queue */
3037 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
3038 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3039 /* XXX don't sched - we're paused! */
3040 return;
3041 }
3042
3043 /* outside baw? queue */
3044 if (bf->bf_state.bfs_dobaw &&
3045 (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
3046 SEQNO(bf->bf_state.bfs_seqno)))) {
3047 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3048 ath_tx_tid_sched(sc, tid);
3049 return;
3050 }
3051
3052 /*
3053 * This is a temporary check and should be removed once
3054 * all the relevant code paths have been fixed.
3055 *
3056 * During aggregate retries, it's possible that the head
3057 * frame will fail (which has the bfs_aggr and bfs_nframes
3058 * fields set for said aggregate) and will be retried as
3059 * a single frame. In this instance, the values should
3060 * be reset or the completion code will get upset with you.
3061 */
3062 if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) {
3063 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
3064 "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__,
3065 bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes);
3066 bf->bf_state.bfs_aggr = 0;
3067 bf->bf_state.bfs_nframes = 1;
3068 }
3069
3070 /* Update CLRDMASK just before this frame is queued */
3071 ath_tx_update_clrdmask(sc, tid, bf);
3072
3073 /* Direct dispatch to hardware */
3074 ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen,
3075 false);
3076 ath_tx_calc_duration(sc, bf);
3077 ath_tx_calc_protection(sc, bf);
3078 ath_tx_set_rtscts(sc, bf);
3079 ath_tx_rate_fill_rcflags(sc, bf);
3080 ath_tx_setds(sc, bf);
3081
3082 /* Statistics */
3083 sc->sc_aggr_stats.aggr_low_hwq_single_pkt++;
3084
3085 /* Track per-TID hardware queue depth correctly */
3086 tid->hwq_depth++;
3087
3088 /* Add to BAW */
3089 if (bf->bf_state.bfs_dobaw) {
3090 ath_tx_addto_baw(sc, an, tid, bf);
3091 bf->bf_state.bfs_addedbaw = 1;
3092 }
3093
3094 /* Set completion handler, multi-frame aggregate or not */
3095 bf->bf_comp = ath_tx_aggr_comp;
3096
3097 /*
3098 * Update the current leak count if
3099 * we're leaking frames; and set the
3100 * MORE flag as appropriate.
3101 */
3102 ath_tx_leak_count_update(sc, tid, bf);
3103
3104 /* Hand off to hardware */
3105 ath_tx_handoff(sc, txq, bf);
3106 }
3107
3108 /*
3109 * Attempt to send the packet.
3110 * If the queue isn't busy, direct-dispatch.
3111 * If the queue is busy enough, queue the given packet on the
3112 * relevant software queue.
3113 */
3114 void
3115 ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
3116 struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
3117 {
3118 struct ath_node *an = ATH_NODE(ni);
3119 struct ieee80211_frame *wh;
3120 struct ath_tid *atid;
3121 int pri, tid;
3122 struct mbuf *m0 = bf->bf_m;
3123
3124 ATH_TX_LOCK_ASSERT(sc);
3125
3126 /* Fetch the TID - non-QoS frames get assigned to TID 16 */
3127 wh = mtod(m0, struct ieee80211_frame *);
3128 pri = ath_tx_getac(sc, m0);
3129 tid = ath_tx_gettid(sc, m0);
3130 atid = &an->an_tid[tid];
3131
3132 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
3133 __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
3134
3135 /* Set local packet state, used to queue packets to hardware */
3136 /* XXX potentially duplicate info, re-check */
3137 bf->bf_state.bfs_tid = tid;
3138 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
3139 bf->bf_state.bfs_pri = pri;
3140
3141 /*
3142 * If the hardware queue isn't busy, queue it directly.
3143 * If the hardware queue is busy, queue it.
3144 * If the TID is paused or the traffic is outside the BAW,
3145 * software queue it.
3146 *
3147 * If the node is in power-save and we're leaking a frame,
3148 * leak a single frame.
3149 */
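/*
 * In rough order below: a paused TID always software-queues the
 * frame; a pending (not yet negotiated) A-MPDU session software-
 * queues it; a running A-MPDU session queues it and may direct
 * dispatch a single frame if the hardware queue is idle; otherwise
 * the frame is direct dispatched whilst the hardware queue is
 * shallow and software queued once it fills up.
 */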
3150 if (! ath_tx_tid_can_tx_or_sched(sc, atid)) {
3151 /* TID is paused, queue */
3152 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__);
3153 /*
3154 * If the caller requested that it be sent at a high
3155 * priority, queue it at the head of the list.
3156 */
3157 if (queue_to_head)
3158 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
3159 else
3160 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3161 } else if (ath_tx_ampdu_pending(sc, an, tid)) {
3162 /* AMPDU pending; queue */
3163 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__);
3164 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3165 /* XXX sched? */
3166 } else if (ath_tx_ampdu_running(sc, an, tid)) {
3167 /*
3168 * AMPDU running, queue single-frame if the hardware queue
3169 * isn't busy.
3170 *
3171 * If the hardware queue is busy sending an aggregate frame,
3172 * then just hold off so we can queue more aggregate frames.
3173 *
3174 * Otherwise we may end up with single frames leaking through
3175 * because we are dispatching them too quickly.
3176 *
3177 * TODO: maybe we should treat this as two policies - minimise
3178 * latency, or maximise throughput. Then for BE/BK we can
3179 * maximise throughput, and VO/VI (if AMPDU is enabled!)
3180 * minimise latency.
3181 */
3182
3183 /*
3184 * Always queue the frame to the tail of the list.
3185 */
3186 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3187
3188 /*
3189 * If the hardware queue isn't busy, direct dispatch
3190 * the head frame in the list.
3191 *
3192 * Note: if we're, say, configured to do ADDBA but not A-MPDU
3193 * then maybe we want to still queue two non-aggregate frames
3194 * to the hardware. (Again, with the per-TID policy
3195 * configuration.)
3196 *
3197 * Otherwise, schedule the TID.
3198 */
3199 /* XXX TXQ locking */
3200 if (txq->axq_depth + txq->fifo.axq_depth == 0) {
3201 bf = ATH_TID_FIRST(atid);
3202 ATH_TID_REMOVE(atid, bf, bf_list);
3203
3204 /*
3205 * Ensure it's definitely treated as a non-AMPDU
3206 * frame - this information may have been left
3207 * over from a previous attempt.
3208 */
3209 bf->bf_state.bfs_aggr = 0;
3210 bf->bf_state.bfs_nframes = 1;
3211
3212 /* Queue to the hardware */
3213 ath_tx_xmit_aggr(sc, an, txq, bf);
3214 DPRINTF(sc, ATH_DEBUG_SW_TX,
3215 "%s: xmit_aggr\n",
3216 __func__);
3217 } else {
3218 DPRINTF(sc, ATH_DEBUG_SW_TX,
3219 "%s: ampdu; swq'ing\n",
3220 __func__);
3221
3222 ath_tx_tid_sched(sc, atid);
3223 }
3224 /*
3225 * If we're not doing A-MPDU, be prepared to direct dispatch
3226 * up to both limits if possible. This particular corner
3227 * case may end up with packet starvation between aggregate
3228 * traffic and non-aggregate traffic: we want to ensure
3229 * that non-aggregate stations get a few frames queued to the
3230 * hardware before the aggregate station(s) get their chance.
3231 *
3232 * So if you only ever see a couple of frames direct dispatched
3233 * to the hardware from a non-AMPDU client, check both here
3234 * and in the software queue dispatcher to ensure that those
3235 * non-AMPDU stations get a fair chance to transmit.
3236 */
3237 /* XXX TXQ locking */
3238 } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3239 (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3240 /* AMPDU not running, attempt direct dispatch */
3241 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);
3242 /* See if clrdmask needs to be set */
3243 ath_tx_update_clrdmask(sc, atid, bf);
3244
3245 /*
3246 * Update the current leak count if
3247 * we're leaking frames; and set the
3248 * MORE flag as appropriate.
3249 */
3250 ath_tx_leak_count_update(sc, atid, bf);
3251
3252 /*
3253 * Dispatch the frame.
3254 */
3255 ath_tx_xmit_normal(sc, txq, bf);
3256 } else {
3257 /* Busy; queue */
3258 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__);
3259 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3260 ath_tx_tid_sched(sc, atid);
3261 }
3262 }
3263
3264 /*
3265 * Only set the clrdmask bit if none of the nodes are currently
3266 * filtered.
3267 *
3268 * XXX TODO: go through all the callers and check to see
3269 * which are being called in the context of looping over all
3270 * TIDs (eg, if all tids are being paused, resumed, etc.)
3271 * That'll avoid O(n^2) complexity here.
3272 */
3273 static void
3274 ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an)
3275 {
3276 int i;
3277
3278 ATH_TX_LOCK_ASSERT(sc);
3279
3280 for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3281 if (an->an_tid[i].isfiltered == 1)
3282 return;
3283 }
3284 an->clrdmask = 1;
3285 }
3286
3287 /*
3288 * Configure the per-TID node state.
3289 *
3290 * This likely belongs in if_ath_node.c but I can't think of anywhere
3291 * else to put it just yet.
3292 *
3293 * This sets up the SLISTs and the mutex as appropriate.
3294 */
3295 void
3296 ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an)
3297 {
3298 int i, j;
3299 struct ath_tid *atid;
3300
3301 for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3302 atid = &an->an_tid[i];
3303
3304 /* XXX now with this bzero(), is the field 0'ing needed? */
3305 bzero(atid, sizeof(*atid));
3306
3307 TAILQ_INIT(&atid->tid_q);
3308 TAILQ_INIT(&atid->filtq.tid_q);
3309 atid->tid = i;
3310 atid->an = an;
3311 for (j = 0; j < ATH_TID_MAX_BUFS; j++)
3312 atid->tx_buf[j] = NULL;
3313 atid->baw_head = atid->baw_tail = 0;
3314 atid->paused = 0;
3315 atid->sched = 0;
3316 atid->hwq_depth = 0;
3317 atid->cleanup_inprogress = 0;
3318 if (i == IEEE80211_NONQOS_TID)
3319 atid->ac = ATH_NONQOS_TID_AC;
3320 else
3321 atid->ac = TID_TO_WME_AC(i);
3322 }
3323 an->clrdmask = 1; /* Always start by setting this bit */
3324 }
3325
3326 /*
3327 * Pause the current TID. This stops packets from being transmitted
3328 * on it.
3329 *
3330 * Since this is also called from upper layers as well as the driver,
3331 * it will get the TID lock.
3332 */
3333 static void
3334 ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
3335 {
3336
3337 ATH_TX_LOCK_ASSERT(sc);
3338 tid->paused++;
3339 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, paused = %d\n",
3340 __func__,
3341 tid->an->an_node.ni_macaddr, ":",
3342 tid->tid,
3343 tid->paused);
3344 }
3345
3346 /*
3347 * Unpause the current TID, and schedule it if needed.
3348 */
3349 static void
3350 ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)
3351 {
3352 ATH_TX_LOCK_ASSERT(sc);
3353
3354 /*
3355 * There's some odd places where ath_tx_tid_resume() is called
3356 * when it shouldn't be; this works around that particular issue
3357 * until it's actually resolved.
3358 */
3359 if (tid->paused == 0) {
3360 device_printf(sc->sc_dev,
3361 "%s: [%6D]: tid=%d, paused=0?\n",
3362 __func__,
3363 tid->an->an_node.ni_macaddr, ":",
3364 tid->tid);
3365 } else {
3366 tid->paused--;
3367 }
3368
3369 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3370 "%s: [%6D]: tid=%d, unpaused = %d\n",
3371 __func__,
3372 tid->an->an_node.ni_macaddr, ":",
3373 tid->tid,
3374 tid->paused);
3375
3376 if (tid->paused)
3377 return;
3378
3379 /*
3380 * Override the clrdmask configuration for the next frame
3381 * from this TID, just to get the ball rolling.
3382 */
3383 ath_tx_set_clrdmask(sc, tid->an);
3384
3385 if (tid->axq_depth == 0)
3386 return;
3387
3388 /* XXX isfiltered shouldn't ever be 0 at this point */
3389 if (tid->isfiltered == 1) {
3390 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n",
3391 __func__);
3392 return;
3393 }
3394
3395 ath_tx_tid_sched(sc, tid);
3396
3397 /*
3398 * Queue the software TX scheduler.
3399 */
3400 ath_tx_swq_kick(sc);
3401 }
3402
3403 /*
3404 * Add the given ath_buf to the TID filtered frame list.
3405 * This requires the TID be filtered.
3406 */
3407 static void
3408 ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid,
3409 struct ath_buf *bf)
3410 {
3411
3412 ATH_TX_LOCK_ASSERT(sc);
3413
3414 if (!tid->isfiltered)
3415 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n",
3416 __func__);
3417
3418 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf);
3419
3420 /* Set the retry bit and bump the retry counter */
3421 ath_tx_set_retry(sc, bf);
3422 sc->sc_stats.ast_tx_swfiltered++;
3423
3424 ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list);
3425 }
3426
3427 /*
3428 * Handle a completed filtered frame from the given TID.
3429 * This just enables/pauses the filtered frame state if required
3430 * and appends the filtered frame to the filtered queue.
3431 */
3432 static void
3433 ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid,
3434 struct ath_buf *bf)
3435 {
3436
3437 ATH_TX_LOCK_ASSERT(sc);
3438
3439 if (! tid->isfiltered) {
3440 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n",
3441 __func__, tid->tid);
3442 tid->isfiltered = 1;
3443 ath_tx_tid_pause(sc, tid);
3444 }
3445
3446 /* Add the frame to the filter queue */
3447 ath_tx_tid_filt_addbuf(sc, tid, bf);
3448 }
3449
3450 /*
3451 * Complete the filtered frame TX completion.
3452 *
3453 * If there are no more frames in the hardware queue, unpause/unfilter
3454 * the TID if applicable. Otherwise we will wait for a node PS transition
3455 * to unfilter.
3456 */
3457 static void
3458 ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid)
3459 {
3460 struct ath_buf *bf;
3461 int do_resume = 0;
3462
3463 ATH_TX_LOCK_ASSERT(sc);
3464
3465 if (tid->hwq_depth != 0)
3466 return;
3467
3468 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n",
3469 __func__, tid->tid);
3470 if (tid->isfiltered == 1) {
3471 tid->isfiltered = 0;
3472 do_resume = 1;
3473 }
3474
3475 /* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */
3476 ath_tx_set_clrdmask(sc, tid->an);
3477
3478 /* XXX this is really quite inefficient */
3479 while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) {
3480 ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3481 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3482 }
3483
3484 /* And only resume if we had paused before */
3485 if (do_resume)
3486 ath_tx_tid_resume(sc, tid);
3487 }
3488
3489 /*
3490 * Called when a single (aggregate or otherwise) frame is completed.
3491 *
3492 * Returns 0 if the buffer could be added to the filtered list
3493 * (cloned or otherwise), 1 if the buffer couldn't be added to the
3494 * filtered list (failed clone; expired retry) and the caller should
3495 * free it and handle it like a failure (eg by sending a BAR.)
3496 *
3497 * Since the buffer may be cloned, bf must not be touched after this
3498 * call if the return value is 0.
3499 */
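/*
 * A rough sketch of how a caller uses this (the unaggregated
 * completion path later in this file follows this shape):
 *
 *	freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
 *	if (freeframe) {
 *		... remove bf from the BAW, possibly suspend for a BAR ...
 *		ath_tx_default_comp(sc, bf, fail);
 *	}
 *	(if freeframe == 0, bf belongs to the filtered list - don't touch it)
 */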
3500 static int
3501 ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid,
3502 struct ath_buf *bf)
3503 {
3504 struct ath_buf *nbf;
3505 int retval;
3506
3507 ATH_TX_LOCK_ASSERT(sc);
3508
3509 /*
3510 * Don't allow a filtered frame to live forever.
3511 */
3512 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3513 sc->sc_stats.ast_tx_swretrymax++;
3514 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3515 "%s: bf=%p, seqno=%d, exceeded retries\n",
3516 __func__,
3517 bf,
3518 SEQNO(bf->bf_state.bfs_seqno));
3519 retval = 1; /* error */
3520 goto finish;
3521 }
3522
3523 /*
3524 * A busy buffer can't be added to the retry list.
3525 * It needs to be cloned.
3526 */
3527 if (bf->bf_flags & ATH_BUF_BUSY) {
3528 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3529 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3530 "%s: busy buffer clone: %p -> %p\n",
3531 __func__, bf, nbf);
3532 } else {
3533 nbf = bf;
3534 }
3535
3536 if (nbf == NULL) {
3537 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3538 "%s: busy buffer couldn't be cloned (%p)!\n",
3539 __func__, bf);
3540 retval = 1; /* error */
3541 } else {
3542 ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3543 retval = 0; /* ok */
3544 }
3545 finish:
3546 ath_tx_tid_filt_comp_complete(sc, tid);
3547
3548 return (retval);
3549 }
3550
3551 static void
3552 ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid,
3553 struct ath_buf *bf_first, ath_bufhead *bf_q)
3554 {
3555 struct ath_buf *bf, *bf_next, *nbf;
3556
3557 ATH_TX_LOCK_ASSERT(sc);
3558
3559 bf = bf_first;
3560 while (bf) {
3561 bf_next = bf->bf_next;
3562 bf->bf_next = NULL; /* Remove it from the aggr list */
3563
3564 /*
3565 * Don't allow a filtered frame to live forever.
3566 */
3567 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3568 sc->sc_stats.ast_tx_swretrymax++;
3569 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3570 "%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n",
3571 __func__,
3572 tid->tid,
3573 bf,
3574 SEQNO(bf->bf_state.bfs_seqno));
3575 TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3576 goto next;
3577 }
3578
3579 if (bf->bf_flags & ATH_BUF_BUSY) {
3580 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3581 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3582 "%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n",
3583 __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno));
3584 } else {
3585 nbf = bf;
3586 }
3587
3588 /*
3589 * If the buffer couldn't be cloned, add it to bf_q;
3590 * the caller will free the buffer(s) as required.
3591 */
3592 if (nbf == NULL) {
3593 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3594 "%s: tid=%d, buffer couldn't be cloned! (%p) seqno=%d\n",
3595 __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno));
3596 TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3597 } else {
3598 ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3599 }
3600 next:
3601 bf = bf_next;
3602 }
3603
3604 ath_tx_tid_filt_comp_complete(sc, tid);
3605 }
3606
3607 /*
3608 * Suspend the queue because we need to TX a BAR.
3609 */
3610 static void
3611 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid)
3612 {
3613
3614 ATH_TX_LOCK_ASSERT(sc);
3615
3616 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3617 "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n",
3618 __func__,
3619 tid->tid,
3620 tid->bar_wait,
3621 tid->bar_tx);
3622
3623 /* We shouldn't be called when bar_tx is 1 */
3624 if (tid->bar_tx) {
3625 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3626 "%s: bar_tx is 1?!\n", __func__);
3627 }
3628
3629 /* If we've already been called, just be patient. */
3630 if (tid->bar_wait)
3631 return;
3632
3633 /* Wait! */
3634 tid->bar_wait = 1;
3635
3636 /* Only one pause, no matter how many frames fail */
3637 ath_tx_tid_pause(sc, tid);
3638 }
3639
3640 /*
3641 * We've finished with BAR handling - either we succeeded or
3642 * failed. Either way, unsuspend TX.
3643 */
3644 static void
3645 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid)
3646 {
3647
3648 ATH_TX_LOCK_ASSERT(sc);
3649
3650 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3651 "%s: %6D: TID=%d, called\n",
3652 __func__,
3653 tid->an->an_node.ni_macaddr,
3654 ":",
3655 tid->tid);
3656
3657 if (tid->bar_tx == 0 || tid->bar_wait == 0) {
3658 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3659 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3660 __func__, tid->an->an_node.ni_macaddr, ":",
3661 tid->tid, tid->bar_tx, tid->bar_wait);
3662 }
3663
3664 tid->bar_tx = tid->bar_wait = 0;
3665 ath_tx_tid_resume(sc, tid);
3666 }
3667
3668 /*
3669 * Return whether we're ready to TX a BAR frame.
3670 *
3671 * Requires the TID lock be held.
3672 */
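/*
 * A sketch of the typical pairing used by the completion paths in
 * this file:
 *
 *	ath_tx_tid_bar_suspend(sc, atid);
 *	if (ath_tx_tid_bar_tx_ready(sc, atid))
 *		ath_tx_tid_bar_tx(sc, atid);
 */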
3673 static int
3674 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid)
3675 {
3676
3677 ATH_TX_LOCK_ASSERT(sc);
3678
3679 if (tid->bar_wait == 0 || tid->hwq_depth > 0)
3680 return (0);
3681
3682 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3683 "%s: %6D: TID=%d, bar ready\n",
3684 __func__,
3685 tid->an->an_node.ni_macaddr,
3686 ":",
3687 tid->tid);
3688
3689 return (1);
3690 }
3691
3692 /*
3693 * Check whether the current TID is ready to have a BAR
3694 * TXed and if so, do the TX.
3695 *
3696 * Since the TID/TXQ lock can't be held during a call to
3697 * ieee80211_send_bar(), we have to do the dirty thing of unlocking it,
3698 * sending the BAR and locking it again.
3699 *
3700 * Eventually, the code to send the BAR should be broken out
3701 * from this routine so the lock doesn't have to be reacquired
3702 * just to be immediately dropped by the caller.
3703 */
3704 static void
3705 ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid)
3706 {
3707 struct ieee80211_tx_ampdu *tap;
3708
3709 ATH_TX_LOCK_ASSERT(sc);
3710
3711 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3712 "%s: %6D: TID=%d, called\n",
3713 __func__,
3714 tid->an->an_node.ni_macaddr,
3715 ":",
3716 tid->tid);
3717
3718 tap = ath_tx_get_tx_tid(tid->an, tid->tid);
3719
3720 /*
3721 * This is an error condition!
3722 */
3723 if (tid->bar_wait == 0 || tid->bar_tx == 1) {
3724 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3725 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3726 __func__, tid->an->an_node.ni_macaddr, ":",
3727 tid->tid, tid->bar_tx, tid->bar_wait);
3728 return;
3729 }
3730
3731 /* Don't do anything if we still have pending frames */
3732 if (tid->hwq_depth > 0) {
3733 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3734 "%s: %6D: TID=%d, hwq_depth=%d, waiting\n",
3735 __func__,
3736 tid->an->an_node.ni_macaddr,
3737 ":",
3738 tid->tid,
3739 tid->hwq_depth);
3740 return;
3741 }
3742
3743 /* We're now about to TX */
3744 tid->bar_tx = 1;
3745
3746 /*
3747 * Override the clrdmask configuration for the next frame,
3748 * just to get the ball rolling.
3749 */
3750 ath_tx_set_clrdmask(sc, tid->an);
3751
3752 /*
3753 * Calculate new BAW left edge, now that all frames have either
3754 * succeeded or failed.
3755 *
3756 * XXX verify this is _actually_ the valid value to begin at!
3757 */
3758 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3759 "%s: %6D: TID=%d, new BAW left edge=%d\n",
3760 __func__,
3761 tid->an->an_node.ni_macaddr,
3762 ":",
3763 tid->tid,
3764 tap->txa_start);
3765
3766 /* Try sending the BAR frame */
3767 /* We can't hold the lock here! */
3768
3769 ATH_TX_UNLOCK(sc);
3770 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) {
3771 /* Success? Now we wait for notification that it's done */
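/*
 * The TID stays suspended until the BAR completion (or timeout)
 * is reported back to the driver, which then calls
 * ath_tx_tid_bar_unsuspend().
 */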
3772 ATH_TX_LOCK(sc);
3773 return;
3774 }
3775
3776 /* Failure? For now, warn loudly and continue */
3777 ATH_TX_LOCK(sc);
3778 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3779 "%s: %6D: TID=%d, failed to TX BAR, continue!\n",
3780 __func__, tid->an->an_node.ni_macaddr, ":",
3781 tid->tid);
3782 ath_tx_tid_bar_unsuspend(sc, tid);
3783 }
3784
3785 static void
3786 ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an,
3787 struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)
3788 {
3789
3790 ATH_TX_LOCK_ASSERT(sc);
3791
3792 /*
3793 * If the current TID is running AMPDU, update
3794 * the BAW.
3795 */
3796 if (ath_tx_ampdu_running(sc, an, tid->tid) &&
3797 bf->bf_state.bfs_dobaw) {
3798 /*
3799 * Only remove the frame from the BAW if it's
3800 * been transmitted at least once; this means
3801 * the frame was in the BAW to begin with.
3802 */
3803 if (bf->bf_state.bfs_retries > 0) {
3804 ath_tx_update_baw(sc, an, tid, bf);
3805 bf->bf_state.bfs_dobaw = 0;
3806 }
3807 #if 0
3808 /*
3809 * This has become a non-fatal error now
3810 */
3811 if (! bf->bf_state.bfs_addedbaw)
3812 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
3813 "%s: wasn't added: seqno %d\n",
3814 __func__, SEQNO(bf->bf_state.bfs_seqno));
3815 #endif
3816 }
3817
3818 /* Strip it out of an aggregate list if it was in one */
3819 bf->bf_next = NULL;
3820
3821 /* Insert on the free queue to be freed by the caller */
3822 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
3823 }
3824
3825 static void
3826 ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,
3827 const char *pfx, struct ath_tid *tid, struct ath_buf *bf)
3828 {
3829 struct ieee80211_node *ni = &an->an_node;
3830 struct ath_txq *txq;
3831 struct ieee80211_tx_ampdu *tap;
3832
3833 txq = sc->sc_ac2q[tid->ac];
3834 tap = ath_tx_get_tx_tid(an, tid->tid);
3835
3836 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3837 "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, "
3838 "seqno=%d, retry=%d\n",
3839 __func__,
3840 pfx,
3841 ni->ni_macaddr,
3842 ":",
3843 bf,
3844 bf->bf_state.bfs_addedbaw,
3845 bf->bf_state.bfs_dobaw,
3846 SEQNO(bf->bf_state.bfs_seqno),
3847 bf->bf_state.bfs_retries);
3848 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3849 "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3850 __func__,
3851 pfx,
3852 ni->ni_macaddr,
3853 ":",
3854 bf,
3855 txq->axq_qnum,
3856 txq->axq_depth,
3857 txq->axq_aggr_depth);
3858 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3859 "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, "
3860 "isfiltered=%d\n",
3861 __func__,
3862 pfx,
3863 ni->ni_macaddr,
3864 ":",
3865 bf,
3866 tid->axq_depth,
3867 tid->hwq_depth,
3868 tid->bar_wait,
3869 tid->isfiltered);
3870 DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3871 "%s: %s: %6D: tid %d: "
3872 "sched=%d, paused=%d, "
3873 "incomp=%d, baw_head=%d, "
3874 "baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
3875 __func__,
3876 pfx,
3877 ni->ni_macaddr,
3878 ":",
3879 tid->tid,
3880 tid->sched, tid->paused,
3881 tid->incomp, tid->baw_head,
3882 tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
3883 ni->ni_txseqs[tid->tid]);
3884
3885 /* XXX Dump the frame, see what it is? */
3886 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
3887 ieee80211_dump_pkt(ni->ni_ic,
3888 mtod(bf->bf_m, const uint8_t *),
3889 bf->bf_m->m_len, 0, -1);
3890 }
3891
3892 /*
3893 * Free any packets currently pending in the software TX queue.
3894 *
3895 * This will be called when a node is being deleted.
3896 *
3897 * It can also be called on an active node during an interface
3898 * reset or state transition.
3899 *
3900 * (From Linux/reference):
3901 *
3902 * TODO: For frame(s) that are in the retry state, we will reuse the
3903 * sequence number(s) without setting the retry bit. The
3904 * alternative is to give up on these and BAR the receiver's window
3905 * forward.
3906 */
3907 static void
3908 ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,
3909 struct ath_tid *tid, ath_bufhead *bf_cq)
3910 {
3911 struct ath_buf *bf;
3912 struct ieee80211_tx_ampdu *tap;
3913 struct ieee80211_node *ni = &an->an_node;
3914 int t;
3915
3916 tap = ath_tx_get_tx_tid(an, tid->tid);
3917
3918 ATH_TX_LOCK_ASSERT(sc);
3919
3920 /* Walk the queue, free frames */
3921 t = 0;
3922 for (;;) {
3923 bf = ATH_TID_FIRST(tid);
3924 if (bf == NULL) {
3925 break;
3926 }
3927
3928 if (t == 0) {
3929 ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
3930 // t = 1;
3931 }
3932
3933 ATH_TID_REMOVE(tid, bf, bf_list);
3934 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3935 }
3936
3937 /* And now, drain the filtered frame queue */
3938 t = 0;
3939 for (;;) {
3940 bf = ATH_TID_FILT_FIRST(tid);
3941 if (bf == NULL)
3942 break;
3943
3944 if (t == 0) {
3945 ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
3946 // t = 1;
3947 }
3948
3949 ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3950 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3951 }
3952
3953 /*
3954 * Override the clrdmask configuration for the next frame
3955 * in case there is some future transmission, just to get
3956 * the ball rolling.
3957 *
3958 * This won't hurt things if the TID is about to be freed.
3959 */
3960 ath_tx_set_clrdmask(sc, tid->an);
3961
3962 /*
3963 * Now that it's completed, grab the TID lock and update
3964 * the sequence number and BAW window.
3965 * Because sequence numbers have been assigned to frames
3966 * that haven't been sent yet, it's entirely possible
3967 * we'll be called with some pending frames that have not
3968 * been transmitted.
3969 *
3970 * The cleaner solution is to do the sequence number allocation
3971 * when the packet is first transmitted - and thus the "retries"
3972 * check above would be enough to update the BAW/seqno.
3973 */
3974
3975 /* But don't do it for non-QoS TIDs */
3976 if (tap) {
3977 #if 1
3978 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3979 "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n",
3980 __func__,
3981 ni->ni_macaddr,
3982 ":",
3983 an,
3984 tid->tid,
3985 tap->txa_start);
3986 #endif
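/*
 * Snap the node's next sequence number to the BAW left edge and
 * mark the BAW tracking as empty.
 */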
3987 ni->ni_txseqs[tid->tid] = tap->txa_start;
3988 tid->baw_tail = tid->baw_head;
3989 }
3990 }
3991
3992 /*
3993 * Reset the TID state. This must be only called once the node has
3994 * had its frames flushed from this TID, to ensure that no other
3995 * pause / unpause logic can kick in.
3996 */
3997 static void
3998 ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
3999 {
4000
4001 #if 0
4002 tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
4003 tid->paused = tid->sched = tid->addba_tx_pending = 0;
4004 tid->incomp = tid->cleanup_inprogress = 0;
4005 #endif
4006
4007 /*
4008 * If we have a bar_wait set, we need to unpause the TID
4009 * here. Otherwise once cleanup has finished, the TID won't
4010 * have the right paused counter.
4011 *
4012 * XXX I'm not going through resume here - I don't want the
4013 * node to be rescheduled just yet. This, however, should be
4014 * methodized!
4015 */
4016 if (tid->bar_wait) {
4017 if (tid->paused > 0) {
4018 tid->paused--;
4019 }
4020 }
4021
4022 /*
4023 * XXX same with a currently filtered TID.
4024 *
4025 * Since this is being called during a flush, we assume that
4026 * the filtered frame list is actually empty.
4027 *
4028 * XXX TODO: add in a check to ensure that the filtered queue
4029 * depth is actually 0!
4030 */
4031 if (tid->isfiltered) {
4032 if (tid->paused > 0) {
4033 tid->paused--;
4034 }
4035 }
4036
4037 /*
4038 * Clear BAR, filtered frames, scheduled and ADDBA pending.
4039 * The TID may be going through cleanup from the last association
4040 * where things in the BAW are still in the hardware queue.
4041 */
4042 tid->bar_wait = 0;
4043 tid->bar_tx = 0;
4044 tid->isfiltered = 0;
4045 tid->sched = 0;
4046 tid->addba_tx_pending = 0;
4047
4048 /*
4049 * XXX TODO: it may just be enough to walk the HWQs and mark
4050 * frames for that node as non-aggregate; or mark the ath_node
4051 * with something that indicates that aggregation is no longer
4052 * occurring. Then we can just toss the BAW complaints and
4053 * do a complete hard reset of state here - no pause, no
4054 * complete counter, etc.
4055 */
4056
4057 }
4058
4059 /*
4060 * Flush all software queued packets for the given node.
4061 *
4062 * This occurs when a completion handler frees the last buffer
4063 * for a node, and the node is thus freed. This causes the node
4064 * to be cleaned up, which ends up calling ath_tx_node_flush.
4065 */
4066 void
4067 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
4068 {
4069 int tid;
4070 ath_bufhead bf_cq;
4071 struct ath_buf *bf;
4072
4073 TAILQ_INIT(&bf_cq);
4074
4075 ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",
4076 &an->an_node);
4077
4078 ATH_TX_LOCK(sc);
4079 DPRINTF(sc, ATH_DEBUG_NODE,
4080 "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, "
4081 "swq_depth=%d, clrdmask=%d, leak_count=%d\n",
4082 __func__,
4083 an->an_node.ni_macaddr,
4084 ":",
4085 an->an_is_powersave,
4086 an->an_stack_psq,
4087 an->an_tim_set,
4088 an->an_swq_depth,
4089 an->clrdmask,
4090 an->an_leak_count);
4091
4092 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
4093 struct ath_tid *atid = &an->an_tid[tid];
4094
4095 /* Free packets */
4096 ath_tx_tid_drain(sc, an, atid, &bf_cq);
4097
4098 /* Remove this tid from the list of active tids */
4099 ath_tx_tid_unsched(sc, atid);
4100
4101 /* Reset the per-TID pause, BAR, etc state */
4102 ath_tx_tid_reset(sc, atid);
4103 }
4104
4105 /*
4106 * Clear global leak count
4107 */
4108 an->an_leak_count = 0;
4109 ATH_TX_UNLOCK(sc);
4110
4111 /* Handle completed frames */
4112 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4113 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4114 ath_tx_default_comp(sc, bf, 0);
4115 }
4116 }
4117
4118 /*
4119 * Drain all the software TXQs currently with traffic queued.
4120 */
4121 void
4122 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4123 {
4124 struct ath_tid *tid;
4125 ath_bufhead bf_cq;
4126 struct ath_buf *bf;
4127
4128 TAILQ_INIT(&bf_cq);
4129 ATH_TX_LOCK(sc);
4130
4131 /*
4132 * Iterate over all active tids for the given txq,
4133 * flushing and unsched'ing them
4134 */
4135 while (! TAILQ_EMPTY(&txq->axq_tidq)) {
4136 tid = TAILQ_FIRST(&txq->axq_tidq);
4137 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
4138 ath_tx_tid_unsched(sc, tid);
4139 }
4140
4141 ATH_TX_UNLOCK(sc);
4142
4143 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4144 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4145 ath_tx_default_comp(sc, bf, 0);
4146 }
4147 }
4148
4149 /*
4150 * Handle completion of non-aggregate session frames.
4151 *
4152 * This (currently) doesn't implement software retransmission of
4153 * non-aggregate frames!
4154 *
4155 * Software retransmission of non-aggregate frames needs to obey
4156 * the strict sequence number ordering, and drop any frames that
4157 * will fail this.
4158 *
4159 * For now, filtered frames and frame retransmission will cause
4160 * all kinds of issues. So we don't support them.
4161 *
4162 * So anyone queuing frames via ath_tx_normal_xmit() or
4163 * ath_tx_hw_queue_norm() must override and set CLRDMASK.
4164 */
4165 void
4166 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4167 {
4168 struct ieee80211_node *ni = bf->bf_node;
4169 struct ath_node *an = ATH_NODE(ni);
4170 int tid = bf->bf_state.bfs_tid;
4171 struct ath_tid *atid = &an->an_tid[tid];
4172 struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4173
4174 /* The TID state is protected behind the TXQ lock */
4175 ATH_TX_LOCK(sc);
4176
4177 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
4178 __func__, bf, fail, atid->hwq_depth - 1);
4179
4180 atid->hwq_depth--;
4181
4182 #if 0
4183 /*
4184 * If the frame was filtered, stick it on the filter frame
4185 * queue and complain about it. It shouldn't happen!
4186 */
4187 if ((ts->ts_status & HAL_TXERR_FILT) ||
4188 (ts->ts_status != 0 && atid->isfiltered)) {
4189 DPRINTF(sc, ATH_DEBUG_SW_TX,
4190 "%s: isfiltered=%d, ts_status=%d: huh?\n",
4191 __func__,
4192 atid->isfiltered,
4193 ts->ts_status);
4194 ath_tx_tid_filt_comp_buf(sc, atid, bf);
4195 }
4196 #endif
4197 if (atid->isfiltered)
4198 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__);
4199 if (atid->hwq_depth < 0)
4200 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
4201 __func__, atid->hwq_depth);
4202
4203 /* If the TID is being cleaned up, track things */
4204 /* XXX refactor! */
4205 if (atid->cleanup_inprogress) {
4206 atid->incomp--;
4207 if (atid->incomp == 0) {
4208 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4209 "%s: TID %d: cleaned up! resume!\n",
4210 __func__, tid);
4211 atid->cleanup_inprogress = 0;
4212 ath_tx_tid_resume(sc, atid);
4213 }
4214 }
4215
4216 /*
4217 * If the queue is filtered, potentially mark it as complete
4218 * and reschedule it as needed.
4219 *
4220 * This is required as there may be a subsequent TX descriptor
4221 * for this end-node that has CLRDMASK set, so it's quite possible
4222 * that a filtered frame will be followed by a non-filtered
4223 * (complete or otherwise) frame.
4224 *
4225 * XXX should we do this before we complete the frame?
4226 */
4227 if (atid->isfiltered)
4228 ath_tx_tid_filt_comp_complete(sc, atid);
4229 ATH_TX_UNLOCK(sc);
4230
4231 /*
4232 * punt to rate control if we're not being cleaned up
4233 * during a hw queue drain and the frame wanted an ACK.
4234 */
4235 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
4236 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
4237 ts,
4238 bf->bf_state.bfs_pktlen,
4239 bf->bf_state.bfs_pktlen,
4240 1, (ts->ts_status == 0) ? 0 : 1);
4241
4242 ath_tx_default_comp(sc, bf, fail);
4243 }
4244
4245 /*
4246 * Handle cleanup of aggregate session packets that aren't
4247 * an A-MPDU.
4248 *
4249 * There's no need to update the BAW here - the session is being
4250 * torn down.
4251 */
4252 static void
4253 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4254 {
4255 struct ieee80211_node *ni = bf->bf_node;
4256 struct ath_node *an = ATH_NODE(ni);
4257 int tid = bf->bf_state.bfs_tid;
4258 struct ath_tid *atid = &an->an_tid[tid];
4259
4260 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",
4261 __func__, tid, atid->incomp);
4262
4263 ATH_TX_LOCK(sc);
4264 atid->incomp--;
4265
4266 /* XXX refactor! */
4267 if (bf->bf_state.bfs_dobaw) {
4268 ath_tx_update_baw(sc, an, atid, bf);
4269 if (!bf->bf_state.bfs_addedbaw)
4270 DPRINTF(sc, ATH_DEBUG_SW_TX,
4271 "%s: wasn't added: seqno %d\n",
4272 __func__, SEQNO(bf->bf_state.bfs_seqno));
4273 }
4274
4275 if (atid->incomp == 0) {
4276 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4277 "%s: TID %d: cleaned up! resume!\n",
4278 __func__, tid);
4279 atid->cleanup_inprogress = 0;
4280 ath_tx_tid_resume(sc, atid);
4281 }
4282 ATH_TX_UNLOCK(sc);
4283
4284 ath_tx_default_comp(sc, bf, 0);
4285 }
4286
4287 /*
4288 * This as it currently stands is a bit dumb. Ideally we'd just
4289 * fail the frame the normal way and have it permanently fail
4290 * via the normal aggregate completion path.
4291 */
4292 static void
4293 ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an,
4294 int tid, struct ath_buf *bf_head, ath_bufhead *bf_cq)
4295 {
4296 struct ath_tid *atid = &an->an_tid[tid];
4297 struct ath_buf *bf, *bf_next;
4298
4299 ATH_TX_LOCK_ASSERT(sc);
4300
4301 /*
4302 * Remove this frame from the queue.
4303 */
4304 ATH_TID_REMOVE(atid, bf_head, bf_list);
4305
4306 /*
4307 * Loop over all the frames in the aggregate.
4308 */
4309 bf = bf_head;
4310 while (bf != NULL) {
4311 bf_next = bf->bf_next; /* next aggregate frame, or NULL */
4312
4313 /*
4314 * If it's been added to the BAW we need to kick
4315 * it out of the BAW before we continue.
4316 *
4317 * XXX if it's an aggregate, assert that it's in the
4318 * BAW - we shouldn't have it be in an aggregate
4319 * otherwise!
4320 */
4321 if (bf->bf_state.bfs_addedbaw) {
4322 ath_tx_update_baw(sc, an, atid, bf);
4323 bf->bf_state.bfs_dobaw = 0;
4324 }
4325
4326 /*
4327 * Give it the default completion handler.
4328 */
4329 bf->bf_comp = ath_tx_normal_comp;
4330 bf->bf_next = NULL;
4331
4332 /*
4333 * Add it to the list to free.
4334 */
4335 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
4336
4337 /*
4338 * Now advance to the next frame in the aggregate.
4339 */
4340 bf = bf_next;
4341 }
4342 }
4343
4344 /*
4345 * Performs transmit side cleanup when TID changes from aggregated to
4346 * unaggregated and during reassociation.
4347 *
4348 * For now, this just tosses everything from the TID software queue
4349 * whether or not it has been retried and marks the TID as
4350 * pending completion if there's anything for this TID queued to
4351 * the hardware.
4352 *
4353 * The caller is responsible for pausing the TID and unpausing the
4354 * TID if no cleanup was required. Otherwise the cleanup path will
4355 * unpause the TID once the last hardware queued frame is completed.
4356 */
4357 static void
4358 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid,
4359 ath_bufhead *bf_cq)
4360 {
4361 struct ath_tid *atid = &an->an_tid[tid];
4362 struct ath_buf *bf, *bf_next;
4363
4364 ATH_TX_LOCK_ASSERT(sc);
4365
4366 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4367 "%s: TID %d: called; inprogress=%d\n", __func__, tid,
4368 atid->cleanup_inprogress);
4369
4370 /*
4371 * Move the filtered frames to the TX queue, before
4372 * we run off and discard/process things.
4373 */
4374
4375 /* XXX this is really quite inefficient */
4376 while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) {
4377 ATH_TID_FILT_REMOVE(atid, bf, bf_list);
4378 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4379 }
4380
4381 /*
4382 * Update the frames in the software TX queue:
4383 *
4384 * + Discard retry frames in the queue
4385 * + Fix the completion function to be non-aggregate
4386 */
4387 bf = ATH_TID_FIRST(atid);
4388 while (bf) {
4389 /*
4390 * Grab the next frame in the list, we may
4391 * be fiddling with the list.
4392 */
4393 bf_next = TAILQ_NEXT(bf, bf_list);
4394
4395 /*
4396 * Free the frame and all subframes.
4397 */
4398 ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq);
4399
4400 /*
4401 * Next frame!
4402 */
4403 bf = bf_next;
4404 }
4405
4406 /*
4407 * If there's anything in the hardware queue we wait
4408 * for the TID HWQ to empty.
4409 */
4410 if (atid->hwq_depth > 0) {
4411 /*
4412 * XXX how about we kill atid->incomp, and instead
4413 * replace it with a macro that checks that atid->hwq_depth
4414 * is 0?
4415 */
4416 atid->incomp = atid->hwq_depth;
4417 atid->cleanup_inprogress = 1;
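/*
 * The cleanup completion handlers decrement incomp as each
 * hardware-queued frame completes and resume the TID once it
 * reaches zero.
 */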
4418 }
4419
4420 if (atid->cleanup_inprogress)
4421 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4422 "%s: TID %d: cleanup needed: %d packets\n",
4423 __func__, tid, atid->incomp);
4424
4425 /* Owner now must free completed frames */
4426 }
4427
4428 static struct ath_buf *
4429 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
4430 struct ath_tid *tid, struct ath_buf *bf)
4431 {
4432 struct ath_buf *nbf;
4433 int error;
4434
4435 /*
4436 * Clone the buffer. This will handle the dma unmap and
4437 * copy the node reference to the new buffer. If this
4438 * works out, 'bf' will have no DMA mapping, no mbuf
4439 * pointer and no node reference.
4440 */
4441 nbf = ath_buf_clone(sc, bf);
4442
4443 #if 0
4444 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n",
4445 __func__);
4446 #endif
4447
4448 if (nbf == NULL) {
4449 /* Failed to clone */
4450 DPRINTF(sc, ATH_DEBUG_XMIT,
4451 "%s: failed to clone a busy buffer\n",
4452 __func__);
4453 return NULL;
4454 }
4455
4456 /* Setup the dma for the new buffer */
4457 error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
4458 if (error != 0) {
4459 DPRINTF(sc, ATH_DEBUG_XMIT,
4460 "%s: failed to setup dma for clone\n",
4461 __func__);
4462 /*
4463 * Put this at the head of the list, not tail;
4464 * that way it doesn't interfere with the
4465 * busy buffer logic (which uses the tail of
4466 * the list.)
4467 */
4468 ATH_TXBUF_LOCK(sc);
4469 ath_returnbuf_head(sc, nbf);
4470 ATH_TXBUF_UNLOCK(sc);
4471 return NULL;
4472 }
4473
4474 /* Update BAW if required, before we free the original buf */
4475 if (bf->bf_state.bfs_dobaw)
4476 ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);
4477
4478 /* Free original buffer; return new buffer */
4479 ath_freebuf(sc, bf);
4480
4481 return nbf;
4482 }
4483
4484 /*
4485 * Handle retrying an unaggregate frame in an aggregate
4486 * session.
4487 *
4488 * If too many retries occur, pause the TID, wait for
4489 * any further retransmits (as there's no reason why
4490 * non-aggregate frames in an aggregate session are
4491 * transmitted in-order; they just have to be in-BAW)
4492 * and then queue a BAR.
4493 */
4494 static void
4495 ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4496 {
4497 struct ieee80211_node *ni = bf->bf_node;
4498 struct ath_node *an = ATH_NODE(ni);
4499 int tid = bf->bf_state.bfs_tid;
4500 struct ath_tid *atid = &an->an_tid[tid];
4501 struct ieee80211_tx_ampdu *tap;
4502
4503 ATH_TX_LOCK(sc);
4504
4505 tap = ath_tx_get_tx_tid(an, tid);
4506
4507 /*
4508 * If the buffer is marked as busy, we can't directly
4509 * reuse it. Instead, try to clone the buffer.
4510 * If the clone is successful, recycle the old buffer.
4511 * If the clone is unsuccessful, set bfs_retries to max
4512 * to force the next bit of code to free the buffer
4513 * for us.
4514 */
4515 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4516 (bf->bf_flags & ATH_BUF_BUSY)) {
4517 struct ath_buf *nbf;
4518 nbf = ath_tx_retry_clone(sc, an, atid, bf);
4519 if (nbf)
4520 /* bf has been freed at this point */
4521 bf = nbf;
4522 else
4523 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4524 }
4525
4526 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4527 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4528 "%s: exceeded retries; seqno %d\n",
4529 __func__, SEQNO(bf->bf_state.bfs_seqno));
4530 sc->sc_stats.ast_tx_swretrymax++;
4531
4532 /* Update BAW anyway */
4533 if (bf->bf_state.bfs_dobaw) {
4534 ath_tx_update_baw(sc, an, atid, bf);
4535 if (! bf->bf_state.bfs_addedbaw)
4536 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4537 "%s: wasn't added: seqno %d\n",
4538 __func__, SEQNO(bf->bf_state.bfs_seqno));
4539 }
4540 bf->bf_state.bfs_dobaw = 0;
4541
4542 /* Suspend the TX queue and get ready to send the BAR */
4543 ath_tx_tid_bar_suspend(sc, atid);
4544
4545 /* Send the BAR if there are no other frames waiting */
4546 if (ath_tx_tid_bar_tx_ready(sc, atid))
4547 ath_tx_tid_bar_tx(sc, atid);
4548
4549 ATH_TX_UNLOCK(sc);
4550
4551 /* Free buffer, bf is free after this call */
4552 ath_tx_default_comp(sc, bf, 0);
4553 return;
4554 }
4555
4556 /*
4557 * This increments the retry counter as well as
4558 * sets the retry flag in the ath_buf and packet
4559 * body.
4560 */
4561 ath_tx_set_retry(sc, bf);
4562 sc->sc_stats.ast_tx_swretries++;
4563
4564 /*
4565 * Insert this at the head of the queue, so it's
4566 * retried before any current/subsequent frames.
4567 */
4568 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4569 ath_tx_tid_sched(sc, atid);
4570 /* Send the BAR if there are no other frames waiting */
4571 if (ath_tx_tid_bar_tx_ready(sc, atid))
4572 ath_tx_tid_bar_tx(sc, atid);
4573
4574 ATH_TX_UNLOCK(sc);
4575 }
4576
4577 /*
4578 * Common code for aggregate excessive retry/subframe retry.
4579 * If retrying, queues buffers to bf_q. If not, frees the
4580 * buffers.
4581 *
4582 * XXX should unify this with ath_tx_aggr_retry_unaggr()
4583 */
4584 static int
4585 ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,
4586 ath_bufhead *bf_q)
4587 {
4588 struct ieee80211_node *ni = bf->bf_node;
4589 struct ath_node *an = ATH_NODE(ni);
4590 int tid = bf->bf_state.bfs_tid;
4591 struct ath_tid *atid = &an->an_tid[tid];
4592
4593 ATH_TX_LOCK_ASSERT(sc);
4594
4595 /* XXX clr11naggr should be done for all subframes */
4596 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
4597 ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);
4598
4599 /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */
4600
4601 /*
4602 * If the buffer is marked as busy, we can't directly
4603 * reuse it. Instead, try to clone the buffer.
4604 * If the clone is successful, recycle the old buffer.
4605 * If the clone is unsuccessful, set bfs_retries to max
4606 * to force the next bit of code to free the buffer
4607 * for us.
4608 */
4609 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4610 (bf->bf_flags & ATH_BUF_BUSY)) {
4611 struct ath_buf *nbf;
4612 nbf = ath_tx_retry_clone(sc, an, atid, bf);
4613 if (nbf)
4614 /* bf has been freed at this point */
4615 bf = nbf;
4616 else
4617 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4618 }
4619
4620 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4621 sc->sc_stats.ast_tx_swretrymax++;
4622 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4623 "%s: max retries: seqno %d\n",
4624 __func__, SEQNO(bf->bf_state.bfs_seqno));
4625 ath_tx_update_baw(sc, an, atid, bf);
4626 if (!bf->bf_state.bfs_addedbaw)
4627 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4628 "%s: wasn't added: seqno %d\n",
4629 __func__, SEQNO(bf->bf_state.bfs_seqno));
4630 bf->bf_state.bfs_dobaw = 0;
4631 return 1;
4632 }
4633
4634 ath_tx_set_retry(sc, bf);
4635 sc->sc_stats.ast_tx_swretries++;
4636 bf->bf_next = NULL; /* Just to make sure */
4637
4638 /* Clear the aggregate state */
4639 bf->bf_state.bfs_aggr = 0;
4640 bf->bf_state.bfs_ndelim = 0; /* ??? needed? */
4641 bf->bf_state.bfs_nframes = 1;
4642
4643 TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
4644 return 0;
4645 }
4646
4647 /*
4648 * error pkt completion for an aggregate destination
4649 */
4650 static void
4651 ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first,
4652 struct ath_tid *tid)
4653 {
4654 struct ieee80211_node *ni = bf_first->bf_node;
4655 struct ath_node *an = ATH_NODE(ni);
4656 struct ath_buf *bf_next, *bf;
4657 ath_bufhead bf_q;
4658 int drops = 0;
4659 struct ieee80211_tx_ampdu *tap;
4660 ath_bufhead bf_cq;
4661
4662 TAILQ_INIT(&bf_q);
4663 TAILQ_INIT(&bf_cq);
4664
4665 /*
4666 * Update rate control - all frames have failed.
4667 */
4668 ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
4669 &bf_first->bf_status.ds_txstat,
4670 bf_first->bf_state.bfs_al,
4671 bf_first->bf_state.bfs_rc_maxpktlen,
4672 bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);
4673
4674 ATH_TX_LOCK(sc);
4675 tap = ath_tx_get_tx_tid(an, tid->tid);
4676 sc->sc_stats.ast_tx_aggr_failall++;
4677
4678 /* Retry all subframes */
4679 bf = bf_first;
4680 while (bf) {
4681 bf_next = bf->bf_next;
4682 bf->bf_next = NULL; /* Remove it from the aggr list */
4683 sc->sc_stats.ast_tx_aggr_fail++;
4684 if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4685 drops++;
4686 bf->bf_next = NULL;
4687 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4688 }
4689 bf = bf_next;
4690 }
4691
4692 /* Prepend all frames to the beginning of the queue */
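/*
 * Walking bf_q from the tail and inserting at the TID head keeps
 * the retried subframes in their original order.
 */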
4693 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4694 TAILQ_REMOVE(&bf_q, bf, bf_list);
4695 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
4696 }
4697
4698 /*
4699 * Schedule the TID to be re-tried.
4700 */
4701 ath_tx_tid_sched(sc, tid);
4702
4703 /*
4704 * send bar if we dropped any frames
4705 *
4706 * Keep the txq lock held for now, as we need to ensure
4707 * that ni_txseqs[] is consistent (as it's being updated
4708 * in the ifnet TX context or raw TX context.)
4709 */
4710 if (drops) {
4711 /* Suspend the TX queue and get ready to send the BAR */
4712 ath_tx_tid_bar_suspend(sc, tid);
4713 }
4714
4715 /*
4716 * Send BAR if required
4717 */
4718 if (ath_tx_tid_bar_tx_ready(sc, tid))
4719 ath_tx_tid_bar_tx(sc, tid);
4720
4721 ATH_TX_UNLOCK(sc);
4722
4723 /* Complete frames which errored out */
4724 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4725 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4726 ath_tx_default_comp(sc, bf, 0);
4727 }
4728 }
4729
4730 /*
4731 * Handle clean-up of packets from an aggregate list.
4732 *
4733 * There's no need to update the BAW here - the session is being
4734 * torn down.
4735 */
4736 static void
4737 ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
4738 {
4739 struct ath_buf *bf, *bf_next;
4740 struct ieee80211_node *ni = bf_first->bf_node;
4741 struct ath_node *an = ATH_NODE(ni);
4742 int tid = bf_first->bf_state.bfs_tid;
4743 struct ath_tid *atid = &an->an_tid[tid];
4744
4745 ATH_TX_LOCK(sc);
4746
4747 /* update incomp */
4748 atid->incomp--;
4749
4750 /* Update the BAW */
4751 bf = bf_first;
4752 while (bf) {
4753 /* XXX refactor! */
4754 if (bf->bf_state.bfs_dobaw) {
4755 ath_tx_update_baw(sc, an, atid, bf);
4756 if (!bf->bf_state.bfs_addedbaw)
4757 DPRINTF(sc, ATH_DEBUG_SW_TX,
4758 "%s: wasn't added: seqno %d\n",
4759 __func__, SEQNO(bf->bf_state.bfs_seqno));
4760 }
4761 bf = bf->bf_next;
4762 }
4763
4764 if (atid->incomp == 0) {
4765 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4766 "%s: TID %d: cleaned up! resume!\n",
4767 __func__, tid);
4768 atid->cleanup_inprogress = 0;
4769 ath_tx_tid_resume(sc, atid);
4770 }
4771
4772 /* Send BAR if required */
4773 /* XXX why would we send a BAR when transitioning to non-aggregation? */
4774 /*
4775 * XXX TODO: we should likely just tear down the BAR state here,
4776 * rather than sending a BAR.
4777 */
4778 if (ath_tx_tid_bar_tx_ready(sc, atid))
4779 ath_tx_tid_bar_tx(sc, atid);
4780
4781 ATH_TX_UNLOCK(sc);
4782
4783 /* Handle frame completion as individual frames */
4784 bf = bf_first;
4785 while (bf) {
4786 bf_next = bf->bf_next;
4787 bf->bf_next = NULL;
4788 ath_tx_default_comp(sc, bf, 1);
4789 bf = bf_next;
4790 }
4791 }
4792
4793 /*
4794 * Handle completion of a set of aggregate frames.
4795 *
4796 * Note: the completion handler is the last descriptor in the aggregate,
4797 * not the last descriptor in the first frame.
4798 */
4799 static void
4800 ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,
4801 int fail)
4802 {
4803 //struct ath_desc *ds = bf->bf_lastds;
4804 struct ieee80211_node *ni = bf_first->bf_node;
4805 struct ath_node *an = ATH_NODE(ni);
4806 int tid = bf_first->bf_state.bfs_tid;
4807 struct ath_tid *atid = &an->an_tid[tid];
4808 struct ath_tx_status ts;
4809 struct ieee80211_tx_ampdu *tap;
4810 ath_bufhead bf_q;
4811 ath_bufhead bf_cq;
4812 int seq_st, tx_ok;
4813 int hasba, isaggr;
4814 uint32_t ba[2];
4815 struct ath_buf *bf, *bf_next;
4816 int ba_index;
4817 int drops = 0;
4818 int nframes = 0, nbad = 0, nf;
4819 int pktlen;
4820 int agglen, rc_agglen;
4821 /* XXX there's too much on the stack? */
4822 struct ath_rc_series rc[ATH_RC_NUM];
4823 int txseq;
4824
4825 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
4826 __func__, atid->hwq_depth);
4827
4828 /*
4829 * Take a copy; this may be needed -after- bf_first
4830 * has been completed and freed.
4831 */
4832 ts = bf_first->bf_status.ds_txstat;
4833 agglen = bf_first->bf_state.bfs_al;
4834 rc_agglen = bf_first->bf_state.bfs_rc_maxpktlen;
4835
4836 TAILQ_INIT(&bf_q);
4837 TAILQ_INIT(&bf_cq);
4838
4839 /* The TID state is kept behind the TXQ lock */
4840 ATH_TX_LOCK(sc);
4841
4842 atid->hwq_depth--;
4843 if (atid->hwq_depth < 0)
4844 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n",
4845 __func__, atid->hwq_depth);
4846
4847 /*
4848 * If the TID is filtered, handle completing the filter
4849 * transition before potentially kicking it to the cleanup
4850 * function.
4851 *
4852 * XXX this is duplicate work, ew.
4853 */
4854 if (atid->isfiltered)
4855 ath_tx_tid_filt_comp_complete(sc, atid);
4856
4857 /*
4858 * Punt cleanup to the relevant function, not our problem now
4859 */
4860 if (atid->cleanup_inprogress) {
4861 if (atid->isfiltered)
4862 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4863 "%s: isfiltered=1, normal_comp?\n",
4864 __func__);
4865 ATH_TX_UNLOCK(sc);
4866 ath_tx_comp_cleanup_aggr(sc, bf_first);
4867 return;
4868 }
4869
4870 /*
4871 * If the frame is filtered, transition to filtered frame
4872 * mode and add this to the filtered frame list.
4873 *
4874 * XXX TODO: figure out how this interoperates with
4875 * BAR, pause and cleanup states.
4876 */
4877 if ((ts.ts_status & HAL_TXERR_FILT) ||
4878 (ts.ts_status != 0 && atid->isfiltered)) {
4879 if (fail != 0)
4880 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4881 "%s: isfiltered=1, fail=%d\n", __func__, fail);
4882 ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);
4883
4884 /* Remove from BAW */
4885 TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) {
4886 if (bf->bf_state.bfs_addedbaw)
4887 drops++;
4888 if (bf->bf_state.bfs_dobaw) {
4889 ath_tx_update_baw(sc, an, atid, bf);
4890 if (!bf->bf_state.bfs_addedbaw)
4891 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4892 "%s: wasn't added: seqno %d\n",
4893 __func__,
4894 SEQNO(bf->bf_state.bfs_seqno));
4895 }
4896 bf->bf_state.bfs_dobaw = 0;
4897 }
4898 /*
4899 * If any intermediate frames in the BAW were dropped when
4900 * handling filtering things, send a BAR.
4901 */
4902 if (drops)
4903 ath_tx_tid_bar_suspend(sc, atid);
4904
4905 /*
4906 * Finish up by sending a BAR if required and freeing
4907 * the frames outside of the TX lock.
4908 */
4909 goto finish_send_bar;
4910 }
4911
4912 /*
4913 * XXX for now, use the first frame in the aggregate for
4914 * XXX rate control completion; it's at least consistent.
4915 */
4916 pktlen = bf_first->bf_state.bfs_pktlen;
4917
4918 /*
4919 * Handle errors first!
4920 *
4921 * Here, handle _any_ error as an "exceeded retries" error.
4922 * Later on (when filtered frames are to be specially handled)
4923 * it'll have to be expanded.
4924 */
4925 #if 0
4926 if (ts.ts_status & HAL_TXERR_XRETRY) {
4927 #endif
4928 if (ts.ts_status != 0) {
4929 ATH_TX_UNLOCK(sc);
4930 ath_tx_comp_aggr_error(sc, bf_first, atid);
4931 return;
4932 }
4933
4934 tap = ath_tx_get_tx_tid(an, tid);
4935
4936 /*
4937 * extract starting sequence and block-ack bitmap
4938 */
4939 /* XXX endian-ness of seq_st, ba? */
4940 seq_st = ts.ts_seqnum;
4941 hasba = !! (ts.ts_flags & HAL_TX_BA);
4942 tx_ok = (ts.ts_status == 0);
4943 isaggr = bf_first->bf_state.bfs_aggr;
4944 ba[0] = ts.ts_ba_low;
4945 ba[1] = ts.ts_ba_high;
4946
4947 /*
4948 * Copy the TX completion status and the rate control
4949 * series from the first descriptor, as it may be freed
4950 * before the rate control code can get its grubby fingers
4951 * into things.
4952 */
4953 memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));
4954
4955 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4956 "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, "
4957 "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n",
4958 __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
4959 isaggr, seq_st, hasba, ba[0], ba[1]);
4960
4961 /*
4962 * The reference driver doesn't do this; it simply ignores
4963 * this check in its entirety.
4964 *
4965 * I've seen this occur when using iperf to send traffic
4966 * out tid 1 - the aggregate frames are all marked as TID 1,
4967 * but the TXSTATUS has TID=0. So, let's just ignore this
4968 * check.
4969 */
4970 #if 0
4971 /* Occasionally, the MAC sends a tx status for the wrong TID. */
4972 if (tid != ts.ts_tid) {
4973 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n",
4974 __func__, tid, ts.ts_tid);
4975 tx_ok = 0;
4976 }
4977 #endif
4978
4979 /* AR5416 BA bug; this requires an interface reset */
4980 if (isaggr && tx_ok && (! hasba)) {
4981 device_printf(sc->sc_dev,
4982 "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, "
4983 "seq_st=%d\n",
4984 __func__, hasba, tx_ok, isaggr, seq_st);
4985 taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
4986 /* And as we can't really trust the BA here .. */
4987 ba[0] = 0;
4988 ba[1] = 0;
4989 seq_st = 0;
4990 #ifdef ATH_DEBUG
4991 ath_printtxbuf(sc, bf_first,
4992 sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
4993 #endif
4994 }
4995
4996 /*
4997 * Walk the list of frames, figure out which ones were correctly
4998 * sent and which weren't.
4999 */
5000 bf = bf_first;
5001 nf = bf_first->bf_state.bfs_nframes;
5002
5003 /* bf_first is going to be invalid once this list is walked */
5004 bf_first = NULL;
5005
5006 /*
5007 * Walk the list of completed frames and determine
5008 * which need to be completed and which need to be
5009 * retransmitted.
5010 *
5011 * For completed frames, the completion functions need
5012 * to be called at the end of this function as the last
5013 * node reference may free the node.
5014 *
5015 * Finally, since the TXQ lock can't be held during the
5016 * completion callback (to avoid lock recursion),
5017 * the completion calls have to be done outside of the
5018 * lock.
5019 */
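/*
 * ba[] holds the 64-bit block-ack bitmap from the TX status;
 * each subframe's bit is located by its sequence number offset
 * from seq_st (ATH_BA_INDEX) and tested with ATH_BA_ISSET.
 */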
5020 while (bf) {
5021 nframes++;
5022 ba_index = ATH_BA_INDEX(seq_st,
5023 SEQNO(bf->bf_state.bfs_seqno));
5024 bf_next = bf->bf_next;
5025 bf->bf_next = NULL; /* Remove it from the aggr list */
5026
5027 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5028 "%s: checking bf=%p seqno=%d; ack=%d\n",
5029 __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
5030 ATH_BA_ISSET(ba, ba_index));
5031
5032 if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
5033 sc->sc_stats.ast_tx_aggr_ok++;
5034 ath_tx_update_baw(sc, an, atid, bf);
5035 bf->bf_state.bfs_dobaw = 0;
5036 if (!bf->bf_state.bfs_addedbaw)
5037 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5038 "%s: wasn't added: seqno %d\n",
5039 __func__, SEQNO(bf->bf_state.bfs_seqno));
5040 bf->bf_next = NULL;
5041 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
5042 } else {
5043 sc->sc_stats.ast_tx_aggr_fail++;
5044 if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
5045 drops++;
5046 bf->bf_next = NULL;
5047 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
5048 }
5049 nbad++;
5050 }
5051 bf = bf_next;
5052 }
5053
5054 /*
5055 * Now that the BAW updates have been done, unlock
5056 *
5057 * txseq is grabbed before the lock is released so we
5058 * have a consistent view of what -was- in the BAW.
5059 * Anything after this point will not yet have been
5060 * TXed.
5061 */
5062 txseq = tap->txa_start;
5063 ATH_TX_UNLOCK(sc);
5064
5065 if (nframes != nf)
5066 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5067 "%s: num frames seen=%d; bf nframes=%d\n",
5068 __func__, nframes, nf);
5069
5070 /*
5071 * Now we know how many frames were bad, call the rate
5072 * control code.
5073 */
5074 if (fail == 0) {
5075 ath_tx_update_ratectrl(sc, ni, rc, &ts, agglen, rc_agglen,
5076 nframes, nbad);
5077 }
5078
5079 /*
5080 * send bar if we dropped any frames
5081 */
5082 if (drops) {
5083 /* Suspend the TX queue and get ready to send the BAR */
5084 ATH_TX_LOCK(sc);
5085 ath_tx_tid_bar_suspend(sc, atid);
5086 ATH_TX_UNLOCK(sc);
5087 }
5088
5089 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5090 "%s: txa_start now %d\n", __func__, tap->txa_start);
5091
5092 ATH_TX_LOCK(sc);
5093
5094 /* Prepend all frames to the beginning of the queue */
5095 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
5096 TAILQ_REMOVE(&bf_q, bf, bf_list);
5097 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
5098 }
5099
5100 /*
5101 * Reschedule to grab some further frames.
5102 */
5103 ath_tx_tid_sched(sc, atid);
5104
5105 /*
5106 * If the queue is filtered, re-schedule as required.
5107 *
5108 * This is required as there may be a subsequent TX descriptor
5109 * for this end-node that has CLRDMASK set, so it's quite possible
5110 * that a filtered frame will be followed by a non-filtered
5111 * (complete or otherwise) frame.
5112 *
5113 * XXX should we do this before we complete the frame?
5114 */
5115 if (atid->isfiltered)
5116 ath_tx_tid_filt_comp_complete(sc, atid);
5117
5118 finish_send_bar:
5119
5120 /*
5121 * Send BAR if required
5122 */
5123 if (ath_tx_tid_bar_tx_ready(sc, atid))
5124 ath_tx_tid_bar_tx(sc, atid);
5125
5126 ATH_TX_UNLOCK(sc);
5127
5128 /* Do deferred completion */
5129 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5130 TAILQ_REMOVE(&bf_cq, bf, bf_list);
5131 ath_tx_default_comp(sc, bf, 0);
5132 }
5133 }
5134
5135 /*
5136 * Handle completion of unaggregated frames in an ADDBA
5137 * session.
5138 *
5139 * Fail is set to 1 if the entry is being freed via a call to
5140 * ath_tx_draintxq().
5141 */
5142 static void
5143 ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
5144 {
5145 struct ieee80211_node *ni = bf->bf_node;
5146 struct ath_node *an = ATH_NODE(ni);
5147 int tid = bf->bf_state.bfs_tid;
5148 struct ath_tid *atid = &an->an_tid[tid];
5149 struct ath_tx_status ts;
5150 int drops = 0;
5151
5152 /*
5153 * Take a copy of this; filtering/cloning the frame may free the
5154 * bf pointer.
5155 */
5156 ts = bf->bf_status.ds_txstat;
5157
5158 /*
5159 * Update rate control status here, before we possibly
5160 * punt to retry or cleanup.
5161 *
5162 * Do it outside of the TXQ lock.
5163 */
5164 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
5165 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
5166 &bf->bf_status.ds_txstat,
5167 bf->bf_state.bfs_pktlen,
5168 bf->bf_state.bfs_pktlen,
5169 1, (ts.ts_status == 0) ? 0 : 1);
5170
5171 /*
5172 * This is called early so atid->hwq_depth can be tracked.
5173 * This unfortunately means that it's released and regrabbed
5174 * during retry and cleanup. That's rather inefficient.
5175 */
5176 ATH_TX_LOCK(sc);
5177
5178 if (tid == IEEE80211_NONQOS_TID)
5179 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__);
5180
5181 DPRINTF(sc, ATH_DEBUG_SW_TX,
5182 "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n",
5183 __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
5184 SEQNO(bf->bf_state.bfs_seqno));
5185
5186 atid->hwq_depth--;
5187 if (atid->hwq_depth < 0)
5188 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
5189 __func__, atid->hwq_depth);
5190
5191 /*
5192 * If the TID is filtered, handle completing the filter
5193 * transition before potentially kicking it to the cleanup
5194 * function.
5195 */
5196 if (atid->isfiltered)
5197 ath_tx_tid_filt_comp_complete(sc, atid);
5198
5199 /*
5200 * If a cleanup is in progress, punt to comp_cleanup;
5201 * rather than handling it here. It's thus their
5202 * responsibility to clean up, call the completion
5203 * function in net80211, etc.
5204 */
5205 if (atid->cleanup_inprogress) {
5206 if (atid->isfiltered)
5207 DPRINTF(sc, ATH_DEBUG_SW_TX,
5208 "%s: isfiltered=1, normal_comp?\n",
5209 __func__);
5210 ATH_TX_UNLOCK(sc);
5211 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n",
5212 __func__);
5213 ath_tx_comp_cleanup_unaggr(sc, bf);
5214 return;
5215 }
5216
5217 /*
5218 * XXX TODO: how does cleanup, BAR and filtered frame handling
5219 * overlap?
5220 *
5221 * If the frame is filtered OR if it's any failure but
5222 * the TID is filtered, the frame must be added to the
5223 * filtered frame list.
5224 *
5225 * However - a busy buffer can't be added to the filtered
5226 * list as it will end up being recycled without having
5227 * been made available for the hardware.
5228 */
5229 if ((ts.ts_status & HAL_TXERR_FILT) ||
5230 (ts.ts_status != 0 && atid->isfiltered)) {
5231 int freeframe;
5232
5233 if (fail != 0)
5234 DPRINTF(sc, ATH_DEBUG_SW_TX,
5235 "%s: isfiltered=1, fail=%d\n",
5236 __func__, fail);
5237 freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
5238 /*
5239 * If freeframe=0 then bf is no longer ours; don't
5240 * touch it.
5241 */
5242 if (freeframe) {
5243 /* Remove from BAW */
5244 if (bf->bf_state.bfs_addedbaw)
5245 drops++;
5246 if (bf->bf_state.bfs_dobaw) {
5247 ath_tx_update_baw(sc, an, atid, bf);
5248 if (!bf->bf_state.bfs_addedbaw)
5249 DPRINTF(sc, ATH_DEBUG_SW_TX,
5250 "%s: wasn't added: seqno %d\n",
5251 __func__, SEQNO(bf->bf_state.bfs_seqno));
5252 }
5253 bf->bf_state.bfs_dobaw = 0;
5254 }
5255
5256 /*
5257 * If the frame couldn't be filtered, treat it as a drop and
5258 * prepare to send a BAR.
5259 */
5260 if (freeframe && drops)
5261 ath_tx_tid_bar_suspend(sc, atid);
5262
5263 /*
5264 * Send BAR if required
5265 */
5266 if (ath_tx_tid_bar_tx_ready(sc, atid))
5267 ath_tx_tid_bar_tx(sc, atid);
5268
5269 ATH_TX_UNLOCK(sc);
5270 /*
5271 * If freeframe is set, then the frame couldn't be
5272 * cloned and bf is still valid. Just complete/free it.
5273 */
5274 if (freeframe)
5275 ath_tx_default_comp(sc, bf, fail);
5276
5277 return;
5278 }
5279 /*
5280 * Don't bother with the retry check if all frames
5281 * are being failed (eg during queue deletion.)
5282 */
5283 #if 0
5284 if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
5285 #endif
5286 if (fail == 0 && ts.ts_status != 0) {
5287 ATH_TX_UNLOCK(sc);
5288 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n",
5289 __func__);
5290 ath_tx_aggr_retry_unaggr(sc, bf);
5291 return;
5292 }
5293
5294 /* Success? Complete */
5295 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
5296 __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
5297 if (bf->bf_state.bfs_dobaw) {
5298 ath_tx_update_baw(sc, an, atid, bf);
5299 bf->bf_state.bfs_dobaw = 0;
5300 if (!bf->bf_state.bfs_addedbaw)
5301 DPRINTF(sc, ATH_DEBUG_SW_TX,
5302 "%s: wasn't added: seqno %d\n",
5303 __func__, SEQNO(bf->bf_state.bfs_seqno));
5304 }
5305
5306 /*
5307 * If the queue is filtered, re-schedule as required.
5308 *
5309 * This is required as there may be a subsequent TX descriptor
5310 * for this end-node that has CLRDMASK set, so it's quite possible
5311 * that a filtered frame will be followed by a non-filtered
5312 * (complete or otherwise) frame.
5313 *
5314 * XXX should we do this before we complete the frame?
5315 */
5316 if (atid->isfiltered)
5317 ath_tx_tid_filt_comp_complete(sc, atid);
5318
5319 /*
5320 * Send BAR if required
5321 */
5322 if (ath_tx_tid_bar_tx_ready(sc, atid))
5323 ath_tx_tid_bar_tx(sc, atid);
5324
5325 ATH_TX_UNLOCK(sc);
5326
5327 ath_tx_default_comp(sc, bf, fail);
5328 /* bf is freed at this point */
5329 }
5330
5331 void
5332 ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
5333 {
5334 if (bf->bf_state.bfs_aggr)
5335 ath_tx_aggr_comp_aggr(sc, bf, fail);
5336 else
5337 ath_tx_aggr_comp_unaggr(sc, bf, fail);
5338 }
5339
5340 /*
5341 * Grab the software queue depth that we COULD transmit.
5342 *
5343 * This includes checking whether it's in the BAW and whether it's a
5344 * frame that is supposed to be in the BAW. Other checks could be done;
5345 * but for now let's try and avoid doing the whole of ath_tx_form_aggr()
5346 * here.
5347 */
5348 static int
5349 ath_tx_tid_swq_depth_bytes(struct ath_softc *sc, struct ath_node *an,
5350 struct ath_tid *tid)
5351 {
5352 struct ath_buf *bf;
5353 struct ieee80211_tx_ampdu *tap;
5354 int nbytes = 0;
5355
5356 ATH_TX_LOCK_ASSERT(sc);
5357
5358 tap = ath_tx_get_tx_tid(an, tid->tid);
5359
5360 /*
5361 * Iterate over each buffer and sum the pkt_len.
5362 * Bail if we exceed ATH_AGGR_MAXSIZE bytes; we won't
5363 * ever queue more than that in a single frame.
5364 */
5365 TAILQ_FOREACH(bf, &tid->tid_q, bf_list) {
5366 /*
5367 * TODO: I'm not sure if we're going to hit cases where
5368 * no frames get sent because the list is empty.
5369 */
5370
5371 /* Check if it's in the BAW */
5372 if (tap != NULL && (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
5373 SEQNO(bf->bf_state.bfs_seqno)))) {
5374 break;
5375 }
5376
5377 /* Check if it's even supposed to be in the BAW */
5378 if (! bf->bf_state.bfs_dobaw) {
5379 break;
5380 }
5381
5382 nbytes += bf->bf_state.bfs_pktlen;
5383 if (nbytes >= ATH_AGGR_MAXSIZE)
5384 break;
5385
5386 /*
5387 * Check if we're likely going to leak a frame
5388 * as part of a PSPOLL. Break out at this point;
5389 * we're only going to send a single frame anyway.
5390 */
5391 if (an->an_leak_count) {
5392 break;
5393 }
5394 }
5395
5396 return MIN(nbytes, ATH_AGGR_MAXSIZE);
5397 }
5398
5399 /*
5400 * Schedule some packets from the given node/TID to the hardware.
5401 *
5402 * This is the aggregate version.
5403 */
5404 void
5405 ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
5406 struct ath_tid *tid)
5407 {
5408 struct ath_buf *bf;
5409 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5410 struct ieee80211_tx_ampdu *tap;
5411 ATH_AGGR_STATUS status;
5412 ath_bufhead bf_q;
5413 int swq_pktbytes;
5414
5415 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
5416 ATH_TX_LOCK_ASSERT(sc);
5417
5418 /*
5419 * XXX TODO: If we're called for a queue that we're leaking frames to,
5420 * ensure we only leak one.
5421 */
5422
5423 tap = ath_tx_get_tx_tid(an, tid->tid);
5424
5425 if (tid->tid == IEEE80211_NONQOS_TID)
5426 DPRINTF(sc, ATH_DEBUG_SW_TX,
5427 "%s: called for TID=NONQOS_TID?\n", __func__);
5428
5429 for (;;) {
5430 status = ATH_AGGR_DONE;
5431
5432 /*
5433 * If the upper layer has paused the TID, don't
5434 * queue any further packets.
5435 *
5436 * This can also occur from the completion task because
5437  * of packet loss; but as it's serialised with this code,
5438  * it won't "appear" halfway through queuing packets.
5439 */
5440 if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5441 break;
5442
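		/* Grab the next frame from the TID software queue; stop if empty */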
5443 bf = ATH_TID_FIRST(tid);
5444 if (bf == NULL) {
5445 break;
5446 }
5447
5448 /*
5449  * If the packet isn't subject to the BAW (e.g. a NULL
5450  * data frame), schedule it directly and continue.
5451 */
5452 if (! bf->bf_state.bfs_dobaw) {
5453 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5454 "%s: non-baw packet\n",
5455 __func__);
5456 ATH_TID_REMOVE(tid, bf, bf_list);
5457
5458 if (bf->bf_state.bfs_nframes > 1)
5459 DPRINTF(sc, ATH_DEBUG_SW_TX,
5460 "%s: aggr=%d, nframes=%d\n",
5461 __func__,
5462 bf->bf_state.bfs_aggr,
5463 bf->bf_state.bfs_nframes);
5464
5465 /*
5466 * This shouldn't happen - such frames shouldn't
5467 * ever have been queued as an aggregate in the
5468 * first place. However, make sure the fields
5469  * are set up correctly, just to be safe.
5470 */
5471 bf->bf_state.bfs_aggr = 0;
5472 bf->bf_state.bfs_nframes = 1;
5473
5474 /* Update CLRDMASK just before this frame is queued */
5475 ath_tx_update_clrdmask(sc, tid, bf);
5476
5477 ath_tx_do_ratelookup(sc, bf, tid->tid,
5478 bf->bf_state.bfs_pktlen, false);
5479 ath_tx_calc_duration(sc, bf);
5480 ath_tx_calc_protection(sc, bf);
5481 ath_tx_set_rtscts(sc, bf);
5482 ath_tx_rate_fill_rcflags(sc, bf);
5483 ath_tx_setds(sc, bf);
5484 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5485
5486 sc->sc_aggr_stats.aggr_nonbaw_pkt++;
5487
5488 /* Queue the packet; continue */
5489 goto queuepkt;
5490 }
5491
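		/* Local list to collect the frames forming this aggregate */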
5492 TAILQ_INIT(&bf_q);
5493
5494 /*
5495  * Loop over the swq to work out how many bytes
5496  * are queued (up to ATH_AGGR_MAXSIZE) and provide that
5497 * to the rate control lookup.
5498 */
5499 swq_pktbytes = ath_tx_tid_swq_depth_bytes(sc, an, tid);
5500 ath_tx_do_ratelookup(sc, bf, tid->tid, swq_pktbytes, true);
5501
5502 /*
5503  * Note this is only used for the fragment paths and
5504  * should really be rethought if we want to do
5505 * things like an RTS burst across >1 aggregate.
5506 */
5507 ath_tx_calc_duration(sc, bf);
5508 ath_tx_calc_protection(sc, bf);
5509
5510 ath_tx_set_rtscts(sc, bf);
5511 ath_tx_rate_fill_rcflags(sc, bf);
5512
5513 status = ath_tx_form_aggr(sc, an, tid, &bf_q);
5514
5515 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5516 "%s: ath_tx_form_aggr() status=%d\n", __func__, status);
5517
5518 /*
5519 * No frames to be picked up - out of BAW
5520 */
5521 if (TAILQ_EMPTY(&bf_q))
5522 break;
5523
5524 /*
5525  * This assumes that the descriptors in the ath_bufhead
5526 * are already linked together via bf_next pointers.
5527 */
5528 bf = TAILQ_FIRST(&bf_q);
5529
5530 if (status == ATH_AGGR_8K_LIMITED)
5531 sc->sc_aggr_stats.aggr_rts_aggr_limited++;
5532
5533 /*
5534  * If it's the only frame, send it as a non-aggregate;
5535 * assume that ath_tx_form_aggr() has checked
5536 * whether it's in the BAW and added it appropriately.
5537 */
5538 if (bf->bf_state.bfs_nframes == 1) {
5539 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5540 "%s: single-frame aggregate\n", __func__);
5541
5542 /* Update CLRDMASK just before this frame is queued */
5543 ath_tx_update_clrdmask(sc, tid, bf);
5544
5545 bf->bf_state.bfs_aggr = 0;
5546 bf->bf_state.bfs_ndelim = 0;
5547 ath_tx_setds(sc, bf);
5548 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5549 if (status == ATH_AGGR_BAW_CLOSED)
5550 sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
5551 else
5552 sc->sc_aggr_stats.aggr_single_pkt++;
5553 } else {
5554 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5555 "%s: multi-frame aggregate: %d frames, "
5556 "length %d\n",
5557 __func__, bf->bf_state.bfs_nframes,
5558 bf->bf_state.bfs_al);
5559 bf->bf_state.bfs_aggr = 1;
5560 sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
5561 sc->sc_aggr_stats.aggr_aggr_pkt++;
5562
5563 /* Update CLRDMASK just before this frame is queued */
5564 ath_tx_update_clrdmask(sc, tid, bf);
5565
5566 /*
5567 * Calculate the duration/protection as required.
5568 */
5569 ath_tx_calc_duration(sc, bf);
5570 ath_tx_calc_protection(sc, bf);
5571
5572 /*
5573 * Update the rate and rtscts information based on the
5574 * rate decision made by the rate control code;
5575 * the first frame in the aggregate needs it.
5576 */
5577 ath_tx_set_rtscts(sc, bf);
5578
5579 /*
5580 * Setup the relevant descriptor fields
5581 * for aggregation. The first descriptor
5582 * already points to the rest in the chain.
5583 */
5584 ath_tx_setds_11n(sc, bf);
5585 }
5586 queuepkt:
5587 /* Set completion handler, multi-frame aggregate or not */
5588 bf->bf_comp = ath_tx_aggr_comp;
5589
5590 if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
5591 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__);
5592
5593 /*
5594  * Update leak count and frame config if we're leaking frames.
5595 *
5596 * XXX TODO: it should update all frames in an aggregate
5597 * correctly!
5598 */
5599 ath_tx_leak_count_update(sc, tid, bf);
5600
5601 /* Punt to txq */
5602 ath_tx_handoff(sc, txq, bf);
5603
5604 /* Track outstanding buffer count to hardware */
5605 /* aggregates are "one" buffer */
5606 tid->hwq_depth++;
5607
5608 /*
5609 * Break out if ath_tx_form_aggr() indicated
5610 * there can't be any further progress (eg BAW is full.)
5611 * Checking for an empty txq is done above.
5612 *
5613 * XXX locking on txq here?
5614 */
5615 /* XXX TXQ locking */
5616 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5617 (status == ATH_AGGR_BAW_CLOSED ||
5618 status == ATH_AGGR_LEAK_CLOSED))
5619 break;
5620 }
5621 }
5622
5623 /*
5624 * Schedule some packets from the given node/TID to the hardware.
5625 *
5626 * XXX TODO: this routine doesn't enforce the maximum TXQ depth.
5627 * It just dumps frames into the TXQ. We should limit how deep
5628 * the transmit queue can grow for frames dispatched to the given
5629 * TXQ.
5630 *
5631 * To avoid locking issues, either we need to own the TXQ lock
5632 * at this point, or we need to pass in the maximum frame count
5633 * from the caller.
5634 */
5635 void
5636 ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
5637 struct ath_tid *tid)
5638 {
5639 struct ath_buf *bf;
5640 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5641
5642 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
5643 __func__, an, tid->tid);
5644
5645 ATH_TX_LOCK_ASSERT(sc);
5646
5647	/* If AMPDU is pending or running, note it; this is the non-aggregate path */
5648 if (ath_tx_ampdu_pending(sc, an, tid->tid))
5649 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n",
5650 __func__, tid->tid);
5651 if (ath_tx_ampdu_running(sc, an, tid->tid))
5652 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n",
5653 __func__, tid->tid);
5654
5655 for (;;) {
5656 /*
5657 * If the upper layers have paused the TID, don't
5658 * queue any further packets.
5659 *
5660 * XXX if we are leaking frames, make sure we decrement
5661 * that counter _and_ we continue here.
5662 */
5663 if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5664 break;
5665
5666 bf = ATH_TID_FIRST(tid);
5667 if (bf == NULL) {
5668 break;
5669 }
5670
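		/* Dequeue the frame from the software queue before dispatch */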
5671 ATH_TID_REMOVE(tid, bf, bf_list);
5672
5673 /* Sanity check! */
5674 if (tid->tid != bf->bf_state.bfs_tid) {
5675 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !="
5676 " tid %d\n", __func__, bf->bf_state.bfs_tid,
5677 tid->tid);
5678 }
5679 /* Normal completion handler */
5680 bf->bf_comp = ath_tx_normal_comp;
5681
5682 /*
5683 * Override this for now, until the non-aggregate
5684 * completion handler correctly handles software retransmits.
5685 */
5686 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
5687
5688 /* Update CLRDMASK just before this frame is queued */
5689 ath_tx_update_clrdmask(sc, tid, bf);
5690
5691 /* Program descriptors + rate control */
5692 ath_tx_do_ratelookup(sc, bf, tid->tid,
5693 bf->bf_state.bfs_pktlen, false);
5694 ath_tx_calc_duration(sc, bf);
5695 ath_tx_calc_protection(sc, bf);
5696 ath_tx_set_rtscts(sc, bf);
5697 ath_tx_rate_fill_rcflags(sc, bf);
5698 ath_tx_setds(sc, bf);
5699
5700 /*
5701 * Update the current leak count if
5702 * we're leaking frames; and set the
5703 * MORE flag as appropriate.
5704 */
5705 ath_tx_leak_count_update(sc, tid, bf);
5706
5707 /* Track outstanding buffer count to hardware */
5708 /* aggregates are "one" buffer */
5709 tid->hwq_depth++;
5710
5711 /* Punt to hardware or software txq */
5712 ath_tx_handoff(sc, txq, bf);
5713 }
5714 }
5715
5716 /*
5717 * Schedule some packets to the given hardware queue.
5718 *
5719 * This function walks the list of TIDs (ie, ath_node TIDs
5720 * with queued traffic) and attempts to schedule traffic
5721 * from them.
5722 *
5723 * TID scheduling is implemented as a FIFO, with TIDs being
5724 * added to the end of the queue after some frames have been
5725 * scheduled.
5726 */
5727 void
5728 ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
5729 {
5730 struct ath_tid *tid, *next, *last;
5731
5732 ATH_TX_LOCK_ASSERT(sc);
5733
5734 /*
5735 * For non-EDMA chips, aggr frames that have been built are
5736 * in axq_aggr_depth, whether they've been scheduled or not.
5737 * There's no FIFO, so txq->axq_depth is what's been scheduled
5738 * to the hardware.
5739 *
5740 * For EDMA chips, we do it in two stages. The existing code
5741 * builds a list of frames to go to the hardware and the EDMA
5742 * code turns it into a single entry to push into the FIFO.
5743 * That way we don't take up one packet per FIFO slot.
5744 * We do push one aggregate per FIFO slot though, just to keep
5745 * things simple.
5746 *
5747 * The FIFO depth is what's in the hardware; the txq->axq_depth
5748 * is what's been scheduled to the FIFO.
5749 *
5750 * fifo.axq_depth is the number of frames (or aggregates) pushed
5751 * into the EDMA FIFO. For multi-frame lists, this is the number
5752 * of frames pushed in.
5753 * axq_fifo_depth is the number of FIFO slots currently busy.
5754 */
5755
5756 /* For EDMA and non-EDMA, check built/scheduled against aggr limit */
5757 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr) {
5758 sc->sc_aggr_stats.aggr_sched_nopkt++;
5759 return;
5760 }
5761
5762 /*
5763 * For non-EDMA chips, axq_depth is the "what's scheduled to
5764 * the hardware list". For EDMA it's "What's built for the hardware"
5765 * and fifo.axq_depth is how many frames have been dispatched
5766 * already to the hardware.
5767 */
5768 if (txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_nonaggr) {
5769 sc->sc_aggr_stats.aggr_sched_nopkt++;
5770 return;
5771 }
5772
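	/*
	 * Remember the current tail of the TID list; entries rescheduled
	 * onto the end during this pass are left for a later pass.
	 */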
5773 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
5774
5775 TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
5776 /*
5777 * Suspend paused queues here; they'll be resumed
5778 * once the addba completes or times out.
5779 */
5780 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
5781 __func__, tid->tid, tid->paused);
5782 ath_tx_tid_unsched(sc, tid);
5783 /*
5784 * This node may be in power-save and we're leaking
5785 * a frame; be careful.
5786 */
5787 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
5788 goto loop_done;
5789 }
5790 if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
5791 ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
5792 else
5793 ath_tx_tid_hw_queue_norm(sc, tid->an, tid);
5794
5795 /* Not empty? Re-schedule */
5796 if (tid->axq_depth != 0)
5797 ath_tx_tid_sched(sc, tid);
5798
5799 /*
5800 * Give the software queue time to aggregate more
5801 * packets. If we aren't running aggregation then
5802 * we should still limit the hardware queue depth.
5803 */
5804 /* XXX TXQ locking */
5805 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5806 break;
5807 }
5808 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5809 break;
5810 }
5811 loop_done:
5812 /*
5813 * If this was the last entry on the original list, stop.
5814 * Otherwise nodes that have been rescheduled onto the end
5815 * of the TID FIFO list will just keep being rescheduled.
5816 *
5817 * XXX What should we do about nodes that were paused
5818 * but are pending a leaking frame in response to a ps-poll?
5819 * They'll be put at the front of the list; so they'll
5820 * prematurely trigger this condition! Ew.
5821 */
5822 if (tid == last)
5823 break;
5824 }
5825 }
5826
5827 /*
5828 * TX addba handling
5829 */
5830
5831 /*
5832 * Return net80211 TID struct pointer, or NULL for none
5833 */
5834 struct ieee80211_tx_ampdu *
5835 ath_tx_get_tx_tid(struct ath_node *an, int tid)
5836 {
5837 struct ieee80211_node *ni = &an->an_node;
5838 struct ieee80211_tx_ampdu *tap;
5839
5840 if (tid == IEEE80211_NONQOS_TID)
5841 return NULL;
5842
5843 tap = &ni->ni_tx_ampdu[tid];
5844 return tap;
5845 }
5846
5847 /*
5848 * Is AMPDU-TX running?
5849 */
5850 static int
5851 ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
5852 {
5853 struct ieee80211_tx_ampdu *tap;
5854
5855 if (tid == IEEE80211_NONQOS_TID)
5856 return 0;
5857
5858 tap = ath_tx_get_tx_tid(an, tid);
5859 if (tap == NULL)
5860 return 0; /* Not valid; default to not running */
5861
5862 return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
5863 }
5864
5865 /*
5866 * Is AMPDU-TX negotiation pending?
5867 */
5868 static int
5869 ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
5870 {
5871 struct ieee80211_tx_ampdu *tap;
5872
5873 if (tid == IEEE80211_NONQOS_TID)
5874 return 0;
5875
5876 tap = ath_tx_get_tx_tid(an, tid);
5877 if (tap == NULL)
5878 return 0; /* Not valid; default to not pending */
5879
5880 return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
5881 }
5882
5887 /*
5888 * Method to handle sending an ADDBA request.
5889 *
5890 * We tap this so the relevant flags can be set to pause the TID
5891 * whilst waiting for the response.
5892 *
5893 * XXX there's no timeout handler we can override?
5894 */
5895 int
5896 ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5897 int dialogtoken, int baparamset, int batimeout)
5898 {
5899 struct ath_softc *sc = ni->ni_ic->ic_softc;
5900 int tid = tap->txa_tid;
5901 struct ath_node *an = ATH_NODE(ni);
5902 struct ath_tid *atid = &an->an_tid[tid];
5903
5904 /*
5905 * XXX danger Will Robinson!
5906 *
5907 * Although the taskqueue may be running and scheduling some more
5908 * packets, these should all be _before_ the addba sequence number.
5909 * However, net80211 will keep self-assigning sequence numbers
5910 * until addba has been negotiated.
5911 *
5912 * In the past, these packets would be "paused" (which still works
5913 * fine, as they're being scheduled to the driver in the same
5914 * serialised method which is calling the addba request routine)
5915 * and when the aggregation session begins, they'll be dequeued
5916 * as aggregate packets and added to the BAW. However, now there's
5917 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
5918 * packets. Thus they never get included in the BAW tracking and
5919 * this can cause the initial burst of packets after the addba
5920 * negotiation to "hang", as they quickly fall outside the BAW.
5921 *
5922 * The "eventual" solution should be to tag these packets with
5923 * dobaw. Although net80211 has given us a sequence number,
5924 * it'll be "after" the left edge of the BAW and thus it'll
5925 * fall within it.
5926 */
5927 ATH_TX_LOCK(sc);
5928 /*
5929 * This is a bit annoying. Until net80211 HT code inherits some
5930 * (any) locking, we may have this called in parallel BUT only
5931 * one response/timeout will be called. Grr.
5932 */
5933 if (atid->addba_tx_pending == 0) {
5934 ath_tx_tid_pause(sc, atid);
5935 atid->addba_tx_pending = 1;
5936 }
5937 ATH_TX_UNLOCK(sc);
5938
5939 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5940 "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
5941 __func__,
5942 ni->ni_macaddr,
5943 ":",
5944 dialogtoken, baparamset, batimeout);
5945 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5946 "%s: txa_start=%d, ni_txseqs=%d\n",
5947 __func__, tap->txa_start, ni->ni_txseqs[tid]);
5948
5949 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
5950 batimeout);
5951 }
5952
5953 /*
5954 * Handle an ADDBA response.
5955 *
5956 * We unpause the queue so TX'ing can resume.
5957 *
5958 * Any packets TX'ed from this point should be "aggregate" (whether
5959 * aggregate or not) so the BAW is updated.
5960 *
5961 * Note! net80211 keeps self-assigning sequence numbers until
5962 * ampdu is negotiated. This means the initially-negotiated BAW left
5963  * edge won't match ni->ni_txseqs[tid].
5964 *
5965 * So, being very dirty, the BAW left edge is "slid" here to match
5966  * ni->ni_txseqs[tid].
5967 *
5968 * What likely SHOULD happen is that all packets subsequent to the
5969 * addba request should be tagged as aggregate and queued as non-aggregate
5970 * frames; thus updating the BAW. For now though, I'll just slide the
5971 * window.
5972 */
5973 int
5974 ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5975 int status, int code, int batimeout)
5976 {
5977 struct ath_softc *sc = ni->ni_ic->ic_softc;
5978 int tid = tap->txa_tid;
5979 struct ath_node *an = ATH_NODE(ni);
5980 struct ath_tid *atid = &an->an_tid[tid];
5981 int r;
5982
5983 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5984 "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__,
5985 ni->ni_macaddr,
5986 ":",
5987 status, code, batimeout);
5988
5989 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5990 "%s: txa_start=%d, ni_txseqs=%d\n",
5991 __func__, tap->txa_start, ni->ni_txseqs[tid]);
5992
5993 /*
5994 * Call this first, so the interface flags get updated
5995 * before the TID is unpaused. Otherwise a race condition
5996 * exists where the unpaused TID still doesn't yet have
5997 * IEEE80211_AGGR_RUNNING set.
5998 */
5999 r = sc->sc_addba_response(ni, tap, status, code, batimeout);
6000
6001 ATH_TX_LOCK(sc);
6002 atid->addba_tx_pending = 0;
6003 /*
6004 * XXX dirty!
6005 * Slide the BAW left edge to wherever net80211 left it for us.
6006 * Read above for more information.
6007 */
6008 tap->txa_start = ni->ni_txseqs[tid];
6009 ath_tx_tid_resume(sc, atid);
6010 ATH_TX_UNLOCK(sc);
6011 return r;
6012 }
6013
6014 /*
6015 * Stop ADDBA on a queue.
6016 *
6017 * This can be called whilst BAR TX is currently active on the queue,
6018 * so make sure this is unblocked before continuing.
6019 */
6020 void
6021 ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
6022 {
6023 struct ath_softc *sc = ni->ni_ic->ic_softc;
6024 int tid = tap->txa_tid;
6025 struct ath_node *an = ATH_NODE(ni);
6026 struct ath_tid *atid = &an->an_tid[tid];
6027 ath_bufhead bf_cq;
6028 struct ath_buf *bf;
6029
6030 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n",
6031 __func__,
6032 ni->ni_macaddr,
6033 ":");
6034
6035 /*
6036  * Pause TID traffic early, so there aren't any races.
6037  * Unblock the pending BAR-held traffic, if it's currently paused.
6038 */
6039 ATH_TX_LOCK(sc);
6040 ath_tx_tid_pause(sc, atid);
6041 if (atid->bar_wait) {
6042 /*
6043 * bar_unsuspend() expects bar_tx == 1, as it should be
6044 * called from the TX completion path. This quietens
6045 * the warning. It's cleared for us anyway.
6046 */
6047 atid->bar_tx = 1;
6048 ath_tx_tid_bar_unsuspend(sc, atid);
6049 }
6050 ATH_TX_UNLOCK(sc);
6051
6052 /* There's no need to hold the TXQ lock here */
6053 sc->sc_addba_stop(ni, tap);
6054
6055 /*
6056 * ath_tx_tid_cleanup will resume the TID if possible, otherwise
6057 * it'll set the cleanup flag, and it'll be unpaused once
6058 * things have been cleaned up.
6059 */
6060 TAILQ_INIT(&bf_cq);
6061 ATH_TX_LOCK(sc);
6062
6063 /*
6064 * In case there's a followup call to this, only call it
6065 * if we don't have a cleanup in progress.
6066 *
6067 * Since we've paused the queue above, we need to make
6068 * sure we unpause if there's already a cleanup in
6069 * progress - it means something else is also doing
6070 * this stuff, so we don't need to also keep it paused.
6071 */
6072 if (atid->cleanup_inprogress) {
6073 ath_tx_tid_resume(sc, atid);
6074 } else {
6075 ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
6076 /*
6077 * Unpause the TID if no cleanup is required.
6078 */
6079 if (! atid->cleanup_inprogress)
6080 ath_tx_tid_resume(sc, atid);
6081 }
6082 ATH_TX_UNLOCK(sc);
6083
6084 /* Handle completing frames and fail them */
6085 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
6086 TAILQ_REMOVE(&bf_cq, bf, bf_list);
6087 ath_tx_default_comp(sc, bf, 1);
6088 }
6089
6090 }
6091
6092 /*
6093 * Handle a node reassociation.
6094 *
6095 * We may have a bunch of frames queued to the hardware; those need
6096 * to be marked as cleanup.
6097 */
6098 void
6099 ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
6100 {
6101 struct ath_tid *tid;
6102 int i;
6103 ath_bufhead bf_cq;
6104 struct ath_buf *bf;
6105
6106 TAILQ_INIT(&bf_cq);
6107
6108 ATH_TX_UNLOCK_ASSERT(sc);
6109
6110 ATH_TX_LOCK(sc);
6111 for (i = 0; i < IEEE80211_TID_SIZE; i++) {
6112 tid = &an->an_tid[i];
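		/* Only TIDs with frames outstanding to the hardware need cleanup */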
6113 if (tid->hwq_depth == 0)
6114 continue;
6115 DPRINTF(sc, ATH_DEBUG_NODE,
6116 "%s: %6D: TID %d: cleaning up TID\n",
6117 __func__,
6118 an->an_node.ni_macaddr,
6119 ":",
6120 i);
6121 /*
6122 * In case there's a followup call to this, only call it
6123 * if we don't have a cleanup in progress.
6124 */
6125 if (! tid->cleanup_inprogress) {
6126 ath_tx_tid_pause(sc, tid);
6127 ath_tx_tid_cleanup(sc, an, i, &bf_cq);
6128 /*
6129 * Unpause the TID if no cleanup is required.
6130 */
6131 if (! tid->cleanup_inprogress)
6132 ath_tx_tid_resume(sc, tid);
6133 }
6134 }
6135 ATH_TX_UNLOCK(sc);
6136
6137 /* Handle completing frames and fail them */
6138 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
6139 TAILQ_REMOVE(&bf_cq, bf, bf_list);
6140 ath_tx_default_comp(sc, bf, 1);
6141 }
6142 }
6143
6144 /*
6145 * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
6146 * it simply tears down the aggregation session. Ew.
6147 *
6148 * It however will call ieee80211_ampdu_stop() which will call
6149 * ic->ic_addba_stop().
6150 *
6151 * XXX This uses a hard-coded max BAR count value; the whole
6152 * XXX BAR TX success or failure should be better handled!
6153 */
6154 void
6155 ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
6156 int status)
6157 {
6158 struct ath_softc *sc = ni->ni_ic->ic_softc;
6159 int tid = tap->txa_tid;
6160 struct ath_node *an = ATH_NODE(ni);
6161 struct ath_tid *atid = &an->an_tid[tid];
6162 int attempts = tap->txa_attempts;
6163 int old_txa_start;
6164
6165 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
6166 "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n",
6167 __func__,
6168 ni->ni_macaddr,
6169 ":",
6170 tap->txa_tid,
6171 atid->tid,
6172 status,
6173 attempts,
6174 tap->txa_start,
6175 tap->txa_seqpending);
6176
6177 /* Note: This may update the BAW details */
6178 /*
6179 * XXX What if this does slide the BAW along? We need to somehow
6180 * XXX either fix things when it does happen, or prevent the
6181  * XXX seqpending value from being anything other than exactly what
6182 * XXX the hell we want!
6183 *
6184  * XXX So for now, why not do this inside the TX lock
6185 * XXX and just correct it afterwards? The below condition should
6186 * XXX never happen and if it does I need to fix all kinds of things.
6187 */
6188 ATH_TX_LOCK(sc);
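	/*
	 * Snapshot the BAW left edge; if the net80211 BAR response path
	 * slides it, we log and restore the old value below.
	 */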
6189 old_txa_start = tap->txa_start;
6190 sc->sc_bar_response(ni, tap, status);
6191 if (tap->txa_start != old_txa_start) {
6192 device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",
6193 __func__,
6194 tid,
6195 tap->txa_start,
6196 old_txa_start);
6197 }
6198 tap->txa_start = old_txa_start;
6199 ATH_TX_UNLOCK(sc);
6200
6201 /* Unpause the TID */
6202 /*
6203 * XXX if this is attempt=50, the TID will be downgraded
6204 * XXX to a non-aggregate session. So we must unpause the
6205 * XXX TID here or it'll never be done.
6206 *
6207 * Also, don't call it if bar_tx/bar_wait are 0; something
6208 * has beaten us to the punch? (XXX figure out what?)
6209 */
6210 if (status == 0 || attempts == 50) {
6211 ATH_TX_LOCK(sc);
6212 if (atid->bar_tx == 0 || atid->bar_wait == 0)
6213 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
6214 "%s: huh? bar_tx=%d, bar_wait=%d\n",
6215 __func__,
6216 atid->bar_tx, atid->bar_wait);
6217 else
6218 ath_tx_tid_bar_unsuspend(sc, atid);
6219 ATH_TX_UNLOCK(sc);
6220 }
6221 }
6222
6223 /*
6224 * This is called whenever the pending ADDBA request times out.
6225 * Unpause and reschedule the TID.
6226 */
6227 void
6228 ath_addba_response_timeout(struct ieee80211_node *ni,
6229 struct ieee80211_tx_ampdu *tap)
6230 {
6231 struct ath_softc *sc = ni->ni_ic->ic_softc;
6232 int tid = tap->txa_tid;
6233 struct ath_node *an = ATH_NODE(ni);
6234 struct ath_tid *atid = &an->an_tid[tid];
6235
6236 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
6237 "%s: %6D: TID=%d, called; resuming\n",
6238 __func__,
6239 ni->ni_macaddr,
6240 ":",
6241 tid);
6242
6243 ATH_TX_LOCK(sc);
6244 atid->addba_tx_pending = 0;
6245 ATH_TX_UNLOCK(sc);
6246
6247 /* Note: This updates the aggregate state to (again) pending */
6248 sc->sc_addba_response_timeout(ni, tap);
6249
6250 /* Unpause the TID; which reschedules it */
6251 ATH_TX_LOCK(sc);
6252 ath_tx_tid_resume(sc, atid);
6253 ATH_TX_UNLOCK(sc);
6254 }
6255
6256 /*
6257 * Check if a node is asleep or not.
6258 */
6259 int
6260 ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
6261 {
6262
6263 ATH_TX_LOCK_ASSERT(sc);
6264
6265 return (an->an_is_powersave);
6266 }
6267
6268 /*
6269  * Mark a node as currently being in power save.
6270 * This suspends all traffic on the node.
6271 *
6272 * This must be called with the node/tx locks free.
6273 *
6274 * XXX TODO: the locking silliness below is due to how the node
6275 * locking currently works. Right now, the node lock is grabbed
6276 * to do rate control lookups and these are done with the TX
6277 * queue lock held. This means the node lock can't be grabbed
6278 * first here or a LOR will occur.
6279 *
6280 * Eventually (hopefully!) the TX path code will only grab
6281 * the TXQ lock when transmitting and the ath_node lock when
6282 * doing node/TID operations. There are other complications -
6283 * the sched/unsched operations involve walking the per-txq
6284 * 'active tid' list and this requires both locks to be held.
6285 */
6286 void
6287 ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
6288 {
6289 struct ath_tid *atid;
6290 struct ath_txq *txq;
6291 int tid;
6292
6293 ATH_TX_UNLOCK_ASSERT(sc);
6294
6295 /* Suspend all traffic on the node */
6296 ATH_TX_LOCK(sc);
6297
6298 if (an->an_is_powersave) {
6299 DPRINTF(sc, ATH_DEBUG_XMIT,
6300 "%s: %6D: node was already asleep!\n",
6301 __func__, an->an_node.ni_macaddr, ":");
6302 ATH_TX_UNLOCK(sc);
6303 return;
6304 }
6305
6306 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
6307 atid = &an->an_tid[tid];
6308 txq = sc->sc_ac2q[atid->ac];
6309
6310 ath_tx_tid_pause(sc, atid);
6311 }
6312
6313 /* Mark node as in powersaving */
6314 an->an_is_powersave = 1;
6315
6316 ATH_TX_UNLOCK(sc);
6317 }
6318
6319 /*
6320 * Mark a node as currently "awake."
6321 * This resumes all traffic to the node.
6322 */
6323 void
6324 ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
6325 {
6326 struct ath_tid *atid;
6327 struct ath_txq *txq;
6328 int tid;
6329
6330 ATH_TX_UNLOCK_ASSERT(sc);
6331
6332 ATH_TX_LOCK(sc);
6333
6334 /* !? */
6335 if (an->an_is_powersave == 0) {
6336 ATH_TX_UNLOCK(sc);
6337 DPRINTF(sc, ATH_DEBUG_XMIT,
6338 "%s: an=%p: node was already awake\n",
6339 __func__, an);
6340 return;
6341 }
6342
6343 /* Mark node as awake */
6344 an->an_is_powersave = 0;
6345 /*
6346 * Clear any pending leaked frame requests
6347 */
6348 an->an_leak_count = 0;
6349
6350 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
6351 atid = &an->an_tid[tid];
6352 txq = sc->sc_ac2q[atid->ac];
6353
6354 ath_tx_tid_resume(sc, atid);
6355 }
6356 ATH_TX_UNLOCK(sc);
6357 }
6358
6359 static int
6360 ath_legacy_dma_txsetup(struct ath_softc *sc)
6361 {
6362
6363 /* nothing new needed */
6364 return (0);
6365 }
6366
6367 static int
6368 ath_legacy_dma_txteardown(struct ath_softc *sc)
6369 {
6370
6371 /* nothing new needed */
6372 return (0);
6373 }
6374
6375 void
6376 ath_xmit_setup_legacy(struct ath_softc *sc)
6377 {
6378 /*
6379 * For now, just set the descriptor length to sizeof(ath_desc);
6380 * worry about extracting the real length out of the HAL later.
6381 */
6382 sc->sc_tx_desclen = sizeof(struct ath_desc);
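	/* Legacy (non-EDMA) chips report TX status in the descriptor itself */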
6383 sc->sc_tx_statuslen = sizeof(struct ath_desc);
6384 sc->sc_tx_nmaps = 1; /* only one buffer per TX desc */
6385
6386 sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
6387 sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
6388 sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;
6389
6390 sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
6391 sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;
6392
6393 sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
6394 }
6395