xref: /freebsd/sys/dev/ath/if_ath_tx.c (revision e27abb6689c5733dd08ce240d5402a0de3a42254)
1 /*-
2  * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3  * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer,
11  *    without modification.
12  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
14  *    redistribution must be conditioned upon including a substantially
15  *    similar Disclaimer requirement for further binary redistribution.
16  *
17  * NO WARRANTY
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
21  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
23  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
26  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGES.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 /*
35  * Driver for the Atheros Wireless LAN controller.
36  *
37  * This software is derived from work of Atsushi Onoe; his contribution
38  * is greatly appreciated.
39  */
40 
41 #include "opt_inet.h"
42 #include "opt_ath.h"
43 #include "opt_wlan.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/sysctl.h>
48 #include <sys/mbuf.h>
49 #include <sys/malloc.h>
50 #include <sys/lock.h>
51 #include <sys/mutex.h>
52 #include <sys/kernel.h>
53 #include <sys/socket.h>
54 #include <sys/sockio.h>
55 #include <sys/errno.h>
56 #include <sys/callout.h>
57 #include <sys/bus.h>
58 #include <sys/endian.h>
59 #include <sys/kthread.h>
60 #include <sys/taskqueue.h>
61 #include <sys/priv.h>
62 #include <sys/ktr.h>
63 
64 #include <machine/bus.h>
65 
66 #include <net/if.h>
67 #include <net/if_var.h>
68 #include <net/if_dl.h>
69 #include <net/if_media.h>
70 #include <net/if_types.h>
71 #include <net/if_arp.h>
72 #include <net/ethernet.h>
73 #include <net/if_llc.h>
74 
75 #include <net80211/ieee80211_var.h>
76 #include <net80211/ieee80211_regdomain.h>
77 #ifdef IEEE80211_SUPPORT_SUPERG
78 #include <net80211/ieee80211_superg.h>
79 #endif
80 #ifdef IEEE80211_SUPPORT_TDMA
81 #include <net80211/ieee80211_tdma.h>
82 #endif
83 #include <net80211/ieee80211_ht.h>
84 
85 #include <net/bpf.h>
86 
87 #ifdef INET
88 #include <netinet/in.h>
89 #include <netinet/if_ether.h>
90 #endif
91 
92 #include <dev/ath/if_athvar.h>
93 #include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
94 #include <dev/ath/ath_hal/ah_diagcodes.h>
95 
96 #include <dev/ath/if_ath_debug.h>
97 
98 #ifdef ATH_TX99_DIAG
99 #include <dev/ath/ath_tx99/ath_tx99.h>
100 #endif
101 
102 #include <dev/ath/if_ath_misc.h>
103 #include <dev/ath/if_ath_tx.h>
104 #include <dev/ath/if_ath_tx_ht.h>
105 
106 #ifdef	ATH_DEBUG_ALQ
107 #include <dev/ath/if_ath_alq.h>
108 #endif
109 
110 /*
111  * How many retries to perform in software
112  */
113 #define	SWMAX_RETRIES		10
114 
115 /*
116  * What queue to throw the non-QoS TID traffic into
117  */
118 #define	ATH_NONQOS_TID_AC	WME_AC_VO
119 
120 #if 0
121 static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
122 #endif
123 static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
124     int tid);
125 static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
126     int tid);
127 static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
128     struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
129 static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
130     struct ieee80211_node *ni, struct mbuf *m0, int *tid);
131 static struct ath_buf *
132 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
133     struct ath_tid *tid, struct ath_buf *bf);
134 
135 #ifdef	ATH_DEBUG_ALQ
136 void
137 ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
138 {
139 	struct ath_buf *bf;
140 	int i, n;
141 	const char *ds;
142 
143 	/* XXX we should skip out early if debugging isn't enabled! */
144 	bf = bf_first;
145 
146 	while (bf != NULL) {
147 		/* XXX should ensure bf_nseg > 0! */
148 		if (bf->bf_nseg == 0)
149 			break;
150 		n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
151 		for (i = 0, ds = (const char *) bf->bf_desc;
152 		    i < n;
153 		    i++, ds += sc->sc_tx_desclen) {
154 			if_ath_alq_post(&sc->sc_alq,
155 			    ATH_ALQ_EDMA_TXDESC,
156 			    sc->sc_tx_desclen,
157 			    ds);
158 		}
159 		bf = bf->bf_next;
160 	}
161 }
162 #endif /* ATH_DEBUG_ALQ */
163 
164 /*
165  * Whether to use the 11n rate scenario functions or not
166  */
167 static inline int
168 ath_tx_is_11n(struct ath_softc *sc)
169 {
170 	return ((sc->sc_ah->ah_magic == 0x20065416) ||
171 		    (sc->sc_ah->ah_magic == 0x19741014));
172 }
173 
174 /*
175  * Obtain the current TID from the given frame.
176  *
177  * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID.)
178  * This has implications for which AC/priority the packet is placed
179  * in.
180  */
181 static int
182 ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
183 {
184 	const struct ieee80211_frame *wh;
185 	int pri = M_WME_GETAC(m0);
186 
187 	wh = mtod(m0, const struct ieee80211_frame *);
188 	if (! IEEE80211_QOS_HAS_SEQ(wh))
189 		return IEEE80211_NONQOS_TID;
190 	else
191 		return WME_AC_TO_TID(pri);
192 }
193 
194 static void
195 ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
196 {
197 	struct ieee80211_frame *wh;
198 
199 	wh = mtod(bf->bf_m, struct ieee80211_frame *);
200 	/* Only update/resync if needed */
201 	if (bf->bf_state.bfs_isretried == 0) {
202 		wh->i_fc[1] |= IEEE80211_FC1_RETRY;
203 		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
204 		    BUS_DMASYNC_PREWRITE);
205 	}
206 	bf->bf_state.bfs_isretried = 1;
207 	bf->bf_state.bfs_retries ++;
208 }
209 
210 /*
211  * Determine what the correct AC queue for the given frame
212  * should be.
213  *
214  * This code assumes that the TIDs map consistently to
215  * the underlying hardware (or software) ath_txq.
216  * Since the sender may try to set an AC which is
217  * arbitrary, non-QoS TIDs may end up being put on
218  * completely different ACs. There's no way to put a
219  * TID into multiple ath_txq's for scheduling, so
220  * for now we override the AC/TXQ selection and set
221  * non-QOS TID frames into the BE queue.
222  *
223  * This may be completely incorrect - specifically,
224  * some management frames may end up out of order
225  * compared to the QoS traffic they're controlling.
226  * I'll look into this later.
227  */
228 static int
229 ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
230 {
231 	const struct ieee80211_frame *wh;
232 	int pri = M_WME_GETAC(m0);
233 	wh = mtod(m0, const struct ieee80211_frame *);
234 	if (IEEE80211_QOS_HAS_SEQ(wh))
235 		return pri;
236 
237 	return ATH_NONQOS_TID_AC;
238 }
239 
240 void
241 ath_txfrag_cleanup(struct ath_softc *sc,
242 	ath_bufhead *frags, struct ieee80211_node *ni)
243 {
244 	struct ath_buf *bf, *next;
245 
246 	ATH_TXBUF_LOCK_ASSERT(sc);
247 
248 	TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
249 		/* NB: bf assumed clean */
250 		TAILQ_REMOVE(frags, bf, bf_list);
251 		ath_returnbuf_head(sc, bf);
252 		ieee80211_node_decref(ni);
253 	}
254 }
255 
256 /*
257  * Setup xmit of a fragmented frame.  Allocate a buffer
258  * for each frag and bump the node reference count to
259  * reflect the held reference to be setup by ath_tx_start.
260  */
261 int
262 ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
263 	struct mbuf *m0, struct ieee80211_node *ni)
264 {
265 	struct mbuf *m;
266 	struct ath_buf *bf;
267 
268 	ATH_TXBUF_LOCK(sc);
269 	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
270 		/* XXX non-management? */
271 		bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
272 		if (bf == NULL) {	/* out of buffers, cleanup */
273 			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n",
274 			    __func__);
275 			ath_txfrag_cleanup(sc, frags, ni);
276 			break;
277 		}
278 		ieee80211_node_incref(ni);
279 		TAILQ_INSERT_TAIL(frags, bf, bf_list);
280 	}
281 	ATH_TXBUF_UNLOCK(sc);
282 
283 	return !TAILQ_EMPTY(frags);
284 }
285 
286 static int
287 ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
288 {
289 	struct mbuf *m;
290 	int error;
291 
292 	/*
293 	 * Load the DMA map so any coalescing is done.  This
294 	 * also calculates the number of descriptors we need.
295 	 */
296 	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
297 				     bf->bf_segs, &bf->bf_nseg,
298 				     BUS_DMA_NOWAIT);
299 	if (error == EFBIG) {
300 		/* XXX packet requires too many descriptors */
301 		bf->bf_nseg = ATH_MAX_SCATTER + 1;
302 	} else if (error != 0) {
303 		sc->sc_stats.ast_tx_busdma++;
304 		ieee80211_free_mbuf(m0);
305 		return error;
306 	}
307 	/*
308 	 * Discard null packets and check for packets that
309 	 * require too many TX descriptors.  We try to convert
310 	 * the latter to a cluster.
311 	 */
312 	if (bf->bf_nseg > ATH_MAX_SCATTER) {		/* too many desc's, linearize */
313 		sc->sc_stats.ast_tx_linear++;
314 		m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER);
315 		if (m == NULL) {
316 			ieee80211_free_mbuf(m0);
317 			sc->sc_stats.ast_tx_nombuf++;
318 			return ENOMEM;
319 		}
320 		m0 = m;
321 		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
322 					     bf->bf_segs, &bf->bf_nseg,
323 					     BUS_DMA_NOWAIT);
324 		if (error != 0) {
325 			sc->sc_stats.ast_tx_busdma++;
326 			ieee80211_free_mbuf(m0);
327 			return error;
328 		}
329 		KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,
330 		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
331 	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
332 		sc->sc_stats.ast_tx_nodata++;
333 		ieee80211_free_mbuf(m0);
334 		return EIO;
335 	}
336 	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
337 		__func__, m0, m0->m_pkthdr.len);
338 	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
339 	bf->bf_m = m0;
340 
341 	return 0;
342 }
343 
344 /*
345  * Chain together segments+descriptors for a frame - 11n or otherwise.
346  *
347  * For aggregates, this is called on each frame in the aggregate.
348  */
349 static void
350 ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
351     struct ath_buf *bf, int is_aggr, int is_first_subframe,
352     int is_last_subframe)
353 {
354 	struct ath_hal *ah = sc->sc_ah;
355 	char *ds;
356 	int i, bp, dsp;
357 	HAL_DMA_ADDR bufAddrList[4];
358 	uint32_t segLenList[4];
359 	int numTxMaps = 1;
360 	int isFirstDesc = 1;
361 
362 	/*
363 	 * XXX There's txdma and txdma_mgmt; the descriptor
364 	 * sizes must match.
365 	 */
366 	struct ath_descdma *dd = &sc->sc_txdma;
367 
368 	/*
369 	 * Fillin the remainder of the descriptor info.
370 	 */
371 
372 	/*
373 	 * We need the number of TX data pointers in each descriptor.
374 	 * EDMA and later chips support 4 TX buffers per descriptor;
375 	 * previous chips just support one.
376 	 */
377 	numTxMaps = sc->sc_tx_nmaps;
378 
379 	/*
380 	 * For EDMA and later chips ensure the TX map is fully populated
381 	 * before advancing to the next descriptor.
382 	 */
383 	ds = (char *) bf->bf_desc;
384 	bp = dsp = 0;
385 	bzero(bufAddrList, sizeof(bufAddrList));
386 	bzero(segLenList, sizeof(segLenList));
387 	for (i = 0; i < bf->bf_nseg; i++) {
388 		bufAddrList[bp] = bf->bf_segs[i].ds_addr;
389 		segLenList[bp] = bf->bf_segs[i].ds_len;
390 		bp++;
391 
392 		/*
393 		 * Go to the next segment if this isn't the last segment
394 		 * and there's space in the current TX map.
395 		 */
396 		if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
397 			continue;
398 
399 		/*
400 		 * Last segment or we're out of buffer pointers.
401 		 */
402 		bp = 0;
403 
404 		if (i == bf->bf_nseg - 1)
405 			ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);
406 		else
407 			ath_hal_settxdesclink(ah, (struct ath_desc *) ds,
408 			    bf->bf_daddr + dd->dd_descsize * (dsp + 1));
409 
410 		/*
411 		 * XXX This assumes that bfs_txq is the actual destination
412 		 * hardware queue at this point.  It may not have been
413 		 * assigned, it may actually be pointing to the multicast
414 		 * software TXQ id.  These must be fixed!
415 		 */
416 		ath_hal_filltxdesc(ah, (struct ath_desc *) ds
417 			, bufAddrList
418 			, segLenList
419 			, bf->bf_descid		/* XXX desc id */
420 			, bf->bf_state.bfs_tx_queue
421 			, isFirstDesc		/* first segment */
422 			, i == bf->bf_nseg - 1	/* last segment */
423 			, (struct ath_desc *) ds0	/* first descriptor */
424 		);
425 
426 		/*
427 		 * Make sure the 11n aggregate fields are cleared.
428 		 *
429 		 * XXX TODO: this doesn't need to be called for
430 		 * aggregate frames; as it'll be called on all
431 		 * sub-frames.  Since the descriptors are in
432 		 * non-cacheable memory, this leads to some
433 		 * rather slow writes on MIPS/ARM platforms.
434 		 */
435 		if (ath_tx_is_11n(sc))
436 			ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);
437 
438 		/*
439 		 * If 11n is enabled, set it up as if it's an aggregate
440 		 * frame.
441 		 */
442 		if (is_last_subframe) {
443 			ath_hal_set11n_aggr_last(sc->sc_ah,
444 			    (struct ath_desc *) ds);
445 		} else if (is_aggr) {
446 			/*
447 			 * This clears the aggrlen field; so
448 			 * the caller needs to call set_aggr_first()!
449 			 *
450 			 * XXX TODO: don't call this for the first
451 			 * descriptor in the first frame in an
452 			 * aggregate!
453 			 */
454 			ath_hal_set11n_aggr_middle(sc->sc_ah,
455 			    (struct ath_desc *) ds,
456 			    bf->bf_state.bfs_ndelim);
457 		}
458 		isFirstDesc = 0;
459 		bf->bf_lastds = (struct ath_desc *) ds;
460 
461 		/*
462 		 * Don't forget to skip to the next descriptor.
463 		 */
464 		ds += sc->sc_tx_desclen;
465 		dsp++;
466 
467 		/*
468 		 * .. and don't forget to blank these out!
469 		 */
470 		bzero(bufAddrList, sizeof(bufAddrList));
471 		bzero(segLenList, sizeof(segLenList));
472 	}
473 	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
474 }
475 
476 /*
477  * Set the rate control fields in the given descriptor based on
478  * the bf_state fields and node state.
479  *
480  * The bfs fields should already be set with the relevant rate
481  * control information, including whether MRR is to be enabled.
482  *
483  * Since the FreeBSD HAL currently sets up the first TX rate
484  * in ath_hal_setuptxdesc(), this will setup the MRR
485  * conditionally for the pre-11n chips, and call ath_buf_set_rate
486  * unconditionally for 11n chips. These require the 11n rate
487  * scenario to be set if MCS rates are enabled, so it's easier
488  * to just always call it. The caller can then only set rates 2, 3
489  * and 4 if multi-rate retry is needed.
490  */
491 static void
492 ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
493     struct ath_buf *bf)
494 {
495 	struct ath_rc_series *rc = bf->bf_state.bfs_rc;
496 
497 	/* If mrr is disabled, blank tries 1, 2, 3 */
498 	if (! bf->bf_state.bfs_ismrr)
499 		rc[1].tries = rc[2].tries = rc[3].tries = 0;
500 
501 #if 0
502 	/*
503 	 * If NOACK is set, just set ntries=1.
504 	 */
505 	else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
506 		rc[1].tries = rc[2].tries = rc[3].tries = 0;
507 		rc[0].tries = 1;
508 	}
509 #endif
510 
511 	/*
512 	 * Always call - that way a retried descriptor will
513 	 * have the MRR fields overwritten.
514 	 *
515 	 * XXX TODO: see if this is really needed - setting up
516 	 * the first descriptor should set the MRR fields to 0
517 	 * for us anyway.
518 	 */
519 	if (ath_tx_is_11n(sc)) {
520 		ath_buf_set_rate(sc, ni, bf);
521 	} else {
522 		ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
523 			, rc[1].ratecode, rc[1].tries
524 			, rc[2].ratecode, rc[2].tries
525 			, rc[3].ratecode, rc[3].tries
526 		);
527 	}
528 }
529 
530 /*
531  * Setup segments+descriptors for an 11n aggregate.
532  * bf_first is the first buffer in the aggregate.
533  * The descriptor list must already been linked together using
534  * bf->bf_next.
535  */
536 static void
537 ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
538 {
539 	struct ath_buf *bf, *bf_prev = NULL;
540 	struct ath_desc *ds0 = bf_first->bf_desc;
541 
542 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
543 	    __func__, bf_first->bf_state.bfs_nframes,
544 	    bf_first->bf_state.bfs_al);
545 
546 	bf = bf_first;
547 
548 	if (bf->bf_state.bfs_txrate0 == 0)
549 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",
550 		    __func__, bf, 0);
551 	if (bf->bf_state.bfs_rc[0].ratecode == 0)
552 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",
553 		    __func__, bf, 0);
554 
555 	/*
556 	 * Setup all descriptors of all subframes - this will
557 	 * call ath_hal_set11naggrmiddle() on every frame.
558 	 */
559 	while (bf != NULL) {
560 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
561 		    "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
562 		    __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
563 		    SEQNO(bf->bf_state.bfs_seqno));
564 
565 		/*
566 		 * Setup the initial fields for the first descriptor - all
567 		 * the non-11n specific stuff.
568 		 */
569 		ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
570 			, bf->bf_state.bfs_pktlen	/* packet length */
571 			, bf->bf_state.bfs_hdrlen	/* header length */
572 			, bf->bf_state.bfs_atype	/* Atheros packet type */
573 			, bf->bf_state.bfs_txpower	/* txpower */
574 			, bf->bf_state.bfs_txrate0
575 			, bf->bf_state.bfs_try0		/* series 0 rate/tries */
576 			, bf->bf_state.bfs_keyix	/* key cache index */
577 			, bf->bf_state.bfs_txantenna	/* antenna mode */
578 			, bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ	/* flags */
579 			, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
580 			, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
581 		);
582 
583 		/*
584 		 * First descriptor? Setup the rate control and initial
585 		 * aggregate header information.
586 		 */
587 		if (bf == bf_first) {
588 			/*
589 			 * setup first desc with rate and aggr info
590 			 */
591 			ath_tx_set_ratectrl(sc, bf->bf_node, bf);
592 		}
593 
594 		/*
595 		 * Setup the descriptors for a multi-descriptor frame.
596 		 * This is both aggregate and non-aggregate aware.
597 		 */
598 		ath_tx_chaindesclist(sc, ds0, bf,
599 		    1, /* is_aggr */
600 		    !! (bf == bf_first), /* is_first_subframe */
601 		    !! (bf->bf_next == NULL) /* is_last_subframe */
602 		    );
603 
604 		if (bf == bf_first) {
605 			/*
606 			 * Initialise the first 11n aggregate with the
607 			 * aggregate length and aggregate enable bits.
608 			 */
609 			ath_hal_set11n_aggr_first(sc->sc_ah,
610 			    ds0,
611 			    bf->bf_state.bfs_al,
612 			    bf->bf_state.bfs_ndelim);
613 		}
614 
615 		/*
616 		 * Link the last descriptor of the previous frame
617 		 * to the beginning descriptor of this frame.
618 		 */
619 		if (bf_prev != NULL)
620 			ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
621 			    bf->bf_daddr);
622 
623 		/* Save a copy so we can link the next descriptor in */
624 		bf_prev = bf;
625 		bf = bf->bf_next;
626 	}
627 
628 	/*
629 	 * Set the first descriptor bf_lastds field to point to
630 	 * the last descriptor in the last subframe, that's where
631 	 * the status update will occur.
632 	 */
633 	bf_first->bf_lastds = bf_prev->bf_lastds;
634 
635 	/*
636 	 * And bf_last in the first descriptor points to the end of
637 	 * the aggregate list.
638 	 */
639 	bf_first->bf_last = bf_prev;
640 
641 	/*
642 	 * For non-AR9300 NICs, which require the rate control
643 	 * in the final descriptor - let's set that up now.
644 	 *
645 	 * This is because the filltxdesc() HAL call doesn't
646 	 * populate the last segment with rate control information
647 	 * if firstSeg is also true.  For non-aggregate frames
648 	 * that is fine, as the first frame already has rate control
649 	 * info.  But if the last frame in an aggregate has one
650 	 * descriptor, both firstseg and lastseg will be true and
651 	 * the rate info isn't copied.
652 	 *
653 	 * This is inefficient on MIPS/ARM platforms that have
654 	 * non-cachable memory for TX descriptors, but we'll just
655 	 * make do for now.
656 	 *
657 	 * As to why the rate table is stashed in the last descriptor
658 	 * rather than the first descriptor?  Because proctxdesc()
659 	 * is called on the final descriptor in an MPDU or A-MPDU -
660 	 * ie, the one that gets updated by the hardware upon
661 	 * completion.  That way proctxdesc() doesn't need to know
662 	 * about the first _and_ last TX descriptor.
663 	 */
664 	ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);
665 
666 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
667 }
668 
669 /*
670  * Hand-off a frame to the multicast TX queue.
671  *
672  * This is a software TXQ which will be appended to the CAB queue
673  * during the beacon setup code.
674  *
675  * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
676  * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated
677  * with the actual hardware txq, or all of this will fall apart.
678  *
679  * XXX It may not be a bad idea to just stuff the QCU ID into bf_state
680  * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated
681  * correctly.
682  */
683 static void
684 ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
685     struct ath_buf *bf)
686 {
687 	ATH_TX_LOCK_ASSERT(sc);
688 
689 	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
690 	     ("%s: busy status 0x%x", __func__, bf->bf_flags));
691 
692 	/*
693 	 * Ensure that the tx queue is the cabq, so things get
694 	 * mapped correctly.
695 	 */
696 	if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
697 		DPRINTF(sc, ATH_DEBUG_XMIT,
698 		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
699 		    __func__, bf, bf->bf_state.bfs_tx_queue,
700 		    txq->axq_qnum);
701 	}
702 
703 	ATH_TXQ_LOCK(txq);
704 	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
705 		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
706 		struct ieee80211_frame *wh;
707 
708 		/* mark previous frame */
709 		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
710 		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
711 		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
712 		    BUS_DMASYNC_PREWRITE);
713 
714 		/* link descriptor */
715 		ath_hal_settxdesclink(sc->sc_ah,
716 		    bf_last->bf_lastds,
717 		    bf->bf_daddr);
718 	}
719 	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
720 	ATH_TXQ_UNLOCK(txq);
721 }
722 
723 /*
724  * Hand-off packet to a hardware queue.
725  */
726 static void
727 ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
728     struct ath_buf *bf)
729 {
730 	struct ath_hal *ah = sc->sc_ah;
731 	struct ath_buf *bf_first;
732 
733 	/*
734 	 * Insert the frame on the outbound list and pass it on
735 	 * to the hardware.  Multicast frames buffered for power
736 	 * save stations and transmit from the CAB queue are stored
737 	 * on a s/w only queue and loaded on to the CAB queue in
738 	 * the SWBA handler since frames only go out on DTIM and
739 	 * to avoid possible races.
740 	 */
741 	ATH_TX_LOCK_ASSERT(sc);
742 	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
743 	     ("%s: busy status 0x%x", __func__, bf->bf_flags));
744 	KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
745 	     ("ath_tx_handoff_hw called for mcast queue"));
746 
747 	/*
748 	 * XXX We should instead just verify that sc_txstart_cnt
749 	 * or ath_txproc_cnt > 0.  That would mean that
750 	 * the reset is going to be waiting for us to complete.
751 	 */
752 	if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) {
753 		device_printf(sc->sc_dev,
754 		    "%s: TX dispatch without holding txcount/txstart refcnt!\n",
755 		    __func__);
756 	}
757 
758 	/*
759 	 * XXX .. this is going to cause the hardware to get upset;
760 	 * so we really should find some way to drop or queue
761 	 * things.
762 	 */
763 
764 	ATH_TXQ_LOCK(txq);
765 
766 	/*
767 	 * XXX TODO: if there's a holdingbf, then
768 	 * ATH_TXQ_PUTRUNNING should be clear.
769 	 *
770 	 * If there is a holdingbf and the list is empty,
771 	 * then axq_link should be pointing to the holdingbf.
772 	 *
773 	 * Otherwise it should point to the last descriptor
774 	 * in the last ath_buf.
775 	 *
776 	 * In any case, we should really ensure that we
777 	 * update the previous descriptor link pointer to
778 	 * this descriptor, regardless of all of the above state.
779 	 *
780 	 * For now this is captured by having axq_link point
781 	 * to either the holdingbf (if the TXQ list is empty)
782 	 * or the end of the list (if the TXQ list isn't empty.)
783 	 * I'd rather just kill axq_link here and do it as above.
784 	 */
785 
786 	/*
787 	 * Append the frame to the TX queue.
788 	 */
789 	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
790 	ATH_KTR(sc, ATH_KTR_TX, 3,
791 	    "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
792 	    "depth=%d",
793 	    txq->axq_qnum,
794 	    bf,
795 	    txq->axq_depth);
796 
797 	/*
798 	 * If there's a link pointer, update it.
799 	 *
800 	 * XXX we should replace this with the above logic, just
801 	 * to kill axq_link with fire.
802 	 */
803 	if (txq->axq_link != NULL) {
804 		*txq->axq_link = bf->bf_daddr;
805 		DPRINTF(sc, ATH_DEBUG_XMIT,
806 		    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
807 		    txq->axq_qnum, txq->axq_link,
808 		    (caddr_t)bf->bf_daddr, bf->bf_desc,
809 		    txq->axq_depth);
810 		ATH_KTR(sc, ATH_KTR_TX, 5,
811 		    "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
812 		    "lastds=%d",
813 		    txq->axq_qnum, txq->axq_link,
814 		    (caddr_t)bf->bf_daddr, bf->bf_desc,
815 		    bf->bf_lastds);
816 	}
817 
818 	/*
819 	 * If we've not pushed anything into the hardware yet,
820 	 * push the head of the queue into the TxDP.
821 	 *
822 	 * Once we've started DMA, there's no guarantee that
823 	 * updating the TxDP with a new value will actually work.
824 	 * So we just don't do that - if we hit the end of the list,
825 	 * we keep that buffer around (the "holding buffer") and
826 	 * re-start DMA by updating the link pointer of _that_
827 	 * descriptor and then restart DMA.
828 	 */
829 	if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
830 		bf_first = TAILQ_FIRST(&txq->axq_q);
831 		txq->axq_flags |= ATH_TXQ_PUTRUNNING;
832 		ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
833 		DPRINTF(sc, ATH_DEBUG_XMIT,
834 		    "%s: TXDP[%u] = %p (%p) depth %d\n",
835 		    __func__, txq->axq_qnum,
836 		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
837 		    txq->axq_depth);
838 		ATH_KTR(sc, ATH_KTR_TX, 5,
839 		    "ath_tx_handoff: TXDP[%u] = %p (%p) "
840 		    "lastds=%p depth %d",
841 		    txq->axq_qnum,
842 		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
843 		    bf_first->bf_lastds,
844 		    txq->axq_depth);
845 	}
846 
847 	/*
848 	 * Ensure that the bf TXQ matches this TXQ, so later
849 	 * checking and holding buffer manipulation is sane.
850 	 */
851 	if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
852 		DPRINTF(sc, ATH_DEBUG_XMIT,
853 		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
854 		    __func__, bf, bf->bf_state.bfs_tx_queue,
855 		    txq->axq_qnum);
856 	}
857 
858 	/*
859 	 * Track aggregate queue depth.
860 	 */
861 	if (bf->bf_state.bfs_aggr)
862 		txq->axq_aggr_depth++;
863 
864 	/*
865 	 * Update the link pointer.
866 	 */
867 	ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);
868 
869 	/*
870 	 * Start DMA.
871 	 *
872 	 * If we wrote a TxDP above, DMA will start from here.
873 	 *
874 	 * If DMA is running, it'll do nothing.
875 	 *
876 	 * If the DMA engine hit the end of the QCU list (ie LINK=NULL,
877 	 * or VEOL) then it stops at the last transmitted write.
878 	 * We then append a new frame by updating the link pointer
879 	 * in that descriptor and then kick TxE here; it will re-read
880 	 * that last descriptor and find the new descriptor to transmit.
881 	 *
882 	 * This is why we keep the holding descriptor around.
883 	 */
884 	ath_hal_txstart(ah, txq->axq_qnum);
885 	ATH_TXQ_UNLOCK(txq);
886 	ATH_KTR(sc, ATH_KTR_TX, 1,
887 	    "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
888 }
889 
890 /*
891  * Restart TX DMA for the given TXQ.
892  *
893  * This must be called whether the queue is empty or not.
894  */
895 static void
896 ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
897 {
898 	struct ath_buf *bf, *bf_last;
899 
900 	ATH_TXQ_LOCK_ASSERT(txq);
901 
902 	/* XXX make this ATH_TXQ_FIRST */
903 	bf = TAILQ_FIRST(&txq->axq_q);
904 	bf_last = ATH_TXQ_LAST(txq, axq_q_s);
905 
906 	if (bf == NULL)
907 		return;
908 
909 	DPRINTF(sc, ATH_DEBUG_RESET,
910 	    "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n",
911 	    __func__,
912 	    txq->axq_qnum,
913 	    bf,
914 	    bf_last,
915 	    (uint32_t) bf->bf_daddr);
916 
917 #ifdef	ATH_DEBUG
918 	if (sc->sc_debug & ATH_DEBUG_RESET)
919 		ath_tx_dump(sc, txq);
920 #endif
921 
922 	/*
923 	 * This is called from a restart, so DMA is known to be
924 	 * completely stopped.
925 	 */
926 	KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
927 	    ("%s: Q%d: called with PUTRUNNING=1\n",
928 	    __func__,
929 	    txq->axq_qnum));
930 
931 	ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
932 	txq->axq_flags |= ATH_TXQ_PUTRUNNING;
933 
934 	ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
935 	    &txq->axq_link);
936 	ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
937 }
938 
939 /*
940  * Hand off a packet to the hardware (or mcast queue.)
941  *
942  * The relevant hardware txq should be locked.
943  */
944 static void
945 ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
946     struct ath_buf *bf)
947 {
948 	ATH_TX_LOCK_ASSERT(sc);
949 
950 #ifdef	ATH_DEBUG_ALQ
951 	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
952 		ath_tx_alq_post(sc, bf);
953 #endif
954 
955 	if (txq->axq_qnum == ATH_TXQ_SWQ)
956 		ath_tx_handoff_mcast(sc, txq, bf);
957 	else
958 		ath_tx_handoff_hw(sc, txq, bf);
959 }
960 
961 static int
962 ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
963     struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
964     int *keyix)
965 {
966 	DPRINTF(sc, ATH_DEBUG_XMIT,
967 	    "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n",
968 	    __func__,
969 	    *hdrlen,
970 	    *pktlen,
971 	    isfrag,
972 	    iswep,
973 	    m0);
974 
975 	if (iswep) {
976 		const struct ieee80211_cipher *cip;
977 		struct ieee80211_key *k;
978 
979 		/*
980 		 * Construct the 802.11 header+trailer for an encrypted
981 		 * frame. The only reason this can fail is because of an
982 		 * unknown or unsupported cipher/key type.
983 		 */
984 		k = ieee80211_crypto_encap(ni, m0);
985 		if (k == NULL) {
986 			/*
987 			 * This can happen when the key is yanked after the
988 			 * frame was queued.  Just discard the frame; the
989 			 * 802.11 layer counts failures and provides
990 			 * debugging/diagnostics.
991 			 */
992 			return (0);
993 		}
994 		/*
995 		 * Adjust the packet + header lengths for the crypto
996 		 * additions and calculate the h/w key index.  When
997 		 * a s/w mic is done the frame will have had any mic
998 		 * added to it prior to entry so m0->m_pkthdr.len will
999 		 * account for it. Otherwise we need to add it to the
1000 		 * packet length.
1001 		 */
1002 		cip = k->wk_cipher;
1003 		(*hdrlen) += cip->ic_header;
1004 		(*pktlen) += cip->ic_header + cip->ic_trailer;
1005 		/* NB: frags always have any TKIP MIC done in s/w */
1006 		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
1007 			(*pktlen) += cip->ic_miclen;
1008 		(*keyix) = k->wk_keyix;
1009 	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
1010 		/*
1011 		 * Use station key cache slot, if assigned.
1012 		 */
1013 		(*keyix) = ni->ni_ucastkey.wk_keyix;
1014 		if ((*keyix) == IEEE80211_KEYIX_NONE)
1015 			(*keyix) = HAL_TXKEYIX_INVALID;
1016 	} else
1017 		(*keyix) = HAL_TXKEYIX_INVALID;
1018 
1019 	return (1);
1020 }
1021 
1022 /*
1023  * Calculate whether interoperability protection is required for
1024  * this frame.
1025  *
1026  * This requires the rate control information be filled in,
1027  * as the protection requirement depends upon the current
1028  * operating mode / PHY.
1029  */
1030 static void
1031 ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
1032 {
1033 	struct ieee80211_frame *wh;
1034 	uint8_t rix;
1035 	uint16_t flags;
1036 	int shortPreamble;
1037 	const HAL_RATE_TABLE *rt = sc->sc_currates;
1038 	struct ieee80211com *ic = &sc->sc_ic;
1039 
1040 	flags = bf->bf_state.bfs_txflags;
1041 	rix = bf->bf_state.bfs_rc[0].rix;
1042 	shortPreamble = bf->bf_state.bfs_shpream;
1043 	wh = mtod(bf->bf_m, struct ieee80211_frame *);
1044 
1045 	/*
1046 	 * If 802.11g protection is enabled, determine whether
1047 	 * to use RTS/CTS or just CTS.  Note that this is only
1048 	 * done for OFDM unicast frames.
1049 	 */
1050 	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
1051 	    rt->info[rix].phy == IEEE80211_T_OFDM &&
1052 	    (flags & HAL_TXDESC_NOACK) == 0) {
1053 		bf->bf_state.bfs_doprot = 1;
1054 		/* XXX fragments must use CCK rates w/ protection */
1055 		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
1056 			flags |= HAL_TXDESC_RTSENA;
1057 		} else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
1058 			flags |= HAL_TXDESC_CTSENA;
1059 		}
1060 		/*
1061 		 * For frags it would be desirable to use the
1062 		 * highest CCK rate for RTS/CTS.  But stations
1063 		 * farther away may detect it at a lower CCK rate
1064 		 * so use the configured protection rate instead
1065 		 * (for now).
1066 		 */
1067 		sc->sc_stats.ast_tx_protect++;
1068 	}
1069 
1070 	/*
1071 	 * If 11n protection is enabled and it's a HT frame,
1072 	 * enable RTS.
1073 	 *
1074 	 * XXX ic_htprotmode or ic_curhtprotmode?
1075 	 * XXX should it_htprotmode only matter if ic_curhtprotmode
1076 	 * XXX indicates it's not a HT pure environment?
1077 	 */
1078 	if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
1079 	    rt->info[rix].phy == IEEE80211_T_HT &&
1080 	    (flags & HAL_TXDESC_NOACK) == 0) {
1081 		flags |= HAL_TXDESC_RTSENA;
1082 		sc->sc_stats.ast_tx_htprotect++;
1083 	}
1084 	bf->bf_state.bfs_txflags = flags;
1085 }
1086 
1087 /*
1088  * Update the frame duration given the currently selected rate.
1089  *
1090  * This also updates the frame duration value, so it will require
1091  * a DMA flush.
1092  */
1093 static void
1094 ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
1095 {
1096 	struct ieee80211_frame *wh;
1097 	uint8_t rix;
1098 	uint16_t flags;
1099 	int shortPreamble;
1100 	struct ath_hal *ah = sc->sc_ah;
1101 	const HAL_RATE_TABLE *rt = sc->sc_currates;
1102 	int isfrag = bf->bf_m->m_flags & M_FRAG;
1103 
1104 	flags = bf->bf_state.bfs_txflags;
1105 	rix = bf->bf_state.bfs_rc[0].rix;
1106 	shortPreamble = bf->bf_state.bfs_shpream;
1107 	wh = mtod(bf->bf_m, struct ieee80211_frame *);
1108 
1109 	/*
1110 	 * Calculate duration.  This logically belongs in the 802.11
1111 	 * layer but it lacks sufficient information to calculate it.
1112 	 */
1113 	if ((flags & HAL_TXDESC_NOACK) == 0 &&
1114 	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
1115 		u_int16_t dur;
1116 		if (shortPreamble)
1117 			dur = rt->info[rix].spAckDuration;
1118 		else
1119 			dur = rt->info[rix].lpAckDuration;
1120 		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
1121 			dur += dur;		/* additional SIFS+ACK */
1122 			/*
1123 			 * Include the size of next fragment so NAV is
1124 			 * updated properly.  The last fragment uses only
1125 			 * the ACK duration
1126 			 *
1127 			 * XXX TODO: ensure that the rate lookup for each
1128 			 * fragment is the same as the rate used by the
1129 			 * first fragment!
1130 			 */
1131 			dur += ath_hal_computetxtime(ah,
1132 			    rt,
1133 			    bf->bf_nextfraglen,
1134 			    rix, shortPreamble);
1135 		}
1136 		if (isfrag) {
1137 			/*
1138 			 * Force hardware to use computed duration for next
1139 			 * fragment by disabling multi-rate retry which updates
1140 			 * duration based on the multi-rate duration table.
1141 			 */
1142 			bf->bf_state.bfs_ismrr = 0;
1143 			bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
1144 			/* XXX update bfs_rc[0].try? */
1145 		}
1146 
1147 		/* Update the duration field itself */
1148 		*(u_int16_t *)wh->i_dur = htole16(dur);
1149 	}
1150 }
1151 
1152 static uint8_t
1153 ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
1154     int cix, int shortPreamble)
1155 {
1156 	uint8_t ctsrate;
1157 
1158 	/*
1159 	 * CTS transmit rate is derived from the transmit rate
1160 	 * by looking in the h/w rate table.  We must also factor
1161 	 * in whether or not a short preamble is to be used.
1162 	 */
1163 	/* NB: cix is set above where RTS/CTS is enabled */
1164 	KASSERT(cix != 0xff, ("cix not setup"));
1165 	ctsrate = rt->info[cix].rateCode;
1166 
1167 	/* XXX this should only matter for legacy rates */
1168 	if (shortPreamble)
1169 		ctsrate |= rt->info[cix].shortPreamble;
1170 
1171 	return (ctsrate);
1172 }
1173 
1174 /*
1175  * Calculate the RTS/CTS duration for legacy frames.
1176  */
1177 static int
1178 ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
1179     int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
1180     int flags)
1181 {
1182 	int ctsduration = 0;
1183 
1184 	/* This mustn't be called for HT modes */
1185 	if (rt->info[cix].phy == IEEE80211_T_HT) {
1186 		printf("%s: HT rate where it shouldn't be (0x%x)\n",
1187 		    __func__, rt->info[cix].rateCode);
1188 		return (-1);
1189 	}
1190 
1191 	/*
1192 	 * Compute the transmit duration based on the frame
1193 	 * size and the size of an ACK frame.  We call into the
1194 	 * HAL to do the computation since it depends on the
1195 	 * characteristics of the actual PHY being used.
1196 	 *
1197 	 * NB: CTS is assumed the same size as an ACK so we can
1198 	 *     use the precalculated ACK durations.
1199 	 */
1200 	if (shortPreamble) {
1201 		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
1202 			ctsduration += rt->info[cix].spAckDuration;
1203 		ctsduration += ath_hal_computetxtime(ah,
1204 			rt, pktlen, rix, AH_TRUE);
1205 		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
1206 			ctsduration += rt->info[rix].spAckDuration;
1207 	} else {
1208 		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
1209 			ctsduration += rt->info[cix].lpAckDuration;
1210 		ctsduration += ath_hal_computetxtime(ah,
1211 			rt, pktlen, rix, AH_FALSE);
1212 		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
1213 			ctsduration += rt->info[rix].lpAckDuration;
1214 	}
1215 
1216 	return (ctsduration);
1217 }
1218 
1219 /*
1220  * Update the given ath_buf with updated rts/cts setup and duration
1221  * values.
1222  *
1223  * To support rate lookups for each software retry, the rts/cts rate
1224  * and cts duration must be re-calculated.
1225  *
1226  * This function assumes the RTS/CTS flags have been set as needed;
1227  * mrr has been disabled; and the rate control lookup has been done.
1228  *
1229  * XXX TODO: MRR need only be disabled for the pre-11n NICs.
1230  * XXX The 11n NICs support per-rate RTS/CTS configuration.
1231  */
1232 static void
1233 ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
1234 {
1235 	uint16_t ctsduration = 0;
1236 	uint8_t ctsrate = 0;
1237 	uint8_t rix = bf->bf_state.bfs_rc[0].rix;
1238 	uint8_t cix = 0;
1239 	const HAL_RATE_TABLE *rt = sc->sc_currates;
1240 
1241 	/*
1242 	 * No RTS/CTS enabled? Don't bother.
1243 	 */
1244 	if ((bf->bf_state.bfs_txflags &
1245 	    (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
1246 		/* XXX is this really needed? */
1247 		bf->bf_state.bfs_ctsrate = 0;
1248 		bf->bf_state.bfs_ctsduration = 0;
1249 		return;
1250 	}
1251 
1252 	/*
1253 	 * If protection is enabled, use the protection rix control
1254 	 * rate. Otherwise use the rate0 control rate.
1255 	 */
1256 	if (bf->bf_state.bfs_doprot)
1257 		rix = sc->sc_protrix;
1258 	else
1259 		rix = bf->bf_state.bfs_rc[0].rix;
1260 
1261 	/*
1262 	 * If the raw path has hard-coded ctsrate0 to something,
1263 	 * use it.
1264 	 */
1265 	if (bf->bf_state.bfs_ctsrate0 != 0)
1266 		cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
1267 	else
1268 		/* Control rate from above */
1269 		cix = rt->info[rix].controlRate;
1270 
1271 	/* Calculate the rtscts rate for the given cix */
1272 	ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
1273 	    bf->bf_state.bfs_shpream);
1274 
1275 	/* The 11n chipsets do ctsduration calculations for you */
1276 	if (! ath_tx_is_11n(sc))
1277 		ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
1278 		    bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
1279 		    rt, bf->bf_state.bfs_txflags);
1280 
1281 	/* Squirrel away in ath_buf */
1282 	bf->bf_state.bfs_ctsrate = ctsrate;
1283 	bf->bf_state.bfs_ctsduration = ctsduration;
1284 
1285 	/*
1286 	 * Must disable multi-rate retry when using RTS/CTS.
1287 	 */
1288 	if (!sc->sc_mrrprot) {
1289 		bf->bf_state.bfs_ismrr = 0;
1290 		bf->bf_state.bfs_try0 =
1291 		    bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */
1292 	}
1293 }
1294 
1295 /*
1296  * Setup the descriptor chain for a normal or fast-frame
1297  * frame.
1298  *
1299  * XXX TODO: extend to include the destination hardware QCU ID.
1300  * Make sure that is correct.  Make sure that when being added
1301  * to the mcastq, the CABQ QCUID is set or things will get a bit
1302  * odd.
1303  */
1304 static void
1305 ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
1306 {
1307 	struct ath_desc *ds = bf->bf_desc;
1308 	struct ath_hal *ah = sc->sc_ah;
1309 
1310 	if (bf->bf_state.bfs_txrate0 == 0)
1311 		DPRINTF(sc, ATH_DEBUG_XMIT,
1312 		    "%s: bf=%p, txrate0=%d\n", __func__, bf, 0);
1313 
1314 	ath_hal_setuptxdesc(ah, ds
1315 		, bf->bf_state.bfs_pktlen	/* packet length */
1316 		, bf->bf_state.bfs_hdrlen	/* header length */
1317 		, bf->bf_state.bfs_atype	/* Atheros packet type */
1318 		, bf->bf_state.bfs_txpower	/* txpower */
1319 		, bf->bf_state.bfs_txrate0
1320 		, bf->bf_state.bfs_try0		/* series 0 rate/tries */
1321 		, bf->bf_state.bfs_keyix	/* key cache index */
1322 		, bf->bf_state.bfs_txantenna	/* antenna mode */
1323 		, bf->bf_state.bfs_txflags	/* flags */
1324 		, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
1325 		, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
1326 	);
1327 
1328 	/*
1329 	 * This will be overriden when the descriptor chain is written.
1330 	 */
1331 	bf->bf_lastds = ds;
1332 	bf->bf_last = bf;
1333 
1334 	/* Set rate control and descriptor chain for this frame */
1335 	ath_tx_set_ratectrl(sc, bf->bf_node, bf);
1336 	ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);
1337 }
1338 
1339 /*
1340  * Do a rate lookup.
1341  *
1342  * This performs a rate lookup for the given ath_buf only if it's required.
1343  * Non-data frames and raw frames don't require it.
1344  *
1345  * This populates the primary and MRR entries; MRR values are
1346  * then disabled later on if something requires it (eg RTS/CTS on
1347  * pre-11n chipsets.
1348  *
1349  * This needs to be done before the RTS/CTS fields are calculated
1350  * as they may depend upon the rate chosen.
1351  */
1352 static void
1353 ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf)
1354 {
1355 	uint8_t rate, rix;
1356 	int try0;
1357 
1358 	if (! bf->bf_state.bfs_doratelookup)
1359 		return;
1360 
1361 	/* Get rid of any previous state */
1362 	bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1363 
1364 	ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
1365 	ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
1366 	    bf->bf_state.bfs_pktlen, &rix, &try0, &rate);
1367 
1368 	/* In case MRR is disabled, make sure rc[0] is setup correctly */
1369 	bf->bf_state.bfs_rc[0].rix = rix;
1370 	bf->bf_state.bfs_rc[0].ratecode = rate;
1371 	bf->bf_state.bfs_rc[0].tries = try0;
1372 
1373 	if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
1374 		ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
1375 		    bf->bf_state.bfs_rc);
1376 	ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));
1377 
1378 	sc->sc_txrix = rix;	/* for LED blinking */
1379 	sc->sc_lastdatarix = rix;	/* for fast frames */
1380 	bf->bf_state.bfs_try0 = try0;
1381 	bf->bf_state.bfs_txrate0 = rate;
1382 }
1383 
1384 /*
1385  * Update the CLRDMASK bit in the ath_buf if it needs to be set.
1386  */
1387 static void
1388 ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
1389     struct ath_buf *bf)
1390 {
1391 	struct ath_node *an = ATH_NODE(bf->bf_node);
1392 
1393 	ATH_TX_LOCK_ASSERT(sc);
1394 
1395 	if (an->clrdmask == 1) {
1396 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
1397 		an->clrdmask = 0;
1398 	}
1399 }
1400 
1401 /*
1402  * Return whether this frame should be software queued or
1403  * direct dispatched.
1404  *
1405  * When doing powersave, BAR frames should be queued but other management
1406  * frames should be directly sent.
1407  *
1408  * When not doing powersave, stick BAR frames into the hardware queue
1409  * so it goes out even though the queue is paused.
1410  *
1411  * For now, management frames are also software queued by default.
1412  */
1413 static int
1414 ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,
1415     struct mbuf *m0, int *queue_to_head)
1416 {
1417 	struct ieee80211_node *ni = &an->an_node;
1418 	struct ieee80211_frame *wh;
1419 	uint8_t type, subtype;
1420 
1421 	wh = mtod(m0, struct ieee80211_frame *);
1422 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1423 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1424 
1425 	(*queue_to_head) = 0;
1426 
1427 	/* If it's not in powersave - direct-dispatch BAR */
1428 	if ((ATH_NODE(ni)->an_is_powersave == 0)
1429 	    && type == IEEE80211_FC0_TYPE_CTL &&
1430 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
1431 		DPRINTF(sc, ATH_DEBUG_SW_TX,
1432 		    "%s: BAR: TX'ing direct\n", __func__);
1433 		return (0);
1434 	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
1435 	    && type == IEEE80211_FC0_TYPE_CTL &&
1436 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
1437 		/* BAR TX whilst asleep; queue */
1438 		DPRINTF(sc, ATH_DEBUG_SW_TX,
1439 		    "%s: swq: TX'ing\n", __func__);
1440 		(*queue_to_head) = 1;
1441 		return (1);
1442 	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
1443 	    && (type == IEEE80211_FC0_TYPE_MGT ||
1444 	        type == IEEE80211_FC0_TYPE_CTL)) {
1445 		/*
1446 		 * Other control/mgmt frame; bypass software queuing
1447 		 * for now!
1448 		 */
1449 		DPRINTF(sc, ATH_DEBUG_XMIT,
1450 		    "%s: %6D: Node is asleep; sending mgmt "
1451 		    "(type=%d, subtype=%d)\n",
1452 		    __func__, ni->ni_macaddr, ":", type, subtype);
1453 		return (0);
1454 	} else {
1455 		return (1);
1456 	}
1457 }
1458 
1459 
1460 /*
1461  * Transmit the given frame to the hardware.
1462  *
1463  * The frame must already be setup; rate control must already have
1464  * been done.
1465  *
1466  * XXX since the TXQ lock is being held here (and I dislike holding
1467  * it for this long when not doing software aggregation), later on
1468  * break this function into "setup_normal" and "xmit_normal". The
1469  * lock only needs to be held for the ath_tx_handoff call.
1470  *
1471  * XXX we don't update the leak count here - if we're doing
1472  * direct frame dispatch, we need to be able to do it without
1473  * decrementing the leak count (eg multicast queue frames.)
1474  */
1475 static void
1476 ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
1477     struct ath_buf *bf)
1478 {
1479 	struct ath_node *an = ATH_NODE(bf->bf_node);
1480 	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
1481 
1482 	ATH_TX_LOCK_ASSERT(sc);
1483 
1484 	/*
1485 	 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does
1486 	 * set a completion handler however it doesn't (yet) properly
1487 	 * handle the strict ordering requirements needed for normal,
1488 	 * non-aggregate session frames.
1489 	 *
1490 	 * Once this is implemented, only set CLRDMASK like this for
1491 	 * frames that must go out - eg management/raw frames.
1492 	 */
1493 	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
1494 
1495 	/* Setup the descriptor before handoff */
1496 	ath_tx_do_ratelookup(sc, bf);
1497 	ath_tx_calc_duration(sc, bf);
1498 	ath_tx_calc_protection(sc, bf);
1499 	ath_tx_set_rtscts(sc, bf);
1500 	ath_tx_rate_fill_rcflags(sc, bf);
1501 	ath_tx_setds(sc, bf);
1502 
1503 	/* Track per-TID hardware queue depth correctly */
1504 	tid->hwq_depth++;
1505 
1506 	/* Assign the completion handler */
1507 	bf->bf_comp = ath_tx_normal_comp;
1508 
1509 	/* Hand off to hardware */
1510 	ath_tx_handoff(sc, txq, bf);
1511 }
1512 
1513 /*
1514  * Do the basic frame setup stuff that's required before the frame
1515  * is added to a software queue.
1516  *
1517  * All frames get mostly the same treatment and it's done once.
1518  * Retransmits fiddle with things like the rate control setup,
1519  * setting the retransmit bit in the packet; doing relevant DMA/bus
1520  * syncing and relinking it (back) into the hardware TX queue.
1521  *
1522  * Note that this may cause the mbuf to be reallocated, so
1523  * m0 may not be valid.
1524  */
1525 static int
1526 ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
1527     struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
1528 {
1529 	struct ieee80211vap *vap = ni->ni_vap;
1530 	struct ath_hal *ah = sc->sc_ah;
1531 	struct ieee80211com *ic = &sc->sc_ic;
1532 	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
1533 	int error, iswep, ismcast, isfrag, ismrr;
1534 	int keyix, hdrlen, pktlen, try0 = 0;
1535 	u_int8_t rix = 0, txrate = 0;
1536 	struct ath_desc *ds;
1537 	struct ieee80211_frame *wh;
1538 	u_int subtype, flags;
1539 	HAL_PKT_TYPE atype;
1540 	const HAL_RATE_TABLE *rt;
1541 	HAL_BOOL shortPreamble;
1542 	struct ath_node *an;
1543 	u_int pri;
1544 
1545 	/*
1546 	 * To ensure that both sequence numbers and the CCMP PN handling
1547 	 * is "correct", make sure that the relevant TID queue is locked.
1548 	 * Otherwise the CCMP PN and seqno may appear out of order, causing
1549 	 * re-ordered frames to have out of order CCMP PN's, resulting
1550 	 * in many, many frame drops.
1551 	 */
1552 	ATH_TX_LOCK_ASSERT(sc);
1553 
1554 	wh = mtod(m0, struct ieee80211_frame *);
1555 	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
1556 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1557 	isfrag = m0->m_flags & M_FRAG;
1558 	hdrlen = ieee80211_anyhdrsize(wh);
1559 	/*
1560 	 * Packet length must not include any
1561 	 * pad bytes; deduct them here.
1562 	 */
1563 	pktlen = m0->m_pkthdr.len - (hdrlen & 3);
1564 
1565 	/* Handle encryption twiddling if needed */
1566 	if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
1567 	    &pktlen, &keyix)) {
1568 		ieee80211_free_mbuf(m0);
1569 		return EIO;
1570 	}
1571 
1572 	/* packet header may have moved, reset our local pointer */
1573 	wh = mtod(m0, struct ieee80211_frame *);
1574 
1575 	pktlen += IEEE80211_CRC_LEN;
1576 
1577 	/*
1578 	 * Load the DMA map so any coalescing is done.  This
1579 	 * also calculates the number of descriptors we need.
1580 	 */
1581 	error = ath_tx_dmasetup(sc, bf, m0);
1582 	if (error != 0)
1583 		return error;
1584 	KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
1585 	bf->bf_node = ni;			/* NB: held reference */
1586 	m0 = bf->bf_m;				/* NB: may have changed */
1587 	wh = mtod(m0, struct ieee80211_frame *);
1588 
1589 	/* setup descriptors */
1590 	ds = bf->bf_desc;
1591 	rt = sc->sc_currates;
1592 	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
1593 
1594 	/*
1595 	 * NB: the 802.11 layer marks whether or not we should
1596 	 * use short preamble based on the current mode and
1597 	 * negotiated parameters.
1598 	 */
1599 	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
1600 	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
1601 		shortPreamble = AH_TRUE;
1602 		sc->sc_stats.ast_tx_shortpre++;
1603 	} else {
1604 		shortPreamble = AH_FALSE;
1605 	}
1606 
1607 	an = ATH_NODE(ni);
1608 	//flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
1609 	flags = 0;
1610 	ismrr = 0;				/* default no multi-rate retry*/
1611 	pri = M_WME_GETAC(m0);			/* honor classification */
1612 	/* XXX use txparams instead of fixed values */
1613 	/*
1614 	 * Calculate Atheros packet type from IEEE80211 packet header,
1615 	 * setup for rate calculations, and select h/w transmit queue.
1616 	 */
1617 	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
1618 	case IEEE80211_FC0_TYPE_MGT:
1619 		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1620 		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
1621 			atype = HAL_PKT_TYPE_BEACON;
1622 		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
1623 			atype = HAL_PKT_TYPE_PROBE_RESP;
1624 		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
1625 			atype = HAL_PKT_TYPE_ATIM;
1626 		else
1627 			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
1628 		rix = an->an_mgmtrix;
1629 		txrate = rt->info[rix].rateCode;
1630 		if (shortPreamble)
1631 			txrate |= rt->info[rix].shortPreamble;
1632 		try0 = ATH_TXMGTTRY;
1633 		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
1634 		break;
1635 	case IEEE80211_FC0_TYPE_CTL:
1636 		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
1637 		rix = an->an_mgmtrix;
1638 		txrate = rt->info[rix].rateCode;
1639 		if (shortPreamble)
1640 			txrate |= rt->info[rix].shortPreamble;
1641 		try0 = ATH_TXMGTTRY;
1642 		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
1643 		break;
1644 	case IEEE80211_FC0_TYPE_DATA:
1645 		atype = HAL_PKT_TYPE_NORMAL;		/* default */
1646 		/*
1647 		 * Data frames: multicast frames go out at a fixed rate,
1648 		 * EAPOL frames use the mgmt frame rate; otherwise consult
1649 		 * the rate control module for the rate to use.
1650 		 */
1651 		if (ismcast) {
1652 			rix = an->an_mcastrix;
1653 			txrate = rt->info[rix].rateCode;
1654 			if (shortPreamble)
1655 				txrate |= rt->info[rix].shortPreamble;
1656 			try0 = 1;
1657 		} else if (m0->m_flags & M_EAPOL) {
1658 			/* XXX? maybe always use long preamble? */
1659 			rix = an->an_mgmtrix;
1660 			txrate = rt->info[rix].rateCode;
1661 			if (shortPreamble)
1662 				txrate |= rt->info[rix].shortPreamble;
1663 			try0 = ATH_TXMAXTRY;	/* XXX?too many? */
1664 		} else {
1665 			/*
1666 			 * Do rate lookup on each TX, rather than using
1667 			 * the hard-coded TX information decided here.
1668 			 */
1669 			ismrr = 1;
1670 			bf->bf_state.bfs_doratelookup = 1;
1671 		}
1672 		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
1673 			flags |= HAL_TXDESC_NOACK;
1674 		break;
1675 	default:
1676 		device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
1677 		    wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
1678 		/* XXX statistic */
1679 		/* XXX free tx dmamap */
1680 		ieee80211_free_mbuf(m0);
1681 		return EIO;
1682 	}
1683 
1684 	/*
1685 	 * There are two known scenarios where the frame AC doesn't match
1686 	 * what the destination TXQ is.
1687 	 *
1688 	 * + non-QoS frames (eg management?) that the net80211 stack has
1689 	 *   assigned a higher AC to, but since it's a non-QoS TID, it's
1690 	 *   being thrown into TID 16.  TID 16 gets the AC_BE queue.
1691 	 *   It's quite possible that management frames should just be
1692 	 *   direct dispatched to hardware rather than go via the software
1693 	 *   queue; that should be investigated in the future.  There are
1694 	 *   some specific scenarios where this doesn't make sense, mostly
1695 	 *   surrounding ADDBA request/response - hence why that is special
1696 	 *   cased.
1697 	 *
1698 	 * + Multicast frames going into the VAP mcast queue.  That shows up
1699 	 *   as "TXQ 11".
1700 	 *
1701 	 * This driver should eventually support separate TID and TXQ locking,
1702 	 * allowing for arbitrary AC frames to appear on arbitrary software
1703 	 * queues, being queued to the "correct" hardware queue when needed.
1704 	 */
1705 #if 0
1706 	if (txq != sc->sc_ac2q[pri]) {
1707 		DPRINTF(sc, ATH_DEBUG_XMIT,
1708 		    "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
1709 		    __func__,
1710 		    txq,
1711 		    txq->axq_qnum,
1712 		    pri,
1713 		    sc->sc_ac2q[pri],
1714 		    sc->sc_ac2q[pri]->axq_qnum);
1715 	}
1716 #endif
1717 
1718 	/*
1719 	 * Calculate miscellaneous flags.
1720 	 */
1721 	if (ismcast) {
1722 		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
1723 	} else if (pktlen > vap->iv_rtsthreshold &&
1724 	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
1725 		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
1726 		sc->sc_stats.ast_tx_rts++;
1727 	}
1728 	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
1729 		sc->sc_stats.ast_tx_noack++;
1730 #ifdef IEEE80211_SUPPORT_TDMA
1731 	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
1732 		DPRINTF(sc, ATH_DEBUG_TDMA,
1733 		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
1734 		sc->sc_stats.ast_tdma_ack++;
1735 		/* XXX free tx dmamap */
1736 		ieee80211_free_mbuf(m0);
1737 		return EIO;
1738 	}
1739 #endif
1740 
1741 #if 0
1742 	/*
1743 	 * Placeholder: if you want to transmit with the azimuth
1744 	 * timestamp in the end of the payload, here's where you
1745 	 * should set the TXDESC field.
1746 	 */
1747 	flags |= HAL_TXDESC_HWTS;
1748 #endif
1749 
1750 	/*
1751 	 * Determine if a tx interrupt should be generated for
1752 	 * this descriptor.  We take a tx interrupt to reap
1753 	 * descriptors when the h/w hits an EOL condition or
1754 	 * when the descriptor is specifically marked to generate
1755 	 * an interrupt.  We periodically mark descriptors in this
1756 	 * way to insure timely replenishing of the supply needed
1757 	 * for sending frames.  Defering interrupts reduces system
1758 	 * load and potentially allows more concurrent work to be
1759 	 * done but if done to aggressively can cause senders to
1760 	 * backup.
1761 	 *
1762 	 * NB: use >= to deal with sc_txintrperiod changing
1763 	 *     dynamically through sysctl.
1764 	 */
1765 	if (flags & HAL_TXDESC_INTREQ) {
1766 		txq->axq_intrcnt = 0;
1767 	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
1768 		flags |= HAL_TXDESC_INTREQ;
1769 		txq->axq_intrcnt = 0;
1770 	}
1771 
1772 	/* This point forward is actual TX bits */
1773 
1774 	/*
1775 	 * At this point we are committed to sending the frame
1776 	 * and we don't need to look at m_nextpkt; clear it in
1777 	 * case this frame is part of frag chain.
1778 	 */
1779 	m0->m_nextpkt = NULL;
1780 
1781 	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
1782 		ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
1783 		    sc->sc_hwmap[rix].ieeerate, -1);
1784 
1785 	if (ieee80211_radiotap_active_vap(vap)) {
1786 		u_int64_t tsf = ath_hal_gettsf64(ah);
1787 
1788 		sc->sc_tx_th.wt_tsf = htole64(tsf);
1789 		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
1790 		if (iswep)
1791 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
1792 		if (isfrag)
1793 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
1794 		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
1795 		sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
1796 		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
1797 
1798 		ieee80211_radiotap_tx(vap, m0);
1799 	}
1800 
1801 	/* Blank the legacy rate array */
1802 	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1803 
1804 	/*
1805 	 * ath_buf_set_rate needs at least one rate/try to setup
1806 	 * the rate scenario.
1807 	 */
1808 	bf->bf_state.bfs_rc[0].rix = rix;
1809 	bf->bf_state.bfs_rc[0].tries = try0;
1810 	bf->bf_state.bfs_rc[0].ratecode = txrate;
1811 
1812 	/* Store the decided rate index values away */
1813 	bf->bf_state.bfs_pktlen = pktlen;
1814 	bf->bf_state.bfs_hdrlen = hdrlen;
1815 	bf->bf_state.bfs_atype = atype;
1816 	bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni);
1817 	bf->bf_state.bfs_txrate0 = txrate;
1818 	bf->bf_state.bfs_try0 = try0;
1819 	bf->bf_state.bfs_keyix = keyix;
1820 	bf->bf_state.bfs_txantenna = sc->sc_txantenna;
1821 	bf->bf_state.bfs_txflags = flags;
1822 	bf->bf_state.bfs_shpream = shortPreamble;
1823 
1824 	/* XXX this should be done in ath_tx_setrate() */
1825 	bf->bf_state.bfs_ctsrate0 = 0;	/* ie, no hard-coded ctsrate */
1826 	bf->bf_state.bfs_ctsrate = 0;	/* calculated later */
1827 	bf->bf_state.bfs_ctsduration = 0;
1828 	bf->bf_state.bfs_ismrr = ismrr;
1829 
1830 	return 0;
1831 }
1832 
1833 /*
1834  * Queue a frame to the hardware or software queue.
1835  *
1836  * This can be called by the net80211 code.
1837  *
1838  * XXX what about locking? Or, push the seqno assign into the
1839  * XXX aggregate scheduler so its serialised?
1840  *
1841  * XXX When sending management frames via ath_raw_xmit(),
1842  *     should CLRDMASK be set unconditionally?
1843  */
1844 int
1845 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
1846     struct ath_buf *bf, struct mbuf *m0)
1847 {
1848 	struct ieee80211vap *vap = ni->ni_vap;
1849 	struct ath_vap *avp = ATH_VAP(vap);
1850 	int r = 0;
1851 	u_int pri;
1852 	int tid;
1853 	struct ath_txq *txq;
1854 	int ismcast;
1855 	const struct ieee80211_frame *wh;
1856 	int is_ampdu, is_ampdu_tx, is_ampdu_pending;
1857 	ieee80211_seq seqno;
1858 	uint8_t type, subtype;
1859 	int queue_to_head;
1860 
1861 	ATH_TX_LOCK_ASSERT(sc);
1862 
1863 	/*
1864 	 * Determine the target hardware queue.
1865 	 *
1866 	 * For multicast frames, the txq gets overridden appropriately
1867 	 * depending upon the state of PS.
1868 	 *
1869 	 * For any other frame, we do a TID/QoS lookup inside the frame
1870 	 * to see what the TID should be. If it's a non-QoS frame, the
1871 	 * AC and TID are overridden. The TID/TXQ code assumes the
1872 	 * TID is on a predictable hardware TXQ, so we don't support
1873 	 * having a node TID queued to multiple hardware TXQs.
1874 	 * This may change in the future but would require some locking
1875 	 * fudgery.
1876 	 */
1877 	pri = ath_tx_getac(sc, m0);
1878 	tid = ath_tx_gettid(sc, m0);
1879 
1880 	txq = sc->sc_ac2q[pri];
1881 	wh = mtod(m0, struct ieee80211_frame *);
1882 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1883 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1884 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1885 
1886 	/*
1887 	 * Enforce how deep the multicast queue can grow.
1888 	 *
1889 	 * XXX duplicated in ath_raw_xmit().
1890 	 */
1891 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1892 		if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
1893 		    > sc->sc_txq_mcastq_maxdepth) {
1894 			sc->sc_stats.ast_tx_mcastq_overflow++;
1895 			m_freem(m0);
1896 			return (ENOBUFS);
1897 		}
1898 	}
1899 
1900 	/*
1901 	 * Enforce how deep the unicast queue can grow.
1902 	 *
1903 	 * If the node is in power save then we don't want
1904 	 * the software queue to grow too deep, or a node may
1905 	 * end up consuming all of the ath_buf entries.
1906 	 *
1907 	 * For now, only do this for DATA frames.
1908 	 *
1909 	 * We will want to cap how many management/control
1910 	 * frames get punted to the software queue so it doesn't
1911 	 * fill up.  But the correct solution isn't yet obvious.
1912 	 * In any case, this check should at least let frames pass
1913 	 * that we are direct-dispatching.
1914 	 *
1915 	 * XXX TODO: duplicate this to the raw xmit path!
1916 	 */
1917 	if (type == IEEE80211_FC0_TYPE_DATA &&
1918 	    ATH_NODE(ni)->an_is_powersave &&
1919 	    ATH_NODE(ni)->an_swq_depth >
1920 	     sc->sc_txq_node_psq_maxdepth) {
1921 		sc->sc_stats.ast_tx_node_psq_overflow++;
1922 		m_freem(m0);
1923 		return (ENOBUFS);
1924 	}
1925 
1926 	/* A-MPDU TX */
1927 	is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
1928 	is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
1929 	is_ampdu = is_ampdu_tx | is_ampdu_pending;
1930 
1931 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
1932 	    __func__, tid, pri, is_ampdu);
1933 
1934 	/* Set local packet state, used to queue packets to hardware */
1935 	bf->bf_state.bfs_tid = tid;
1936 	bf->bf_state.bfs_tx_queue = txq->axq_qnum;
1937 	bf->bf_state.bfs_pri = pri;
1938 
1939 #if 1
1940 	/*
1941 	 * When servicing one or more stations in power-save mode
1942 	 * (or) if there is some mcast data waiting on the mcast
1943 	 * queue (to prevent out of order delivery) multicast frames
1944 	 * must be bufferd until after the beacon.
1945 	 *
1946 	 * TODO: we should lock the mcastq before we check the length.
1947 	 */
1948 	if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
1949 		txq = &avp->av_mcastq;
1950 		/*
1951 		 * Mark the frame as eventually belonging on the CAB
1952 		 * queue, so the descriptor setup functions will
1953 		 * correctly initialise the descriptor 'qcuId' field.
1954 		 */
1955 		bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
1956 	}
1957 #endif
1958 
1959 	/* Do the generic frame setup */
1960 	/* XXX should just bzero the bf_state? */
1961 	bf->bf_state.bfs_dobaw = 0;
1962 
1963 	/* A-MPDU TX? Manually set sequence number */
1964 	/*
1965 	 * Don't do it whilst pending; the net80211 layer still
1966 	 * assigns them.
1967 	 */
1968 	if (is_ampdu_tx) {
1969 		/*
1970 		 * Always call; this function will
1971 		 * handle making sure that null data frames
1972 		 * don't get a sequence number from the current
1973 		 * TID and thus mess with the BAW.
1974 		 */
1975 		seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);
1976 
1977 		/*
1978 		 * Don't add QoS NULL frames to the BAW.
1979 		 */
1980 		if (IEEE80211_QOS_HAS_SEQ(wh) &&
1981 		    subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) {
1982 			bf->bf_state.bfs_dobaw = 1;
1983 		}
1984 	}
1985 
1986 	/*
1987 	 * If needed, the sequence number has been assigned.
1988 	 * Squirrel it away somewhere easy to get to.
1989 	 */
1990 	bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;
1991 
1992 	/* Is ampdu pending? fetch the seqno and print it out */
1993 	if (is_ampdu_pending)
1994 		DPRINTF(sc, ATH_DEBUG_SW_TX,
1995 		    "%s: tid %d: ampdu pending, seqno %d\n",
1996 		    __func__, tid, M_SEQNO_GET(m0));
1997 
1998 	/* This also sets up the DMA map */
1999 	r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
2000 
2001 	if (r != 0)
2002 		goto done;
2003 
2004 	/* At this point m0 could have changed! */
2005 	m0 = bf->bf_m;
2006 
2007 #if 1
2008 	/*
2009 	 * If it's a multicast frame, do a direct-dispatch to the
2010 	 * destination hardware queue. Don't bother software
2011 	 * queuing it.
2012 	 */
2013 	/*
2014 	 * If it's a BAR frame, do a direct dispatch to the
2015 	 * destination hardware queue. Don't bother software
2016 	 * queuing it, as the TID will now be paused.
2017 	 * Sending a BAR frame can occur from the net80211 txa timer
2018 	 * (ie, retries) or from the ath txtask (completion call.)
2019 	 * It queues directly to hardware because the TID is paused
2020 	 * at this point (and won't be unpaused until the BAR has
2021 	 * either been TXed successfully or max retries has been
2022 	 * reached.)
2023 	 */
2024 	/*
2025 	 * Until things are better debugged - if this node is asleep
2026 	 * and we're sending it a non-BAR frame, direct dispatch it.
2027 	 * Why? Because we need to figure out what's actually being
2028 	 * sent - eg, during reassociation/reauthentication after
2029 	 * the node (last) disappeared whilst asleep, the driver should
2030 	 * have unpaused/unsleep'ed the node.  So until that is
2031 	 * sorted out, use this workaround.
2032 	 */
2033 	if (txq == &avp->av_mcastq) {
2034 		DPRINTF(sc, ATH_DEBUG_SW_TX,
2035 		    "%s: bf=%p: mcastq: TX'ing\n", __func__, bf);
2036 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2037 		ath_tx_xmit_normal(sc, txq, bf);
2038 	} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2039 	    &queue_to_head)) {
2040 		ath_tx_swq(sc, ni, txq, queue_to_head, bf);
2041 	} else {
2042 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2043 		ath_tx_xmit_normal(sc, txq, bf);
2044 	}
2045 #else
2046 	/*
2047 	 * For now, since there's no software queue,
2048 	 * direct-dispatch to the hardware.
2049 	 */
2050 	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2051 	/*
2052 	 * Update the current leak count if
2053 	 * we're leaking frames; and set the
2054 	 * MORE flag as appropriate.
2055 	 */
2056 	ath_tx_leak_count_update(sc, tid, bf);
2057 	ath_tx_xmit_normal(sc, txq, bf);
2058 #endif
2059 done:
2060 	return 0;
2061 }
2062 
2063 static int
2064 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
2065 	struct ath_buf *bf, struct mbuf *m0,
2066 	const struct ieee80211_bpf_params *params)
2067 {
2068 	struct ieee80211com *ic = &sc->sc_ic;
2069 	struct ath_hal *ah = sc->sc_ah;
2070 	struct ieee80211vap *vap = ni->ni_vap;
2071 	int error, ismcast, ismrr;
2072 	int keyix, hdrlen, pktlen, try0, txantenna;
2073 	u_int8_t rix, txrate;
2074 	struct ieee80211_frame *wh;
2075 	u_int flags;
2076 	HAL_PKT_TYPE atype;
2077 	const HAL_RATE_TABLE *rt;
2078 	struct ath_desc *ds;
2079 	u_int pri;
2080 	int o_tid = -1;
2081 	int do_override;
2082 	uint8_t type, subtype;
2083 	int queue_to_head;
2084 	struct ath_node *an = ATH_NODE(ni);
2085 
2086 	ATH_TX_LOCK_ASSERT(sc);
2087 
2088 	wh = mtod(m0, struct ieee80211_frame *);
2089 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
2090 	hdrlen = ieee80211_anyhdrsize(wh);
2091 	/*
2092 	 * Packet length must not include any
2093 	 * pad bytes; deduct them here.
2094 	 */
2095 	/* XXX honor IEEE80211_BPF_DATAPAD */
2096 	pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;
2097 
2098 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2099 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2100 
2101 	ATH_KTR(sc, ATH_KTR_TX, 2,
2102 	     "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf);
2103 
2104 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n",
2105 	    __func__, ismcast);
2106 
2107 	pri = params->ibp_pri & 3;
2108 	/* Override pri if the frame isn't a QoS one */
2109 	if (! IEEE80211_QOS_HAS_SEQ(wh))
2110 		pri = ath_tx_getac(sc, m0);
2111 
2112 	/* XXX If it's an ADDBA, override the correct queue */
2113 	do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid);
2114 
2115 	/* Map ADDBA to the correct priority */
2116 	if (do_override) {
2117 #if 0
2118 		DPRINTF(sc, ATH_DEBUG_XMIT,
2119 		    "%s: overriding tid %d pri %d -> %d\n",
2120 		    __func__, o_tid, pri, TID_TO_WME_AC(o_tid));
2121 #endif
2122 		pri = TID_TO_WME_AC(o_tid);
2123 	}
2124 
2125 	/* Handle encryption twiddling if needed */
2126 	if (! ath_tx_tag_crypto(sc, ni,
2127 	    m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
2128 	    &hdrlen, &pktlen, &keyix)) {
2129 		ieee80211_free_mbuf(m0);
2130 		return EIO;
2131 	}
2132 	/* packet header may have moved, reset our local pointer */
2133 	wh = mtod(m0, struct ieee80211_frame *);
2134 
2135 	/* Do the generic frame setup */
2136 	/* XXX should just bzero the bf_state? */
2137 	bf->bf_state.bfs_dobaw = 0;
2138 
2139 	error = ath_tx_dmasetup(sc, bf, m0);
2140 	if (error != 0)
2141 		return error;
2142 	m0 = bf->bf_m;				/* NB: may have changed */
2143 	wh = mtod(m0, struct ieee80211_frame *);
2144 	KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
2145 	bf->bf_node = ni;			/* NB: held reference */
2146 
2147 	/* Always enable CLRDMASK for raw frames for now.. */
2148 	flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
2149 	flags |= HAL_TXDESC_INTREQ;		/* force interrupt */
2150 	if (params->ibp_flags & IEEE80211_BPF_RTS)
2151 		flags |= HAL_TXDESC_RTSENA;
2152 	else if (params->ibp_flags & IEEE80211_BPF_CTS) {
2153 		/* XXX assume 11g/11n protection? */
2154 		bf->bf_state.bfs_doprot = 1;
2155 		flags |= HAL_TXDESC_CTSENA;
2156 	}
2157 	/* XXX leave ismcast to injector? */
2158 	if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
2159 		flags |= HAL_TXDESC_NOACK;
2160 
2161 	rt = sc->sc_currates;
2162 	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
2163 
2164 	/* Fetch first rate information */
2165 	rix = ath_tx_findrix(sc, params->ibp_rate0);
2166 	try0 = params->ibp_try0;
2167 
2168 	/*
2169 	 * Override EAPOL rate as appropriate.
2170 	 */
2171 	if (m0->m_flags & M_EAPOL) {
2172 		/* XXX? maybe always use long preamble? */
2173 		rix = an->an_mgmtrix;
2174 		try0 = ATH_TXMAXTRY;	/* XXX?too many? */
2175 	}
2176 
2177 	txrate = rt->info[rix].rateCode;
2178 	if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
2179 		txrate |= rt->info[rix].shortPreamble;
2180 	sc->sc_txrix = rix;
2181 	ismrr = (params->ibp_try1 != 0);
2182 	txantenna = params->ibp_pri >> 2;
2183 	if (txantenna == 0)			/* XXX? */
2184 		txantenna = sc->sc_txantenna;
2185 
2186 	/*
2187 	 * Since ctsrate is fixed, store it away for later
2188 	 * use when the descriptor fields are being set.
2189 	 */
2190 	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA))
2191 		bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate;
2192 
2193 	/*
2194 	 * NB: we mark all packets as type PSPOLL so the h/w won't
2195 	 * set the sequence number, duration, etc.
2196 	 */
2197 	atype = HAL_PKT_TYPE_PSPOLL;
2198 
2199 	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
2200 		ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
2201 		    sc->sc_hwmap[rix].ieeerate, -1);
2202 
2203 	if (ieee80211_radiotap_active_vap(vap)) {
2204 		u_int64_t tsf = ath_hal_gettsf64(ah);
2205 
2206 		sc->sc_tx_th.wt_tsf = htole64(tsf);
2207 		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
2208 		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
2209 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2210 		if (m0->m_flags & M_FRAG)
2211 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
2212 		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
2213 		sc->sc_tx_th.wt_txpower = MIN(params->ibp_power,
2214 		    ieee80211_get_node_txpower(ni));
2215 		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
2216 
2217 		ieee80211_radiotap_tx(vap, m0);
2218 	}
2219 
2220 	/*
2221 	 * Formulate first tx descriptor with tx controls.
2222 	 */
2223 	ds = bf->bf_desc;
2224 	/* XXX check return value? */
2225 
2226 	/* Store the decided rate index values away */
2227 	bf->bf_state.bfs_pktlen = pktlen;
2228 	bf->bf_state.bfs_hdrlen = hdrlen;
2229 	bf->bf_state.bfs_atype = atype;
2230 	bf->bf_state.bfs_txpower = MIN(params->ibp_power,
2231 	    ieee80211_get_node_txpower(ni));
2232 	bf->bf_state.bfs_txrate0 = txrate;
2233 	bf->bf_state.bfs_try0 = try0;
2234 	bf->bf_state.bfs_keyix = keyix;
2235 	bf->bf_state.bfs_txantenna = txantenna;
2236 	bf->bf_state.bfs_txflags = flags;
2237 	bf->bf_state.bfs_shpream =
2238 	    !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE);
2239 
2240 	/* Set local packet state, used to queue packets to hardware */
2241 	bf->bf_state.bfs_tid = WME_AC_TO_TID(pri);
2242 	bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;
2243 	bf->bf_state.bfs_pri = pri;
2244 
2245 	/* XXX this should be done in ath_tx_setrate() */
2246 	bf->bf_state.bfs_ctsrate = 0;
2247 	bf->bf_state.bfs_ctsduration = 0;
2248 	bf->bf_state.bfs_ismrr = ismrr;
2249 
2250 	/* Blank the legacy rate array */
2251 	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
2252 
2253 	bf->bf_state.bfs_rc[0].rix = rix;
2254 	bf->bf_state.bfs_rc[0].tries = try0;
2255 	bf->bf_state.bfs_rc[0].ratecode = txrate;
2256 
2257 	if (ismrr) {
2258 		int rix;
2259 
2260 		rix = ath_tx_findrix(sc, params->ibp_rate1);
2261 		bf->bf_state.bfs_rc[1].rix = rix;
2262 		bf->bf_state.bfs_rc[1].tries = params->ibp_try1;
2263 
2264 		rix = ath_tx_findrix(sc, params->ibp_rate2);
2265 		bf->bf_state.bfs_rc[2].rix = rix;
2266 		bf->bf_state.bfs_rc[2].tries = params->ibp_try2;
2267 
2268 		rix = ath_tx_findrix(sc, params->ibp_rate3);
2269 		bf->bf_state.bfs_rc[3].rix = rix;
2270 		bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
2271 	}
2272 	/*
2273 	 * All the required rate control decisions have been made;
2274 	 * fill in the rc flags.
2275 	 */
2276 	ath_tx_rate_fill_rcflags(sc, bf);
2277 
2278 	/* NB: no buffered multicast in power save support */
2279 
2280 	/*
2281 	 * If we're overiding the ADDBA destination, dump directly
2282 	 * into the hardware queue, right after any pending
2283 	 * frames to that node are.
2284 	 */
2285 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",
2286 	    __func__, do_override);
2287 
2288 #if 1
2289 	/*
2290 	 * Put addba frames in the right place in the right TID/HWQ.
2291 	 */
2292 	if (do_override) {
2293 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2294 		/*
2295 		 * XXX if it's addba frames, should we be leaking
2296 		 * them out via the frame leak method?
2297 		 * XXX for now let's not risk it; but we may wish
2298 		 * to investigate this later.
2299 		 */
2300 		ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2301 	} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2302 	    &queue_to_head)) {
2303 		/* Queue to software queue */
2304 		ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);
2305 	} else {
2306 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2307 		ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2308 	}
2309 #else
2310 	/* Direct-dispatch to the hardware */
2311 	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2312 	/*
2313 	 * Update the current leak count if
2314 	 * we're leaking frames; and set the
2315 	 * MORE flag as appropriate.
2316 	 */
2317 	ath_tx_leak_count_update(sc, tid, bf);
2318 	ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2319 #endif
2320 	return 0;
2321 }
2322 
2323 /*
2324  * Send a raw frame.
2325  *
2326  * This can be called by net80211.
2327  */
2328 int
2329 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
2330 	const struct ieee80211_bpf_params *params)
2331 {
2332 	struct ieee80211com *ic = ni->ni_ic;
2333 	struct ath_softc *sc = ic->ic_softc;
2334 	struct ath_buf *bf;
2335 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
2336 	int error = 0;
2337 
2338 	ATH_PCU_LOCK(sc);
2339 	if (sc->sc_inreset_cnt > 0) {
2340 		DPRINTF(sc, ATH_DEBUG_XMIT,
2341 		    "%s: sc_inreset_cnt > 0; bailing\n", __func__);
2342 		error = EIO;
2343 		ATH_PCU_UNLOCK(sc);
2344 		goto badbad;
2345 	}
2346 	sc->sc_txstart_cnt++;
2347 	ATH_PCU_UNLOCK(sc);
2348 
2349 	/* Wake the hardware up already */
2350 	ATH_LOCK(sc);
2351 	ath_power_set_power_state(sc, HAL_PM_AWAKE);
2352 	ATH_UNLOCK(sc);
2353 
2354 	ATH_TX_LOCK(sc);
2355 
2356 	if (!sc->sc_running || sc->sc_invalid) {
2357 		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, r/i: %d/%d",
2358 		    __func__, sc->sc_running, sc->sc_invalid);
2359 		m_freem(m);
2360 		error = ENETDOWN;
2361 		goto bad;
2362 	}
2363 
2364 	/*
2365 	 * Enforce how deep the multicast queue can grow.
2366 	 *
2367 	 * XXX duplicated in ath_tx_start().
2368 	 */
2369 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2370 		if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
2371 		    > sc->sc_txq_mcastq_maxdepth) {
2372 			sc->sc_stats.ast_tx_mcastq_overflow++;
2373 			error = ENOBUFS;
2374 		}
2375 
2376 		if (error != 0) {
2377 			m_freem(m);
2378 			goto bad;
2379 		}
2380 	}
2381 
2382 	/*
2383 	 * Grab a TX buffer and associated resources.
2384 	 */
2385 	bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
2386 	if (bf == NULL) {
2387 		sc->sc_stats.ast_tx_nobuf++;
2388 		m_freem(m);
2389 		error = ENOBUFS;
2390 		goto bad;
2391 	}
2392 	ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n",
2393 	    m, params,  bf);
2394 
2395 	if (params == NULL) {
2396 		/*
2397 		 * Legacy path; interpret frame contents to decide
2398 		 * precisely how to send the frame.
2399 		 */
2400 		if (ath_tx_start(sc, ni, bf, m)) {
2401 			error = EIO;		/* XXX */
2402 			goto bad2;
2403 		}
2404 	} else {
2405 		/*
2406 		 * Caller supplied explicit parameters to use in
2407 		 * sending the frame.
2408 		 */
2409 		if (ath_tx_raw_start(sc, ni, bf, m, params)) {
2410 			error = EIO;		/* XXX */
2411 			goto bad2;
2412 		}
2413 	}
2414 	sc->sc_wd_timer = 5;
2415 	sc->sc_stats.ast_tx_raw++;
2416 
2417 	/*
2418 	 * Update the TIM - if there's anything queued to the
2419 	 * software queue and power save is enabled, we should
2420 	 * set the TIM.
2421 	 */
2422 	ath_tx_update_tim(sc, ni, 1);
2423 
2424 	ATH_TX_UNLOCK(sc);
2425 
2426 	ATH_PCU_LOCK(sc);
2427 	sc->sc_txstart_cnt--;
2428 	ATH_PCU_UNLOCK(sc);
2429 
2430 
2431 	/* Put the hardware back to sleep if required */
2432 	ATH_LOCK(sc);
2433 	ath_power_restore_power_state(sc);
2434 	ATH_UNLOCK(sc);
2435 
2436 	return 0;
2437 
2438 bad2:
2439 	ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, "
2440 	    "bf=%p",
2441 	    m,
2442 	    params,
2443 	    bf);
2444 	ATH_TXBUF_LOCK(sc);
2445 	ath_returnbuf_head(sc, bf);
2446 	ATH_TXBUF_UNLOCK(sc);
2447 
2448 bad:
2449 	ATH_TX_UNLOCK(sc);
2450 
2451 	ATH_PCU_LOCK(sc);
2452 	sc->sc_txstart_cnt--;
2453 	ATH_PCU_UNLOCK(sc);
2454 
2455 	/* Put the hardware back to sleep if required */
2456 	ATH_LOCK(sc);
2457 	ath_power_restore_power_state(sc);
2458 	ATH_UNLOCK(sc);
2459 
2460 badbad:
2461 	ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p",
2462 	    m, params);
2463 	sc->sc_stats.ast_tx_raw_fail++;
2464 
2465 	return error;
2466 }
2467 
2468 /* Some helper functions */
2469 
2470 /*
2471  * ADDBA (and potentially others) need to be placed in the same
2472  * hardware queue as the TID/node it's relating to. This is so
2473  * it goes out after any pending non-aggregate frames to the
2474  * same node/TID.
2475  *
2476  * If this isn't done, the ADDBA can go out before the frames
2477  * queued in hardware. Even though these frames have a sequence
2478  * number -earlier- than the ADDBA can be transmitted (but
2479  * no frames whose sequence numbers are after the ADDBA should
2480  * be!) they'll arrive after the ADDBA - and the receiving end
2481  * will simply drop them as being out of the BAW.
2482  *
2483  * The frames can't be appended to the TID software queue - it'll
2484  * never be sent out. So these frames have to be directly
2485  * dispatched to the hardware, rather than queued in software.
2486  * So if this function returns true, the TXQ has to be
2487  * overridden and it has to be directly dispatched.
2488  *
2489  * It's a dirty hack, but someone's gotta do it.
2490  */
2491 
2492 /*
2493  * XXX doesn't belong here!
2494  */
2495 static int
2496 ieee80211_is_action(struct ieee80211_frame *wh)
2497 {
2498 	/* Type: Management frame? */
2499 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
2500 	    IEEE80211_FC0_TYPE_MGT)
2501 		return 0;
2502 
2503 	/* Subtype: Action frame? */
2504 	if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) !=
2505 	    IEEE80211_FC0_SUBTYPE_ACTION)
2506 		return 0;
2507 
2508 	return 1;
2509 }
2510 
2511 #define	MS(_v, _f)	(((_v) & _f) >> _f##_S)
2512 /*
2513  * Return an alternate TID for ADDBA request frames.
2514  *
2515  * Yes, this likely should be done in the net80211 layer.
2516  */
2517 static int
2518 ath_tx_action_frame_override_queue(struct ath_softc *sc,
2519     struct ieee80211_node *ni,
2520     struct mbuf *m0, int *tid)
2521 {
2522 	struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *);
2523 	struct ieee80211_action_ba_addbarequest *ia;
2524 	uint8_t *frm;
2525 	uint16_t baparamset;
2526 
2527 	/* Not action frame? Bail */
2528 	if (! ieee80211_is_action(wh))
2529 		return 0;
2530 
2531 	/* XXX Not needed for frames we send? */
2532 #if 0
2533 	/* Correct length? */
2534 	if (! ieee80211_parse_action(ni, m))
2535 		return 0;
2536 #endif
2537 
2538 	/* Extract out action frame */
2539 	frm = (u_int8_t *)&wh[1];
2540 	ia = (struct ieee80211_action_ba_addbarequest *) frm;
2541 
2542 	/* Not ADDBA? Bail */
2543 	if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA)
2544 		return 0;
2545 	if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST)
2546 		return 0;
2547 
2548 	/* Extract TID, return it */
2549 	baparamset = le16toh(ia->rq_baparamset);
2550 	*tid = (int) MS(baparamset, IEEE80211_BAPS_TID);
2551 
2552 	return 1;
2553 }
2554 #undef	MS
2555 
2556 /* Per-node software queue operations */
2557 
2558 /*
2559  * Add the current packet to the given BAW.
2560  * It is assumed that the current packet
2561  *
2562  * + fits inside the BAW;
2563  * + already has had a sequence number allocated.
2564  *
2565  * Since the BAW status may be modified by both the ath task and
2566  * the net80211/ifnet contexts, the TID must be locked.
2567  */
2568 void
2569 ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
2570     struct ath_tid *tid, struct ath_buf *bf)
2571 {
2572 	int index, cindex;
2573 	struct ieee80211_tx_ampdu *tap;
2574 
2575 	ATH_TX_LOCK_ASSERT(sc);
2576 
2577 	if (bf->bf_state.bfs_isretried)
2578 		return;
2579 
2580 	tap = ath_tx_get_tx_tid(an, tid->tid);
2581 
2582 	if (! bf->bf_state.bfs_dobaw) {
2583 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2584 		    "%s: dobaw=0, seqno=%d, window %d:%d\n",
2585 		    __func__, SEQNO(bf->bf_state.bfs_seqno),
2586 		    tap->txa_start, tap->txa_wnd);
2587 	}
2588 
2589 	if (bf->bf_state.bfs_addedbaw)
2590 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2591 		    "%s: re-added? tid=%d, seqno %d; window %d:%d; "
2592 		    "baw head=%d tail=%d\n",
2593 		    __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2594 		    tap->txa_start, tap->txa_wnd, tid->baw_head,
2595 		    tid->baw_tail);
2596 
2597 	/*
2598 	 * Verify that the given sequence number is not outside of the
2599 	 * BAW.  Complain loudly if that's the case.
2600 	 */
2601 	if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
2602 	    SEQNO(bf->bf_state.bfs_seqno))) {
2603 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2604 		    "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; "
2605 		    "baw head=%d tail=%d\n",
2606 		    __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2607 		    tap->txa_start, tap->txa_wnd, tid->baw_head,
2608 		    tid->baw_tail);
2609 	}
2610 
2611 	/*
2612 	 * ni->ni_txseqs[] is the currently allocated seqno.
2613 	 * the txa state contains the current baw start.
2614 	 */
2615 	index  = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));
2616 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2617 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2618 	    "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d "
2619 	    "baw head=%d tail=%d\n",
2620 	    __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2621 	    tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head,
2622 	    tid->baw_tail);
2623 
2624 
2625 #if 0
2626 	assert(tid->tx_buf[cindex] == NULL);
2627 #endif
2628 	if (tid->tx_buf[cindex] != NULL) {
2629 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2630 		    "%s: ba packet dup (index=%d, cindex=%d, "
2631 		    "head=%d, tail=%d)\n",
2632 		    __func__, index, cindex, tid->baw_head, tid->baw_tail);
2633 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2634 		    "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n",
2635 		    __func__,
2636 		    tid->tx_buf[cindex],
2637 		    SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno),
2638 		    bf,
2639 		    SEQNO(bf->bf_state.bfs_seqno)
2640 		);
2641 	}
2642 	tid->tx_buf[cindex] = bf;
2643 
2644 	if (index >= ((tid->baw_tail - tid->baw_head) &
2645 	    (ATH_TID_MAX_BUFS - 1))) {
2646 		tid->baw_tail = cindex;
2647 		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
2648 	}
2649 }
2650 
2651 /*
2652  * Flip the BAW buffer entry over from the existing one to the new one.
2653  *
2654  * When software retransmitting a (sub-)frame, it is entirely possible that
2655  * the frame ath_buf is marked as BUSY and can't be immediately reused.
2656  * In that instance the buffer is cloned and the new buffer is used for
2657  * retransmit. We thus need to update the ath_buf slot in the BAW buf
2658  * tracking array to maintain consistency.
2659  */
2660 static void
2661 ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an,
2662     struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf)
2663 {
2664 	int index, cindex;
2665 	struct ieee80211_tx_ampdu *tap;
2666 	int seqno = SEQNO(old_bf->bf_state.bfs_seqno);
2667 
2668 	ATH_TX_LOCK_ASSERT(sc);
2669 
2670 	tap = ath_tx_get_tx_tid(an, tid->tid);
2671 	index  = ATH_BA_INDEX(tap->txa_start, seqno);
2672 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2673 
2674 	/*
2675 	 * Just warn for now; if it happens then we should find out
2676 	 * about it. It's highly likely the aggregation session will
2677 	 * soon hang.
2678 	 */
2679 	if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {
2680 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2681 		    "%s: retransmitted buffer"
2682 		    " has mismatching seqno's, BA session may hang.\n",
2683 		    __func__);
2684 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2685 		    "%s: old seqno=%d, new_seqno=%d\n", __func__,
2686 		    old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno);
2687 	}
2688 
2689 	if (tid->tx_buf[cindex] != old_bf) {
2690 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2691 		    "%s: ath_buf pointer incorrect; "
2692 		    " has m BA session may hang.\n", __func__);
2693 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2694 		    "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf);
2695 	}
2696 
2697 	tid->tx_buf[cindex] = new_bf;
2698 }
2699 
2700 /*
2701  * seq_start - left edge of BAW
2702  * seq_next - current/next sequence number to allocate
2703  *
2704  * Since the BAW status may be modified by both the ath task and
2705  * the net80211/ifnet contexts, the TID must be locked.
2706  */
2707 static void
2708 ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
2709     struct ath_tid *tid, const struct ath_buf *bf)
2710 {
2711 	int index, cindex;
2712 	struct ieee80211_tx_ampdu *tap;
2713 	int seqno = SEQNO(bf->bf_state.bfs_seqno);
2714 
2715 	ATH_TX_LOCK_ASSERT(sc);
2716 
2717 	tap = ath_tx_get_tx_tid(an, tid->tid);
2718 	index  = ATH_BA_INDEX(tap->txa_start, seqno);
2719 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2720 
2721 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2722 	    "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, "
2723 	    "baw head=%d, tail=%d\n",
2724 	    __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
2725 	    cindex, tid->baw_head, tid->baw_tail);
2726 
2727 	/*
2728 	 * If this occurs then we have a big problem - something else
2729 	 * has slid tap->txa_start along without updating the BAW
2730 	 * tracking start/end pointers. Thus the TX BAW state is now
2731 	 * completely busted.
2732 	 *
2733 	 * But for now, since I haven't yet fixed TDMA and buffer cloning,
2734 	 * it's quite possible that a cloned buffer is making its way
2735 	 * here and causing it to fire off. Disable TDMA for now.
2736 	 */
2737 	if (tid->tx_buf[cindex] != bf) {
2738 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2739 		    "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
2740 		    __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
2741 		    tid->tx_buf[cindex],
2742 		    (tid->tx_buf[cindex] != NULL) ?
2743 		      SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1);
2744 	}
2745 
2746 	tid->tx_buf[cindex] = NULL;
2747 
2748 	while (tid->baw_head != tid->baw_tail &&
2749 	    !tid->tx_buf[tid->baw_head]) {
2750 		INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
2751 		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
2752 	}
2753 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2754 	    "%s: tid=%d: baw is now %d:%d, baw head=%d\n",
2755 	    __func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head);
2756 }
2757 
2758 static void
2759 ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,
2760     struct ath_buf *bf)
2761 {
2762 	struct ieee80211_frame *wh;
2763 
2764 	ATH_TX_LOCK_ASSERT(sc);
2765 
2766 	if (tid->an->an_leak_count > 0) {
2767 		wh = mtod(bf->bf_m, struct ieee80211_frame *);
2768 
2769 		/*
2770 		 * Update MORE based on the software/net80211 queue states.
2771 		 */
2772 		if ((tid->an->an_stack_psq > 0)
2773 		    || (tid->an->an_swq_depth > 0))
2774 			wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
2775 		else
2776 			wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;
2777 
2778 		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
2779 		    "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n",
2780 		    __func__,
2781 		    tid->an->an_node.ni_macaddr,
2782 		    ":",
2783 		    tid->an->an_leak_count,
2784 		    tid->an->an_stack_psq,
2785 		    tid->an->an_swq_depth,
2786 		    !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));
2787 
2788 		/*
2789 		 * Re-sync the underlying buffer.
2790 		 */
2791 		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
2792 		    BUS_DMASYNC_PREWRITE);
2793 
2794 		tid->an->an_leak_count --;
2795 	}
2796 }
2797 
2798 static int
2799 ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)
2800 {
2801 
2802 	ATH_TX_LOCK_ASSERT(sc);
2803 
2804 	if (tid->an->an_leak_count > 0) {
2805 		return (1);
2806 	}
2807 	if (tid->paused)
2808 		return (0);
2809 	return (1);
2810 }
2811 
2812 /*
2813  * Mark the current node/TID as ready to TX.
2814  *
2815  * This is done to make it easy for the software scheduler to
2816  * find which nodes have data to send.
2817  *
2818  * The TXQ lock must be held.
2819  */
2820 void
2821 ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
2822 {
2823 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2824 
2825 	ATH_TX_LOCK_ASSERT(sc);
2826 
2827 	/*
2828 	 * If we are leaking out a frame to this destination
2829 	 * for PS-POLL, ensure that we allow scheduling to
2830 	 * occur.
2831 	 */
2832 	if (! ath_tx_tid_can_tx_or_sched(sc, tid))
2833 		return;		/* paused, can't schedule yet */
2834 
2835 	if (tid->sched)
2836 		return;		/* already scheduled */
2837 
2838 	tid->sched = 1;
2839 
2840 #if 0
2841 	/*
2842 	 * If this is a sleeping node we're leaking to, given
2843 	 * it a higher priority.  This is so bad for QoS it hurts.
2844 	 */
2845 	if (tid->an->an_leak_count) {
2846 		TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
2847 	} else {
2848 		TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2849 	}
2850 #endif
2851 
2852 	/*
2853 	 * We can't do the above - it'll confuse the TXQ software
2854 	 * scheduler which will keep checking the _head_ TID
2855 	 * in the list to see if it has traffic.  If we queue
2856 	 * a TID to the head of the list and it doesn't transmit,
2857 	 * we'll check it again.
2858 	 *
2859 	 * So, get the rest of this leaking frames support working
2860 	 * and reliable first and _then_ optimise it so they're
2861 	 * pushed out in front of any other pending software
2862 	 * queued nodes.
2863 	 */
2864 	TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2865 }
2866 
2867 /*
2868  * Mark the current node as no longer needing to be polled for
2869  * TX packets.
2870  *
2871  * The TXQ lock must be held.
2872  */
2873 static void
2874 ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)
2875 {
2876 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2877 
2878 	ATH_TX_LOCK_ASSERT(sc);
2879 
2880 	if (tid->sched == 0)
2881 		return;
2882 
2883 	tid->sched = 0;
2884 	TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
2885 }
2886 
2887 /*
2888  * Assign a sequence number manually to the given frame.
2889  *
2890  * This should only be called for A-MPDU TX frames.
2891  */
2892 static ieee80211_seq
2893 ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
2894     struct ath_buf *bf, struct mbuf *m0)
2895 {
2896 	struct ieee80211_frame *wh;
2897 	int tid, pri;
2898 	ieee80211_seq seqno;
2899 	uint8_t subtype;
2900 
2901 	/* TID lookup */
2902 	wh = mtod(m0, struct ieee80211_frame *);
2903 	pri = M_WME_GETAC(m0);			/* honor classification */
2904 	tid = WME_AC_TO_TID(pri);
2905 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n",
2906 	    __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
2907 
2908 	/* XXX Is it a control frame? Ignore */
2909 
2910 	/* Does the packet require a sequence number? */
2911 	if (! IEEE80211_QOS_HAS_SEQ(wh))
2912 		return -1;
2913 
2914 	ATH_TX_LOCK_ASSERT(sc);
2915 
2916 	/*
2917 	 * Is it a QOS NULL Data frame? Give it a sequence number from
2918 	 * the default TID (IEEE80211_NONQOS_TID.)
2919 	 *
2920 	 * The RX path of everything I've looked at doesn't include the NULL
2921 	 * data frame sequence number in the aggregation state updates, so
2922 	 * assigning it a sequence number there will cause a BAW hole on the
2923 	 * RX side.
2924 	 */
2925 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2926 	if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) {
2927 		/* XXX no locking for this TID? This is a bit of a problem. */
2928 		seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
2929 		INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
2930 	} else {
2931 		/* Manually assign sequence number */
2932 		seqno = ni->ni_txseqs[tid];
2933 		INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE);
2934 	}
2935 	*(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
2936 	M_SEQNO_SET(m0, seqno);
2937 
2938 	/* Return so caller can do something with it if needed */
2939 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s:  -> seqno=%d\n", __func__, seqno);
2940 	return seqno;
2941 }
2942 
2943 /*
2944  * Attempt to direct dispatch an aggregate frame to hardware.
2945  * If the frame is out of BAW, queue.
2946  * Otherwise, schedule it as a single frame.
2947  */
2948 static void
2949 ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an,
2950     struct ath_txq *txq, struct ath_buf *bf)
2951 {
2952 	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
2953 	struct ieee80211_tx_ampdu *tap;
2954 
2955 	ATH_TX_LOCK_ASSERT(sc);
2956 
2957 	tap = ath_tx_get_tx_tid(an, tid->tid);
2958 
2959 	/* paused? queue */
2960 	if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
2961 		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
2962 		/* XXX don't sched - we're paused! */
2963 		return;
2964 	}
2965 
2966 	/* outside baw? queue */
2967 	if (bf->bf_state.bfs_dobaw &&
2968 	    (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
2969 	    SEQNO(bf->bf_state.bfs_seqno)))) {
2970 		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
2971 		ath_tx_tid_sched(sc, tid);
2972 		return;
2973 	}
2974 
2975 	/*
2976 	 * This is a temporary check and should be removed once
2977 	 * all the relevant code paths have been fixed.
2978 	 *
2979 	 * During aggregate retries, it's possible that the head
2980 	 * frame will fail (which has the bfs_aggr and bfs_nframes
2981 	 * fields set for said aggregate) and will be retried as
2982 	 * a single frame.  In this instance, the values should
2983 	 * be reset or the completion code will get upset with you.
2984 	 */
2985 	if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) {
2986 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
2987 		    "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__,
2988 		    bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes);
2989 		bf->bf_state.bfs_aggr = 0;
2990 		bf->bf_state.bfs_nframes = 1;
2991 	}
2992 
2993 	/* Update CLRDMASK just before this frame is queued */
2994 	ath_tx_update_clrdmask(sc, tid, bf);
2995 
2996 	/* Direct dispatch to hardware */
2997 	ath_tx_do_ratelookup(sc, bf);
2998 	ath_tx_calc_duration(sc, bf);
2999 	ath_tx_calc_protection(sc, bf);
3000 	ath_tx_set_rtscts(sc, bf);
3001 	ath_tx_rate_fill_rcflags(sc, bf);
3002 	ath_tx_setds(sc, bf);
3003 
3004 	/* Statistics */
3005 	sc->sc_aggr_stats.aggr_low_hwq_single_pkt++;
3006 
3007 	/* Track per-TID hardware queue depth correctly */
3008 	tid->hwq_depth++;
3009 
3010 	/* Add to BAW */
3011 	if (bf->bf_state.bfs_dobaw) {
3012 		ath_tx_addto_baw(sc, an, tid, bf);
3013 		bf->bf_state.bfs_addedbaw = 1;
3014 	}
3015 
3016 	/* Set completion handler, multi-frame aggregate or not */
3017 	bf->bf_comp = ath_tx_aggr_comp;
3018 
3019 	/*
3020 	 * Update the current leak count if
3021 	 * we're leaking frames; and set the
3022 	 * MORE flag as appropriate.
3023 	 */
3024 	ath_tx_leak_count_update(sc, tid, bf);
3025 
3026 	/* Hand off to hardware */
3027 	ath_tx_handoff(sc, txq, bf);
3028 }
3029 
3030 /*
3031  * Attempt to send the packet.
3032  * If the queue isn't busy, direct-dispatch.
3033  * If the queue is busy enough, queue the given packet on the
3034  *  relevant software queue.
3035  */
3036 void
3037 ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
3038     struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
3039 {
3040 	struct ath_node *an = ATH_NODE(ni);
3041 	struct ieee80211_frame *wh;
3042 	struct ath_tid *atid;
3043 	int pri, tid;
3044 	struct mbuf *m0 = bf->bf_m;
3045 
3046 	ATH_TX_LOCK_ASSERT(sc);
3047 
3048 	/* Fetch the TID - non-QoS frames get assigned to TID 16 */
3049 	wh = mtod(m0, struct ieee80211_frame *);
3050 	pri = ath_tx_getac(sc, m0);
3051 	tid = ath_tx_gettid(sc, m0);
3052 	atid = &an->an_tid[tid];
3053 
3054 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
3055 	    __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
3056 
3057 	/* Set local packet state, used to queue packets to hardware */
3058 	/* XXX potentially duplicate info, re-check */
3059 	bf->bf_state.bfs_tid = tid;
3060 	bf->bf_state.bfs_tx_queue = txq->axq_qnum;
3061 	bf->bf_state.bfs_pri = pri;
3062 
3063 	/*
3064 	 * If the hardware queue isn't busy, queue it directly.
3065 	 * If the hardware queue is busy, queue it.
3066 	 * If the TID is paused or the traffic it outside BAW, software
3067 	 * queue it.
3068 	 *
3069 	 * If the node is in power-save and we're leaking a frame,
3070 	 * leak a single frame.
3071 	 */
3072 	if (! ath_tx_tid_can_tx_or_sched(sc, atid)) {
3073 		/* TID is paused, queue */
3074 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__);
3075 		/*
3076 		 * If the caller requested that it be sent at a high
3077 		 * priority, queue it at the head of the list.
3078 		 */
3079 		if (queue_to_head)
3080 			ATH_TID_INSERT_HEAD(atid, bf, bf_list);
3081 		else
3082 			ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3083 	} else if (ath_tx_ampdu_pending(sc, an, tid)) {
3084 		/* AMPDU pending; queue */
3085 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__);
3086 		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3087 		/* XXX sched? */
3088 	} else if (ath_tx_ampdu_running(sc, an, tid)) {
3089 		/* AMPDU running, attempt direct dispatch if possible */
3090 
3091 		/*
3092 		 * Always queue the frame to the tail of the list.
3093 		 */
3094 		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3095 
3096 		/*
3097 		 * If the hardware queue isn't busy, direct dispatch
3098 		 * the head frame in the list.  Don't schedule the
3099 		 * TID - let it build some more frames first?
3100 		 *
3101 		 * When running A-MPDU, always just check the hardware
3102 		 * queue depth against the aggregate frame limit.
3103 		 * We don't want to burst a large number of single frames
3104 		 * out to the hardware; we want to aggressively hold back.
3105 		 *
3106 		 * Otherwise, schedule the TID.
3107 		 */
3108 		/* XXX TXQ locking */
3109 		if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_aggr) {
3110 			bf = ATH_TID_FIRST(atid);
3111 			ATH_TID_REMOVE(atid, bf, bf_list);
3112 
3113 			/*
3114 			 * Ensure it's definitely treated as a non-AMPDU
3115 			 * frame - this information may have been left
3116 			 * over from a previous attempt.
3117 			 */
3118 			bf->bf_state.bfs_aggr = 0;
3119 			bf->bf_state.bfs_nframes = 1;
3120 
3121 			/* Queue to the hardware */
3122 			ath_tx_xmit_aggr(sc, an, txq, bf);
3123 			DPRINTF(sc, ATH_DEBUG_SW_TX,
3124 			    "%s: xmit_aggr\n",
3125 			    __func__);
3126 		} else {
3127 			DPRINTF(sc, ATH_DEBUG_SW_TX,
3128 			    "%s: ampdu; swq'ing\n",
3129 			    __func__);
3130 
3131 			ath_tx_tid_sched(sc, atid);
3132 		}
3133 	/*
3134 	 * If we're not doing A-MPDU, be prepared to direct dispatch
3135 	 * up to both limits if possible.  This particular corner
3136 	 * case may end up with packet starvation between aggregate
3137 	 * traffic and non-aggregate traffic: we want to ensure
3138 	 * that non-aggregate stations get a few frames queued to the
3139 	 * hardware before the aggregate station(s) get their chance.
3140 	 *
3141 	 * So if you only ever see a couple of frames direct dispatched
3142 	 * to the hardware from a non-AMPDU client, check both here
3143 	 * and in the software queue dispatcher to ensure that those
3144 	 * non-AMPDU stations get a fair chance to transmit.
3145 	 */
3146 	/* XXX TXQ locking */
3147 	} else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3148 		    (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3149 		/* AMPDU not running, attempt direct dispatch */
3150 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);
3151 		/* See if clrdmask needs to be set */
3152 		ath_tx_update_clrdmask(sc, atid, bf);
3153 
3154 		/*
3155 		 * Update the current leak count if
3156 		 * we're leaking frames; and set the
3157 		 * MORE flag as appropriate.
3158 		 */
3159 		ath_tx_leak_count_update(sc, atid, bf);
3160 
3161 		/*
3162 		 * Dispatch the frame.
3163 		 */
3164 		ath_tx_xmit_normal(sc, txq, bf);
3165 	} else {
3166 		/* Busy; queue */
3167 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__);
3168 		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3169 		ath_tx_tid_sched(sc, atid);
3170 	}
3171 }
3172 
3173 /*
3174  * Only set the clrdmask bit if none of the nodes are currently
3175  * filtered.
3176  *
3177  * XXX TODO: go through all the callers and check to see
3178  * which are being called in the context of looping over all
3179  * TIDs (eg, if all tids are being paused, resumed, etc.)
3180  * That'll avoid O(n^2) complexity here.
3181  */
3182 static void
3183 ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an)
3184 {
3185 	int i;
3186 
3187 	ATH_TX_LOCK_ASSERT(sc);
3188 
3189 	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3190 		if (an->an_tid[i].isfiltered == 1)
3191 			return;
3192 	}
3193 	an->clrdmask = 1;
3194 }
3195 
3196 /*
3197  * Configure the per-TID node state.
3198  *
3199  * This likely belongs in if_ath_node.c but I can't think of anywhere
3200  * else to put it just yet.
3201  *
3202  * This sets up the SLISTs and the mutex as appropriate.
3203  */
3204 void
3205 ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an)
3206 {
3207 	int i, j;
3208 	struct ath_tid *atid;
3209 
3210 	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3211 		atid = &an->an_tid[i];
3212 
3213 		/* XXX now with this bzer(), is the field 0'ing needed? */
3214 		bzero(atid, sizeof(*atid));
3215 
3216 		TAILQ_INIT(&atid->tid_q);
3217 		TAILQ_INIT(&atid->filtq.tid_q);
3218 		atid->tid = i;
3219 		atid->an = an;
3220 		for (j = 0; j < ATH_TID_MAX_BUFS; j++)
3221 			atid->tx_buf[j] = NULL;
3222 		atid->baw_head = atid->baw_tail = 0;
3223 		atid->paused = 0;
3224 		atid->sched = 0;
3225 		atid->hwq_depth = 0;
3226 		atid->cleanup_inprogress = 0;
3227 		if (i == IEEE80211_NONQOS_TID)
3228 			atid->ac = ATH_NONQOS_TID_AC;
3229 		else
3230 			atid->ac = TID_TO_WME_AC(i);
3231 	}
3232 	an->clrdmask = 1;	/* Always start by setting this bit */
3233 }
3234 
3235 /*
3236  * Pause the current TID. This stops packets from being transmitted
3237  * on it.
3238  *
3239  * Since this is also called from upper layers as well as the driver,
3240  * it will get the TID lock.
3241  */
3242 static void
3243 ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
3244 {
3245 
3246 	ATH_TX_LOCK_ASSERT(sc);
3247 	tid->paused++;
3248 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, paused = %d\n",
3249 	    __func__,
3250 	    tid->an->an_node.ni_macaddr, ":",
3251 	    tid->tid,
3252 	    tid->paused);
3253 }
3254 
3255 /*
3256  * Unpause the current TID, and schedule it if needed.
3257  */
3258 static void
3259 ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)
3260 {
3261 	ATH_TX_LOCK_ASSERT(sc);
3262 
3263 	/*
3264 	 * There's some odd places where ath_tx_tid_resume() is called
3265 	 * when it shouldn't be; this works around that particular issue
3266 	 * until it's actually resolved.
3267 	 */
3268 	if (tid->paused == 0) {
3269 		device_printf(sc->sc_dev,
3270 		    "%s: [%6D]: tid=%d, paused=0?\n",
3271 		    __func__,
3272 		    tid->an->an_node.ni_macaddr, ":",
3273 		    tid->tid);
3274 	} else {
3275 		tid->paused--;
3276 	}
3277 
3278 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3279 	    "%s: [%6D]: tid=%d, unpaused = %d\n",
3280 	    __func__,
3281 	    tid->an->an_node.ni_macaddr, ":",
3282 	    tid->tid,
3283 	    tid->paused);
3284 
3285 	if (tid->paused)
3286 		return;
3287 
3288 	/*
3289 	 * Override the clrdmask configuration for the next frame
3290 	 * from this TID, just to get the ball rolling.
3291 	 */
3292 	ath_tx_set_clrdmask(sc, tid->an);
3293 
3294 	if (tid->axq_depth == 0)
3295 		return;
3296 
3297 	/* XXX isfiltered shouldn't ever be 0 at this point */
3298 	if (tid->isfiltered == 1) {
3299 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n",
3300 		    __func__);
3301 		return;
3302 	}
3303 
3304 	ath_tx_tid_sched(sc, tid);
3305 
3306 	/*
3307 	 * Queue the software TX scheduler.
3308 	 */
3309 	ath_tx_swq_kick(sc);
3310 }
3311 
3312 /*
3313  * Add the given ath_buf to the TID filtered frame list.
3314  * This requires the TID be filtered.
3315  */
3316 static void
3317 ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid,
3318     struct ath_buf *bf)
3319 {
3320 
3321 	ATH_TX_LOCK_ASSERT(sc);
3322 
3323 	if (!tid->isfiltered)
3324 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n",
3325 		    __func__);
3326 
3327 	DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf);
3328 
3329 	/* Set the retry bit and bump the retry counter */
3330 	ath_tx_set_retry(sc, bf);
3331 	sc->sc_stats.ast_tx_swfiltered++;
3332 
3333 	ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list);
3334 }
3335 
3336 /*
3337  * Handle a completed filtered frame from the given TID.
3338  * This just enables/pauses the filtered frame state if required
3339  * and appends the filtered frame to the filtered queue.
3340  */
3341 static void
3342 ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid,
3343     struct ath_buf *bf)
3344 {
3345 
3346 	ATH_TX_LOCK_ASSERT(sc);
3347 
3348 	if (! tid->isfiltered) {
3349 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n",
3350 		    __func__, tid->tid);
3351 		tid->isfiltered = 1;
3352 		ath_tx_tid_pause(sc, tid);
3353 	}
3354 
3355 	/* Add the frame to the filter queue */
3356 	ath_tx_tid_filt_addbuf(sc, tid, bf);
3357 }
3358 
3359 /*
3360  * Complete the filtered frame TX completion.
3361  *
3362  * If there are no more frames in the hardware queue, unpause/unfilter
3363  * the TID if applicable.  Otherwise we will wait for a node PS transition
3364  * to unfilter.
3365  */
3366 static void
3367 ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid)
3368 {
3369 	struct ath_buf *bf;
3370 	int do_resume = 0;
3371 
3372 	ATH_TX_LOCK_ASSERT(sc);
3373 
3374 	if (tid->hwq_depth != 0)
3375 		return;
3376 
3377 	DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n",
3378 	    __func__, tid->tid);
3379 	if (tid->isfiltered == 1) {
3380 		tid->isfiltered = 0;
3381 		do_resume = 1;
3382 	}
3383 
3384 	/* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */
3385 	ath_tx_set_clrdmask(sc, tid->an);
3386 
3387 	/* XXX this is really quite inefficient */
3388 	while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) {
3389 		ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3390 		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3391 	}
3392 
3393 	/* And only resume if we had paused before */
3394 	if (do_resume)
3395 		ath_tx_tid_resume(sc, tid);
3396 }
3397 
3398 /*
3399  * Called when a single (aggregate or otherwise) frame is completed.
3400  *
3401  * Returns 0 if the buffer could be added to the filtered list
3402  * (cloned or otherwise), 1 if the buffer couldn't be added to the
3403  * filtered list (failed clone; expired retry) and the caller should
3404  * free it and handle it like a failure (eg by sending a BAR.)
3405  *
3406  * since the buffer may be cloned, bf must be not touched after this
3407  * if the return value is 0.
3408  */
3409 static int
3410 ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid,
3411     struct ath_buf *bf)
3412 {
3413 	struct ath_buf *nbf;
3414 	int retval;
3415 
3416 	ATH_TX_LOCK_ASSERT(sc);
3417 
3418 	/*
3419 	 * Don't allow a filtered frame to live forever.
3420 	 */
3421 	if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3422 		sc->sc_stats.ast_tx_swretrymax++;
3423 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3424 		    "%s: bf=%p, seqno=%d, exceeded retries\n",
3425 		    __func__,
3426 		    bf,
3427 		    SEQNO(bf->bf_state.bfs_seqno));
3428 		retval = 1; /* error */
3429 		goto finish;
3430 	}
3431 
3432 	/*
3433 	 * A busy buffer can't be added to the retry list.
3434 	 * It needs to be cloned.
3435 	 */
3436 	if (bf->bf_flags & ATH_BUF_BUSY) {
3437 		nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3438 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3439 		    "%s: busy buffer clone: %p -> %p\n",
3440 		    __func__, bf, nbf);
3441 	} else {
3442 		nbf = bf;
3443 	}
3444 
3445 	if (nbf == NULL) {
3446 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3447 		    "%s: busy buffer couldn't be cloned (%p)!\n",
3448 		    __func__, bf);
3449 		retval = 1; /* error */
3450 	} else {
3451 		ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3452 		retval = 0; /* ok */
3453 	}
3454 finish:
3455 	ath_tx_tid_filt_comp_complete(sc, tid);
3456 
3457 	return (retval);
3458 }
3459 
3460 static void
3461 ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid,
3462     struct ath_buf *bf_first, ath_bufhead *bf_q)
3463 {
3464 	struct ath_buf *bf, *bf_next, *nbf;
3465 
3466 	ATH_TX_LOCK_ASSERT(sc);
3467 
3468 	bf = bf_first;
3469 	while (bf) {
3470 		bf_next = bf->bf_next;
3471 		bf->bf_next = NULL;	/* Remove it from the aggr list */
3472 
3473 		/*
3474 		 * Don't allow a filtered frame to live forever.
3475 		 */
3476 		if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3477 			sc->sc_stats.ast_tx_swretrymax++;
3478 			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3479 			    "%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n",
3480 			    __func__,
3481 			    tid->tid,
3482 			    bf,
3483 			    SEQNO(bf->bf_state.bfs_seqno));
3484 			TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3485 			goto next;
3486 		}
3487 
3488 		if (bf->bf_flags & ATH_BUF_BUSY) {
3489 			nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3490 			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3491 			    "%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n",
3492 			    __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno));
3493 		} else {
3494 			nbf = bf;
3495 		}
3496 
3497 		/*
3498 		 * If the buffer couldn't be cloned, add it to bf_q;
3499 		 * the caller will free the buffer(s) as required.
3500 		 */
3501 		if (nbf == NULL) {
3502 			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3503 			    "%s: tid=%d, buffer couldn't be cloned! (%p) seqno=%d\n",
3504 			    __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno));
3505 			TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3506 		} else {
3507 			ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3508 		}
3509 next:
3510 		bf = bf_next;
3511 	}
3512 
3513 	ath_tx_tid_filt_comp_complete(sc, tid);
3514 }
3515 
3516 /*
3517  * Suspend the queue because we need to TX a BAR.
3518  */
3519 static void
3520 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid)
3521 {
3522 
3523 	ATH_TX_LOCK_ASSERT(sc);
3524 
3525 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3526 	    "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n",
3527 	    __func__,
3528 	    tid->tid,
3529 	    tid->bar_wait,
3530 	    tid->bar_tx);
3531 
3532 	/* We shouldn't be called when bar_tx is 1 */
3533 	if (tid->bar_tx) {
3534 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3535 		    "%s: bar_tx is 1?!\n", __func__);
3536 	}
3537 
3538 	/* If we've already been called, just be patient. */
3539 	if (tid->bar_wait)
3540 		return;
3541 
3542 	/* Wait! */
3543 	tid->bar_wait = 1;
3544 
3545 	/* Only one pause, no matter how many frames fail */
3546 	ath_tx_tid_pause(sc, tid);
3547 }
3548 
3549 /*
3550  * We've finished with BAR handling - either we succeeded or
3551  * failed. Either way, unsuspend TX.
3552  */
3553 static void
3554 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid)
3555 {
3556 
3557 	ATH_TX_LOCK_ASSERT(sc);
3558 
3559 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3560 	    "%s: %6D: TID=%d, called\n",
3561 	    __func__,
3562 	    tid->an->an_node.ni_macaddr,
3563 	    ":",
3564 	    tid->tid);
3565 
3566 	if (tid->bar_tx == 0 || tid->bar_wait == 0) {
3567 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3568 		    "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3569 		    __func__, tid->an->an_node.ni_macaddr, ":",
3570 		    tid->tid, tid->bar_tx, tid->bar_wait);
3571 	}
3572 
3573 	tid->bar_tx = tid->bar_wait = 0;
3574 	ath_tx_tid_resume(sc, tid);
3575 }
3576 
3577 /*
3578  * Return whether we're ready to TX a BAR frame.
3579  *
3580  * Requires the TID lock be held.
3581  */
3582 static int
3583 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid)
3584 {
3585 
3586 	ATH_TX_LOCK_ASSERT(sc);
3587 
3588 	if (tid->bar_wait == 0 || tid->hwq_depth > 0)
3589 		return (0);
3590 
3591 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3592 	    "%s: %6D: TID=%d, bar ready\n",
3593 	    __func__,
3594 	    tid->an->an_node.ni_macaddr,
3595 	    ":",
3596 	    tid->tid);
3597 
3598 	return (1);
3599 }
3600 
3601 /*
3602  * Check whether the current TID is ready to have a BAR
3603  * TXed and if so, do the TX.
3604  *
3605  * Since the TID/TXQ lock can't be held during a call to
3606  * ieee80211_send_bar(), we have to do the dirty thing of unlocking it,
3607  * sending the BAR and locking it again.
3608  *
3609  * Eventually, the code to send the BAR should be broken out
3610  * from this routine so the lock doesn't have to be reacquired
3611  * just to be immediately dropped by the caller.
3612  */
3613 static void
3614 ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid)
3615 {
3616 	struct ieee80211_tx_ampdu *tap;
3617 
3618 	ATH_TX_LOCK_ASSERT(sc);
3619 
3620 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3621 	    "%s: %6D: TID=%d, called\n",
3622 	    __func__,
3623 	    tid->an->an_node.ni_macaddr,
3624 	    ":",
3625 	    tid->tid);
3626 
3627 	tap = ath_tx_get_tx_tid(tid->an, tid->tid);
3628 
3629 	/*
3630 	 * This is an error condition!
3631 	 */
3632 	if (tid->bar_wait == 0 || tid->bar_tx == 1) {
3633 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3634 		    "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3635 		    __func__, tid->an->an_node.ni_macaddr, ":",
3636 		    tid->tid, tid->bar_tx, tid->bar_wait);
3637 		return;
3638 	}
3639 
3640 	/* Don't do anything if we still have pending frames */
3641 	if (tid->hwq_depth > 0) {
3642 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3643 		    "%s: %6D: TID=%d, hwq_depth=%d, waiting\n",
3644 		    __func__,
3645 		    tid->an->an_node.ni_macaddr,
3646 		    ":",
3647 		    tid->tid,
3648 		    tid->hwq_depth);
3649 		return;
3650 	}
3651 
3652 	/* We're now about to TX */
3653 	tid->bar_tx = 1;
3654 
3655 	/*
3656 	 * Override the clrdmask configuration for the next frame,
3657 	 * just to get the ball rolling.
3658 	 */
3659 	ath_tx_set_clrdmask(sc, tid->an);
3660 
3661 	/*
3662 	 * Calculate new BAW left edge, now that all frames have either
3663 	 * succeeded or failed.
3664 	 *
3665 	 * XXX verify this is _actually_ the valid value to begin at!
3666 	 */
3667 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3668 	    "%s: %6D: TID=%d, new BAW left edge=%d\n",
3669 	    __func__,
3670 	    tid->an->an_node.ni_macaddr,
3671 	    ":",
3672 	    tid->tid,
3673 	    tap->txa_start);
3674 
3675 	/* Try sending the BAR frame */
3676 	/* We can't hold the lock here! */
3677 
3678 	ATH_TX_UNLOCK(sc);
3679 	if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) {
3680 		/* Success? Now we wait for notification that it's done */
3681 		ATH_TX_LOCK(sc);
3682 		return;
3683 	}
3684 
3685 	/* Failure? For now, warn loudly and continue */
3686 	ATH_TX_LOCK(sc);
3687 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3688 	    "%s: %6D: TID=%d, failed to TX BAR, continue!\n",
3689 	    __func__, tid->an->an_node.ni_macaddr, ":",
3690 	    tid->tid);
3691 	ath_tx_tid_bar_unsuspend(sc, tid);
3692 }
3693 
3694 static void
3695 ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an,
3696     struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)
3697 {
3698 
3699 	ATH_TX_LOCK_ASSERT(sc);
3700 
3701 	/*
3702 	 * If the current TID is running AMPDU, update
3703 	 * the BAW.
3704 	 */
3705 	if (ath_tx_ampdu_running(sc, an, tid->tid) &&
3706 	    bf->bf_state.bfs_dobaw) {
3707 		/*
3708 		 * Only remove the frame from the BAW if it's
3709 		 * been transmitted at least once; this means
3710 		 * the frame was in the BAW to begin with.
3711 		 */
3712 		if (bf->bf_state.bfs_retries > 0) {
3713 			ath_tx_update_baw(sc, an, tid, bf);
3714 			bf->bf_state.bfs_dobaw = 0;
3715 		}
3716 #if 0
3717 		/*
3718 		 * This has become a non-fatal error now
3719 		 */
3720 		if (! bf->bf_state.bfs_addedbaw)
3721 			DPRINTF(sc, ATH_DEBUG_SW_TX_BAW
3722 			    "%s: wasn't added: seqno %d\n",
3723 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
3724 #endif
3725 	}
3726 
3727 	/* Strip it out of an aggregate list if it was in one */
3728 	bf->bf_next = NULL;
3729 
3730 	/* Insert on the free queue to be freed by the caller */
3731 	TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
3732 }
3733 
3734 static void
3735 ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,
3736     const char *pfx, struct ath_tid *tid, struct ath_buf *bf)
3737 {
3738 	struct ieee80211_node *ni = &an->an_node;
3739 	struct ath_txq *txq;
3740 	struct ieee80211_tx_ampdu *tap;
3741 
3742 	txq = sc->sc_ac2q[tid->ac];
3743 	tap = ath_tx_get_tx_tid(an, tid->tid);
3744 
3745 	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3746 	    "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, "
3747 	    "seqno=%d, retry=%d\n",
3748 	    __func__,
3749 	    pfx,
3750 	    ni->ni_macaddr,
3751 	    ":",
3752 	    bf,
3753 	    bf->bf_state.bfs_addedbaw,
3754 	    bf->bf_state.bfs_dobaw,
3755 	    SEQNO(bf->bf_state.bfs_seqno),
3756 	    bf->bf_state.bfs_retries);
3757 	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3758 	    "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3759 	    __func__,
3760 	    pfx,
3761 	    ni->ni_macaddr,
3762 	    ":",
3763 	    bf,
3764 	    txq->axq_qnum,
3765 	    txq->axq_depth,
3766 	    txq->axq_aggr_depth);
3767 	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3768 	    "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, "
3769 	      "isfiltered=%d\n",
3770 	    __func__,
3771 	    pfx,
3772 	    ni->ni_macaddr,
3773 	    ":",
3774 	    bf,
3775 	    tid->axq_depth,
3776 	    tid->hwq_depth,
3777 	    tid->bar_wait,
3778 	    tid->isfiltered);
3779 	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3780 	    "%s: %s: %6D: tid %d: "
3781 	    "sched=%d, paused=%d, "
3782 	    "incomp=%d, baw_head=%d, "
3783 	    "baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
3784 	     __func__,
3785 	     pfx,
3786 	     ni->ni_macaddr,
3787 	     ":",
3788 	     tid->tid,
3789 	     tid->sched, tid->paused,
3790 	     tid->incomp, tid->baw_head,
3791 	     tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
3792 	     ni->ni_txseqs[tid->tid]);
3793 
3794 	/* XXX Dump the frame, see what it is? */
3795 	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
3796 		ieee80211_dump_pkt(ni->ni_ic,
3797 		    mtod(bf->bf_m, const uint8_t *),
3798 		    bf->bf_m->m_len, 0, -1);
3799 }
3800 
3801 /*
3802  * Free any packets currently pending in the software TX queue.
3803  *
3804  * This will be called when a node is being deleted.
3805  *
3806  * It can also be called on an active node during an interface
3807  * reset or state transition.
3808  *
3809  * (From Linux/reference):
3810  *
3811  * TODO: For frame(s) that are in the retry state, we will reuse the
3812  * sequence number(s) without setting the retry bit. The
3813  * alternative is to give up on these and BAR the receiver's window
3814  * forward.
3815  */
3816 static void
3817 ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,
3818     struct ath_tid *tid, ath_bufhead *bf_cq)
3819 {
3820 	struct ath_buf *bf;
3821 	struct ieee80211_tx_ampdu *tap;
3822 	struct ieee80211_node *ni = &an->an_node;
3823 	int t;
3824 
3825 	tap = ath_tx_get_tx_tid(an, tid->tid);
3826 
3827 	ATH_TX_LOCK_ASSERT(sc);
3828 
3829 	/* Walk the queue, free frames */
3830 	t = 0;
3831 	for (;;) {
3832 		bf = ATH_TID_FIRST(tid);
3833 		if (bf == NULL) {
3834 			break;
3835 		}
3836 
3837 		if (t == 0) {
3838 			ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
3839 //			t = 1;
3840 		}
3841 
3842 		ATH_TID_REMOVE(tid, bf, bf_list);
3843 		ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3844 	}
3845 
3846 	/* And now, drain the filtered frame queue */
3847 	t = 0;
3848 	for (;;) {
3849 		bf = ATH_TID_FILT_FIRST(tid);
3850 		if (bf == NULL)
3851 			break;
3852 
3853 		if (t == 0) {
3854 			ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
3855 //			t = 1;
3856 		}
3857 
3858 		ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3859 		ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3860 	}
3861 
3862 	/*
3863 	 * Override the clrdmask configuration for the next frame
3864 	 * in case there is some future transmission, just to get
3865 	 * the ball rolling.
3866 	 *
3867 	 * This won't hurt things if the TID is about to be freed.
3868 	 */
3869 	ath_tx_set_clrdmask(sc, tid->an);
3870 
3871 	/*
3872 	 * Now that it's completed, grab the TID lock and update
3873 	 * the sequence number and BAW window.
3874 	 * Because sequence numbers have been assigned to frames
3875 	 * that haven't been sent yet, it's entirely possible
3876 	 * we'll be called with some pending frames that have not
3877 	 * been transmitted.
3878 	 *
3879 	 * The cleaner solution is to do the sequence number allocation
3880 	 * when the packet is first transmitted - and thus the "retries"
3881 	 * check above would be enough to update the BAW/seqno.
3882 	 */
3883 
3884 	/* But don't do it for non-QoS TIDs */
3885 	if (tap) {
3886 #if 1
3887 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3888 		    "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n",
3889 		    __func__,
3890 		    ni->ni_macaddr,
3891 		    ":",
3892 		    an,
3893 		    tid->tid,
3894 		    tap->txa_start);
3895 #endif
3896 		ni->ni_txseqs[tid->tid] = tap->txa_start;
3897 		tid->baw_tail = tid->baw_head;
3898 	}
3899 }
3900 
3901 /*
3902  * Reset the TID state.  This must be only called once the node has
3903  * had its frames flushed from this TID, to ensure that no other
3904  * pause / unpause logic can kick in.
3905  */
3906 static void
3907 ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
3908 {
3909 
3910 #if 0
3911 	tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
3912 	tid->paused = tid->sched = tid->addba_tx_pending = 0;
3913 	tid->incomp = tid->cleanup_inprogress = 0;
3914 #endif
3915 
3916 	/*
3917 	 * If we have a bar_wait set, we need to unpause the TID
3918 	 * here.  Otherwise once cleanup has finished, the TID won't
3919 	 * have the right paused counter.
3920 	 *
3921 	 * XXX I'm not going through resume here - I don't want the
3922 	 * node to be rescheuled just yet.  This however should be
3923 	 * methodized!
3924 	 */
3925 	if (tid->bar_wait) {
3926 		if (tid->paused > 0) {
3927 			tid->paused --;
3928 		}
3929 	}
3930 
3931 	/*
3932 	 * XXX same with a currently filtered TID.
3933 	 *
3934 	 * Since this is being called during a flush, we assume that
3935 	 * the filtered frame list is actually empty.
3936 	 *
3937 	 * XXX TODO: add in a check to ensure that the filtered queue
3938 	 * depth is actually 0!
3939 	 */
3940 	if (tid->isfiltered) {
3941 		if (tid->paused > 0) {
3942 			tid->paused --;
3943 		}
3944 	}
3945 
3946 	/*
3947 	 * Clear BAR, filtered frames, scheduled and ADDBA pending.
3948 	 * The TID may be going through cleanup from the last association
3949 	 * where things in the BAW are still in the hardware queue.
3950 	 */
3951 	tid->bar_wait = 0;
3952 	tid->bar_tx = 0;
3953 	tid->isfiltered = 0;
3954 	tid->sched = 0;
3955 	tid->addba_tx_pending = 0;
3956 
3957 	/*
3958 	 * XXX TODO: it may just be enough to walk the HWQs and mark
3959 	 * frames for that node as non-aggregate; or mark the ath_node
3960 	 * with something that indicates that aggregation is no longer
3961 	 * occurring.  Then we can just toss the BAW complaints and
3962 	 * do a complete hard reset of state here - no pause, no
3963 	 * complete counter, etc.
3964 	 */
3965 
3966 }
3967 
3968 /*
3969  * Flush all software queued packets for the given node.
3970  *
3971  * This occurs when a completion handler frees the last buffer
3972  * for a node, and the node is thus freed. This causes the node
3973  * to be cleaned up, which ends up calling ath_tx_node_flush.
3974  */
3975 void
3976 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
3977 {
3978 	int tid;
3979 	ath_bufhead bf_cq;
3980 	struct ath_buf *bf;
3981 
3982 	TAILQ_INIT(&bf_cq);
3983 
3984 	ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",
3985 	    &an->an_node);
3986 
3987 	ATH_TX_LOCK(sc);
3988 	DPRINTF(sc, ATH_DEBUG_NODE,
3989 	    "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, "
3990 	    "swq_depth=%d, clrdmask=%d, leak_count=%d\n",
3991 	    __func__,
3992 	    an->an_node.ni_macaddr,
3993 	    ":",
3994 	    an->an_is_powersave,
3995 	    an->an_stack_psq,
3996 	    an->an_tim_set,
3997 	    an->an_swq_depth,
3998 	    an->clrdmask,
3999 	    an->an_leak_count);
4000 
4001 	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
4002 		struct ath_tid *atid = &an->an_tid[tid];
4003 
4004 		/* Free packets */
4005 		ath_tx_tid_drain(sc, an, atid, &bf_cq);
4006 
4007 		/* Remove this tid from the list of active tids */
4008 		ath_tx_tid_unsched(sc, atid);
4009 
4010 		/* Reset the per-TID pause, BAR, etc state */
4011 		ath_tx_tid_reset(sc, atid);
4012 	}
4013 
4014 	/*
4015 	 * Clear global leak count
4016 	 */
4017 	an->an_leak_count = 0;
4018 	ATH_TX_UNLOCK(sc);
4019 
4020 	/* Handle completed frames */
4021 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4022 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4023 		ath_tx_default_comp(sc, bf, 0);
4024 	}
4025 }
4026 
4027 /*
4028  * Drain all the software TXQs currently with traffic queued.
4029  */
4030 void
4031 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4032 {
4033 	struct ath_tid *tid;
4034 	ath_bufhead bf_cq;
4035 	struct ath_buf *bf;
4036 
4037 	TAILQ_INIT(&bf_cq);
4038 	ATH_TX_LOCK(sc);
4039 
4040 	/*
4041 	 * Iterate over all active tids for the given txq,
4042 	 * flushing and unsched'ing them
4043 	 */
4044 	while (! TAILQ_EMPTY(&txq->axq_tidq)) {
4045 		tid = TAILQ_FIRST(&txq->axq_tidq);
4046 		ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
4047 		ath_tx_tid_unsched(sc, tid);
4048 	}
4049 
4050 	ATH_TX_UNLOCK(sc);
4051 
4052 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4053 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4054 		ath_tx_default_comp(sc, bf, 0);
4055 	}
4056 }
4057 
4058 /*
4059  * Handle completion of non-aggregate session frames.
4060  *
4061  * This (currently) doesn't implement software retransmission of
4062  * non-aggregate frames!
4063  *
4064  * Software retransmission of non-aggregate frames needs to obey
4065  * the strict sequence number ordering, and drop any frames that
4066  * will fail this.
4067  *
4068  * For now, filtered frames and frame transmission will cause
4069  * all kinds of issues.  So we don't support them.
4070  *
4071  * So anyone queuing frames via ath_tx_normal_xmit() or
4072  * ath_tx_hw_queue_norm() must override and set CLRDMASK.
4073  */
4074 void
4075 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4076 {
4077 	struct ieee80211_node *ni = bf->bf_node;
4078 	struct ath_node *an = ATH_NODE(ni);
4079 	int tid = bf->bf_state.bfs_tid;
4080 	struct ath_tid *atid = &an->an_tid[tid];
4081 	struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4082 
4083 	/* The TID state is protected behind the TXQ lock */
4084 	ATH_TX_LOCK(sc);
4085 
4086 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
4087 	    __func__, bf, fail, atid->hwq_depth - 1);
4088 
4089 	atid->hwq_depth--;
4090 
4091 #if 0
4092 	/*
4093 	 * If the frame was filtered, stick it on the filter frame
4094 	 * queue and complain about it.  It shouldn't happen!
4095 	 */
4096 	if ((ts->ts_status & HAL_TXERR_FILT) ||
4097 	    (ts->ts_status != 0 && atid->isfiltered)) {
4098 		DPRINTF(sc, ATH_DEBUG_SW_TX,
4099 		    "%s: isfiltered=%d, ts_status=%d: huh?\n",
4100 		    __func__,
4101 		    atid->isfiltered,
4102 		    ts->ts_status);
4103 		ath_tx_tid_filt_comp_buf(sc, atid, bf);
4104 	}
4105 #endif
4106 	if (atid->isfiltered)
4107 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__);
4108 	if (atid->hwq_depth < 0)
4109 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
4110 		    __func__, atid->hwq_depth);
4111 
4112 	/* If the TID is being cleaned up, track things */
4113 	/* XXX refactor! */
4114 	if (atid->cleanup_inprogress) {
4115 		atid->incomp--;
4116 		if (atid->incomp == 0) {
4117 			DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4118 			    "%s: TID %d: cleaned up! resume!\n",
4119 			    __func__, tid);
4120 			atid->cleanup_inprogress = 0;
4121 			ath_tx_tid_resume(sc, atid);
4122 		}
4123 	}
4124 
4125 	/*
4126 	 * If the queue is filtered, potentially mark it as complete
4127 	 * and reschedule it as needed.
4128 	 *
4129 	 * This is required as there may be a subsequent TX descriptor
4130 	 * for this end-node that has CLRDMASK set, so it's quite possible
4131 	 * that a filtered frame will be followed by a non-filtered
4132 	 * (complete or otherwise) frame.
4133 	 *
4134 	 * XXX should we do this before we complete the frame?
4135 	 */
4136 	if (atid->isfiltered)
4137 		ath_tx_tid_filt_comp_complete(sc, atid);
4138 	ATH_TX_UNLOCK(sc);
4139 
4140 	/*
4141 	 * punt to rate control if we're not being cleaned up
4142 	 * during a hw queue drain and the frame wanted an ACK.
4143 	 */
4144 	if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
4145 		ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
4146 		    ts, bf->bf_state.bfs_pktlen,
4147 		    1, (ts->ts_status == 0) ? 0 : 1);
4148 
4149 	ath_tx_default_comp(sc, bf, fail);
4150 }
4151 
4152 /*
4153  * Handle cleanup of aggregate session packets that aren't
4154  * an A-MPDU.
4155  *
4156  * There's no need to update the BAW here - the session is being
4157  * torn down.
4158  */
4159 static void
4160 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4161 {
4162 	struct ieee80211_node *ni = bf->bf_node;
4163 	struct ath_node *an = ATH_NODE(ni);
4164 	int tid = bf->bf_state.bfs_tid;
4165 	struct ath_tid *atid = &an->an_tid[tid];
4166 
4167 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",
4168 	    __func__, tid, atid->incomp);
4169 
4170 	ATH_TX_LOCK(sc);
4171 	atid->incomp--;
4172 
4173 	/* XXX refactor! */
4174 	if (bf->bf_state.bfs_dobaw) {
4175 		ath_tx_update_baw(sc, an, atid, bf);
4176 		if (!bf->bf_state.bfs_addedbaw)
4177 			DPRINTF(sc, ATH_DEBUG_SW_TX,
4178 			    "%s: wasn't added: seqno %d\n",
4179 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
4180 	}
4181 
4182 	if (atid->incomp == 0) {
4183 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4184 		    "%s: TID %d: cleaned up! resume!\n",
4185 		    __func__, tid);
4186 		atid->cleanup_inprogress = 0;
4187 		ath_tx_tid_resume(sc, atid);
4188 	}
4189 	ATH_TX_UNLOCK(sc);
4190 
4191 	ath_tx_default_comp(sc, bf, 0);
4192 }
4193 
4194 
4195 /*
4196  * This as it currently stands is a bit dumb.  Ideally we'd just
4197  * fail the frame the normal way and have it permanently fail
4198  * via the normal aggregate completion path.
4199  */
4200 static void
4201 ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an,
4202     int tid, struct ath_buf *bf_head, ath_bufhead *bf_cq)
4203 {
4204 	struct ath_tid *atid = &an->an_tid[tid];
4205 	struct ath_buf *bf, *bf_next;
4206 
4207 	ATH_TX_LOCK_ASSERT(sc);
4208 
4209 	/*
4210 	 * Remove this frame from the queue.
4211 	 */
4212 	ATH_TID_REMOVE(atid, bf_head, bf_list);
4213 
4214 	/*
4215 	 * Loop over all the frames in the aggregate.
4216 	 */
4217 	bf = bf_head;
4218 	while (bf != NULL) {
4219 		bf_next = bf->bf_next;	/* next aggregate frame, or NULL */
4220 
4221 		/*
4222 		 * If it's been added to the BAW we need to kick
4223 		 * it out of the BAW before we continue.
4224 		 *
4225 		 * XXX if it's an aggregate, assert that it's in the
4226 		 * BAW - we shouldn't have it be in an aggregate
4227 		 * otherwise!
4228 		 */
4229 		if (bf->bf_state.bfs_addedbaw) {
4230 			ath_tx_update_baw(sc, an, atid, bf);
4231 			bf->bf_state.bfs_dobaw = 0;
4232 		}
4233 
4234 		/*
4235 		 * Give it the default completion handler.
4236 		 */
4237 		bf->bf_comp = ath_tx_normal_comp;
4238 		bf->bf_next = NULL;
4239 
4240 		/*
4241 		 * Add it to the list to free.
4242 		 */
4243 		TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
4244 
4245 		/*
4246 		 * Now advance to the next frame in the aggregate.
4247 		 */
4248 		bf = bf_next;
4249 	}
4250 }
4251 
4252 /*
4253  * Performs transmit side cleanup when TID changes from aggregated to
4254  * unaggregated and during reassociation.
4255  *
4256  * For now, this just tosses everything from the TID software queue
4257  * whether or not it has been retried and marks the TID as
4258  * pending completion if there's anything for this TID queued to
4259  * the hardware.
4260  *
4261  * The caller is responsible for pausing the TID and unpausing the
4262  * TID if no cleanup was required. Otherwise the cleanup path will
4263  * unpause the TID once the last hardware queued frame is completed.
4264  */
4265 static void
4266 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid,
4267     ath_bufhead *bf_cq)
4268 {
4269 	struct ath_tid *atid = &an->an_tid[tid];
4270 	struct ath_buf *bf, *bf_next;
4271 
4272 	ATH_TX_LOCK_ASSERT(sc);
4273 
4274 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4275 	    "%s: TID %d: called; inprogress=%d\n", __func__, tid,
4276 	    atid->cleanup_inprogress);
4277 
4278 	/*
4279 	 * Move the filtered frames to the TX queue, before
4280 	 * we run off and discard/process things.
4281 	 */
4282 
4283 	/* XXX this is really quite inefficient */
4284 	while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) {
4285 		ATH_TID_FILT_REMOVE(atid, bf, bf_list);
4286 		ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4287 	}
4288 
4289 	/*
4290 	 * Update the frames in the software TX queue:
4291 	 *
4292 	 * + Discard retry frames in the queue
4293 	 * + Fix the completion function to be non-aggregate
4294 	 */
4295 	bf = ATH_TID_FIRST(atid);
4296 	while (bf) {
4297 		/*
4298 		 * Grab the next frame in the list, we may
4299 		 * be fiddling with the list.
4300 		 */
4301 		bf_next = TAILQ_NEXT(bf, bf_list);
4302 
4303 		/*
4304 		 * Free the frame and all subframes.
4305 		 */
4306 		ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq);
4307 
4308 		/*
4309 		 * Next frame!
4310 		 */
4311 		bf = bf_next;
4312 	}
4313 
4314 	/*
4315 	 * If there's anything in the hardware queue we wait
4316 	 * for the TID HWQ to empty.
4317 	 */
4318 	if (atid->hwq_depth > 0) {
4319 		/*
4320 		 * XXX how about we kill atid->incomp, and instead
4321 		 * replace it with a macro that checks that atid->hwq_depth
4322 		 * is 0?
4323 		 */
4324 		atid->incomp = atid->hwq_depth;
4325 		atid->cleanup_inprogress = 1;
4326 	}
4327 
4328 	if (atid->cleanup_inprogress)
4329 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4330 		    "%s: TID %d: cleanup needed: %d packets\n",
4331 		    __func__, tid, atid->incomp);
4332 
4333 	/* Owner now must free completed frames */
4334 }
4335 
4336 static struct ath_buf *
4337 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
4338     struct ath_tid *tid, struct ath_buf *bf)
4339 {
4340 	struct ath_buf *nbf;
4341 	int error;
4342 
4343 	/*
4344 	 * Clone the buffer.  This will handle the dma unmap and
4345 	 * copy the node reference to the new buffer.  If this
4346 	 * works out, 'bf' will have no DMA mapping, no mbuf
4347 	 * pointer and no node reference.
4348 	 */
4349 	nbf = ath_buf_clone(sc, bf);
4350 
4351 #if 0
4352 	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n",
4353 	    __func__);
4354 #endif
4355 
4356 	if (nbf == NULL) {
4357 		/* Failed to clone */
4358 		DPRINTF(sc, ATH_DEBUG_XMIT,
4359 		    "%s: failed to clone a busy buffer\n",
4360 		    __func__);
4361 		return NULL;
4362 	}
4363 
4364 	/* Setup the dma for the new buffer */
4365 	error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
4366 	if (error != 0) {
4367 		DPRINTF(sc, ATH_DEBUG_XMIT,
4368 		    "%s: failed to setup dma for clone\n",
4369 		    __func__);
4370 		/*
4371 		 * Put this at the head of the list, not tail;
4372 		 * that way it doesn't interfere with the
4373 		 * busy buffer logic (which uses the tail of
4374 		 * the list.)
4375 		 */
4376 		ATH_TXBUF_LOCK(sc);
4377 		ath_returnbuf_head(sc, nbf);
4378 		ATH_TXBUF_UNLOCK(sc);
4379 		return NULL;
4380 	}
4381 
4382 	/* Update BAW if required, before we free the original buf */
4383 	if (bf->bf_state.bfs_dobaw)
4384 		ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);
4385 
4386 	/* Free original buffer; return new buffer */
4387 	ath_freebuf(sc, bf);
4388 
4389 	return nbf;
4390 }
4391 
4392 /*
4393  * Handle retrying an unaggregate frame in an aggregate
4394  * session.
4395  *
4396  * If too many retries occur, pause the TID, wait for
4397  * any further retransmits (as there's no reason why
4398  * non-aggregate frames in an aggregate session are
4399  * transmitted in-order; they just have to be in-BAW)
4400  * and then queue a BAR.
4401  */
4402 static void
4403 ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4404 {
4405 	struct ieee80211_node *ni = bf->bf_node;
4406 	struct ath_node *an = ATH_NODE(ni);
4407 	int tid = bf->bf_state.bfs_tid;
4408 	struct ath_tid *atid = &an->an_tid[tid];
4409 	struct ieee80211_tx_ampdu *tap;
4410 
4411 	ATH_TX_LOCK(sc);
4412 
4413 	tap = ath_tx_get_tx_tid(an, tid);
4414 
4415 	/*
4416 	 * If the buffer is marked as busy, we can't directly
4417 	 * reuse it. Instead, try to clone the buffer.
4418 	 * If the clone is successful, recycle the old buffer.
4419 	 * If the clone is unsuccessful, set bfs_retries to max
4420 	 * to force the next bit of code to free the buffer
4421 	 * for us.
4422 	 */
4423 	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4424 	    (bf->bf_flags & ATH_BUF_BUSY)) {
4425 		struct ath_buf *nbf;
4426 		nbf = ath_tx_retry_clone(sc, an, atid, bf);
4427 		if (nbf)
4428 			/* bf has been freed at this point */
4429 			bf = nbf;
4430 		else
4431 			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4432 	}
4433 
4434 	if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4435 		DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4436 		    "%s: exceeded retries; seqno %d\n",
4437 		    __func__, SEQNO(bf->bf_state.bfs_seqno));
4438 		sc->sc_stats.ast_tx_swretrymax++;
4439 
4440 		/* Update BAW anyway */
4441 		if (bf->bf_state.bfs_dobaw) {
4442 			ath_tx_update_baw(sc, an, atid, bf);
4443 			if (! bf->bf_state.bfs_addedbaw)
4444 				DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4445 				    "%s: wasn't added: seqno %d\n",
4446 				    __func__, SEQNO(bf->bf_state.bfs_seqno));
4447 		}
4448 		bf->bf_state.bfs_dobaw = 0;
4449 
4450 		/* Suspend the TX queue and get ready to send the BAR */
4451 		ath_tx_tid_bar_suspend(sc, atid);
4452 
4453 		/* Send the BAR if there are no other frames waiting */
4454 		if (ath_tx_tid_bar_tx_ready(sc, atid))
4455 			ath_tx_tid_bar_tx(sc, atid);
4456 
4457 		ATH_TX_UNLOCK(sc);
4458 
4459 		/* Free buffer, bf is free after this call */
4460 		ath_tx_default_comp(sc, bf, 0);
4461 		return;
4462 	}
4463 
4464 	/*
4465 	 * This increments the retry counter as well as
4466 	 * sets the retry flag in the ath_buf and packet
4467 	 * body.
4468 	 */
4469 	ath_tx_set_retry(sc, bf);
4470 	sc->sc_stats.ast_tx_swretries++;
4471 
4472 	/*
4473 	 * Insert this at the head of the queue, so it's
4474 	 * retried before any current/subsequent frames.
4475 	 */
4476 	ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4477 	ath_tx_tid_sched(sc, atid);
4478 	/* Send the BAR if there are no other frames waiting */
4479 	if (ath_tx_tid_bar_tx_ready(sc, atid))
4480 		ath_tx_tid_bar_tx(sc, atid);
4481 
4482 	ATH_TX_UNLOCK(sc);
4483 }
4484 
4485 /*
4486  * Common code for aggregate excessive retry/subframe retry.
4487  * If retrying, queues buffers to bf_q. If not, frees the
4488  * buffers.
4489  *
4490  * XXX should unify this with ath_tx_aggr_retry_unaggr()
4491  */
4492 static int
4493 ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,
4494     ath_bufhead *bf_q)
4495 {
4496 	struct ieee80211_node *ni = bf->bf_node;
4497 	struct ath_node *an = ATH_NODE(ni);
4498 	int tid = bf->bf_state.bfs_tid;
4499 	struct ath_tid *atid = &an->an_tid[tid];
4500 
4501 	ATH_TX_LOCK_ASSERT(sc);
4502 
4503 	/* XXX clr11naggr should be done for all subframes */
4504 	ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
4505 	ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);
4506 
4507 	/* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */
4508 
4509 	/*
4510 	 * If the buffer is marked as busy, we can't directly
4511 	 * reuse it. Instead, try to clone the buffer.
4512 	 * If the clone is successful, recycle the old buffer.
4513 	 * If the clone is unsuccessful, set bfs_retries to max
4514 	 * to force the next bit of code to free the buffer
4515 	 * for us.
4516 	 */
4517 	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4518 	    (bf->bf_flags & ATH_BUF_BUSY)) {
4519 		struct ath_buf *nbf;
4520 		nbf = ath_tx_retry_clone(sc, an, atid, bf);
4521 		if (nbf)
4522 			/* bf has been freed at this point */
4523 			bf = nbf;
4524 		else
4525 			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4526 	}
4527 
4528 	if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4529 		sc->sc_stats.ast_tx_swretrymax++;
4530 		DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4531 		    "%s: max retries: seqno %d\n",
4532 		    __func__, SEQNO(bf->bf_state.bfs_seqno));
4533 		ath_tx_update_baw(sc, an, atid, bf);
4534 		if (!bf->bf_state.bfs_addedbaw)
4535 			DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4536 			    "%s: wasn't added: seqno %d\n",
4537 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
4538 		bf->bf_state.bfs_dobaw = 0;
4539 		return 1;
4540 	}
4541 
4542 	ath_tx_set_retry(sc, bf);
4543 	sc->sc_stats.ast_tx_swretries++;
4544 	bf->bf_next = NULL;		/* Just to make sure */
4545 
4546 	/* Clear the aggregate state */
4547 	bf->bf_state.bfs_aggr = 0;
4548 	bf->bf_state.bfs_ndelim = 0;	/* ??? needed? */
4549 	bf->bf_state.bfs_nframes = 1;
4550 
4551 	TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
4552 	return 0;
4553 }
4554 
4555 /*
4556  * error pkt completion for an aggregate destination
4557  */
4558 static void
4559 ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first,
4560     struct ath_tid *tid)
4561 {
4562 	struct ieee80211_node *ni = bf_first->bf_node;
4563 	struct ath_node *an = ATH_NODE(ni);
4564 	struct ath_buf *bf_next, *bf;
4565 	ath_bufhead bf_q;
4566 	int drops = 0;
4567 	struct ieee80211_tx_ampdu *tap;
4568 	ath_bufhead bf_cq;
4569 
4570 	TAILQ_INIT(&bf_q);
4571 	TAILQ_INIT(&bf_cq);
4572 
4573 	/*
4574 	 * Update rate control - all frames have failed.
4575 	 *
4576 	 * XXX use the length in the first frame in the series;
4577 	 * XXX just so things are consistent for now.
4578 	 */
4579 	ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
4580 	    &bf_first->bf_status.ds_txstat,
4581 	    bf_first->bf_state.bfs_pktlen,
4582 	    bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);
4583 
4584 	ATH_TX_LOCK(sc);
4585 	tap = ath_tx_get_tx_tid(an, tid->tid);
4586 	sc->sc_stats.ast_tx_aggr_failall++;
4587 
4588 	/* Retry all subframes */
4589 	bf = bf_first;
4590 	while (bf) {
4591 		bf_next = bf->bf_next;
4592 		bf->bf_next = NULL;	/* Remove it from the aggr list */
4593 		sc->sc_stats.ast_tx_aggr_fail++;
4594 		if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4595 			drops++;
4596 			bf->bf_next = NULL;
4597 			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4598 		}
4599 		bf = bf_next;
4600 	}
4601 
4602 	/* Prepend all frames to the beginning of the queue */
4603 	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4604 		TAILQ_REMOVE(&bf_q, bf, bf_list);
4605 		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
4606 	}
4607 
4608 	/*
4609 	 * Schedule the TID to be re-tried.
4610 	 */
4611 	ath_tx_tid_sched(sc, tid);
4612 
4613 	/*
4614 	 * send bar if we dropped any frames
4615 	 *
4616 	 * Keep the txq lock held for now, as we need to ensure
4617 	 * that ni_txseqs[] is consistent (as it's being updated
4618 	 * in the ifnet TX context or raw TX context.)
4619 	 */
4620 	if (drops) {
4621 		/* Suspend the TX queue and get ready to send the BAR */
4622 		ath_tx_tid_bar_suspend(sc, tid);
4623 	}
4624 
4625 	/*
4626 	 * Send BAR if required
4627 	 */
4628 	if (ath_tx_tid_bar_tx_ready(sc, tid))
4629 		ath_tx_tid_bar_tx(sc, tid);
4630 
4631 	ATH_TX_UNLOCK(sc);
4632 
4633 	/* Complete frames which errored out */
4634 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4635 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4636 		ath_tx_default_comp(sc, bf, 0);
4637 	}
4638 }
4639 
4640 /*
4641  * Handle clean-up of packets from an aggregate list.
4642  *
4643  * There's no need to update the BAW here - the session is being
4644  * torn down.
4645  */
4646 static void
4647 ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
4648 {
4649 	struct ath_buf *bf, *bf_next;
4650 	struct ieee80211_node *ni = bf_first->bf_node;
4651 	struct ath_node *an = ATH_NODE(ni);
4652 	int tid = bf_first->bf_state.bfs_tid;
4653 	struct ath_tid *atid = &an->an_tid[tid];
4654 
4655 	ATH_TX_LOCK(sc);
4656 
4657 	/* update incomp */
4658 	atid->incomp--;
4659 
4660 	/* Update the BAW */
4661 	bf = bf_first;
4662 	while (bf) {
4663 		/* XXX refactor! */
4664 		if (bf->bf_state.bfs_dobaw) {
4665 			ath_tx_update_baw(sc, an, atid, bf);
4666 			if (!bf->bf_state.bfs_addedbaw)
4667 				DPRINTF(sc, ATH_DEBUG_SW_TX,
4668 				    "%s: wasn't added: seqno %d\n",
4669 				    __func__, SEQNO(bf->bf_state.bfs_seqno));
4670 		}
4671 		bf = bf->bf_next;
4672 	}
4673 
4674 	if (atid->incomp == 0) {
4675 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4676 		    "%s: TID %d: cleaned up! resume!\n",
4677 		    __func__, tid);
4678 		atid->cleanup_inprogress = 0;
4679 		ath_tx_tid_resume(sc, atid);
4680 	}
4681 
4682 	/* Send BAR if required */
4683 	/* XXX why would we send a BAR when transitioning to non-aggregation? */
4684 	/*
4685 	 * XXX TODO: we should likely just tear down the BAR state here,
4686 	 * rather than sending a BAR.
4687 	 */
4688 	if (ath_tx_tid_bar_tx_ready(sc, atid))
4689 		ath_tx_tid_bar_tx(sc, atid);
4690 
4691 	ATH_TX_UNLOCK(sc);
4692 
4693 	/* Handle frame completion as individual frames */
4694 	bf = bf_first;
4695 	while (bf) {
4696 		bf_next = bf->bf_next;
4697 		bf->bf_next = NULL;
4698 		ath_tx_default_comp(sc, bf, 1);
4699 		bf = bf_next;
4700 	}
4701 }
4702 
4703 /*
4704  * Handle completion of an set of aggregate frames.
4705  *
4706  * Note: the completion handler is the last descriptor in the aggregate,
4707  * not the last descriptor in the first frame.
4708  */
4709 static void
4710 ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,
4711     int fail)
4712 {
4713 	//struct ath_desc *ds = bf->bf_lastds;
4714 	struct ieee80211_node *ni = bf_first->bf_node;
4715 	struct ath_node *an = ATH_NODE(ni);
4716 	int tid = bf_first->bf_state.bfs_tid;
4717 	struct ath_tid *atid = &an->an_tid[tid];
4718 	struct ath_tx_status ts;
4719 	struct ieee80211_tx_ampdu *tap;
4720 	ath_bufhead bf_q;
4721 	ath_bufhead bf_cq;
4722 	int seq_st, tx_ok;
4723 	int hasba, isaggr;
4724 	uint32_t ba[2];
4725 	struct ath_buf *bf, *bf_next;
4726 	int ba_index;
4727 	int drops = 0;
4728 	int nframes = 0, nbad = 0, nf;
4729 	int pktlen;
4730 	/* XXX there's too much on the stack? */
4731 	struct ath_rc_series rc[ATH_RC_NUM];
4732 	int txseq;
4733 
4734 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
4735 	    __func__, atid->hwq_depth);
4736 
4737 	/*
4738 	 * Take a copy; this may be needed -after- bf_first
4739 	 * has been completed and freed.
4740 	 */
4741 	ts = bf_first->bf_status.ds_txstat;
4742 
4743 	TAILQ_INIT(&bf_q);
4744 	TAILQ_INIT(&bf_cq);
4745 
4746 	/* The TID state is kept behind the TXQ lock */
4747 	ATH_TX_LOCK(sc);
4748 
4749 	atid->hwq_depth--;
4750 	if (atid->hwq_depth < 0)
4751 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n",
4752 		    __func__, atid->hwq_depth);
4753 
4754 	/*
4755 	 * If the TID is filtered, handle completing the filter
4756 	 * transition before potentially kicking it to the cleanup
4757 	 * function.
4758 	 *
4759 	 * XXX this is duplicate work, ew.
4760 	 */
4761 	if (atid->isfiltered)
4762 		ath_tx_tid_filt_comp_complete(sc, atid);
4763 
4764 	/*
4765 	 * Punt cleanup to the relevant function, not our problem now
4766 	 */
4767 	if (atid->cleanup_inprogress) {
4768 		if (atid->isfiltered)
4769 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4770 			    "%s: isfiltered=1, normal_comp?\n",
4771 			    __func__);
4772 		ATH_TX_UNLOCK(sc);
4773 		ath_tx_comp_cleanup_aggr(sc, bf_first);
4774 		return;
4775 	}
4776 
4777 	/*
4778 	 * If the frame is filtered, transition to filtered frame
4779 	 * mode and add this to the filtered frame list.
4780 	 *
4781 	 * XXX TODO: figure out how this interoperates with
4782 	 * BAR, pause and cleanup states.
4783 	 */
4784 	if ((ts.ts_status & HAL_TXERR_FILT) ||
4785 	    (ts.ts_status != 0 && atid->isfiltered)) {
4786 		if (fail != 0)
4787 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4788 			    "%s: isfiltered=1, fail=%d\n", __func__, fail);
4789 		ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);
4790 
4791 		/* Remove from BAW */
4792 		TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) {
4793 			if (bf->bf_state.bfs_addedbaw)
4794 				drops++;
4795 			if (bf->bf_state.bfs_dobaw) {
4796 				ath_tx_update_baw(sc, an, atid, bf);
4797 				if (!bf->bf_state.bfs_addedbaw)
4798 					DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4799 					    "%s: wasn't added: seqno %d\n",
4800 					    __func__,
4801 					    SEQNO(bf->bf_state.bfs_seqno));
4802 			}
4803 			bf->bf_state.bfs_dobaw = 0;
4804 		}
4805 		/*
4806 		 * If any intermediate frames in the BAW were dropped when
4807 		 * handling filtering things, send a BAR.
4808 		 */
4809 		if (drops)
4810 			ath_tx_tid_bar_suspend(sc, atid);
4811 
4812 		/*
4813 		 * Finish up by sending a BAR if required and freeing
4814 		 * the frames outside of the TX lock.
4815 		 */
4816 		goto finish_send_bar;
4817 	}
4818 
4819 	/*
4820 	 * XXX for now, use the first frame in the aggregate for
4821 	 * XXX rate control completion; it's at least consistent.
4822 	 */
4823 	pktlen = bf_first->bf_state.bfs_pktlen;
4824 
4825 	/*
4826 	 * Handle errors first!
4827 	 *
4828 	 * Here, handle _any_ error as a "exceeded retries" error.
4829 	 * Later on (when filtered frames are to be specially handled)
4830 	 * it'll have to be expanded.
4831 	 */
4832 #if 0
4833 	if (ts.ts_status & HAL_TXERR_XRETRY) {
4834 #endif
4835 	if (ts.ts_status != 0) {
4836 		ATH_TX_UNLOCK(sc);
4837 		ath_tx_comp_aggr_error(sc, bf_first, atid);
4838 		return;
4839 	}
4840 
4841 	tap = ath_tx_get_tx_tid(an, tid);
4842 
4843 	/*
4844 	 * extract starting sequence and block-ack bitmap
4845 	 */
4846 	/* XXX endian-ness of seq_st, ba? */
4847 	seq_st = ts.ts_seqnum;
4848 	hasba = !! (ts.ts_flags & HAL_TX_BA);
4849 	tx_ok = (ts.ts_status == 0);
4850 	isaggr = bf_first->bf_state.bfs_aggr;
4851 	ba[0] = ts.ts_ba_low;
4852 	ba[1] = ts.ts_ba_high;
4853 
4854 	/*
4855 	 * Copy the TX completion status and the rate control
4856 	 * series from the first descriptor, as it may be freed
4857 	 * before the rate control code can get its grubby fingers
4858 	 * into things.
4859 	 */
4860 	memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));
4861 
4862 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4863 	    "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, "
4864 	    "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n",
4865 	    __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
4866 	    isaggr, seq_st, hasba, ba[0], ba[1]);
4867 
4868 	/*
4869 	 * The reference driver doesn't do this; it simply ignores
4870 	 * this check in its entirety.
4871 	 *
4872 	 * I've seen this occur when using iperf to send traffic
4873 	 * out tid 1 - the aggregate frames are all marked as TID 1,
4874 	 * but the TXSTATUS has TID=0.  So, let's just ignore this
4875 	 * check.
4876 	 */
4877 #if 0
4878 	/* Occasionally, the MAC sends a tx status for the wrong TID. */
4879 	if (tid != ts.ts_tid) {
4880 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n",
4881 		    __func__, tid, ts.ts_tid);
4882 		tx_ok = 0;
4883 	}
4884 #endif
4885 
4886 	/* AR5416 BA bug; this requires an interface reset */
4887 	if (isaggr && tx_ok && (! hasba)) {
4888 		device_printf(sc->sc_dev,
4889 		    "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, "
4890 		    "seq_st=%d\n",
4891 		    __func__, hasba, tx_ok, isaggr, seq_st);
4892 		/* XXX TODO: schedule an interface reset */
4893 #ifdef ATH_DEBUG
4894 		ath_printtxbuf(sc, bf_first,
4895 		    sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
4896 #endif
4897 	}
4898 
4899 	/*
4900 	 * Walk the list of frames, figure out which ones were correctly
4901 	 * sent and which weren't.
4902 	 */
4903 	bf = bf_first;
4904 	nf = bf_first->bf_state.bfs_nframes;
4905 
4906 	/* bf_first is going to be invalid once this list is walked */
4907 	bf_first = NULL;
4908 
4909 	/*
4910 	 * Walk the list of completed frames and determine
4911 	 * which need to be completed and which need to be
4912 	 * retransmitted.
4913 	 *
4914 	 * For completed frames, the completion functions need
4915 	 * to be called at the end of this function as the last
4916 	 * node reference may free the node.
4917 	 *
4918 	 * Finally, since the TXQ lock can't be held during the
4919 	 * completion callback (to avoid lock recursion),
4920 	 * the completion calls have to be done outside of the
4921 	 * lock.
4922 	 */
4923 	while (bf) {
4924 		nframes++;
4925 		ba_index = ATH_BA_INDEX(seq_st,
4926 		    SEQNO(bf->bf_state.bfs_seqno));
4927 		bf_next = bf->bf_next;
4928 		bf->bf_next = NULL;	/* Remove it from the aggr list */
4929 
4930 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4931 		    "%s: checking bf=%p seqno=%d; ack=%d\n",
4932 		    __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
4933 		    ATH_BA_ISSET(ba, ba_index));
4934 
4935 		if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
4936 			sc->sc_stats.ast_tx_aggr_ok++;
4937 			ath_tx_update_baw(sc, an, atid, bf);
4938 			bf->bf_state.bfs_dobaw = 0;
4939 			if (!bf->bf_state.bfs_addedbaw)
4940 				DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4941 				    "%s: wasn't added: seqno %d\n",
4942 				    __func__, SEQNO(bf->bf_state.bfs_seqno));
4943 			bf->bf_next = NULL;
4944 			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4945 		} else {
4946 			sc->sc_stats.ast_tx_aggr_fail++;
4947 			if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4948 				drops++;
4949 				bf->bf_next = NULL;
4950 				TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4951 			}
4952 			nbad++;
4953 		}
4954 		bf = bf_next;
4955 	}
4956 
4957 	/*
4958 	 * Now that the BAW updates have been done, unlock
4959 	 *
4960 	 * txseq is grabbed before the lock is released so we
4961 	 * have a consistent view of what -was- in the BAW.
4962 	 * Anything after this point will not yet have been
4963 	 * TXed.
4964 	 */
4965 	txseq = tap->txa_start;
4966 	ATH_TX_UNLOCK(sc);
4967 
4968 	if (nframes != nf)
4969 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4970 		    "%s: num frames seen=%d; bf nframes=%d\n",
4971 		    __func__, nframes, nf);
4972 
4973 	/*
4974 	 * Now we know how many frames were bad, call the rate
4975 	 * control code.
4976 	 */
4977 	if (fail == 0)
4978 		ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes,
4979 		    nbad);
4980 
4981 	/*
4982 	 * send bar if we dropped any frames
4983 	 */
4984 	if (drops) {
4985 		/* Suspend the TX queue and get ready to send the BAR */
4986 		ATH_TX_LOCK(sc);
4987 		ath_tx_tid_bar_suspend(sc, atid);
4988 		ATH_TX_UNLOCK(sc);
4989 	}
4990 
4991 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4992 	    "%s: txa_start now %d\n", __func__, tap->txa_start);
4993 
4994 	ATH_TX_LOCK(sc);
4995 
4996 	/* Prepend all frames to the beginning of the queue */
4997 	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4998 		TAILQ_REMOVE(&bf_q, bf, bf_list);
4999 		ATH_TID_INSERT_HEAD(atid, bf, bf_list);
5000 	}
5001 
5002 	/*
5003 	 * Reschedule to grab some further frames.
5004 	 */
5005 	ath_tx_tid_sched(sc, atid);
5006 
5007 	/*
5008 	 * If the queue is filtered, re-schedule as required.
5009 	 *
5010 	 * This is required as there may be a subsequent TX descriptor
5011 	 * for this end-node that has CLRDMASK set, so it's quite possible
5012 	 * that a filtered frame will be followed by a non-filtered
5013 	 * (complete or otherwise) frame.
5014 	 *
5015 	 * XXX should we do this before we complete the frame?
5016 	 */
5017 	if (atid->isfiltered)
5018 		ath_tx_tid_filt_comp_complete(sc, atid);
5019 
5020 finish_send_bar:
5021 
5022 	/*
5023 	 * Send BAR if required
5024 	 */
5025 	if (ath_tx_tid_bar_tx_ready(sc, atid))
5026 		ath_tx_tid_bar_tx(sc, atid);
5027 
5028 	ATH_TX_UNLOCK(sc);
5029 
5030 	/* Do deferred completion */
5031 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5032 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
5033 		ath_tx_default_comp(sc, bf, 0);
5034 	}
5035 }
5036 
5037 /*
5038  * Handle completion of unaggregated frames in an ADDBA
5039  * session.
5040  *
5041  * Fail is set to 1 if the entry is being freed via a call to
5042  * ath_tx_draintxq().
5043  */
5044 static void
5045 ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
5046 {
5047 	struct ieee80211_node *ni = bf->bf_node;
5048 	struct ath_node *an = ATH_NODE(ni);
5049 	int tid = bf->bf_state.bfs_tid;
5050 	struct ath_tid *atid = &an->an_tid[tid];
5051 	struct ath_tx_status ts;
5052 	int drops = 0;
5053 
5054 	/*
5055 	 * Take a copy of this; filtering/cloning the frame may free the
5056 	 * bf pointer.
5057 	 */
5058 	ts = bf->bf_status.ds_txstat;
5059 
5060 	/*
5061 	 * Update rate control status here, before we possibly
5062 	 * punt to retry or cleanup.
5063 	 *
5064 	 * Do it outside of the TXQ lock.
5065 	 */
5066 	if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
5067 		ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
5068 		    &bf->bf_status.ds_txstat,
5069 		    bf->bf_state.bfs_pktlen,
5070 		    1, (ts.ts_status == 0) ? 0 : 1);
5071 
5072 	/*
5073 	 * This is called early so atid->hwq_depth can be tracked.
5074 	 * This unfortunately means that it's released and regrabbed
5075 	 * during retry and cleanup. That's rather inefficient.
5076 	 */
5077 	ATH_TX_LOCK(sc);
5078 
5079 	if (tid == IEEE80211_NONQOS_TID)
5080 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__);
5081 
5082 	DPRINTF(sc, ATH_DEBUG_SW_TX,
5083 	    "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n",
5084 	    __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
5085 	    SEQNO(bf->bf_state.bfs_seqno));
5086 
5087 	atid->hwq_depth--;
5088 	if (atid->hwq_depth < 0)
5089 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
5090 		    __func__, atid->hwq_depth);
5091 
5092 	/*
5093 	 * If the TID is filtered, handle completing the filter
5094 	 * transition before potentially kicking it to the cleanup
5095 	 * function.
5096 	 */
5097 	if (atid->isfiltered)
5098 		ath_tx_tid_filt_comp_complete(sc, atid);
5099 
5100 	/*
5101 	 * If a cleanup is in progress, punt to comp_cleanup;
5102 	 * rather than handling it here. It's thus their
5103 	 * responsibility to clean up, call the completion
5104 	 * function in net80211, etc.
5105 	 */
5106 	if (atid->cleanup_inprogress) {
5107 		if (atid->isfiltered)
5108 			DPRINTF(sc, ATH_DEBUG_SW_TX,
5109 			    "%s: isfiltered=1, normal_comp?\n",
5110 			    __func__);
5111 		ATH_TX_UNLOCK(sc);
5112 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n",
5113 		    __func__);
5114 		ath_tx_comp_cleanup_unaggr(sc, bf);
5115 		return;
5116 	}
5117 
5118 	/*
5119 	 * XXX TODO: how does cleanup, BAR and filtered frame handling
5120 	 * overlap?
5121 	 *
5122 	 * If the frame is filtered OR if it's any failure but
5123 	 * the TID is filtered, the frame must be added to the
5124 	 * filtered frame list.
5125 	 *
5126 	 * However - a busy buffer can't be added to the filtered
5127 	 * list as it will end up being recycled without having
5128 	 * been made available for the hardware.
5129 	 */
5130 	if ((ts.ts_status & HAL_TXERR_FILT) ||
5131 	    (ts.ts_status != 0 && atid->isfiltered)) {
5132 		int freeframe;
5133 
5134 		if (fail != 0)
5135 			DPRINTF(sc, ATH_DEBUG_SW_TX,
5136 			    "%s: isfiltered=1, fail=%d\n",
5137 			    __func__, fail);
5138 		freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
5139 		/*
5140 		 * If freeframe=0 then bf is no longer ours; don't
5141 		 * touch it.
5142 		 */
5143 		if (freeframe) {
5144 			/* Remove from BAW */
5145 			if (bf->bf_state.bfs_addedbaw)
5146 				drops++;
5147 			if (bf->bf_state.bfs_dobaw) {
5148 				ath_tx_update_baw(sc, an, atid, bf);
5149 				if (!bf->bf_state.bfs_addedbaw)
5150 					DPRINTF(sc, ATH_DEBUG_SW_TX,
5151 					    "%s: wasn't added: seqno %d\n",
5152 					    __func__, SEQNO(bf->bf_state.bfs_seqno));
5153 			}
5154 			bf->bf_state.bfs_dobaw = 0;
5155 		}
5156 
5157 		/*
5158 		 * If the frame couldn't be filtered, treat it as a drop and
5159 		 * prepare to send a BAR.
5160 		 */
5161 		if (freeframe && drops)
5162 			ath_tx_tid_bar_suspend(sc, atid);
5163 
5164 		/*
5165 		 * Send BAR if required
5166 		 */
5167 		if (ath_tx_tid_bar_tx_ready(sc, atid))
5168 			ath_tx_tid_bar_tx(sc, atid);
5169 
5170 		ATH_TX_UNLOCK(sc);
5171 		/*
5172 		 * If freeframe is set, then the frame couldn't be
5173 		 * cloned and bf is still valid.  Just complete/free it.
5174 		 */
5175 		if (freeframe)
5176 			ath_tx_default_comp(sc, bf, fail);
5177 
5178 		return;
5179 	}
5180 	/*
5181 	 * Don't bother with the retry check if all frames
5182 	 * are being failed (eg during queue deletion.)
5183 	 */
5184 #if 0
5185 	if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
5186 #endif
5187 	if (fail == 0 && ts.ts_status != 0) {
5188 		ATH_TX_UNLOCK(sc);
5189 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n",
5190 		    __func__);
5191 		ath_tx_aggr_retry_unaggr(sc, bf);
5192 		return;
5193 	}
5194 
5195 	/* Success? Complete */
5196 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
5197 	    __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
5198 	if (bf->bf_state.bfs_dobaw) {
5199 		ath_tx_update_baw(sc, an, atid, bf);
5200 		bf->bf_state.bfs_dobaw = 0;
5201 		if (!bf->bf_state.bfs_addedbaw)
5202 			DPRINTF(sc, ATH_DEBUG_SW_TX,
5203 			    "%s: wasn't added: seqno %d\n",
5204 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
5205 	}
5206 
5207 	/*
5208 	 * If the queue is filtered, re-schedule as required.
5209 	 *
5210 	 * This is required as there may be a subsequent TX descriptor
5211 	 * for this end-node that has CLRDMASK set, so it's quite possible
5212 	 * that a filtered frame will be followed by a non-filtered
5213 	 * (complete or otherwise) frame.
5214 	 *
5215 	 * XXX should we do this before we complete the frame?
5216 	 */
5217 	if (atid->isfiltered)
5218 		ath_tx_tid_filt_comp_complete(sc, atid);
5219 
5220 	/*
5221 	 * Send BAR if required
5222 	 */
5223 	if (ath_tx_tid_bar_tx_ready(sc, atid))
5224 		ath_tx_tid_bar_tx(sc, atid);
5225 
5226 	ATH_TX_UNLOCK(sc);
5227 
5228 	ath_tx_default_comp(sc, bf, fail);
5229 	/* bf is freed at this point */
5230 }
5231 
5232 void
5233 ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
5234 {
5235 	if (bf->bf_state.bfs_aggr)
5236 		ath_tx_aggr_comp_aggr(sc, bf, fail);
5237 	else
5238 		ath_tx_aggr_comp_unaggr(sc, bf, fail);
5239 }
5240 
5241 /*
5242  * Schedule some packets from the given node/TID to the hardware.
5243  *
5244  * This is the aggregate version.
5245  */
5246 void
5247 ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
5248     struct ath_tid *tid)
5249 {
5250 	struct ath_buf *bf;
5251 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5252 	struct ieee80211_tx_ampdu *tap;
5253 	ATH_AGGR_STATUS status;
5254 	ath_bufhead bf_q;
5255 
5256 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
5257 	ATH_TX_LOCK_ASSERT(sc);
5258 
5259 	/*
5260 	 * XXX TODO: If we're called for a queue that we're leaking frames to,
5261 	 * ensure we only leak one.
5262 	 */
5263 
5264 	tap = ath_tx_get_tx_tid(an, tid->tid);
5265 
5266 	if (tid->tid == IEEE80211_NONQOS_TID)
5267 		DPRINTF(sc, ATH_DEBUG_SW_TX,
5268 		    "%s: called for TID=NONQOS_TID?\n", __func__);
5269 
5270 	for (;;) {
5271 		status = ATH_AGGR_DONE;
5272 
5273 		/*
5274 		 * If the upper layer has paused the TID, don't
5275 		 * queue any further packets.
5276 		 *
5277 		 * This can also occur from the completion task because
5278 		 * of packet loss; but as its serialised with this code,
5279 		 * it won't "appear" half way through queuing packets.
5280 		 */
5281 		if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5282 			break;
5283 
5284 		bf = ATH_TID_FIRST(tid);
5285 		if (bf == NULL) {
5286 			break;
5287 		}
5288 
5289 		/*
5290 		 * If the packet doesn't fall within the BAW (eg a NULL
5291 		 * data frame), schedule it directly; continue.
5292 		 */
5293 		if (! bf->bf_state.bfs_dobaw) {
5294 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5295 			    "%s: non-baw packet\n",
5296 			    __func__);
5297 			ATH_TID_REMOVE(tid, bf, bf_list);
5298 
5299 			if (bf->bf_state.bfs_nframes > 1)
5300 				DPRINTF(sc, ATH_DEBUG_SW_TX,
5301 				    "%s: aggr=%d, nframes=%d\n",
5302 				    __func__,
5303 				    bf->bf_state.bfs_aggr,
5304 				    bf->bf_state.bfs_nframes);
5305 
5306 			/*
5307 			 * This shouldn't happen - such frames shouldn't
5308 			 * ever have been queued as an aggregate in the
5309 			 * first place.  However, make sure the fields
5310 			 * are correctly setup just to be totally sure.
5311 			 */
5312 			bf->bf_state.bfs_aggr = 0;
5313 			bf->bf_state.bfs_nframes = 1;
5314 
5315 			/* Update CLRDMASK just before this frame is queued */
5316 			ath_tx_update_clrdmask(sc, tid, bf);
5317 
5318 			ath_tx_do_ratelookup(sc, bf);
5319 			ath_tx_calc_duration(sc, bf);
5320 			ath_tx_calc_protection(sc, bf);
5321 			ath_tx_set_rtscts(sc, bf);
5322 			ath_tx_rate_fill_rcflags(sc, bf);
5323 			ath_tx_setds(sc, bf);
5324 			ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5325 
5326 			sc->sc_aggr_stats.aggr_nonbaw_pkt++;
5327 
5328 			/* Queue the packet; continue */
5329 			goto queuepkt;
5330 		}
5331 
5332 		TAILQ_INIT(&bf_q);
5333 
5334 		/*
5335 		 * Do a rate control lookup on the first frame in the
5336 		 * list. The rate control code needs that to occur
5337 		 * before it can determine whether to TX.
5338 		 * It's inaccurate because the rate control code doesn't
5339 		 * really "do" aggregate lookups, so it only considers
5340 		 * the size of the first frame.
5341 		 */
5342 		ath_tx_do_ratelookup(sc, bf);
5343 		bf->bf_state.bfs_rc[3].rix = 0;
5344 		bf->bf_state.bfs_rc[3].tries = 0;
5345 
5346 		ath_tx_calc_duration(sc, bf);
5347 		ath_tx_calc_protection(sc, bf);
5348 
5349 		ath_tx_set_rtscts(sc, bf);
5350 		ath_tx_rate_fill_rcflags(sc, bf);
5351 
5352 		status = ath_tx_form_aggr(sc, an, tid, &bf_q);
5353 
5354 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5355 		    "%s: ath_tx_form_aggr() status=%d\n", __func__, status);
5356 
5357 		/*
5358 		 * No frames to be picked up - out of BAW
5359 		 */
5360 		if (TAILQ_EMPTY(&bf_q))
5361 			break;
5362 
5363 		/*
5364 		 * This assumes that the descriptor list in the ath_bufhead
5365 		 * are already linked together via bf_next pointers.
5366 		 */
5367 		bf = TAILQ_FIRST(&bf_q);
5368 
5369 		if (status == ATH_AGGR_8K_LIMITED)
5370 			sc->sc_aggr_stats.aggr_rts_aggr_limited++;
5371 
5372 		/*
5373 		 * If it's the only frame send as non-aggregate
5374 		 * assume that ath_tx_form_aggr() has checked
5375 		 * whether it's in the BAW and added it appropriately.
5376 		 */
5377 		if (bf->bf_state.bfs_nframes == 1) {
5378 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5379 			    "%s: single-frame aggregate\n", __func__);
5380 
5381 			/* Update CLRDMASK just before this frame is queued */
5382 			ath_tx_update_clrdmask(sc, tid, bf);
5383 
5384 			bf->bf_state.bfs_aggr = 0;
5385 			bf->bf_state.bfs_ndelim = 0;
5386 			ath_tx_setds(sc, bf);
5387 			ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5388 			if (status == ATH_AGGR_BAW_CLOSED)
5389 				sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
5390 			else
5391 				sc->sc_aggr_stats.aggr_single_pkt++;
5392 		} else {
5393 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5394 			    "%s: multi-frame aggregate: %d frames, "
5395 			    "length %d\n",
5396 			     __func__, bf->bf_state.bfs_nframes,
5397 			    bf->bf_state.bfs_al);
5398 			bf->bf_state.bfs_aggr = 1;
5399 			sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
5400 			sc->sc_aggr_stats.aggr_aggr_pkt++;
5401 
5402 			/* Update CLRDMASK just before this frame is queued */
5403 			ath_tx_update_clrdmask(sc, tid, bf);
5404 
5405 			/*
5406 			 * Calculate the duration/protection as required.
5407 			 */
5408 			ath_tx_calc_duration(sc, bf);
5409 			ath_tx_calc_protection(sc, bf);
5410 
5411 			/*
5412 			 * Update the rate and rtscts information based on the
5413 			 * rate decision made by the rate control code;
5414 			 * the first frame in the aggregate needs it.
5415 			 */
5416 			ath_tx_set_rtscts(sc, bf);
5417 
5418 			/*
5419 			 * Setup the relevant descriptor fields
5420 			 * for aggregation. The first descriptor
5421 			 * already points to the rest in the chain.
5422 			 */
5423 			ath_tx_setds_11n(sc, bf);
5424 
5425 		}
5426 	queuepkt:
5427 		/* Set completion handler, multi-frame aggregate or not */
5428 		bf->bf_comp = ath_tx_aggr_comp;
5429 
5430 		if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
5431 			DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__);
5432 
5433 		/*
5434 		 * Update leak count and frame config if were leaking frames.
5435 		 *
5436 		 * XXX TODO: it should update all frames in an aggregate
5437 		 * correctly!
5438 		 */
5439 		ath_tx_leak_count_update(sc, tid, bf);
5440 
5441 		/* Punt to txq */
5442 		ath_tx_handoff(sc, txq, bf);
5443 
5444 		/* Track outstanding buffer count to hardware */
5445 		/* aggregates are "one" buffer */
5446 		tid->hwq_depth++;
5447 
5448 		/*
5449 		 * Break out if ath_tx_form_aggr() indicated
5450 		 * there can't be any further progress (eg BAW is full.)
5451 		 * Checking for an empty txq is done above.
5452 		 *
5453 		 * XXX locking on txq here?
5454 		 */
5455 		/* XXX TXQ locking */
5456 		if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5457 		    (status == ATH_AGGR_BAW_CLOSED ||
5458 		     status == ATH_AGGR_LEAK_CLOSED))
5459 			break;
5460 	}
5461 }
5462 
5463 /*
5464  * Schedule some packets from the given node/TID to the hardware.
5465  *
5466  * XXX TODO: this routine doesn't enforce the maximum TXQ depth.
5467  * It just dumps frames into the TXQ.  We should limit how deep
5468  * the transmit queue can grow for frames dispatched to the given
5469  * TXQ.
5470  *
5471  * To avoid locking issues, either we need to own the TXQ lock
5472  * at this point, or we need to pass in the maximum frame count
5473  * from the caller.
5474  */
5475 void
5476 ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
5477     struct ath_tid *tid)
5478 {
5479 	struct ath_buf *bf;
5480 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5481 
5482 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
5483 	    __func__, an, tid->tid);
5484 
5485 	ATH_TX_LOCK_ASSERT(sc);
5486 
5487 	/* Check - is AMPDU pending or running? then print out something */
5488 	if (ath_tx_ampdu_pending(sc, an, tid->tid))
5489 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n",
5490 		    __func__, tid->tid);
5491 	if (ath_tx_ampdu_running(sc, an, tid->tid))
5492 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n",
5493 		    __func__, tid->tid);
5494 
5495 	for (;;) {
5496 
5497 		/*
5498 		 * If the upper layers have paused the TID, don't
5499 		 * queue any further packets.
5500 		 *
5501 		 * XXX if we are leaking frames, make sure we decrement
5502 		 * that counter _and_ we continue here.
5503 		 */
5504 		if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5505 			break;
5506 
5507 		bf = ATH_TID_FIRST(tid);
5508 		if (bf == NULL) {
5509 			break;
5510 		}
5511 
5512 		ATH_TID_REMOVE(tid, bf, bf_list);
5513 
5514 		/* Sanity check! */
5515 		if (tid->tid != bf->bf_state.bfs_tid) {
5516 			DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !="
5517 			    " tid %d\n", __func__, bf->bf_state.bfs_tid,
5518 			    tid->tid);
5519 		}
5520 		/* Normal completion handler */
5521 		bf->bf_comp = ath_tx_normal_comp;
5522 
5523 		/*
5524 		 * Override this for now, until the non-aggregate
5525 		 * completion handler correctly handles software retransmits.
5526 		 */
5527 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
5528 
5529 		/* Update CLRDMASK just before this frame is queued */
5530 		ath_tx_update_clrdmask(sc, tid, bf);
5531 
5532 		/* Program descriptors + rate control */
5533 		ath_tx_do_ratelookup(sc, bf);
5534 		ath_tx_calc_duration(sc, bf);
5535 		ath_tx_calc_protection(sc, bf);
5536 		ath_tx_set_rtscts(sc, bf);
5537 		ath_tx_rate_fill_rcflags(sc, bf);
5538 		ath_tx_setds(sc, bf);
5539 
5540 		/*
5541 		 * Update the current leak count if
5542 		 * we're leaking frames; and set the
5543 		 * MORE flag as appropriate.
5544 		 */
5545 		ath_tx_leak_count_update(sc, tid, bf);
5546 
5547 		/* Track outstanding buffer count to hardware */
5548 		/* aggregates are "one" buffer */
5549 		tid->hwq_depth++;
5550 
5551 		/* Punt to hardware or software txq */
5552 		ath_tx_handoff(sc, txq, bf);
5553 	}
5554 }
5555 
5556 /*
5557  * Schedule some packets to the given hardware queue.
5558  *
5559  * This function walks the list of TIDs (ie, ath_node TIDs
5560  * with queued traffic) and attempts to schedule traffic
5561  * from them.
5562  *
5563  * TID scheduling is implemented as a FIFO, with TIDs being
5564  * added to the end of the queue after some frames have been
5565  * scheduled.
5566  */
5567 void
5568 ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
5569 {
5570 	struct ath_tid *tid, *next, *last;
5571 
5572 	ATH_TX_LOCK_ASSERT(sc);
5573 
5574 	/*
5575 	 * Don't schedule if the hardware queue is busy.
5576 	 * This (hopefully) gives some more time to aggregate
5577 	 * some packets in the aggregation queue.
5578 	 *
5579 	 * XXX It doesn't stop a parallel sender from sneaking
5580 	 * in transmitting a frame!
5581 	 */
5582 	/* XXX TXQ locking */
5583 	if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5584 		sc->sc_aggr_stats.aggr_sched_nopkt++;
5585 		return;
5586 	}
5587 	if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5588 		sc->sc_aggr_stats.aggr_sched_nopkt++;
5589 		return;
5590 	}
5591 
5592 	last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
5593 
5594 	TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
5595 		/*
5596 		 * Suspend paused queues here; they'll be resumed
5597 		 * once the addba completes or times out.
5598 		 */
5599 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
5600 		    __func__, tid->tid, tid->paused);
5601 		ath_tx_tid_unsched(sc, tid);
5602 		/*
5603 		 * This node may be in power-save and we're leaking
5604 		 * a frame; be careful.
5605 		 */
5606 		if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
5607 			goto loop_done;
5608 		}
5609 		if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
5610 			ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
5611 		else
5612 			ath_tx_tid_hw_queue_norm(sc, tid->an, tid);
5613 
5614 		/* Not empty? Re-schedule */
5615 		if (tid->axq_depth != 0)
5616 			ath_tx_tid_sched(sc, tid);
5617 
5618 		/*
5619 		 * Give the software queue time to aggregate more
5620 		 * packets.  If we aren't running aggregation then
5621 		 * we should still limit the hardware queue depth.
5622 		 */
5623 		/* XXX TXQ locking */
5624 		if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5625 			break;
5626 		}
5627 		if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5628 			break;
5629 		}
5630 loop_done:
5631 		/*
5632 		 * If this was the last entry on the original list, stop.
5633 		 * Otherwise nodes that have been rescheduled onto the end
5634 		 * of the TID FIFO list will just keep being rescheduled.
5635 		 *
5636 		 * XXX What should we do about nodes that were paused
5637 		 * but are pending a leaking frame in response to a ps-poll?
5638 		 * They'll be put at the front of the list; so they'll
5639 		 * prematurely trigger this condition! Ew.
5640 		 */
5641 		if (tid == last)
5642 			break;
5643 	}
5644 }
5645 
5646 /*
5647  * TX addba handling
5648  */
5649 
5650 /*
5651  * Return net80211 TID struct pointer, or NULL for none
5652  */
5653 struct ieee80211_tx_ampdu *
5654 ath_tx_get_tx_tid(struct ath_node *an, int tid)
5655 {
5656 	struct ieee80211_node *ni = &an->an_node;
5657 	struct ieee80211_tx_ampdu *tap;
5658 
5659 	if (tid == IEEE80211_NONQOS_TID)
5660 		return NULL;
5661 
5662 	tap = &ni->ni_tx_ampdu[tid];
5663 	return tap;
5664 }
5665 
5666 /*
5667  * Is AMPDU-TX running?
5668  */
5669 static int
5670 ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
5671 {
5672 	struct ieee80211_tx_ampdu *tap;
5673 
5674 	if (tid == IEEE80211_NONQOS_TID)
5675 		return 0;
5676 
5677 	tap = ath_tx_get_tx_tid(an, tid);
5678 	if (tap == NULL)
5679 		return 0;	/* Not valid; default to not running */
5680 
5681 	return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
5682 }
5683 
5684 /*
5685  * Is AMPDU-TX negotiation pending?
5686  */
5687 static int
5688 ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
5689 {
5690 	struct ieee80211_tx_ampdu *tap;
5691 
5692 	if (tid == IEEE80211_NONQOS_TID)
5693 		return 0;
5694 
5695 	tap = ath_tx_get_tx_tid(an, tid);
5696 	if (tap == NULL)
5697 		return 0;	/* Not valid; default to not pending */
5698 
5699 	return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
5700 }
5701 
5702 /*
5703  * Is AMPDU-TX pending for the given TID?
5704  */
5705 
5706 
5707 /*
5708  * Method to handle sending an ADDBA request.
5709  *
5710  * We tap this so the relevant flags can be set to pause the TID
5711  * whilst waiting for the response.
5712  *
5713  * XXX there's no timeout handler we can override?
5714  */
5715 int
5716 ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5717     int dialogtoken, int baparamset, int batimeout)
5718 {
5719 	struct ath_softc *sc = ni->ni_ic->ic_softc;
5720 	int tid = tap->txa_tid;
5721 	struct ath_node *an = ATH_NODE(ni);
5722 	struct ath_tid *atid = &an->an_tid[tid];
5723 
5724 	/*
5725 	 * XXX danger Will Robinson!
5726 	 *
5727 	 * Although the taskqueue may be running and scheduling some more
5728 	 * packets, these should all be _before_ the addba sequence number.
5729 	 * However, net80211 will keep self-assigning sequence numbers
5730 	 * until addba has been negotiated.
5731 	 *
5732 	 * In the past, these packets would be "paused" (which still works
5733 	 * fine, as they're being scheduled to the driver in the same
5734 	 * serialised method which is calling the addba request routine)
5735 	 * and when the aggregation session begins, they'll be dequeued
5736 	 * as aggregate packets and added to the BAW. However, now there's
5737 	 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
5738 	 * packets. Thus they never get included in the BAW tracking and
5739 	 * this can cause the initial burst of packets after the addba
5740 	 * negotiation to "hang", as they quickly fall outside the BAW.
5741 	 *
5742 	 * The "eventual" solution should be to tag these packets with
5743 	 * dobaw. Although net80211 has given us a sequence number,
5744 	 * it'll be "after" the left edge of the BAW and thus it'll
5745 	 * fall within it.
5746 	 */
5747 	ATH_TX_LOCK(sc);
5748 	/*
5749 	 * This is a bit annoying.  Until net80211 HT code inherits some
5750 	 * (any) locking, we may have this called in parallel BUT only
5751 	 * one response/timeout will be called.  Grr.
5752 	 */
5753 	if (atid->addba_tx_pending == 0) {
5754 		ath_tx_tid_pause(sc, atid);
5755 		atid->addba_tx_pending = 1;
5756 	}
5757 	ATH_TX_UNLOCK(sc);
5758 
5759 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5760 	    "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
5761 	    __func__,
5762 	    ni->ni_macaddr,
5763 	    ":",
5764 	    dialogtoken, baparamset, batimeout);
5765 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5766 	    "%s: txa_start=%d, ni_txseqs=%d\n",
5767 	    __func__, tap->txa_start, ni->ni_txseqs[tid]);
5768 
5769 	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
5770 	    batimeout);
5771 }
5772 
5773 /*
5774  * Handle an ADDBA response.
5775  *
5776  * We unpause the queue so TX'ing can resume.
5777  *
5778  * Any packets TX'ed from this point should be "aggregate" (whether
5779  * aggregate or not) so the BAW is updated.
5780  *
5781  * Note! net80211 keeps self-assigning sequence numbers until
5782  * ampdu is negotiated. This means the initially-negotiated BAW left
5783  * edge won't match the ni->ni_txseq.
5784  *
5785  * So, being very dirty, the BAW left edge is "slid" here to match
5786  * ni->ni_txseq.
5787  *
5788  * What likely SHOULD happen is that all packets subsequent to the
5789  * addba request should be tagged as aggregate and queued as non-aggregate
5790  * frames; thus updating the BAW. For now though, I'll just slide the
5791  * window.
5792  */
5793 int
5794 ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5795     int status, int code, int batimeout)
5796 {
5797 	struct ath_softc *sc = ni->ni_ic->ic_softc;
5798 	int tid = tap->txa_tid;
5799 	struct ath_node *an = ATH_NODE(ni);
5800 	struct ath_tid *atid = &an->an_tid[tid];
5801 	int r;
5802 
5803 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5804 	    "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__,
5805 	    ni->ni_macaddr,
5806 	    ":",
5807 	    status, code, batimeout);
5808 
5809 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5810 	    "%s: txa_start=%d, ni_txseqs=%d\n",
5811 	    __func__, tap->txa_start, ni->ni_txseqs[tid]);
5812 
5813 	/*
5814 	 * Call this first, so the interface flags get updated
5815 	 * before the TID is unpaused. Otherwise a race condition
5816 	 * exists where the unpaused TID still doesn't yet have
5817 	 * IEEE80211_AGGR_RUNNING set.
5818 	 */
5819 	r = sc->sc_addba_response(ni, tap, status, code, batimeout);
5820 
5821 	ATH_TX_LOCK(sc);
5822 	atid->addba_tx_pending = 0;
5823 	/*
5824 	 * XXX dirty!
5825 	 * Slide the BAW left edge to wherever net80211 left it for us.
5826 	 * Read above for more information.
5827 	 */
5828 	tap->txa_start = ni->ni_txseqs[tid];
5829 	ath_tx_tid_resume(sc, atid);
5830 	ATH_TX_UNLOCK(sc);
5831 	return r;
5832 }
5833 
5834 
5835 /*
5836  * Stop ADDBA on a queue.
5837  *
5838  * This can be called whilst BAR TX is currently active on the queue,
5839  * so make sure this is unblocked before continuing.
5840  */
5841 void
5842 ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
5843 {
5844 	struct ath_softc *sc = ni->ni_ic->ic_softc;
5845 	int tid = tap->txa_tid;
5846 	struct ath_node *an = ATH_NODE(ni);
5847 	struct ath_tid *atid = &an->an_tid[tid];
5848 	ath_bufhead bf_cq;
5849 	struct ath_buf *bf;
5850 
5851 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n",
5852 	    __func__,
5853 	    ni->ni_macaddr,
5854 	    ":");
5855 
5856 	/*
5857 	 * Pause TID traffic early, so there aren't any races
5858 	 * Unblock the pending BAR held traffic, if it's currently paused.
5859 	 */
5860 	ATH_TX_LOCK(sc);
5861 	ath_tx_tid_pause(sc, atid);
5862 	if (atid->bar_wait) {
5863 		/*
5864 		 * bar_unsuspend() expects bar_tx == 1, as it should be
5865 		 * called from the TX completion path.  This quietens
5866 		 * the warning.  It's cleared for us anyway.
5867 		 */
5868 		atid->bar_tx = 1;
5869 		ath_tx_tid_bar_unsuspend(sc, atid);
5870 	}
5871 	ATH_TX_UNLOCK(sc);
5872 
5873 	/* There's no need to hold the TXQ lock here */
5874 	sc->sc_addba_stop(ni, tap);
5875 
5876 	/*
5877 	 * ath_tx_tid_cleanup will resume the TID if possible, otherwise
5878 	 * it'll set the cleanup flag, and it'll be unpaused once
5879 	 * things have been cleaned up.
5880 	 */
5881 	TAILQ_INIT(&bf_cq);
5882 	ATH_TX_LOCK(sc);
5883 
5884 	/*
5885 	 * In case there's a followup call to this, only call it
5886 	 * if we don't have a cleanup in progress.
5887 	 *
5888 	 * Since we've paused the queue above, we need to make
5889 	 * sure we unpause if there's already a cleanup in
5890 	 * progress - it means something else is also doing
5891 	 * this stuff, so we don't need to also keep it paused.
5892 	 */
5893 	if (atid->cleanup_inprogress) {
5894 		ath_tx_tid_resume(sc, atid);
5895 	} else {
5896 		ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
5897 		/*
5898 		 * Unpause the TID if no cleanup is required.
5899 		 */
5900 		if (! atid->cleanup_inprogress)
5901 			ath_tx_tid_resume(sc, atid);
5902 	}
5903 	ATH_TX_UNLOCK(sc);
5904 
5905 	/* Handle completing frames and fail them */
5906 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5907 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
5908 		ath_tx_default_comp(sc, bf, 1);
5909 	}
5910 
5911 }
5912 
5913 /*
5914  * Handle a node reassociation.
5915  *
5916  * We may have a bunch of frames queued to the hardware; those need
5917  * to be marked as cleanup.
5918  */
5919 void
5920 ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
5921 {
5922 	struct ath_tid *tid;
5923 	int i;
5924 	ath_bufhead bf_cq;
5925 	struct ath_buf *bf;
5926 
5927 	TAILQ_INIT(&bf_cq);
5928 
5929 	ATH_TX_UNLOCK_ASSERT(sc);
5930 
5931 	ATH_TX_LOCK(sc);
5932 	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
5933 		tid = &an->an_tid[i];
5934 		if (tid->hwq_depth == 0)
5935 			continue;
5936 		DPRINTF(sc, ATH_DEBUG_NODE,
5937 		    "%s: %6D: TID %d: cleaning up TID\n",
5938 		    __func__,
5939 		    an->an_node.ni_macaddr,
5940 		    ":",
5941 		    i);
5942 		/*
5943 		 * In case there's a followup call to this, only call it
5944 		 * if we don't have a cleanup in progress.
5945 		 */
5946 		if (! tid->cleanup_inprogress) {
5947 			ath_tx_tid_pause(sc, tid);
5948 			ath_tx_tid_cleanup(sc, an, i, &bf_cq);
5949 			/*
5950 			 * Unpause the TID if no cleanup is required.
5951 			 */
5952 			if (! tid->cleanup_inprogress)
5953 				ath_tx_tid_resume(sc, tid);
5954 		}
5955 	}
5956 	ATH_TX_UNLOCK(sc);
5957 
5958 	/* Handle completing frames and fail them */
5959 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5960 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
5961 		ath_tx_default_comp(sc, bf, 1);
5962 	}
5963 }
5964 
5965 /*
5966  * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
5967  * it simply tears down the aggregation session. Ew.
5968  *
5969  * It however will call ieee80211_ampdu_stop() which will call
5970  * ic->ic_addba_stop().
5971  *
5972  * XXX This uses a hard-coded max BAR count value; the whole
5973  * XXX BAR TX success or failure should be better handled!
5974  */
5975 void
5976 ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5977     int status)
5978 {
5979 	struct ath_softc *sc = ni->ni_ic->ic_softc;
5980 	int tid = tap->txa_tid;
5981 	struct ath_node *an = ATH_NODE(ni);
5982 	struct ath_tid *atid = &an->an_tid[tid];
5983 	int attempts = tap->txa_attempts;
5984 	int old_txa_start;
5985 
5986 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
5987 	    "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n",
5988 	    __func__,
5989 	    ni->ni_macaddr,
5990 	    ":",
5991 	    tap->txa_tid,
5992 	    atid->tid,
5993 	    status,
5994 	    attempts,
5995 	    tap->txa_start,
5996 	    tap->txa_seqpending);
5997 
5998 	/* Note: This may update the BAW details */
5999 	/*
6000 	 * XXX What if this does slide the BAW along? We need to somehow
6001 	 * XXX either fix things when it does happen, or prevent the
6002 	 * XXX seqpending value to be anything other than exactly what
6003 	 * XXX the hell we want!
6004 	 *
6005 	 * XXX So for now, how I do this inside the TX lock for now
6006 	 * XXX and just correct it afterwards? The below condition should
6007 	 * XXX never happen and if it does I need to fix all kinds of things.
6008 	 */
6009 	ATH_TX_LOCK(sc);
6010 	old_txa_start = tap->txa_start;
6011 	sc->sc_bar_response(ni, tap, status);
6012 	if (tap->txa_start != old_txa_start) {
6013 		device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",
6014 		    __func__,
6015 		    tid,
6016 		    tap->txa_start,
6017 		    old_txa_start);
6018 	}
6019 	tap->txa_start = old_txa_start;
6020 	ATH_TX_UNLOCK(sc);
6021 
6022 	/* Unpause the TID */
6023 	/*
6024 	 * XXX if this is attempt=50, the TID will be downgraded
6025 	 * XXX to a non-aggregate session. So we must unpause the
6026 	 * XXX TID here or it'll never be done.
6027 	 *
6028 	 * Also, don't call it if bar_tx/bar_wait are 0; something
6029 	 * has beaten us to the punch? (XXX figure out what?)
6030 	 */
6031 	if (status == 0 || attempts == 50) {
6032 		ATH_TX_LOCK(sc);
6033 		if (atid->bar_tx == 0 || atid->bar_wait == 0)
6034 			DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
6035 			    "%s: huh? bar_tx=%d, bar_wait=%d\n",
6036 			    __func__,
6037 			    atid->bar_tx, atid->bar_wait);
6038 		else
6039 			ath_tx_tid_bar_unsuspend(sc, atid);
6040 		ATH_TX_UNLOCK(sc);
6041 	}
6042 }
6043 
6044 /*
6045  * This is called whenever the pending ADDBA request times out.
6046  * Unpause and reschedule the TID.
6047  */
6048 void
6049 ath_addba_response_timeout(struct ieee80211_node *ni,
6050     struct ieee80211_tx_ampdu *tap)
6051 {
6052 	struct ath_softc *sc = ni->ni_ic->ic_softc;
6053 	int tid = tap->txa_tid;
6054 	struct ath_node *an = ATH_NODE(ni);
6055 	struct ath_tid *atid = &an->an_tid[tid];
6056 
6057 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
6058 	    "%s: %6D: TID=%d, called; resuming\n",
6059 	    __func__,
6060 	    ni->ni_macaddr,
6061 	    ":",
6062 	    tid);
6063 
6064 	ATH_TX_LOCK(sc);
6065 	atid->addba_tx_pending = 0;
6066 	ATH_TX_UNLOCK(sc);
6067 
6068 	/* Note: This updates the aggregate state to (again) pending */
6069 	sc->sc_addba_response_timeout(ni, tap);
6070 
6071 	/* Unpause the TID; which reschedules it */
6072 	ATH_TX_LOCK(sc);
6073 	ath_tx_tid_resume(sc, atid);
6074 	ATH_TX_UNLOCK(sc);
6075 }
6076 
6077 /*
6078  * Check if a node is asleep or not.
6079  */
6080 int
6081 ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
6082 {
6083 
6084 	ATH_TX_LOCK_ASSERT(sc);
6085 
6086 	return (an->an_is_powersave);
6087 }
6088 
6089 /*
6090  * Mark a node as currently "in powersaving."
6091  * This suspends all traffic on the node.
6092  *
6093  * This must be called with the node/tx locks free.
6094  *
6095  * XXX TODO: the locking silliness below is due to how the node
6096  * locking currently works.  Right now, the node lock is grabbed
6097  * to do rate control lookups and these are done with the TX
6098  * queue lock held.  This means the node lock can't be grabbed
6099  * first here or a LOR will occur.
6100  *
6101  * Eventually (hopefully!) the TX path code will only grab
6102  * the TXQ lock when transmitting and the ath_node lock when
6103  * doing node/TID operations.  There are other complications -
6104  * the sched/unsched operations involve walking the per-txq
6105  * 'active tid' list and this requires both locks to be held.
6106  */
6107 void
6108 ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
6109 {
6110 	struct ath_tid *atid;
6111 	struct ath_txq *txq;
6112 	int tid;
6113 
6114 	ATH_TX_UNLOCK_ASSERT(sc);
6115 
6116 	/* Suspend all traffic on the node */
6117 	ATH_TX_LOCK(sc);
6118 
6119 	if (an->an_is_powersave) {
6120 		DPRINTF(sc, ATH_DEBUG_XMIT,
6121 		    "%s: %6D: node was already asleep!\n",
6122 		    __func__, an->an_node.ni_macaddr, ":");
6123 		ATH_TX_UNLOCK(sc);
6124 		return;
6125 	}
6126 
6127 	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
6128 		atid = &an->an_tid[tid];
6129 		txq = sc->sc_ac2q[atid->ac];
6130 
6131 		ath_tx_tid_pause(sc, atid);
6132 	}
6133 
6134 	/* Mark node as in powersaving */
6135 	an->an_is_powersave = 1;
6136 
6137 	ATH_TX_UNLOCK(sc);
6138 }
6139 
6140 /*
6141  * Mark a node as currently "awake."
6142  * This resumes all traffic to the node.
6143  */
6144 void
6145 ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
6146 {
6147 	struct ath_tid *atid;
6148 	struct ath_txq *txq;
6149 	int tid;
6150 
6151 	ATH_TX_UNLOCK_ASSERT(sc);
6152 
6153 	ATH_TX_LOCK(sc);
6154 
6155 	/* !? */
6156 	if (an->an_is_powersave == 0) {
6157 		ATH_TX_UNLOCK(sc);
6158 		DPRINTF(sc, ATH_DEBUG_XMIT,
6159 		    "%s: an=%p: node was already awake\n",
6160 		    __func__, an);
6161 		return;
6162 	}
6163 
6164 	/* Mark node as awake */
6165 	an->an_is_powersave = 0;
6166 	/*
6167 	 * Clear any pending leaked frame requests
6168 	 */
6169 	an->an_leak_count = 0;
6170 
6171 	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
6172 		atid = &an->an_tid[tid];
6173 		txq = sc->sc_ac2q[atid->ac];
6174 
6175 		ath_tx_tid_resume(sc, atid);
6176 	}
6177 	ATH_TX_UNLOCK(sc);
6178 }
6179 
6180 static int
6181 ath_legacy_dma_txsetup(struct ath_softc *sc)
6182 {
6183 
6184 	/* nothing new needed */
6185 	return (0);
6186 }
6187 
6188 static int
6189 ath_legacy_dma_txteardown(struct ath_softc *sc)
6190 {
6191 
6192 	/* nothing new needed */
6193 	return (0);
6194 }
6195 
6196 void
6197 ath_xmit_setup_legacy(struct ath_softc *sc)
6198 {
6199 	/*
6200 	 * For now, just set the descriptor length to sizeof(ath_desc);
6201 	 * worry about extracting the real length out of the HAL later.
6202 	 */
6203 	sc->sc_tx_desclen = sizeof(struct ath_desc);
6204 	sc->sc_tx_statuslen = sizeof(struct ath_desc);
6205 	sc->sc_tx_nmaps = 1;	/* only one buffer per TX desc */
6206 
6207 	sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
6208 	sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
6209 	sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;
6210 
6211 	sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
6212 	sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;
6213 
6214 	sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
6215 }
6216