xref: /freebsd/sys/dev/ath/if_ath_tx.c (revision 907b59d76938e654f0d040a888e8dfca3de1e222)
1 /*-
2  * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3  * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer,
11  *    without modification.
12  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
13  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
14  *    redistribution must be conditioned upon including a substantially
15  *    similar Disclaimer requirement for further binary redistribution.
16  *
17  * NO WARRANTY
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
21  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
22  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
23  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
26  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGES.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 /*
35  * Driver for the Atheros Wireless LAN controller.
36  *
37  * This software is derived from work of Atsushi Onoe; his contribution
38  * is greatly appreciated.
39  */
40 
41 #include "opt_inet.h"
42 #include "opt_ath.h"
43 #include "opt_wlan.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/sysctl.h>
48 #include <sys/mbuf.h>
49 #include <sys/malloc.h>
50 #include <sys/lock.h>
51 #include <sys/mutex.h>
52 #include <sys/kernel.h>
53 #include <sys/socket.h>
54 #include <sys/sockio.h>
55 #include <sys/errno.h>
56 #include <sys/callout.h>
57 #include <sys/bus.h>
58 #include <sys/endian.h>
59 #include <sys/kthread.h>
60 #include <sys/taskqueue.h>
61 #include <sys/priv.h>
62 #include <sys/ktr.h>
63 
64 #include <machine/bus.h>
65 
66 #include <net/if.h>
67 #include <net/if_var.h>
68 #include <net/if_dl.h>
69 #include <net/if_media.h>
70 #include <net/if_types.h>
71 #include <net/if_arp.h>
72 #include <net/ethernet.h>
73 #include <net/if_llc.h>
74 
75 #include <net80211/ieee80211_var.h>
76 #include <net80211/ieee80211_regdomain.h>
77 #ifdef IEEE80211_SUPPORT_SUPERG
78 #include <net80211/ieee80211_superg.h>
79 #endif
80 #ifdef IEEE80211_SUPPORT_TDMA
81 #include <net80211/ieee80211_tdma.h>
82 #endif
83 #include <net80211/ieee80211_ht.h>
84 
85 #include <net/bpf.h>
86 
87 #ifdef INET
88 #include <netinet/in.h>
89 #include <netinet/if_ether.h>
90 #endif
91 
92 #include <dev/ath/if_athvar.h>
93 #include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
94 #include <dev/ath/ath_hal/ah_diagcodes.h>
95 
96 #include <dev/ath/if_ath_debug.h>
97 
98 #ifdef ATH_TX99_DIAG
99 #include <dev/ath/ath_tx99/ath_tx99.h>
100 #endif
101 
102 #include <dev/ath/if_ath_misc.h>
103 #include <dev/ath/if_ath_tx.h>
104 #include <dev/ath/if_ath_tx_ht.h>
105 
106 #ifdef	ATH_DEBUG_ALQ
107 #include <dev/ath/if_ath_alq.h>
108 #endif
109 
110 /*
111  * How many retries to perform in software
112  */
113 #define	SWMAX_RETRIES		10
114 
115 /*
116  * What queue to throw the non-QoS TID traffic into
117  */
118 #define	ATH_NONQOS_TID_AC	WME_AC_VO
119 
120 #if 0
121 static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
122 #endif
123 static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
124     int tid);
125 static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
126     int tid);
127 static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
128     struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
129 static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
130     struct ieee80211_node *ni, struct mbuf *m0, int *tid);
131 static struct ath_buf *
132 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
133     struct ath_tid *tid, struct ath_buf *bf);
134 
135 #ifdef	ATH_DEBUG_ALQ
136 void
137 ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
138 {
139 	struct ath_buf *bf;
140 	int i, n;
141 	const char *ds;
142 
143 	/* XXX we should skip out early if debugging isn't enabled! */
144 	bf = bf_first;
145 
146 	while (bf != NULL) {
147 		/* XXX should ensure bf_nseg > 0! */
148 		if (bf->bf_nseg == 0)
149 			break;
150 		n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
151 		for (i = 0, ds = (const char *) bf->bf_desc;
152 		    i < n;
153 		    i++, ds += sc->sc_tx_desclen) {
154 			if_ath_alq_post(&sc->sc_alq,
155 			    ATH_ALQ_EDMA_TXDESC,
156 			    sc->sc_tx_desclen,
157 			    ds);
158 		}
159 		bf = bf->bf_next;
160 	}
161 }
162 #endif /* ATH_DEBUG_ALQ */
163 
164 /*
165  * Whether to use the 11n rate scenario functions or not
166  */
167 static inline int
168 ath_tx_is_11n(struct ath_softc *sc)
169 {
170 	return ((sc->sc_ah->ah_magic == 0x20065416) ||
171 		    (sc->sc_ah->ah_magic == 0x19741014));
172 }
173 
174 /*
175  * Obtain the current TID from the given frame.
176  *
177  * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID.)
178  * This has implications for which AC/priority the packet is placed
179  * in.
180  */
181 static int
182 ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
183 {
184 	const struct ieee80211_frame *wh;
185 	int pri = M_WME_GETAC(m0);
186 
187 	wh = mtod(m0, const struct ieee80211_frame *);
188 	if (! IEEE80211_QOS_HAS_SEQ(wh))
189 		return IEEE80211_NONQOS_TID;
190 	else
191 		return WME_AC_TO_TID(pri);
192 }
193 
194 static void
195 ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
196 {
197 	struct ieee80211_frame *wh;
198 
199 	wh = mtod(bf->bf_m, struct ieee80211_frame *);
200 	/* Only update/resync if needed */
201 	if (bf->bf_state.bfs_isretried == 0) {
202 		wh->i_fc[1] |= IEEE80211_FC1_RETRY;
203 		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
204 		    BUS_DMASYNC_PREWRITE);
205 	}
206 	bf->bf_state.bfs_isretried = 1;
207 	bf->bf_state.bfs_retries ++;
208 }
209 
210 /*
211  * Determine what the correct AC queue for the given frame
212  * should be.
213  *
214  * This code assumes that the TIDs map consistently to
215  * the underlying hardware (or software) ath_txq.
216  * Since the sender may try to set an AC which is
217  * arbitrary, non-QoS TIDs may end up being put on
218  * completely different ACs. There's no way to put a
219  * TID into multiple ath_txq's for scheduling, so
220  * for now we override the AC/TXQ selection and set
221  * non-QOS TID frames into the BE queue.
222  *
223  * This may be completely incorrect - specifically,
224  * some management frames may end up out of order
225  * compared to the QoS traffic they're controlling.
226  * I'll look into this later.
227  */
228 static int
229 ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
230 {
231 	const struct ieee80211_frame *wh;
232 	int pri = M_WME_GETAC(m0);
233 	wh = mtod(m0, const struct ieee80211_frame *);
234 	if (IEEE80211_QOS_HAS_SEQ(wh))
235 		return pri;
236 
237 	return ATH_NONQOS_TID_AC;
238 }
239 
240 void
241 ath_txfrag_cleanup(struct ath_softc *sc,
242 	ath_bufhead *frags, struct ieee80211_node *ni)
243 {
244 	struct ath_buf *bf, *next;
245 
246 	ATH_TXBUF_LOCK_ASSERT(sc);
247 
248 	TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
249 		/* NB: bf assumed clean */
250 		TAILQ_REMOVE(frags, bf, bf_list);
251 		ath_returnbuf_head(sc, bf);
252 		ieee80211_node_decref(ni);
253 	}
254 }
255 
256 /*
257  * Setup xmit of a fragmented frame.  Allocate a buffer
258  * for each frag and bump the node reference count to
259  * reflect the held reference to be setup by ath_tx_start.
260  */
261 int
262 ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
263 	struct mbuf *m0, struct ieee80211_node *ni)
264 {
265 	struct mbuf *m;
266 	struct ath_buf *bf;
267 
268 	ATH_TXBUF_LOCK(sc);
269 	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
270 		/* XXX non-management? */
271 		bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
272 		if (bf == NULL) {	/* out of buffers, cleanup */
273 			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n",
274 			    __func__);
275 			ath_txfrag_cleanup(sc, frags, ni);
276 			break;
277 		}
278 		ieee80211_node_incref(ni);
279 		TAILQ_INSERT_TAIL(frags, bf, bf_list);
280 	}
281 	ATH_TXBUF_UNLOCK(sc);
282 
283 	return !TAILQ_EMPTY(frags);
284 }
285 
286 static int
287 ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
288 {
289 	struct mbuf *m;
290 	int error;
291 
292 	/*
293 	 * Load the DMA map so any coalescing is done.  This
294 	 * also calculates the number of descriptors we need.
295 	 */
296 	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
297 				     bf->bf_segs, &bf->bf_nseg,
298 				     BUS_DMA_NOWAIT);
299 	if (error == EFBIG) {
300 		/* XXX packet requires too many descriptors */
301 		bf->bf_nseg = ATH_MAX_SCATTER + 1;
302 	} else if (error != 0) {
303 		sc->sc_stats.ast_tx_busdma++;
304 		ieee80211_free_mbuf(m0);
305 		return error;
306 	}
307 	/*
308 	 * Discard null packets and check for packets that
309 	 * require too many TX descriptors.  We try to convert
310 	 * the latter to a cluster.
311 	 */
312 	if (bf->bf_nseg > ATH_MAX_SCATTER) {		/* too many desc's, linearize */
313 		sc->sc_stats.ast_tx_linear++;
314 		m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER);
315 		if (m == NULL) {
316 			ieee80211_free_mbuf(m0);
317 			sc->sc_stats.ast_tx_nombuf++;
318 			return ENOMEM;
319 		}
320 		m0 = m;
321 		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
322 					     bf->bf_segs, &bf->bf_nseg,
323 					     BUS_DMA_NOWAIT);
324 		if (error != 0) {
325 			sc->sc_stats.ast_tx_busdma++;
326 			ieee80211_free_mbuf(m0);
327 			return error;
328 		}
329 		KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,
330 		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
331 	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
332 		sc->sc_stats.ast_tx_nodata++;
333 		ieee80211_free_mbuf(m0);
334 		return EIO;
335 	}
336 	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
337 		__func__, m0, m0->m_pkthdr.len);
338 	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
339 	bf->bf_m = m0;
340 
341 	return 0;
342 }
343 
344 /*
345  * Chain together segments+descriptors for a frame - 11n or otherwise.
346  *
347  * For aggregates, this is called on each frame in the aggregate.
348  */
349 static void
350 ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
351     struct ath_buf *bf, int is_aggr, int is_first_subframe,
352     int is_last_subframe)
353 {
354 	struct ath_hal *ah = sc->sc_ah;
355 	char *ds;
356 	int i, bp, dsp;
357 	HAL_DMA_ADDR bufAddrList[4];
358 	uint32_t segLenList[4];
359 	int numTxMaps = 1;
360 	int isFirstDesc = 1;
361 
362 	/*
363 	 * XXX There's txdma and txdma_mgmt; the descriptor
364 	 * sizes must match.
365 	 */
366 	struct ath_descdma *dd = &sc->sc_txdma;
367 
368 	/*
369 	 * Fillin the remainder of the descriptor info.
370 	 */
371 
372 	/*
373 	 * We need the number of TX data pointers in each descriptor.
374 	 * EDMA and later chips support 4 TX buffers per descriptor;
375 	 * previous chips just support one.
376 	 */
377 	numTxMaps = sc->sc_tx_nmaps;
378 
379 	/*
380 	 * For EDMA and later chips ensure the TX map is fully populated
381 	 * before advancing to the next descriptor.
382 	 */
383 	ds = (char *) bf->bf_desc;
384 	bp = dsp = 0;
385 	bzero(bufAddrList, sizeof(bufAddrList));
386 	bzero(segLenList, sizeof(segLenList));
387 	for (i = 0; i < bf->bf_nseg; i++) {
388 		bufAddrList[bp] = bf->bf_segs[i].ds_addr;
389 		segLenList[bp] = bf->bf_segs[i].ds_len;
390 		bp++;
391 
392 		/*
393 		 * Go to the next segment if this isn't the last segment
394 		 * and there's space in the current TX map.
395 		 */
396 		if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
397 			continue;
398 
399 		/*
400 		 * Last segment or we're out of buffer pointers.
401 		 */
402 		bp = 0;
403 
404 		if (i == bf->bf_nseg - 1)
405 			ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);
406 		else
407 			ath_hal_settxdesclink(ah, (struct ath_desc *) ds,
408 			    bf->bf_daddr + dd->dd_descsize * (dsp + 1));
409 
410 		/*
411 		 * XXX This assumes that bfs_txq is the actual destination
412 		 * hardware queue at this point.  It may not have been
413 		 * assigned, it may actually be pointing to the multicast
414 		 * software TXQ id.  These must be fixed!
415 		 */
416 		ath_hal_filltxdesc(ah, (struct ath_desc *) ds
417 			, bufAddrList
418 			, segLenList
419 			, bf->bf_descid		/* XXX desc id */
420 			, bf->bf_state.bfs_tx_queue
421 			, isFirstDesc		/* first segment */
422 			, i == bf->bf_nseg - 1	/* last segment */
423 			, (struct ath_desc *) ds0	/* first descriptor */
424 		);
425 
426 		/*
427 		 * Make sure the 11n aggregate fields are cleared.
428 		 *
429 		 * XXX TODO: this doesn't need to be called for
430 		 * aggregate frames; as it'll be called on all
431 		 * sub-frames.  Since the descriptors are in
432 		 * non-cacheable memory, this leads to some
433 		 * rather slow writes on MIPS/ARM platforms.
434 		 */
435 		if (ath_tx_is_11n(sc))
436 			ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);
437 
438 		/*
439 		 * If 11n is enabled, set it up as if it's an aggregate
440 		 * frame.
441 		 */
442 		if (is_last_subframe) {
443 			ath_hal_set11n_aggr_last(sc->sc_ah,
444 			    (struct ath_desc *) ds);
445 		} else if (is_aggr) {
446 			/*
447 			 * This clears the aggrlen field; so
448 			 * the caller needs to call set_aggr_first()!
449 			 *
450 			 * XXX TODO: don't call this for the first
451 			 * descriptor in the first frame in an
452 			 * aggregate!
453 			 */
454 			ath_hal_set11n_aggr_middle(sc->sc_ah,
455 			    (struct ath_desc *) ds,
456 			    bf->bf_state.bfs_ndelim);
457 		}
458 		isFirstDesc = 0;
459 		bf->bf_lastds = (struct ath_desc *) ds;
460 
461 		/*
462 		 * Don't forget to skip to the next descriptor.
463 		 */
464 		ds += sc->sc_tx_desclen;
465 		dsp++;
466 
467 		/*
468 		 * .. and don't forget to blank these out!
469 		 */
470 		bzero(bufAddrList, sizeof(bufAddrList));
471 		bzero(segLenList, sizeof(segLenList));
472 	}
473 	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
474 }
475 
476 /*
477  * Set the rate control fields in the given descriptor based on
478  * the bf_state fields and node state.
479  *
480  * The bfs fields should already be set with the relevant rate
481  * control information, including whether MRR is to be enabled.
482  *
483  * Since the FreeBSD HAL currently sets up the first TX rate
484  * in ath_hal_setuptxdesc(), this will setup the MRR
485  * conditionally for the pre-11n chips, and call ath_buf_set_rate
486  * unconditionally for 11n chips. These require the 11n rate
487  * scenario to be set if MCS rates are enabled, so it's easier
488  * to just always call it. The caller can then only set rates 2, 3
489  * and 4 if multi-rate retry is needed.
490  */
491 static void
492 ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
493     struct ath_buf *bf)
494 {
495 	struct ath_rc_series *rc = bf->bf_state.bfs_rc;
496 
497 	/* If mrr is disabled, blank tries 1, 2, 3 */
498 	if (! bf->bf_state.bfs_ismrr)
499 		rc[1].tries = rc[2].tries = rc[3].tries = 0;
500 
501 #if 0
502 	/*
503 	 * If NOACK is set, just set ntries=1.
504 	 */
505 	else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
506 		rc[1].tries = rc[2].tries = rc[3].tries = 0;
507 		rc[0].tries = 1;
508 	}
509 #endif
510 
511 	/*
512 	 * Always call - that way a retried descriptor will
513 	 * have the MRR fields overwritten.
514 	 *
515 	 * XXX TODO: see if this is really needed - setting up
516 	 * the first descriptor should set the MRR fields to 0
517 	 * for us anyway.
518 	 */
519 	if (ath_tx_is_11n(sc)) {
520 		ath_buf_set_rate(sc, ni, bf);
521 	} else {
522 		ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
523 			, rc[1].ratecode, rc[1].tries
524 			, rc[2].ratecode, rc[2].tries
525 			, rc[3].ratecode, rc[3].tries
526 		);
527 	}
528 }
529 
530 /*
531  * Setup segments+descriptors for an 11n aggregate.
532  * bf_first is the first buffer in the aggregate.
533  * The descriptor list must already been linked together using
534  * bf->bf_next.
535  */
536 static void
537 ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
538 {
539 	struct ath_buf *bf, *bf_prev = NULL;
540 	struct ath_desc *ds0 = bf_first->bf_desc;
541 
542 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
543 	    __func__, bf_first->bf_state.bfs_nframes,
544 	    bf_first->bf_state.bfs_al);
545 
546 	bf = bf_first;
547 
548 	if (bf->bf_state.bfs_txrate0 == 0)
549 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",
550 		    __func__, bf, 0);
551 	if (bf->bf_state.bfs_rc[0].ratecode == 0)
552 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",
553 		    __func__, bf, 0);
554 
555 	/*
556 	 * Setup all descriptors of all subframes - this will
557 	 * call ath_hal_set11naggrmiddle() on every frame.
558 	 */
559 	while (bf != NULL) {
560 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
561 		    "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
562 		    __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
563 		    SEQNO(bf->bf_state.bfs_seqno));
564 
565 		/*
566 		 * Setup the initial fields for the first descriptor - all
567 		 * the non-11n specific stuff.
568 		 */
569 		ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
570 			, bf->bf_state.bfs_pktlen	/* packet length */
571 			, bf->bf_state.bfs_hdrlen	/* header length */
572 			, bf->bf_state.bfs_atype	/* Atheros packet type */
573 			, bf->bf_state.bfs_txpower	/* txpower */
574 			, bf->bf_state.bfs_txrate0
575 			, bf->bf_state.bfs_try0		/* series 0 rate/tries */
576 			, bf->bf_state.bfs_keyix	/* key cache index */
577 			, bf->bf_state.bfs_txantenna	/* antenna mode */
578 			, bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ	/* flags */
579 			, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
580 			, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
581 		);
582 
583 		/*
584 		 * First descriptor? Setup the rate control and initial
585 		 * aggregate header information.
586 		 */
587 		if (bf == bf_first) {
588 			/*
589 			 * setup first desc with rate and aggr info
590 			 */
591 			ath_tx_set_ratectrl(sc, bf->bf_node, bf);
592 		}
593 
594 		/*
595 		 * Setup the descriptors for a multi-descriptor frame.
596 		 * This is both aggregate and non-aggregate aware.
597 		 */
598 		ath_tx_chaindesclist(sc, ds0, bf,
599 		    1, /* is_aggr */
600 		    !! (bf == bf_first), /* is_first_subframe */
601 		    !! (bf->bf_next == NULL) /* is_last_subframe */
602 		    );
603 
604 		if (bf == bf_first) {
605 			/*
606 			 * Initialise the first 11n aggregate with the
607 			 * aggregate length and aggregate enable bits.
608 			 */
609 			ath_hal_set11n_aggr_first(sc->sc_ah,
610 			    ds0,
611 			    bf->bf_state.bfs_al,
612 			    bf->bf_state.bfs_ndelim);
613 		}
614 
615 		/*
616 		 * Link the last descriptor of the previous frame
617 		 * to the beginning descriptor of this frame.
618 		 */
619 		if (bf_prev != NULL)
620 			ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
621 			    bf->bf_daddr);
622 
623 		/* Save a copy so we can link the next descriptor in */
624 		bf_prev = bf;
625 		bf = bf->bf_next;
626 	}
627 
628 	/*
629 	 * Set the first descriptor bf_lastds field to point to
630 	 * the last descriptor in the last subframe, that's where
631 	 * the status update will occur.
632 	 */
633 	bf_first->bf_lastds = bf_prev->bf_lastds;
634 
635 	/*
636 	 * And bf_last in the first descriptor points to the end of
637 	 * the aggregate list.
638 	 */
639 	bf_first->bf_last = bf_prev;
640 
641 	/*
642 	 * For non-AR9300 NICs, which require the rate control
643 	 * in the final descriptor - let's set that up now.
644 	 *
645 	 * This is because the filltxdesc() HAL call doesn't
646 	 * populate the last segment with rate control information
647 	 * if firstSeg is also true.  For non-aggregate frames
648 	 * that is fine, as the first frame already has rate control
649 	 * info.  But if the last frame in an aggregate has one
650 	 * descriptor, both firstseg and lastseg will be true and
651 	 * the rate info isn't copied.
652 	 *
653 	 * This is inefficient on MIPS/ARM platforms that have
654 	 * non-cachable memory for TX descriptors, but we'll just
655 	 * make do for now.
656 	 *
657 	 * As to why the rate table is stashed in the last descriptor
658 	 * rather than the first descriptor?  Because proctxdesc()
659 	 * is called on the final descriptor in an MPDU or A-MPDU -
660 	 * ie, the one that gets updated by the hardware upon
661 	 * completion.  That way proctxdesc() doesn't need to know
662 	 * about the first _and_ last TX descriptor.
663 	 */
664 	ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);
665 
666 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
667 }
668 
669 /*
670  * Hand-off a frame to the multicast TX queue.
671  *
672  * This is a software TXQ which will be appended to the CAB queue
673  * during the beacon setup code.
674  *
675  * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
676  * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated
677  * with the actual hardware txq, or all of this will fall apart.
678  *
679  * XXX It may not be a bad idea to just stuff the QCU ID into bf_state
680  * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated
681  * correctly.
682  */
683 static void
684 ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
685     struct ath_buf *bf)
686 {
687 	ATH_TX_LOCK_ASSERT(sc);
688 
689 	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
690 	     ("%s: busy status 0x%x", __func__, bf->bf_flags));
691 
692 	/*
693 	 * Ensure that the tx queue is the cabq, so things get
694 	 * mapped correctly.
695 	 */
696 	if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
697 		DPRINTF(sc, ATH_DEBUG_XMIT,
698 		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
699 		    __func__, bf, bf->bf_state.bfs_tx_queue,
700 		    txq->axq_qnum);
701 	}
702 
703 	ATH_TXQ_LOCK(txq);
704 	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
705 		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
706 		struct ieee80211_frame *wh;
707 
708 		/* mark previous frame */
709 		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
710 		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
711 		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
712 		    BUS_DMASYNC_PREWRITE);
713 
714 		/* link descriptor */
715 		ath_hal_settxdesclink(sc->sc_ah,
716 		    bf_last->bf_lastds,
717 		    bf->bf_daddr);
718 	}
719 	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
720 	ATH_TXQ_UNLOCK(txq);
721 }
722 
723 /*
724  * Hand-off packet to a hardware queue.
725  */
726 static void
727 ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
728     struct ath_buf *bf)
729 {
730 	struct ath_hal *ah = sc->sc_ah;
731 	struct ath_buf *bf_first;
732 
733 	/*
734 	 * Insert the frame on the outbound list and pass it on
735 	 * to the hardware.  Multicast frames buffered for power
736 	 * save stations and transmit from the CAB queue are stored
737 	 * on a s/w only queue and loaded on to the CAB queue in
738 	 * the SWBA handler since frames only go out on DTIM and
739 	 * to avoid possible races.
740 	 */
741 	ATH_TX_LOCK_ASSERT(sc);
742 	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
743 	     ("%s: busy status 0x%x", __func__, bf->bf_flags));
744 	KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
745 	     ("ath_tx_handoff_hw called for mcast queue"));
746 
747 	/*
748 	 * XXX We should instead just verify that sc_txstart_cnt
749 	 * or ath_txproc_cnt > 0.  That would mean that
750 	 * the reset is going to be waiting for us to complete.
751 	 */
752 	if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) {
753 		device_printf(sc->sc_dev,
754 		    "%s: TX dispatch without holding txcount/txstart refcnt!\n",
755 		    __func__);
756 	}
757 
758 	/*
759 	 * XXX .. this is going to cause the hardware to get upset;
760 	 * so we really should find some way to drop or queue
761 	 * things.
762 	 */
763 
764 	ATH_TXQ_LOCK(txq);
765 
766 	/*
767 	 * XXX TODO: if there's a holdingbf, then
768 	 * ATH_TXQ_PUTRUNNING should be clear.
769 	 *
770 	 * If there is a holdingbf and the list is empty,
771 	 * then axq_link should be pointing to the holdingbf.
772 	 *
773 	 * Otherwise it should point to the last descriptor
774 	 * in the last ath_buf.
775 	 *
776 	 * In any case, we should really ensure that we
777 	 * update the previous descriptor link pointer to
778 	 * this descriptor, regardless of all of the above state.
779 	 *
780 	 * For now this is captured by having axq_link point
781 	 * to either the holdingbf (if the TXQ list is empty)
782 	 * or the end of the list (if the TXQ list isn't empty.)
783 	 * I'd rather just kill axq_link here and do it as above.
784 	 */
785 
786 	/*
787 	 * Append the frame to the TX queue.
788 	 */
789 	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
790 	ATH_KTR(sc, ATH_KTR_TX, 3,
791 	    "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
792 	    "depth=%d",
793 	    txq->axq_qnum,
794 	    bf,
795 	    txq->axq_depth);
796 
797 	/*
798 	 * If there's a link pointer, update it.
799 	 *
800 	 * XXX we should replace this with the above logic, just
801 	 * to kill axq_link with fire.
802 	 */
803 	if (txq->axq_link != NULL) {
804 		*txq->axq_link = bf->bf_daddr;
805 		DPRINTF(sc, ATH_DEBUG_XMIT,
806 		    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
807 		    txq->axq_qnum, txq->axq_link,
808 		    (caddr_t)bf->bf_daddr, bf->bf_desc,
809 		    txq->axq_depth);
810 		ATH_KTR(sc, ATH_KTR_TX, 5,
811 		    "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
812 		    "lastds=%d",
813 		    txq->axq_qnum, txq->axq_link,
814 		    (caddr_t)bf->bf_daddr, bf->bf_desc,
815 		    bf->bf_lastds);
816 	}
817 
818 	/*
819 	 * If we've not pushed anything into the hardware yet,
820 	 * push the head of the queue into the TxDP.
821 	 *
822 	 * Once we've started DMA, there's no guarantee that
823 	 * updating the TxDP with a new value will actually work.
824 	 * So we just don't do that - if we hit the end of the list,
825 	 * we keep that buffer around (the "holding buffer") and
826 	 * re-start DMA by updating the link pointer of _that_
827 	 * descriptor and then restart DMA.
828 	 */
829 	if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
830 		bf_first = TAILQ_FIRST(&txq->axq_q);
831 		txq->axq_flags |= ATH_TXQ_PUTRUNNING;
832 		ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
833 		DPRINTF(sc, ATH_DEBUG_XMIT,
834 		    "%s: TXDP[%u] = %p (%p) depth %d\n",
835 		    __func__, txq->axq_qnum,
836 		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
837 		    txq->axq_depth);
838 		ATH_KTR(sc, ATH_KTR_TX, 5,
839 		    "ath_tx_handoff: TXDP[%u] = %p (%p) "
840 		    "lastds=%p depth %d",
841 		    txq->axq_qnum,
842 		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
843 		    bf_first->bf_lastds,
844 		    txq->axq_depth);
845 	}
846 
847 	/*
848 	 * Ensure that the bf TXQ matches this TXQ, so later
849 	 * checking and holding buffer manipulation is sane.
850 	 */
851 	if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
852 		DPRINTF(sc, ATH_DEBUG_XMIT,
853 		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
854 		    __func__, bf, bf->bf_state.bfs_tx_queue,
855 		    txq->axq_qnum);
856 	}
857 
858 	/*
859 	 * Track aggregate queue depth.
860 	 */
861 	if (bf->bf_state.bfs_aggr)
862 		txq->axq_aggr_depth++;
863 
864 	/*
865 	 * Update the link pointer.
866 	 */
867 	ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);
868 
869 	/*
870 	 * Start DMA.
871 	 *
872 	 * If we wrote a TxDP above, DMA will start from here.
873 	 *
874 	 * If DMA is running, it'll do nothing.
875 	 *
876 	 * If the DMA engine hit the end of the QCU list (ie LINK=NULL,
877 	 * or VEOL) then it stops at the last transmitted write.
878 	 * We then append a new frame by updating the link pointer
879 	 * in that descriptor and then kick TxE here; it will re-read
880 	 * that last descriptor and find the new descriptor to transmit.
881 	 *
882 	 * This is why we keep the holding descriptor around.
883 	 */
884 	ath_hal_txstart(ah, txq->axq_qnum);
885 	ATH_TXQ_UNLOCK(txq);
886 	ATH_KTR(sc, ATH_KTR_TX, 1,
887 	    "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
888 }
889 
890 /*
891  * Restart TX DMA for the given TXQ.
892  *
893  * This must be called whether the queue is empty or not.
894  */
895 static void
896 ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
897 {
898 	struct ath_buf *bf, *bf_last;
899 
900 	ATH_TXQ_LOCK_ASSERT(txq);
901 
902 	/* XXX make this ATH_TXQ_FIRST */
903 	bf = TAILQ_FIRST(&txq->axq_q);
904 	bf_last = ATH_TXQ_LAST(txq, axq_q_s);
905 
906 	if (bf == NULL)
907 		return;
908 
909 	DPRINTF(sc, ATH_DEBUG_RESET,
910 	    "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n",
911 	    __func__,
912 	    txq->axq_qnum,
913 	    bf,
914 	    bf_last,
915 	    (uint32_t) bf->bf_daddr);
916 
917 #ifdef	ATH_DEBUG
918 	if (sc->sc_debug & ATH_DEBUG_RESET)
919 		ath_tx_dump(sc, txq);
920 #endif
921 
922 	/*
923 	 * This is called from a restart, so DMA is known to be
924 	 * completely stopped.
925 	 */
926 	KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
927 	    ("%s: Q%d: called with PUTRUNNING=1\n",
928 	    __func__,
929 	    txq->axq_qnum));
930 
931 	ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
932 	txq->axq_flags |= ATH_TXQ_PUTRUNNING;
933 
934 	ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
935 	    &txq->axq_link);
936 	ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
937 }
938 
939 /*
940  * Hand off a packet to the hardware (or mcast queue.)
941  *
942  * The relevant hardware txq should be locked.
943  */
944 static void
945 ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
946     struct ath_buf *bf)
947 {
948 	ATH_TX_LOCK_ASSERT(sc);
949 
950 #ifdef	ATH_DEBUG_ALQ
951 	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
952 		ath_tx_alq_post(sc, bf);
953 #endif
954 
955 	if (txq->axq_qnum == ATH_TXQ_SWQ)
956 		ath_tx_handoff_mcast(sc, txq, bf);
957 	else
958 		ath_tx_handoff_hw(sc, txq, bf);
959 }
960 
961 static int
962 ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
963     struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
964     int *keyix)
965 {
966 	DPRINTF(sc, ATH_DEBUG_XMIT,
967 	    "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n",
968 	    __func__,
969 	    *hdrlen,
970 	    *pktlen,
971 	    isfrag,
972 	    iswep,
973 	    m0);
974 
975 	if (iswep) {
976 		const struct ieee80211_cipher *cip;
977 		struct ieee80211_key *k;
978 
979 		/*
980 		 * Construct the 802.11 header+trailer for an encrypted
981 		 * frame. The only reason this can fail is because of an
982 		 * unknown or unsupported cipher/key type.
983 		 */
984 		k = ieee80211_crypto_encap(ni, m0);
985 		if (k == NULL) {
986 			/*
987 			 * This can happen when the key is yanked after the
988 			 * frame was queued.  Just discard the frame; the
989 			 * 802.11 layer counts failures and provides
990 			 * debugging/diagnostics.
991 			 */
992 			return (0);
993 		}
994 		/*
995 		 * Adjust the packet + header lengths for the crypto
996 		 * additions and calculate the h/w key index.  When
997 		 * a s/w mic is done the frame will have had any mic
998 		 * added to it prior to entry so m0->m_pkthdr.len will
999 		 * account for it. Otherwise we need to add it to the
1000 		 * packet length.
1001 		 */
1002 		cip = k->wk_cipher;
1003 		(*hdrlen) += cip->ic_header;
1004 		(*pktlen) += cip->ic_header + cip->ic_trailer;
1005 		/* NB: frags always have any TKIP MIC done in s/w */
1006 		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
1007 			(*pktlen) += cip->ic_miclen;
1008 		(*keyix) = k->wk_keyix;
1009 	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
1010 		/*
1011 		 * Use station key cache slot, if assigned.
1012 		 */
1013 		(*keyix) = ni->ni_ucastkey.wk_keyix;
1014 		if ((*keyix) == IEEE80211_KEYIX_NONE)
1015 			(*keyix) = HAL_TXKEYIX_INVALID;
1016 	} else
1017 		(*keyix) = HAL_TXKEYIX_INVALID;
1018 
1019 	return (1);
1020 }
1021 
1022 /*
1023  * Calculate whether interoperability protection is required for
1024  * this frame.
1025  *
1026  * This requires the rate control information be filled in,
1027  * as the protection requirement depends upon the current
1028  * operating mode / PHY.
1029  */
1030 static void
1031 ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
1032 {
1033 	struct ieee80211_frame *wh;
1034 	uint8_t rix;
1035 	uint16_t flags;
1036 	int shortPreamble;
1037 	const HAL_RATE_TABLE *rt = sc->sc_currates;
1038 	struct ieee80211com *ic = &sc->sc_ic;
1039 
1040 	flags = bf->bf_state.bfs_txflags;
1041 	rix = bf->bf_state.bfs_rc[0].rix;
1042 	shortPreamble = bf->bf_state.bfs_shpream;
1043 	wh = mtod(bf->bf_m, struct ieee80211_frame *);
1044 
1045 	/*
1046 	 * If 802.11g protection is enabled, determine whether
1047 	 * to use RTS/CTS or just CTS.  Note that this is only
1048 	 * done for OFDM unicast frames.
1049 	 */
1050 	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
1051 	    rt->info[rix].phy == IEEE80211_T_OFDM &&
1052 	    (flags & HAL_TXDESC_NOACK) == 0) {
1053 		bf->bf_state.bfs_doprot = 1;
1054 		/* XXX fragments must use CCK rates w/ protection */
1055 		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
1056 			flags |= HAL_TXDESC_RTSENA;
1057 		} else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
1058 			flags |= HAL_TXDESC_CTSENA;
1059 		}
1060 		/*
1061 		 * For frags it would be desirable to use the
1062 		 * highest CCK rate for RTS/CTS.  But stations
1063 		 * farther away may detect it at a lower CCK rate
1064 		 * so use the configured protection rate instead
1065 		 * (for now).
1066 		 */
1067 		sc->sc_stats.ast_tx_protect++;
1068 	}
1069 
1070 	/*
1071 	 * If 11n protection is enabled and it's a HT frame,
1072 	 * enable RTS.
1073 	 *
1074 	 * XXX ic_htprotmode or ic_curhtprotmode?
1075 	 * XXX should it_htprotmode only matter if ic_curhtprotmode
1076 	 * XXX indicates it's not a HT pure environment?
1077 	 */
1078 	if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
1079 	    rt->info[rix].phy == IEEE80211_T_HT &&
1080 	    (flags & HAL_TXDESC_NOACK) == 0) {
1081 		flags |= HAL_TXDESC_RTSENA;
1082 		sc->sc_stats.ast_tx_htprotect++;
1083 	}
1084 	bf->bf_state.bfs_txflags = flags;
1085 }
1086 
1087 /*
1088  * Update the frame duration given the currently selected rate.
1089  *
1090  * This also updates the frame duration value, so it will require
1091  * a DMA flush.
1092  */
1093 static void
1094 ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
1095 {
1096 	struct ieee80211_frame *wh;
1097 	uint8_t rix;
1098 	uint16_t flags;
1099 	int shortPreamble;
1100 	struct ath_hal *ah = sc->sc_ah;
1101 	const HAL_RATE_TABLE *rt = sc->sc_currates;
1102 	int isfrag = bf->bf_m->m_flags & M_FRAG;
1103 
1104 	flags = bf->bf_state.bfs_txflags;
1105 	rix = bf->bf_state.bfs_rc[0].rix;
1106 	shortPreamble = bf->bf_state.bfs_shpream;
1107 	wh = mtod(bf->bf_m, struct ieee80211_frame *);
1108 
1109 	/*
1110 	 * Calculate duration.  This logically belongs in the 802.11
1111 	 * layer but it lacks sufficient information to calculate it.
1112 	 */
1113 	if ((flags & HAL_TXDESC_NOACK) == 0 &&
1114 	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
1115 		u_int16_t dur;
1116 		if (shortPreamble)
1117 			dur = rt->info[rix].spAckDuration;
1118 		else
1119 			dur = rt->info[rix].lpAckDuration;
1120 		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
1121 			dur += dur;		/* additional SIFS+ACK */
1122 			/*
1123 			 * Include the size of next fragment so NAV is
1124 			 * updated properly.  The last fragment uses only
1125 			 * the ACK duration
1126 			 *
1127 			 * XXX TODO: ensure that the rate lookup for each
1128 			 * fragment is the same as the rate used by the
1129 			 * first fragment!
1130 			 */
1131 			dur += ath_hal_computetxtime(ah,
1132 			    rt,
1133 			    bf->bf_nextfraglen,
1134 			    rix, shortPreamble,
1135 			    AH_TRUE);
1136 		}
1137 		if (isfrag) {
1138 			/*
1139 			 * Force hardware to use computed duration for next
1140 			 * fragment by disabling multi-rate retry which updates
1141 			 * duration based on the multi-rate duration table.
1142 			 */
1143 			bf->bf_state.bfs_ismrr = 0;
1144 			bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
1145 			/* XXX update bfs_rc[0].try? */
1146 		}
1147 
1148 		/* Update the duration field itself */
1149 		*(u_int16_t *)wh->i_dur = htole16(dur);
1150 	}
1151 }
1152 
1153 static uint8_t
1154 ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
1155     int cix, int shortPreamble)
1156 {
1157 	uint8_t ctsrate;
1158 
1159 	/*
1160 	 * CTS transmit rate is derived from the transmit rate
1161 	 * by looking in the h/w rate table.  We must also factor
1162 	 * in whether or not a short preamble is to be used.
1163 	 */
1164 	/* NB: cix is set above where RTS/CTS is enabled */
1165 	KASSERT(cix != 0xff, ("cix not setup"));
1166 	ctsrate = rt->info[cix].rateCode;
1167 
1168 	/* XXX this should only matter for legacy rates */
1169 	if (shortPreamble)
1170 		ctsrate |= rt->info[cix].shortPreamble;
1171 
1172 	return (ctsrate);
1173 }
1174 
1175 /*
1176  * Calculate the RTS/CTS duration for legacy frames.
1177  */
1178 static int
1179 ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
1180     int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
1181     int flags)
1182 {
1183 	int ctsduration = 0;
1184 
1185 	/* This mustn't be called for HT modes */
1186 	if (rt->info[cix].phy == IEEE80211_T_HT) {
1187 		printf("%s: HT rate where it shouldn't be (0x%x)\n",
1188 		    __func__, rt->info[cix].rateCode);
1189 		return (-1);
1190 	}
1191 
1192 	/*
1193 	 * Compute the transmit duration based on the frame
1194 	 * size and the size of an ACK frame.  We call into the
1195 	 * HAL to do the computation since it depends on the
1196 	 * characteristics of the actual PHY being used.
1197 	 *
1198 	 * NB: CTS is assumed the same size as an ACK so we can
1199 	 *     use the precalculated ACK durations.
1200 	 */
1201 	if (shortPreamble) {
1202 		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
1203 			ctsduration += rt->info[cix].spAckDuration;
1204 		ctsduration += ath_hal_computetxtime(ah,
1205 			rt, pktlen, rix, AH_TRUE, AH_TRUE);
1206 		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
1207 			ctsduration += rt->info[rix].spAckDuration;
1208 	} else {
1209 		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
1210 			ctsduration += rt->info[cix].lpAckDuration;
1211 		ctsduration += ath_hal_computetxtime(ah,
1212 			rt, pktlen, rix, AH_FALSE, AH_TRUE);
1213 		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
1214 			ctsduration += rt->info[rix].lpAckDuration;
1215 	}
1216 
1217 	return (ctsduration);
1218 }
1219 
1220 /*
1221  * Update the given ath_buf with updated rts/cts setup and duration
1222  * values.
1223  *
1224  * To support rate lookups for each software retry, the rts/cts rate
1225  * and cts duration must be re-calculated.
1226  *
1227  * This function assumes the RTS/CTS flags have been set as needed;
1228  * mrr has been disabled; and the rate control lookup has been done.
1229  *
1230  * XXX TODO: MRR need only be disabled for the pre-11n NICs.
1231  * XXX The 11n NICs support per-rate RTS/CTS configuration.
1232  */
1233 static void
1234 ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
1235 {
1236 	uint16_t ctsduration = 0;
1237 	uint8_t ctsrate = 0;
1238 	uint8_t rix = bf->bf_state.bfs_rc[0].rix;
1239 	uint8_t cix = 0;
1240 	const HAL_RATE_TABLE *rt = sc->sc_currates;
1241 
1242 	/*
1243 	 * No RTS/CTS enabled? Don't bother.
1244 	 */
1245 	if ((bf->bf_state.bfs_txflags &
1246 	    (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
1247 		/* XXX is this really needed? */
1248 		bf->bf_state.bfs_ctsrate = 0;
1249 		bf->bf_state.bfs_ctsduration = 0;
1250 		return;
1251 	}
1252 
1253 	/*
1254 	 * If protection is enabled, use the protection rix control
1255 	 * rate. Otherwise use the rate0 control rate.
1256 	 */
1257 	if (bf->bf_state.bfs_doprot)
1258 		rix = sc->sc_protrix;
1259 	else
1260 		rix = bf->bf_state.bfs_rc[0].rix;
1261 
1262 	/*
1263 	 * If the raw path has hard-coded ctsrate0 to something,
1264 	 * use it.
1265 	 */
1266 	if (bf->bf_state.bfs_ctsrate0 != 0)
1267 		cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
1268 	else
1269 		/* Control rate from above */
1270 		cix = rt->info[rix].controlRate;
1271 
1272 	/* Calculate the rtscts rate for the given cix */
1273 	ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
1274 	    bf->bf_state.bfs_shpream);
1275 
1276 	/* The 11n chipsets do ctsduration calculations for you */
1277 	if (! ath_tx_is_11n(sc))
1278 		ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
1279 		    bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
1280 		    rt, bf->bf_state.bfs_txflags);
1281 
1282 	/* Squirrel away in ath_buf */
1283 	bf->bf_state.bfs_ctsrate = ctsrate;
1284 	bf->bf_state.bfs_ctsduration = ctsduration;
1285 
1286 	/*
1287 	 * Must disable multi-rate retry when using RTS/CTS.
1288 	 */
1289 	if (!sc->sc_mrrprot) {
1290 		bf->bf_state.bfs_ismrr = 0;
1291 		bf->bf_state.bfs_try0 =
1292 		    bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */
1293 	}
1294 }
1295 
1296 /*
1297  * Setup the descriptor chain for a normal or fast-frame
1298  * frame.
1299  *
1300  * XXX TODO: extend to include the destination hardware QCU ID.
1301  * Make sure that is correct.  Make sure that when being added
1302  * to the mcastq, the CABQ QCUID is set or things will get a bit
1303  * odd.
1304  */
1305 static void
1306 ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
1307 {
1308 	struct ath_desc *ds = bf->bf_desc;
1309 	struct ath_hal *ah = sc->sc_ah;
1310 
1311 	if (bf->bf_state.bfs_txrate0 == 0)
1312 		DPRINTF(sc, ATH_DEBUG_XMIT,
1313 		    "%s: bf=%p, txrate0=%d\n", __func__, bf, 0);
1314 
1315 	ath_hal_setuptxdesc(ah, ds
1316 		, bf->bf_state.bfs_pktlen	/* packet length */
1317 		, bf->bf_state.bfs_hdrlen	/* header length */
1318 		, bf->bf_state.bfs_atype	/* Atheros packet type */
1319 		, bf->bf_state.bfs_txpower	/* txpower */
1320 		, bf->bf_state.bfs_txrate0
1321 		, bf->bf_state.bfs_try0		/* series 0 rate/tries */
1322 		, bf->bf_state.bfs_keyix	/* key cache index */
1323 		, bf->bf_state.bfs_txantenna	/* antenna mode */
1324 		, bf->bf_state.bfs_txflags	/* flags */
1325 		, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
1326 		, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
1327 	);
1328 
1329 	/*
1330 	 * This will be overriden when the descriptor chain is written.
1331 	 */
1332 	bf->bf_lastds = ds;
1333 	bf->bf_last = bf;
1334 
1335 	/* Set rate control and descriptor chain for this frame */
1336 	ath_tx_set_ratectrl(sc, bf->bf_node, bf);
1337 	ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);
1338 }
1339 
1340 /*
1341  * Do a rate lookup.
1342  *
1343  * This performs a rate lookup for the given ath_buf only if it's required.
1344  * Non-data frames and raw frames don't require it.
1345  *
1346  * This populates the primary and MRR entries; MRR values are
1347  * then disabled later on if something requires it (eg RTS/CTS on
1348  * pre-11n chipsets.
1349  *
1350  * This needs to be done before the RTS/CTS fields are calculated
1351  * as they may depend upon the rate chosen.
1352  */
1353 static void
1354 ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf)
1355 {
1356 	uint8_t rate, rix;
1357 	int try0;
1358 
1359 	if (! bf->bf_state.bfs_doratelookup)
1360 		return;
1361 
1362 	/* Get rid of any previous state */
1363 	bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1364 
1365 	ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
1366 	ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
1367 	    bf->bf_state.bfs_pktlen, &rix, &try0, &rate);
1368 
1369 	/* In case MRR is disabled, make sure rc[0] is setup correctly */
1370 	bf->bf_state.bfs_rc[0].rix = rix;
1371 	bf->bf_state.bfs_rc[0].ratecode = rate;
1372 	bf->bf_state.bfs_rc[0].tries = try0;
1373 
1374 	if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
1375 		ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
1376 		    bf->bf_state.bfs_rc);
1377 	ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));
1378 
1379 	sc->sc_txrix = rix;	/* for LED blinking */
1380 	sc->sc_lastdatarix = rix;	/* for fast frames */
1381 	bf->bf_state.bfs_try0 = try0;
1382 	bf->bf_state.bfs_txrate0 = rate;
1383 }
1384 
1385 /*
1386  * Update the CLRDMASK bit in the ath_buf if it needs to be set.
1387  */
1388 static void
1389 ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
1390     struct ath_buf *bf)
1391 {
1392 	struct ath_node *an = ATH_NODE(bf->bf_node);
1393 
1394 	ATH_TX_LOCK_ASSERT(sc);
1395 
1396 	if (an->clrdmask == 1) {
1397 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
1398 		an->clrdmask = 0;
1399 	}
1400 }
1401 
1402 /*
1403  * Return whether this frame should be software queued or
1404  * direct dispatched.
1405  *
1406  * When doing powersave, BAR frames should be queued but other management
1407  * frames should be directly sent.
1408  *
1409  * When not doing powersave, stick BAR frames into the hardware queue
1410  * so it goes out even though the queue is paused.
1411  *
1412  * For now, management frames are also software queued by default.
1413  */
1414 static int
1415 ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,
1416     struct mbuf *m0, int *queue_to_head)
1417 {
1418 	struct ieee80211_node *ni = &an->an_node;
1419 	struct ieee80211_frame *wh;
1420 	uint8_t type, subtype;
1421 
1422 	wh = mtod(m0, struct ieee80211_frame *);
1423 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1424 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1425 
1426 	(*queue_to_head) = 0;
1427 
1428 	/* If it's not in powersave - direct-dispatch BAR */
1429 	if ((ATH_NODE(ni)->an_is_powersave == 0)
1430 	    && type == IEEE80211_FC0_TYPE_CTL &&
1431 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
1432 		DPRINTF(sc, ATH_DEBUG_SW_TX,
1433 		    "%s: BAR: TX'ing direct\n", __func__);
1434 		return (0);
1435 	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
1436 	    && type == IEEE80211_FC0_TYPE_CTL &&
1437 	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
1438 		/* BAR TX whilst asleep; queue */
1439 		DPRINTF(sc, ATH_DEBUG_SW_TX,
1440 		    "%s: swq: TX'ing\n", __func__);
1441 		(*queue_to_head) = 1;
1442 		return (1);
1443 	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
1444 	    && (type == IEEE80211_FC0_TYPE_MGT ||
1445 	        type == IEEE80211_FC0_TYPE_CTL)) {
1446 		/*
1447 		 * Other control/mgmt frame; bypass software queuing
1448 		 * for now!
1449 		 */
1450 		DPRINTF(sc, ATH_DEBUG_XMIT,
1451 		    "%s: %6D: Node is asleep; sending mgmt "
1452 		    "(type=%d, subtype=%d)\n",
1453 		    __func__, ni->ni_macaddr, ":", type, subtype);
1454 		return (0);
1455 	} else {
1456 		return (1);
1457 	}
1458 }
1459 
1460 
1461 /*
1462  * Transmit the given frame to the hardware.
1463  *
1464  * The frame must already be setup; rate control must already have
1465  * been done.
1466  *
1467  * XXX since the TXQ lock is being held here (and I dislike holding
1468  * it for this long when not doing software aggregation), later on
1469  * break this function into "setup_normal" and "xmit_normal". The
1470  * lock only needs to be held for the ath_tx_handoff call.
1471  *
1472  * XXX we don't update the leak count here - if we're doing
1473  * direct frame dispatch, we need to be able to do it without
1474  * decrementing the leak count (eg multicast queue frames.)
1475  */
1476 static void
1477 ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
1478     struct ath_buf *bf)
1479 {
1480 	struct ath_node *an = ATH_NODE(bf->bf_node);
1481 	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
1482 
1483 	ATH_TX_LOCK_ASSERT(sc);
1484 
1485 	/*
1486 	 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does
1487 	 * set a completion handler however it doesn't (yet) properly
1488 	 * handle the strict ordering requirements needed for normal,
1489 	 * non-aggregate session frames.
1490 	 *
1491 	 * Once this is implemented, only set CLRDMASK like this for
1492 	 * frames that must go out - eg management/raw frames.
1493 	 */
1494 	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
1495 
1496 	/* Setup the descriptor before handoff */
1497 	ath_tx_do_ratelookup(sc, bf);
1498 	ath_tx_calc_duration(sc, bf);
1499 	ath_tx_calc_protection(sc, bf);
1500 	ath_tx_set_rtscts(sc, bf);
1501 	ath_tx_rate_fill_rcflags(sc, bf);
1502 	ath_tx_setds(sc, bf);
1503 
1504 	/* Track per-TID hardware queue depth correctly */
1505 	tid->hwq_depth++;
1506 
1507 	/* Assign the completion handler */
1508 	bf->bf_comp = ath_tx_normal_comp;
1509 
1510 	/* Hand off to hardware */
1511 	ath_tx_handoff(sc, txq, bf);
1512 }
1513 
1514 /*
1515  * Do the basic frame setup stuff that's required before the frame
1516  * is added to a software queue.
1517  *
1518  * All frames get mostly the same treatment and it's done once.
1519  * Retransmits fiddle with things like the rate control setup,
1520  * setting the retransmit bit in the packet; doing relevant DMA/bus
1521  * syncing and relinking it (back) into the hardware TX queue.
1522  *
1523  * Note that this may cause the mbuf to be reallocated, so
1524  * m0 may not be valid.
1525  */
1526 static int
1527 ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
1528     struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
1529 {
1530 	struct ieee80211vap *vap = ni->ni_vap;
1531 	struct ath_hal *ah = sc->sc_ah;
1532 	struct ieee80211com *ic = &sc->sc_ic;
1533 	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
1534 	int error, iswep, ismcast, isfrag, ismrr;
1535 	int keyix, hdrlen, pktlen, try0 = 0;
1536 	u_int8_t rix = 0, txrate = 0;
1537 	struct ath_desc *ds;
1538 	struct ieee80211_frame *wh;
1539 	u_int subtype, flags;
1540 	HAL_PKT_TYPE atype;
1541 	const HAL_RATE_TABLE *rt;
1542 	HAL_BOOL shortPreamble;
1543 	struct ath_node *an;
1544 	u_int pri;
1545 
1546 	/*
1547 	 * To ensure that both sequence numbers and the CCMP PN handling
1548 	 * is "correct", make sure that the relevant TID queue is locked.
1549 	 * Otherwise the CCMP PN and seqno may appear out of order, causing
1550 	 * re-ordered frames to have out of order CCMP PN's, resulting
1551 	 * in many, many frame drops.
1552 	 */
1553 	ATH_TX_LOCK_ASSERT(sc);
1554 
1555 	wh = mtod(m0, struct ieee80211_frame *);
1556 	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
1557 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1558 	isfrag = m0->m_flags & M_FRAG;
1559 	hdrlen = ieee80211_anyhdrsize(wh);
1560 	/*
1561 	 * Packet length must not include any
1562 	 * pad bytes; deduct them here.
1563 	 */
1564 	pktlen = m0->m_pkthdr.len - (hdrlen & 3);
1565 
1566 	/* Handle encryption twiddling if needed */
1567 	if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
1568 	    &pktlen, &keyix)) {
1569 		ieee80211_free_mbuf(m0);
1570 		return EIO;
1571 	}
1572 
1573 	/* packet header may have moved, reset our local pointer */
1574 	wh = mtod(m0, struct ieee80211_frame *);
1575 
1576 	pktlen += IEEE80211_CRC_LEN;
1577 
1578 	/*
1579 	 * Load the DMA map so any coalescing is done.  This
1580 	 * also calculates the number of descriptors we need.
1581 	 */
1582 	error = ath_tx_dmasetup(sc, bf, m0);
1583 	if (error != 0)
1584 		return error;
1585 	KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
1586 	bf->bf_node = ni;			/* NB: held reference */
1587 	m0 = bf->bf_m;				/* NB: may have changed */
1588 	wh = mtod(m0, struct ieee80211_frame *);
1589 
1590 	/* setup descriptors */
1591 	ds = bf->bf_desc;
1592 	rt = sc->sc_currates;
1593 	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
1594 
1595 	/*
1596 	 * NB: the 802.11 layer marks whether or not we should
1597 	 * use short preamble based on the current mode and
1598 	 * negotiated parameters.
1599 	 */
1600 	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
1601 	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
1602 		shortPreamble = AH_TRUE;
1603 		sc->sc_stats.ast_tx_shortpre++;
1604 	} else {
1605 		shortPreamble = AH_FALSE;
1606 	}
1607 
1608 	an = ATH_NODE(ni);
1609 	//flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
1610 	flags = 0;
1611 	ismrr = 0;				/* default no multi-rate retry*/
1612 	pri = M_WME_GETAC(m0);			/* honor classification */
1613 	/* XXX use txparams instead of fixed values */
1614 	/*
1615 	 * Calculate Atheros packet type from IEEE80211 packet header,
1616 	 * setup for rate calculations, and select h/w transmit queue.
1617 	 */
1618 	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
1619 	case IEEE80211_FC0_TYPE_MGT:
1620 		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1621 		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
1622 			atype = HAL_PKT_TYPE_BEACON;
1623 		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
1624 			atype = HAL_PKT_TYPE_PROBE_RESP;
1625 		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
1626 			atype = HAL_PKT_TYPE_ATIM;
1627 		else
1628 			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
1629 		rix = an->an_mgmtrix;
1630 		txrate = rt->info[rix].rateCode;
1631 		if (shortPreamble)
1632 			txrate |= rt->info[rix].shortPreamble;
1633 		try0 = ATH_TXMGTTRY;
1634 		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
1635 		break;
1636 	case IEEE80211_FC0_TYPE_CTL:
1637 		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
1638 		rix = an->an_mgmtrix;
1639 		txrate = rt->info[rix].rateCode;
1640 		if (shortPreamble)
1641 			txrate |= rt->info[rix].shortPreamble;
1642 		try0 = ATH_TXMGTTRY;
1643 		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
1644 		break;
1645 	case IEEE80211_FC0_TYPE_DATA:
1646 		atype = HAL_PKT_TYPE_NORMAL;		/* default */
1647 		/*
1648 		 * Data frames: multicast frames go out at a fixed rate,
1649 		 * EAPOL frames use the mgmt frame rate; otherwise consult
1650 		 * the rate control module for the rate to use.
1651 		 */
1652 		if (ismcast) {
1653 			rix = an->an_mcastrix;
1654 			txrate = rt->info[rix].rateCode;
1655 			if (shortPreamble)
1656 				txrate |= rt->info[rix].shortPreamble;
1657 			try0 = 1;
1658 		} else if (m0->m_flags & M_EAPOL) {
1659 			/* XXX? maybe always use long preamble? */
1660 			rix = an->an_mgmtrix;
1661 			txrate = rt->info[rix].rateCode;
1662 			if (shortPreamble)
1663 				txrate |= rt->info[rix].shortPreamble;
1664 			try0 = ATH_TXMAXTRY;	/* XXX?too many? */
1665 		} else {
1666 			/*
1667 			 * Do rate lookup on each TX, rather than using
1668 			 * the hard-coded TX information decided here.
1669 			 */
1670 			ismrr = 1;
1671 			bf->bf_state.bfs_doratelookup = 1;
1672 		}
1673 		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
1674 			flags |= HAL_TXDESC_NOACK;
1675 		break;
1676 	default:
1677 		device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
1678 		    wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
1679 		/* XXX statistic */
1680 		/* XXX free tx dmamap */
1681 		ieee80211_free_mbuf(m0);
1682 		return EIO;
1683 	}
1684 
1685 	/*
1686 	 * There are two known scenarios where the frame AC doesn't match
1687 	 * what the destination TXQ is.
1688 	 *
1689 	 * + non-QoS frames (eg management?) that the net80211 stack has
1690 	 *   assigned a higher AC to, but since it's a non-QoS TID, it's
1691 	 *   being thrown into TID 16.  TID 16 gets the AC_BE queue.
1692 	 *   It's quite possible that management frames should just be
1693 	 *   direct dispatched to hardware rather than go via the software
1694 	 *   queue; that should be investigated in the future.  There are
1695 	 *   some specific scenarios where this doesn't make sense, mostly
1696 	 *   surrounding ADDBA request/response - hence why that is special
1697 	 *   cased.
1698 	 *
1699 	 * + Multicast frames going into the VAP mcast queue.  That shows up
1700 	 *   as "TXQ 11".
1701 	 *
1702 	 * This driver should eventually support separate TID and TXQ locking,
1703 	 * allowing for arbitrary AC frames to appear on arbitrary software
1704 	 * queues, being queued to the "correct" hardware queue when needed.
1705 	 */
1706 #if 0
1707 	if (txq != sc->sc_ac2q[pri]) {
1708 		DPRINTF(sc, ATH_DEBUG_XMIT,
1709 		    "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
1710 		    __func__,
1711 		    txq,
1712 		    txq->axq_qnum,
1713 		    pri,
1714 		    sc->sc_ac2q[pri],
1715 		    sc->sc_ac2q[pri]->axq_qnum);
1716 	}
1717 #endif
1718 
1719 	/*
1720 	 * Calculate miscellaneous flags.
1721 	 */
1722 	if (ismcast) {
1723 		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
1724 	} else if (pktlen > vap->iv_rtsthreshold &&
1725 	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
1726 		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
1727 		sc->sc_stats.ast_tx_rts++;
1728 	}
1729 	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
1730 		sc->sc_stats.ast_tx_noack++;
1731 #ifdef IEEE80211_SUPPORT_TDMA
1732 	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
1733 		DPRINTF(sc, ATH_DEBUG_TDMA,
1734 		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
1735 		sc->sc_stats.ast_tdma_ack++;
1736 		/* XXX free tx dmamap */
1737 		ieee80211_free_mbuf(m0);
1738 		return EIO;
1739 	}
1740 #endif
1741 
1742 #if 0
1743 	/*
1744 	 * Placeholder: if you want to transmit with the azimuth
1745 	 * timestamp in the end of the payload, here's where you
1746 	 * should set the TXDESC field.
1747 	 */
1748 	flags |= HAL_TXDESC_HWTS;
1749 #endif
1750 
1751 	/*
1752 	 * Determine if a tx interrupt should be generated for
1753 	 * this descriptor.  We take a tx interrupt to reap
1754 	 * descriptors when the h/w hits an EOL condition or
1755 	 * when the descriptor is specifically marked to generate
1756 	 * an interrupt.  We periodically mark descriptors in this
1757 	 * way to insure timely replenishing of the supply needed
1758 	 * for sending frames.  Defering interrupts reduces system
1759 	 * load and potentially allows more concurrent work to be
1760 	 * done but if done to aggressively can cause senders to
1761 	 * backup.
1762 	 *
1763 	 * NB: use >= to deal with sc_txintrperiod changing
1764 	 *     dynamically through sysctl.
1765 	 */
1766 	if (flags & HAL_TXDESC_INTREQ) {
1767 		txq->axq_intrcnt = 0;
1768 	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
1769 		flags |= HAL_TXDESC_INTREQ;
1770 		txq->axq_intrcnt = 0;
1771 	}
1772 
1773 	/* This point forward is actual TX bits */
1774 
1775 	/*
1776 	 * At this point we are committed to sending the frame
1777 	 * and we don't need to look at m_nextpkt; clear it in
1778 	 * case this frame is part of frag chain.
1779 	 */
1780 	m0->m_nextpkt = NULL;
1781 
1782 	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
1783 		ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
1784 		    sc->sc_hwmap[rix].ieeerate, -1);
1785 
1786 	if (ieee80211_radiotap_active_vap(vap)) {
1787 		u_int64_t tsf = ath_hal_gettsf64(ah);
1788 
1789 		sc->sc_tx_th.wt_tsf = htole64(tsf);
1790 		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
1791 		if (iswep)
1792 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
1793 		if (isfrag)
1794 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
1795 		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
1796 		sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
1797 		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
1798 
1799 		ieee80211_radiotap_tx(vap, m0);
1800 	}
1801 
1802 	/* Blank the legacy rate array */
1803 	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1804 
1805 	/*
1806 	 * ath_buf_set_rate needs at least one rate/try to setup
1807 	 * the rate scenario.
1808 	 */
1809 	bf->bf_state.bfs_rc[0].rix = rix;
1810 	bf->bf_state.bfs_rc[0].tries = try0;
1811 	bf->bf_state.bfs_rc[0].ratecode = txrate;
1812 
1813 	/* Store the decided rate index values away */
1814 	bf->bf_state.bfs_pktlen = pktlen;
1815 	bf->bf_state.bfs_hdrlen = hdrlen;
1816 	bf->bf_state.bfs_atype = atype;
1817 	bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni);
1818 	bf->bf_state.bfs_txrate0 = txrate;
1819 	bf->bf_state.bfs_try0 = try0;
1820 	bf->bf_state.bfs_keyix = keyix;
1821 	bf->bf_state.bfs_txantenna = sc->sc_txantenna;
1822 	bf->bf_state.bfs_txflags = flags;
1823 	bf->bf_state.bfs_shpream = shortPreamble;
1824 
1825 	/* XXX this should be done in ath_tx_setrate() */
1826 	bf->bf_state.bfs_ctsrate0 = 0;	/* ie, no hard-coded ctsrate */
1827 	bf->bf_state.bfs_ctsrate = 0;	/* calculated later */
1828 	bf->bf_state.bfs_ctsduration = 0;
1829 	bf->bf_state.bfs_ismrr = ismrr;
1830 
1831 	return 0;
1832 }
1833 
1834 /*
1835  * Queue a frame to the hardware or software queue.
1836  *
1837  * This can be called by the net80211 code.
1838  *
1839  * XXX what about locking? Or, push the seqno assign into the
1840  * XXX aggregate scheduler so its serialised?
1841  *
1842  * XXX When sending management frames via ath_raw_xmit(),
1843  *     should CLRDMASK be set unconditionally?
1844  */
1845 int
1846 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
1847     struct ath_buf *bf, struct mbuf *m0)
1848 {
1849 	struct ieee80211vap *vap = ni->ni_vap;
1850 	struct ath_vap *avp = ATH_VAP(vap);
1851 	int r = 0;
1852 	u_int pri;
1853 	int tid;
1854 	struct ath_txq *txq;
1855 	int ismcast;
1856 	const struct ieee80211_frame *wh;
1857 	int is_ampdu, is_ampdu_tx, is_ampdu_pending;
1858 	ieee80211_seq seqno;
1859 	uint8_t type, subtype;
1860 	int queue_to_head;
1861 
1862 	ATH_TX_LOCK_ASSERT(sc);
1863 
1864 	/*
1865 	 * Determine the target hardware queue.
1866 	 *
1867 	 * For multicast frames, the txq gets overridden appropriately
1868 	 * depending upon the state of PS.
1869 	 *
1870 	 * For any other frame, we do a TID/QoS lookup inside the frame
1871 	 * to see what the TID should be. If it's a non-QoS frame, the
1872 	 * AC and TID are overridden. The TID/TXQ code assumes the
1873 	 * TID is on a predictable hardware TXQ, so we don't support
1874 	 * having a node TID queued to multiple hardware TXQs.
1875 	 * This may change in the future but would require some locking
1876 	 * fudgery.
1877 	 */
1878 	pri = ath_tx_getac(sc, m0);
1879 	tid = ath_tx_gettid(sc, m0);
1880 
1881 	txq = sc->sc_ac2q[pri];
1882 	wh = mtod(m0, struct ieee80211_frame *);
1883 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1884 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1885 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1886 
1887 	/*
1888 	 * Enforce how deep the multicast queue can grow.
1889 	 *
1890 	 * XXX duplicated in ath_raw_xmit().
1891 	 */
1892 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1893 		if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
1894 		    > sc->sc_txq_mcastq_maxdepth) {
1895 			sc->sc_stats.ast_tx_mcastq_overflow++;
1896 			m_freem(m0);
1897 			return (ENOBUFS);
1898 		}
1899 	}
1900 
1901 	/*
1902 	 * Enforce how deep the unicast queue can grow.
1903 	 *
1904 	 * If the node is in power save then we don't want
1905 	 * the software queue to grow too deep, or a node may
1906 	 * end up consuming all of the ath_buf entries.
1907 	 *
1908 	 * For now, only do this for DATA frames.
1909 	 *
1910 	 * We will want to cap how many management/control
1911 	 * frames get punted to the software queue so it doesn't
1912 	 * fill up.  But the correct solution isn't yet obvious.
1913 	 * In any case, this check should at least let frames pass
1914 	 * that we are direct-dispatching.
1915 	 *
1916 	 * XXX TODO: duplicate this to the raw xmit path!
1917 	 */
1918 	if (type == IEEE80211_FC0_TYPE_DATA &&
1919 	    ATH_NODE(ni)->an_is_powersave &&
1920 	    ATH_NODE(ni)->an_swq_depth >
1921 	     sc->sc_txq_node_psq_maxdepth) {
1922 		sc->sc_stats.ast_tx_node_psq_overflow++;
1923 		m_freem(m0);
1924 		return (ENOBUFS);
1925 	}
1926 
1927 	/* A-MPDU TX */
1928 	is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
1929 	is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
1930 	is_ampdu = is_ampdu_tx | is_ampdu_pending;
1931 
1932 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
1933 	    __func__, tid, pri, is_ampdu);
1934 
1935 	/* Set local packet state, used to queue packets to hardware */
1936 	bf->bf_state.bfs_tid = tid;
1937 	bf->bf_state.bfs_tx_queue = txq->axq_qnum;
1938 	bf->bf_state.bfs_pri = pri;
1939 
1940 #if 1
1941 	/*
1942 	 * When servicing one or more stations in power-save mode
1943 	 * (or) if there is some mcast data waiting on the mcast
1944 	 * queue (to prevent out of order delivery) multicast frames
1945 	 * must be bufferd until after the beacon.
1946 	 *
1947 	 * TODO: we should lock the mcastq before we check the length.
1948 	 */
1949 	if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
1950 		txq = &avp->av_mcastq;
1951 		/*
1952 		 * Mark the frame as eventually belonging on the CAB
1953 		 * queue, so the descriptor setup functions will
1954 		 * correctly initialise the descriptor 'qcuId' field.
1955 		 */
1956 		bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
1957 	}
1958 #endif
1959 
1960 	/* Do the generic frame setup */
1961 	/* XXX should just bzero the bf_state? */
1962 	bf->bf_state.bfs_dobaw = 0;
1963 
1964 	/* A-MPDU TX? Manually set sequence number */
1965 	/*
1966 	 * Don't do it whilst pending; the net80211 layer still
1967 	 * assigns them.
1968 	 */
1969 	if (is_ampdu_tx) {
1970 		/*
1971 		 * Always call; this function will
1972 		 * handle making sure that null data frames
1973 		 * don't get a sequence number from the current
1974 		 * TID and thus mess with the BAW.
1975 		 */
1976 		seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);
1977 
1978 		/*
1979 		 * Don't add QoS NULL frames to the BAW.
1980 		 */
1981 		if (IEEE80211_QOS_HAS_SEQ(wh) &&
1982 		    subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) {
1983 			bf->bf_state.bfs_dobaw = 1;
1984 		}
1985 	}
1986 
1987 	/*
1988 	 * If needed, the sequence number has been assigned.
1989 	 * Squirrel it away somewhere easy to get to.
1990 	 */
1991 	bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;
1992 
1993 	/* Is ampdu pending? fetch the seqno and print it out */
1994 	if (is_ampdu_pending)
1995 		DPRINTF(sc, ATH_DEBUG_SW_TX,
1996 		    "%s: tid %d: ampdu pending, seqno %d\n",
1997 		    __func__, tid, M_SEQNO_GET(m0));
1998 
1999 	/* This also sets up the DMA map */
2000 	r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
2001 
2002 	if (r != 0)
2003 		goto done;
2004 
2005 	/* At this point m0 could have changed! */
2006 	m0 = bf->bf_m;
2007 
2008 #if 1
2009 	/*
2010 	 * If it's a multicast frame, do a direct-dispatch to the
2011 	 * destination hardware queue. Don't bother software
2012 	 * queuing it.
2013 	 */
2014 	/*
2015 	 * If it's a BAR frame, do a direct dispatch to the
2016 	 * destination hardware queue. Don't bother software
2017 	 * queuing it, as the TID will now be paused.
2018 	 * Sending a BAR frame can occur from the net80211 txa timer
2019 	 * (ie, retries) or from the ath txtask (completion call.)
2020 	 * It queues directly to hardware because the TID is paused
2021 	 * at this point (and won't be unpaused until the BAR has
2022 	 * either been TXed successfully or max retries has been
2023 	 * reached.)
2024 	 */
2025 	/*
2026 	 * Until things are better debugged - if this node is asleep
2027 	 * and we're sending it a non-BAR frame, direct dispatch it.
2028 	 * Why? Because we need to figure out what's actually being
2029 	 * sent - eg, during reassociation/reauthentication after
2030 	 * the node (last) disappeared whilst asleep, the driver should
2031 	 * have unpaused/unsleep'ed the node.  So until that is
2032 	 * sorted out, use this workaround.
2033 	 */
2034 	if (txq == &avp->av_mcastq) {
2035 		DPRINTF(sc, ATH_DEBUG_SW_TX,
2036 		    "%s: bf=%p: mcastq: TX'ing\n", __func__, bf);
2037 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2038 		ath_tx_xmit_normal(sc, txq, bf);
2039 	} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2040 	    &queue_to_head)) {
2041 		ath_tx_swq(sc, ni, txq, queue_to_head, bf);
2042 	} else {
2043 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2044 		ath_tx_xmit_normal(sc, txq, bf);
2045 	}
2046 #else
2047 	/*
2048 	 * For now, since there's no software queue,
2049 	 * direct-dispatch to the hardware.
2050 	 */
2051 	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2052 	/*
2053 	 * Update the current leak count if
2054 	 * we're leaking frames; and set the
2055 	 * MORE flag as appropriate.
2056 	 */
2057 	ath_tx_leak_count_update(sc, tid, bf);
2058 	ath_tx_xmit_normal(sc, txq, bf);
2059 #endif
2060 done:
2061 	return 0;
2062 }
2063 
2064 static int
2065 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
2066 	struct ath_buf *bf, struct mbuf *m0,
2067 	const struct ieee80211_bpf_params *params)
2068 {
2069 	struct ieee80211com *ic = &sc->sc_ic;
2070 	struct ath_hal *ah = sc->sc_ah;
2071 	struct ieee80211vap *vap = ni->ni_vap;
2072 	int error, ismcast, ismrr;
2073 	int keyix, hdrlen, pktlen, try0, txantenna;
2074 	u_int8_t rix, txrate;
2075 	struct ieee80211_frame *wh;
2076 	u_int flags;
2077 	HAL_PKT_TYPE atype;
2078 	const HAL_RATE_TABLE *rt;
2079 	struct ath_desc *ds;
2080 	u_int pri;
2081 	int o_tid = -1;
2082 	int do_override;
2083 	uint8_t type, subtype;
2084 	int queue_to_head;
2085 	struct ath_node *an = ATH_NODE(ni);
2086 
2087 	ATH_TX_LOCK_ASSERT(sc);
2088 
2089 	wh = mtod(m0, struct ieee80211_frame *);
2090 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
2091 	hdrlen = ieee80211_anyhdrsize(wh);
2092 	/*
2093 	 * Packet length must not include any
2094 	 * pad bytes; deduct them here.
2095 	 */
2096 	/* XXX honor IEEE80211_BPF_DATAPAD */
2097 	pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;
2098 
2099 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2100 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2101 
2102 	ATH_KTR(sc, ATH_KTR_TX, 2,
2103 	     "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf);
2104 
2105 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n",
2106 	    __func__, ismcast);
2107 
2108 	pri = params->ibp_pri & 3;
2109 	/* Override pri if the frame isn't a QoS one */
2110 	if (! IEEE80211_QOS_HAS_SEQ(wh))
2111 		pri = ath_tx_getac(sc, m0);
2112 
2113 	/* XXX If it's an ADDBA, override the correct queue */
2114 	do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid);
2115 
2116 	/* Map ADDBA to the correct priority */
2117 	if (do_override) {
2118 #if 0
2119 		DPRINTF(sc, ATH_DEBUG_XMIT,
2120 		    "%s: overriding tid %d pri %d -> %d\n",
2121 		    __func__, o_tid, pri, TID_TO_WME_AC(o_tid));
2122 #endif
2123 		pri = TID_TO_WME_AC(o_tid);
2124 	}
2125 
2126 	/* Handle encryption twiddling if needed */
2127 	if (! ath_tx_tag_crypto(sc, ni,
2128 	    m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
2129 	    &hdrlen, &pktlen, &keyix)) {
2130 		ieee80211_free_mbuf(m0);
2131 		return EIO;
2132 	}
2133 	/* packet header may have moved, reset our local pointer */
2134 	wh = mtod(m0, struct ieee80211_frame *);
2135 
2136 	/* Do the generic frame setup */
2137 	/* XXX should just bzero the bf_state? */
2138 	bf->bf_state.bfs_dobaw = 0;
2139 
2140 	error = ath_tx_dmasetup(sc, bf, m0);
2141 	if (error != 0)
2142 		return error;
2143 	m0 = bf->bf_m;				/* NB: may have changed */
2144 	wh = mtod(m0, struct ieee80211_frame *);
2145 	KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
2146 	bf->bf_node = ni;			/* NB: held reference */
2147 
2148 	/* Always enable CLRDMASK for raw frames for now.. */
2149 	flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
2150 	flags |= HAL_TXDESC_INTREQ;		/* force interrupt */
2151 	if (params->ibp_flags & IEEE80211_BPF_RTS)
2152 		flags |= HAL_TXDESC_RTSENA;
2153 	else if (params->ibp_flags & IEEE80211_BPF_CTS) {
2154 		/* XXX assume 11g/11n protection? */
2155 		bf->bf_state.bfs_doprot = 1;
2156 		flags |= HAL_TXDESC_CTSENA;
2157 	}
2158 	/* XXX leave ismcast to injector? */
2159 	if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
2160 		flags |= HAL_TXDESC_NOACK;
2161 
2162 	rt = sc->sc_currates;
2163 	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
2164 
2165 	/* Fetch first rate information */
2166 	rix = ath_tx_findrix(sc, params->ibp_rate0);
2167 	try0 = params->ibp_try0;
2168 
2169 	/*
2170 	 * Override EAPOL rate as appropriate.
2171 	 */
2172 	if (m0->m_flags & M_EAPOL) {
2173 		/* XXX? maybe always use long preamble? */
2174 		rix = an->an_mgmtrix;
2175 		try0 = ATH_TXMAXTRY;	/* XXX?too many? */
2176 	}
2177 
2178 	txrate = rt->info[rix].rateCode;
2179 	if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
2180 		txrate |= rt->info[rix].shortPreamble;
2181 	sc->sc_txrix = rix;
2182 	ismrr = (params->ibp_try1 != 0);
2183 	txantenna = params->ibp_pri >> 2;
2184 	if (txantenna == 0)			/* XXX? */
2185 		txantenna = sc->sc_txantenna;
2186 
2187 	/*
2188 	 * Since ctsrate is fixed, store it away for later
2189 	 * use when the descriptor fields are being set.
2190 	 */
2191 	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA))
2192 		bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate;
2193 
2194 	/*
2195 	 * NB: we mark all packets as type PSPOLL so the h/w won't
2196 	 * set the sequence number, duration, etc.
2197 	 */
2198 	atype = HAL_PKT_TYPE_PSPOLL;
2199 
2200 	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
2201 		ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
2202 		    sc->sc_hwmap[rix].ieeerate, -1);
2203 
2204 	if (ieee80211_radiotap_active_vap(vap)) {
2205 		u_int64_t tsf = ath_hal_gettsf64(ah);
2206 
2207 		sc->sc_tx_th.wt_tsf = htole64(tsf);
2208 		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
2209 		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
2210 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2211 		if (m0->m_flags & M_FRAG)
2212 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
2213 		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
2214 		sc->sc_tx_th.wt_txpower = MIN(params->ibp_power,
2215 		    ieee80211_get_node_txpower(ni));
2216 		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
2217 
2218 		ieee80211_radiotap_tx(vap, m0);
2219 	}
2220 
2221 	/*
2222 	 * Formulate first tx descriptor with tx controls.
2223 	 */
2224 	ds = bf->bf_desc;
2225 	/* XXX check return value? */
2226 
2227 	/* Store the decided rate index values away */
2228 	bf->bf_state.bfs_pktlen = pktlen;
2229 	bf->bf_state.bfs_hdrlen = hdrlen;
2230 	bf->bf_state.bfs_atype = atype;
2231 	bf->bf_state.bfs_txpower = MIN(params->ibp_power,
2232 	    ieee80211_get_node_txpower(ni));
2233 	bf->bf_state.bfs_txrate0 = txrate;
2234 	bf->bf_state.bfs_try0 = try0;
2235 	bf->bf_state.bfs_keyix = keyix;
2236 	bf->bf_state.bfs_txantenna = txantenna;
2237 	bf->bf_state.bfs_txflags = flags;
2238 	bf->bf_state.bfs_shpream =
2239 	    !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE);
2240 
2241 	/* Set local packet state, used to queue packets to hardware */
2242 	bf->bf_state.bfs_tid = WME_AC_TO_TID(pri);
2243 	bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;
2244 	bf->bf_state.bfs_pri = pri;
2245 
2246 	/* XXX this should be done in ath_tx_setrate() */
2247 	bf->bf_state.bfs_ctsrate = 0;
2248 	bf->bf_state.bfs_ctsduration = 0;
2249 	bf->bf_state.bfs_ismrr = ismrr;
2250 
2251 	/* Blank the legacy rate array */
2252 	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
2253 
2254 	bf->bf_state.bfs_rc[0].rix = rix;
2255 	bf->bf_state.bfs_rc[0].tries = try0;
2256 	bf->bf_state.bfs_rc[0].ratecode = txrate;
2257 
2258 	if (ismrr) {
2259 		int rix;
2260 
2261 		rix = ath_tx_findrix(sc, params->ibp_rate1);
2262 		bf->bf_state.bfs_rc[1].rix = rix;
2263 		bf->bf_state.bfs_rc[1].tries = params->ibp_try1;
2264 
2265 		rix = ath_tx_findrix(sc, params->ibp_rate2);
2266 		bf->bf_state.bfs_rc[2].rix = rix;
2267 		bf->bf_state.bfs_rc[2].tries = params->ibp_try2;
2268 
2269 		rix = ath_tx_findrix(sc, params->ibp_rate3);
2270 		bf->bf_state.bfs_rc[3].rix = rix;
2271 		bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
2272 	}
2273 	/*
2274 	 * All the required rate control decisions have been made;
2275 	 * fill in the rc flags.
2276 	 */
2277 	ath_tx_rate_fill_rcflags(sc, bf);
2278 
2279 	/* NB: no buffered multicast in power save support */
2280 
2281 	/*
2282 	 * If we're overiding the ADDBA destination, dump directly
2283 	 * into the hardware queue, right after any pending
2284 	 * frames to that node are.
2285 	 */
2286 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",
2287 	    __func__, do_override);
2288 
2289 #if 1
2290 	/*
2291 	 * Put addba frames in the right place in the right TID/HWQ.
2292 	 */
2293 	if (do_override) {
2294 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2295 		/*
2296 		 * XXX if it's addba frames, should we be leaking
2297 		 * them out via the frame leak method?
2298 		 * XXX for now let's not risk it; but we may wish
2299 		 * to investigate this later.
2300 		 */
2301 		ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2302 	} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2303 	    &queue_to_head)) {
2304 		/* Queue to software queue */
2305 		ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);
2306 	} else {
2307 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2308 		ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2309 	}
2310 #else
2311 	/* Direct-dispatch to the hardware */
2312 	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2313 	/*
2314 	 * Update the current leak count if
2315 	 * we're leaking frames; and set the
2316 	 * MORE flag as appropriate.
2317 	 */
2318 	ath_tx_leak_count_update(sc, tid, bf);
2319 	ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2320 #endif
2321 	return 0;
2322 }
2323 
2324 /*
2325  * Send a raw frame.
2326  *
2327  * This can be called by net80211.
2328  */
2329 int
2330 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
2331 	const struct ieee80211_bpf_params *params)
2332 {
2333 	struct ieee80211com *ic = ni->ni_ic;
2334 	struct ath_softc *sc = ic->ic_softc;
2335 	struct ath_buf *bf;
2336 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
2337 	int error = 0;
2338 
2339 	ATH_PCU_LOCK(sc);
2340 	if (sc->sc_inreset_cnt > 0) {
2341 		DPRINTF(sc, ATH_DEBUG_XMIT,
2342 		    "%s: sc_inreset_cnt > 0; bailing\n", __func__);
2343 		error = EIO;
2344 		ATH_PCU_UNLOCK(sc);
2345 		goto badbad;
2346 	}
2347 	sc->sc_txstart_cnt++;
2348 	ATH_PCU_UNLOCK(sc);
2349 
2350 	/* Wake the hardware up already */
2351 	ATH_LOCK(sc);
2352 	ath_power_set_power_state(sc, HAL_PM_AWAKE);
2353 	ATH_UNLOCK(sc);
2354 
2355 	ATH_TX_LOCK(sc);
2356 
2357 	if (!sc->sc_running || sc->sc_invalid) {
2358 		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, r/i: %d/%d",
2359 		    __func__, sc->sc_running, sc->sc_invalid);
2360 		m_freem(m);
2361 		error = ENETDOWN;
2362 		goto bad;
2363 	}
2364 
2365 	/*
2366 	 * Enforce how deep the multicast queue can grow.
2367 	 *
2368 	 * XXX duplicated in ath_tx_start().
2369 	 */
2370 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2371 		if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
2372 		    > sc->sc_txq_mcastq_maxdepth) {
2373 			sc->sc_stats.ast_tx_mcastq_overflow++;
2374 			error = ENOBUFS;
2375 		}
2376 
2377 		if (error != 0) {
2378 			m_freem(m);
2379 			goto bad;
2380 		}
2381 	}
2382 
2383 	/*
2384 	 * Grab a TX buffer and associated resources.
2385 	 */
2386 	bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
2387 	if (bf == NULL) {
2388 		sc->sc_stats.ast_tx_nobuf++;
2389 		m_freem(m);
2390 		error = ENOBUFS;
2391 		goto bad;
2392 	}
2393 	ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n",
2394 	    m, params,  bf);
2395 
2396 	if (params == NULL) {
2397 		/*
2398 		 * Legacy path; interpret frame contents to decide
2399 		 * precisely how to send the frame.
2400 		 */
2401 		if (ath_tx_start(sc, ni, bf, m)) {
2402 			error = EIO;		/* XXX */
2403 			goto bad2;
2404 		}
2405 	} else {
2406 		/*
2407 		 * Caller supplied explicit parameters to use in
2408 		 * sending the frame.
2409 		 */
2410 		if (ath_tx_raw_start(sc, ni, bf, m, params)) {
2411 			error = EIO;		/* XXX */
2412 			goto bad2;
2413 		}
2414 	}
2415 	sc->sc_wd_timer = 5;
2416 	sc->sc_stats.ast_tx_raw++;
2417 
2418 	/*
2419 	 * Update the TIM - if there's anything queued to the
2420 	 * software queue and power save is enabled, we should
2421 	 * set the TIM.
2422 	 */
2423 	ath_tx_update_tim(sc, ni, 1);
2424 
2425 	ATH_TX_UNLOCK(sc);
2426 
2427 	ATH_PCU_LOCK(sc);
2428 	sc->sc_txstart_cnt--;
2429 	ATH_PCU_UNLOCK(sc);
2430 
2431 
2432 	/* Put the hardware back to sleep if required */
2433 	ATH_LOCK(sc);
2434 	ath_power_restore_power_state(sc);
2435 	ATH_UNLOCK(sc);
2436 
2437 	return 0;
2438 
2439 bad2:
2440 	ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, "
2441 	    "bf=%p",
2442 	    m,
2443 	    params,
2444 	    bf);
2445 	ATH_TXBUF_LOCK(sc);
2446 	ath_returnbuf_head(sc, bf);
2447 	ATH_TXBUF_UNLOCK(sc);
2448 
2449 bad:
2450 	ATH_TX_UNLOCK(sc);
2451 
2452 	ATH_PCU_LOCK(sc);
2453 	sc->sc_txstart_cnt--;
2454 	ATH_PCU_UNLOCK(sc);
2455 
2456 	/* Put the hardware back to sleep if required */
2457 	ATH_LOCK(sc);
2458 	ath_power_restore_power_state(sc);
2459 	ATH_UNLOCK(sc);
2460 
2461 badbad:
2462 	ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p",
2463 	    m, params);
2464 	sc->sc_stats.ast_tx_raw_fail++;
2465 
2466 	return error;
2467 }
2468 
2469 /* Some helper functions */
2470 
2471 /*
2472  * ADDBA (and potentially others) need to be placed in the same
2473  * hardware queue as the TID/node it's relating to. This is so
2474  * it goes out after any pending non-aggregate frames to the
2475  * same node/TID.
2476  *
2477  * If this isn't done, the ADDBA can go out before the frames
2478  * queued in hardware. Even though these frames have a sequence
2479  * number -earlier- than the ADDBA can be transmitted (but
2480  * no frames whose sequence numbers are after the ADDBA should
2481  * be!) they'll arrive after the ADDBA - and the receiving end
2482  * will simply drop them as being out of the BAW.
2483  *
2484  * The frames can't be appended to the TID software queue - it'll
2485  * never be sent out. So these frames have to be directly
2486  * dispatched to the hardware, rather than queued in software.
2487  * So if this function returns true, the TXQ has to be
2488  * overridden and it has to be directly dispatched.
2489  *
2490  * It's a dirty hack, but someone's gotta do it.
2491  */
2492 
2493 /*
2494  * XXX doesn't belong here!
2495  */
2496 static int
2497 ieee80211_is_action(struct ieee80211_frame *wh)
2498 {
2499 	/* Type: Management frame? */
2500 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
2501 	    IEEE80211_FC0_TYPE_MGT)
2502 		return 0;
2503 
2504 	/* Subtype: Action frame? */
2505 	if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) !=
2506 	    IEEE80211_FC0_SUBTYPE_ACTION)
2507 		return 0;
2508 
2509 	return 1;
2510 }
2511 
2512 #define	MS(_v, _f)	(((_v) & _f) >> _f##_S)
2513 /*
2514  * Return an alternate TID for ADDBA request frames.
2515  *
2516  * Yes, this likely should be done in the net80211 layer.
2517  */
2518 static int
2519 ath_tx_action_frame_override_queue(struct ath_softc *sc,
2520     struct ieee80211_node *ni,
2521     struct mbuf *m0, int *tid)
2522 {
2523 	struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *);
2524 	struct ieee80211_action_ba_addbarequest *ia;
2525 	uint8_t *frm;
2526 	uint16_t baparamset;
2527 
2528 	/* Not action frame? Bail */
2529 	if (! ieee80211_is_action(wh))
2530 		return 0;
2531 
2532 	/* XXX Not needed for frames we send? */
2533 #if 0
2534 	/* Correct length? */
2535 	if (! ieee80211_parse_action(ni, m))
2536 		return 0;
2537 #endif
2538 
2539 	/* Extract out action frame */
2540 	frm = (u_int8_t *)&wh[1];
2541 	ia = (struct ieee80211_action_ba_addbarequest *) frm;
2542 
2543 	/* Not ADDBA? Bail */
2544 	if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA)
2545 		return 0;
2546 	if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST)
2547 		return 0;
2548 
2549 	/* Extract TID, return it */
2550 	baparamset = le16toh(ia->rq_baparamset);
2551 	*tid = (int) MS(baparamset, IEEE80211_BAPS_TID);
2552 
2553 	return 1;
2554 }
2555 #undef	MS
2556 
2557 /* Per-node software queue operations */
2558 
2559 /*
2560  * Add the current packet to the given BAW.
2561  * It is assumed that the current packet
2562  *
2563  * + fits inside the BAW;
2564  * + already has had a sequence number allocated.
2565  *
2566  * Since the BAW status may be modified by both the ath task and
2567  * the net80211/ifnet contexts, the TID must be locked.
2568  */
2569 void
2570 ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
2571     struct ath_tid *tid, struct ath_buf *bf)
2572 {
2573 	int index, cindex;
2574 	struct ieee80211_tx_ampdu *tap;
2575 
2576 	ATH_TX_LOCK_ASSERT(sc);
2577 
2578 	if (bf->bf_state.bfs_isretried)
2579 		return;
2580 
2581 	tap = ath_tx_get_tx_tid(an, tid->tid);
2582 
2583 	if (! bf->bf_state.bfs_dobaw) {
2584 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2585 		    "%s: dobaw=0, seqno=%d, window %d:%d\n",
2586 		    __func__, SEQNO(bf->bf_state.bfs_seqno),
2587 		    tap->txa_start, tap->txa_wnd);
2588 	}
2589 
2590 	if (bf->bf_state.bfs_addedbaw)
2591 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2592 		    "%s: re-added? tid=%d, seqno %d; window %d:%d; "
2593 		    "baw head=%d tail=%d\n",
2594 		    __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2595 		    tap->txa_start, tap->txa_wnd, tid->baw_head,
2596 		    tid->baw_tail);
2597 
2598 	/*
2599 	 * Verify that the given sequence number is not outside of the
2600 	 * BAW.  Complain loudly if that's the case.
2601 	 */
2602 	if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
2603 	    SEQNO(bf->bf_state.bfs_seqno))) {
2604 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2605 		    "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; "
2606 		    "baw head=%d tail=%d\n",
2607 		    __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2608 		    tap->txa_start, tap->txa_wnd, tid->baw_head,
2609 		    tid->baw_tail);
2610 	}
2611 
2612 	/*
2613 	 * ni->ni_txseqs[] is the currently allocated seqno.
2614 	 * the txa state contains the current baw start.
2615 	 */
2616 	index  = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));
2617 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2618 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2619 	    "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d "
2620 	    "baw head=%d tail=%d\n",
2621 	    __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2622 	    tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head,
2623 	    tid->baw_tail);
2624 
2625 
2626 #if 0
2627 	assert(tid->tx_buf[cindex] == NULL);
2628 #endif
2629 	if (tid->tx_buf[cindex] != NULL) {
2630 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2631 		    "%s: ba packet dup (index=%d, cindex=%d, "
2632 		    "head=%d, tail=%d)\n",
2633 		    __func__, index, cindex, tid->baw_head, tid->baw_tail);
2634 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2635 		    "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n",
2636 		    __func__,
2637 		    tid->tx_buf[cindex],
2638 		    SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno),
2639 		    bf,
2640 		    SEQNO(bf->bf_state.bfs_seqno)
2641 		);
2642 	}
2643 	tid->tx_buf[cindex] = bf;
2644 
2645 	if (index >= ((tid->baw_tail - tid->baw_head) &
2646 	    (ATH_TID_MAX_BUFS - 1))) {
2647 		tid->baw_tail = cindex;
2648 		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
2649 	}
2650 }
2651 
2652 /*
2653  * Flip the BAW buffer entry over from the existing one to the new one.
2654  *
2655  * When software retransmitting a (sub-)frame, it is entirely possible that
2656  * the frame ath_buf is marked as BUSY and can't be immediately reused.
2657  * In that instance the buffer is cloned and the new buffer is used for
2658  * retransmit. We thus need to update the ath_buf slot in the BAW buf
2659  * tracking array to maintain consistency.
2660  */
2661 static void
2662 ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an,
2663     struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf)
2664 {
2665 	int index, cindex;
2666 	struct ieee80211_tx_ampdu *tap;
2667 	int seqno = SEQNO(old_bf->bf_state.bfs_seqno);
2668 
2669 	ATH_TX_LOCK_ASSERT(sc);
2670 
2671 	tap = ath_tx_get_tx_tid(an, tid->tid);
2672 	index  = ATH_BA_INDEX(tap->txa_start, seqno);
2673 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2674 
2675 	/*
2676 	 * Just warn for now; if it happens then we should find out
2677 	 * about it. It's highly likely the aggregation session will
2678 	 * soon hang.
2679 	 */
2680 	if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {
2681 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2682 		    "%s: retransmitted buffer"
2683 		    " has mismatching seqno's, BA session may hang.\n",
2684 		    __func__);
2685 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2686 		    "%s: old seqno=%d, new_seqno=%d\n", __func__,
2687 		    old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno);
2688 	}
2689 
2690 	if (tid->tx_buf[cindex] != old_bf) {
2691 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2692 		    "%s: ath_buf pointer incorrect; "
2693 		    " has m BA session may hang.\n", __func__);
2694 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2695 		    "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf);
2696 	}
2697 
2698 	tid->tx_buf[cindex] = new_bf;
2699 }
2700 
2701 /*
2702  * seq_start - left edge of BAW
2703  * seq_next - current/next sequence number to allocate
2704  *
2705  * Since the BAW status may be modified by both the ath task and
2706  * the net80211/ifnet contexts, the TID must be locked.
2707  */
2708 static void
2709 ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
2710     struct ath_tid *tid, const struct ath_buf *bf)
2711 {
2712 	int index, cindex;
2713 	struct ieee80211_tx_ampdu *tap;
2714 	int seqno = SEQNO(bf->bf_state.bfs_seqno);
2715 
2716 	ATH_TX_LOCK_ASSERT(sc);
2717 
2718 	tap = ath_tx_get_tx_tid(an, tid->tid);
2719 	index  = ATH_BA_INDEX(tap->txa_start, seqno);
2720 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2721 
2722 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2723 	    "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, "
2724 	    "baw head=%d, tail=%d\n",
2725 	    __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
2726 	    cindex, tid->baw_head, tid->baw_tail);
2727 
2728 	/*
2729 	 * If this occurs then we have a big problem - something else
2730 	 * has slid tap->txa_start along without updating the BAW
2731 	 * tracking start/end pointers. Thus the TX BAW state is now
2732 	 * completely busted.
2733 	 *
2734 	 * But for now, since I haven't yet fixed TDMA and buffer cloning,
2735 	 * it's quite possible that a cloned buffer is making its way
2736 	 * here and causing it to fire off. Disable TDMA for now.
2737 	 */
2738 	if (tid->tx_buf[cindex] != bf) {
2739 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2740 		    "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
2741 		    __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
2742 		    tid->tx_buf[cindex],
2743 		    (tid->tx_buf[cindex] != NULL) ?
2744 		      SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1);
2745 	}
2746 
2747 	tid->tx_buf[cindex] = NULL;
2748 
2749 	while (tid->baw_head != tid->baw_tail &&
2750 	    !tid->tx_buf[tid->baw_head]) {
2751 		INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
2752 		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
2753 	}
2754 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2755 	    "%s: tid=%d: baw is now %d:%d, baw head=%d\n",
2756 	    __func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head);
2757 }
2758 
2759 static void
2760 ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,
2761     struct ath_buf *bf)
2762 {
2763 	struct ieee80211_frame *wh;
2764 
2765 	ATH_TX_LOCK_ASSERT(sc);
2766 
2767 	if (tid->an->an_leak_count > 0) {
2768 		wh = mtod(bf->bf_m, struct ieee80211_frame *);
2769 
2770 		/*
2771 		 * Update MORE based on the software/net80211 queue states.
2772 		 */
2773 		if ((tid->an->an_stack_psq > 0)
2774 		    || (tid->an->an_swq_depth > 0))
2775 			wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
2776 		else
2777 			wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;
2778 
2779 		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
2780 		    "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n",
2781 		    __func__,
2782 		    tid->an->an_node.ni_macaddr,
2783 		    ":",
2784 		    tid->an->an_leak_count,
2785 		    tid->an->an_stack_psq,
2786 		    tid->an->an_swq_depth,
2787 		    !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));
2788 
2789 		/*
2790 		 * Re-sync the underlying buffer.
2791 		 */
2792 		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
2793 		    BUS_DMASYNC_PREWRITE);
2794 
2795 		tid->an->an_leak_count --;
2796 	}
2797 }
2798 
2799 static int
2800 ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)
2801 {
2802 
2803 	ATH_TX_LOCK_ASSERT(sc);
2804 
2805 	if (tid->an->an_leak_count > 0) {
2806 		return (1);
2807 	}
2808 	if (tid->paused)
2809 		return (0);
2810 	return (1);
2811 }
2812 
2813 /*
2814  * Mark the current node/TID as ready to TX.
2815  *
2816  * This is done to make it easy for the software scheduler to
2817  * find which nodes have data to send.
2818  *
2819  * The TXQ lock must be held.
2820  */
2821 void
2822 ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
2823 {
2824 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2825 
2826 	ATH_TX_LOCK_ASSERT(sc);
2827 
2828 	/*
2829 	 * If we are leaking out a frame to this destination
2830 	 * for PS-POLL, ensure that we allow scheduling to
2831 	 * occur.
2832 	 */
2833 	if (! ath_tx_tid_can_tx_or_sched(sc, tid))
2834 		return;		/* paused, can't schedule yet */
2835 
2836 	if (tid->sched)
2837 		return;		/* already scheduled */
2838 
2839 	tid->sched = 1;
2840 
2841 #if 0
2842 	/*
2843 	 * If this is a sleeping node we're leaking to, given
2844 	 * it a higher priority.  This is so bad for QoS it hurts.
2845 	 */
2846 	if (tid->an->an_leak_count) {
2847 		TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
2848 	} else {
2849 		TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2850 	}
2851 #endif
2852 
2853 	/*
2854 	 * We can't do the above - it'll confuse the TXQ software
2855 	 * scheduler which will keep checking the _head_ TID
2856 	 * in the list to see if it has traffic.  If we queue
2857 	 * a TID to the head of the list and it doesn't transmit,
2858 	 * we'll check it again.
2859 	 *
2860 	 * So, get the rest of this leaking frames support working
2861 	 * and reliable first and _then_ optimise it so they're
2862 	 * pushed out in front of any other pending software
2863 	 * queued nodes.
2864 	 */
2865 	TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2866 }
2867 
2868 /*
2869  * Mark the current node as no longer needing to be polled for
2870  * TX packets.
2871  *
2872  * The TXQ lock must be held.
2873  */
2874 static void
2875 ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)
2876 {
2877 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2878 
2879 	ATH_TX_LOCK_ASSERT(sc);
2880 
2881 	if (tid->sched == 0)
2882 		return;
2883 
2884 	tid->sched = 0;
2885 	TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
2886 }
2887 
2888 /*
2889  * Assign a sequence number manually to the given frame.
2890  *
2891  * This should only be called for A-MPDU TX frames.
2892  */
2893 static ieee80211_seq
2894 ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
2895     struct ath_buf *bf, struct mbuf *m0)
2896 {
2897 	struct ieee80211_frame *wh;
2898 	int tid, pri;
2899 	ieee80211_seq seqno;
2900 	uint8_t subtype;
2901 
2902 	/* TID lookup */
2903 	wh = mtod(m0, struct ieee80211_frame *);
2904 	pri = M_WME_GETAC(m0);			/* honor classification */
2905 	tid = WME_AC_TO_TID(pri);
2906 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n",
2907 	    __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
2908 
2909 	/* XXX Is it a control frame? Ignore */
2910 
2911 	/* Does the packet require a sequence number? */
2912 	if (! IEEE80211_QOS_HAS_SEQ(wh))
2913 		return -1;
2914 
2915 	ATH_TX_LOCK_ASSERT(sc);
2916 
2917 	/*
2918 	 * Is it a QOS NULL Data frame? Give it a sequence number from
2919 	 * the default TID (IEEE80211_NONQOS_TID.)
2920 	 *
2921 	 * The RX path of everything I've looked at doesn't include the NULL
2922 	 * data frame sequence number in the aggregation state updates, so
2923 	 * assigning it a sequence number there will cause a BAW hole on the
2924 	 * RX side.
2925 	 */
2926 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2927 	if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) {
2928 		/* XXX no locking for this TID? This is a bit of a problem. */
2929 		seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
2930 		INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
2931 	} else {
2932 		/* Manually assign sequence number */
2933 		seqno = ni->ni_txseqs[tid];
2934 		INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE);
2935 	}
2936 	*(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
2937 	M_SEQNO_SET(m0, seqno);
2938 
2939 	/* Return so caller can do something with it if needed */
2940 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s:  -> seqno=%d\n", __func__, seqno);
2941 	return seqno;
2942 }
2943 
2944 /*
2945  * Attempt to direct dispatch an aggregate frame to hardware.
2946  * If the frame is out of BAW, queue.
2947  * Otherwise, schedule it as a single frame.
2948  */
2949 static void
2950 ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an,
2951     struct ath_txq *txq, struct ath_buf *bf)
2952 {
2953 	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
2954 	struct ieee80211_tx_ampdu *tap;
2955 
2956 	ATH_TX_LOCK_ASSERT(sc);
2957 
2958 	tap = ath_tx_get_tx_tid(an, tid->tid);
2959 
2960 	/* paused? queue */
2961 	if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
2962 		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
2963 		/* XXX don't sched - we're paused! */
2964 		return;
2965 	}
2966 
2967 	/* outside baw? queue */
2968 	if (bf->bf_state.bfs_dobaw &&
2969 	    (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
2970 	    SEQNO(bf->bf_state.bfs_seqno)))) {
2971 		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
2972 		ath_tx_tid_sched(sc, tid);
2973 		return;
2974 	}
2975 
2976 	/*
2977 	 * This is a temporary check and should be removed once
2978 	 * all the relevant code paths have been fixed.
2979 	 *
2980 	 * During aggregate retries, it's possible that the head
2981 	 * frame will fail (which has the bfs_aggr and bfs_nframes
2982 	 * fields set for said aggregate) and will be retried as
2983 	 * a single frame.  In this instance, the values should
2984 	 * be reset or the completion code will get upset with you.
2985 	 */
2986 	if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) {
2987 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
2988 		    "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__,
2989 		    bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes);
2990 		bf->bf_state.bfs_aggr = 0;
2991 		bf->bf_state.bfs_nframes = 1;
2992 	}
2993 
2994 	/* Update CLRDMASK just before this frame is queued */
2995 	ath_tx_update_clrdmask(sc, tid, bf);
2996 
2997 	/* Direct dispatch to hardware */
2998 	ath_tx_do_ratelookup(sc, bf);
2999 	ath_tx_calc_duration(sc, bf);
3000 	ath_tx_calc_protection(sc, bf);
3001 	ath_tx_set_rtscts(sc, bf);
3002 	ath_tx_rate_fill_rcflags(sc, bf);
3003 	ath_tx_setds(sc, bf);
3004 
3005 	/* Statistics */
3006 	sc->sc_aggr_stats.aggr_low_hwq_single_pkt++;
3007 
3008 	/* Track per-TID hardware queue depth correctly */
3009 	tid->hwq_depth++;
3010 
3011 	/* Add to BAW */
3012 	if (bf->bf_state.bfs_dobaw) {
3013 		ath_tx_addto_baw(sc, an, tid, bf);
3014 		bf->bf_state.bfs_addedbaw = 1;
3015 	}
3016 
3017 	/* Set completion handler, multi-frame aggregate or not */
3018 	bf->bf_comp = ath_tx_aggr_comp;
3019 
3020 	/*
3021 	 * Update the current leak count if
3022 	 * we're leaking frames; and set the
3023 	 * MORE flag as appropriate.
3024 	 */
3025 	ath_tx_leak_count_update(sc, tid, bf);
3026 
3027 	/* Hand off to hardware */
3028 	ath_tx_handoff(sc, txq, bf);
3029 }
3030 
3031 /*
3032  * Attempt to send the packet.
3033  * If the queue isn't busy, direct-dispatch.
3034  * If the queue is busy enough, queue the given packet on the
3035  *  relevant software queue.
3036  */
3037 void
3038 ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
3039     struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
3040 {
3041 	struct ath_node *an = ATH_NODE(ni);
3042 	struct ieee80211_frame *wh;
3043 	struct ath_tid *atid;
3044 	int pri, tid;
3045 	struct mbuf *m0 = bf->bf_m;
3046 
3047 	ATH_TX_LOCK_ASSERT(sc);
3048 
3049 	/* Fetch the TID - non-QoS frames get assigned to TID 16 */
3050 	wh = mtod(m0, struct ieee80211_frame *);
3051 	pri = ath_tx_getac(sc, m0);
3052 	tid = ath_tx_gettid(sc, m0);
3053 	atid = &an->an_tid[tid];
3054 
3055 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
3056 	    __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
3057 
3058 	/* Set local packet state, used to queue packets to hardware */
3059 	/* XXX potentially duplicate info, re-check */
3060 	bf->bf_state.bfs_tid = tid;
3061 	bf->bf_state.bfs_tx_queue = txq->axq_qnum;
3062 	bf->bf_state.bfs_pri = pri;
3063 
3064 	/*
3065 	 * If the hardware queue isn't busy, queue it directly.
3066 	 * If the hardware queue is busy, queue it.
3067 	 * If the TID is paused or the traffic it outside BAW, software
3068 	 * queue it.
3069 	 *
3070 	 * If the node is in power-save and we're leaking a frame,
3071 	 * leak a single frame.
3072 	 */
3073 	if (! ath_tx_tid_can_tx_or_sched(sc, atid)) {
3074 		/* TID is paused, queue */
3075 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__);
3076 		/*
3077 		 * If the caller requested that it be sent at a high
3078 		 * priority, queue it at the head of the list.
3079 		 */
3080 		if (queue_to_head)
3081 			ATH_TID_INSERT_HEAD(atid, bf, bf_list);
3082 		else
3083 			ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3084 	} else if (ath_tx_ampdu_pending(sc, an, tid)) {
3085 		/* AMPDU pending; queue */
3086 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__);
3087 		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3088 		/* XXX sched? */
3089 	} else if (ath_tx_ampdu_running(sc, an, tid)) {
3090 		/* AMPDU running, attempt direct dispatch if possible */
3091 
3092 		/*
3093 		 * Always queue the frame to the tail of the list.
3094 		 */
3095 		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3096 
3097 		/*
3098 		 * If the hardware queue isn't busy, direct dispatch
3099 		 * the head frame in the list.  Don't schedule the
3100 		 * TID - let it build some more frames first?
3101 		 *
3102 		 * When running A-MPDU, always just check the hardware
3103 		 * queue depth against the aggregate frame limit.
3104 		 * We don't want to burst a large number of single frames
3105 		 * out to the hardware; we want to aggressively hold back.
3106 		 *
3107 		 * Otherwise, schedule the TID.
3108 		 */
3109 		/* XXX TXQ locking */
3110 		if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_aggr) {
3111 			bf = ATH_TID_FIRST(atid);
3112 			ATH_TID_REMOVE(atid, bf, bf_list);
3113 
3114 			/*
3115 			 * Ensure it's definitely treated as a non-AMPDU
3116 			 * frame - this information may have been left
3117 			 * over from a previous attempt.
3118 			 */
3119 			bf->bf_state.bfs_aggr = 0;
3120 			bf->bf_state.bfs_nframes = 1;
3121 
3122 			/* Queue to the hardware */
3123 			ath_tx_xmit_aggr(sc, an, txq, bf);
3124 			DPRINTF(sc, ATH_DEBUG_SW_TX,
3125 			    "%s: xmit_aggr\n",
3126 			    __func__);
3127 		} else {
3128 			DPRINTF(sc, ATH_DEBUG_SW_TX,
3129 			    "%s: ampdu; swq'ing\n",
3130 			    __func__);
3131 
3132 			ath_tx_tid_sched(sc, atid);
3133 		}
3134 	/*
3135 	 * If we're not doing A-MPDU, be prepared to direct dispatch
3136 	 * up to both limits if possible.  This particular corner
3137 	 * case may end up with packet starvation between aggregate
3138 	 * traffic and non-aggregate traffic: we want to ensure
3139 	 * that non-aggregate stations get a few frames queued to the
3140 	 * hardware before the aggregate station(s) get their chance.
3141 	 *
3142 	 * So if you only ever see a couple of frames direct dispatched
3143 	 * to the hardware from a non-AMPDU client, check both here
3144 	 * and in the software queue dispatcher to ensure that those
3145 	 * non-AMPDU stations get a fair chance to transmit.
3146 	 */
3147 	/* XXX TXQ locking */
3148 	} else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3149 		    (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3150 		/* AMPDU not running, attempt direct dispatch */
3151 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);
3152 		/* See if clrdmask needs to be set */
3153 		ath_tx_update_clrdmask(sc, atid, bf);
3154 
3155 		/*
3156 		 * Update the current leak count if
3157 		 * we're leaking frames; and set the
3158 		 * MORE flag as appropriate.
3159 		 */
3160 		ath_tx_leak_count_update(sc, atid, bf);
3161 
3162 		/*
3163 		 * Dispatch the frame.
3164 		 */
3165 		ath_tx_xmit_normal(sc, txq, bf);
3166 	} else {
3167 		/* Busy; queue */
3168 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__);
3169 		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3170 		ath_tx_tid_sched(sc, atid);
3171 	}
3172 }
3173 
3174 /*
3175  * Only set the clrdmask bit if none of the nodes are currently
3176  * filtered.
3177  *
3178  * XXX TODO: go through all the callers and check to see
3179  * which are being called in the context of looping over all
3180  * TIDs (eg, if all tids are being paused, resumed, etc.)
3181  * That'll avoid O(n^2) complexity here.
3182  */
3183 static void
3184 ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an)
3185 {
3186 	int i;
3187 
3188 	ATH_TX_LOCK_ASSERT(sc);
3189 
3190 	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3191 		if (an->an_tid[i].isfiltered == 1)
3192 			return;
3193 	}
3194 	an->clrdmask = 1;
3195 }
3196 
3197 /*
3198  * Configure the per-TID node state.
3199  *
3200  * This likely belongs in if_ath_node.c but I can't think of anywhere
3201  * else to put it just yet.
3202  *
3203  * This sets up the SLISTs and the mutex as appropriate.
3204  */
3205 void
3206 ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an)
3207 {
3208 	int i, j;
3209 	struct ath_tid *atid;
3210 
3211 	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3212 		atid = &an->an_tid[i];
3213 
3214 		/* XXX now with this bzer(), is the field 0'ing needed? */
3215 		bzero(atid, sizeof(*atid));
3216 
3217 		TAILQ_INIT(&atid->tid_q);
3218 		TAILQ_INIT(&atid->filtq.tid_q);
3219 		atid->tid = i;
3220 		atid->an = an;
3221 		for (j = 0; j < ATH_TID_MAX_BUFS; j++)
3222 			atid->tx_buf[j] = NULL;
3223 		atid->baw_head = atid->baw_tail = 0;
3224 		atid->paused = 0;
3225 		atid->sched = 0;
3226 		atid->hwq_depth = 0;
3227 		atid->cleanup_inprogress = 0;
3228 		if (i == IEEE80211_NONQOS_TID)
3229 			atid->ac = ATH_NONQOS_TID_AC;
3230 		else
3231 			atid->ac = TID_TO_WME_AC(i);
3232 	}
3233 	an->clrdmask = 1;	/* Always start by setting this bit */
3234 }
3235 
3236 /*
3237  * Pause the current TID. This stops packets from being transmitted
3238  * on it.
3239  *
3240  * Since this is also called from upper layers as well as the driver,
3241  * it will get the TID lock.
3242  */
3243 static void
3244 ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
3245 {
3246 
3247 	ATH_TX_LOCK_ASSERT(sc);
3248 	tid->paused++;
3249 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, paused = %d\n",
3250 	    __func__,
3251 	    tid->an->an_node.ni_macaddr, ":",
3252 	    tid->tid,
3253 	    tid->paused);
3254 }
3255 
3256 /*
3257  * Unpause the current TID, and schedule it if needed.
3258  */
3259 static void
3260 ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)
3261 {
3262 	ATH_TX_LOCK_ASSERT(sc);
3263 
3264 	/*
3265 	 * There's some odd places where ath_tx_tid_resume() is called
3266 	 * when it shouldn't be; this works around that particular issue
3267 	 * until it's actually resolved.
3268 	 */
3269 	if (tid->paused == 0) {
3270 		device_printf(sc->sc_dev,
3271 		    "%s: [%6D]: tid=%d, paused=0?\n",
3272 		    __func__,
3273 		    tid->an->an_node.ni_macaddr, ":",
3274 		    tid->tid);
3275 	} else {
3276 		tid->paused--;
3277 	}
3278 
3279 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3280 	    "%s: [%6D]: tid=%d, unpaused = %d\n",
3281 	    __func__,
3282 	    tid->an->an_node.ni_macaddr, ":",
3283 	    tid->tid,
3284 	    tid->paused);
3285 
3286 	if (tid->paused)
3287 		return;
3288 
3289 	/*
3290 	 * Override the clrdmask configuration for the next frame
3291 	 * from this TID, just to get the ball rolling.
3292 	 */
3293 	ath_tx_set_clrdmask(sc, tid->an);
3294 
3295 	if (tid->axq_depth == 0)
3296 		return;
3297 
3298 	/* XXX isfiltered shouldn't ever be 0 at this point */
3299 	if (tid->isfiltered == 1) {
3300 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n",
3301 		    __func__);
3302 		return;
3303 	}
3304 
3305 	ath_tx_tid_sched(sc, tid);
3306 
3307 	/*
3308 	 * Queue the software TX scheduler.
3309 	 */
3310 	ath_tx_swq_kick(sc);
3311 }
3312 
3313 /*
3314  * Add the given ath_buf to the TID filtered frame list.
3315  * This requires the TID be filtered.
3316  */
3317 static void
3318 ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid,
3319     struct ath_buf *bf)
3320 {
3321 
3322 	ATH_TX_LOCK_ASSERT(sc);
3323 
3324 	if (!tid->isfiltered)
3325 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n",
3326 		    __func__);
3327 
3328 	DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf);
3329 
3330 	/* Set the retry bit and bump the retry counter */
3331 	ath_tx_set_retry(sc, bf);
3332 	sc->sc_stats.ast_tx_swfiltered++;
3333 
3334 	ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list);
3335 }
3336 
3337 /*
3338  * Handle a completed filtered frame from the given TID.
3339  * This just enables/pauses the filtered frame state if required
3340  * and appends the filtered frame to the filtered queue.
3341  */
3342 static void
3343 ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid,
3344     struct ath_buf *bf)
3345 {
3346 
3347 	ATH_TX_LOCK_ASSERT(sc);
3348 
3349 	if (! tid->isfiltered) {
3350 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n",
3351 		    __func__, tid->tid);
3352 		tid->isfiltered = 1;
3353 		ath_tx_tid_pause(sc, tid);
3354 	}
3355 
3356 	/* Add the frame to the filter queue */
3357 	ath_tx_tid_filt_addbuf(sc, tid, bf);
3358 }
3359 
3360 /*
3361  * Complete the filtered frame TX completion.
3362  *
3363  * If there are no more frames in the hardware queue, unpause/unfilter
3364  * the TID if applicable.  Otherwise we will wait for a node PS transition
3365  * to unfilter.
3366  */
3367 static void
3368 ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid)
3369 {
3370 	struct ath_buf *bf;
3371 	int do_resume = 0;
3372 
3373 	ATH_TX_LOCK_ASSERT(sc);
3374 
3375 	if (tid->hwq_depth != 0)
3376 		return;
3377 
3378 	DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n",
3379 	    __func__, tid->tid);
3380 	if (tid->isfiltered == 1) {
3381 		tid->isfiltered = 0;
3382 		do_resume = 1;
3383 	}
3384 
3385 	/* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */
3386 	ath_tx_set_clrdmask(sc, tid->an);
3387 
3388 	/* XXX this is really quite inefficient */
3389 	while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) {
3390 		ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3391 		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3392 	}
3393 
3394 	/* And only resume if we had paused before */
3395 	if (do_resume)
3396 		ath_tx_tid_resume(sc, tid);
3397 }
3398 
3399 /*
3400  * Called when a single (aggregate or otherwise) frame is completed.
3401  *
3402  * Returns 0 if the buffer could be added to the filtered list
3403  * (cloned or otherwise), 1 if the buffer couldn't be added to the
3404  * filtered list (failed clone; expired retry) and the caller should
3405  * free it and handle it like a failure (eg by sending a BAR.)
3406  *
3407  * since the buffer may be cloned, bf must be not touched after this
3408  * if the return value is 0.
3409  */
3410 static int
3411 ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid,
3412     struct ath_buf *bf)
3413 {
3414 	struct ath_buf *nbf;
3415 	int retval;
3416 
3417 	ATH_TX_LOCK_ASSERT(sc);
3418 
3419 	/*
3420 	 * Don't allow a filtered frame to live forever.
3421 	 */
3422 	if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3423 		sc->sc_stats.ast_tx_swretrymax++;
3424 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3425 		    "%s: bf=%p, seqno=%d, exceeded retries\n",
3426 		    __func__,
3427 		    bf,
3428 		    SEQNO(bf->bf_state.bfs_seqno));
3429 		retval = 1; /* error */
3430 		goto finish;
3431 	}
3432 
3433 	/*
3434 	 * A busy buffer can't be added to the retry list.
3435 	 * It needs to be cloned.
3436 	 */
3437 	if (bf->bf_flags & ATH_BUF_BUSY) {
3438 		nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3439 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3440 		    "%s: busy buffer clone: %p -> %p\n",
3441 		    __func__, bf, nbf);
3442 	} else {
3443 		nbf = bf;
3444 	}
3445 
3446 	if (nbf == NULL) {
3447 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3448 		    "%s: busy buffer couldn't be cloned (%p)!\n",
3449 		    __func__, bf);
3450 		retval = 1; /* error */
3451 	} else {
3452 		ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3453 		retval = 0; /* ok */
3454 	}
3455 finish:
3456 	ath_tx_tid_filt_comp_complete(sc, tid);
3457 
3458 	return (retval);
3459 }
3460 
3461 static void
3462 ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid,
3463     struct ath_buf *bf_first, ath_bufhead *bf_q)
3464 {
3465 	struct ath_buf *bf, *bf_next, *nbf;
3466 
3467 	ATH_TX_LOCK_ASSERT(sc);
3468 
3469 	bf = bf_first;
3470 	while (bf) {
3471 		bf_next = bf->bf_next;
3472 		bf->bf_next = NULL;	/* Remove it from the aggr list */
3473 
3474 		/*
3475 		 * Don't allow a filtered frame to live forever.
3476 		 */
3477 		if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3478 			sc->sc_stats.ast_tx_swretrymax++;
3479 			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3480 			    "%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n",
3481 			    __func__,
3482 			    tid->tid,
3483 			    bf,
3484 			    SEQNO(bf->bf_state.bfs_seqno));
3485 			TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3486 			goto next;
3487 		}
3488 
3489 		if (bf->bf_flags & ATH_BUF_BUSY) {
3490 			nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3491 			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3492 			    "%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n",
3493 			    __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno));
3494 		} else {
3495 			nbf = bf;
3496 		}
3497 
3498 		/*
3499 		 * If the buffer couldn't be cloned, add it to bf_q;
3500 		 * the caller will free the buffer(s) as required.
3501 		 */
3502 		if (nbf == NULL) {
3503 			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3504 			    "%s: tid=%d, buffer couldn't be cloned! (%p) seqno=%d\n",
3505 			    __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno));
3506 			TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3507 		} else {
3508 			ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3509 		}
3510 next:
3511 		bf = bf_next;
3512 	}
3513 
3514 	ath_tx_tid_filt_comp_complete(sc, tid);
3515 }
3516 
3517 /*
3518  * Suspend the queue because we need to TX a BAR.
3519  */
3520 static void
3521 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid)
3522 {
3523 
3524 	ATH_TX_LOCK_ASSERT(sc);
3525 
3526 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3527 	    "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n",
3528 	    __func__,
3529 	    tid->tid,
3530 	    tid->bar_wait,
3531 	    tid->bar_tx);
3532 
3533 	/* We shouldn't be called when bar_tx is 1 */
3534 	if (tid->bar_tx) {
3535 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3536 		    "%s: bar_tx is 1?!\n", __func__);
3537 	}
3538 
3539 	/* If we've already been called, just be patient. */
3540 	if (tid->bar_wait)
3541 		return;
3542 
3543 	/* Wait! */
3544 	tid->bar_wait = 1;
3545 
3546 	/* Only one pause, no matter how many frames fail */
3547 	ath_tx_tid_pause(sc, tid);
3548 }
3549 
3550 /*
3551  * We've finished with BAR handling - either we succeeded or
3552  * failed. Either way, unsuspend TX.
3553  */
3554 static void
3555 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid)
3556 {
3557 
3558 	ATH_TX_LOCK_ASSERT(sc);
3559 
3560 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3561 	    "%s: %6D: TID=%d, called\n",
3562 	    __func__,
3563 	    tid->an->an_node.ni_macaddr,
3564 	    ":",
3565 	    tid->tid);
3566 
3567 	if (tid->bar_tx == 0 || tid->bar_wait == 0) {
3568 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3569 		    "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3570 		    __func__, tid->an->an_node.ni_macaddr, ":",
3571 		    tid->tid, tid->bar_tx, tid->bar_wait);
3572 	}
3573 
3574 	tid->bar_tx = tid->bar_wait = 0;
3575 	ath_tx_tid_resume(sc, tid);
3576 }
3577 
3578 /*
3579  * Return whether we're ready to TX a BAR frame.
3580  *
3581  * Requires the TID lock be held.
3582  */
3583 static int
3584 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid)
3585 {
3586 
3587 	ATH_TX_LOCK_ASSERT(sc);
3588 
3589 	if (tid->bar_wait == 0 || tid->hwq_depth > 0)
3590 		return (0);
3591 
3592 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3593 	    "%s: %6D: TID=%d, bar ready\n",
3594 	    __func__,
3595 	    tid->an->an_node.ni_macaddr,
3596 	    ":",
3597 	    tid->tid);
3598 
3599 	return (1);
3600 }
3601 
3602 /*
3603  * Check whether the current TID is ready to have a BAR
3604  * TXed and if so, do the TX.
3605  *
3606  * Since the TID/TXQ lock can't be held during a call to
3607  * ieee80211_send_bar(), we have to do the dirty thing of unlocking it,
3608  * sending the BAR and locking it again.
3609  *
3610  * Eventually, the code to send the BAR should be broken out
3611  * from this routine so the lock doesn't have to be reacquired
3612  * just to be immediately dropped by the caller.
3613  */
3614 static void
3615 ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid)
3616 {
3617 	struct ieee80211_tx_ampdu *tap;
3618 
3619 	ATH_TX_LOCK_ASSERT(sc);
3620 
3621 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3622 	    "%s: %6D: TID=%d, called\n",
3623 	    __func__,
3624 	    tid->an->an_node.ni_macaddr,
3625 	    ":",
3626 	    tid->tid);
3627 
3628 	tap = ath_tx_get_tx_tid(tid->an, tid->tid);
3629 
3630 	/*
3631 	 * This is an error condition!
3632 	 */
3633 	if (tid->bar_wait == 0 || tid->bar_tx == 1) {
3634 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3635 		    "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3636 		    __func__, tid->an->an_node.ni_macaddr, ":",
3637 		    tid->tid, tid->bar_tx, tid->bar_wait);
3638 		return;
3639 	}
3640 
3641 	/* Don't do anything if we still have pending frames */
3642 	if (tid->hwq_depth > 0) {
3643 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3644 		    "%s: %6D: TID=%d, hwq_depth=%d, waiting\n",
3645 		    __func__,
3646 		    tid->an->an_node.ni_macaddr,
3647 		    ":",
3648 		    tid->tid,
3649 		    tid->hwq_depth);
3650 		return;
3651 	}
3652 
3653 	/* We're now about to TX */
3654 	tid->bar_tx = 1;
3655 
3656 	/*
3657 	 * Override the clrdmask configuration for the next frame,
3658 	 * just to get the ball rolling.
3659 	 */
3660 	ath_tx_set_clrdmask(sc, tid->an);
3661 
3662 	/*
3663 	 * Calculate new BAW left edge, now that all frames have either
3664 	 * succeeded or failed.
3665 	 *
3666 	 * XXX verify this is _actually_ the valid value to begin at!
3667 	 */
3668 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3669 	    "%s: %6D: TID=%d, new BAW left edge=%d\n",
3670 	    __func__,
3671 	    tid->an->an_node.ni_macaddr,
3672 	    ":",
3673 	    tid->tid,
3674 	    tap->txa_start);
3675 
3676 	/* Try sending the BAR frame */
3677 	/* We can't hold the lock here! */
3678 
3679 	ATH_TX_UNLOCK(sc);
3680 	if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) {
3681 		/* Success? Now we wait for notification that it's done */
3682 		ATH_TX_LOCK(sc);
3683 		return;
3684 	}
3685 
3686 	/* Failure? For now, warn loudly and continue */
3687 	ATH_TX_LOCK(sc);
3688 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3689 	    "%s: %6D: TID=%d, failed to TX BAR, continue!\n",
3690 	    __func__, tid->an->an_node.ni_macaddr, ":",
3691 	    tid->tid);
3692 	ath_tx_tid_bar_unsuspend(sc, tid);
3693 }
3694 
3695 static void
3696 ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an,
3697     struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)
3698 {
3699 
3700 	ATH_TX_LOCK_ASSERT(sc);
3701 
3702 	/*
3703 	 * If the current TID is running AMPDU, update
3704 	 * the BAW.
3705 	 */
3706 	if (ath_tx_ampdu_running(sc, an, tid->tid) &&
3707 	    bf->bf_state.bfs_dobaw) {
3708 		/*
3709 		 * Only remove the frame from the BAW if it's
3710 		 * been transmitted at least once; this means
3711 		 * the frame was in the BAW to begin with.
3712 		 */
3713 		if (bf->bf_state.bfs_retries > 0) {
3714 			ath_tx_update_baw(sc, an, tid, bf);
3715 			bf->bf_state.bfs_dobaw = 0;
3716 		}
3717 #if 0
3718 		/*
3719 		 * This has become a non-fatal error now
3720 		 */
3721 		if (! bf->bf_state.bfs_addedbaw)
3722 			DPRINTF(sc, ATH_DEBUG_SW_TX_BAW
3723 			    "%s: wasn't added: seqno %d\n",
3724 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
3725 #endif
3726 	}
3727 
3728 	/* Strip it out of an aggregate list if it was in one */
3729 	bf->bf_next = NULL;
3730 
3731 	/* Insert on the free queue to be freed by the caller */
3732 	TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
3733 }
3734 
3735 static void
3736 ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,
3737     const char *pfx, struct ath_tid *tid, struct ath_buf *bf)
3738 {
3739 	struct ieee80211_node *ni = &an->an_node;
3740 	struct ath_txq *txq;
3741 	struct ieee80211_tx_ampdu *tap;
3742 
3743 	txq = sc->sc_ac2q[tid->ac];
3744 	tap = ath_tx_get_tx_tid(an, tid->tid);
3745 
3746 	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3747 	    "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, "
3748 	    "seqno=%d, retry=%d\n",
3749 	    __func__,
3750 	    pfx,
3751 	    ni->ni_macaddr,
3752 	    ":",
3753 	    bf,
3754 	    bf->bf_state.bfs_addedbaw,
3755 	    bf->bf_state.bfs_dobaw,
3756 	    SEQNO(bf->bf_state.bfs_seqno),
3757 	    bf->bf_state.bfs_retries);
3758 	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3759 	    "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3760 	    __func__,
3761 	    pfx,
3762 	    ni->ni_macaddr,
3763 	    ":",
3764 	    bf,
3765 	    txq->axq_qnum,
3766 	    txq->axq_depth,
3767 	    txq->axq_aggr_depth);
3768 	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3769 	    "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, "
3770 	      "isfiltered=%d\n",
3771 	    __func__,
3772 	    pfx,
3773 	    ni->ni_macaddr,
3774 	    ":",
3775 	    bf,
3776 	    tid->axq_depth,
3777 	    tid->hwq_depth,
3778 	    tid->bar_wait,
3779 	    tid->isfiltered);
3780 	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3781 	    "%s: %s: %6D: tid %d: "
3782 	    "sched=%d, paused=%d, "
3783 	    "incomp=%d, baw_head=%d, "
3784 	    "baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
3785 	     __func__,
3786 	     pfx,
3787 	     ni->ni_macaddr,
3788 	     ":",
3789 	     tid->tid,
3790 	     tid->sched, tid->paused,
3791 	     tid->incomp, tid->baw_head,
3792 	     tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
3793 	     ni->ni_txseqs[tid->tid]);
3794 
3795 	/* XXX Dump the frame, see what it is? */
3796 	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
3797 		ieee80211_dump_pkt(ni->ni_ic,
3798 		    mtod(bf->bf_m, const uint8_t *),
3799 		    bf->bf_m->m_len, 0, -1);
3800 }
3801 
3802 /*
3803  * Free any packets currently pending in the software TX queue.
3804  *
3805  * This will be called when a node is being deleted.
3806  *
3807  * It can also be called on an active node during an interface
3808  * reset or state transition.
3809  *
3810  * (From Linux/reference):
3811  *
3812  * TODO: For frame(s) that are in the retry state, we will reuse the
3813  * sequence number(s) without setting the retry bit. The
3814  * alternative is to give up on these and BAR the receiver's window
3815  * forward.
3816  */
3817 static void
3818 ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,
3819     struct ath_tid *tid, ath_bufhead *bf_cq)
3820 {
3821 	struct ath_buf *bf;
3822 	struct ieee80211_tx_ampdu *tap;
3823 	struct ieee80211_node *ni = &an->an_node;
3824 	int t;
3825 
3826 	tap = ath_tx_get_tx_tid(an, tid->tid);
3827 
3828 	ATH_TX_LOCK_ASSERT(sc);
3829 
3830 	/* Walk the queue, free frames */
3831 	t = 0;
3832 	for (;;) {
3833 		bf = ATH_TID_FIRST(tid);
3834 		if (bf == NULL) {
3835 			break;
3836 		}
3837 
3838 		if (t == 0) {
3839 			ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
3840 //			t = 1;
3841 		}
3842 
3843 		ATH_TID_REMOVE(tid, bf, bf_list);
3844 		ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3845 	}
3846 
3847 	/* And now, drain the filtered frame queue */
3848 	t = 0;
3849 	for (;;) {
3850 		bf = ATH_TID_FILT_FIRST(tid);
3851 		if (bf == NULL)
3852 			break;
3853 
3854 		if (t == 0) {
3855 			ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
3856 //			t = 1;
3857 		}
3858 
3859 		ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3860 		ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3861 	}
3862 
3863 	/*
3864 	 * Override the clrdmask configuration for the next frame
3865 	 * in case there is some future transmission, just to get
3866 	 * the ball rolling.
3867 	 *
3868 	 * This won't hurt things if the TID is about to be freed.
3869 	 */
3870 	ath_tx_set_clrdmask(sc, tid->an);
3871 
3872 	/*
3873 	 * Now that it's completed, grab the TID lock and update
3874 	 * the sequence number and BAW window.
3875 	 * Because sequence numbers have been assigned to frames
3876 	 * that haven't been sent yet, it's entirely possible
3877 	 * we'll be called with some pending frames that have not
3878 	 * been transmitted.
3879 	 *
3880 	 * The cleaner solution is to do the sequence number allocation
3881 	 * when the packet is first transmitted - and thus the "retries"
3882 	 * check above would be enough to update the BAW/seqno.
3883 	 */
3884 
3885 	/* But don't do it for non-QoS TIDs */
3886 	if (tap) {
3887 #if 1
3888 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3889 		    "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n",
3890 		    __func__,
3891 		    ni->ni_macaddr,
3892 		    ":",
3893 		    an,
3894 		    tid->tid,
3895 		    tap->txa_start);
3896 #endif
3897 		ni->ni_txseqs[tid->tid] = tap->txa_start;
3898 		tid->baw_tail = tid->baw_head;
3899 	}
3900 }
3901 
3902 /*
3903  * Reset the TID state.  This must be only called once the node has
3904  * had its frames flushed from this TID, to ensure that no other
3905  * pause / unpause logic can kick in.
3906  */
3907 static void
3908 ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
3909 {
3910 
3911 #if 0
3912 	tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
3913 	tid->paused = tid->sched = tid->addba_tx_pending = 0;
3914 	tid->incomp = tid->cleanup_inprogress = 0;
3915 #endif
3916 
3917 	/*
3918 	 * If we have a bar_wait set, we need to unpause the TID
3919 	 * here.  Otherwise once cleanup has finished, the TID won't
3920 	 * have the right paused counter.
3921 	 *
3922 	 * XXX I'm not going through resume here - I don't want the
3923 	 * node to be rescheuled just yet.  This however should be
3924 	 * methodized!
3925 	 */
3926 	if (tid->bar_wait) {
3927 		if (tid->paused > 0) {
3928 			tid->paused --;
3929 		}
3930 	}
3931 
3932 	/*
3933 	 * XXX same with a currently filtered TID.
3934 	 *
3935 	 * Since this is being called during a flush, we assume that
3936 	 * the filtered frame list is actually empty.
3937 	 *
3938 	 * XXX TODO: add in a check to ensure that the filtered queue
3939 	 * depth is actually 0!
3940 	 */
3941 	if (tid->isfiltered) {
3942 		if (tid->paused > 0) {
3943 			tid->paused --;
3944 		}
3945 	}
3946 
3947 	/*
3948 	 * Clear BAR, filtered frames, scheduled and ADDBA pending.
3949 	 * The TID may be going through cleanup from the last association
3950 	 * where things in the BAW are still in the hardware queue.
3951 	 */
3952 	tid->bar_wait = 0;
3953 	tid->bar_tx = 0;
3954 	tid->isfiltered = 0;
3955 	tid->sched = 0;
3956 	tid->addba_tx_pending = 0;
3957 
3958 	/*
3959 	 * XXX TODO: it may just be enough to walk the HWQs and mark
3960 	 * frames for that node as non-aggregate; or mark the ath_node
3961 	 * with something that indicates that aggregation is no longer
3962 	 * occurring.  Then we can just toss the BAW complaints and
3963 	 * do a complete hard reset of state here - no pause, no
3964 	 * complete counter, etc.
3965 	 */
3966 
3967 }
3968 
3969 /*
3970  * Flush all software queued packets for the given node.
3971  *
3972  * This occurs when a completion handler frees the last buffer
3973  * for a node, and the node is thus freed. This causes the node
3974  * to be cleaned up, which ends up calling ath_tx_node_flush.
3975  */
3976 void
3977 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
3978 {
3979 	int tid;
3980 	ath_bufhead bf_cq;
3981 	struct ath_buf *bf;
3982 
3983 	TAILQ_INIT(&bf_cq);
3984 
3985 	ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",
3986 	    &an->an_node);
3987 
3988 	ATH_TX_LOCK(sc);
3989 	DPRINTF(sc, ATH_DEBUG_NODE,
3990 	    "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, "
3991 	    "swq_depth=%d, clrdmask=%d, leak_count=%d\n",
3992 	    __func__,
3993 	    an->an_node.ni_macaddr,
3994 	    ":",
3995 	    an->an_is_powersave,
3996 	    an->an_stack_psq,
3997 	    an->an_tim_set,
3998 	    an->an_swq_depth,
3999 	    an->clrdmask,
4000 	    an->an_leak_count);
4001 
4002 	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
4003 		struct ath_tid *atid = &an->an_tid[tid];
4004 
4005 		/* Free packets */
4006 		ath_tx_tid_drain(sc, an, atid, &bf_cq);
4007 
4008 		/* Remove this tid from the list of active tids */
4009 		ath_tx_tid_unsched(sc, atid);
4010 
4011 		/* Reset the per-TID pause, BAR, etc state */
4012 		ath_tx_tid_reset(sc, atid);
4013 	}
4014 
4015 	/*
4016 	 * Clear global leak count
4017 	 */
4018 	an->an_leak_count = 0;
4019 	ATH_TX_UNLOCK(sc);
4020 
4021 	/* Handle completed frames */
4022 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4023 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4024 		ath_tx_default_comp(sc, bf, 0);
4025 	}
4026 }
4027 
4028 /*
4029  * Drain all the software TXQs currently with traffic queued.
4030  */
4031 void
4032 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4033 {
4034 	struct ath_tid *tid;
4035 	ath_bufhead bf_cq;
4036 	struct ath_buf *bf;
4037 
4038 	TAILQ_INIT(&bf_cq);
4039 	ATH_TX_LOCK(sc);
4040 
4041 	/*
4042 	 * Iterate over all active tids for the given txq,
4043 	 * flushing and unsched'ing them
4044 	 */
4045 	while (! TAILQ_EMPTY(&txq->axq_tidq)) {
4046 		tid = TAILQ_FIRST(&txq->axq_tidq);
4047 		ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
4048 		ath_tx_tid_unsched(sc, tid);
4049 	}
4050 
4051 	ATH_TX_UNLOCK(sc);
4052 
4053 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4054 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4055 		ath_tx_default_comp(sc, bf, 0);
4056 	}
4057 }
4058 
4059 /*
4060  * Handle completion of non-aggregate session frames.
4061  *
4062  * This (currently) doesn't implement software retransmission of
4063  * non-aggregate frames!
4064  *
4065  * Software retransmission of non-aggregate frames needs to obey
4066  * the strict sequence number ordering, and drop any frames that
4067  * will fail this.
4068  *
4069  * For now, filtered frames and frame transmission will cause
4070  * all kinds of issues.  So we don't support them.
4071  *
4072  * So anyone queuing frames via ath_tx_normal_xmit() or
4073  * ath_tx_hw_queue_norm() must override and set CLRDMASK.
4074  */
4075 void
4076 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4077 {
4078 	struct ieee80211_node *ni = bf->bf_node;
4079 	struct ath_node *an = ATH_NODE(ni);
4080 	int tid = bf->bf_state.bfs_tid;
4081 	struct ath_tid *atid = &an->an_tid[tid];
4082 	struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4083 
4084 	/* The TID state is protected behind the TXQ lock */
4085 	ATH_TX_LOCK(sc);
4086 
4087 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
4088 	    __func__, bf, fail, atid->hwq_depth - 1);
4089 
4090 	atid->hwq_depth--;
4091 
4092 #if 0
4093 	/*
4094 	 * If the frame was filtered, stick it on the filter frame
4095 	 * queue and complain about it.  It shouldn't happen!
4096 	 */
4097 	if ((ts->ts_status & HAL_TXERR_FILT) ||
4098 	    (ts->ts_status != 0 && atid->isfiltered)) {
4099 		DPRINTF(sc, ATH_DEBUG_SW_TX,
4100 		    "%s: isfiltered=%d, ts_status=%d: huh?\n",
4101 		    __func__,
4102 		    atid->isfiltered,
4103 		    ts->ts_status);
4104 		ath_tx_tid_filt_comp_buf(sc, atid, bf);
4105 	}
4106 #endif
4107 	if (atid->isfiltered)
4108 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__);
4109 	if (atid->hwq_depth < 0)
4110 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
4111 		    __func__, atid->hwq_depth);
4112 
4113 	/* If the TID is being cleaned up, track things */
4114 	/* XXX refactor! */
4115 	if (atid->cleanup_inprogress) {
4116 		atid->incomp--;
4117 		if (atid->incomp == 0) {
4118 			DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4119 			    "%s: TID %d: cleaned up! resume!\n",
4120 			    __func__, tid);
4121 			atid->cleanup_inprogress = 0;
4122 			ath_tx_tid_resume(sc, atid);
4123 		}
4124 	}
4125 
4126 	/*
4127 	 * If the queue is filtered, potentially mark it as complete
4128 	 * and reschedule it as needed.
4129 	 *
4130 	 * This is required as there may be a subsequent TX descriptor
4131 	 * for this end-node that has CLRDMASK set, so it's quite possible
4132 	 * that a filtered frame will be followed by a non-filtered
4133 	 * (complete or otherwise) frame.
4134 	 *
4135 	 * XXX should we do this before we complete the frame?
4136 	 */
4137 	if (atid->isfiltered)
4138 		ath_tx_tid_filt_comp_complete(sc, atid);
4139 	ATH_TX_UNLOCK(sc);
4140 
4141 	/*
4142 	 * punt to rate control if we're not being cleaned up
4143 	 * during a hw queue drain and the frame wanted an ACK.
4144 	 */
4145 	if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
4146 		ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
4147 		    ts, bf->bf_state.bfs_pktlen,
4148 		    1, (ts->ts_status == 0) ? 0 : 1);
4149 
4150 	ath_tx_default_comp(sc, bf, fail);
4151 }
4152 
4153 /*
4154  * Handle cleanup of aggregate session packets that aren't
4155  * an A-MPDU.
4156  *
4157  * There's no need to update the BAW here - the session is being
4158  * torn down.
4159  */
4160 static void
4161 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4162 {
4163 	struct ieee80211_node *ni = bf->bf_node;
4164 	struct ath_node *an = ATH_NODE(ni);
4165 	int tid = bf->bf_state.bfs_tid;
4166 	struct ath_tid *atid = &an->an_tid[tid];
4167 
4168 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",
4169 	    __func__, tid, atid->incomp);
4170 
4171 	ATH_TX_LOCK(sc);
4172 	atid->incomp--;
4173 
4174 	/* XXX refactor! */
4175 	if (bf->bf_state.bfs_dobaw) {
4176 		ath_tx_update_baw(sc, an, atid, bf);
4177 		if (!bf->bf_state.bfs_addedbaw)
4178 			DPRINTF(sc, ATH_DEBUG_SW_TX,
4179 			    "%s: wasn't added: seqno %d\n",
4180 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
4181 	}
4182 
4183 	if (atid->incomp == 0) {
4184 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4185 		    "%s: TID %d: cleaned up! resume!\n",
4186 		    __func__, tid);
4187 		atid->cleanup_inprogress = 0;
4188 		ath_tx_tid_resume(sc, atid);
4189 	}
4190 	ATH_TX_UNLOCK(sc);
4191 
4192 	ath_tx_default_comp(sc, bf, 0);
4193 }
4194 
4195 
4196 /*
4197  * This as it currently stands is a bit dumb.  Ideally we'd just
4198  * fail the frame the normal way and have it permanently fail
4199  * via the normal aggregate completion path.
4200  */
4201 static void
4202 ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an,
4203     int tid, struct ath_buf *bf_head, ath_bufhead *bf_cq)
4204 {
4205 	struct ath_tid *atid = &an->an_tid[tid];
4206 	struct ath_buf *bf, *bf_next;
4207 
4208 	ATH_TX_LOCK_ASSERT(sc);
4209 
4210 	/*
4211 	 * Remove this frame from the queue.
4212 	 */
4213 	ATH_TID_REMOVE(atid, bf_head, bf_list);
4214 
4215 	/*
4216 	 * Loop over all the frames in the aggregate.
4217 	 */
4218 	bf = bf_head;
4219 	while (bf != NULL) {
4220 		bf_next = bf->bf_next;	/* next aggregate frame, or NULL */
4221 
4222 		/*
4223 		 * If it's been added to the BAW we need to kick
4224 		 * it out of the BAW before we continue.
4225 		 *
4226 		 * XXX if it's an aggregate, assert that it's in the
4227 		 * BAW - we shouldn't have it be in an aggregate
4228 		 * otherwise!
4229 		 */
4230 		if (bf->bf_state.bfs_addedbaw) {
4231 			ath_tx_update_baw(sc, an, atid, bf);
4232 			bf->bf_state.bfs_dobaw = 0;
4233 		}
4234 
4235 		/*
4236 		 * Give it the default completion handler.
4237 		 */
4238 		bf->bf_comp = ath_tx_normal_comp;
4239 		bf->bf_next = NULL;
4240 
4241 		/*
4242 		 * Add it to the list to free.
4243 		 */
4244 		TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
4245 
4246 		/*
4247 		 * Now advance to the next frame in the aggregate.
4248 		 */
4249 		bf = bf_next;
4250 	}
4251 }
4252 
4253 /*
4254  * Performs transmit side cleanup when TID changes from aggregated to
4255  * unaggregated and during reassociation.
4256  *
4257  * For now, this just tosses everything from the TID software queue
4258  * whether or not it has been retried and marks the TID as
4259  * pending completion if there's anything for this TID queued to
4260  * the hardware.
4261  *
4262  * The caller is responsible for pausing the TID and unpausing the
4263  * TID if no cleanup was required. Otherwise the cleanup path will
4264  * unpause the TID once the last hardware queued frame is completed.
4265  */
4266 static void
4267 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid,
4268     ath_bufhead *bf_cq)
4269 {
4270 	struct ath_tid *atid = &an->an_tid[tid];
4271 	struct ath_buf *bf, *bf_next;
4272 
4273 	ATH_TX_LOCK_ASSERT(sc);
4274 
4275 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4276 	    "%s: TID %d: called; inprogress=%d\n", __func__, tid,
4277 	    atid->cleanup_inprogress);
4278 
4279 	/*
4280 	 * Move the filtered frames to the TX queue, before
4281 	 * we run off and discard/process things.
4282 	 */
4283 
4284 	/* XXX this is really quite inefficient */
4285 	while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) {
4286 		ATH_TID_FILT_REMOVE(atid, bf, bf_list);
4287 		ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4288 	}
4289 
4290 	/*
4291 	 * Update the frames in the software TX queue:
4292 	 *
4293 	 * + Discard retry frames in the queue
4294 	 * + Fix the completion function to be non-aggregate
4295 	 */
4296 	bf = ATH_TID_FIRST(atid);
4297 	while (bf) {
4298 		/*
4299 		 * Grab the next frame in the list, we may
4300 		 * be fiddling with the list.
4301 		 */
4302 		bf_next = TAILQ_NEXT(bf, bf_list);
4303 
4304 		/*
4305 		 * Free the frame and all subframes.
4306 		 */
4307 		ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq);
4308 
4309 		/*
4310 		 * Next frame!
4311 		 */
4312 		bf = bf_next;
4313 	}
4314 
4315 	/*
4316 	 * If there's anything in the hardware queue we wait
4317 	 * for the TID HWQ to empty.
4318 	 */
4319 	if (atid->hwq_depth > 0) {
4320 		/*
4321 		 * XXX how about we kill atid->incomp, and instead
4322 		 * replace it with a macro that checks that atid->hwq_depth
4323 		 * is 0?
4324 		 */
4325 		atid->incomp = atid->hwq_depth;
4326 		atid->cleanup_inprogress = 1;
4327 	}
4328 
4329 	if (atid->cleanup_inprogress)
4330 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4331 		    "%s: TID %d: cleanup needed: %d packets\n",
4332 		    __func__, tid, atid->incomp);
4333 
4334 	/* Owner now must free completed frames */
4335 }
4336 
4337 static struct ath_buf *
4338 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
4339     struct ath_tid *tid, struct ath_buf *bf)
4340 {
4341 	struct ath_buf *nbf;
4342 	int error;
4343 
4344 	/*
4345 	 * Clone the buffer.  This will handle the dma unmap and
4346 	 * copy the node reference to the new buffer.  If this
4347 	 * works out, 'bf' will have no DMA mapping, no mbuf
4348 	 * pointer and no node reference.
4349 	 */
4350 	nbf = ath_buf_clone(sc, bf);
4351 
4352 #if 0
4353 	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n",
4354 	    __func__);
4355 #endif
4356 
4357 	if (nbf == NULL) {
4358 		/* Failed to clone */
4359 		DPRINTF(sc, ATH_DEBUG_XMIT,
4360 		    "%s: failed to clone a busy buffer\n",
4361 		    __func__);
4362 		return NULL;
4363 	}
4364 
4365 	/* Setup the dma for the new buffer */
4366 	error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
4367 	if (error != 0) {
4368 		DPRINTF(sc, ATH_DEBUG_XMIT,
4369 		    "%s: failed to setup dma for clone\n",
4370 		    __func__);
4371 		/*
4372 		 * Put this at the head of the list, not tail;
4373 		 * that way it doesn't interfere with the
4374 		 * busy buffer logic (which uses the tail of
4375 		 * the list.)
4376 		 */
4377 		ATH_TXBUF_LOCK(sc);
4378 		ath_returnbuf_head(sc, nbf);
4379 		ATH_TXBUF_UNLOCK(sc);
4380 		return NULL;
4381 	}
4382 
4383 	/* Update BAW if required, before we free the original buf */
4384 	if (bf->bf_state.bfs_dobaw)
4385 		ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);
4386 
4387 	/* Free original buffer; return new buffer */
4388 	ath_freebuf(sc, bf);
4389 
4390 	return nbf;
4391 }
4392 
4393 /*
4394  * Handle retrying an unaggregate frame in an aggregate
4395  * session.
4396  *
4397  * If too many retries occur, pause the TID, wait for
4398  * any further retransmits (as there's no reason why
4399  * non-aggregate frames in an aggregate session are
4400  * transmitted in-order; they just have to be in-BAW)
4401  * and then queue a BAR.
4402  */
4403 static void
4404 ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4405 {
4406 	struct ieee80211_node *ni = bf->bf_node;
4407 	struct ath_node *an = ATH_NODE(ni);
4408 	int tid = bf->bf_state.bfs_tid;
4409 	struct ath_tid *atid = &an->an_tid[tid];
4410 	struct ieee80211_tx_ampdu *tap;
4411 
4412 	ATH_TX_LOCK(sc);
4413 
4414 	tap = ath_tx_get_tx_tid(an, tid);
4415 
4416 	/*
4417 	 * If the buffer is marked as busy, we can't directly
4418 	 * reuse it. Instead, try to clone the buffer.
4419 	 * If the clone is successful, recycle the old buffer.
4420 	 * If the clone is unsuccessful, set bfs_retries to max
4421 	 * to force the next bit of code to free the buffer
4422 	 * for us.
4423 	 */
4424 	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4425 	    (bf->bf_flags & ATH_BUF_BUSY)) {
4426 		struct ath_buf *nbf;
4427 		nbf = ath_tx_retry_clone(sc, an, atid, bf);
4428 		if (nbf)
4429 			/* bf has been freed at this point */
4430 			bf = nbf;
4431 		else
4432 			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4433 	}
4434 
4435 	if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4436 		DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4437 		    "%s: exceeded retries; seqno %d\n",
4438 		    __func__, SEQNO(bf->bf_state.bfs_seqno));
4439 		sc->sc_stats.ast_tx_swretrymax++;
4440 
4441 		/* Update BAW anyway */
4442 		if (bf->bf_state.bfs_dobaw) {
4443 			ath_tx_update_baw(sc, an, atid, bf);
4444 			if (! bf->bf_state.bfs_addedbaw)
4445 				DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4446 				    "%s: wasn't added: seqno %d\n",
4447 				    __func__, SEQNO(bf->bf_state.bfs_seqno));
4448 		}
4449 		bf->bf_state.bfs_dobaw = 0;
4450 
4451 		/* Suspend the TX queue and get ready to send the BAR */
4452 		ath_tx_tid_bar_suspend(sc, atid);
4453 
4454 		/* Send the BAR if there are no other frames waiting */
4455 		if (ath_tx_tid_bar_tx_ready(sc, atid))
4456 			ath_tx_tid_bar_tx(sc, atid);
4457 
4458 		ATH_TX_UNLOCK(sc);
4459 
4460 		/* Free buffer, bf is free after this call */
4461 		ath_tx_default_comp(sc, bf, 0);
4462 		return;
4463 	}
4464 
4465 	/*
4466 	 * This increments the retry counter as well as
4467 	 * sets the retry flag in the ath_buf and packet
4468 	 * body.
4469 	 */
4470 	ath_tx_set_retry(sc, bf);
4471 	sc->sc_stats.ast_tx_swretries++;
4472 
4473 	/*
4474 	 * Insert this at the head of the queue, so it's
4475 	 * retried before any current/subsequent frames.
4476 	 */
4477 	ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4478 	ath_tx_tid_sched(sc, atid);
4479 	/* Send the BAR if there are no other frames waiting */
4480 	if (ath_tx_tid_bar_tx_ready(sc, atid))
4481 		ath_tx_tid_bar_tx(sc, atid);
4482 
4483 	ATH_TX_UNLOCK(sc);
4484 }
4485 
4486 /*
4487  * Common code for aggregate excessive retry/subframe retry.
4488  * If retrying, queues buffers to bf_q. If not, frees the
4489  * buffers.
4490  *
4491  * XXX should unify this with ath_tx_aggr_retry_unaggr()
4492  */
4493 static int
4494 ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,
4495     ath_bufhead *bf_q)
4496 {
4497 	struct ieee80211_node *ni = bf->bf_node;
4498 	struct ath_node *an = ATH_NODE(ni);
4499 	int tid = bf->bf_state.bfs_tid;
4500 	struct ath_tid *atid = &an->an_tid[tid];
4501 
4502 	ATH_TX_LOCK_ASSERT(sc);
4503 
4504 	/* XXX clr11naggr should be done for all subframes */
4505 	ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
4506 	ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);
4507 
4508 	/* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */
4509 
4510 	/*
4511 	 * If the buffer is marked as busy, we can't directly
4512 	 * reuse it. Instead, try to clone the buffer.
4513 	 * If the clone is successful, recycle the old buffer.
4514 	 * If the clone is unsuccessful, set bfs_retries to max
4515 	 * to force the next bit of code to free the buffer
4516 	 * for us.
4517 	 */
4518 	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4519 	    (bf->bf_flags & ATH_BUF_BUSY)) {
4520 		struct ath_buf *nbf;
4521 		nbf = ath_tx_retry_clone(sc, an, atid, bf);
4522 		if (nbf)
4523 			/* bf has been freed at this point */
4524 			bf = nbf;
4525 		else
4526 			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4527 	}
4528 
4529 	if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4530 		sc->sc_stats.ast_tx_swretrymax++;
4531 		DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4532 		    "%s: max retries: seqno %d\n",
4533 		    __func__, SEQNO(bf->bf_state.bfs_seqno));
4534 		ath_tx_update_baw(sc, an, atid, bf);
4535 		if (!bf->bf_state.bfs_addedbaw)
4536 			DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4537 			    "%s: wasn't added: seqno %d\n",
4538 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
4539 		bf->bf_state.bfs_dobaw = 0;
4540 		return 1;
4541 	}
4542 
4543 	ath_tx_set_retry(sc, bf);
4544 	sc->sc_stats.ast_tx_swretries++;
4545 	bf->bf_next = NULL;		/* Just to make sure */
4546 
4547 	/* Clear the aggregate state */
4548 	bf->bf_state.bfs_aggr = 0;
4549 	bf->bf_state.bfs_ndelim = 0;	/* ??? needed? */
4550 	bf->bf_state.bfs_nframes = 1;
4551 
4552 	TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
4553 	return 0;
4554 }
4555 
4556 /*
4557  * error pkt completion for an aggregate destination
4558  */
4559 static void
4560 ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first,
4561     struct ath_tid *tid)
4562 {
4563 	struct ieee80211_node *ni = bf_first->bf_node;
4564 	struct ath_node *an = ATH_NODE(ni);
4565 	struct ath_buf *bf_next, *bf;
4566 	ath_bufhead bf_q;
4567 	int drops = 0;
4568 	struct ieee80211_tx_ampdu *tap;
4569 	ath_bufhead bf_cq;
4570 
4571 	TAILQ_INIT(&bf_q);
4572 	TAILQ_INIT(&bf_cq);
4573 
4574 	/*
4575 	 * Update rate control - all frames have failed.
4576 	 *
4577 	 * XXX use the length in the first frame in the series;
4578 	 * XXX just so things are consistent for now.
4579 	 */
4580 	ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
4581 	    &bf_first->bf_status.ds_txstat,
4582 	    bf_first->bf_state.bfs_pktlen,
4583 	    bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);
4584 
4585 	ATH_TX_LOCK(sc);
4586 	tap = ath_tx_get_tx_tid(an, tid->tid);
4587 	sc->sc_stats.ast_tx_aggr_failall++;
4588 
4589 	/* Retry all subframes */
4590 	bf = bf_first;
4591 	while (bf) {
4592 		bf_next = bf->bf_next;
4593 		bf->bf_next = NULL;	/* Remove it from the aggr list */
4594 		sc->sc_stats.ast_tx_aggr_fail++;
4595 		if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4596 			drops++;
4597 			bf->bf_next = NULL;
4598 			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4599 		}
4600 		bf = bf_next;
4601 	}
4602 
4603 	/* Prepend all frames to the beginning of the queue */
4604 	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4605 		TAILQ_REMOVE(&bf_q, bf, bf_list);
4606 		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
4607 	}
4608 
4609 	/*
4610 	 * Schedule the TID to be re-tried.
4611 	 */
4612 	ath_tx_tid_sched(sc, tid);
4613 
4614 	/*
4615 	 * send bar if we dropped any frames
4616 	 *
4617 	 * Keep the txq lock held for now, as we need to ensure
4618 	 * that ni_txseqs[] is consistent (as it's being updated
4619 	 * in the ifnet TX context or raw TX context.)
4620 	 */
4621 	if (drops) {
4622 		/* Suspend the TX queue and get ready to send the BAR */
4623 		ath_tx_tid_bar_suspend(sc, tid);
4624 	}
4625 
4626 	/*
4627 	 * Send BAR if required
4628 	 */
4629 	if (ath_tx_tid_bar_tx_ready(sc, tid))
4630 		ath_tx_tid_bar_tx(sc, tid);
4631 
4632 	ATH_TX_UNLOCK(sc);
4633 
4634 	/* Complete frames which errored out */
4635 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4636 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4637 		ath_tx_default_comp(sc, bf, 0);
4638 	}
4639 }
4640 
4641 /*
4642  * Handle clean-up of packets from an aggregate list.
4643  *
4644  * There's no need to update the BAW here - the session is being
4645  * torn down.
4646  */
4647 static void
4648 ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
4649 {
4650 	struct ath_buf *bf, *bf_next;
4651 	struct ieee80211_node *ni = bf_first->bf_node;
4652 	struct ath_node *an = ATH_NODE(ni);
4653 	int tid = bf_first->bf_state.bfs_tid;
4654 	struct ath_tid *atid = &an->an_tid[tid];
4655 
4656 	ATH_TX_LOCK(sc);
4657 
4658 	/* update incomp */
4659 	atid->incomp--;
4660 
4661 	/* Update the BAW */
4662 	bf = bf_first;
4663 	while (bf) {
4664 		/* XXX refactor! */
4665 		if (bf->bf_state.bfs_dobaw) {
4666 			ath_tx_update_baw(sc, an, atid, bf);
4667 			if (!bf->bf_state.bfs_addedbaw)
4668 				DPRINTF(sc, ATH_DEBUG_SW_TX,
4669 				    "%s: wasn't added: seqno %d\n",
4670 				    __func__, SEQNO(bf->bf_state.bfs_seqno));
4671 		}
4672 		bf = bf->bf_next;
4673 	}
4674 
4675 	if (atid->incomp == 0) {
4676 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4677 		    "%s: TID %d: cleaned up! resume!\n",
4678 		    __func__, tid);
4679 		atid->cleanup_inprogress = 0;
4680 		ath_tx_tid_resume(sc, atid);
4681 	}
4682 
4683 	/* Send BAR if required */
4684 	/* XXX why would we send a BAR when transitioning to non-aggregation? */
4685 	/*
4686 	 * XXX TODO: we should likely just tear down the BAR state here,
4687 	 * rather than sending a BAR.
4688 	 */
4689 	if (ath_tx_tid_bar_tx_ready(sc, atid))
4690 		ath_tx_tid_bar_tx(sc, atid);
4691 
4692 	ATH_TX_UNLOCK(sc);
4693 
4694 	/* Handle frame completion as individual frames */
4695 	bf = bf_first;
4696 	while (bf) {
4697 		bf_next = bf->bf_next;
4698 		bf->bf_next = NULL;
4699 		ath_tx_default_comp(sc, bf, 1);
4700 		bf = bf_next;
4701 	}
4702 }
4703 
4704 /*
4705  * Handle completion of an set of aggregate frames.
4706  *
4707  * Note: the completion handler is the last descriptor in the aggregate,
4708  * not the last descriptor in the first frame.
4709  */
4710 static void
4711 ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,
4712     int fail)
4713 {
4714 	//struct ath_desc *ds = bf->bf_lastds;
4715 	struct ieee80211_node *ni = bf_first->bf_node;
4716 	struct ath_node *an = ATH_NODE(ni);
4717 	int tid = bf_first->bf_state.bfs_tid;
4718 	struct ath_tid *atid = &an->an_tid[tid];
4719 	struct ath_tx_status ts;
4720 	struct ieee80211_tx_ampdu *tap;
4721 	ath_bufhead bf_q;
4722 	ath_bufhead bf_cq;
4723 	int seq_st, tx_ok;
4724 	int hasba, isaggr;
4725 	uint32_t ba[2];
4726 	struct ath_buf *bf, *bf_next;
4727 	int ba_index;
4728 	int drops = 0;
4729 	int nframes = 0, nbad = 0, nf;
4730 	int pktlen;
4731 	/* XXX there's too much on the stack? */
4732 	struct ath_rc_series rc[ATH_RC_NUM];
4733 	int txseq;
4734 
4735 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
4736 	    __func__, atid->hwq_depth);
4737 
4738 	/*
4739 	 * Take a copy; this may be needed -after- bf_first
4740 	 * has been completed and freed.
4741 	 */
4742 	ts = bf_first->bf_status.ds_txstat;
4743 
4744 	TAILQ_INIT(&bf_q);
4745 	TAILQ_INIT(&bf_cq);
4746 
4747 	/* The TID state is kept behind the TXQ lock */
4748 	ATH_TX_LOCK(sc);
4749 
4750 	atid->hwq_depth--;
4751 	if (atid->hwq_depth < 0)
4752 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n",
4753 		    __func__, atid->hwq_depth);
4754 
4755 	/*
4756 	 * If the TID is filtered, handle completing the filter
4757 	 * transition before potentially kicking it to the cleanup
4758 	 * function.
4759 	 *
4760 	 * XXX this is duplicate work, ew.
4761 	 */
4762 	if (atid->isfiltered)
4763 		ath_tx_tid_filt_comp_complete(sc, atid);
4764 
4765 	/*
4766 	 * Punt cleanup to the relevant function, not our problem now
4767 	 */
4768 	if (atid->cleanup_inprogress) {
4769 		if (atid->isfiltered)
4770 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4771 			    "%s: isfiltered=1, normal_comp?\n",
4772 			    __func__);
4773 		ATH_TX_UNLOCK(sc);
4774 		ath_tx_comp_cleanup_aggr(sc, bf_first);
4775 		return;
4776 	}
4777 
4778 	/*
4779 	 * If the frame is filtered, transition to filtered frame
4780 	 * mode and add this to the filtered frame list.
4781 	 *
4782 	 * XXX TODO: figure out how this interoperates with
4783 	 * BAR, pause and cleanup states.
4784 	 */
4785 	if ((ts.ts_status & HAL_TXERR_FILT) ||
4786 	    (ts.ts_status != 0 && atid->isfiltered)) {
4787 		if (fail != 0)
4788 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4789 			    "%s: isfiltered=1, fail=%d\n", __func__, fail);
4790 		ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);
4791 
4792 		/* Remove from BAW */
4793 		TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) {
4794 			if (bf->bf_state.bfs_addedbaw)
4795 				drops++;
4796 			if (bf->bf_state.bfs_dobaw) {
4797 				ath_tx_update_baw(sc, an, atid, bf);
4798 				if (!bf->bf_state.bfs_addedbaw)
4799 					DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4800 					    "%s: wasn't added: seqno %d\n",
4801 					    __func__,
4802 					    SEQNO(bf->bf_state.bfs_seqno));
4803 			}
4804 			bf->bf_state.bfs_dobaw = 0;
4805 		}
4806 		/*
4807 		 * If any intermediate frames in the BAW were dropped when
4808 		 * handling filtering things, send a BAR.
4809 		 */
4810 		if (drops)
4811 			ath_tx_tid_bar_suspend(sc, atid);
4812 
4813 		/*
4814 		 * Finish up by sending a BAR if required and freeing
4815 		 * the frames outside of the TX lock.
4816 		 */
4817 		goto finish_send_bar;
4818 	}
4819 
4820 	/*
4821 	 * XXX for now, use the first frame in the aggregate for
4822 	 * XXX rate control completion; it's at least consistent.
4823 	 */
4824 	pktlen = bf_first->bf_state.bfs_pktlen;
4825 
4826 	/*
4827 	 * Handle errors first!
4828 	 *
4829 	 * Here, handle _any_ error as a "exceeded retries" error.
4830 	 * Later on (when filtered frames are to be specially handled)
4831 	 * it'll have to be expanded.
4832 	 */
4833 #if 0
4834 	if (ts.ts_status & HAL_TXERR_XRETRY) {
4835 #endif
4836 	if (ts.ts_status != 0) {
4837 		ATH_TX_UNLOCK(sc);
4838 		ath_tx_comp_aggr_error(sc, bf_first, atid);
4839 		return;
4840 	}
4841 
4842 	tap = ath_tx_get_tx_tid(an, tid);
4843 
4844 	/*
4845 	 * extract starting sequence and block-ack bitmap
4846 	 */
4847 	/* XXX endian-ness of seq_st, ba? */
4848 	seq_st = ts.ts_seqnum;
4849 	hasba = !! (ts.ts_flags & HAL_TX_BA);
4850 	tx_ok = (ts.ts_status == 0);
4851 	isaggr = bf_first->bf_state.bfs_aggr;
4852 	ba[0] = ts.ts_ba_low;
4853 	ba[1] = ts.ts_ba_high;
4854 
4855 	/*
4856 	 * Copy the TX completion status and the rate control
4857 	 * series from the first descriptor, as it may be freed
4858 	 * before the rate control code can get its grubby fingers
4859 	 * into things.
4860 	 */
4861 	memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));
4862 
4863 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4864 	    "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, "
4865 	    "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n",
4866 	    __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
4867 	    isaggr, seq_st, hasba, ba[0], ba[1]);
4868 
4869 	/*
4870 	 * The reference driver doesn't do this; it simply ignores
4871 	 * this check in its entirety.
4872 	 *
4873 	 * I've seen this occur when using iperf to send traffic
4874 	 * out tid 1 - the aggregate frames are all marked as TID 1,
4875 	 * but the TXSTATUS has TID=0.  So, let's just ignore this
4876 	 * check.
4877 	 */
4878 #if 0
4879 	/* Occasionally, the MAC sends a tx status for the wrong TID. */
4880 	if (tid != ts.ts_tid) {
4881 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n",
4882 		    __func__, tid, ts.ts_tid);
4883 		tx_ok = 0;
4884 	}
4885 #endif
4886 
4887 	/* AR5416 BA bug; this requires an interface reset */
4888 	if (isaggr && tx_ok && (! hasba)) {
4889 		device_printf(sc->sc_dev,
4890 		    "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, "
4891 		    "seq_st=%d\n",
4892 		    __func__, hasba, tx_ok, isaggr, seq_st);
4893 		/* XXX TODO: schedule an interface reset */
4894 #ifdef ATH_DEBUG
4895 		ath_printtxbuf(sc, bf_first,
4896 		    sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
4897 #endif
4898 	}
4899 
4900 	/*
4901 	 * Walk the list of frames, figure out which ones were correctly
4902 	 * sent and which weren't.
4903 	 */
4904 	bf = bf_first;
4905 	nf = bf_first->bf_state.bfs_nframes;
4906 
4907 	/* bf_first is going to be invalid once this list is walked */
4908 	bf_first = NULL;
4909 
4910 	/*
4911 	 * Walk the list of completed frames and determine
4912 	 * which need to be completed and which need to be
4913 	 * retransmitted.
4914 	 *
4915 	 * For completed frames, the completion functions need
4916 	 * to be called at the end of this function as the last
4917 	 * node reference may free the node.
4918 	 *
4919 	 * Finally, since the TXQ lock can't be held during the
4920 	 * completion callback (to avoid lock recursion),
4921 	 * the completion calls have to be done outside of the
4922 	 * lock.
4923 	 */
4924 	while (bf) {
4925 		nframes++;
4926 		ba_index = ATH_BA_INDEX(seq_st,
4927 		    SEQNO(bf->bf_state.bfs_seqno));
4928 		bf_next = bf->bf_next;
4929 		bf->bf_next = NULL;	/* Remove it from the aggr list */
4930 
4931 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4932 		    "%s: checking bf=%p seqno=%d; ack=%d\n",
4933 		    __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
4934 		    ATH_BA_ISSET(ba, ba_index));
4935 
4936 		if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
4937 			sc->sc_stats.ast_tx_aggr_ok++;
4938 			ath_tx_update_baw(sc, an, atid, bf);
4939 			bf->bf_state.bfs_dobaw = 0;
4940 			if (!bf->bf_state.bfs_addedbaw)
4941 				DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4942 				    "%s: wasn't added: seqno %d\n",
4943 				    __func__, SEQNO(bf->bf_state.bfs_seqno));
4944 			bf->bf_next = NULL;
4945 			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4946 		} else {
4947 			sc->sc_stats.ast_tx_aggr_fail++;
4948 			if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4949 				drops++;
4950 				bf->bf_next = NULL;
4951 				TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4952 			}
4953 			nbad++;
4954 		}
4955 		bf = bf_next;
4956 	}
4957 
4958 	/*
4959 	 * Now that the BAW updates have been done, unlock
4960 	 *
4961 	 * txseq is grabbed before the lock is released so we
4962 	 * have a consistent view of what -was- in the BAW.
4963 	 * Anything after this point will not yet have been
4964 	 * TXed.
4965 	 */
4966 	txseq = tap->txa_start;
4967 	ATH_TX_UNLOCK(sc);
4968 
4969 	if (nframes != nf)
4970 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4971 		    "%s: num frames seen=%d; bf nframes=%d\n",
4972 		    __func__, nframes, nf);
4973 
4974 	/*
4975 	 * Now we know how many frames were bad, call the rate
4976 	 * control code.
4977 	 */
4978 	if (fail == 0)
4979 		ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes,
4980 		    nbad);
4981 
4982 	/*
4983 	 * send bar if we dropped any frames
4984 	 */
4985 	if (drops) {
4986 		/* Suspend the TX queue and get ready to send the BAR */
4987 		ATH_TX_LOCK(sc);
4988 		ath_tx_tid_bar_suspend(sc, atid);
4989 		ATH_TX_UNLOCK(sc);
4990 	}
4991 
4992 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4993 	    "%s: txa_start now %d\n", __func__, tap->txa_start);
4994 
4995 	ATH_TX_LOCK(sc);
4996 
4997 	/* Prepend all frames to the beginning of the queue */
4998 	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4999 		TAILQ_REMOVE(&bf_q, bf, bf_list);
5000 		ATH_TID_INSERT_HEAD(atid, bf, bf_list);
5001 	}
5002 
5003 	/*
5004 	 * Reschedule to grab some further frames.
5005 	 */
5006 	ath_tx_tid_sched(sc, atid);
5007 
5008 	/*
5009 	 * If the queue is filtered, re-schedule as required.
5010 	 *
5011 	 * This is required as there may be a subsequent TX descriptor
5012 	 * for this end-node that has CLRDMASK set, so it's quite possible
5013 	 * that a filtered frame will be followed by a non-filtered
5014 	 * (complete or otherwise) frame.
5015 	 *
5016 	 * XXX should we do this before we complete the frame?
5017 	 */
5018 	if (atid->isfiltered)
5019 		ath_tx_tid_filt_comp_complete(sc, atid);
5020 
5021 finish_send_bar:
5022 
5023 	/*
5024 	 * Send BAR if required
5025 	 */
5026 	if (ath_tx_tid_bar_tx_ready(sc, atid))
5027 		ath_tx_tid_bar_tx(sc, atid);
5028 
5029 	ATH_TX_UNLOCK(sc);
5030 
5031 	/* Do deferred completion */
5032 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5033 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
5034 		ath_tx_default_comp(sc, bf, 0);
5035 	}
5036 }
5037 
5038 /*
5039  * Handle completion of unaggregated frames in an ADDBA
5040  * session.
5041  *
5042  * Fail is set to 1 if the entry is being freed via a call to
5043  * ath_tx_draintxq().
5044  */
5045 static void
5046 ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
5047 {
5048 	struct ieee80211_node *ni = bf->bf_node;
5049 	struct ath_node *an = ATH_NODE(ni);
5050 	int tid = bf->bf_state.bfs_tid;
5051 	struct ath_tid *atid = &an->an_tid[tid];
5052 	struct ath_tx_status ts;
5053 	int drops = 0;
5054 
5055 	/*
5056 	 * Take a copy of this; filtering/cloning the frame may free the
5057 	 * bf pointer.
5058 	 */
5059 	ts = bf->bf_status.ds_txstat;
5060 
5061 	/*
5062 	 * Update rate control status here, before we possibly
5063 	 * punt to retry or cleanup.
5064 	 *
5065 	 * Do it outside of the TXQ lock.
5066 	 */
5067 	if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
5068 		ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
5069 		    &bf->bf_status.ds_txstat,
5070 		    bf->bf_state.bfs_pktlen,
5071 		    1, (ts.ts_status == 0) ? 0 : 1);
5072 
5073 	/*
5074 	 * This is called early so atid->hwq_depth can be tracked.
5075 	 * This unfortunately means that it's released and regrabbed
5076 	 * during retry and cleanup. That's rather inefficient.
5077 	 */
5078 	ATH_TX_LOCK(sc);
5079 
5080 	if (tid == IEEE80211_NONQOS_TID)
5081 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__);
5082 
5083 	DPRINTF(sc, ATH_DEBUG_SW_TX,
5084 	    "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n",
5085 	    __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
5086 	    SEQNO(bf->bf_state.bfs_seqno));
5087 
5088 	atid->hwq_depth--;
5089 	if (atid->hwq_depth < 0)
5090 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
5091 		    __func__, atid->hwq_depth);
5092 
5093 	/*
5094 	 * If the TID is filtered, handle completing the filter
5095 	 * transition before potentially kicking it to the cleanup
5096 	 * function.
5097 	 */
5098 	if (atid->isfiltered)
5099 		ath_tx_tid_filt_comp_complete(sc, atid);
5100 
5101 	/*
5102 	 * If a cleanup is in progress, punt to comp_cleanup;
5103 	 * rather than handling it here. It's thus their
5104 	 * responsibility to clean up, call the completion
5105 	 * function in net80211, etc.
5106 	 */
5107 	if (atid->cleanup_inprogress) {
5108 		if (atid->isfiltered)
5109 			DPRINTF(sc, ATH_DEBUG_SW_TX,
5110 			    "%s: isfiltered=1, normal_comp?\n",
5111 			    __func__);
5112 		ATH_TX_UNLOCK(sc);
5113 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n",
5114 		    __func__);
5115 		ath_tx_comp_cleanup_unaggr(sc, bf);
5116 		return;
5117 	}
5118 
5119 	/*
5120 	 * XXX TODO: how does cleanup, BAR and filtered frame handling
5121 	 * overlap?
5122 	 *
5123 	 * If the frame is filtered OR if it's any failure but
5124 	 * the TID is filtered, the frame must be added to the
5125 	 * filtered frame list.
5126 	 *
5127 	 * However - a busy buffer can't be added to the filtered
5128 	 * list as it will end up being recycled without having
5129 	 * been made available for the hardware.
5130 	 */
5131 	if ((ts.ts_status & HAL_TXERR_FILT) ||
5132 	    (ts.ts_status != 0 && atid->isfiltered)) {
5133 		int freeframe;
5134 
5135 		if (fail != 0)
5136 			DPRINTF(sc, ATH_DEBUG_SW_TX,
5137 			    "%s: isfiltered=1, fail=%d\n",
5138 			    __func__, fail);
5139 		freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
5140 		/*
5141 		 * If freeframe=0 then bf is no longer ours; don't
5142 		 * touch it.
5143 		 */
5144 		if (freeframe) {
5145 			/* Remove from BAW */
5146 			if (bf->bf_state.bfs_addedbaw)
5147 				drops++;
5148 			if (bf->bf_state.bfs_dobaw) {
5149 				ath_tx_update_baw(sc, an, atid, bf);
5150 				if (!bf->bf_state.bfs_addedbaw)
5151 					DPRINTF(sc, ATH_DEBUG_SW_TX,
5152 					    "%s: wasn't added: seqno %d\n",
5153 					    __func__, SEQNO(bf->bf_state.bfs_seqno));
5154 			}
5155 			bf->bf_state.bfs_dobaw = 0;
5156 		}
5157 
5158 		/*
5159 		 * If the frame couldn't be filtered, treat it as a drop and
5160 		 * prepare to send a BAR.
5161 		 */
5162 		if (freeframe && drops)
5163 			ath_tx_tid_bar_suspend(sc, atid);
5164 
5165 		/*
5166 		 * Send BAR if required
5167 		 */
5168 		if (ath_tx_tid_bar_tx_ready(sc, atid))
5169 			ath_tx_tid_bar_tx(sc, atid);
5170 
5171 		ATH_TX_UNLOCK(sc);
5172 		/*
5173 		 * If freeframe is set, then the frame couldn't be
5174 		 * cloned and bf is still valid.  Just complete/free it.
5175 		 */
5176 		if (freeframe)
5177 			ath_tx_default_comp(sc, bf, fail);
5178 
5179 		return;
5180 	}
5181 	/*
5182 	 * Don't bother with the retry check if all frames
5183 	 * are being failed (eg during queue deletion.)
5184 	 */
5185 #if 0
5186 	if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
5187 #endif
5188 	if (fail == 0 && ts.ts_status != 0) {
5189 		ATH_TX_UNLOCK(sc);
5190 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n",
5191 		    __func__);
5192 		ath_tx_aggr_retry_unaggr(sc, bf);
5193 		return;
5194 	}
5195 
5196 	/* Success? Complete */
5197 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
5198 	    __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
5199 	if (bf->bf_state.bfs_dobaw) {
5200 		ath_tx_update_baw(sc, an, atid, bf);
5201 		bf->bf_state.bfs_dobaw = 0;
5202 		if (!bf->bf_state.bfs_addedbaw)
5203 			DPRINTF(sc, ATH_DEBUG_SW_TX,
5204 			    "%s: wasn't added: seqno %d\n",
5205 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
5206 	}
5207 
5208 	/*
5209 	 * If the queue is filtered, re-schedule as required.
5210 	 *
5211 	 * This is required as there may be a subsequent TX descriptor
5212 	 * for this end-node that has CLRDMASK set, so it's quite possible
5213 	 * that a filtered frame will be followed by a non-filtered
5214 	 * (complete or otherwise) frame.
5215 	 *
5216 	 * XXX should we do this before we complete the frame?
5217 	 */
5218 	if (atid->isfiltered)
5219 		ath_tx_tid_filt_comp_complete(sc, atid);
5220 
5221 	/*
5222 	 * Send BAR if required
5223 	 */
5224 	if (ath_tx_tid_bar_tx_ready(sc, atid))
5225 		ath_tx_tid_bar_tx(sc, atid);
5226 
5227 	ATH_TX_UNLOCK(sc);
5228 
5229 	ath_tx_default_comp(sc, bf, fail);
5230 	/* bf is freed at this point */
5231 }
5232 
5233 void
5234 ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
5235 {
5236 	if (bf->bf_state.bfs_aggr)
5237 		ath_tx_aggr_comp_aggr(sc, bf, fail);
5238 	else
5239 		ath_tx_aggr_comp_unaggr(sc, bf, fail);
5240 }
5241 
5242 /*
5243  * Schedule some packets from the given node/TID to the hardware.
5244  *
5245  * This is the aggregate version.
5246  */
5247 void
5248 ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
5249     struct ath_tid *tid)
5250 {
5251 	struct ath_buf *bf;
5252 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5253 	struct ieee80211_tx_ampdu *tap;
5254 	ATH_AGGR_STATUS status;
5255 	ath_bufhead bf_q;
5256 
5257 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
5258 	ATH_TX_LOCK_ASSERT(sc);
5259 
5260 	/*
5261 	 * XXX TODO: If we're called for a queue that we're leaking frames to,
5262 	 * ensure we only leak one.
5263 	 */
5264 
5265 	tap = ath_tx_get_tx_tid(an, tid->tid);
5266 
5267 	if (tid->tid == IEEE80211_NONQOS_TID)
5268 		DPRINTF(sc, ATH_DEBUG_SW_TX,
5269 		    "%s: called for TID=NONQOS_TID?\n", __func__);
5270 
5271 	for (;;) {
5272 		status = ATH_AGGR_DONE;
5273 
5274 		/*
5275 		 * If the upper layer has paused the TID, don't
5276 		 * queue any further packets.
5277 		 *
5278 		 * This can also occur from the completion task because
5279 		 * of packet loss; but as its serialised with this code,
5280 		 * it won't "appear" half way through queuing packets.
5281 		 */
5282 		if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5283 			break;
5284 
5285 		bf = ATH_TID_FIRST(tid);
5286 		if (bf == NULL) {
5287 			break;
5288 		}
5289 
5290 		/*
5291 		 * If the packet doesn't fall within the BAW (eg a NULL
5292 		 * data frame), schedule it directly; continue.
5293 		 */
5294 		if (! bf->bf_state.bfs_dobaw) {
5295 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5296 			    "%s: non-baw packet\n",
5297 			    __func__);
5298 			ATH_TID_REMOVE(tid, bf, bf_list);
5299 
5300 			if (bf->bf_state.bfs_nframes > 1)
5301 				DPRINTF(sc, ATH_DEBUG_SW_TX,
5302 				    "%s: aggr=%d, nframes=%d\n",
5303 				    __func__,
5304 				    bf->bf_state.bfs_aggr,
5305 				    bf->bf_state.bfs_nframes);
5306 
5307 			/*
5308 			 * This shouldn't happen - such frames shouldn't
5309 			 * ever have been queued as an aggregate in the
5310 			 * first place.  However, make sure the fields
5311 			 * are correctly setup just to be totally sure.
5312 			 */
5313 			bf->bf_state.bfs_aggr = 0;
5314 			bf->bf_state.bfs_nframes = 1;
5315 
5316 			/* Update CLRDMASK just before this frame is queued */
5317 			ath_tx_update_clrdmask(sc, tid, bf);
5318 
5319 			ath_tx_do_ratelookup(sc, bf);
5320 			ath_tx_calc_duration(sc, bf);
5321 			ath_tx_calc_protection(sc, bf);
5322 			ath_tx_set_rtscts(sc, bf);
5323 			ath_tx_rate_fill_rcflags(sc, bf);
5324 			ath_tx_setds(sc, bf);
5325 			ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5326 
5327 			sc->sc_aggr_stats.aggr_nonbaw_pkt++;
5328 
5329 			/* Queue the packet; continue */
5330 			goto queuepkt;
5331 		}
5332 
5333 		TAILQ_INIT(&bf_q);
5334 
5335 		/*
5336 		 * Do a rate control lookup on the first frame in the
5337 		 * list. The rate control code needs that to occur
5338 		 * before it can determine whether to TX.
5339 		 * It's inaccurate because the rate control code doesn't
5340 		 * really "do" aggregate lookups, so it only considers
5341 		 * the size of the first frame.
5342 		 */
5343 		ath_tx_do_ratelookup(sc, bf);
5344 		bf->bf_state.bfs_rc[3].rix = 0;
5345 		bf->bf_state.bfs_rc[3].tries = 0;
5346 
5347 		ath_tx_calc_duration(sc, bf);
5348 		ath_tx_calc_protection(sc, bf);
5349 
5350 		ath_tx_set_rtscts(sc, bf);
5351 		ath_tx_rate_fill_rcflags(sc, bf);
5352 
5353 		status = ath_tx_form_aggr(sc, an, tid, &bf_q);
5354 
5355 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5356 		    "%s: ath_tx_form_aggr() status=%d\n", __func__, status);
5357 
5358 		/*
5359 		 * No frames to be picked up - out of BAW
5360 		 */
5361 		if (TAILQ_EMPTY(&bf_q))
5362 			break;
5363 
5364 		/*
5365 		 * This assumes that the descriptor list in the ath_bufhead
5366 		 * are already linked together via bf_next pointers.
5367 		 */
5368 		bf = TAILQ_FIRST(&bf_q);
5369 
5370 		if (status == ATH_AGGR_8K_LIMITED)
5371 			sc->sc_aggr_stats.aggr_rts_aggr_limited++;
5372 
5373 		/*
5374 		 * If it's the only frame send as non-aggregate
5375 		 * assume that ath_tx_form_aggr() has checked
5376 		 * whether it's in the BAW and added it appropriately.
5377 		 */
5378 		if (bf->bf_state.bfs_nframes == 1) {
5379 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5380 			    "%s: single-frame aggregate\n", __func__);
5381 
5382 			/* Update CLRDMASK just before this frame is queued */
5383 			ath_tx_update_clrdmask(sc, tid, bf);
5384 
5385 			bf->bf_state.bfs_aggr = 0;
5386 			bf->bf_state.bfs_ndelim = 0;
5387 			ath_tx_setds(sc, bf);
5388 			ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5389 			if (status == ATH_AGGR_BAW_CLOSED)
5390 				sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
5391 			else
5392 				sc->sc_aggr_stats.aggr_single_pkt++;
5393 		} else {
5394 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5395 			    "%s: multi-frame aggregate: %d frames, "
5396 			    "length %d\n",
5397 			     __func__, bf->bf_state.bfs_nframes,
5398 			    bf->bf_state.bfs_al);
5399 			bf->bf_state.bfs_aggr = 1;
5400 			sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
5401 			sc->sc_aggr_stats.aggr_aggr_pkt++;
5402 
5403 			/* Update CLRDMASK just before this frame is queued */
5404 			ath_tx_update_clrdmask(sc, tid, bf);
5405 
5406 			/*
5407 			 * Calculate the duration/protection as required.
5408 			 */
5409 			ath_tx_calc_duration(sc, bf);
5410 			ath_tx_calc_protection(sc, bf);
5411 
5412 			/*
5413 			 * Update the rate and rtscts information based on the
5414 			 * rate decision made by the rate control code;
5415 			 * the first frame in the aggregate needs it.
5416 			 */
5417 			ath_tx_set_rtscts(sc, bf);
5418 
5419 			/*
5420 			 * Setup the relevant descriptor fields
5421 			 * for aggregation. The first descriptor
5422 			 * already points to the rest in the chain.
5423 			 */
5424 			ath_tx_setds_11n(sc, bf);
5425 
5426 		}
5427 	queuepkt:
5428 		/* Set completion handler, multi-frame aggregate or not */
5429 		bf->bf_comp = ath_tx_aggr_comp;
5430 
5431 		if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
5432 			DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__);
5433 
5434 		/*
5435 		 * Update leak count and frame config if were leaking frames.
5436 		 *
5437 		 * XXX TODO: it should update all frames in an aggregate
5438 		 * correctly!
5439 		 */
5440 		ath_tx_leak_count_update(sc, tid, bf);
5441 
5442 		/* Punt to txq */
5443 		ath_tx_handoff(sc, txq, bf);
5444 
5445 		/* Track outstanding buffer count to hardware */
5446 		/* aggregates are "one" buffer */
5447 		tid->hwq_depth++;
5448 
5449 		/*
5450 		 * Break out if ath_tx_form_aggr() indicated
5451 		 * there can't be any further progress (eg BAW is full.)
5452 		 * Checking for an empty txq is done above.
5453 		 *
5454 		 * XXX locking on txq here?
5455 		 */
5456 		/* XXX TXQ locking */
5457 		if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5458 		    (status == ATH_AGGR_BAW_CLOSED ||
5459 		     status == ATH_AGGR_LEAK_CLOSED))
5460 			break;
5461 	}
5462 }
5463 
5464 /*
5465  * Schedule some packets from the given node/TID to the hardware.
5466  *
5467  * XXX TODO: this routine doesn't enforce the maximum TXQ depth.
5468  * It just dumps frames into the TXQ.  We should limit how deep
5469  * the transmit queue can grow for frames dispatched to the given
5470  * TXQ.
5471  *
5472  * To avoid locking issues, either we need to own the TXQ lock
5473  * at this point, or we need to pass in the maximum frame count
5474  * from the caller.
5475  */
5476 void
5477 ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
5478     struct ath_tid *tid)
5479 {
5480 	struct ath_buf *bf;
5481 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5482 
5483 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
5484 	    __func__, an, tid->tid);
5485 
5486 	ATH_TX_LOCK_ASSERT(sc);
5487 
5488 	/* Check - is AMPDU pending or running? then print out something */
5489 	if (ath_tx_ampdu_pending(sc, an, tid->tid))
5490 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n",
5491 		    __func__, tid->tid);
5492 	if (ath_tx_ampdu_running(sc, an, tid->tid))
5493 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n",
5494 		    __func__, tid->tid);
5495 
5496 	for (;;) {
5497 
5498 		/*
5499 		 * If the upper layers have paused the TID, don't
5500 		 * queue any further packets.
5501 		 *
5502 		 * XXX if we are leaking frames, make sure we decrement
5503 		 * that counter _and_ we continue here.
5504 		 */
5505 		if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5506 			break;
5507 
5508 		bf = ATH_TID_FIRST(tid);
5509 		if (bf == NULL) {
5510 			break;
5511 		}
5512 
5513 		ATH_TID_REMOVE(tid, bf, bf_list);
5514 
5515 		/* Sanity check! */
5516 		if (tid->tid != bf->bf_state.bfs_tid) {
5517 			DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !="
5518 			    " tid %d\n", __func__, bf->bf_state.bfs_tid,
5519 			    tid->tid);
5520 		}
5521 		/* Normal completion handler */
5522 		bf->bf_comp = ath_tx_normal_comp;
5523 
5524 		/*
5525 		 * Override this for now, until the non-aggregate
5526 		 * completion handler correctly handles software retransmits.
5527 		 */
5528 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
5529 
5530 		/* Update CLRDMASK just before this frame is queued */
5531 		ath_tx_update_clrdmask(sc, tid, bf);
5532 
5533 		/* Program descriptors + rate control */
5534 		ath_tx_do_ratelookup(sc, bf);
5535 		ath_tx_calc_duration(sc, bf);
5536 		ath_tx_calc_protection(sc, bf);
5537 		ath_tx_set_rtscts(sc, bf);
5538 		ath_tx_rate_fill_rcflags(sc, bf);
5539 		ath_tx_setds(sc, bf);
5540 
5541 		/*
5542 		 * Update the current leak count if
5543 		 * we're leaking frames; and set the
5544 		 * MORE flag as appropriate.
5545 		 */
5546 		ath_tx_leak_count_update(sc, tid, bf);
5547 
5548 		/* Track outstanding buffer count to hardware */
5549 		/* aggregates are "one" buffer */
5550 		tid->hwq_depth++;
5551 
5552 		/* Punt to hardware or software txq */
5553 		ath_tx_handoff(sc, txq, bf);
5554 	}
5555 }
5556 
5557 /*
5558  * Schedule some packets to the given hardware queue.
5559  *
5560  * This function walks the list of TIDs (ie, ath_node TIDs
5561  * with queued traffic) and attempts to schedule traffic
5562  * from them.
5563  *
5564  * TID scheduling is implemented as a FIFO, with TIDs being
5565  * added to the end of the queue after some frames have been
5566  * scheduled.
5567  */
5568 void
5569 ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
5570 {
5571 	struct ath_tid *tid, *next, *last;
5572 
5573 	ATH_TX_LOCK_ASSERT(sc);
5574 
5575 	/*
5576 	 * Don't schedule if the hardware queue is busy.
5577 	 * This (hopefully) gives some more time to aggregate
5578 	 * some packets in the aggregation queue.
5579 	 *
5580 	 * XXX It doesn't stop a parallel sender from sneaking
5581 	 * in transmitting a frame!
5582 	 */
5583 	/* XXX TXQ locking */
5584 	if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5585 		sc->sc_aggr_stats.aggr_sched_nopkt++;
5586 		return;
5587 	}
5588 	if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5589 		sc->sc_aggr_stats.aggr_sched_nopkt++;
5590 		return;
5591 	}
5592 
5593 	last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
5594 
5595 	TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
5596 		/*
5597 		 * Suspend paused queues here; they'll be resumed
5598 		 * once the addba completes or times out.
5599 		 */
5600 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
5601 		    __func__, tid->tid, tid->paused);
5602 		ath_tx_tid_unsched(sc, tid);
5603 		/*
5604 		 * This node may be in power-save and we're leaking
5605 		 * a frame; be careful.
5606 		 */
5607 		if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
5608 			goto loop_done;
5609 		}
5610 		if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
5611 			ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
5612 		else
5613 			ath_tx_tid_hw_queue_norm(sc, tid->an, tid);
5614 
5615 		/* Not empty? Re-schedule */
5616 		if (tid->axq_depth != 0)
5617 			ath_tx_tid_sched(sc, tid);
5618 
5619 		/*
5620 		 * Give the software queue time to aggregate more
5621 		 * packets.  If we aren't running aggregation then
5622 		 * we should still limit the hardware queue depth.
5623 		 */
5624 		/* XXX TXQ locking */
5625 		if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5626 			break;
5627 		}
5628 		if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5629 			break;
5630 		}
5631 loop_done:
5632 		/*
5633 		 * If this was the last entry on the original list, stop.
5634 		 * Otherwise nodes that have been rescheduled onto the end
5635 		 * of the TID FIFO list will just keep being rescheduled.
5636 		 *
5637 		 * XXX What should we do about nodes that were paused
5638 		 * but are pending a leaking frame in response to a ps-poll?
5639 		 * They'll be put at the front of the list; so they'll
5640 		 * prematurely trigger this condition! Ew.
5641 		 */
5642 		if (tid == last)
5643 			break;
5644 	}
5645 }
5646 
5647 /*
5648  * TX addba handling
5649  */
5650 
5651 /*
5652  * Return net80211 TID struct pointer, or NULL for none
5653  */
5654 struct ieee80211_tx_ampdu *
5655 ath_tx_get_tx_tid(struct ath_node *an, int tid)
5656 {
5657 	struct ieee80211_node *ni = &an->an_node;
5658 	struct ieee80211_tx_ampdu *tap;
5659 
5660 	if (tid == IEEE80211_NONQOS_TID)
5661 		return NULL;
5662 
5663 	tap = &ni->ni_tx_ampdu[tid];
5664 	return tap;
5665 }
5666 
5667 /*
5668  * Is AMPDU-TX running?
5669  */
5670 static int
5671 ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
5672 {
5673 	struct ieee80211_tx_ampdu *tap;
5674 
5675 	if (tid == IEEE80211_NONQOS_TID)
5676 		return 0;
5677 
5678 	tap = ath_tx_get_tx_tid(an, tid);
5679 	if (tap == NULL)
5680 		return 0;	/* Not valid; default to not running */
5681 
5682 	return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
5683 }
5684 
5685 /*
5686  * Is AMPDU-TX negotiation pending?
5687  */
5688 static int
5689 ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
5690 {
5691 	struct ieee80211_tx_ampdu *tap;
5692 
5693 	if (tid == IEEE80211_NONQOS_TID)
5694 		return 0;
5695 
5696 	tap = ath_tx_get_tx_tid(an, tid);
5697 	if (tap == NULL)
5698 		return 0;	/* Not valid; default to not pending */
5699 
5700 	return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
5701 }
5702 
5703 /*
5704  * Is AMPDU-TX pending for the given TID?
5705  */
5706 
5707 
5708 /*
5709  * Method to handle sending an ADDBA request.
5710  *
5711  * We tap this so the relevant flags can be set to pause the TID
5712  * whilst waiting for the response.
5713  *
5714  * XXX there's no timeout handler we can override?
5715  */
5716 int
5717 ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5718     int dialogtoken, int baparamset, int batimeout)
5719 {
5720 	struct ath_softc *sc = ni->ni_ic->ic_softc;
5721 	int tid = tap->txa_tid;
5722 	struct ath_node *an = ATH_NODE(ni);
5723 	struct ath_tid *atid = &an->an_tid[tid];
5724 
5725 	/*
5726 	 * XXX danger Will Robinson!
5727 	 *
5728 	 * Although the taskqueue may be running and scheduling some more
5729 	 * packets, these should all be _before_ the addba sequence number.
5730 	 * However, net80211 will keep self-assigning sequence numbers
5731 	 * until addba has been negotiated.
5732 	 *
5733 	 * In the past, these packets would be "paused" (which still works
5734 	 * fine, as they're being scheduled to the driver in the same
5735 	 * serialised method which is calling the addba request routine)
5736 	 * and when the aggregation session begins, they'll be dequeued
5737 	 * as aggregate packets and added to the BAW. However, now there's
5738 	 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
5739 	 * packets. Thus they never get included in the BAW tracking and
5740 	 * this can cause the initial burst of packets after the addba
5741 	 * negotiation to "hang", as they quickly fall outside the BAW.
5742 	 *
5743 	 * The "eventual" solution should be to tag these packets with
5744 	 * dobaw. Although net80211 has given us a sequence number,
5745 	 * it'll be "after" the left edge of the BAW and thus it'll
5746 	 * fall within it.
5747 	 */
5748 	ATH_TX_LOCK(sc);
5749 	/*
5750 	 * This is a bit annoying.  Until net80211 HT code inherits some
5751 	 * (any) locking, we may have this called in parallel BUT only
5752 	 * one response/timeout will be called.  Grr.
5753 	 */
5754 	if (atid->addba_tx_pending == 0) {
5755 		ath_tx_tid_pause(sc, atid);
5756 		atid->addba_tx_pending = 1;
5757 	}
5758 	ATH_TX_UNLOCK(sc);
5759 
5760 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5761 	    "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
5762 	    __func__,
5763 	    ni->ni_macaddr,
5764 	    ":",
5765 	    dialogtoken, baparamset, batimeout);
5766 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5767 	    "%s: txa_start=%d, ni_txseqs=%d\n",
5768 	    __func__, tap->txa_start, ni->ni_txseqs[tid]);
5769 
5770 	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
5771 	    batimeout);
5772 }
5773 
5774 /*
5775  * Handle an ADDBA response.
5776  *
5777  * We unpause the queue so TX'ing can resume.
5778  *
5779  * Any packets TX'ed from this point should be "aggregate" (whether
5780  * aggregate or not) so the BAW is updated.
5781  *
5782  * Note! net80211 keeps self-assigning sequence numbers until
5783  * ampdu is negotiated. This means the initially-negotiated BAW left
5784  * edge won't match the ni->ni_txseq.
5785  *
5786  * So, being very dirty, the BAW left edge is "slid" here to match
5787  * ni->ni_txseq.
5788  *
5789  * What likely SHOULD happen is that all packets subsequent to the
5790  * addba request should be tagged as aggregate and queued as non-aggregate
5791  * frames; thus updating the BAW. For now though, I'll just slide the
5792  * window.
5793  */
5794 int
5795 ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5796     int status, int code, int batimeout)
5797 {
5798 	struct ath_softc *sc = ni->ni_ic->ic_softc;
5799 	int tid = tap->txa_tid;
5800 	struct ath_node *an = ATH_NODE(ni);
5801 	struct ath_tid *atid = &an->an_tid[tid];
5802 	int r;
5803 
5804 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5805 	    "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__,
5806 	    ni->ni_macaddr,
5807 	    ":",
5808 	    status, code, batimeout);
5809 
5810 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5811 	    "%s: txa_start=%d, ni_txseqs=%d\n",
5812 	    __func__, tap->txa_start, ni->ni_txseqs[tid]);
5813 
5814 	/*
5815 	 * Call this first, so the interface flags get updated
5816 	 * before the TID is unpaused. Otherwise a race condition
5817 	 * exists where the unpaused TID still doesn't yet have
5818 	 * IEEE80211_AGGR_RUNNING set.
5819 	 */
5820 	r = sc->sc_addba_response(ni, tap, status, code, batimeout);
5821 
5822 	ATH_TX_LOCK(sc);
5823 	atid->addba_tx_pending = 0;
5824 	/*
5825 	 * XXX dirty!
5826 	 * Slide the BAW left edge to wherever net80211 left it for us.
5827 	 * Read above for more information.
5828 	 */
5829 	tap->txa_start = ni->ni_txseqs[tid];
5830 	ath_tx_tid_resume(sc, atid);
5831 	ATH_TX_UNLOCK(sc);
5832 	return r;
5833 }
5834 
5835 
5836 /*
5837  * Stop ADDBA on a queue.
5838  *
5839  * This can be called whilst BAR TX is currently active on the queue,
5840  * so make sure this is unblocked before continuing.
5841  */
5842 void
5843 ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
5844 {
5845 	struct ath_softc *sc = ni->ni_ic->ic_softc;
5846 	int tid = tap->txa_tid;
5847 	struct ath_node *an = ATH_NODE(ni);
5848 	struct ath_tid *atid = &an->an_tid[tid];
5849 	ath_bufhead bf_cq;
5850 	struct ath_buf *bf;
5851 
5852 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n",
5853 	    __func__,
5854 	    ni->ni_macaddr,
5855 	    ":");
5856 
5857 	/*
5858 	 * Pause TID traffic early, so there aren't any races
5859 	 * Unblock the pending BAR held traffic, if it's currently paused.
5860 	 */
5861 	ATH_TX_LOCK(sc);
5862 	ath_tx_tid_pause(sc, atid);
5863 	if (atid->bar_wait) {
5864 		/*
5865 		 * bar_unsuspend() expects bar_tx == 1, as it should be
5866 		 * called from the TX completion path.  This quietens
5867 		 * the warning.  It's cleared for us anyway.
5868 		 */
5869 		atid->bar_tx = 1;
5870 		ath_tx_tid_bar_unsuspend(sc, atid);
5871 	}
5872 	ATH_TX_UNLOCK(sc);
5873 
5874 	/* There's no need to hold the TXQ lock here */
5875 	sc->sc_addba_stop(ni, tap);
5876 
5877 	/*
5878 	 * ath_tx_tid_cleanup will resume the TID if possible, otherwise
5879 	 * it'll set the cleanup flag, and it'll be unpaused once
5880 	 * things have been cleaned up.
5881 	 */
5882 	TAILQ_INIT(&bf_cq);
5883 	ATH_TX_LOCK(sc);
5884 
5885 	/*
5886 	 * In case there's a followup call to this, only call it
5887 	 * if we don't have a cleanup in progress.
5888 	 *
5889 	 * Since we've paused the queue above, we need to make
5890 	 * sure we unpause if there's already a cleanup in
5891 	 * progress - it means something else is also doing
5892 	 * this stuff, so we don't need to also keep it paused.
5893 	 */
5894 	if (atid->cleanup_inprogress) {
5895 		ath_tx_tid_resume(sc, atid);
5896 	} else {
5897 		ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
5898 		/*
5899 		 * Unpause the TID if no cleanup is required.
5900 		 */
5901 		if (! atid->cleanup_inprogress)
5902 			ath_tx_tid_resume(sc, atid);
5903 	}
5904 	ATH_TX_UNLOCK(sc);
5905 
5906 	/* Handle completing frames and fail them */
5907 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5908 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
5909 		ath_tx_default_comp(sc, bf, 1);
5910 	}
5911 
5912 }
5913 
5914 /*
5915  * Handle a node reassociation.
5916  *
5917  * We may have a bunch of frames queued to the hardware; those need
5918  * to be marked as cleanup.
5919  */
5920 void
5921 ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
5922 {
5923 	struct ath_tid *tid;
5924 	int i;
5925 	ath_bufhead bf_cq;
5926 	struct ath_buf *bf;
5927 
5928 	TAILQ_INIT(&bf_cq);
5929 
5930 	ATH_TX_UNLOCK_ASSERT(sc);
5931 
5932 	ATH_TX_LOCK(sc);
5933 	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
5934 		tid = &an->an_tid[i];
5935 		if (tid->hwq_depth == 0)
5936 			continue;
5937 		DPRINTF(sc, ATH_DEBUG_NODE,
5938 		    "%s: %6D: TID %d: cleaning up TID\n",
5939 		    __func__,
5940 		    an->an_node.ni_macaddr,
5941 		    ":",
5942 		    i);
5943 		/*
5944 		 * In case there's a followup call to this, only call it
5945 		 * if we don't have a cleanup in progress.
5946 		 */
5947 		if (! tid->cleanup_inprogress) {
5948 			ath_tx_tid_pause(sc, tid);
5949 			ath_tx_tid_cleanup(sc, an, i, &bf_cq);
5950 			/*
5951 			 * Unpause the TID if no cleanup is required.
5952 			 */
5953 			if (! tid->cleanup_inprogress)
5954 				ath_tx_tid_resume(sc, tid);
5955 		}
5956 	}
5957 	ATH_TX_UNLOCK(sc);
5958 
5959 	/* Handle completing frames and fail them */
5960 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5961 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
5962 		ath_tx_default_comp(sc, bf, 1);
5963 	}
5964 }
5965 
5966 /*
5967  * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
5968  * it simply tears down the aggregation session. Ew.
5969  *
5970  * It however will call ieee80211_ampdu_stop() which will call
5971  * ic->ic_addba_stop().
5972  *
5973  * XXX This uses a hard-coded max BAR count value; the whole
5974  * XXX BAR TX success or failure should be better handled!
5975  */
5976 void
5977 ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5978     int status)
5979 {
5980 	struct ath_softc *sc = ni->ni_ic->ic_softc;
5981 	int tid = tap->txa_tid;
5982 	struct ath_node *an = ATH_NODE(ni);
5983 	struct ath_tid *atid = &an->an_tid[tid];
5984 	int attempts = tap->txa_attempts;
5985 	int old_txa_start;
5986 
5987 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
5988 	    "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n",
5989 	    __func__,
5990 	    ni->ni_macaddr,
5991 	    ":",
5992 	    tap->txa_tid,
5993 	    atid->tid,
5994 	    status,
5995 	    attempts,
5996 	    tap->txa_start,
5997 	    tap->txa_seqpending);
5998 
5999 	/* Note: This may update the BAW details */
6000 	/*
6001 	 * XXX What if this does slide the BAW along? We need to somehow
6002 	 * XXX either fix things when it does happen, or prevent the
6003 	 * XXX seqpending value to be anything other than exactly what
6004 	 * XXX the hell we want!
6005 	 *
6006 	 * XXX So for now, how I do this inside the TX lock for now
6007 	 * XXX and just correct it afterwards? The below condition should
6008 	 * XXX never happen and if it does I need to fix all kinds of things.
6009 	 */
6010 	ATH_TX_LOCK(sc);
6011 	old_txa_start = tap->txa_start;
6012 	sc->sc_bar_response(ni, tap, status);
6013 	if (tap->txa_start != old_txa_start) {
6014 		device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",
6015 		    __func__,
6016 		    tid,
6017 		    tap->txa_start,
6018 		    old_txa_start);
6019 	}
6020 	tap->txa_start = old_txa_start;
6021 	ATH_TX_UNLOCK(sc);
6022 
6023 	/* Unpause the TID */
6024 	/*
6025 	 * XXX if this is attempt=50, the TID will be downgraded
6026 	 * XXX to a non-aggregate session. So we must unpause the
6027 	 * XXX TID here or it'll never be done.
6028 	 *
6029 	 * Also, don't call it if bar_tx/bar_wait are 0; something
6030 	 * has beaten us to the punch? (XXX figure out what?)
6031 	 */
6032 	if (status == 0 || attempts == 50) {
6033 		ATH_TX_LOCK(sc);
6034 		if (atid->bar_tx == 0 || atid->bar_wait == 0)
6035 			DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
6036 			    "%s: huh? bar_tx=%d, bar_wait=%d\n",
6037 			    __func__,
6038 			    atid->bar_tx, atid->bar_wait);
6039 		else
6040 			ath_tx_tid_bar_unsuspend(sc, atid);
6041 		ATH_TX_UNLOCK(sc);
6042 	}
6043 }
6044 
6045 /*
6046  * This is called whenever the pending ADDBA request times out.
6047  * Unpause and reschedule the TID.
6048  */
6049 void
6050 ath_addba_response_timeout(struct ieee80211_node *ni,
6051     struct ieee80211_tx_ampdu *tap)
6052 {
6053 	struct ath_softc *sc = ni->ni_ic->ic_softc;
6054 	int tid = tap->txa_tid;
6055 	struct ath_node *an = ATH_NODE(ni);
6056 	struct ath_tid *atid = &an->an_tid[tid];
6057 
6058 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
6059 	    "%s: %6D: TID=%d, called; resuming\n",
6060 	    __func__,
6061 	    ni->ni_macaddr,
6062 	    ":",
6063 	    tid);
6064 
6065 	ATH_TX_LOCK(sc);
6066 	atid->addba_tx_pending = 0;
6067 	ATH_TX_UNLOCK(sc);
6068 
6069 	/* Note: This updates the aggregate state to (again) pending */
6070 	sc->sc_addba_response_timeout(ni, tap);
6071 
6072 	/* Unpause the TID; which reschedules it */
6073 	ATH_TX_LOCK(sc);
6074 	ath_tx_tid_resume(sc, atid);
6075 	ATH_TX_UNLOCK(sc);
6076 }
6077 
6078 /*
6079  * Check if a node is asleep or not.
6080  */
6081 int
6082 ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
6083 {
6084 
6085 	ATH_TX_LOCK_ASSERT(sc);
6086 
6087 	return (an->an_is_powersave);
6088 }
6089 
6090 /*
6091  * Mark a node as currently "in powersaving."
6092  * This suspends all traffic on the node.
6093  *
6094  * This must be called with the node/tx locks free.
6095  *
6096  * XXX TODO: the locking silliness below is due to how the node
6097  * locking currently works.  Right now, the node lock is grabbed
6098  * to do rate control lookups and these are done with the TX
6099  * queue lock held.  This means the node lock can't be grabbed
6100  * first here or a LOR will occur.
6101  *
6102  * Eventually (hopefully!) the TX path code will only grab
6103  * the TXQ lock when transmitting and the ath_node lock when
6104  * doing node/TID operations.  There are other complications -
6105  * the sched/unsched operations involve walking the per-txq
6106  * 'active tid' list and this requires both locks to be held.
6107  */
6108 void
6109 ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
6110 {
6111 	struct ath_tid *atid;
6112 	struct ath_txq *txq;
6113 	int tid;
6114 
6115 	ATH_TX_UNLOCK_ASSERT(sc);
6116 
6117 	/* Suspend all traffic on the node */
6118 	ATH_TX_LOCK(sc);
6119 
6120 	if (an->an_is_powersave) {
6121 		DPRINTF(sc, ATH_DEBUG_XMIT,
6122 		    "%s: %6D: node was already asleep!\n",
6123 		    __func__, an->an_node.ni_macaddr, ":");
6124 		ATH_TX_UNLOCK(sc);
6125 		return;
6126 	}
6127 
6128 	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
6129 		atid = &an->an_tid[tid];
6130 		txq = sc->sc_ac2q[atid->ac];
6131 
6132 		ath_tx_tid_pause(sc, atid);
6133 	}
6134 
6135 	/* Mark node as in powersaving */
6136 	an->an_is_powersave = 1;
6137 
6138 	ATH_TX_UNLOCK(sc);
6139 }
6140 
6141 /*
6142  * Mark a node as currently "awake."
6143  * This resumes all traffic to the node.
6144  */
6145 void
6146 ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
6147 {
6148 	struct ath_tid *atid;
6149 	struct ath_txq *txq;
6150 	int tid;
6151 
6152 	ATH_TX_UNLOCK_ASSERT(sc);
6153 
6154 	ATH_TX_LOCK(sc);
6155 
6156 	/* !? */
6157 	if (an->an_is_powersave == 0) {
6158 		ATH_TX_UNLOCK(sc);
6159 		DPRINTF(sc, ATH_DEBUG_XMIT,
6160 		    "%s: an=%p: node was already awake\n",
6161 		    __func__, an);
6162 		return;
6163 	}
6164 
6165 	/* Mark node as awake */
6166 	an->an_is_powersave = 0;
6167 	/*
6168 	 * Clear any pending leaked frame requests
6169 	 */
6170 	an->an_leak_count = 0;
6171 
6172 	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
6173 		atid = &an->an_tid[tid];
6174 		txq = sc->sc_ac2q[atid->ac];
6175 
6176 		ath_tx_tid_resume(sc, atid);
6177 	}
6178 	ATH_TX_UNLOCK(sc);
6179 }
6180 
6181 static int
6182 ath_legacy_dma_txsetup(struct ath_softc *sc)
6183 {
6184 
6185 	/* nothing new needed */
6186 	return (0);
6187 }
6188 
6189 static int
6190 ath_legacy_dma_txteardown(struct ath_softc *sc)
6191 {
6192 
6193 	/* nothing new needed */
6194 	return (0);
6195 }
6196 
6197 void
6198 ath_xmit_setup_legacy(struct ath_softc *sc)
6199 {
6200 	/*
6201 	 * For now, just set the descriptor length to sizeof(ath_desc);
6202 	 * worry about extracting the real length out of the HAL later.
6203 	 */
6204 	sc->sc_tx_desclen = sizeof(struct ath_desc);
6205 	sc->sc_tx_statuslen = sizeof(struct ath_desc);
6206 	sc->sc_tx_nmaps = 1;	/* only one buffer per TX desc */
6207 
6208 	sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
6209 	sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
6210 	sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;
6211 
6212 	sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
6213 	sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;
6214 
6215 	sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
6216 }
6217