/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/ktr.h>

#include <machine/bus.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#include <net80211/ieee80211_ht.h>

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_tx_ht.h>

#ifdef	ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

/*
 * How many retries to perform in software
 */
#define	SWMAX_RETRIES		10

/*
 * What queue to throw the non-QoS TID traffic into
 */
#define	ATH_NONQOS_TID_AC	WME_AC_VO

#if 0
static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
#endif
static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
    int tid);
static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
    int tid);
static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
    struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
    struct ieee80211_node *ni, struct mbuf *m0, int *tid);
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf);

#ifdef	ATH_DEBUG_ALQ
void
ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf;
	int i, n;
	const char *ds;

	/* XXX we should skip out early if debugging isn't enabled! */
	bf = bf_first;

	while (bf != NULL) {
		/* XXX should ensure bf_nseg > 0! */
		if (bf->bf_nseg == 0)
			break;
		n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
		for (i = 0, ds = (const char *) bf->bf_desc;
		    i < n;
		    i++, ds += sc->sc_tx_desclen) {
			if_ath_alq_post(&sc->sc_alq,
			    ATH_ALQ_EDMA_TXDESC,
			    sc->sc_tx_desclen,
			    ds);
		}
		bf = bf->bf_next;
	}
}
#endif /* ATH_DEBUG_ALQ */

/*
 * Whether to use the 11n rate scenario functions or not
 */
static inline int
ath_tx_is_11n(struct ath_softc *sc)
{
	return ((sc->sc_ah->ah_magic == 0x20065416) ||
		    (sc->sc_ah->ah_magic == 0x19741014));
}

/*
 * Obtain the current TID from the given frame.
 *
 * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID.)
 * This has implications for which AC/priority the packet is placed
 * in.
 */
static int
ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (! IEEE80211_QOS_HAS_SEQ(wh))
		return IEEE80211_NONQOS_TID;
	else
		return WME_AC_TO_TID(pri);
}

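/*
 * Illustrative comment only (not driver logic): assuming the
 * WME_AC_TO_TID() mapping from if_ath_tx.h, the classification above
 * works out roughly as:
 *
 *	QoS data frame, M_WME_GETAC() == WME_AC_VO	-> TID 6
 *	QoS data frame, M_WME_GETAC() == WME_AC_BE	-> TID 0
 *	non-QoS data or management frame		-> TID 16
 *
 * TID 16 (IEEE80211_NONQOS_TID) is then steered to ATH_NONQOS_TID_AC
 * by ath_tx_getac() below.
 */
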
static void
ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;

	wh = mtod(bf->bf_m, struct ieee80211_frame *);
	/* Only update/resync if needed */
	if (bf->bf_state.bfs_isretried == 0) {
		wh->i_fc[1] |= IEEE80211_FC1_RETRY;
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);
	}
	bf->bf_state.bfs_isretried = 1;
	bf->bf_state.bfs_retries++;
}

/*
 * Determine what the correct AC queue for the given frame
 * should be.
 *
 * This code assumes that the TIDs map consistently to
 * the underlying hardware (or software) ath_txq.
 * Since the sender may try to set an AC which is
 * arbitrary, non-QoS TIDs may end up being put on
 * completely different ACs. There's no way to put a
 * TID into multiple ath_txq's for scheduling, so
 * for now we override the AC/TXQ selection and set
 * non-QoS TID frames into the ATH_NONQOS_TID_AC (VO) queue.
 *
 * This may be completely incorrect - specifically,
 * some management frames may end up out of order
 * compared to the QoS traffic they're controlling.
 * I'll look into this later.
 */
static int
ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);
	wh = mtod(m0, const struct ieee80211_frame *);
	if (IEEE80211_QOS_HAS_SEQ(wh))
		return pri;

	return ATH_NONQOS_TID_AC;
}

void
ath_txfrag_cleanup(struct ath_softc *sc,
	ath_bufhead *frags, struct ieee80211_node *ni)
{
	struct ath_buf *bf, *next;

	ATH_TXBUF_LOCK_ASSERT(sc);

	TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
		/* NB: bf assumed clean */
		TAILQ_REMOVE(frags, bf, bf_list);
		ath_returnbuf_head(sc, bf);
		ieee80211_node_decref(ni);
	}
}

/*
 * Setup xmit of a fragmented frame.  Allocate a buffer
 * for each frag and bump the node reference count to
 * reflect the held reference to be setup by ath_tx_start.
 */
int
ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
	struct mbuf *m0, struct ieee80211_node *ni)
{
	struct mbuf *m;
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
		/* XXX non-management? */
		bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
		if (bf == NULL) {	/* out of buffers, cleanup */
			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n",
			    __func__);
			ath_txfrag_cleanup(sc, frags, ni);
			break;
		}
		ieee80211_node_incref(ni);
		TAILQ_INSERT_TAIL(frags, bf, bf_list);
	}
	ATH_TXBUF_UNLOCK(sc);

	return !TAILQ_EMPTY(frags);
}

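/*
 * Hedged usage sketch (comment only; "frags" is a local name in this
 * example, not driver API): a caller handed a fragment chain is
 * expected to do something like the following before transmitting,
 * mirroring what the ath_transmit()/ath_tx_start() path does:
 *
 *	ath_bufhead frags;
 *
 *	TAILQ_INIT(&frags);
 *	if ((m0->m_flags & M_FRAG) &&
 *	    !ath_txfrag_setup(sc, &frags, m0, ni)) {
 *		ieee80211_free_mbuf(m0);	// no bufs; drop whole chain
 *		return (ENOBUFS);
 *	}
 *	// ... send m0, then peel one pre-reserved bf per fragment,
 *	// calling ath_txfrag_cleanup() on any error path.
 */
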
static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = ATH_MAX_SCATTER + 1;
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ieee80211_free_mbuf(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_MAX_SCATTER) {		/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
		m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER);
		if (m == NULL) {
			ieee80211_free_mbuf(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			ieee80211_free_mbuf(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		ieee80211_free_mbuf(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}

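/*
 * Note on the EFBIG handling above (comment only): a
 * bus_dmamap_load_mbuf_sg() EFBIG return is translated into
 * bf_nseg = ATH_MAX_SCATTER + 1 purely so the "too many segments"
 * branch fires; m_collapse() then gets exactly one chance to squeeze
 * the chain into ATH_MAX_SCATTER segments before the frame is dropped.
 */
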
/*
 * Chain together segments+descriptors for a frame - 11n or otherwise.
 *
 * For aggregates, this is called on each frame in the aggregate.
 */
static void
ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
    struct ath_buf *bf, int is_aggr, int is_first_subframe,
    int is_last_subframe)
{
	struct ath_hal *ah = sc->sc_ah;
	char *ds;
	int i, bp, dsp;
	HAL_DMA_ADDR bufAddrList[4];
	uint32_t segLenList[4];
	int numTxMaps = 1;
	int isFirstDesc = 1;

	/*
	 * XXX There's txdma and txdma_mgmt; the descriptor
	 * sizes must match.
	 */
	struct ath_descdma *dd = &sc->sc_txdma;

	/*
	 * Fill in the remainder of the descriptor info.
	 */

	/*
	 * We need the number of TX data pointers in each descriptor.
	 * EDMA and later chips support 4 TX buffers per descriptor;
	 * previous chips just support one.
	 */
	numTxMaps = sc->sc_tx_nmaps;

	/*
	 * For EDMA and later chips ensure the TX map is fully populated
	 * before advancing to the next descriptor.
	 */
	ds = (char *) bf->bf_desc;
	bp = dsp = 0;
	bzero(bufAddrList, sizeof(bufAddrList));
	bzero(segLenList, sizeof(segLenList));
	for (i = 0; i < bf->bf_nseg; i++) {
		bufAddrList[bp] = bf->bf_segs[i].ds_addr;
		segLenList[bp] = bf->bf_segs[i].ds_len;
		bp++;

		/*
		 * Go to the next segment if this isn't the last segment
		 * and there's space in the current TX map.
		 */
		if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
			continue;

		/*
		 * Last segment or we're out of buffer pointers.
		 */
		bp = 0;

		if (i == bf->bf_nseg - 1)
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);
		else
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds,
			    bf->bf_daddr + dd->dd_descsize * (dsp + 1));

		/*
		 * XXX This assumes that bfs_txq is the actual destination
		 * hardware queue at this point.  It may not have been
		 * assigned, it may actually be pointing to the multicast
		 * software TXQ id.  These must be fixed!
		 */
		ath_hal_filltxdesc(ah, (struct ath_desc *) ds
			, bufAddrList
			, segLenList
			, bf->bf_descid		/* XXX desc id */
			, bf->bf_state.bfs_tx_queue
			, isFirstDesc		/* first segment */
			, i == bf->bf_nseg - 1	/* last segment */
			, (struct ath_desc *) ds0	/* first descriptor */
		);

		/*
		 * Make sure the 11n aggregate fields are cleared.
		 *
		 * XXX TODO: this doesn't need to be called for
		 * aggregate frames, as it'll be called on all
		 * sub-frames.  Since the descriptors are in
		 * non-cacheable memory, this leads to some
		 * rather slow writes on MIPS/ARM platforms.
		 */
		if (ath_tx_is_11n(sc))
			ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);

		/*
		 * If 11n is enabled, set it up as if it's an aggregate
		 * frame.
		 */
		if (is_last_subframe) {
			ath_hal_set11n_aggr_last(sc->sc_ah,
			    (struct ath_desc *) ds);
		} else if (is_aggr) {
			/*
			 * This clears the aggrlen field; so
			 * the caller needs to call set_aggr_first()!
			 *
			 * XXX TODO: don't call this for the first
			 * descriptor in the first frame in an
			 * aggregate!
			 */
			ath_hal_set11n_aggr_middle(sc->sc_ah,
			    (struct ath_desc *) ds,
			    bf->bf_state.bfs_ndelim);
		}
		isFirstDesc = 0;
		bf->bf_lastds = (struct ath_desc *) ds;

		/*
		 * Don't forget to skip to the next descriptor.
		 */
		ds += sc->sc_tx_desclen;
		dsp++;

		/*
		 * .. and don't forget to blank these out!
		 */
		bzero(bufAddrList, sizeof(bufAddrList));
		bzero(segLenList, sizeof(segLenList));
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
}

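/*
 * Worked example (comment only): with bf_nseg = 6, the loop above
 * packs segments into descriptors as follows.
 *
 *	EDMA (sc_tx_nmaps == 4):   desc 0 <- segs 0..3, desc 1 <- segs 4..5
 *	legacy (sc_tx_nmaps == 1): descs 0..5 <- one segment each
 *
 * Each descriptor's link is set to bf_daddr + dd_descsize * (dsp + 1);
 * the final descriptor's link is set to 0 to terminate the chain.
 */
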
/*
 * Set the rate control fields in the given descriptor based on
 * the bf_state fields and node state.
 *
 * The bfs fields should already be set with the relevant rate
 * control information, including whether MRR is to be enabled.
 *
 * Since the FreeBSD HAL currently sets up the first TX rate
 * in ath_hal_setuptxdesc(), this will setup the MRR
 * conditionally for the pre-11n chips, and call ath_buf_set_rate
 * unconditionally for 11n chips. These require the 11n rate
 * scenario to be set if MCS rates are enabled, so it's easier
 * to just always call it. The caller can then only set rates 2, 3
 * and 4 if multi-rate retry is needed.
 */
static void
ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf)
{
	struct ath_rc_series *rc = bf->bf_state.bfs_rc;

	/* If mrr is disabled, blank tries 1, 2, 3 */
	if (! bf->bf_state.bfs_ismrr)
		rc[1].tries = rc[2].tries = rc[3].tries = 0;

#if 0
	/*
	 * If NOACK is set, just set ntries=1.
	 */
	else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
		rc[1].tries = rc[2].tries = rc[3].tries = 0;
		rc[0].tries = 1;
	}
#endif

	/*
	 * Always call - that way a retried descriptor will
	 * have the MRR fields overwritten.
	 *
	 * XXX TODO: see if this is really needed - setting up
	 * the first descriptor should set the MRR fields to 0
	 * for us anyway.
	 */
	if (ath_tx_is_11n(sc)) {
		ath_buf_set_rate(sc, ni, bf);
	} else {
		ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
			, rc[1].ratecode, rc[1].tries
			, rc[2].ratecode, rc[2].tries
			, rc[3].ratecode, rc[3].tries
		);
	}
}

/*
 * Setup segments+descriptors for an 11n aggregate.
 * bf_first is the first buffer in the aggregate.
 * The descriptor list must already have been linked together using
 * bf->bf_next.
 */
static void
ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf, *bf_prev = NULL;
	struct ath_desc *ds0 = bf_first->bf_desc;

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
	    __func__, bf_first->bf_state.bfs_nframes,
	    bf_first->bf_state.bfs_al);

	bf = bf_first;

	if (bf->bf_state.bfs_txrate0 == 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",
		    __func__, bf, 0);
	if (bf->bf_state.bfs_rc[0].ratecode == 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",
		    __func__, bf, 0);

	/*
	 * Setup all descriptors of all subframes - this will
	 * call ath_hal_set11n_aggr_middle() on every frame.
	 */
	while (bf != NULL) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
		    "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
		    __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
		    SEQNO(bf->bf_state.bfs_seqno));

		/*
		 * Setup the initial fields for the first descriptor - all
		 * the non-11n specific stuff.
		 */
		ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
			, bf->bf_state.bfs_pktlen	/* packet length */
			, bf->bf_state.bfs_hdrlen	/* header length */
			, bf->bf_state.bfs_atype	/* Atheros packet type */
			, bf->bf_state.bfs_txpower	/* txpower */
			, bf->bf_state.bfs_txrate0
			, bf->bf_state.bfs_try0		/* series 0 rate/tries */
			, bf->bf_state.bfs_keyix	/* key cache index */
			, bf->bf_state.bfs_txantenna	/* antenna mode */
			, bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ	/* flags */
			, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
			, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
		);

		/*
		 * First descriptor? Setup the rate control and initial
		 * aggregate header information.
		 */
		if (bf == bf_first) {
			/*
			 * setup first desc with rate and aggr info
			 */
			ath_tx_set_ratectrl(sc, bf->bf_node, bf);
		}

		/*
		 * Setup the descriptors for a multi-descriptor frame.
		 * This is both aggregate and non-aggregate aware.
		 */
		ath_tx_chaindesclist(sc, ds0, bf,
		    1, /* is_aggr */
		    !! (bf == bf_first), /* is_first_subframe */
		    !! (bf->bf_next == NULL) /* is_last_subframe */
		    );

		if (bf == bf_first) {
			/*
			 * Initialise the first 11n aggregate with the
			 * aggregate length and aggregate enable bits.
			 */
			ath_hal_set11n_aggr_first(sc->sc_ah,
			    ds0,
			    bf->bf_state.bfs_al,
			    bf->bf_state.bfs_ndelim);
		}

		/*
		 * Link the last descriptor of the previous frame
		 * to the beginning descriptor of this frame.
		 */
		if (bf_prev != NULL)
			ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
			    bf->bf_daddr);

		/* Save a copy so we can link the next descriptor in */
		bf_prev = bf;
		bf = bf->bf_next;
	}

	/*
	 * Set the first descriptor bf_lastds field to point to
	 * the last descriptor in the last subframe, that's where
	 * the status update will occur.
	 */
	bf_first->bf_lastds = bf_prev->bf_lastds;

	/*
	 * And bf_last in the first descriptor points to the end of
	 * the aggregate list.
	 */
	bf_first->bf_last = bf_prev;

	/*
	 * For non-AR9300 NICs, which require the rate control
	 * in the final descriptor - let's set that up now.
	 *
	 * This is because the filltxdesc() HAL call doesn't
	 * populate the last segment with rate control information
	 * if firstSeg is also true.  For non-aggregate frames
	 * that is fine, as the first frame already has rate control
	 * info.  But if the last frame in an aggregate has one
	 * descriptor, both firstseg and lastseg will be true and
	 * the rate info isn't copied.
	 *
	 * This is inefficient on MIPS/ARM platforms that have
	 * non-cacheable memory for TX descriptors, but we'll just
	 * make do for now.
	 *
	 * As to why the rate table is stashed in the last descriptor
	 * rather than the first descriptor?  Because proctxdesc()
	 * is called on the final descriptor in an MPDU or A-MPDU -
	 * ie, the one that gets updated by the hardware upon
	 * completion.  That way proctxdesc() doesn't need to know
	 * about the first _and_ last TX descriptor.
	 */
	ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
}

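/*
 * Illustrative comment only: for a three-subframe aggregate the chain
 * built above looks roughly like
 *
 *	bf_first (aggr first)  ds .. lastds
 *	  -> bf2 (aggr middle) ds .. lastds	(linked via settxdesclink)
 *	  -> bf3 (aggr last)   ds .. lastds
 *
 * with bf_first->bf_lastds pointing at bf3's final descriptor (the one
 * the hardware updates on completion) and bf_first->bf_last at bf3.
 */
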
/*
 * Hand-off a frame to the multicast TX queue.
 *
 * This is a software TXQ which will be appended to the CAB queue
 * during the beacon setup code.
 *
 * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
 * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated
 * with the actual hardware txq, or all of this will fall apart.
 *
 * XXX It may not be a bad idea to just stuff the QCU ID into bf_state
 * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated
 * correctly.
 */
static void
ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	     ("%s: busy status 0x%x", __func__, bf->bf_flags));

	/*
	 * Ensure that the tx queue is the cabq, so things get
	 * mapped correctly.
	 */
	if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
		    __func__, bf, bf->bf_state.bfs_tx_queue,
		    txq->axq_qnum);
	}

	ATH_TXQ_LOCK(txq);
	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
		struct ieee80211_frame *wh;

		/* mark previous frame */
		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		/* link descriptor */
		ath_hal_settxdesclink(sc->sc_ah,
		    bf_last->bf_lastds,
		    bf->bf_daddr);
	}
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_TXQ_UNLOCK(txq);
}

/*
 * Hand-off packet to a hardware queue.
 */
static void
ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf_first;

	/*
	 * Insert the frame on the outbound list and pass it on
	 * to the hardware.  Multicast frames buffered for power
	 * save stations and transmit from the CAB queue are stored
	 * on a s/w only queue and loaded on to the CAB queue in
	 * the SWBA handler since frames only go out on DTIM and
	 * to avoid possible races.
	 */
	ATH_TX_LOCK_ASSERT(sc);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	     ("%s: busy status 0x%x", __func__, bf->bf_flags));
	KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
	     ("ath_tx_handoff_hw called for mcast queue"));

	/*
	 * XXX We should instead just verify that sc_txstart_cnt
	 * or ath_txproc_cnt > 0.  That would mean that
	 * the reset is going to be waiting for us to complete.
	 */
	if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) {
		device_printf(sc->sc_dev,
		    "%s: TX dispatch without holding txcount/txstart refcnt!\n",
		    __func__);
	}

	/*
	 * XXX .. this is going to cause the hardware to get upset;
	 * so we really should find some way to drop or queue
	 * things.
	 */

	ATH_TXQ_LOCK(txq);

	/*
	 * XXX TODO: if there's a holdingbf, then
	 * ATH_TXQ_PUTRUNNING should be clear.
	 *
	 * If there is a holdingbf and the list is empty,
	 * then axq_link should be pointing to the holdingbf.
	 *
	 * Otherwise it should point to the last descriptor
	 * in the last ath_buf.
	 *
	 * In any case, we should really ensure that we
	 * update the previous descriptor link pointer to
	 * this descriptor, regardless of all of the above state.
	 *
	 * For now this is captured by having axq_link point
	 * to either the holdingbf (if the TXQ list is empty)
	 * or the end of the list (if the TXQ list isn't empty.)
	 * I'd rather just kill axq_link here and do it as above.
	 */

	/*
	 * Append the frame to the TX queue.
	 */
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_KTR(sc, ATH_KTR_TX, 3,
	    "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
	    "depth=%d",
	    txq->axq_qnum,
	    bf,
	    txq->axq_depth);

	/*
	 * If there's a link pointer, update it.
	 *
	 * XXX we should replace this with the above logic, just
	 * to kill axq_link with fire.
	 */
	if (txq->axq_link != NULL) {
		*txq->axq_link = bf->bf_daddr;
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
		    txq->axq_qnum, txq->axq_link,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    txq->axq_depth);
		ATH_KTR(sc, ATH_KTR_TX, 5,
		    "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
		    "lastds=%d",
		    txq->axq_qnum, txq->axq_link,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    bf->bf_lastds);
	}

	/*
	 * If we've not pushed anything into the hardware yet,
	 * push the head of the queue into the TxDP.
	 *
	 * Once we've started DMA, there's no guarantee that
	 * updating the TxDP with a new value will actually work.
	 * So we just don't do that - if we hit the end of the list,
	 * we keep that buffer around (the "holding buffer") and
	 * re-start DMA by updating the link pointer of _that_
	 * descriptor and then restart DMA.
	 */
	if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
		bf_first = TAILQ_FIRST(&txq->axq_q);
		txq->axq_flags |= ATH_TXQ_PUTRUNNING;
		ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: TXDP[%u] = %p (%p) depth %d\n",
		    __func__, txq->axq_qnum,
		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
		    txq->axq_depth);
		ATH_KTR(sc, ATH_KTR_TX, 5,
		    "ath_tx_handoff: TXDP[%u] = %p (%p) "
		    "lastds=%p depth %d",
		    txq->axq_qnum,
		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
		    bf_first->bf_lastds,
		    txq->axq_depth);
	}

	/*
	 * Ensure that the bf TXQ matches this TXQ, so later
	 * checking and holding buffer manipulation is sane.
	 */
	if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
		    __func__, bf, bf->bf_state.bfs_tx_queue,
		    txq->axq_qnum);
	}

	/*
	 * Track aggregate queue depth.
	 */
	if (bf->bf_state.bfs_aggr)
		txq->axq_aggr_depth++;

	/*
	 * Update the link pointer.
	 */
	ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);

	/*
	 * Start DMA.
	 *
	 * If we wrote a TxDP above, DMA will start from here.
	 *
	 * If DMA is running, it'll do nothing.
	 *
	 * If the DMA engine hit the end of the QCU list (ie LINK=NULL,
	 * or VEOL) then it stops at the last transmitted write.
	 * We then append a new frame by updating the link pointer
	 * in that descriptor and then kick TxE here; it will re-read
	 * that last descriptor and find the new descriptor to transmit.
	 *
	 * This is why we keep the holding descriptor around.
	 */
	ath_hal_txstart(ah, txq->axq_qnum);
	ATH_TXQ_UNLOCK(txq);
	ATH_KTR(sc, ATH_KTR_TX, 1,
	    "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
}

/*
 * Restart TX DMA for the given TXQ.
 *
 * This must be called whether the queue is empty or not.
 */
static void
ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_buf *bf, *bf_last;

	ATH_TXQ_LOCK_ASSERT(txq);

	/* XXX make this ATH_TXQ_FIRST */
	bf = TAILQ_FIRST(&txq->axq_q);
	bf_last = ATH_TXQ_LAST(txq, axq_q_s);

	if (bf == NULL)
		return;

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n",
	    __func__,
	    txq->axq_qnum,
	    bf,
	    bf_last,
	    (uint32_t) bf->bf_daddr);

#ifdef	ATH_DEBUG
	if (sc->sc_debug & ATH_DEBUG_RESET)
		ath_tx_dump(sc, txq);
#endif

	/*
	 * This is called from a restart, so DMA is known to be
	 * completely stopped.
	 */
	KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
	    ("%s: Q%d: called with PUTRUNNING=1\n",
	    __func__,
	    txq->axq_qnum));

	ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
	txq->axq_flags |= ATH_TXQ_PUTRUNNING;

	ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
	    &txq->axq_link);
	ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
}

/*
 * Hand off a packet to the hardware (or mcast queue.)
 *
 * The relevant hardware txq should be locked.
 */
static void
ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

#ifdef	ATH_DEBUG_ALQ
	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
		ath_tx_alq_post(sc, bf);
#endif

	if (txq->axq_qnum == ATH_TXQ_SWQ)
		ath_tx_handoff_mcast(sc, txq, bf);
	else
		ath_tx_handoff_hw(sc, txq, bf);
}

static int
ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
    struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
    int *keyix)
{
	DPRINTF(sc, ATH_DEBUG_XMIT,
	    "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n",
	    __func__,
	    *hdrlen,
	    *pktlen,
	    isfrag,
	    iswep,
	    m0);

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			return (0);
		}
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index.  When
		 * a s/w mic is done the frame will have had any mic
		 * added to it prior to entry so m0->m_pkthdr.len will
		 * account for it. Otherwise we need to add it to the
		 * packet length.
		 */
		cip = k->wk_cipher;
		(*hdrlen) += cip->ic_header;
		(*pktlen) += cip->ic_header + cip->ic_trailer;
		/* NB: frags always have any TKIP MIC done in s/w */
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
			(*pktlen) += cip->ic_miclen;
		(*keyix) = k->wk_keyix;
	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
		/*
		 * Use station key cache slot, if assigned.
		 */
		(*keyix) = ni->ni_ucastkey.wk_keyix;
		if ((*keyix) == IEEE80211_KEYIX_NONE)
			(*keyix) = HAL_TXKEYIX_INVALID;
	} else
		(*keyix) = HAL_TXKEYIX_INVALID;

	return (1);
}

/*
 * Calculate whether interoperability protection is required for
 * this frame.
 *
 * This requires the rate control information be filled in,
 * as the protection requirement depends upon the current
 * operating mode / PHY.
 */
static void
ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	struct ieee80211com *ic = &sc->sc_ic;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * If 802.11g protection is enabled, determine whether
	 * to use RTS/CTS or just CTS.  Note that this is only
	 * done for OFDM unicast frames.
	 */
	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
	    rt->info[rix].phy == IEEE80211_T_OFDM &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		bf->bf_state.bfs_doprot = 1;
		/* XXX fragments must use CCK rates w/ protection */
		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
			flags |= HAL_TXDESC_RTSENA;
		} else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
			flags |= HAL_TXDESC_CTSENA;
		}
		/*
		 * For frags it would be desirable to use the
		 * highest CCK rate for RTS/CTS.  But stations
		 * farther away may detect it at a lower CCK rate
		 * so use the configured protection rate instead
		 * (for now).
		 */
		sc->sc_stats.ast_tx_protect++;
	}

	/*
	 * If 11n protection is enabled and it's a HT frame,
	 * enable RTS.
	 *
	 * XXX ic_htprotmode or ic_curhtprotmode?
	 * XXX should ic_htprotmode only matter if ic_curhtprotmode
	 * XXX indicates it's not a HT pure environment?
	 */
	if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
	    rt->info[rix].phy == IEEE80211_T_HT &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		flags |= HAL_TXDESC_RTSENA;
		sc->sc_stats.ast_tx_htprotect++;
	}
	bf->bf_state.bfs_txflags = flags;
}

/*
 * Update the frame duration given the currently selected rate.
 *
 * This also updates the frame duration value, so it will require
 * a DMA flush.
 */
static void
ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	int isfrag = bf->bf_m->m_flags & M_FRAG;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * Calculate duration.  This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
	 */
	if ((flags & HAL_TXDESC_NOACK) == 0 &&
	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
		u_int16_t dur;
		if (shortPreamble)
			dur = rt->info[rix].spAckDuration;
		else
			dur = rt->info[rix].lpAckDuration;
		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
			dur += dur;		/* additional SIFS+ACK */
			/*
			 * Include the size of next fragment so NAV is
			 * updated properly.  The last fragment uses only
			 * the ACK duration
			 *
			 * XXX TODO: ensure that the rate lookup for each
			 * fragment is the same as the rate used by the
			 * first fragment!
			 */
			dur += ath_hal_computetxtime(ah,
			    rt,
			    bf->bf_nextfraglen,
			    rix, shortPreamble);
		}
		if (isfrag) {
			/*
			 * Force hardware to use computed duration for next
			 * fragment by disabling multi-rate retry which updates
			 * duration based on the multi-rate duration table.
			 */
			bf->bf_state.bfs_ismrr = 0;
			bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
			/* XXX update bfs_rc[0].try? */
		}

		/* Update the duration field itself */
		*(u_int16_t *)wh->i_dur = htole16(dur);
	}
}

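/*
 * Illustrative comment only: for a two-fragment burst, the duration
 * written into fragment 1 above works out to
 *
 *	dur = ack + (ack + txtime(bf_nextfraglen, rix))
 *
 * i.e. SIFS+ACK for this fragment plus SIFS+ACK and the airtime of the
 * fragment that follows; the final fragment carries just the single
 * ACK duration.
 */
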
static uint8_t
ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
    int cix, int shortPreamble)
{
	uint8_t ctsrate;

	/*
	 * CTS transmit rate is derived from the transmit rate
	 * by looking in the h/w rate table.  We must also factor
	 * in whether or not a short preamble is to be used.
	 */
	/* NB: cix is set above where RTS/CTS is enabled */
	KASSERT(cix != 0xff, ("cix not setup"));
	ctsrate = rt->info[cix].rateCode;

	/* XXX this should only matter for legacy rates */
	if (shortPreamble)
		ctsrate |= rt->info[cix].shortPreamble;

	return (ctsrate);
}

/*
 * Calculate the RTS/CTS duration for legacy frames.
 */
static int
ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
    int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
    int flags)
{
	int ctsduration = 0;

	/* This mustn't be called for HT modes */
	if (rt->info[cix].phy == IEEE80211_T_HT) {
		printf("%s: HT rate where it shouldn't be (0x%x)\n",
		    __func__, rt->info[cix].rateCode);
		return (-1);
	}

	/*
	 * Compute the transmit duration based on the frame
	 * size and the size of an ACK frame.  We call into the
	 * HAL to do the computation since it depends on the
	 * characteristics of the actual PHY being used.
	 *
	 * NB: CTS is assumed the same size as an ACK so we can
	 *     use the precalculated ACK durations.
	 */
	if (shortPreamble) {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].spAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
			rt, pktlen, rix, AH_TRUE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].spAckDuration;
	} else {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].lpAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
			rt, pktlen, rix, AH_FALSE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].lpAckDuration;
	}

	return (ctsduration);
}

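/*
 * Illustrative comment only: for an RTS-protected, ACKed frame with
 * short preamble, the sums above reduce to
 *
 *	ctsduration = spAckDuration(cix)	SIFS + CTS
 *	            + txtime(pktlen, rix)	the frame itself
 *	            + spAckDuration(rix)	SIFS + ACK
 *
 * with lpAckDuration substituted for long preamble, and the CTS/ACK
 * terms dropped for CTS-only or NOACK frames.
 */
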
/*
 * Update the given ath_buf with updated rts/cts setup and duration
 * values.
 *
 * To support rate lookups for each software retry, the rts/cts rate
 * and cts duration must be re-calculated.
 *
 * This function assumes the RTS/CTS flags have been set as needed;
 * mrr has been disabled; and the rate control lookup has been done.
 *
 * XXX TODO: MRR need only be disabled for the pre-11n NICs.
 * XXX The 11n NICs support per-rate RTS/CTS configuration.
 */
static void
ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
{
	uint16_t ctsduration = 0;
	uint8_t ctsrate = 0;
	uint8_t rix = bf->bf_state.bfs_rc[0].rix;
	uint8_t cix = 0;
	const HAL_RATE_TABLE *rt = sc->sc_currates;

	/*
	 * No RTS/CTS enabled? Don't bother.
	 */
	if ((bf->bf_state.bfs_txflags &
	    (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
		/* XXX is this really needed? */
		bf->bf_state.bfs_ctsrate = 0;
		bf->bf_state.bfs_ctsduration = 0;
		return;
	}

	/*
	 * If protection is enabled, use the protection rix control
	 * rate. Otherwise use the rate0 control rate.
	 */
	if (bf->bf_state.bfs_doprot)
		rix = sc->sc_protrix;
	else
		rix = bf->bf_state.bfs_rc[0].rix;

	/*
	 * If the raw path has hard-coded ctsrate0 to something,
	 * use it.
	 */
	if (bf->bf_state.bfs_ctsrate0 != 0)
		cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
	else
		/* Control rate from above */
		cix = rt->info[rix].controlRate;

	/* Calculate the rtscts rate for the given cix */
	ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
	    bf->bf_state.bfs_shpream);

	/* The 11n chipsets do ctsduration calculations for you */
	if (! ath_tx_is_11n(sc))
		ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
		    bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
		    rt, bf->bf_state.bfs_txflags);

	/* Squirrel away in ath_buf */
	bf->bf_state.bfs_ctsrate = ctsrate;
	bf->bf_state.bfs_ctsduration = ctsduration;

	/*
	 * Must disable multi-rate retry when using RTS/CTS.
	 */
	if (!sc->sc_mrrprot) {
		bf->bf_state.bfs_ismrr = 0;
		bf->bf_state.bfs_try0 =
		    bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */
	}
}

/*
 * Setup the descriptor chain for a normal or fast-frame
 * frame.
 *
 * XXX TODO: extend to include the destination hardware QCU ID.
 * Make sure that is correct.  Make sure that when being added
 * to the mcastq, the CABQ QCUID is set or things will get a bit
 * odd.
 */
static void
ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_desc *ds = bf->bf_desc;
	struct ath_hal *ah = sc->sc_ah;

	if (bf->bf_state.bfs_txrate0 == 0)
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, txrate0=%d\n", __func__, bf, 0);

	ath_hal_setuptxdesc(ah, ds
		, bf->bf_state.bfs_pktlen	/* packet length */
		, bf->bf_state.bfs_hdrlen	/* header length */
		, bf->bf_state.bfs_atype	/* Atheros packet type */
		, bf->bf_state.bfs_txpower	/* txpower */
		, bf->bf_state.bfs_txrate0
		, bf->bf_state.bfs_try0		/* series 0 rate/tries */
		, bf->bf_state.bfs_keyix	/* key cache index */
		, bf->bf_state.bfs_txantenna	/* antenna mode */
		, bf->bf_state.bfs_txflags	/* flags */
		, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
		, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
	);

	/*
	 * This will be overridden when the descriptor chain is written.
	 */
	bf->bf_lastds = ds;
	bf->bf_last = bf;

	/* Set rate control and descriptor chain for this frame */
	ath_tx_set_ratectrl(sc, bf->bf_node, bf);
	ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);
}

/*
 * Do a rate lookup.
 *
 * This performs a rate lookup for the given ath_buf only if it's required.
 * Non-data frames and raw frames don't require it.
 *
 * This populates the primary and MRR entries; MRR values are
 * then disabled later on if something requires it (eg RTS/CTS on
 * pre-11n chipsets).
 *
 * This needs to be done before the RTS/CTS fields are calculated
 * as they may depend upon the rate chosen.
 */
static void
ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf)
{
	uint8_t rate, rix;
	int try0;

	if (! bf->bf_state.bfs_doratelookup)
		return;

	/* Get rid of any previous state */
	bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
	ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
	    bf->bf_state.bfs_pktlen, &rix, &try0, &rate);

	/* In case MRR is disabled, make sure rc[0] is setup correctly */
	bf->bf_state.bfs_rc[0].rix = rix;
	bf->bf_state.bfs_rc[0].ratecode = rate;
	bf->bf_state.bfs_rc[0].tries = try0;

	if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
		ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
		    bf->bf_state.bfs_rc);
	ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));

	sc->sc_txrix = rix;	/* for LED blinking */
	sc->sc_lastdatarix = rix;	/* for fast frames */
	bf->bf_state.bfs_try0 = try0;
	bf->bf_state.bfs_txrate0 = rate;
}

/*
 * Update the CLRDMASK bit in the ath_buf if it needs to be set.
 */
static void
ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);

	ATH_TX_LOCK_ASSERT(sc);

	if (an->clrdmask == 1) {
		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
		an->clrdmask = 0;
	}
}

/*
 * Return whether this frame should be software queued or
 * direct dispatched.
 *
 * When doing powersave, BAR frames should be queued but other management
 * frames should be directly sent.
 *
 * When not doing powersave, stick BAR frames into the hardware queue
 * so it goes out even though the queue is paused.
 *
 * For now, management frames are also software queued by default.
 */
static int
ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,
    struct mbuf *m0, int *queue_to_head)
{
	struct ieee80211_node *ni = &an->an_node;
	struct ieee80211_frame *wh;
	uint8_t type, subtype;

	wh = mtod(m0, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	(*queue_to_head) = 0;

	/* If it's not in powersave - direct-dispatch BAR */
	if ((ATH_NODE(ni)->an_is_powersave == 0)
	    && type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: BAR: TX'ing direct\n", __func__);
		return (0);
	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
	    && type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
		/* BAR TX whilst asleep; queue */
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: swq: TX'ing\n", __func__);
		(*queue_to_head) = 1;
		return (1);
	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
	    && (type == IEEE80211_FC0_TYPE_MGT ||
	        type == IEEE80211_FC0_TYPE_CTL)) {
		/*
		 * Other control/mgmt frame; bypass software queuing
		 * for now!
		 */
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: %6D: Node is asleep; sending mgmt "
		    "(type=%d, subtype=%d)\n",
		    __func__, ni->ni_macaddr, ":", type, subtype);
		return (0);
	} else {
		return (1);
	}
}

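/*
 * Decision table for the above (comment only):
 *
 *	frame type		awake		asleep
 *	BAR			direct (0)	swq, head of queue (1)
 *	other mgmt/ctrl		swq (1)		direct (0)
 *	everything else		swq (1)		swq (1)
 */
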
/*
 * Transmit the given frame to the hardware.
 *
 * The frame must already be setup; rate control must already have
 * been done.
 *
 * XXX since the TXQ lock is being held here (and I dislike holding
 * it for this long when not doing software aggregation), later on
 * break this function into "setup_normal" and "xmit_normal". The
 * lock only needs to be held for the ath_tx_handoff call.
 *
 * XXX we don't update the leak count here - if we're doing
 * direct frame dispatch, we need to be able to do it without
 * decrementing the leak count (eg multicast queue frames.)
 */
static void
ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);
	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];

	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does
	 * set a completion handler however it doesn't (yet) properly
	 * handle the strict ordering requirements needed for normal,
	 * non-aggregate session frames.
	 *
	 * Once this is implemented, only set CLRDMASK like this for
	 * frames that must go out - eg management/raw frames.
	 */
	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;

	/* Setup the descriptor before handoff */
	ath_tx_do_ratelookup(sc, bf);
	ath_tx_calc_duration(sc, bf);
	ath_tx_calc_protection(sc, bf);
	ath_tx_set_rtscts(sc, bf);
	ath_tx_rate_fill_rcflags(sc, bf);
	ath_tx_setds(sc, bf);

	/* Track per-TID hardware queue depth correctly */
	tid->hwq_depth++;

	/* Assign the completion handler */
	bf->bf_comp = ath_tx_normal_comp;

	/* Hand off to hardware */
	ath_tx_handoff(sc, txq, bf);
}

/*
 * Do the basic frame setup stuff that's required before the frame
 * is added to a software queue.
 *
 * All frames get mostly the same treatment and it's done once.
 * Retransmits fiddle with things like the rate control setup,
 * setting the retransmit bit in the packet; doing relevant DMA/bus
 * syncing and relinking it (back) into the hardware TX queue.
 *
 * Note that this may cause the mbuf to be reallocated, so
 * m0 may not be valid.
 */
static int
ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = &sc->sc_ic;
	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
	int error, iswep, ismcast, isfrag, ismrr;
	int keyix, hdrlen, pktlen, try0 = 0;
	u_int8_t rix = 0, txrate = 0;
	struct ath_desc *ds;
	struct ieee80211_frame *wh;
	u_int subtype, flags;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	HAL_BOOL shortPreamble;
	struct ath_node *an;
	u_int pri;

	/*
	 * To ensure that both the sequence numbers and the CCMP PN handling
	 * are "correct", make sure that the relevant TID queue is locked.
	 * Otherwise the CCMP PN and seqno may appear out of order, causing
	 * re-ordered frames to have out of order CCMP PN's, resulting
	 * in many, many frame drops.
	 */
	ATH_TX_LOCK_ASSERT(sc);

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	isfrag = m0->m_flags & M_FRAG;
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3);

	/* Handle encryption twiddling if needed */
	if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
	    &pktlen, &keyix)) {
		ieee80211_free_mbuf(m0);
		return EIO;
	}

	/* packet header may have moved, reset our local pointer */
	wh = mtod(m0, struct ieee80211_frame *);

	pktlen += IEEE80211_CRC_LEN;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = ath_tx_dmasetup(sc, bf, m0);
	if (error != 0)
		return error;
	KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	wh = mtod(m0, struct ieee80211_frame *);

	/* setup descriptors */
	ds = bf->bf_desc;
	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));

	/*
	 * NB: the 802.11 layer marks whether or not we should
	 * use short preamble based on the current mode and
	 * negotiated parameters.
	 */
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
		shortPreamble = AH_TRUE;
		sc->sc_stats.ast_tx_shortpre++;
	} else {
		shortPreamble = AH_FALSE;
	}

	an = ATH_NODE(ni);
	//flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
	flags = 0;
	ismrr = 0;				/* default no multi-rate retry*/
	pri = M_WME_GETAC(m0);			/* honor classification */
	/* XXX use txparams instead of fixed values */
	/*
	 * Calculate Atheros packet type from IEEE80211 packet header,
	 * setup for rate calculations, and select h/w transmit queue.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
			atype = HAL_PKT_TYPE_BEACON;
		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			atype = HAL_PKT_TYPE_PROBE_RESP;
		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
			atype = HAL_PKT_TYPE_ATIM;
		else
			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_CTL:
		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_DATA:
		atype = HAL_PKT_TYPE_NORMAL;		/* default */
		/*
		 * Data frames: multicast frames go out at a fixed rate,
		 * EAPOL frames use the mgmt frame rate; otherwise consult
		 * the rate control module for the rate to use.
		 */
		if (ismcast) {
			rix = an->an_mcastrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = 1;
		} else if (m0->m_flags & M_EAPOL) {
			/* XXX? maybe always use long preamble? */
			rix = an->an_mgmtrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = ATH_TXMAXTRY;	/* XXX?too many? */
		} else {
			/*
			 * Do rate lookup on each TX, rather than using
			 * the hard-coded TX information decided here.
			 */
			ismrr = 1;
			bf->bf_state.bfs_doratelookup = 1;
		}
		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
			flags |= HAL_TXDESC_NOACK;
		break;
	default:
		device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
		    wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		/* XXX statistic */
		/* XXX free tx dmamap */
		ieee80211_free_mbuf(m0);
		return EIO;
	}

	/*
	 * There are two known scenarios where the frame AC doesn't match
	 * what the destination TXQ is.
	 *
	 * + non-QoS frames (eg management?) that the net80211 stack has
	 *   assigned a higher AC to, but since it's a non-QoS TID, it's
	 *   being thrown into TID 16.  TID 16 gets the ATH_NONQOS_TID_AC queue.
	 *   It's quite possible that management frames should just be
	 *   direct dispatched to hardware rather than go via the software
	 *   queue; that should be investigated in the future.  There are
	 *   some specific scenarios where this doesn't make sense, mostly
	 *   surrounding ADDBA request/response - hence why that is special
	 *   cased.
	 *
	 * + Multicast frames going into the VAP mcast queue.  That shows up
	 *   as "TXQ 11".
	 *
	 * This driver should eventually support separate TID and TXQ locking,
	 * allowing for arbitrary AC frames to appear on arbitrary software
	 * queues, being queued to the "correct" hardware queue when needed.
	 */
#if 0
	if (txq != sc->sc_ac2q[pri]) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
		    __func__,
		    txq,
		    txq->axq_qnum,
		    pri,
		    sc->sc_ac2q[pri],
		    sc->sc_ac2q[pri]->axq_qnum);
	}
#endif

	/*
	 * Calculate miscellaneous flags.
	 */
	if (ismcast) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
	} else if (pktlen > vap->iv_rtsthreshold &&
	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		sc->sc_stats.ast_tx_rts++;
	}
	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
		sc->sc_stats.ast_tx_noack++;
#ifdef IEEE80211_SUPPORT_TDMA
	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
		DPRINTF(sc, ATH_DEBUG_TDMA,
		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
		sc->sc_stats.ast_tdma_ack++;
		/* XXX free tx dmamap */
		ieee80211_free_mbuf(m0);
		return EIO;
	}
#endif

	/*
	 * Determine if a tx interrupt should be generated for
	 * this descriptor.  We take a tx interrupt to reap
	 * descriptors when the h/w hits an EOL condition or
	 * when the descriptor is specifically marked to generate
	 * an interrupt.  We periodically mark descriptors in this
1747 	 * way to ensure timely replenishing of the supply needed
1748 	 * for sending frames.  Deferring interrupts reduces system
1749 	 * load and potentially allows more concurrent work to be
1750 	 * done, but if done too aggressively it can cause senders to
1751 	 * back up.
1752 	 *
1753 	 * NB: use >= to deal with sc_txintrperiod changing
1754 	 *     dynamically through sysctl.
1755 	 */
1756 	if (flags & HAL_TXDESC_INTREQ) {
1757 		txq->axq_intrcnt = 0;
1758 	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
1759 		flags |= HAL_TXDESC_INTREQ;
1760 		txq->axq_intrcnt = 0;
1761 	}
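	/*
	 * Illustrative example: with sc_txintrperiod set to 5 via
	 * sysctl, every fifth descriptor queued to this TXQ gets
	 * HAL_TXDESC_INTREQ, bounding how long completed frames can
	 * wait to be reaped; forced-interrupt frames (mgmt/ctrl
	 * above) also reset the counter.
	 */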
1762 
1763 	/* This point forward is actual TX bits */
1764 
1765 	/*
1766 	 * At this point we are committed to sending the frame
1767 	 * and we don't need to look at m_nextpkt; clear it in
1768 	 * case this frame is part of frag chain.
1769 	 * case this frame is part of a frag chain.
1770 	m0->m_nextpkt = NULL;
1771 
1772 	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
1773 		ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
1774 		    sc->sc_hwmap[rix].ieeerate, -1);
1775 
1776 	if (ieee80211_radiotap_active_vap(vap)) {
1777 		u_int64_t tsf = ath_hal_gettsf64(ah);
1778 
1779 		sc->sc_tx_th.wt_tsf = htole64(tsf);
1780 		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
1781 		if (iswep)
1782 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
1783 		if (isfrag)
1784 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
1785 		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
1786 		sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
1787 		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
1788 
1789 		ieee80211_radiotap_tx(vap, m0);
1790 	}
1791 
1792 	/* Blank the legacy rate array */
1793 	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
1794 
1795 	/*
1796 	 * ath_buf_set_rate needs at least one rate/try to setup
1797 	 * the rate scenario.
1798 	 */
1799 	bf->bf_state.bfs_rc[0].rix = rix;
1800 	bf->bf_state.bfs_rc[0].tries = try0;
1801 	bf->bf_state.bfs_rc[0].ratecode = txrate;
1802 
1803 	/* Store the decided rate index values away */
1804 	bf->bf_state.bfs_pktlen = pktlen;
1805 	bf->bf_state.bfs_hdrlen = hdrlen;
1806 	bf->bf_state.bfs_atype = atype;
1807 	bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni);
1808 	bf->bf_state.bfs_txrate0 = txrate;
1809 	bf->bf_state.bfs_try0 = try0;
1810 	bf->bf_state.bfs_keyix = keyix;
1811 	bf->bf_state.bfs_txantenna = sc->sc_txantenna;
1812 	bf->bf_state.bfs_txflags = flags;
1813 	bf->bf_state.bfs_shpream = shortPreamble;
1814 
1815 	/* XXX this should be done in ath_tx_setrate() */
1816 	bf->bf_state.bfs_ctsrate0 = 0;	/* ie, no hard-coded ctsrate */
1817 	bf->bf_state.bfs_ctsrate = 0;	/* calculated later */
1818 	bf->bf_state.bfs_ctsduration = 0;
1819 	bf->bf_state.bfs_ismrr = ismrr;
1820 
1821 	return 0;
1822 }
1823 
1824 /*
1825  * Queue a frame to the hardware or software queue.
1826  *
1827  * This can be called by the net80211 code.
1828  *
1829  * XXX what about locking? Or, push the seqno assign into the
1830 	 * XXX aggregate scheduler so it's serialised?
1831  *
1832  * XXX When sending management frames via ath_raw_xmit(),
1833  *     should CLRDMASK be set unconditionally?
1834  */
1835 int
1836 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
1837     struct ath_buf *bf, struct mbuf *m0)
1838 {
1839 	struct ieee80211vap *vap = ni->ni_vap;
1840 	struct ath_vap *avp = ATH_VAP(vap);
1841 	int r = 0;
1842 	u_int pri;
1843 	int tid;
1844 	struct ath_txq *txq;
1845 	int ismcast;
1846 	const struct ieee80211_frame *wh;
1847 	int is_ampdu, is_ampdu_tx, is_ampdu_pending;
1848 	ieee80211_seq seqno;
1849 	uint8_t type, subtype;
1850 	int queue_to_head;
1851 
1852 	ATH_TX_LOCK_ASSERT(sc);
1853 
1854 	/*
1855 	 * Determine the target hardware queue.
1856 	 *
1857 	 * For multicast frames, the txq gets overridden appropriately
1858 	 * depending upon the state of PS.
1859 	 *
1860 	 * For any other frame, we do a TID/QoS lookup inside the frame
1861 	 * to see what the TID should be. If it's a non-QoS frame, the
1862 	 * AC and TID are overridden. The TID/TXQ code assumes the
1863 	 * TID is on a predictable hardware TXQ, so we don't support
1864 	 * having a node TID queued to multiple hardware TXQs.
1865 	 * This may change in the future but would require some locking
1866 	 * fudgery.
1867 	 */
1868 	pri = ath_tx_getac(sc, m0);
1869 	tid = ath_tx_gettid(sc, m0);
1870 
1871 	txq = sc->sc_ac2q[pri];
1872 	wh = mtod(m0, struct ieee80211_frame *);
1873 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
1874 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
1875 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
1876 
1877 	/*
1878 	 * Enforce how deep the multicast queue can grow.
1879 	 *
1880 	 * XXX duplicated in ath_raw_xmit().
1881 	 */
1882 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1883 		if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
1884 		    > sc->sc_txq_mcastq_maxdepth) {
1885 			sc->sc_stats.ast_tx_mcastq_overflow++;
1886 			m_freem(m0);
1887 			return (ENOBUFS);
1888 		}
1889 	}
1890 
1891 	/*
1892 	 * Enforce how deep the unicast queue can grow.
1893 	 *
1894 	 * If the node is in power save then we don't want
1895 	 * the software queue to grow too deep, or a node may
1896 	 * end up consuming all of the ath_buf entries.
1897 	 *
1898 	 * For now, only do this for DATA frames.
1899 	 *
1900 	 * We will want to cap how many management/control
1901 	 * frames get punted to the software queue so it doesn't
1902 	 * fill up.  But the correct solution isn't yet obvious.
1903 	 * In any case, this check should at least let frames pass
1904 	 * that we are direct-dispatching.
1905 	 *
1906 	 * XXX TODO: duplicate this to the raw xmit path!
1907 	 */
1908 	if (type == IEEE80211_FC0_TYPE_DATA &&
1909 	    ATH_NODE(ni)->an_is_powersave &&
1910 	    ATH_NODE(ni)->an_swq_depth >
1911 	     sc->sc_txq_node_psq_maxdepth) {
1912 		sc->sc_stats.ast_tx_node_psq_overflow++;
1913 		m_freem(m0);
1914 		return (ENOBUFS);
1915 	}
1916 
1917 	/* A-MPDU TX */
1918 	is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
1919 	is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
1920 	is_ampdu = is_ampdu_tx | is_ampdu_pending;
1921 
1922 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
1923 	    __func__, tid, pri, is_ampdu);
1924 
1925 	/* Set local packet state, used to queue packets to hardware */
1926 	bf->bf_state.bfs_tid = tid;
1927 	bf->bf_state.bfs_tx_queue = txq->axq_qnum;
1928 	bf->bf_state.bfs_pri = pri;
1929 
1930 #if 1
1931 	/*
1932 	 * When servicing one or more stations in power-save mode
1933 	 * (or if there is some mcast data waiting on the mcast
1934 	 * queue, to prevent out-of-order delivery), multicast frames
1935 	 * must be buffered until after the beacon.
1936 	 *
1937 	 * TODO: we should lock the mcastq before we check the length.
1938 	 */
1939 	if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
1940 		txq = &avp->av_mcastq;
1941 		/*
1942 		 * Mark the frame as eventually belonging on the CAB
1943 		 * queue, so the descriptor setup functions will
1944 		 * correctly initialise the descriptor 'qcuId' field.
1945 		 */
1946 		bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
1947 	}
1948 #endif
1949 
1950 	/* Do the generic frame setup */
1951 	/* XXX should just bzero the bf_state? */
1952 	bf->bf_state.bfs_dobaw = 0;
1953 
1954 	/* A-MPDU TX? Manually set sequence number */
1955 	/*
1956 	 * Don't do it whilst pending; the net80211 layer still
1957 	 * assigns them.
1958 	 */
1959 	if (is_ampdu_tx) {
1960 		/*
1961 		 * Always call; this function will
1962 		 * handle making sure that null data frames
1963 		 * don't get a sequence number from the current
1964 		 * TID and thus mess with the BAW.
1965 		 */
1966 		seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);
1967 
1968 		/*
1969 		 * Don't add QoS NULL frames to the BAW.
1970 		 */
1971 		if (IEEE80211_QOS_HAS_SEQ(wh) &&
1972 		    subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) {
1973 			bf->bf_state.bfs_dobaw = 1;
1974 		}
1975 	}
1976 
1977 	/*
1978 	 * If needed, the sequence number has been assigned.
1979 	 * Squirrel it away somewhere easy to get to.
1980 	 */
1981 	bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;
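	/*
	 * Note (illustrative): bfs_seqno is kept in sequence-control
	 * form, ie shifted up by IEEE80211_SEQ_SEQ_SHIFT; the SEQNO()
	 * macro used throughout this file shifts it back down to the
	 * raw 12-bit sequence number.
	 */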
1982 
1983 	/* Is ampdu pending? Fetch the seqno and print it out */
1984 	if (is_ampdu_pending)
1985 		DPRINTF(sc, ATH_DEBUG_SW_TX,
1986 		    "%s: tid %d: ampdu pending, seqno %d\n",
1987 		    __func__, tid, M_SEQNO_GET(m0));
1988 
1989 	/* This also sets up the DMA map */
1990 	r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
1991 
1992 	if (r != 0)
1993 		goto done;
1994 
1995 	/* At this point m0 could have changed! */
1996 	m0 = bf->bf_m;
1997 
1998 #if 1
1999 	/*
2000 	 * If it's a multicast frame, do a direct-dispatch to the
2001 	 * destination hardware queue. Don't bother software
2002 	 * queuing it.
2003 	 */
2004 	/*
2005 	 * If it's a BAR frame, do a direct dispatch to the
2006 	 * destination hardware queue. Don't bother software
2007 	 * queuing it, as the TID will now be paused.
2008 	 * Sending a BAR frame can occur from the net80211 txa timer
2009 	 * (ie, retries) or from the ath txtask (completion call.)
2010 	 * It queues directly to hardware because the TID is paused
2011 	 * at this point (and won't be unpaused until the BAR has
2012 	 * either been TXed successfully or max retries has been
2013 	 * reached.)
2014 	 */
2015 	/*
2016 	 * Until things are better debugged - if this node is asleep
2017 	 * and we're sending it a non-BAR frame, direct dispatch it.
2018 	 * Why? Because we need to figure out what's actually being
2019 	 * sent - eg, during reassociation/reauthentication after
2020 	 * the node (last) disappeared whilst asleep, the driver should
2021 	 * have unpaused/unsleep'ed the node.  So until that is
2022 	 * sorted out, use this workaround.
2023 	 */
2024 	if (txq == &avp->av_mcastq) {
2025 		DPRINTF(sc, ATH_DEBUG_SW_TX,
2026 		    "%s: bf=%p: mcastq: TX'ing\n", __func__, bf);
2027 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2028 		ath_tx_xmit_normal(sc, txq, bf);
2029 	} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2030 	    &queue_to_head)) {
2031 		ath_tx_swq(sc, ni, txq, queue_to_head, bf);
2032 	} else {
2033 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2034 		ath_tx_xmit_normal(sc, txq, bf);
2035 	}
2036 #else
2037 	/*
2038 	 * For now, since there's no software queue,
2039 	 * direct-dispatch to the hardware.
2040 	 */
2041 	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2042 	/*
2043 	 * Update the current leak count if
2044 	 * we're leaking frames; and set the
2045 	 * MORE flag as appropriate.
2046 	 */
2047 	ath_tx_leak_count_update(sc, tid, bf);
2048 	ath_tx_xmit_normal(sc, txq, bf);
2049 #endif
2050 done:
2051 	return 0;
2052 }
2053 
2054 static int
2055 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
2056 	struct ath_buf *bf, struct mbuf *m0,
2057 	const struct ieee80211_bpf_params *params)
2058 {
2059 	struct ieee80211com *ic = &sc->sc_ic;
2060 	struct ath_hal *ah = sc->sc_ah;
2061 	struct ieee80211vap *vap = ni->ni_vap;
2062 	int error, ismcast, ismrr;
2063 	int keyix, hdrlen, pktlen, try0, txantenna;
2064 	u_int8_t rix, txrate;
2065 	struct ieee80211_frame *wh;
2066 	u_int flags;
2067 	HAL_PKT_TYPE atype;
2068 	const HAL_RATE_TABLE *rt;
2069 	struct ath_desc *ds;
2070 	u_int pri;
2071 	int o_tid = -1;
2072 	int do_override;
2073 	uint8_t type, subtype;
2074 	int queue_to_head;
2075 	struct ath_node *an = ATH_NODE(ni);
2076 
2077 	ATH_TX_LOCK_ASSERT(sc);
2078 
2079 	wh = mtod(m0, struct ieee80211_frame *);
2080 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
2081 	hdrlen = ieee80211_anyhdrsize(wh);
2082 	/*
2083 	 * Packet length must not include any
2084 	 * pad bytes; deduct them here.
2085 	 */
2086 	/* XXX honor IEEE80211_BPF_DATAPAD */
2087 	pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;
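	/*
	 * Worked example (illustrative, assuming the header was padded;
	 * see the XXX above): a 26 byte QoS data header is padded by 2
	 * bytes to a 32-bit boundary, and (hdrlen & 3) recovers those 2
	 * pad bytes to deduct before the 4-byte CRC is added back on.
	 */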
2088 
2089 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2090 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2091 
2092 	ATH_KTR(sc, ATH_KTR_TX, 2,
2093 	     "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf);
2094 
2095 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n",
2096 	    __func__, ismcast);
2097 
2098 	pri = params->ibp_pri & 3;
2099 	/* Override pri if the frame isn't a QoS one */
2100 	if (! IEEE80211_QOS_HAS_SEQ(wh))
2101 		pri = ath_tx_getac(sc, m0);
2102 
2103 	/* XXX If it's an ADDBA, override to the correct queue */
2104 	do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid);
2105 
2106 	/* Map ADDBA to the correct priority */
2107 	if (do_override) {
2108 #if 0
2109 		DPRINTF(sc, ATH_DEBUG_XMIT,
2110 		    "%s: overriding tid %d pri %d -> %d\n",
2111 		    __func__, o_tid, pri, TID_TO_WME_AC(o_tid));
2112 #endif
2113 		pri = TID_TO_WME_AC(o_tid);
2114 	}
2115 
2116 	/* Handle encryption twiddling if needed */
2117 	if (! ath_tx_tag_crypto(sc, ni,
2118 	    m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
2119 	    &hdrlen, &pktlen, &keyix)) {
2120 		ieee80211_free_mbuf(m0);
2121 		return EIO;
2122 	}
2123 	/* packet header may have moved, reset our local pointer */
2124 	wh = mtod(m0, struct ieee80211_frame *);
2125 
2126 	/* Do the generic frame setup */
2127 	/* XXX should just bzero the bf_state? */
2128 	bf->bf_state.bfs_dobaw = 0;
2129 
2130 	error = ath_tx_dmasetup(sc, bf, m0);
2131 	if (error != 0)
2132 		return error;
2133 	m0 = bf->bf_m;				/* NB: may have changed */
2134 	wh = mtod(m0, struct ieee80211_frame *);
2135 	KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
2136 	bf->bf_node = ni;			/* NB: held reference */
2137 
2138 	/* Always enable CLRDMASK for raw frames for now.. */
2139 	flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
2140 	flags |= HAL_TXDESC_INTREQ;		/* force interrupt */
2141 	if (params->ibp_flags & IEEE80211_BPF_RTS)
2142 		flags |= HAL_TXDESC_RTSENA;
2143 	else if (params->ibp_flags & IEEE80211_BPF_CTS) {
2144 		/* XXX assume 11g/11n protection? */
2145 		bf->bf_state.bfs_doprot = 1;
2146 		flags |= HAL_TXDESC_CTSENA;
2147 	}
2148 	/* XXX leave ismcast to injector? */
2149 	if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
2150 		flags |= HAL_TXDESC_NOACK;
2151 
2152 	rt = sc->sc_currates;
2153 	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
2154 
2155 	/* Fetch first rate information */
2156 	rix = ath_tx_findrix(sc, params->ibp_rate0);
2157 	try0 = params->ibp_try0;
2158 
2159 	/*
2160 	 * Override EAPOL rate as appropriate.
2161 	 */
2162 	if (m0->m_flags & M_EAPOL) {
2163 		/* XXX? maybe always use long preamble? */
2164 		rix = an->an_mgmtrix;
2165 		try0 = ATH_TXMAXTRY;	/* XXX? too many? */
2166 	}
2167 
2168 	txrate = rt->info[rix].rateCode;
2169 	if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
2170 		txrate |= rt->info[rix].shortPreamble;
2171 	sc->sc_txrix = rix;
2172 	ismrr = (params->ibp_try1 != 0);
2173 	txantenna = params->ibp_pri >> 2;
2174 	if (txantenna == 0)			/* XXX? */
2175 		txantenna = sc->sc_txantenna;
2176 
2177 	/*
2178 	 * Since ctsrate is fixed, store it away for later
2179 	 * use when the descriptor fields are being set.
2180 	 */
2181 	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA))
2182 		bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate;
2183 
2184 	/*
2185 	 * NB: we mark all packets as type PSPOLL so the h/w won't
2186 	 * set the sequence number, duration, etc.
2187 	 */
2188 	atype = HAL_PKT_TYPE_PSPOLL;
2189 
2190 	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
2191 		ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
2192 		    sc->sc_hwmap[rix].ieeerate, -1);
2193 
2194 	if (ieee80211_radiotap_active_vap(vap)) {
2195 		u_int64_t tsf = ath_hal_gettsf64(ah);
2196 
2197 		sc->sc_tx_th.wt_tsf = htole64(tsf);
2198 		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
2199 		if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
2200 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2201 		if (m0->m_flags & M_FRAG)
2202 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
2203 		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
2204 		sc->sc_tx_th.wt_txpower = MIN(params->ibp_power,
2205 		    ieee80211_get_node_txpower(ni));
2206 		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
2207 
2208 		ieee80211_radiotap_tx(vap, m0);
2209 	}
2210 
2211 	/*
2212 	 * Formulate first tx descriptor with tx controls.
2213 	 */
2214 	ds = bf->bf_desc;
2215 	/* XXX check return value? */
2216 
2217 	/* Store the decided rate index values away */
2218 	bf->bf_state.bfs_pktlen = pktlen;
2219 	bf->bf_state.bfs_hdrlen = hdrlen;
2220 	bf->bf_state.bfs_atype = atype;
2221 	bf->bf_state.bfs_txpower = MIN(params->ibp_power,
2222 	    ieee80211_get_node_txpower(ni));
2223 	bf->bf_state.bfs_txrate0 = txrate;
2224 	bf->bf_state.bfs_try0 = try0;
2225 	bf->bf_state.bfs_keyix = keyix;
2226 	bf->bf_state.bfs_txantenna = txantenna;
2227 	bf->bf_state.bfs_txflags = flags;
2228 	bf->bf_state.bfs_shpream =
2229 	    !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE);
2230 
2231 	/* Set local packet state, used to queue packets to hardware */
2232 	bf->bf_state.bfs_tid = WME_AC_TO_TID(pri);
2233 	bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;
2234 	bf->bf_state.bfs_pri = pri;
2235 
2236 	/* XXX this should be done in ath_tx_setrate() */
2237 	bf->bf_state.bfs_ctsrate = 0;
2238 	bf->bf_state.bfs_ctsduration = 0;
2239 	bf->bf_state.bfs_ismrr = ismrr;
2240 
2241 	/* Blank the legacy rate array */
2242 	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
2243 
2244 	bf->bf_state.bfs_rc[0].rix = rix;
2245 	bf->bf_state.bfs_rc[0].tries = try0;
2246 	bf->bf_state.bfs_rc[0].ratecode = txrate;
2247 
2248 	if (ismrr) {
2249 		int rix;
2250 
2251 		rix = ath_tx_findrix(sc, params->ibp_rate1);
2252 		bf->bf_state.bfs_rc[1].rix = rix;
2253 		bf->bf_state.bfs_rc[1].tries = params->ibp_try1;
2254 
2255 		rix = ath_tx_findrix(sc, params->ibp_rate2);
2256 		bf->bf_state.bfs_rc[2].rix = rix;
2257 		bf->bf_state.bfs_rc[2].tries = params->ibp_try2;
2258 
2259 		rix = ath_tx_findrix(sc, params->ibp_rate3);
2260 		bf->bf_state.bfs_rc[3].rix = rix;
2261 		bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
2262 	}
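	/*
	 * Illustrative: a caller passing rate0/try0 plus a non-zero
	 * ibp_try1 builds up to a four-entry multi-rate-retry schedule
	 * here; slots left with tries=0 (from the bzero above) are
	 * unused.
	 */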
2263 	/*
2264 	 * All the required rate control decisions have been made;
2265 	 * fill in the rc flags.
2266 	 */
2267 	ath_tx_rate_fill_rcflags(sc, bf);
2268 
2269 	/* NB: no buffered multicast in power save support */
2270 
2271 	/*
2272 	 * If we're overriding the ADDBA destination, dump directly
2273 	 * into the hardware queue, right after any pending
2274 	 * frames to that node.
2275 	 */
2276 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",
2277 	    __func__, do_override);
2278 
2279 #if 1
2280 	/*
2281 	 * Put addba frames in the right place in the right TID/HWQ.
2282 	 */
2283 	if (do_override) {
2284 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2285 		/*
2286 		 * XXX if it's addba frames, should we be leaking
2287 		 * them out via the frame leak method?
2288 		 * XXX for now let's not risk it; but we may wish
2289 		 * to investigate this later.
2290 		 */
2291 		ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2292 	} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2293 	    &queue_to_head)) {
2294 		/* Queue to software queue */
2295 		ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);
2296 	} else {
2297 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2298 		ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2299 	}
2300 #else
2301 	/* Direct-dispatch to the hardware */
2302 	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2303 	/*
2304 	 * Update the current leak count if
2305 	 * we're leaking frames; and set the
2306 	 * MORE flag as appropriate.
2307 	 */
2308 	ath_tx_leak_count_update(sc, tid, bf);
2309 	ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2310 #endif
2311 	return 0;
2312 }
2313 
2314 /*
2315  * Send a raw frame.
2316  *
2317  * This can be called by net80211.
2318  */
2319 int
2320 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
2321 	const struct ieee80211_bpf_params *params)
2322 {
2323 	struct ieee80211com *ic = ni->ni_ic;
2324 	struct ath_softc *sc = ic->ic_softc;
2325 	struct ath_buf *bf;
2326 	struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
2327 	int error = 0;
2328 
2329 	ATH_PCU_LOCK(sc);
2330 	if (sc->sc_inreset_cnt > 0) {
2331 		DPRINTF(sc, ATH_DEBUG_XMIT,
2332 		    "%s: sc_inreset_cnt > 0; bailing\n", __func__);
2333 		error = EIO;
2334 		ATH_PCU_UNLOCK(sc);
2335 		goto badbad;
2336 	}
2337 	sc->sc_txstart_cnt++;
2338 	ATH_PCU_UNLOCK(sc);
2339 
2340 	/* Wake the hardware up already */
2341 	ATH_LOCK(sc);
2342 	ath_power_set_power_state(sc, HAL_PM_AWAKE);
2343 	ATH_UNLOCK(sc);
2344 
2345 	ATH_TX_LOCK(sc);
2346 
2347 	if (!sc->sc_running || sc->sc_invalid) {
2348 		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, r/i: %d/%d\n",
2349 		    __func__, sc->sc_running, sc->sc_invalid);
2350 		m_freem(m);
2351 		error = ENETDOWN;
2352 		goto bad;
2353 	}
2354 
2355 	/*
2356 	 * Enforce how deep the multicast queue can grow.
2357 	 *
2358 	 * XXX duplicated in ath_tx_start().
2359 	 */
2360 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2361 		if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
2362 		    > sc->sc_txq_mcastq_maxdepth) {
2363 			sc->sc_stats.ast_tx_mcastq_overflow++;
2364 			error = ENOBUFS;
2365 		}
2366 
2367 		if (error != 0) {
2368 			m_freem(m);
2369 			goto bad;
2370 		}
2371 	}
2372 
2373 	/*
2374 	 * Grab a TX buffer and associated resources.
2375 	 */
2376 	bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
2377 	if (bf == NULL) {
2378 		sc->sc_stats.ast_tx_nobuf++;
2379 		m_freem(m);
2380 		error = ENOBUFS;
2381 		goto bad;
2382 	}
2383 	ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n",
2384 	    m, params,  bf);
2385 
2386 	if (params == NULL) {
2387 		/*
2388 		 * Legacy path; interpret frame contents to decide
2389 		 * precisely how to send the frame.
2390 		 */
2391 		if (ath_tx_start(sc, ni, bf, m)) {
2392 			error = EIO;		/* XXX */
2393 			goto bad2;
2394 		}
2395 	} else {
2396 		/*
2397 		 * Caller supplied explicit parameters to use in
2398 		 * sending the frame.
2399 		 */
2400 		if (ath_tx_raw_start(sc, ni, bf, m, params)) {
2401 			error = EIO;		/* XXX */
2402 			goto bad2;
2403 		}
2404 	}
2405 	sc->sc_wd_timer = 5;
2406 	sc->sc_stats.ast_tx_raw++;
2407 
2408 	/*
2409 	 * Update the TIM - if there's anything queued to the
2410 	 * software queue and power save is enabled, we should
2411 	 * set the TIM.
2412 	 */
2413 	ath_tx_update_tim(sc, ni, 1);
2414 
2415 	ATH_TX_UNLOCK(sc);
2416 
2417 	ATH_PCU_LOCK(sc);
2418 	sc->sc_txstart_cnt--;
2419 	ATH_PCU_UNLOCK(sc);
2420 
2422 	/* Put the hardware back to sleep if required */
2423 	ATH_LOCK(sc);
2424 	ath_power_restore_power_state(sc);
2425 	ATH_UNLOCK(sc);
2426 
2427 	return 0;
2428 
2429 bad2:
2430 	ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, "
2431 	    "bf=%p",
2432 	    m,
2433 	    params,
2434 	    bf);
2435 	ATH_TXBUF_LOCK(sc);
2436 	ath_returnbuf_head(sc, bf);
2437 	ATH_TXBUF_UNLOCK(sc);
2438 
2439 bad:
2440 	ATH_TX_UNLOCK(sc);
2441 
2442 	ATH_PCU_LOCK(sc);
2443 	sc->sc_txstart_cnt--;
2444 	ATH_PCU_UNLOCK(sc);
2445 
2446 	/* Put the hardware back to sleep if required */
2447 	ATH_LOCK(sc);
2448 	ath_power_restore_power_state(sc);
2449 	ATH_UNLOCK(sc);
2450 
2451 badbad:
2452 	ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p",
2453 	    m, params);
2454 	sc->sc_stats.ast_tx_raw_fail++;
2455 
2456 	return error;
2457 }
2458 
2459 /* Some helper functions */
2460 
2461 /*
2462  * ADDBA (and potentially others) need to be placed in the same
2463  * hardware queue as the TID/node it's relating to. This is so
2464  * it goes out after any pending non-aggregate frames to the
2465  * same node/TID.
2466  *
2467  * If this isn't done, the ADDBA can go out before the frames
2468  * queued in hardware. Even though these frames have sequence
2469  * numbers -earlier- than the ADDBA and so may legitimately be
2470  * transmitted after it (no frames whose sequence numbers are
2471  * after the ADDBA should be!), they'll arrive after the ADDBA -
2472  * and the receiving end will simply drop them as being out of the BAW.
2473  *
2474  * The frames can't be appended to the TID software queue - they'd
2475  * never be sent out. So these frames have to be directly
2476  * dispatched to the hardware, rather than queued in software.
2477  * So if this function returns true, the TXQ has to be
2478  * overridden and it has to be directly dispatched.
2479  *
2480  * It's a dirty hack, but someone's gotta do it.
2481  */
2482 
2483 /*
2484  * XXX doesn't belong here!
2485  */
2486 static int
2487 ieee80211_is_action(struct ieee80211_frame *wh)
2488 {
2489 	/* Type: Management frame? */
2490 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
2491 	    IEEE80211_FC0_TYPE_MGT)
2492 		return 0;
2493 
2494 	/* Subtype: Action frame? */
2495 	if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) !=
2496 	    IEEE80211_FC0_SUBTYPE_ACTION)
2497 		return 0;
2498 
2499 	return 1;
2500 }
2501 
2502 #define	MS(_v, _f)	(((_v) & _f) >> _f##_S)
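
#if 0
/*
 * Usage sketch (not compiled; illustrative only): pulling fields out
 * of an ADDBA Block Ack parameter set with net80211's
 * IEEE80211_BAPS_* masks and shifts.
 */
static void
ath_tx_baps_example(uint16_t baparamset)
{
	int ex_tid = MS(baparamset, IEEE80211_BAPS_TID);
	int ex_bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);

	printf("tid=%d bufsiz=%d\n", ex_tid, ex_bufsiz);
}
#endif
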
2503 /*
2504  * Return an alternate TID for ADDBA request frames.
2505  *
2506  * Yes, this likely should be done in the net80211 layer.
2507  */
2508 static int
2509 ath_tx_action_frame_override_queue(struct ath_softc *sc,
2510     struct ieee80211_node *ni,
2511     struct mbuf *m0, int *tid)
2512 {
2513 	struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *);
2514 	struct ieee80211_action_ba_addbarequest *ia;
2515 	uint8_t *frm;
2516 	uint16_t baparamset;
2517 
2518 	/* Not action frame? Bail */
2519 	if (! ieee80211_is_action(wh))
2520 		return 0;
2521 
2522 	/* XXX Not needed for frames we send? */
2523 #if 0
2524 	/* Correct length? */
2525 	if (! ieee80211_parse_action(ni, m))
2526 		return 0;
2527 #endif
2528 
2529 	/* Extract out action frame */
2530 	frm = (u_int8_t *)&wh[1];
2531 	ia = (struct ieee80211_action_ba_addbarequest *) frm;
2532 
2533 	/* Not ADDBA? Bail */
2534 	if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA)
2535 		return 0;
2536 	if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST)
2537 		return 0;
2538 
2539 	/* Extract TID, return it */
2540 	baparamset = le16toh(ia->rq_baparamset);
2541 	*tid = (int) MS(baparamset, IEEE80211_BAPS_TID);
2542 
2543 	return 1;
2544 }
2545 #undef	MS
2546 
2547 /* Per-node software queue operations */
2548 
2549 /*
2550  * Add the current packet to the given BAW.
2551  * It is assumed that the current packet
2552  *
2553  * + fits inside the BAW;
2554  * + already has had a sequence number allocated.
2555  *
2556  * Since the BAW status may be modified by both the ath task and
2557  * the net80211/ifnet contexts, the TID must be locked.
2558  */
2559 void
2560 ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
2561     struct ath_tid *tid, struct ath_buf *bf)
2562 {
2563 	int index, cindex;
2564 	struct ieee80211_tx_ampdu *tap;
2565 
2566 	ATH_TX_LOCK_ASSERT(sc);
2567 
2568 	if (bf->bf_state.bfs_isretried)
2569 		return;
2570 
2571 	tap = ath_tx_get_tx_tid(an, tid->tid);
2572 
2573 	if (! bf->bf_state.bfs_dobaw) {
2574 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2575 		    "%s: dobaw=0, seqno=%d, window %d:%d\n",
2576 		    __func__, SEQNO(bf->bf_state.bfs_seqno),
2577 		    tap->txa_start, tap->txa_wnd);
2578 	}
2579 
2580 	if (bf->bf_state.bfs_addedbaw)
2581 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2582 		    "%s: re-added? tid=%d, seqno %d; window %d:%d; "
2583 		    "baw head=%d tail=%d\n",
2584 		    __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2585 		    tap->txa_start, tap->txa_wnd, tid->baw_head,
2586 		    tid->baw_tail);
2587 
2588 	/*
2589 	 * Verify that the given sequence number is not outside of the
2590 	 * BAW.  Complain loudly if that's the case.
2591 	 */
2592 	if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
2593 	    SEQNO(bf->bf_state.bfs_seqno))) {
2594 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2595 		    "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; "
2596 		    "baw head=%d tail=%d\n",
2597 		    __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2598 		    tap->txa_start, tap->txa_wnd, tid->baw_head,
2599 		    tid->baw_tail);
2600 	}
2601 
2602 	/*
2603 	 * ni->ni_txseqs[] is the currently allocated seqno.
2604 	 * The txa state contains the current baw start.
2605 	 */
2606 	index  = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));
2607 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
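	/*
	 * Worked example (illustrative): with txa_start = 4090 and a
	 * frame seqno of 3, ATH_BA_INDEX() yields the modulo-4096
	 * distance ((3 - 4090) & 4095) = 9; cindex then wraps that
	 * offset from baw_head into the tx_buf slot ring.
	 */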
2608 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2609 	    "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d "
2610 	    "baw head=%d tail=%d\n",
2611 	    __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2612 	    tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head,
2613 	    tid->baw_tail);
2614 
2616 #if 0
2617 	assert(tid->tx_buf[cindex] == NULL);
2618 #endif
2619 	if (tid->tx_buf[cindex] != NULL) {
2620 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2621 		    "%s: ba packet dup (index=%d, cindex=%d, "
2622 		    "head=%d, tail=%d)\n",
2623 		    __func__, index, cindex, tid->baw_head, tid->baw_tail);
2624 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2625 		    "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n",
2626 		    __func__,
2627 		    tid->tx_buf[cindex],
2628 		    SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno),
2629 		    bf,
2630 		    SEQNO(bf->bf_state.bfs_seqno)
2631 		);
2632 	}
2633 	tid->tx_buf[cindex] = bf;
2634 
2635 	if (index >= ((tid->baw_tail - tid->baw_head) &
2636 	    (ATH_TID_MAX_BUFS - 1))) {
2637 		tid->baw_tail = cindex;
2638 		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
2639 	}
2640 }
2641 
2642 /*
2643  * Flip the BAW buffer entry over from the existing one to the new one.
2644  *
2645  * When software retransmitting a (sub-)frame, it is entirely possible that
2646  * the frame ath_buf is marked as BUSY and can't be immediately reused.
2647  * In that instance the buffer is cloned and the new buffer is used for
2648  * retransmit. We thus need to update the ath_buf slot in the BAW buf
2649  * tracking array to maintain consistency.
2650  */
2651 static void
2652 ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an,
2653     struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf)
2654 {
2655 	int index, cindex;
2656 	struct ieee80211_tx_ampdu *tap;
2657 	int seqno = SEQNO(old_bf->bf_state.bfs_seqno);
2658 
2659 	ATH_TX_LOCK_ASSERT(sc);
2660 
2661 	tap = ath_tx_get_tx_tid(an, tid->tid);
2662 	index  = ATH_BA_INDEX(tap->txa_start, seqno);
2663 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2664 
2665 	/*
2666 	 * Just warn for now; if it happens then we should find out
2667 	 * about it. It's highly likely the aggregation session will
2668 	 * soon hang.
2669 	 */
2670 	if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {
2671 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2672 		    "%s: retransmitted buffer"
2673 		    " has mismatched seqnos; the BA session may hang.\n",
2674 		    __func__);
2675 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2676 		    "%s: old seqno=%d, new_seqno=%d\n", __func__,
2677 		    old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno);
2678 	}
2679 
2680 	if (tid->tx_buf[cindex] != old_bf) {
2681 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2682 		    "%s: ath_buf pointer incorrect; "
2683 		    "the BA session may hang.\n", __func__);
2684 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2685 		    "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf);
2686 	}
2687 
2688 	tid->tx_buf[cindex] = new_bf;
2689 }
2690 
2691 /*
2692  * seq_start - left edge of BAW
2693  * seq_next - current/next sequence number to allocate
2694  *
2695  * Since the BAW status may be modified by both the ath task and
2696  * the net80211/ifnet contexts, the TID must be locked.
2697  */
2698 static void
2699 ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
2700     struct ath_tid *tid, const struct ath_buf *bf)
2701 {
2702 	int index, cindex;
2703 	struct ieee80211_tx_ampdu *tap;
2704 	int seqno = SEQNO(bf->bf_state.bfs_seqno);
2705 
2706 	ATH_TX_LOCK_ASSERT(sc);
2707 
2708 	tap = ath_tx_get_tx_tid(an, tid->tid);
2709 	index  = ATH_BA_INDEX(tap->txa_start, seqno);
2710 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2711 
2712 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2713 	    "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, "
2714 	    "baw head=%d, tail=%d\n",
2715 	    __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
2716 	    cindex, tid->baw_head, tid->baw_tail);
2717 
2718 	/*
2719 	 * If this occurs then we have a big problem - something else
2720 	 * has slid tap->txa_start along without updating the BAW
2721 	 * tracking start/end pointers. Thus the TX BAW state is now
2722 	 * completely busted.
2723 	 *
2724 	 * But for now, since I haven't yet fixed TDMA and buffer cloning,
2725 	 * it's quite possible that a cloned buffer is making its way
2726 	 * here and causing it to fire off. Disable TDMA for now.
2727 	 */
2728 	if (tid->tx_buf[cindex] != bf) {
2729 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2730 		    "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
2731 		    __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
2732 		    tid->tx_buf[cindex],
2733 		    (tid->tx_buf[cindex] != NULL) ?
2734 		      SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1);
2735 	}
2736 
2737 	tid->tx_buf[cindex] = NULL;
2738 
2739 	while (tid->baw_head != tid->baw_tail &&
2740 	    !tid->tx_buf[tid->baw_head]) {
2741 		INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
2742 		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
2743 	}
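	/*
	 * Illustrative example: with seqnos 100..103 outstanding,
	 * completing 101 first only NULLs its slot (the head, 100, is
	 * still live); once 100 completes, the loop above walks both
	 * freed slots and slides txa_start from 100 to 102.
	 */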
2744 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2745 	    "%s: tid=%d: baw is now %d:%d, baw head=%d\n",
2746 	    __func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head);
2747 }
2748 
2749 static void
2750 ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,
2751     struct ath_buf *bf)
2752 {
2753 	struct ieee80211_frame *wh;
2754 
2755 	ATH_TX_LOCK_ASSERT(sc);
2756 
2757 	if (tid->an->an_leak_count > 0) {
2758 		wh = mtod(bf->bf_m, struct ieee80211_frame *);
2759 
2760 		/*
2761 		 * Update MORE based on the software/net80211 queue states.
2762 		 */
2763 		if ((tid->an->an_stack_psq > 0)
2764 		    || (tid->an->an_swq_depth > 0))
2765 			wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
2766 		else
2767 			wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;
2768 
2769 		DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
2770 		    "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n",
2771 		    __func__,
2772 		    tid->an->an_node.ni_macaddr,
2773 		    ":",
2774 		    tid->an->an_leak_count,
2775 		    tid->an->an_stack_psq,
2776 		    tid->an->an_swq_depth,
2777 		    !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));
2778 
2779 		/*
2780 		 * Re-sync the underlying buffer.
2781 		 */
2782 		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
2783 		    BUS_DMASYNC_PREWRITE);
2784 
2785 		tid->an->an_leak_count--;
2786 	}
2787 }
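
/*
 * Illustrative note: an_leak_count is bumped elsewhere (eg when a
 * PS-POLL arrives for a sleeping station) and each frame leaked
 * through here decrements it; MORE_DATA is set while either the
 * net80211 power-save queue or this driver's software queue still
 * holds frames for the node, so the station knows to keep polling.
 */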
2788 
2789 static int
2790 ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)
2791 {
2792 
2793 	ATH_TX_LOCK_ASSERT(sc);
2794 
2795 	if (tid->an->an_leak_count > 0) {
2796 		return (1);
2797 	}
2798 	if (tid->paused)
2799 		return (0);
2800 	return (1);
2801 }
2802 
2803 /*
2804  * Mark the current node/TID as ready to TX.
2805  *
2806  * This is done to make it easy for the software scheduler to
2807  * find which nodes have data to send.
2808  *
2809  * The TXQ lock must be held.
2810  */
2811 void
2812 ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
2813 {
2814 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2815 
2816 	ATH_TX_LOCK_ASSERT(sc);
2817 
2818 	/*
2819 	 * If we are leaking out a frame to this destination
2820 	 * for PS-POLL, ensure that we allow scheduling to
2821 	 * occur.
2822 	 */
2823 	if (! ath_tx_tid_can_tx_or_sched(sc, tid))
2824 		return;		/* paused, can't schedule yet */
2825 
2826 	if (tid->sched)
2827 		return;		/* already scheduled */
2828 
2829 	tid->sched = 1;
2830 
2831 #if 0
2832 	/*
2833 	 * If this is a sleeping node we're leaking to, give
2834 	 * it a higher priority.  This is so bad for QoS it hurts.
2835 	 */
2836 	if (tid->an->an_leak_count) {
2837 		TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
2838 	} else {
2839 		TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2840 	}
2841 #endif
2842 
2843 	/*
2844 	 * We can't do the above - it'll confuse the TXQ software
2845 	 * scheduler which will keep checking the _head_ TID
2846 	 * in the list to see if it has traffic.  If we queue
2847 	 * a TID to the head of the list and it doesn't transmit,
2848 	 * we'll check it again.
2849 	 *
2850 	 * So, get the rest of this frame-leaking support working
2851 	 * reliably first and _then_ optimise it so leaked frames are
2852 	 * pushed out in front of any other pending software-queued
2853 	 * nodes.
2854 	 */
2855 	TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2856 }
2857 
2858 /*
2859  * Mark the current node as no longer needing to be polled for
2860  * TX packets.
2861  *
2862  * The TXQ lock must be held.
2863  */
2864 static void
2865 ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)
2866 {
2867 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2868 
2869 	ATH_TX_LOCK_ASSERT(sc);
2870 
2871 	if (tid->sched == 0)
2872 		return;
2873 
2874 	tid->sched = 0;
2875 	TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
2876 }
2877 
2878 /*
2879  * Assign a sequence number manually to the given frame.
2880  *
2881  * This should only be called for A-MPDU TX frames.
2882  */
2883 static ieee80211_seq
2884 ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
2885     struct ath_buf *bf, struct mbuf *m0)
2886 {
2887 	struct ieee80211_frame *wh;
2888 	int tid, pri;
2889 	ieee80211_seq seqno;
2890 	uint8_t subtype;
2891 
2892 	/* TID lookup */
2893 	wh = mtod(m0, struct ieee80211_frame *);
2894 	pri = M_WME_GETAC(m0);			/* honor classification */
2895 	tid = WME_AC_TO_TID(pri);
2896 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n",
2897 	    __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
2898 
2899 	/* XXX Is it a control frame? Ignore */
2900 
2901 	/* Does the packet require a sequence number? */
2902 	if (! IEEE80211_QOS_HAS_SEQ(wh))
2903 		return -1;
2904 
2905 	ATH_TX_LOCK_ASSERT(sc);
2906 
2907 	/*
2908 	 * Is it a QOS NULL Data frame? Give it a sequence number from
2909 	 * the default TID (IEEE80211_NONQOS_TID.)
2910 	 *
2911 	 * The RX path of everything I've looked at doesn't include the NULL
2912 	 * data frame sequence number in the aggregation state updates, so
2913 	 * assigning it a sequence number there will cause a BAW hole on the
2914 	 * RX side.
2915 	 */
2916 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2917 	if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) {
2918 		/* XXX no locking for this TID? This is a bit of a problem. */
2919 		seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
2920 		INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
2921 	} else {
2922 		/* Manually assign sequence number */
2923 		seqno = ni->ni_txseqs[tid];
2924 		INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE);
2925 	}
2926 	*(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
2927 	M_SEQNO_SET(m0, seqno);
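	/*
	 * Illustrative: the 802.11 sequence control field carries the
	 * 12-bit sequence number in bits 4..15 and the fragment number
	 * in bits 0..3, hence the shift by IEEE80211_SEQ_SEQ_SHIFT (4)
	 * before the little-endian store - eg seqno 0x123 is stored as
	 * 0x1230.
	 */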
2928 
2929 	/* Return so caller can do something with it if needed */
2930 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s:  -> seqno=%d\n", __func__, seqno);
2931 	return seqno;
2932 }
2933 
2934 /*
2935  * Attempt to direct dispatch an aggregate frame to hardware.
2936  * If the frame is out of the BAW, queue it.
2937  * Otherwise, direct dispatch it as a single frame.
2938  */
2939 static void
2940 ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an,
2941     struct ath_txq *txq, struct ath_buf *bf)
2942 {
2943 	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
2944 	struct ieee80211_tx_ampdu *tap;
2945 
2946 	ATH_TX_LOCK_ASSERT(sc);
2947 
2948 	tap = ath_tx_get_tx_tid(an, tid->tid);
2949 
2950 	/* paused? queue */
2951 	if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
2952 		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
2953 		/* XXX don't sched - we're paused! */
2954 		return;
2955 	}
2956 
2957 	/* outside baw? queue */
2958 	if (bf->bf_state.bfs_dobaw &&
2959 	    (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
2960 	    SEQNO(bf->bf_state.bfs_seqno)))) {
2961 		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
2962 		ath_tx_tid_sched(sc, tid);
2963 		return;
2964 	}
2965 
2966 	/*
2967 	 * This is a temporary check and should be removed once
2968 	 * all the relevant code paths have been fixed.
2969 	 *
2970 	 * During aggregate retries, it's possible that the head
2971 	 * frame will fail (which has the bfs_aggr and bfs_nframes
2972 	 * fields set for said aggregate) and will be retried as
2973 	 * a single frame.  In this instance, the values should
2974 	 * be reset or the completion code will get upset with you.
2975 	 */
2976 	if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) {
2977 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
2978 		    "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__,
2979 		    bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes);
2980 		bf->bf_state.bfs_aggr = 0;
2981 		bf->bf_state.bfs_nframes = 1;
2982 	}
2983 
2984 	/* Update CLRDMASK just before this frame is queued */
2985 	ath_tx_update_clrdmask(sc, tid, bf);
2986 
2987 	/* Direct dispatch to hardware */
2988 	ath_tx_do_ratelookup(sc, bf);
2989 	ath_tx_calc_duration(sc, bf);
2990 	ath_tx_calc_protection(sc, bf);
2991 	ath_tx_set_rtscts(sc, bf);
2992 	ath_tx_rate_fill_rcflags(sc, bf);
2993 	ath_tx_setds(sc, bf);
2994 
2995 	/* Statistics */
2996 	sc->sc_aggr_stats.aggr_low_hwq_single_pkt++;
2997 
2998 	/* Track per-TID hardware queue depth correctly */
2999 	tid->hwq_depth++;
3000 
3001 	/* Add to BAW */
3002 	if (bf->bf_state.bfs_dobaw) {
3003 		ath_tx_addto_baw(sc, an, tid, bf);
3004 		bf->bf_state.bfs_addedbaw = 1;
3005 	}
3006 
3007 	/* Set completion handler, multi-frame aggregate or not */
3008 	bf->bf_comp = ath_tx_aggr_comp;
3009 
3010 	/*
3011 	 * Update the current leak count if
3012 	 * we're leaking frames; and set the
3013 	 * MORE flag as appropriate.
3014 	 */
3015 	ath_tx_leak_count_update(sc, tid, bf);
3016 
3017 	/* Hand off to hardware */
3018 	ath_tx_handoff(sc, txq, bf);
3019 }
3020 
3021 /*
3022  * Attempt to send the packet.
3023  * If the queue isn't busy, direct-dispatch.
3024  * If the queue is busy enough, queue the given packet on the
3025  *  relevant software queue.
3026  */
3027 void
3028 ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
3029     struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
3030 {
3031 	struct ath_node *an = ATH_NODE(ni);
3032 	struct ieee80211_frame *wh;
3033 	struct ath_tid *atid;
3034 	int pri, tid;
3035 	struct mbuf *m0 = bf->bf_m;
3036 
3037 	ATH_TX_LOCK_ASSERT(sc);
3038 
3039 	/* Fetch the TID - non-QoS frames get assigned to TID 16 */
3040 	wh = mtod(m0, struct ieee80211_frame *);
3041 	pri = ath_tx_getac(sc, m0);
3042 	tid = ath_tx_gettid(sc, m0);
3043 	atid = &an->an_tid[tid];
3044 
3045 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
3046 	    __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
3047 
3048 	/* Set local packet state, used to queue packets to hardware */
3049 	/* XXX potentially duplicate info, re-check */
3050 	bf->bf_state.bfs_tid = tid;
3051 	bf->bf_state.bfs_tx_queue = txq->axq_qnum;
3052 	bf->bf_state.bfs_pri = pri;
3053 
3054 	/*
3055 	 * If the hardware queue isn't busy, dispatch it directly.
3056 	 * If the hardware queue is busy, software queue it.
3057 	 * If the TID is paused or the traffic is outside the BAW,
3058 	 * software queue it.
3059 	 *
3060 	 * If the node is in power-save and we're leaking a frame,
3061 	 * leak a single frame.
3062 	 */
3063 	if (! ath_tx_tid_can_tx_or_sched(sc, atid)) {
3064 		/* TID is paused, queue */
3065 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__);
3066 		/*
3067 		 * If the caller requested that it be sent at a high
3068 		 * priority, queue it at the head of the list.
3069 		 */
3070 		if (queue_to_head)
3071 			ATH_TID_INSERT_HEAD(atid, bf, bf_list);
3072 		else
3073 			ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3074 	} else if (ath_tx_ampdu_pending(sc, an, tid)) {
3075 		/* AMPDU pending; queue */
3076 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__);
3077 		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3078 		/* XXX sched? */
3079 	} else if (ath_tx_ampdu_running(sc, an, tid)) {
3080 		/* AMPDU running, attempt direct dispatch if possible */
3081 
3082 		/*
3083 		 * Always queue the frame to the tail of the list.
3084 		 */
3085 		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3086 
3087 		/*
3088 		 * If the hardware queue isn't busy, direct dispatch
3089 		 * the head frame in the list.  Don't schedule the
3090 		 * TID - let it build some more frames first?
3091 		 *
3092 		 * When running A-MPDU, always just check the hardware
3093 		 * queue depth against the aggregate frame limit.
3094 		 * We don't want to burst a large number of single frames
3095 		 * out to the hardware; we want to aggressively hold back.
3096 		 *
3097 		 * Otherwise, schedule the TID.
3098 		 */
3099 		/* XXX TXQ locking */
3100 		if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_aggr) {
3101 			bf = ATH_TID_FIRST(atid);
3102 			ATH_TID_REMOVE(atid, bf, bf_list);
3103 
3104 			/*
3105 			 * Ensure it's definitely treated as a non-AMPDU
3106 			 * frame - this information may have been left
3107 			 * over from a previous attempt.
3108 			 */
3109 			bf->bf_state.bfs_aggr = 0;
3110 			bf->bf_state.bfs_nframes = 1;
3111 
3112 			/* Queue to the hardware */
3113 			ath_tx_xmit_aggr(sc, an, txq, bf);
3114 			DPRINTF(sc, ATH_DEBUG_SW_TX,
3115 			    "%s: xmit_aggr\n",
3116 			    __func__);
3117 		} else {
3118 			DPRINTF(sc, ATH_DEBUG_SW_TX,
3119 			    "%s: ampdu; swq'ing\n",
3120 			    __func__);
3121 
3122 			ath_tx_tid_sched(sc, atid);
3123 		}
3124 	/*
3125 	 * If we're not doing A-MPDU, be prepared to direct dispatch
3126 	 * up to both limits if possible.  This particular corner
3127 	 * case may end up with packet starvation between aggregate
3128 	 * traffic and non-aggregate traffic: we want to ensure
3129 	 * that non-aggregate stations get a few frames queued to the
3130 	 * hardware before the aggregate station(s) get their chance.
3131 	 *
3132 	 * So if you only ever see a couple of frames direct dispatched
3133 	 * to the hardware from a non-AMPDU client, check both here
3134 	 * and in the software queue dispatcher to ensure that those
3135 	 * non-AMPDU stations get a fair chance to transmit.
3136 	 */
3137 	/* XXX TXQ locking */
3138 	} else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3139 		    (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3140 		/* AMPDU not running, attempt direct dispatch */
3141 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);
3142 		/* See if clrdmask needs to be set */
3143 		ath_tx_update_clrdmask(sc, atid, bf);
3144 
3145 		/*
3146 		 * Update the current leak count if
3147 		 * we're leaking frames; and set the
3148 		 * MORE flag as appropriate.
3149 		 */
3150 		ath_tx_leak_count_update(sc, atid, bf);
3151 
3152 		/*
3153 		 * Dispatch the frame.
3154 		 */
3155 		ath_tx_xmit_normal(sc, txq, bf);
3156 	} else {
3157 		/* Busy; queue */
3158 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__);
3159 		ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3160 		ath_tx_tid_sched(sc, atid);
3161 	}
3162 }
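
/*
 * Summary of the dispatch decisions above (illustrative):
 *
 *   TID paused / unschedulable -> software queue (head if requested)
 *   A-MPDU pending             -> software queue tail
 *   A-MPDU running             -> direct dispatch the head frame while
 *                                 hardware depth < sc_hwq_limit_aggr,
 *                                 else schedule the TID
 *   otherwise                  -> direct dispatch while below both the
 *                                 non-aggregate and aggregate hardware
 *                                 depth limits, else software queue
 *                                 and schedule
 */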
3163 
3164 /*
3165  * Only set the clrdmask bit if none of the node's TIDs are
3166  * currently filtered.
3167  *
3168  * XXX TODO: go through all the callers and check to see
3169  * which are being called in the context of looping over all
3170  * TIDs (eg, if all tids are being paused, resumed, etc.)
3171  * That'll avoid O(n^2) complexity here.
3172  */
3173 static void
3174 ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an)
3175 {
3176 	int i;
3177 
3178 	ATH_TX_LOCK_ASSERT(sc);
3179 
3180 	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3181 		if (an->an_tid[i].isfiltered == 1)
3182 			return;
3183 	}
3184 	an->clrdmask = 1;
3185 }
3186 
3187 /*
3188  * Configure the per-TID node state.
3189  *
3190  * This likely belongs in if_ath_node.c but I can't think of anywhere
3191  * else to put it just yet.
3192  *
3193  * This sets up the SLISTs and the mutex as appropriate.
3194  */
3195 void
3196 ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an)
3197 {
3198 	int i, j;
3199 	struct ath_tid *atid;
3200 
3201 	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3202 		atid = &an->an_tid[i];
3203 
3204 		/* XXX now with this bzero(), is the field zeroing needed? */
3205 		bzero(atid, sizeof(*atid));
3206 
3207 		TAILQ_INIT(&atid->tid_q);
3208 		TAILQ_INIT(&atid->filtq.tid_q);
3209 		atid->tid = i;
3210 		atid->an = an;
3211 		for (j = 0; j < ATH_TID_MAX_BUFS; j++)
3212 			atid->tx_buf[j] = NULL;
3213 		atid->baw_head = atid->baw_tail = 0;
3214 		atid->paused = 0;
3215 		atid->sched = 0;
3216 		atid->hwq_depth = 0;
3217 		atid->cleanup_inprogress = 0;
3218 		if (i == IEEE80211_NONQOS_TID)
3219 			atid->ac = ATH_NONQOS_TID_AC;
3220 		else
3221 			atid->ac = TID_TO_WME_AC(i);
3222 	}
3223 	an->clrdmask = 1;	/* Always start by setting this bit */
3224 }
3225 
3226 /*
3227  * Pause the current TID. This stops packets from being transmitted
3228  * on it.
3229  *
3230  * Since this is also called from upper layers as well as the driver,
3231  * it will get the TID lock.
3232  */
3233 static void
3234 ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
3235 {
3236 
3237 	ATH_TX_LOCK_ASSERT(sc);
3238 	tid->paused++;
3239 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, paused = %d\n",
3240 	    __func__,
3241 	    tid->an->an_node.ni_macaddr, ":",
3242 	    tid->tid,
3243 	    tid->paused);
3244 }
3245 
3246 /*
3247  * Unpause the current TID, and schedule it if needed.
3248  */
3249 static void
3250 ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)
3251 {
3252 	ATH_TX_LOCK_ASSERT(sc);
3253 
3254 	/*
3255 	 * There are some odd places where ath_tx_tid_resume() is called
3256 	 * when it shouldn't be; this works around that particular issue
3257 	 * until it's actually resolved.
3258 	 */
3259 	if (tid->paused == 0) {
3260 		device_printf(sc->sc_dev,
3261 		    "%s: [%6D]: tid=%d, paused=0?\n",
3262 		    __func__,
3263 		    tid->an->an_node.ni_macaddr, ":",
3264 		    tid->tid);
3265 	} else {
3266 		tid->paused--;
3267 	}
3268 
3269 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3270 	    "%s: [%6D]: tid=%d, unpaused = %d\n",
3271 	    __func__,
3272 	    tid->an->an_node.ni_macaddr, ":",
3273 	    tid->tid,
3274 	    tid->paused);
3275 
3276 	if (tid->paused)
3277 		return;
3278 
3279 	/*
3280 	 * Override the clrdmask configuration for the next frame
3281 	 * from this TID, just to get the ball rolling.
3282 	 */
3283 	ath_tx_set_clrdmask(sc, tid->an);
3284 
3285 	if (tid->axq_depth == 0)
3286 		return;
3287 
3288 	/* XXX isfiltered shouldn't ever be 1 at this point */
3289 	if (tid->isfiltered == 1) {
3290 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n",
3291 		    __func__);
3292 		return;
3293 	}
3294 
3295 	ath_tx_tid_sched(sc, tid);
3296 
3297 	/*
3298 	 * Queue the software TX scheduler.
3299 	 */
3300 	ath_tx_swq_kick(sc);
3301 }
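
/*
 * Illustrative note: tid->paused is a counter rather than a flag -
 * eg a BAR suspension and a filtered-frame pause may overlap, and
 * the TID is only rescheduled once every pause has been matched by
 * a resume.
 */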
3302 
3303 /*
3304  * Add the given ath_buf to the TID filtered frame list.
3305  * This requires the TID be filtered.
3306  */
3307 static void
3308 ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid,
3309     struct ath_buf *bf)
3310 {
3311 
3312 	ATH_TX_LOCK_ASSERT(sc);
3313 
3314 	if (!tid->isfiltered)
3315 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n",
3316 		    __func__);
3317 
3318 	DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf);
3319 
3320 	/* Set the retry bit and bump the retry counter */
3321 	ath_tx_set_retry(sc, bf);
3322 	sc->sc_stats.ast_tx_swfiltered++;
3323 
3324 	ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list);
3325 }
3326 
3327 /*
3328  * Handle a completed filtered frame from the given TID.
3329  * This just enables/pauses the filtered frame state if required
3330  * and appends the filtered frame to the filtered queue.
3331  */
3332 static void
3333 ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid,
3334     struct ath_buf *bf)
3335 {
3336 
3337 	ATH_TX_LOCK_ASSERT(sc);
3338 
3339 	if (! tid->isfiltered) {
3340 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n",
3341 		    __func__, tid->tid);
3342 		tid->isfiltered = 1;
3343 		ath_tx_tid_pause(sc, tid);
3344 	}
3345 
3346 	/* Add the frame to the filter queue */
3347 	ath_tx_tid_filt_addbuf(sc, tid, bf);
3348 }
3349 
3350 /*
3351  * Complete the filtered frame TX completion.
3352  *
3353  * If there are no more frames in the hardware queue, unpause/unfilter
3354  * the TID if applicable.  Otherwise we will wait for a node PS transition
3355  * to unfilter.
3356  */
3357 static void
3358 ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid)
3359 {
3360 	struct ath_buf *bf;
3361 	int do_resume = 0;
3362 
3363 	ATH_TX_LOCK_ASSERT(sc);
3364 
3365 	if (tid->hwq_depth != 0)
3366 		return;
3367 
3368 	DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n",
3369 	    __func__, tid->tid);
3370 	if (tid->isfiltered == 1) {
3371 		tid->isfiltered = 0;
3372 		do_resume = 1;
3373 	}
3374 
3375 	/* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */
3376 	ath_tx_set_clrdmask(sc, tid->an);
3377 
3378 	/* XXX this is really quite inefficient */
3379 	while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) {
3380 		ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3381 		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3382 	}
3383 
3384 	/* And only resume if we had paused before */
3385 	if (do_resume)
3386 		ath_tx_tid_resume(sc, tid);
3387 }
3388 
3389 /*
3390  * Called when a single (aggregate or otherwise) frame is completed.
3391  *
3392  * Returns 0 if the buffer could be added to the filtered list
3393  * (cloned or otherwise), 1 if the buffer couldn't be added to the
3394  * filtered list (failed clone; expired retry) and the caller should
3395  * free it and handle it like a failure (eg by sending a BAR.)
3396  *
3397  * Since the buffer may be cloned, bf must not be touched after this
3398  * if the return value is 0.
3399  */
3400 static int
3401 ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid,
3402     struct ath_buf *bf)
3403 {
3404 	struct ath_buf *nbf;
3405 	int retval;
3406 
3407 	ATH_TX_LOCK_ASSERT(sc);
3408 
3409 	/*
3410 	 * Don't allow a filtered frame to live forever.
3411 	 */
3412 	if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3413 		sc->sc_stats.ast_tx_swretrymax++;
3414 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3415 		    "%s: bf=%p, seqno=%d, exceeded retries\n",
3416 		    __func__,
3417 		    bf,
3418 		    SEQNO(bf->bf_state.bfs_seqno));
3419 		retval = 1; /* error */
3420 		goto finish;
3421 	}
3422 
3423 	/*
3424 	 * A busy buffer can't be added to the retry list.
3425 	 * It needs to be cloned.
3426 	 */
3427 	if (bf->bf_flags & ATH_BUF_BUSY) {
3428 		nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3429 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3430 		    "%s: busy buffer clone: %p -> %p\n",
3431 		    __func__, bf, nbf);
3432 	} else {
3433 		nbf = bf;
3434 	}
3435 
3436 	if (nbf == NULL) {
3437 		DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3438 		    "%s: busy buffer couldn't be cloned (%p)!\n",
3439 		    __func__, bf);
3440 		retval = 1; /* error */
3441 	} else {
3442 		ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3443 		retval = 0; /* ok */
3444 	}
3445 finish:
3446 	ath_tx_tid_filt_comp_complete(sc, tid);
3447 
3448 	return (retval);
3449 }
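
/*
 * A sketch of the caller contract for ath_tx_tid_filt_comp_single():
 * on 0 the buffer (or a clone of it) now lives on the filtered list
 * and must not be touched; on 1 the caller still owns the buffer and
 * must fail it out itself.  Illustrative only - the real caller
 * (ath_tx_aggr_comp_unaggr()) also handles BAR suspension.
 */
#if 0
	if (ath_tx_tid_filt_comp_single(sc, atid, bf) == 0) {
		/* Consumed: bf (or its clone) is on the filtered list */
		return;
	}
	/* Not filterable: remove from the BAW and complete as a failure */
	if (bf->bf_state.bfs_dobaw)
		ath_tx_update_baw(sc, an, atid, bf);
	bf->bf_state.bfs_dobaw = 0;
	ath_tx_default_comp(sc, bf, 1);
#endif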
3450 
3451 static void
3452 ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid,
3453     struct ath_buf *bf_first, ath_bufhead *bf_q)
3454 {
3455 	struct ath_buf *bf, *bf_next, *nbf;
3456 
3457 	ATH_TX_LOCK_ASSERT(sc);
3458 
3459 	bf = bf_first;
3460 	while (bf) {
3461 		bf_next = bf->bf_next;
3462 		bf->bf_next = NULL;	/* Remove it from the aggr list */
3463 
3464 		/*
3465 		 * Don't allow a filtered frame to live forever.
3466 		 */
3467 		if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3468 			sc->sc_stats.ast_tx_swretrymax++;
3469 			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3470 			    "%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n",
3471 			    __func__,
3472 			    tid->tid,
3473 			    bf,
3474 			    SEQNO(bf->bf_state.bfs_seqno));
3475 			TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3476 			goto next;
3477 		}
3478 
3479 		if (bf->bf_flags & ATH_BUF_BUSY) {
3480 			nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3481 			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3482 			    "%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n",
3483 			    __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno));
3484 		} else {
3485 			nbf = bf;
3486 		}
3487 
3488 		/*
3489 		 * If the buffer couldn't be cloned, add it to bf_q;
3490 		 * the caller will free the buffer(s) as required.
3491 		 */
3492 		if (nbf == NULL) {
3493 			DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3494 			    "%s: tid=%d, buffer couldn't be cloned! (%p) seqno=%d\n",
3495 			    __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno));
3496 			TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3497 		} else {
3498 			ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3499 		}
3500 next:
3501 		bf = bf_next;
3502 	}
3503 
3504 	ath_tx_tid_filt_comp_complete(sc, tid);
3505 }
3506 
3507 /*
3508  * Suspend the queue because we need to TX a BAR.
3509  */
3510 static void
3511 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid)
3512 {
3513 
3514 	ATH_TX_LOCK_ASSERT(sc);
3515 
3516 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3517 	    "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n",
3518 	    __func__,
3519 	    tid->tid,
3520 	    tid->bar_wait,
3521 	    tid->bar_tx);
3522 
3523 	/* We shouldn't be called when bar_tx is 1 */
3524 	if (tid->bar_tx) {
3525 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3526 		    "%s: bar_tx is 1?!\n", __func__);
3527 	}
3528 
3529 	/* If we've already been called, just be patient. */
3530 	if (tid->bar_wait)
3531 		return;
3532 
3533 	/* Wait! */
3534 	tid->bar_wait = 1;
3535 
3536 	/* Only one pause, no matter how many frames fail */
3537 	ath_tx_tid_pause(sc, tid);
3538 }
3539 
3540 /*
3541  * We've finished with BAR handling - either we succeeded or
3542  * failed. Either way, unsuspend TX.
3543  */
3544 static void
3545 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid)
3546 {
3547 
3548 	ATH_TX_LOCK_ASSERT(sc);
3549 
3550 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3551 	    "%s: %6D: TID=%d, called\n",
3552 	    __func__,
3553 	    tid->an->an_node.ni_macaddr,
3554 	    ":",
3555 	    tid->tid);
3556 
3557 	if (tid->bar_tx == 0 || tid->bar_wait == 0) {
3558 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3559 		    "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3560 		    __func__, tid->an->an_node.ni_macaddr, ":",
3561 		    tid->tid, tid->bar_tx, tid->bar_wait);
3562 	}
3563 
3564 	tid->bar_tx = tid->bar_wait = 0;
3565 	ath_tx_tid_resume(sc, tid);
3566 }
3567 
3568 /*
3569  * Return whether we're ready to TX a BAR frame.
3570  *
3571  * Requires the TID lock be held.
3572  */
3573 static int
3574 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid)
3575 {
3576 
3577 	ATH_TX_LOCK_ASSERT(sc);
3578 
3579 	if (tid->bar_wait == 0 || tid->hwq_depth > 0)
3580 		return (0);
3581 
3582 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3583 	    "%s: %6D: TID=%d, bar ready\n",
3584 	    __func__,
3585 	    tid->an->an_node.ni_macaddr,
3586 	    ":",
3587 	    tid->tid);
3588 
3589 	return (1);
3590 }
3591 
3592 /*
3593  * Check whether the current TID is ready to have a BAR
3594  * TXed and if so, do the TX.
3595  *
3596  * Since the TID/TXQ lock can't be held during a call to
3597  * ieee80211_send_bar(), we have to do the dirty thing of unlocking it,
3598  * sending the BAR and locking it again.
3599  *
3600  * Eventually, the code to send the BAR should be broken out
3601  * from this routine so the lock doesn't have to be reacquired
3602  * just to be immediately dropped by the caller.
3603  */
3604 static void
3605 ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid)
3606 {
3607 	struct ieee80211_tx_ampdu *tap;
3608 
3609 	ATH_TX_LOCK_ASSERT(sc);
3610 
3611 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3612 	    "%s: %6D: TID=%d, called\n",
3613 	    __func__,
3614 	    tid->an->an_node.ni_macaddr,
3615 	    ":",
3616 	    tid->tid);
3617 
3618 	tap = ath_tx_get_tx_tid(tid->an, tid->tid);
3619 
3620 	/*
3621 	 * This is an error condition!
3622 	 */
3623 	if (tid->bar_wait == 0 || tid->bar_tx == 1) {
3624 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3625 		    "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3626 		    __func__, tid->an->an_node.ni_macaddr, ":",
3627 		    tid->tid, tid->bar_tx, tid->bar_wait);
3628 		return;
3629 	}
3630 
3631 	/* Don't do anything if we still have pending frames */
3632 	if (tid->hwq_depth > 0) {
3633 		DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3634 		    "%s: %6D: TID=%d, hwq_depth=%d, waiting\n",
3635 		    __func__,
3636 		    tid->an->an_node.ni_macaddr,
3637 		    ":",
3638 		    tid->tid,
3639 		    tid->hwq_depth);
3640 		return;
3641 	}
3642 
3643 	/* We're now about to TX */
3644 	tid->bar_tx = 1;
3645 
3646 	/*
3647 	 * Override the clrdmask configuration for the next frame,
3648 	 * just to get the ball rolling.
3649 	 */
3650 	ath_tx_set_clrdmask(sc, tid->an);
3651 
3652 	/*
3653 	 * Calculate new BAW left edge, now that all frames have either
3654 	 * succeeded or failed.
3655 	 *
3656 	 * XXX verify this is _actually_ the valid value to begin at!
3657 	 */
3658 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3659 	    "%s: %6D: TID=%d, new BAW left edge=%d\n",
3660 	    __func__,
3661 	    tid->an->an_node.ni_macaddr,
3662 	    ":",
3663 	    tid->tid,
3664 	    tap->txa_start);
3665 
3666 	/* Try sending the BAR frame */
3667 	/* We can't hold the lock here! */
3668 
3669 	ATH_TX_UNLOCK(sc);
3670 	if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) {
3671 		/* Success? Now we wait for notification that it's done */
3672 		ATH_TX_LOCK(sc);
3673 		return;
3674 	}
3675 
3676 	/* Failure? For now, warn loudly and continue */
3677 	ATH_TX_LOCK(sc);
3678 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3679 	    "%s: %6D: TID=%d, failed to TX BAR, continue!\n",
3680 	    __func__, tid->an->an_node.ni_macaddr, ":",
3681 	    tid->tid);
3682 	ath_tx_tid_bar_unsuspend(sc, tid);
3683 }
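
/*
 * A sketch of the canonical BAR sequence used by the retry and
 * completion paths in this file: suspend first (which pauses the TID
 * exactly once, no matter how many frames failed), then TX the BAR
 * only once the hardware queue has drained.  Illustrative only.
 */
#if 0
	ATH_TX_LOCK(sc);
	ath_tx_tid_bar_suspend(sc, atid);	/* set bar_wait; pause TID */
	if (ath_tx_tid_bar_tx_ready(sc, atid))	/* bar_wait && hwq empty? */
		ath_tx_tid_bar_tx(sc, atid);	/* drops/retakes the lock */
	ATH_TX_UNLOCK(sc);
#endif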
3684 
3685 static void
3686 ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an,
3687     struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)
3688 {
3689 
3690 	ATH_TX_LOCK_ASSERT(sc);
3691 
3692 	/*
3693 	 * If the current TID is running AMPDU, update
3694 	 * the BAW.
3695 	 */
3696 	if (ath_tx_ampdu_running(sc, an, tid->tid) &&
3697 	    bf->bf_state.bfs_dobaw) {
3698 		/*
3699 		 * Only remove the frame from the BAW if it's
3700 		 * been transmitted at least once; this means
3701 		 * the frame was in the BAW to begin with.
3702 		 */
3703 		if (bf->bf_state.bfs_retries > 0) {
3704 			ath_tx_update_baw(sc, an, tid, bf);
3705 			bf->bf_state.bfs_dobaw = 0;
3706 		}
3707 #if 0
3708 		/*
3709 		 * This has become a non-fatal error now
3710 		 */
3711 		if (! bf->bf_state.bfs_addedbaw)
3712 			DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
3713 			    "%s: wasn't added: seqno %d\n",
3714 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
3715 #endif
3716 	}
3717 
3718 	/* Strip it out of an aggregate list if it was in one */
3719 	bf->bf_next = NULL;
3720 
3721 	/* Insert on the free queue to be freed by the caller */
3722 	TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
3723 }
3724 
3725 static void
3726 ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,
3727     const char *pfx, struct ath_tid *tid, struct ath_buf *bf)
3728 {
3729 	struct ieee80211_node *ni = &an->an_node;
3730 	struct ath_txq *txq;
3731 	struct ieee80211_tx_ampdu *tap;
3732 
3733 	txq = sc->sc_ac2q[tid->ac];
3734 	tap = ath_tx_get_tx_tid(an, tid->tid);
3735 
3736 	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3737 	    "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, "
3738 	    "seqno=%d, retry=%d\n",
3739 	    __func__,
3740 	    pfx,
3741 	    ni->ni_macaddr,
3742 	    ":",
3743 	    bf,
3744 	    bf->bf_state.bfs_addedbaw,
3745 	    bf->bf_state.bfs_dobaw,
3746 	    SEQNO(bf->bf_state.bfs_seqno),
3747 	    bf->bf_state.bfs_retries);
3748 	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3749 	    "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3750 	    __func__,
3751 	    pfx,
3752 	    ni->ni_macaddr,
3753 	    ":",
3754 	    bf,
3755 	    txq->axq_qnum,
3756 	    txq->axq_depth,
3757 	    txq->axq_aggr_depth);
3758 	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3759 	    "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, "
3760 	      "isfiltered=%d\n",
3761 	    __func__,
3762 	    pfx,
3763 	    ni->ni_macaddr,
3764 	    ":",
3765 	    bf,
3766 	    tid->axq_depth,
3767 	    tid->hwq_depth,
3768 	    tid->bar_wait,
3769 	    tid->isfiltered);
3770 	DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
3771 	    "%s: %s: %6D: tid %d: "
3772 	    "sched=%d, paused=%d, "
3773 	    "incomp=%d, baw_head=%d, "
3774 	    "baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
3775 	     __func__,
3776 	     pfx,
3777 	     ni->ni_macaddr,
3778 	     ":",
3779 	     tid->tid,
3780 	     tid->sched, tid->paused,
3781 	     tid->incomp, tid->baw_head,
3782 	     tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
3783 	     ni->ni_txseqs[tid->tid]);
3784 
3785 	/* XXX Dump the frame, see what it is? */
3786 	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
3787 		ieee80211_dump_pkt(ni->ni_ic,
3788 		    mtod(bf->bf_m, const uint8_t *),
3789 		    bf->bf_m->m_len, 0, -1);
3790 }
3791 
3792 /*
3793  * Free any packets currently pending in the software TX queue.
3794  *
3795  * This will be called when a node is being deleted.
3796  *
3797  * It can also be called on an active node during an interface
3798  * reset or state transition.
3799  *
3800  * (From Linux/reference):
3801  *
3802  * TODO: For frame(s) that are in the retry state, we will reuse the
3803  * sequence number(s) without setting the retry bit. The
3804  * alternative is to give up on these and BAR the receiver's window
3805  * forward.
3806  */
3807 static void
3808 ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,
3809     struct ath_tid *tid, ath_bufhead *bf_cq)
3810 {
3811 	struct ath_buf *bf;
3812 	struct ieee80211_tx_ampdu *tap;
3813 	struct ieee80211_node *ni = &an->an_node;
3814 	int t;
3815 
3816 	tap = ath_tx_get_tx_tid(an, tid->tid);
3817 
3818 	ATH_TX_LOCK_ASSERT(sc);
3819 
3820 	/* Walk the queue, free frames */
3821 	t = 0;
3822 	for (;;) {
3823 		bf = ATH_TID_FIRST(tid);
3824 		if (bf == NULL) {
3825 			break;
3826 		}
3827 
3828 		if (t == 0) {
3829 			ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
3830 //			t = 1;
3831 		}
3832 
3833 		ATH_TID_REMOVE(tid, bf, bf_list);
3834 		ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3835 	}
3836 
3837 	/* And now, drain the filtered frame queue */
3838 	t = 0;
3839 	for (;;) {
3840 		bf = ATH_TID_FILT_FIRST(tid);
3841 		if (bf == NULL)
3842 			break;
3843 
3844 		if (t == 0) {
3845 			ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
3846 //			t = 1;
3847 		}
3848 
3849 		ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3850 		ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3851 	}
3852 
3853 	/*
3854 	 * Override the clrdmask configuration for the next frame
3855 	 * in case there is some future transmission, just to get
3856 	 * the ball rolling.
3857 	 *
3858 	 * This won't hurt things if the TID is about to be freed.
3859 	 */
3860 	ath_tx_set_clrdmask(sc, tid->an);
3861 
3862 	/*
3863 	 * Now that the queues have been drained, update the
3864 	 * sequence number and BAW window.
3865 	 * Because sequence numbers have been assigned to frames
3866 	 * that haven't been sent yet, it's entirely possible
3867 	 * we'll be called with some pending frames that have not
3868 	 * been transmitted.
3869 	 *
3870 	 * The cleaner solution is to do the sequence number allocation
3871 	 * when the packet is first transmitted - and thus the "retries"
3872 	 * check above would be enough to update the BAW/seqno.
3873 	 */
3874 
3875 	/* But don't do it for non-QoS TIDs */
3876 	if (tap) {
3877 #if 1
3878 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3879 		    "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n",
3880 		    __func__,
3881 		    ni->ni_macaddr,
3882 		    ":",
3883 		    an,
3884 		    tid->tid,
3885 		    tap->txa_start);
3886 #endif
3887 		ni->ni_txseqs[tid->tid] = tap->txa_start;
3888 		tid->baw_tail = tid->baw_head;
3889 	}
3890 }
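
/*
 * A worked example of the sequence-space arithmetic behind the BAW
 * bookkeeping above: 802.11 sequence numbers are 12 bits, so window
 * offsets are computed modulo IEEE80211_SEQ_RANGE (4096).  With a
 * left edge (txa_start) of 4090, seqno 5 wraps to window index
 * (5 - 4090) & 4095 == 11.  Illustrative only.
 */
#if 0
	int index;

	index = (SEQNO(bf->bf_state.bfs_seqno) - tap->txa_start) &
	    (IEEE80211_SEQ_RANGE - 1);
#endif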
3891 
3892 /*
3893  * Reset the TID state.  This must be only called once the node has
3894  * had its frames flushed from this TID, to ensure that no other
3895  * pause / unpause logic can kick in.
3896  */
3897 static void
3898 ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
3899 {
3900 
3901 #if 0
3902 	tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
3903 	tid->paused = tid->sched = tid->addba_tx_pending = 0;
3904 	tid->incomp = tid->cleanup_inprogress = 0;
3905 #endif
3906 
3907 	/*
3908 	 * If we have a bar_wait set, we need to unpause the TID
3909 	 * here.  Otherwise once cleanup has finished, the TID won't
3910 	 * have the right paused counter.
3911 	 *
3912 	 * XXX I'm not going through resume here - I don't want the
3913  * node to be rescheduled just yet.  This however should be
3914 	 * methodized!
3915 	 */
3916 	if (tid->bar_wait) {
3917 		if (tid->paused > 0) {
3918 			tid->paused--;
3919 		}
3920 	}
3921 
3922 	/*
3923 	 * XXX same with a currently filtered TID.
3924 	 *
3925 	 * Since this is being called during a flush, we assume that
3926 	 * the filtered frame list is actually empty.
3927 	 *
3928 	 * XXX TODO: add in a check to ensure that the filtered queue
3929 	 * depth is actually 0!
3930 	 */
3931 	if (tid->isfiltered) {
3932 		if (tid->paused > 0) {
3933 			tid->paused--;
3934 		}
3935 	}
3936 
3937 	/*
3938 	 * Clear BAR, filtered frames, scheduled and ADDBA pending.
3939 	 * The TID may be going through cleanup from the last association
3940 	 * where things in the BAW are still in the hardware queue.
3941 	 */
3942 	tid->bar_wait = 0;
3943 	tid->bar_tx = 0;
3944 	tid->isfiltered = 0;
3945 	tid->sched = 0;
3946 	tid->addba_tx_pending = 0;
3947 
3948 	/*
3949 	 * XXX TODO: it may just be enough to walk the HWQs and mark
3950 	 * frames for that node as non-aggregate; or mark the ath_node
3951 	 * with something that indicates that aggregation is no longer
3952 	 * occuring.  Then we can just toss the BAW complaints and
3953 	 * do a complete hard reset of state here - no pause, no
3954 	 * complete counter, etc.
3955 	 */
3956 
3957 }
3958 
3959 /*
3960  * Flush all software queued packets for the given node.
3961  *
3962  * This occurs when a completion handler frees the last buffer
3963  * for a node, and the node is thus freed. This causes the node
3964  * to be cleaned up, which ends up calling ath_tx_node_flush.
3965  */
3966 void
3967 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
3968 {
3969 	int tid;
3970 	ath_bufhead bf_cq;
3971 	struct ath_buf *bf;
3972 
3973 	TAILQ_INIT(&bf_cq);
3974 
3975 	ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",
3976 	    &an->an_node);
3977 
3978 	ATH_TX_LOCK(sc);
3979 	DPRINTF(sc, ATH_DEBUG_NODE,
3980 	    "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, "
3981 	    "swq_depth=%d, clrdmask=%d, leak_count=%d\n",
3982 	    __func__,
3983 	    an->an_node.ni_macaddr,
3984 	    ":",
3985 	    an->an_is_powersave,
3986 	    an->an_stack_psq,
3987 	    an->an_tim_set,
3988 	    an->an_swq_depth,
3989 	    an->clrdmask,
3990 	    an->an_leak_count);
3991 
3992 	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
3993 		struct ath_tid *atid = &an->an_tid[tid];
3994 
3995 		/* Free packets */
3996 		ath_tx_tid_drain(sc, an, atid, &bf_cq);
3997 
3998 		/* Remove this tid from the list of active tids */
3999 		ath_tx_tid_unsched(sc, atid);
4000 
4001 		/* Reset the per-TID pause, BAR, etc state */
4002 		ath_tx_tid_reset(sc, atid);
4003 	}
4004 
4005 	/*
4006 	 * Clear global leak count
4007 	 */
4008 	an->an_leak_count = 0;
4009 	ATH_TX_UNLOCK(sc);
4010 
4011 	/* Handle completed frames */
4012 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4013 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4014 		ath_tx_default_comp(sc, bf, 0);
4015 	}
4016 }
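
/*
 * A sketch of the deferred-completion pattern that ath_tx_node_flush()
 * and ath_tx_txq_drain() share: collect buffers on a local list while
 * the TX lock is held, then run the completion handlers (which may
 * drop the last node reference) only after the lock is released, so
 * completion can never recurse into the lock.  Illustrative only.
 */
#if 0
	ath_bufhead bf_cq;
	struct ath_buf *bf;

	TAILQ_INIT(&bf_cq);
	ATH_TX_LOCK(sc);
	/* ... move frames from the per-TID queues onto bf_cq ... */
	ATH_TX_UNLOCK(sc);
	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
		TAILQ_REMOVE(&bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 0);
	}
#endif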
4017 
4018 /*
4019  * Drain all per-TID software queues with traffic queued on the given TXQ.
4020  */
4021 void
4022 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4023 {
4024 	struct ath_tid *tid;
4025 	ath_bufhead bf_cq;
4026 	struct ath_buf *bf;
4027 
4028 	TAILQ_INIT(&bf_cq);
4029 	ATH_TX_LOCK(sc);
4030 
4031 	/*
4032 	 * Iterate over all active tids for the given txq,
4033 	 * flushing and unsched'ing them
4034 	 */
4035 	while (! TAILQ_EMPTY(&txq->axq_tidq)) {
4036 		tid = TAILQ_FIRST(&txq->axq_tidq);
4037 		ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
4038 		ath_tx_tid_unsched(sc, tid);
4039 	}
4040 
4041 	ATH_TX_UNLOCK(sc);
4042 
4043 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4044 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4045 		ath_tx_default_comp(sc, bf, 0);
4046 	}
4047 }
4048 
4049 /*
4050  * Handle completion of non-aggregate session frames.
4051  *
4052  * This (currently) doesn't implement software retransmission of
4053  * non-aggregate frames!
4054  *
4055  * Software retransmission of non-aggregate frames needs to obey
4056  * the strict sequence number ordering, and drop any frames that
4057  * will fail this.
4058  *
4059  * For now, filtered frames and software retransmission would cause
4060  * all kinds of issues, so neither is supported here.
4061  *
4062  * So anyone queuing frames via ath_tx_normal_xmit() or
4063  * ath_tx_hw_queue_norm() must override and set CLRDMASK.
4064  */
4065 void
4066 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4067 {
4068 	struct ieee80211_node *ni = bf->bf_node;
4069 	struct ath_node *an = ATH_NODE(ni);
4070 	int tid = bf->bf_state.bfs_tid;
4071 	struct ath_tid *atid = &an->an_tid[tid];
4072 	struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4073 
4074 	/* The TID state is protected behind the TXQ lock */
4075 	ATH_TX_LOCK(sc);
4076 
4077 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
4078 	    __func__, bf, fail, atid->hwq_depth - 1);
4079 
4080 	atid->hwq_depth--;
4081 
4082 #if 0
4083 	/*
4084 	 * If the frame was filtered, stick it on the filter frame
4085 	 * queue and complain about it.  It shouldn't happen!
4086 	 */
4087 	if ((ts->ts_status & HAL_TXERR_FILT) ||
4088 	    (ts->ts_status != 0 && atid->isfiltered)) {
4089 		DPRINTF(sc, ATH_DEBUG_SW_TX,
4090 		    "%s: isfiltered=%d, ts_status=%d: huh?\n",
4091 		    __func__,
4092 		    atid->isfiltered,
4093 		    ts->ts_status);
4094 		ath_tx_tid_filt_comp_buf(sc, atid, bf);
4095 	}
4096 #endif
4097 	if (atid->isfiltered)
4098 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__);
4099 	if (atid->hwq_depth < 0)
4100 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
4101 		    __func__, atid->hwq_depth);
4102 
4103 	/* If the TID is being cleaned up, track things */
4104 	/* XXX refactor! */
4105 	if (atid->cleanup_inprogress) {
4106 		atid->incomp--;
4107 		if (atid->incomp == 0) {
4108 			DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4109 			    "%s: TID %d: cleaned up! resume!\n",
4110 			    __func__, tid);
4111 			atid->cleanup_inprogress = 0;
4112 			ath_tx_tid_resume(sc, atid);
4113 		}
4114 	}
4115 
4116 	/*
4117 	 * If the queue is filtered, potentially mark it as complete
4118 	 * and reschedule it as needed.
4119 	 *
4120 	 * This is required as there may be a subsequent TX descriptor
4121 	 * for this end-node that has CLRDMASK set, so it's quite possible
4122 	 * that a filtered frame will be followed by a non-filtered
4123 	 * (complete or otherwise) frame.
4124 	 *
4125 	 * XXX should we do this before we complete the frame?
4126 	 */
4127 	if (atid->isfiltered)
4128 		ath_tx_tid_filt_comp_complete(sc, atid);
4129 	ATH_TX_UNLOCK(sc);
4130 
4131 	/*
4132 	 * Punt to rate control if we're not being cleaned up
4133 	 * during a hw queue drain and the frame wanted an ACK.
4134 	 */
4135 	if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
4136 		ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
4137 		    ts, bf->bf_state.bfs_pktlen,
4138 		    1, (ts->ts_status == 0) ? 0 : 1);
4139 
4140 	ath_tx_default_comp(sc, bf, fail);
4141 }
4142 
4143 /*
4144  * Handle cleanup of aggregate session packets that aren't
4145  * an A-MPDU.
4146  *
4147  * There's no need to update the BAW here - the session is being
4148  * torn down.
4149  */
4150 static void
4151 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4152 {
4153 	struct ieee80211_node *ni = bf->bf_node;
4154 	struct ath_node *an = ATH_NODE(ni);
4155 	int tid = bf->bf_state.bfs_tid;
4156 	struct ath_tid *atid = &an->an_tid[tid];
4157 
4158 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",
4159 	    __func__, tid, atid->incomp);
4160 
4161 	ATH_TX_LOCK(sc);
4162 	atid->incomp--;
4163 
4164 	/* XXX refactor! */
4165 	if (bf->bf_state.bfs_dobaw) {
4166 		ath_tx_update_baw(sc, an, atid, bf);
4167 		if (!bf->bf_state.bfs_addedbaw)
4168 			DPRINTF(sc, ATH_DEBUG_SW_TX,
4169 			    "%s: wasn't added: seqno %d\n",
4170 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
4171 	}
4172 
4173 	if (atid->incomp == 0) {
4174 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4175 		    "%s: TID %d: cleaned up! resume!\n",
4176 		    __func__, tid);
4177 		atid->cleanup_inprogress = 0;
4178 		ath_tx_tid_resume(sc, atid);
4179 	}
4180 	ATH_TX_UNLOCK(sc);
4181 
4182 	ath_tx_default_comp(sc, bf, 0);
4183 }
4184 
4185 
4187  * This as it currently stands is a bit dumb.  Ideally we'd just
4188  * fail the frame the normal way and have it permanently fail
4189  * via the normal aggregate completion path.
4190  */
4191 static void
4192 ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an,
4193     int tid, struct ath_buf *bf_head, ath_bufhead *bf_cq)
4194 {
4195 	struct ath_tid *atid = &an->an_tid[tid];
4196 	struct ath_buf *bf, *bf_next;
4197 
4198 	ATH_TX_LOCK_ASSERT(sc);
4199 
4200 	/*
4201 	 * Remove this frame from the queue.
4202 	 */
4203 	ATH_TID_REMOVE(atid, bf_head, bf_list);
4204 
4205 	/*
4206 	 * Loop over all the frames in the aggregate.
4207 	 */
4208 	bf = bf_head;
4209 	while (bf != NULL) {
4210 		bf_next = bf->bf_next;	/* next aggregate frame, or NULL */
4211 
4212 		/*
4213 		 * If it's been added to the BAW we need to kick
4214 		 * it out of the BAW before we continue.
4215 		 *
4216 		 * XXX if it's an aggregate, assert that it's in the
4217 		 * BAW - we shouldn't have it be in an aggregate
4218 		 * otherwise!
4219 		 */
4220 		if (bf->bf_state.bfs_addedbaw) {
4221 			ath_tx_update_baw(sc, an, atid, bf);
4222 			bf->bf_state.bfs_dobaw = 0;
4223 		}
4224 
4225 		/*
4226 		 * Give it the default completion handler.
4227 		 */
4228 		bf->bf_comp = ath_tx_normal_comp;
4229 		bf->bf_next = NULL;
4230 
4231 		/*
4232 		 * Add it to the list to free.
4233 		 */
4234 		TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
4235 
4236 		/*
4237 		 * Now advance to the next frame in the aggregate.
4238 		 */
4239 		bf = bf_next;
4240 	}
4241 }
4242 
4243 /*
4244  * Performs transmit side cleanup when TID changes from aggregated to
4245  * unaggregated and during reassociation.
4246  *
4247  * For now, this just tosses everything from the TID software queue
4248  * whether or not it has been retried and marks the TID as
4249  * pending completion if there's anything for this TID queued to
4250  * the hardware.
4251  *
4252  * The caller is responsible for pausing the TID and unpausing the
4253  * TID if no cleanup was required. Otherwise the cleanup path will
4254  * unpause the TID once the last hardware queued frame is completed.
4255  */
4256 static void
4257 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid,
4258     ath_bufhead *bf_cq)
4259 {
4260 	struct ath_tid *atid = &an->an_tid[tid];
4261 	struct ath_buf *bf, *bf_next;
4262 
4263 	ATH_TX_LOCK_ASSERT(sc);
4264 
4265 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4266 	    "%s: TID %d: called; inprogress=%d\n", __func__, tid,
4267 	    atid->cleanup_inprogress);
4268 
4269 	/*
4270 	 * Move the filtered frames to the TX queue, before
4271 	 * we run off and discard/process things.
4272 	 */
4273 
4274 	/* XXX this is really quite inefficient */
4275 	while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) {
4276 		ATH_TID_FILT_REMOVE(atid, bf, bf_list);
4277 		ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4278 	}
4279 
4280 	/*
4281 	 * Update the frames in the software TX queue:
4282 	 *
4283 	 * + Discard retry frames in the queue
4284 	 * + Fix the completion function to be non-aggregate
4285 	 */
4286 	bf = ATH_TID_FIRST(atid);
4287 	while (bf) {
4288 		/*
4289 		 * Grab the next frame in the list, we may
4290 		 * be fiddling with the list.
4291 		 */
4292 		bf_next = TAILQ_NEXT(bf, bf_list);
4293 
4294 		/*
4295 		 * Free the frame and all subframes.
4296 		 */
4297 		ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq);
4298 
4299 		/*
4300 		 * Next frame!
4301 		 */
4302 		bf = bf_next;
4303 	}
4304 
4305 	/*
4306 	 * If there's anything in the hardware queue we wait
4307 	 * for the TID HWQ to empty.
4308 	 */
4309 	if (atid->hwq_depth > 0) {
4310 		/*
4311 		 * XXX how about we kill atid->incomp, and instead
4312 		 * replace it with a macro that checks that atid->hwq_depth
4313 		 * is 0?
4314 		 */
4315 		atid->incomp = atid->hwq_depth;
4316 		atid->cleanup_inprogress = 1;
4317 	}
4318 
4319 	if (atid->cleanup_inprogress)
4320 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4321 		    "%s: TID %d: cleanup needed: %d packets\n",
4322 		    __func__, tid, atid->incomp);
4323 
4324 	/* Owner now must free completed frames */
4325 }
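
/*
 * A sketch of the pause/unpause contract described above, as a caller
 * of ath_tx_tid_cleanup() would honour it: pause before the cleanup,
 * and resume only if no cleanup ended up pending - otherwise the
 * completion path resumes the TID when the last hardware-queued frame
 * finishes.  Illustrative only; draining bf_cq is elided.
 */
#if 0
	ATH_TX_LOCK(sc);
	ath_tx_tid_pause(sc, atid);
	ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
	if (! atid->cleanup_inprogress)
		ath_tx_tid_resume(sc, atid);
	ATH_TX_UNLOCK(sc);
#endif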
4326 
4327 static struct ath_buf *
4328 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
4329     struct ath_tid *tid, struct ath_buf *bf)
4330 {
4331 	struct ath_buf *nbf;
4332 	int error;
4333 
4334 	/*
4335 	 * Clone the buffer.  This will handle the dma unmap and
4336 	 * copy the node reference to the new buffer.  If this
4337 	 * works out, 'bf' will have no DMA mapping, no mbuf
4338 	 * pointer and no node reference.
4339 	 */
4340 	nbf = ath_buf_clone(sc, bf);
4341 
4342 #if 0
4343 	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n",
4344 	    __func__);
4345 #endif
4346 
4347 	if (nbf == NULL) {
4348 		/* Failed to clone */
4349 		DPRINTF(sc, ATH_DEBUG_XMIT,
4350 		    "%s: failed to clone a busy buffer\n",
4351 		    __func__);
4352 		return NULL;
4353 	}
4354 
4355 	/* Setup the dma for the new buffer */
4356 	error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
4357 	if (error != 0) {
4358 		DPRINTF(sc, ATH_DEBUG_XMIT,
4359 		    "%s: failed to setup dma for clone\n",
4360 		    __func__);
4361 		/*
4362 		 * Put this at the head of the list, not tail;
4363 		 * that way it doesn't interfere with the
4364 		 * busy buffer logic (which uses the tail of
4365 		 * the list.)
4366 		 */
4367 		ATH_TXBUF_LOCK(sc);
4368 		ath_returnbuf_head(sc, nbf);
4369 		ATH_TXBUF_UNLOCK(sc);
4370 		return NULL;
4371 	}
4372 
4373 	/* Update BAW if required, before we free the original buf */
4374 	if (bf->bf_state.bfs_dobaw)
4375 		ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);
4376 
4377 	/* Free original buffer; return new buffer */
4378 	ath_freebuf(sc, bf);
4379 
4380 	return nbf;
4381 }
4382 
4383 /*
4384  * Handle retrying an unaggregate frame in an aggregate
4385  * session.
4386  *
4387  * If too many retries occur, pause the TID, wait for
4388  * any further retransmits (as there's no requirement that
4389  * non-aggregate frames in an aggregate session be
4390  * transmitted in order; they just have to be in-BAW)
4391  * and then queue a BAR.
4392  */
4393 static void
4394 ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4395 {
4396 	struct ieee80211_node *ni = bf->bf_node;
4397 	struct ath_node *an = ATH_NODE(ni);
4398 	int tid = bf->bf_state.bfs_tid;
4399 	struct ath_tid *atid = &an->an_tid[tid];
4400 	struct ieee80211_tx_ampdu *tap;
4401 
4402 	ATH_TX_LOCK(sc);
4403 
4404 	tap = ath_tx_get_tx_tid(an, tid);
4405 
4406 	/*
4407 	 * If the buffer is marked as busy, we can't directly
4408 	 * reuse it. Instead, try to clone the buffer.
4409 	 * If the clone is successful, recycle the old buffer.
4410 	 * If the clone is unsuccessful, set bfs_retries to max
4411 	 * to force the next bit of code to free the buffer
4412 	 * for us.
4413 	 */
4414 	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4415 	    (bf->bf_flags & ATH_BUF_BUSY)) {
4416 		struct ath_buf *nbf;
4417 		nbf = ath_tx_retry_clone(sc, an, atid, bf);
4418 		if (nbf)
4419 			/* bf has been freed at this point */
4420 			bf = nbf;
4421 		else
4422 			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4423 	}
4424 
4425 	if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4426 		DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4427 		    "%s: exceeded retries; seqno %d\n",
4428 		    __func__, SEQNO(bf->bf_state.bfs_seqno));
4429 		sc->sc_stats.ast_tx_swretrymax++;
4430 
4431 		/* Update BAW anyway */
4432 		if (bf->bf_state.bfs_dobaw) {
4433 			ath_tx_update_baw(sc, an, atid, bf);
4434 			if (! bf->bf_state.bfs_addedbaw)
4435 				DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4436 				    "%s: wasn't added: seqno %d\n",
4437 				    __func__, SEQNO(bf->bf_state.bfs_seqno));
4438 		}
4439 		bf->bf_state.bfs_dobaw = 0;
4440 
4441 		/* Suspend the TX queue and get ready to send the BAR */
4442 		ath_tx_tid_bar_suspend(sc, atid);
4443 
4444 		/* Send the BAR if there are no other frames waiting */
4445 		if (ath_tx_tid_bar_tx_ready(sc, atid))
4446 			ath_tx_tid_bar_tx(sc, atid);
4447 
4448 		ATH_TX_UNLOCK(sc);
4449 
4450 		/* Free buffer, bf is free after this call */
4451 		ath_tx_default_comp(sc, bf, 0);
4452 		return;
4453 	}
4454 
4455 	/*
4456 	 * This increments the retry counter as well as
4457 	 * sets the retry flag in the ath_buf and packet
4458 	 * body.
4459 	 */
4460 	ath_tx_set_retry(sc, bf);
4461 	sc->sc_stats.ast_tx_swretries++;
4462 
4463 	/*
4464 	 * Insert this at the head of the queue, so it's
4465 	 * retried before any current/subsequent frames.
4466 	 */
4467 	ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4468 	ath_tx_tid_sched(sc, atid);
4469 	/* Send the BAR if there are no other frames waiting */
4470 	if (ath_tx_tid_bar_tx_ready(sc, atid))
4471 		ath_tx_tid_bar_tx(sc, atid);
4472 
4473 	ATH_TX_UNLOCK(sc);
4474 }
4475 
4476 /*
4477  * Common code for aggregate excessive retry/subframe retry.
4478  * If retrying, queues buffers to bf_q. If not, frees the
4479  * buffers.
4480  *
4481  * XXX should unify this with ath_tx_aggr_retry_unaggr()
4482  */
4483 static int
4484 ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,
4485     ath_bufhead *bf_q)
4486 {
4487 	struct ieee80211_node *ni = bf->bf_node;
4488 	struct ath_node *an = ATH_NODE(ni);
4489 	int tid = bf->bf_state.bfs_tid;
4490 	struct ath_tid *atid = &an->an_tid[tid];
4491 
4492 	ATH_TX_LOCK_ASSERT(sc);
4493 
4494 	/* XXX clr11naggr should be done for all subframes */
4495 	ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
4496 	ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);
4497 
4498 	/* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */
4499 
4500 	/*
4501 	 * If the buffer is marked as busy, we can't directly
4502 	 * reuse it. Instead, try to clone the buffer.
4503 	 * If the clone is successful, recycle the old buffer.
4504 	 * If the clone is unsuccessful, set bfs_retries to max
4505 	 * to force the next bit of code to free the buffer
4506 	 * for us.
4507 	 */
4508 	if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4509 	    (bf->bf_flags & ATH_BUF_BUSY)) {
4510 		struct ath_buf *nbf;
4511 		nbf = ath_tx_retry_clone(sc, an, atid, bf);
4512 		if (nbf)
4513 			/* bf has been freed at this point */
4514 			bf = nbf;
4515 		else
4516 			bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4517 	}
4518 
4519 	if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4520 		sc->sc_stats.ast_tx_swretrymax++;
4521 		DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4522 		    "%s: max retries: seqno %d\n",
4523 		    __func__, SEQNO(bf->bf_state.bfs_seqno));
4524 		ath_tx_update_baw(sc, an, atid, bf);
4525 		if (!bf->bf_state.bfs_addedbaw)
4526 			DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4527 			    "%s: wasn't added: seqno %d\n",
4528 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
4529 		bf->bf_state.bfs_dobaw = 0;
4530 		return 1;
4531 	}
4532 
4533 	ath_tx_set_retry(sc, bf);
4534 	sc->sc_stats.ast_tx_swretries++;
4535 	bf->bf_next = NULL;		/* Just to make sure */
4536 
4537 	/* Clear the aggregate state */
4538 	bf->bf_state.bfs_aggr = 0;
4539 	bf->bf_state.bfs_ndelim = 0;	/* ??? needed? */
4540 	bf->bf_state.bfs_nframes = 1;
4541 
4542 	TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
4543 	return 0;
4544 }
4545 
4546 /*
4547  * Error packet completion for an aggregate destination.
4548  */
4549 static void
4550 ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first,
4551     struct ath_tid *tid)
4552 {
4553 	struct ieee80211_node *ni = bf_first->bf_node;
4554 	struct ath_node *an = ATH_NODE(ni);
4555 	struct ath_buf *bf_next, *bf;
4556 	ath_bufhead bf_q;
4557 	int drops = 0;
4558 	struct ieee80211_tx_ampdu *tap;
4559 	ath_bufhead bf_cq;
4560 
4561 	TAILQ_INIT(&bf_q);
4562 	TAILQ_INIT(&bf_cq);
4563 
4564 	/*
4565 	 * Update rate control - all frames have failed.
4566 	 *
4567 	 * XXX use the length in the first frame in the series;
4568 	 * XXX just so things are consistent for now.
4569 	 */
4570 	ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
4571 	    &bf_first->bf_status.ds_txstat,
4572 	    bf_first->bf_state.bfs_pktlen,
4573 	    bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);
4574 
4575 	ATH_TX_LOCK(sc);
4576 	tap = ath_tx_get_tx_tid(an, tid->tid);
4577 	sc->sc_stats.ast_tx_aggr_failall++;
4578 
4579 	/* Retry all subframes */
4580 	bf = bf_first;
4581 	while (bf) {
4582 		bf_next = bf->bf_next;
4583 		bf->bf_next = NULL;	/* Remove it from the aggr list */
4584 		sc->sc_stats.ast_tx_aggr_fail++;
4585 		if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4586 			drops++;
4587 			bf->bf_next = NULL;
4588 			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4589 		}
4590 		bf = bf_next;
4591 	}
4592 
4593 	/* Prepend all frames to the beginning of the queue */
4594 	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4595 		TAILQ_REMOVE(&bf_q, bf, bf_list);
4596 		ATH_TID_INSERT_HEAD(tid, bf, bf_list);
4597 	}
4598 
4599 	/*
4600 	 * Schedule the TID to be re-tried.
4601 	 */
4602 	ath_tx_tid_sched(sc, tid);
4603 
4604 	/*
4605 	 * Send a BAR if we dropped any frames
4606 	 *
4607 	 * Keep the txq lock held for now, as we need to ensure
4608 	 * that ni_txseqs[] is consistent (as it's being updated
4609 	 * in the ifnet TX context or raw TX context.)
4610 	 */
4611 	if (drops) {
4612 		/* Suspend the TX queue and get ready to send the BAR */
4613 		ath_tx_tid_bar_suspend(sc, tid);
4614 	}
4615 
4616 	/*
4617 	 * Send BAR if required
4618 	 */
4619 	if (ath_tx_tid_bar_tx_ready(sc, tid))
4620 		ath_tx_tid_bar_tx(sc, tid);
4621 
4622 	ATH_TX_UNLOCK(sc);
4623 
4624 	/* Complete frames which errored out */
4625 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4626 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
4627 		ath_tx_default_comp(sc, bf, 0);
4628 	}
4629 }
4630 
4631 /*
4632  * Handle clean-up of packets from an aggregate list.
4633  *
4634  * There's no need to update the BAW here - the session is being
4635  * torn down.
4636  */
4637 static void
4638 ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
4639 {
4640 	struct ath_buf *bf, *bf_next;
4641 	struct ieee80211_node *ni = bf_first->bf_node;
4642 	struct ath_node *an = ATH_NODE(ni);
4643 	int tid = bf_first->bf_state.bfs_tid;
4644 	struct ath_tid *atid = &an->an_tid[tid];
4645 
4646 	ATH_TX_LOCK(sc);
4647 
4648 	/* update incomp */
4649 	atid->incomp--;
4650 
4651 	/* Update the BAW */
4652 	bf = bf_first;
4653 	while (bf) {
4654 		/* XXX refactor! */
4655 		if (bf->bf_state.bfs_dobaw) {
4656 			ath_tx_update_baw(sc, an, atid, bf);
4657 			if (!bf->bf_state.bfs_addedbaw)
4658 				DPRINTF(sc, ATH_DEBUG_SW_TX,
4659 				    "%s: wasn't added: seqno %d\n",
4660 				    __func__, SEQNO(bf->bf_state.bfs_seqno));
4661 		}
4662 		bf = bf->bf_next;
4663 	}
4664 
4665 	if (atid->incomp == 0) {
4666 		DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4667 		    "%s: TID %d: cleaned up! resume!\n",
4668 		    __func__, tid);
4669 		atid->cleanup_inprogress = 0;
4670 		ath_tx_tid_resume(sc, atid);
4671 	}
4672 
4673 	/* Send BAR if required */
4674 	/* XXX why would we send a BAR when transitioning to non-aggregation? */
4675 	/*
4676 	 * XXX TODO: we should likely just tear down the BAR state here,
4677 	 * rather than sending a BAR.
4678 	 */
4679 	if (ath_tx_tid_bar_tx_ready(sc, atid))
4680 		ath_tx_tid_bar_tx(sc, atid);
4681 
4682 	ATH_TX_UNLOCK(sc);
4683 
4684 	/* Handle frame completion as individual frames */
4685 	bf = bf_first;
4686 	while (bf) {
4687 		bf_next = bf->bf_next;
4688 		bf->bf_next = NULL;
4689 		ath_tx_default_comp(sc, bf, 1);
4690 		bf = bf_next;
4691 	}
4692 }
4693 
4694 /*
4695  * Handle completion of a set of aggregate frames.
4696  *
4697  * Note: the completion handler is invoked on the last descriptor in the
4698  * aggregate, not on the last descriptor in the first frame.
4699  */
4700 static void
4701 ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,
4702     int fail)
4703 {
4704 	//struct ath_desc *ds = bf->bf_lastds;
4705 	struct ieee80211_node *ni = bf_first->bf_node;
4706 	struct ath_node *an = ATH_NODE(ni);
4707 	int tid = bf_first->bf_state.bfs_tid;
4708 	struct ath_tid *atid = &an->an_tid[tid];
4709 	struct ath_tx_status ts;
4710 	struct ieee80211_tx_ampdu *tap;
4711 	ath_bufhead bf_q;
4712 	ath_bufhead bf_cq;
4713 	int seq_st, tx_ok;
4714 	int hasba, isaggr;
4715 	uint32_t ba[2];
4716 	struct ath_buf *bf, *bf_next;
4717 	int ba_index;
4718 	int drops = 0;
4719 	int nframes = 0, nbad = 0, nf;
4720 	int pktlen;
4721 	/* XXX there's too much on the stack? */
4722 	struct ath_rc_series rc[ATH_RC_NUM];
4723 	int txseq;
4724 
4725 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
4726 	    __func__, atid->hwq_depth);
4727 
4728 	/*
4729 	 * Take a copy; this may be needed -after- bf_first
4730 	 * has been completed and freed.
4731 	 */
4732 	ts = bf_first->bf_status.ds_txstat;
4733 
4734 	TAILQ_INIT(&bf_q);
4735 	TAILQ_INIT(&bf_cq);
4736 
4737 	/* The TID state is kept behind the TXQ lock */
4738 	ATH_TX_LOCK(sc);
4739 
4740 	atid->hwq_depth--;
4741 	if (atid->hwq_depth < 0)
4742 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n",
4743 		    __func__, atid->hwq_depth);
4744 
4745 	/*
4746 	 * If the TID is filtered, handle completing the filter
4747 	 * transition before potentially kicking it to the cleanup
4748 	 * function.
4749 	 *
4750 	 * XXX this is duplicate work, ew.
4751 	 */
4752 	if (atid->isfiltered)
4753 		ath_tx_tid_filt_comp_complete(sc, atid);
4754 
4755 	/*
4756 	 * Punt cleanup to the relevant function, not our problem now
4757 	 */
4758 	if (atid->cleanup_inprogress) {
4759 		if (atid->isfiltered)
4760 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4761 			    "%s: isfiltered=1, normal_comp?\n",
4762 			    __func__);
4763 		ATH_TX_UNLOCK(sc);
4764 		ath_tx_comp_cleanup_aggr(sc, bf_first);
4765 		return;
4766 	}
4767 
4768 	/*
4769 	 * If the frame is filtered, transition to filtered frame
4770 	 * mode and add this to the filtered frame list.
4771 	 *
4772 	 * XXX TODO: figure out how this interoperates with
4773 	 * BAR, pause and cleanup states.
4774 	 */
4775 	if ((ts.ts_status & HAL_TXERR_FILT) ||
4776 	    (ts.ts_status != 0 && atid->isfiltered)) {
4777 		if (fail != 0)
4778 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4779 			    "%s: isfiltered=1, fail=%d\n", __func__, fail);
4780 		ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);
4781 
4782 		/* Remove from BAW */
4783 		TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) {
4784 			if (bf->bf_state.bfs_addedbaw)
4785 				drops++;
4786 			if (bf->bf_state.bfs_dobaw) {
4787 				ath_tx_update_baw(sc, an, atid, bf);
4788 				if (!bf->bf_state.bfs_addedbaw)
4789 					DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4790 					    "%s: wasn't added: seqno %d\n",
4791 					    __func__,
4792 					    SEQNO(bf->bf_state.bfs_seqno));
4793 			}
4794 			bf->bf_state.bfs_dobaw = 0;
4795 		}
4796 		/*
4797 		 * If any intermediate frames in the BAW were dropped when
4798 		 * handling filtering things, send a BAR.
4799 		 */
4800 		if (drops)
4801 			ath_tx_tid_bar_suspend(sc, atid);
4802 
4803 		/*
4804 		 * Finish up by sending a BAR if required and freeing
4805 		 * the frames outside of the TX lock.
4806 		 */
4807 		goto finish_send_bar;
4808 	}
4809 
4810 	/*
4811 	 * XXX for now, use the first frame in the aggregate for
4812 	 * XXX rate control completion; it's at least consistent.
4813 	 */
4814 	pktlen = bf_first->bf_state.bfs_pktlen;
4815 
4816 	/*
4817 	 * Handle errors first!
4818 	 *
4819 	 * Here, handle _any_ error as a "exceeded retries" error.
4820 	 * Here, handle _any_ error as an "exceeded retries" error.
4821 	 * it'll have to be expanded.
4822 	 */
4823 #if 0
4824 	if (ts.ts_status & HAL_TXERR_XRETRY) {
4825 #endif
4826 	if (ts.ts_status != 0) {
4827 		ATH_TX_UNLOCK(sc);
4828 		ath_tx_comp_aggr_error(sc, bf_first, atid);
4829 		return;
4830 	}
4831 
4832 	tap = ath_tx_get_tx_tid(an, tid);
4833 
4834 	/*
4835 	 * extract starting sequence and block-ack bitmap
4836 	 */
4837 	/* XXX endian-ness of seq_st, ba? */
4838 	seq_st = ts.ts_seqnum;
4839 	hasba = !! (ts.ts_flags & HAL_TX_BA);
4840 	tx_ok = (ts.ts_status == 0);
4841 	isaggr = bf_first->bf_state.bfs_aggr;
4842 	ba[0] = ts.ts_ba_low;
4843 	ba[1] = ts.ts_ba_high;
4844 
4845 	/*
4846 	 * Copy the TX completion status and the rate control
4847 	 * series from the first descriptor, as it may be freed
4848 	 * before the rate control code can get its grubby fingers
4849 	 * into things.
4850 	 */
4851 	memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));
4852 
4853 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4854 	    "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, "
4855 	    "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n",
4856 	    __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
4857 	    isaggr, seq_st, hasba, ba[0], ba[1]);
4858 
4859 	/*
4860 	 * The reference driver doesn't do this; it simply ignores
4861 	 * this check in its entirety.
4862 	 *
4863 	 * I've seen this occur when using iperf to send traffic
4864 	 * out tid 1 - the aggregate frames are all marked as TID 1,
4865 	 * but the TXSTATUS has TID=0.  So, let's just ignore this
4866 	 * check.
4867 	 */
4868 #if 0
4869 	/* Occasionally, the MAC sends a tx status for the wrong TID. */
4870 	if (tid != ts.ts_tid) {
4871 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n",
4872 		    __func__, tid, ts.ts_tid);
4873 		tx_ok = 0;
4874 	}
4875 #endif
4876 
4877 	/* AR5416 BA bug; this requires an interface reset */
4878 	if (isaggr && tx_ok && (! hasba)) {
4879 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4880 		    "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, "
4881 		    "seq_st=%d\n",
4882 		    __func__, hasba, tx_ok, isaggr, seq_st);
4883 		/* XXX TODO: schedule an interface reset */
4884 #ifdef ATH_DEBUG
4885 		ath_printtxbuf(sc, bf_first,
4886 		    sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
4887 #endif
4888 	}
4889 
4890 	/*
4891 	 * Walk the list of frames, figure out which ones were correctly
4892 	 * sent and which weren't.
4893 	 */
4894 	bf = bf_first;
4895 	nf = bf_first->bf_state.bfs_nframes;
4896 
4897 	/* bf_first is going to be invalid once this list is walked */
4898 	bf_first = NULL;
4899 
4900 	/*
4901 	 * Walk the list of completed frames and determine
4902 	 * which need to be completed and which need to be
4903 	 * retransmitted.
4904 	 *
4905 	 * For completed frames, the completion functions need
4906 	 * to be called at the end of this function as the last
4907 	 * node reference may free the node.
4908 	 *
4909 	 * Finally, since the TXQ lock can't be held during the
4910 	 * completion callback (to avoid lock recursion),
4911 	 * the completion calls have to be done outside of the
4912 	 * lock.
4913 	 */
4914 	while (bf) {
4915 		nframes++;
4916 		ba_index = ATH_BA_INDEX(seq_st,
4917 		    SEQNO(bf->bf_state.bfs_seqno));
4918 		bf_next = bf->bf_next;
4919 		bf->bf_next = NULL;	/* Remove it from the aggr list */
4920 
4921 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4922 		    "%s: checking bf=%p seqno=%d; ack=%d\n",
4923 		    __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
4924 		    ATH_BA_ISSET(ba, ba_index));
4925 
4926 		if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
4927 			sc->sc_stats.ast_tx_aggr_ok++;
4928 			ath_tx_update_baw(sc, an, atid, bf);
4929 			bf->bf_state.bfs_dobaw = 0;
4930 			if (!bf->bf_state.bfs_addedbaw)
4931 				DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4932 				    "%s: wasn't added: seqno %d\n",
4933 				    __func__, SEQNO(bf->bf_state.bfs_seqno));
4934 			bf->bf_next = NULL;
4935 			TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4936 		} else {
4937 			sc->sc_stats.ast_tx_aggr_fail++;
4938 			if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4939 				drops++;
4940 				bf->bf_next = NULL;
4941 				TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4942 			}
4943 			nbad++;
4944 		}
4945 		bf = bf_next;
4946 	}
4947 
4948 	/*
4949 	 * Now that the BAW updates have been done, unlock
4950 	 *
4951 	 * txseq is grabbed before the lock is released so we
4952 	 * have a consistent view of what -was- in the BAW.
4953 	 * Anything after this point will not yet have been
4954 	 * TXed.
4955 	 */
4956 	txseq = tap->txa_start;
4957 	ATH_TX_UNLOCK(sc);
4958 
4959 	if (nframes != nf)
4960 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4961 		    "%s: num frames seen=%d; bf nframes=%d\n",
4962 		    __func__, nframes, nf);
4963 
4964 	/*
4965 	 * Now we know how many frames were bad, call the rate
4966 	 * control code.
4967 	 */
4968 	if (fail == 0)
4969 		ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes,
4970 		    nbad);
4971 
4972 	/*
4973 	 * Send a BAR if we dropped any frames
4974 	 */
4975 	if (drops) {
4976 		/* Suspend the TX queue and get ready to send the BAR */
4977 		ATH_TX_LOCK(sc);
4978 		ath_tx_tid_bar_suspend(sc, atid);
4979 		ATH_TX_UNLOCK(sc);
4980 	}
4981 
4982 	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4983 	    "%s: txa_start now %d\n", __func__, tap->txa_start);
4984 
4985 	ATH_TX_LOCK(sc);
4986 
4987 	/* Prepend all frames to the beginning of the queue */
4988 	while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4989 		TAILQ_REMOVE(&bf_q, bf, bf_list);
4990 		ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4991 	}
4992 
4993 	/*
4994 	 * Reschedule to grab some further frames.
4995 	 */
4996 	ath_tx_tid_sched(sc, atid);
4997 
4998 	/*
4999 	 * If the queue is filtered, re-schedule as required.
5000 	 *
5001 	 * This is required as there may be a subsequent TX descriptor
5002 	 * for this end-node that has CLRDMASK set, so it's quite possible
5003 	 * that a filtered frame will be followed by a non-filtered
5004 	 * (complete or otherwise) frame.
5005 	 *
5006 	 * XXX should we do this before we complete the frame?
5007 	 */
5008 	if (atid->isfiltered)
5009 		ath_tx_tid_filt_comp_complete(sc, atid);
5010 
5011 finish_send_bar:
5012 
5013 	/*
5014 	 * Send BAR if required
5015 	 */
5016 	if (ath_tx_tid_bar_tx_ready(sc, atid))
5017 		ath_tx_tid_bar_tx(sc, atid);
5018 
5019 	ATH_TX_UNLOCK(sc);
5020 
5021 	/* Do deferred completion */
5022 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5023 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
5024 		ath_tx_default_comp(sc, bf, 0);
5025 	}
5026 }
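
/*
 * A worked example of the block-ack bitmap test used in the walk
 * above, assuming the usual macro shapes (ATH_BA_INDEX() is the
 * modulo-4096 distance from the starting sequence number, and
 * ATH_BA_ISSET() tests one bit of the 64-bit map held in ba[0..1]):
 * with seq_st = 100, seqno 103 maps to index 3, which is bit 3 of
 * ba[0].  Illustrative only.
 */
#if 0
	int idx, acked;

	idx = (103 - 100) & (IEEE80211_SEQ_RANGE - 1);	/* == 3 */
	acked = (ba[idx >> 5] >> (idx & 31)) & 1;	/* bit 3 of ba[0] */
#endif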
5027 
5028 /*
5029  * Handle completion of unaggregated frames in an ADDBA
5030  * session.
5031  *
5032  * Fail is set to 1 if the entry is being freed via a call to
5033  * ath_tx_draintxq().
5034  */
5035 static void
5036 ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
5037 {
5038 	struct ieee80211_node *ni = bf->bf_node;
5039 	struct ath_node *an = ATH_NODE(ni);
5040 	int tid = bf->bf_state.bfs_tid;
5041 	struct ath_tid *atid = &an->an_tid[tid];
5042 	struct ath_tx_status ts;
5043 	int drops = 0;
5044 
5045 	/*
5046 	 * Take a copy of this; filtering/cloning the frame may free the
5047 	 * bf pointer.
5048 	 */
5049 	ts = bf->bf_status.ds_txstat;
5050 
5051 	/*
5052 	 * Update rate control status here, before we possibly
5053 	 * punt to retry or cleanup.
5054 	 *
5055 	 * Do it outside of the TXQ lock.
5056 	 */
5057 	if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
5058 		ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
5059 		    &bf->bf_status.ds_txstat,
5060 		    bf->bf_state.bfs_pktlen,
5061 		    1, (ts.ts_status == 0) ? 0 : 1);
5062 
5063 	/*
5064 	 * This is called early so atid->hwq_depth can be tracked.
5065 	 * This unfortunately means that it's released and regrabbed
5066 	 * during retry and cleanup. That's rather inefficient.
5067 	 */
5068 	ATH_TX_LOCK(sc);
5069 
5070 	if (tid == IEEE80211_NONQOS_TID)
5071 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__);
5072 
5073 	DPRINTF(sc, ATH_DEBUG_SW_TX,
5074 	    "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n",
5075 	    __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
5076 	    SEQNO(bf->bf_state.bfs_seqno));
5077 
5078 	atid->hwq_depth--;
5079 	if (atid->hwq_depth < 0)
5080 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
5081 		    __func__, atid->hwq_depth);
5082 
5083 	/*
5084 	 * If the TID is filtered, handle completing the filter
5085 	 * transition before potentially kicking it to the cleanup
5086 	 * function.
5087 	 */
5088 	if (atid->isfiltered)
5089 		ath_tx_tid_filt_comp_complete(sc, atid);
5090 
5091 	/*
5092 	 * If a cleanup is in progress, punt to comp_cleanup
5093 	 * rather than handling it here.  It's then that path's
5094 	 * responsibility to clean up, call the completion
5095 	 * function in net80211, etc.
5096 	 */
5097 	if (atid->cleanup_inprogress) {
5098 		if (atid->isfiltered)
5099 			DPRINTF(sc, ATH_DEBUG_SW_TX,
5100 			    "%s: isfiltered=1, normal_comp?\n",
5101 			    __func__);
5102 		ATH_TX_UNLOCK(sc);
5103 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n",
5104 		    __func__);
5105 		ath_tx_comp_cleanup_unaggr(sc, bf);
5106 		return;
5107 	}
5108 
5109 	/*
5110 	 * XXX TODO: how does cleanup, BAR and filtered frame handling
5111 	 * overlap?
5112 	 *
5113 	 * If the frame is filtered OR if it's any failure but
5114 	 * the TID is filtered, the frame must be added to the
5115 	 * filtered frame list.
5116 	 *
5117 	 * However - a busy buffer can't be added to the filtered
5118 	 * list as it will end up being recycled without having
5119 	 * been made available for the hardware.
5120 	 */
5121 	if ((ts.ts_status & HAL_TXERR_FILT) ||
5122 	    (ts.ts_status != 0 && atid->isfiltered)) {
5123 		int freeframe;
5124 
5125 		if (fail != 0)
5126 			DPRINTF(sc, ATH_DEBUG_SW_TX,
5127 			    "%s: isfiltered=1, fail=%d\n",
5128 			    __func__, fail);
5129 		freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
5130 		/*
5131 		 * If freeframe=0 then bf is no longer ours; don't
5132 		 * touch it.
5133 		 */
5134 		if (freeframe) {
5135 			/* Remove from BAW */
5136 			if (bf->bf_state.bfs_addedbaw)
5137 				drops++;
5138 			if (bf->bf_state.bfs_dobaw) {
5139 				ath_tx_update_baw(sc, an, atid, bf);
5140 				if (!bf->bf_state.bfs_addedbaw)
5141 					DPRINTF(sc, ATH_DEBUG_SW_TX,
5142 					    "%s: wasn't added: seqno %d\n",
5143 					    __func__, SEQNO(bf->bf_state.bfs_seqno));
5144 			}
5145 			bf->bf_state.bfs_dobaw = 0;
5146 		}
5147 
5148 		/*
5149 		 * If the frame couldn't be filtered, treat it as a drop and
5150 		 * prepare to send a BAR.
5151 		 */
5152 		if (freeframe && drops)
5153 			ath_tx_tid_bar_suspend(sc, atid);
5154 
5155 		/*
5156 		 * Send BAR if required
5157 		 */
5158 		if (ath_tx_tid_bar_tx_ready(sc, atid))
5159 			ath_tx_tid_bar_tx(sc, atid);
5160 
5161 		ATH_TX_UNLOCK(sc);
5162 		/*
5163 		 * If freeframe is set, then the frame couldn't be
5164 		 * cloned and bf is still valid.  Just complete/free it.
5165 		 */
5166 		if (freeframe)
5167 			ath_tx_default_comp(sc, bf, fail);
5168 
5169 		return;
5170 	}
5171 	/*
5172 	 * Don't bother with the retry check if all frames
5173 	 * are being failed (eg during queue deletion.)
5174 	 */
5175 #if 0
5176 	if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
5177 #endif
5178 	if (fail == 0 && ts.ts_status != 0) {
5179 		ATH_TX_UNLOCK(sc);
5180 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n",
5181 		    __func__);
5182 		ath_tx_aggr_retry_unaggr(sc, bf);
5183 		return;
5184 	}
5185 
5186 	/* Success? Complete */
5187 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
5188 	    __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
5189 	if (bf->bf_state.bfs_dobaw) {
5190 		ath_tx_update_baw(sc, an, atid, bf);
5191 		bf->bf_state.bfs_dobaw = 0;
5192 		if (!bf->bf_state.bfs_addedbaw)
5193 			DPRINTF(sc, ATH_DEBUG_SW_TX,
5194 			    "%s: wasn't added: seqno %d\n",
5195 			    __func__, SEQNO(bf->bf_state.bfs_seqno));
5196 	}
5197 
5198 	/*
5199 	 * If the queue is filtered, re-schedule as required.
5200 	 *
5201 	 * This is required as there may be a subsequent TX descriptor
5202 	 * for this end-node that has CLRDMASK set, so it's quite possible
5203 	 * that a filtered frame will be followed by a non-filtered
5204 	 * (complete or otherwise) frame.
5205 	 *
5206 	 * XXX should we do this before we complete the frame?
5207 	 */
5208 	if (atid->isfiltered)
5209 		ath_tx_tid_filt_comp_complete(sc, atid);
5210 
5211 	/*
5212 	 * Send BAR if required
5213 	 */
5214 	if (ath_tx_tid_bar_tx_ready(sc, atid))
5215 		ath_tx_tid_bar_tx(sc, atid);
5216 
5217 	ATH_TX_UNLOCK(sc);
5218 
5219 	ath_tx_default_comp(sc, bf, fail);
5220 	/* bf is freed at this point */
5221 }
5222 
5223 void
5224 ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
5225 {
5226 	if (bf->bf_state.bfs_aggr)
5227 		ath_tx_aggr_comp_aggr(sc, bf, fail);
5228 	else
5229 		ath_tx_aggr_comp_unaggr(sc, bf, fail);
5230 }
5231 
5232 /*
5233  * Schedule some packets from the given node/TID to the hardware.
5234  *
5235  * This is the aggregate version.
5236  */
5237 void
5238 ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
5239     struct ath_tid *tid)
5240 {
5241 	struct ath_buf *bf;
5242 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5243 	struct ieee80211_tx_ampdu *tap;
5244 	ATH_AGGR_STATUS status;
5245 	ath_bufhead bf_q;
5246 
5247 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
5248 	ATH_TX_LOCK_ASSERT(sc);
5249 
5250 	/*
5251 	 * XXX TODO: If we're called for a queue that we're leaking frames to,
5252 	 * ensure we only leak one.
5253 	 */
5254 
5255 	tap = ath_tx_get_tx_tid(an, tid->tid);
5256 
5257 	if (tid->tid == IEEE80211_NONQOS_TID)
5258 		DPRINTF(sc, ATH_DEBUG_SW_TX,
5259 		    "%s: called for TID=NONQOS_TID?\n", __func__);
5260 
5261 	for (;;) {
5262 		status = ATH_AGGR_DONE;
5263 
5264 		/*
5265 		 * If the upper layer has paused the TID, don't
5266 		 * queue any further packets.
5267 		 *
5268 		 * This can also occur from the completion task because
5269 		 * of packet loss; but as it's serialised with this code,
5270 		 * it won't "appear" halfway through queuing packets.
5271 		 */
5272 		if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5273 			break;
5274 
5275 		bf = ATH_TID_FIRST(tid);
5276 		if (bf == NULL) {
5277 			break;
5278 		}
5279 
5280 		/*
5281 		 * If the packet doesn't fall within the BAW (eg a NULL
5282 		 * data frame), schedule it directly; continue.
5283 		 */
5284 		if (! bf->bf_state.bfs_dobaw) {
5285 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5286 			    "%s: non-baw packet\n",
5287 			    __func__);
5288 			ATH_TID_REMOVE(tid, bf, bf_list);
5289 
5290 			if (bf->bf_state.bfs_nframes > 1)
5291 				DPRINTF(sc, ATH_DEBUG_SW_TX,
5292 				    "%s: aggr=%d, nframes=%d\n",
5293 				    __func__,
5294 				    bf->bf_state.bfs_aggr,
5295 				    bf->bf_state.bfs_nframes);
5296 
5297 			/*
5298 			 * This shouldn't happen - such frames shouldn't
5299 			 * ever have been queued as an aggregate in the
5300 			 * first place.  However, make sure the fields
5301 			 * are correctly set up, just to be totally sure.
5302 			 */
5303 			bf->bf_state.bfs_aggr = 0;
5304 			bf->bf_state.bfs_nframes = 1;
5305 
5306 			/* Update CLRDMASK just before this frame is queued */
5307 			ath_tx_update_clrdmask(sc, tid, bf);
5308 
5309 			ath_tx_do_ratelookup(sc, bf);
5310 			ath_tx_calc_duration(sc, bf);
5311 			ath_tx_calc_protection(sc, bf);
5312 			ath_tx_set_rtscts(sc, bf);
5313 			ath_tx_rate_fill_rcflags(sc, bf);
5314 			ath_tx_setds(sc, bf);
5315 			ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5316 
5317 			sc->sc_aggr_stats.aggr_nonbaw_pkt++;
5318 
5319 			/* Queue the packet; continue */
5320 			goto queuepkt;
5321 		}
5322 
5323 		TAILQ_INIT(&bf_q);
5324 
5325 		/*
5326 		 * Do a rate control lookup on the first frame in the
5327 		 * list. The rate control code needs that to occur
5328 		 * before it can determine whether to TX.
5329 		 * It's inaccurate because the rate control code doesn't
5330 		 * really "do" aggregate lookups, so it only considers
5331 		 * the size of the first frame.
5332 		 */
5333 		ath_tx_do_ratelookup(sc, bf);
5334 		bf->bf_state.bfs_rc[3].rix = 0;
5335 		bf->bf_state.bfs_rc[3].tries = 0;
5336 
5337 		ath_tx_calc_duration(sc, bf);
5338 		ath_tx_calc_protection(sc, bf);
5339 
5340 		ath_tx_set_rtscts(sc, bf);
5341 		ath_tx_rate_fill_rcflags(sc, bf);
5342 
5343 		status = ath_tx_form_aggr(sc, an, tid, &bf_q);
5344 
5345 		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5346 		    "%s: ath_tx_form_aggr() status=%d\n", __func__, status);
5347 
5348 		/*
5349 		 * No frames to be picked up - out of BAW
5350 		 */
5351 		if (TAILQ_EMPTY(&bf_q))
5352 			break;
5353 
5354 		/*
5355 		 * This assumes that the descriptors in the ath_bufhead
5356 		 * are already linked together via bf_next pointers.
5357 		 */
5358 		bf = TAILQ_FIRST(&bf_q);
5359 
5360 		if (status == ATH_AGGR_8K_LIMITED)
5361 			sc->sc_aggr_stats.aggr_rts_aggr_limited++;
5362 
5363 		/*
5364 		 * If it's the only frame, send it as a non-aggregate;
5365 		 * assume that ath_tx_form_aggr() has checked
5366 		 * whether it's in the BAW and added it appropriately.
5367 		 */
5368 		if (bf->bf_state.bfs_nframes == 1) {
5369 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5370 			    "%s: single-frame aggregate\n", __func__);
5371 
5372 			/* Update CLRDMASK just before this frame is queued */
5373 			ath_tx_update_clrdmask(sc, tid, bf);
5374 
5375 			bf->bf_state.bfs_aggr = 0;
5376 			bf->bf_state.bfs_ndelim = 0;
5377 			ath_tx_setds(sc, bf);
5378 			ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5379 			if (status == ATH_AGGR_BAW_CLOSED)
5380 				sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
5381 			else
5382 				sc->sc_aggr_stats.aggr_single_pkt++;
5383 		} else {
5384 			DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5385 			    "%s: multi-frame aggregate: %d frames, "
5386 			    "length %d\n",
5387 			     __func__, bf->bf_state.bfs_nframes,
5388 			    bf->bf_state.bfs_al);
5389 			bf->bf_state.bfs_aggr = 1;
5390 			sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
5391 			sc->sc_aggr_stats.aggr_aggr_pkt++;
5392 
5393 			/* Update CLRDMASK just before this frame is queued */
5394 			ath_tx_update_clrdmask(sc, tid, bf);
5395 
5396 			/*
5397 			 * Calculate the duration/protection as required.
5398 			 */
5399 			ath_tx_calc_duration(sc, bf);
5400 			ath_tx_calc_protection(sc, bf);
5401 
5402 			/*
5403 			 * Update the rate and rtscts information based on the
5404 			 * rate decision made by the rate control code;
5405 			 * the first frame in the aggregate needs it.
5406 			 */
5407 			ath_tx_set_rtscts(sc, bf);
5408 
5409 			/*
5410 			 * Setup the relevant descriptor fields
5411 			 * for aggregation. The first descriptor
5412 			 * already points to the rest in the chain.
5413 			 */
5414 			ath_tx_setds_11n(sc, bf);
5415 
5416 		}
5417 	queuepkt:
5418 		/* Set completion handler, multi-frame aggregate or not */
5419 		bf->bf_comp = ath_tx_aggr_comp;
5420 
5421 		if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
5422 			DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__);
5423 
5424 		/*
5425 		 * Update leak count and frame config if we're leaking frames.
5426 		 *
5427 		 * XXX TODO: it should update all frames in an aggregate
5428 		 * correctly!
5429 		 */
5430 		ath_tx_leak_count_update(sc, tid, bf);
5431 
5432 		/* Punt to txq */
5433 		ath_tx_handoff(sc, txq, bf);
5434 
5435 		/* Track outstanding buffer count to hardware */
5436 		/* aggregates are "one" buffer */
5437 		tid->hwq_depth++;
5438 
5439 		/*
5440 		 * Break out if ath_tx_form_aggr() indicated
5441 		 * there can't be any further progress (eg BAW is full.)
5442 		 * Checking for an empty txq is done above.
5443 		 *
5444 		 * XXX locking on txq here?
5445 		 */
5447 		if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5448 		    status == ATH_AGGR_BAW_CLOSED ||
5449 		    status == ATH_AGGR_LEAK_CLOSED)
5450 			break;
5451 	}
5452 }
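
/*
 * The ratelookup/duration/protection/rtscts/rcflags/setds sequence above
 * is repeated in the non-BAW path, the single-frame path and in
 * ath_tx_tid_hw_queue_norm() below.  A hypothetical helper capturing the
 * common steps might look like this (sketch only; the multi-frame
 * aggregate path can't use it as-is, since it defers to
 * ath_tx_setds_11n() and recalculates duration/protection after
 * aggregate formation):
 */
#if 0
static void
example_tx_prepare_frame(struct ath_softc *sc, struct ath_buf *bf)
{

	ath_tx_do_ratelookup(sc, bf);
	ath_tx_calc_duration(sc, bf);
	ath_tx_calc_protection(sc, bf);
	ath_tx_set_rtscts(sc, bf);
	ath_tx_rate_fill_rcflags(sc, bf);
	ath_tx_setds(sc, bf);
}
#endif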
5453 
5454 /*
5455  * Schedule some packets from the given node/TID to the hardware.
5456  *
5457  * XXX TODO: this routine doesn't enforce the maximum TXQ depth.
5458  * It just dumps frames into the TXQ.  We should limit how deep
5459  * the transmit queue can grow for frames dispatched to the given
5460  * TXQ.
5461  *
5462  * To avoid locking issues, either we need to own the TXQ lock
5463  * at this point, or we need to pass in the maximum frame count
5464  * from the caller.
5465  */
5466 void
5467 ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
5468     struct ath_tid *tid)
5469 {
5470 	struct ath_buf *bf;
5471 	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5472 
5473 	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
5474 	    __func__, an, tid->tid);
5475 
5476 	ATH_TX_LOCK_ASSERT(sc);
5477 
5478 	/* Check - is AMPDU pending or running? then print out something */
5479 	if (ath_tx_ampdu_pending(sc, an, tid->tid))
5480 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n",
5481 		    __func__, tid->tid);
5482 	if (ath_tx_ampdu_running(sc, an, tid->tid))
5483 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n",
5484 		    __func__, tid->tid);
5485 
5486 	for (;;) {
5488 		/*
5489 		 * If the upper layers have paused the TID, don't
5490 		 * queue any further packets.
5491 		 *
5492 		 * XXX if we are leaking frames, make sure we decrement
5493 		 * that counter _and_ we continue here.
5494 		 */
5495 		if (! ath_tx_tid_can_tx_or_sched(sc, tid))
5496 			break;
5497 
5498 		bf = ATH_TID_FIRST(tid);
5499 		if (bf == NULL) {
5500 			break;
5501 		}
5502 
5503 		ATH_TID_REMOVE(tid, bf, bf_list);
5504 
5505 		/* Sanity check! */
5506 		if (tid->tid != bf->bf_state.bfs_tid) {
5507 			DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !="
5508 			    " tid %d\n", __func__, bf->bf_state.bfs_tid,
5509 			    tid->tid);
5510 		}
5511 		/* Normal completion handler */
5512 		bf->bf_comp = ath_tx_normal_comp;
5513 
5514 		/*
5515 		 * Override this for now, until the non-aggregate
5516 		 * completion handler correctly handles software retransmits.
5517 		 */
5518 		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
5519 
5520 		/* Update CLRDMASK just before this frame is queued */
5521 		ath_tx_update_clrdmask(sc, tid, bf);
5522 
5523 		/* Program descriptors + rate control */
5524 		ath_tx_do_ratelookup(sc, bf);
5525 		ath_tx_calc_duration(sc, bf);
5526 		ath_tx_calc_protection(sc, bf);
5527 		ath_tx_set_rtscts(sc, bf);
5528 		ath_tx_rate_fill_rcflags(sc, bf);
5529 		ath_tx_setds(sc, bf);
5530 
5531 		/*
5532 		 * Update the current leak count if
5533 		 * we're leaking frames; and set the
5534 		 * MORE flag as appropriate.
5535 		 */
5536 		ath_tx_leak_count_update(sc, tid, bf);
5537 
5538 		/* Track outstanding buffer count to hardware */
5539 		/* aggregates are "one" buffer */
5540 		tid->hwq_depth++;
5541 
5542 		/* Punt to hardware or software txq */
5543 		ath_tx_handoff(sc, txq, bf);
5544 	}
5545 }
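
/*
 * Per the XXX TODO above ath_tx_tid_hw_queue_norm(): one way to bound
 * the TXQ depth without additional locking is a caller-supplied frame
 * budget.  A hypothetical bounded variant (sketch only; "maxframes" and
 * the function name are not part of this driver):
 */
#if 0
static void
example_tid_hw_queue_norm_bounded(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, int maxframes)
{
	struct ath_buf *bf;
	struct ath_txq *txq = sc->sc_ac2q[tid->ac];
	int nframes = 0;

	ATH_TX_LOCK_ASSERT(sc);

	while (nframes < maxframes) {
		if (! ath_tx_tid_can_tx_or_sched(sc, tid))
			break;
		bf = ATH_TID_FIRST(tid);
		if (bf == NULL)
			break;
		ATH_TID_REMOVE(tid, bf, bf_list);
		bf->bf_comp = ath_tx_normal_comp;
		/* Program descriptors + rate control, as above */
		ath_tx_update_clrdmask(sc, tid, bf);
		ath_tx_do_ratelookup(sc, bf);
		ath_tx_calc_duration(sc, bf);
		ath_tx_calc_protection(sc, bf);
		ath_tx_set_rtscts(sc, bf);
		ath_tx_rate_fill_rcflags(sc, bf);
		ath_tx_setds(sc, bf);
		ath_tx_leak_count_update(sc, tid, bf);
		tid->hwq_depth++;
		ath_tx_handoff(sc, txq, bf);
		nframes++;
	}
}
#endif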
5546 
5547 /*
5548  * Schedule some packets to the given hardware queue.
5549  *
5550  * This function walks the list of TIDs (ie, ath_node TIDs
5551  * with queued traffic) and attempts to schedule traffic
5552  * from them.
5553  *
5554  * TID scheduling is implemented as a FIFO, with TIDs being
5555  * added to the end of the queue after some frames have been
5556  * scheduled.
5557  */
5558 void
5559 ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
5560 {
5561 	struct ath_tid *tid, *next, *last;
5562 
5563 	ATH_TX_LOCK_ASSERT(sc);
5564 
5565 	/*
5566 	 * Don't schedule if the hardware queue is busy.
5567 	 * This (hopefully) gives some more time to aggregate
5568 	 * some packets in the aggregation queue.
5569 	 *
5570 	 * XXX It doesn't stop a parallel sender from sneaking
5571 	 * in and transmitting a frame!
5572 	 */
5573 	/* XXX TXQ locking */
5574 	if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5575 		sc->sc_aggr_stats.aggr_sched_nopkt++;
5576 		return;
5577 	}
5578 	if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5579 		sc->sc_aggr_stats.aggr_sched_nopkt++;
5580 		return;
5581 	}
5582 
5583 	last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
5584 
5585 	TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
5586 		/*
5587 		 * Suspend paused queues here; they'll be resumed
5588 		 * once the addba completes or times out.
5589 		 */
5590 		DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
5591 		    __func__, tid->tid, tid->paused);
5592 		ath_tx_tid_unsched(sc, tid);
5593 		/*
5594 		 * This node may be in power-save and we're leaking
5595 		 * a frame; be careful.
5596 		 */
5597 		if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
5598 			goto loop_done;
5599 		}
5600 		if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
5601 			ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
5602 		else
5603 			ath_tx_tid_hw_queue_norm(sc, tid->an, tid);
5604 
5605 		/* Not empty? Re-schedule */
5606 		if (tid->axq_depth != 0)
5607 			ath_tx_tid_sched(sc, tid);
5608 
5609 		/*
5610 		 * Give the software queue time to aggregate more
5611 		 * packets.  If we aren't running aggregation then
5612 		 * we should still limit the hardware queue depth.
5613 		 */
5614 		/* XXX TXQ locking */
5615 		if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5616 			break;
5617 		}
5618 		if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5619 			break;
5620 		}
5621 loop_done:
5622 		/*
5623 		 * If this was the last entry on the original list, stop.
5624 		 * Otherwise nodes that have been rescheduled onto the end
5625 		 * of the TID FIFO list will just keep being rescheduled.
5626 		 *
5627 		 * XXX What should we do about nodes that were paused
5628 		 * but are pending a leaking frame in response to a ps-poll?
5629 		 * They'll be put at the front of the list; so they'll
5630 		 * prematurely trigger this condition! Ew.
5631 		 */
5632 		if (tid == last)
5633 			break;
5634 	}
5635 }
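
/*
 * The "last" sentinel above yields a simple round-robin: TIDs that are
 * rescheduled during this pass land at the tail and aren't revisited
 * until the next ath_txq_sched() call.  A self-contained sketch of the
 * same pattern (the "ex_tid" types are hypothetical; plain sys/queue.h):
 */
#if 0
#include <sys/queue.h>

struct ex_tid {
	int busy;
	TAILQ_ENTRY(ex_tid) qelem;
};
TAILQ_HEAD(ex_tidq, ex_tid);

static void
example_sched_pass(struct ex_tidq *q)
{
	struct ex_tid *t, *next, *last;

	last = TAILQ_LAST(q, ex_tidq);
	TAILQ_FOREACH_SAFE(t, q, qelem, next) {
		TAILQ_REMOVE(q, t, qelem);	/* unschedule */
		/* ... service some frames from 't' here ... */
		if (t->busy)
			TAILQ_INSERT_TAIL(q, t, qelem);	/* re-schedule */
		if (t == last)	/* one full pass done; stop */
			break;
	}
}
#endif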
5636 
5637 /*
5638  * TX addba handling
5639  */
5640 
5641 /*
5642  * Return net80211 TID struct pointer, or NULL for none
5643  */
5644 struct ieee80211_tx_ampdu *
5645 ath_tx_get_tx_tid(struct ath_node *an, int tid)
5646 {
5647 	struct ieee80211_node *ni = &an->an_node;
5648 	struct ieee80211_tx_ampdu *tap;
5649 
5650 	if (tid == IEEE80211_NONQOS_TID)
5651 		return NULL;
5652 
5653 	tap = &ni->ni_tx_ampdu[tid];
5654 	return tap;
5655 }
5656 
5657 /*
5658  * Is AMPDU-TX running?
5659  */
5660 static int
5661 ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
5662 {
5663 	struct ieee80211_tx_ampdu *tap;
5664 
5665 	if (tid == IEEE80211_NONQOS_TID)
5666 		return 0;
5667 
5668 	tap = ath_tx_get_tx_tid(an, tid);
5669 	if (tap == NULL)
5670 		return 0;	/* Not valid; default to not running */
5671 
5672 	return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
5673 }
5674 
5675 /*
5676  * Is AMPDU-TX negotiation pending?
5677  */
5678 static int
5679 ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
5680 {
5681 	struct ieee80211_tx_ampdu *tap;
5682 
5683 	if (tid == IEEE80211_NONQOS_TID)
5684 		return 0;
5685 
5686 	tap = ath_tx_get_tx_tid(an, tid);
5687 	if (tap == NULL)
5688 		return 0;	/* Not valid; default to not pending */
5689 
5690 	return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
5691 }
5692 
5698 /*
5699  * Method to handle sending an ADDBA request.
5700  *
5701  * We tap this so the relevant flags can be set to pause the TID
5702  * whilst waiting for the response.
5703  *
5704  * XXX there's no timeout handler we can override?
5705  */
5706 int
5707 ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5708     int dialogtoken, int baparamset, int batimeout)
5709 {
5710 	struct ath_softc *sc = ni->ni_ic->ic_softc;
5711 	int tid = tap->txa_tid;
5712 	struct ath_node *an = ATH_NODE(ni);
5713 	struct ath_tid *atid = &an->an_tid[tid];
5714 
5715 	/*
5716 	 * XXX danger Will Robinson!
5717 	 *
5718 	 * Although the taskqueue may be running and scheduling some more
5719 	 * packets, these should all be _before_ the addba sequence number.
5720 	 * However, net80211 will keep self-assigning sequence numbers
5721 	 * until addba has been negotiated.
5722 	 *
5723 	 * In the past, these packets would be "paused" (which still works
5724 	 * fine, as they're being scheduled to the driver in the same
5725 	 * serialised method which is calling the addba request routine)
5726 	 * and when the aggregation session begins, they'll be dequeued
5727 	 * as aggregate packets and added to the BAW. However, now there's
5728 	 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
5729 	 * packets. Thus they never get included in the BAW tracking and
5730 	 * this can cause the initial burst of packets after the addba
5731 	 * negotiation to "hang", as they quickly fall outside the BAW.
5732 	 *
5733 	 * The "eventual" solution should be to tag these packets with
5734 	 * dobaw. Although net80211 has given us a sequence number,
5735 	 * it'll be "after" the left edge of the BAW and thus it'll
5736 	 * fall within it.
5737 	 */
5738 	ATH_TX_LOCK(sc);
5739 	/*
5740 	 * This is a bit annoying.  Until net80211 HT code inherits some
5741 	 * (any) locking, we may have this called in parallel BUT only
5742 	 * one response/timeout will be called.  Grr.
5743 	 */
5744 	if (atid->addba_tx_pending == 0) {
5745 		ath_tx_tid_pause(sc, atid);
5746 		atid->addba_tx_pending = 1;
5747 	}
5748 	ATH_TX_UNLOCK(sc);
5749 
5750 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5751 	    "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
5752 	    __func__,
5753 	    ni->ni_macaddr,
5754 	    ":",
5755 	    dialogtoken, baparamset, batimeout);
5756 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5757 	    "%s: txa_start=%d, ni_txseqs=%d\n",
5758 	    __func__, tap->txa_start, ni->ni_txseqs[tid]);
5759 
5760 	return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
5761 	    batimeout);
5762 }
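
/*
 * ath_addba_request() chains to sc->sc_addba_request, ie the driver has
 * saved net80211's original method and interposed its own.  A sketch of
 * that hook-up as it would be done at attach time (the actual attach
 * code lives in if_ath.c and is assumed here, not shown in this file):
 */
#if 0
static void
example_addba_interpose(struct ath_softc *sc, struct ieee80211com *ic)
{

	sc->sc_addba_request = ic->ic_addba_request;
	ic->ic_addba_request = ath_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	ic->ic_addba_response = ath_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	ic->ic_addba_stop = ath_addba_stop;
	sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;
	ic->ic_addba_response_timeout = ath_addba_response_timeout;
	sc->sc_bar_response = ic->ic_bar_response;
	ic->ic_bar_response = ath_bar_response;
}
#endif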
5763 
5764 /*
5765  * Handle an ADDBA response.
5766  *
5767  * We unpause the queue so TX'ing can resume.
5768  *
5769  * Any packets TX'ed from this point should be "aggregate" (whether
5770  * aggregate or not) so the BAW is updated.
5771  *
5772  * Note! net80211 keeps self-assigning sequence numbers until
5773  * ampdu is negotiated. This means the initially-negotiated BAW left
5774  * edge won't match the ni->ni_txseq.
5775  *
5776  * So, being very dirty, the BAW left edge is "slid" here to match
5777  * ni->ni_txseq.
5778  *
5779  * What likely SHOULD happen is that all packets subsequent to the
5780  * addba request should be tagged for BAW tracking (ie, bfs_dobaw set)
5781  * and queued as non-aggregate frames, thus updating the BAW.
5782  * For now though, I'll just slide the window.
5783  */
5784 int
5785 ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5786     int status, int code, int batimeout)
5787 {
5788 	struct ath_softc *sc = ni->ni_ic->ic_softc;
5789 	int tid = tap->txa_tid;
5790 	struct ath_node *an = ATH_NODE(ni);
5791 	struct ath_tid *atid = &an->an_tid[tid];
5792 	int r;
5793 
5794 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5795 	    "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__,
5796 	    ni->ni_macaddr,
5797 	    ":",
5798 	    status, code, batimeout);
5799 
5800 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5801 	    "%s: txa_start=%d, ni_txseqs=%d\n",
5802 	    __func__, tap->txa_start, ni->ni_txseqs[tid]);
5803 
5804 	/*
5805 	 * Call this first, so the interface flags get updated
5806 	 * before the TID is unpaused. Otherwise a race condition
5807 	 * exists where the unpaused TID doesn't yet have
5808 	 * IEEE80211_AGGR_RUNNING set.
5809 	 */
5810 	r = sc->sc_addba_response(ni, tap, status, code, batimeout);
5811 
5812 	ATH_TX_LOCK(sc);
5813 	atid->addba_tx_pending = 0;
5814 	/*
5815 	 * XXX dirty!
5816 	 * Slide the BAW left edge to wherever net80211 left it for us.
5817 	 * Read above for more information.
5818 	 */
5819 	tap->txa_start = ni->ni_txseqs[tid];
5820 	ath_tx_tid_resume(sc, atid);
5821 	ATH_TX_UNLOCK(sc);
5822 	return r;
5823 }
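
/*
 * The txa_start slide above matters because 802.11 sequence numbers are
 * modulo-4096, so "within the BAW" must be computed with wrap-around
 * arithmetic.  A self-contained sketch (local macros; net80211 and this
 * driver have their own equivalents):
 */
#if 0
#define	EX_SEQ_RANGE		4096
#define	EX_SEQ_SUB(a, b)	(((a) + EX_SEQ_RANGE - (b)) & (EX_SEQ_RANGE - 1))

/* Is seqno inside the window [left, left + wnd) modulo 4096? */
static int
example_baw_within(int left, int wnd, int seqno)
{

	return (EX_SEQ_SUB(seqno, left) < wnd);
}
#endif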
5824 
5825 
5826 /*
5827  * Stop ADDBA on a queue.
5828  *
5829  * This can be called whilst BAR TX is currently active on the queue,
5830  * so make sure this is unblocked before continuing.
5831  */
5832 void
5833 ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
5834 {
5835 	struct ath_softc *sc = ni->ni_ic->ic_softc;
5836 	int tid = tap->txa_tid;
5837 	struct ath_node *an = ATH_NODE(ni);
5838 	struct ath_tid *atid = &an->an_tid[tid];
5839 	ath_bufhead bf_cq;
5840 	struct ath_buf *bf;
5841 
5842 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n",
5843 	    __func__,
5844 	    ni->ni_macaddr,
5845 	    ":");
5846 
5847 	/*
5848 	 * Pause TID traffic early, so there aren't any races.
5849 	 * Unblock the pending BAR-held traffic, if it's currently paused.
5850 	 */
5851 	ATH_TX_LOCK(sc);
5852 	ath_tx_tid_pause(sc, atid);
5853 	if (atid->bar_wait) {
5854 		/*
5855 		 * bar_unsuspend() expects bar_tx == 1, as it should be
5856 		 * called from the TX completion path.  This quietens
5857 		 * the warning.  It's cleared for us anyway.
5858 		 */
5859 		atid->bar_tx = 1;
5860 		ath_tx_tid_bar_unsuspend(sc, atid);
5861 	}
5862 	ATH_TX_UNLOCK(sc);
5863 
5864 	/* There's no need to hold the TXQ lock here */
5865 	sc->sc_addba_stop(ni, tap);
5866 
5867 	/*
5868 	 * ath_tx_tid_cleanup will resume the TID if possible, otherwise
5869 	 * it'll set the cleanup flag, and it'll be unpaused once
5870 	 * things have been cleaned up.
5871 	 */
5872 	TAILQ_INIT(&bf_cq);
5873 	ATH_TX_LOCK(sc);
5874 
5875 	/*
5876 	 * In case there's a followup call to this, only call it
5877 	 * if we don't have a cleanup in progress.
5878 	 *
5879 	 * Since we've paused the queue above, we need to make
5880 	 * sure we unpause if there's already a cleanup in
5881 	 * progress - it means something else is also doing
5882 	 * this stuff, so we don't need to also keep it paused.
5883 	 */
5884 	if (atid->cleanup_inprogress) {
5885 		ath_tx_tid_resume(sc, atid);
5886 	} else {
5887 		ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
5888 		/*
5889 		 * Unpause the TID if no cleanup is required.
5890 		 */
5891 		if (! atid->cleanup_inprogress)
5892 			ath_tx_tid_resume(sc, atid);
5893 	}
5894 	ATH_TX_UNLOCK(sc);
5895 
5896 	/* Handle completing frames and fail them */
5897 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5898 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
5899 		ath_tx_default_comp(sc, bf, 1);
5900 	}
5902 }
5903 
5904 /*
5905  * Handle a node reassociation.
5906  *
5907  * We may have a bunch of frames queued to the hardware; those need
5908  * to be marked as cleanup.
5909  */
5910 void
5911 ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
5912 {
5913 	struct ath_tid *tid;
5914 	int i;
5915 	ath_bufhead bf_cq;
5916 	struct ath_buf *bf;
5917 
5918 	TAILQ_INIT(&bf_cq);
5919 
5920 	ATH_TX_UNLOCK_ASSERT(sc);
5921 
5922 	ATH_TX_LOCK(sc);
5923 	for (i = 0; i < IEEE80211_TID_SIZE; i++) {
5924 		tid = &an->an_tid[i];
5925 		if (tid->hwq_depth == 0)
5926 			continue;
5927 		DPRINTF(sc, ATH_DEBUG_NODE,
5928 		    "%s: %6D: TID %d: cleaning up TID\n",
5929 		    __func__,
5930 		    an->an_node.ni_macaddr,
5931 		    ":",
5932 		    i);
5933 		/*
5934 		 * In case there's a followup call to this, only call it
5935 		 * if we don't have a cleanup in progress.
5936 		 */
5937 		if (! tid->cleanup_inprogress) {
5938 			ath_tx_tid_pause(sc, tid);
5939 			ath_tx_tid_cleanup(sc, an, i, &bf_cq);
5940 			/*
5941 			 * Unpause the TID if no cleanup is required.
5942 			 */
5943 			if (! tid->cleanup_inprogress)
5944 				ath_tx_tid_resume(sc, tid);
5945 		}
5946 	}
5947 	ATH_TX_UNLOCK(sc);
5948 
5949 	/* Handle completing frames and fail them */
5950 	while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5951 		TAILQ_REMOVE(&bf_cq, bf, bf_list);
5952 		ath_tx_default_comp(sc, bf, 1);
5953 	}
5954 }
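
/*
 * The drain-and-fail loop above also closes out ath_addba_stop(); a
 * trivial helper would capture it (sketch only; this helper doesn't
 * exist in the driver):
 */
#if 0
static void
example_comp_fail_list(struct ath_softc *sc, ath_bufhead *bf_cq)
{
	struct ath_buf *bf;

	while ((bf = TAILQ_FIRST(bf_cq)) != NULL) {
		TAILQ_REMOVE(bf_cq, bf, bf_list);
		ath_tx_default_comp(sc, bf, 1);	/* fail=1 */
	}
}
#endif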
5955 
5956 /*
5957  * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
5958  * it simply tears down the aggregation session. Ew.
5959  *
5960  * It however will call ieee80211_ampdu_stop() which will call
5961  * ic->ic_addba_stop().
5962  *
5963  * XXX This uses a hard-coded max BAR count value; the whole
5964  * XXX BAR TX success or failure should be better handled!
5965  */
5966 void
5967 ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5968     int status)
5969 {
5970 	struct ath_softc *sc = ni->ni_ic->ic_softc;
5971 	int tid = tap->txa_tid;
5972 	struct ath_node *an = ATH_NODE(ni);
5973 	struct ath_tid *atid = &an->an_tid[tid];
5974 	int attempts = tap->txa_attempts;
5975 	int old_txa_start;
5976 
5977 	DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
5978 	    "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n",
5979 	    __func__,
5980 	    ni->ni_macaddr,
5981 	    ":",
5982 	    tap->txa_tid,
5983 	    atid->tid,
5984 	    status,
5985 	    attempts,
5986 	    tap->txa_start,
5987 	    tap->txa_seqpending);
5988 
5989 	/* Note: This may update the BAW details */
5990 	/*
5991 	 * XXX What if this does slide the BAW along? We need to somehow
5992 	 * XXX either fix things when it does happen, or prevent the
5993 	 * XXX seqpending value from being anything other than exactly what
5994 	 * XXX the hell we want!
5995 	 *
5996 	 * XXX So for now, how about I do this inside the TX lock
5997 	 * XXX and just correct it afterwards? The below condition should
5998 	 * XXX never happen and if it does I need to fix all kinds of things.
5999 	 */
6000 	ATH_TX_LOCK(sc);
6001 	old_txa_start = tap->txa_start;
6002 	sc->sc_bar_response(ni, tap, status);
6003 	if (tap->txa_start != old_txa_start) {
6004 		device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",
6005 		    __func__,
6006 		    tid,
6007 		    tap->txa_start,
6008 		    old_txa_start);
6009 	}
6010 	tap->txa_start = old_txa_start;
6011 	ATH_TX_UNLOCK(sc);
6012 
6013 	/* Unpause the TID */
6014 	/*
6015 	 * XXX if this is attempt=50, the TID will be downgraded
6016 	 * XXX to a non-aggregate session. So we must unpause the
6017 	 * XXX TID here or it'll never be done.
6018 	 *
6019 	 * Also, don't call it if bar_tx/bar_wait are 0; something
6020 	 * has beaten us to the punch? (XXX figure out what?)
6021 	 */
6022 	if (status == 0 || attempts == 50) {
6023 		ATH_TX_LOCK(sc);
6024 		if (atid->bar_tx == 0 || atid->bar_wait == 0)
6025 			DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
6026 			    "%s: huh? bar_tx=%d, bar_wait=%d\n",
6027 			    __func__,
6028 			    atid->bar_tx, atid->bar_wait);
6029 		else
6030 			ath_tx_tid_bar_unsuspend(sc, atid);
6031 		ATH_TX_UNLOCK(sc);
6032 	}
6033 }
6034 
6035 /*
6036  * This is called whenever the pending ADDBA request times out.
6037  * Unpause and reschedule the TID.
6038  */
6039 void
6040 ath_addba_response_timeout(struct ieee80211_node *ni,
6041     struct ieee80211_tx_ampdu *tap)
6042 {
6043 	struct ath_softc *sc = ni->ni_ic->ic_softc;
6044 	int tid = tap->txa_tid;
6045 	struct ath_node *an = ATH_NODE(ni);
6046 	struct ath_tid *atid = &an->an_tid[tid];
6047 
6048 	DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
6049 	    "%s: %6D: TID=%d, called; resuming\n",
6050 	    __func__,
6051 	    ni->ni_macaddr,
6052 	    ":",
6053 	    tid);
6054 
6055 	ATH_TX_LOCK(sc);
6056 	atid->addba_tx_pending = 0;
6057 	ATH_TX_UNLOCK(sc);
6058 
6059 	/* Note: This updates the aggregate state to (again) pending */
6060 	sc->sc_addba_response_timeout(ni, tap);
6061 
6062 	/* Unpause the TID; which reschedules it */
6063 	ATH_TX_LOCK(sc);
6064 	ath_tx_tid_resume(sc, atid);
6065 	ATH_TX_UNLOCK(sc);
6066 }
6067 
6068 /*
6069  * Check if a node is asleep or not.
6070  */
6071 int
6072 ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
6073 {
6074 
6075 	ATH_TX_LOCK_ASSERT(sc);
6076 
6077 	return (an->an_is_powersave);
6078 }
6079 
6080 /*
6081  * Mark a node as currently "in powersaving."
6082  * This suspends all traffic on the node.
6083  *
6084  * This must be called with the node/tx locks free.
6085  *
6086  * XXX TODO: the locking silliness below is due to how the node
6087  * locking currently works.  Right now, the node lock is grabbed
6088  * to do rate control lookups and these are done with the TX
6089  * queue lock held.  This means the node lock can't be grabbed
6090  * first here or a LOR will occur.
6091  *
6092  * Eventually (hopefully!) the TX path code will only grab
6093  * the TXQ lock when transmitting and the ath_node lock when
6094  * doing node/TID operations.  There are other complications -
6095  * the sched/unsched operations involve walking the per-txq
6096  * 'active tid' list and this requires both locks to be held.
6097  */
6098 void
6099 ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
6100 {
6101 	struct ath_tid *atid;
6102 	struct ath_txq *txq;
6103 	int tid;
6104 
6105 	ATH_TX_UNLOCK_ASSERT(sc);
6106 
6107 	/* Suspend all traffic on the node */
6108 	ATH_TX_LOCK(sc);
6109 
6110 	if (an->an_is_powersave) {
6111 		DPRINTF(sc, ATH_DEBUG_XMIT,
6112 		    "%s: %6D: node was already asleep!\n",
6113 		    __func__, an->an_node.ni_macaddr, ":");
6114 		ATH_TX_UNLOCK(sc);
6115 		return;
6116 	}
6117 
6118 	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
6119 		atid = &an->an_tid[tid];
6120 		txq = sc->sc_ac2q[atid->ac];
6121 
6122 		ath_tx_tid_pause(sc, atid);
6123 	}
6124 
6125 	/* Mark node as in powersaving */
6126 	an->an_is_powersave = 1;
6127 
6128 	ATH_TX_UNLOCK(sc);
6129 }
6130 
6131 /*
6132  * Mark a node as currently "awake."
6133  * This resumes all traffic to the node.
6134  */
6135 void
6136 ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
6137 {
6138 	struct ath_tid *atid;
6139 	struct ath_txq *txq;
6140 	int tid;
6141 
6142 	ATH_TX_UNLOCK_ASSERT(sc);
6143 
6144 	ATH_TX_LOCK(sc);
6145 
6146 	/* !? */
6147 	if (an->an_is_powersave == 0) {
6148 		ATH_TX_UNLOCK(sc);
6149 		DPRINTF(sc, ATH_DEBUG_XMIT,
6150 		    "%s: an=%p: node was already awake\n",
6151 		    __func__, an);
6152 		return;
6153 	}
6154 
6155 	/* Mark node as awake */
6156 	an->an_is_powersave = 0;
6157 	/*
6158 	 * Clear any pending leaked frame requests
6159 	 */
6160 	an->an_leak_count = 0;
6161 
6162 	for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
6163 		atid = &an->an_tid[tid];
6164 		txq = sc->sc_ac2q[atid->ac];
6165 
6166 		ath_tx_tid_resume(sc, atid);
6167 	}
6168 	ATH_TX_UNLOCK(sc);
6169 }
6170 
6171 static int
6172 ath_legacy_dma_txsetup(struct ath_softc *sc)
6173 {
6174 
6175 	/* nothing new needed */
6176 	return (0);
6177 }
6178 
6179 static int
6180 ath_legacy_dma_txteardown(struct ath_softc *sc)
6181 {
6182 
6183 	/* nothing new needed */
6184 	return (0);
6185 }
6186 
6187 void
6188 ath_xmit_setup_legacy(struct ath_softc *sc)
6189 {
6190 	/*
6191 	 * For now, just set the descriptor length to sizeof(ath_desc);
6192 	 * worry about extracting the real length out of the HAL later.
6193 	 */
6194 	sc->sc_tx_desclen = sizeof(struct ath_desc);
6195 	sc->sc_tx_statuslen = sizeof(struct ath_desc);
6196 	sc->sc_tx_nmaps = 1;	/* only one buffer per TX desc */
6197 
6198 	sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
6199 	sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
6200 	sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;
6201 
6202 	sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
6203 	sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;
6204 
6205 	sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
6206 }
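
/*
 * The sc_tx method table populated above is what lets an alternate TX
 * DMA implementation (eg the EDMA path on newer chip families) plug in
 * without touching callers.  A hypothetical variant only needs to
 * repopulate the same hooks (all "example_*" names below are made up):
 */
#if 0
static int example_dma_txsetup(struct ath_softc *sc);
static int example_dma_txteardown(struct ath_softc *sc);

void
example_xmit_setup_variant(struct ath_softc *sc)
{
	/* Descriptor/status sizes would come from the chip family */
	sc->sc_tx_desclen = sizeof(struct ath_desc);
	sc->sc_tx_statuslen = sizeof(struct ath_desc);
	sc->sc_tx_nmaps = 1;

	sc->sc_tx.xmit_setup = example_dma_txsetup;
	sc->sc_tx.xmit_teardown = example_dma_txteardown;
	/* ... remaining hooks (handoff, drain, etc) as in the legacy setup */
}
#endif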
6207