xref: /titanic_52/usr/src/uts/common/io/arn/arn_xmit.c (revision d35e9352c203d3c67ed57186e0e8a6de000384ab)
1 /*
2  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2008 Atheros Communications Inc.
8  *
9  * Permission to use, copy, modify, and/or distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 #include <sys/param.h>
22 #include <sys/types.h>
23 #include <sys/signal.h>
24 #include <sys/stream.h>
25 #include <sys/termio.h>
26 #include <sys/errno.h>
27 #include <sys/file.h>
28 #include <sys/cmn_err.h>
29 #include <sys/stropts.h>
30 #include <sys/strsubr.h>
31 #include <sys/strtty.h>
32 #include <sys/kbio.h>
33 #include <sys/cred.h>
34 #include <sys/stat.h>
35 #include <sys/consdev.h>
36 #include <sys/kmem.h>
37 #include <sys/modctl.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/pci.h>
42 #include <sys/mac_provider.h>
43 #include <sys/dlpi.h>
44 #include <sys/ethernet.h>
45 #include <sys/list.h>
46 #include <sys/byteorder.h>
47 #include <sys/strsun.h>
48 #include <sys/policy.h>
49 #include <inet/common.h>
50 #include <inet/nd.h>
51 #include <inet/mi.h>
52 #include <inet/wifi_ioctl.h>
53 #include <sys/mac_wifi.h>
54 
55 #include "arn_core.h"
56 
57 #define	BITS_PER_BYTE		8
58 #define	OFDM_PLCP_BITS		22
59 #define	HT_RC_2_MCS(_rc)	((_rc) & 0x0f)
60 #define	HT_RC_2_STREAMS(_rc)	((((_rc) & 0x78) >> 3) + 1)
61 #define	L_STF			8
62 #define	L_LTF			8
63 #define	L_SIG			4
64 #define	HT_SIG			8
65 #define	HT_STF			4
66 #define	HT_LTF(_ns)		(4 * (_ns))
67 #define	SYMBOL_TIME(_ns)	((_ns) << 2) /* ns * 4 us */
68 #define	SYMBOL_TIME_HALFGI(_ns)	(((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
69 #define	NUM_SYMBOLS_PER_USEC(_usec)	(_usec >> 2)
70 #define	NUM_SYMBOLS_PER_USEC_HALFGI(_usec)	(((_usec*5)-4)/18)
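
/*
 * Worked example (editor's note, not in the original source): a frame
 * spanning 10 OFDM symbols lasts SYMBOL_TIME(10) = 40 us with the full
 * guard interval, and SYMBOL_TIME_HALFGI(10) = (10 * 18 + 4) / 5 = 36 us
 * with the short guard interval (3.6 us per symbol).  Going the other
 * way, an 8 us MPDU density corresponds to NUM_SYMBOLS_PER_USEC(8) = 2
 * symbols at full GI and NUM_SYMBOLS_PER_USEC_HALFGI(8) = 2 at half GI.
 */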
71 
72 #define	OFDM_SIFS_TIME	16
73 
74 static uint32_t bits_per_symbol[][2] = {
75 	/* 20MHz 40MHz */
76 	{    26,  54 },		/*  0: BPSK */
77 	{    52,  108 },	/*  1: QPSK 1/2 */
78 	{    78,  162 },	/*  2: QPSK 3/4 */
79 	{   104,  216 },	/*  3: 16-QAM 1/2 */
80 	{   156,  324 },	/*  4: 16-QAM 3/4 */
81 	{   208,  432 },	/*  5: 64-QAM 2/3 */
82 	{   234,  486 },	/*  6: 64-QAM 3/4 */
83 	{   260,  540 },	/*  7: 64-QAM 5/6 */
84 	{    52,  108 },	/*  8: BPSK */
85 	{   104,  216 },	/*  9: QPSK 1/2 */
86 	{   156,  324 },	/* 10: QPSK 3/4 */
87 	{   208,  432 },	/* 11: 16-QAM 1/2 */
88 	{   312,  648 },	/* 12: 16-QAM 3/4 */
89 	{   416,  864 },	/* 13: 64-QAM 2/3 */
90 	{   468,  972 },	/* 14: 64-QAM 3/4 */
91 	{   520,  1080 },	/* 15: 64-QAM 5/6 */
92 };
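
/*
 * Editor's note: rows 0-7 above are the single-stream MCS 0-7 rates and
 * rows 8-15 their two-stream (MCS 8-15) doubles, indexed by
 * HT_RC_2_MCS(ratecode).  Sanity check: MCS 7 at 20 MHz carries 260 bits
 * per 4 us symbol, i.e. 260 / 4 = 65 Mbit/s, the nominal long-GI rate.
 */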
93 
94 #define	IS_HT_RATE(_rate)	((_rate) & 0x80)
95 
96 #ifdef ARN_TX_AGGREGATION
97 static void arn_tx_send_ht_normal(struct arn_softc *sc, struct ath_txq *txq,
98     struct ath_atx_tid *tid, list_t *bf_list);
99 static void arn_tx_complete_buf(struct arn_softc *sc, struct ath_buf *bf,
100     list_t *bf_q, int txok, int sendbar);
101 static void arn_tx_txqaddbuf(struct arn_softc *sc, struct ath_txq *txq,
102     list_t *buf_list);
103 static void arn_buf_set_rate(struct arn_softc *sc, struct ath_buf *bf);
104 static int arn_tx_num_badfrms(struct arn_softc *sc,
105     struct ath_buf *bf, int txok);
106 static void arn_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
107     int nbad, int txok, boolean_t update_rc);
108 #endif /* ARN_TX_AGGREGATION */
109 
110 static void
111 arn_get_beaconconfig(struct arn_softc *sc, struct ath_beacon_config *conf)
112 {
113 	ieee80211com_t *ic = (ieee80211com_t *)sc;
114 	struct ieee80211_node *in = ic->ic_bss;
115 
116 	/* fill in beacon config data */
117 
118 	conf->beacon_interval = in->in_intval ?
119 	    in->in_intval : ATH_DEFAULT_BINTVAL;
120 	conf->listen_interval = 100;
121 	conf->dtim_count = 1;
122 	conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
123 }
124 
125 /* Aggregation logic */
126 
127 #ifdef ARN_TX_AGGREGATION
128 
129 /* Check if it's okay to send out aggregates */
130 static int
131 arn_aggr_query(struct arn_softc *sc, struct ath_node *an, uint8_t tidno)
132 {
133 	struct ath_atx_tid *tid;
134 	tid = ATH_AN_2_TID(an, tidno);
135 
136 	if (tid->state & AGGR_ADDBA_COMPLETE ||
137 	    tid->state & AGGR_ADDBA_PROGRESS)
138 		return (1);
139 	else
140 		return (0);
141 }
142 
143 /*
144  * queue up a dest/ac pair for tx scheduling
145  * NB: must be called with txq lock held
146  */
147 static void
148 arn_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
149 {
150 	struct ath_atx_ac *ac = tid->ac;
151 
152 	/* if tid is paused, hold off */
153 	if (tid->paused)
154 		return;
155 
156 	/* add tid to ac at most once */
157 	if (tid->sched)
158 		return;
159 
160 	tid->sched = B_TRUE;
161 	list_insert_tail(&ac->tid_q, &tid->list);
162 
163 	/* add node ac to txq at most once */
164 	if (ac->sched)
165 		return;
166 
167 	ac->sched = B_TRUE;
168 	list_insert_tail(&txq->axq_acq, &ac->list);
169 }
170 
171 /* pause a tid */
172 static void
173 arn_tx_pause_tid(struct arn_softc *sc, struct ath_atx_tid *tid)
174 {
175 	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
176 
177 	mutex_enter(&txq->axq_lock);
178 
179 	tid->paused++;
180 
181 	mutex_exit(&txq->axq_lock);
182 }
183 
184 /* resume a tid and schedule aggregate */
185 void
186 arn_tx_resume_tid(struct arn_softc *sc, struct ath_atx_tid *tid)
187 {
188 	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
189 
190 	ASSERT(tid->paused > 0);
191 	mutex_enter(&txq->axq_lock);
192 
193 	tid->paused--;
194 
195 	if (tid->paused > 0)
196 		goto unlock;
197 
198 	if (list_empty(&tid->buf_q))
199 		goto unlock;
200 
201 	/*
202 	 * Add this TID to scheduler and try to send out aggregates
203 	 */
204 	arn_tx_queue_tid(txq, tid);
205 	arn_txq_schedule(sc, txq);
206 unlock:
207 	mutex_exit(&txq->axq_lock);
208 }
209 
210 /* flush a tid's software queue and send its frames as non-AMPDUs */
211 static void
212 arn_tx_flush_tid(struct arn_softc *sc, struct ath_atx_tid *tid)
213 {
214 	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
215 	struct ath_buf *bf;
216 
217 	list_t list;
218 	list_create(&list, sizeof (struct ath_buf),
219 	    offsetof(struct ath_buf, bf_node));
220 
221 	ASSERT(tid->paused > 0);
222 	mutex_enter(&txq->axq_lock);
223 
224 	tid->paused--;
225 
226 	if (tid->paused > 0) {
227 		mutex_exit(&txq->axq_lock);
228 		return;
229 	}
230 
231 	while (!list_empty(&tid->buf_q)) {
232 		bf = list_head(&tid->buf_q);
233 		ASSERT(!bf_isretried(bf));
234 		list_remove(&tid->buf_q, bf);
235 		list_insert_tail(&list, bf);
236 		arn_tx_send_ht_normal(sc, txq, tid, &list);
237 	}
238 
239 	mutex_exit(&txq->axq_lock);
240 }
241 
242 /* Update block ack window */
243 static void
244 arn_tx_update_baw(struct arn_softc *sc, struct ath_atx_tid *tid, int seqno)
245 {
246 	int index, cindex;
247 
248 	index  = ATH_BA_INDEX(tid->seq_start, seqno);
249 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
250 
251 	tid->tx_buf[cindex] = NULL;
252 
253 	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
254 		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
255 		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
256 	}
257 }
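
/*
 * Worked example (editor's illustration): with tid->seq_start = 100 and
 * a completed seqno of 102, ATH_BA_INDEX yields 2 and the slot two
 * entries past baw_head is cleared.  The while loop then slides the
 * window: while the head slot is empty, seq_start and baw_head advance
 * together, so the window origin always sits on the oldest subframe
 * that is still outstanding.
 */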
258 
259 /* Add a sub-frame to block ack window */
260 static void
261 arn_tx_addto_baw(struct arn_softc *sc, struct ath_atx_tid *tid,
262     struct ath_buf *bf)
263 {
264 	int index, cindex;
265 
266 	if (bf_isretried(bf))
267 		return;
268 
269 	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
270 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
271 
272 	ASSERT(tid->tx_buf[cindex] == NULL);
273 	tid->tx_buf[cindex] = bf;
274 
275 	if (index >= ((tid->baw_tail - tid->baw_head) &
276 	    (ATH_TID_MAX_BUFS - 1))) {
277 		tid->baw_tail = cindex;
278 		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
279 	}
280 }
281 
282 /*
283  * TODO: For frame(s) that are in the retry state, we will reuse the
284  * sequence number(s) without setting the retry bit. The
285  * alternative is to give up on these and BAR the receiver's window
286  * forward.
287  */
288 static void
289 arn_tid_drain(struct arn_softc *sc,
290     struct ath_txq *txq,
291     struct ath_atx_tid *tid)
292 
293 {
294 	struct ath_buf *bf;
295 
296 	list_t list;
297 	list_create(&list, sizeof (struct ath_buf),
298 	    offsetof(struct ath_buf, bf_node));
299 
300 	for (;;) {
301 		if (list_empty(&tid->buf_q))
302 			break;
303 
304 		bf = list_head(&tid->buf_q);
305 		list_remove(&tid->buf_q, bf);
306 		list_insert_tail(&list, bf);
307 
308 		if (bf_isretried(bf))
309 			arn_tx_update_baw(sc, tid, bf->bf_seqno);
310 
311 		mutex_enter(&txq->axq_lock);
312 		arn_tx_complete_buf(sc, bf, &list, 0, 0);
313 		mutex_exit(&txq->axq_lock);
314 	}
315 
316 	tid->seq_next = tid->seq_start;
317 	tid->baw_tail = tid->baw_head;
318 }
319 
320 static void
321 arn_tx_set_retry(struct arn_softc *sc, struct ath_buf *bf)
322 {
323 	struct ieee80211_frame *wh;
324 	wh = (struct ieee80211_frame *)bf->bf_dma.mem_va;
325 
326 	bf->bf_state.bf_type |= BUF_RETRY;
327 	bf->bf_retries++;
328 
329 	*(uint16_t *)&wh->i_fc[0] |= LE_16(0x0800); /* retry bit (FC1) */
330 }
331 
332 static struct ath_buf *
333 arn_clone_txbuf(struct arn_softc *sc, struct ath_buf *bf)
334 {
335 	struct ath_buf *tbf;
336 
337 	mutex_enter(&sc->sc_txbuflock);
338 	ASSERT(!list_empty((&sc->sc_txbuf_list)));
339 
340 	tbf = list_head(&sc->sc_txbuf_list);
341 	list_remove(&sc->sc_txbuf_list, tbf);
342 	mutex_exit(&sc->sc_txbuflock);
343 
344 	ATH_TXBUF_RESET(tbf);
345 
346 	tbf->bf_daddr = bf->bf_daddr; /* physical addr of desc */
347 	tbf->bf_dma = bf->bf_dma; /* dma area for buf */
348 	*(tbf->bf_desc) = *(bf->bf_desc); /* virtual addr of desc */
349 	tbf->bf_state = bf->bf_state; /* buffer state */
350 
351 	return (tbf);
352 }
353 
354 static void
355 arn_tx_complete_aggr(struct arn_softc *sc, struct ath_txq *txq,
356     struct ath_buf *bf, list_t *bf_q, int txok)
357 {
358 	struct ieee80211_node *in = bf->bf_in; /* set at buffer setup */
359 	struct ath_node *an = NULL;
360 	struct ath_atx_tid *tid = NULL;
361 	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
362 	struct ath_desc *ds = bf_last->bf_desc;
363 
364 	list_t list, list_pending;
365 	uint16_t seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
366 	uint32_t ba[WME_BA_BMP_SIZE >> 5];
367 	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
368 	boolean_t rc_update = B_TRUE;
369 
370 	an = ATH_NODE(in); /* Be sure in != NULL */
371 	tid = ATH_AN_2_TID(an, bf->bf_tidno);
372 
373 	isaggr = bf_isaggr(bf);
374 	memset(ba, 0, WME_BA_BMP_SIZE >> 3);
375 
376 	if (isaggr && txok) {
377 		if (ATH_DS_TX_BA(ds)) {
378 			seq_st = ATH_DS_BA_SEQ(ds);
379 			memcpy(ba, ATH_DS_BA_BITMAP(ds),
380 			    WME_BA_BMP_SIZE >> 3);
381 		} else {
382 			/*
383 			 * The AR5416 can become deaf/mute when a BA
384 			 * issue happens and the chip needs to be reset,
385 			 * but the AP code may have synchronization issues
386 			 * when performing an internal reset in this
387 			 * routine. Only enable the reset in STA mode for now.
388 			 */
389 			if (sc->sc_ah->ah_opmode == ATH9K_M_STA)
390 				needreset = 1;
391 		}
392 	}
393 
394 	list_create(&list_pending, sizeof (struct ath_buf),
395 	    offsetof(struct ath_buf, bf_node));
396 	list_create(&list, sizeof (struct ath_buf),
397 	    offsetof(struct ath_buf, bf_node));
398 
399 	nbad = arn_tx_num_badfrms(sc, bf, txok);
400 	while (bf) {
401 		txfail = txpending = 0;
402 		bf_next = bf->bf_next;
403 
404 		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
405 			/*
406 			 * transmit completion, subframe is
407 			 * acked by block ack
408 			 */
409 			acked_cnt++;
410 		} else if (!isaggr && txok) {
411 			/* transmit completion */
412 			acked_cnt++;
413 		} else {
414 			if (!(tid->state & AGGR_CLEANUP) &&
415 			    ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
416 				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
417 					arn_tx_set_retry(sc, bf);
418 					txpending = 1;
419 				} else {
420 					bf->bf_state.bf_type |= BUF_XRETRY;
421 					txfail = 1;
422 					sendbar = 1;
423 					txfail_cnt++;
424 				}
425 			} else {
426 				/*
427 				 * cleanup in progress, just fail
428 				 * the un-acked sub-frames
429 				 */
430 				txfail = 1;
431 			}
432 		}
433 
434 		if (bf_next == NULL) {
435 			/* INIT_LIST_HEAD */
436 			list_create(&list, sizeof (struct ath_buf),
437 			    offsetof(struct ath_buf, bf_node));
438 		} else {
439 			ASSERT(!list_empty(bf_q));
440 			list_remove(bf_q, bf);
441 			list_insert_tail(&list, bf);
442 		}
443 
444 		if (!txpending) {
445 			/*
446 			 * complete the acked-ones/xretried ones; update
447 			 * block-ack window
448 			 */
449 			mutex_enter(&txq->axq_lock);
450 			arn_tx_update_baw(sc, tid, bf->bf_seqno);
451 			mutex_exit(&txq->axq_lock);
452 
453 			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
454 				arn_tx_rc_status(bf, ds, nbad, txok, B_TRUE);
455 				rc_update = B_FALSE;
456 			} else {
457 				arn_tx_rc_status(bf, ds, nbad, txok, B_FALSE);
458 			}
459 
460 			arn_tx_complete_buf(sc, bf, &list, !txfail, sendbar);
461 		} else {
462 			/* retry the un-acked ones */
463 			if (bf->bf_next == NULL &&
464 			    bf_last->bf_status & ATH_BUFSTATUS_STALE) {
465 				struct ath_buf *tbf;
466 
467 				tbf = arn_clone_txbuf(sc, bf_last);
468 				ath9k_hw_cleartxdesc(sc->sc_ah, tbf->bf_desc);
469 				list_insert_tail(&list, tbf);
470 			} else {
471 				/*
472 				 * Clear descriptor status words for
473 				 * software retry
474 				 */
475 				ath9k_hw_cleartxdesc(sc->sc_ah, bf->bf_desc);
476 			}
477 
478 			/*
479 			 * Put this buffer to the temporary pending
480 			 * queue to retain ordering
481 			 */
482 			list_splice_tail_init(&list, &list_pending);
483 			/*
484 			 * list_splice_tail_init() appends the src list to
485 			 * the dst list and empties the src list, so 'list'
486 			 * is ready for reuse on the next iteration.
487 			 */
489 		}
490 
491 		bf = bf_next;
492 	}
493 
494 	if (tid->state & AGGR_CLEANUP) {
495 		if (tid->baw_head == tid->baw_tail) {
496 			tid->state &= ~AGGR_ADDBA_COMPLETE;
497 			tid->addba_exchangeattempts = 0;
498 			tid->state &= ~AGGR_CLEANUP;
499 
500 			/* send buffered frames as singles */
501 			arn_tx_flush_tid(sc, tid);
502 		}
503 		return;
504 	}
505 
506 	/*
507 	 * prepend un-acked frames to the beginning of
508 	 * the pending frame queue
509 	 */
510 
511 	if (!list_empty(&list_pending)) {
512 		mutex_enter(&txq->axq_lock);
513 		list_move_tail(&list_pending, &tid->buf_q);
514 		arn_tx_queue_tid(txq, tid);
515 		mutex_exit(&txq->axq_lock);
516 	}
517 }
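
/*
 * Editor's illustration of the completion path above: suppose a 4-frame
 * aggregate with seqnos 100..103 completes and the hardware reports a
 * block-ack starting at seq_st = 100 with bitmap bits 0, 1 and 3 set.
 * Subframes 100, 101 and 103 are counted in acked_cnt and completed;
 * 102 misses the bitmap, is marked for software retry and lands on
 * list_pending, which is then put back on tid->buf_q so that it is
 * retried ahead of newer traffic.
 */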
518 
519 static uint32_t
520 arn_lookup_rate(struct arn_softc *sc, struct ath_buf *bf,
521     struct ath_atx_tid *tid)
522 {
523 	struct ath_rate_table *rate_table = sc->sc_currates;
524 	struct ath9k_tx_rate *rates;
525 	struct ath_tx_info_priv *tx_info_priv;
526 	uint32_t max_4ms_framelen, frmlen;
527 	uint16_t aggr_limit, legacy = 0, maxampdu;
528 	int i;
529 
530 	/* XXX: rate series for this buffer (see arn_get_rate()) */
531 	rates = (struct ath9k_tx_rate *)bf->rates;
532 	tx_info_priv = (struct ath_tx_info_priv *)&bf->tx_info_priv;
533 
534 	/*
535 	 * Find the lowest frame length among the rate series that will have a
536 	 * 4ms transmit duration.
537 	 * TODO - TXOP limit needs to be considered.
538 	 */
539 	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
540 
541 	for (i = 0; i < 4; i++) {
542 		if (rates[i].count) {
543 			if (!WLAN_RC_PHY_HT
544 			    (rate_table->info[rates[i].idx].phy)) {
545 				legacy = 1;
546 				break;
547 			}
548 
549 			frmlen =
550 			    rate_table->info[rates[i].idx].max_4ms_framelen;
551 			max_4ms_framelen = min(max_4ms_framelen, frmlen);
552 		}
553 	}
554 
555 	/*
556 	 * Limit the aggregate size by the minimum rate if the selected
557 	 * rate is not a probe rate; if the selected rate is a probe
558 	 * rate, avoid aggregating this packet.
559 	 */
560 	if (legacy)
561 		return (0);
562 
563 	aggr_limit = min(max_4ms_framelen, (uint32_t)ATH_AMPDU_LIMIT_DEFAULT);
564 
565 	/*
566 	 * The h/w can accept aggregates up to 16-bit lengths (65535).
567 	 * The IE, however, can hold up to 65536, which shows up here
568 	 * as zero. Ignore 65536, since we are constrained by the h/w.
569 	 */
570 	maxampdu = tid->an->maxampdu;
571 	if (maxampdu)
572 		aggr_limit = min(aggr_limit, maxampdu);
573 
574 	return (aggr_limit);
575 }
576 
577 /*
578  * Returns the number of delimiters to be added to
579  * meet the minimum required mpdudensity.
580  * The caller should make sure that the rate is an HT rate.
581  */
582 static int
583 arn_compute_num_delims(struct arn_softc *sc, struct ath_atx_tid *tid,
584     struct ath_buf *bf, uint16_t frmlen)
585 {
586 	struct ath_rate_table *rt = sc->sc_currates;
587 	struct ath9k_tx_rate *rates = (struct ath9k_tx_rate *)bf->rates;
588 	uint32_t nsymbits, nsymbols, mpdudensity;
589 	uint16_t minlen;
590 	uint8_t rc, flags, rix;
591 	int width, half_gi, ndelim, mindelim;
592 
593 	/* Select standard number of delimiters based on frame length alone */
594 	ndelim = ATH_AGGR_GET_NDELIM(frmlen);
595 
596 	/*
597 	 * If encryption enabled, hardware requires some more padding between
598 	 * subframes.
599 	 * TODO - this could be improved to be dependent on the rate.
600 	 * The hardware can keep up at lower rates, but not higher rates
601 	 */
602 	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
603 		ndelim += ATH_AGGR_ENCRYPTDELIM;
604 
605 	/*
606 	 * Convert desired mpdu density from microseconds to bytes based
607 	 * on highest rate in rate series (i.e. first rate) to determine
608 	 * required minimum length for subframe. Take into account
609 	 * whether the high rate is 20 or 40 MHz and half or full GI.
610 	 */
611 	mpdudensity = tid->an->mpdudensity;
612 
613 	/*
614 	 * If there is no mpdu density restriction, no further calculation
615 	 * is needed.
616 	 */
617 	if (mpdudensity == 0)
618 		return (ndelim);
619 
620 	rix = rates[0].idx;
621 	flags = rates[0].flags;
622 	rc = rt->info[rix].ratecode;
623 	width = (flags & ATH9K_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
624 	half_gi = (flags & ATH9K_TX_RC_SHORT_GI) ? 1 : 0;
625 
626 	if (half_gi)
627 		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
628 	else
629 		nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);
630 
631 	if (nsymbols == 0)
632 		nsymbols = 1;
633 
634 	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
635 	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
636 
637 	if (frmlen < minlen) {
638 		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
639 		ndelim = max(mindelim, ndelim);
640 	}
641 
642 	return (ndelim);
643 }
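
/*
 * Worked example (editor's note, assuming ATH_AGGR_DELIM_SZ is the
 * 4-byte MPDU delimiter): with an 8 us MPDU density at MCS 7, 40 MHz,
 * half GI, nsymbols = 2 and nsymbits = bits_per_symbol[7][1] = 540, so
 * minlen = (2 * 540) / 8 = 135 bytes.  A 100-byte subframe then needs
 * mindelim = (135 - 100) / 4 = 8 delimiters on top of the length-based
 * default.
 */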
644 
645 static enum ATH_AGGR_STATUS
646 arn_tx_form_aggr(struct arn_softc *sc, struct ath_atx_tid *tid,
647     list_t *bf_q)
648 {
649 #define	PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
650 	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
651 	int rl = 0, nframes = 0, ndelim, prev_al = 0;
652 	uint16_t aggr_limit = 0, al = 0, bpad = 0,
653 	    al_delta, h_baw = tid->baw_size / 2;
654 	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
655 
656 	bf_first = list_head(&tid->buf_q);
657 
658 	do {
659 		bf = list_head(&tid->buf_q);
660 
661 		/* do not step over block-ack window */
662 		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
663 			status = ATH_AGGR_BAW_CLOSED;
664 			break;
665 		}
666 
667 		if (!rl) {
668 			aggr_limit = arn_lookup_rate(sc, bf, tid);
669 			rl = 1;
670 		}
671 
672 		/* do not exceed aggregation limit */
673 		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;
674 
675 		if (nframes &&
676 		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
677 			status = ATH_AGGR_LIMITED;
678 			break;
679 		}
680 
681 		/* do not exceed subframe limit */
682 		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
683 			status = ATH_AGGR_LIMITED;
684 			break;
685 		}
686 		nframes++;
687 
688 		/* add padding for previous frame to aggregation length */
689 		al += bpad + al_delta;
690 
691 		/*
692 		 * Get the delimiters needed to meet the MPDU
693 		 * density for this node.
694 		 */
695 		ndelim =
696 		    arn_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
697 		bpad = PADBYTES(al_delta) + (ndelim << 2);
698 
699 		bf->bf_next = NULL;
700 		bf->bf_desc->ds_link = 0;
701 
702 		/* link buffers of this frame to the aggregate */
703 		arn_tx_addto_baw(sc, tid, bf);
704 		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
705 		list_remove(&tid->buf_q, bf);
706 		list_insert_tail(bf_q, bf);
707 		if (bf_prev) {
708 			bf_prev->bf_next = bf;
709 			bf_prev->bf_desc->ds_link = bf->bf_daddr;
710 		}
711 		bf_prev = bf;
712 	} while (!list_empty(&tid->buf_q));
713 
714 	bf_first->bf_al = al;
715 	bf_first->bf_nframes = nframes;
716 
717 	return (status);
718 #undef PADBYTES
719 }
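
/*
 * Editor's note: PADBYTES() rounds each subframe up to a 4-byte
 * boundary, e.g. PADBYTES(26) = (4 - 26 % 4) % 4 = 2 while
 * PADBYTES(28) = 0; the ndelim delimiters add a further ndelim << 2
 * bytes.  Together these make up bpad, which is charged to the running
 * aggregate length 'al' on the next pass through the loop.
 */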
720 
721 static void
722 arn_tx_sched_aggr(struct arn_softc *sc, struct ath_txq *txq,
723     struct ath_atx_tid *tid)
724 {
725 	struct ath_buf *bf;
726 	enum ATH_AGGR_STATUS status;
727 	list_t bf_q;
728 
729 	do {
730 		if (list_empty(&tid->buf_q))
731 			return;
732 
733 		/* INIT_LIST_HEAD */
734 		list_create(&bf_q, sizeof (struct ath_buf),
735 		    offsetof(struct ath_buf, bf_node));
736 
737 		status = arn_tx_form_aggr(sc, tid, &bf_q);
738 
739 		/*
740 		 * no frames picked up to be aggregated;
741 		 * block-ack window is not open.
742 		 */
743 		if (list_empty(&bf_q))
744 			break;
745 
746 		bf = list_head(&bf_q);
747 		bf->bf_lastbf = list_object(&bf_q, bf->bf_node.list_prev);
748 
749 		/* if only one frame, send as non-aggregate */
750 		if (bf->bf_nframes == 1) {
751 			bf->bf_state.bf_type &= ~BUF_AGGR;
752 			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
753 			ath_buf_set_rate(sc, bf);
754 			arn_tx_txqaddbuf(sc, txq, &bf_q);
755 			continue;
756 		}
757 
758 		/* setup first desc of aggregate */
759 		bf->bf_state.bf_type |= BUF_AGGR;
760 		ath_buf_set_rate(sc, bf);
761 		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
762 
763 		/* anchor last desc of aggregate */
764 		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
765 
766 		txq->axq_aggr_depth++;
767 		arn_tx_txqaddbuf(sc, txq, &bf_q);
768 
769 	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
770 	    status != ATH_AGGR_BAW_CLOSED);
771 }
772 
773 int
774 arn_tx_aggr_start(struct arn_softc *sc, struct ieee80211_node *in,
775     uint16_t tid, uint16_t *ssn)
776 {
777 	struct ath_atx_tid *txtid;
778 	struct ath_node *an;
779 
780 	an = ATH_NODE(in);
781 
782 	if (sc->sc_flags & SC_OP_TXAGGR) {
783 		txtid = ATH_AN_2_TID(an, tid);
784 		txtid->state |= AGGR_ADDBA_PROGRESS;
785 		arn_tx_pause_tid(sc, txtid);
786 		*ssn = txtid->seq_start;
787 	}
788 
789 	return (0);
790 }
791 
792 int
793 arn_tx_aggr_stop(struct arn_softc *sc, struct ieee80211_node *in, uint16_t tid)
794 {
795 	struct ath_node *an = ATH_NODE(in);
796 	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
797 	struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
798 	struct ath_buf *bf;
799 
800 	list_t list;
801 	list_create(&list, sizeof (struct ath_buf),
802 	    offsetof(struct ath_buf, bf_node));
803 
804 	if (txtid->state & AGGR_CLEANUP)
805 		return (0);
806 
807 	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
808 		txtid->addba_exchangeattempts = 0;
809 		return (0);
810 	}
811 
812 	arn_tx_pause_tid(sc, txtid);
813 
814 	/* drop all software retried frames and mark this TID */
815 	mutex_enter(&txq->axq_lock);
816 	while (!list_empty(&txtid->buf_q)) {
817 		/* list_first_entry */
818 		bf = list_head(&txtid->buf_q);
819 		if (!bf_isretried(bf)) {
820 			/*
821 			 * NB: this is based on the assumption that
822 			 * software-retried frames always stay at
823 			 * the head of the software queue.
824 			 */
825 			break;
826 		}
827 		list_remove(&txtid->buf_q, bf);
828 		list_insert_tail(&list, bf);
829 		arn_tx_update_baw(sc, txtid, bf->bf_seqno);
830 		// ath_tx_complete_buf(sc, bf, &list, 0, 0); /* to do */
831 	}
832 	mutex_exit(&txq->axq_lock);
833 
834 	if (txtid->baw_head != txtid->baw_tail) {
835 		txtid->state |= AGGR_CLEANUP;
836 	} else {
837 		txtid->state &= ~AGGR_ADDBA_COMPLETE;
838 		txtid->addba_exchangeattempts = 0;
839 		arn_tx_flush_tid(sc, txtid);
840 	}
841 
842 	return (0);
843 }
844 
845 void
846 arn_tx_aggr_resume(struct arn_softc *sc,
847     struct ieee80211_node *in,
848     uint16_t tid)
849 {
850 	struct ath_atx_tid *txtid;
851 	struct ath_node *an;
852 
853 	an = ATH_NODE(in);
854 
855 	if (sc->sc_flags & SC_OP_TXAGGR) {
856 		txtid = ATH_AN_2_TID(an, tid);
857 		txtid->baw_size = (0x8) << sc->sc_ht_conf.ampdu_factor;
858 		txtid->state |= AGGR_ADDBA_COMPLETE;
859 		txtid->state &= ~AGGR_ADDBA_PROGRESS;
860 		arn_tx_resume_tid(sc, txtid);
861 	}
862 }
863 
864 boolean_t
865 arn_tx_aggr_check(struct arn_softc *sc, struct ath_node *an, uint8_t tidno)
866 {
867 	struct ath_atx_tid *txtid;
868 
869 	if (!(sc->sc_flags & SC_OP_TXAGGR))
870 		return (B_FALSE);
871 
872 	txtid = ATH_AN_2_TID(an, tidno);
873 
874 	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
875 		if (!(txtid->state & AGGR_ADDBA_PROGRESS) &&
876 		    (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
877 			txtid->addba_exchangeattempts++;
878 			return (B_TRUE);
879 		}
880 	}
881 
882 	return (B_FALSE);
883 }
884 
885 /* Queue Management */
886 
887 static void
888 arn_txq_drain_pending_buffers(struct arn_softc *sc, struct ath_txq *txq)
889 {
890 	struct ath_atx_ac *ac, *ac_tmp;
891 	struct ath_atx_tid *tid, *tid_tmp;
892 
893 	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq) {
894 		list_remove(&txq->axq_acq, ac);
895 		ac->sched = B_FALSE;
896 		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q) {
897 			list_remove(&ac->tid_q, tid);
898 			tid->sched = B_FALSE;
899 			arn_tid_drain(sc, txq, tid);
900 		}
901 	}
902 }
903 
904 int
905 arn_tx_get_qnum(struct arn_softc *sc, int qtype, int haltype)
906 {
907 	int qnum;
908 
909 	switch (qtype) {
910 	case ATH9K_TX_QUEUE_DATA:
911 		if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
912 			ARN_DBG((ARN_DBG_FATAL, "arn: arn_tx_get_qnum(): "
913 			    "HAL AC %u out of range, max %zu!\n",
914 			    haltype, ARRAY_SIZE(sc->sc_haltype2q)));
915 			return (-1);
916 		}
917 		qnum = sc->sc_haltype2q[haltype];
918 		break;
919 	case ATH9K_TX_QUEUE_BEACON:
920 		qnum = sc->sc_beaconq;
921 		break;
922 	case ATH9K_TX_QUEUE_CAB:
923 		qnum = sc->sc_cabq->axq_qnum;
924 		break;
925 	default:
926 		qnum = -1;
927 	}
928 	return (qnum);
929 }
930 
931 struct ath_txq *
932 arn_test_get_txq(struct arn_softc *sc, struct ieee80211_node *in,
933     struct ieee80211_frame *wh, uint8_t type)
934 {
935 	struct ieee80211_qosframe *qwh = NULL;
936 	struct ath_txq *txq = NULL;
937 	int tid = -1;
938 	int qos_ac;
939 	int qnum;
940 
941 	if (in->in_flags & IEEE80211_NODE_QOS) {
942 
943 		if ((type & IEEE80211_FC0_TYPE_MASK) ==
944 		    IEEE80211_FC0_TYPE_DATA) {
945 
946 			if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
947 				qwh = (struct ieee80211_qosframe *)wh;
948 
949 				tid = qwh->i_qos[0] & IEEE80211_QOS_TID;
950 				switch (tid) {
951 				case 1:
952 				case 2:
953 					qos_ac = WME_AC_BK;
					break;
954 				case 0:
955 				case 3:
956 					qos_ac = WME_AC_BE;
					break;
957 				case 4:
958 				case 5:
959 					qos_ac = WME_AC_VI;
					break;
960 				case 6:
961 				case 7:
962 					qos_ac = WME_AC_VO;
					break;
963 				}
964 			}
965 		} else {
966 			qos_ac = WME_AC_VO;
967 		}
968 	} else if ((type & IEEE80211_FC0_TYPE_MASK) ==
969 	    IEEE80211_FC0_TYPE_MGT) {
970 			qos_ac = WME_AC_VO;
971 	} else if ((type & IEEE80211_FC0_TYPE_MASK) ==
972 	    IEEE80211_FC0_TYPE_CTL) {
973 			qos_ac = WME_AC_VO;
974 	} else {
975 			qos_ac = WME_AC_BK;
976 	}
977 	qnum = arn_get_hal_qnum(qos_ac, sc);
978 	txq = &sc->sc_txq[qnum];
979 
980 	mutex_enter(&txq->axq_lock);
981 
982 	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
983 		ARN_DBG((ARN_DBG_XMIT,
984 		    "TX queue: %d is full, depth: %d\n",
985 		    qnum, txq->axq_depth));
986 		/* stop the queue */
987 		sc->sc_resched_needed = B_TRUE;
988 		txq->stopped = 1;
989 		mutex_exit(&txq->axq_lock);
990 		return (NULL);
991 	}
992 
993 	mutex_exit(&txq->axq_lock);
994 
995 	return (txq);
996 }
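
/*
 * Editor's note: the TID-to-access-category mapping above is the
 * standard WMM user-priority table from 802.11e: UP 1 and 2 map to
 * background, UP 0 and 3 to best effort, UP 4 and 5 to video, and
 * UP 6 and 7 to voice.  Management and control frames are sent on
 * the voice queue.
 */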
997 
998 /* Called only when tx aggregation is enabled and HT is supported */
999 static void
1000 assign_aggr_tid_seqno(struct arn_softc *sc,
1001     struct ath_buf *bf,
1002     struct ieee80211_frame *wh)
1003 {
1004 	struct ath_node *an;
1005 	struct ath_atx_tid *tid;
1006 	struct ieee80211_node *in;
1007 	struct ieee80211_qosframe *qwh = NULL;
1008 	ieee80211com_t *ic = (ieee80211com_t *)sc;
1009 
1010 	in = ieee80211_find_txnode(ic, wh->i_addr1);
1011 	if (in == NULL) {
1012 		arn_problem("assign_aggr_tid_seqno():"
1013 		    "failed to find tx node\n");
1014 		return;
1015 	}
1016 	an = ATH_NODE(in);
1017 
1018 	/* Get tidno */
1019 	if (in->in_flags & IEEE80211_NODE_QOS) {
1020 		if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
1021 			qwh = (struct ieee80211_qosframe *)wh;
1022 			bf->bf_tidno = qwh->i_qos[0] & IEEE80211_QOS_TID;
1023 		}
1024 	}
1025 
1026 	/* Get seqno */
1027 	/*
1028 	 * For HT capable stations, we save tidno for later use.
1029 	 * We also override seqno set by upper layer with the one
1030 	 * in tx aggregation state.
1031 	 *
1032 	 * If fragmentation is on, the sequence number is
1033 	 * not overridden, since it has been
1034 	 * incremented by the fragmentation routine.
1035 	 *
1036 	 * FIXME: check if the fragmentation threshold exceeds
1037 	 * IEEE80211 max.
1038 	 */
1039 	tid = ATH_AN_2_TID(an, bf->bf_tidno);
1040 
1041 	*(uint16_t *)&wh->i_seq[0] =
1042 	    LE_16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1043 	bf->bf_seqno = tid->seq_next;
1044 	/* LINTED E_CONSTANT_CONDITION */
1045 	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1046 
1047 	/* release node */
1048 	ieee80211_free_node(in);
1049 }
1050 
1051 /* Compute the number of bad frames */
1052 /* ARGSUSED */
1053 static int
1054 arn_tx_num_badfrms(struct arn_softc *sc, struct ath_buf *bf, int txok)
1055 {
1056 	struct ath_buf *bf_last = bf->bf_lastbf;
1057 	struct ath_desc *ds = bf_last->bf_desc;
1058 	uint16_t seq_st = 0;
1059 	uint32_t ba[WME_BA_BMP_SIZE >> 5];
1060 	int ba_index;
1061 	int nbad = 0;
1062 	int isaggr = 0;
1063 
1064 	if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
1065 		return (0);
1066 
1067 	isaggr = bf_isaggr(bf);
1068 	if (isaggr) {
1069 		seq_st = ATH_DS_BA_SEQ(ds);
1070 		memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
1071 	}
1072 
1073 	while (bf) {
1074 		ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
1075 		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
1076 			nbad++;
1077 
1078 		bf = bf->bf_next;
1079 	}
1080 
1081 	return (nbad);
1082 }
1083 
1084 static void
1085 arn_tx_send_ht_normal(struct arn_softc *sc,
1086     struct ath_txq *txq,
1087     struct ath_atx_tid *tid,
1088     list_t *list)
1089 {
1090 	struct ath_buf *bf;
1091 
1092 	bf = list_head(list);
1093 	bf->bf_state.bf_type &= ~BUF_AMPDU;
1094 
1095 	/* update starting sequence number for subsequent ADDBA request */
1096 	INCR(tid->seq_start, IEEE80211_SEQ_MAX);
1097 
1098 	bf->bf_nframes = 1;
1099 	bf->bf_lastbf = bf;
1100 	ath_buf_set_rate(sc, bf);
1101 	arn_tx_txqaddbuf(sc, txq, list);
1102 }
1103 
1104 /*
1105  * Insert a chain of ath_buf (descriptors) on a txq and
1106  * assume the descriptors are already chained together by caller.
1107  */
1108 static void
1109 arn_tx_txqaddbuf(struct arn_softc *sc,
1110     struct ath_txq *txq,
1111     list_t *list)
1112 {
1113 	struct ath_buf *bf;
1114 
1115 	/*
1116 	 * Insert the frame on the outbound list and
1117 	 * pass it on to the hardware.
1118 	 */
1119 
1120 	if (list_empty(list))
1121 		return;
1122 
1123 	bf = list_head(list);
1124 
1125 	list_splice_tail_init(list, &txq->axq_q);
1126 
1127 	txq->axq_depth++;
1128 	txq->axq_totalqueued++;
1129 	txq->axq_linkbuf = list_object(list, txq->axq_q.prev);
1130 
1131 	ARN_DBG((ARN_DBG_QUEUE,
1132 	    "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth));
1133 
1134 	if (txq->axq_link == NULL) {
1135 		ath9k_hw_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
1136 		ARN_DBG((ARN_DBG_XMIT,
1137 		    "TXDP[%u] = %llx (%p)\n",
1138 		    txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc));
1139 	} else {
1140 		*txq->axq_link = bf->bf_daddr;
1141 		ARN_DBG((ARN_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
1142 		    txq->axq_qnum, txq->axq_link,
1143 		    ito64(bf->bf_daddr), bf->bf_desc));
1144 	}
1145 	txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
1146 	ath9k_hw_txstart(sc->sc_ah, txq->axq_qnum);
1147 }
1148 #endif /* ARN_TX_AGGREGATION */
1149 
1150 /*
1151  * ath_pkt_dur - compute packet duration (NB: not NAV)
1152  * rix - rate index
1153  * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1154  * width  - 0 for 20 MHz, 1 for 40 MHz
1155  * half_gi - to use 4us v/s 3.6 us for symbol time
1156  */
1157 
1158 static uint32_t
1159 /* LINTED E_STATIC_UNUSED */
1160 arn_pkt_duration(struct arn_softc *sc, uint8_t rix, struct ath_buf *bf,
1161     int width, int half_gi, boolean_t shortPreamble)
1162 {
1163 	struct ath_rate_table *rate_table = sc->sc_currates;
1164 	uint32_t nbits, nsymbits, duration, nsymbols;
1165 	uint8_t rc;
1166 	int streams, pktlen;
1167 
1168 	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
1169 	rc = rate_table->info[rix].ratecode;
1170 
1171 	/* for legacy rates, use old function to compute packet duration */
1172 	if (!IS_HT_RATE(rc))
1173 		return (ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
1174 		    rix, shortPreamble));
1175 
1176 	/* find number of symbols: PLCP + data */
1177 	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
1178 	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
1179 	nsymbols = (nbits + nsymbits - 1) / nsymbits;
1180 
1181 	if (!half_gi)
1182 		duration = SYMBOL_TIME(nsymbols);
1183 	else
1184 		duration = SYMBOL_TIME_HALFGI(nsymbols);
1185 
1186 	/* add up duration for legacy/ht training and signal fields */
1187 	streams = HT_RC_2_STREAMS(rc);
1188 	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
1189 
1190 	return (duration);
1191 }
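
/*
 * Worked example (editor's illustration): a 1500-byte MPDU at MCS 7,
 * 20 MHz, full GI gives nbits = 1500 * 8 + 22 = 12022 and
 * nsymbits = 260, so nsymbols = ceil(12022 / 260) = 47 and the data
 * portion lasts SYMBOL_TIME(47) = 188 us.  Adding the single-stream
 * training/signal overhead L_STF + L_LTF + L_SIG + HT_SIG + HT_STF +
 * HT_LTF(1) = 36 us gives a total duration of 224 us.
 */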
1192 
1193 static struct ath_buf *
1194 arn_tx_get_buffer(struct arn_softc *sc)
1195 {
1196 	struct ath_buf *bf = NULL;
1197 
1198 	mutex_enter(&sc->sc_txbuflock);
1199 	bf = list_head(&sc->sc_txbuf_list);
1200 	/* Check if a tx buffer is available */
1201 	if (bf != NULL)
1202 		list_remove(&sc->sc_txbuf_list, bf);
1203 	if (list_empty(&sc->sc_txbuf_list)) {
1204 		ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx(): "
1205 		    "stop queue\n"));
1206 		sc->sc_stats.ast_tx_qstop++;
1207 	}
1208 	mutex_exit(&sc->sc_txbuflock);
1209 
1210 	return (bf);
1211 }
1212 
1213 static uint32_t
1214 setup_tx_flags(struct arn_softc *sc,
1215     struct ieee80211_frame *wh,
1216     uint32_t pktlen)
1217 {
1218 	int flags = 0;
1219 	ieee80211com_t *ic = (ieee80211com_t *)sc;
1220 
1221 	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1222 	flags |= ATH9K_TXDESC_INTREQ;
1223 
1224 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1225 		flags |= ATH9K_TXDESC_NOACK;	/* no ack on broad/multicast */
1226 		sc->sc_stats.ast_tx_noack++;
1227 	}
1228 	if (pktlen > ic->ic_rtsthreshold) {
1229 		flags |= ATH9K_TXDESC_RTSENA;	/* RTS based on frame length */
1230 		sc->sc_stats.ast_tx_rts++;
1231 	}
1232 
1233 	return (flags);
1234 }
1235 
1236 static void
1237 ath_tx_setup_buffer(struct arn_softc *sc, struct ath_buf *bf,
1238     struct ieee80211_node *in, struct ieee80211_frame *wh,
1239     uint32_t pktlen, uint32_t keytype)
1240 {
1241 	ieee80211com_t *ic = (ieee80211com_t *)sc;
1242 	int i;
1243 
1244 	/* Buf reset */
1245 	/* LINTED E_CONSTANT_CONDITION */
1246 	ATH_TXBUF_RESET(bf);
1247 	for (i = 0; i < 4; i++) {
1248 		bf->rates[i].idx = -1;
1249 		bf->rates[i].flags = 0;
1250 		bf->rates[i].count = 1;
1251 	}
1252 
1253 	bf->bf_in = in;
1254 	/* LINTED E_ASSIGN_NARROW_CONV */
1255 	bf->bf_frmlen = pktlen;
1256 
1257 	/* Frame type */
1258 	IEEE80211_IS_DATA(wh) ?
1259 	    (bf->bf_state.bf_type |= BUF_DATA) :
1260 	    (bf->bf_state.bf_type &= ~BUF_DATA);
1261 	IEEE80211_IS_BACK_REQ(wh) ?
1262 	    (bf->bf_state.bf_type |= BUF_BAR) :
1263 	    (bf->bf_state.bf_type &= ~BUF_BAR);
1264 	IEEE80211_IS_PSPOLL(wh) ?
1265 	    (bf->bf_state.bf_type |= BUF_PSPOLL) :
1266 	    (bf->bf_state.bf_type &= ~BUF_PSPOLL);
1267 	/*
1268 	 * The 802.11 layer marks whether or not we should
1269 	 * use short preamble based on the current mode and
1270 	 * negotiated parameters.
1271 	 */
1272 	((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
1273 	    (in->in_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) ?
1274 	    (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
1275 	    (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
1276 
1277 	bf->bf_flags = setup_tx_flags(sc, wh, pktlen);
1278 
1279 	/* Crypto */
1280 	bf->bf_keytype = keytype;
1281 
1282 	/* Assign seqno and tidno for tx aggregation */
1283 
1284 #ifdef ARN_TX_AGGREGATION
1285 	if (ieee80211_is_data_qos(wh) && (sc->sc_flags & SC_OP_TXAGGR))
1286 		assign_aggr_tid_seqno(sc, bf, wh);
1287 #endif /* ARN_TX_AGGREGATION */
1288 
1289 }
1290 
1291 /*
1292  * ath_pkt_dur - compute packet duration (NB: not NAV)
1293  *
1294  * rix - rate index
1295  * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1296  * width  - 0 for 20 MHz, 1 for 40 MHz
1297  * half_gi - to use 4us v/s 3.6 us for symbol time
1298  */
1299 static uint32_t
1300 ath_pkt_duration(struct arn_softc *sc, uint8_t rix, struct ath_buf *bf,
1301     int width, int half_gi, boolean_t shortPreamble)
1302 {
1303 	struct ath_rate_table *rate_table = sc->sc_currates;
1304 	uint32_t nbits, nsymbits, duration, nsymbols;
1305 	uint8_t rc;
1306 	int streams, pktlen;
1307 
1308 	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
1309 	rc = rate_table->info[rix].ratecode;
1310 
1311 	/* for legacy rates, use old function to compute packet duration */
1312 	if (!IS_HT_RATE(rc))
1313 		return (ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
1314 		    rix, shortPreamble));
1315 
1316 	/* find number of symbols: PLCP + data */
1317 	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
1318 	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
1319 	nsymbols = (nbits + nsymbits - 1) / nsymbits;
1320 
1321 	if (!half_gi)
1322 		duration = SYMBOL_TIME(nsymbols);
1323 	else
1324 		duration = SYMBOL_TIME_HALFGI(nsymbols);
1325 
1326 	/* addup duration for legacy/ht training and signal fields */
1327 	streams = HT_RC_2_STREAMS(rc);
1328 	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
1329 
1330 	return (duration);
1331 }
1332 
1333 /* Rate module function to set rate related fields in tx descriptor */
1334 static void
1335 ath_buf_set_rate(struct arn_softc *sc, struct ath_buf *bf,
1336     struct ieee80211_frame *wh)
1338 {
1339 	struct ath_hal *ah = sc->sc_ah;
1340 	struct ath_rate_table *rt;
1341 	struct ath_desc *ds = bf->bf_desc;
1342 	struct ath_desc *lastds = bf->bf_desc; /* temporary workaround */
1343 	struct ath9k_11n_rate_series series[4];
1344 	struct ath9k_tx_rate *rates;
1345 	int i, flags, rtsctsena = 0;
1346 	uint32_t ctsduration = 0;
1347 	uint8_t rix = 0, cix, ctsrate = 0;
1348 
1349 	(void) memset(series, 0, sizeof (struct ath9k_11n_rate_series) * 4);
1350 
1351 	rates = bf->rates;
1352 
1353 	if (IEEE80211_HAS_MOREFRAGS(wh) ||
1354 	    wh->i_seq[0] & IEEE80211_SEQ_FRAG_MASK) {
1355 		rates[1].count = rates[2].count = rates[3].count = 0;
1356 		rates[1].idx = rates[2].idx = rates[3].idx = 0;
1357 		rates[0].count = ATH_TXMAXTRY;
1358 	}
1359 
1360 	/* get the cix for the lowest valid rix */
1361 	rt = sc->sc_currates;
1362 	for (i = 3; i >= 0; i--) {
1363 		if (rates[i].count && (rates[i].idx >= 0)) {
1364 			rix = rates[i].idx;
1365 			break;
1366 		}
1367 	}
1368 
1369 	flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
1370 	cix = rt->info[rix].ctrl_rate;
1371 
1372 	/*
1373 	 * If 802.11g protection is enabled, determine whether to use RTS/CTS or
1374 	 * just CTS.  Note that this is only done for OFDM/HT unicast frames.
1375 	 */
1376 	if (sc->sc_protmode != PROT_M_NONE &&
1377 	    !(bf->bf_flags & ATH9K_TXDESC_NOACK) &&
1378 	    (rt->info[rix].phy == WLAN_RC_PHY_OFDM ||
1379 	    WLAN_RC_PHY_HT(rt->info[rix].phy))) {
1380 		if (sc->sc_protmode == PROT_M_RTSCTS)
1381 			flags = ATH9K_TXDESC_RTSENA;
1382 		else if (sc->sc_protmode == PROT_M_CTSONLY)
1383 			flags = ATH9K_TXDESC_CTSENA;
1384 
1385 		cix = rt->info[sc->sc_protrix].ctrl_rate;
1386 		rtsctsena = 1;
1387 	}
1388 
1389 	/*
1390 	 * For 11n, the default behavior is to enable RTS for hw retried frames.
1391 	 * We enable the global flag here and let rate series flags determine
1392 	 * which rates will actually use RTS.
1393 	 */
1394 	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
1395 		/* 802.11g protection not needed, use our default behavior */
1396 		if (!rtsctsena)
1397 			flags = ATH9K_TXDESC_RTSENA;
1398 	}
1399 
1400 	/* Set protection if aggregate protection on */
1401 	if (sc->sc_config.ath_aggr_prot &&
1402 	    (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
1403 		flags = ATH9K_TXDESC_RTSENA;
1404 		cix = rt->info[sc->sc_protrix].ctrl_rate;
1405 		rtsctsena = 1;
1406 	}
1407 
1408 	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1409 	if (bf_isaggr(bf) && (bf->bf_al > ah->ah_caps.rts_aggr_limit))
1410 		flags &= ~(ATH9K_TXDESC_RTSENA);
1411 
1412 	/*
1413 	 * CTS transmit rate is derived from the transmit rate by looking in the
1414 	 * h/w rate table.  We must also factor in whether or not a short
1415 	 * preamble is to be used. NB: cix is set above where RTS/CTS is enabled
1416 	 */
1417 	ctsrate = rt->info[cix].ratecode |
1418 	    (bf_isshpreamble(bf) ? rt->info[cix].short_preamble : 0);
1419 
1420 	for (i = 0; i < 4; i++) {
1421 		if (!rates[i].count || (rates[i].idx < 0))
1422 			continue;
1423 
1424 		rix = rates[i].idx;
1425 
1426 		series[i].Rate = rt->info[rix].ratecode |
1427 		    (bf_isshpreamble(bf) ?
1428 		    rt->info[rix].short_preamble : 0);
1429 
1430 		series[i].Tries = rates[i].count;
1431 
1432 		series[i].RateFlags =
1433 		    ((rates[i].flags & ATH9K_TX_RC_USE_RTS_CTS) ?
1434 		    ATH9K_RATESERIES_RTS_CTS : 0) |
1435 		    ((rates[i].flags & ATH9K_TX_RC_40_MHZ_WIDTH) ?
1436 		    ATH9K_RATESERIES_2040 : 0) |
1437 		    ((rates[i].flags & ATH9K_TX_RC_SHORT_GI) ?
1438 		    ATH9K_RATESERIES_HALFGI : 0);
1439 
1440 		series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
1441 		    (rates[i].flags & ATH9K_TX_RC_40_MHZ_WIDTH) != 0,
1442 		    (rates[i].flags & ATH9K_TX_RC_SHORT_GI),
1443 		    bf_isshpreamble(bf));
1444 
1445 		series[i].ChSel = sc->sc_tx_chainmask;
1446 
1447 		if (rtsctsena)
1448 			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1449 
1450 		ARN_DBG((ARN_DBG_RATE,
1451 		    "series[%d]--flags & ATH9K_TX_RC_USE_RTS_CTS = %08x"
1452 		    "--flags & ATH9K_TX_RC_40_MHZ_WIDTH = %08x"
1453 		    "--flags & ATH9K_TX_RC_SHORT_GI = %08x\n",
1454 		    rates[i].flags & ATH9K_TX_RC_USE_RTS_CTS,
1455 		    rates[i].flags & ATH9K_TX_RC_40_MHZ_WIDTH,
1456 		    rates[i].flags & ATH9K_TX_RC_SHORT_GI));
1457 
1458 		ARN_DBG((ARN_DBG_RATE,
1459 		    "series[%d]:"
1460 		    "dot11rate:%d"
1461 		    "index:%d"
1462 		    "retry count:%d\n",
1463 		    i,
1464 		    (rt->info[rates[i].idx].ratekbps)/1000,
1465 		    rates[i].idx,
1466 		    rates[i].count));
1467 	}
1468 
1469 	/* set dur_update_en for l-sig computation except for PS-Poll frames */
1470 	ath9k_hw_set11n_ratescenario(ah, ds, lastds, !bf_ispspoll(bf),
1471 	    ctsrate, ctsduration,
1472 	    series, 4, flags);
1473 
1474 	if (sc->sc_config.ath_aggr_prot && flags)
1475 		ath9k_hw_set11n_burstduration(ah, ds, 8192);
1476 }
1477 
1478 static void
1479 ath_tx_complete(struct arn_softc *sc, struct ath_buf *bf,
1480     struct ath_xmit_status *tx_status)
1481 {
1482 	boolean_t is_data = bf_isdata(bf);
1483 
1484 	ARN_DBG((ARN_DBG_XMIT, "TX complete\n"));
1485 
1486 	if (tx_status->flags & ATH_TX_BAR)
1487 		tx_status->flags &= ~ATH_TX_BAR;
1488 
1489 	bf->rates[0].count = tx_status->retries + 1;
1490 
1491 	arn_tx_status(sc, bf, is_data);
1492 }
1493 
1494 /* To complete a chain of buffers associated with a frame */
1495 static void
1496 ath_tx_complete_buf(struct arn_softc *sc, struct ath_buf *bf,
1497     int txok, int sendbar)
1498 {
1499 	struct ath_xmit_status tx_status;
1500 
1501 	/*
1502 	 * Set retry information.
1503 	 * NB: Don't use the information in the descriptor, because the frame
1504 	 * could be software retried.
1505 	 */
1506 	tx_status.retries = bf->bf_retries;
1507 	tx_status.flags = 0;
1508 
1509 	if (sendbar)
1510 		tx_status.flags = ATH_TX_BAR;
1511 
1512 	if (!txok) {
1513 		tx_status.flags |= ATH_TX_ERROR;
1514 
1515 		if (bf_isxretried(bf))
1516 			tx_status.flags |= ATH_TX_XRETRY;
1517 	}
1518 
1519 	/* complete this frame */
1520 	ath_tx_complete(sc, bf, &tx_status);
1521 
1522 	/*
1523 	 * Return the list of ath_buf for this mpdu to the free queue
1524 	 */
1525 }
1526 
1527 static void
1528 arn_tx_stopdma(struct arn_softc *sc, struct ath_txq *txq)
1529 {
1530 	struct ath_hal *ah = sc->sc_ah;
1531 
1532 	(void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1533 
1534 	ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx_stopdma(): "
1535 	    "tx queue [%u] %x, link %p\n",
1536 	    txq->axq_qnum,
1537 	    ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link));
1538 
1539 }
1540 
1541 /* Drain only the data queues */
1542 /* ARGSUSED */
1543 static void
1544 arn_drain_txdataq(struct arn_softc *sc, boolean_t retry_tx)
1545 {
1546 	struct ath_hal *ah = sc->sc_ah;
1547 	int i, status, npend = 0;
1548 
1549 	if (!(sc->sc_flags & SC_OP_INVALID)) {
1550 		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1551 			if (ARN_TXQ_SETUP(sc, i)) {
1552 				arn_tx_stopdma(sc, &sc->sc_txq[i]);
1553 				/*
1554 				 * The TxDMA may not really be stopped.
1555 				 * Double check the hal tx pending count
1556 				 */
1557 				npend += ath9k_hw_numtxpending(ah,
1558 				    sc->sc_txq[i].axq_qnum);
1559 			}
1560 		}
1561 	}
1562 
1563 	if (npend) {
1564 		/* TxDMA not stopped, reset the hal */
1565 		ARN_DBG((ARN_DBG_XMIT, "arn: arn_drain_txdataq(): "
1566 		    "Unable to stop TxDMA. Reset HAL!\n"));
1567 
1568 		if (!ath9k_hw_reset(ah,
1569 		    sc->sc_ah->ah_curchan,
1570 		    sc->tx_chan_width,
1571 		    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1572 		    sc->sc_ht_extprotspacing, B_TRUE, &status)) {
1573 			ARN_DBG((ARN_DBG_FATAL, "arn: arn_drain_txdataq(): "
1574 			    "unable to reset hardware; hal status %u\n",
1575 			    status));
1576 		}
1577 	}
1578 
1579 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1580 		if (ARN_TXQ_SETUP(sc, i))
1581 			arn_tx_draintxq(sc, &sc->sc_txq[i]);
1582 	}
1583 }
1584 
1585 /* Setup a h/w transmit queue */
1586 struct ath_txq *
1587 arn_txq_setup(struct arn_softc *sc, int qtype, int subtype)
1588 {
1589 	struct ath_hal *ah = sc->sc_ah;
1590 	struct ath9k_tx_queue_info qi;
1591 	int qnum;
1592 
1593 	(void) memset(&qi, 0, sizeof (qi));
1594 	qi.tqi_subtype = subtype;
1595 	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1596 	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1597 	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1598 	qi.tqi_physCompBuf = 0;
1599 
1600 	/*
1601 	 * Enable interrupts only for EOL and DESC conditions.
1602 	 * We mark tx descriptors to receive a DESC interrupt
1603 	 * when a tx queue gets deep; otherwise we wait for the
1604 	 * EOL to reap descriptors.  Note that this is done to
1605 	 * reduce interrupt load and this only defers reaping
1606 	 * descriptors, never transmitting frames.  Aside from
1607 	 * reducing interrupts this also permits more concurrency.
1608 	 * The only potential downside is if the tx queue backs
1609 	 * up, in which case the top half of the kernel may back up
1610 	 * due to a lack of tx descriptors.
1611 	 *
1612 	 * The UAPSD queue is an exception, since we take a desc-
1613 	 * based intr on the EOSP frames.
1614 	 */
1615 	if (qtype == ATH9K_TX_QUEUE_UAPSD)
1616 		qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1617 	else
1618 		qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1619 		    TXQ_FLAG_TXDESCINT_ENABLE;
1620 	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1621 	if (qnum == -1) {
1622 		/*
1623 		 * NB: don't print a message, this happens
1624 		 * normally on parts with too few tx queues
1625 		 */
1626 		return (NULL);
1627 	}
1628 	if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
1629 		ARN_DBG((ARN_DBG_FATAL, "arn: arn_txq_setup(): "
1630 		    "hal qnum %u out of range, max %u!\n",
1631 		    qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq)));
1632 		(void) ath9k_hw_releasetxqueue(ah, qnum);
1633 		return (NULL);
1634 	}
1635 	if (!ARN_TXQ_SETUP(sc, qnum)) {
1636 		struct ath_txq *txq = &sc->sc_txq[qnum];
1637 
1638 		txq->axq_qnum = qnum;
1639 		txq->axq_intrcnt = 0; /* legacy */
1640 		txq->axq_link = NULL;
1641 
1642 		list_create(&txq->axq_list, sizeof (struct ath_buf),
1643 		    offsetof(struct ath_buf, bf_node));
1644 		list_create(&txq->axq_acq, sizeof (struct ath_buf),
1645 		    offsetof(struct ath_buf, bf_node));
1646 		mutex_init(&txq->axq_lock, NULL, MUTEX_DRIVER, NULL);
1647 
1648 		txq->axq_depth = 0;
1649 		txq->axq_aggr_depth = 0;
1650 		txq->axq_totalqueued = 0;
1651 		txq->axq_linkbuf = NULL;
1652 		sc->sc_txqsetup |= 1<<qnum;
1653 	}
1654 	return (&sc->sc_txq[qnum]);
1655 }
1656 
1657 /* Reclaim resources for a setup queue */
1658 
1659 void
1660 arn_tx_cleanupq(struct arn_softc *sc, struct ath_txq *txq)
1661 {
1662 	(void) ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1663 	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
1664 }
1665 
1666 /*
1667  * Setup a hardware data transmit queue for the specified
1668  * access category.  The hal may not support all requested
1669  * queues in which case it will return a reference to a
1670  * previously setup queue.  We record the mapping from ac's
1671  * to h/w queues for use by arn_tx_start and also track
1672  * the set of h/w queues being used to optimize work in the
1673  * transmit interrupt handler and related routines.
1674  */
1675 
1676 int
1677 arn_tx_setup(struct arn_softc *sc, int haltype)
1678 {
1679 	struct ath_txq *txq;
1680 
1681 	if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
1682 		ARN_DBG((ARN_DBG_FATAL, "arn: arn_tx_setup(): "
1683 		    "HAL AC %u out of range, max %zu!\n",
1684 		    haltype, ARRAY_SIZE(sc->sc_haltype2q)));
1685 		return (0);
1686 	}
1687 	txq = arn_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
1688 	if (txq != NULL) {
1689 		sc->sc_haltype2q[haltype] = txq->axq_qnum;
1690 		return (1);
1691 	} else
1692 		return (0);
1693 }
1694 
1695 void
1696 arn_tx_draintxq(struct arn_softc *sc, struct ath_txq *txq)
1697 {
1698 	struct ath_buf *bf;
1699 
1700 	/*
1701 	 * This assumes output has been stopped.
1702 	 */
1703 	for (;;) {
1704 		mutex_enter(&txq->axq_lock);
1705 		bf = list_head(&txq->axq_list);
1706 		if (bf == NULL) {
1707 			txq->axq_link = NULL;
1708 			mutex_exit(&txq->axq_lock);
1709 			break;
1710 		}
1711 		list_remove(&txq->axq_list, bf);
1712 		mutex_exit(&txq->axq_lock);
1713 		bf->bf_in = NULL;
1714 		mutex_enter(&sc->sc_txbuflock);
1715 		list_insert_tail(&sc->sc_txbuf_list, bf);
1716 		mutex_exit(&sc->sc_txbuflock);
1717 	}
1718 }
1719 
1720 /* Drain the transmit queues and reclaim resources */
1721 
1722 void
1723 arn_draintxq(struct arn_softc *sc, boolean_t retry_tx)
1724 {
1725 	/*
1726 	 * stop beacon queue. The beacon will be freed when
1727 	 * we go to INIT state
1728 	 */
1729 	if (!(sc->sc_flags & SC_OP_INVALID)) {
1730 		(void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_beaconq);
1731 		ARN_DBG((ARN_DBG_XMIT, "arn: arn_draintxq(): "
1732 		    "beacon queue %x\n",
1733 		    ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_beaconq)));
1734 	}
1735 
1736 	arn_drain_txdataq(sc, retry_tx);
1737 }
1738 
1739 uint32_t
1740 arn_txq_depth(struct arn_softc *sc, int qnum)
1741 {
1742 	return (sc->sc_txq[qnum].axq_depth);
1743 }
1744 
1745 uint32_t
1746 arn_txq_aggr_depth(struct arn_softc *sc, int qnum)
1747 {
1748 	return (sc->sc_txq[qnum].axq_aggr_depth);
1749 }
1750 
1751 /* Update parameters for a transmit queue */
1752 int
1753 arn_txq_update(struct arn_softc *sc, int qnum,
1754     struct ath9k_tx_queue_info *qinfo)
1755 {
1756 	struct ath_hal *ah = sc->sc_ah;
1757 	int error = 0;
1758 	struct ath9k_tx_queue_info qi;
1759 
1760 	if (qnum == sc->sc_beaconq) {
1761 		/*
1762 		 * XXX: for beacon queue, we just save the parameter.
1763 		 * It will be picked up by arn_beaconq_config() when
1764 		 * it's necessary.
1765 		 */
1766 		sc->sc_beacon_qi = *qinfo;
1767 		return (0);
1768 	}
1769 
1770 	ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);
1771 
1772 	(void) ath9k_hw_get_txq_props(ah, qnum, &qi);
1773 	qi.tqi_aifs = qinfo->tqi_aifs;
1774 	qi.tqi_cwmin = qinfo->tqi_cwmin;
1775 	qi.tqi_cwmax = qinfo->tqi_cwmax;
1776 	qi.tqi_burstTime = qinfo->tqi_burstTime;
1777 	qi.tqi_readyTime = qinfo->tqi_readyTime;
1778 
1779 	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
1780 		ARN_DBG((ARN_DBG_FATAL,
1781 		    "Unable to update hardware queue %u!\n", qnum));
1782 		error = -EIO;
1783 	} else {
1784 		(void) ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
1785 	}
1786 
1787 	return (error);
1788 }
1789 
1790 int
1791 ath_cabq_update(struct arn_softc *sc)
1792 {
1793 	struct ath9k_tx_queue_info qi;
1794 	int qnum = sc->sc_cabq->axq_qnum;
1795 	struct ath_beacon_config conf;
1796 
1797 	(void) ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1798 	/*
1799 	 * Ensure the readytime % is within the bounds.
1800 	 */
1801 	if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1802 		sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1803 	else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1804 		sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
1805 
1806 	arn_get_beaconconfig(sc, &conf);
1807 	qi.tqi_readyTime =
1808 	    (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
1809 	(void) arn_txq_update(sc, qnum, &qi);
1810 
1811 	return (0);
1812 }
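
/*
 * Editor's note: with, say, a 100 TU beacon interval and a
 * cabqReadytime of 10 (interpreted as a percentage, per the bounds
 * check above), tqi_readyTime becomes (100 * 10) / 100 = 10, i.e. the
 * CAB queue may drain for roughly a tenth of each beacon interval
 * after the DTIM beacon.
 */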
1813 
1814 static uint32_t
1815 arn_tx_get_keytype(const struct ieee80211_cipher *cip)
1816 {
1817 	uint32_t index;
1818 	static const uint8_t ciphermap[] = {
1819 	    ATH9K_CIPHER_WEP,		/* IEEE80211_CIPHER_WEP */
1820 	    ATH9K_CIPHER_TKIP,		/* IEEE80211_CIPHER_TKIP */
1821 	    ATH9K_CIPHER_AES_OCB,	/* IEEE80211_CIPHER_AES_OCB */
1822 	    ATH9K_CIPHER_AES_CCM,	/* IEEE80211_CIPHER_AES_CCM */
1823 	    ATH9K_CIPHER_CKIP,		/* IEEE80211_CIPHER_CKIP */
1824 	    ATH9K_CIPHER_CLR,		/* IEEE80211_CIPHER_NONE */
1825 	};
1826 
1827 	ASSERT(cip->ic_cipher < ARRAY_SIZE(ciphermap));
1828 	index = cip->ic_cipher;
1829 
1830 	if (ciphermap[index] == ATH9K_CIPHER_WEP)
1831 		return (ATH9K_KEY_TYPE_WEP);
1832 	else if (ciphermap[index] == ATH9K_CIPHER_TKIP)
1833 		return (ATH9K_KEY_TYPE_TKIP);
1834 	else if (ciphermap[index] == ATH9K_CIPHER_AES_CCM)
1835 		return (ATH9K_KEY_TYPE_AES);
1836 
1837 	return (ATH9K_KEY_TYPE_CLEAR);
1838 
1839 }
1840 
1841 /* Display buffer */
1842 void
1843 arn_dump_line(unsigned char *p, uint32_t len, boolean_t isaddress,
1844     uint32_t group)
1845 {
1846 	char *pnumeric = "0123456789ABCDEF";
1847 	char hex[((2 + 1) * 16) + 1];
1848 	char *phex = hex;
1849 	char ascii[16 + 1];
1850 	char *pascii = ascii;
1851 	uint32_t grouped = 0;
1852 
1853 	if (isaddress) {
1854 		arn_problem("arn: %p: ", (void *)p);
1855 	} else {
1856 		arn_problem("arn: ");
1857 	}
1858 
1859 	while (len) {
1860 		*phex++ = pnumeric[((uint8_t)*p) / 16];
1861 		*phex++ = pnumeric[((uint8_t)*p) % 16];
1862 		if (++grouped >= group) {
1863 			*phex++ = ' ';
1864 			grouped = 0;
1865 		}
1866 
1867 		*pascii++ = (*p >= 32 && *p < 128) ? *p : '.';
1868 
1869 		++p;
1870 		--len;
1871 	}
1872 
1873 	*phex = '\0';
1874 	*pascii = '\0';
1875 
1876 	arn_problem("%-*s|%-*s|\n", (2 * 16) +
1877 	    (16 / group), hex, 16, ascii);
1878 }
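
/*
 * Editor's illustration: with group = 2, dumping the bytes "ABCDEF"
 * produces a line of the form
 *
 *	arn: 4142 4344 4546           |ABCDEF          |
 *
 * i.e. two hex digits per byte with a separator after every 'group'
 * bytes, padded to a fixed width, followed by the printable-ASCII
 * rendering of the same bytes.
 */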
1879 
1880 void
1881 arn_dump_pkg(unsigned char *p, uint32_t len, boolean_t isaddress,
1882     uint32_t group)
1883 {
1884 	uint32_t perline;
1885 	while (len) {
1886 		perline = (len < 16) ? len : 16;
1887 		arn_dump_line(p, perline, isaddress, group);
1888 		len -= perline;
1889 		p += perline;
1890 	}
1891 }
1892 
1893 /*
1894  * The input parameter mp is subject to the following assumptions:
1895  * for data packets, the GLDv3 mac_wifi plugin allocates and fills the
1896  * ieee80211 header; for management packets, net80211 allocates and
1897  * fills the ieee80211 header. In both cases, enough space is left in
1898  * the header for the encryption option.
1899  */
1900 static int32_t
1901 arn_tx_start(struct arn_softc *sc, struct ieee80211_node *in,
1902     struct ath_buf *bf, mblk_t *mp)
1903 {
1904 	ieee80211com_t *ic = (ieee80211com_t *)sc;
1905 	struct ieee80211_frame *wh = (struct ieee80211_frame *)mp->b_rptr;
1906 	struct ath_hal *ah = sc->sc_ah;
1907 	struct ath_node *an;
1908 	struct ath_desc *ds;
1909 	struct ath_txq *txq;
1910 	struct ath_rate_table *rt;
1911 	enum ath9k_pkt_type atype;
	boolean_t shortPreamble = B_FALSE, is_padding = B_FALSE;
1913 	uint32_t subtype, keytype = ATH9K_KEY_TYPE_CLEAR;
1914 	int32_t keyix, iswep, hdrlen, pktlen, mblen, mbslen;
1915 	caddr_t dest;
1916 
	/*
	 * The FCS/CRC is appended by the hardware rather than
	 * encapsulated by the driver, but we must still count it
	 * in the packet length.
	 */
1921 	pktlen = IEEE80211_CRC_LEN;
1922 	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
1923 	keyix = ATH9K_TXKEYIX_INVALID;
1924 	hdrlen = ieee80211_hdrspace(ic, mp->b_rptr);
	if (hdrlen == 28)	/* QoS header padded to a 4-byte boundary */
		is_padding = B_TRUE;
1927 
1928 	if (iswep != 0) {
1929 		const struct ieee80211_cipher *cip;
1930 		struct ieee80211_key *k;
1931 
1932 		/*
1933 		 * Construct the 802.11 header+trailer for an encrypted
1934 		 * frame. The only reason this can fail is because of an
1935 		 * unknown or unsupported cipher/key type.
1936 		 */
1937 		k = ieee80211_crypto_encap(ic, mp);
1938 		if (k == NULL) {
1939 			ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx_start "
1940 			    "crypto_encap failed\n"));
1941 			/*
1942 			 * This can happen when the key is yanked after the
1943 			 * frame was queued.  Just discard the frame; the
1944 			 * 802.11 layer counts failures and provides
1945 			 * debugging/diagnostics.
1946 			 */
1947 			return (EIO);
1948 		}
1949 		cip = k->wk_cipher;
1950 
1951 		keytype = arn_tx_get_keytype(cip);
1952 
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index.  When a
		 * s/w MIC is done, the frame already carries the MIC,
		 * so its length is counted in the mblk chain copied
		 * below; otherwise we must add it to the packet length.
		 */
1961 		hdrlen += cip->ic_header;
1962 		pktlen += cip->ic_trailer;
1963 		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0)
1964 			pktlen += cip->ic_miclen;
1965 
1966 		keyix = k->wk_keyix;
1967 
1968 		/* packet header may have moved, reset our local pointer */
1969 		wh = (struct ieee80211_frame *)mp->b_rptr;
1970 	}
1971 
1972 	dest = bf->bf_dma.mem_va;
1973 	for (; mp != NULL; mp = mp->b_cont) {
1974 		mblen = MBLKL(mp);
1975 		bcopy(mp->b_rptr, dest, mblen);
1976 		dest += mblen;
1977 	}
1978 	mbslen = (uintptr_t)dest - (uintptr_t)bf->bf_dma.mem_va;
1979 	pktlen += mbslen;
1980 	if (is_padding && (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
1981 	    IEEE80211_FC0_TYPE_DATA)
		pktlen -= 2; /* on-air length excludes the 2 pad bytes */
1983 
1984 	/* buf setup */
1985 	ath_tx_setup_buffer(sc, bf, in, wh, pktlen, keytype);
1986 
1987 	/* setup descriptors */
1988 	ds = bf->bf_desc;
1989 	rt = sc->sc_currates;
1990 	ASSERT(rt != NULL);
1991 
1992 	arn_get_rate(sc, bf, wh);
1993 	an = (struct ath_node *)(in);
1994 
	/*
	 * Calculate the Atheros packet type from the IEEE80211 packet
	 * header and set up for rate calculations.
	 */
1999 	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
2000 	case IEEE80211_FC0_TYPE_MGT:
2001 		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2002 		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
2003 			atype = ATH9K_PKT_TYPE_BEACON;
2004 		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2005 			atype = ATH9K_PKT_TYPE_PROBE_RESP;
2006 		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
2007 			atype = ATH9K_PKT_TYPE_ATIM;
2008 		else
2009 			atype = ATH9K_PKT_TYPE_NORMAL;
2010 
		/* force all mgmt frames to the highest queue */
2012 		txq = &sc->sc_txq[arn_get_hal_qnum(WME_AC_VO, sc)];
2013 		break;
2014 	case IEEE80211_FC0_TYPE_CTL:
2015 		atype = ATH9K_PKT_TYPE_PSPOLL;
2016 		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2017 
2018 		/* force all ctl frames to highest queue */
2019 		txq = &sc->sc_txq[arn_get_hal_qnum(WME_AC_VO, sc)];
2020 		break;
2021 	case IEEE80211_FC0_TYPE_DATA:
		/*
		 * arn_dump_pkg((unsigned char *)bf->bf_dma.mem_va,
		 *    pktlen, 1, 1);
		 */
2024 		atype = ATH9K_PKT_TYPE_NORMAL;
2025 
		/* Always use the best-effort queue */
2027 		txq = &sc->sc_txq[arn_get_hal_qnum(WME_AC_BE, sc)];
2028 		break;
2029 	default:
2030 		/* Unknown 802.11 frame */
2031 		sc->sc_stats.ast_tx_invalid++;
2032 		return (1);
2033 	}
2034 
2035 	/* setup descriptor */
2036 	ds->ds_link = 0;
2037 	ds->ds_data = bf->bf_dma.cookie.dmac_address;
2038 
2039 	/*
2040 	 * Formulate first tx descriptor with tx controls.
2041 	 */
	ath9k_hw_set11n_txdesc(ah, ds,
	    pktlen, /* packet length */
	    atype, /* Atheros packet type */
	    MAX_RATE_POWER, /* maximum tx power */
	    keyix, /* key cache index, ATH9K_TXKEYIX_INVALID if none */
	    keytype, /* key type, ATH9K_KEY_TYPE_CLEAR if unencrypted */
	    bf->bf_flags /* tx descriptor flags */);
2049 
2050 	/* LINTED E_BAD_PTR_CAST_ALIGN */
2051 	ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx_start(): to %s totlen=%d "
2052 	    "an->an_tx_rate1sp=%d tx_rate2sp=%d tx_rate3sp=%d "
2053 	    "qnum=%d sht=%d dur = %d\n",
2054 	    ieee80211_macaddr_sprintf(wh->i_addr1), mbslen, an->an_tx_rate1sp,
2055 	    an->an_tx_rate2sp, an->an_tx_rate3sp,
2056 	    txq->axq_qnum, shortPreamble, *(uint16_t *)wh->i_dur));
2057 
2058 	(void) ath9k_hw_filltxdesc(ah, ds,
2059 	    mbslen,		/* segment length */
2060 	    B_TRUE,		/* first segment */
2061 	    B_TRUE,		/* last segment */
2062 	    ds);		/* first descriptor */
2063 
2064 	/* set rate related fields in tx descriptor */
2065 	ath_buf_set_rate(sc, bf, wh);
2066 
2067 	ARN_DMA_SYNC(bf->bf_dma, DDI_DMA_SYNC_FORDEV);
2068 
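	/*
	 * Link the buffer into the hardware transmit queue: if the queue
	 * has no link pointer yet, hand the descriptor's physical address
	 * straight to the hardware; otherwise patch it into the previous
	 * descriptor's ds_link so the DMA engine chains to it. Either way,
	 * this descriptor's ds_link becomes the new tail pointer.
	 */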
2069 	mutex_enter(&txq->axq_lock);
2070 	list_insert_tail(&txq->axq_list, bf);
2071 	if (txq->axq_link == NULL) {
2072 		(void) ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
2073 	} else {
2074 		*txq->axq_link = bf->bf_daddr;
2075 	}
2076 	txq->axq_link = &ds->ds_link;
2077 	mutex_exit(&txq->axq_lock);
2078 
	/* arn_dump_pkg((unsigned char *)bf->bf_dma.mem_va, pktlen, 1, 1); */
2080 
2081 	(void) ath9k_hw_txstart(ah, txq->axq_qnum);
2082 
2083 	ic->ic_stats.is_tx_frags++;
2084 	ic->ic_stats.is_tx_bytes += pktlen;
2085 
2086 	return (0);
2087 }
2088 
/*
 * Transmit a frame.
 * Note that management frames come directly from the 802.11 layer
 * and do not honor the send queue flow control. Upon failure the
 * caller must free mp for data frames; management frames are freed
 * here in all cases.
 */
2095 int
2096 arn_tx(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
2097 {
2098 	struct arn_softc *sc = (struct arn_softc *)ic;
2099 	struct ath_hal *ah = sc->sc_ah;
2100 	struct ieee80211_node *in = NULL;
2101 	struct ath_buf *bf = NULL;
2102 	struct ieee80211_frame *wh;
2103 	int error = 0;
2104 
2105 	ASSERT(mp->b_next == NULL);
	/* XXX: revisit; drop frames while the device is marked invalid */
2107 	if (sc->sc_flags & SC_OP_INVALID) {
2108 		if ((type & IEEE80211_FC0_TYPE_MASK) !=
2109 		    IEEE80211_FC0_TYPE_DATA) {
2110 			freemsg(mp);
2111 		}
2112 		return (ENXIO);
2113 	}
2114 
2115 	/* Grab a TX buffer */
2116 	bf = arn_tx_get_buffer(sc);
2117 	if (bf == NULL) {
2118 		ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx(): discard, "
2119 		    "no xmit buf\n"));
2120 		ic->ic_stats.is_tx_nobuf++;
2121 		if ((type & IEEE80211_FC0_TYPE_MASK) ==
2122 		    IEEE80211_FC0_TYPE_DATA) {
2123 			sc->sc_stats.ast_tx_nobuf++;
2124 			mutex_enter(&sc->sc_resched_lock);
2125 			sc->sc_resched_needed = B_TRUE;
2126 			mutex_exit(&sc->sc_resched_lock);
2127 		} else {
2128 			sc->sc_stats.ast_tx_nobufmgt++;
2129 			freemsg(mp);
2130 		}
2131 		return (ENOMEM);
2132 	}
2133 
2134 	wh = (struct ieee80211_frame *)mp->b_rptr;
2135 
2136 	/* Locate node */
2137 	in = ieee80211_find_txnode(ic,  wh->i_addr1);
2138 	if (in == NULL) {
2139 		error = EIO;
2140 		goto bad;
2141 	}
2142 
2143 	in->in_inact = 0;
2144 	switch (type & IEEE80211_FC0_TYPE_MASK) {
2145 	case IEEE80211_FC0_TYPE_DATA:
2146 		(void) ieee80211_encap(ic, mp, in);
2147 		break;
2148 	default:
2149 		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2150 		    IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
2151 			/* fill time stamp */
2152 			uint64_t tsf;
2153 			uint32_t *tstamp;
2154 
2155 			tsf = ath9k_hw_gettsf64(ah);
			/* allow ~100us of delay before transmission */
2157 			tsf += 100;
2158 			/* LINTED E_BAD_PTR_CAST_ALIGN */
2159 			tstamp = (uint32_t *)&wh[1];
2160 			tstamp[0] = LE_32(tsf & 0xffffffff);
2161 			tstamp[1] = LE_32(tsf >> 32);
2162 		}
2163 		sc->sc_stats.ast_tx_mgmt++;
2164 		break;
2165 	}
2166 
2167 	error = arn_tx_start(sc, in, bf, mp);
2168 
2169 	if (error != 0) {
2170 bad:
2171 		ic->ic_stats.is_tx_failed++;
2172 		if (bf != NULL) {
2173 			mutex_enter(&sc->sc_txbuflock);
2174 			list_insert_tail(&sc->sc_txbuf_list, bf);
2175 			mutex_exit(&sc->sc_txbuflock);
2176 		}
2177 	}
2178 	if (in != NULL)
2179 		ieee80211_free_node(in);
2180 	if ((type & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_DATA ||
2181 	    error == 0) {
2182 		freemsg(mp);
2183 	}
2184 
2185 	return (error);
2186 }
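
/*
 * Note: arn_tx() is the common transmit entry point. net80211 calls it
 * (through ic_xmit) for management frames, and the driver's GLDv3
 * transmit routine is expected to call it with IEEE80211_FC0_TYPE_DATA
 * for data frames; that is why the no-buffer path above schedules a
 * mac_tx_update() retry for data frames but simply drops management
 * frames.
 */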
2187 
2188 static void
2189 arn_printtxbuf(struct ath_buf *bf, int done)
2190 {
2191 	struct ath_desc *ds = bf->bf_desc;
2192 	const struct ath_tx_status *ts = &ds->ds_txstat;
2193 
2194 	ARN_DBG((ARN_DBG_XMIT, "arn: T(%p %p) %08x %08x %08x %08x %08x"
2195 	    " %08x %08x %08x %c\n",
2196 	    ds, bf->bf_daddr,
2197 	    ds->ds_link, ds->ds_data,
2198 	    ds->ds_ctl0, ds->ds_ctl1,
2199 	    ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3],
2200 	    !done ? ' ' : (ts->ts_status == 0) ? '*' : '!'));
2201 }
2202 
2203 /* ARGSUSED */
2204 static void
2205 ath_tx_rc_status(struct ath_buf *bf,
2206     struct ath_desc *ds,
2207     int nbad,
2208     int txok,
2209     boolean_t update_rc)
2210 {
2211 	struct ath_tx_info_priv *tx_info_priv =
2212 	    (struct ath_tx_info_priv *)&bf->tx_info_priv;
2213 
2214 	tx_info_priv->update_rc = B_FALSE;
2215 
2216 	if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
2217 	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
2218 		if (bf_isdata(bf)) {
2219 			(void) memcpy(&tx_info_priv->tx, &ds->ds_txstat,
2220 			    sizeof (tx_info_priv->tx));
2221 			tx_info_priv->n_frames = bf->bf_nframes;
2222 			tx_info_priv->n_bad_frames = nbad;
2223 			tx_info_priv->update_rc = B_TRUE;
2224 		}
2225 	}
2226 }
2227 
2228 /* Process completed xmit descriptors from the specified queue */
2229 static int
2230 arn_tx_processq(struct arn_softc *sc, struct ath_txq *txq)
2231 {
2232 	ieee80211com_t *ic = (ieee80211com_t *)sc;
2233 	struct ath_hal *ah = sc->sc_ah;
2234 	struct ath_buf *bf;
2235 	struct ath_desc *ds;
2236 	struct ieee80211_node *in;
2237 	struct ath_tx_status *ts;
2238 	struct ath_node *an;
2239 	int32_t sr, lr, nacked = 0;
2240 	int txok, nbad = 0;
2241 	int status;
2242 
2243 	for (;;) {
2244 		mutex_enter(&txq->axq_lock);
2245 		bf = list_head(&txq->axq_list);
2246 		if (bf == NULL) {
2247 			txq->axq_link = NULL;
2248 			/* txq->axq_linkbuf = NULL; */
2249 			mutex_exit(&txq->axq_lock);
2250 			break;
2251 		}
		ds = bf->bf_desc;	/* last descriptor */
2253 		ts = &ds->ds_txstat;
2254 		status = ath9k_hw_txprocdesc(ah, ds);
2255 
2256 #ifdef DEBUG
2257 		arn_printtxbuf(bf, status == 0);
2258 #endif
2259 
2260 		if (status == EINPROGRESS) {
2261 			mutex_exit(&txq->axq_lock);
2262 			break;
2263 		}
2264 		list_remove(&txq->axq_list, bf);
2265 		mutex_exit(&txq->axq_lock);
2266 		in = bf->bf_in;
2267 		if (in != NULL) {
2268 			an = ATH_NODE(in);
			/* Successful transmission */
2270 			if (ts->ts_status == 0) {
2271 				an->an_tx_ok++;
2272 				an->an_tx_antenna = ts->ts_antenna;
2273 				sc->sc_stats.ast_tx_rssidelta =
2274 				    ts->ts_rssi - sc->sc_stats.ast_tx_rssi;
2275 				sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
2276 			} else {
2277 				an->an_tx_err++;
2278 				if (ts->ts_status & ATH9K_TXERR_XRETRY) {
2279 					sc->sc_stats.ast_tx_xretries++;
2280 				}
2281 				if (ts->ts_status & ATH9K_TXERR_FIFO) {
2282 					sc->sc_stats.ast_tx_fifoerr++;
2283 				}
2284 				if (ts->ts_status & ATH9K_TXERR_FILT) {
2285 					sc->sc_stats.ast_tx_filtered++;
2286 				}
2287 				an->an_tx_antenna = 0;	/* invalidate */
2288 			}
2289 			sr = ts->ts_shortretry;
2290 			lr = ts->ts_longretry;
2291 			sc->sc_stats.ast_tx_shortretry += sr;
2292 			sc->sc_stats.ast_tx_longretry += lr;
			/*
			 * Count frames that were ack'd; the caller uses
			 * this to work around phantom beacon-miss
			 * interrupts. The ok/err counters were already
			 * updated above, so only account for retries here.
			 */
			if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
			    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
				if (ts->ts_status == 0)
					nacked++;
				an->an_tx_retr += sr + lr;
			}
2310 		}
2311 
2312 		txok = (ds->ds_txstat.ts_status == 0);
2313 		if (!bf_isampdu(bf)) {
2314 			/*
2315 			 * This frame is sent out as a single frame.
2316 			 * Use hardware retry status for this frame.
2317 			 */
2318 			bf->bf_retries = ds->ds_txstat.ts_longretry;
2319 			if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
2320 				bf->bf_state.bf_type |= BUF_XRETRY;
2321 			nbad = 0;
2322 		}
		ath_tx_rc_status(bf, ds, nbad, txok, B_TRUE);
2324 
2325 		ath_tx_complete_buf(sc, bf, txok, 0);
2326 
		/*
		 * arn_dump_pkg((unsigned char *)bf->bf_dma.mem_va,
		 *    bf->bf_frmlen, 1, 1);
		 */
2329 
2330 		bf->bf_in = NULL;
2331 		mutex_enter(&sc->sc_txbuflock);
2332 		list_insert_tail(&sc->sc_txbuf_list, bf);
2333 		mutex_exit(&sc->sc_txbuflock);
2334 
2335 		/*
2336 		 * Reschedule stalled outbound packets
2337 		 */
2338 		mutex_enter(&sc->sc_resched_lock);
2339 		if (sc->sc_resched_needed) {
2340 			sc->sc_resched_needed = B_FALSE;
2341 			mac_tx_update(ic->ic_mach);
2342 		}
2343 		mutex_exit(&sc->sc_resched_lock);
2344 	}
2345 
2346 	return (nacked);
2347 }
2348 
2349 static void
2350 arn_tx_handler(struct arn_softc *sc)
2351 {
2352 	int i;
2353 	int nacked = 0;
2354 	uint32_t qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
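	/*
	 * qcumask starts with every queue's bit set; the HAL narrows it
	 * to the queues that actually raised a TX interrupt, so only
	 * those are scanned below.
	 */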
2355 	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
2356 
2357 	/*
2358 	 * Process each active queue.
2359 	 */
2360 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2361 		if (ARN_TXQ_SETUP(sc, i) && (qcumask & (1 << i))) {
2362 			nacked += arn_tx_processq(sc, &sc->sc_txq[i]);
2363 		}
2364 	}
2365 
2366 	if (nacked)
2367 		sc->sc_lastrx = ath9k_hw_gettsf64(sc->sc_ah);
2368 }
2369 
2370 /* Deferred processing of transmit interrupt */
2371 
2372 void
2373 arn_tx_int_proc(void *arg)
2374 {
2375 	struct arn_softc *sc = arg;
2376 	arn_tx_handler(sc);
2377 }
2378 
2379 /* Node init & cleanup functions */
2380 
2381 #ifdef ARN_TX_AGGREGATION
2382 void
2383 arn_tx_node_init(struct arn_softc *sc, struct ath_node *an)
2384 {
2385 	struct ath_atx_tid *tid;
2386 	struct ath_atx_ac *ac;
2387 	int tidno, acno;
2388 
2389 	for (tidno = 0, tid = &an->tid[tidno]; tidno < WME_NUM_TID;
2390 	    tidno++, tid++) {
2391 		tid->an = an;
2392 		tid->tidno = tidno;
2393 		tid->seq_start = tid->seq_next = 0;
2394 		tid->baw_size  = WME_MAX_BA;
2395 		tid->baw_head  = tid->baw_tail = 0;
2396 		tid->sched = B_FALSE;
2397 		tid->paused = B_FALSE;
2398 		tid->state &= ~AGGR_CLEANUP;
2399 		list_create(&tid->buf_q, sizeof (struct ath_buf),
2400 		    offsetof(struct ath_buf, bf_node));
2401 		acno = TID_TO_WME_AC(tidno);
2402 		tid->ac = &an->ac[acno];
2403 		tid->state &= ~AGGR_ADDBA_COMPLETE;
2404 		tid->state &= ~AGGR_ADDBA_PROGRESS;
2405 		tid->addba_exchangeattempts = 0;
2406 	}
2407 
2408 	for (acno = 0, ac = &an->ac[acno]; acno < WME_NUM_AC; acno++, ac++) {
2409 		ac->sched = B_FALSE;
2410 		list_create(&ac->tid_q, sizeof (struct ath_atx_tid),
2411 		    offsetof(struct ath_atx_tid, list));
2412 
2413 		switch (acno) {
2414 		case WME_AC_BE:
2415 			ac->qnum = arn_tx_get_qnum(sc,
2416 			    ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
2417 			break;
2418 		case WME_AC_BK:
2419 			ac->qnum = arn_tx_get_qnum(sc,
2420 			    ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
2421 			break;
2422 		case WME_AC_VI:
2423 			ac->qnum = arn_tx_get_qnum(sc,
2424 			    ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
2425 			break;
2426 		case WME_AC_VO:
2427 			ac->qnum = arn_tx_get_qnum(sc,
2428 			    ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
2429 			break;
2430 		}
2431 	}
2432 }
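
/*
 * Each node thus carries WME_NUM_TID per-TID aggregation states (16 in
 * the usual WME definitions) mapped onto WME_NUM_AC access categories
 * (4); TID_TO_WME_AC() provides the mapping used above, and each AC is
 * bound to its hardware data queue via arn_tx_get_qnum().
 */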
2433 
2434 void
2435 arn_tx_node_cleanup(struct arn_softc *sc, struct ieee80211_node *in)
2436 {
2437 	int i;
2438 	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;
	struct ath_txq *txq;
	struct ath_buf *bf;
2441 	struct ath_node *an = ATH_NODE(in);
2442 
2443 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2444 		if (ARN_TXQ_SETUP(sc, i)) {
2445 			txq = &sc->sc_txq[i];
2446 
2447 			mutex_enter(&txq->axq_lock);
2448 
2449 			list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq) {
2450 				tid = list_head(&ac->tid_q);
2451 				if (tid && tid->an != an)
2452 					continue;
2453 				list_remove(&txq->axq_acq, ac);
2454 				ac->sched = B_FALSE;
2455 
2456 				list_for_each_entry_safe(tid, tid_tmp,
2457 				    &ac->tid_q) {
2458 					list_remove(&ac->tid_q, tid);
					for (bf = list_head(&tid->buf_q);
					    bf != NULL;
					    bf = list_next(&tid->buf_q, bf)) {
						if (bf->bf_in == in)
							bf->bf_in = NULL;
					}
2465 					tid->sched = B_FALSE;
2466 					arn_tid_drain(sc, txq, tid);
2467 					tid->state &= ~AGGR_ADDBA_COMPLETE;
2468 					tid->addba_exchangeattempts = 0;
2469 					tid->state &= ~AGGR_CLEANUP;
2470 				}
2471 			}
2472 
2473 			mutex_exit(&txq->axq_lock);
2474 		}
2475 	}
2476 }
2477 #endif /* ARN_TX_AGGREGATION */
2478