xref: /freebsd/sys/dev/ath/ath_hal/ar5416/ar5416_xmit.c (revision b1d046441de9053152c7cf03d6b60d9882687e1b)
1 /*
2  * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3  * Copyright (c) 2002-2008 Atheros Communications, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  *
17  * $FreeBSD$
18  */
19 #include "opt_ah.h"
20 
21 #include "ah.h"
22 #include "ah_desc.h"
23 #include "ah_internal.h"
24 
25 #include "ar5416/ar5416.h"
26 #include "ar5416/ar5416reg.h"
27 #include "ar5416/ar5416phy.h"
28 #include "ar5416/ar5416desc.h"
29 
30 /*
31  * Stop transmit on the specified queue
32  */
33 HAL_BOOL
34 ar5416StopTxDma(struct ath_hal *ah, u_int q)
35 {
36 #define	STOP_DMA_TIMEOUT	4000	/* us */
37 #define	STOP_DMA_ITER		100	/* us */
38 	u_int i;
39 
40 	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
41 
42 	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
43 
44 	OS_REG_WRITE(ah, AR_Q_TXD, 1 << q);
45 	for (i = STOP_DMA_TIMEOUT/STOP_DMA_ITER; i != 0; i--) {
46 		if (ar5212NumTxPending(ah, q) == 0)
47 			break;
48 		OS_DELAY(STOP_DMA_ITER);
49 	}
50 #ifdef AH_DEBUG
51 	if (i == 0) {
52 		HALDEBUG(ah, HAL_DEBUG_ANY,
53 		    "%s: queue %u DMA did not stop in 400 msec\n", __func__, q);
54 		HALDEBUG(ah, HAL_DEBUG_ANY,
55 		    "%s: QSTS 0x%x Q_TXE 0x%x Q_TXD 0x%x Q_CBR 0x%x\n", __func__,
56 		    OS_REG_READ(ah, AR_QSTS(q)), OS_REG_READ(ah, AR_Q_TXE),
57 		    OS_REG_READ(ah, AR_Q_TXD), OS_REG_READ(ah, AR_QCBRCFG(q)));
58 		HALDEBUG(ah, HAL_DEBUG_ANY,
59 		    "%s: Q_MISC 0x%x Q_RDYTIMECFG 0x%x Q_RDYTIMESHDN 0x%x\n",
60 		    __func__, OS_REG_READ(ah, AR_QMISC(q)),
61 		    OS_REG_READ(ah, AR_QRDYTIMECFG(q)),
62 		    OS_REG_READ(ah, AR_Q_RDYTIMESHDN));
63 	}
64 #endif /* AH_DEBUG */
65 
66 	/* ar5416 and up can kill packets at the PCU level */
67 	if (ar5212NumTxPending(ah, q)) {
68 		uint32_t j;
69 
70 		HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
71 		    "%s: Num of pending TX Frames %d on Q %d\n",
72 		    __func__, ar5212NumTxPending(ah, q), q);
73 
74 		/* Kill last PCU Tx Frame */
75 		/* TODO - save off and restore current values of Q1/Q2? */
76 		for (j = 0; j < 2; j++) {
77 			uint32_t tsfLow = OS_REG_READ(ah, AR_TSF_L32);
78 			OS_REG_WRITE(ah, AR_QUIET2,
79 			    SM(10, AR_QUIET2_QUIET_DUR));
80 			OS_REG_WRITE(ah, AR_QUIET_PERIOD, 100);
81 			OS_REG_WRITE(ah, AR_NEXT_QUIET, tsfLow >> 10);
82 			OS_REG_SET_BIT(ah, AR_TIMER_MODE, AR_TIMER_MODE_QUIET);
83 
84 			if ((OS_REG_READ(ah, AR_TSF_L32)>>10) == (tsfLow>>10))
85 				break;
86 
87 			HALDEBUG(ah, HAL_DEBUG_ANY,
88 			    "%s: TSF moved while trying to set quiet time "
89 			    "TSF: 0x%08x\n", __func__, tsfLow);
90 			HALASSERT(j < 1); /* TSF shouldn't count twice or reg access is taking forever */
91 		}
92 
93 		OS_REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
94 
95 		/* Allow the quiet mechanism to do its work */
96 		OS_DELAY(200);
97 		OS_REG_CLR_BIT(ah, AR_TIMER_MODE, AR_TIMER_MODE_QUIET);
98 
99 		/* Verify the transmit q is empty */
100 		for (i = STOP_DMA_TIMEOUT/STOP_DMA_ITER; i != 0; i--) {
101 			if (ar5212NumTxPending(ah, q) == 0)
102 				break;
103 			OS_DELAY(STOP_DMA_ITER);
104 		}
105 		if (i == 0) {
106 			HALDEBUG(ah, HAL_DEBUG_ANY,
107 			    "%s: Failed to stop Tx DMA in %d msec after killing"
108 			    " last frame\n", __func__, STOP_DMA_TIMEOUT / 1000);
109 		}
110 		OS_REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
111 	}
112 
113 	OS_REG_WRITE(ah, AR_Q_TXD, 0);
114 	return (i != 0);
115 #undef STOP_DMA_ITER
116 #undef STOP_DMA_TIMEOUT
117 }
118 
/* Key types that may legitimately appear in a tx descriptor. */
#define VALID_KEY_TYPES \
        ((1 << HAL_KEY_TYPE_CLEAR) | (1 << HAL_KEY_TYPE_WEP)|\
         (1 << HAL_KEY_TYPE_AES)   | (1 << HAL_KEY_TYPE_TKIP))
#define isValidKeyType(_t)      ((1 << (_t)) & VALID_KEY_TYPES)

/* Per-series try count field for rate series _index (0..3). */
#define set11nTries(_series, _index) \
        (SM((_series)[_index].Tries, AR_XmitDataTries##_index))

/* Per-series hardware rate code field for rate series _index. */
#define set11nRate(_series, _index) \
        (SM((_series)[_index].Rate, AR_XmitRate##_index))

/* Packet duration plus the RTS/CTS qualifier bit for series _index. */
#define set11nPktDurRTSCTS(_series, _index) \
        (SM((_series)[_index].PktDuration, AR_PacketDur##_index) |\
         ((_series)[_index].RateFlags & HAL_RATESERIES_RTS_CTS   ?\
         AR_RTSCTSQual##_index : 0))

/*
 * HT 20/40, half-GI and chain-select bits for series _index.
 * NB: the expansion is not parenthesised; this is safe in the
 * current callers only because they combine invocations with '|'.
 */
#define set11nRateFlags(_series, _index) \
        ((_series)[_index].RateFlags & HAL_RATESERIES_2040 ? AR_2040_##_index : 0) \
        |((_series)[_index].RateFlags & HAL_RATESERIES_HALFGI ? AR_GI##_index : 0) \
        |SM((_series)[_index].ChSel, AR_ChainSel##_index)

/*
 * Descriptor Access Functions
 */

/* Frame types accepted by the descriptor setup routines below. */
#define VALID_PKT_TYPES \
        ((1<<HAL_PKT_TYPE_NORMAL)|(1<<HAL_PKT_TYPE_ATIM)|\
         (1<<HAL_PKT_TYPE_PSPOLL)|(1<<HAL_PKT_TYPE_PROBE_RESP)|\
         (1<<HAL_PKT_TYPE_BEACON)|(1<<HAL_PKT_TYPE_AMPDU))
#define isValidPktType(_t)      ((1<<(_t)) & VALID_PKT_TYPES)
/* Bitmap of acceptable hardware rate codes (compared after masking). */
#define VALID_TX_RATES \
        ((1<<0x0b)|(1<<0x0f)|(1<<0x0a)|(1<<0x0e)|(1<<0x09)|(1<<0x0d)|\
         (1<<0x08)|(1<<0x0c)|(1<<0x1b)|(1<<0x1a)|(1<<0x1e)|(1<<0x19)|\
	 (1<<0x1d)|(1<<0x18)|(1<<0x1c)|(1<<0x01)|(1<<0x02)|(1<<0x03)|\
	 (1<<0x04)|(1<<0x05)|(1<<0x06)|(1<<0x07)|(1<<0x00))
/* NB: accept HT rates */
#define	isValidTxRate(_r)	((1<<((_r) & 0x7f)) & VALID_TX_RATES)
156 
/*
 * Set up the transmit control words in the first descriptor of a
 * (possibly multi-descriptor) frame.  Rate series 1..3 are left
 * clear here and are filled in by ar5416SetupXTxDesc for multi-rate
 * retry.  hdrLen, antMode, rtsctsDuration and the comp* arguments
 * are accepted for interface compatibility but not used by this
 * implementation.
 *
 * Returns AH_FALSE only if an RTS/CTS rate is requested that fails
 * isValidTxRate(); otherwise AH_TRUE.
 */
HAL_BOOL
ar5416SetupTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int pktLen,
	u_int hdrLen,
	HAL_PKT_TYPE type,
	u_int txPower,
	u_int txRate0, u_int txTries0,
	u_int keyIx,
	u_int antMode,
	u_int flags,
	u_int rtsctsRate,
	u_int rtsctsDuration,
	u_int compicvLen,
	u_int compivLen,
	u_int comp)
{
#define	RTSCTS	(HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath_hal_5416 *ahp = AH5416(ah);

	(void) hdrLen;

	HALASSERT(txTries0 != 0);
	HALASSERT(isValidPktType(type));
	HALASSERT(isValidTxRate(txRate0));
	/* RTS and CTS enable are mutually exclusive */
	HALASSERT((flags & RTSCTS) != RTSCTS);
	/* XXX validate antMode */

	/* Apply the per-chip power index offset and clamp to 63. */
        txPower = (txPower + AH5212(ah)->ah_txPowerIndexOffset);
        if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 = (pktLen & AR_FrameLen)
		     | (txPower << AR_XmitPower_S)
		     | (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
		     | (flags & HAL_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		     | (flags & HAL_TXDESC_INTREQ ? AR_TxIntrReq : 0)
		     ;
	ads->ds_ctl1 = (type << AR_FrameType_S)
		     | (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0)
                     ;
	ads->ds_ctl2 = SM(txTries0, AR_XmitDataTries0)
		     | (flags & HAL_TXDESC_DURENA ? AR_DurUpdateEn : 0)
		     ;
	ads->ds_ctl3 = (txRate0 << AR_XmitRate0_S)
		     ;
	ads->ds_ctl4 = 0;
	ads->ds_ctl5 = 0;
	ads->ds_ctl6 = 0;
	/* Replicate the configured tx chainmask into all four series. */
	ads->ds_ctl7 = SM(ahp->ah_tx_chainmask, AR_ChainSel0)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel1)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel2)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel3)
		     ;
	/* Antenna control 0; same clamped power for series 1..3. */
	ads->ds_ctl8 = SM(0, AR_AntCtl0);
	ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(txPower, AR_XmitPower1);
	ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(txPower, AR_XmitPower2);
	ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(txPower, AR_XmitPower3);

	if (keyIx != HAL_TXKEYIX_INVALID) {
		/* XXX validate key index */
		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
		ads->ds_ctl0 |= AR_DestIdxValid;
		ads->ds_ctl6 |= SM(ahp->ah_keytype[keyIx], AR_EncrType);
	}
	if (flags & RTSCTS) {
		if (!isValidTxRate(rtsctsRate)) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: invalid rts/cts rate 0x%x\n",
			    __func__, rtsctsRate);
			return AH_FALSE;
		}
		/* XXX validate rtsctsDuration */
		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
			     | (flags & HAL_TXDESC_RTSENA ? AR_RTSEnable : 0)
			     ;
		ads->ds_ctl7 |= (rtsctsRate << AR_RTSCTSRate_S);
	}

	/*
	 * Set the TX antenna to 0 for Kite
	 * To preserve existing behaviour, also set the TPC bits to 0;
	 * when TPC is enabled these should be filled in appropriately.
	 */
	if (AR_SREV_KITE(ah)) {
		ads->ds_ctl8 = SM(0, AR_AntCtl0);
		ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(0, AR_XmitPower1);
		ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(0, AR_XmitPower2);
		ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(0, AR_XmitPower3);
	}
	return AH_TRUE;
#undef RTSCTS
}
250 
251 HAL_BOOL
252 ar5416SetupXTxDesc(struct ath_hal *ah, struct ath_desc *ds,
253 	u_int txRate1, u_int txTries1,
254 	u_int txRate2, u_int txTries2,
255 	u_int txRate3, u_int txTries3)
256 {
257 	struct ar5416_desc *ads = AR5416DESC(ds);
258 
259 	if (txTries1) {
260 		HALASSERT(isValidTxRate(txRate1));
261 		ads->ds_ctl2 |= SM(txTries1, AR_XmitDataTries1);
262 		ads->ds_ctl3 |= (txRate1 << AR_XmitRate1_S);
263 	}
264 	if (txTries2) {
265 		HALASSERT(isValidTxRate(txRate2));
266 		ads->ds_ctl2 |= SM(txTries2, AR_XmitDataTries2);
267 		ads->ds_ctl3 |= (txRate2 << AR_XmitRate2_S);
268 	}
269 	if (txTries3) {
270 		HALASSERT(isValidTxRate(txRate3));
271 		ads->ds_ctl2 |= SM(txTries3, AR_XmitDataTries3);
272 		ads->ds_ctl3 |= (txRate3 << AR_XmitRate3_S);
273 	}
274 	return AH_TRUE;
275 }
276 
/*
 * Fill in the DMA-segment related fields of a tx descriptor: the
 * buffer length and the "more" linkage for multi-segment frames.
 * For non-first descriptors the interrupt-request bit and (on the
 * last segment) the rate/try words are copied from the frame's
 * first descriptor (ds0) so completion processing can find them.
 */
HAL_BOOL
ar5416FillTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int segLen, HAL_BOOL firstSeg, HAL_BOOL lastSeg,
	const struct ath_desc *ds0)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	/* segLen must fit entirely within the buffer-length field */
	HALASSERT((segLen &~ AR_BufLen) == 0);

	if (firstSeg) {
		/*
		 * First descriptor, don't clobber xmit control data
		 * setup by ar5212SetupTxDesc.
		 */
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
	} else if (lastSeg) {		/* !firstSeg && lastSeg */
		/*
		 * Last descriptor in a multi-descriptor frame,
		 * copy the multi-rate transmit parameters from
		 * the first frame for processing on completion.
		 */
		ads->ds_ctl1 = segLen;
#ifdef AH_NEED_DESC_SWAP
		/* ds0 is in hardware (swapped) byte order here */
		ads->ds_ctl0 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl0)
		    & AR_TxIntrReq;
		ads->ds_ctl2 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl2);
		ads->ds_ctl3 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl3);
#else
		ads->ds_ctl0 = AR5416DESC_CONST(ds0)->ds_ctl0 & AR_TxIntrReq;
		ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
#endif
	} else {			/* !firstSeg && !lastSeg */
		/*
		 * Intermediate descriptor in a multi-descriptor frame.
		 */
#ifdef AH_NEED_DESC_SWAP
		ads->ds_ctl0 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl0)
		    & AR_TxIntrReq;
#else
		ads->ds_ctl0 = AR5416DESC_CONST(ds0)->ds_ctl0 & AR_TxIntrReq;
#endif
		ads->ds_ctl1 = segLen | AR_TxMore;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	/* XXX only on last descriptor? */
	OS_MEMZERO(ads->u.tx.status, sizeof(ads->u.tx.status));
	return AH_TRUE;
}
327 
328 /*
329  * NB: cipher is no longer used, it's calculated.
330  */
331 HAL_BOOL
332 ar5416ChainTxDesc(struct ath_hal *ah, struct ath_desc *ds,
333 	u_int pktLen,
334 	u_int hdrLen,
335 	HAL_PKT_TYPE type,
336 	u_int keyIx,
337 	HAL_CIPHER cipher,
338 	uint8_t delims,
339 	u_int segLen,
340 	HAL_BOOL firstSeg,
341 	HAL_BOOL lastSeg)
342 {
343 	struct ar5416_desc *ads = AR5416DESC(ds);
344 	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);
345 	struct ath_hal_5416 *ahp = AH5416(ah);
346 
347 	int isaggr = 0;
348 
349 	(void) hdrLen;
350 	(void) ah;
351 
352 	HALASSERT((segLen &~ AR_BufLen) == 0);
353 
354 	HALASSERT(isValidPktType(type));
355 	if (type == HAL_PKT_TYPE_AMPDU) {
356 		type = HAL_PKT_TYPE_NORMAL;
357 		isaggr = 1;
358 	}
359 
360 	/*
361 	 * Since this function is called before any of the other
362 	 * descriptor setup functions (at least in this particular
363 	 * 802.11n aggregation implementation), always bzero() the
364 	 * descriptor. Previously this would be done for all but
365 	 * the first segment.
366 	 * XXX TODO: figure out why; perhaps I'm using this slightly
367 	 * XXX incorrectly.
368 	 */
369 	OS_MEMZERO(ds->ds_hw, AR5416_DESC_TX_CTL_SZ);
370 
371 	/*
372 	 * Note: VEOL should only be for the last descriptor in the chain.
373 	 */
374 	ads->ds_ctl0 = (pktLen & AR_FrameLen);
375 	ads->ds_ctl1 = (type << AR_FrameType_S)
376 			| (isaggr ? (AR_IsAggr | AR_MoreAggr) : 0);
377 	ads->ds_ctl2 = 0;
378 	ads->ds_ctl3 = 0;
379 	if (keyIx != HAL_TXKEYIX_INVALID) {
380 		/* XXX validate key index */
381 		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
382 		ads->ds_ctl0 |= AR_DestIdxValid;
383 	}
384 
385 	ads->ds_ctl6 |= SM(ahp->ah_keytype[keyIx], AR_EncrType);
386 	if (isaggr) {
387 		ads->ds_ctl6 |= SM(delims, AR_PadDelim);
388 	}
389 
390 	if (firstSeg) {
391 		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
392 	} else if (lastSeg) {           /* !firstSeg && lastSeg */
393 		ads->ds_ctl0 = 0;
394 		ads->ds_ctl1 |= segLen;
395 	} else {                        /* !firstSeg && !lastSeg */
396 		/*
397 		 * Intermediate descriptor in a multi-descriptor frame.
398 		 */
399 		ads->ds_ctl0 = 0;
400 		ads->ds_ctl1 |= segLen | AR_TxMore;
401 	}
402 	ds_txstatus[0] = ds_txstatus[1] = 0;
403 	ds_txstatus[9] &= ~AR_TxDone;
404 
405 	return AH_TRUE;
406 }
407 
408 HAL_BOOL
409 ar5416SetupFirstTxDesc(struct ath_hal *ah, struct ath_desc *ds,
410 	u_int aggrLen, u_int flags, u_int txPower,
411 	u_int txRate0, u_int txTries0, u_int antMode,
412 	u_int rtsctsRate, u_int rtsctsDuration)
413 {
414 #define RTSCTS  (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
415 	struct ar5416_desc *ads = AR5416DESC(ds);
416 	struct ath_hal_5212 *ahp = AH5212(ah);
417 
418 	HALASSERT(txTries0 != 0);
419 	HALASSERT(isValidTxRate(txRate0));
420 	HALASSERT((flags & RTSCTS) != RTSCTS);
421 	/* XXX validate antMode */
422 
423 	txPower = (txPower + ahp->ah_txPowerIndexOffset );
424 	if(txPower > 63)  txPower=63;
425 
426 	ads->ds_ctl0 |= (txPower << AR_XmitPower_S)
427 		| (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
428 		| (flags & HAL_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
429 		| (flags & HAL_TXDESC_INTREQ ? AR_TxIntrReq : 0);
430 	ads->ds_ctl1 |= (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0);
431 	ads->ds_ctl2 |= SM(txTries0, AR_XmitDataTries0);
432 	ads->ds_ctl3 |= (txRate0 << AR_XmitRate0_S);
433 	ads->ds_ctl7 = SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel0)
434 		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel1)
435 		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel2)
436 		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel3);
437 
438 	/* NB: no V1 WAR */
439 	ads->ds_ctl8 = SM(0, AR_AntCtl0);
440 	ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(txPower, AR_XmitPower1);
441 	ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(txPower, AR_XmitPower2);
442 	ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(txPower, AR_XmitPower3);
443 
444 	ads->ds_ctl6 &= ~(0xffff);
445 	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
446 
447 	if (flags & RTSCTS) {
448 		/* XXX validate rtsctsDuration */
449 		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
450 			| (flags & HAL_TXDESC_RTSENA ? AR_RTSEnable : 0);
451 	}
452 
453 	/*
454 	 * Set the TX antenna to 0 for Kite
455 	 * To preserve existing behaviour, also set the TPC bits to 0;
456 	 * when TPC is enabled these should be filled in appropriately.
457 	 */
458 	if (AR_SREV_KITE(ah)) {
459 		ads->ds_ctl8 = SM(0, AR_AntCtl0);
460 		ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(0, AR_XmitPower1);
461 		ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(0, AR_XmitPower2);
462 		ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(0, AR_XmitPower3);
463 	}
464 
465 	return AH_TRUE;
466 #undef RTSCTS
467 }
468 
469 HAL_BOOL
470 ar5416SetupLastTxDesc(struct ath_hal *ah, struct ath_desc *ds,
471 		const struct ath_desc *ds0)
472 {
473 	struct ar5416_desc *ads = AR5416DESC(ds);
474 
475 	ads->ds_ctl1 &= ~AR_MoreAggr;
476 	ads->ds_ctl6 &= ~AR_PadDelim;
477 
478 	/* hack to copy rate info to last desc for later processing */
479 #ifdef AH_NEED_DESC_SWAP
480 	ads->ds_ctl2 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl2);
481 	ads->ds_ctl3 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl3);
482 #else
483 	ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
484 	ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
485 #endif
486 
487 	return AH_TRUE;
488 }
489 
490 #ifdef AH_NEED_DESC_SWAP
491 /* Swap transmit descriptor */
492 static __inline void
493 ar5416SwapTxDesc(struct ath_desc *ds)
494 {
495 	ds->ds_data = __bswap32(ds->ds_data);
496 	ds->ds_ctl0 = __bswap32(ds->ds_ctl0);
497 	ds->ds_ctl1 = __bswap32(ds->ds_ctl1);
498 	ds->ds_hw[0] = __bswap32(ds->ds_hw[0]);
499 	ds->ds_hw[1] = __bswap32(ds->ds_hw[1]);
500 	ds->ds_hw[2] = __bswap32(ds->ds_hw[2]);
501 	ds->ds_hw[3] = __bswap32(ds->ds_hw[3]);
502 }
503 #endif
504 
505 /*
506  * Processing of HW TX descriptor.
507  */
/*
 * Decode the hardware tx completion status into *ts.
 *
 * Returns HAL_EINPROGRESS if the hardware has not yet marked the
 * descriptor done, otherwise HAL_OK with ts filled in.  Also bumps
 * the tx trigger level internally on FIFO/underrun errors.
 */
HAL_STATUS
ar5416ProcTxDesc(struct ath_hal *ah,
	struct ath_desc *ds, struct ath_tx_status *ts)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);

#ifdef AH_NEED_DESC_SWAP
	/* status is in hardware byte order until swapped below */
	if ((ds_txstatus[9] & __bswap32(AR_TxDone)) == 0)
		return HAL_EINPROGRESS;
	ar5416SwapTxDesc(ds);
#else
	if ((ds_txstatus[9] & AR_TxDone) == 0)
		return HAL_EINPROGRESS;
#endif

	/* Update software copies of the HW status */
	ts->ts_seqnum = MS(ds_txstatus[9], AR_SeqNum);
	ts->ts_tstamp = AR_SendTimestamp(ds_txstatus);
	ts->ts_tid = MS(ds_txstatus[9], AR_TxTid);

	/* Map hardware error bits to the HAL error codes */
	ts->ts_status = 0;
	if (ds_txstatus[1] & AR_ExcessiveRetries)
		ts->ts_status |= HAL_TXERR_XRETRY;
	if (ds_txstatus[1] & AR_Filtered)
		ts->ts_status |= HAL_TXERR_FILT;
	if (ds_txstatus[1] & AR_FIFOUnderrun)
		ts->ts_status |= HAL_TXERR_FIFO;
	if (ds_txstatus[9] & AR_TxOpExceeded)
		ts->ts_status |= HAL_TXERR_XTXOP;
	if (ds_txstatus[1] & AR_TxTimerExpired)
		ts->ts_status |= HAL_TXERR_TIMER_EXPIRED;

	/* Aggregation/block-ack related flags */
	ts->ts_flags  = 0;
	if (ds_txstatus[0] & AR_TxBaStatus) {
		ts->ts_flags |= HAL_TX_BA;
		ts->ts_ba_low = AR_BaBitmapLow(ds_txstatus);
		ts->ts_ba_high = AR_BaBitmapHigh(ds_txstatus);
	}
	if (ds->ds_ctl1 & AR_IsAggr)
		ts->ts_flags |= HAL_TX_AGGR;
	if (ds_txstatus[1] & AR_DescCfgErr)
		ts->ts_flags |= HAL_TX_DESC_CFG_ERR;
	if (ds_txstatus[1] & AR_TxDataUnderrun)
		ts->ts_flags |= HAL_TX_DATA_UNDERRUN;
	if (ds_txstatus[1] & AR_TxDelimUnderrun)
		ts->ts_flags |= HAL_TX_DELIM_UNDERRUN;

	/*
	 * Extract the transmit rate used and mark the rate as
	 * ``alternate'' if it wasn't the series 0 rate.
	 */
	ts->ts_finaltsi =  MS(ds_txstatus[9], AR_FinalTxIdx);
	switch (ts->ts_finaltsi) {
	case 0:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate1);
		break;
	case 2:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate2);
		break;
	case 3:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate3);
		break;
	}

	/* Per-chain control/extension RSSI and EVM readings */
	ts->ts_rssi = MS(ds_txstatus[5], AR_TxRSSICombined);
	ts->ts_rssi_ctl[0] = MS(ds_txstatus[0], AR_TxRSSIAnt00);
	ts->ts_rssi_ctl[1] = MS(ds_txstatus[0], AR_TxRSSIAnt01);
	ts->ts_rssi_ctl[2] = MS(ds_txstatus[0], AR_TxRSSIAnt02);
	ts->ts_rssi_ext[0] = MS(ds_txstatus[5], AR_TxRSSIAnt10);
	ts->ts_rssi_ext[1] = MS(ds_txstatus[5], AR_TxRSSIAnt11);
	ts->ts_rssi_ext[2] = MS(ds_txstatus[5], AR_TxRSSIAnt12);
	ts->ts_evm0 = AR_TxEVM0(ds_txstatus);
	ts->ts_evm1 = AR_TxEVM1(ds_txstatus);
	ts->ts_evm2 = AR_TxEVM2(ds_txstatus);

	ts->ts_shortretry = MS(ds_txstatus[1], AR_RTSFailCnt);
	ts->ts_longretry = MS(ds_txstatus[1], AR_DataFailCnt);
	/*
	 * The retry count has the number of un-acked tries for the
	 * final series used.  When doing multi-rate retry we must
	 * fixup the retry count by adding in the try counts for
	 * each series that was fully-processed.  Beware that this
	 * takes values from the try counts in the final descriptor.
	 * These are not required by the hardware.  We assume they
	 * are placed there by the driver as otherwise we have no
	 * access and the driver can't do the calculation because it
	 * doesn't know the descriptor format.
	 */
	switch (ts->ts_finaltsi) {
	case 3: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries2);
		/* FALLTHROUGH -- cumulative over earlier series */
	case 2: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries1);
		/* FALLTHROUGH */
	case 1: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries0);
	}

	/*
	 * ts_virtcol is filled from the descriptor and ts_antenna is
	 * zeroed to preserve compatibility with existing drivers.
	 */
	ts->ts_virtcol = MS(ads->ds_ctl1, AR_VirtRetryCnt);
	ts->ts_antenna = 0; /* We don't switch antennas on Owl*/

	/* handle tx trigger level changes internally */
	if ((ts->ts_status & HAL_TXERR_FIFO) ||
	    (ts->ts_flags & (HAL_TX_DATA_UNDERRUN | HAL_TX_DELIM_UNDERRUN)))
		ar5212UpdateTxTrigLevel(ah, AH_TRUE);

	return HAL_OK;
}
620 
621 HAL_BOOL
622 ar5416SetGlobalTxTimeout(struct ath_hal *ah, u_int tu)
623 {
624 	struct ath_hal_5416 *ahp = AH5416(ah);
625 
626 	if (tu > 0xFFFF) {
627 		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: bad global tx timeout %u\n",
628 		    __func__, tu);
629 		/* restore default handling */
630 		ahp->ah_globaltxtimeout = (u_int) -1;
631 		return AH_FALSE;
632 	}
633 	OS_REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu);
634 	ahp->ah_globaltxtimeout = tu;
635 	return AH_TRUE;
636 }
637 
638 u_int
639 ar5416GetGlobalTxTimeout(struct ath_hal *ah)
640 {
641 	return MS(OS_REG_READ(ah, AR_GTXTO), AR_GTXTO_TIMEOUT_LIMIT);
642 }
643 
644 void
645 ar5416Set11nRateScenario(struct ath_hal *ah, struct ath_desc *ds,
646         u_int durUpdateEn, u_int rtsctsRate,
647 	HAL_11N_RATE_SERIES series[], u_int nseries, u_int flags)
648 {
649 	struct ar5416_desc *ads = AR5416DESC(ds);
650 	uint32_t ds_ctl0;
651 
652 	HALASSERT(nseries == 4);
653 	(void)nseries;
654 
655 	/*
656 	 * XXX since the upper layers doesn't know the current chainmask
657 	 * XXX setup, just override its decisions here.
658 	 * XXX The upper layers need to be taught this!
659 	 */
660 	if (series[0].Tries != 0)
661 		series[0].ChSel = AH5416(ah)->ah_tx_chainmask;
662 	if (series[1].Tries != 0)
663 		series[1].ChSel = AH5416(ah)->ah_tx_chainmask;
664 	if (series[2].Tries != 0)
665 		series[2].ChSel = AH5416(ah)->ah_tx_chainmask;
666 	if (series[3].Tries != 0)
667 		series[3].ChSel = AH5416(ah)->ah_tx_chainmask;
668 
669 	/*
670 	 * Only one of RTS and CTS enable must be set.
671 	 * If a frame has both set, just do RTS protection -
672 	 * that's enough to satisfy legacy protection.
673 	 */
674 	if (flags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) {
675 		ds_ctl0 = ads->ds_ctl0;
676 
677 		if (flags & HAL_TXDESC_RTSENA) {
678 			ds_ctl0 &= ~AR_CTSEnable;
679 			ds_ctl0 |= AR_RTSEnable;
680 		} else {
681 			ds_ctl0 &= ~AR_RTSEnable;
682 			ds_ctl0 |= AR_CTSEnable;
683 		}
684 
685 		ads->ds_ctl0 = ds_ctl0;
686 	} else {
687 		ads->ds_ctl0 =
688 		    (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
689 	}
690 
691 	ads->ds_ctl2 = set11nTries(series, 0)
692 		     | set11nTries(series, 1)
693 		     | set11nTries(series, 2)
694 		     | set11nTries(series, 3)
695 		     | (durUpdateEn ? AR_DurUpdateEn : 0);
696 
697 	ads->ds_ctl3 = set11nRate(series, 0)
698 		     | set11nRate(series, 1)
699 		     | set11nRate(series, 2)
700 		     | set11nRate(series, 3);
701 
702 	ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
703 		     | set11nPktDurRTSCTS(series, 1);
704 
705 	ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
706 		     | set11nPktDurRTSCTS(series, 3);
707 
708 	ads->ds_ctl7 = set11nRateFlags(series, 0)
709 		     | set11nRateFlags(series, 1)
710 		     | set11nRateFlags(series, 2)
711 		     | set11nRateFlags(series, 3)
712 		     | SM(rtsctsRate, AR_RTSCTSRate);
713 }
714 
715 void
716 ar5416Set11nAggrFirst(struct ath_hal *ah, struct ath_desc *ds,
717     u_int aggrLen, u_int numDelims)
718 {
719 	struct ar5416_desc *ads = AR5416DESC(ds);
720 
721 	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
722 
723 	ads->ds_ctl6 &= ~(AR_AggrLen | AR_PadDelim);
724 	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen) |
725 	    SM(numDelims, AR_PadDelim);
726 }
727 
728 void
729 ar5416Set11nAggrMiddle(struct ath_hal *ah, struct ath_desc *ds, u_int numDelims)
730 {
731 	struct ar5416_desc *ads = AR5416DESC(ds);
732 	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);
733 
734 	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
735 
736 	ads->ds_ctl6 &= ~AR_PadDelim;
737 	ads->ds_ctl6 |= SM(numDelims, AR_PadDelim);
738 	ads->ds_ctl6 &= ~AR_AggrLen;
739 
740 	/*
741 	 * Clear the TxDone status here, may need to change
742 	 * func name to reflect this
743 	 */
744 	ds_txstatus[9] &= ~AR_TxDone;
745 }
746 
747 void
748 ar5416Set11nAggrLast(struct ath_hal *ah, struct ath_desc *ds)
749 {
750 	struct ar5416_desc *ads = AR5416DESC(ds);
751 
752 	ads->ds_ctl1 |= AR_IsAggr;
753 	ads->ds_ctl1 &= ~AR_MoreAggr;
754 	ads->ds_ctl6 &= ~AR_PadDelim;
755 }
756 
757 void
758 ar5416Clr11nAggr(struct ath_hal *ah, struct ath_desc *ds)
759 {
760 	struct ar5416_desc *ads = AR5416DESC(ds);
761 
762 	ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
763 	ads->ds_ctl6 &= ~AR_PadDelim;
764 	ads->ds_ctl6 &= ~AR_AggrLen;
765 }
766 
767 void
768 ar5416Set11nBurstDuration(struct ath_hal *ah, struct ath_desc *ds,
769                                                   u_int burstDuration)
770 {
771 	struct ar5416_desc *ads = AR5416DESC(ds);
772 
773 	ads->ds_ctl2 &= ~AR_BurstDur;
774 	ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
775 }
776 
777 /*
778  * Retrieve the rate table from the given TX completion descriptor
779  */
780 HAL_BOOL
781 ar5416GetTxCompletionRates(struct ath_hal *ah, const struct ath_desc *ds0, int *rates, int *tries)
782 {
783 	const struct ar5416_desc *ads = AR5416DESC_CONST(ds0);
784 
785 	rates[0] = MS(ads->ds_ctl3, AR_XmitRate0);
786 	rates[1] = MS(ads->ds_ctl3, AR_XmitRate1);
787 	rates[2] = MS(ads->ds_ctl3, AR_XmitRate2);
788 	rates[3] = MS(ads->ds_ctl3, AR_XmitRate3);
789 
790 	tries[0] = MS(ads->ds_ctl2, AR_XmitDataTries0);
791 	tries[1] = MS(ads->ds_ctl2, AR_XmitDataTries1);
792 	tries[2] = MS(ads->ds_ctl2, AR_XmitDataTries2);
793 	tries[3] = MS(ads->ds_ctl2, AR_XmitDataTries3);
794 
795 	return AH_TRUE;
796 }
797 
798 
799 /*
800  * TX queue management routines - AR5416 and later chipsets
801  */
802 
803 /*
804  * Allocate and initialize a tx DCU/QCU combination.
805  */
/*
 * Returns the allocated queue number, or -1 if the requested queue
 * type is unavailable or invalid.  NB: the queue is only software
 * initialized here; ar5212ResetTxQueue must follow before use.
 */
int
ar5416SetupTxQueue(struct ath_hal *ah, HAL_TX_QUEUE type,
	const HAL_TXQ_INFO *qInfo)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_TX_QUEUE_INFO *qi;
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	int q, defqflags;

	/* by default enable OK+ERR+DESC+URN interrupts */
	defqflags = HAL_TXQ_TXOKINT_ENABLE
		  | HAL_TXQ_TXERRINT_ENABLE
		  | HAL_TXQ_TXDESCINT_ENABLE
		  | HAL_TXQ_TXURNINT_ENABLE;
	/* XXX move queue assignment to driver */
	switch (type) {
	case HAL_TX_QUEUE_BEACON:
		q = pCap->halTotalQueues-1;	/* highest priority */
		defqflags |= HAL_TXQ_DBA_GATED
		       | HAL_TXQ_CBR_DIS_QEMPTY
		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
		       | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_CAB:
		q = pCap->halTotalQueues-2;	/* next highest priority */
		defqflags |= HAL_TXQ_DBA_GATED
		       | HAL_TXQ_CBR_DIS_QEMPTY
		       | HAL_TXQ_CBR_DIS_BEMPTY
		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
		       | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_PSPOLL:
		q = 1;				/* lowest priority */
		defqflags |= HAL_TXQ_DBA_GATED
		       | HAL_TXQ_CBR_DIS_QEMPTY
		       | HAL_TXQ_CBR_DIS_BEMPTY
		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
		       | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_UAPSD:
		q = pCap->halTotalQueues-3;	/* next highest priority */
		if (ahp->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: no available UAPSD tx queue\n", __func__);
			return -1;
		}
		break;
	case HAL_TX_QUEUE_DATA:
		/* data queues: first inactive slot from the bottom */
		for (q = 0; q < pCap->halTotalQueues; q++)
			if (ahp->ah_txq[q].tqi_type == HAL_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->halTotalQueues) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: no available tx queue\n", __func__);
			return -1;
		}
		break;
	default:
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: bad tx queue type %u\n", __func__, type);
		return -1;
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type != HAL_TX_QUEUE_INACTIVE) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: tx queue %u already active\n",
		    __func__, q);
		return -1;
	}
	OS_MEMZERO(qi, sizeof(HAL_TX_QUEUE_INFO));
	qi->tqi_type = type;
	if (qInfo == AH_NULL) {
		/* no caller-supplied config: apply defaults */
		qi->tqi_qflags = defqflags;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = HAL_TXQ_USEDEFAULT;	/* NB: do at reset */
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qInfo->tqi_compBuf;
		(void) ar5212SetTxQueueProps(ah, q, qInfo);
	}
	/* NB: must be followed by ar5212ResetTxQueue */
	return q;
}
894 
895 /*
896  * Update the h/w interrupt registers to reflect a tx q's configuration.
897  */
static void
setTxQInterrupts(struct ath_hal *ah, HAL_TX_QUEUE_INFO *qi)
{
	struct ath_hal_5212 *ahp = AH5212(ah);

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
	    "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", __func__,
	    ahp->ah_txOkInterruptMask, ahp->ah_txErrInterruptMask,
	    ahp->ah_txDescInterruptMask, ahp->ah_txEolInterruptMask,
	    ahp->ah_txUrnInterruptMask);

	/* TXOK + TXDESC per-queue masks share secondary IMR register 0 */
	OS_REG_WRITE(ah, AR_IMR_S0,
		  SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
		| SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC)
	);
	/* TXERR + TXEOL per-queue masks share secondary IMR register 1 */
	OS_REG_WRITE(ah, AR_IMR_S1,
		  SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
		| SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL)
	);
	/* NB: read-modify-write to preserve the other AR_IMR_S2 bits */
	OS_REG_RMW_FIELD(ah, AR_IMR_S2,
		AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
}
920 
921 /*
922  * Set the retry, aifs, cwmin/max, readyTime regs for specified queue
923  * Assumes:
924  *  phwChannel has been set to point to the current channel
925  */
/* NB: 1 TU = 1024 usec, hence the shift by 10. */
#define	TU_TO_USEC(_tu)		((_tu) << 10)
HAL_BOOL
ar5416ResetTxQueue(struct ath_hal *ah, u_int q)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	const struct ieee80211_channel *chan = AH_PRIVATE(ah)->ah_curchan;
	HAL_TX_QUEUE_INFO *qi;
	uint32_t cwMin, chanCwMin, qmisc, dmisc;

	/* Reject queue numbers beyond what the hardware advertises. */
	if (q >= pCap->halTotalQueues) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
		    __func__, q);
		return AH_FALSE;
	}
	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
		/* Nothing to program for an unconfigured queue. */
		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
		    __func__, q);
		return AH_TRUE;		/* XXX??? */
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: reset queue %u\n", __func__, q);

	if (qi->tqi_cwmin == HAL_TXQ_USEDEFAULT) {
		/*
		 * Select cwmin according to channel type.
		 * NB: chan can be NULL during attach
		 */
		if (chan && IEEE80211_IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;
		/* make sure that the CWmin is of the form (2^n - 1) */
		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1)
			;
	} else
		cwMin = qi->tqi_cwmin;

	/* set cwMin/Max and AIFS values */
	OS_REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN)
		| SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
		| SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	/* Set retry limit values */
	OS_REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		   SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
		 | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
		 | SM(qi->tqi_lgretry, AR_D_RETRY_LIMIT_FR_LG)
		 | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)
	);

	/* NB: always enable early termination on the QCU */
	qmisc = AR_Q_MISC_DCU_EARLY_TERM_REQ
	      | SM(AR_Q_MISC_FSP_ASAP, AR_Q_MISC_FSP);

	/* NB: always enable DCU to wait for next fragment from QCU */
	dmisc = AR_D_MISC_FRAG_WAIT_EN;

	/* Enable exponential backoff window */
	dmisc |= AR_D_MISC_BKOFF_PERSISTENCE;

	/*
	 * The chip reset default is to use a DCU backoff threshold of 0x2.
	 * Restore this when programming the DCU MISC register.
	 */
	dmisc |= 0x2;

	/* multiqueue support */
	if (qi->tqi_cbrPeriod) {
		/* Constant-bit-rate scheduling: program interval/overflow. */
		OS_REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod,AR_Q_CBRCFG_CBR_INTERVAL)
			| SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_CBR_OVF_THRESH));
		/* Switch frame scheduling policy from ASAP to CBR. */
		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_CBR;
		if (qi->tqi_cbrOverflowLimit)
			qmisc |= AR_Q_MISC_CBR_EXP_CNTR_LIMIT;
	}

	/* NB: the CAB queue gets its ready time set in the switch below. */
	if (qi->tqi_readyTime && (qi->tqi_type != HAL_TX_QUEUE_CAB)) {
		OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT)
			| AR_Q_RDYTIMECFG_ENA);
	}

	/* Burst (channel-time) limit; enable only if a duration is set. */
	OS_REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR)
		| (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_readyTime &&
	    (qi->tqi_qflags & HAL_TXQ_RDYTIME_EXP_POLICY_ENABLE))
		qmisc |= AR_Q_MISC_RDYTIME_EXP_POLICY;
	if (qi->tqi_qflags & HAL_TXQ_DBA_GATED)
		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_DBA_GATED;
	if (MS(qmisc, AR_Q_MISC_FSP) != AR_Q_MISC_FSP_ASAP) {
		/*
		 * These are meaningful only when not scheduled asap.
		 */
		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_BEMPTY)
			qmisc |= AR_Q_MISC_CBR_INCR_DIS0;
		else
			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS0;
		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_QEMPTY)
			qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
		else
			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS1;
	}

	/* Translate remaining driver-requested queue flags to DCU bits. */
	if (qi->tqi_qflags & HAL_TXQ_BACKOFF_DISABLE)
		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
	if (qi->tqi_qflags & HAL_TXQ_FRAG_BURST_BACKOFF_ENABLE)
		dmisc |= AR_D_MISC_FRAG_BKOFF_EN;
	/* NB: GLOBAL lockout takes precedence over INTRA when both are set. */
	if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_GLOBAL)
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
	else if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_INTRA)
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
	if (qi->tqi_qflags & HAL_TXQ_IGNORE_VIRTCOL)
		dmisc |= SM(AR_D_MISC_VIR_COL_HANDLING_IGNORE,
			    AR_D_MISC_VIR_COL_HANDLING);
	if (qi->tqi_qflags & HAL_TXQ_SEQNUM_INC_DIS)
		dmisc |= AR_D_MISC_SEQ_NUM_INCR_DIS;

	/*
	 * Fill in type-dependent bits.  Most of this can be
	 * removed by specifying the queue parameters in the
	 * driver; it's here for backwards compatibility.
	 */
	switch (qi->tqi_type) {
	case HAL_TX_QUEUE_BEACON:		/* beacon frames */
		qmisc |= AR_Q_MISC_FSP_DBA_GATED
		      |  AR_Q_MISC_BEACON_USE
		      |  AR_Q_MISC_CBR_INCR_DIS1;

		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL)
		      |  AR_D_MISC_BEACON_USE
		      |  AR_D_MISC_POST_FR_BKOFF_DIS;
		break;
	case HAL_TX_QUEUE_CAB:			/* CAB  frames */
		/*
		 * No longer Enable AR_Q_MISC_RDYTIME_EXP_POLICY,
		 * There is an issue with the CAB Queue
		 * not properly refreshing the Tx descriptor if
		 * the TXE clear setting is used.
		 */
		qmisc |= AR_Q_MISC_FSP_DBA_GATED
		      |  AR_Q_MISC_CBR_INCR_DIS1
		      |  AR_Q_MISC_CBR_INCR_DIS0;
		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: CAB: tqi_readyTime = %d\n",
		    __func__, qi->tqi_readyTime);
		if (qi->tqi_readyTime) {
			/* Driver supplied an explicit CAB ready time. */
			HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
			    "%s: using tqi_readyTime\n", __func__);
			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
			    SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT) |
			    AR_Q_RDYTIMECFG_ENA);
		} else {
			int value;
			/*
			 * NB: don't set default ready time if driver
			 * has explicitly specified something.  This is
			 * here solely for backwards compatibility.
			 */
			/*
			 * XXX for now, hard-code a CAB interval of 70%
			 * XXX of the total beacon interval.
			 *
			 * XXX This keeps Merlin and later based MACs
			 * XXX quite a bit happier (stops stuck beacons,
			 * XXX which I gather is because of such a long
			 * XXX cabq time.)
			 */
			value = (ahp->ah_beaconInterval * 70 / 100)
				- (ah->ah_config.ah_sw_beacon_response_time
				+ ah->ah_config.ah_dma_beacon_response_time)
				- ah->ah_config.ah_additional_swba_backoff;
			/*
			 * XXX Ensure it isn't too low - nothing lower
			 * XXX than 10 TU
			 */
			if (value < 10)
				value = 10;
			/*
			 * NOTE(review): value is still in TU at this point
			 * (the TU_TO_USEC conversion happens only in the
			 * register write below), so the "uS" in this debug
			 * message appears misleading — confirm intent.
			 */
			HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
			    "%s: defaulting to rdytime = %d uS\n",
			    __func__, value);
			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
			    SM(TU_TO_USEC(value), AR_Q_RDYTIMECFG_INT) |
			    AR_Q_RDYTIMECFG_ENA);
		}
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
		break;
	case HAL_TX_QUEUE_PSPOLL:
		qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
		break;
	case HAL_TX_QUEUE_UAPSD:
		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
		break;
	default:			/* NB: silence compiler */
		break;
	}

	/* Commit the accumulated QCU and DCU misc settings. */
	OS_REG_WRITE(ah, AR_QMISC(q), qmisc);
	OS_REG_WRITE(ah, AR_DMISC(q), dmisc);

	/* Setup compression scratchpad buffer */
	/*
	 * XXX: calling this asynchronously to queue operation can
	 *      cause unexpected behavior!!!
	 */
	if (qi->tqi_physCompBuf) {
		/* Compression is only expected on data/UAPSD queues. */
		HALASSERT(qi->tqi_type == HAL_TX_QUEUE_DATA ||
			  qi->tqi_type == HAL_TX_QUEUE_UAPSD);
		OS_REG_WRITE(ah, AR_Q_CBBS, (80 + 2*q));
		OS_REG_WRITE(ah, AR_Q_CBBA, qi->tqi_physCompBuf);
		OS_REG_WRITE(ah, AR_Q_CBC,  HAL_COMP_BUF_MAX_SIZE/1024);
		OS_REG_WRITE(ah, AR_Q0_MISC + 4*q,
			     OS_REG_READ(ah, AR_Q0_MISC + 4*q)
			     | AR_Q_MISC_QCU_COMP_EN);
	}

	/*
	 * Always update the secondary interrupt mask registers - this
	 * could be a new queue getting enabled in a running system or
	 * hw getting re-initialized during a reset!
	 *
	 * Since we don't differentiate between tx interrupts corresponding
	 * to individual queues - secondary tx mask regs are always unmasked;
	 * tx interrupts are enabled/disabled for all queues collectively
	 * using the primary mask reg
	 */
	/* Update the cached per-queue bit in each softc interrupt mask. */
	if (qi->tqi_qflags & HAL_TXQ_TXOKINT_ENABLE)
		ahp->ah_txOkInterruptMask |= 1 << q;
	else
		ahp->ah_txOkInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXERRINT_ENABLE)
		ahp->ah_txErrInterruptMask |= 1 << q;
	else
		ahp->ah_txErrInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXDESCINT_ENABLE)
		ahp->ah_txDescInterruptMask |= 1 << q;
	else
		ahp->ah_txDescInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXEOLINT_ENABLE)
		ahp->ah_txEolInterruptMask |= 1 << q;
	else
		ahp->ah_txEolInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXURNINT_ENABLE)
		ahp->ah_txUrnInterruptMask |= 1 << q;
	else
		ahp->ah_txUrnInterruptMask &= ~(1 << q);
	/* Push the updated masks to the hardware. */
	setTxQInterrupts(ah, qi);

	return AH_TRUE;
}
#undef	TU_TO_USEC
1184