xref: /freebsd/sys/dev/ath/ath_hal/ar5416/ar5416_xmit.c (revision 641a6cfb86023499caafe26a4d821a0b885cf00b)
1 /*
2  * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3  * Copyright (c) 2002-2008 Atheros Communications, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  *
17  * $FreeBSD$
18  */
19 #include "opt_ah.h"
20 
21 #include "ah.h"
22 #include "ah_desc.h"
23 #include "ah_internal.h"
24 
25 #include "ar5416/ar5416.h"
26 #include "ar5416/ar5416reg.h"
27 #include "ar5416/ar5416phy.h"
28 #include "ar5416/ar5416desc.h"
29 
30 /*
31  * Stop transmit on the specified queue
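 *
 * The queue's DMA is first asked to stop via AR_Q_TXD and the pending
 * frame count is polled; if frames are still pending, the PCU quiet-time
 * mechanism is used to kill the frame currently being transmitted before
 * polling again.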
32  */
33 HAL_BOOL
34 ar5416StopTxDma(struct ath_hal *ah, u_int q)
35 {
36 #define	STOP_DMA_TIMEOUT	4000	/* us */
37 #define	STOP_DMA_ITER		100	/* us */
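	/* NB: poll budget is STOP_DMA_TIMEOUT / STOP_DMA_ITER = 40 iterations */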
38 	u_int i;
39 
40 	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
41 
42 	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
43 
44 	OS_REG_WRITE(ah, AR_Q_TXD, 1 << q);
45 	for (i = STOP_DMA_TIMEOUT/STOP_DMA_ITER; i != 0; i--) {
46 		if (ar5212NumTxPending(ah, q) == 0)
47 			break;
48 		OS_DELAY(STOP_DMA_ITER);
49 	}
50 #ifdef AH_DEBUG
51 	if (i == 0) {
52 		HALDEBUG(ah, HAL_DEBUG_ANY,
53 		    "%s: queue %u DMA did not stop in 4ms\n", __func__, q);
54 		HALDEBUG(ah, HAL_DEBUG_ANY,
55 		    "%s: QSTS 0x%x Q_TXE 0x%x Q_TXD 0x%x Q_CBR 0x%x\n", __func__,
56 		    OS_REG_READ(ah, AR_QSTS(q)), OS_REG_READ(ah, AR_Q_TXE),
57 		    OS_REG_READ(ah, AR_Q_TXD), OS_REG_READ(ah, AR_QCBRCFG(q)));
58 		HALDEBUG(ah, HAL_DEBUG_ANY,
59 		    "%s: Q_MISC 0x%x Q_RDYTIMECFG 0x%x Q_RDYTIMESHDN 0x%x\n",
60 		    __func__, OS_REG_READ(ah, AR_QMISC(q)),
61 		    OS_REG_READ(ah, AR_QRDYTIMECFG(q)),
62 		    OS_REG_READ(ah, AR_Q_RDYTIMESHDN));
63 	}
64 #endif /* AH_DEBUG */
65 
66 	/* ar5416 and up can kill packets at the PCU level */
67 	if (ar5212NumTxPending(ah, q)) {
68 		uint32_t j;
69 
70 		HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
71 		    "%s: Num of pending TX Frames %d on Q %d\n",
72 		    __func__, ar5212NumTxPending(ah, q), q);
73 
74 		/* Kill last PCU Tx Frame */
75 		/* TODO - save off and restore current values of Q1/Q2? */
76 		for (j = 0; j < 2; j++) {
77 			uint32_t tsfLow = OS_REG_READ(ah, AR_TSF_L32);
78 			OS_REG_WRITE(ah, AR_QUIET2,
79 			    SM(10, AR_QUIET2_QUIET_DUR));
80 			OS_REG_WRITE(ah, AR_QUIET_PERIOD, 100);
81 			OS_REG_WRITE(ah, AR_NEXT_QUIET, tsfLow >> 10);
82 			OS_REG_SET_BIT(ah, AR_TIMER_MODE, AR_TIMER_MODE_QUIET);
83 
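			/*
			 * If the TSF is still in the same TU (1024us) that
			 * was sampled above, the quiet-time start programmed
			 * from tsfLow is still valid; otherwise the TSF has
			 * rolled into the next TU, so retry once with a
			 * fresh sample.
			 */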
84 			if ((OS_REG_READ(ah, AR_TSF_L32)>>10) == (tsfLow>>10))
85 				break;
86 
87 			HALDEBUG(ah, HAL_DEBUG_ANY,
88 			    "%s: TSF moved while trying to set quiet time "
89 			    "TSF: 0x%08x\n", __func__, tsfLow);
90 			HALASSERT(j < 1); /* TSF shouldn't count twice or reg access is taking forever */
91 		}
92 
93 		OS_REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
94 
95 		/* Allow the quiet mechanism to do its work */
96 		OS_DELAY(200);
97 		OS_REG_CLR_BIT(ah, AR_TIMER_MODE, AR_TIMER_MODE_QUIET);
98 
99 		/* Verify the transmit q is empty */
100 		for (i = STOP_DMA_TIMEOUT/STOP_DMA_ITER; i != 0; i--) {
101 			if (ar5212NumTxPending(ah, q) == 0)
102 				break;
103 			OS_DELAY(STOP_DMA_ITER);
104 		}
105 		if (i == 0) {
106 			HALDEBUG(ah, HAL_DEBUG_ANY,
107 			    "%s: Failed to stop Tx DMA in %d msec after killing"
108 			    " last frame\n", __func__, STOP_DMA_TIMEOUT / 1000);
109 		}
110 		OS_REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
111 	}
112 
113 	OS_REG_WRITE(ah, AR_Q_TXD, 0);
114 	return (i != 0);
115 #undef STOP_DMA_ITER
116 #undef STOP_DMA_TIMEOUT
117 }
118 
119 #define VALID_KEY_TYPES \
120         ((1 << HAL_KEY_TYPE_CLEAR) | (1 << HAL_KEY_TYPE_WEP)|\
121          (1 << HAL_KEY_TYPE_AES)   | (1 << HAL_KEY_TYPE_TKIP))
122 #define isValidKeyType(_t)      ((1 << (_t)) & VALID_KEY_TYPES)
123 
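/*
 * Helpers used by ar5416Set11nRateScenario() below to build the
 * per-series fields of the rate scenario: try counts, rate codes,
 * packet duration / RTS-CTS qualifiers, and rate flags (40MHz,
 * half-GI, chain select).
 */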
124 #define set11nTries(_series, _index) \
125         (SM((_series)[_index].Tries, AR_XmitDataTries##_index))
126 
127 #define set11nRate(_series, _index) \
128         (SM((_series)[_index].Rate, AR_XmitRate##_index))
129 
130 #define set11nPktDurRTSCTS(_series, _index) \
131         (SM((_series)[_index].PktDuration, AR_PacketDur##_index) |\
132          ((_series)[_index].RateFlags & HAL_RATESERIES_RTS_CTS   ?\
133          AR_RTSCTSQual##_index : 0))
134 
135 #define set11nRateFlags(_series, _index) \
136         ((_series)[_index].RateFlags & HAL_RATESERIES_2040 ? AR_2040_##_index : 0) \
137         |((_series)[_index].RateFlags & HAL_RATESERIES_HALFGI ? AR_GI##_index : 0) \
138         |SM((_series)[_index].ChSel, AR_ChainSel##_index)
139 
140 /*
141  * Descriptor Access Functions
142  */
143 
144 #define VALID_PKT_TYPES \
145         ((1<<HAL_PKT_TYPE_NORMAL)|(1<<HAL_PKT_TYPE_ATIM)|\
146          (1<<HAL_PKT_TYPE_PSPOLL)|(1<<HAL_PKT_TYPE_PROBE_RESP)|\
147          (1<<HAL_PKT_TYPE_BEACON)|(1<<HAL_PKT_TYPE_AMPDU))
148 #define isValidPktType(_t)      ((1<<(_t)) & VALID_PKT_TYPES)
149 #define VALID_TX_RATES \
150         ((1<<0x0b)|(1<<0x0f)|(1<<0x0a)|(1<<0x0e)|(1<<0x09)|(1<<0x0d)|\
151          (1<<0x08)|(1<<0x0c)|(1<<0x1b)|(1<<0x1a)|(1<<0x1e)|(1<<0x19)|\
152 	 (1<<0x1d)|(1<<0x18)|(1<<0x1c)|(1<<0x01)|(1<<0x02)|(1<<0x03)|\
153 	 (1<<0x04)|(1<<0x05)|(1<<0x06)|(1<<0x07)|(1<<0x00))
154 /* NB: accept HT rates */
155 #define	isValidTxRate(_r)	((1<<((_r) & 0x7f)) & VALID_TX_RATES)
156 
157 HAL_BOOL
158 ar5416SetupTxDesc(struct ath_hal *ah, struct ath_desc *ds,
159 	u_int pktLen,
160 	u_int hdrLen,
161 	HAL_PKT_TYPE type,
162 	u_int txPower,
163 	u_int txRate0, u_int txTries0,
164 	u_int keyIx,
165 	u_int antMode,
166 	u_int flags,
167 	u_int rtsctsRate,
168 	u_int rtsctsDuration,
169 	u_int compicvLen,
170 	u_int compivLen,
171 	u_int comp)
172 {
173 #define	RTSCTS	(HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
174 	struct ar5416_desc *ads = AR5416DESC(ds);
175 	struct ath_hal_5416 *ahp = AH5416(ah);
176 
177 	(void) hdrLen;
178 
179 	HALASSERT(txTries0 != 0);
180 	HALASSERT(isValidPktType(type));
181 	HALASSERT(isValidTxRate(txRate0));
182 	HALASSERT((flags & RTSCTS) != RTSCTS);
183 	/* XXX validate antMode */
184 
185 	txPower = (txPower + AH5212(ah)->ah_txPowerIndexOffset);
186 	if (txPower > 63)
187 		txPower = 63;
188 
189 	ads->ds_ctl0 = (pktLen & AR_FrameLen)
190 		     | (txPower << AR_XmitPower_S)
191 		     | (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
192 		     | (flags & HAL_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
193 		     | (flags & HAL_TXDESC_INTREQ ? AR_TxIntrReq : 0)
194 		     ;
195 	ads->ds_ctl1 = (type << AR_FrameType_S)
196 		     | (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0)
197                      ;
198 	ads->ds_ctl2 = SM(txTries0, AR_XmitDataTries0)
199 		     | (flags & HAL_TXDESC_DURENA ? AR_DurUpdateEn : 0)
200 		     ;
201 	ads->ds_ctl3 = (txRate0 << AR_XmitRate0_S)
202 		     ;
203 	ads->ds_ctl4 = 0;
204 	ads->ds_ctl5 = 0;
205 	ads->ds_ctl6 = 0;
206 	ads->ds_ctl7 = SM(ahp->ah_tx_chainmask, AR_ChainSel0)
207 		     | SM(ahp->ah_tx_chainmask, AR_ChainSel1)
208 		     | SM(ahp->ah_tx_chainmask, AR_ChainSel2)
209 		     | SM(ahp->ah_tx_chainmask, AR_ChainSel3)
210 		     ;
211 	ads->ds_ctl8 = SM(0, AR_AntCtl0);
212 	ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(txPower, AR_XmitPower1);
213 	ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(txPower, AR_XmitPower2);
214 	ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(txPower, AR_XmitPower3);
215 
216 	if (keyIx != HAL_TXKEYIX_INVALID) {
217 		/* XXX validate key index */
218 		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
219 		ads->ds_ctl0 |= AR_DestIdxValid;
220 		ads->ds_ctl6 |= SM(ahp->ah_keytype[keyIx], AR_EncrType);
221 	}
222 	if (flags & RTSCTS) {
223 		if (!isValidTxRate(rtsctsRate)) {
224 			HALDEBUG(ah, HAL_DEBUG_ANY,
225 			    "%s: invalid rts/cts rate 0x%x\n",
226 			    __func__, rtsctsRate);
227 			return AH_FALSE;
228 		}
229 		/* XXX validate rtsctsDuration */
230 		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
231 			     | (flags & HAL_TXDESC_RTSENA ? AR_RTSEnable : 0)
232 			     ;
233 		ads->ds_ctl7 |= (rtsctsRate << AR_RTSCTSRate_S);
234 	}
235 
236 	/*
237 	 * Set the TX antenna to 0 for Kite
238 	 * To preserve existing behaviour, also set the TPC bits to 0;
239 	 * when TPC is enabled these should be filled in appropriately.
240 	 */
241 	if (AR_SREV_KITE(ah)) {
242 		ads->ds_ctl8 = SM(0, AR_AntCtl0);
243 		ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(0, AR_XmitPower1);
244 		ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(0, AR_XmitPower2);
245 		ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(0, AR_XmitPower3);
246 	}
247 	return AH_TRUE;
248 #undef RTSCTS
249 }
250 
251 HAL_BOOL
252 ar5416SetupXTxDesc(struct ath_hal *ah, struct ath_desc *ds,
253 	u_int txRate1, u_int txTries1,
254 	u_int txRate2, u_int txTries2,
255 	u_int txRate3, u_int txTries3)
256 {
257 	struct ar5416_desc *ads = AR5416DESC(ds);
258 
259 	if (txTries1) {
260 		HALASSERT(isValidTxRate(txRate1));
261 		ads->ds_ctl2 |= SM(txTries1, AR_XmitDataTries1);
262 		ads->ds_ctl3 |= (txRate1 << AR_XmitRate1_S);
263 	}
264 	if (txTries2) {
265 		HALASSERT(isValidTxRate(txRate2));
266 		ads->ds_ctl2 |= SM(txTries2, AR_XmitDataTries2);
267 		ads->ds_ctl3 |= (txRate2 << AR_XmitRate2_S);
268 	}
269 	if (txTries3) {
270 		HALASSERT(isValidTxRate(txRate3));
271 		ads->ds_ctl2 |= SM(txTries3, AR_XmitDataTries3);
272 		ads->ds_ctl3 |= (txRate3 << AR_XmitRate3_S);
273 	}
274 	return AH_TRUE;
275 }
276 
277 HAL_BOOL
278 ar5416FillTxDesc(struct ath_hal *ah, struct ath_desc *ds,
279 	u_int segLen, HAL_BOOL firstSeg, HAL_BOOL lastSeg,
280 	const struct ath_desc *ds0)
281 {
282 	struct ar5416_desc *ads = AR5416DESC(ds);
283 
284 	HALASSERT((segLen &~ AR_BufLen) == 0);
285 
286 	if (firstSeg) {
287 		/*
288 		 * First descriptor, don't clobber xmit control data
289 		 * set up by ar5416SetupTxDesc.
290 		 */
291 		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
292 	} else if (lastSeg) {		/* !firstSeg && lastSeg */
293 		/*
294 		 * Last descriptor in a multi-descriptor frame,
295 		 * copy the multi-rate transmit parameters from
296 		 * the first frame for processing on completion.
297 		 */
298 		ads->ds_ctl1 = segLen;
299 #ifdef AH_NEED_DESC_SWAP
300 		ads->ds_ctl0 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl0)
301 		    & AR_TxIntrReq;
302 		ads->ds_ctl2 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl2);
303 		ads->ds_ctl3 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl3);
304 #else
305 		ads->ds_ctl0 = AR5416DESC_CONST(ds0)->ds_ctl0 & AR_TxIntrReq;
306 		ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
307 		ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
308 #endif
309 	} else {			/* !firstSeg && !lastSeg */
310 		/*
311 		 * Intermediate descriptor in a multi-descriptor frame.
312 		 */
313 #ifdef AH_NEED_DESC_SWAP
314 		ads->ds_ctl0 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl0)
315 		    & AR_TxIntrReq;
316 #else
317 		ads->ds_ctl0 = AR5416DESC_CONST(ds0)->ds_ctl0 & AR_TxIntrReq;
318 #endif
319 		ads->ds_ctl1 = segLen | AR_TxMore;
320 		ads->ds_ctl2 = 0;
321 		ads->ds_ctl3 = 0;
322 	}
323 	/* XXX only on last descriptor? */
324 	OS_MEMZERO(ads->u.tx.status, sizeof(ads->u.tx.status));
325 	return AH_TRUE;
326 }
327 
328 /*
329  * NB: cipher is no longer used, it's calculated.
330  */
331 HAL_BOOL
332 ar5416ChainTxDesc(struct ath_hal *ah, struct ath_desc *ds,
333 	u_int pktLen,
334 	u_int hdrLen,
335 	HAL_PKT_TYPE type,
336 	u_int keyIx,
337 	HAL_CIPHER cipher,
338 	uint8_t delims,
339 	u_int segLen,
340 	HAL_BOOL firstSeg,
341 	HAL_BOOL lastSeg,
342 	HAL_BOOL lastAggr)
343 {
344 	struct ar5416_desc *ads = AR5416DESC(ds);
345 	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);
346 	struct ath_hal_5416 *ahp = AH5416(ah);
347 
348 	int isaggr = 0;
349 	uint32_t last_aggr = 0;
350 
351 	(void) hdrLen;
352 	(void) ah;
353 
354 	HALASSERT((segLen &~ AR_BufLen) == 0);
355 
356 	HALASSERT(isValidPktType(type));
357 	if (type == HAL_PKT_TYPE_AMPDU) {
358 		type = HAL_PKT_TYPE_NORMAL;
359 		isaggr = 1;
360 		if (lastAggr == AH_FALSE)
361 			last_aggr = AR_MoreAggr;
362 	}
363 
364 	/*
365 	 * Since this function is called before any of the other
366 	 * descriptor setup functions (at least in this particular
367 	 * 802.11n aggregation implementation), always bzero() the
368 	 * descriptor. Previously this would be done for all but
369 	 * the first segment.
370 	 * XXX TODO: figure out why; perhaps I'm using this slightly
371 	 * XXX incorrectly.
372 	 */
373 	OS_MEMZERO(ds->ds_hw, AR5416_DESC_TX_CTL_SZ);
374 
375 	/*
376 	 * Note: VEOL should only be for the last descriptor in the chain.
377 	 */
378 	ads->ds_ctl0 = (pktLen & AR_FrameLen);
379 
380 	/*
381 	 * For aggregates:
382 	 * + IsAggr must be set for all descriptors of all subframes of
383 	 *   the aggregate
384 	 * + MoreAggr must be set for all descriptors of all subframes
385 	 *   of the aggregate EXCEPT the last subframe;
386 	 * + MoreAggr must be _CLEAR_ for all descriptors of the last
387 	 *   subframe of the aggregate.
388 	 */
389 	ads->ds_ctl1 = (type << AR_FrameType_S)
390 			| (isaggr ? (AR_IsAggr | last_aggr) : 0);
391 
392 	ads->ds_ctl2 = 0;
393 	ads->ds_ctl3 = 0;
394 	if (keyIx != HAL_TXKEYIX_INVALID) {
395 		/* XXX validate key index */
396 		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
397 		ads->ds_ctl0 |= AR_DestIdxValid;
398 	}
399 
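	/*
	 * NB: ah_keytype[] is indexed unconditionally here, so the caller
	 * is presumably expected to pass a usable key cache index (e.g. a
	 * clear-key slot) even for unencrypted frames.
	 */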
400 	ads->ds_ctl6 |= SM(ahp->ah_keytype[keyIx], AR_EncrType);
401 	if (isaggr) {
402 		ads->ds_ctl6 |= SM(delims, AR_PadDelim);
403 	}
404 
405 	if (firstSeg) {
406 		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
407 	} else if (lastSeg) {           /* !firstSeg && lastSeg */
408 		ads->ds_ctl0 = 0;
409 		ads->ds_ctl1 |= segLen;
410 	} else {                        /* !firstSeg && !lastSeg */
411 		/*
412 		 * Intermediate descriptor in a multi-descriptor frame.
413 		 */
414 		ads->ds_ctl0 = 0;
415 		ads->ds_ctl1 |= segLen | AR_TxMore;
416 	}
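	/*
	 * Clear the first two status words and the TxDone bit in the last
	 * one so stale completion state from a recycled descriptor isn't
	 * seen by ar5416ProcTxDesc().
	 */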
417 	ds_txstatus[0] = ds_txstatus[1] = 0;
418 	ds_txstatus[9] &= ~AR_TxDone;
419 
420 	return AH_TRUE;
421 }
422 
423 HAL_BOOL
424 ar5416SetupFirstTxDesc(struct ath_hal *ah, struct ath_desc *ds,
425 	u_int aggrLen, u_int flags, u_int txPower,
426 	u_int txRate0, u_int txTries0, u_int antMode,
427 	u_int rtsctsRate, u_int rtsctsDuration)
428 {
429 #define RTSCTS  (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
430 	struct ar5416_desc *ads = AR5416DESC(ds);
431 	struct ath_hal_5212 *ahp = AH5212(ah);
432 
433 	HALASSERT(txTries0 != 0);
434 	HALASSERT(isValidTxRate(txRate0));
435 	HALASSERT((flags & RTSCTS) != RTSCTS);
436 	/* XXX validate antMode */
437 
438 	txPower = (txPower + ahp->ah_txPowerIndexOffset);
439 	if (txPower > 63)
		txPower = 63;
440 
441 	ads->ds_ctl0 |= (txPower << AR_XmitPower_S)
442 		| (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
443 		| (flags & HAL_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
444 		| (flags & HAL_TXDESC_INTREQ ? AR_TxIntrReq : 0);
445 	ads->ds_ctl1 |= (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0);
446 	ads->ds_ctl2 |= SM(txTries0, AR_XmitDataTries0);
447 	ads->ds_ctl3 |= (txRate0 << AR_XmitRate0_S);
448 	ads->ds_ctl7 = SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel0)
449 		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel1)
450 		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel2)
451 		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel3);
452 
453 	/* NB: no V1 WAR */
454 	ads->ds_ctl8 = SM(0, AR_AntCtl0);
455 	ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(txPower, AR_XmitPower1);
456 	ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(txPower, AR_XmitPower2);
457 	ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(txPower, AR_XmitPower3);
458 
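	/* Rewrite the aggregate length field in the low 16 bits of ctl6. */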
459 	ads->ds_ctl6 &= ~(0xffff);
460 	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
461 
462 	if (flags & RTSCTS) {
463 		/* XXX validate rtsctsDuration */
464 		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
465 			| (flags & HAL_TXDESC_RTSENA ? AR_RTSEnable : 0);
466 	}
467 
468 	/*
469 	 * Set the TX antenna to 0 for Kite
470 	 * To preserve existing behaviour, also set the TPC bits to 0;
471 	 * when TPC is enabled these should be filled in appropriately.
472 	 */
473 	if (AR_SREV_KITE(ah)) {
474 		ads->ds_ctl8 = SM(0, AR_AntCtl0);
475 		ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(0, AR_XmitPower1);
476 		ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(0, AR_XmitPower2);
477 		ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(0, AR_XmitPower3);
478 	}
479 
480 	return AH_TRUE;
481 #undef RTSCTS
482 }
483 
484 HAL_BOOL
485 ar5416SetupLastTxDesc(struct ath_hal *ah, struct ath_desc *ds,
486 		const struct ath_desc *ds0)
487 {
488 	struct ar5416_desc *ads = AR5416DESC(ds);
489 
490 	ads->ds_ctl1 &= ~AR_MoreAggr;
491 	ads->ds_ctl6 &= ~AR_PadDelim;
492 
493 	/* hack to copy rate info to last desc for later processing */
494 #ifdef AH_NEED_DESC_SWAP
495 	ads->ds_ctl2 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl2);
496 	ads->ds_ctl3 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl3);
497 #else
498 	ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
499 	ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
500 #endif
501 	return AH_TRUE;
502 }
503 
504 #ifdef AH_NEED_DESC_SWAP
505 /* Swap transmit descriptor */
506 static __inline void
507 ar5416SwapTxDesc(struct ath_desc *ds)
508 {
509 	ds->ds_data = __bswap32(ds->ds_data);
510 	ds->ds_ctl0 = __bswap32(ds->ds_ctl0);
511 	ds->ds_ctl1 = __bswap32(ds->ds_ctl1);
512 	ds->ds_hw[0] = __bswap32(ds->ds_hw[0]);
513 	ds->ds_hw[1] = __bswap32(ds->ds_hw[1]);
514 	ds->ds_hw[2] = __bswap32(ds->ds_hw[2]);
515 	ds->ds_hw[3] = __bswap32(ds->ds_hw[3]);
516 }
517 #endif
518 
519 /*
520  * Processing of HW TX descriptor.
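 *
 * Returns HAL_EINPROGRESS until the hardware has set the TxDone bit in
 * the final status word; the status words are then unpacked into the
 * ath_tx_status structure and the long retry count is fixed up for
 * multi-rate retry.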
521  */
522 HAL_STATUS
523 ar5416ProcTxDesc(struct ath_hal *ah,
524 	struct ath_desc *ds, struct ath_tx_status *ts)
525 {
526 	struct ar5416_desc *ads = AR5416DESC(ds);
527 	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);
528 
529 #ifdef AH_NEED_DESC_SWAP
530 	if ((ds_txstatus[9] & __bswap32(AR_TxDone)) == 0)
531 		return HAL_EINPROGRESS;
532 	ar5416SwapTxDesc(ds);
533 #else
534 	if ((ds_txstatus[9] & AR_TxDone) == 0)
535 		return HAL_EINPROGRESS;
536 #endif
537 
538 	/* Update software copies of the HW status */
539 	ts->ts_seqnum = MS(ds_txstatus[9], AR_SeqNum);
540 	ts->ts_tstamp = AR_SendTimestamp(ds_txstatus);
541 	ts->ts_tid = MS(ds_txstatus[9], AR_TxTid);
542 
543 	ts->ts_status = 0;
544 	if (ds_txstatus[1] & AR_ExcessiveRetries)
545 		ts->ts_status |= HAL_TXERR_XRETRY;
546 	if (ds_txstatus[1] & AR_Filtered)
547 		ts->ts_status |= HAL_TXERR_FILT;
548 	if (ds_txstatus[1] & AR_FIFOUnderrun)
549 		ts->ts_status |= HAL_TXERR_FIFO;
550 	if (ds_txstatus[9] & AR_TxOpExceeded)
551 		ts->ts_status |= HAL_TXERR_XTXOP;
552 	if (ds_txstatus[1] & AR_TxTimerExpired)
553 		ts->ts_status |= HAL_TXERR_TIMER_EXPIRED;
554 
555 	ts->ts_flags  = 0;
556 	if (ds_txstatus[0] & AR_TxBaStatus) {
557 		ts->ts_flags |= HAL_TX_BA;
558 		ts->ts_ba_low = AR_BaBitmapLow(ds_txstatus);
559 		ts->ts_ba_high = AR_BaBitmapHigh(ds_txstatus);
560 	}
561 	if (ds->ds_ctl1 & AR_IsAggr)
562 		ts->ts_flags |= HAL_TX_AGGR;
563 	if (ds_txstatus[1] & AR_DescCfgErr)
564 		ts->ts_flags |= HAL_TX_DESC_CFG_ERR;
565 	if (ds_txstatus[1] & AR_TxDataUnderrun)
566 		ts->ts_flags |= HAL_TX_DATA_UNDERRUN;
567 	if (ds_txstatus[1] & AR_TxDelimUnderrun)
568 		ts->ts_flags |= HAL_TX_DELIM_UNDERRUN;
569 
570 	/*
571 	 * Extract the transmit rate used and mark the rate as
572 	 * ``alternate'' if it wasn't the series 0 rate.
573 	 */
574 	ts->ts_finaltsi =  MS(ds_txstatus[9], AR_FinalTxIdx);
575 	switch (ts->ts_finaltsi) {
576 	case 0:
577 		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate0);
578 		break;
579 	case 1:
580 		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate1);
581 		break;
582 	case 2:
583 		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate2);
584 		break;
585 	case 3:
586 		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate3);
587 		break;
588 	}
589 
590 	ts->ts_rssi = MS(ds_txstatus[5], AR_TxRSSICombined);
591 	ts->ts_rssi_ctl[0] = MS(ds_txstatus[0], AR_TxRSSIAnt00);
592 	ts->ts_rssi_ctl[1] = MS(ds_txstatus[0], AR_TxRSSIAnt01);
593 	ts->ts_rssi_ctl[2] = MS(ds_txstatus[0], AR_TxRSSIAnt02);
594 	ts->ts_rssi_ext[0] = MS(ds_txstatus[5], AR_TxRSSIAnt10);
595 	ts->ts_rssi_ext[1] = MS(ds_txstatus[5], AR_TxRSSIAnt11);
596 	ts->ts_rssi_ext[2] = MS(ds_txstatus[5], AR_TxRSSIAnt12);
597 	ts->ts_evm0 = AR_TxEVM0(ds_txstatus);
598 	ts->ts_evm1 = AR_TxEVM1(ds_txstatus);
599 	ts->ts_evm2 = AR_TxEVM2(ds_txstatus);
600 
601 	ts->ts_shortretry = MS(ds_txstatus[1], AR_RTSFailCnt);
602 	ts->ts_longretry = MS(ds_txstatus[1], AR_DataFailCnt);
603 	/*
604 	 * The retry count has the number of un-acked tries for the
605 	 * final series used.  When doing multi-rate retry we must
606 	 * fixup the retry count by adding in the try counts for
607 	 * each series that was fully-processed.  Beware that this
608 	 * takes values from the try counts in the final descriptor.
609 	 * These are not required by the hardware.  We assume they
610 	 * are placed there by the driver as otherwise we have no
611 	 * access and the driver can't do the calculation because it
612 	 * doesn't know the descriptor format.
613 	 */
614 	switch (ts->ts_finaltsi) {
615 	case 3: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries2);
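		/* FALLTHROUGH */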
616 	case 2: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries1);
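		/* FALLTHROUGH */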
617 	case 1: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries0);
618 	}
619 
620 	/*
621 	 * These fields are not used. Zero these to preserve compatibility
622 	 * with existing drivers.
623 	 */
624 	ts->ts_virtcol = MS(ads->ds_ctl1, AR_VirtRetryCnt);
625 	ts->ts_antenna = 0; /* We don't switch antennas on Owl */
626 
627 	/* handle tx trigger level changes internally */
628 	if ((ts->ts_status & HAL_TXERR_FIFO) ||
629 	    (ts->ts_flags & (HAL_TX_DATA_UNDERRUN | HAL_TX_DELIM_UNDERRUN)))
630 		ar5212UpdateTxTrigLevel(ah, AH_TRUE);
631 
632 	return HAL_OK;
633 }
634 
635 HAL_BOOL
636 ar5416SetGlobalTxTimeout(struct ath_hal *ah, u_int tu)
637 {
638 	struct ath_hal_5416 *ahp = AH5416(ah);
639 
640 	if (tu > 0xFFFF) {
641 		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: bad global tx timeout %u\n",
642 		    __func__, tu);
643 		/* restore default handling */
644 		ahp->ah_globaltxtimeout = (u_int) -1;
645 		return AH_FALSE;
646 	}
647 	OS_REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu);
648 	ahp->ah_globaltxtimeout = tu;
649 	return AH_TRUE;
650 }
651 
652 u_int
653 ar5416GetGlobalTxTimeout(struct ath_hal *ah)
654 {
655 	return MS(OS_REG_READ(ah, AR_GTXTO), AR_GTXTO_TIMEOUT_LIMIT);
656 }
657 
658 void
659 ar5416Set11nRateScenario(struct ath_hal *ah, struct ath_desc *ds,
660         u_int durUpdateEn, u_int rtsctsRate,
661 	HAL_11N_RATE_SERIES series[], u_int nseries, u_int flags)
662 {
663 	struct ar5416_desc *ads = AR5416DESC(ds);
664 	uint32_t ds_ctl0;
665 
666 	HALASSERT(nseries == 4);
667 	(void)nseries;
668 
669 	/*
670 	 * XXX since the upper layers don't know the current chainmask
671 	 * XXX setup, just override its decisions here.
672 	 * XXX The upper layers need to be taught this!
673 	 */
674 	if (series[0].Tries != 0)
675 		series[0].ChSel = AH5416(ah)->ah_tx_chainmask;
676 	if (series[1].Tries != 0)
677 		series[1].ChSel = AH5416(ah)->ah_tx_chainmask;
678 	if (series[2].Tries != 0)
679 		series[2].ChSel = AH5416(ah)->ah_tx_chainmask;
680 	if (series[3].Tries != 0)
681 		series[3].ChSel = AH5416(ah)->ah_tx_chainmask;
682 
683 	/*
684 	 * At most one of the RTS and CTS enables should be set.
685 	 * If a frame has both set, just do RTS protection;
686 	 * that's enough to satisfy legacy protection.
687 	 */
688 	if (flags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) {
689 		ds_ctl0 = ads->ds_ctl0;
690 
691 		if (flags & HAL_TXDESC_RTSENA) {
692 			ds_ctl0 &= ~AR_CTSEnable;
693 			ds_ctl0 |= AR_RTSEnable;
694 		} else {
695 			ds_ctl0 &= ~AR_RTSEnable;
696 			ds_ctl0 |= AR_CTSEnable;
697 		}
698 
699 		ads->ds_ctl0 = ds_ctl0;
700 	} else {
701 		ads->ds_ctl0 =
702 		    (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
703 	}
704 
705 	ads->ds_ctl2 = set11nTries(series, 0)
706 		     | set11nTries(series, 1)
707 		     | set11nTries(series, 2)
708 		     | set11nTries(series, 3)
709 		     | (durUpdateEn ? AR_DurUpdateEn : 0);
710 
711 	ads->ds_ctl3 = set11nRate(series, 0)
712 		     | set11nRate(series, 1)
713 		     | set11nRate(series, 2)
714 		     | set11nRate(series, 3);
715 
716 	ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
717 		     | set11nPktDurRTSCTS(series, 1);
718 
719 	ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
720 		     | set11nPktDurRTSCTS(series, 3);
721 
722 	ads->ds_ctl7 = set11nRateFlags(series, 0)
723 		     | set11nRateFlags(series, 1)
724 		     | set11nRateFlags(series, 2)
725 		     | set11nRateFlags(series, 3)
726 		     | SM(rtsctsRate, AR_RTSCTSRate);
727 }
728 
729 void
730 ar5416Set11nAggrFirst(struct ath_hal *ah, struct ath_desc *ds,
731     u_int aggrLen, u_int numDelims)
732 {
733 	struct ar5416_desc *ads = AR5416DESC(ds);
734 
735 	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
736 
737 	ads->ds_ctl6 &= ~(AR_AggrLen | AR_PadDelim);
738 	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen) |
739 	    SM(numDelims, AR_PadDelim);
740 }
741 
742 void
743 ar5416Set11nAggrMiddle(struct ath_hal *ah, struct ath_desc *ds, u_int numDelims)
744 {
745 	struct ar5416_desc *ads = AR5416DESC(ds);
746 	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);
747 
748 	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
749 
750 	ads->ds_ctl6 &= ~AR_PadDelim;
751 	ads->ds_ctl6 |= SM(numDelims, AR_PadDelim);
752 	ads->ds_ctl6 &= ~AR_AggrLen;
753 
754 	/*
755 	 * Clear the TxDone status here, may need to change
756 	 * func name to reflect this
757 	 */
758 	ds_txstatus[9] &= ~AR_TxDone;
759 }
760 
761 void
762 ar5416Set11nAggrLast(struct ath_hal *ah, struct ath_desc *ds)
763 {
764 	struct ar5416_desc *ads = AR5416DESC(ds);
765 
766 	ads->ds_ctl1 |= AR_IsAggr;
767 	ads->ds_ctl1 &= ~AR_MoreAggr;
768 	ads->ds_ctl6 &= ~AR_PadDelim;
769 }
770 
771 void
772 ar5416Clr11nAggr(struct ath_hal *ah, struct ath_desc *ds)
773 {
774 	struct ar5416_desc *ads = AR5416DESC(ds);
775 
776 	ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
777 	ads->ds_ctl6 &= ~AR_PadDelim;
778 	ads->ds_ctl6 &= ~AR_AggrLen;
779 }
780 
781 void
782 ar5416Set11nBurstDuration(struct ath_hal *ah, struct ath_desc *ds,
783                                                   u_int burstDuration)
784 {
785 	struct ar5416_desc *ads = AR5416DESC(ds);
786 
787 	ads->ds_ctl2 &= ~AR_BurstDur;
788 	ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
789 }
790 
791 /*
792  * Retrieve the rate table from the given TX completion descriptor
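 * The rates[] and tries[] arrays must each hold at least four entries,
 * one per rate series.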
793  */
794 HAL_BOOL
795 ar5416GetTxCompletionRates(struct ath_hal *ah, const struct ath_desc *ds0, int *rates, int *tries)
796 {
797 	const struct ar5416_desc *ads = AR5416DESC_CONST(ds0);
798 
799 	rates[0] = MS(ads->ds_ctl3, AR_XmitRate0);
800 	rates[1] = MS(ads->ds_ctl3, AR_XmitRate1);
801 	rates[2] = MS(ads->ds_ctl3, AR_XmitRate2);
802 	rates[3] = MS(ads->ds_ctl3, AR_XmitRate3);
803 
804 	tries[0] = MS(ads->ds_ctl2, AR_XmitDataTries0);
805 	tries[1] = MS(ads->ds_ctl2, AR_XmitDataTries1);
806 	tries[2] = MS(ads->ds_ctl2, AR_XmitDataTries2);
807 	tries[3] = MS(ads->ds_ctl2, AR_XmitDataTries3);
808 
809 	return AH_TRUE;
810 }
811 
812 
813 /*
814  * TX queue management routines - AR5416 and later chipsets
815  */
816 
817 /*
818  * Allocate and initialize a tx DCU/QCU combination.
819  */
820 int
821 ar5416SetupTxQueue(struct ath_hal *ah, HAL_TX_QUEUE type,
822 	const HAL_TXQ_INFO *qInfo)
823 {
824 	struct ath_hal_5212 *ahp = AH5212(ah);
825 	HAL_TX_QUEUE_INFO *qi;
826 	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
827 	int q, defqflags;
828 
829 	/* by default enable OK+ERR+DESC+URN interrupts */
830 	defqflags = HAL_TXQ_TXOKINT_ENABLE
831 		  | HAL_TXQ_TXERRINT_ENABLE
832 		  | HAL_TXQ_TXDESCINT_ENABLE
833 		  | HAL_TXQ_TXURNINT_ENABLE;
834 	/* XXX move queue assignment to driver */
835 	switch (type) {
836 	case HAL_TX_QUEUE_BEACON:
837 		q = pCap->halTotalQueues-1;	/* highest priority */
838 		defqflags |= HAL_TXQ_DBA_GATED
839 		       | HAL_TXQ_CBR_DIS_QEMPTY
840 		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
841 		       | HAL_TXQ_BACKOFF_DISABLE;
842 		break;
843 	case HAL_TX_QUEUE_CAB:
844 		q = pCap->halTotalQueues-2;	/* next highest priority */
845 		defqflags |= HAL_TXQ_DBA_GATED
846 		       | HAL_TXQ_CBR_DIS_QEMPTY
847 		       | HAL_TXQ_CBR_DIS_BEMPTY
848 		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
849 		       | HAL_TXQ_BACKOFF_DISABLE;
850 		break;
851 	case HAL_TX_QUEUE_PSPOLL:
852 		q = 1;				/* lowest priority */
853 		defqflags |= HAL_TXQ_DBA_GATED
854 		       | HAL_TXQ_CBR_DIS_QEMPTY
855 		       | HAL_TXQ_CBR_DIS_BEMPTY
856 		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
857 		       | HAL_TXQ_BACKOFF_DISABLE;
858 		break;
859 	case HAL_TX_QUEUE_UAPSD:
860 		q = pCap->halTotalQueues-3;	/* third highest priority */
861 		if (ahp->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE) {
862 			HALDEBUG(ah, HAL_DEBUG_ANY,
863 			    "%s: no available UAPSD tx queue\n", __func__);
864 			return -1;
865 		}
866 		break;
867 	case HAL_TX_QUEUE_DATA:
868 		for (q = 0; q < pCap->halTotalQueues; q++)
869 			if (ahp->ah_txq[q].tqi_type == HAL_TX_QUEUE_INACTIVE)
870 				break;
871 		if (q == pCap->halTotalQueues) {
872 			HALDEBUG(ah, HAL_DEBUG_ANY,
873 			    "%s: no available tx queue\n", __func__);
874 			return -1;
875 		}
876 		break;
877 	default:
878 		HALDEBUG(ah, HAL_DEBUG_ANY,
879 		    "%s: bad tx queue type %u\n", __func__, type);
880 		return -1;
881 	}
882 
883 	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);
884 
885 	qi = &ahp->ah_txq[q];
886 	if (qi->tqi_type != HAL_TX_QUEUE_INACTIVE) {
887 		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: tx queue %u already active\n",
888 		    __func__, q);
889 		return -1;
890 	}
891 	OS_MEMZERO(qi, sizeof(HAL_TX_QUEUE_INFO));
892 	qi->tqi_type = type;
893 	if (qInfo == AH_NULL) {
894 		qi->tqi_qflags = defqflags;
895 		qi->tqi_aifs = INIT_AIFS;
896 		qi->tqi_cwmin = HAL_TXQ_USEDEFAULT;	/* NB: do at reset */
897 		qi->tqi_cwmax = INIT_CWMAX;
898 		qi->tqi_shretry = INIT_SH_RETRY;
899 		qi->tqi_lgretry = INIT_LG_RETRY;
900 		qi->tqi_physCompBuf = 0;
901 	} else {
902 		qi->tqi_physCompBuf = qInfo->tqi_compBuf;
903 		(void) ar5212SetTxQueueProps(ah, q, qInfo);
904 	}
905 	/* NB: must be followed by ar5416ResetTxQueue */
906 	return q;
907 }
908 
909 /*
910  * Update the h/w interrupt registers to reflect a tx q's configuration.
911  */
912 static void
913 setTxQInterrupts(struct ath_hal *ah, HAL_TX_QUEUE_INFO *qi)
914 {
915 	struct ath_hal_5212 *ahp = AH5212(ah);
916 
917 	HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
918 	    "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", __func__,
919 	    ahp->ah_txOkInterruptMask, ahp->ah_txErrInterruptMask,
920 	    ahp->ah_txDescInterruptMask, ahp->ah_txEolInterruptMask,
921 	    ahp->ah_txUrnInterruptMask);
922 
923 	OS_REG_WRITE(ah, AR_IMR_S0,
924 		  SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
925 		| SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC)
926 	);
927 	OS_REG_WRITE(ah, AR_IMR_S1,
928 		  SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
929 		| SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL)
930 	);
931 	OS_REG_RMW_FIELD(ah, AR_IMR_S2,
932 		AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
933 }
934 
935 /*
936  * Set the retry, aifs, cwmin/max, readyTime regs for specified queue
937  * Assumes:
938  *  ah_curchan has been set to point to the current channel
939  */
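/* 1 TU = 1024 microseconds */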
940 #define	TU_TO_USEC(_tu)		((_tu) << 10)
941 HAL_BOOL
942 ar5416ResetTxQueue(struct ath_hal *ah, u_int q)
943 {
944 	struct ath_hal_5212 *ahp = AH5212(ah);
945 	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
946 	const struct ieee80211_channel *chan = AH_PRIVATE(ah)->ah_curchan;
947 	HAL_TX_QUEUE_INFO *qi;
948 	uint32_t cwMin, chanCwMin, qmisc, dmisc;
949 
950 	if (q >= pCap->halTotalQueues) {
951 		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
952 		    __func__, q);
953 		return AH_FALSE;
954 	}
955 	qi = &ahp->ah_txq[q];
956 	if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
957 		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
958 		    __func__, q);
959 		return AH_TRUE;		/* XXX??? */
960 	}
961 
962 	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: reset queue %u\n", __func__, q);
963 
964 	if (qi->tqi_cwmin == HAL_TXQ_USEDEFAULT) {
965 		/*
966 		 * Select cwmin according to channel type.
967 		 * NB: chan can be NULL during attach
968 		 */
969 		if (chan && IEEE80211_IS_CHAN_B(chan))
970 			chanCwMin = INIT_CWMIN_11B;
971 		else
972 			chanCwMin = INIT_CWMIN;
973 		/* make sure that the CWmin is of the form (2^n - 1) */
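		/* (e.g. 15 stays 15, 16 is rounded up to 31) */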
974 		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1)
975 			;
976 	} else
977 		cwMin = qi->tqi_cwmin;
978 
979 	/* set cwMin/Max and AIFS values */
980 	OS_REG_WRITE(ah, AR_DLCL_IFS(q),
981 		  SM(cwMin, AR_D_LCL_IFS_CWMIN)
982 		| SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
983 		| SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
984 
985 	/* Set retry limit values */
986 	OS_REG_WRITE(ah, AR_DRETRY_LIMIT(q),
987 		   SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
988 		 | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
989 		 | SM(qi->tqi_lgretry, AR_D_RETRY_LIMIT_FR_LG)
990 		 | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)
991 	);
992 
993 	/* NB: always enable early termination on the QCU */
994 	qmisc = AR_Q_MISC_DCU_EARLY_TERM_REQ
995 	      | SM(AR_Q_MISC_FSP_ASAP, AR_Q_MISC_FSP);
996 
997 	/* NB: always enable DCU to wait for next fragment from QCU */
998 	dmisc = AR_D_MISC_FRAG_WAIT_EN;
999 
1000 	/* Enable exponential backoff window */
1001 	dmisc |= AR_D_MISC_BKOFF_PERSISTENCE;
1002 
1003 	/*
1004 	 * The chip reset default is to use a DCU backoff threshold of 0x2.
1005 	 * Restore this when programming the DCU MISC register.
1006 	 */
1007 	dmisc |= 0x2;
1008 
1009 	/* multiqueue support */
1010 	if (qi->tqi_cbrPeriod) {
1011 		OS_REG_WRITE(ah, AR_QCBRCFG(q),
1012 			  SM(qi->tqi_cbrPeriod,AR_Q_CBRCFG_CBR_INTERVAL)
1013 			| SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_CBR_OVF_THRESH));
1014 		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_CBR;
1015 		if (qi->tqi_cbrOverflowLimit)
1016 			qmisc |= AR_Q_MISC_CBR_EXP_CNTR_LIMIT;
1017 	}
1018 
1019 	if (qi->tqi_readyTime && (qi->tqi_type != HAL_TX_QUEUE_CAB)) {
1020 		OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
1021 			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT)
1022 			| AR_Q_RDYTIMECFG_ENA);
1023 	}
1024 
1025 	OS_REG_WRITE(ah, AR_DCHNTIME(q),
1026 		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR)
1027 		| (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
1028 
1029 	if (qi->tqi_readyTime &&
1030 	    (qi->tqi_qflags & HAL_TXQ_RDYTIME_EXP_POLICY_ENABLE))
1031 		qmisc |= AR_Q_MISC_RDYTIME_EXP_POLICY;
1032 	if (qi->tqi_qflags & HAL_TXQ_DBA_GATED)
1033 		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_DBA_GATED;
1034 	if (MS(qmisc, AR_Q_MISC_FSP) != AR_Q_MISC_FSP_ASAP) {
1035 		/*
1036 		 * These are meaningful only when the queue is not scheduled ASAP.
1037 		 */
1038 		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_BEMPTY)
1039 			qmisc |= AR_Q_MISC_CBR_INCR_DIS0;
1040 		else
1041 			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS0;
1042 		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_QEMPTY)
1043 			qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
1044 		else
1045 			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS1;
1046 	}
1047 
1048 	if (qi->tqi_qflags & HAL_TXQ_BACKOFF_DISABLE)
1049 		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
1050 	if (qi->tqi_qflags & HAL_TXQ_FRAG_BURST_BACKOFF_ENABLE)
1051 		dmisc |= AR_D_MISC_FRAG_BKOFF_EN;
1052 	if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_GLOBAL)
1053 		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
1054 			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
1055 	else if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_INTRA)
1056 		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR,
1057 			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
1058 	if (qi->tqi_qflags & HAL_TXQ_IGNORE_VIRTCOL)
1059 		dmisc |= SM(AR_D_MISC_VIR_COL_HANDLING_IGNORE,
1060 			    AR_D_MISC_VIR_COL_HANDLING);
1061 	if (qi->tqi_qflags & HAL_TXQ_SEQNUM_INC_DIS)
1062 		dmisc |= AR_D_MISC_SEQ_NUM_INCR_DIS;
1063 
1064 	/*
1065 	 * Fill in type-dependent bits.  Most of this can be
1066 	 * removed by specifying the queue parameters in the
1067 	 * driver; it's here for backwards compatibility.
1068 	 */
1069 	switch (qi->tqi_type) {
1070 	case HAL_TX_QUEUE_BEACON:		/* beacon frames */
1071 		qmisc |= AR_Q_MISC_FSP_DBA_GATED
1072 		      |  AR_Q_MISC_BEACON_USE
1073 		      |  AR_Q_MISC_CBR_INCR_DIS1;
1074 
1075 		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
1076 			    AR_D_MISC_ARB_LOCKOUT_CNTRL)
1077 		      |  AR_D_MISC_BEACON_USE
1078 		      |  AR_D_MISC_POST_FR_BKOFF_DIS;
1079 		break;
1080 	case HAL_TX_QUEUE_CAB:			/* CAB  frames */
1081 		/*
1082 		 * No longer enable AR_Q_MISC_RDYTIME_EXP_POLICY;
1083 		 * there is an issue with the CAB queue
1084 		 * not properly refreshing the Tx descriptor if
1085 		 * the TXE clear setting is used.
1086 		 */
1087 		qmisc |= AR_Q_MISC_FSP_DBA_GATED
1088 		      |  AR_Q_MISC_CBR_INCR_DIS1
1089 		      |  AR_Q_MISC_CBR_INCR_DIS0;
1090 		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: CAB: tqi_readyTime = %d\n",
1091 		    __func__, qi->tqi_readyTime);
1092 		if (qi->tqi_readyTime) {
1093 			HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
1094 			    "%s: using tqi_readyTime\n", __func__);
1095 			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
1096 			    SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT) |
1097 			    AR_Q_RDYTIMECFG_ENA);
1098 		} else {
1099 			int value;
1100 			/*
1101 			 * NB: don't set default ready time if driver
1102 			 * has explicitly specified something.  This is
1103 			 * here solely for backwards compatibility.
1104 			 */
1105 			/*
1106 			 * XXX for now, hard-code a CAB interval of 70%
1107 			 * XXX of the total beacon interval.
1108 			 *
1109 			 * XXX This keeps Merlin and later based MACs
1110 			 * XXX quite a bit happier (stops stuck beacons,
1111 			 * XXX which I gather is because of such a long
1112 			 * XXX cabq time.)
1113 			 */
1114 			value = (ahp->ah_beaconInterval * 70 / 100)
1115 				- (ah->ah_config.ah_sw_beacon_response_time
1116 				+ ah->ah_config.ah_dma_beacon_response_time)
1117 				- ah->ah_config.ah_additional_swba_backoff;
1118 			/*
1119 			 * XXX Ensure it isn't too low - nothing lower
1120 			 * XXX than 10 TU
1121 			 */
1122 			if (value < 10)
1123 				value = 10;
1124 			HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
1125 			    "%s: defaulting to rdytime = %d TU\n",
1126 			    __func__, value);
1127 			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
1128 			    SM(TU_TO_USEC(value), AR_Q_RDYTIMECFG_INT) |
1129 			    AR_Q_RDYTIMECFG_ENA);
1130 		}
1131 		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
1132 			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
1133 		break;
1134 	case HAL_TX_QUEUE_PSPOLL:
1135 		qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
1136 		break;
1137 	case HAL_TX_QUEUE_UAPSD:
1138 		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
1139 		break;
1140 	default:			/* NB: silence compiler */
1141 		break;
1142 	}
1143 
1144 	OS_REG_WRITE(ah, AR_QMISC(q), qmisc);
1145 	OS_REG_WRITE(ah, AR_DMISC(q), dmisc);
1146 
1147 	/* Setup compression scratchpad buffer */
1148 	/*
1149 	 * XXX: calling this asynchronously to queue operation can
1150 	 *      cause unexpected behavior!!!
1151 	 */
1152 	if (qi->tqi_physCompBuf) {
1153 		HALASSERT(qi->tqi_type == HAL_TX_QUEUE_DATA ||
1154 			  qi->tqi_type == HAL_TX_QUEUE_UAPSD);
1155 		OS_REG_WRITE(ah, AR_Q_CBBS, (80 + 2*q));
1156 		OS_REG_WRITE(ah, AR_Q_CBBA, qi->tqi_physCompBuf);
1157 		OS_REG_WRITE(ah, AR_Q_CBC,  HAL_COMP_BUF_MAX_SIZE/1024);
1158 		OS_REG_WRITE(ah, AR_Q0_MISC + 4*q,
1159 			     OS_REG_READ(ah, AR_Q0_MISC + 4*q)
1160 			     | AR_Q_MISC_QCU_COMP_EN);
1161 	}
1162 
1163 	/*
1164 	 * Always update the secondary interrupt mask registers - this
1165 	 * could be a new queue getting enabled in a running system or
1166 	 * hw getting re-initialized during a reset!
1167 	 *
1168 	 * Since we don't differentiate between tx interrupts corresponding
1169 	 * to individual queues - secondary tx mask regs are always unmasked;
1170 	 * tx interrupts are enabled/disabled for all queues collectively
1171 	 * using the primary mask reg
1172 	 */
1173 	if (qi->tqi_qflags & HAL_TXQ_TXOKINT_ENABLE)
1174 		ahp->ah_txOkInterruptMask |= 1 << q;
1175 	else
1176 		ahp->ah_txOkInterruptMask &= ~(1 << q);
1177 	if (qi->tqi_qflags & HAL_TXQ_TXERRINT_ENABLE)
1178 		ahp->ah_txErrInterruptMask |= 1 << q;
1179 	else
1180 		ahp->ah_txErrInterruptMask &= ~(1 << q);
1181 	if (qi->tqi_qflags & HAL_TXQ_TXDESCINT_ENABLE)
1182 		ahp->ah_txDescInterruptMask |= 1 << q;
1183 	else
1184 		ahp->ah_txDescInterruptMask &= ~(1 << q);
1185 	if (qi->tqi_qflags & HAL_TXQ_TXEOLINT_ENABLE)
1186 		ahp->ah_txEolInterruptMask |= 1 << q;
1187 	else
1188 		ahp->ah_txEolInterruptMask &= ~(1 << q);
1189 	if (qi->tqi_qflags & HAL_TXQ_TXURNINT_ENABLE)
1190 		ahp->ah_txUrnInterruptMask |= 1 << q;
1191 	else
1192 		ahp->ah_txUrnInterruptMask &= ~(1 << q);
1193 	setTxQInterrupts(ah, qi);
1194 
1195 	return AH_TRUE;
1196 }
1197 #undef	TU_TO_USEC
1198