xref: /freebsd/sys/dev/ath/ath_hal/ar5416/ar5416_xmit.c (revision a10cee30c94cf5944826d2a495e9cdf339dfbcc8)
1 /*
2  * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3  * Copyright (c) 2002-2008 Atheros Communications, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  *
17  * $FreeBSD$
18  */
19 #include "opt_ah.h"
20 
21 #include "ah.h"
22 #include "ah_desc.h"
23 #include "ah_internal.h"
24 
25 #include "ar5416/ar5416.h"
26 #include "ar5416/ar5416reg.h"
27 #include "ar5416/ar5416phy.h"
28 #include "ar5416/ar5416desc.h"
29 
30 /*
31  * Stop transmit on the specified queue
32  */
33 HAL_BOOL
34 ar5416StopTxDma(struct ath_hal *ah, u_int q)
35 {
36 #define	STOP_DMA_TIMEOUT	4000	/* us */
37 #define	STOP_DMA_ITER		100	/* us */
38 	u_int i;
39 
40 	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
41 
42 	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
43 
44 	OS_REG_WRITE(ah, AR_Q_TXD, 1 << q);
45 	for (i = STOP_DMA_TIMEOUT/STOP_DMA_ITER; i != 0; i--) {
46 		if (ar5212NumTxPending(ah, q) == 0)
47 			break;
48 		OS_DELAY(STOP_DMA_ITER);
49 	}
50 #ifdef AH_DEBUG
51 	if (i == 0) {
52 		HALDEBUG(ah, HAL_DEBUG_ANY,
53 		    "%s: queue %u DMA did not stop in 400 msec\n", __func__, q);
54 		HALDEBUG(ah, HAL_DEBUG_ANY,
55 		    "%s: QSTS 0x%x Q_TXE 0x%x Q_TXD 0x%x Q_CBR 0x%x\n", __func__,
56 		    OS_REG_READ(ah, AR_QSTS(q)), OS_REG_READ(ah, AR_Q_TXE),
57 		    OS_REG_READ(ah, AR_Q_TXD), OS_REG_READ(ah, AR_QCBRCFG(q)));
58 		HALDEBUG(ah, HAL_DEBUG_ANY,
59 		    "%s: Q_MISC 0x%x Q_RDYTIMECFG 0x%x Q_RDYTIMESHDN 0x%x\n",
60 		    __func__, OS_REG_READ(ah, AR_QMISC(q)),
61 		    OS_REG_READ(ah, AR_QRDYTIMECFG(q)),
62 		    OS_REG_READ(ah, AR_Q_RDYTIMESHDN));
63 	}
64 #endif /* AH_DEBUG */
65 
66 	/* ar5416 and up can kill packets at the PCU level */
67 	if (ar5212NumTxPending(ah, q)) {
68 		uint32_t j;
69 
70 		HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
71 		    "%s: Num of pending TX Frames %d on Q %d\n",
72 		    __func__, ar5212NumTxPending(ah, q), q);
73 
74 		/* Kill last PCU Tx Frame */
75 		/* TODO - save off and restore current values of Q1/Q2? */
76 		for (j = 0; j < 2; j++) {
77 			uint32_t tsfLow = OS_REG_READ(ah, AR_TSF_L32);
78 			OS_REG_WRITE(ah, AR_QUIET2,
79 			    SM(10, AR_QUIET2_QUIET_DUR));
80 			OS_REG_WRITE(ah, AR_QUIET_PERIOD, 100);
81 			OS_REG_WRITE(ah, AR_NEXT_QUIET, tsfLow >> 10);
82 			OS_REG_SET_BIT(ah, AR_TIMER_MODE, AR_TIMER_MODE_QUIET);
83 
84 			if ((OS_REG_READ(ah, AR_TSF_L32)>>10) == (tsfLow>>10))
85 				break;
86 
87 			HALDEBUG(ah, HAL_DEBUG_ANY,
88 			    "%s: TSF moved while trying to set quiet time "
89 			    "TSF: 0x%08x\n", __func__, tsfLow);
90 			HALASSERT(j < 1); /* TSF shouldn't count twice or reg access is taking forever */
91 		}
92 
93 		OS_REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
94 
95 		/* Allow the quiet mechanism to do its work */
96 		OS_DELAY(200);
97 		OS_REG_CLR_BIT(ah, AR_TIMER_MODE, AR_TIMER_MODE_QUIET);
98 
99 		/* Verify the transmit q is empty */
100 		for (i = STOP_DMA_TIMEOUT/STOP_DMA_ITER; i != 0; i--) {
101 			if (ar5212NumTxPending(ah, q) == 0)
102 				break;
103 			OS_DELAY(STOP_DMA_ITER);
104 		}
105 		if (i == 0) {
106 			HALDEBUG(ah, HAL_DEBUG_ANY,
107 			    "%s: Failed to stop Tx DMA in %d msec after killing"
108 			    " last frame\n", __func__, STOP_DMA_TIMEOUT / 1000);
109 		}
110 		OS_REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
111 	}
112 
113 	OS_REG_WRITE(ah, AR_Q_TXD, 0);
114 	return (i != 0);
115 #undef STOP_DMA_ITER
116 #undef STOP_DMA_TIMEOUT
117 }
118 
/* Bitmask of HAL key types the TX descriptor setup accepts. */
#define VALID_KEY_TYPES \
        ((1 << HAL_KEY_TYPE_CLEAR) | (1 << HAL_KEY_TYPE_WEP)|\
         (1 << HAL_KEY_TYPE_AES)   | (1 << HAL_KEY_TYPE_TKIP))
#define isValidKeyType(_t)      ((1 << (_t)) & VALID_KEY_TYPES)

/*
 * Helpers to build the per-series (0..3) multi-rate-retry fields.
 * _index is pasted into the register-field macro name, so it must
 * be a literal 0..3.
 */
#define set11nTries(_series, _index) \
        (SM((_series)[_index].Tries, AR_XmitDataTries##_index))

#define set11nRate(_series, _index) \
        (SM((_series)[_index].Rate, AR_XmitRate##_index))

/* Packet duration plus the per-series RTS/CTS qualifier bit. */
#define set11nPktDurRTSCTS(_series, _index) \
        (SM((_series)[_index].PktDuration, AR_PacketDur##_index) |\
         ((_series)[_index].RateFlags & HAL_RATESERIES_RTS_CTS   ?\
         AR_RTSCTSQual##_index : 0))

/* HT 20/40, short-GI, STBC and chain-select bits for one series. */
#define set11nRateFlags(_series, _index) \
        ((_series)[_index].RateFlags & HAL_RATESERIES_2040 ? AR_2040_##_index : 0) \
        |((_series)[_index].RateFlags & HAL_RATESERIES_HALFGI ? AR_GI##_index : 0) \
        |((_series)[_index].RateFlags & HAL_RATESERIES_STBC ? AR_STBC##_index : 0) \
        |SM((_series)[_index].ChSel, AR_ChainSel##_index)

/*
 * Descriptor Access Functions
 */

/* Frame types the descriptor setup routines will accept. */
#define VALID_PKT_TYPES \
        ((1<<HAL_PKT_TYPE_NORMAL)|(1<<HAL_PKT_TYPE_ATIM)|\
         (1<<HAL_PKT_TYPE_PSPOLL)|(1<<HAL_PKT_TYPE_PROBE_RESP)|\
         (1<<HAL_PKT_TYPE_BEACON)|(1<<HAL_PKT_TYPE_AMPDU))
#define isValidPktType(_t)      ((1<<(_t)) & VALID_PKT_TYPES)
/* Bitmask of valid h/w rate codes (legacy CCK/OFDM codes). */
#define VALID_TX_RATES \
        ((1<<0x0b)|(1<<0x0f)|(1<<0x0a)|(1<<0x0e)|(1<<0x09)|(1<<0x0d)|\
         (1<<0x08)|(1<<0x0c)|(1<<0x1b)|(1<<0x1a)|(1<<0x1e)|(1<<0x19)|\
	 (1<<0x1d)|(1<<0x18)|(1<<0x1c)|(1<<0x01)|(1<<0x02)|(1<<0x03)|\
	 (1<<0x04)|(1<<0x05)|(1<<0x06)|(1<<0x07)|(1<<0x00))
/* NB: accept HT rates (the 0x80 MCS flag is masked off first) */
#define	isValidTxRate(_r)	((1<<((_r) & 0x7f)) & VALID_TX_RATES)
157 
/*
 * Set up the transmit control fields (ds_ctl0..ds_ctl11) of a TX
 * descriptor for a single-rate (series 0) frame.  Multi-rate retry
 * series are filled in later by ar5416SetupXTxDesc().
 *
 * Returns AH_FALSE only if RTS/CTS was requested with an invalid
 * rts/cts rate code; otherwise AH_TRUE.
 */
HAL_BOOL
ar5416SetupTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int pktLen,
	u_int hdrLen,
	HAL_PKT_TYPE type,
	u_int txPower,
	u_int txRate0, u_int txTries0,
	u_int keyIx,
	u_int antMode,
	u_int flags,
	u_int rtsctsRate,
	u_int rtsctsDuration,
	u_int compicvLen,
	u_int compivLen,
	u_int comp)
{
#define	RTSCTS	(HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath_hal_5416 *ahp = AH5416(ah);

	/* hdrLen (and the comp* args) are unused on this chip family */
	(void) hdrLen;

	HALASSERT(txTries0 != 0);
	HALASSERT(isValidPktType(type));
	HALASSERT(isValidTxRate(txRate0));
	/* RTS and CTS are mutually exclusive */
	HALASSERT((flags & RTSCTS) != RTSCTS);
	/* XXX validate antMode */

	/* Apply the chip's power index offset and clamp to the field max. */
        txPower = (txPower + AH5212(ah)->ah_txPowerIndexOffset);
        if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 = (pktLen & AR_FrameLen)
		     | (txPower << AR_XmitPower_S)
		     | (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
		     | (flags & HAL_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		     | (flags & HAL_TXDESC_INTREQ ? AR_TxIntrReq : 0)
		     ;
	ads->ds_ctl1 = (type << AR_FrameType_S)
		     | (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0)
                     ;
	ads->ds_ctl2 = SM(txTries0, AR_XmitDataTries0)
		     | (flags & HAL_TXDESC_DURENA ? AR_DurUpdateEn : 0)
		     ;
	ads->ds_ctl3 = (txRate0 << AR_XmitRate0_S)
		     ;
	ads->ds_ctl4 = 0;
	ads->ds_ctl5 = 0;
	ads->ds_ctl6 = 0;
	/* Use the configured TX chainmask for all four chain selects. */
	ads->ds_ctl7 = SM(ahp->ah_tx_chainmask, AR_ChainSel0)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel1)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel2)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel3)
		     ;
	/* Antenna control 0; series 1..3 also carry the same TX power. */
	ads->ds_ctl8 = SM(0, AR_AntCtl0);
	ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(txPower, AR_XmitPower1);
	ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(txPower, AR_XmitPower2);
	ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(txPower, AR_XmitPower3);

	if (keyIx != HAL_TXKEYIX_INVALID) {
		/* XXX validate key index */
		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
		ads->ds_ctl0 |= AR_DestIdxValid;
		ads->ds_ctl6 |= SM(ahp->ah_keytype[keyIx], AR_EncrType);
	}
	if (flags & RTSCTS) {
		if (!isValidTxRate(rtsctsRate)) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: invalid rts/cts rate 0x%x\n",
			    __func__, rtsctsRate);
			return AH_FALSE;
		}
		/* XXX validate rtsctsDuration */
		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
			     | (flags & HAL_TXDESC_RTSENA ? AR_RTSEnable : 0)
			     ;
		ads->ds_ctl7 |= (rtsctsRate << AR_RTSCTSRate_S);
	}

	/*
	 * Set the TX antenna to 0 for Kite
	 * To preserve existing behaviour, also set the TPC bits to 0;
	 * when TPC is enabled these should be filled in appropriately.
	 */
	if (AR_SREV_KITE(ah)) {
		ads->ds_ctl8 = SM(0, AR_AntCtl0);
		ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(0, AR_XmitPower1);
		ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(0, AR_XmitPower2);
		ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(0, AR_XmitPower3);
	}
	return AH_TRUE;
#undef RTSCTS
}
251 
252 HAL_BOOL
253 ar5416SetupXTxDesc(struct ath_hal *ah, struct ath_desc *ds,
254 	u_int txRate1, u_int txTries1,
255 	u_int txRate2, u_int txTries2,
256 	u_int txRate3, u_int txTries3)
257 {
258 	struct ar5416_desc *ads = AR5416DESC(ds);
259 
260 	if (txTries1) {
261 		HALASSERT(isValidTxRate(txRate1));
262 		ads->ds_ctl2 |= SM(txTries1, AR_XmitDataTries1);
263 		ads->ds_ctl3 |= (txRate1 << AR_XmitRate1_S);
264 	}
265 	if (txTries2) {
266 		HALASSERT(isValidTxRate(txRate2));
267 		ads->ds_ctl2 |= SM(txTries2, AR_XmitDataTries2);
268 		ads->ds_ctl3 |= (txRate2 << AR_XmitRate2_S);
269 	}
270 	if (txTries3) {
271 		HALASSERT(isValidTxRate(txRate3));
272 		ads->ds_ctl2 |= SM(txTries3, AR_XmitDataTries3);
273 		ads->ds_ctl3 |= (txRate3 << AR_XmitRate3_S);
274 	}
275 	return AH_TRUE;
276 }
277 
/*
 * Fill in the buffer pointer/length and segment-chaining fields of
 * one descriptor in a (possibly multi-descriptor) frame.  Only the
 * first descriptor keeps the control fields set up earlier; later
 * descriptors inherit just the interrupt-request bit (and, on the
 * last one, the rate/try fields needed at completion time).
 */
HAL_BOOL
ar5416FillTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	HAL_DMA_ADDR *bufAddrList, uint32_t *segLenList, u_int descId,
	u_int qcuId, HAL_BOOL firstSeg, HAL_BOOL lastSeg,
	const struct ath_desc *ds0)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t segLen = segLenList[0];

	/* Segment length must fit in the BufLen field. */
	HALASSERT((segLen &~ AR_BufLen) == 0);

	ds->ds_data = bufAddrList[0];

	if (firstSeg) {
		/*
		 * First descriptor, don't clobber xmit control data
		 * setup by ar5212SetupTxDesc.
		 */
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
	} else if (lastSeg) {		/* !firstSeg && lastSeg */
		/*
		 * Last descriptor in a multi-descriptor frame,
		 * copy the multi-rate transmit parameters from
		 * the first frame for processing on completion.
		 */
		ads->ds_ctl1 = segLen;
#ifdef AH_NEED_DESC_SWAP
		/* NB: descriptors are kept byte-swapped on this config */
		ads->ds_ctl0 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl0)
		    & AR_TxIntrReq;
		ads->ds_ctl2 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl2);
		ads->ds_ctl3 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl3);
#else
		ads->ds_ctl0 = AR5416DESC_CONST(ds0)->ds_ctl0 & AR_TxIntrReq;
		ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
#endif
	} else {			/* !firstSeg && !lastSeg */
		/*
		 * Intermediate descriptor in a multi-descriptor frame.
		 */
#ifdef AH_NEED_DESC_SWAP
		ads->ds_ctl0 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl0)
		    & AR_TxIntrReq;
#else
		ads->ds_ctl0 = AR5416DESC_CONST(ds0)->ds_ctl0 & AR_TxIntrReq;
#endif
		ads->ds_ctl1 = segLen | AR_TxMore;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	/* XXX only on last descriptor? */
	OS_MEMZERO(ads->u.tx.status, sizeof(ads->u.tx.status));
	return AH_TRUE;
}
332 
333 /*
334  * NB: cipher is no longer used, it's calculated.
335  */
336 HAL_BOOL
337 ar5416ChainTxDesc(struct ath_hal *ah, struct ath_desc *ds,
338 	HAL_DMA_ADDR *bufAddrList,
339 	uint32_t *segLenList,
340 	u_int pktLen,
341 	u_int hdrLen,
342 	HAL_PKT_TYPE type,
343 	u_int keyIx,
344 	HAL_CIPHER cipher,
345 	uint8_t delims,
346 	HAL_BOOL firstSeg,
347 	HAL_BOOL lastSeg,
348 	HAL_BOOL lastAggr)
349 {
350 	struct ar5416_desc *ads = AR5416DESC(ds);
351 	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);
352 	struct ath_hal_5416 *ahp = AH5416(ah);
353 	u_int segLen = segLenList[0];
354 
355 	int isaggr = 0;
356 	uint32_t last_aggr = 0;
357 
358 	(void) hdrLen;
359 	(void) ah;
360 
361 	HALASSERT((segLen &~ AR_BufLen) == 0);
362 	ds->ds_data = bufAddrList[0];
363 
364 	HALASSERT(isValidPktType(type));
365 	if (type == HAL_PKT_TYPE_AMPDU) {
366 		type = HAL_PKT_TYPE_NORMAL;
367 		isaggr = 1;
368 		if (lastAggr == AH_FALSE)
369 			last_aggr = AR_MoreAggr;
370 	}
371 
372 	/*
373 	 * Since this function is called before any of the other
374 	 * descriptor setup functions (at least in this particular
375 	 * 802.11n aggregation implementation), always bzero() the
376 	 * descriptor. Previously this would be done for all but
377 	 * the first segment.
378 	 * XXX TODO: figure out why; perhaps I'm using this slightly
379 	 * XXX incorrectly.
380 	 */
381 	OS_MEMZERO(ds->ds_hw, AR5416_DESC_TX_CTL_SZ);
382 
383 	/*
384 	 * Note: VEOL should only be for the last descriptor in the chain.
385 	 */
386 	ads->ds_ctl0 = (pktLen & AR_FrameLen);
387 
388 	/*
389 	 * For aggregates:
390 	 * + IsAggr must be set for all descriptors of all subframes of
391 	 *   the aggregate
392 	 * + MoreAggr must be set for all descriptors of all subframes
393 	 *   of the aggregate EXCEPT the last subframe;
394 	 * + MoreAggr must be _CLEAR_ for all descrpitors of the last
395 	 *   subframe of the aggregate.
396 	 */
397 	ads->ds_ctl1 = (type << AR_FrameType_S)
398 			| (isaggr ? (AR_IsAggr | last_aggr) : 0);
399 
400 	ads->ds_ctl2 = 0;
401 	ads->ds_ctl3 = 0;
402 	if (keyIx != HAL_TXKEYIX_INVALID) {
403 		/* XXX validate key index */
404 		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
405 		ads->ds_ctl0 |= AR_DestIdxValid;
406 	}
407 
408 	ads->ds_ctl6 |= SM(ahp->ah_keytype[keyIx], AR_EncrType);
409 	if (isaggr) {
410 		ads->ds_ctl6 |= SM(delims, AR_PadDelim);
411 	}
412 
413 	if (firstSeg) {
414 		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
415 	} else if (lastSeg) {           /* !firstSeg && lastSeg */
416 		ads->ds_ctl0 = 0;
417 		ads->ds_ctl1 |= segLen;
418 	} else {                        /* !firstSeg && !lastSeg */
419 		/*
420 		 * Intermediate descriptor in a multi-descriptor frame.
421 		 */
422 		ads->ds_ctl0 = 0;
423 		ads->ds_ctl1 |= segLen | AR_TxMore;
424 	}
425 	ds_txstatus[0] = ds_txstatus[1] = 0;
426 	ds_txstatus[9] &= ~AR_TxDone;
427 
428 	return AH_TRUE;
429 }
430 
431 HAL_BOOL
432 ar5416SetupFirstTxDesc(struct ath_hal *ah, struct ath_desc *ds,
433 	u_int aggrLen, u_int flags, u_int txPower,
434 	u_int txRate0, u_int txTries0, u_int antMode,
435 	u_int rtsctsRate, u_int rtsctsDuration)
436 {
437 #define RTSCTS  (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
438 	struct ar5416_desc *ads = AR5416DESC(ds);
439 	struct ath_hal_5212 *ahp = AH5212(ah);
440 
441 	HALASSERT(txTries0 != 0);
442 	HALASSERT(isValidTxRate(txRate0));
443 	HALASSERT((flags & RTSCTS) != RTSCTS);
444 	/* XXX validate antMode */
445 
446 	txPower = (txPower + ahp->ah_txPowerIndexOffset );
447 	if(txPower > 63)  txPower=63;
448 
449 	ads->ds_ctl0 |= (txPower << AR_XmitPower_S)
450 		| (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
451 		| (flags & HAL_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
452 		| (flags & HAL_TXDESC_INTREQ ? AR_TxIntrReq : 0);
453 	ads->ds_ctl1 |= (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0);
454 	ads->ds_ctl2 |= SM(txTries0, AR_XmitDataTries0);
455 	ads->ds_ctl3 |= (txRate0 << AR_XmitRate0_S);
456 	ads->ds_ctl7 = SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel0)
457 		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel1)
458 		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel2)
459 		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel3);
460 
461 	/* NB: no V1 WAR */
462 	ads->ds_ctl8 = SM(0, AR_AntCtl0);
463 	ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(txPower, AR_XmitPower1);
464 	ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(txPower, AR_XmitPower2);
465 	ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(txPower, AR_XmitPower3);
466 
467 	ads->ds_ctl6 &= ~(0xffff);
468 	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
469 
470 	if (flags & RTSCTS) {
471 		/* XXX validate rtsctsDuration */
472 		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
473 			| (flags & HAL_TXDESC_RTSENA ? AR_RTSEnable : 0);
474 	}
475 
476 	/*
477 	 * Set the TX antenna to 0 for Kite
478 	 * To preserve existing behaviour, also set the TPC bits to 0;
479 	 * when TPC is enabled these should be filled in appropriately.
480 	 */
481 	if (AR_SREV_KITE(ah)) {
482 		ads->ds_ctl8 = SM(0, AR_AntCtl0);
483 		ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(0, AR_XmitPower1);
484 		ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(0, AR_XmitPower2);
485 		ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(0, AR_XmitPower3);
486 	}
487 
488 	return AH_TRUE;
489 #undef RTSCTS
490 }
491 
/*
 * Finalize the last descriptor of an aggregate: clear MoreAggr and
 * the pad-delimiter count, and copy the rate/try fields from the
 * first descriptor so completion processing can find them here.
 */
HAL_BOOL
ar5416SetupLastTxDesc(struct ath_hal *ah, struct ath_desc *ds,
		const struct ath_desc *ds0)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 &= ~AR_MoreAggr;
	ads->ds_ctl6 &= ~AR_PadDelim;

	/* hack to copy rate info to last desc for later processing */
#ifdef AH_NEED_DESC_SWAP
	/* NB: descriptors are stored byte-swapped on this configuration */
	ads->ds_ctl2 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl2);
	ads->ds_ctl3 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl3);
#else
	ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
	ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
#endif
	return AH_TRUE;
}
511 
512 #ifdef AH_NEED_DESC_SWAP
513 /* Swap transmit descriptor */
514 static __inline void
515 ar5416SwapTxDesc(struct ath_desc *ds)
516 {
517 	ds->ds_data = __bswap32(ds->ds_data);
518 	ds->ds_ctl0 = __bswap32(ds->ds_ctl0);
519 	ds->ds_ctl1 = __bswap32(ds->ds_ctl1);
520 	ds->ds_hw[0] = __bswap32(ds->ds_hw[0]);
521 	ds->ds_hw[1] = __bswap32(ds->ds_hw[1]);
522 	ds->ds_hw[2] = __bswap32(ds->ds_hw[2]);
523 	ds->ds_hw[3] = __bswap32(ds->ds_hw[3]);
524 }
525 #endif
526 
527 /*
528  * Processing of HW TX descriptor.
529  */
/*
 * Decode the hardware TX completion status into *ts.
 *
 * Returns HAL_EINPROGRESS if the hardware has not yet marked the
 * descriptor done, otherwise HAL_OK with ts filled in.
 */
HAL_STATUS
ar5416ProcTxDesc(struct ath_hal *ah,
	struct ath_desc *ds, struct ath_tx_status *ts)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);

#ifdef AH_NEED_DESC_SWAP
	/* Status words are byte-swapped in memory on this configuration. */
	if ((ds_txstatus[9] & __bswap32(AR_TxDone)) == 0)
		return HAL_EINPROGRESS;
	ar5416SwapTxDesc(ds);
#else
	if ((ds_txstatus[9] & AR_TxDone) == 0)
		return HAL_EINPROGRESS;
#endif

	/* Update software copies of the HW status */
	ts->ts_seqnum = MS(ds_txstatus[9], AR_SeqNum);
	ts->ts_tstamp = AR_SendTimestamp(ds_txstatus);
	ts->ts_tid = MS(ds_txstatus[9], AR_TxTid);

	/* Map the hardware error bits to HAL_TXERR_* status flags. */
	ts->ts_status = 0;
	if (ds_txstatus[1] & AR_ExcessiveRetries)
		ts->ts_status |= HAL_TXERR_XRETRY;
	if (ds_txstatus[1] & AR_Filtered)
		ts->ts_status |= HAL_TXERR_FILT;
	if (ds_txstatus[1] & AR_FIFOUnderrun)
		ts->ts_status |= HAL_TXERR_FIFO;
	if (ds_txstatus[9] & AR_TxOpExceeded)
		ts->ts_status |= HAL_TXERR_XTXOP;
	if (ds_txstatus[1] & AR_TxTimerExpired)
		ts->ts_status |= HAL_TXERR_TIMER_EXPIRED;

	/* Informational flags: block-ack, aggregation, underruns. */
	ts->ts_flags  = 0;
	if (ds_txstatus[0] & AR_TxBaStatus) {
		ts->ts_flags |= HAL_TX_BA;
		ts->ts_ba_low = AR_BaBitmapLow(ds_txstatus);
		ts->ts_ba_high = AR_BaBitmapHigh(ds_txstatus);
	}
	if (ds->ds_ctl1 & AR_IsAggr)
		ts->ts_flags |= HAL_TX_AGGR;
	if (ds_txstatus[1] & AR_DescCfgErr)
		ts->ts_flags |= HAL_TX_DESC_CFG_ERR;
	if (ds_txstatus[1] & AR_TxDataUnderrun)
		ts->ts_flags |= HAL_TX_DATA_UNDERRUN;
	if (ds_txstatus[1] & AR_TxDelimUnderrun)
		ts->ts_flags |= HAL_TX_DELIM_UNDERRUN;

	/*
	 * Extract the transmit rate used and mark the rate as
	 * ``alternate'' if it wasn't the series 0 rate.
	 */
	ts->ts_finaltsi =  MS(ds_txstatus[9], AR_FinalTxIdx);
	switch (ts->ts_finaltsi) {
	case 0:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate1);
		break;
	case 2:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate2);
		break;
	case 3:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate3);
		break;
	}

	/* RSSI per-chain, control and extension, plus EVM words. */
	ts->ts_rssi = MS(ds_txstatus[5], AR_TxRSSICombined);
	ts->ts_rssi_ctl[0] = MS(ds_txstatus[0], AR_TxRSSIAnt00);
	ts->ts_rssi_ctl[1] = MS(ds_txstatus[0], AR_TxRSSIAnt01);
	ts->ts_rssi_ctl[2] = MS(ds_txstatus[0], AR_TxRSSIAnt02);
	ts->ts_rssi_ext[0] = MS(ds_txstatus[5], AR_TxRSSIAnt10);
	ts->ts_rssi_ext[1] = MS(ds_txstatus[5], AR_TxRSSIAnt11);
	ts->ts_rssi_ext[2] = MS(ds_txstatus[5], AR_TxRSSIAnt12);
	ts->ts_evm0 = AR_TxEVM0(ds_txstatus);
	ts->ts_evm1 = AR_TxEVM1(ds_txstatus);
	ts->ts_evm2 = AR_TxEVM2(ds_txstatus);

	ts->ts_shortretry = MS(ds_txstatus[1], AR_RTSFailCnt);
	ts->ts_longretry = MS(ds_txstatus[1], AR_DataFailCnt);
	/*
	 * The retry count has the number of un-acked tries for the
	 * final series used.  When doing multi-rate retry we must
	 * fixup the retry count by adding in the try counts for
	 * each series that was fully-processed.  Beware that this
	 * takes values from the try counts in the final descriptor.
	 * These are not required by the hardware.  We assume they
	 * are placed there by the driver as otherwise we have no
	 * access and the driver can't do the calculation because it
	 * doesn't know the descriptor format.
	 */
	switch (ts->ts_finaltsi) {
	case 3: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries2);
		/* FALLTHROUGH */
	case 2: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries1);
		/* FALLTHROUGH */
	case 1: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries0);
	}

	/*
	 * These fields are not used. Zero these to preserve compatability
	 * with existing drivers.
	 */
	ts->ts_virtcol = MS(ads->ds_ctl1, AR_VirtRetryCnt);
	ts->ts_antenna = 0; /* We don't switch antennas on Owl*/

	/* handle tx trigger level changes internally */
	if ((ts->ts_status & HAL_TXERR_FIFO) ||
	    (ts->ts_flags & (HAL_TX_DATA_UNDERRUN | HAL_TX_DELIM_UNDERRUN)))
		ar5212UpdateTxTrigLevel(ah, AH_TRUE);

	return HAL_OK;
}
642 
643 HAL_BOOL
644 ar5416SetGlobalTxTimeout(struct ath_hal *ah, u_int tu)
645 {
646 	struct ath_hal_5416 *ahp = AH5416(ah);
647 
648 	if (tu > 0xFFFF) {
649 		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: bad global tx timeout %u\n",
650 		    __func__, tu);
651 		/* restore default handling */
652 		ahp->ah_globaltxtimeout = (u_int) -1;
653 		return AH_FALSE;
654 	}
655 	OS_REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu);
656 	ahp->ah_globaltxtimeout = tu;
657 	return AH_TRUE;
658 }
659 
660 u_int
661 ar5416GetGlobalTxTimeout(struct ath_hal *ah)
662 {
663 	return MS(OS_REG_READ(ah, AR_GTXTO), AR_GTXTO_TIMEOUT_LIMIT);
664 }
665 
666 void
667 ar5416Set11nRateScenario(struct ath_hal *ah, struct ath_desc *ds,
668         u_int durUpdateEn, u_int rtsctsRate,
669 	HAL_11N_RATE_SERIES series[], u_int nseries, u_int flags)
670 {
671 	struct ar5416_desc *ads = AR5416DESC(ds);
672 	uint32_t ds_ctl0;
673 
674 	HALASSERT(nseries == 4);
675 	(void)nseries;
676 
677 	/*
678 	 * XXX since the upper layers doesn't know the current chainmask
679 	 * XXX setup, just override its decisions here.
680 	 * XXX The upper layers need to be taught this!
681 	 */
682 	if (series[0].Tries != 0)
683 		series[0].ChSel = AH5416(ah)->ah_tx_chainmask;
684 	if (series[1].Tries != 0)
685 		series[1].ChSel = AH5416(ah)->ah_tx_chainmask;
686 	if (series[2].Tries != 0)
687 		series[2].ChSel = AH5416(ah)->ah_tx_chainmask;
688 	if (series[3].Tries != 0)
689 		series[3].ChSel = AH5416(ah)->ah_tx_chainmask;
690 
691 	/*
692 	 * Only one of RTS and CTS enable must be set.
693 	 * If a frame has both set, just do RTS protection -
694 	 * that's enough to satisfy legacy protection.
695 	 */
696 	if (flags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) {
697 		ds_ctl0 = ads->ds_ctl0;
698 
699 		if (flags & HAL_TXDESC_RTSENA) {
700 			ds_ctl0 &= ~AR_CTSEnable;
701 			ds_ctl0 |= AR_RTSEnable;
702 		} else {
703 			ds_ctl0 &= ~AR_RTSEnable;
704 			ds_ctl0 |= AR_CTSEnable;
705 		}
706 
707 		ads->ds_ctl0 = ds_ctl0;
708 	} else {
709 		ads->ds_ctl0 =
710 		    (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
711 	}
712 
713 	ads->ds_ctl2 = set11nTries(series, 0)
714 		     | set11nTries(series, 1)
715 		     | set11nTries(series, 2)
716 		     | set11nTries(series, 3)
717 		     | (durUpdateEn ? AR_DurUpdateEn : 0);
718 
719 	ads->ds_ctl3 = set11nRate(series, 0)
720 		     | set11nRate(series, 1)
721 		     | set11nRate(series, 2)
722 		     | set11nRate(series, 3);
723 
724 	ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
725 		     | set11nPktDurRTSCTS(series, 1);
726 
727 	ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
728 		     | set11nPktDurRTSCTS(series, 3);
729 
730 	ads->ds_ctl7 = set11nRateFlags(series, 0)
731 		     | set11nRateFlags(series, 1)
732 		     | set11nRateFlags(series, 2)
733 		     | set11nRateFlags(series, 3)
734 		     | SM(rtsctsRate, AR_RTSCTSRate);
735 }
736 
737 void
738 ar5416Set11nAggrFirst(struct ath_hal *ah, struct ath_desc *ds, u_int aggrLen)
739 {
740 	struct ar5416_desc *ads = AR5416DESC(ds);
741 
742 	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
743 
744 	ads->ds_ctl6 &= ~(AR_AggrLen | AR_PadDelim);
745 	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
746 }
747 
748 void
749 ar5416Set11nAggrMiddle(struct ath_hal *ah, struct ath_desc *ds, u_int numDelims)
750 {
751 	struct ar5416_desc *ads = AR5416DESC(ds);
752 	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);
753 
754 	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
755 
756 	ads->ds_ctl6 &= ~AR_PadDelim;
757 	ads->ds_ctl6 |= SM(numDelims, AR_PadDelim);
758 	ads->ds_ctl6 &= ~AR_AggrLen;
759 
760 	/*
761 	 * Clear the TxDone status here, may need to change
762 	 * func name to reflect this
763 	 */
764 	ds_txstatus[9] &= ~AR_TxDone;
765 }
766 
767 void
768 ar5416Set11nAggrLast(struct ath_hal *ah, struct ath_desc *ds)
769 {
770 	struct ar5416_desc *ads = AR5416DESC(ds);
771 
772 	ads->ds_ctl1 |= AR_IsAggr;
773 	ads->ds_ctl1 &= ~AR_MoreAggr;
774 	ads->ds_ctl6 &= ~AR_PadDelim;
775 }
776 
777 void
778 ar5416Clr11nAggr(struct ath_hal *ah, struct ath_desc *ds)
779 {
780 	struct ar5416_desc *ads = AR5416DESC(ds);
781 
782 	ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
783 	ads->ds_ctl6 &= ~AR_PadDelim;
784 	ads->ds_ctl6 &= ~AR_AggrLen;
785 }
786 
787 void
788 ar5416Set11nBurstDuration(struct ath_hal *ah, struct ath_desc *ds,
789                                                   u_int burstDuration)
790 {
791 	struct ar5416_desc *ads = AR5416DESC(ds);
792 
793 	ads->ds_ctl2 &= ~AR_BurstDur;
794 	ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
795 }
796 
797 /*
798  * Retrieve the rate table from the given TX completion descriptor
799  */
800 HAL_BOOL
801 ar5416GetTxCompletionRates(struct ath_hal *ah, const struct ath_desc *ds0, int *rates, int *tries)
802 {
803 	const struct ar5416_desc *ads = AR5416DESC_CONST(ds0);
804 
805 	rates[0] = MS(ads->ds_ctl3, AR_XmitRate0);
806 	rates[1] = MS(ads->ds_ctl3, AR_XmitRate1);
807 	rates[2] = MS(ads->ds_ctl3, AR_XmitRate2);
808 	rates[3] = MS(ads->ds_ctl3, AR_XmitRate3);
809 
810 	tries[0] = MS(ads->ds_ctl2, AR_XmitDataTries0);
811 	tries[1] = MS(ads->ds_ctl2, AR_XmitDataTries1);
812 	tries[2] = MS(ads->ds_ctl2, AR_XmitDataTries2);
813 	tries[3] = MS(ads->ds_ctl2, AR_XmitDataTries3);
814 
815 	return AH_TRUE;
816 }
817 
818 
819 /*
820  * TX queue management routines - AR5416 and later chipsets
821  */
822 
823 /*
824  * Allocate and initialize a tx DCU/QCU combination.
825  */
/*
 * Allocate a TX queue of the requested type and record its initial
 * configuration in the software queue state.
 *
 * Returns the queue number, or -1 if no queue of that type is
 * available.  NB: the caller must follow up with ar5212ResetTxQueue
 * before the queue is usable.
 */
int
ar5416SetupTxQueue(struct ath_hal *ah, HAL_TX_QUEUE type,
	const HAL_TXQ_INFO *qInfo)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_TX_QUEUE_INFO *qi;
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	int q, defqflags;

	/* by default enable OK+ERR+DESC+URN interrupts */
	defqflags = HAL_TXQ_TXOKINT_ENABLE
		  | HAL_TXQ_TXERRINT_ENABLE
		  | HAL_TXQ_TXDESCINT_ENABLE
		  | HAL_TXQ_TXURNINT_ENABLE;
	/* XXX move queue assignment to driver */
	switch (type) {
	case HAL_TX_QUEUE_BEACON:
		q = pCap->halTotalQueues-1;	/* highest priority */
		defqflags |= HAL_TXQ_DBA_GATED
		       | HAL_TXQ_CBR_DIS_QEMPTY
		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
		       | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_CAB:
		q = pCap->halTotalQueues-2;	/* next highest priority */
		defqflags |= HAL_TXQ_DBA_GATED
		       | HAL_TXQ_CBR_DIS_QEMPTY
		       | HAL_TXQ_CBR_DIS_BEMPTY
		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
		       | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_PSPOLL:
		q = 1;				/* lowest priority */
		defqflags |= HAL_TXQ_DBA_GATED
		       | HAL_TXQ_CBR_DIS_QEMPTY
		       | HAL_TXQ_CBR_DIS_BEMPTY
		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
		       | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_UAPSD:
		q = pCap->halTotalQueues-3;	/* next-highest priority */
		if (ahp->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: no available UAPSD tx queue\n", __func__);
			return -1;
		}
		break;
	case HAL_TX_QUEUE_DATA:
		/* Data queues: take the first inactive slot. */
		for (q = 0; q < pCap->halTotalQueues; q++)
			if (ahp->ah_txq[q].tqi_type == HAL_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->halTotalQueues) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: no available tx queue\n", __func__);
			return -1;
		}
		break;
	default:
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: bad tx queue type %u\n", __func__, type);
		return -1;
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type != HAL_TX_QUEUE_INACTIVE) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: tx queue %u already active\n",
		    __func__, q);
		return -1;
	}
	/* Initialize the queue's software state. */
	OS_MEMZERO(qi, sizeof(HAL_TX_QUEUE_INFO));
	qi->tqi_type = type;
	if (qInfo == AH_NULL) {
		/* No caller-supplied config: apply the defaults. */
		qi->tqi_qflags = defqflags;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = HAL_TXQ_USEDEFAULT;	/* NB: do at reset */
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qInfo->tqi_compBuf;
		(void) ar5212SetTxQueueProps(ah, q, qInfo);
	}
	/* NB: must be followed by ar5212ResetTxQueue */
	return q;
}
914 
915 /*
916  * Update the h/w interrupt registers to reflect a tx q's configuration.
917  */
static void
setTxQInterrupts(struct ath_hal *ah, HAL_TX_QUEUE_INFO *qi)
{
	struct ath_hal_5212 *ahp = AH5212(ah);

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
	    "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", __func__,
	    ahp->ah_txOkInterruptMask, ahp->ah_txErrInterruptMask,
	    ahp->ah_txDescInterruptMask, ahp->ah_txEolInterruptMask,
	    ahp->ah_txUrnInterruptMask);

	/* TXOK/TXDESC masks live in secondary IMR 0. */
	OS_REG_WRITE(ah, AR_IMR_S0,
		  SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
		| SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC)
	);
	/* TXERR/TXEOL masks live in secondary IMR 1. */
	OS_REG_WRITE(ah, AR_IMR_S1,
		  SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
		| SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL)
	);
	/* TXURN mask is a field within secondary IMR 2; RMW it. */
	OS_REG_RMW_FIELD(ah, AR_IMR_S2,
		AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
}
940 
/*
 * Set the retry, aifs, cwmin/max, readyTime regs for specified queue.
 * Programs the QCU (AR_QMISC et al) and DCU (AR_DMISC et al) registers
 * for queue q from the shadow state in ahp->ah_txq[q], then updates the
 * secondary interrupt mask registers to match the queue's flags.
 * Returns AH_FALSE only for an out-of-range queue number.
 * Assumes:
 *  phwChannel has been set to point to the current channel
 */
#define	TU_TO_USEC(_tu)		((_tu) << 10)	/* NB: 1 TU == 1024 usec */
HAL_BOOL
ar5416ResetTxQueue(struct ath_hal *ah, u_int q)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	const struct ieee80211_channel *chan = AH_PRIVATE(ah)->ah_curchan;
	HAL_TX_QUEUE_INFO *qi;
	uint32_t cwMin, chanCwMin, qmisc, dmisc;

	if (q >= pCap->halTotalQueues) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
		    __func__, q);
		return AH_FALSE;
	}
	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
		/* Nothing to program for an unconfigured queue. */
		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
		    __func__, q);
		return AH_TRUE;		/* XXX??? */
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: reset queue %u\n", __func__, q);

	if (qi->tqi_cwmin == HAL_TXQ_USEDEFAULT) {
		/*
		 * Select cwmin according to channel type.
		 * NB: chan can be NULL during attach
		 */
		if (chan && IEEE80211_IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;
		/* make sure that the CWmin is of the form (2^n - 1) */
		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1)
			;
	} else
		cwMin = qi->tqi_cwmin;

	/* set cwMin/Max and AIFS values */
	OS_REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN)
		| SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
		| SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	/* Set retry limit values */
	OS_REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		   SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
		 | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
		 | SM(qi->tqi_lgretry, AR_D_RETRY_LIMIT_FR_LG)
		 | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)
	);

	/* NB: always enable early termination on the QCU */
	qmisc = AR_Q_MISC_DCU_EARLY_TERM_REQ
	      | SM(AR_Q_MISC_FSP_ASAP, AR_Q_MISC_FSP);

	/* NB: always enable DCU to wait for next fragment from QCU */
	dmisc = AR_D_MISC_FRAG_WAIT_EN;

	/* Enable exponential backoff window */
	dmisc |= AR_D_MISC_BKOFF_PERSISTENCE;

	/*
	 * The chip reset default is to use a DCU backoff threshold of 0x2.
	 * Restore this when programming the DCU MISC register.
	 */
	dmisc |= 0x2;

	/* multiqueue support */
	if (qi->tqi_cbrPeriod) {
		/* Constant bit rate scheduling: switch frame scheduling
		 * policy from ASAP to CBR. */
		OS_REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod,AR_Q_CBRCFG_CBR_INTERVAL)
			| SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_CBR_OVF_THRESH));
		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_CBR;
		if (qi->tqi_cbrOverflowLimit)
			qmisc |= AR_Q_MISC_CBR_EXP_CNTR_LIMIT;
	}

	/* NB: the CAB queue programs its own ready time below. */
	if (qi->tqi_readyTime && (qi->tqi_type != HAL_TX_QUEUE_CAB)) {
		OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT)
			| AR_Q_RDYTIMECFG_ENA);
	}

	/* Channel-time (burst) limit; enabled only when non-zero. */
	OS_REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR)
		| (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_readyTime &&
	    (qi->tqi_qflags & HAL_TXQ_RDYTIME_EXP_POLICY_ENABLE))
		qmisc |= AR_Q_MISC_RDYTIME_EXP_POLICY;
	if (qi->tqi_qflags & HAL_TXQ_DBA_GATED)
		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_DBA_GATED;
	if (MS(qmisc, AR_Q_MISC_FSP) != AR_Q_MISC_FSP_ASAP) {
		/*
		 * These are meaningful only when not scheduled asap.
		 */
		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_BEMPTY)
			qmisc |= AR_Q_MISC_CBR_INCR_DIS0;
		else
			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS0;
		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_QEMPTY)
			qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
		else
			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS1;
	}

	if (qi->tqi_qflags & HAL_TXQ_BACKOFF_DISABLE)
		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
	if (qi->tqi_qflags & HAL_TXQ_FRAG_BURST_BACKOFF_ENABLE)
		dmisc |= AR_D_MISC_FRAG_BKOFF_EN;
	if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_GLOBAL)
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
	else if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_INTRA)
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
	if (qi->tqi_qflags & HAL_TXQ_IGNORE_VIRTCOL)
		dmisc |= SM(AR_D_MISC_VIR_COL_HANDLING_IGNORE,
			    AR_D_MISC_VIR_COL_HANDLING);
	if (qi->tqi_qflags & HAL_TXQ_SEQNUM_INC_DIS)
		dmisc |= AR_D_MISC_SEQ_NUM_INCR_DIS;

	/*
	 * Fillin type-dependent bits.  Most of this can be
	 * removed by specifying the queue parameters in the
	 * driver; it's here for backwards compatibility.
	 */
	switch (qi->tqi_type) {
	case HAL_TX_QUEUE_BEACON:		/* beacon frames */
		qmisc |= AR_Q_MISC_FSP_DBA_GATED
		      |  AR_Q_MISC_BEACON_USE
		      |  AR_Q_MISC_CBR_INCR_DIS1;

		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL)
		      |  AR_D_MISC_BEACON_USE
		      |  AR_D_MISC_POST_FR_BKOFF_DIS;
		break;
	case HAL_TX_QUEUE_CAB:			/* CAB  frames */
		/*
		 * No longer Enable AR_Q_MISC_RDYTIME_EXP_POLICY,
		 * There is an issue with the CAB Queue
		 * not properly refreshing the Tx descriptor if
		 * the TXE clear setting is used.
		 */
		qmisc |= AR_Q_MISC_FSP_DBA_GATED
		      |  AR_Q_MISC_CBR_INCR_DIS1
		      |  AR_Q_MISC_CBR_INCR_DIS0;
		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: CAB: tqi_readyTime = %d\n",
		    __func__, qi->tqi_readyTime);
		if (qi->tqi_readyTime) {
			/* NB: tqi_readyTime is already in usec here. */
			HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
			    "%s: using tqi_readyTime\n", __func__);
			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
			    SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT) |
			    AR_Q_RDYTIMECFG_ENA);
		} else {
			int value;
			/*
			 * NB: don't set default ready time if driver
			 * has explicitly specified something.  This is
			 * here solely for backwards compatibility.
			 */
			/*
			 * XXX for now, hard-code a CAB interval of 70%
			 * XXX of the total beacon interval.
			 *
			 * XXX This keeps Merlin and later based MACs
			 * XXX quite a bit happier (stops stuck beacons,
			 * XXX which I gather is because of such a long
			 * XXX cabq time.)
			 */
			/*
			 * NB: value is computed in TU here (beacon interval
			 * and the ah_config response/backoff fields are TU)
			 * and converted to usec via TU_TO_USEC at the
			 * register write below.
			 */
			value = (ahp->ah_beaconInterval * 70 / 100)
				- (ah->ah_config.ah_sw_beacon_response_time
				+ ah->ah_config.ah_dma_beacon_response_time)
				- ah->ah_config.ah_additional_swba_backoff;
			/*
			 * XXX Ensure it isn't too low - nothing lower
			 * XXX than 10 TU
			 */
			if (value < 10)
				value = 10;
			HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
			    "%s: defaulting to rdytime = %d uS\n",
			    __func__, value);
			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
			    SM(TU_TO_USEC(value), AR_Q_RDYTIMECFG_INT) |
			    AR_Q_RDYTIMECFG_ENA);
		}
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
		break;
	case HAL_TX_QUEUE_PSPOLL:
		qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
		break;
	case HAL_TX_QUEUE_UAPSD:
		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
		break;
	default:			/* NB: silence compiler */
		break;
	}

	/* Commit the accumulated QCU/DCU misc settings. */
	OS_REG_WRITE(ah, AR_QMISC(q), qmisc);
	OS_REG_WRITE(ah, AR_DMISC(q), dmisc);

	/* Setup compression scratchpad buffer */
	/*
	 * XXX: calling this asynchronously to queue operation can
	 *      cause unexpected behavior!!!
	 */
	if (qi->tqi_physCompBuf) {
		/* Compression is only valid on data/UAPSD queues. */
		HALASSERT(qi->tqi_type == HAL_TX_QUEUE_DATA ||
			  qi->tqi_type == HAL_TX_QUEUE_UAPSD);
		OS_REG_WRITE(ah, AR_Q_CBBS, (80 + 2*q));
		OS_REG_WRITE(ah, AR_Q_CBBA, qi->tqi_physCompBuf);
		OS_REG_WRITE(ah, AR_Q_CBC,  HAL_COMP_BUF_MAX_SIZE/1024);
		OS_REG_WRITE(ah, AR_Q0_MISC + 4*q,
			     OS_REG_READ(ah, AR_Q0_MISC + 4*q)
			     | AR_Q_MISC_QCU_COMP_EN);
	}

	/*
	 * Always update the secondary interrupt mask registers - this
	 * could be a new queue getting enabled in a running system or
	 * hw getting re-initialized during a reset!
	 *
	 * Since we don't differentiate between tx interrupts corresponding
	 * to individual queues - secondary tx mask regs are always unmasked;
	 * tx interrupts are enabled/disabled for all queues collectively
	 * using the primary mask reg
	 */
	if (qi->tqi_qflags & HAL_TXQ_TXOKINT_ENABLE)
		ahp->ah_txOkInterruptMask |= 1 << q;
	else
		ahp->ah_txOkInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXERRINT_ENABLE)
		ahp->ah_txErrInterruptMask |= 1 << q;
	else
		ahp->ah_txErrInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXDESCINT_ENABLE)
		ahp->ah_txDescInterruptMask |= 1 << q;
	else
		ahp->ah_txDescInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXEOLINT_ENABLE)
		ahp->ah_txEolInterruptMask |= 1 << q;
	else
		ahp->ah_txEolInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXURNINT_ENABLE)
		ahp->ah_txUrnInterruptMask |= 1 << q;
	else
		ahp->ah_txUrnInterruptMask &= ~(1 << q);
	setTxQInterrupts(ah, qi);

	return AH_TRUE;
}
#undef	TU_TO_USEC
1204