xref: /freebsd/sys/dev/ath/ath_hal/ar5416/ar5416_xmit.c (revision f5f7c05209ca2c3748fd8b27c5e80ffad49120eb)
1 /*
2  * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3  * Copyright (c) 2002-2008 Atheros Communications, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  *
17  * $FreeBSD$
18  */
19 #include "opt_ah.h"
20 
21 #include "ah.h"
22 #include "ah_desc.h"
23 #include "ah_internal.h"
24 
25 #include "ar5416/ar5416.h"
26 #include "ar5416/ar5416reg.h"
27 #include "ar5416/ar5416phy.h"
28 #include "ar5416/ar5416desc.h"
29 
30 /*
31  * Stop transmit on the specified queue
32  */
33 HAL_BOOL
34 ar5416StopTxDma(struct ath_hal *ah, u_int q)
35 {
36 #define	STOP_DMA_TIMEOUT	4000	/* us */
37 #define	STOP_DMA_ITER		100	/* us */
38 	u_int i;
39 
40 	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
41 
42 	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
43 
44 	OS_REG_WRITE(ah, AR_Q_TXD, 1 << q);
45 	for (i = STOP_DMA_TIMEOUT/STOP_DMA_ITER; i != 0; i--) {
46 		if (ar5212NumTxPending(ah, q) == 0)
47 			break;
48 		OS_DELAY(STOP_DMA_ITER);
49 	}
50 #ifdef AH_DEBUG
51 	if (i == 0) {
52 		HALDEBUG(ah, HAL_DEBUG_ANY,
53 		    "%s: queue %u DMA did not stop in 400 msec\n", __func__, q);
54 		HALDEBUG(ah, HAL_DEBUG_ANY,
55 		    "%s: QSTS 0x%x Q_TXE 0x%x Q_TXD 0x%x Q_CBR 0x%x\n", __func__,
56 		    OS_REG_READ(ah, AR_QSTS(q)), OS_REG_READ(ah, AR_Q_TXE),
57 		    OS_REG_READ(ah, AR_Q_TXD), OS_REG_READ(ah, AR_QCBRCFG(q)));
58 		HALDEBUG(ah, HAL_DEBUG_ANY,
59 		    "%s: Q_MISC 0x%x Q_RDYTIMECFG 0x%x Q_RDYTIMESHDN 0x%x\n",
60 		    __func__, OS_REG_READ(ah, AR_QMISC(q)),
61 		    OS_REG_READ(ah, AR_QRDYTIMECFG(q)),
62 		    OS_REG_READ(ah, AR_Q_RDYTIMESHDN));
63 	}
64 #endif /* AH_DEBUG */
65 
66 	/* ar5416 and up can kill packets at the PCU level */
67 	if (ar5212NumTxPending(ah, q)) {
68 		uint32_t j;
69 
70 		HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
71 		    "%s: Num of pending TX Frames %d on Q %d\n",
72 		    __func__, ar5212NumTxPending(ah, q), q);
73 
74 		/* Kill last PCU Tx Frame */
75 		/* TODO - save off and restore current values of Q1/Q2? */
76 		for (j = 0; j < 2; j++) {
77 			uint32_t tsfLow = OS_REG_READ(ah, AR_TSF_L32);
78 			OS_REG_WRITE(ah, AR_QUIET2,
79 			    SM(10, AR_QUIET2_QUIET_DUR));
80 			OS_REG_WRITE(ah, AR_QUIET_PERIOD, 100);
81 			OS_REG_WRITE(ah, AR_NEXT_QUIET, tsfLow >> 10);
82 			OS_REG_SET_BIT(ah, AR_TIMER_MODE, AR_TIMER_MODE_QUIET);
83 
84 			if ((OS_REG_READ(ah, AR_TSF_L32)>>10) == (tsfLow>>10))
85 				break;
86 
87 			HALDEBUG(ah, HAL_DEBUG_ANY,
88 			    "%s: TSF moved while trying to set quiet time "
89 			    "TSF: 0x%08x\n", __func__, tsfLow);
90 			HALASSERT(j < 1); /* TSF shouldn't count twice or reg access is taking forever */
91 		}
92 
93 		OS_REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
94 
95 		/* Allow the quiet mechanism to do its work */
96 		OS_DELAY(200);
97 		OS_REG_CLR_BIT(ah, AR_TIMER_MODE, AR_TIMER_MODE_QUIET);
98 
99 		/* Verify the transmit q is empty */
100 		for (i = STOP_DMA_TIMEOUT/STOP_DMA_ITER; i != 0; i--) {
101 			if (ar5212NumTxPending(ah, q) == 0)
102 				break;
103 			OS_DELAY(STOP_DMA_ITER);
104 		}
105 		if (i == 0) {
106 			HALDEBUG(ah, HAL_DEBUG_ANY,
107 			    "%s: Failed to stop Tx DMA in %d msec after killing"
108 			    " last frame\n", __func__, STOP_DMA_TIMEOUT / 1000);
109 		}
110 		OS_REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
111 	}
112 
113 	OS_REG_WRITE(ah, AR_Q_TXD, 0);
114 	return (i != 0);
115 #undef STOP_DMA_ITER
116 #undef STOP_DMA_TIMEOUT
117 }
118 
/* Bitmask of key types a TX descriptor may carry in its EncrType field. */
#define VALID_KEY_TYPES \
        ((1 << HAL_KEY_TYPE_CLEAR) | (1 << HAL_KEY_TYPE_WEP)|\
         (1 << HAL_KEY_TYPE_AES)   | (1 << HAL_KEY_TYPE_TKIP))
#define isValidKeyType(_t)      ((1 << (_t)) & VALID_KEY_TYPES)

/* Shift the per-series try count into the ds_ctl2 field for series _index. */
#define set11nTries(_series, _index) \
        (SM((_series)[_index].Tries, AR_XmitDataTries##_index))

/* Shift the per-series rate code into the ds_ctl3 field for series _index. */
#define set11nRate(_series, _index) \
        (SM((_series)[_index].Rate, AR_XmitRate##_index))

/*
 * Pack the packet duration for series _index and, if the series requests
 * RTS/CTS protection, set the matching RTSCTSQual bit.
 */
#define set11nPktDurRTSCTS(_series, _index) \
        (SM((_series)[_index].PktDuration, AR_PacketDur##_index) |\
         ((_series)[_index].RateFlags & HAL_RATESERIES_RTS_CTS   ?\
         AR_RTSCTSQual##_index : 0))

/*
 * Translate the per-series rate flags (HT40, half-GI, STBC) and chain
 * selection into the ds_ctl7 bits for series _index.
 */
#define set11nRateFlags(_series, _index) \
        ((_series)[_index].RateFlags & HAL_RATESERIES_2040 ? AR_2040_##_index : 0) \
        |((_series)[_index].RateFlags & HAL_RATESERIES_HALFGI ? AR_GI##_index : 0) \
        |((_series)[_index].RateFlags & HAL_RATESERIES_STBC ? AR_STBC##_index : 0) \
        |SM((_series)[_index].ChSel, AR_ChainSel##_index)

/*
 * Descriptor Access Functions
 */

/* Packet types that may be placed in a TX descriptor's FrameType field. */
#define VALID_PKT_TYPES \
        ((1<<HAL_PKT_TYPE_NORMAL)|(1<<HAL_PKT_TYPE_ATIM)|\
         (1<<HAL_PKT_TYPE_PSPOLL)|(1<<HAL_PKT_TYPE_PROBE_RESP)|\
         (1<<HAL_PKT_TYPE_BEACON)|(1<<HAL_PKT_TYPE_AMPDU))
#define isValidPktType(_t)      ((1<<(_t)) & VALID_PKT_TYPES)
/* Bitmask of legal h/w rate codes (CCK + OFDM). */
#define VALID_TX_RATES \
        ((1<<0x0b)|(1<<0x0f)|(1<<0x0a)|(1<<0x0e)|(1<<0x09)|(1<<0x0d)|\
         (1<<0x08)|(1<<0x0c)|(1<<0x1b)|(1<<0x1a)|(1<<0x1e)|(1<<0x19)|\
	 (1<<0x1d)|(1<<0x18)|(1<<0x1c)|(1<<0x01)|(1<<0x02)|(1<<0x03)|\
	 (1<<0x04)|(1<<0x05)|(1<<0x06)|(1<<0x07)|(1<<0x00))
/* NB: accept HT rates (the 0x80 MCS bit is masked off before the lookup) */
#define	isValidTxRate(_r)	((1<<((_r) & 0x7f)) & VALID_TX_RATES)
157 
/*
 * Fill in the transmit control words of a TX descriptor for a
 * single-rate (series 0 only) frame: frame length, TX power, flags,
 * rate/tries, chain selection, optional key index and RTS/CTS settings.
 * Returns AH_FALSE only when RTS/CTS is requested with an invalid rate.
 */
HAL_BOOL
ar5416SetupTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int pktLen,
	u_int hdrLen,
	HAL_PKT_TYPE type,
	u_int txPower,
	u_int txRate0, u_int txTries0,
	u_int keyIx,
	u_int antMode,
	u_int flags,
	u_int rtsctsRate,
	u_int rtsctsDuration,
	u_int compicvLen,
	u_int compivLen,
	u_int comp)
{
#define	RTSCTS	(HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath_hal_5416 *ahp = AH5416(ah);

	(void) hdrLen;

	HALASSERT(txTries0 != 0);
	HALASSERT(isValidPktType(type));
	HALASSERT(isValidTxRate(txRate0));
	/* RTS and CTS enable are mutually exclusive */
	HALASSERT((flags & RTSCTS) != RTSCTS);
	/* XXX validate antMode */

	/* Apply the per-chip power offset and clamp to the 6-bit field. */
        txPower = (txPower + AH5212(ah)->ah_txPowerIndexOffset);
        if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 = (pktLen & AR_FrameLen)
		     | (txPower << AR_XmitPower_S)
		     | (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
		     | (flags & HAL_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		     | (flags & HAL_TXDESC_INTREQ ? AR_TxIntrReq : 0)
		     ;
	ads->ds_ctl1 = (type << AR_FrameType_S)
		     | (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0)
                     ;
	ads->ds_ctl2 = SM(txTries0, AR_XmitDataTries0)
		     | (flags & HAL_TXDESC_DURENA ? AR_DurUpdateEn : 0)
		     ;
	ads->ds_ctl3 = (txRate0 << AR_XmitRate0_S)
		     ;
	ads->ds_ctl4 = 0;
	ads->ds_ctl5 = 0;
	ads->ds_ctl6 = 0;
	/* Use the configured TX chainmask for all four rate series. */
	ads->ds_ctl7 = SM(ahp->ah_tx_chainmask, AR_ChainSel0)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel1)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel2)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel3)
		     ;
	/* Antenna control 0; mirror the clamped power into series 1..3. */
	ads->ds_ctl8 = SM(0, AR_AntCtl0);
	ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(txPower, AR_XmitPower1);
	ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(txPower, AR_XmitPower2);
	ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(txPower, AR_XmitPower3);

	if (keyIx != HAL_TXKEYIX_INVALID) {
		/* XXX validate key index */
		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
		ads->ds_ctl0 |= AR_DestIdxValid;
		ads->ds_ctl6 |= SM(ahp->ah_keytype[keyIx], AR_EncrType);
	}
	if (flags & RTSCTS) {
		if (!isValidTxRate(rtsctsRate)) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: invalid rts/cts rate 0x%x\n",
			    __func__, rtsctsRate);
			return AH_FALSE;
		}
		/* XXX validate rtsctsDuration */
		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
			     | (flags & HAL_TXDESC_RTSENA ? AR_RTSEnable : 0)
			     ;
		ads->ds_ctl7 |= (rtsctsRate << AR_RTSCTSRate_S);
	}

	/*
	 * Set the TX antenna to 0 for Kite
	 * To preserve existing behaviour, also set the TPC bits to 0;
	 * when TPC is enabled these should be filled in appropriately.
	 */
	if (AR_SREV_KITE(ah)) {
		ads->ds_ctl8 = SM(0, AR_AntCtl0);
		ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(0, AR_XmitPower1);
		ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(0, AR_XmitPower2);
		ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(0, AR_XmitPower3);
	}
	return AH_TRUE;
#undef RTSCTS
}
251 
252 HAL_BOOL
253 ar5416SetupXTxDesc(struct ath_hal *ah, struct ath_desc *ds,
254 	u_int txRate1, u_int txTries1,
255 	u_int txRate2, u_int txTries2,
256 	u_int txRate3, u_int txTries3)
257 {
258 	struct ar5416_desc *ads = AR5416DESC(ds);
259 
260 	if (txTries1) {
261 		HALASSERT(isValidTxRate(txRate1));
262 		ads->ds_ctl2 |= SM(txTries1, AR_XmitDataTries1);
263 		ads->ds_ctl3 |= (txRate1 << AR_XmitRate1_S);
264 	}
265 	if (txTries2) {
266 		HALASSERT(isValidTxRate(txRate2));
267 		ads->ds_ctl2 |= SM(txTries2, AR_XmitDataTries2);
268 		ads->ds_ctl3 |= (txRate2 << AR_XmitRate2_S);
269 	}
270 	if (txTries3) {
271 		HALASSERT(isValidTxRate(txRate3));
272 		ads->ds_ctl2 |= SM(txTries3, AR_XmitDataTries3);
273 		ads->ds_ctl3 |= (txRate3 << AR_XmitRate3_S);
274 	}
275 	return AH_TRUE;
276 }
277 
/*
 * Link a buffer segment into a (possibly multi-descriptor) frame.
 * For the first descriptor the xmit control words set up by the
 * SetupTxDesc path are preserved; for later descriptors the relevant
 * control words are copied from ds0 so completion processing on the
 * last descriptor sees the rate/try configuration.
 */
HAL_BOOL
ar5416FillTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	HAL_DMA_ADDR *bufAddrList, uint32_t *segLenList, u_int descId,
	u_int qcuId, HAL_BOOL firstSeg, HAL_BOOL lastSeg,
	const struct ath_desc *ds0)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t segLen = segLenList[0];

	/* Segment length must fit in the BufLen field */
	HALASSERT((segLen &~ AR_BufLen) == 0);

	ds->ds_data = bufAddrList[0];

	if (firstSeg) {
		/*
		 * First descriptor, don't clobber xmit control data
		 * setup by ar5212SetupTxDesc.
		 */
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
	} else if (lastSeg) {		/* !firstSeg && lastSeg */
		/*
		 * Last descriptor in a multi-descriptor frame,
		 * copy the multi-rate transmit parameters from
		 * the first frame for processing on completion.
		 * NB: on big-endian hosts the first descriptor was
		 * already swapped for the h/w, so swap back here.
		 */
		ads->ds_ctl1 = segLen;
#ifdef AH_NEED_DESC_SWAP
		ads->ds_ctl0 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl0)
		    & AR_TxIntrReq;
		ads->ds_ctl2 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl2);
		ads->ds_ctl3 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl3);
#else
		ads->ds_ctl0 = AR5416DESC_CONST(ds0)->ds_ctl0 & AR_TxIntrReq;
		ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
#endif
	} else {			/* !firstSeg && !lastSeg */
		/*
		 * Intermediate descriptor in a multi-descriptor frame.
		 */
#ifdef AH_NEED_DESC_SWAP
		ads->ds_ctl0 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl0)
		    & AR_TxIntrReq;
#else
		ads->ds_ctl0 = AR5416DESC_CONST(ds0)->ds_ctl0 & AR_TxIntrReq;
#endif
		ads->ds_ctl1 = segLen | AR_TxMore;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	/* XXX only on last descriptor? */
	OS_MEMZERO(ads->u.tx.status, sizeof(ads->u.tx.status));
	return AH_TRUE;
}
332 
333 /*
334  * NB: cipher is no longer used, it's calculated.
335  */
336 HAL_BOOL
337 ar5416ChainTxDesc(struct ath_hal *ah, struct ath_desc *ds,
338 	HAL_DMA_ADDR *bufAddrList,
339 	uint32_t *segLenList,
340 	u_int pktLen,
341 	u_int hdrLen,
342 	HAL_PKT_TYPE type,
343 	u_int keyIx,
344 	HAL_CIPHER cipher,
345 	uint8_t delims,
346 	HAL_BOOL firstSeg,
347 	HAL_BOOL lastSeg,
348 	HAL_BOOL lastAggr)
349 {
350 	struct ar5416_desc *ads = AR5416DESC(ds);
351 	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);
352 	struct ath_hal_5416 *ahp = AH5416(ah);
353 	u_int segLen = segLenList[0];
354 
355 	int isaggr = 0;
356 	uint32_t last_aggr = 0;
357 
358 	(void) hdrLen;
359 	(void) ah;
360 
361 	HALASSERT((segLen &~ AR_BufLen) == 0);
362 	ds->ds_data = bufAddrList[0];
363 
364 	HALASSERT(isValidPktType(type));
365 	if (type == HAL_PKT_TYPE_AMPDU) {
366 		type = HAL_PKT_TYPE_NORMAL;
367 		isaggr = 1;
368 		if (lastAggr == AH_FALSE)
369 			last_aggr = AR_MoreAggr;
370 	}
371 
372 	/*
373 	 * Since this function is called before any of the other
374 	 * descriptor setup functions (at least in this particular
375 	 * 802.11n aggregation implementation), always bzero() the
376 	 * descriptor. Previously this would be done for all but
377 	 * the first segment.
378 	 * XXX TODO: figure out why; perhaps I'm using this slightly
379 	 * XXX incorrectly.
380 	 */
381 	OS_MEMZERO(ds->ds_hw, AR5416_DESC_TX_CTL_SZ);
382 
383 	/*
384 	 * Note: VEOL should only be for the last descriptor in the chain.
385 	 */
386 	ads->ds_ctl0 = (pktLen & AR_FrameLen);
387 
388 	/*
389 	 * For aggregates:
390 	 * + IsAggr must be set for all descriptors of all subframes of
391 	 *   the aggregate
392 	 * + MoreAggr must be set for all descriptors of all subframes
393 	 *   of the aggregate EXCEPT the last subframe;
394 	 * + MoreAggr must be _CLEAR_ for all descrpitors of the last
395 	 *   subframe of the aggregate.
396 	 */
397 	ads->ds_ctl1 = (type << AR_FrameType_S)
398 			| (isaggr ? (AR_IsAggr | last_aggr) : 0);
399 
400 	ads->ds_ctl2 = 0;
401 	ads->ds_ctl3 = 0;
402 	if (keyIx != HAL_TXKEYIX_INVALID) {
403 		/* XXX validate key index */
404 		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
405 		ads->ds_ctl0 |= AR_DestIdxValid;
406 	}
407 
408 	ads->ds_ctl6 |= SM(ahp->ah_keytype[keyIx], AR_EncrType);
409 	if (isaggr) {
410 		ads->ds_ctl6 |= SM(delims, AR_PadDelim);
411 	}
412 
413 	if (firstSeg) {
414 		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
415 	} else if (lastSeg) {           /* !firstSeg && lastSeg */
416 		ads->ds_ctl0 = 0;
417 		ads->ds_ctl1 |= segLen;
418 	} else {                        /* !firstSeg && !lastSeg */
419 		/*
420 		 * Intermediate descriptor in a multi-descriptor frame.
421 		 */
422 		ads->ds_ctl0 = 0;
423 		ads->ds_ctl1 |= segLen | AR_TxMore;
424 	}
425 	ds_txstatus[0] = ds_txstatus[1] = 0;
426 	ds_txstatus[9] &= ~AR_TxDone;
427 
428 	return AH_TRUE;
429 }
430 
431 HAL_BOOL
432 ar5416SetupFirstTxDesc(struct ath_hal *ah, struct ath_desc *ds,
433 	u_int aggrLen, u_int flags, u_int txPower,
434 	u_int txRate0, u_int txTries0, u_int antMode,
435 	u_int rtsctsRate, u_int rtsctsDuration)
436 {
437 #define RTSCTS  (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
438 	struct ar5416_desc *ads = AR5416DESC(ds);
439 	struct ath_hal_5212 *ahp = AH5212(ah);
440 
441 	HALASSERT(txTries0 != 0);
442 	HALASSERT(isValidTxRate(txRate0));
443 	HALASSERT((flags & RTSCTS) != RTSCTS);
444 	/* XXX validate antMode */
445 
446 	txPower = (txPower + ahp->ah_txPowerIndexOffset );
447 	if(txPower > 63)  txPower=63;
448 
449 	ads->ds_ctl0 |= (txPower << AR_XmitPower_S)
450 		| (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
451 		| (flags & HAL_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
452 		| (flags & HAL_TXDESC_INTREQ ? AR_TxIntrReq : 0);
453 	ads->ds_ctl1 |= (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0);
454 	ads->ds_ctl2 |= SM(txTries0, AR_XmitDataTries0);
455 	ads->ds_ctl3 |= (txRate0 << AR_XmitRate0_S);
456 	ads->ds_ctl7 = SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel0)
457 		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel1)
458 		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel2)
459 		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel3);
460 
461 	/* NB: no V1 WAR */
462 	ads->ds_ctl8 = SM(0, AR_AntCtl0);
463 	ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(txPower, AR_XmitPower1);
464 	ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(txPower, AR_XmitPower2);
465 	ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(txPower, AR_XmitPower3);
466 
467 	ads->ds_ctl6 &= ~(0xffff);
468 	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
469 
470 	if (flags & RTSCTS) {
471 		/* XXX validate rtsctsDuration */
472 		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
473 			| (flags & HAL_TXDESC_RTSENA ? AR_RTSEnable : 0);
474 	}
475 
476 	/*
477 	 * Set the TX antenna to 0 for Kite
478 	 * To preserve existing behaviour, also set the TPC bits to 0;
479 	 * when TPC is enabled these should be filled in appropriately.
480 	 */
481 	if (AR_SREV_KITE(ah)) {
482 		ads->ds_ctl8 = SM(0, AR_AntCtl0);
483 		ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(0, AR_XmitPower1);
484 		ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(0, AR_XmitPower2);
485 		ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(0, AR_XmitPower3);
486 	}
487 
488 	return AH_TRUE;
489 #undef RTSCTS
490 }
491 
/*
 * Complete the last descriptor of an aggregate: clear MoreAggr and the
 * delimiter padding, and copy the rate/try control words from the first
 * descriptor (ds0) so completion processing can see them.
 */
HAL_BOOL
ar5416SetupLastTxDesc(struct ath_hal *ah, struct ath_desc *ds,
		const struct ath_desc *ds0)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 &= ~AR_MoreAggr;
	ads->ds_ctl6 &= ~AR_PadDelim;

	/* hack to copy rate info to last desc for later processing */
#ifdef AH_NEED_DESC_SWAP
	/* ds0 is already in h/w (little-endian) order; swap back */
	ads->ds_ctl2 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl2);
	ads->ds_ctl3 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl3);
#else
	ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
	ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
#endif
	return AH_TRUE;
}
511 
#ifdef AH_NEED_DESC_SWAP
/*
 * Swap transmit descriptor words between host and h/w (little-endian)
 * byte order; only compiled on big-endian hosts.
 */
static __inline void
ar5416SwapTxDesc(struct ath_desc *ds)
{
	ds->ds_data = __bswap32(ds->ds_data);
	ds->ds_ctl0 = __bswap32(ds->ds_ctl0);
	ds->ds_ctl1 = __bswap32(ds->ds_ctl1);
	ds->ds_hw[0] = __bswap32(ds->ds_hw[0]);
	ds->ds_hw[1] = __bswap32(ds->ds_hw[1]);
	ds->ds_hw[2] = __bswap32(ds->ds_hw[2]);
	ds->ds_hw[3] = __bswap32(ds->ds_hw[3]);
}
#endif
526 
527 /*
528  * Processing of HW TX descriptor.
529  */
/*
 * Processing of HW TX descriptor: if the h/w has marked it done,
 * translate the status words into the driver-visible ath_tx_status.
 * Returns HAL_EINPROGRESS while the descriptor is still pending.
 */
HAL_STATUS
ar5416ProcTxDesc(struct ath_hal *ah,
	struct ath_desc *ds, struct ath_tx_status *ts)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);

#ifdef AH_NEED_DESC_SWAP
	if ((ds_txstatus[9] & __bswap32(AR_TxDone)) == 0)
		return HAL_EINPROGRESS;
	/* Put the whole descriptor back into host byte order first */
	ar5416SwapTxDesc(ds);
#else
	if ((ds_txstatus[9] & AR_TxDone) == 0)
		return HAL_EINPROGRESS;
#endif

	/* Update software copies of the HW status */
	ts->ts_seqnum = MS(ds_txstatus[9], AR_SeqNum);
	ts->ts_tstamp = AR_SendTimestamp(ds_txstatus);
	ts->ts_tid = MS(ds_txstatus[9], AR_TxTid);

	/* Map h/w error bits onto the HAL TX error flags */
	ts->ts_status = 0;
	if (ds_txstatus[1] & AR_ExcessiveRetries)
		ts->ts_status |= HAL_TXERR_XRETRY;
	if (ds_txstatus[1] & AR_Filtered)
		ts->ts_status |= HAL_TXERR_FILT;
	if (ds_txstatus[1] & AR_FIFOUnderrun)
		ts->ts_status |= HAL_TXERR_FIFO;
	if (ds_txstatus[9] & AR_TxOpExceeded)
		ts->ts_status |= HAL_TXERR_XTXOP;
	if (ds_txstatus[1] & AR_TxTimerExpired)
		ts->ts_status |= HAL_TXERR_TIMER_EXPIRED;

	ts->ts_flags  = 0;
	/* Block-ack status and bitmap for aggregate frames */
	if (ds_txstatus[0] & AR_TxBaStatus) {
		ts->ts_flags |= HAL_TX_BA;
		ts->ts_ba_low = AR_BaBitmapLow(ds_txstatus);
		ts->ts_ba_high = AR_BaBitmapHigh(ds_txstatus);
	}
	if (ds->ds_ctl1 & AR_IsAggr)
		ts->ts_flags |= HAL_TX_AGGR;
	if (ds_txstatus[1] & AR_DescCfgErr)
		ts->ts_flags |= HAL_TX_DESC_CFG_ERR;
	if (ds_txstatus[1] & AR_TxDataUnderrun)
		ts->ts_flags |= HAL_TX_DATA_UNDERRUN;
	if (ds_txstatus[1] & AR_TxDelimUnderrun)
		ts->ts_flags |= HAL_TX_DELIM_UNDERRUN;

	/*
	 * Extract the transmit rate used and mark the rate as
	 * ``alternate'' if it wasn't the series 0 rate.
	 */
	ts->ts_finaltsi =  MS(ds_txstatus[9], AR_FinalTxIdx);
	switch (ts->ts_finaltsi) {
	case 0:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate1);
		break;
	case 2:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate2);
		break;
	case 3:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate3);
		break;
	}

	/* Combined and per-chain RSSI plus EVM from the status words */
	ts->ts_rssi = MS(ds_txstatus[5], AR_TxRSSICombined);
	ts->ts_rssi_ctl[0] = MS(ds_txstatus[0], AR_TxRSSIAnt00);
	ts->ts_rssi_ctl[1] = MS(ds_txstatus[0], AR_TxRSSIAnt01);
	ts->ts_rssi_ctl[2] = MS(ds_txstatus[0], AR_TxRSSIAnt02);
	ts->ts_rssi_ext[0] = MS(ds_txstatus[5], AR_TxRSSIAnt10);
	ts->ts_rssi_ext[1] = MS(ds_txstatus[5], AR_TxRSSIAnt11);
	ts->ts_rssi_ext[2] = MS(ds_txstatus[5], AR_TxRSSIAnt12);
	ts->ts_evm0 = AR_TxEVM0(ds_txstatus);
	ts->ts_evm1 = AR_TxEVM1(ds_txstatus);
	ts->ts_evm2 = AR_TxEVM2(ds_txstatus);

	ts->ts_shortretry = MS(ds_txstatus[1], AR_RTSFailCnt);
	ts->ts_longretry = MS(ds_txstatus[1], AR_DataFailCnt);
	/*
	 * The retry count has the number of un-acked tries for the
	 * final series used.  When doing multi-rate retry we must
	 * fixup the retry count by adding in the try counts for
	 * each series that was fully-processed.  Beware that this
	 * takes values from the try counts in the final descriptor.
	 * These are not required by the hardware.  We assume they
	 * are placed there by the driver as otherwise we have no
	 * access and the driver can't do the calculation because it
	 * doesn't know the descriptor format.
	 *
	 * NB: the cases below deliberately fall through so that all
	 * fully-exhausted earlier series are accumulated.
	 */
	switch (ts->ts_finaltsi) {
	case 3: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries2);
		/* FALLTHROUGH */
	case 2: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries1);
		/* FALLTHROUGH */
	case 1: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries0);
	}

	/*
	 * These fields are not used. Zero these to preserve compatibility
	 * with existing drivers.
	 */
	ts->ts_virtcol = MS(ads->ds_ctl1, AR_VirtRetryCnt);
	ts->ts_antenna = 0; /* We don't switch antennas on Owl*/

	/* handle tx trigger level changes internally */
	if ((ts->ts_status & HAL_TXERR_FIFO) ||
	    (ts->ts_flags & (HAL_TX_DATA_UNDERRUN | HAL_TX_DELIM_UNDERRUN)))
		ar5212UpdateTxTrigLevel(ah, AH_TRUE);

	return HAL_OK;
}
642 
643 HAL_BOOL
644 ar5416SetGlobalTxTimeout(struct ath_hal *ah, u_int tu)
645 {
646 	struct ath_hal_5416 *ahp = AH5416(ah);
647 
648 	if (tu > 0xFFFF) {
649 		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: bad global tx timeout %u\n",
650 		    __func__, tu);
651 		/* restore default handling */
652 		ahp->ah_globaltxtimeout = (u_int) -1;
653 		return AH_FALSE;
654 	}
655 	OS_REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu);
656 	ahp->ah_globaltxtimeout = tu;
657 	return AH_TRUE;
658 }
659 
/* Return the currently-programmed global TX timeout limit (in TU). */
u_int
ar5416GetGlobalTxTimeout(struct ath_hal *ah)
{
	return MS(OS_REG_READ(ah, AR_GTXTO), AR_GTXTO_TIMEOUT_LIMIT);
}
665 
666 void
667 ar5416Set11nRateScenario(struct ath_hal *ah, struct ath_desc *ds,
668         u_int durUpdateEn, u_int rtsctsRate,
669 	HAL_11N_RATE_SERIES series[], u_int nseries, u_int flags)
670 {
671 	struct ar5416_desc *ads = AR5416DESC(ds);
672 	uint32_t ds_ctl0;
673 
674 	HALASSERT(nseries == 4);
675 	(void)nseries;
676 
677 	/*
678 	 * XXX since the upper layers doesn't know the current chainmask
679 	 * XXX setup, just override its decisions here.
680 	 * XXX The upper layers need to be taught this!
681 	 */
682 	if (series[0].Tries != 0)
683 		series[0].ChSel = AH5416(ah)->ah_tx_chainmask;
684 	if (series[1].Tries != 0)
685 		series[1].ChSel = AH5416(ah)->ah_tx_chainmask;
686 	if (series[2].Tries != 0)
687 		series[2].ChSel = AH5416(ah)->ah_tx_chainmask;
688 	if (series[3].Tries != 0)
689 		series[3].ChSel = AH5416(ah)->ah_tx_chainmask;
690 
691 	/*
692 	 * Only one of RTS and CTS enable must be set.
693 	 * If a frame has both set, just do RTS protection -
694 	 * that's enough to satisfy legacy protection.
695 	 */
696 	if (flags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) {
697 		ds_ctl0 = ads->ds_ctl0;
698 
699 		if (flags & HAL_TXDESC_RTSENA) {
700 			ds_ctl0 &= ~AR_CTSEnable;
701 			ds_ctl0 |= AR_RTSEnable;
702 		} else {
703 			ds_ctl0 &= ~AR_RTSEnable;
704 			ds_ctl0 |= AR_CTSEnable;
705 		}
706 
707 		ads->ds_ctl0 = ds_ctl0;
708 	} else {
709 		ads->ds_ctl0 =
710 		    (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
711 	}
712 
713 	ads->ds_ctl2 = set11nTries(series, 0)
714 		     | set11nTries(series, 1)
715 		     | set11nTries(series, 2)
716 		     | set11nTries(series, 3)
717 		     | (durUpdateEn ? AR_DurUpdateEn : 0);
718 
719 	ads->ds_ctl3 = set11nRate(series, 0)
720 		     | set11nRate(series, 1)
721 		     | set11nRate(series, 2)
722 		     | set11nRate(series, 3);
723 
724 	ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
725 		     | set11nPktDurRTSCTS(series, 1);
726 
727 	ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
728 		     | set11nPktDurRTSCTS(series, 3);
729 
730 	ads->ds_ctl7 = set11nRateFlags(series, 0)
731 		     | set11nRateFlags(series, 1)
732 		     | set11nRateFlags(series, 2)
733 		     | set11nRateFlags(series, 3)
734 		     | SM(rtsctsRate, AR_RTSCTSRate);
735 }
736 
737 void
738 ar5416Set11nAggrFirst(struct ath_hal *ah, struct ath_desc *ds, u_int aggrLen,
739     u_int numDelims)
740 {
741 	struct ar5416_desc *ads = AR5416DESC(ds);
742 
743 	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
744 
745 	ads->ds_ctl6 &= ~(AR_AggrLen | AR_PadDelim);
746 	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
747 	ads->ds_ctl6 |= SM(numDelims, AR_PadDelim);
748 }
749 
750 void
751 ar5416Set11nAggrMiddle(struct ath_hal *ah, struct ath_desc *ds, u_int numDelims)
752 {
753 	struct ar5416_desc *ads = AR5416DESC(ds);
754 	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);
755 
756 	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
757 
758 	ads->ds_ctl6 &= ~AR_PadDelim;
759 	ads->ds_ctl6 |= SM(numDelims, AR_PadDelim);
760 	ads->ds_ctl6 &= ~AR_AggrLen;
761 
762 	/*
763 	 * Clear the TxDone status here, may need to change
764 	 * func name to reflect this
765 	 */
766 	ds_txstatus[9] &= ~AR_TxDone;
767 }
768 
769 void
770 ar5416Set11nAggrLast(struct ath_hal *ah, struct ath_desc *ds)
771 {
772 	struct ar5416_desc *ads = AR5416DESC(ds);
773 
774 	ads->ds_ctl1 |= AR_IsAggr;
775 	ads->ds_ctl1 &= ~AR_MoreAggr;
776 	ads->ds_ctl6 &= ~AR_PadDelim;
777 }
778 
779 void
780 ar5416Clr11nAggr(struct ath_hal *ah, struct ath_desc *ds)
781 {
782 	struct ar5416_desc *ads = AR5416DESC(ds);
783 
784 	ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
785 	ads->ds_ctl6 &= ~AR_PadDelim;
786 	ads->ds_ctl6 &= ~AR_AggrLen;
787 }
788 
789 void
790 ar5416Set11nBurstDuration(struct ath_hal *ah, struct ath_desc *ds,
791                                                   u_int burstDuration)
792 {
793 	struct ar5416_desc *ads = AR5416DESC(ds);
794 
795 	ads->ds_ctl2 &= ~AR_BurstDur;
796 	ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
797 }
798 
799 /*
800  * Retrieve the rate table from the given TX completion descriptor
801  */
802 HAL_BOOL
803 ar5416GetTxCompletionRates(struct ath_hal *ah, const struct ath_desc *ds0, int *rates, int *tries)
804 {
805 	const struct ar5416_desc *ads = AR5416DESC_CONST(ds0);
806 
807 	rates[0] = MS(ads->ds_ctl3, AR_XmitRate0);
808 	rates[1] = MS(ads->ds_ctl3, AR_XmitRate1);
809 	rates[2] = MS(ads->ds_ctl3, AR_XmitRate2);
810 	rates[3] = MS(ads->ds_ctl3, AR_XmitRate3);
811 
812 	tries[0] = MS(ads->ds_ctl2, AR_XmitDataTries0);
813 	tries[1] = MS(ads->ds_ctl2, AR_XmitDataTries1);
814 	tries[2] = MS(ads->ds_ctl2, AR_XmitDataTries2);
815 	tries[3] = MS(ads->ds_ctl2, AR_XmitDataTries3);
816 
817 	return AH_TRUE;
818 }
819 
820 
821 /*
822  * TX queue management routines - AR5416 and later chipsets
823  */
824 
825 /*
826  * Allocate and initialize a tx DCU/QCU combination.
827  */
/*
 * Allocate and initialize a tx DCU/QCU combination.  Queue numbers are
 * assigned by type (beacon/CAB/UAPSD at the top of the range, PS-poll
 * at queue 1, data from the bottom up); returns the queue number or -1
 * when no queue of the requested type is available.  Must be followed
 * by ar5212ResetTxQueue before use.
 */
int
ar5416SetupTxQueue(struct ath_hal *ah, HAL_TX_QUEUE type,
	const HAL_TXQ_INFO *qInfo)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_TX_QUEUE_INFO *qi;
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	int q, defqflags;

	/* by default enable OK+ERR+DESC+URN interrupts */
	defqflags = HAL_TXQ_TXOKINT_ENABLE
		  | HAL_TXQ_TXERRINT_ENABLE
		  | HAL_TXQ_TXDESCINT_ENABLE
		  | HAL_TXQ_TXURNINT_ENABLE;
	/* XXX move queue assignment to driver */
	switch (type) {
	case HAL_TX_QUEUE_BEACON:
		q = pCap->halTotalQueues-1;	/* highest priority */
		defqflags |= HAL_TXQ_DBA_GATED
		       | HAL_TXQ_CBR_DIS_QEMPTY
		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
		       | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_CAB:
		q = pCap->halTotalQueues-2;	/* next highest priority */
		defqflags |= HAL_TXQ_DBA_GATED
		       | HAL_TXQ_CBR_DIS_QEMPTY
		       | HAL_TXQ_CBR_DIS_BEMPTY
		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
		       | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_PSPOLL:
		q = 1;				/* lowest priority */
		defqflags |= HAL_TXQ_DBA_GATED
		       | HAL_TXQ_CBR_DIS_QEMPTY
		       | HAL_TXQ_CBR_DIS_BEMPTY
		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
		       | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_UAPSD:
		q = pCap->halTotalQueues-3;	/* nextest highest priority */
		if (ahp->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: no available UAPSD tx queue\n", __func__);
			return -1;
		}
		break;
	case HAL_TX_QUEUE_DATA:
		/* first inactive queue, searching from the bottom */
		for (q = 0; q < pCap->halTotalQueues; q++)
			if (ahp->ah_txq[q].tqi_type == HAL_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->halTotalQueues) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: no available tx queue\n", __func__);
			return -1;
		}
		break;
	default:
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: bad tx queue type %u\n", __func__, type);
		return -1;
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type != HAL_TX_QUEUE_INACTIVE) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: tx queue %u already active\n",
		    __func__, q);
		return -1;
	}
	OS_MEMZERO(qi, sizeof(HAL_TX_QUEUE_INFO));
	qi->tqi_type = type;
	if (qInfo == AH_NULL) {
		/* no caller-supplied config: use HAL defaults */
		qi->tqi_qflags = defqflags;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = HAL_TXQ_USEDEFAULT;	/* NB: do at reset */
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qInfo->tqi_compBuf;
		(void) ar5212SetTxQueueProps(ah, q, qInfo);
	}
	/* NB: must be followed by ar5212ResetTxQueue */
	return q;
}
916 
917 /*
918  * Update the h/w interrupt registers to reflect a tx q's configuration.
919  */
/*
 * Update the h/w interrupt registers (IMR_S0/S1/S2) to reflect the
 * per-queue TX interrupt masks cached in the softc.
 */
static void
setTxQInterrupts(struct ath_hal *ah, HAL_TX_QUEUE_INFO *qi)
{
	struct ath_hal_5212 *ahp = AH5212(ah);

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
	    "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", __func__,
	    ahp->ah_txOkInterruptMask, ahp->ah_txErrInterruptMask,
	    ahp->ah_txDescInterruptMask, ahp->ah_txEolInterruptMask,
	    ahp->ah_txUrnInterruptMask);

	/* TXOK + TXDESC live in IMR_S0 */
	OS_REG_WRITE(ah, AR_IMR_S0,
		  SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
		| SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC)
	);
	/* TXERR + TXEOL live in IMR_S1 */
	OS_REG_WRITE(ah, AR_IMR_S1,
		  SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
		| SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL)
	);
	/* TXURN shares IMR_S2 with other bits, so RMW only its field */
	OS_REG_RMW_FIELD(ah, AR_IMR_S2,
		AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
}
942 
943 /*
944  * Set the retry, aifs, cwmin/max, readyTime regs for specified queue
945  * Assumes:
946  *  phwChannel has been set to point to the current channel
947  */
948 #define	TU_TO_USEC(_tu)		((_tu) << 10)
949 HAL_BOOL
950 ar5416ResetTxQueue(struct ath_hal *ah, u_int q)
951 {
952 	struct ath_hal_5212 *ahp = AH5212(ah);
953 	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
954 	const struct ieee80211_channel *chan = AH_PRIVATE(ah)->ah_curchan;
955 	HAL_TX_QUEUE_INFO *qi;
956 	uint32_t cwMin, chanCwMin, qmisc, dmisc;
957 
958 	if (q >= pCap->halTotalQueues) {
959 		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
960 		    __func__, q);
961 		return AH_FALSE;
962 	}
963 	qi = &ahp->ah_txq[q];
964 	if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
965 		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
966 		    __func__, q);
967 		return AH_TRUE;		/* XXX??? */
968 	}
969 
970 	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: reset queue %u\n", __func__, q);
971 
972 	if (qi->tqi_cwmin == HAL_TXQ_USEDEFAULT) {
973 		/*
974 		 * Select cwmin according to channel type.
975 		 * NB: chan can be NULL during attach
976 		 */
977 		if (chan && IEEE80211_IS_CHAN_B(chan))
978 			chanCwMin = INIT_CWMIN_11B;
979 		else
980 			chanCwMin = INIT_CWMIN;
981 		/* make sure that the CWmin is of the form (2^n - 1) */
982 		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1)
983 			;
984 	} else
985 		cwMin = qi->tqi_cwmin;
986 
987 	/* set cwMin/Max and AIFS values */
988 	OS_REG_WRITE(ah, AR_DLCL_IFS(q),
989 		  SM(cwMin, AR_D_LCL_IFS_CWMIN)
990 		| SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
991 		| SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
992 
993 	/* Set retry limit values */
994 	OS_REG_WRITE(ah, AR_DRETRY_LIMIT(q),
995 		   SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
996 		 | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
997 		 | SM(qi->tqi_lgretry, AR_D_RETRY_LIMIT_FR_LG)
998 		 | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)
999 	);
1000 
1001 	/* NB: always enable early termination on the QCU */
1002 	qmisc = AR_Q_MISC_DCU_EARLY_TERM_REQ
1003 	      | SM(AR_Q_MISC_FSP_ASAP, AR_Q_MISC_FSP);
1004 
1005 	/* NB: always enable DCU to wait for next fragment from QCU */
1006 	dmisc = AR_D_MISC_FRAG_WAIT_EN;
1007 
1008 	/* Enable exponential backoff window */
1009 	dmisc |= AR_D_MISC_BKOFF_PERSISTENCE;
1010 
1011 	/*
1012 	 * The chip reset default is to use a DCU backoff threshold of 0x2.
1013 	 * Restore this when programming the DCU MISC register.
1014 	 */
1015 	dmisc |= 0x2;
1016 
1017 	/* multiqueue support */
1018 	if (qi->tqi_cbrPeriod) {
1019 		OS_REG_WRITE(ah, AR_QCBRCFG(q),
1020 			  SM(qi->tqi_cbrPeriod,AR_Q_CBRCFG_CBR_INTERVAL)
1021 			| SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_CBR_OVF_THRESH));
1022 		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_CBR;
1023 		if (qi->tqi_cbrOverflowLimit)
1024 			qmisc |= AR_Q_MISC_CBR_EXP_CNTR_LIMIT;
1025 	}
1026 
1027 	if (qi->tqi_readyTime && (qi->tqi_type != HAL_TX_QUEUE_CAB)) {
1028 		OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
1029 			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT)
1030 			| AR_Q_RDYTIMECFG_ENA);
1031 	}
1032 
1033 	OS_REG_WRITE(ah, AR_DCHNTIME(q),
1034 		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR)
1035 		| (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
1036 
1037 	if (qi->tqi_readyTime &&
1038 	    (qi->tqi_qflags & HAL_TXQ_RDYTIME_EXP_POLICY_ENABLE))
1039 		qmisc |= AR_Q_MISC_RDYTIME_EXP_POLICY;
1040 	if (qi->tqi_qflags & HAL_TXQ_DBA_GATED)
1041 		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_DBA_GATED;
1042 	if (MS(qmisc, AR_Q_MISC_FSP) != AR_Q_MISC_FSP_ASAP) {
1043 		/*
1044 		 * These are meangingful only when not scheduled asap.
1045 		 */
1046 		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_BEMPTY)
1047 			qmisc |= AR_Q_MISC_CBR_INCR_DIS0;
1048 		else
1049 			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS0;
1050 		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_QEMPTY)
1051 			qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
1052 		else
1053 			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS1;
1054 	}
1055 
1056 	if (qi->tqi_qflags & HAL_TXQ_BACKOFF_DISABLE)
1057 		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
1058 	if (qi->tqi_qflags & HAL_TXQ_FRAG_BURST_BACKOFF_ENABLE)
1059 		dmisc |= AR_D_MISC_FRAG_BKOFF_EN;
1060 	if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_GLOBAL)
1061 		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
1062 			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
1063 	else if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_INTRA)
1064 		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR,
1065 			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
1066 	if (qi->tqi_qflags & HAL_TXQ_IGNORE_VIRTCOL)
1067 		dmisc |= SM(AR_D_MISC_VIR_COL_HANDLING_IGNORE,
1068 			    AR_D_MISC_VIR_COL_HANDLING);
1069 	if (qi->tqi_qflags & HAL_TXQ_SEQNUM_INC_DIS)
1070 		dmisc |= AR_D_MISC_SEQ_NUM_INCR_DIS;
1071 
1072 	/*
1073 	 * Fillin type-dependent bits.  Most of this can be
1074 	 * removed by specifying the queue parameters in the
1075 	 * driver; it's here for backwards compatibility.
1076 	 */
1077 	switch (qi->tqi_type) {
1078 	case HAL_TX_QUEUE_BEACON:		/* beacon frames */
1079 		qmisc |= AR_Q_MISC_FSP_DBA_GATED
1080 		      |  AR_Q_MISC_BEACON_USE
1081 		      |  AR_Q_MISC_CBR_INCR_DIS1;
1082 
1083 		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
1084 			    AR_D_MISC_ARB_LOCKOUT_CNTRL)
1085 		      |  AR_D_MISC_BEACON_USE
1086 		      |  AR_D_MISC_POST_FR_BKOFF_DIS;
1087 		break;
1088 	case HAL_TX_QUEUE_CAB:			/* CAB  frames */
1089 		/*
1090 		 * No longer Enable AR_Q_MISC_RDYTIME_EXP_POLICY,
1091 		 * There is an issue with the CAB Queue
1092 		 * not properly refreshing the Tx descriptor if
1093 		 * the TXE clear setting is used.
1094 		 */
1095 		qmisc |= AR_Q_MISC_FSP_DBA_GATED
1096 		      |  AR_Q_MISC_CBR_INCR_DIS1
1097 		      |  AR_Q_MISC_CBR_INCR_DIS0;
1098 		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: CAB: tqi_readyTime = %d\n",
1099 		    __func__, qi->tqi_readyTime);
1100 		if (qi->tqi_readyTime) {
1101 			HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
1102 			    "%s: using tqi_readyTime\n", __func__);
1103 			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
1104 			    SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT) |
1105 			    AR_Q_RDYTIMECFG_ENA);
1106 		} else {
1107 			int value;
1108 			/*
1109 			 * NB: don't set default ready time if driver
1110 			 * has explicitly specified something.  This is
1111 			 * here solely for backwards compatibility.
1112 			 */
1113 			/*
1114 			 * XXX for now, hard-code a CAB interval of 70%
1115 			 * XXX of the total beacon interval.
1116 			 *
1117 			 * XXX This keeps Merlin and later based MACs
1118 			 * XXX quite a bit happier (stops stuck beacons,
1119 			 * XXX which I gather is because of such a long
1120 			 * XXX cabq time.)
1121 			 */
1122 			value = (ahp->ah_beaconInterval * 70 / 100)
1123 				- (ah->ah_config.ah_sw_beacon_response_time
1124 				+ ah->ah_config.ah_dma_beacon_response_time)
1125 				- ah->ah_config.ah_additional_swba_backoff;
1126 			/*
1127 			 * XXX Ensure it isn't too low - nothing lower
1128 			 * XXX than 10 TU
1129 			 */
1130 			if (value < 10)
1131 				value = 10;
1132 			HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
1133 			    "%s: defaulting to rdytime = %d uS\n",
1134 			    __func__, value);
1135 			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
1136 			    SM(TU_TO_USEC(value), AR_Q_RDYTIMECFG_INT) |
1137 			    AR_Q_RDYTIMECFG_ENA);
1138 		}
1139 		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
1140 			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
1141 		break;
1142 	case HAL_TX_QUEUE_PSPOLL:
1143 		qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
1144 		break;
1145 	case HAL_TX_QUEUE_UAPSD:
1146 		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
1147 		break;
1148 	default:			/* NB: silence compiler */
1149 		break;
1150 	}
1151 
1152 	OS_REG_WRITE(ah, AR_QMISC(q), qmisc);
1153 	OS_REG_WRITE(ah, AR_DMISC(q), dmisc);
1154 
1155 	/* Setup compression scratchpad buffer */
1156 	/*
1157 	 * XXX: calling this asynchronously to queue operation can
1158 	 *      cause unexpected behavior!!!
1159 	 */
1160 	if (qi->tqi_physCompBuf) {
1161 		HALASSERT(qi->tqi_type == HAL_TX_QUEUE_DATA ||
1162 			  qi->tqi_type == HAL_TX_QUEUE_UAPSD);
1163 		OS_REG_WRITE(ah, AR_Q_CBBS, (80 + 2*q));
1164 		OS_REG_WRITE(ah, AR_Q_CBBA, qi->tqi_physCompBuf);
1165 		OS_REG_WRITE(ah, AR_Q_CBC,  HAL_COMP_BUF_MAX_SIZE/1024);
1166 		OS_REG_WRITE(ah, AR_Q0_MISC + 4*q,
1167 			     OS_REG_READ(ah, AR_Q0_MISC + 4*q)
1168 			     | AR_Q_MISC_QCU_COMP_EN);
1169 	}
1170 
1171 	/*
1172 	 * Always update the secondary interrupt mask registers - this
1173 	 * could be a new queue getting enabled in a running system or
1174 	 * hw getting re-initialized during a reset!
1175 	 *
1176 	 * Since we don't differentiate between tx interrupts corresponding
1177 	 * to individual queues - secondary tx mask regs are always unmasked;
1178 	 * tx interrupts are enabled/disabled for all queues collectively
1179 	 * using the primary mask reg
1180 	 */
1181 	if (qi->tqi_qflags & HAL_TXQ_TXOKINT_ENABLE)
1182 		ahp->ah_txOkInterruptMask |= 1 << q;
1183 	else
1184 		ahp->ah_txOkInterruptMask &= ~(1 << q);
1185 	if (qi->tqi_qflags & HAL_TXQ_TXERRINT_ENABLE)
1186 		ahp->ah_txErrInterruptMask |= 1 << q;
1187 	else
1188 		ahp->ah_txErrInterruptMask &= ~(1 << q);
1189 	if (qi->tqi_qflags & HAL_TXQ_TXDESCINT_ENABLE)
1190 		ahp->ah_txDescInterruptMask |= 1 << q;
1191 	else
1192 		ahp->ah_txDescInterruptMask &= ~(1 << q);
1193 	if (qi->tqi_qflags & HAL_TXQ_TXEOLINT_ENABLE)
1194 		ahp->ah_txEolInterruptMask |= 1 << q;
1195 	else
1196 		ahp->ah_txEolInterruptMask &= ~(1 << q);
1197 	if (qi->tqi_qflags & HAL_TXQ_TXURNINT_ENABLE)
1198 		ahp->ah_txUrnInterruptMask |= 1 << q;
1199 	else
1200 		ahp->ah_txUrnInterruptMask &= ~(1 << q);
1201 	setTxQInterrupts(ah, qi);
1202 
1203 	return AH_TRUE;
1204 }
1205 #undef	TU_TO_USEC
1206