xref: /freebsd/sys/dev/ath/ath_hal/ar5416/ar5416_xmit.c (revision ddd5b8e9b4d8957fce018c520657cdfa4ecffad3)
1 /*
2  * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3  * Copyright (c) 2002-2008 Atheros Communications, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  *
17  * $FreeBSD$
18  */
19 #include "opt_ah.h"
20 
21 #include "ah.h"
22 #include "ah_desc.h"
23 #include "ah_internal.h"
24 
25 #include "ar5416/ar5416.h"
26 #include "ar5416/ar5416reg.h"
27 #include "ar5416/ar5416phy.h"
28 #include "ar5416/ar5416desc.h"
29 
30 /*
31  * Stop transmit on the specified queue
32  */
33 HAL_BOOL
34 ar5416StopTxDma(struct ath_hal *ah, u_int q)
35 {
36 #define	STOP_DMA_TIMEOUT	4000	/* us */
37 #define	STOP_DMA_ITER		100	/* us */
38 	u_int i;
39 
40 	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
41 
42 	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
43 
44 	OS_REG_WRITE(ah, AR_Q_TXD, 1 << q);
45 	for (i = STOP_DMA_TIMEOUT/STOP_DMA_ITER; i != 0; i--) {
46 		if (ar5212NumTxPending(ah, q) == 0)
47 			break;
48 		OS_DELAY(STOP_DMA_ITER);
49 	}
50 #ifdef AH_DEBUG
51 	if (i == 0) {
52 		HALDEBUG(ah, HAL_DEBUG_ANY,
53 		    "%s: queue %u DMA did not stop in 400 msec\n", __func__, q);
54 		HALDEBUG(ah, HAL_DEBUG_ANY,
55 		    "%s: QSTS 0x%x Q_TXE 0x%x Q_TXD 0x%x Q_CBR 0x%x\n", __func__,
56 		    OS_REG_READ(ah, AR_QSTS(q)), OS_REG_READ(ah, AR_Q_TXE),
57 		    OS_REG_READ(ah, AR_Q_TXD), OS_REG_READ(ah, AR_QCBRCFG(q)));
58 		HALDEBUG(ah, HAL_DEBUG_ANY,
59 		    "%s: Q_MISC 0x%x Q_RDYTIMECFG 0x%x Q_RDYTIMESHDN 0x%x\n",
60 		    __func__, OS_REG_READ(ah, AR_QMISC(q)),
61 		    OS_REG_READ(ah, AR_QRDYTIMECFG(q)),
62 		    OS_REG_READ(ah, AR_Q_RDYTIMESHDN));
63 	}
64 #endif /* AH_DEBUG */
65 
66 	/* ar5416 and up can kill packets at the PCU level */
67 	if (ar5212NumTxPending(ah, q)) {
68 		uint32_t j;
69 
70 		HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
71 		    "%s: Num of pending TX Frames %d on Q %d\n",
72 		    __func__, ar5212NumTxPending(ah, q), q);
73 
74 		/* Kill last PCU Tx Frame */
75 		/* TODO - save off and restore current values of Q1/Q2? */
76 		for (j = 0; j < 2; j++) {
77 			uint32_t tsfLow = OS_REG_READ(ah, AR_TSF_L32);
78 			OS_REG_WRITE(ah, AR_QUIET2,
79 			    SM(10, AR_QUIET2_QUIET_DUR));
80 			OS_REG_WRITE(ah, AR_QUIET_PERIOD, 100);
81 			OS_REG_WRITE(ah, AR_NEXT_QUIET, tsfLow >> 10);
82 			OS_REG_SET_BIT(ah, AR_TIMER_MODE, AR_TIMER_MODE_QUIET);
83 
84 			if ((OS_REG_READ(ah, AR_TSF_L32)>>10) == (tsfLow>>10))
85 				break;
86 
87 			HALDEBUG(ah, HAL_DEBUG_ANY,
88 			    "%s: TSF moved while trying to set quiet time "
89 			    "TSF: 0x%08x\n", __func__, tsfLow);
90 			HALASSERT(j < 1); /* TSF shouldn't count twice or reg access is taking forever */
91 		}
92 
93 		OS_REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
94 
95 		/* Allow the quiet mechanism to do its work */
96 		OS_DELAY(200);
97 		OS_REG_CLR_BIT(ah, AR_TIMER_MODE, AR_TIMER_MODE_QUIET);
98 
99 		/* Verify the transmit q is empty */
100 		for (i = STOP_DMA_TIMEOUT/STOP_DMA_ITER; i != 0; i--) {
101 			if (ar5212NumTxPending(ah, q) == 0)
102 				break;
103 			OS_DELAY(STOP_DMA_ITER);
104 		}
105 		if (i == 0) {
106 			HALDEBUG(ah, HAL_DEBUG_ANY,
107 			    "%s: Failed to stop Tx DMA in %d msec after killing"
108 			    " last frame\n", __func__, STOP_DMA_TIMEOUT / 1000);
109 		}
110 		OS_REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
111 	}
112 
113 	OS_REG_WRITE(ah, AR_Q_TXD, 0);
114 	return (i != 0);
115 #undef STOP_DMA_ITER
116 #undef STOP_DMA_TIMEOUT
117 }
118 
/*
 * Bitmask of key types the TX path can program into the descriptor's
 * EncrType field (clear/WEP/AES/TKIP).
 */
#define VALID_KEY_TYPES \
        ((1 << HAL_KEY_TYPE_CLEAR) | (1 << HAL_KEY_TYPE_WEP)|\
         (1 << HAL_KEY_TYPE_AES)   | (1 << HAL_KEY_TYPE_TKIP))
#define isValidKeyType(_t)      ((1 << (_t)) & VALID_KEY_TYPES)

/* Pack the try count for rate series _index into its ds_ctl2 field. */
#define set11nTries(_series, _index) \
        (SM((_series)[_index].Tries, AR_XmitDataTries##_index))

/* Pack the rate code for rate series _index into its ds_ctl3 field. */
#define set11nRate(_series, _index) \
        (SM((_series)[_index].Rate, AR_XmitRate##_index))

/*
 * Pack the packet duration for series _index; also set the per-series
 * RTS/CTS qualifier bit when the series requests protection.
 */
#define set11nPktDurRTSCTS(_series, _index) \
        (SM((_series)[_index].PktDuration, AR_PacketDur##_index) |\
         ((_series)[_index].RateFlags & HAL_RATESERIES_RTS_CTS   ?\
         AR_RTSCTSQual##_index : 0))

/*
 * Pack the per-series modulation flags (20/40 MHz, half-GI, STBC)
 * and the TX chain selection for series _index into ds_ctl7.
 */
#define set11nRateFlags(_series, _index) \
        ((_series)[_index].RateFlags & HAL_RATESERIES_2040 ? AR_2040_##_index : 0) \
        |((_series)[_index].RateFlags & HAL_RATESERIES_HALFGI ? AR_GI##_index : 0) \
        |((_series)[_index].RateFlags & HAL_RATESERIES_STBC ? AR_STBC##_index : 0) \
        |SM((_series)[_index].ChSel, AR_ChainSel##_index)

/*
 * Descriptor Access Functions
 */

/* Frame types accepted by the setup/chain routines below. */
#define VALID_PKT_TYPES \
        ((1<<HAL_PKT_TYPE_NORMAL)|(1<<HAL_PKT_TYPE_ATIM)|\
         (1<<HAL_PKT_TYPE_PSPOLL)|(1<<HAL_PKT_TYPE_PROBE_RESP)|\
         (1<<HAL_PKT_TYPE_BEACON)|(1<<HAL_PKT_TYPE_AMPDU))
#define isValidPktType(_t)      ((1<<(_t)) & VALID_PKT_TYPES)
/* Legacy (CCK/OFDM) hardware rate codes considered valid for TX. */
#define VALID_TX_RATES \
        ((1<<0x0b)|(1<<0x0f)|(1<<0x0a)|(1<<0x0e)|(1<<0x09)|(1<<0x0d)|\
         (1<<0x08)|(1<<0x0c)|(1<<0x1b)|(1<<0x1a)|(1<<0x1e)|(1<<0x19)|\
	 (1<<0x1d)|(1<<0x18)|(1<<0x1c)|(1<<0x01)|(1<<0x02)|(1<<0x03)|\
	 (1<<0x04)|(1<<0x05)|(1<<0x06)|(1<<0x07)|(1<<0x00))
/* NB: accept HT rates */
/*
 * NOTE(review): masking with 0x7f still allows rate codes >= 0x20,
 * for which the shift exceeds the width of a 32-bit int (undefined
 * behavior).  This is only used inside HALASSERT today — verify
 * before using it elsewhere.
 */
#define	isValidTxRate(_r)	((1<<((_r) & 0x7f)) & VALID_TX_RATES)
157 
/*
 * Initialize the TX control words of a descriptor for a single-rate
 * (series 0 only) frame.  Multi-rate retry state is layered on later
 * by ar5416SetupXTxDesc.
 *
 * Returns AH_FALSE only when RTS/CTS protection is requested with an
 * invalid rts/cts rate; everything else is validated via HALASSERT.
 */
HAL_BOOL
ar5416SetupTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int pktLen,
	u_int hdrLen,
	HAL_PKT_TYPE type,
	u_int txPower,
	u_int txRate0, u_int txTries0,
	u_int keyIx,
	u_int antMode,
	u_int flags,
	u_int rtsctsRate,
	u_int rtsctsDuration,
	u_int compicvLen,
	u_int compivLen,
	u_int comp)
{
#define	RTSCTS	(HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath_hal_5416 *ahp = AH5416(ah);

	(void) hdrLen;

	HALASSERT(txTries0 != 0);
	HALASSERT(isValidPktType(type));
	HALASSERT(isValidTxRate(txRate0));
	/* NB: RTS and CTS enable are mutually exclusive */
	HALASSERT((flags & RTSCTS) != RTSCTS);
	/* XXX validate antMode */

	/* Apply the TPC offset and clamp to the 6-bit XmitPower field */
        txPower = (txPower + AH5212(ah)->ah_txPowerIndexOffset);
        if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 = (pktLen & AR_FrameLen)
		     | (txPower << AR_XmitPower_S)
		     | (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
		     | (flags & HAL_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		     | (flags & HAL_TXDESC_INTREQ ? AR_TxIntrReq : 0)
		     ;
	ads->ds_ctl1 = (type << AR_FrameType_S)
		     | (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0)
                     ;
	ads->ds_ctl2 = SM(txTries0, AR_XmitDataTries0)
		     | (flags & HAL_TXDESC_DURENA ? AR_DurUpdateEn : 0)
		     ;
	ads->ds_ctl3 = (txRate0 << AR_XmitRate0_S)
		     ;
	ads->ds_ctl4 = 0;
	ads->ds_ctl5 = 0;
	ads->ds_ctl6 = 0;
	/* Program the same TX chainmask for all four rate series */
	ads->ds_ctl7 = SM(ahp->ah_tx_chainmask, AR_ChainSel0)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel1)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel2)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel3)
		     ;
	/* Zero antenna control; replicate txPower into series 1-3 */
	ads->ds_ctl8 = SM(0, AR_AntCtl0);
	ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(txPower, AR_XmitPower1);
	ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(txPower, AR_XmitPower2);
	ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(txPower, AR_XmitPower3);

	if (keyIx != HAL_TXKEYIX_INVALID) {
		/* XXX validate key index */
		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
		ads->ds_ctl0 |= AR_DestIdxValid;
		ads->ds_ctl6 |= SM(ahp->ah_keytype[keyIx], AR_EncrType);
	}
	if (flags & RTSCTS) {
		if (!isValidTxRate(rtsctsRate)) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: invalid rts/cts rate 0x%x\n",
			    __func__, rtsctsRate);
			return AH_FALSE;
		}
		/* XXX validate rtsctsDuration */
		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
			     | (flags & HAL_TXDESC_RTSENA ? AR_RTSEnable : 0)
			     ;
		ads->ds_ctl7 |= (rtsctsRate << AR_RTSCTSRate_S);
	}

	/*
	 * Set the TX antenna to 0 for Kite
	 * To preserve existing behaviour, also set the TPC bits to 0;
	 * when TPC is enabled these should be filled in appropriately.
	 */
	if (AR_SREV_KITE(ah)) {
		ads->ds_ctl8 = SM(0, AR_AntCtl0);
		ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(0, AR_XmitPower1);
		ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(0, AR_XmitPower2);
		ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(0, AR_XmitPower3);
	}
	return AH_TRUE;
#undef RTSCTS
}
251 
252 HAL_BOOL
253 ar5416SetupXTxDesc(struct ath_hal *ah, struct ath_desc *ds,
254 	u_int txRate1, u_int txTries1,
255 	u_int txRate2, u_int txTries2,
256 	u_int txRate3, u_int txTries3)
257 {
258 	struct ar5416_desc *ads = AR5416DESC(ds);
259 
260 	if (txTries1) {
261 		HALASSERT(isValidTxRate(txRate1));
262 		ads->ds_ctl2 |= SM(txTries1, AR_XmitDataTries1);
263 		ads->ds_ctl3 |= (txRate1 << AR_XmitRate1_S);
264 	}
265 	if (txTries2) {
266 		HALASSERT(isValidTxRate(txRate2));
267 		ads->ds_ctl2 |= SM(txTries2, AR_XmitDataTries2);
268 		ads->ds_ctl3 |= (txRate2 << AR_XmitRate2_S);
269 	}
270 	if (txTries3) {
271 		HALASSERT(isValidTxRate(txRate3));
272 		ads->ds_ctl2 |= SM(txTries3, AR_XmitDataTries3);
273 		ads->ds_ctl3 |= (txRate3 << AR_XmitRate3_S);
274 	}
275 	return AH_TRUE;
276 }
277 
/*
 * Link a DMA buffer segment into a (possibly multi-descriptor) frame.
 *
 * For the first segment the xmit-control words set up earlier are
 * preserved; for later segments the relevant control words are copied
 * from the first descriptor (ds0) so completion processing can find
 * the rate/try information on the final descriptor.
 */
HAL_BOOL
ar5416FillTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	HAL_DMA_ADDR *bufAddrList, uint32_t *segLenList, u_int descId,
	u_int qcuId, HAL_BOOL firstSeg, HAL_BOOL lastSeg,
	const struct ath_desc *ds0)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t segLen = segLenList[0];

	/* Segment length must fit in the BufLen field */
	HALASSERT((segLen &~ AR_BufLen) == 0);

	ds->ds_data = bufAddrList[0];

	if (firstSeg) {
		/*
		 * First descriptor, don't clobber xmit control data
		 * setup by ar5212SetupTxDesc.
		 */
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
	} else if (lastSeg) {		/* !firstSeg && lastSeg */
		/*
		 * Last descriptor in a multi-descriptor frame,
		 * copy the multi-rate transmit parameters from
		 * the first frame for processing on completion.
		 */
		ads->ds_ctl1 = segLen;
#ifdef AH_NEED_DESC_SWAP
		/* NB: ds0 is already in hardware (swapped) byte order */
		ads->ds_ctl0 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl0)
		    & AR_TxIntrReq;
		ads->ds_ctl2 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl2);
		ads->ds_ctl3 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl3);
		/* ctl6 - we only need encrtype; the rest are blank */
		ads->ds_ctl6 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl6 & AR_EncrType);
#else
		ads->ds_ctl0 = AR5416DESC_CONST(ds0)->ds_ctl0 & AR_TxIntrReq;
		ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
		/* ctl6 - we only need encrtype; the rest are blank */
		ads->ds_ctl6 = AR5416DESC_CONST(ds0)->ds_ctl6 & AR_EncrType;
#endif
	} else {			/* !firstSeg && !lastSeg */
		/*
		 * Intermediate descriptor in a multi-descriptor frame.
		 */
#ifdef AH_NEED_DESC_SWAP
		ads->ds_ctl0 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl0)
		    & AR_TxIntrReq;
		ads->ds_ctl6 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl6 & AR_EncrType);
#else
		ads->ds_ctl0 = AR5416DESC_CONST(ds0)->ds_ctl0 & AR_TxIntrReq;
		ads->ds_ctl6 = AR5416DESC_CONST(ds0)->ds_ctl6 & AR_EncrType;
#endif
		ads->ds_ctl1 = segLen | AR_TxMore;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	/* XXX only on last descriptor? */
	OS_MEMZERO(ads->u.tx.status, sizeof(ads->u.tx.status));
	return AH_TRUE;
}
338 
339 /*
340  * NB: cipher is no longer used, it's calculated.
341  */
342 HAL_BOOL
343 ar5416ChainTxDesc(struct ath_hal *ah, struct ath_desc *ds,
344 	HAL_DMA_ADDR *bufAddrList,
345 	uint32_t *segLenList,
346 	u_int pktLen,
347 	u_int hdrLen,
348 	HAL_PKT_TYPE type,
349 	u_int keyIx,
350 	HAL_CIPHER cipher,
351 	uint8_t delims,
352 	HAL_BOOL firstSeg,
353 	HAL_BOOL lastSeg,
354 	HAL_BOOL lastAggr)
355 {
356 	struct ar5416_desc *ads = AR5416DESC(ds);
357 	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);
358 	struct ath_hal_5416 *ahp = AH5416(ah);
359 	u_int segLen = segLenList[0];
360 
361 	int isaggr = 0;
362 	uint32_t last_aggr = 0;
363 
364 	(void) hdrLen;
365 	(void) ah;
366 
367 	HALASSERT((segLen &~ AR_BufLen) == 0);
368 	ds->ds_data = bufAddrList[0];
369 
370 	HALASSERT(isValidPktType(type));
371 	if (type == HAL_PKT_TYPE_AMPDU) {
372 		type = HAL_PKT_TYPE_NORMAL;
373 		isaggr = 1;
374 		if (lastAggr == AH_FALSE)
375 			last_aggr = AR_MoreAggr;
376 	}
377 
378 	/*
379 	 * Since this function is called before any of the other
380 	 * descriptor setup functions (at least in this particular
381 	 * 802.11n aggregation implementation), always bzero() the
382 	 * descriptor. Previously this would be done for all but
383 	 * the first segment.
384 	 * XXX TODO: figure out why; perhaps I'm using this slightly
385 	 * XXX incorrectly.
386 	 */
387 	OS_MEMZERO(ds->ds_hw, AR5416_DESC_TX_CTL_SZ);
388 
389 	/*
390 	 * Note: VEOL should only be for the last descriptor in the chain.
391 	 */
392 	ads->ds_ctl0 = (pktLen & AR_FrameLen);
393 
394 	/*
395 	 * For aggregates:
396 	 * + IsAggr must be set for all descriptors of all subframes of
397 	 *   the aggregate
398 	 * + MoreAggr must be set for all descriptors of all subframes
399 	 *   of the aggregate EXCEPT the last subframe;
400 	 * + MoreAggr must be _CLEAR_ for all descrpitors of the last
401 	 *   subframe of the aggregate.
402 	 */
403 	ads->ds_ctl1 = (type << AR_FrameType_S)
404 			| (isaggr ? (AR_IsAggr | last_aggr) : 0);
405 
406 	ads->ds_ctl2 = 0;
407 	ads->ds_ctl3 = 0;
408 	if (keyIx != HAL_TXKEYIX_INVALID) {
409 		/* XXX validate key index */
410 		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
411 		ads->ds_ctl0 |= AR_DestIdxValid;
412 	}
413 
414 	ads->ds_ctl6 |= SM(ahp->ah_keytype[keyIx], AR_EncrType);
415 	if (isaggr) {
416 		ads->ds_ctl6 |= SM(delims, AR_PadDelim);
417 	}
418 
419 	if (firstSeg) {
420 		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
421 	} else if (lastSeg) {           /* !firstSeg && lastSeg */
422 		ads->ds_ctl0 = 0;
423 		ads->ds_ctl1 |= segLen;
424 	} else {                        /* !firstSeg && !lastSeg */
425 		/*
426 		 * Intermediate descriptor in a multi-descriptor frame.
427 		 */
428 		ads->ds_ctl0 = 0;
429 		ads->ds_ctl1 |= segLen | AR_TxMore;
430 	}
431 	ds_txstatus[0] = ds_txstatus[1] = 0;
432 	ds_txstatus[9] &= ~AR_TxDone;
433 
434 	return AH_TRUE;
435 }
436 
437 HAL_BOOL
438 ar5416SetupFirstTxDesc(struct ath_hal *ah, struct ath_desc *ds,
439 	u_int aggrLen, u_int flags, u_int txPower,
440 	u_int txRate0, u_int txTries0, u_int antMode,
441 	u_int rtsctsRate, u_int rtsctsDuration)
442 {
443 #define RTSCTS  (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
444 	struct ar5416_desc *ads = AR5416DESC(ds);
445 	struct ath_hal_5212 *ahp = AH5212(ah);
446 
447 	HALASSERT(txTries0 != 0);
448 	HALASSERT(isValidTxRate(txRate0));
449 	HALASSERT((flags & RTSCTS) != RTSCTS);
450 	/* XXX validate antMode */
451 
452 	txPower = (txPower + ahp->ah_txPowerIndexOffset );
453 	if(txPower > 63)  txPower=63;
454 
455 	ads->ds_ctl0 |= (txPower << AR_XmitPower_S)
456 		| (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
457 		| (flags & HAL_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
458 		| (flags & HAL_TXDESC_INTREQ ? AR_TxIntrReq : 0);
459 	ads->ds_ctl1 |= (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0);
460 	ads->ds_ctl2 |= SM(txTries0, AR_XmitDataTries0);
461 	ads->ds_ctl3 |= (txRate0 << AR_XmitRate0_S);
462 	ads->ds_ctl7 = SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel0)
463 		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel1)
464 		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel2)
465 		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel3);
466 
467 	/* NB: no V1 WAR */
468 	ads->ds_ctl8 = SM(0, AR_AntCtl0);
469 	ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(txPower, AR_XmitPower1);
470 	ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(txPower, AR_XmitPower2);
471 	ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(txPower, AR_XmitPower3);
472 
473 	ads->ds_ctl6 &= ~(0xffff);
474 	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
475 
476 	if (flags & RTSCTS) {
477 		/* XXX validate rtsctsDuration */
478 		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
479 			| (flags & HAL_TXDESC_RTSENA ? AR_RTSEnable : 0);
480 	}
481 
482 	/*
483 	 * Set the TX antenna to 0 for Kite
484 	 * To preserve existing behaviour, also set the TPC bits to 0;
485 	 * when TPC is enabled these should be filled in appropriately.
486 	 */
487 	if (AR_SREV_KITE(ah)) {
488 		ads->ds_ctl8 = SM(0, AR_AntCtl0);
489 		ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(0, AR_XmitPower1);
490 		ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(0, AR_XmitPower2);
491 		ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(0, AR_XmitPower3);
492 	}
493 
494 	return AH_TRUE;
495 #undef RTSCTS
496 }
497 
498 HAL_BOOL
499 ar5416SetupLastTxDesc(struct ath_hal *ah, struct ath_desc *ds,
500 		const struct ath_desc *ds0)
501 {
502 	struct ar5416_desc *ads = AR5416DESC(ds);
503 
504 	ads->ds_ctl1 &= ~AR_MoreAggr;
505 	ads->ds_ctl6 &= ~AR_PadDelim;
506 
507 	/* hack to copy rate info to last desc for later processing */
508 #ifdef AH_NEED_DESC_SWAP
509 	ads->ds_ctl2 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl2);
510 	ads->ds_ctl3 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl3);
511 #else
512 	ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
513 	ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
514 #endif
515 	return AH_TRUE;
516 }
517 
#ifdef AH_NEED_DESC_SWAP
/*
 * Swap transmit descriptor: byte-swap the buffer pointer, the two
 * software control words and the first four hardware words so the
 * host can read a descriptor written in hardware byte order.
 * NB: only ds_hw[0..3] are swapped here.
 */
static __inline void
ar5416SwapTxDesc(struct ath_desc *ds)
{
	ds->ds_data = __bswap32(ds->ds_data);
	ds->ds_ctl0 = __bswap32(ds->ds_ctl0);
	ds->ds_ctl1 = __bswap32(ds->ds_ctl1);
	ds->ds_hw[0] = __bswap32(ds->ds_hw[0]);
	ds->ds_hw[1] = __bswap32(ds->ds_hw[1]);
	ds->ds_hw[2] = __bswap32(ds->ds_hw[2]);
	ds->ds_hw[3] = __bswap32(ds->ds_hw[3]);
}
#endif
532 
533 /*
534  * Processing of HW TX descriptor.
535  */
536 HAL_STATUS
537 ar5416ProcTxDesc(struct ath_hal *ah,
538 	struct ath_desc *ds, struct ath_tx_status *ts)
539 {
540 	struct ar5416_desc *ads = AR5416DESC(ds);
541 	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);
542 
543 #ifdef AH_NEED_DESC_SWAP
544 	if ((ds_txstatus[9] & __bswap32(AR_TxDone)) == 0)
545 		return HAL_EINPROGRESS;
546 	ar5416SwapTxDesc(ds);
547 #else
548 	if ((ds_txstatus[9] & AR_TxDone) == 0)
549 		return HAL_EINPROGRESS;
550 #endif
551 
552 	/* Update software copies of the HW status */
553 	ts->ts_seqnum = MS(ds_txstatus[9], AR_SeqNum);
554 	ts->ts_tstamp = AR_SendTimestamp(ds_txstatus);
555 	ts->ts_tid = MS(ds_txstatus[9], AR_TxTid);
556 
557 	ts->ts_status = 0;
558 	if (ds_txstatus[1] & AR_ExcessiveRetries)
559 		ts->ts_status |= HAL_TXERR_XRETRY;
560 	if (ds_txstatus[1] & AR_Filtered)
561 		ts->ts_status |= HAL_TXERR_FILT;
562 	if (ds_txstatus[1] & AR_FIFOUnderrun)
563 		ts->ts_status |= HAL_TXERR_FIFO;
564 	if (ds_txstatus[9] & AR_TxOpExceeded)
565 		ts->ts_status |= HAL_TXERR_XTXOP;
566 	if (ds_txstatus[1] & AR_TxTimerExpired)
567 		ts->ts_status |= HAL_TXERR_TIMER_EXPIRED;
568 
569 	ts->ts_flags  = 0;
570 	if (ds_txstatus[0] & AR_TxBaStatus) {
571 		ts->ts_flags |= HAL_TX_BA;
572 		ts->ts_ba_low = AR_BaBitmapLow(ds_txstatus);
573 		ts->ts_ba_high = AR_BaBitmapHigh(ds_txstatus);
574 	}
575 	if (ds->ds_ctl1 & AR_IsAggr)
576 		ts->ts_flags |= HAL_TX_AGGR;
577 	if (ds_txstatus[1] & AR_DescCfgErr)
578 		ts->ts_flags |= HAL_TX_DESC_CFG_ERR;
579 	if (ds_txstatus[1] & AR_TxDataUnderrun)
580 		ts->ts_flags |= HAL_TX_DATA_UNDERRUN;
581 	if (ds_txstatus[1] & AR_TxDelimUnderrun)
582 		ts->ts_flags |= HAL_TX_DELIM_UNDERRUN;
583 
584 	/*
585 	 * Extract the transmit rate used and mark the rate as
586 	 * ``alternate'' if it wasn't the series 0 rate.
587 	 */
588 	ts->ts_finaltsi =  MS(ds_txstatus[9], AR_FinalTxIdx);
589 	switch (ts->ts_finaltsi) {
590 	case 0:
591 		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate0);
592 		break;
593 	case 1:
594 		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate1);
595 		break;
596 	case 2:
597 		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate2);
598 		break;
599 	case 3:
600 		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate3);
601 		break;
602 	}
603 
604 	ts->ts_rssi = MS(ds_txstatus[5], AR_TxRSSICombined);
605 	ts->ts_rssi_ctl[0] = MS(ds_txstatus[0], AR_TxRSSIAnt00);
606 	ts->ts_rssi_ctl[1] = MS(ds_txstatus[0], AR_TxRSSIAnt01);
607 	ts->ts_rssi_ctl[2] = MS(ds_txstatus[0], AR_TxRSSIAnt02);
608 	ts->ts_rssi_ext[0] = MS(ds_txstatus[5], AR_TxRSSIAnt10);
609 	ts->ts_rssi_ext[1] = MS(ds_txstatus[5], AR_TxRSSIAnt11);
610 	ts->ts_rssi_ext[2] = MS(ds_txstatus[5], AR_TxRSSIAnt12);
611 	ts->ts_evm0 = AR_TxEVM0(ds_txstatus);
612 	ts->ts_evm1 = AR_TxEVM1(ds_txstatus);
613 	ts->ts_evm2 = AR_TxEVM2(ds_txstatus);
614 
615 	ts->ts_shortretry = MS(ds_txstatus[1], AR_RTSFailCnt);
616 	ts->ts_longretry = MS(ds_txstatus[1], AR_DataFailCnt);
617 	/*
618 	 * The retry count has the number of un-acked tries for the
619 	 * final series used.  When doing multi-rate retry we must
620 	 * fixup the retry count by adding in the try counts for
621 	 * each series that was fully-processed.  Beware that this
622 	 * takes values from the try counts in the final descriptor.
623 	 * These are not required by the hardware.  We assume they
624 	 * are placed there by the driver as otherwise we have no
625 	 * access and the driver can't do the calculation because it
626 	 * doesn't know the descriptor format.
627 	 */
628 	switch (ts->ts_finaltsi) {
629 	case 3: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries2);
630 	case 2: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries1);
631 	case 1: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries0);
632 	}
633 
634 	/*
635 	 * These fields are not used. Zero these to preserve compatability
636 	 * with existing drivers.
637 	 */
638 	ts->ts_virtcol = MS(ads->ds_ctl1, AR_VirtRetryCnt);
639 	ts->ts_antenna = 0; /* We don't switch antennas on Owl*/
640 
641 	/* handle tx trigger level changes internally */
642 	if ((ts->ts_status & HAL_TXERR_FIFO) ||
643 	    (ts->ts_flags & (HAL_TX_DATA_UNDERRUN | HAL_TX_DELIM_UNDERRUN)))
644 		ar5212UpdateTxTrigLevel(ah, AH_TRUE);
645 
646 	return HAL_OK;
647 }
648 
649 HAL_BOOL
650 ar5416SetGlobalTxTimeout(struct ath_hal *ah, u_int tu)
651 {
652 	struct ath_hal_5416 *ahp = AH5416(ah);
653 
654 	if (tu > 0xFFFF) {
655 		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: bad global tx timeout %u\n",
656 		    __func__, tu);
657 		/* restore default handling */
658 		ahp->ah_globaltxtimeout = (u_int) -1;
659 		return AH_FALSE;
660 	}
661 	OS_REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu);
662 	ahp->ah_globaltxtimeout = tu;
663 	return AH_TRUE;
664 }
665 
666 u_int
667 ar5416GetGlobalTxTimeout(struct ath_hal *ah)
668 {
669 	return MS(OS_REG_READ(ah, AR_GTXTO), AR_GTXTO_TIMEOUT_LIMIT);
670 }
671 
/* Strip the HT rate code down to its MCS index (0..15). */
#define	HT_RC_2_MCS(_rc)	((_rc) & 0x0f)
/*
 * Per-MCS delta added to the burst duration to account for the MAC
 * computing packet duration with a normal ACK instead of a block ACK
 * (see the WAR in ar5416Set11nAggrFirst/ar5416Set11nBurstDuration).
 * Indexed by HT_RC_2_MCS(rate); entries 8-15 mirror the 2-stream
 * MCSes.  Units per the AR_BurstDur field — presumably usec; verify
 * against the chip documentation.
 */
static const u_int8_t baDurationDelta[] = {
	24,	//  0: BPSK
	12,	//  1: QPSK 1/2
	12,	//  2: QPSK 3/4
	4,	//  3: 16-QAM 1/2
	4,	//  4: 16-QAM 3/4
	4,	//  5: 64-QAM 2/3
	4,	//  6: 64-QAM 3/4
	4,	//  7: 64-QAM 5/6
	24,	//  8: BPSK
	12,	//  9: QPSK 1/2
	12,	// 10: QPSK 3/4
	4,	// 11: 16-QAM 1/2
	4,	// 12: 16-QAM 3/4
	4,	// 13: 64-QAM 2/3
	4,	// 14: 64-QAM 3/4
	4,	// 15: 64-QAM 5/6
};
691 
692 void
693 ar5416Set11nRateScenario(struct ath_hal *ah, struct ath_desc *ds,
694         u_int durUpdateEn, u_int rtsctsRate,
695 	HAL_11N_RATE_SERIES series[], u_int nseries, u_int flags)
696 {
697 	struct ar5416_desc *ads = AR5416DESC(ds);
698 	uint32_t ds_ctl0;
699 
700 	HALASSERT(nseries == 4);
701 	(void)nseries;
702 
703 	/*
704 	 * Only one of RTS and CTS enable must be set.
705 	 * If a frame has both set, just do RTS protection -
706 	 * that's enough to satisfy legacy protection.
707 	 */
708 	if (flags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) {
709 		ds_ctl0 = ads->ds_ctl0;
710 
711 		if (flags & HAL_TXDESC_RTSENA) {
712 			ds_ctl0 &= ~AR_CTSEnable;
713 			ds_ctl0 |= AR_RTSEnable;
714 		} else {
715 			ds_ctl0 &= ~AR_RTSEnable;
716 			ds_ctl0 |= AR_CTSEnable;
717 		}
718 
719 		ads->ds_ctl0 = ds_ctl0;
720 	} else {
721 		ads->ds_ctl0 =
722 		    (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
723 	}
724 
725 	ads->ds_ctl2 = set11nTries(series, 0)
726 		     | set11nTries(series, 1)
727 		     | set11nTries(series, 2)
728 		     | set11nTries(series, 3)
729 		     | (durUpdateEn ? AR_DurUpdateEn : 0);
730 
731 	ads->ds_ctl3 = set11nRate(series, 0)
732 		     | set11nRate(series, 1)
733 		     | set11nRate(series, 2)
734 		     | set11nRate(series, 3);
735 
736 	ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
737 		     | set11nPktDurRTSCTS(series, 1);
738 
739 	ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
740 		     | set11nPktDurRTSCTS(series, 3);
741 
742 	ads->ds_ctl7 = set11nRateFlags(series, 0)
743 		     | set11nRateFlags(series, 1)
744 		     | set11nRateFlags(series, 2)
745 		     | set11nRateFlags(series, 3)
746 		     | SM(rtsctsRate, AR_RTSCTSRate);
747 }
748 
749 /*
750  * Note: this should be called before calling ar5416SetBurstDuration()
751  * (if it is indeed called) in order to ensure that the burst duration
752  * is correctly updated with the BA delta workaround.
753  */
754 void
755 ar5416Set11nAggrFirst(struct ath_hal *ah, struct ath_desc *ds, u_int aggrLen,
756     u_int numDelims)
757 {
758 	struct ar5416_desc *ads = AR5416DESC(ds);
759 	uint32_t flags;
760 	uint32_t burstDur;
761 	uint8_t rate;
762 
763 	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
764 
765 	ads->ds_ctl6 &= ~(AR_AggrLen | AR_PadDelim);
766 	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
767 	ads->ds_ctl6 |= SM(numDelims, AR_PadDelim);
768 
769 	if (! AR_SREV_MERLIN_10_OR_LATER(ah)) {
770 		/*
771 		 * XXX It'd be nice if I were passed in the rate scenario
772 		 * at this point..
773 		 */
774 		rate = MS(ads->ds_ctl3, AR_XmitRate0);
775 		flags = ads->ds_ctl0 & (AR_CTSEnable | AR_RTSEnable);
776 		/*
777 		 * WAR - MAC assumes normal ACK time instead of
778 		 * block ACK while computing packet duration.
779 		 * Add this delta to the burst duration in the descriptor.
780 		 */
781 		if (flags && (ads->ds_ctl1 & AR_IsAggr)) {
782 			burstDur = baDurationDelta[HT_RC_2_MCS(rate)];
783 			ads->ds_ctl2 &= ~(AR_BurstDur);
784 			ads->ds_ctl2 |= SM(burstDur, AR_BurstDur);
785 		}
786 	}
787 }
788 
789 void
790 ar5416Set11nAggrMiddle(struct ath_hal *ah, struct ath_desc *ds, u_int numDelims)
791 {
792 	struct ar5416_desc *ads = AR5416DESC(ds);
793 	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);
794 
795 	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
796 
797 	ads->ds_ctl6 &= ~AR_PadDelim;
798 	ads->ds_ctl6 |= SM(numDelims, AR_PadDelim);
799 	ads->ds_ctl6 &= ~AR_AggrLen;
800 
801 	/*
802 	 * Clear the TxDone status here, may need to change
803 	 * func name to reflect this
804 	 */
805 	ds_txstatus[9] &= ~AR_TxDone;
806 }
807 
808 void
809 ar5416Set11nAggrLast(struct ath_hal *ah, struct ath_desc *ds)
810 {
811 	struct ar5416_desc *ads = AR5416DESC(ds);
812 
813 	ads->ds_ctl1 |= AR_IsAggr;
814 	ads->ds_ctl1 &= ~AR_MoreAggr;
815 	ads->ds_ctl6 &= ~AR_PadDelim;
816 }
817 
818 void
819 ar5416Clr11nAggr(struct ath_hal *ah, struct ath_desc *ds)
820 {
821 	struct ar5416_desc *ads = AR5416DESC(ds);
822 
823 	ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
824 	ads->ds_ctl6 &= ~AR_PadDelim;
825 	ads->ds_ctl6 &= ~AR_AggrLen;
826 }
827 
828 void
829 ar5416Set11nVirtualMoreFrag(struct ath_hal *ah, struct ath_desc *ds,
830     u_int vmf)
831 {
832 	struct ar5416_desc *ads = AR5416DESC(ds);
833 	if (vmf)
834 		ads->ds_ctl0 |= AR_VirtMoreFrag;
835 	else
836 		ads->ds_ctl0 &= ~AR_VirtMoreFrag;
837 }
838 
839 /*
840  * Program the burst duration, with the included BA delta if it's
841  * applicable.
842  */
843 void
844 ar5416Set11nBurstDuration(struct ath_hal *ah, struct ath_desc *ds,
845                                                   u_int burstDuration)
846 {
847 	struct ar5416_desc *ads = AR5416DESC(ds);
848 	uint32_t burstDur = 0;
849 	uint8_t rate;
850 
851 	if (! AR_SREV_MERLIN_10_OR_LATER(ah)) {
852 		/*
853 		 * XXX It'd be nice if I were passed in the rate scenario
854 		 * at this point..
855 		 */
856 		rate = MS(ads->ds_ctl3, AR_XmitDataTries0);
857 		/*
858 		 * WAR - MAC assumes normal ACK time instead of
859 		 * block ACK while computing packet duration.
860 		 * Add this delta to the burst duration in the descriptor.
861 		 */
862 		if (ads->ds_ctl1 & AR_IsAggr) {
863 			burstDur = baDurationDelta[HT_RC_2_MCS(rate)];
864 		}
865 	}
866 
867 	ads->ds_ctl2 &= ~AR_BurstDur;
868 	ads->ds_ctl2 |= SM(burstDur + burstDuration, AR_BurstDur);
869 }
870 
871 /*
872  * Retrieve the rate table from the given TX completion descriptor
873  */
874 HAL_BOOL
875 ar5416GetTxCompletionRates(struct ath_hal *ah, const struct ath_desc *ds0, int *rates, int *tries)
876 {
877 	const struct ar5416_desc *ads = AR5416DESC_CONST(ds0);
878 
879 	rates[0] = MS(ads->ds_ctl3, AR_XmitRate0);
880 	rates[1] = MS(ads->ds_ctl3, AR_XmitRate1);
881 	rates[2] = MS(ads->ds_ctl3, AR_XmitRate2);
882 	rates[3] = MS(ads->ds_ctl3, AR_XmitRate3);
883 
884 	tries[0] = MS(ads->ds_ctl2, AR_XmitDataTries0);
885 	tries[1] = MS(ads->ds_ctl2, AR_XmitDataTries1);
886 	tries[2] = MS(ads->ds_ctl2, AR_XmitDataTries2);
887 	tries[3] = MS(ads->ds_ctl2, AR_XmitDataTries3);
888 
889 	return AH_TRUE;
890 }
891 
892 
893 /*
894  * TX queue management routines - AR5416 and later chipsets
895  */
896 
897 /*
898  * Allocate and initialize a tx DCU/QCU combination.
899  */
900 int
901 ar5416SetupTxQueue(struct ath_hal *ah, HAL_TX_QUEUE type,
902 	const HAL_TXQ_INFO *qInfo)
903 {
904 	struct ath_hal_5212 *ahp = AH5212(ah);
905 	HAL_TX_QUEUE_INFO *qi;
906 	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
907 	int q, defqflags;
908 
909 	/* by default enable OK+ERR+DESC+URN interrupts */
910 	defqflags = HAL_TXQ_TXOKINT_ENABLE
911 		  | HAL_TXQ_TXERRINT_ENABLE
912 		  | HAL_TXQ_TXDESCINT_ENABLE
913 		  | HAL_TXQ_TXURNINT_ENABLE;
914 	/* XXX move queue assignment to driver */
915 	switch (type) {
916 	case HAL_TX_QUEUE_BEACON:
917 		q = pCap->halTotalQueues-1;	/* highest priority */
918 		defqflags |= HAL_TXQ_DBA_GATED
919 		       | HAL_TXQ_CBR_DIS_QEMPTY
920 		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
921 		       | HAL_TXQ_BACKOFF_DISABLE;
922 		break;
923 	case HAL_TX_QUEUE_CAB:
924 		q = pCap->halTotalQueues-2;	/* next highest priority */
925 		defqflags |= HAL_TXQ_DBA_GATED
926 		       | HAL_TXQ_CBR_DIS_QEMPTY
927 		       | HAL_TXQ_CBR_DIS_BEMPTY
928 		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
929 		       | HAL_TXQ_BACKOFF_DISABLE;
930 		break;
931 	case HAL_TX_QUEUE_PSPOLL:
932 		q = 1;				/* lowest priority */
933 		defqflags |= HAL_TXQ_DBA_GATED
934 		       | HAL_TXQ_CBR_DIS_QEMPTY
935 		       | HAL_TXQ_CBR_DIS_BEMPTY
936 		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
937 		       | HAL_TXQ_BACKOFF_DISABLE;
938 		break;
939 	case HAL_TX_QUEUE_UAPSD:
940 		q = pCap->halTotalQueues-3;	/* nextest highest priority */
941 		if (ahp->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE) {
942 			HALDEBUG(ah, HAL_DEBUG_ANY,
943 			    "%s: no available UAPSD tx queue\n", __func__);
944 			return -1;
945 		}
946 		break;
947 	case HAL_TX_QUEUE_DATA:
948 		for (q = 0; q < pCap->halTotalQueues; q++)
949 			if (ahp->ah_txq[q].tqi_type == HAL_TX_QUEUE_INACTIVE)
950 				break;
951 		if (q == pCap->halTotalQueues) {
952 			HALDEBUG(ah, HAL_DEBUG_ANY,
953 			    "%s: no available tx queue\n", __func__);
954 			return -1;
955 		}
956 		break;
957 	default:
958 		HALDEBUG(ah, HAL_DEBUG_ANY,
959 		    "%s: bad tx queue type %u\n", __func__, type);
960 		return -1;
961 	}
962 
963 	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);
964 
965 	qi = &ahp->ah_txq[q];
966 	if (qi->tqi_type != HAL_TX_QUEUE_INACTIVE) {
967 		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: tx queue %u already active\n",
968 		    __func__, q);
969 		return -1;
970 	}
971 	OS_MEMZERO(qi, sizeof(HAL_TX_QUEUE_INFO));
972 	qi->tqi_type = type;
973 	if (qInfo == AH_NULL) {
974 		qi->tqi_qflags = defqflags;
975 		qi->tqi_aifs = INIT_AIFS;
976 		qi->tqi_cwmin = HAL_TXQ_USEDEFAULT;	/* NB: do at reset */
977 		qi->tqi_cwmax = INIT_CWMAX;
978 		qi->tqi_shretry = INIT_SH_RETRY;
979 		qi->tqi_lgretry = INIT_LG_RETRY;
980 		qi->tqi_physCompBuf = 0;
981 	} else {
982 		qi->tqi_physCompBuf = qInfo->tqi_compBuf;
983 		(void) ar5212SetTxQueueProps(ah, q, qInfo);
984 	}
985 	/* NB: must be followed by ar5212ResetTxQueue */
986 	return q;
987 }
988 
989 /*
990  * Update the h/w interrupt registers to reflect a tx q's configuration.
991  */
992 static void
993 setTxQInterrupts(struct ath_hal *ah, HAL_TX_QUEUE_INFO *qi)
994 {
995 	struct ath_hal_5212 *ahp = AH5212(ah);
996 
997 	HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
998 	    "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", __func__,
999 	    ahp->ah_txOkInterruptMask, ahp->ah_txErrInterruptMask,
1000 	    ahp->ah_txDescInterruptMask, ahp->ah_txEolInterruptMask,
1001 	    ahp->ah_txUrnInterruptMask);
1002 
1003 	OS_REG_WRITE(ah, AR_IMR_S0,
1004 		  SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
1005 		| SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC)
1006 	);
1007 	OS_REG_WRITE(ah, AR_IMR_S1,
1008 		  SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
1009 		| SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL)
1010 	);
1011 	OS_REG_RMW_FIELD(ah, AR_IMR_S2,
1012 		AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
1013 }
1014 
1015 /*
1016  * Set the retry, aifs, cwmin/max, readyTime regs for specified queue
1017  * Assumes:
1018  *  phwChannel has been set to point to the current channel
1019  */
1020 #define	TU_TO_USEC(_tu)		((_tu) << 10)
1021 HAL_BOOL
1022 ar5416ResetTxQueue(struct ath_hal *ah, u_int q)
1023 {
1024 	struct ath_hal_5212 *ahp = AH5212(ah);
1025 	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
1026 	const struct ieee80211_channel *chan = AH_PRIVATE(ah)->ah_curchan;
1027 	HAL_TX_QUEUE_INFO *qi;
1028 	uint32_t cwMin, chanCwMin, qmisc, dmisc;
1029 
1030 	if (q >= pCap->halTotalQueues) {
1031 		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
1032 		    __func__, q);
1033 		return AH_FALSE;
1034 	}
1035 	qi = &ahp->ah_txq[q];
1036 	if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
1037 		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
1038 		    __func__, q);
1039 		return AH_TRUE;		/* XXX??? */
1040 	}
1041 
1042 	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: reset queue %u\n", __func__, q);
1043 
1044 	if (qi->tqi_cwmin == HAL_TXQ_USEDEFAULT) {
1045 		/*
1046 		 * Select cwmin according to channel type.
1047 		 * NB: chan can be NULL during attach
1048 		 */
1049 		if (chan && IEEE80211_IS_CHAN_B(chan))
1050 			chanCwMin = INIT_CWMIN_11B;
1051 		else
1052 			chanCwMin = INIT_CWMIN;
1053 		/* make sure that the CWmin is of the form (2^n - 1) */
1054 		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1)
1055 			;
1056 	} else
1057 		cwMin = qi->tqi_cwmin;
1058 
1059 	/* set cwMin/Max and AIFS values */
1060 	OS_REG_WRITE(ah, AR_DLCL_IFS(q),
1061 		  SM(cwMin, AR_D_LCL_IFS_CWMIN)
1062 		| SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
1063 		| SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
1064 
1065 	/* Set retry limit values */
1066 	OS_REG_WRITE(ah, AR_DRETRY_LIMIT(q),
1067 		   SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
1068 		 | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
1069 		 | SM(qi->tqi_lgretry, AR_D_RETRY_LIMIT_FR_LG)
1070 		 | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)
1071 	);
1072 
1073 	/* NB: always enable early termination on the QCU */
1074 	qmisc = AR_Q_MISC_DCU_EARLY_TERM_REQ
1075 	      | SM(AR_Q_MISC_FSP_ASAP, AR_Q_MISC_FSP);
1076 
1077 	/* NB: always enable DCU to wait for next fragment from QCU */
1078 	dmisc = AR_D_MISC_FRAG_WAIT_EN;
1079 
1080 	/* Enable exponential backoff window */
1081 	dmisc |= AR_D_MISC_BKOFF_PERSISTENCE;
1082 
1083 	/*
1084 	 * The chip reset default is to use a DCU backoff threshold of 0x2.
1085 	 * Restore this when programming the DCU MISC register.
1086 	 */
1087 	dmisc |= 0x2;
1088 
1089 	/* multiqueue support */
1090 	if (qi->tqi_cbrPeriod) {
1091 		OS_REG_WRITE(ah, AR_QCBRCFG(q),
1092 			  SM(qi->tqi_cbrPeriod,AR_Q_CBRCFG_CBR_INTERVAL)
1093 			| SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_CBR_OVF_THRESH));
1094 		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_CBR;
1095 		if (qi->tqi_cbrOverflowLimit)
1096 			qmisc |= AR_Q_MISC_CBR_EXP_CNTR_LIMIT;
1097 	}
1098 
1099 	if (qi->tqi_readyTime && (qi->tqi_type != HAL_TX_QUEUE_CAB)) {
1100 		OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
1101 			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT)
1102 			| AR_Q_RDYTIMECFG_ENA);
1103 	}
1104 
1105 	OS_REG_WRITE(ah, AR_DCHNTIME(q),
1106 		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR)
1107 		| (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
1108 
1109 	if (qi->tqi_readyTime &&
1110 	    (qi->tqi_qflags & HAL_TXQ_RDYTIME_EXP_POLICY_ENABLE))
1111 		qmisc |= AR_Q_MISC_RDYTIME_EXP_POLICY;
1112 	if (qi->tqi_qflags & HAL_TXQ_DBA_GATED)
1113 		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_DBA_GATED;
1114 	if (MS(qmisc, AR_Q_MISC_FSP) != AR_Q_MISC_FSP_ASAP) {
1115 		/*
1116 		 * These are meangingful only when not scheduled asap.
1117 		 */
1118 		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_BEMPTY)
1119 			qmisc |= AR_Q_MISC_CBR_INCR_DIS0;
1120 		else
1121 			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS0;
1122 		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_QEMPTY)
1123 			qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
1124 		else
1125 			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS1;
1126 	}
1127 
1128 	if (qi->tqi_qflags & HAL_TXQ_BACKOFF_DISABLE)
1129 		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
1130 	if (qi->tqi_qflags & HAL_TXQ_FRAG_BURST_BACKOFF_ENABLE)
1131 		dmisc |= AR_D_MISC_FRAG_BKOFF_EN;
1132 	if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_GLOBAL)
1133 		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
1134 			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
1135 	else if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_INTRA)
1136 		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR,
1137 			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
1138 	if (qi->tqi_qflags & HAL_TXQ_IGNORE_VIRTCOL)
1139 		dmisc |= SM(AR_D_MISC_VIR_COL_HANDLING_IGNORE,
1140 			    AR_D_MISC_VIR_COL_HANDLING);
1141 	if (qi->tqi_qflags & HAL_TXQ_SEQNUM_INC_DIS)
1142 		dmisc |= AR_D_MISC_SEQ_NUM_INCR_DIS;
1143 
1144 	/*
1145 	 * Fillin type-dependent bits.  Most of this can be
1146 	 * removed by specifying the queue parameters in the
1147 	 * driver; it's here for backwards compatibility.
1148 	 */
1149 	switch (qi->tqi_type) {
1150 	case HAL_TX_QUEUE_BEACON:		/* beacon frames */
1151 		qmisc |= AR_Q_MISC_FSP_DBA_GATED
1152 		      |  AR_Q_MISC_BEACON_USE
1153 		      |  AR_Q_MISC_CBR_INCR_DIS1;
1154 
1155 		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
1156 			    AR_D_MISC_ARB_LOCKOUT_CNTRL)
1157 		      |  AR_D_MISC_BEACON_USE
1158 		      |  AR_D_MISC_POST_FR_BKOFF_DIS;
1159 		break;
1160 	case HAL_TX_QUEUE_CAB:			/* CAB  frames */
1161 		/*
1162 		 * No longer Enable AR_Q_MISC_RDYTIME_EXP_POLICY,
1163 		 * There is an issue with the CAB Queue
1164 		 * not properly refreshing the Tx descriptor if
1165 		 * the TXE clear setting is used.
1166 		 */
1167 		qmisc |= AR_Q_MISC_FSP_DBA_GATED
1168 		      |  AR_Q_MISC_CBR_INCR_DIS1
1169 		      |  AR_Q_MISC_CBR_INCR_DIS0;
1170 		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: CAB: tqi_readyTime = %d\n",
1171 		    __func__, qi->tqi_readyTime);
1172 		if (qi->tqi_readyTime) {
1173 			HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
1174 			    "%s: using tqi_readyTime\n", __func__);
1175 			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
1176 			    SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT) |
1177 			    AR_Q_RDYTIMECFG_ENA);
1178 		} else {
1179 			int value;
1180 			/*
1181 			 * NB: don't set default ready time if driver
1182 			 * has explicitly specified something.  This is
1183 			 * here solely for backwards compatibility.
1184 			 */
1185 			/*
1186 			 * XXX for now, hard-code a CAB interval of 70%
1187 			 * XXX of the total beacon interval.
1188 			 *
1189 			 * XXX This keeps Merlin and later based MACs
1190 			 * XXX quite a bit happier (stops stuck beacons,
1191 			 * XXX which I gather is because of such a long
1192 			 * XXX cabq time.)
1193 			 */
1194 			value = (ahp->ah_beaconInterval * 50 / 100)
1195 				- ah->ah_config.ah_additional_swba_backoff
1196 				- ah->ah_config.ah_sw_beacon_response_time
1197 				+ ah->ah_config.ah_dma_beacon_response_time;
1198 			/*
1199 			 * XXX Ensure it isn't too low - nothing lower
1200 			 * XXX than 10 TU
1201 			 */
1202 			if (value < 10)
1203 				value = 10;
1204 			HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
1205 			    "%s: defaulting to rdytime = %d uS\n",
1206 			    __func__, value);
1207 			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
1208 			    SM(TU_TO_USEC(value), AR_Q_RDYTIMECFG_INT) |
1209 			    AR_Q_RDYTIMECFG_ENA);
1210 		}
1211 		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
1212 			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
1213 		break;
1214 	case HAL_TX_QUEUE_PSPOLL:
1215 		qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
1216 		break;
1217 	case HAL_TX_QUEUE_UAPSD:
1218 		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
1219 		break;
1220 	default:			/* NB: silence compiler */
1221 		break;
1222 	}
1223 
1224 	OS_REG_WRITE(ah, AR_QMISC(q), qmisc);
1225 	OS_REG_WRITE(ah, AR_DMISC(q), dmisc);
1226 
1227 	/* Setup compression scratchpad buffer */
1228 	/*
1229 	 * XXX: calling this asynchronously to queue operation can
1230 	 *      cause unexpected behavior!!!
1231 	 */
1232 	if (qi->tqi_physCompBuf) {
1233 		HALASSERT(qi->tqi_type == HAL_TX_QUEUE_DATA ||
1234 			  qi->tqi_type == HAL_TX_QUEUE_UAPSD);
1235 		OS_REG_WRITE(ah, AR_Q_CBBS, (80 + 2*q));
1236 		OS_REG_WRITE(ah, AR_Q_CBBA, qi->tqi_physCompBuf);
1237 		OS_REG_WRITE(ah, AR_Q_CBC,  HAL_COMP_BUF_MAX_SIZE/1024);
1238 		OS_REG_WRITE(ah, AR_Q0_MISC + 4*q,
1239 			     OS_REG_READ(ah, AR_Q0_MISC + 4*q)
1240 			     | AR_Q_MISC_QCU_COMP_EN);
1241 	}
1242 
1243 	/*
1244 	 * Always update the secondary interrupt mask registers - this
1245 	 * could be a new queue getting enabled in a running system or
1246 	 * hw getting re-initialized during a reset!
1247 	 *
1248 	 * Since we don't differentiate between tx interrupts corresponding
1249 	 * to individual queues - secondary tx mask regs are always unmasked;
1250 	 * tx interrupts are enabled/disabled for all queues collectively
1251 	 * using the primary mask reg
1252 	 */
1253 	if (qi->tqi_qflags & HAL_TXQ_TXOKINT_ENABLE)
1254 		ahp->ah_txOkInterruptMask |= 1 << q;
1255 	else
1256 		ahp->ah_txOkInterruptMask &= ~(1 << q);
1257 	if (qi->tqi_qflags & HAL_TXQ_TXERRINT_ENABLE)
1258 		ahp->ah_txErrInterruptMask |= 1 << q;
1259 	else
1260 		ahp->ah_txErrInterruptMask &= ~(1 << q);
1261 	if (qi->tqi_qflags & HAL_TXQ_TXDESCINT_ENABLE)
1262 		ahp->ah_txDescInterruptMask |= 1 << q;
1263 	else
1264 		ahp->ah_txDescInterruptMask &= ~(1 << q);
1265 	if (qi->tqi_qflags & HAL_TXQ_TXEOLINT_ENABLE)
1266 		ahp->ah_txEolInterruptMask |= 1 << q;
1267 	else
1268 		ahp->ah_txEolInterruptMask &= ~(1 << q);
1269 	if (qi->tqi_qflags & HAL_TXQ_TXURNINT_ENABLE)
1270 		ahp->ah_txUrnInterruptMask |= 1 << q;
1271 	else
1272 		ahp->ah_txUrnInterruptMask &= ~(1 << q);
1273 	setTxQInterrupts(ah, qi);
1274 
1275 	return AH_TRUE;
1276 }
1277 #undef	TU_TO_USEC
1278