xref: /freebsd/sys/dev/ath/ath_hal/ar5212/ar5212_xmit.c (revision ddd5b8e9b4d8957fce018c520657cdfa4ecffad3)
1 /*
2  * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3  * Copyright (c) 2002-2008 Atheros Communications, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  *
17  * $FreeBSD$
18  */
19 #include "opt_ah.h"
20 
21 #include "ah.h"
22 #include "ah_internal.h"
23 #include "ah_desc.h"
24 
25 #include "ar5212/ar5212.h"
26 #include "ar5212/ar5212reg.h"
27 #include "ar5212/ar5212desc.h"
28 #include "ar5212/ar5212phy.h"
29 #ifdef AH_SUPPORT_5311
30 #include "ar5212/ar5311reg.h"
31 #endif
32 
33 #ifdef AH_NEED_DESC_SWAP
34 static void ar5212SwapTxDesc(struct ath_desc *ds);
35 #endif
36 
37 /*
38  * Update Tx FIFO trigger level.
39  *
40  * Set bIncTrigLevel to TRUE to increase the trigger level.
41  * Set bIncTrigLevel to FALSE to decrease the trigger level.
42  *
43  * Returns TRUE if the trigger level was updated
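 * NB: the trigger level controls how much data must be queued in the
 * TX FIFO before transmission begins; drivers typically raise it after
 * a FIFO underrun, trading a little latency for fewer underruns.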
44  */
45 HAL_BOOL
46 ar5212UpdateTxTrigLevel(struct ath_hal *ah, HAL_BOOL bIncTrigLevel)
47 {
48 	struct ath_hal_5212 *ahp = AH5212(ah);
49 	uint32_t txcfg, curLevel, newLevel;
50 	HAL_INT omask;
51 
52 	if (ahp->ah_txTrigLev >= ahp->ah_maxTxTrigLev)
53 		return AH_FALSE;
54 
55 	/*
56 	 * Disable interrupts while futzing with the fifo level.
57 	 */
58 	omask = ath_hal_setInterrupts(ah, ahp->ah_maskReg &~ HAL_INT_GLOBAL);
59 
60 	txcfg = OS_REG_READ(ah, AR_TXCFG);
61 	curLevel = MS(txcfg, AR_FTRIG);
62 	newLevel = curLevel;
63 	if (bIncTrigLevel) {		/* increase the trigger level */
64 		if (curLevel < ahp->ah_maxTxTrigLev)
65 			newLevel++;
66 	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
67 		newLevel--;
68 	if (newLevel != curLevel)
69 		/* Update the trigger level */
70 		OS_REG_WRITE(ah, AR_TXCFG,
71 			(txcfg &~ AR_FTRIG) | SM(newLevel, AR_FTRIG));
72 
73 	ahp->ah_txTrigLev = newLevel;
74 
75 	/* re-enable chip interrupts */
76 	ath_hal_setInterrupts(ah, omask);
77 
78 	return (newLevel != curLevel);
79 }
80 
81 /*
82  * Set the properties of the tx queue with the parameters
83  * from qInfo.
84  */
85 HAL_BOOL
86 ar5212SetTxQueueProps(struct ath_hal *ah, int q, const HAL_TXQ_INFO *qInfo)
87 {
88 	struct ath_hal_5212 *ahp = AH5212(ah);
89 	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
90 
91 	if (q >= pCap->halTotalQueues) {
92 		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
93 		    __func__, q);
94 		return AH_FALSE;
95 	}
96 	return ath_hal_setTxQProps(ah, &ahp->ah_txq[q], qInfo);
97 }
98 
99 /*
100  * Return the properties for the specified tx queue.
101  */
102 HAL_BOOL
103 ar5212GetTxQueueProps(struct ath_hal *ah, int q, HAL_TXQ_INFO *qInfo)
104 {
105 	struct ath_hal_5212 *ahp = AH5212(ah);
106 	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
107 
108 
109 	if (q >= pCap->halTotalQueues) {
110 		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
111 		    __func__, q);
112 		return AH_FALSE;
113 	}
114 	return ath_hal_getTxQProps(ah, qInfo, &ahp->ah_txq[q]);
115 }
116 
117 /*
118  * Allocate and initialize a tx DCU/QCU combination.
119  */
120 int
121 ar5212SetupTxQueue(struct ath_hal *ah, HAL_TX_QUEUE type,
122 	const HAL_TXQ_INFO *qInfo)
123 {
124 	struct ath_hal_5212 *ahp = AH5212(ah);
125 	HAL_TX_QUEUE_INFO *qi;
126 	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
127 	int q, defqflags;
128 
129 	/* by default enable OK+ERR+DESC+URN interrupts */
130 	defqflags = HAL_TXQ_TXOKINT_ENABLE
131 		  | HAL_TXQ_TXERRINT_ENABLE
132 		  | HAL_TXQ_TXDESCINT_ENABLE
133 		  | HAL_TXQ_TXURNINT_ENABLE;
134 	/* XXX move queue assignment to driver */
135 	switch (type) {
136 	case HAL_TX_QUEUE_BEACON:
137 		q = pCap->halTotalQueues-1;	/* highest priority */
138 		defqflags |= HAL_TXQ_DBA_GATED
139 		       | HAL_TXQ_CBR_DIS_QEMPTY
140 		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
141 		       | HAL_TXQ_BACKOFF_DISABLE;
142 		break;
143 	case HAL_TX_QUEUE_CAB:
144 		q = pCap->halTotalQueues-2;	/* next highest priority */
145 		defqflags |= HAL_TXQ_DBA_GATED
146 		       | HAL_TXQ_CBR_DIS_QEMPTY
147 		       | HAL_TXQ_CBR_DIS_BEMPTY
148 		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
149 		       | HAL_TXQ_BACKOFF_DISABLE;
150 		break;
151 	case HAL_TX_QUEUE_UAPSD:
152 		q = pCap->halTotalQueues-3;	/* third highest priority */
153 		if (ahp->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE) {
154 			HALDEBUG(ah, HAL_DEBUG_ANY,
155 			    "%s: no available UAPSD tx queue\n", __func__);
156 			return -1;
157 		}
158 		break;
159 	case HAL_TX_QUEUE_DATA:
160 		for (q = 0; q < pCap->halTotalQueues; q++)
161 			if (ahp->ah_txq[q].tqi_type == HAL_TX_QUEUE_INACTIVE)
162 				break;
163 		if (q == pCap->halTotalQueues) {
164 			HALDEBUG(ah, HAL_DEBUG_ANY,
165 			    "%s: no available tx queue\n", __func__);
166 			return -1;
167 		}
168 		break;
169 	default:
170 		HALDEBUG(ah, HAL_DEBUG_ANY,
171 		    "%s: bad tx queue type %u\n", __func__, type);
172 		return -1;
173 	}
174 
175 	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);
176 
177 	qi = &ahp->ah_txq[q];
178 	if (qi->tqi_type != HAL_TX_QUEUE_INACTIVE) {
179 		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: tx queue %u already active\n",
180 		    __func__, q);
181 		return -1;
182 	}
183 	OS_MEMZERO(qi, sizeof(HAL_TX_QUEUE_INFO));
184 	qi->tqi_type = type;
185 	if (qInfo == AH_NULL) {
186 		qi->tqi_qflags = defqflags;
187 		qi->tqi_aifs = INIT_AIFS;
188 		qi->tqi_cwmin = HAL_TXQ_USEDEFAULT;	/* NB: do at reset */
189 		qi->tqi_cwmax = INIT_CWMAX;
190 		qi->tqi_shretry = INIT_SH_RETRY;
191 		qi->tqi_lgretry = INIT_LG_RETRY;
192 		qi->tqi_physCompBuf = 0;
193 	} else {
194 		qi->tqi_physCompBuf = qInfo->tqi_compBuf;
195 		(void) ar5212SetTxQueueProps(ah, q, qInfo);
196 	}
197 	/* NB: must be followed by ar5212ResetTxQueue */
198 	return q;
199 }
200 
201 /*
202  * Update the h/w interrupt registers to reflect a tx q's configuration.
203  */
204 static void
205 setTxQInterrupts(struct ath_hal *ah, HAL_TX_QUEUE_INFO *qi)
206 {
207 	struct ath_hal_5212 *ahp = AH5212(ah);
208 
209 	HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
210 	    "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", __func__,
211 	    ahp->ah_txOkInterruptMask, ahp->ah_txErrInterruptMask,
212 	    ahp->ah_txDescInterruptMask, ahp->ah_txEolInterruptMask,
213 	    ahp->ah_txUrnInterruptMask);
214 
215 	OS_REG_WRITE(ah, AR_IMR_S0,
216 		  SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
217 		| SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC)
218 	);
219 	OS_REG_WRITE(ah, AR_IMR_S1,
220 		  SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
221 		| SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL)
222 	);
223 	OS_REG_RMW_FIELD(ah, AR_IMR_S2,
224 		AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
225 }
226 
227 /*
228  * Free a tx DCU/QCU combination.
229  */
230 HAL_BOOL
231 ar5212ReleaseTxQueue(struct ath_hal *ah, u_int q)
232 {
233 	struct ath_hal_5212 *ahp = AH5212(ah);
234 	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
235 	HAL_TX_QUEUE_INFO *qi;
236 
237 	if (q >= pCap->halTotalQueues) {
238 		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
239 		    __func__, q);
240 		return AH_FALSE;
241 	}
242 	qi = &ahp->ah_txq[q];
243 	if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
244 		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
245 		    __func__, q);
246 		return AH_FALSE;
247 	}
248 
249 	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: release queue %u\n", __func__, q);
250 
251 	qi->tqi_type = HAL_TX_QUEUE_INACTIVE;
252 	ahp->ah_txOkInterruptMask &= ~(1 << q);
253 	ahp->ah_txErrInterruptMask &= ~(1 << q);
254 	ahp->ah_txDescInterruptMask &= ~(1 << q);
255 	ahp->ah_txEolInterruptMask &= ~(1 << q);
256 	ahp->ah_txUrnInterruptMask &= ~(1 << q);
257 	setTxQInterrupts(ah, qi);
258 
259 	return AH_TRUE;
260 }
261 
262 /*
263  * Set the retry, aifs, cwmin/max, readyTime regs for specified queue
264  * Assumes:
265  *  phwChannel has been set to point to the current channel
266  */
267 #define	TU_TO_USEC(_tu)		((_tu) << 10)	/* 1 TU = 1024 microseconds */
268 HAL_BOOL
269 ar5212ResetTxQueue(struct ath_hal *ah, u_int q)
270 {
271 	struct ath_hal_5212 *ahp = AH5212(ah);
272 	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
273 	const struct ieee80211_channel *chan = AH_PRIVATE(ah)->ah_curchan;
274 	HAL_TX_QUEUE_INFO *qi;
275 	uint32_t cwMin, chanCwMin, qmisc, dmisc;
276 
277 	if (q >= pCap->halTotalQueues) {
278 		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
279 		    __func__, q);
280 		return AH_FALSE;
281 	}
282 	qi = &ahp->ah_txq[q];
283 	if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
284 		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
285 		    __func__, q);
286 		return AH_TRUE;		/* XXX??? */
287 	}
288 
289 	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: reset queue %u\n", __func__, q);
290 
291 	if (qi->tqi_cwmin == HAL_TXQ_USEDEFAULT) {
292 		/*
293 		 * Select cwmin according to channel type.
294 		 * NB: chan can be NULL during attach
295 		 */
296 		if (chan && IEEE80211_IS_CHAN_B(chan))
297 			chanCwMin = INIT_CWMIN_11B;
298 		else
299 			chanCwMin = INIT_CWMIN;
300 		/* make sure that the CWmin is of the form (2^n - 1) */
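		/* (e.g. a chanCwMin of 15 is kept, while 20 rounds up to 31) */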
301 		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1)
302 			;
303 	} else
304 		cwMin = qi->tqi_cwmin;
305 
306 	/* set cwMin/Max and AIFS values */
307 	OS_REG_WRITE(ah, AR_DLCL_IFS(q),
308 		  SM(cwMin, AR_D_LCL_IFS_CWMIN)
309 		| SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
310 		| SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
311 
312 	/* Set retry limit values */
313 	OS_REG_WRITE(ah, AR_DRETRY_LIMIT(q),
314 		   SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
315 		 | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
316 		 | SM(qi->tqi_lgretry, AR_D_RETRY_LIMIT_FR_LG)
317 		 | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)
318 	);
319 
320 	/* NB: always enable early termination on the QCU */
321 	qmisc = AR_Q_MISC_DCU_EARLY_TERM_REQ
322 	      | SM(AR_Q_MISC_FSP_ASAP, AR_Q_MISC_FSP);
323 
324 	/* NB: always enable DCU to wait for next fragment from QCU */
325 	dmisc = AR_D_MISC_FRAG_WAIT_EN;
326 
327 #ifdef AH_SUPPORT_5311
328 	if (AH_PRIVATE(ah)->ah_macVersion < AR_SREV_VERSION_OAHU) {
329 		/* Configure DCU to use the global sequence count */
330 		dmisc |= AR5311_D_MISC_SEQ_NUM_CONTROL;
331 	}
332 #endif
333 	/* multiqueue support */
334 	if (qi->tqi_cbrPeriod) {
335 		OS_REG_WRITE(ah, AR_QCBRCFG(q),
336 			  SM(qi->tqi_cbrPeriod,AR_Q_CBRCFG_CBR_INTERVAL)
337 			| SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_CBR_OVF_THRESH));
338 		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_CBR;
339 		if (qi->tqi_cbrOverflowLimit)
340 			qmisc |= AR_Q_MISC_CBR_EXP_CNTR_LIMIT;
341 	}
342 	if (qi->tqi_readyTime) {
343 		OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
344 			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT)
345 			| AR_Q_RDYTIMECFG_ENA);
346 	}
347 
348 	OS_REG_WRITE(ah, AR_DCHNTIME(q),
349 		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR)
350 		| (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
351 
352 	if (qi->tqi_readyTime &&
353 	    (qi->tqi_qflags & HAL_TXQ_RDYTIME_EXP_POLICY_ENABLE))
354 		qmisc |= AR_Q_MISC_RDYTIME_EXP_POLICY;
355 	if (qi->tqi_qflags & HAL_TXQ_DBA_GATED)
356 		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_DBA_GATED;
357 	if (MS(qmisc, AR_Q_MISC_FSP) != AR_Q_MISC_FSP_ASAP) {
358 		/*
359 		 * These are meaningful only when not scheduled asap.
360 		 */
361 		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_BEMPTY)
362 			qmisc |= AR_Q_MISC_CBR_INCR_DIS0;
363 		else
364 			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS0;
365 		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_QEMPTY)
366 			qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
367 		else
368 			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS1;
369 	}
370 
371 	if (qi->tqi_qflags & HAL_TXQ_BACKOFF_DISABLE)
372 		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
373 	if (qi->tqi_qflags & HAL_TXQ_FRAG_BURST_BACKOFF_ENABLE)
374 		dmisc |= AR_D_MISC_FRAG_BKOFF_EN;
375 	if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_GLOBAL)
376 		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
377 			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
378 	else if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_INTRA)
379 		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR,
380 			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
381 	if (qi->tqi_qflags & HAL_TXQ_IGNORE_VIRTCOL)
382 		dmisc |= SM(AR_D_MISC_VIR_COL_HANDLING_IGNORE,
383 			    AR_D_MISC_VIR_COL_HANDLING);
384 	if (qi->tqi_qflags & HAL_TXQ_SEQNUM_INC_DIS)
385 		dmisc |= AR_D_MISC_SEQ_NUM_INCR_DIS;
386 
387 	/*
388 	 * Fill in type-dependent bits.  Most of this can be
389 	 * removed by specifying the queue parameters in the
390 	 * driver; it's here for backwards compatibility.
391 	 */
392 	switch (qi->tqi_type) {
393 	case HAL_TX_QUEUE_BEACON:		/* beacon frames */
394 		qmisc |= AR_Q_MISC_FSP_DBA_GATED
395 		      |  AR_Q_MISC_BEACON_USE
396 		      |  AR_Q_MISC_CBR_INCR_DIS1;
397 
398 		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
399 			    AR_D_MISC_ARB_LOCKOUT_CNTRL)
400 		      |  AR_D_MISC_BEACON_USE
401 		      |  AR_D_MISC_POST_FR_BKOFF_DIS;
402 		break;
403 	case HAL_TX_QUEUE_CAB:			/* CAB  frames */
404 		/*
405 		 * Don't enable AR_Q_MISC_RDYTIME_EXP_POLICY any longer;
406 		 * there is an issue with the CAB queue not properly
407 		 * refreshing the Tx descriptor when the TXE clear
408 		 * setting is used.
409 		 */
410 		qmisc |= AR_Q_MISC_FSP_DBA_GATED
411 		      |  AR_Q_MISC_CBR_INCR_DIS1
412 		      |  AR_Q_MISC_CBR_INCR_DIS0;
413 
414 		if (qi->tqi_readyTime) {
415 			HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
416 			    "%s: using tqi_readyTime\n", __func__);
417 			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
418 			    SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT) |
419 			    AR_Q_RDYTIMECFG_ENA);
420 		} else {
421 			int value;
422 			/*
423 			 * NB: don't set default ready time if driver
424 			 * has explicitly specified something.  This is
425 			 * here solely for backwards compatibility.
426 			 */
427 			/*
428 			 * XXX for now, hard-code a CAB interval of 70%
429 			 * XXX of the total beacon interval.
430 			 */
431 
432 			value = (ahp->ah_beaconInterval * 70 / 100)
433 				- (ah->ah_config.ah_sw_beacon_response_time -
434 				   ah->ah_config.ah_dma_beacon_response_time)
435 				- ah->ah_config.ah_additional_swba_backoff;
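			/*
			 * Illustrative only (numbers assumed, not from the
			 * HAL): with a 100 TU beacon interval this starts
			 * from 70 TU before the response time and backoff
			 * terms are subtracted.
			 */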
436 			/*
437 			 * XXX Ensure it isn't too low - nothing lower
438 			 * XXX than 10 TU
439 			 */
440 			if (value < 10)
441 				value = 10;
442 			HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
443 			    "%s: defaulting to rdytime = %d TU\n",
444 			    __func__, value);
445 			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
446 			    SM(TU_TO_USEC(value), AR_Q_RDYTIMECFG_INT) |
447 			    AR_Q_RDYTIMECFG_ENA);
448 		}
449 		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
450 			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
451 		break;
452 	default:			/* NB: silence compiler */
453 		break;
454 	}
455 
456 	OS_REG_WRITE(ah, AR_QMISC(q), qmisc);
457 	OS_REG_WRITE(ah, AR_DMISC(q), dmisc);
458 
459 	/* Setup compression scratchpad buffer */
460 	/*
461 	 * XXX: calling this while the queue is operating can
462 	 *      cause unexpected behavior!
463 	 */
464 	if (qi->tqi_physCompBuf) {
465 		HALASSERT(qi->tqi_type == HAL_TX_QUEUE_DATA ||
466 			  qi->tqi_type == HAL_TX_QUEUE_UAPSD);
467 		OS_REG_WRITE(ah, AR_Q_CBBS, (80 + 2*q));
468 		OS_REG_WRITE(ah, AR_Q_CBBA, qi->tqi_physCompBuf);
469 		OS_REG_WRITE(ah, AR_Q_CBC,  HAL_COMP_BUF_MAX_SIZE/1024);
470 		OS_REG_WRITE(ah, AR_Q0_MISC + 4*q,
471 			     OS_REG_READ(ah, AR_Q0_MISC + 4*q)
472 			     | AR_Q_MISC_QCU_COMP_EN);
473 	}
474 
475 	/*
476 	 * Always update the secondary interrupt mask registers - this
477 	 * could be a new queue getting enabled in a running system or
478 	 * hw getting re-initialized during a reset!
479 	 *
480 	 * Since we don't differentiate between tx interrupts corresponding
481 	 * to individual queues - secondary tx mask regs are always unmasked;
482 	 * tx interrupts are enabled/disabled for all queues collectively
483 	 * using the primary mask reg
484 	 */
485 	if (qi->tqi_qflags & HAL_TXQ_TXOKINT_ENABLE)
486 		ahp->ah_txOkInterruptMask |= 1 << q;
487 	else
488 		ahp->ah_txOkInterruptMask &= ~(1 << q);
489 	if (qi->tqi_qflags & HAL_TXQ_TXERRINT_ENABLE)
490 		ahp->ah_txErrInterruptMask |= 1 << q;
491 	else
492 		ahp->ah_txErrInterruptMask &= ~(1 << q);
493 	if (qi->tqi_qflags & HAL_TXQ_TXDESCINT_ENABLE)
494 		ahp->ah_txDescInterruptMask |= 1 << q;
495 	else
496 		ahp->ah_txDescInterruptMask &= ~(1 << q);
497 	if (qi->tqi_qflags & HAL_TXQ_TXEOLINT_ENABLE)
498 		ahp->ah_txEolInterruptMask |= 1 << q;
499 	else
500 		ahp->ah_txEolInterruptMask &= ~(1 << q);
501 	if (qi->tqi_qflags & HAL_TXQ_TXURNINT_ENABLE)
502 		ahp->ah_txUrnInterruptMask |= 1 << q;
503 	else
504 		ahp->ah_txUrnInterruptMask &= ~(1 << q);
505 	setTxQInterrupts(ah, qi);
506 
507 	return AH_TRUE;
508 }
509 #undef	TU_TO_USEC
510 
511 /*
512  * Get the TXDP for the specified queue
513  */
514 uint32_t
515 ar5212GetTxDP(struct ath_hal *ah, u_int q)
516 {
517 	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
518 	return OS_REG_READ(ah, AR_QTXDP(q));
519 }
520 
521 /*
522  * Set the TxDP for the specified queue
523  */
524 HAL_BOOL
525 ar5212SetTxDP(struct ath_hal *ah, u_int q, uint32_t txdp)
526 {
527 	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
528 	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
529 
530 	/*
531 	 * Make sure that TXE is deasserted before setting the TXDP.  If TXE
532 	 * is still asserted, setting TXDP will have no effect.
533 	 */
534 	HALASSERT((OS_REG_READ(ah, AR_Q_TXE) & (1 << q)) == 0);
535 
536 	OS_REG_WRITE(ah, AR_QTXDP(q), txdp);
537 
538 	return AH_TRUE;
539 }
540 
541 /*
542  * Set Transmit Enable bits for the specified queue
543  */
544 HAL_BOOL
545 ar5212StartTxDma(struct ath_hal *ah, u_int q)
546 {
547 	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
548 
549 	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
550 
551 	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);
552 
553 	/* Check to be sure we're not enabling a q that has its TXD bit set. */
554 	HALASSERT((OS_REG_READ(ah, AR_Q_TXD) & (1 << q)) == 0);
555 
556 	OS_REG_WRITE(ah, AR_Q_TXE, 1 << q);
557 	return AH_TRUE;
558 }
559 
560 /*
561  * Return the number of pending frames or 0 if the specified
562  * queue is stopped.
563  */
564 uint32_t
565 ar5212NumTxPending(struct ath_hal *ah, u_int q)
566 {
567 	uint32_t npend;
568 
569 	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
570 	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
571 
572 	npend = OS_REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
573 	if (npend == 0) {
574 		/*
575 		 * Pending frame count (PFC) can momentarily go to zero
576 		 * while TXE remains asserted.  In other words a PFC of
577 		 * zero is not sufficient to say that the queue has stopped.
578 		 */
579 		if (OS_REG_READ(ah, AR_Q_TXE) & (1 << q))
580 			npend = 1;		/* arbitrarily return 1 */
581 	}
582 	return npend;
583 }
584 
585 /*
586  * Stop transmit on the specified queue
587  */
588 HAL_BOOL
589 ar5212StopTxDma(struct ath_hal *ah, u_int q)
590 {
591 	u_int i;
592 	u_int wait;
593 
594 	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
595 
596 	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
597 
598 	OS_REG_WRITE(ah, AR_Q_TXD, 1 << q);
599 	for (i = 1000; i != 0; i--) {
600 		if (ar5212NumTxPending(ah, q) == 0)
601 			break;
602 		OS_DELAY(100);        /* XXX get actual value */
603 	}
604 #ifdef AH_DEBUG
605 	if (i == 0) {
606 		HALDEBUG(ah, HAL_DEBUG_ANY,
607 		    "%s: queue %u DMA did not stop in 100 msec\n", __func__, q);
608 		HALDEBUG(ah, HAL_DEBUG_ANY,
609 		    "%s: QSTS 0x%x Q_TXE 0x%x Q_TXD 0x%x Q_CBR 0x%x\n", __func__,
610 		    OS_REG_READ(ah, AR_QSTS(q)), OS_REG_READ(ah, AR_Q_TXE),
611 		    OS_REG_READ(ah, AR_Q_TXD), OS_REG_READ(ah, AR_QCBRCFG(q)));
612 		HALDEBUG(ah, HAL_DEBUG_ANY,
613 		    "%s: Q_MISC 0x%x Q_RDYTIMECFG 0x%x Q_RDYTIMESHDN 0x%x\n",
614 		    __func__, OS_REG_READ(ah, AR_QMISC(q)),
615 		    OS_REG_READ(ah, AR_QRDYTIMECFG(q)),
616 		    OS_REG_READ(ah, AR_Q_RDYTIMESHDN));
617 	}
618 #endif /* AH_DEBUG */
619 
620 	/* 2413 and later parts can kill packets at the PCU level */
621 	if (ar5212NumTxPending(ah, q) &&
622 	    (IS_2413(ah) || IS_5413(ah) || IS_2425(ah) || IS_2417(ah))) {
623 		uint32_t tsfLow, j;
624 
625 		HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
626 		    "%s: Num of pending TX Frames %d on Q %d\n",
627 		    __func__, ar5212NumTxPending(ah, q), q);
628 
629 		/* Kill last PCU Tx Frame */
630 		/* TODO - save off and restore current values of Q1/Q2? */
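		/*
		 * NB: scheduling an immediate quiet period here (together
		 * with asserting the channel-idle diagnostic bit below)
		 * forces the PCU to abandon the frame in flight so the
		 * DMA engine can drain.
		 */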
631 		for (j = 0; j < 2; j++) {
632 			tsfLow = OS_REG_READ(ah, AR_TSF_L32);
633 			OS_REG_WRITE(ah, AR_QUIET2, SM(100, AR_QUIET2_QUIET_PER) |
634 				     SM(10, AR_QUIET2_QUIET_DUR));
635 			OS_REG_WRITE(ah, AR_QUIET1, AR_QUIET1_QUIET_ENABLE |
636 				     SM(tsfLow >> 10, AR_QUIET1_NEXT_QUIET));
637 			if ((OS_REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10)) {
638 				break;
639 			}
640 			HALDEBUG(ah, HAL_DEBUG_ANY,
641 			    "%s: TSF moved while trying to set quiet time "
642 			    "TSF: 0x%08x\n", __func__, tsfLow);
643 			HALASSERT(j < 1); /* TSF TU shouldn't advance twice unless reg access is taking forever */
644 		}
645 
646 		OS_REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
647 
648 		/* Allow the quiet mechanism to do its work */
649 		OS_DELAY(200);
650 		OS_REG_CLR_BIT(ah, AR_QUIET1, AR_QUIET1_QUIET_ENABLE);
651 
652 		/* Give at least 1 millisec more to wait */
653 		wait = 100;
654 
655 		/* Verify all transmit is dead */
656 		while (ar5212NumTxPending(ah, q)) {
657 			if ((--wait) == 0) {
658 				HALDEBUG(ah, HAL_DEBUG_ANY,
659 				    "%s: Failed to stop Tx DMA in %d msec after killing last frame\n",
660 				    __func__, wait);
661 				break;
662 			}
663 			OS_DELAY(10);
664 		}
665 
666 		OS_REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
667 	}
668 
669 	OS_REG_WRITE(ah, AR_Q_TXD, 0);
670 	return (i != 0);
671 }
672 
673 /*
674  * Descriptor Access Functions
675  */
676 
677 #define	VALID_PKT_TYPES \
678 	((1<<HAL_PKT_TYPE_NORMAL)|(1<<HAL_PKT_TYPE_ATIM)|\
679 	 (1<<HAL_PKT_TYPE_PSPOLL)|(1<<HAL_PKT_TYPE_PROBE_RESP)|\
680 	 (1<<HAL_PKT_TYPE_BEACON))
681 #define	isValidPktType(_t)	((1<<(_t)) & VALID_PKT_TYPES)
682 #define	VALID_TX_RATES \
683 	((1<<0x0b)|(1<<0x0f)|(1<<0x0a)|(1<<0x0e)|(1<<0x09)|(1<<0x0d)|\
684 	 (1<<0x08)|(1<<0x0c)|(1<<0x1b)|(1<<0x1a)|(1<<0x1e)|(1<<0x19)|\
685 	 (1<<0x1d)|(1<<0x18)|(1<<0x1c))
686 #define	isValidTxRate(_r)	((1<<(_r)) & VALID_TX_RATES)
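/*
 * NB: the values in VALID_TX_RATES are 5212 hardware rate codes (the
 * 0x08-0x0f entries are the OFDM rates, the 0x18-0x1e entries the CCK
 * rates), not IEEE rate values.
 */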
687 
688 HAL_BOOL
689 ar5212SetupTxDesc(struct ath_hal *ah, struct ath_desc *ds,
690 	u_int pktLen,
691 	u_int hdrLen,
692 	HAL_PKT_TYPE type,
693 	u_int txPower,
694 	u_int txRate0, u_int txTries0,
695 	u_int keyIx,
696 	u_int antMode,
697 	u_int flags,
698 	u_int rtsctsRate,
699 	u_int rtsctsDuration,
700 	u_int compicvLen,
701 	u_int compivLen,
702 	u_int comp)
703 {
704 #define	RTSCTS	(HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
705 	struct ar5212_desc *ads = AR5212DESC(ds);
706 	struct ath_hal_5212 *ahp = AH5212(ah);
707 
708 	(void) hdrLen;
709 
710 	HALASSERT(txTries0 != 0);
711 	HALASSERT(isValidPktType(type));
712 	HALASSERT(isValidTxRate(txRate0));
713 	HALASSERT((flags & RTSCTS) != RTSCTS);
714 	/* XXX validate antMode */
715 
716 	txPower += ahp->ah_txPowerIndexOffset;
717 	if (txPower > 63) txPower = 63;	/* clamp to the 6-bit power field */
718 
719 	ads->ds_ctl0 = (pktLen & AR_FrameLen)
720 		     | (txPower << AR_XmitPower_S)
721 		     | (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
722 		     | (flags & HAL_TXDESC_CLRDMASK ? AR_ClearDestMask : 0)
723 		     | SM(antMode, AR_AntModeXmit)
724 		     | (flags & HAL_TXDESC_INTREQ ? AR_TxInterReq : 0)
725 		     ;
726 	ads->ds_ctl1 = (type << AR_FrmType_S)
727 		     | (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0)
728 		     | (comp << AR_CompProc_S)
729 		     | (compicvLen << AR_CompICVLen_S)
730 		     | (compivLen << AR_CompIVLen_S)
731 		     ;
732 	ads->ds_ctl2 = SM(txTries0, AR_XmitDataTries0)
733 		     | (flags & HAL_TXDESC_DURENA ? AR_DurUpdateEna : 0)
734 		     ;
735 	ads->ds_ctl3 = (txRate0 << AR_XmitRate0_S)
736 		     ;
737 	if (keyIx != HAL_TXKEYIX_INVALID) {
738 		/* XXX validate key index */
739 		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
740 		ads->ds_ctl0 |= AR_DestIdxValid;
741 	}
742 	if (flags & RTSCTS) {
743 		if (!isValidTxRate(rtsctsRate)) {
744 			HALDEBUG(ah, HAL_DEBUG_ANY,
745 			    "%s: invalid rts/cts rate 0x%x\n",
746 			    __func__, rtsctsRate);
747 			return AH_FALSE;
748 		}
749 		/* XXX validate rtsctsDuration */
750 		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
751 			     | (flags & HAL_TXDESC_RTSENA ? AR_RTSCTSEnable : 0)
752 			     ;
753 		ads->ds_ctl2 |= SM(rtsctsDuration, AR_RTSCTSDuration);
754 		ads->ds_ctl3 |= (rtsctsRate << AR_RTSCTSRate_S);
755 	}
756 	return AH_TRUE;
757 #undef RTSCTS
758 }
759 
760 HAL_BOOL
761 ar5212SetupXTxDesc(struct ath_hal *ah, struct ath_desc *ds,
762 	u_int txRate1, u_int txTries1,
763 	u_int txRate2, u_int txTries2,
764 	u_int txRate3, u_int txTries3)
765 {
766 	struct ar5212_desc *ads = AR5212DESC(ds);
767 
768 	if (txTries1) {
769 		HALASSERT(isValidTxRate(txRate1));
770 		ads->ds_ctl2 |= SM(txTries1, AR_XmitDataTries1)
771 			     |  AR_DurUpdateEna
772 			     ;
773 		ads->ds_ctl3 |= (txRate1 << AR_XmitRate1_S);
774 	}
775 	if (txTries2) {
776 		HALASSERT(isValidTxRate(txRate2));
777 		ads->ds_ctl2 |= SM(txTries2, AR_XmitDataTries2)
778 			     |  AR_DurUpdateEna
779 			     ;
780 		ads->ds_ctl3 |= (txRate2 << AR_XmitRate2_S);
781 	}
782 	if (txTries3) {
783 		HALASSERT(isValidTxRate(txRate3));
784 		ads->ds_ctl2 |= SM(txTries3, AR_XmitDataTries3)
785 			     |  AR_DurUpdateEna
786 			     ;
787 		ads->ds_ctl3 |= (txRate3 << AR_XmitRate3_S);
788 	}
789 	return AH_TRUE;
790 }
791 
792 void
793 ar5212IntrReqTxDesc(struct ath_hal *ah, struct ath_desc *ds)
794 {
795 	struct ar5212_desc *ads = AR5212DESC(ds);
796 
797 #ifdef AH_NEED_DESC_SWAP
798 	ads->ds_ctl0 |= __bswap32(AR_TxInterReq);
799 #else
800 	ads->ds_ctl0 |= AR_TxInterReq;
801 #endif
802 }
803 
804 HAL_BOOL
805 ar5212FillTxDesc(struct ath_hal *ah, struct ath_desc *ds,
806 	HAL_DMA_ADDR *bufAddrList, uint32_t *segLenList, u_int qcuId,
807 	u_int descId, HAL_BOOL firstSeg, HAL_BOOL lastSeg,
808 	const struct ath_desc *ds0)
809 {
810 	struct ar5212_desc *ads = AR5212DESC(ds);
811 	uint32_t segLen = segLenList[0];
812 
813 	HALASSERT((segLen &~ AR_BufLen) == 0);
814 
815 	ds->ds_data = bufAddrList[0];
816 
817 	if (firstSeg) {
818 		/*
819 		 * First descriptor, don't clobber xmit control data
820 		 * setup by ar5212SetupTxDesc.
821 		 */
822 		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_More);
823 	} else if (lastSeg) {		/* !firstSeg && lastSeg */
824 		/*
825 		 * Last descriptor in a multi-descriptor frame,
826 		 * copy the multi-rate transmit parameters from
827 		 * the first frame for processing on completion.
828 		 */
829 		ads->ds_ctl1 = segLen;
830 #ifdef AH_NEED_DESC_SWAP
831 		ads->ds_ctl0 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl0)
832 		    & AR_TxInterReq;
833 		ads->ds_ctl2 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl2);
834 		ads->ds_ctl3 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl3);
835 #else
836 		ads->ds_ctl0 = AR5212DESC_CONST(ds0)->ds_ctl0 & AR_TxInterReq;
837 		ads->ds_ctl2 = AR5212DESC_CONST(ds0)->ds_ctl2;
838 		ads->ds_ctl3 = AR5212DESC_CONST(ds0)->ds_ctl3;
839 #endif
840 	} else {			/* !firstSeg && !lastSeg */
841 		/*
842 		 * Intermediate descriptor in a multi-descriptor frame.
843 		 */
844 #ifdef AH_NEED_DESC_SWAP
845 		ads->ds_ctl0 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl0)
846 		    & AR_TxInterReq;
847 #else
848 		ads->ds_ctl0 = AR5212DESC_CONST(ds0)->ds_ctl0 & AR_TxInterReq;
849 #endif
850 		ads->ds_ctl1 = segLen | AR_More;
851 		ads->ds_ctl2 = 0;
852 		ads->ds_ctl3 = 0;
853 	}
854 	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
855 	return AH_TRUE;
856 }
857 
858 #ifdef AH_NEED_DESC_SWAP
859 /* Swap transmit descriptor */
860 static __inline void
861 ar5212SwapTxDesc(struct ath_desc *ds)
862 {
863 	ds->ds_data = __bswap32(ds->ds_data);
864 	ds->ds_ctl0 = __bswap32(ds->ds_ctl0);
865 	ds->ds_ctl1 = __bswap32(ds->ds_ctl1);
866 	ds->ds_hw[0] = __bswap32(ds->ds_hw[0]);
867 	ds->ds_hw[1] = __bswap32(ds->ds_hw[1]);
868 	ds->ds_hw[2] = __bswap32(ds->ds_hw[2]);
869 	ds->ds_hw[3] = __bswap32(ds->ds_hw[3]);
870 }
871 #endif
872 
873 /*
874  * Processing of HW TX descriptor.
875  */
876 HAL_STATUS
877 ar5212ProcTxDesc(struct ath_hal *ah,
878 	struct ath_desc *ds, struct ath_tx_status *ts)
879 {
880 	struct ar5212_desc *ads = AR5212DESC(ds);
881 
882 #ifdef AH_NEED_DESC_SWAP
883 	if ((ads->ds_txstatus1 & __bswap32(AR_Done)) == 0)
884 		return HAL_EINPROGRESS;
885 
886 	ar5212SwapTxDesc(ds);
887 #else
888 	if ((ads->ds_txstatus1 & AR_Done) == 0)
889 		return HAL_EINPROGRESS;
890 #endif
891 
892 	/* Update software copies of the HW status */
893 	ts->ts_seqnum = MS(ads->ds_txstatus1, AR_SeqNum);
894 	ts->ts_tstamp = MS(ads->ds_txstatus0, AR_SendTimestamp);
895 	ts->ts_status = 0;
896 	if ((ads->ds_txstatus0 & AR_FrmXmitOK) == 0) {
897 		if (ads->ds_txstatus0 & AR_ExcessiveRetries)
898 			ts->ts_status |= HAL_TXERR_XRETRY;
899 		if (ads->ds_txstatus0 & AR_Filtered)
900 			ts->ts_status |= HAL_TXERR_FILT;
901 		if (ads->ds_txstatus0 & AR_FIFOUnderrun)
902 			ts->ts_status |= HAL_TXERR_FIFO;
903 	}
904 	/*
905 	 * Extract the index of the final transmit series used and
906 	 * report the rate from that series.
907 	 */
908 	ts->ts_finaltsi = MS(ads->ds_txstatus1, AR_FinalTSIndex);
909 	switch (ts->ts_finaltsi) {
910 	case 0:
911 		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate0);
912 		break;
913 	case 1:
914 		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate1);
915 		break;
916 	case 2:
917 		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate2);
918 		break;
919 	case 3:
920 		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate3);
921 		break;
922 	}
923 	ts->ts_rssi = MS(ads->ds_txstatus1, AR_AckSigStrength);
924 	ts->ts_shortretry = MS(ads->ds_txstatus0, AR_RTSFailCnt);
925 	ts->ts_longretry = MS(ads->ds_txstatus0, AR_DataFailCnt);
926 	/*
927 	 * The retry count has the number of un-acked tries for the
928 	 * final series used.  When doing multi-rate retry we must
929 	 * fixup the retry count by adding in the try counts for
930 	 * each series that was fully-processed.  Beware that this
931 	 * takes values from the try counts in the final descriptor.
932 	 * These are not required by the hardware.  We assume they
933 	 * are placed there by the driver as otherwise we have no
934 	 * access and the driver can't do the calculation because it
935 	 * doesn't know the descriptor format.
936 	 */
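	/*
	 * Worked example (values assumed): if the final series index is 2
	 * and the driver stored tries0 = 2 and tries1 = 2 in ds_ctl2, the
	 * fall-through cases below add 4 to the retry count reported for
	 * series 2.
	 */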
937 	switch (ts->ts_finaltsi) {
938 	case 3: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries2);
939 	case 2: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries1);
940 	case 1: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries0);
941 	}
942 	ts->ts_virtcol = MS(ads->ds_txstatus0, AR_VirtCollCnt);
943 	ts->ts_antenna = (ads->ds_txstatus1 & AR_XmitAtenna ? 2 : 1);
944 
945 	return HAL_OK;
946 }
947 
948 /*
949  * Determine which tx queues need interrupt servicing.
950  */
951 void
952 ar5212GetTxIntrQueue(struct ath_hal *ah, uint32_t *txqs)
953 {
954 	struct ath_hal_5212 *ahp = AH5212(ah);
955 	*txqs &= ahp->ah_intrTxqs;
956 	ahp->ah_intrTxqs &= ~(*txqs);
957 }
958 
959 /*
960  * Retrieve the rate table from the given TX completion descriptor
961  */
962 HAL_BOOL
963 ar5212GetTxCompletionRates(struct ath_hal *ah, const struct ath_desc *ds0, int *rates, int *tries)
964 {
965 	const struct ar5212_desc *ads = AR5212DESC_CONST(ds0);
966 
967 	rates[0] = MS(ads->ds_ctl3, AR_XmitRate0);
968 	rates[1] = MS(ads->ds_ctl3, AR_XmitRate1);
969 	rates[2] = MS(ads->ds_ctl3, AR_XmitRate2);
970 	rates[3] = MS(ads->ds_ctl3, AR_XmitRate3);
971 
972 	tries[0] = MS(ads->ds_ctl2, AR_XmitDataTries0);
973 	tries[1] = MS(ads->ds_ctl2, AR_XmitDataTries1);
974 	tries[2] = MS(ads->ds_ctl2, AR_XmitDataTries2);
975 	tries[3] = MS(ads->ds_ctl2, AR_XmitDataTries3);
976 
977 	return AH_TRUE;
978 }
979 
980 void
981 ar5212SetTxDescLink(struct ath_hal *ah, void *ds, uint32_t link)
982 {
983 	struct ar5212_desc *ads = AR5212DESC(ds);
984 
985 	ads->ds_link = link;
986 }
987 
988 void
989 ar5212GetTxDescLink(struct ath_hal *ah, void *ds, uint32_t *link)
990 {
991 	struct ar5212_desc *ads = AR5212DESC(ds);
992 
993 	*link = ads->ds_link;
994 }
995 
996 void
997 ar5212GetTxDescLinkPtr(struct ath_hal *ah, void *ds, uint32_t **linkptr)
998 {
999 	struct ar5212_desc *ads = AR5212DESC(ds);
1000 
1001 	*linkptr = &ads->ds_link;
1002 }
1003