xref: /freebsd/sys/dev/ath/ath_hal/ar5212/ar5212_xmit.c (revision 39beb93c3f8bdbf72a61fda42300b5ebed7390c8)
1 /*
2  * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3  * Copyright (c) 2002-2008 Atheros Communications, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  *
17  * $FreeBSD$
18  */
19 #include "opt_ah.h"
20 
21 #include "ah.h"
22 #include "ah_internal.h"
23 
24 #include "ar5212/ar5212.h"
25 #include "ar5212/ar5212reg.h"
26 #include "ar5212/ar5212desc.h"
27 #include "ar5212/ar5212phy.h"
28 #ifdef AH_SUPPORT_5311
29 #include "ar5212/ar5311reg.h"
30 #endif
31 
32 #ifdef AH_NEED_DESC_SWAP
33 static void ar5212SwapTxDesc(struct ath_desc *ds);
34 #endif
35 
36 /*
37  * Update Tx FIFO trigger level.
38  *
39  * Set bIncTrigLevel to TRUE to increase the trigger level.
40  * Set bIncTrigLevel to FALSE to decrease the trigger level.
41  *
42  * Returns TRUE if the trigger level was updated
43  */
44 HAL_BOOL
45 ar5212UpdateTxTrigLevel(struct ath_hal *ah, HAL_BOOL bIncTrigLevel)
46 {
47 	struct ath_hal_5212 *ahp = AH5212(ah);
48 	uint32_t txcfg, curLevel, newLevel;
49 	HAL_INT omask;
50 
51 	/*
52 	 * Disable interrupts while futzing with the fifo level.
53 	 */
54 	omask = ar5212SetInterrupts(ah, ahp->ah_maskReg &~ HAL_INT_GLOBAL);
55 
56 	txcfg = OS_REG_READ(ah, AR_TXCFG);
57 	curLevel = MS(txcfg, AR_FTRIG);
58 	newLevel = curLevel;
59 	if (bIncTrigLevel) {		/* increase the trigger level */
60 		if (curLevel < MAX_TX_FIFO_THRESHOLD)
61 			newLevel++;
62 	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
63 		newLevel--;
64 	if (newLevel != curLevel)
65 		/* Update the trigger level */
66 		OS_REG_WRITE(ah, AR_TXCFG,
67 			(txcfg &~ AR_FTRIG) | SM(newLevel, AR_FTRIG));
68 
69 	/* re-enable chip interrupts */
70 	ar5212SetInterrupts(ah, omask);
71 
72 	return (newLevel != curLevel);
73 }
74 
75 /*
76  * Set the properties of the tx queue with the parameters
77  * from qInfo.
78  */
79 HAL_BOOL
80 ar5212SetTxQueueProps(struct ath_hal *ah, int q, const HAL_TXQ_INFO *qInfo)
81 {
82 	struct ath_hal_5212 *ahp = AH5212(ah);
83 	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
84 
85 	if (q >= pCap->halTotalQueues) {
86 		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
87 		    __func__, q);
88 		return AH_FALSE;
89 	}
90 	return ath_hal_setTxQProps(ah, &ahp->ah_txq[q], qInfo);
91 }
92 
93 /*
94  * Return the properties for the specified tx queue.
95  */
96 HAL_BOOL
97 ar5212GetTxQueueProps(struct ath_hal *ah, int q, HAL_TXQ_INFO *qInfo)
98 {
99 	struct ath_hal_5212 *ahp = AH5212(ah);
100 	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
101 
102 
103 	if (q >= pCap->halTotalQueues) {
104 		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
105 		    __func__, q);
106 		return AH_FALSE;
107 	}
108 	return ath_hal_getTxQProps(ah, qInfo, &ahp->ah_txq[q]);
109 }
110 
/*
 * Allocate and initialize a tx DCU/QCU combination.
 *
 * The queue number is derived from the queue type: beacon, CAB and
 * UAPSD map to fixed high-numbered (high-priority) queues while data
 * queues take the first inactive slot.  Returns the queue number, or
 * -1 if no queue is available or the type is unknown.
 *
 * NB: the h/w is not programmed here; the caller must follow up with
 *     ar5212ResetTxQueue for the settings to take effect.
 */
int
ar5212SetupTxQueue(struct ath_hal *ah, HAL_TX_QUEUE type,
	const HAL_TXQ_INFO *qInfo)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_TX_QUEUE_INFO *qi;
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	int q, defqflags;

	/* by default enable OK+ERR+DESC+URN interrupts */
	defqflags = HAL_TXQ_TXOKINT_ENABLE
		  | HAL_TXQ_TXERRINT_ENABLE
		  | HAL_TXQ_TXDESCINT_ENABLE
		  | HAL_TXQ_TXURNINT_ENABLE;
	/* XXX move queue assignment to driver */
	switch (type) {
	case HAL_TX_QUEUE_BEACON:
		q = pCap->halTotalQueues-1;	/* highest priority */
		defqflags |= HAL_TXQ_DBA_GATED
		       | HAL_TXQ_CBR_DIS_QEMPTY
		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
		       | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_CAB:
		q = pCap->halTotalQueues-2;	/* next highest priority */
		defqflags |= HAL_TXQ_DBA_GATED
		       | HAL_TXQ_CBR_DIS_QEMPTY
		       | HAL_TXQ_CBR_DIS_BEMPTY
		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
		       | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_UAPSD:
		q = pCap->halTotalQueues-3;	/* next highest priority */
		/* Only one UAPSD queue exists; fail if it is taken. */
		if (ahp->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: no available UAPSD tx queue\n", __func__);
			return -1;
		}
		break;
	case HAL_TX_QUEUE_DATA:
		/* Grab the first inactive queue, lowest number first. */
		for (q = 0; q < pCap->halTotalQueues; q++)
			if (ahp->ah_txq[q].tqi_type == HAL_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->halTotalQueues) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: no available tx queue\n", __func__);
			return -1;
		}
		break;
	default:
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: bad tx queue type %u\n", __func__, type);
		return -1;
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);

	/* NB: beacon/CAB slots are re-checked here too. */
	qi = &ahp->ah_txq[q];
	if (qi->tqi_type != HAL_TX_QUEUE_INACTIVE) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: tx queue %u already active\n",
		    __func__, q);
		return -1;
	}
	OS_MEMZERO(qi, sizeof(HAL_TX_QUEUE_INFO));
	qi->tqi_type = type;
	if (qInfo == AH_NULL) {
		/* No parameters supplied; install the defaults above. */
		qi->tqi_qflags = defqflags;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = HAL_TXQ_USEDEFAULT;	/* NB: do at reset */
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qInfo->tqi_compBuf;
		(void) ar5212SetTxQueueProps(ah, q, qInfo);
	}
	/* NB: must be followed by ar5212ResetTxQueue */
	return q;
}
194 
195 /*
196  * Update the h/w interrupt registers to reflect a tx q's configuration.
197  */
198 static void
199 setTxQInterrupts(struct ath_hal *ah, HAL_TX_QUEUE_INFO *qi)
200 {
201 	struct ath_hal_5212 *ahp = AH5212(ah);
202 
203 	HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
204 	    "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", __func__,
205 	    ahp->ah_txOkInterruptMask, ahp->ah_txErrInterruptMask,
206 	    ahp->ah_txDescInterruptMask, ahp->ah_txEolInterruptMask,
207 	    ahp->ah_txUrnInterruptMask);
208 
209 	OS_REG_WRITE(ah, AR_IMR_S0,
210 		  SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
211 		| SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC)
212 	);
213 	OS_REG_WRITE(ah, AR_IMR_S1,
214 		  SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
215 		| SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL)
216 	);
217 	OS_REG_RMW_FIELD(ah, AR_IMR_S2,
218 		AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
219 }
220 
221 /*
222  * Free a tx DCU/QCU combination.
223  */
224 HAL_BOOL
225 ar5212ReleaseTxQueue(struct ath_hal *ah, u_int q)
226 {
227 	struct ath_hal_5212 *ahp = AH5212(ah);
228 	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
229 	HAL_TX_QUEUE_INFO *qi;
230 
231 	if (q >= pCap->halTotalQueues) {
232 		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
233 		    __func__, q);
234 		return AH_FALSE;
235 	}
236 	qi = &ahp->ah_txq[q];
237 	if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
238 		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
239 		    __func__, q);
240 		return AH_FALSE;
241 	}
242 
243 	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: release queue %u\n", __func__, q);
244 
245 	qi->tqi_type = HAL_TX_QUEUE_INACTIVE;
246 	ahp->ah_txOkInterruptMask &= ~(1 << q);
247 	ahp->ah_txErrInterruptMask &= ~(1 << q);
248 	ahp->ah_txDescInterruptMask &= ~(1 << q);
249 	ahp->ah_txEolInterruptMask &= ~(1 << q);
250 	ahp->ah_txUrnInterruptMask &= ~(1 << q);
251 	setTxQInterrupts(ah, qi);
252 
253 	return AH_TRUE;
254 }
255 
/*
 * Set the retry, aifs, cwmin/max, readyTime regs for specified queue
 * Assumes:
 *  phwChannel has been set to point to the current channel
 *
 * Programs the DCU/QCU registers for queue q from the cached
 * HAL_TX_QUEUE_INFO (as set up by ar5212SetupTxQueue /
 * ar5212SetTxQueueProps) and refreshes the secondary interrupt
 * mask registers.  Returns AH_FALSE only for an invalid queue
 * number; an inactive queue is silently treated as success.
 */
HAL_BOOL
ar5212ResetTxQueue(struct ath_hal *ah, u_int q)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	const struct ieee80211_channel *chan = AH_PRIVATE(ah)->ah_curchan;
	HAL_TX_QUEUE_INFO *qi;
	uint32_t cwMin, chanCwMin, value, qmisc, dmisc;

	if (q >= pCap->halTotalQueues) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
		    __func__, q);
		return AH_FALSE;
	}
	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
		    __func__, q);
		return AH_TRUE;		/* XXX??? */
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: reset queue %u\n", __func__, q);

	if (qi->tqi_cwmin == HAL_TXQ_USEDEFAULT) {
		/*
		 * Select cwmin according to channel type.
		 * NB: chan can be NULL during attach
		 */
		if (chan && IEEE80211_IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;
		/* make sure that the CWmin is of the form (2^n - 1) */
		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1)
			;
	} else
		cwMin = qi->tqi_cwmin;

	/* set cwMin/Max and AIFS values */
	OS_REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN)
		| SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
		| SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	/* Set retry limit values */
	OS_REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		   SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
		 | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
		 | SM(qi->tqi_lgretry, AR_D_RETRY_LIMIT_FR_LG)
		 | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)
	);

	/* NB: always enable early termination on the QCU */
	qmisc = AR_Q_MISC_DCU_EARLY_TERM_REQ
	      | SM(AR_Q_MISC_FSP_ASAP, AR_Q_MISC_FSP);

	/* NB: always enable DCU to wait for next fragment from QCU */
	dmisc = AR_D_MISC_FRAG_WAIT_EN;

#ifdef AH_SUPPORT_5311
	if (AH_PRIVATE(ah)->ah_macVersion < AR_SREV_VERSION_OAHU) {
		/* Configure DCU to use the global sequence count */
		dmisc |= AR5311_D_MISC_SEQ_NUM_CONTROL;
	}
#endif
	/* multiqueue support */
	if (qi->tqi_cbrPeriod) {
		/* Constant-bit-rate scheduling: period + overflow limit. */
		OS_REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod,AR_Q_CBRCFG_CBR_INTERVAL)
			| SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_CBR_OVF_THRESH));
		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_CBR;
		if (qi->tqi_cbrOverflowLimit)
			qmisc |= AR_Q_MISC_CBR_EXP_CNTR_LIMIT;
	}
	if (qi->tqi_readyTime) {
		OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT)
			| AR_Q_RDYTIMECFG_ENA);
	}

	/* Burst (channel-time) limit; enabled only when non-zero. */
	OS_REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR)
		| (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_readyTime &&
	    (qi->tqi_qflags & HAL_TXQ_RDYTIME_EXP_POLICY_ENABLE))
		qmisc |= AR_Q_MISC_RDYTIME_EXP_POLICY;
	if (qi->tqi_qflags & HAL_TXQ_DBA_GATED)
		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_DBA_GATED;
	if (MS(qmisc, AR_Q_MISC_FSP) != AR_Q_MISC_FSP_ASAP) {
		/*
		 * These are meaningful only when not scheduled asap.
		 */
		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_BEMPTY)
			qmisc |= AR_Q_MISC_CBR_INCR_DIS0;
		else
			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS0;
		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_QEMPTY)
			qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
		else
			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS1;
	}

	/* Translate the remaining HAL queue flags to DCU misc bits. */
	if (qi->tqi_qflags & HAL_TXQ_BACKOFF_DISABLE)
		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
	if (qi->tqi_qflags & HAL_TXQ_FRAG_BURST_BACKOFF_ENABLE)
		dmisc |= AR_D_MISC_FRAG_BKOFF_EN;
	if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_GLOBAL)
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
	else if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_INTRA)
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
	if (qi->tqi_qflags & HAL_TXQ_IGNORE_VIRTCOL)
		dmisc |= SM(AR_D_MISC_VIR_COL_HANDLING_IGNORE,
			    AR_D_MISC_VIR_COL_HANDLING);
	if (qi->tqi_qflags & HAL_TXQ_SEQNUM_INC_DIS)
		dmisc |= AR_D_MISC_SEQ_NUM_INCR_DIS;

	/*
	 * Fillin type-dependent bits.  Most of this can be
	 * removed by specifying the queue parameters in the
	 * driver; it's here for backwards compatibility.
	 */
	switch (qi->tqi_type) {
	case HAL_TX_QUEUE_BEACON:		/* beacon frames */
		qmisc |= AR_Q_MISC_FSP_DBA_GATED
		      |  AR_Q_MISC_BEACON_USE
		      |  AR_Q_MISC_CBR_INCR_DIS1;

		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL)
		      |  AR_D_MISC_BEACON_USE
		      |  AR_D_MISC_POST_FR_BKOFF_DIS;
		break;
	case HAL_TX_QUEUE_CAB:			/* CAB  frames */
		/*
		 * No longer Enable AR_Q_MISC_RDYTIME_EXP_POLICY,
		 * There is an issue with the CAB Queue
		 * not properly refreshing the Tx descriptor if
		 * the TXE clear setting is used.
		 */
		qmisc |= AR_Q_MISC_FSP_DBA_GATED
		      |  AR_Q_MISC_CBR_INCR_DIS1
		      |  AR_Q_MISC_CBR_INCR_DIS0;

		if (!qi->tqi_readyTime) {
			/*
			 * NB: don't set default ready time if driver
			 * has explicitly specified something.  This is
			 * here solely for backwards compatibility.
			 */
			/*
			 * NB(review): the *1024 looks like a TU->usec
			 * conversion of the beacon interval — confirm.
			 */
			value = (ahp->ah_beaconInterval
				- (ath_hal_sw_beacon_response_time -
					ath_hal_dma_beacon_response_time)
				- ath_hal_additional_swba_backoff) * 1024;
			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q), value | AR_Q_RDYTIMECFG_ENA);
		}
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
		break;
	default:			/* NB: silence compiler */
		break;
	}

	OS_REG_WRITE(ah, AR_QMISC(q), qmisc);
	OS_REG_WRITE(ah, AR_DMISC(q), dmisc);

	/* Setup compression scratchpad buffer */
	/*
	 * XXX: calling this asynchronously to queue operation can
	 *      cause unexpected behavior!!!
	 */
	if (qi->tqi_physCompBuf) {
		HALASSERT(qi->tqi_type == HAL_TX_QUEUE_DATA ||
			  qi->tqi_type == HAL_TX_QUEUE_UAPSD);
		OS_REG_WRITE(ah, AR_Q_CBBS, (80 + 2*q));
		OS_REG_WRITE(ah, AR_Q_CBBA, qi->tqi_physCompBuf);
		OS_REG_WRITE(ah, AR_Q_CBC,  HAL_COMP_BUF_MAX_SIZE/1024);
		OS_REG_WRITE(ah, AR_Q0_MISC + 4*q,
			     OS_REG_READ(ah, AR_Q0_MISC + 4*q)
			     | AR_Q_MISC_QCU_COMP_EN);
	}

	/*
	 * Always update the secondary interrupt mask registers - this
	 * could be a new queue getting enabled in a running system or
	 * hw getting re-initialized during a reset!
	 *
	 * Since we don't differentiate between tx interrupts corresponding
	 * to individual queues - secondary tx mask regs are always unmasked;
	 * tx interrupts are enabled/disabled for all queues collectively
	 * using the primary mask reg
	 */
	if (qi->tqi_qflags & HAL_TXQ_TXOKINT_ENABLE)
		ahp->ah_txOkInterruptMask |= 1 << q;
	else
		ahp->ah_txOkInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXERRINT_ENABLE)
		ahp->ah_txErrInterruptMask |= 1 << q;
	else
		ahp->ah_txErrInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXDESCINT_ENABLE)
		ahp->ah_txDescInterruptMask |= 1 << q;
	else
		ahp->ah_txDescInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXEOLINT_ENABLE)
		ahp->ah_txEolInterruptMask |= 1 << q;
	else
		ahp->ah_txEolInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXURNINT_ENABLE)
		ahp->ah_txUrnInterruptMask |= 1 << q;
	else
		ahp->ah_txUrnInterruptMask &= ~(1 << q);
	setTxQInterrupts(ah, qi);

	return AH_TRUE;
}
479 
/*
 * Get the TXDP (tx descriptor pointer) for the specified queue.
 */
uint32_t
ar5212GetTxDP(struct ath_hal *ah, u_int q)
{
	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
	return OS_REG_READ(ah, AR_QTXDP(q));
}
489 
/*
 * Set the TxDP (tx descriptor pointer) for the specified queue.
 *
 * The queue must be active and must not currently have TXE set;
 * this always returns AH_TRUE.
 */
HAL_BOOL
ar5212SetTxDP(struct ath_hal *ah, u_int q, uint32_t txdp)
{
	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

	/*
	 * Make sure that TXE is deasserted before setting the TXDP.  If TXE
	 * is still asserted, setting TXDP will have no effect.
	 */
	HALASSERT((OS_REG_READ(ah, AR_Q_TXE) & (1 << q)) == 0);

	OS_REG_WRITE(ah, AR_QTXDP(q), txdp);

	return AH_TRUE;
}
509 
/*
 * Set Transmit Enable bits for the specified queue.
 *
 * The queue must be active and must not have its disable (TXD)
 * bit pending; always returns AH_TRUE.
 */
HAL_BOOL
ar5212StartTxDma(struct ath_hal *ah, u_int q)
{
	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);

	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);

	/* Check to be sure we're not enabling a q that has its TXD bit set. */
	HALASSERT((OS_REG_READ(ah, AR_Q_TXD) & (1 << q)) == 0);

	OS_REG_WRITE(ah, AR_Q_TXE, 1 << q);
	return AH_TRUE;
}
528 
529 /*
530  * Return the number of pending frames or 0 if the specified
531  * queue is stopped.
532  */
533 uint32_t
534 ar5212NumTxPending(struct ath_hal *ah, u_int q)
535 {
536 	uint32_t npend;
537 
538 	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
539 	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
540 
541 	npend = OS_REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
542 	if (npend == 0) {
543 		/*
544 		 * Pending frame count (PFC) can momentarily go to zero
545 		 * while TXE remains asserted.  In other words a PFC of
546 		 * zero is not sufficient to say that the queue has stopped.
547 		 */
548 		if (OS_REG_READ(ah, AR_Q_TXE) & (1 << q))
549 			npend = 1;		/* arbitrarily return 1 */
550 	}
551 	return npend;
552 }
553 
554 /*
555  * Stop transmit on the specified queue
556  */
557 HAL_BOOL
558 ar5212StopTxDma(struct ath_hal *ah, u_int q)
559 {
560 	u_int i;
561 	u_int wait;
562 
563 	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
564 
565 	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);
566 
567 	OS_REG_WRITE(ah, AR_Q_TXD, 1 << q);
568 	for (i = 1000; i != 0; i--) {
569 		if (ar5212NumTxPending(ah, q) == 0)
570 			break;
571 		OS_DELAY(100);        /* XXX get actual value */
572 	}
573 #ifdef AH_DEBUG
574 	if (i == 0) {
575 		HALDEBUG(ah, HAL_DEBUG_ANY,
576 		    "%s: queue %u DMA did not stop in 100 msec\n", __func__, q);
577 		HALDEBUG(ah, HAL_DEBUG_ANY,
578 		    "%s: QSTS 0x%x Q_TXE 0x%x Q_TXD 0x%x Q_CBR 0x%x\n", __func__,
579 		    OS_REG_READ(ah, AR_QSTS(q)), OS_REG_READ(ah, AR_Q_TXE),
580 		    OS_REG_READ(ah, AR_Q_TXD), OS_REG_READ(ah, AR_QCBRCFG(q)));
581 		HALDEBUG(ah, HAL_DEBUG_ANY,
582 		    "%s: Q_MISC 0x%x Q_RDYTIMECFG 0x%x Q_RDYTIMESHDN 0x%x\n",
583 		    __func__, OS_REG_READ(ah, AR_QMISC(q)),
584 		    OS_REG_READ(ah, AR_QRDYTIMECFG(q)),
585 		    OS_REG_READ(ah, AR_Q_RDYTIMESHDN));
586 	}
587 #endif /* AH_DEBUG */
588 
589 	/* 2413+ and up can kill packets at the PCU level */
590 	if (ar5212NumTxPending(ah, q) &&
591 	    (IS_2413(ah) || IS_5413(ah) || IS_2425(ah) || IS_2417(ah))) {
592 		uint32_t tsfLow, j;
593 
594 		HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
595 		    "%s: Num of pending TX Frames %d on Q %d\n",
596 		    __func__, ar5212NumTxPending(ah, q), q);
597 
598 		/* Kill last PCU Tx Frame */
599 		/* TODO - save off and restore current values of Q1/Q2? */
600 		for (j = 0; j < 2; j++) {
601 			tsfLow = OS_REG_READ(ah, AR_TSF_L32);
602 			OS_REG_WRITE(ah, AR_QUIET2, SM(100, AR_QUIET2_QUIET_PER) |
603 				     SM(10, AR_QUIET2_QUIET_DUR));
604 			OS_REG_WRITE(ah, AR_QUIET1, AR_QUIET1_QUIET_ENABLE |
605 				     SM(tsfLow >> 10, AR_QUIET1_NEXT_QUIET));
606 			if ((OS_REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10)) {
607 				break;
608 			}
609 			HALDEBUG(ah, HAL_DEBUG_ANY,
610 			    "%s: TSF moved while trying to set quiet time "
611 			    "TSF: 0x%08x\n", __func__, tsfLow);
612 			HALASSERT(j < 1); /* TSF shouldn't count twice or reg access is taking forever */
613 		}
614 
615 		OS_REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
616 
617 		/* Allow the quiet mechanism to do its work */
618 		OS_DELAY(200);
619 		OS_REG_CLR_BIT(ah, AR_QUIET1, AR_QUIET1_QUIET_ENABLE);
620 
621 		/* Give at least 1 millisec more to wait */
622 		wait = 100;
623 
624 		/* Verify all transmit is dead */
625 		while (ar5212NumTxPending(ah, q)) {
626 			if ((--wait) == 0) {
627 				HALDEBUG(ah, HAL_DEBUG_ANY,
628 				    "%s: Failed to stop Tx DMA in %d msec after killing last frame\n",
629 				    __func__, wait);
630 				break;
631 			}
632 			OS_DELAY(10);
633 		}
634 
635 		OS_REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
636 	}
637 
638 	OS_REG_WRITE(ah, AR_Q_TXD, 0);
639 	return (i != 0);
640 }
641 
/*
 * Descriptor Access Functions
 */

/* Bitmask of HAL packet types the tx descriptor can encode. */
#define	VALID_PKT_TYPES \
	((1<<HAL_PKT_TYPE_NORMAL)|(1<<HAL_PKT_TYPE_ATIM)|\
	 (1<<HAL_PKT_TYPE_PSPOLL)|(1<<HAL_PKT_TYPE_PROBE_RESP)|\
	 (1<<HAL_PKT_TYPE_BEACON))
#define	isValidPktType(_t)	((1<<(_t)) & VALID_PKT_TYPES)
/* Bitmask of h/w rate codes accepted in the tx rate series fields. */
#define	VALID_TX_RATES \
	((1<<0x0b)|(1<<0x0f)|(1<<0x0a)|(1<<0x0e)|(1<<0x09)|(1<<0x0d)|\
	 (1<<0x08)|(1<<0x0c)|(1<<0x1b)|(1<<0x1a)|(1<<0x1e)|(1<<0x19)|\
	 (1<<0x1d)|(1<<0x18)|(1<<0x1c))
#define	isValidTxRate(_r)	((1<<(_r)) & VALID_TX_RATES)
656 
657 HAL_BOOL
658 ar5212SetupTxDesc(struct ath_hal *ah, struct ath_desc *ds,
659 	u_int pktLen,
660 	u_int hdrLen,
661 	HAL_PKT_TYPE type,
662 	u_int txPower,
663 	u_int txRate0, u_int txTries0,
664 	u_int keyIx,
665 	u_int antMode,
666 	u_int flags,
667 	u_int rtsctsRate,
668 	u_int rtsctsDuration,
669 	u_int compicvLen,
670 	u_int compivLen,
671 	u_int comp)
672 {
673 #define	RTSCTS	(HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
674 	struct ar5212_desc *ads = AR5212DESC(ds);
675 	struct ath_hal_5212 *ahp = AH5212(ah);
676 
677 	(void) hdrLen;
678 
679 	HALASSERT(txTries0 != 0);
680 	HALASSERT(isValidPktType(type));
681 	HALASSERT(isValidTxRate(txRate0));
682 	HALASSERT((flags & RTSCTS) != RTSCTS);
683 	/* XXX validate antMode */
684 
685         txPower = (txPower + ahp->ah_txPowerIndexOffset );
686         if(txPower > 63)  txPower=63;
687 
688 	ads->ds_ctl0 = (pktLen & AR_FrameLen)
689 		     | (txPower << AR_XmitPower_S)
690 		     | (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
691 		     | (flags & HAL_TXDESC_CLRDMASK ? AR_ClearDestMask : 0)
692 		     | SM(antMode, AR_AntModeXmit)
693 		     | (flags & HAL_TXDESC_INTREQ ? AR_TxInterReq : 0)
694 		     ;
695 	ads->ds_ctl1 = (type << AR_FrmType_S)
696 		     | (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0)
697                      | (comp << AR_CompProc_S)
698                      | (compicvLen << AR_CompICVLen_S)
699                      | (compivLen << AR_CompIVLen_S)
700                      ;
701 	ads->ds_ctl2 = SM(txTries0, AR_XmitDataTries0)
702 		     | (flags & HAL_TXDESC_DURENA ? AR_DurUpdateEna : 0)
703 		     ;
704 	ads->ds_ctl3 = (txRate0 << AR_XmitRate0_S)
705 		     ;
706 	if (keyIx != HAL_TXKEYIX_INVALID) {
707 		/* XXX validate key index */
708 		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
709 		ads->ds_ctl0 |= AR_DestIdxValid;
710 	}
711 	if (flags & RTSCTS) {
712 		if (!isValidTxRate(rtsctsRate)) {
713 			HALDEBUG(ah, HAL_DEBUG_ANY,
714 			    "%s: invalid rts/cts rate 0x%x\n",
715 			    __func__, rtsctsRate);
716 			return AH_FALSE;
717 		}
718 		/* XXX validate rtsctsDuration */
719 		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
720 			     | (flags & HAL_TXDESC_RTSENA ? AR_RTSCTSEnable : 0)
721 			     ;
722 		ads->ds_ctl2 |= SM(rtsctsDuration, AR_RTSCTSDuration);
723 		ads->ds_ctl3 |= (rtsctsRate << AR_RTSCTSRate_S);
724 	}
725 	return AH_TRUE;
726 #undef RTSCTS
727 }
728 
729 HAL_BOOL
730 ar5212SetupXTxDesc(struct ath_hal *ah, struct ath_desc *ds,
731 	u_int txRate1, u_int txTries1,
732 	u_int txRate2, u_int txTries2,
733 	u_int txRate3, u_int txTries3)
734 {
735 	struct ar5212_desc *ads = AR5212DESC(ds);
736 
737 	if (txTries1) {
738 		HALASSERT(isValidTxRate(txRate1));
739 		ads->ds_ctl2 |= SM(txTries1, AR_XmitDataTries1)
740 			     |  AR_DurUpdateEna
741 			     ;
742 		ads->ds_ctl3 |= (txRate1 << AR_XmitRate1_S);
743 	}
744 	if (txTries2) {
745 		HALASSERT(isValidTxRate(txRate2));
746 		ads->ds_ctl2 |= SM(txTries2, AR_XmitDataTries2)
747 			     |  AR_DurUpdateEna
748 			     ;
749 		ads->ds_ctl3 |= (txRate2 << AR_XmitRate2_S);
750 	}
751 	if (txTries3) {
752 		HALASSERT(isValidTxRate(txRate3));
753 		ads->ds_ctl2 |= SM(txTries3, AR_XmitDataTries3)
754 			     |  AR_DurUpdateEna
755 			     ;
756 		ads->ds_ctl3 |= (txRate3 << AR_XmitRate3_S);
757 	}
758 	return AH_TRUE;
759 }
760 
/*
 * Request a tx-complete interrupt for the frame described by ds.
 */
void
ar5212IntrReqTxDesc(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5212_desc *ads = AR5212DESC(ds);

#ifdef AH_NEED_DESC_SWAP
	/* Descriptor words are byte-swapped in memory; swap the flag to match. */
	ads->ds_ctl0 |= __bswap32(AR_TxInterReq);
#else
	ads->ds_ctl0 |= AR_TxInterReq;
#endif
}
772 
/*
 * Fill in the per-segment portion of a tx descriptor.
 *
 * The first segment keeps the control words installed by
 * ar5212SetupTxDesc; later segments are rebuilt from scratch, with
 * the last segment inheriting the rate-series words from the first
 * descriptor (ds0) so completion processing can see them.  Status
 * words are always cleared.  Always returns AH_TRUE.
 */
HAL_BOOL
ar5212FillTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int segLen, HAL_BOOL firstSeg, HAL_BOOL lastSeg,
	const struct ath_desc *ds0)
{
	struct ar5212_desc *ads = AR5212DESC(ds);

	/* Segment length must fit in the buffer-length field. */
	HALASSERT((segLen &~ AR_BufLen) == 0);

	if (firstSeg) {
		/*
		 * First descriptor, don't clobber xmit control data
		 * setup by ar5212SetupTxDesc.
		 */
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_More);
	} else if (lastSeg) {		/* !firstSeg && lastSeg */
		/*
		 * Last descriptor in a multi-descriptor frame,
		 * copy the multi-rate transmit parameters from
		 * the first frame for processing on completion.
		 */
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen;
#ifdef AH_NEED_DESC_SWAP
		/*
		 * NB(review): ds0's control words appear to be kept
		 * byte-swapped here, so they are re-swapped when
		 * copied — confirm against the descriptor swap path.
		 */
		ads->ds_ctl2 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl2);
		ads->ds_ctl3 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl3);
#else
		ads->ds_ctl2 = AR5212DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5212DESC_CONST(ds0)->ds_ctl3;
#endif
	} else {			/* !firstSeg && !lastSeg */
		/*
		 * Intermediate descriptor in a multi-descriptor frame.
		 */
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen | AR_More;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	/* Clear status so completion processing starts fresh. */
	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	return AH_TRUE;
}
815 
#ifdef AH_NEED_DESC_SWAP
/* Byte-swap every 32-bit word of a tx descriptor in place. */
static __inline void
ar5212SwapTxDesc(struct ath_desc *ds)
{
	int i;

	ds->ds_data = __bswap32(ds->ds_data);
	ds->ds_ctl0 = __bswap32(ds->ds_ctl0);
	ds->ds_ctl1 = __bswap32(ds->ds_ctl1);
	for (i = 0; i < 4; i++)
		ds->ds_hw[i] = __bswap32(ds->ds_hw[i]);
}
#endif
830 
/*
 * Processing of HW TX descriptor.
 *
 * Returns HAL_EINPROGRESS if the h/w has not yet marked the
 * descriptor done; otherwise decodes the status words into *ts
 * and returns HAL_OK.
 */
HAL_STATUS
ar5212ProcTxDesc(struct ath_hal *ah,
	struct ath_desc *ds, struct ath_tx_status *ts)
{
	struct ar5212_desc *ads = AR5212DESC(ds);

#ifdef AH_NEED_DESC_SWAP
	if ((ads->ds_txstatus1 & __bswap32(AR_Done)) == 0)
                return HAL_EINPROGRESS;

	/* Bring the descriptor into host byte order before decoding. */
	ar5212SwapTxDesc(ds);
#else
	if ((ads->ds_txstatus1 & AR_Done) == 0)
		return HAL_EINPROGRESS;
#endif

	/* Update software copies of the HW status */
	ts->ts_seqnum = MS(ads->ds_txstatus1, AR_SeqNum);
	ts->ts_tstamp = MS(ads->ds_txstatus0, AR_SendTimestamp);
	ts->ts_status = 0;
	if ((ads->ds_txstatus0 & AR_FrmXmitOK) == 0) {
		/* Map h/w failure bits to HAL error flags. */
		if (ads->ds_txstatus0 & AR_ExcessiveRetries)
			ts->ts_status |= HAL_TXERR_XRETRY;
		if (ads->ds_txstatus0 & AR_Filtered)
			ts->ts_status |= HAL_TXERR_FILT;
		if (ads->ds_txstatus0 & AR_FIFOUnderrun)
			ts->ts_status |= HAL_TXERR_FIFO;
	}
	/*
	 * Extract the transmit rate used and mark the rate as
	 * ``alternate'' if it wasn't the series 0 rate.
	 */
	ts->ts_finaltsi = MS(ads->ds_txstatus1, AR_FinalTSIndex);
	switch (ts->ts_finaltsi) {
	case 0:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate1) |
			HAL_TXSTAT_ALTRATE;
		break;
	case 2:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate2) |
			HAL_TXSTAT_ALTRATE;
		break;
	case 3:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate3) |
			HAL_TXSTAT_ALTRATE;
		break;
	}
	ts->ts_rssi = MS(ads->ds_txstatus1, AR_AckSigStrength);
	ts->ts_shortretry = MS(ads->ds_txstatus0, AR_RTSFailCnt);
	ts->ts_longretry = MS(ads->ds_txstatus0, AR_DataFailCnt);
	/*
	 * The retry count has the number of un-acked tries for the
	 * final series used.  When doing multi-rate retry we must
	 * fixup the retry count by adding in the try counts for
	 * each series that was fully-processed.  Beware that this
	 * takes values from the try counts in the final descriptor.
	 * These are not required by the hardware.  We assume they
	 * are placed there by the driver as otherwise we have no
	 * access and the driver can't do the calculation because it
	 * doesn't know the descriptor format.
	 */
	switch (ts->ts_finaltsi) {
		/* NB: intentional fall-through to accumulate earlier series. */
	case 3: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries2);
		/* fall thru */
	case 2: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries1);
		/* fall thru */
	case 1: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries0);
	}
	ts->ts_virtcol = MS(ads->ds_txstatus0, AR_VirtCollCnt);
	/* NB: "Atenna" spelling matches the vendor register definition. */
	ts->ts_antenna = (ads->ds_txstatus1 & AR_XmitAtenna ? 2 : 1);

	return HAL_OK;
}
908 
/*
 * Determine which tx queues need interrupt servicing.
 *
 * On entry *txqs holds the queues the caller cares about; on return
 * it holds the subset with pending tx interrupts.  Reported queues
 * are cleared from the cached state so they are not reported twice.
 */
void
ar5212GetTxIntrQueue(struct ath_hal *ah, uint32_t *txqs)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	*txqs &= ahp->ah_intrTxqs;
	ahp->ah_intrTxqs &= ~(*txqs);
}
919