xref: /titanic_41/usr/src/uts/common/io/arn/arn_mac.c (revision 1816cb7076d3ec8a78ef9ac9f895574e13c43645)
1 /*
2  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2008 Atheros Communications Inc.
8  *
9  * Permission to use, copy, modify, and/or distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 #include <sys/ddi.h>
23 
24 #include "arn_core.h"
25 #include "arn_hw.h"
26 #include "arn_reg.h"
27 #include "arn_phy.h"
28 
/*
 * Push the software-cached per-queue TX interrupt masks (ok/err/desc/
 * eol/urn, kept in the ath_hal_5416 softc) out to the hardware
 * secondary interrupt mask registers AR_IMR_S0/S1/S2.  Callers update
 * the ahp->ah_tx*InterruptMask fields first, then call this to apply
 * them.  The qi argument is unused (hence ARGSUSED).
 */
/* ARGSUSED */
static void
ath9k_hw_set_txq_interrupts(struct ath_hal *ah,
    struct ath9k_tx_queue_info *qi)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	ARN_DBG((ARN_DBG_INTERRUPT,
	    "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
	    __func__, ahp->ah_txOkInterruptMask,
	    ahp->ah_txErrInterruptMask, ahp->ah_txDescInterruptMask,
	    ahp->ah_txEolInterruptMask, ahp->ah_txUrnInterruptMask));

	/* TXOK and TXDESC live in S0; TXERR and TXEOL in S1 */
	REG_WRITE(ah, AR_IMR_S0,
	    SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK) |
	    SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
	    SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)|
	    SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL));
	/* TXURN shares S2 with other fields, so read-modify-write it */
	REG_RMW_FIELD(ah, AR_IMR_S2,
	    AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
}
51 
52 void
53 ath9k_hw_dmaRegDump(struct ath_hal *ah)
54 {
55 	uint32_t val[ATH9K_NUM_DMA_DEBUG_REGS];
56 	int qcuOffset = 0, dcuOffset = 0;
57 	uint32_t *qcuBase = &val[0], *dcuBase = &val[4];
58 	int i;
59 
60 	REG_WRITE(ah, AR_MACMISC,
61 	    ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
62 	    (AR_MACMISC_MISC_OBS_BUS_1 <<
63 	    AR_MACMISC_MISC_OBS_BUS_MSB_S)));
64 
65 	ARN_DBG((ARN_DBG_REG_IO, "Raw DMA Debug values:\n"));
66 
67 	for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
68 		if (i % 4 == 0)
69 			ARN_DBG((ARN_DBG_REG_IO, "\n"));
70 
71 		val[i] = REG_READ(ah, AR_DMADBG_0 + (i * sizeof (uint32_t)));
72 		ARN_DBG((ARN_DBG_REG_IO, "%d: %08x ", i, val[i]));
73 	}
74 
75 	ARN_DBG((ARN_DBG_REG_IO, "\n\n"));
76 	ARN_DBG((ARN_DBG_REG_IO,
77 	    "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n"));
78 
79 	for (i = 0; i < ATH9K_NUM_QUEUES;
80 	    i++, qcuOffset += 4, dcuOffset += 5) {
81 		if (i == 8) {
82 			qcuOffset = 0;
83 			qcuBase++;
84 		}
85 
86 		if (i == 6) {
87 			dcuOffset = 0;
88 			dcuBase++;
89 		}
90 
91 		ARN_DBG((ARN_DBG_REG_IO,
92 		    "%2d          %2x      %1x     %2x           %2x\n",
93 		    i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
94 		    (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
95 		    val[2] & (0x7 << (i * 3)) >> (i * 3),
96 		    (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset));
97 	}
98 
99 	ARN_DBG((ARN_DBG_REG_IO, "\n"));
100 	ARN_DBG((ARN_DBG_REG_IO,
101 	    "qcu_stitch state:   %2x    qcu_fetch state:        %2x\n",
102 	    (val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22));
103 	ARN_DBG((ARN_DBG_REG_IO,
104 	    "qcu_complete state: %2x    dcu_complete state:     %2x\n",
105 	    (val[3] & 0x1c000000) >> 26, (val[6] & 0x3)));
106 	ARN_DBG((ARN_DBG_REG_IO,
107 	    "dcu_arb state:      %2x    dcu_fp state:           %2x\n",
108 	    (val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27));
109 	ARN_DBG((ARN_DBG_REG_IO,
110 	    "chan_idle_dur:     %3d    chan_idle_dur_valid:     %1d\n",
111 	    (val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10));
112 	ARN_DBG((ARN_DBG_REG_IO,
113 	    "txfifo_valid_0:      %1d    txfifo_valid_1:          %1d\n",
114 	    (val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12));
115 	ARN_DBG((ARN_DBG_REG_IO,
116 	    "txfifo_dcu_num_0:   %2d    txfifo_dcu_num_1:       %2d\n",
117 	    (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17));
118 
119 	ARN_DBG((ARN_DBG_REG_IO, "pcu observe 0x%x \n",
120 	    REG_READ(ah, AR_OBS_BUS_1)));
121 	ARN_DBG((ARN_DBG_REG_IO,
122 	    "AR_CR 0x%x \n", REG_READ(ah, AR_CR)));
123 }
124 
/*
 * Return the current TX descriptor pointer (AR_QTXDP) for queue q.
 */
uint32_t
ath9k_hw_gettxbuf(struct ath_hal *ah, uint32_t q)
{
	return (REG_READ(ah, AR_QTXDP(q)));
}
130 
/*
 * Load txdp (presumably the DMA address of the first TX descriptor —
 * callers pass hardware-visible addresses here) into queue q's
 * descriptor pointer register.  Always succeeds.
 */
boolean_t
ath9k_hw_puttxbuf(struct ath_hal *ah, uint32_t q, uint32_t txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);

	return (B_TRUE);
}
138 
139 boolean_t
140 ath9k_hw_txstart(struct ath_hal *ah, uint32_t q)
141 {
142 	ARN_DBG((ARN_DBG_XMIT, "arn: ath9k_hw_txstart(): "
143 	    "tramist queue is %u\n", q));
144 
145 	REG_WRITE(ah, AR_Q_TXE, 1 << q);
146 
147 	return (B_TRUE);
148 }
149 
150 uint32_t
151 ath9k_hw_numtxpending(struct ath_hal *ah, uint32_t q)
152 {
153 	uint32_t npend;
154 
155 	npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
156 	if (npend == 0) {
157 
158 		if (REG_READ(ah, AR_Q_TXE) & (1 << q))
159 			npend = 1;
160 	}
161 
162 	return (npend);
163 }
164 
/*
 * Raise (bIncTrigLevel true) or lower the TX FIFO trigger level in
 * AR_TXCFG by one step, with interrupts globally masked around the
 * read-modify-write.  Returns B_TRUE if the level actually changed.
 */
boolean_t
ath9k_hw_updatetxtriglevel(struct ath_hal *ah, boolean_t bIncTrigLevel)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	uint32_t txcfg, curLevel, newLevel;
	enum ath9k_int omask;

	/* Already at the ceiling; nothing to raise */
	if (ah->ah_txTrigLevel >= MAX_TX_FIFO_THRESHOLD)
		return (B_FALSE);

	/* Mask all interrupts while we touch AR_TXCFG; restore below */
	omask = ath9k_hw_set_interrupts(ah,
	    ahp->ah_maskReg & ~ATH9K_INT_GLOBAL);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	/* Step by one, clamped to [MIN_TX_FIFO_THRESHOLD, MAX_...] */
	if (bIncTrigLevel) {
		if (curLevel < MAX_TX_FIFO_THRESHOLD)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
		    (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	/* Restore the interrupt mask saved above */
	(void) ath9k_hw_set_interrupts(ah, omask);

	ah->ah_txTrigLevel = (uint16_t)newLevel; /* ??? */

	return (newLevel != curLevel);
}
196 
/*
 * Stop TX DMA on queue q.  First requests a stop via AR_Q_TXD and
 * polls for up to ~100ms; if frames are still pending, forces the
 * channel to appear busy by scheduling a "quiet" period aligned with
 * the TSF and asserting force-channel-idle, which kills the frame in
 * flight, then polls again.  Returns B_TRUE if the queue drained.
 */
boolean_t
ath9k_hw_stoptxdma(struct ath_hal *ah, uint32_t q)
{
	uint32_t tsfLow, j, wait;

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	/* Poll up to 1000 * 100us = 100ms for the queue to drain */
	for (wait = 1000; wait != 0; wait--) {
		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
		drv_usecwait(100);
	}

	if (ath9k_hw_numtxpending(ah, q)) {
		ARN_DBG((ARN_DBG_QUEUE,
		    "%s: Num of pending TX Frames %d on Q %d\n",
		    __func__, ath9k_hw_numtxpending(ah, q), q));

		/*
		 * Schedule a quiet period starting "now" (TSF-aligned).
		 * Retry once if the TSF rolled past the programmed
		 * start before the timer was enabled.
		 */
		for (j = 0; j < 2; j++) {
			tsfLow = REG_READ(ah, AR_TSF_L32);
			REG_WRITE(ah, AR_QUIET2, SM(10, AR_QUIET2_QUIET_DUR));
			REG_WRITE(ah, AR_QUIET_PERIOD, 100);
			REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
			REG_SET_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);

			if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
				break;
			ARN_DBG((ARN_DBG_QUEUE,
			    "%s: TSF have moved while trying to set "
			    "quiet time TSF: 0x%08x\n",
			    __func__, tsfLow));
		}

		/* Force channel-idle so the pending frame is aborted */
		REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);

		drv_usecwait(200);
		REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);

		/* Second drain poll, again up to ~100ms */
		wait = 1000;

		while (ath9k_hw_numtxpending(ah, q)) {
			if ((--wait) == 0) {
				ARN_DBG((ARN_DBG_XMIT,
				    "%s: Failed to stop Tx DMA in 100 "
				    "msec after killing last frame\n",
				    __func__));
				break;
			}
			drv_usecwait(100);
		}

		REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	}

	/* Clear the stop request */
	REG_WRITE(ah, AR_Q_TXD, 0);

	return (wait != 0);
}
255 
/*
 * Fill the per-segment fields of a 5416 TX descriptor.  For the first
 * segment only ds_ctl1 is ORed in (ds_ctl0/2/3 are presumably already
 * populated by ath9k_hw_set11n_txdesc — confirm against callers); for
 * the final segment ctl2/ctl3 are copied from the first descriptor
 * ds0; middle segments carry just the length and the "more" flag.
 * All ten TX status words are cleared so stale completion state is
 * never read back.  Always returns B_TRUE.
 */
/* ARGSUSED */
boolean_t
ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds,
    uint32_t segLen, boolean_t firstSeg,
    boolean_t lastSeg, const struct ath_desc *ds0)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if (firstSeg) {
		/* AR_TxMore marks "another segment follows" */
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
	} else if (lastSeg) {
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen;
		ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
	} else {
		/* Intermediate segment */
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen | AR_TxMore;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;

	return (B_TRUE);
}
285 
/*
 * Zero all ten TX status words of a 5416 descriptor so the hardware
 * completion bits (e.g. AR_TxDone in ds_txstatus9) read as "not done"
 * when the descriptor is reused.  Control words are left untouched.
 */
/* ARGSUSED */
void
ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}
298 
/*
 * Decode the hardware TX completion status of a descriptor into
 * ds->ds_txstat.  Returns EINPROGRESS if the hardware has not set
 * AR_TxDone yet, else 0.  On FIFO/data/delimiter underruns the TX
 * trigger level is bumped as a side effect.
 */
int
ath9k_hw_txprocdesc(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	/* Hardware has not finished this descriptor yet */
	if ((ads->ds_txstatus9 & AR_TxDone) == 0)
		return (EINPROGRESS);

	ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
	ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
	ds->ds_txstat.ts_status = 0;
	ds->ds_txstat.ts_flags = 0;

	/* Map hardware error bits onto ATH9K_TXERR_* status flags */
	if (ads->ds_txstatus1 & AR_ExcessiveRetries) {
		ARN_DBG((ARN_DBG_INTERRUPT, "arn: ATH9K_TXERR_XRETRY\n"));
		ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
	}
	if (ads->ds_txstatus1 & AR_Filtered) {
		ARN_DBG((ARN_DBG_INTERRUPT, "arn: AR_Filtered\n"));
		ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
	}
	if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
		ARN_DBG((ARN_DBG_INTERRUPT, "arn: ATH9K_TXERR_FIFO\n"));
		ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
		/* Underrun: raise the TX FIFO trigger level */
		(void) ath9k_hw_updatetxtriglevel(ah, B_TRUE);
	}
	if (ads->ds_txstatus9 & AR_TxOpExceeded) {
		ARN_DBG((ARN_DBG_INTERRUPT, "arn: ATH9K_TXERR_XTXOP\n"));
		ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
	}
	if (ads->ds_txstatus1 & AR_TxTimerExpired) {
		ARN_DBG((ARN_DBG_INTERRUPT,
		"arn: ATH9K_TXERR_TIMER_EXPIRED\n"));
		ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
	}

	/* Informational flags (not fatal errors) go into ts_flags */
	if (ads->ds_txstatus1 & AR_DescCfgErr) {
		ARN_DBG((ARN_DBG_INTERRUPT, "arn: ATH9K_TX_DESC_CFG_ERR\n"));
		ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
	}
	if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
		(void) ath9k_hw_updatetxtriglevel(ah, B_TRUE);
	}
	if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
		(void) ath9k_hw_updatetxtriglevel(ah, B_TRUE);
	}
	if (ads->ds_txstatus0 & AR_TxBaStatus) {
		/* Block-ack received; capture the BA bitmap */
		ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
		ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
		ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
	}

	/* The rate series (0-3) the frame finally went out on */
	ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
	switch (ds->ds_txstat.ts_rateindex) {
	case 0:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
		break;
	case 2:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
		break;
	case 3:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
		break;
	}

	/* Per-chain RSSI, EVM and retry counters */
	ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
	ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
	ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
	ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
	ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
	ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
	ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
	ds->ds_txstat.evm0 = ads->AR_TxEVM0;
	ds->ds_txstat.evm1 = ads->AR_TxEVM1;
	ds->ds_txstat.evm2 = ads->AR_TxEVM2;
	ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
	ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
	ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
	ds->ds_txstat.ts_antenna = 1;

	return (0);
}
386 
/*
 * Build the first control words (ctl0, ctl1, ctl6) of an 11n TX
 * descriptor: frame length, TX power (offset-adjusted and clamped to
 * the 6-bit hardware maximum of 63), frame type, key index/type, and
 * the assorted ATH9K_TXDESC_* flag bits.  On AR9285 parts the extra
 * control words ctl8-ctl11 are zeroed as well.
 */
void
ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds,
    uint32_t pktLen, enum ath9k_pkt_type type, uint32_t txPower,
    uint32_t keyIx, enum ath9k_key_type keyType, uint32_t flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath_hal_5416 *ahp = AH5416(ah);

	/* Apply calibration offset, then clamp to the field maximum */
	txPower += ahp->ah_txPowerIndexOffset;
	if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 = (pktLen & AR_FrameLen) |
	    (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) |
	    SM(txPower, AR_XmitPower) |
	    (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) |
	    (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0) |
	    (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0) |
	    (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);

	ads->ds_ctl1 =
	    (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0) |
	    SM(type, AR_FrameType) |
	    (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0) |
	    (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0) |
	    (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);

	ads->ds_ctl6 = SM(keyType, AR_EncrType);

	/* AR9285 has additional control words; start them clean */
	if (AR_SREV_9285(ah)) {
		ads->ds_ctl8 = 0;
		ads->ds_ctl9 = 0;
		ads->ds_ctl10 = 0;
		ads->ds_ctl11 = 0;
	}

}
424 
/*
 * Program the multi-rate-retry scenario (up to 4 rate series) into a
 * TX descriptor: RTS/CTS protection bits in ctl0, try counts in ctl2,
 * rate codes in ctl3, per-series packet durations in ctl4/ctl5, and
 * rate flags plus the RTS/CTS rate in ctl7.  ctl2/ctl3 are mirrored
 * into the last descriptor of the chain so the hardware sees the same
 * scenario there.  nseries and rtsctsDuration are accepted for
 * interface compatibility but unused.
 */
/* ARGSUSED */
void
ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds,
    struct ath_desc *lastds,
    uint32_t durUpdateEn, uint32_t rtsctsRate,
    uint32_t rtsctsDuration,
    struct ath9k_11n_rate_series series[],
    uint32_t nseries, uint32_t flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ar5416_desc *last_ads = AR5416DESC(lastds);
	uint32_t ds_ctl0;

	(void) nseries;
	(void) rtsctsDuration;

	/* RTS and CTS-to-self are mutually exclusive; RTS wins */
	if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
		ds_ctl0 = ads->ds_ctl0;

		if (flags & ATH9K_TXDESC_RTSENA) {
			ds_ctl0 &= ~AR_CTSEnable;
			ds_ctl0 |= AR_RTSEnable;
		} else {
			ds_ctl0 &= ~AR_RTSEnable;
			ds_ctl0 |= AR_CTSEnable;
		}

		ads->ds_ctl0 = ds_ctl0;
	} else {
		ads->ds_ctl0 =
		    (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
	}

	ads->ds_ctl2 = set11nTries(series, 0) |
	    set11nTries(series, 1) |
	    set11nTries(series, 2) |
	    set11nTries(series, 3) |
	    (durUpdateEn ? AR_DurUpdateEna : 0) |
	    SM(0, AR_BurstDur);

	ads->ds_ctl3 = set11nRate(series, 0) |
	    set11nRate(series, 1) |
	    set11nRate(series, 2) |
	    set11nRate(series, 3);

	ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0) |
	    set11nPktDurRTSCTS(series, 1);

	ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2) |
	    set11nPktDurRTSCTS(series, 3);

	ads->ds_ctl7 = set11nRateFlags(series, 0) |
	    set11nRateFlags(series, 1) |
	    set11nRateFlags(series, 2) |
	    set11nRateFlags(series, 3) |
	    SM(rtsctsRate, AR_RTSCTSRate);
	/* Last descriptor must carry the same tries/rates words */
	last_ads->ds_ctl2 = ads->ds_ctl2;
	last_ads->ds_ctl3 = ads->ds_ctl3;
}
484 
485 /* ARGSUSED */
486 void
487 ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds,
488     uint32_t aggrLen)
489 {
490 	struct ar5416_desc *ads = AR5416DESC(ds);
491 
492 	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
493 	ads->ds_ctl6 &= ~AR_AggrLen;
494 	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
495 }
496 
497 /* ARGSUSED */
498 void
499 ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds,
500     uint32_t numDelims)
501 {
502 	struct ar5416_desc *ads = AR5416DESC(ds);
503 	unsigned int ctl6;
504 
505 	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
506 
507 	ctl6 = ads->ds_ctl6;
508 	ctl6 &= ~AR_PadDelim;
509 	ctl6 |= SM(numDelims, AR_PadDelim);
510 	ads->ds_ctl6 = ctl6;
511 }
512 
513 /* ARGSUSED */
514 void
515 ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds)
516 {
517 	struct ar5416_desc *ads = AR5416DESC(ds);
518 
519 	ads->ds_ctl1 |= AR_IsAggr;
520 	ads->ds_ctl1 &= ~AR_MoreAggr;
521 	ads->ds_ctl6 &= ~AR_PadDelim;
522 }
523 
524 /* ARGSUSED */
525 void
526 ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds)
527 {
528 	struct ar5416_desc *ads = AR5416DESC(ds);
529 
530 	ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
531 }
532 
533 /* ARGSUSED */
534 void
535 ath9k_hw_set11n_burstduration(struct ath_hal *ah, struct ath_desc *ds,
536     uint32_t burstDuration)
537 {
538 	struct ar5416_desc *ads = AR5416DESC(ds);
539 
540 	ads->ds_ctl2 &= ~AR_BurstDur;
541 	ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
542 }
543 
544 /* ARGSUSED */
545 void
546 ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah, struct ath_desc *ds,
547     uint32_t vmf)
548 {
549 	struct ar5416_desc *ads = AR5416DESC(ds);
550 
551 	if (vmf)
552 		ads->ds_ctl0 |= AR_VirtMoreFrag;
553 	else
554 		ads->ds_ctl0 &= ~AR_VirtMoreFrag;
555 }
556 
557 void
558 ath9k_hw_gettxintrtxqs(struct ath_hal *ah, uint32_t *txqs)
559 {
560 	struct ath_hal_5416 *ahp = AH5416(ah);
561 
562 	*txqs &= ahp->ah_intrTxqs;
563 	ahp->ah_intrTxqs &= ~(*txqs);
564 }
565 
/*
 * Copy caller-supplied queue parameters (qinfo) into the softc's
 * shadow queue state ahp->ah_txq[q], applying defaults for
 * ATH9K_TXQ_USEDEFAULT fields, rounding cwmin/cwmax up to the next
 * (2^n - 1), and clamping retry counts to hardware limits.  Nothing
 * is written to hardware here; ath9k_hw_resettxqueue() does that.
 * Returns B_FALSE for an invalid or inactive queue.
 */
boolean_t
ath9k_hw_set_txq_props(struct ath_hal *ah, int q,
    const struct ath9k_tx_queue_info *qinfo)
{
	uint32_t cw;
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ARN_DBG((ARN_DBG_QUEUE, "%s: invalid queue num %u\n",
		    __func__, q));
		return (B_FALSE);
	}

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ARN_DBG((ARN_DBG_QUEUE, "%s: inactive queue\n",
		    __func__));
		return (B_FALSE);
	}

	ARN_DBG((ARN_DBG_QUEUE, "%s: queue %p\n", __func__, qi));

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	/* AIFS: 8-bit hardware field, so clamp to 255 */
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	/* Round cwmin up to the next (2^n - 1), max 1023 effective */
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	/* Retry limits are 4-bit hardware fields; clamp to 15 */
	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((uint32_t)qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((uint32_t)qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	/* U-APSD data queues disable the lockout backoff */
	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return (B_TRUE);
}
637 
638 boolean_t
639 ath9k_hw_get_txq_props(struct ath_hal *ah, int q,
640     struct ath9k_tx_queue_info *qinfo)
641 {
642 	struct ath_hal_5416 *ahp = AH5416(ah);
643 	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
644 	struct ath9k_tx_queue_info *qi;
645 
646 	if (q >= pCap->total_queues) {
647 		ARN_DBG((ARN_DBG_QUEUE, "arn: ath9k_hw_get_txq_props(): "
648 		    "invalid queue num %u\n", q));
649 		return (B_FALSE);
650 	}
651 
652 	qi = &ahp->ah_txq[q];
653 	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
654 		ARN_DBG((ARN_DBG_QUEUE, "arn: ath9k_hw_get_txq_props(): "
655 		    "inactive queue\n"));
656 		return (B_FALSE);
657 	}
658 
659 	qinfo->tqi_qflags = qi->tqi_qflags;
660 	qinfo->tqi_ver = qi->tqi_ver;
661 	qinfo->tqi_subtype = qi->tqi_subtype;
662 	qinfo->tqi_qflags = qi->tqi_qflags;
663 	qinfo->tqi_priority = qi->tqi_priority;
664 	qinfo->tqi_aifs = qi->tqi_aifs;
665 	qinfo->tqi_cwmin = qi->tqi_cwmin;
666 	qinfo->tqi_cwmax = qi->tqi_cwmax;
667 	qinfo->tqi_shretry = qi->tqi_shretry;
668 	qinfo->tqi_lgretry = qi->tqi_lgretry;
669 	qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
670 	qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
671 	qinfo->tqi_burstTime = qi->tqi_burstTime;
672 	qinfo->tqi_readyTime = qi->tqi_readyTime;
673 
674 	return (B_TRUE);
675 }
676 
/*
 * Allocate a TX queue of the given type.  Fixed-purpose queues
 * (beacon, CAB, PS-poll, U-APSD) map to reserved queue numbers at the
 * top of the range; DATA queues take the first inactive slot.  The
 * slot's shadow state is initialized from qinfo, or from the driver
 * defaults when qinfo is NULL.  Returns the queue number, or -1 on
 * failure (no free queue, bad type, or queue already active).
 */
int
ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type,
    const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_tx_queue_info *qi;
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
	int q;

	/* Reserved queue numbers for the special-purpose queues */
	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = pCap->total_queues - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = pCap->total_queues - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = pCap->total_queues - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		/* First-fit search for an inactive slot */
		for (q = 0; q < pCap->total_queues; q++)
			if (ahp->ah_txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->total_queues) {
			ARN_DBG((ARN_DBG_QUEUE,
			    "arn: ath9k_hw_setuptxqueue(): "
			    "no available tx queue\n"));
			return (-1);
		}
		break;
	default:
		ARN_DBG((ARN_DBG_QUEUE,
		    "arn: ath9k_hw_setuptxqueue(): "
		    "bad tx queue type %u\n", type));

		return (-1);
	}

	ARN_DBG((ARN_DBG_QUEUE, "arn: ath9k_hw_setuptxqueue(): "
	    "queue %u\n", q));

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		ARN_DBG((ARN_DBG_QUEUE, "arn: ath9k_hw_setuptxqueue(): "
		    "tx queue %u already active\n", q));

		return (-1);
	}
	/* Claim the slot and initialize its shadow parameters */
	(void) memset(qi, 0, sizeof (struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	if (qinfo == NULL) {
		/* No caller parameters: use the driver defaults */
		qi->tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
		    TXQ_FLAG_TXERRINT_ENABLE |
		    TXQ_FLAG_TXDESCINT_ENABLE |
		    TXQ_FLAG_TXURNINT_ENABLE;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
		(void) ath9k_hw_set_txq_props(ah, q, qinfo);
	}

	return (q);
}
749 
750 boolean_t
751 ath9k_hw_releasetxqueue(struct ath_hal *ah, uint32_t q)
752 {
753 	struct ath_hal_5416 *ahp = AH5416(ah);
754 	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
755 	struct ath9k_tx_queue_info *qi;
756 
757 	if (q >= pCap->total_queues) {
758 		ARN_DBG((ARN_DBG_QUEUE, "arn: arn_txq_setup(): "
759 		    "invalid queue num %u\n", q));
760 		return (B_FALSE);
761 	}
762 	qi = &ahp->ah_txq[q];
763 	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
764 		ARN_DBG((ARN_DBG_QUEUE, "arn: arn_txq_setup(): "
765 		    "inactive queue %u\n", q));
766 		return (B_FALSE);
767 	}
768 
769 	ARN_DBG((ARN_DBG_QUEUE, "arn: arn_txq_setup(): "
770 	    "release queue %u\n", q));
771 
772 
773 	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
774 	ahp->ah_txOkInterruptMask &= ~(1 << q);
775 	ahp->ah_txErrInterruptMask &= ~(1 << q);
776 	ahp->ah_txDescInterruptMask &= ~(1 << q);
777 	ahp->ah_txEolInterruptMask &= ~(1 << q);
778 	ahp->ah_txUrnInterruptMask &= ~(1 << q);
779 	ath9k_hw_set_txq_interrupts(ah, qi);
780 
781 	return (B_TRUE);
782 }
783 
/*
 * Program the hardware QCU/DCU registers for TX queue q from the
 * shadow parameters in ahp->ah_txq[q]: contention window, retry
 * limits, CBR/ready-time/burst settings, and per-queue-type special
 * handling (beacon, CAB, PS-poll, U-APSD).  Finally recomputes the
 * cached TX interrupt masks from tqi_qflags and pushes them to
 * hardware.  Returns B_TRUE for an inactive queue (nothing to do)
 * and B_FALSE only for an out-of-range queue number.
 */
boolean_t
ath9k_hw_resettxqueue(struct ath_hal *ah, uint32_t q)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
	struct ath9k_channel *chan = ah->ah_curchan;
	struct ath9k_tx_queue_info *qi;
	uint32_t cwMin, chanCwMin, value;

	if (q >= pCap->total_queues) {
		ARN_DBG((ARN_DBG_QUEUE, "%s: invalid queue num %u\n",
		    __func__, q));

		return (B_FALSE);
	}

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ARN_DBG((ARN_DBG_QUEUE, "%s: inactive queue %u\n",
		    __func__, q));

		return (B_TRUE);
	}

	ARN_DBG((ARN_DBG_QUEUE,
	    "%s: reset queue %u\n", __func__, q));

	/*
	 * USEDEFAULT cwmin: pick the channel default (11b vs. OFDM)
	 * and round it up to the next (2^n - 1) via the empty loop.
	 */
	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1) {
			/* Nothing to do */
		}
	} else
		cwMin = qi->tqi_cwmin;

	/* Local IFS: cwmin/cwmax/AIFS */
	REG_WRITE(ah, AR_DLCL_IFS(q),
	    SM(cwMin, AR_D_LCL_IFS_CWMIN) |
	    SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
	    SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
	    SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
	    SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
	    SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
	REG_WRITE(ah, AR_DMISC(q),
	    AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	/* Constant-bit-rate scheduling, if configured */
	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
		    SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
		    SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_WRITE(ah, AR_QMISC(q),
		    REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
		    (qi->tqi_cbrOverflowLimit ?
		    AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	/* CAB queues get their ready time computed specially below */
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
		    SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
		    AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
	    SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
	    (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime &&
	    (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
		REG_WRITE(ah, AR_QMISC(q),
		    REG_READ(ah, AR_QMISC(q)) |
		    AR_Q_MISC_RDYTIME_EXP_POLICY);

	}

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
		REG_WRITE(ah, AR_DMISC(q),
		    REG_READ(ah, AR_DMISC(q)) |
		    AR_D_MISC_POST_FR_BKOFF_DIS);
	}
	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
		REG_WRITE(ah, AR_DMISC(q),
		    REG_READ(ah, AR_DMISC(q)) |
		    AR_D_MISC_FRAG_BKOFF_EN);
	}
	/* Queue-type-specific gating and arbitration settings */
	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q)) |
		    AR_Q_MISC_FSP_DBA_GATED |
		    AR_Q_MISC_BEACON_USE |
		    AR_Q_MISC_CBR_INCR_DIS1);

		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
		    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
		    AR_D_MISC_ARB_LOCKOUT_CNTRL_S) |
		    AR_D_MISC_BEACON_USE |
		    AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	case ATH9K_TX_QUEUE_CAB:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q)) |
		    AR_Q_MISC_FSP_DBA_GATED |
		    AR_Q_MISC_CBR_INCR_DIS1 |
		    AR_Q_MISC_CBR_INCR_DIS0);
		/*
		 * CAB ready time: configured time minus beacon response
		 * slack, scaled by 1024 (presumably TU -> us; confirm
		 * against the register definition).
		 */
		value = (qi->tqi_readyTime -
		    (ah->ah_config.sw_beacon_response_time -
		    ah->ah_config.dma_beacon_response_time) -
		    ah->ah_config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
		    value | AR_Q_RDYTIMECFG_EN);
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
		    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
		    AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_WRITE(ah, AR_QMISC(q),
		    REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
		    AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
		    SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
		    AR_D_MISC_ARB_LOCKOUT_CNTRL) |
		    AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	/* Rebuild the cached interrupt masks from the queue flags */
	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ahp->ah_txOkInterruptMask |= 1 << q;
	else
		ahp->ah_txOkInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ahp->ah_txErrInterruptMask |= 1 << q;
	else
		ahp->ah_txErrInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ahp->ah_txDescInterruptMask |= 1 << q;
	else
		ahp->ah_txDescInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ahp->ah_txEolInterruptMask |= 1 << q;
	else
		ahp->ah_txEolInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ahp->ah_txUrnInterruptMask |= 1 << q;
	else
		ahp->ah_txUrnInterruptMask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return (B_TRUE);
}
945 
/*
 * Decode a completed RX descriptor into ds->ds_rxstat.  Returns
 * EINPROGRESS if the hardware has not set AR_RxDone yet, else 0.
 * The status words are copied to a local before decoding; the pa,
 * nds, and tsf arguments are unused here (ARGSUSED).
 */
/* ARGSUSED */
int
ath9k_hw_rxprocdesc(struct ath_hal *ah, struct ath_desc *ds,
    uint32_t pa,
    struct ath_desc *nds,
    uint64_t tsf)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	uint32_t phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return (EINPROGRESS);

	/* Snapshot the RX status words before decoding them */
	ads.u.rx = adsp->u.rx;

	ds->ds_rxstat.rs_status = 0;
	ds->ds_rxstat.rs_flags = 0;

	ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;

	/* Combined plus per-chain (ctl/ext) RSSI readings */
	ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
	ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt00);
	ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt01);
	ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt02);
	ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt10);
	ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt11);
	ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt12);
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;

	ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
	ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	/* 11n aggregation / HT flags */
	ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	ds->ds_rxstat.rs_moreaggr =
	    (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	ds->ds_rxstat.rs_flags =
	    (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	ds->ds_rxstat.rs_flags |=
	    (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	/* Error bits are only meaningful when the frame was not OK */
	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		if (ads.ds_rxstatus8 & AR_CRCErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			ds->ds_rxstat.rs_phyerr = (uint8_t)phyerr; /* LINT */
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
	}

	return (0);
}
1014 
1015 boolean_t
1016 ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds,
1017     uint32_t size, uint32_t flags)
1018 {
1019 	struct ar5416_desc *ads = AR5416DESC(ds);
1020 	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
1021 
1022 	ads->ds_ctl1 = size & AR_BufLen;
1023 	if (flags & ATH9K_RXDESC_INTREQ)
1024 		ads->ds_ctl1 |= AR_RxIntrReq;
1025 
1026 	ads->ds_rxstatus8 &= ~AR_RxDone;
1027 	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
1028 		(void) memset(&(ads->u), 0, sizeof (ads->u));
1029 
1030 	return (B_TRUE);
1031 }
1032 
/*
 * Enable (set=B_TRUE) or disable the RX abort/disable bits in
 * AR_DIAG_SW.  When enabling, waits for the RX state machine to go
 * idle; on timeout the bits are rolled back and B_FALSE is returned.
 */
boolean_t
ath9k_hw_setrxabort(struct ath_hal *ah, boolean_t set)
{
	uint32_t reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
		    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		/* Wait for the RX state machine to reach idle */
		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1,
		    AR_OBS_BUS_1_RX_STATE, 0)) {
			/* Timed out: undo the abort request */
			REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);

			ARN_DBG((ARN_DBG_FATAL,
			    "%s: rx failed to go idle in 10 ms RXSM=0x%x\n",
			    __func__, reg));

			return (B_FALSE);
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
		    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return (B_TRUE);
}
1062 
/*
 * Load rxdp (presumably the DMA address of the RX descriptor chain)
 * into the hardware RX descriptor pointer register.
 */
void
ath9k_hw_putrxbuf(struct ath_hal *ah, uint32_t rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}
1068 
/*
 * Enable receive DMA by setting the RXE bit in the command register.
 */
void
ath9k_hw_rxena(struct ath_hal *ah)
{
	REG_WRITE(ah, AR_CR, AR_CR_RXE);
}
1074 
/*
 * Start PCU-level reception: re-enable the MIB counters, reset the
 * ANI (adaptive noise immunity) state, and clear the RX disable and
 * abort bits in AR_DIAG_SW.
 */
void
ath9k_hw_startpcureceive(struct ath_hal *ah)
{
	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
1084 
/*
 * Stop PCU-level reception: set the RX disable bit in AR_DIAG_SW and
 * freeze the MIB counters.
 */
void
ath9k_hw_stoppcurecv(struct ath_hal *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
1092 
1093 boolean_t
1094 ath9k_hw_stopdmarecv(struct ath_hal *ah)
1095 {
1096 	REG_WRITE(ah, AR_CR, AR_CR_RXD);
1097 
1098 	if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0)) {
1099 		ARN_DBG((ARN_DBG_QUEUE, "arn: ath9k_hw_stopdmarecv(): "
1100 		    "dma failed to stop in 10ms\n"
1101 		    "AR_CR=0x%08x\nAR_DIAG_SW=0x%08x\n",
1102 		    REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW)));
1103 		return (B_FALSE);
1104 	} else {
1105 		return (B_TRUE);
1106 	}
1107 }
1108