/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "ath9k.h"

/*
 * Program the cached per-queue TX interrupt masks into the secondary
 * interrupt mask registers: TXOK/TXDESC live in AR_IMR_S0, TXERR/TXEOL
 * in AR_IMR_S1 and TXURN in AR_IMR_S2.
 */
static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
					struct ath9k_tx_queue_info *qi)
{
	DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
		"tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
		ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
		ah->txurn_interrupt_mask);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
		  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
		  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));
	REG_RMW_FIELD(ah, AR_IMR_S2,
		      AR_IMR_S2_QCU_TXURN, ah->txurn_interrupt_mask);
}

u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}

bool ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);

	return true;
}

bool ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Enable TXE on queue: %u\n", q);

	REG_WRITE(ah, AR_Q_TXE, 1 << q);

	return true;
}

u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
	u32 npend;

	npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {
		/*
		 * The pending-frame counter can read zero while TXE is
		 * still asserted for the queue; report at least one
		 * pending frame in that case.
		 */
		if (REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;
	}

	return npend;
}

bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
	u32 txcfg, curLevel, newLevel;
	enum ath9k_int omask;

	if (ah->tx_trig_level >= MAX_TX_FIFO_THRESHOLD)
		return false;

	/* Disable interrupts while the FIFO trigger level is updated. */
	omask = ath9k_hw_set_interrupts(ah, ah->mask_reg & ~ATH9K_INT_GLOBAL);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < MAX_TX_FIFO_THRESHOLD)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_set_interrupts(ah, omask);

	ah->tx_trig_level = newLevel;

	return newLevel != curLevel;
}
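/*
 * Stop TX DMA on queue @q. The queue is disabled via AR_Q_TXD and
 * polled for up to 4 ms; if frames are still pending after that, the
 * last frame is killed by scheduling a quiet period on the TSF and
 * forcing the channel idle, and the queue is polled once more.
 */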
bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT	4000	/* usec */
#define ATH9K_TIME_QUANTUM		100	/* usec */

	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;
	u32 tsfLow, j, wait;
	u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;

	if (q >= pCap->total_queues) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Stopping TX DMA, "
			"invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Stopping TX DMA, "
			"inactive queue: %u\n", q);
		return false;
	}

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	for (wait = wait_time; wait != 0; wait--) {
		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
		udelay(ATH9K_TIME_QUANTUM);
	}

	if (ath9k_hw_numtxpending(ah, q)) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
			"%s: Num of pending TX Frames %d on Q %d\n",
			__func__, ath9k_hw_numtxpending(ah, q), q);

		for (j = 0; j < 2; j++) {
			tsfLow = REG_READ(ah, AR_TSF_L32);
			REG_WRITE(ah, AR_QUIET2,
				  SM(10, AR_QUIET2_QUIET_DUR));
			REG_WRITE(ah, AR_QUIET_PERIOD, 100);
			REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
			REG_SET_BIT(ah, AR_TIMER_MODE,
				    AR_QUIET_TIMER_EN);

			if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
				break;

			DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
				"TSF has moved while trying to set "
				"quiet time TSF: 0x%08x\n", tsfLow);
		}

		REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);

		/* Give the quiet mechanism time to take effect. */
		udelay(200);
		REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);

		wait = wait_time;
		while (ath9k_hw_numtxpending(ah, q)) {
			if ((--wait) == 0) {
				DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
					"Failed to stop TX DMA in %d msec "
					"after killing last frame\n",
					ATH9K_TX_STOP_DMA_TIMEOUT / 1000);
				break;
			}
			udelay(ATH9K_TIME_QUANTUM);
		}

		REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	}

	REG_WRITE(ah, AR_Q_TXD, 0);
	return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
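/*
 * Fill in the segment-related fields of a TX descriptor: buffer length
 * and more-data chaining via AR_TxMore. The last segment of a
 * multi-segment frame copies the rate/tries words (ctl2/ctl3) from the
 * first descriptor @ds0; the status words are cleared so the hardware
 * can report completion into them.
 */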
bool ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
			 u32 segLen, bool firstSeg,
			 bool lastSeg, const struct ath_desc *ds0)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if (firstSeg) {
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
	} else if (lastSeg) {
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen;
		ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
	} else {
		/* Intermediate segment: chain to the following one. */
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen | AR_TxMore;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;

	return true;
}

void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}
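/*
 * Collect the completion status of a TX descriptor. Returns
 * -EINPROGRESS while the hardware has not yet set AR_TxDone; otherwise
 * the status words are decoded into ds->ds_txstat (error and flag bits,
 * block-ack bitmap, final rate index, per-chain RSSI, EVM and retry
 * counts).
 */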
int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if ((ads->ds_txstatus9 & AR_TxDone) == 0)
		return -EINPROGRESS;

	ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
	ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
	ds->ds_txstat.ts_status = 0;
	ds->ds_txstat.ts_flags = 0;

	if (ads->ds_txstatus1 & AR_ExcessiveRetries)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
	if (ads->ds_txstatus1 & AR_Filtered)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
	if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
		ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus9 & AR_TxOpExceeded)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
	if (ads->ds_txstatus1 & AR_TxTimerExpired)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;

	if (ads->ds_txstatus1 & AR_DescCfgErr)
		ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
	if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus0 & AR_TxBaStatus) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
		ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
		ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
	}

	ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
	switch (ds->ds_txstat.ts_rateindex) {
	case 0:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
		break;
	case 2:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
		break;
	case 3:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
		break;
	}

	ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
	ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
	ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
	ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
	ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
	ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
	ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
	ds->ds_txstat.evm0 = ads->AR_TxEVM0;
	ds->ds_txstat.evm1 = ads->AR_TxEVM1;
	ds->ds_txstat.evm2 = ads->AR_TxEVM2;
	ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
	ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
	ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
	ds->ds_txstat.ts_antenna = 0;

	return 0;
}

void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
			    u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
			    u32 keyIx, enum ath9k_key_type keyType, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	/* Clamp to the 6-bit limit of the descriptor's power field. */
	txPower += ah->txpower_indexoffset;
	if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 = (pktLen & AR_FrameLen)
		| (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
		| SM(txPower, AR_XmitPower)
		| (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
		| (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		| (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
		| (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);

	ads->ds_ctl1 =
		(keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
		| SM(type, AR_FrameType)
		| (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
		| (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
		| (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);

	ads->ds_ctl6 = SM(keyType, AR_EncrType);

	if (AR_SREV_9285(ah)) {
		ads->ds_ctl8 = 0;
		ads->ds_ctl9 = 0;
		ads->ds_ctl10 = 0;
		ads->ds_ctl11 = 0;
	}
}
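/*
 * Program the multi-rate retry scenario (four rate/tries series), the
 * RTS/CTS policy and the per-series packet durations into the first
 * descriptor of a frame, and mirror ctl2/ctl3 into the frame's last
 * descriptor.
 */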
void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
				  struct ath_desc *lastds,
				  u32 durUpdateEn, u32 rtsctsRate,
				  u32 rtsctsDuration,
				  struct ath9k_11n_rate_series series[],
				  u32 nseries, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ar5416_desc *last_ads = AR5416DESC(lastds);
	u32 ds_ctl0;

	if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
		ds_ctl0 = ads->ds_ctl0;

		/* RTS and CTS-to-self are mutually exclusive. */
		if (flags & ATH9K_TXDESC_RTSENA) {
			ds_ctl0 &= ~AR_CTSEnable;
			ds_ctl0 |= AR_RTSEnable;
		} else {
			ds_ctl0 &= ~AR_RTSEnable;
			ds_ctl0 |= AR_CTSEnable;
		}

		ads->ds_ctl0 = ds_ctl0;
	} else {
		ads->ds_ctl0 =
			(ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
	}

	ads->ds_ctl2 = set11nTries(series, 0)
		| set11nTries(series, 1)
		| set11nTries(series, 2)
		| set11nTries(series, 3)
		| (durUpdateEn ? AR_DurUpdateEna : 0)
		| SM(0, AR_BurstDur);

	ads->ds_ctl3 = set11nRate(series, 0)
		| set11nRate(series, 1)
		| set11nRate(series, 2)
		| set11nRate(series, 3);

	ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
		| set11nPktDurRTSCTS(series, 1);

	ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
		| set11nPktDurRTSCTS(series, 3);

	ads->ds_ctl7 = set11nRateFlags(series, 0)
		| set11nRateFlags(series, 1)
		| set11nRateFlags(series, 2)
		| set11nRateFlags(series, 3)
		| SM(rtsctsRate, AR_RTSCTSRate);

	last_ads->ds_ctl2 = ads->ds_ctl2;
	last_ads->ds_ctl3 = ads->ds_ctl3;
}

void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
				u32 aggrLen)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
	ads->ds_ctl6 &= ~AR_AggrLen;
	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
}

void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
				 u32 numDelims)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	unsigned int ctl6;

	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);

	ctl6 = ads->ds_ctl6;
	ctl6 &= ~AR_PadDelim;
	ctl6 |= SM(numDelims, AR_PadDelim);
	ads->ds_ctl6 = ctl6;
}

void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 |= AR_IsAggr;
	ads->ds_ctl1 &= ~AR_MoreAggr;
	ads->ds_ctl6 &= ~AR_PadDelim;
}

void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
}

void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
				   u32 burstDuration)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl2 &= ~AR_BurstDur;
	ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
}

void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
				     u32 vmf)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if (vmf)
		ads->ds_ctl0 |= AR_VirtMoreFrag;
	else
		ads->ds_ctl0 &= ~AR_VirtMoreFrag;
}

/*
 * Hand the caller the set of queues with pending TX interrupts and
 * clear those bits from the cached mask.
 */
void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
	*txqs &= ah->intr_txqs;
	ah->intr_txqs &= ~(*txqs);
}
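/*
 * Validate and cache the caller-supplied queue parameters in
 * ah->txq[q]. CW min/max values are rounded up to the next (2^n - 1);
 * nothing is written to the hardware here, the cached values are
 * applied by ath9k_hw_resettxqueue().
 */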
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Set TXQ properties, "
			"invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Set TXQ properties, "
			"inactive queue: %u\n", q);
		return false;
	}

	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		/* Round up to the next power of two minus one. */
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return true;
}

bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
			    struct ath9k_tx_queue_info *qinfo)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Get TXQ properties, "
			"invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Get TXQ properties, "
			"inactive queue: %u\n", q);
		return false;
	}

	qinfo->tqi_ver = qi->tqi_ver;
	qinfo->tqi_subtype = qi->tqi_subtype;
	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_priority = qi->tqi_priority;
	qinfo->tqi_aifs = qi->tqi_aifs;
	qinfo->tqi_cwmin = qi->tqi_cwmin;
	qinfo->tqi_cwmax = qi->tqi_cwmax;
	qinfo->tqi_shretry = qi->tqi_shretry;
	qinfo->tqi_lgretry = qi->tqi_lgretry;
	qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
	qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
	qinfo->tqi_burstTime = qi->tqi_burstTime;
	qinfo->tqi_readyTime = qi->tqi_readyTime;

	return true;
}
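/*
 * Claim a hardware queue for the given traffic type. The special
 * queues use fixed slots: beacon is the highest queue, CAB the next
 * one down, UAPSD the one below that, and PS-poll is queue 1; data
 * queues take the first inactive slot. Returns the queue number, or
 * -1 on failure.
 */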
int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath9k_tx_queue_info *qi;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = pCap->total_queues - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = pCap->total_queues - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = pCap->total_queues - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		for (q = 0; q < pCap->total_queues; q++)
			if (ah->txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->total_queues) {
			DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
				"No available TX queue\n");
			return -1;
		}
		break;
	default:
		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
			"Invalid TX queue type: %u\n", type);
		return -1;
	}

	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);

	qi = &ah->txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
			"TX queue: %u already active\n", q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	if (qinfo == NULL) {
		qi->tqi_qflags =
			TXQ_FLAG_TXOKINT_ENABLE
			| TXQ_FLAG_TXERRINT_ENABLE
			| TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
		(void) ath9k_hw_set_txq_props(ah, q, qinfo);
	}

	return q;
}

bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Release TXQ, "
			"invalid queue: %u\n", q);
		return false;
	}
	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Release TXQ, "
			"inactive queue: %u\n", q);
		return false;
	}

	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	ah->txok_interrupt_mask &= ~(1 << q);
	ah->txerr_interrupt_mask &= ~(1 << q);
	ah->txdesc_interrupt_mask &= ~(1 << q);
	ah->txeol_interrupt_mask &= ~(1 << q);
	ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
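/*
 * Program the cached queue parameters (IFS, retry limits, CBR, ready
 * time, burst time) into the QCU/DCU registers, apply the
 * type-specific setup for beacon, CAB, PS-poll and UAPSD queues, and
 * update the per-queue interrupt masks.
 */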
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_channel *chan = ah->curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	if (q >= pCap->total_queues) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Reset TXQ, "
			"invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Reset TXQ, "
			"inactive queue: %u\n", q);
		return true;
	}

	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		/* Round up to the next power of two minus one. */
		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
	REG_WRITE(ah, AR_DMISC(q),
		  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
			  (qi->tqi_cbrOverflowLimit ?
			   AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) |
			  AR_Q_MISC_RDYTIME_EXP_POLICY);
	}

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}
	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_FRAG_BKOFF_EN);
	}
	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_BEACON_USE
			  | AR_Q_MISC_CBR_INCR_DIS1);

		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			  | AR_D_MISC_BEACON_USE
			  | AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	case ATH9K_TX_QUEUE_CAB:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_CBR_INCR_DIS1
			  | AR_Q_MISC_CBR_INCR_DIS0);
		/*
		 * Ready time in TU, adjusted for the beacon response
		 * timing, converted to usec (x 1024).
		 */
		value = (qi->tqi_readyTime -
			 (ah->config.sw_beacon_response_time -
			  ah->config.dma_beacon_response_time) -
			 ah->config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			     AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ah->txok_interrupt_mask |= 1 << q;
	else
		ah->txok_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ah->txerr_interrupt_mask |= 1 << q;
	else
		ah->txerr_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ah->txdesc_interrupt_mask |= 1 << q;
	else
		ah->txdesc_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ah->txeol_interrupt_mask |= 1 << q;
	else
		ah->txeol_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ah->txurn_interrupt_mask |= 1 << q;
	else
		ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
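/*
 * Decode a completed RX descriptor into ds->ds_rxstat. Returns
 * -EINPROGRESS until the hardware has set AR_RxDone; the status words
 * are snapshotted to a local copy before decoding.
 */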
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
			u32 pa, struct ath_desc *nds, u64 tsf)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	ads.u.rx = adsp->u.rx;

	ds->ds_rxstat.rs_status = 0;
	ds->ds_rxstat.rs_flags = 0;

	ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;

	ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
	ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt00);
	ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt01);
	ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt02);
	ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt10);
	ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt11);
	ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt12);
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;

	ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
	ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	ds->ds_rxstat.rs_moreaggr =
		(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	ds->ds_rxstat.rs_flags =
		(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	ds->ds_rxstat.rs_flags |=
		(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		if (ads.ds_rxstatus8 & AR_CRCErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			ds->ds_rxstat.rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
	}

	return 0;
}

bool ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
			  u32 size, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath9k_hw_capabilities *pCap = &ah->caps;

	ads->ds_ctl1 = size & AR_BufLen;
	if (flags & ATH9K_RXDESC_INTREQ)
		ads->ds_ctl1 |= AR_RxIntrReq;

	ads->ds_rxstatus8 &= ~AR_RxDone;
	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
		memset(&(ads->u), 0, sizeof(ads->u));

	return true;
}

bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
	u32 reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
				   0, AH_WAIT_TIMEOUT)) {
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS |
				     AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);
			DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
				"RX failed to go idle in 10 ms RXSM=0x%x\n",
				reg);

			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}

void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}

void ath9k_hw_rxena(struct ath_hw *ah)
{
	REG_WRITE(ah, AR_CR, AR_CR_RXE);
}

void ath9k_hw_startpcureceive(struct ath_hw *ah)
{
	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}

void ath9k_hw_stoppcurecv(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
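/*
 * Disable RX DMA via AR_CR and poll the RX-enable bit for up to 10 ms;
 * returns false if the engine fails to stop in time.
 */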
bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
{
#define AH_RX_STOP_DMA_TIMEOUT	10000	/* usec */
#define AH_RX_TIME_QUANTUM	100	/* usec */

	int i;

	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	/* Wait for rx enable bit to go low */
	for (i = AH_RX_STOP_DMA_TIMEOUT / AH_RX_TIME_QUANTUM; i != 0; i--) {
		if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
			break;
		udelay(AH_RX_TIME_QUANTUM);
	}

	if (i == 0) {
		DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
			"DMA failed to stop in %d ms "
			"AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
			AH_RX_STOP_DMA_TIMEOUT / 1000,
			REG_READ(ah, AR_CR),
			REG_READ(ah, AR_DIAG_SW));
		return false;
	} else {
		return true;
	}

#undef AH_RX_TIME_QUANTUM
#undef AH_RX_STOP_DMA_TIMEOUT
}