1 /* 2 * Copyright (c) 2008-2009 Atheros Communications Inc. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for any 5 * purpose with or without fee is hereby granted, provided that the above 6 * copyright notice and this permission notice appear in all copies. 7 * 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 */ 16 17 #include "hw.h" 18 #include "hw-ops.h" 19 20 static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah, 21 struct ath9k_tx_queue_info *qi) 22 { 23 ath_print(ath9k_hw_common(ah), ATH_DBG_INTERRUPT, 24 "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", 25 ah->txok_interrupt_mask, ah->txerr_interrupt_mask, 26 ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask, 27 ah->txurn_interrupt_mask); 28 29 ENABLE_REGWRITE_BUFFER(ah); 30 31 REG_WRITE(ah, AR_IMR_S0, 32 SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK) 33 | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC)); 34 REG_WRITE(ah, AR_IMR_S1, 35 SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR) 36 | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL)); 37 38 ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN; 39 ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN); 40 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg); 41 42 REGWRITE_BUFFER_FLUSH(ah); 43 DISABLE_REGWRITE_BUFFER(ah); 44 } 45 46 u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q) 47 { 48 return REG_READ(ah, AR_QTXDP(q)); 49 } 50 EXPORT_SYMBOL(ath9k_hw_gettxbuf); 51 52 void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp) 53 { 54 REG_WRITE(ah, AR_QTXDP(q), txdp); 55 } 56 EXPORT_SYMBOL(ath9k_hw_puttxbuf); 57 58 void ath9k_hw_txstart(struct ath_hw *ah, u32 q) 59 { 60 ath_print(ath9k_hw_common(ah), ATH_DBG_QUEUE, 61 "Enable TXE on queue: %u\n", q); 62 REG_WRITE(ah, AR_Q_TXE, 1 << q); 63 } 64 EXPORT_SYMBOL(ath9k_hw_txstart); 65 66 void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds) 67 { 68 struct ar5416_desc *ads = AR5416DESC(ds); 69 70 ads->ds_txstatus0 = ads->ds_txstatus1 = 0; 71 ads->ds_txstatus2 = ads->ds_txstatus3 = 0; 72 ads->ds_txstatus4 = ads->ds_txstatus5 = 0; 73 ads->ds_txstatus6 = ads->ds_txstatus7 = 0; 74 ads->ds_txstatus8 = ads->ds_txstatus9 = 0; 75 } 76 EXPORT_SYMBOL(ath9k_hw_cleartxdesc); 77 78 u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q) 79 { 80 u32 npend; 81 82 npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT; 83 if (npend == 0) { 84 85 if (REG_READ(ah, AR_Q_TXE) & (1 << q)) 86 npend = 1; 87 } 88 89 return npend; 90 } 91 EXPORT_SYMBOL(ath9k_hw_numtxpending); 92 93 /** 94 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level 95 * 96 * @ah: atheros hardware struct 97 * @bIncTrigLevel: whether or not the frame trigger level should be updated 98 * 99 * The frame trigger level specifies the minimum number of bytes, 100 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO 101 * before the PCU will initiate sending the frame on the air. This can 102 * mean we initiate transmit before a full frame is on the PCU TX FIFO. 103 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs 104 * first) 105 * 106 * Caution must be taken to ensure to set the frame trigger level based 107 * on the DMA request size. For example if the DMA request size is set to 108 * 128 bytes the trigger level cannot exceed 6 * 64 = 384. This is because 109 * there need to be enough space in the tx FIFO for the requested transfer 110 * size. Hence the tx FIFO will stop with 512 - 128 = 384 bytes. If we set 111 * the threshold to a value beyond 6, then the transmit will hang. 112 * 113 * Current dual stream devices have a PCU TX FIFO size of 8 KB. 114 * Current single stream devices have a PCU TX FIFO size of 4 KB, however, 115 * there is a hardware issue which forces us to use 2 KB instead so the 116 * frame trigger level must not exceed 2 KB for these chipsets. 117 */ 118 bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel) 119 { 120 u32 txcfg, curLevel, newLevel; 121 enum ath9k_int omask; 122 123 if (ah->tx_trig_level >= ah->config.max_txtrig_level) 124 return false; 125 126 omask = ath9k_hw_set_interrupts(ah, ah->imask & ~ATH9K_INT_GLOBAL); 127 128 txcfg = REG_READ(ah, AR_TXCFG); 129 curLevel = MS(txcfg, AR_FTRIG); 130 newLevel = curLevel; 131 if (bIncTrigLevel) { 132 if (curLevel < ah->config.max_txtrig_level) 133 newLevel++; 134 } else if (curLevel > MIN_TX_FIFO_THRESHOLD) 135 newLevel--; 136 if (newLevel != curLevel) 137 REG_WRITE(ah, AR_TXCFG, 138 (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG)); 139 140 ath9k_hw_set_interrupts(ah, omask); 141 142 ah->tx_trig_level = newLevel; 143 144 return newLevel != curLevel; 145 } 146 EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel); 147 148 bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q) 149 { 150 #define ATH9K_TX_STOP_DMA_TIMEOUT 4000 /* usec */ 151 #define ATH9K_TIME_QUANTUM 100 /* usec */ 152 struct ath_common *common = ath9k_hw_common(ah); 153 struct ath9k_hw_capabilities *pCap = &ah->caps; 154 struct ath9k_tx_queue_info *qi; 155 u32 tsfLow, j, wait; 156 u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM; 157 158 if (q >= pCap->total_queues) { 159 ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, " 160 "invalid queue: %u\n", q); 161 return false; 162 } 163 164 qi = &ah->txq[q]; 165 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 166 ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, " 167 "inactive queue: %u\n", q); 168 return false; 169 } 170 171 REG_WRITE(ah, AR_Q_TXD, 1 << q); 172 173 for (wait = wait_time; wait != 0; wait--) { 174 if (ath9k_hw_numtxpending(ah, q) == 0) 175 break; 176 udelay(ATH9K_TIME_QUANTUM); 177 } 178 179 if (ath9k_hw_numtxpending(ah, q)) { 180 ath_print(common, ATH_DBG_QUEUE, 181 "%s: Num of pending TX Frames %d on Q %d\n", 182 __func__, ath9k_hw_numtxpending(ah, q), q); 183 184 for (j = 0; j < 2; j++) { 185 tsfLow = REG_READ(ah, AR_TSF_L32); 186 REG_WRITE(ah, AR_QUIET2, 187 SM(10, AR_QUIET2_QUIET_DUR)); 188 REG_WRITE(ah, AR_QUIET_PERIOD, 100); 189 REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10); 190 REG_SET_BIT(ah, AR_TIMER_MODE, 191 AR_QUIET_TIMER_EN); 192 193 if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10)) 194 break; 195 196 ath_print(common, ATH_DBG_QUEUE, 197 "TSF has moved while trying to set " 198 "quiet time TSF: 0x%08x\n", tsfLow); 199 } 200 201 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); 202 203 udelay(200); 204 REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN); 205 206 wait = wait_time; 207 while (ath9k_hw_numtxpending(ah, q)) { 208 if ((--wait) == 0) { 209 ath_print(common, ATH_DBG_FATAL, 210 "Failed to stop TX DMA in 100 " 211 "msec after killing last frame\n"); 212 break; 213 } 214 udelay(ATH9K_TIME_QUANTUM); 215 } 216 217 REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); 218 } 219 220 REG_WRITE(ah, AR_Q_TXD, 0); 221 return wait != 0; 222 223 #undef ATH9K_TX_STOP_DMA_TIMEOUT 224 #undef ATH9K_TIME_QUANTUM 225 } 226 EXPORT_SYMBOL(ath9k_hw_stoptxdma); 227 228 void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs) 229 { 230 *txqs &= ah->intr_txqs; 231 ah->intr_txqs &= ~(*txqs); 232 } 233 EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs); 234 235 bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q, 236 const struct ath9k_tx_queue_info *qinfo) 237 { 238 u32 cw; 239 struct ath_common *common = ath9k_hw_common(ah); 240 struct ath9k_hw_capabilities *pCap = &ah->caps; 241 struct ath9k_tx_queue_info *qi; 242 243 if (q >= pCap->total_queues) { 244 ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, " 245 "invalid queue: %u\n", q); 246 return false; 247 } 248 249 qi = &ah->txq[q]; 250 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 251 ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, " 252 "inactive queue: %u\n", q); 253 return false; 254 } 255 256 ath_print(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q); 257 258 qi->tqi_ver = qinfo->tqi_ver; 259 qi->tqi_subtype = qinfo->tqi_subtype; 260 qi->tqi_qflags = qinfo->tqi_qflags; 261 qi->tqi_priority = qinfo->tqi_priority; 262 if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT) 263 qi->tqi_aifs = min(qinfo->tqi_aifs, 255U); 264 else 265 qi->tqi_aifs = INIT_AIFS; 266 if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) { 267 cw = min(qinfo->tqi_cwmin, 1024U); 268 qi->tqi_cwmin = 1; 269 while (qi->tqi_cwmin < cw) 270 qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1; 271 } else 272 qi->tqi_cwmin = qinfo->tqi_cwmin; 273 if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) { 274 cw = min(qinfo->tqi_cwmax, 1024U); 275 qi->tqi_cwmax = 1; 276 while (qi->tqi_cwmax < cw) 277 qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1; 278 } else 279 qi->tqi_cwmax = INIT_CWMAX; 280 281 if (qinfo->tqi_shretry != 0) 282 qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U); 283 else 284 qi->tqi_shretry = INIT_SH_RETRY; 285 if (qinfo->tqi_lgretry != 0) 286 qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U); 287 else 288 qi->tqi_lgretry = INIT_LG_RETRY; 289 qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod; 290 qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit; 291 qi->tqi_burstTime = qinfo->tqi_burstTime; 292 qi->tqi_readyTime = qinfo->tqi_readyTime; 293 294 switch (qinfo->tqi_subtype) { 295 case ATH9K_WME_UPSD: 296 if (qi->tqi_type == ATH9K_TX_QUEUE_DATA) 297 qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS; 298 break; 299 default: 300 break; 301 } 302 303 return true; 304 } 305 EXPORT_SYMBOL(ath9k_hw_set_txq_props); 306 307 bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q, 308 struct ath9k_tx_queue_info *qinfo) 309 { 310 struct ath_common *common = ath9k_hw_common(ah); 311 struct ath9k_hw_capabilities *pCap = &ah->caps; 312 struct ath9k_tx_queue_info *qi; 313 314 if (q >= pCap->total_queues) { 315 ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, " 316 "invalid queue: %u\n", q); 317 return false; 318 } 319 320 qi = &ah->txq[q]; 321 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 322 ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, " 323 "inactive queue: %u\n", q); 324 return false; 325 } 326 327 qinfo->tqi_qflags = qi->tqi_qflags; 328 qinfo->tqi_ver = qi->tqi_ver; 329 qinfo->tqi_subtype = qi->tqi_subtype; 330 qinfo->tqi_qflags = qi->tqi_qflags; 331 qinfo->tqi_priority = qi->tqi_priority; 332 qinfo->tqi_aifs = qi->tqi_aifs; 333 qinfo->tqi_cwmin = qi->tqi_cwmin; 334 qinfo->tqi_cwmax = qi->tqi_cwmax; 335 qinfo->tqi_shretry = qi->tqi_shretry; 336 qinfo->tqi_lgretry = qi->tqi_lgretry; 337 qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod; 338 qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit; 339 qinfo->tqi_burstTime = qi->tqi_burstTime; 340 qinfo->tqi_readyTime = qi->tqi_readyTime; 341 342 return true; 343 } 344 EXPORT_SYMBOL(ath9k_hw_get_txq_props); 345 346 int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type, 347 const struct ath9k_tx_queue_info *qinfo) 348 { 349 struct ath_common *common = ath9k_hw_common(ah); 350 struct ath9k_tx_queue_info *qi; 351 struct ath9k_hw_capabilities *pCap = &ah->caps; 352 int q; 353 354 switch (type) { 355 case ATH9K_TX_QUEUE_BEACON: 356 q = pCap->total_queues - 1; 357 break; 358 case ATH9K_TX_QUEUE_CAB: 359 q = pCap->total_queues - 2; 360 break; 361 case ATH9K_TX_QUEUE_PSPOLL: 362 q = 1; 363 break; 364 case ATH9K_TX_QUEUE_UAPSD: 365 q = pCap->total_queues - 3; 366 break; 367 case ATH9K_TX_QUEUE_DATA: 368 for (q = 0; q < pCap->total_queues; q++) 369 if (ah->txq[q].tqi_type == 370 ATH9K_TX_QUEUE_INACTIVE) 371 break; 372 if (q == pCap->total_queues) { 373 ath_print(common, ATH_DBG_FATAL, 374 "No available TX queue\n"); 375 return -1; 376 } 377 break; 378 default: 379 ath_print(common, ATH_DBG_FATAL, 380 "Invalid TX queue type: %u\n", type); 381 return -1; 382 } 383 384 ath_print(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q); 385 386 qi = &ah->txq[q]; 387 if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) { 388 ath_print(common, ATH_DBG_FATAL, 389 "TX queue: %u already active\n", q); 390 return -1; 391 } 392 memset(qi, 0, sizeof(struct ath9k_tx_queue_info)); 393 qi->tqi_type = type; 394 if (qinfo == NULL) { 395 qi->tqi_qflags = 396 TXQ_FLAG_TXOKINT_ENABLE 397 | TXQ_FLAG_TXERRINT_ENABLE 398 | TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE; 399 qi->tqi_aifs = INIT_AIFS; 400 qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT; 401 qi->tqi_cwmax = INIT_CWMAX; 402 qi->tqi_shretry = INIT_SH_RETRY; 403 qi->tqi_lgretry = INIT_LG_RETRY; 404 qi->tqi_physCompBuf = 0; 405 } else { 406 qi->tqi_physCompBuf = qinfo->tqi_physCompBuf; 407 (void) ath9k_hw_set_txq_props(ah, q, qinfo); 408 } 409 410 return q; 411 } 412 EXPORT_SYMBOL(ath9k_hw_setuptxqueue); 413 414 bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q) 415 { 416 struct ath9k_hw_capabilities *pCap = &ah->caps; 417 struct ath_common *common = ath9k_hw_common(ah); 418 struct ath9k_tx_queue_info *qi; 419 420 if (q >= pCap->total_queues) { 421 ath_print(common, ATH_DBG_QUEUE, "Release TXQ, " 422 "invalid queue: %u\n", q); 423 return false; 424 } 425 qi = &ah->txq[q]; 426 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 427 ath_print(common, ATH_DBG_QUEUE, "Release TXQ, " 428 "inactive queue: %u\n", q); 429 return false; 430 } 431 432 ath_print(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q); 433 434 qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE; 435 ah->txok_interrupt_mask &= ~(1 << q); 436 ah->txerr_interrupt_mask &= ~(1 << q); 437 ah->txdesc_interrupt_mask &= ~(1 << q); 438 ah->txeol_interrupt_mask &= ~(1 << q); 439 ah->txurn_interrupt_mask &= ~(1 << q); 440 ath9k_hw_set_txq_interrupts(ah, qi); 441 442 return true; 443 } 444 EXPORT_SYMBOL(ath9k_hw_releasetxqueue); 445 446 bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q) 447 { 448 struct ath9k_hw_capabilities *pCap = &ah->caps; 449 struct ath_common *common = ath9k_hw_common(ah); 450 struct ath9k_channel *chan = ah->curchan; 451 struct ath9k_tx_queue_info *qi; 452 u32 cwMin, chanCwMin, value; 453 454 if (q >= pCap->total_queues) { 455 ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, " 456 "invalid queue: %u\n", q); 457 return false; 458 } 459 460 qi = &ah->txq[q]; 461 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 462 ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, " 463 "inactive queue: %u\n", q); 464 return true; 465 } 466 467 ath_print(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q); 468 469 if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) { 470 if (chan && IS_CHAN_B(chan)) 471 chanCwMin = INIT_CWMIN_11B; 472 else 473 chanCwMin = INIT_CWMIN; 474 475 for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1); 476 } else 477 cwMin = qi->tqi_cwmin; 478 479 ENABLE_REGWRITE_BUFFER(ah); 480 481 REG_WRITE(ah, AR_DLCL_IFS(q), 482 SM(cwMin, AR_D_LCL_IFS_CWMIN) | 483 SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) | 484 SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS)); 485 486 REG_WRITE(ah, AR_DRETRY_LIMIT(q), 487 SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) | 488 SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) | 489 SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)); 490 491 REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ); 492 REG_WRITE(ah, AR_DMISC(q), 493 AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2); 494 495 REGWRITE_BUFFER_FLUSH(ah); 496 497 if (qi->tqi_cbrPeriod) { 498 REG_WRITE(ah, AR_QCBRCFG(q), 499 SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) | 500 SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH)); 501 REG_WRITE(ah, AR_QMISC(q), 502 REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR | 503 (qi->tqi_cbrOverflowLimit ? 504 AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0)); 505 } 506 if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) { 507 REG_WRITE(ah, AR_QRDYTIMECFG(q), 508 SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) | 509 AR_Q_RDYTIMECFG_EN); 510 } 511 512 REGWRITE_BUFFER_FLUSH(ah); 513 514 REG_WRITE(ah, AR_DCHNTIME(q), 515 SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) | 516 (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0)); 517 518 if (qi->tqi_burstTime 519 && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) { 520 REG_WRITE(ah, AR_QMISC(q), 521 REG_READ(ah, AR_QMISC(q)) | 522 AR_Q_MISC_RDYTIME_EXP_POLICY); 523 524 } 525 526 if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) { 527 REG_WRITE(ah, AR_DMISC(q), 528 REG_READ(ah, AR_DMISC(q)) | 529 AR_D_MISC_POST_FR_BKOFF_DIS); 530 } 531 532 REGWRITE_BUFFER_FLUSH(ah); 533 DISABLE_REGWRITE_BUFFER(ah); 534 535 if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) { 536 REG_WRITE(ah, AR_DMISC(q), 537 REG_READ(ah, AR_DMISC(q)) | 538 AR_D_MISC_FRAG_BKOFF_EN); 539 } 540 switch (qi->tqi_type) { 541 case ATH9K_TX_QUEUE_BEACON: 542 ENABLE_REGWRITE_BUFFER(ah); 543 544 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q)) 545 | AR_Q_MISC_FSP_DBA_GATED 546 | AR_Q_MISC_BEACON_USE 547 | AR_Q_MISC_CBR_INCR_DIS1); 548 549 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) 550 | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL << 551 AR_D_MISC_ARB_LOCKOUT_CNTRL_S) 552 | AR_D_MISC_BEACON_USE 553 | AR_D_MISC_POST_FR_BKOFF_DIS); 554 555 REGWRITE_BUFFER_FLUSH(ah); 556 DISABLE_REGWRITE_BUFFER(ah); 557 558 /* 559 * cwmin and cwmax should be 0 for beacon queue 560 * but not for IBSS as we would create an imbalance 561 * on beaconing fairness for participating nodes. 562 */ 563 if (AR_SREV_9300_20_OR_LATER(ah) && 564 ah->opmode != NL80211_IFTYPE_ADHOC) { 565 REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN) 566 | SM(0, AR_D_LCL_IFS_CWMAX) 567 | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS)); 568 } 569 break; 570 case ATH9K_TX_QUEUE_CAB: 571 ENABLE_REGWRITE_BUFFER(ah); 572 573 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q)) 574 | AR_Q_MISC_FSP_DBA_GATED 575 | AR_Q_MISC_CBR_INCR_DIS1 576 | AR_Q_MISC_CBR_INCR_DIS0); 577 value = (qi->tqi_readyTime - 578 (ah->config.sw_beacon_response_time - 579 ah->config.dma_beacon_response_time) - 580 ah->config.additional_swba_backoff) * 1024; 581 REG_WRITE(ah, AR_QRDYTIMECFG(q), 582 value | AR_Q_RDYTIMECFG_EN); 583 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) 584 | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL << 585 AR_D_MISC_ARB_LOCKOUT_CNTRL_S)); 586 587 REGWRITE_BUFFER_FLUSH(ah); 588 DISABLE_REGWRITE_BUFFER(ah); 589 590 break; 591 case ATH9K_TX_QUEUE_PSPOLL: 592 REG_WRITE(ah, AR_QMISC(q), 593 REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1); 594 break; 595 case ATH9K_TX_QUEUE_UAPSD: 596 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) | 597 AR_D_MISC_POST_FR_BKOFF_DIS); 598 break; 599 default: 600 break; 601 } 602 603 if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) { 604 REG_WRITE(ah, AR_DMISC(q), 605 REG_READ(ah, AR_DMISC(q)) | 606 SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL, 607 AR_D_MISC_ARB_LOCKOUT_CNTRL) | 608 AR_D_MISC_POST_FR_BKOFF_DIS); 609 } 610 611 if (AR_SREV_9300_20_OR_LATER(ah)) 612 REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN); 613 614 if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE) 615 ah->txok_interrupt_mask |= 1 << q; 616 else 617 ah->txok_interrupt_mask &= ~(1 << q); 618 if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE) 619 ah->txerr_interrupt_mask |= 1 << q; 620 else 621 ah->txerr_interrupt_mask &= ~(1 << q); 622 if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE) 623 ah->txdesc_interrupt_mask |= 1 << q; 624 else 625 ah->txdesc_interrupt_mask &= ~(1 << q); 626 if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE) 627 ah->txeol_interrupt_mask |= 1 << q; 628 else 629 ah->txeol_interrupt_mask &= ~(1 << q); 630 if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE) 631 ah->txurn_interrupt_mask |= 1 << q; 632 else 633 ah->txurn_interrupt_mask &= ~(1 << q); 634 ath9k_hw_set_txq_interrupts(ah, qi); 635 636 return true; 637 } 638 EXPORT_SYMBOL(ath9k_hw_resettxqueue); 639 640 int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, 641 struct ath_rx_status *rs, u64 tsf) 642 { 643 struct ar5416_desc ads; 644 struct ar5416_desc *adsp = AR5416DESC(ds); 645 u32 phyerr; 646 647 if ((adsp->ds_rxstatus8 & AR_RxDone) == 0) 648 return -EINPROGRESS; 649 650 ads.u.rx = adsp->u.rx; 651 652 rs->rs_status = 0; 653 rs->rs_flags = 0; 654 655 rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen; 656 rs->rs_tstamp = ads.AR_RcvTimestamp; 657 658 if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) { 659 rs->rs_rssi = ATH9K_RSSI_BAD; 660 rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD; 661 rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD; 662 rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD; 663 rs->rs_rssi_ext0 = ATH9K_RSSI_BAD; 664 rs->rs_rssi_ext1 = ATH9K_RSSI_BAD; 665 rs->rs_rssi_ext2 = ATH9K_RSSI_BAD; 666 } else { 667 rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined); 668 rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0, 669 AR_RxRSSIAnt00); 670 rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0, 671 AR_RxRSSIAnt01); 672 rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0, 673 AR_RxRSSIAnt02); 674 rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4, 675 AR_RxRSSIAnt10); 676 rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4, 677 AR_RxRSSIAnt11); 678 rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4, 679 AR_RxRSSIAnt12); 680 } 681 if (ads.ds_rxstatus8 & AR_RxKeyIdxValid) 682 rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx); 683 else 684 rs->rs_keyix = ATH9K_RXKEYIX_INVALID; 685 686 rs->rs_rate = RXSTATUS_RATE(ah, (&ads)); 687 rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0; 688 689 rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0; 690 rs->rs_moreaggr = 691 (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0; 692 rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna); 693 rs->rs_flags = 694 (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0; 695 rs->rs_flags |= 696 (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0; 697 698 if (ads.ds_rxstatus8 & AR_PreDelimCRCErr) 699 rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE; 700 if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) 701 rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST; 702 if (ads.ds_rxstatus8 & AR_DecryptBusyErr) 703 rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY; 704 705 if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) { 706 if (ads.ds_rxstatus8 & AR_CRCErr) 707 rs->rs_status |= ATH9K_RXERR_CRC; 708 else if (ads.ds_rxstatus8 & AR_PHYErr) { 709 rs->rs_status |= ATH9K_RXERR_PHY; 710 phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode); 711 rs->rs_phyerr = phyerr; 712 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr) 713 rs->rs_status |= ATH9K_RXERR_DECRYPT; 714 else if (ads.ds_rxstatus8 & AR_MichaelErr) 715 rs->rs_status |= ATH9K_RXERR_MIC; 716 } 717 718 return 0; 719 } 720 EXPORT_SYMBOL(ath9k_hw_rxprocdesc); 721 722 /* 723 * This can stop or re-enables RX. 724 * 725 * If bool is set this will kill any frame which is currently being 726 * transferred between the MAC and baseband and also prevent any new 727 * frames from getting started. 728 */ 729 bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set) 730 { 731 u32 reg; 732 733 if (set) { 734 REG_SET_BIT(ah, AR_DIAG_SW, 735 (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); 736 737 if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE, 738 0, AH_WAIT_TIMEOUT)) { 739 REG_CLR_BIT(ah, AR_DIAG_SW, 740 (AR_DIAG_RX_DIS | 741 AR_DIAG_RX_ABORT)); 742 743 reg = REG_READ(ah, AR_OBS_BUS_1); 744 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, 745 "RX failed to go idle in 10 ms RXSM=0x%x\n", 746 reg); 747 748 return false; 749 } 750 } else { 751 REG_CLR_BIT(ah, AR_DIAG_SW, 752 (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); 753 } 754 755 return true; 756 } 757 EXPORT_SYMBOL(ath9k_hw_setrxabort); 758 759 void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp) 760 { 761 REG_WRITE(ah, AR_RXDP, rxdp); 762 } 763 EXPORT_SYMBOL(ath9k_hw_putrxbuf); 764 765 void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning) 766 { 767 ath9k_enable_mib_counters(ah); 768 769 ath9k_ani_reset(ah, is_scanning); 770 771 REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); 772 } 773 EXPORT_SYMBOL(ath9k_hw_startpcureceive); 774 775 void ath9k_hw_stoppcurecv(struct ath_hw *ah) 776 { 777 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS); 778 779 ath9k_hw_disable_mib_counters(ah); 780 } 781 EXPORT_SYMBOL(ath9k_hw_stoppcurecv); 782 783 void ath9k_hw_abortpcurecv(struct ath_hw *ah) 784 { 785 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS); 786 787 ath9k_hw_disable_mib_counters(ah); 788 } 789 EXPORT_SYMBOL(ath9k_hw_abortpcurecv); 790 791 bool ath9k_hw_stopdmarecv(struct ath_hw *ah) 792 { 793 #define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */ 794 #define AH_RX_TIME_QUANTUM 100 /* usec */ 795 struct ath_common *common = ath9k_hw_common(ah); 796 int i; 797 798 REG_WRITE(ah, AR_CR, AR_CR_RXD); 799 800 /* Wait for rx enable bit to go low */ 801 for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) { 802 if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0) 803 break; 804 udelay(AH_TIME_QUANTUM); 805 } 806 807 if (i == 0) { 808 ath_print(common, ATH_DBG_FATAL, 809 "DMA failed to stop in %d ms " 810 "AR_CR=0x%08x AR_DIAG_SW=0x%08x\n", 811 AH_RX_STOP_DMA_TIMEOUT / 1000, 812 REG_READ(ah, AR_CR), 813 REG_READ(ah, AR_DIAG_SW)); 814 return false; 815 } else { 816 return true; 817 } 818 819 #undef AH_RX_TIME_QUANTUM 820 #undef AH_RX_STOP_DMA_TIMEOUT 821 } 822 EXPORT_SYMBOL(ath9k_hw_stopdmarecv); 823 824 int ath9k_hw_beaconq_setup(struct ath_hw *ah) 825 { 826 struct ath9k_tx_queue_info qi; 827 828 memset(&qi, 0, sizeof(qi)); 829 qi.tqi_aifs = 1; 830 qi.tqi_cwmin = 0; 831 qi.tqi_cwmax = 0; 832 /* NB: don't enable any interrupts */ 833 return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi); 834 } 835 EXPORT_SYMBOL(ath9k_hw_beaconq_setup); 836 837 bool ath9k_hw_intrpend(struct ath_hw *ah) 838 { 839 u32 host_isr; 840 841 if (AR_SREV_9100(ah)) 842 return true; 843 844 host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE); 845 if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS)) 846 return true; 847 848 host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE); 849 if ((host_isr & AR_INTR_SYNC_DEFAULT) 850 && (host_isr != AR_INTR_SPURIOUS)) 851 return true; 852 853 return false; 854 } 855 EXPORT_SYMBOL(ath9k_hw_intrpend); 856 857 enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, 858 enum ath9k_int ints) 859 { 860 enum ath9k_int omask = ah->imask; 861 u32 mask, mask2; 862 struct ath9k_hw_capabilities *pCap = &ah->caps; 863 struct ath_common *common = ath9k_hw_common(ah); 864 865 ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); 866 867 if (omask & ATH9K_INT_GLOBAL) { 868 ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n"); 869 REG_WRITE(ah, AR_IER, AR_IER_DISABLE); 870 (void) REG_READ(ah, AR_IER); 871 if (!AR_SREV_9100(ah)) { 872 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0); 873 (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE); 874 875 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0); 876 (void) REG_READ(ah, AR_INTR_SYNC_ENABLE); 877 } 878 } 879 880 /* TODO: global int Ref count */ 881 mask = ints & ATH9K_INT_COMMON; 882 mask2 = 0; 883 884 if (ints & ATH9K_INT_TX) { 885 if (ah->config.tx_intr_mitigation) 886 mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM; 887 else { 888 if (ah->txok_interrupt_mask) 889 mask |= AR_IMR_TXOK; 890 if (ah->txdesc_interrupt_mask) 891 mask |= AR_IMR_TXDESC; 892 } 893 if (ah->txerr_interrupt_mask) 894 mask |= AR_IMR_TXERR; 895 if (ah->txeol_interrupt_mask) 896 mask |= AR_IMR_TXEOL; 897 } 898 if (ints & ATH9K_INT_RX) { 899 if (AR_SREV_9300_20_OR_LATER(ah)) { 900 mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP; 901 if (ah->config.rx_intr_mitigation) { 902 mask &= ~AR_IMR_RXOK_LP; 903 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM; 904 } else { 905 mask |= AR_IMR_RXOK_LP; 906 } 907 } else { 908 if (ah->config.rx_intr_mitigation) 909 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM; 910 else 911 mask |= AR_IMR_RXOK | AR_IMR_RXDESC; 912 } 913 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 914 mask |= AR_IMR_GENTMR; 915 } 916 917 if (ints & (ATH9K_INT_BMISC)) { 918 mask |= AR_IMR_BCNMISC; 919 if (ints & ATH9K_INT_TIM) 920 mask2 |= AR_IMR_S2_TIM; 921 if (ints & ATH9K_INT_DTIM) 922 mask2 |= AR_IMR_S2_DTIM; 923 if (ints & ATH9K_INT_DTIMSYNC) 924 mask2 |= AR_IMR_S2_DTIMSYNC; 925 if (ints & ATH9K_INT_CABEND) 926 mask2 |= AR_IMR_S2_CABEND; 927 if (ints & ATH9K_INT_TSFOOR) 928 mask2 |= AR_IMR_S2_TSFOOR; 929 } 930 931 if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) { 932 mask |= AR_IMR_BCNMISC; 933 if (ints & ATH9K_INT_GTT) 934 mask2 |= AR_IMR_S2_GTT; 935 if (ints & ATH9K_INT_CST) 936 mask2 |= AR_IMR_S2_CST; 937 } 938 939 ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask); 940 REG_WRITE(ah, AR_IMR, mask); 941 ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC | 942 AR_IMR_S2_CABEND | AR_IMR_S2_CABTO | 943 AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST); 944 ah->imrs2_reg |= mask2; 945 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg); 946 947 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { 948 if (ints & ATH9K_INT_TIM_TIMER) 949 REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); 950 else 951 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); 952 } 953 954 if (ints & ATH9K_INT_GLOBAL) { 955 ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n"); 956 REG_WRITE(ah, AR_IER, AR_IER_ENABLE); 957 if (!AR_SREV_9100(ah)) { 958 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 959 AR_INTR_MAC_IRQ); 960 REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ); 961 962 963 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 964 AR_INTR_SYNC_DEFAULT); 965 REG_WRITE(ah, AR_INTR_SYNC_MASK, 966 AR_INTR_SYNC_DEFAULT); 967 } 968 ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n", 969 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER)); 970 } 971 972 return omask; 973 } 974 EXPORT_SYMBOL(ath9k_hw_set_interrupts); 975