/*-
 * SPDX-License-Identifier: ISC
 *
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2002-2008 Atheros Communications, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $FreeBSD$
 */
#include "opt_ah.h"

#include "ah.h"
#include "ah_internal.h"
#include "ah_desc.h"

#include "ar5212/ar5212.h"
#include "ar5212/ar5212reg.h"
#include "ar5212/ar5212desc.h"
#include "ar5212/ar5212phy.h"
#ifdef AH_SUPPORT_5311
#include "ar5212/ar5311reg.h"
#endif

#ifdef AH_NEED_DESC_SWAP
static void ar5212SwapTxDesc(struct ath_desc *ds);
#endif

/*
 * Update Tx FIFO trigger level.
 *
 * Set bIncTrigLevel to TRUE to increase the trigger level.
 * Set bIncTrigLevel to FALSE to decrease the trigger level.
 *
 * Returns TRUE if the trigger level was updated
 */
HAL_BOOL
ar5212UpdateTxTrigLevel(struct ath_hal *ah, HAL_BOOL bIncTrigLevel)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	uint32_t txcfg, curLevel, newLevel;
	HAL_INT omask;

	if (ahp->ah_txTrigLev >= ahp->ah_maxTxTrigLev)
		return AH_FALSE;

	/*
	 * Disable interrupts while futzing with the fifo level.
	 */
	omask = ath_hal_setInterrupts(ah, ahp->ah_maskReg &~ HAL_INT_GLOBAL);

	txcfg = OS_REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {		/* increase the trigger level */
		if (curLevel < ahp->ah_maxTxTrigLev)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		/* Update the trigger level */
		OS_REG_WRITE(ah, AR_TXCFG,
		    (txcfg &~ AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ahp->ah_txTrigLev = newLevel;

	/* re-enable chip interrupts */
	ath_hal_setInterrupts(ah, omask);

	return (newLevel != curLevel);
}
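
/*
 * Example (illustrative sketch, not part of the HAL): a driver normally
 * only raises the trigger level, and only in response to a TX FIFO
 * underrun interrupt, e.g.
 *
 *	if (status & HAL_INT_TXURN)
 *		(void) ar5212UpdateTxTrigLevel(ah, AH_TRUE);
 *
 * where "status" is assumed to be the interrupt status mask the driver
 * obtained from its interrupt handler.
 */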

/*
 * Set the properties of the tx queue with the parameters
 * from qInfo.
 */
HAL_BOOL
ar5212SetTxQueueProps(struct ath_hal *ah, int q, const HAL_TXQ_INFO *qInfo)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;

	if (q >= pCap->halTotalQueues) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
		    __func__, q);
		return AH_FALSE;
	}
	return ath_hal_setTxQProps(ah, &ahp->ah_txq[q], qInfo);
}

/*
 * Return the properties for the specified tx queue.
 */
HAL_BOOL
ar5212GetTxQueueProps(struct ath_hal *ah, int q, HAL_TXQ_INFO *qInfo)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;

	if (q >= pCap->halTotalQueues) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
		    __func__, q);
		return AH_FALSE;
	}
	return ath_hal_getTxQProps(ah, qInfo, &ahp->ah_txq[q]);
}

/*
 * Allocate and initialize a tx DCU/QCU combination.
 */
int
ar5212SetupTxQueue(struct ath_hal *ah, HAL_TX_QUEUE type,
	const HAL_TXQ_INFO *qInfo)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_TX_QUEUE_INFO *qi;
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	int q, defqflags;

	/* by default enable OK+ERR+DESC+URN interrupts */
	defqflags = HAL_TXQ_TXOKINT_ENABLE
		  | HAL_TXQ_TXERRINT_ENABLE
		  | HAL_TXQ_TXDESCINT_ENABLE
		  | HAL_TXQ_TXURNINT_ENABLE;
	/* XXX move queue assignment to driver */
	switch (type) {
	case HAL_TX_QUEUE_BEACON:
		q = pCap->halTotalQueues-1;	/* highest priority */
		defqflags |= HAL_TXQ_DBA_GATED
			  | HAL_TXQ_CBR_DIS_QEMPTY
			  | HAL_TXQ_ARB_LOCKOUT_GLOBAL
			  | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_CAB:
		q = pCap->halTotalQueues-2;	/* next highest priority */
		defqflags |= HAL_TXQ_DBA_GATED
			  | HAL_TXQ_CBR_DIS_QEMPTY
			  | HAL_TXQ_CBR_DIS_BEMPTY
			  | HAL_TXQ_ARB_LOCKOUT_GLOBAL
			  | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_UAPSD:
		q = pCap->halTotalQueues-3;	/* third highest priority */
		if (ahp->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: no available UAPSD tx queue\n", __func__);
			return -1;
		}
		break;
	case HAL_TX_QUEUE_DATA:
		for (q = 0; q < pCap->halTotalQueues; q++)
			if (ahp->ah_txq[q].tqi_type == HAL_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->halTotalQueues) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: no available tx queue\n", __func__);
			return -1;
		}
		break;
	default:
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: bad tx queue type %u\n", __func__, type);
		return -1;
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type != HAL_TX_QUEUE_INACTIVE) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: tx queue %u already active\n",
		    __func__, q);
		return -1;
	}
	OS_MEMZERO(qi, sizeof(HAL_TX_QUEUE_INFO));
	qi->tqi_type = type;
	if (qInfo == AH_NULL) {
		qi->tqi_qflags = defqflags;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = HAL_TXQ_USEDEFAULT;	/* NB: do at reset */
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qInfo->tqi_compBuf;
		(void) ar5212SetTxQueueProps(ah, q, qInfo);
	}
	/* NB: must be followed by ar5212ResetTxQueue */
	return q;
}
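
/*
 * Example (illustrative sketch, not part of the HAL): allocating a data
 * queue and making it usable takes two steps, per the NB above --
 * ar5212SetupTxQueue() only reserves the DCU/QCU and records the
 * parameters, while ar5212ResetTxQueue() pushes them to the hardware:
 *
 *	int q = ar5212SetupTxQueue(ah, HAL_TX_QUEUE_DATA, AH_NULL);
 *	if (q < 0)
 *		return;		// no free queue
 *	if (!ar5212ResetTxQueue(ah, q))
 *		(void) ar5212ReleaseTxQueue(ah, q);
 *
 * Passing AH_NULL for qInfo picks up the default flags and the
 * channel-dependent cwmin chosen at reset time.
 */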

/*
 * Update the h/w interrupt registers to reflect a tx q's configuration.
 */
static void
setTxQInterrupts(struct ath_hal *ah, HAL_TX_QUEUE_INFO *qi)
{
	struct ath_hal_5212 *ahp = AH5212(ah);

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
	    "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", __func__,
	    ahp->ah_txOkInterruptMask, ahp->ah_txErrInterruptMask,
	    ahp->ah_txDescInterruptMask, ahp->ah_txEolInterruptMask,
	    ahp->ah_txUrnInterruptMask);

	OS_REG_WRITE(ah, AR_IMR_S0,
	      SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
	    | SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC)
	);
	OS_REG_WRITE(ah, AR_IMR_S1,
	      SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
	    | SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL)
	);
	OS_REG_RMW_FIELD(ah, AR_IMR_S2,
	    AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
}

/*
 * Free a tx DCU/QCU combination.
 */
HAL_BOOL
ar5212ReleaseTxQueue(struct ath_hal *ah, u_int q)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	HAL_TX_QUEUE_INFO *qi;

	if (q >= pCap->halTotalQueues) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
		    __func__, q);
		return AH_FALSE;
	}
	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
		    __func__, q);
		return AH_FALSE;
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: release queue %u\n", __func__, q);

	qi->tqi_type = HAL_TX_QUEUE_INACTIVE;
	ahp->ah_txOkInterruptMask &= ~(1 << q);
	ahp->ah_txErrInterruptMask &= ~(1 << q);
	ahp->ah_txDescInterruptMask &= ~(1 << q);
	ahp->ah_txEolInterruptMask &= ~(1 << q);
	ahp->ah_txUrnInterruptMask &= ~(1 << q);
	setTxQInterrupts(ah, qi);

	return AH_TRUE;
}
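
/*
 * Example (illustrative sketch, not part of the HAL): the reverse of the
 * setup sequence above is to quiesce the queue before handing the
 * DCU/QCU back, e.g.
 *
 *	(void) ar5212StopTxDma(ah, q);
 *	(void) ar5212ReleaseTxQueue(ah, q);
 *
 * ar5212ReleaseTxQueue() only clears the software state and the
 * per-queue interrupt mask bits; it does not drain pending frames.
 */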

/*
 * Set the retry, aifs, cwmin/max, readyTime regs for specified queue
 * Assumes:
 *  phwChannel has been set to point to the current channel
 */
#define	TU_TO_USEC(_tu)		((_tu) << 10)
HAL_BOOL
ar5212ResetTxQueue(struct ath_hal *ah, u_int q)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	const struct ieee80211_channel *chan = AH_PRIVATE(ah)->ah_curchan;
	HAL_TX_QUEUE_INFO *qi;
	uint32_t cwMin, chanCwMin, qmisc, dmisc;

	if (q >= pCap->halTotalQueues) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
		    __func__, q);
		return AH_FALSE;
	}
	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
		    __func__, q);
		return AH_TRUE;		/* XXX??? */
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: reset queue %u\n", __func__, q);

	if (qi->tqi_cwmin == HAL_TXQ_USEDEFAULT) {
		/*
		 * Select cwmin according to channel type.
		 * NB: chan can be NULL during attach
		 */
		if (chan && IEEE80211_IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;
		/* make sure that the CWmin is of the form (2^n - 1) */
		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1)
			;
	} else
		cwMin = qi->tqi_cwmin;
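
	/*
	 * The loop in the HAL_TXQ_USEDEFAULT case above walks 1, 3, 7, 15,
	 * 31, ... until it reaches chanCwMin, i.e. it rounds the value up
	 * to the next (2^n - 1), the form 802.11 contention windows take
	 * (e.g. 15 stays 15, while 20 would become 31).
	 */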

	/* set cwMin/Max and AIFS values */
	OS_REG_WRITE(ah, AR_DLCL_IFS(q),
	      SM(cwMin, AR_D_LCL_IFS_CWMIN)
	    | SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
	    | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	/* Set retry limit values */
	OS_REG_WRITE(ah, AR_DRETRY_LIMIT(q),
	      SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
	    | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
	    | SM(qi->tqi_lgretry, AR_D_RETRY_LIMIT_FR_LG)
	    | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)
	);

	/* NB: always enable early termination on the QCU */
	qmisc = AR_Q_MISC_DCU_EARLY_TERM_REQ
	      | SM(AR_Q_MISC_FSP_ASAP, AR_Q_MISC_FSP);

	/* NB: always enable DCU to wait for next fragment from QCU */
	dmisc = AR_D_MISC_FRAG_WAIT_EN;

#ifdef AH_SUPPORT_5311
	if (AH_PRIVATE(ah)->ah_macVersion < AR_SREV_VERSION_OAHU) {
		/* Configure DCU to use the global sequence count */
		dmisc |= AR5311_D_MISC_SEQ_NUM_CONTROL;
	}
#endif
	/* multiqueue support */
	if (qi->tqi_cbrPeriod) {
		OS_REG_WRITE(ah, AR_QCBRCFG(q),
		      SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_CBR_INTERVAL)
		    | SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_CBR_OVF_THRESH));
		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_CBR;
		if (qi->tqi_cbrOverflowLimit)
			qmisc |= AR_Q_MISC_CBR_EXP_CNTR_LIMIT;
	}
	if (qi->tqi_readyTime) {
		OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
		      SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT)
		    | AR_Q_RDYTIMECFG_ENA);
	}

	OS_REG_WRITE(ah, AR_DCHNTIME(q),
	      SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR)
	    | (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_readyTime &&
	    (qi->tqi_qflags & HAL_TXQ_RDYTIME_EXP_POLICY_ENABLE))
		qmisc |= AR_Q_MISC_RDYTIME_EXP_POLICY;
	if (qi->tqi_qflags & HAL_TXQ_DBA_GATED)
		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_DBA_GATED;
	if (MS(qmisc, AR_Q_MISC_FSP) != AR_Q_MISC_FSP_ASAP) {
		/*
		 * These are meaningful only when not scheduled asap.
		 */
		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_BEMPTY)
			qmisc |= AR_Q_MISC_CBR_INCR_DIS0;
		else
			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS0;
		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_QEMPTY)
			qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
		else
			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS1;
	}

	if (qi->tqi_qflags & HAL_TXQ_BACKOFF_DISABLE)
		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
	if (qi->tqi_qflags & HAL_TXQ_FRAG_BURST_BACKOFF_ENABLE)
		dmisc |= AR_D_MISC_FRAG_BKOFF_EN;
	if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_GLOBAL)
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
		    AR_D_MISC_ARB_LOCKOUT_CNTRL);
	else if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_INTRA)
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR,
		    AR_D_MISC_ARB_LOCKOUT_CNTRL);
	if (qi->tqi_qflags & HAL_TXQ_IGNORE_VIRTCOL)
		dmisc |= SM(AR_D_MISC_VIR_COL_HANDLING_IGNORE,
		    AR_D_MISC_VIR_COL_HANDLING);
	if (qi->tqi_qflags & HAL_TXQ_SEQNUM_INC_DIS)
		dmisc |= AR_D_MISC_SEQ_NUM_INCR_DIS;

	/*
	 * Fill in type-dependent bits.  Most of this can be
	 * removed by specifying the queue parameters in the
	 * driver; it's here for backwards compatibility.
	 */
	switch (qi->tqi_type) {
	case HAL_TX_QUEUE_BEACON:		/* beacon frames */
		qmisc |= AR_Q_MISC_FSP_DBA_GATED
		      | AR_Q_MISC_BEACON_USE
		      | AR_Q_MISC_CBR_INCR_DIS1;

		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL)
		      | AR_D_MISC_BEACON_USE
		      | AR_D_MISC_POST_FR_BKOFF_DIS;
		break;
	case HAL_TX_QUEUE_CAB:			/* CAB frames */
		/*
		 * No longer enable AR_Q_MISC_RDYTIME_EXP_POLICY;
		 * there is an issue with the CAB queue
		 * not properly refreshing the Tx descriptor if
		 * the TXE clear setting is used.
		 */
		qmisc |= AR_Q_MISC_FSP_DBA_GATED
		      | AR_Q_MISC_CBR_INCR_DIS1
		      | AR_Q_MISC_CBR_INCR_DIS0;

		if (qi->tqi_readyTime) {
			HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
			    "%s: using tqi_readyTime\n", __func__);
			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
			    SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT) |
			    AR_Q_RDYTIMECFG_ENA);
		} else {
			int value;
			/*
			 * NB: don't set default ready time if driver
			 * has explicitly specified something.  This is
			 * here solely for backwards compatibility.
			 */
			/*
			 * XXX for now, hard-code a CAB interval of 70%
			 * XXX of the total beacon interval.
			 */

			value = (ahp->ah_beaconInterval * 70 / 100)
				- (ah->ah_config.ah_sw_beacon_response_time -
				+ ah->ah_config.ah_dma_beacon_response_time)
				- ah->ah_config.ah_additional_swba_backoff;
			/*
			 * XXX Ensure it isn't too low - nothing lower
			 * XXX than 10 TU
			 */
			if (value < 10)
				value = 10;
			HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
			    "%s: defaulting to rdytime = %d uS\n",
			    __func__, value);
			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
			    SM(TU_TO_USEC(value), AR_Q_RDYTIMECFG_INT) |
			    AR_Q_RDYTIMECFG_ENA);
		}
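
		/*
		 * Note on units: the default value above is computed in TU
		 * and scaled with TU_TO_USEC() (1 TU = 1024us, hence the
		 * << 10, so e.g. 10 TU -> 10240us) because the ready-time
		 * interval field takes microseconds.  tqi_readyTime is
		 * written unconverted here and in the generic case above,
		 * i.e. it is assumed to already be in microseconds.
		 */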

		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
		    AR_D_MISC_ARB_LOCKOUT_CNTRL);
		break;
	default:			/* NB: silence compiler */
		break;
	}

	OS_REG_WRITE(ah, AR_QMISC(q), qmisc);
	OS_REG_WRITE(ah, AR_DMISC(q), dmisc);

	/* Setup compression scratchpad buffer */
	/*
	 * XXX: calling this asynchronously to queue operation can
	 *      cause unexpected behavior!!!
	 */
	if (qi->tqi_physCompBuf) {
		HALASSERT(qi->tqi_type == HAL_TX_QUEUE_DATA ||
		    qi->tqi_type == HAL_TX_QUEUE_UAPSD);
		OS_REG_WRITE(ah, AR_Q_CBBS, (80 + 2*q));
		OS_REG_WRITE(ah, AR_Q_CBBA, qi->tqi_physCompBuf);
		OS_REG_WRITE(ah, AR_Q_CBC, HAL_COMP_BUF_MAX_SIZE/1024);
		OS_REG_WRITE(ah, AR_Q0_MISC + 4*q,
		    OS_REG_READ(ah, AR_Q0_MISC + 4*q)
		    | AR_Q_MISC_QCU_COMP_EN);
	}

	/*
	 * Always update the secondary interrupt mask registers - this
	 * could be a new queue getting enabled in a running system or
	 * hw getting re-initialized during a reset!
	 *
	 * Since we don't differentiate between tx interrupts corresponding
	 * to individual queues - secondary tx mask regs are always unmasked;
	 * tx interrupts are enabled/disabled for all queues collectively
	 * using the primary mask reg
	 */
	if (qi->tqi_qflags & HAL_TXQ_TXOKINT_ENABLE)
		ahp->ah_txOkInterruptMask |= 1 << q;
	else
		ahp->ah_txOkInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXERRINT_ENABLE)
		ahp->ah_txErrInterruptMask |= 1 << q;
	else
		ahp->ah_txErrInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXDESCINT_ENABLE)
		ahp->ah_txDescInterruptMask |= 1 << q;
	else
		ahp->ah_txDescInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXEOLINT_ENABLE)
		ahp->ah_txEolInterruptMask |= 1 << q;
	else
		ahp->ah_txEolInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXURNINT_ENABLE)
		ahp->ah_txUrnInterruptMask |= 1 << q;
	else
		ahp->ah_txUrnInterruptMask &= ~(1 << q);
	setTxQInterrupts(ah, qi);

	return AH_TRUE;
}
#undef TU_TO_USEC

/*
 * Get the TXDP for the specified queue
 */
uint32_t
ar5212GetTxDP(struct ath_hal *ah, u_int q)
{
	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
	return OS_REG_READ(ah, AR_QTXDP(q));
}

/*
 * Set the TxDP for the specified queue
 */
HAL_BOOL
ar5212SetTxDP(struct ath_hal *ah, u_int q, uint32_t txdp)
{
	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

	/*
	 * Make sure that TXE is deasserted before setting the TXDP.  If TXE
	 * is still asserted, setting TXDP will have no effect.
	 */
	HALASSERT((OS_REG_READ(ah, AR_Q_TXE) & (1 << q)) == 0);

	OS_REG_WRITE(ah, AR_QTXDP(q), txdp);

	return AH_TRUE;
}

/*
 * Set Transmit Enable bits for the specified queue
 */
HAL_BOOL
ar5212StartTxDma(struct ath_hal *ah, u_int q)
{
	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);

	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);

	/* Check to be sure we're not enabling a q that has its TXD bit set. */
	HALASSERT((OS_REG_READ(ah, AR_Q_TXD) & (1 << q)) == 0);

	OS_REG_WRITE(ah, AR_Q_TXE, 1 << q);
	return AH_TRUE;
}
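
/*
 * Example (illustrative sketch, not part of the HAL): getting frames on
 * the air amounts to pointing TXDP at a descriptor chain and then
 * setting TXE.  bf_paddr is a hypothetical physical address of the
 * first descriptor in the chain:
 *
 *	if (ar5212NumTxPending(ah, q) == 0)
 *		(void) ar5212SetTxDP(ah, q, bf_paddr);
 *	(void) ar5212StartTxDma(ah, q);
 *
 * If the queue is already running, drivers instead append by patching
 * the previous tail descriptor's link word (see ar5212SetTxDescLink()
 * below), since writes to TXDP are ignored while TXE is asserted.
 */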

/*
 * Return the number of pending frames or 0 if the specified
 * queue is stopped.
 */
uint32_t
ar5212NumTxPending(struct ath_hal *ah, u_int q)
{
	uint32_t npend;

	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);
	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

	npend = OS_REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {
		/*
		 * Pending frame count (PFC) can momentarily go to zero
		 * while TXE remains asserted.  In other words a PFC of
		 * zero is not sufficient to say that the queue has stopped.
		 */
		if (OS_REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;		/* arbitrarily return 1 */
	}
	return npend;
}

/*
 * Stop transmit on the specified queue
 */
HAL_BOOL
ar5212StopTxDma(struct ath_hal *ah, u_int q)
{
	u_int i;
	u_int wait;

	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);

	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

	OS_REG_WRITE(ah, AR_Q_TXD, 1 << q);
	for (i = 1000; i != 0; i--) {
		if (ar5212NumTxPending(ah, q) == 0)
			break;
		OS_DELAY(100);		/* XXX get actual value */
	}
#ifdef AH_DEBUG
	if (i == 0) {
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: queue %u DMA did not stop in 100 msec\n", __func__, q);
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: QSTS 0x%x Q_TXE 0x%x Q_TXD 0x%x Q_CBR 0x%x\n", __func__,
		    OS_REG_READ(ah, AR_QSTS(q)), OS_REG_READ(ah, AR_Q_TXE),
		    OS_REG_READ(ah, AR_Q_TXD), OS_REG_READ(ah, AR_QCBRCFG(q)));
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: Q_MISC 0x%x Q_RDYTIMECFG 0x%x Q_RDYTIMESHDN 0x%x\n",
		    __func__, OS_REG_READ(ah, AR_QMISC(q)),
		    OS_REG_READ(ah, AR_QRDYTIMECFG(q)),
		    OS_REG_READ(ah, AR_Q_RDYTIMESHDN));
	}
#endif /* AH_DEBUG */

	/* 2413+ and up can kill packets at the PCU level */
	if (ar5212NumTxPending(ah, q) &&
	    (IS_2413(ah) || IS_5413(ah) || IS_2425(ah) || IS_2417(ah))) {
		uint32_t tsfLow, j;

		HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
		    "%s: Num of pending TX Frames %d on Q %d\n",
		    __func__, ar5212NumTxPending(ah, q), q);

		/* Kill last PCU Tx Frame */
		/* TODO - save off and restore current values of Q1/Q2? */
		for (j = 0; j < 2; j++) {
			tsfLow = OS_REG_READ(ah, AR_TSF_L32);
			OS_REG_WRITE(ah, AR_QUIET2, SM(100, AR_QUIET2_QUIET_PER) |
			    SM(10, AR_QUIET2_QUIET_DUR));
			OS_REG_WRITE(ah, AR_QUIET1, AR_QUIET1_QUIET_ENABLE |
			    SM(tsfLow >> 10, AR_QUIET1_NEXT_QUIET));
			if ((OS_REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10)) {
				break;
			}
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: TSF moved while trying to set quiet time "
			    "TSF: 0x%08x\n", __func__, tsfLow);
			HALASSERT(j < 1); /* TSF shouldn't count twice or reg access is taking forever */
		}

		OS_REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);

		/* Allow the quiet mechanism to do its work */
		OS_DELAY(200);
		OS_REG_CLR_BIT(ah, AR_QUIET1, AR_QUIET1_QUIET_ENABLE);

		/* Give at least 1 millisec more to wait */
		wait = 100;

		/* Verify all transmit is dead */
		while (ar5212NumTxPending(ah, q)) {
			if ((--wait) == 0) {
				HALDEBUG(ah, HAL_DEBUG_ANY,
				    "%s: Failed to stop Tx DMA in %d msec after killing last frame\n",
				    __func__, wait);
				break;
			}
			OS_DELAY(10);
		}

		OS_REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
	}

	OS_REG_WRITE(ah, AR_Q_TXD, 0);
	return (i != 0);
}
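
/*
 * Example (illustrative sketch, not part of the HAL): the return value of
 * ar5212StopTxDma() only reflects the initial polling loop, so a driver
 * that needs to know the final state re-checks the pending count, e.g.
 *
 *	if (!ar5212StopTxDma(ah, q) && ar5212NumTxPending(ah, q) != 0) {
 *		// queue is wedged even after the PCU "kill" path above;
 *		// drivers typically fall back to a full chip reset here
 *	}
 *
 * before reclaiming descriptors and rewriting TXDP.
 */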

/*
 * Descriptor Access Functions
 */

#define	VALID_PKT_TYPES \
	((1<<HAL_PKT_TYPE_NORMAL)|(1<<HAL_PKT_TYPE_ATIM)|\
	 (1<<HAL_PKT_TYPE_PSPOLL)|(1<<HAL_PKT_TYPE_PROBE_RESP)|\
	 (1<<HAL_PKT_TYPE_BEACON))
#define	isValidPktType(_t)	((1<<(_t)) & VALID_PKT_TYPES)
#define	VALID_TX_RATES \
	((1<<0x0b)|(1<<0x0f)|(1<<0x0a)|(1<<0x0e)|(1<<0x09)|(1<<0x0d)|\
	 (1<<0x08)|(1<<0x0c)|(1<<0x1b)|(1<<0x1a)|(1<<0x1e)|(1<<0x19)|\
	 (1<<0x1d)|(1<<0x18)|(1<<0x1c))
#define	isValidTxRate(_r)	((1<<(_r)) & VALID_TX_RATES)

HAL_BOOL
ar5212SetupTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int pktLen,
	u_int hdrLen,
	HAL_PKT_TYPE type,
	u_int txPower,
	u_int txRate0, u_int txTries0,
	u_int keyIx,
	u_int antMode,
	u_int flags,
	u_int rtsctsRate,
	u_int rtsctsDuration,
	u_int compicvLen,
	u_int compivLen,
	u_int comp)
{
#define	RTSCTS	(HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
	struct ar5212_desc *ads = AR5212DESC(ds);
	struct ath_hal_5212 *ahp = AH5212(ah);

	(void) hdrLen;

	HALASSERT(txTries0 != 0);
	HALASSERT(isValidPktType(type));
	HALASSERT(isValidTxRate(txRate0));
	HALASSERT((flags & RTSCTS) != RTSCTS);
	/* XXX validate antMode */

	txPower = (txPower + ahp->ah_txPowerIndexOffset);
	if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 = (pktLen & AR_FrameLen)
		     | (txPower << AR_XmitPower_S)
		     | (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
		     | (flags & HAL_TXDESC_CLRDMASK ? AR_ClearDestMask : 0)
		     | SM(antMode, AR_AntModeXmit)
		     | (flags & HAL_TXDESC_INTREQ ? AR_TxInterReq : 0)
		     ;
	ads->ds_ctl1 = (type << AR_FrmType_S)
		     | (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0)
		     | (comp << AR_CompProc_S)
		     | (compicvLen << AR_CompICVLen_S)
		     | (compivLen << AR_CompIVLen_S)
		     ;
	ads->ds_ctl2 = SM(txTries0, AR_XmitDataTries0)
		     | (flags & HAL_TXDESC_DURENA ? AR_DurUpdateEna : 0)
		     ;
	ads->ds_ctl3 = (txRate0 << AR_XmitRate0_S)
		     ;
	if (keyIx != HAL_TXKEYIX_INVALID) {
		/* XXX validate key index */
		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
		ads->ds_ctl0 |= AR_DestIdxValid;
	}
	if (flags & RTSCTS) {
		if (!isValidTxRate(rtsctsRate)) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: invalid rts/cts rate 0x%x\n",
			    __func__, rtsctsRate);
			return AH_FALSE;
		}
		/* XXX validate rtsctsDuration */
		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
			     | (flags & HAL_TXDESC_RTSENA ? AR_RTSCTSEnable : 0)
			     ;
		ads->ds_ctl2 |= SM(rtsctsDuration, AR_RTSCTSDuration);
		ads->ds_ctl3 |= (rtsctsRate << AR_RTSCTSRate_S);
	}
	return AH_TRUE;
#undef RTSCTS
}
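
/*
 * Example (illustrative sketch, not part of the HAL): a single-segment
 * frame is described with ar5212SetupTxDesc() for the per-frame control
 * fields followed by ar5212FillTxDesc() for the buffer.  frame_len,
 * hdr_len, tx_power, tx_rate0, tx_tries0, antenna_mode, buf_paddr and q
 * are hypothetical driver-supplied values:
 *
 *	HAL_DMA_ADDR bufs[1] = { buf_paddr };
 *	uint32_t lens[1] = { frame_len };
 *
 *	(void) ar5212SetupTxDesc(ah, ds, frame_len, hdr_len,
 *	    HAL_PKT_TYPE_NORMAL, tx_power, tx_rate0, tx_tries0,
 *	    HAL_TXKEYIX_INVALID, antenna_mode,
 *	    HAL_TXDESC_CLRDMASK | HAL_TXDESC_INTREQ,
 *	    0, 0, 0, 0, 0);
 *	(void) ar5212FillTxDesc(ah, ds, bufs, lens, q, 0,
 *	    AH_TRUE, AH_TRUE, ds);
 *
 * tx_rate0 must be one of the hardware rate codes accepted by
 * isValidTxRate() above; the trailing zeros leave RTS/CTS and the
 * compression fields unused.
 */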

HAL_BOOL
ar5212SetupXTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int txRate1, u_int txTries1,
	u_int txRate2, u_int txTries2,
	u_int txRate3, u_int txTries3)
{
	struct ar5212_desc *ads = AR5212DESC(ds);

	if (txTries1) {
		HALASSERT(isValidTxRate(txRate1));
		ads->ds_ctl2 |= SM(txTries1, AR_XmitDataTries1)
			     | AR_DurUpdateEna
			     ;
		ads->ds_ctl3 |= (txRate1 << AR_XmitRate1_S);
	}
	if (txTries2) {
		HALASSERT(isValidTxRate(txRate2));
		ads->ds_ctl2 |= SM(txTries2, AR_XmitDataTries2)
			     | AR_DurUpdateEna
			     ;
		ads->ds_ctl3 |= (txRate2 << AR_XmitRate2_S);
	}
	if (txTries3) {
		HALASSERT(isValidTxRate(txRate3));
		ads->ds_ctl2 |= SM(txTries3, AR_XmitDataTries3)
			     | AR_DurUpdateEna
			     ;
		ads->ds_ctl3 |= (txRate3 << AR_XmitRate3_S);
	}
	return AH_TRUE;
}
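
/*
 * Example (illustrative sketch, not part of the HAL): a driver doing
 * multi-rate retry programs the fallback series after the series 0
 * setup above, e.g. with hypothetical rate codes rate1..rate3:
 *
 *	(void) ar5212SetupXTxDesc(ah, ds,
 *	    rate1, 2,		// series 1: 2 tries
 *	    rate2, 2,		// series 2: 2 tries
 *	    rate3, 4);		// series 3: 4 tries
 *
 * A try count of 0 leaves the corresponding series unused.  The series
 * actually used for the final transmission is reported back in
 * ts_finaltsi by ar5212ProcTxDesc() below.
 */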

void
ar5212IntrReqTxDesc(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5212_desc *ads = AR5212DESC(ds);

#ifdef AH_NEED_DESC_SWAP
	ads->ds_ctl0 |= __bswap32(AR_TxInterReq);
#else
	ads->ds_ctl0 |= AR_TxInterReq;
#endif
}

HAL_BOOL
ar5212FillTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	HAL_DMA_ADDR *bufAddrList, uint32_t *segLenList, u_int qcuId,
	u_int descId, HAL_BOOL firstSeg, HAL_BOOL lastSeg,
	const struct ath_desc *ds0)
{
	struct ar5212_desc *ads = AR5212DESC(ds);
	uint32_t segLen = segLenList[0];

	HALASSERT((segLen &~ AR_BufLen) == 0);

	ds->ds_data = bufAddrList[0];

	if (firstSeg) {
		/*
		 * First descriptor, don't clobber xmit control data
		 * setup by ar5212SetupTxDesc.
		 */
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_More);
	} else if (lastSeg) {		/* !firstSeg && lastSeg */
		/*
		 * Last descriptor in a multi-descriptor frame,
		 * copy the multi-rate transmit parameters from
		 * the first frame for processing on completion.
		 */
		ads->ds_ctl1 = segLen;
#ifdef AH_NEED_DESC_SWAP
		ads->ds_ctl0 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl0)
		    & AR_TxInterReq;
		ads->ds_ctl2 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl2);
		ads->ds_ctl3 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl3);
#else
		ads->ds_ctl0 = AR5212DESC_CONST(ds0)->ds_ctl0 & AR_TxInterReq;
		ads->ds_ctl2 = AR5212DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5212DESC_CONST(ds0)->ds_ctl3;
#endif
	} else {			/* !firstSeg && !lastSeg */
		/*
		 * Intermediate descriptor in a multi-descriptor frame.
		 */
#ifdef AH_NEED_DESC_SWAP
		ads->ds_ctl0 = __bswap32(AR5212DESC_CONST(ds0)->ds_ctl0)
		    & AR_TxInterReq;
#else
		ads->ds_ctl0 = AR5212DESC_CONST(ds0)->ds_ctl0 & AR_TxInterReq;
#endif
		ads->ds_ctl1 = segLen | AR_More;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	return AH_TRUE;
}

#ifdef AH_NEED_DESC_SWAP
/* Swap transmit descriptor */
static __inline void
ar5212SwapTxDesc(struct ath_desc *ds)
{
	ds->ds_data = __bswap32(ds->ds_data);
	ds->ds_ctl0 = __bswap32(ds->ds_ctl0);
	ds->ds_ctl1 = __bswap32(ds->ds_ctl1);
	ds->ds_hw[0] = __bswap32(ds->ds_hw[0]);
	ds->ds_hw[1] = __bswap32(ds->ds_hw[1]);
	ds->ds_hw[2] = __bswap32(ds->ds_hw[2]);
	ds->ds_hw[3] = __bswap32(ds->ds_hw[3]);
}
#endif

/*
 * Processing of HW TX descriptor.
 */
HAL_STATUS
ar5212ProcTxDesc(struct ath_hal *ah,
	struct ath_desc *ds, struct ath_tx_status *ts)
{
	struct ar5212_desc *ads = AR5212DESC(ds);

#ifdef AH_NEED_DESC_SWAP
	if ((ads->ds_txstatus1 & __bswap32(AR_Done)) == 0)
		return HAL_EINPROGRESS;

	ar5212SwapTxDesc(ds);
#else
	if ((ads->ds_txstatus1 & AR_Done) == 0)
		return HAL_EINPROGRESS;
#endif

	/* Update software copies of the HW status */
	ts->ts_seqnum = MS(ads->ds_txstatus1, AR_SeqNum);
	ts->ts_tstamp = MS(ads->ds_txstatus0, AR_SendTimestamp);
	ts->ts_status = 0;
	if ((ads->ds_txstatus0 & AR_FrmXmitOK) == 0) {
		if (ads->ds_txstatus0 & AR_ExcessiveRetries)
			ts->ts_status |= HAL_TXERR_XRETRY;
		if (ads->ds_txstatus0 & AR_Filtered)
			ts->ts_status |= HAL_TXERR_FILT;
		if (ads->ds_txstatus0 & AR_FIFOUnderrun)
			ts->ts_status |= HAL_TXERR_FIFO;
	}
	/*
	 * Extract the transmit rate used and mark the rate as
	 * ``alternate'' if it wasn't the series 0 rate.
	 */
	ts->ts_finaltsi = MS(ads->ds_txstatus1, AR_FinalTSIndex);
	switch (ts->ts_finaltsi) {
	case 0:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate1);
		break;
	case 2:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate2);
		break;
	case 3:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate3);
		break;
	}
	ts->ts_rssi = MS(ads->ds_txstatus1, AR_AckSigStrength);
	ts->ts_shortretry = MS(ads->ds_txstatus0, AR_RTSFailCnt);
	ts->ts_longretry = MS(ads->ds_txstatus0, AR_DataFailCnt);
	/*
	 * The retry count has the number of un-acked tries for the
	 * final series used.  When doing multi-rate retry we must
	 * fixup the retry count by adding in the try counts for
	 * each series that was fully-processed.  Beware that this
	 * takes values from the try counts in the final descriptor.
	 * These are not required by the hardware.  We assume they
	 * are placed there by the driver as otherwise we have no
	 * access and the driver can't do the calculation because it
	 * doesn't know the descriptor format.
	 */
	switch (ts->ts_finaltsi) {
	case 3: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries2);
	case 2: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries1);
	case 1: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries0);
	}
	ts->ts_virtcol = MS(ads->ds_txstatus0, AR_VirtCollCnt);
	ts->ts_antenna = (ads->ds_txstatus1 & AR_XmitAtenna ? 2 : 1);

	return HAL_OK;
}
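
/*
 * Example (illustrative sketch, not part of the HAL): the driver's TX
 * completion path pops frames from its software queue until the
 * hardware has not yet finished one.  "bf_lastds" is hypothetical
 * driver bookkeeping for the last descriptor of the head frame:
 *
 *	struct ath_tx_status ts;
 *
 *	for (;;) {
 *		if (ar5212ProcTxDesc(ah, bf_lastds, &ts) == HAL_EINPROGRESS)
 *			break;
 *		// frame done: inspect ts.ts_status / ts.ts_rate /
 *		// ts.ts_longretry and recycle the buffer
 *	}
 *
 * On big-endian builds with AH_NEED_DESC_SWAP, ar5212ProcTxDesc()
 * byte-swaps the descriptor in place, so it should not be called a
 * second time on the same completed descriptor.
 */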

/*
 * Determine which tx queues need interrupt servicing.
 */
void
ar5212GetTxIntrQueue(struct ath_hal *ah, uint32_t *txqs)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	*txqs &= ahp->ah_intrTxqs;
	ahp->ah_intrTxqs &= ~(*txqs);
}

/*
 * Retrieve the rate table from the given TX completion descriptor
 */
HAL_BOOL
ar5212GetTxCompletionRates(struct ath_hal *ah, const struct ath_desc *ds0, int *rates, int *tries)
{
	const struct ar5212_desc *ads = AR5212DESC_CONST(ds0);

	rates[0] = MS(ads->ds_ctl3, AR_XmitRate0);
	rates[1] = MS(ads->ds_ctl3, AR_XmitRate1);
	rates[2] = MS(ads->ds_ctl3, AR_XmitRate2);
	rates[3] = MS(ads->ds_ctl3, AR_XmitRate3);

	tries[0] = MS(ads->ds_ctl2, AR_XmitDataTries0);
	tries[1] = MS(ads->ds_ctl2, AR_XmitDataTries1);
	tries[2] = MS(ads->ds_ctl2, AR_XmitDataTries2);
	tries[3] = MS(ads->ds_ctl2, AR_XmitDataTries3);

	return AH_TRUE;
}

void
ar5212SetTxDescLink(struct ath_hal *ah, void *ds, uint32_t link)
{
	struct ar5212_desc *ads = AR5212DESC(ds);

	ads->ds_link = link;
}

void
ar5212GetTxDescLink(struct ath_hal *ah, void *ds, uint32_t *link)
{
	struct ar5212_desc *ads = AR5212DESC(ds);

	*link = ads->ds_link;
}

void
ar5212GetTxDescLinkPtr(struct ath_hal *ah, void *ds, uint32_t **linkptr)
{
	struct ar5212_desc *ads = AR5212DESC(ds);

	*linkptr = &ads->ds_link;
}