/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/nl80211.h>
#include "ath9k.h"

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx)  { \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

/* Some 2 GHz radios are actually tunable from 2312-2732 MHz
 * in 5 MHz steps; we only support the channels for which we
 * know we have calibration data on all cards, so that this
 * table can stay static. */
static struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};

/* Some 5 GHz radios are actually tunable from XXXX-YYYY
 * in 5 MHz steps; we only support the channels for which we
 * know we have calibration data on all cards, so that this
 * table can stay static. */
static struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
CHAN5G(5745, 33), /* Channel 149 */ 96 CHAN5G(5765, 34), /* Channel 153 */ 97 CHAN5G(5785, 35), /* Channel 157 */ 98 CHAN5G(5805, 36), /* Channel 161 */ 99 CHAN5G(5825, 37), /* Channel 165 */ 100 }; 101 102 static void ath_cache_conf_rate(struct ath_softc *sc, 103 struct ieee80211_conf *conf) 104 { 105 switch (conf->channel->band) { 106 case IEEE80211_BAND_2GHZ: 107 if (conf_is_ht20(conf)) 108 sc->cur_rate_table = 109 sc->hw_rate_table[ATH9K_MODE_11NG_HT20]; 110 else if (conf_is_ht40_minus(conf)) 111 sc->cur_rate_table = 112 sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS]; 113 else if (conf_is_ht40_plus(conf)) 114 sc->cur_rate_table = 115 sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS]; 116 else 117 sc->cur_rate_table = 118 sc->hw_rate_table[ATH9K_MODE_11G]; 119 break; 120 case IEEE80211_BAND_5GHZ: 121 if (conf_is_ht20(conf)) 122 sc->cur_rate_table = 123 sc->hw_rate_table[ATH9K_MODE_11NA_HT20]; 124 else if (conf_is_ht40_minus(conf)) 125 sc->cur_rate_table = 126 sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS]; 127 else if (conf_is_ht40_plus(conf)) 128 sc->cur_rate_table = 129 sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS]; 130 else 131 sc->cur_rate_table = 132 sc->hw_rate_table[ATH9K_MODE_11A]; 133 break; 134 default: 135 BUG_ON(1); 136 break; 137 } 138 } 139 140 static void ath_update_txpow(struct ath_softc *sc) 141 { 142 struct ath_hw *ah = sc->sc_ah; 143 u32 txpow; 144 145 if (sc->curtxpow != sc->config.txpowlimit) { 146 ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit); 147 /* read back in case value is clamped */ 148 ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow); 149 sc->curtxpow = txpow; 150 } 151 } 152 153 static u8 parse_mpdudensity(u8 mpdudensity) 154 { 155 /* 156 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": 157 * 0 for no restriction 158 * 1 for 1/4 us 159 * 2 for 1/2 us 160 * 3 for 1 us 161 * 4 for 2 us 162 * 5 for 4 us 163 * 6 for 8 us 164 * 7 for 16 us 165 */ 166 switch (mpdudensity) { 167 case 0: 168 return 0; 169 case 1: 170 case 2: 171 case 3: 172 /* Our lower layer calculations limit our precision to 173 1 microsecond */ 174 return 1; 175 case 4: 176 return 2; 177 case 5: 178 return 4; 179 case 6: 180 return 8; 181 case 7: 182 return 16; 183 default: 184 return 0; 185 } 186 } 187 188 static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band) 189 { 190 const struct ath_rate_table *rate_table = NULL; 191 struct ieee80211_supported_band *sband; 192 struct ieee80211_rate *rate; 193 int i, maxrates; 194 195 switch (band) { 196 case IEEE80211_BAND_2GHZ: 197 rate_table = sc->hw_rate_table[ATH9K_MODE_11G]; 198 break; 199 case IEEE80211_BAND_5GHZ: 200 rate_table = sc->hw_rate_table[ATH9K_MODE_11A]; 201 break; 202 default: 203 break; 204 } 205 206 if (rate_table == NULL) 207 return; 208 209 sband = &sc->sbands[band]; 210 rate = sc->rates[band]; 211 212 if (rate_table->rate_cnt > ATH_RATE_MAX) 213 maxrates = ATH_RATE_MAX; 214 else 215 maxrates = rate_table->rate_cnt; 216 217 for (i = 0; i < maxrates; i++) { 218 rate[i].bitrate = rate_table->info[i].ratekbps / 100; 219 rate[i].hw_value = rate_table->info[i].ratecode; 220 if (rate_table->info[i].short_preamble) { 221 rate[i].hw_value_short = rate_table->info[i].ratecode | 222 rate_table->info[i].short_preamble; 223 rate[i].flags = IEEE80211_RATE_SHORT_PREAMBLE; 224 } 225 sband->n_bitrates++; 226 227 DPRINTF(sc, ATH_DBG_CONFIG, "Rate: %2dMbps, ratecode: %2d\n", 228 rate[i].bitrate / 10, rate[i].hw_value); 229 } 230 } 231 232 static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc, 233 
						struct ieee80211_hw *hw)
{
	struct ieee80211_channel *curchan = hw->conf.channel;
	struct ath9k_channel *channel;
	u8 chan_idx;

	chan_idx = curchan->hw_value;
	channel = &sc->sc_ah->channels[chan_idx];
	ath9k_update_ichannel(sc, hw, channel);
	return channel;
}

/*
 * Set/change channels. If the channel is really being changed, it's done
 * by resetting the chip. To accomplish this we must first clean up any pending
 * DMA, then restart stuff.
 */
int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
		    struct ath9k_channel *hchan)
{
	struct ath_hw *ah = sc->sc_ah;
	bool fastcc = true, stopped;
	struct ieee80211_channel *channel = hw->conf.channel;
	int r;

	if (sc->sc_flags & SC_OP_INVALID)
		return -EIO;

	ath9k_ps_wakeup(sc);

	/*
	 * This is only performed if the channel settings have
	 * actually changed.
	 *
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
	 */
	ath9k_hw_set_interrupts(ah, 0);
	ath_drain_all_txq(sc, false);
	stopped = ath_stoprecv(sc);

	/* XXX: do not flush receive queue here. We don't want
	 * to flush data frames already in queue because of
	 * changing channel. */

	if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
		fastcc = false;

	DPRINTF(sc, ATH_DBG_CONFIG,
		"(%u MHz) -> (%u MHz), chanwidth: %d\n",
		sc->sc_ah->curchan->channel,
		channel->center_freq, sc->tx_chan_width);

	spin_lock_bh(&sc->sc_resetlock);

	r = ath9k_hw_reset(ah, hchan, fastcc);
	if (r) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to reset channel (%u MHz) "
			"reset status %d\n",
			channel->center_freq, r);
		spin_unlock_bh(&sc->sc_resetlock);
		goto ps_restore;
	}
	spin_unlock_bh(&sc->sc_resetlock);

	sc->sc_flags &= ~SC_OP_FULL_RESET;

	if (ath_startrecv(sc) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to restart recv logic\n");
		r = -EIO;
		goto ps_restore;
	}

	ath_cache_conf_rate(sc, &hw->conf);
	ath_update_txpow(sc);
	ath9k_hw_set_interrupts(ah, sc->imask);

 ps_restore:
	ath9k_ps_restore(sc);
	return r;
}

/*
 * This routine performs the periodic noise floor calibration function
 * that is used to adjust and optimize the chip performance. This
 * takes environmental changes (location, temperature) into account.
 * When the task is complete, it reschedules itself depending on the
 * appropriate interval that was calculated.
 */
static void ath_ani_calibrate(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;
	struct ath_hw *ah = sc->sc_ah;
	bool longcal = false;
	bool shortcal = false;
	bool aniflag = false;
	unsigned int timestamp = jiffies_to_msecs(jiffies);
	u32 cal_interval, short_cal_interval;

	short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
		ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;

	/*
	 * don't calibrate when we're scanning.
	 * we are most likely not on our home channel.
	 */
	spin_lock(&sc->ani_lock);
	if (sc->sc_flags & SC_OP_SCANNING)
		goto set_timer;

	/* Only calibrate if awake */
	if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
		goto set_timer;

	ath9k_ps_wakeup(sc);

	/* Long calibration runs independently of short calibration.
*/ 354 if ((timestamp - sc->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) { 355 longcal = true; 356 DPRINTF(sc, ATH_DBG_ANI, "longcal @%lu\n", jiffies); 357 sc->ani.longcal_timer = timestamp; 358 } 359 360 /* Short calibration applies only while caldone is false */ 361 if (!sc->ani.caldone) { 362 if ((timestamp - sc->ani.shortcal_timer) >= short_cal_interval) { 363 shortcal = true; 364 DPRINTF(sc, ATH_DBG_ANI, "shortcal @%lu\n", jiffies); 365 sc->ani.shortcal_timer = timestamp; 366 sc->ani.resetcal_timer = timestamp; 367 } 368 } else { 369 if ((timestamp - sc->ani.resetcal_timer) >= 370 ATH_RESTART_CALINTERVAL) { 371 sc->ani.caldone = ath9k_hw_reset_calvalid(ah); 372 if (sc->ani.caldone) 373 sc->ani.resetcal_timer = timestamp; 374 } 375 } 376 377 /* Verify whether we must check ANI */ 378 if ((timestamp - sc->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) { 379 aniflag = true; 380 sc->ani.checkani_timer = timestamp; 381 } 382 383 /* Skip all processing if there's nothing to do. */ 384 if (longcal || shortcal || aniflag) { 385 /* Call ANI routine if necessary */ 386 if (aniflag) 387 ath9k_hw_ani_monitor(ah, ah->curchan); 388 389 /* Perform calibration if necessary */ 390 if (longcal || shortcal) { 391 sc->ani.caldone = ath9k_hw_calibrate(ah, ah->curchan, 392 sc->rx_chainmask, longcal); 393 394 if (longcal) 395 sc->ani.noise_floor = ath9k_hw_getchan_noise(ah, 396 ah->curchan); 397 398 DPRINTF(sc, ATH_DBG_ANI," calibrate chan %u/%x nf: %d\n", 399 ah->curchan->channel, ah->curchan->channelFlags, 400 sc->ani.noise_floor); 401 } 402 } 403 404 ath9k_ps_restore(sc); 405 406 set_timer: 407 spin_unlock(&sc->ani_lock); 408 /* 409 * Set timer interval based on previous results. 410 * The interval must be the shortest necessary to satisfy ANI, 411 * short calibration and long calibration. 412 */ 413 cal_interval = ATH_LONG_CALINTERVAL; 414 if (sc->sc_ah->config.enable_ani) 415 cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL); 416 if (!sc->ani.caldone) 417 cal_interval = min(cal_interval, (u32)short_cal_interval); 418 419 mod_timer(&sc->ani.timer, jiffies + msecs_to_jiffies(cal_interval)); 420 } 421 422 static void ath_start_ani(struct ath_softc *sc) 423 { 424 unsigned long timestamp = jiffies_to_msecs(jiffies); 425 426 sc->ani.longcal_timer = timestamp; 427 sc->ani.shortcal_timer = timestamp; 428 sc->ani.checkani_timer = timestamp; 429 430 mod_timer(&sc->ani.timer, 431 jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL)); 432 } 433 434 /* 435 * Update tx/rx chainmask. For legacy association, 436 * hard code chainmask to 1x1, for 11n association, use 437 * the chainmask configuration, for bt coexistence, use 438 * the chainmask configuration even in legacy mode. 
 */
void ath_update_chainmask(struct ath_softc *sc, int is_ht)
{
	if ((sc->sc_flags & SC_OP_SCANNING) || is_ht ||
	    (sc->btcoex_info.btcoex_scheme != ATH_BTCOEX_CFG_NONE)) {
		sc->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
		sc->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
	} else {
		sc->tx_chainmask = 1;
		sc->rx_chainmask = 1;
	}

	DPRINTF(sc, ATH_DBG_CONFIG, "tx chmask: %d, rx chmask: %d\n",
		sc->tx_chainmask, sc->rx_chainmask);
}

static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR) {
		ath_tx_node_init(sc, an);
		an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
				     sta->ht_cap.ampdu_factor);
		an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
		an->last_rssi = ATH_RSSI_DUMMY_MARKER;
	}
}

static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;

	if (sc->sc_flags & SC_OP_TXAGGR)
		ath_tx_node_cleanup(sc, an);
}

static void ath9k_tasklet(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;
	u32 status = sc->intrstatus;

	ath9k_ps_wakeup(sc);

	if (status & ATH9K_INT_FATAL) {
		ath_reset(sc, false);
		ath9k_ps_restore(sc);
		return;
	}

	if (status & (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
		spin_lock_bh(&sc->rx.rxflushlock);
		ath_rx_tasklet(sc, 0);
		spin_unlock_bh(&sc->rx.rxflushlock);
	}

	if (status & ATH9K_INT_TX)
		ath_tx_tasklet(sc);

	if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
		/*
		 * TSF sync does not look correct; remain awake to sync with
		 * the next Beacon.
		 */
		DPRINTF(sc, ATH_DBG_PS, "TSFOOR - Sync with next Beacon\n");
		sc->sc_flags |= SC_OP_WAIT_FOR_BEACON | SC_OP_BEACON_SYNC;
	}

	if (sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE)
		if (status & ATH9K_INT_GENTIMER)
			ath_gen_timer_isr(sc->sc_ah);

	/* re-enable hardware interrupt */
	ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
	ath9k_ps_restore(sc);
}

irqreturn_t ath_isr(int irq, void *dev)
{
#define SCHED_INTR (			\
		ATH9K_INT_FATAL |	\
		ATH9K_INT_RXORN |	\
		ATH9K_INT_RXEOL |	\
		ATH9K_INT_RX |		\
		ATH9K_INT_TX |		\
		ATH9K_INT_BMISS |	\
		ATH9K_INT_CST |		\
		ATH9K_INT_TSFOOR |	\
		ATH9K_INT_GENTIMER)

	struct ath_softc *sc = dev;
	struct ath_hw *ah = sc->sc_ah;
	enum ath9k_int status;
	bool sched = false;

	/*
	 * The hardware is not ready/present, don't
	 * touch anything. Note this can happen early
	 * on if the IRQ is shared.
	 */
	if (sc->sc_flags & SC_OP_INVALID)
		return IRQ_NONE;


	/* shared irq, not for us */

	if (!ath9k_hw_intrpend(ah))
		return IRQ_NONE;

	/*
	 * Figure out the reason(s) for the interrupt. Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to ensure we only process bits we requested.
	 */
	ath9k_hw_getisr(ah, &status);	/* NB: clears ISR too */
	status &= sc->imask;		/* discard unasked-for bits */

	/*
	 * If there are no status bits set, then this interrupt was not
	 * for me (should have been caught above).
562 */ 563 if (!status) 564 return IRQ_NONE; 565 566 /* Cache the status */ 567 sc->intrstatus = status; 568 569 if (status & SCHED_INTR) 570 sched = true; 571 572 /* 573 * If a FATAL or RXORN interrupt is received, we have to reset the 574 * chip immediately. 575 */ 576 if (status & (ATH9K_INT_FATAL | ATH9K_INT_RXORN)) 577 goto chip_reset; 578 579 if (status & ATH9K_INT_SWBA) 580 tasklet_schedule(&sc->bcon_tasklet); 581 582 if (status & ATH9K_INT_TXURN) 583 ath9k_hw_updatetxtriglevel(ah, true); 584 585 if (status & ATH9K_INT_MIB) { 586 /* 587 * Disable interrupts until we service the MIB 588 * interrupt; otherwise it will continue to 589 * fire. 590 */ 591 ath9k_hw_set_interrupts(ah, 0); 592 /* 593 * Let the hal handle the event. We assume 594 * it will clear whatever condition caused 595 * the interrupt. 596 */ 597 ath9k_hw_procmibevent(ah); 598 ath9k_hw_set_interrupts(ah, sc->imask); 599 } 600 601 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 602 if (status & ATH9K_INT_TIM_TIMER) { 603 /* Clear RxAbort bit so that we can 604 * receive frames */ 605 ath9k_hw_setpower(ah, ATH9K_PM_AWAKE); 606 ath9k_hw_setrxabort(sc->sc_ah, 0); 607 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON; 608 } 609 610 chip_reset: 611 612 ath_debug_stat_interrupt(sc, status); 613 614 if (sched) { 615 /* turn off every interrupt except SWBA */ 616 ath9k_hw_set_interrupts(ah, (sc->imask & ATH9K_INT_SWBA)); 617 tasklet_schedule(&sc->intr_tq); 618 } 619 620 return IRQ_HANDLED; 621 622 #undef SCHED_INTR 623 } 624 625 static u32 ath_get_extchanmode(struct ath_softc *sc, 626 struct ieee80211_channel *chan, 627 enum nl80211_channel_type channel_type) 628 { 629 u32 chanmode = 0; 630 631 switch (chan->band) { 632 case IEEE80211_BAND_2GHZ: 633 switch(channel_type) { 634 case NL80211_CHAN_NO_HT: 635 case NL80211_CHAN_HT20: 636 chanmode = CHANNEL_G_HT20; 637 break; 638 case NL80211_CHAN_HT40PLUS: 639 chanmode = CHANNEL_G_HT40PLUS; 640 break; 641 case NL80211_CHAN_HT40MINUS: 642 chanmode = CHANNEL_G_HT40MINUS; 643 break; 644 } 645 break; 646 case IEEE80211_BAND_5GHZ: 647 switch(channel_type) { 648 case NL80211_CHAN_NO_HT: 649 case NL80211_CHAN_HT20: 650 chanmode = CHANNEL_A_HT20; 651 break; 652 case NL80211_CHAN_HT40PLUS: 653 chanmode = CHANNEL_A_HT40PLUS; 654 break; 655 case NL80211_CHAN_HT40MINUS: 656 chanmode = CHANNEL_A_HT40MINUS; 657 break; 658 } 659 break; 660 default: 661 break; 662 } 663 664 return chanmode; 665 } 666 667 static int ath_setkey_tkip(struct ath_softc *sc, u16 keyix, const u8 *key, 668 struct ath9k_keyval *hk, const u8 *addr, 669 bool authenticator) 670 { 671 const u8 *key_rxmic; 672 const u8 *key_txmic; 673 674 key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY; 675 key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY; 676 677 if (addr == NULL) { 678 /* 679 * Group key installation - only two key cache entries are used 680 * regardless of splitmic capability since group key is only 681 * used either for TX or RX. 682 */ 683 if (authenticator) { 684 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic)); 685 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic)); 686 } else { 687 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic)); 688 memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic)); 689 } 690 return ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, addr); 691 } 692 if (!sc->splitmic) { 693 /* TX and RX keys share the same key cache entry. 
*/ 694 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic)); 695 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic)); 696 return ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, addr); 697 } 698 699 /* Separate key cache entries for TX and RX */ 700 701 /* TX key goes at first index, RX key at +32. */ 702 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic)); 703 if (!ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, NULL)) { 704 /* TX MIC entry failed. No need to proceed further */ 705 DPRINTF(sc, ATH_DBG_FATAL, 706 "Setting TX MIC Key Failed\n"); 707 return 0; 708 } 709 710 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic)); 711 /* XXX delete tx key on failure? */ 712 return ath9k_hw_set_keycache_entry(sc->sc_ah, keyix + 32, hk, addr); 713 } 714 715 static int ath_reserve_key_cache_slot_tkip(struct ath_softc *sc) 716 { 717 int i; 718 719 for (i = IEEE80211_WEP_NKID; i < sc->keymax / 2; i++) { 720 if (test_bit(i, sc->keymap) || 721 test_bit(i + 64, sc->keymap)) 722 continue; /* At least one part of TKIP key allocated */ 723 if (sc->splitmic && 724 (test_bit(i + 32, sc->keymap) || 725 test_bit(i + 64 + 32, sc->keymap))) 726 continue; /* At least one part of TKIP key allocated */ 727 728 /* Found a free slot for a TKIP key */ 729 return i; 730 } 731 return -1; 732 } 733 734 static int ath_reserve_key_cache_slot(struct ath_softc *sc) 735 { 736 int i; 737 738 /* First, try to find slots that would not be available for TKIP. */ 739 if (sc->splitmic) { 740 for (i = IEEE80211_WEP_NKID; i < sc->keymax / 4; i++) { 741 if (!test_bit(i, sc->keymap) && 742 (test_bit(i + 32, sc->keymap) || 743 test_bit(i + 64, sc->keymap) || 744 test_bit(i + 64 + 32, sc->keymap))) 745 return i; 746 if (!test_bit(i + 32, sc->keymap) && 747 (test_bit(i, sc->keymap) || 748 test_bit(i + 64, sc->keymap) || 749 test_bit(i + 64 + 32, sc->keymap))) 750 return i + 32; 751 if (!test_bit(i + 64, sc->keymap) && 752 (test_bit(i , sc->keymap) || 753 test_bit(i + 32, sc->keymap) || 754 test_bit(i + 64 + 32, sc->keymap))) 755 return i + 64; 756 if (!test_bit(i + 64 + 32, sc->keymap) && 757 (test_bit(i, sc->keymap) || 758 test_bit(i + 32, sc->keymap) || 759 test_bit(i + 64, sc->keymap))) 760 return i + 64 + 32; 761 } 762 } else { 763 for (i = IEEE80211_WEP_NKID; i < sc->keymax / 2; i++) { 764 if (!test_bit(i, sc->keymap) && 765 test_bit(i + 64, sc->keymap)) 766 return i; 767 if (test_bit(i, sc->keymap) && 768 !test_bit(i + 64, sc->keymap)) 769 return i + 64; 770 } 771 } 772 773 /* No partially used TKIP slots, pick any available slot */ 774 for (i = IEEE80211_WEP_NKID; i < sc->keymax; i++) { 775 /* Do not allow slots that could be needed for TKIP group keys 776 * to be used. This limitation could be removed if we know that 777 * TKIP will not be used. 
*/ 778 if (i >= 64 && i < 64 + IEEE80211_WEP_NKID) 779 continue; 780 if (sc->splitmic) { 781 if (i >= 32 && i < 32 + IEEE80211_WEP_NKID) 782 continue; 783 if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID) 784 continue; 785 } 786 787 if (!test_bit(i, sc->keymap)) 788 return i; /* Found a free slot for a key */ 789 } 790 791 /* No free slot found */ 792 return -1; 793 } 794 795 static int ath_key_config(struct ath_softc *sc, 796 struct ieee80211_vif *vif, 797 struct ieee80211_sta *sta, 798 struct ieee80211_key_conf *key) 799 { 800 struct ath9k_keyval hk; 801 const u8 *mac = NULL; 802 int ret = 0; 803 int idx; 804 805 memset(&hk, 0, sizeof(hk)); 806 807 switch (key->alg) { 808 case ALG_WEP: 809 hk.kv_type = ATH9K_CIPHER_WEP; 810 break; 811 case ALG_TKIP: 812 hk.kv_type = ATH9K_CIPHER_TKIP; 813 break; 814 case ALG_CCMP: 815 hk.kv_type = ATH9K_CIPHER_AES_CCM; 816 break; 817 default: 818 return -EOPNOTSUPP; 819 } 820 821 hk.kv_len = key->keylen; 822 memcpy(hk.kv_val, key->key, key->keylen); 823 824 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { 825 /* For now, use the default keys for broadcast keys. This may 826 * need to change with virtual interfaces. */ 827 idx = key->keyidx; 828 } else if (key->keyidx) { 829 if (WARN_ON(!sta)) 830 return -EOPNOTSUPP; 831 mac = sta->addr; 832 833 if (vif->type != NL80211_IFTYPE_AP) { 834 /* Only keyidx 0 should be used with unicast key, but 835 * allow this for client mode for now. */ 836 idx = key->keyidx; 837 } else 838 return -EIO; 839 } else { 840 if (WARN_ON(!sta)) 841 return -EOPNOTSUPP; 842 mac = sta->addr; 843 844 if (key->alg == ALG_TKIP) 845 idx = ath_reserve_key_cache_slot_tkip(sc); 846 else 847 idx = ath_reserve_key_cache_slot(sc); 848 if (idx < 0) 849 return -ENOSPC; /* no free key cache entries */ 850 } 851 852 if (key->alg == ALG_TKIP) 853 ret = ath_setkey_tkip(sc, idx, key->key, &hk, mac, 854 vif->type == NL80211_IFTYPE_AP); 855 else 856 ret = ath9k_hw_set_keycache_entry(sc->sc_ah, idx, &hk, mac); 857 858 if (!ret) 859 return -EIO; 860 861 set_bit(idx, sc->keymap); 862 if (key->alg == ALG_TKIP) { 863 set_bit(idx + 64, sc->keymap); 864 if (sc->splitmic) { 865 set_bit(idx + 32, sc->keymap); 866 set_bit(idx + 64 + 32, sc->keymap); 867 } 868 } 869 870 return idx; 871 } 872 873 static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key) 874 { 875 ath9k_hw_keyreset(sc->sc_ah, key->hw_key_idx); 876 if (key->hw_key_idx < IEEE80211_WEP_NKID) 877 return; 878 879 clear_bit(key->hw_key_idx, sc->keymap); 880 if (key->alg != ALG_TKIP) 881 return; 882 883 clear_bit(key->hw_key_idx + 64, sc->keymap); 884 if (sc->splitmic) { 885 clear_bit(key->hw_key_idx + 32, sc->keymap); 886 clear_bit(key->hw_key_idx + 64 + 32, sc->keymap); 887 } 888 } 889 890 static void setup_ht_cap(struct ath_softc *sc, 891 struct ieee80211_sta_ht_cap *ht_info) 892 { 893 u8 tx_streams, rx_streams; 894 895 ht_info->ht_supported = true; 896 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | 897 IEEE80211_HT_CAP_SM_PS | 898 IEEE80211_HT_CAP_SGI_40 | 899 IEEE80211_HT_CAP_DSSSCCK40; 900 901 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 902 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; 903 904 /* set up supported mcs set */ 905 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); 906 tx_streams = !(sc->tx_chainmask & (sc->tx_chainmask - 1)) ? 1 : 2; 907 rx_streams = !(sc->rx_chainmask & (sc->rx_chainmask - 1)) ? 
1 : 2; 908 909 if (tx_streams != rx_streams) { 910 DPRINTF(sc, ATH_DBG_CONFIG, "TX streams %d, RX streams: %d\n", 911 tx_streams, rx_streams); 912 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; 913 ht_info->mcs.tx_params |= ((tx_streams - 1) << 914 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); 915 } 916 917 ht_info->mcs.rx_mask[0] = 0xff; 918 if (rx_streams >= 2) 919 ht_info->mcs.rx_mask[1] = 0xff; 920 921 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; 922 } 923 924 static void ath9k_bss_assoc_info(struct ath_softc *sc, 925 struct ieee80211_vif *vif, 926 struct ieee80211_bss_conf *bss_conf) 927 { 928 929 if (bss_conf->assoc) { 930 DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info ASSOC %d, bssid: %pM\n", 931 bss_conf->aid, sc->curbssid); 932 933 /* New association, store aid */ 934 sc->curaid = bss_conf->aid; 935 ath9k_hw_write_associd(sc); 936 937 /* 938 * Request a re-configuration of Beacon related timers 939 * on the receipt of the first Beacon frame (i.e., 940 * after time sync with the AP). 941 */ 942 sc->sc_flags |= SC_OP_BEACON_SYNC; 943 944 /* Configure the beacon */ 945 ath_beacon_config(sc, vif); 946 947 /* Reset rssi stats */ 948 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER; 949 950 ath_start_ani(sc); 951 } else { 952 DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info DISASSOC\n"); 953 sc->curaid = 0; 954 /* Stop ANI */ 955 del_timer_sync(&sc->ani.timer); 956 } 957 } 958 959 /********************************/ 960 /* LED functions */ 961 /********************************/ 962 963 static void ath_led_blink_work(struct work_struct *work) 964 { 965 struct ath_softc *sc = container_of(work, struct ath_softc, 966 ath_led_blink_work.work); 967 968 if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED)) 969 return; 970 971 if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) || 972 (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE)) 973 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0); 974 else 975 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 976 (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0); 977 978 ieee80211_queue_delayed_work(sc->hw, 979 &sc->ath_led_blink_work, 980 (sc->sc_flags & SC_OP_LED_ON) ? 981 msecs_to_jiffies(sc->led_off_duration) : 982 msecs_to_jiffies(sc->led_on_duration)); 983 984 sc->led_on_duration = sc->led_on_cnt ? 985 max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) : 986 ATH_LED_ON_DURATION_IDLE; 987 sc->led_off_duration = sc->led_off_cnt ? 
988 max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) : 989 ATH_LED_OFF_DURATION_IDLE; 990 sc->led_on_cnt = sc->led_off_cnt = 0; 991 if (sc->sc_flags & SC_OP_LED_ON) 992 sc->sc_flags &= ~SC_OP_LED_ON; 993 else 994 sc->sc_flags |= SC_OP_LED_ON; 995 } 996 997 static void ath_led_brightness(struct led_classdev *led_cdev, 998 enum led_brightness brightness) 999 { 1000 struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev); 1001 struct ath_softc *sc = led->sc; 1002 1003 switch (brightness) { 1004 case LED_OFF: 1005 if (led->led_type == ATH_LED_ASSOC || 1006 led->led_type == ATH_LED_RADIO) { 1007 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1008 (led->led_type == ATH_LED_RADIO)); 1009 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED; 1010 if (led->led_type == ATH_LED_RADIO) 1011 sc->sc_flags &= ~SC_OP_LED_ON; 1012 } else { 1013 sc->led_off_cnt++; 1014 } 1015 break; 1016 case LED_FULL: 1017 if (led->led_type == ATH_LED_ASSOC) { 1018 sc->sc_flags |= SC_OP_LED_ASSOCIATED; 1019 ieee80211_queue_delayed_work(sc->hw, 1020 &sc->ath_led_blink_work, 0); 1021 } else if (led->led_type == ATH_LED_RADIO) { 1022 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0); 1023 sc->sc_flags |= SC_OP_LED_ON; 1024 } else { 1025 sc->led_on_cnt++; 1026 } 1027 break; 1028 default: 1029 break; 1030 } 1031 } 1032 1033 static int ath_register_led(struct ath_softc *sc, struct ath_led *led, 1034 char *trigger) 1035 { 1036 int ret; 1037 1038 led->sc = sc; 1039 led->led_cdev.name = led->name; 1040 led->led_cdev.default_trigger = trigger; 1041 led->led_cdev.brightness_set = ath_led_brightness; 1042 1043 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev); 1044 if (ret) 1045 DPRINTF(sc, ATH_DBG_FATAL, 1046 "Failed to register led:%s", led->name); 1047 else 1048 led->registered = 1; 1049 return ret; 1050 } 1051 1052 static void ath_unregister_led(struct ath_led *led) 1053 { 1054 if (led->registered) { 1055 led_classdev_unregister(&led->led_cdev); 1056 led->registered = 0; 1057 } 1058 } 1059 1060 static void ath_deinit_leds(struct ath_softc *sc) 1061 { 1062 ath_unregister_led(&sc->assoc_led); 1063 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED; 1064 ath_unregister_led(&sc->tx_led); 1065 ath_unregister_led(&sc->rx_led); 1066 ath_unregister_led(&sc->radio_led); 1067 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); 1068 } 1069 1070 static void ath_init_leds(struct ath_softc *sc) 1071 { 1072 char *trigger; 1073 int ret; 1074 1075 if (AR_SREV_9287(sc->sc_ah)) 1076 sc->sc_ah->led_pin = ATH_LED_PIN_9287; 1077 else 1078 sc->sc_ah->led_pin = ATH_LED_PIN_DEF; 1079 1080 /* Configure gpio 1 for output */ 1081 ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin, 1082 AR_GPIO_OUTPUT_MUX_AS_OUTPUT); 1083 /* LED off, active low */ 1084 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); 1085 1086 INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work); 1087 1088 trigger = ieee80211_get_radio_led_name(sc->hw); 1089 snprintf(sc->radio_led.name, sizeof(sc->radio_led.name), 1090 "ath9k-%s::radio", wiphy_name(sc->hw->wiphy)); 1091 ret = ath_register_led(sc, &sc->radio_led, trigger); 1092 sc->radio_led.led_type = ATH_LED_RADIO; 1093 if (ret) 1094 goto fail; 1095 1096 trigger = ieee80211_get_assoc_led_name(sc->hw); 1097 snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name), 1098 "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy)); 1099 ret = ath_register_led(sc, &sc->assoc_led, trigger); 1100 sc->assoc_led.led_type = ATH_LED_ASSOC; 1101 if (ret) 1102 goto fail; 1103 1104 trigger = ieee80211_get_tx_led_name(sc->hw); 1105 
	snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
		"ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
	ret = ath_register_led(sc, &sc->tx_led, trigger);
	sc->tx_led.led_type = ATH_LED_TX;
	if (ret)
		goto fail;

	trigger = ieee80211_get_rx_led_name(sc->hw);
	snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
		"ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
	ret = ath_register_led(sc, &sc->rx_led, trigger);
	sc->rx_led.led_type = ATH_LED_RX;
	if (ret)
		goto fail;

	return;

fail:
	cancel_delayed_work_sync(&sc->ath_led_blink_work);
	ath_deinit_leds(sc);
}

void ath_radio_enable(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_channel *channel = sc->hw->conf.channel;
	int r;

	ath9k_ps_wakeup(sc);
	ath9k_hw_configpcipowersave(ah, 0);

	if (!ah->curchan)
		ah->curchan = ath_get_curchannel(sc, sc->hw);

	spin_lock_bh(&sc->sc_resetlock);
	r = ath9k_hw_reset(ah, ah->curchan, false);
	if (r) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to reset channel %u (%u MHz) "
			"reset status %d\n",
			channel->center_freq, r);
	}
	spin_unlock_bh(&sc->sc_resetlock);

	ath_update_txpow(sc);
	if (ath_startrecv(sc) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to restart recv logic\n");
		return;
	}

	if (sc->sc_flags & SC_OP_BEACONS)
		ath_beacon_config(sc, NULL);	/* restart beacons */

	/* Re-enable interrupts */
	ath9k_hw_set_interrupts(ah, sc->imask);

	/* Enable LED */
	ath9k_hw_cfg_output(ah, ah->led_pin,
			    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
	ath9k_hw_set_gpio(ah, ah->led_pin, 0);

	ieee80211_wake_queues(sc->hw);
	ath9k_ps_restore(sc);
}

void ath_radio_disable(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_channel *channel = sc->hw->conf.channel;
	int r;

	ath9k_ps_wakeup(sc);
	ieee80211_stop_queues(sc->hw);

	/* Disable LED */
	ath9k_hw_set_gpio(ah, ah->led_pin, 1);
	ath9k_hw_cfg_gpio_input(ah, ah->led_pin);

	/* Disable interrupts */
	ath9k_hw_set_interrupts(ah, 0);

	ath_drain_all_txq(sc, false);	/* clear pending tx frames */
	ath_stoprecv(sc);		/* turn off frame recv */
	ath_flushrecv(sc);		/* flush recv queue */

	if (!ah->curchan)
		ah->curchan = ath_get_curchannel(sc, sc->hw);

	spin_lock_bh(&sc->sc_resetlock);
	r = ath9k_hw_reset(ah, ah->curchan, false);
	if (r) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to reset channel %u (%u MHz) "
			"reset status %d\n",
			channel->center_freq, r);
	}
	spin_unlock_bh(&sc->sc_resetlock);

	ath9k_hw_phy_disable(ah);
	ath9k_hw_configpcipowersave(ah, 1);
	ath9k_ps_restore(sc);
	ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
}

/*******************/
/*     Rfkill      */
/*******************/

static bool ath_is_rfkill_set(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;

	return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
				 ah->rfkill_polarity;
}

static void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	bool blocked = !!ath_is_rfkill_set(sc);

	wiphy_rfkill_set_hw_state(hw->wiphy, blocked);

	if (blocked)
		ath_radio_disable(sc);
	else
		ath_radio_enable(sc);
}
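/*
 * rfkill support: when the hardware reports the RFSILENT capability we ask
 * cfg80211 to poll the rfkill GPIO (ath_start_rfkill_poll() below);
 * ath9k_rfkill_poll_state() above then reports the switch state and enables
 * or disables the radio to match it.
 */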

static void ath_start_rfkill_poll(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
		wiphy_rfkill_start_polling(sc->hw->wiphy);
}

void ath_cleanup(struct ath_softc *sc)
{
	ath_detach(sc);
	free_irq(sc->irq, sc);
	ath_bus_cleanup(sc);
	kfree(sc->sec_wiphy);
	ieee80211_free_hw(sc->hw);
}

void ath_detach(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;
	int i = 0;

	ath9k_ps_wakeup(sc);

	DPRINTF(sc, ATH_DBG_CONFIG, "Detach ATH hw\n");

	ath_deinit_leds(sc);

	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		sc->sec_wiphy[i] = NULL;
		ieee80211_unregister_hw(aphy->hw);
		ieee80211_free_hw(aphy->hw);
	}
	ieee80211_unregister_hw(hw);
	ath_rx_cleanup(sc);
	ath_tx_cleanup(sc);

	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	if (!(sc->sc_flags & SC_OP_INVALID))
		ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);

	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	if ((sc->btcoex_info.no_stomp_timer) &&
	    sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE)
		ath_gen_timer_free(sc->sc_ah, sc->btcoex_info.no_stomp_timer);

	ath9k_hw_detach(sc->sc_ah);
	sc->sc_ah = NULL;
	ath9k_exit_debug(sc);
}

static int ath9k_reg_notifier(struct wiphy *wiphy,
			      struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_regulatory *reg = &sc->common.regulatory;

	return ath_reg_notifier_apply(wiphy, request, reg);
}

/*
 * Initialize and fill ath_softc; ath_softc is the
 * "Software Carrier" struct. Historically it has existed
 * to allow the separation between hardware specific
 * variables (now in ath_hw) and driver specific variables.
 */
static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
{
	struct ath_hw *ah = NULL;
	int r = 0, i;
	int csz = 0;

	/* XXX: hardware will not be ready until ath_open() is called */
	sc->sc_flags |= SC_OP_INVALID;

	if (ath9k_init_debug(sc) < 0)
		printk(KERN_ERR "Unable to create debugfs files\n");

	spin_lock_init(&sc->wiphy_lock);
	spin_lock_init(&sc->sc_resetlock);
	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->ani_lock);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(sc, &csz);
	/* XXX assert csz is non-zero */
	sc->common.cachelsz = csz << 2;	/* convert to bytes */

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah) {
		r = -ENOMEM;
		goto bad_no_ah;
	}

	ah->ah_sc = sc;
	ah->hw_version.devid = devid;
	ah->hw_version.subsysid = subsysid;
	sc->sc_ah = ah;

	r = ath9k_hw_init(ah);
	if (r) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to initialize hardware; "
			"initialization status: %d\n", r);
		goto bad;
	}

	/* Get the hardware key cache size. */
	sc->keymax = ah->caps.keycache_size;
	if (sc->keymax > ATH_KEYMAX) {
		DPRINTF(sc, ATH_DBG_ANY,
			"Warning, using only %u entries in %u key cache\n",
			ATH_KEYMAX, sc->keymax);
		sc->keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->keymax; i++)
		ath9k_hw_keyreset(ah, (u16) i);

	/* default to MONITOR mode */
	sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;

	/* Setup rate tables */

	ath_rate_attach(sc);
	ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
	ath_setup_rates(sc, IEEE80211_BAND_5GHZ);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority. Note that the hal handles resetting
	 * these queues at the needed time.
	 */
	sc->beacon.beaconq = ath_beaconq_setup(ah);
	if (sc->beacon.beaconq == -1) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to setup a beacon xmit queue\n");
		r = -EIO;
		goto bad2;
	}
	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->beacon.cabq == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to setup CAB xmit queue\n");
		r = -EIO;
		goto bad2;
	}

	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
		sc->tx.hwq_map[i] = -1;

	/* Setup data queues */
	/* NB: ensure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to setup xmit queue for BK traffic\n");
		r = -EIO;
		goto bad2;
	}

	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to setup xmit queue for BE traffic\n");
		r = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to setup xmit queue for VI traffic\n");
		r = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"Unable to setup xmit queue for VO traffic\n");
		r = -EIO;
		goto bad2;
	}

	/* Initializes the noise floor to a reasonable default value.
	 * Later on this will be updated during ANI processing. */

	sc->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
	setup_timer(&sc->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)) {
		/*
		 * Whether we should enable h/w TKIP MIC.
		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
		 * report WMM capable, so it's always safe to turn on
		 * TKIP MIC in this case.
1455 */ 1456 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, 1457 0, 1, NULL); 1458 } 1459 1460 /* 1461 * Check whether the separate key cache entries 1462 * are required to handle both tx+rx MIC keys. 1463 * With split mic keys the number of stations is limited 1464 * to 27 otherwise 59. 1465 */ 1466 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER, 1467 ATH9K_CIPHER_TKIP, NULL) 1468 && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER, 1469 ATH9K_CIPHER_MIC, NULL) 1470 && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT, 1471 0, NULL)) 1472 sc->splitmic = 1; 1473 1474 /* turn on mcast key search if possible */ 1475 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL)) 1476 (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1, 1477 1, NULL); 1478 1479 sc->config.txpowlimit = ATH_TXPOWER_MAX; 1480 1481 /* 11n Capabilities */ 1482 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) { 1483 sc->sc_flags |= SC_OP_TXAGGR; 1484 sc->sc_flags |= SC_OP_RXAGGR; 1485 } 1486 1487 sc->tx_chainmask = ah->caps.tx_chainmask; 1488 sc->rx_chainmask = ah->caps.rx_chainmask; 1489 1490 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL); 1491 sc->rx.defant = ath9k_hw_getdefantenna(ah); 1492 1493 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 1494 memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN); 1495 1496 sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */ 1497 1498 /* initialize beacon slots */ 1499 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) { 1500 sc->beacon.bslot[i] = NULL; 1501 sc->beacon.bslot_aphy[i] = NULL; 1502 } 1503 1504 /* setup channels and rates */ 1505 1506 sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable; 1507 sc->sbands[IEEE80211_BAND_2GHZ].bitrates = 1508 sc->rates[IEEE80211_BAND_2GHZ]; 1509 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ; 1510 sc->sbands[IEEE80211_BAND_2GHZ].n_channels = 1511 ARRAY_SIZE(ath9k_2ghz_chantable); 1512 1513 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) { 1514 sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable; 1515 sc->sbands[IEEE80211_BAND_5GHZ].bitrates = 1516 sc->rates[IEEE80211_BAND_5GHZ]; 1517 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ; 1518 sc->sbands[IEEE80211_BAND_5GHZ].n_channels = 1519 ARRAY_SIZE(ath9k_5ghz_chantable); 1520 } 1521 1522 if (sc->btcoex_info.btcoex_scheme != ATH_BTCOEX_CFG_NONE) { 1523 r = ath9k_hw_btcoex_init(ah); 1524 if (r) 1525 goto bad2; 1526 } 1527 1528 return 0; 1529 bad2: 1530 /* cleanup tx queues */ 1531 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) 1532 if (ATH_TXQ_SETUP(sc, i)) 1533 ath_tx_cleanupq(sc, &sc->tx.txq[i]); 1534 bad: 1535 ath9k_hw_detach(ah); 1536 sc->sc_ah = NULL; 1537 bad_no_ah: 1538 ath9k_exit_debug(sc); 1539 1540 return r; 1541 } 1542 1543 void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) 1544 { 1545 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 1546 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 1547 IEEE80211_HW_SIGNAL_DBM | 1548 IEEE80211_HW_AMPDU_AGGREGATION | 1549 IEEE80211_HW_SUPPORTS_PS | 1550 IEEE80211_HW_PS_NULLFUNC_STACK | 1551 IEEE80211_HW_SPECTRUM_MGMT; 1552 1553 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt) 1554 hw->flags |= IEEE80211_HW_MFP_CAPABLE; 1555 1556 hw->wiphy->interface_modes = 1557 BIT(NL80211_IFTYPE_AP) | 1558 BIT(NL80211_IFTYPE_STATION) | 1559 BIT(NL80211_IFTYPE_ADHOC) | 1560 BIT(NL80211_IFTYPE_MESH_POINT); 1561 1562 hw->queues = 4; 1563 hw->max_rates = 4; 1564 hw->channel_change_time = 5000; 1565 hw->max_listen_interval = 10; 1566 /* Hardware 
supports 10 but we use 4 */ 1567 hw->max_rate_tries = 4; 1568 hw->sta_data_size = sizeof(struct ath_node); 1569 hw->vif_data_size = sizeof(struct ath_vif); 1570 1571 hw->rate_control_algorithm = "ath9k_rate_control"; 1572 1573 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 1574 &sc->sbands[IEEE80211_BAND_2GHZ]; 1575 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) 1576 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 1577 &sc->sbands[IEEE80211_BAND_5GHZ]; 1578 } 1579 1580 /* Device driver core initialization */ 1581 int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid) 1582 { 1583 struct ieee80211_hw *hw = sc->hw; 1584 int error = 0, i; 1585 struct ath_regulatory *reg; 1586 1587 DPRINTF(sc, ATH_DBG_CONFIG, "Attach ATH hw\n"); 1588 1589 error = ath_init_softc(devid, sc, subsysid); 1590 if (error != 0) 1591 return error; 1592 1593 /* get mac address from hardware and set in mac80211 */ 1594 1595 SET_IEEE80211_PERM_ADDR(hw, sc->sc_ah->macaddr); 1596 1597 ath_set_hw_capab(sc, hw); 1598 1599 error = ath_regd_init(&sc->common.regulatory, sc->hw->wiphy, 1600 ath9k_reg_notifier); 1601 if (error) 1602 return error; 1603 1604 reg = &sc->common.regulatory; 1605 1606 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { 1607 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap); 1608 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) 1609 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap); 1610 } 1611 1612 /* initialize tx/rx engine */ 1613 error = ath_tx_init(sc, ATH_TXBUF); 1614 if (error != 0) 1615 goto error_attach; 1616 1617 error = ath_rx_init(sc, ATH_RXBUF); 1618 if (error != 0) 1619 goto error_attach; 1620 1621 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work); 1622 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work); 1623 sc->wiphy_scheduler_int = msecs_to_jiffies(500); 1624 1625 error = ieee80211_register_hw(hw); 1626 1627 if (!ath_is_world_regd(reg)) { 1628 error = regulatory_hint(hw->wiphy, reg->alpha2); 1629 if (error) 1630 goto error_attach; 1631 } 1632 1633 /* Initialize LED control */ 1634 ath_init_leds(sc); 1635 1636 ath_start_rfkill_poll(sc); 1637 1638 return 0; 1639 1640 error_attach: 1641 /* cleanup tx queues */ 1642 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) 1643 if (ATH_TXQ_SETUP(sc, i)) 1644 ath_tx_cleanupq(sc, &sc->tx.txq[i]); 1645 1646 ath9k_hw_detach(sc->sc_ah); 1647 sc->sc_ah = NULL; 1648 ath9k_exit_debug(sc); 1649 1650 return error; 1651 } 1652 1653 int ath_reset(struct ath_softc *sc, bool retry_tx) 1654 { 1655 struct ath_hw *ah = sc->sc_ah; 1656 struct ieee80211_hw *hw = sc->hw; 1657 int r; 1658 1659 ath9k_hw_set_interrupts(ah, 0); 1660 ath_drain_all_txq(sc, retry_tx); 1661 ath_stoprecv(sc); 1662 ath_flushrecv(sc); 1663 1664 spin_lock_bh(&sc->sc_resetlock); 1665 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false); 1666 if (r) 1667 DPRINTF(sc, ATH_DBG_FATAL, 1668 "Unable to reset hardware; reset status %d\n", r); 1669 spin_unlock_bh(&sc->sc_resetlock); 1670 1671 if (ath_startrecv(sc) != 0) 1672 DPRINTF(sc, ATH_DBG_FATAL, "Unable to start recv logic\n"); 1673 1674 /* 1675 * We may be doing a reset in response to a request 1676 * that changes the channel so update any state that 1677 * might change as a result. 
1678 */ 1679 ath_cache_conf_rate(sc, &hw->conf); 1680 1681 ath_update_txpow(sc); 1682 1683 if (sc->sc_flags & SC_OP_BEACONS) 1684 ath_beacon_config(sc, NULL); /* restart beacons */ 1685 1686 ath9k_hw_set_interrupts(ah, sc->imask); 1687 1688 if (retry_tx) { 1689 int i; 1690 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1691 if (ATH_TXQ_SETUP(sc, i)) { 1692 spin_lock_bh(&sc->tx.txq[i].axq_lock); 1693 ath_txq_schedule(sc, &sc->tx.txq[i]); 1694 spin_unlock_bh(&sc->tx.txq[i].axq_lock); 1695 } 1696 } 1697 } 1698 1699 return r; 1700 } 1701 1702 /* 1703 * This function will allocate both the DMA descriptor structure, and the 1704 * buffers it contains. These are used to contain the descriptors used 1705 * by the system. 1706 */ 1707 int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, 1708 struct list_head *head, const char *name, 1709 int nbuf, int ndesc) 1710 { 1711 #define DS2PHYS(_dd, _ds) \ 1712 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 1713 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0) 1714 #define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096) 1715 1716 struct ath_desc *ds; 1717 struct ath_buf *bf; 1718 int i, bsize, error; 1719 1720 DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n", 1721 name, nbuf, ndesc); 1722 1723 INIT_LIST_HEAD(head); 1724 /* ath_desc must be a multiple of DWORDs */ 1725 if ((sizeof(struct ath_desc) % 4) != 0) { 1726 DPRINTF(sc, ATH_DBG_FATAL, "ath_desc not DWORD aligned\n"); 1727 ASSERT((sizeof(struct ath_desc) % 4) == 0); 1728 error = -ENOMEM; 1729 goto fail; 1730 } 1731 1732 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc; 1733 1734 /* 1735 * Need additional DMA memory because we can't use 1736 * descriptors that cross the 4K page boundary. Assume 1737 * one skipped descriptor per 4K page. 1738 */ 1739 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) { 1740 u32 ndesc_skipped = 1741 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len); 1742 u32 dma_len; 1743 1744 while (ndesc_skipped) { 1745 dma_len = ndesc_skipped * sizeof(struct ath_desc); 1746 dd->dd_desc_len += dma_len; 1747 1748 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len); 1749 }; 1750 } 1751 1752 /* allocate descriptors */ 1753 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len, 1754 &dd->dd_desc_paddr, GFP_KERNEL); 1755 if (dd->dd_desc == NULL) { 1756 error = -ENOMEM; 1757 goto fail; 1758 } 1759 ds = dd->dd_desc; 1760 DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n", 1761 name, ds, (u32) dd->dd_desc_len, 1762 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len); 1763 1764 /* allocate buffers */ 1765 bsize = sizeof(struct ath_buf) * nbuf; 1766 bf = kzalloc(bsize, GFP_KERNEL); 1767 if (bf == NULL) { 1768 error = -ENOMEM; 1769 goto fail2; 1770 } 1771 dd->dd_bufptr = bf; 1772 1773 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) { 1774 bf->bf_desc = ds; 1775 bf->bf_daddr = DS2PHYS(dd, ds); 1776 1777 if (!(sc->sc_ah->caps.hw_caps & 1778 ATH9K_HW_CAP_4KB_SPLITTRANS)) { 1779 /* 1780 * Skip descriptor addresses which can cause 4KB 1781 * boundary crossing (addr + length) with a 32 dword 1782 * descriptor fetch. 
1783 */ 1784 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) { 1785 ASSERT((caddr_t) bf->bf_desc < 1786 ((caddr_t) dd->dd_desc + 1787 dd->dd_desc_len)); 1788 1789 ds += ndesc; 1790 bf->bf_desc = ds; 1791 bf->bf_daddr = DS2PHYS(dd, ds); 1792 } 1793 } 1794 list_add_tail(&bf->list, head); 1795 } 1796 return 0; 1797 fail2: 1798 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc, 1799 dd->dd_desc_paddr); 1800 fail: 1801 memset(dd, 0, sizeof(*dd)); 1802 return error; 1803 #undef ATH_DESC_4KB_BOUND_CHECK 1804 #undef ATH_DESC_4KB_BOUND_NUM_SKIPPED 1805 #undef DS2PHYS 1806 } 1807 1808 void ath_descdma_cleanup(struct ath_softc *sc, 1809 struct ath_descdma *dd, 1810 struct list_head *head) 1811 { 1812 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc, 1813 dd->dd_desc_paddr); 1814 1815 INIT_LIST_HEAD(head); 1816 kfree(dd->dd_bufptr); 1817 memset(dd, 0, sizeof(*dd)); 1818 } 1819 1820 int ath_get_hal_qnum(u16 queue, struct ath_softc *sc) 1821 { 1822 int qnum; 1823 1824 switch (queue) { 1825 case 0: 1826 qnum = sc->tx.hwq_map[ATH9K_WME_AC_VO]; 1827 break; 1828 case 1: 1829 qnum = sc->tx.hwq_map[ATH9K_WME_AC_VI]; 1830 break; 1831 case 2: 1832 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE]; 1833 break; 1834 case 3: 1835 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BK]; 1836 break; 1837 default: 1838 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE]; 1839 break; 1840 } 1841 1842 return qnum; 1843 } 1844 1845 int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc) 1846 { 1847 int qnum; 1848 1849 switch (queue) { 1850 case ATH9K_WME_AC_VO: 1851 qnum = 0; 1852 break; 1853 case ATH9K_WME_AC_VI: 1854 qnum = 1; 1855 break; 1856 case ATH9K_WME_AC_BE: 1857 qnum = 2; 1858 break; 1859 case ATH9K_WME_AC_BK: 1860 qnum = 3; 1861 break; 1862 default: 1863 qnum = -1; 1864 break; 1865 } 1866 1867 return qnum; 1868 } 1869 1870 /* XXX: Remove me once we don't depend on ath9k_channel for all 1871 * this redundant data */ 1872 void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw, 1873 struct ath9k_channel *ichan) 1874 { 1875 struct ieee80211_channel *chan = hw->conf.channel; 1876 struct ieee80211_conf *conf = &hw->conf; 1877 1878 ichan->channel = chan->center_freq; 1879 ichan->chan = chan; 1880 1881 if (chan->band == IEEE80211_BAND_2GHZ) { 1882 ichan->chanmode = CHANNEL_G; 1883 ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM | CHANNEL_G; 1884 } else { 1885 ichan->chanmode = CHANNEL_A; 1886 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM; 1887 } 1888 1889 sc->tx_chan_width = ATH9K_HT_MACMODE_20; 1890 1891 if (conf_is_ht(conf)) { 1892 if (conf_is_ht40(conf)) 1893 sc->tx_chan_width = ATH9K_HT_MACMODE_2040; 1894 1895 ichan->chanmode = ath_get_extchanmode(sc, chan, 1896 conf->channel_type); 1897 } 1898 } 1899 1900 /**********************/ 1901 /* mac80211 callbacks */ 1902 /**********************/ 1903 1904 static int ath9k_start(struct ieee80211_hw *hw) 1905 { 1906 struct ath_wiphy *aphy = hw->priv; 1907 struct ath_softc *sc = aphy->sc; 1908 struct ieee80211_channel *curchan = hw->conf.channel; 1909 struct ath9k_channel *init_channel; 1910 int r; 1911 1912 DPRINTF(sc, ATH_DBG_CONFIG, "Starting driver with " 1913 "initial channel: %d MHz\n", curchan->center_freq); 1914 1915 mutex_lock(&sc->mutex); 1916 1917 if (ath9k_wiphy_started(sc)) { 1918 if (sc->chan_idx == curchan->hw_value) { 1919 /* 1920 * Already on the operational channel, the new wiphy 1921 * can be marked active. 
1922 */ 1923 aphy->state = ATH_WIPHY_ACTIVE; 1924 ieee80211_wake_queues(hw); 1925 } else { 1926 /* 1927 * Another wiphy is on another channel, start the new 1928 * wiphy in paused state. 1929 */ 1930 aphy->state = ATH_WIPHY_PAUSED; 1931 ieee80211_stop_queues(hw); 1932 } 1933 mutex_unlock(&sc->mutex); 1934 return 0; 1935 } 1936 aphy->state = ATH_WIPHY_ACTIVE; 1937 1938 /* setup initial channel */ 1939 1940 sc->chan_idx = curchan->hw_value; 1941 1942 init_channel = ath_get_curchannel(sc, hw); 1943 1944 /* Reset SERDES registers */ 1945 ath9k_hw_configpcipowersave(sc->sc_ah, 0); 1946 1947 /* 1948 * The basic interface to setting the hardware in a good 1949 * state is ``reset''. On return the hardware is known to 1950 * be powered up and with interrupts disabled. This must 1951 * be followed by initialization of the appropriate bits 1952 * and then setup of the interrupt mask. 1953 */ 1954 spin_lock_bh(&sc->sc_resetlock); 1955 r = ath9k_hw_reset(sc->sc_ah, init_channel, false); 1956 if (r) { 1957 DPRINTF(sc, ATH_DBG_FATAL, 1958 "Unable to reset hardware; reset status %d " 1959 "(freq %u MHz)\n", r, 1960 curchan->center_freq); 1961 spin_unlock_bh(&sc->sc_resetlock); 1962 goto mutex_unlock; 1963 } 1964 spin_unlock_bh(&sc->sc_resetlock); 1965 1966 /* 1967 * This is needed only to setup initial state 1968 * but it's best done after a reset. 1969 */ 1970 ath_update_txpow(sc); 1971 1972 /* 1973 * Setup the hardware after reset: 1974 * The receive engine is set going. 1975 * Frame transmit is handled entirely 1976 * in the frame output path; there's nothing to do 1977 * here except setup the interrupt mask. 1978 */ 1979 if (ath_startrecv(sc) != 0) { 1980 DPRINTF(sc, ATH_DBG_FATAL, "Unable to start recv logic\n"); 1981 r = -EIO; 1982 goto mutex_unlock; 1983 } 1984 1985 /* Setup our intr mask. 
*/ 1986 sc->imask = ATH9K_INT_RX | ATH9K_INT_TX 1987 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN 1988 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL; 1989 1990 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_GTT) 1991 sc->imask |= ATH9K_INT_GTT; 1992 1993 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) 1994 sc->imask |= ATH9K_INT_CST; 1995 1996 ath_cache_conf_rate(sc, &hw->conf); 1997 1998 sc->sc_flags &= ~SC_OP_INVALID; 1999 2000 /* Disable BMISS interrupt when we're not associated */ 2001 sc->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS); 2002 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 2003 2004 ieee80211_wake_queues(hw); 2005 2006 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); 2007 2008 if ((sc->btcoex_info.btcoex_scheme != ATH_BTCOEX_CFG_NONE) && 2009 !(sc->sc_flags & SC_OP_BTCOEX_ENABLED)) { 2010 ath_btcoex_set_weight(&sc->btcoex_info, AR_BT_COEX_WGHT, 2011 AR_STOMP_LOW_WLAN_WGHT); 2012 ath9k_hw_btcoex_enable(sc->sc_ah); 2013 2014 ath_pcie_aspm_disable(sc); 2015 if (sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE) 2016 ath_btcoex_timer_resume(sc, &sc->btcoex_info); 2017 } 2018 2019 mutex_unlock: 2020 mutex_unlock(&sc->mutex); 2021 2022 return r; 2023 } 2024 2025 static int ath9k_tx(struct ieee80211_hw *hw, 2026 struct sk_buff *skb) 2027 { 2028 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2029 struct ath_wiphy *aphy = hw->priv; 2030 struct ath_softc *sc = aphy->sc; 2031 struct ath_tx_control txctl; 2032 int hdrlen, padsize; 2033 2034 if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) { 2035 printk(KERN_DEBUG "ath9k: %s: TX in unexpected wiphy state " 2036 "%d\n", wiphy_name(hw->wiphy), aphy->state); 2037 goto exit; 2038 } 2039 2040 if (sc->ps_enabled) { 2041 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 2042 /* 2043 * mac80211 does not set PM field for normal data frames, so we 2044 * need to update that based on the current PS mode. 2045 */ 2046 if (ieee80211_is_data(hdr->frame_control) && 2047 !ieee80211_is_nullfunc(hdr->frame_control) && 2048 !ieee80211_has_pm(hdr->frame_control)) { 2049 DPRINTF(sc, ATH_DBG_PS, "Add PM=1 for a TX frame " 2050 "while in PS mode\n"); 2051 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); 2052 } 2053 } 2054 2055 if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) { 2056 /* 2057 * We are using PS-Poll and mac80211 can request TX while in 2058 * power save mode. Need to wake up hardware for the TX to be 2059 * completed and if needed, also for RX of buffered frames. 2060 */ 2061 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 2062 ath9k_ps_wakeup(sc); 2063 ath9k_hw_setrxabort(sc->sc_ah, 0); 2064 if (ieee80211_is_pspoll(hdr->frame_control)) { 2065 DPRINTF(sc, ATH_DBG_PS, "Sending PS-Poll to pick a " 2066 "buffered frame\n"); 2067 sc->sc_flags |= SC_OP_WAIT_FOR_PSPOLL_DATA; 2068 } else { 2069 DPRINTF(sc, ATH_DBG_PS, "Wake up to complete TX\n"); 2070 sc->sc_flags |= SC_OP_WAIT_FOR_TX_ACK; 2071 } 2072 /* 2073 * The actual restore operation will happen only after 2074 * the sc_flags bit is cleared. We are just dropping 2075 * the ps_usecount here. 2076 */ 2077 ath9k_ps_restore(sc); 2078 } 2079 2080 memset(&txctl, 0, sizeof(struct ath_tx_control)); 2081 2082 /* 2083 * As a temporary workaround, assign seq# here; this will likely need 2084 * to be cleaned up to work better with Beacon transmission and virtual 2085 * BSSes. 
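	 * The counter is bumped by one sequence number (0x10 in seq_ctrl
	 * units) per MSDU, only on the first fragment, and then patched into
	 * the 802.11 header below.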
 */
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
			sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	/* Add the padding after the header if this is not already done */
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	if (hdrlen & 3) {
		padsize = hdrlen % 4;
		if (skb_headroom(skb) < padsize) {
			/* Not enough headroom to pad; drop the frame the
			 * same way as the other error paths below. */
			goto exit;
		}
		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, hdrlen);
	}

	/* Check if a tx queue is available */

	txctl.txq = ath_test_get_txq(sc, skb);
	if (!txctl.txq)
		goto exit;

	DPRINTF(sc, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);

	if (ath_tx_start(hw, skb, &txctl) != 0) {
		DPRINTF(sc, ATH_DBG_XMIT, "TX failed\n");
		goto exit;
	}

	return 0;
exit:
	dev_kfree_skb_any(skb);
	return 0;
}

static void ath9k_stop(struct ieee80211_hw *hw)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;

	mutex_lock(&sc->mutex);

	aphy->state = ATH_WIPHY_INACTIVE;

	cancel_delayed_work_sync(&sc->ath_led_blink_work);
	cancel_delayed_work_sync(&sc->tx_complete_work);

	if (!sc->num_sec_wiphy) {
		cancel_delayed_work_sync(&sc->wiphy_work);
		cancel_work_sync(&sc->chan_work);
	}

	if (sc->sc_flags & SC_OP_INVALID) {
		DPRINTF(sc, ATH_DBG_ANY, "Device not present\n");
		mutex_unlock(&sc->mutex);
		return;
	}

	if (ath9k_wiphy_started(sc)) {
		mutex_unlock(&sc->mutex);
		return; /* another wiphy still in use */
	}

	if (sc->sc_flags & SC_OP_BTCOEX_ENABLED) {
		ath9k_hw_btcoex_disable(sc->sc_ah);
		if (sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE)
			ath_btcoex_timer_pause(sc, &sc->btcoex_info);
	}

	/* make sure h/w will not generate any interrupt
	 * before setting the invalid flag.
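	 * Disabling interrupts before the flag is set avoids racing with the
	 * ISR while the TX/RX queues are being torn down below.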
*/ 2160 ath9k_hw_set_interrupts(sc->sc_ah, 0); 2161 2162 if (!(sc->sc_flags & SC_OP_INVALID)) { 2163 ath_drain_all_txq(sc, false); 2164 ath_stoprecv(sc); 2165 ath9k_hw_phy_disable(sc->sc_ah); 2166 } else 2167 sc->rx.rxlink = NULL; 2168 2169 wiphy_rfkill_stop_polling(sc->hw->wiphy); 2170 2171 /* disable HAL and put h/w to sleep */ 2172 ath9k_hw_disable(sc->sc_ah); 2173 ath9k_hw_configpcipowersave(sc->sc_ah, 1); 2174 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); 2175 2176 sc->sc_flags |= SC_OP_INVALID; 2177 2178 mutex_unlock(&sc->mutex); 2179 2180 DPRINTF(sc, ATH_DBG_CONFIG, "Driver halt\n"); 2181 } 2182 2183 static int ath9k_add_interface(struct ieee80211_hw *hw, 2184 struct ieee80211_if_init_conf *conf) 2185 { 2186 struct ath_wiphy *aphy = hw->priv; 2187 struct ath_softc *sc = aphy->sc; 2188 struct ath_vif *avp = (void *)conf->vif->drv_priv; 2189 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED; 2190 int ret = 0; 2191 2192 mutex_lock(&sc->mutex); 2193 2194 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) && 2195 sc->nvifs > 0) { 2196 ret = -ENOBUFS; 2197 goto out; 2198 } 2199 2200 switch (conf->type) { 2201 case NL80211_IFTYPE_STATION: 2202 ic_opmode = NL80211_IFTYPE_STATION; 2203 break; 2204 case NL80211_IFTYPE_ADHOC: 2205 case NL80211_IFTYPE_AP: 2206 case NL80211_IFTYPE_MESH_POINT: 2207 if (sc->nbcnvifs >= ATH_BCBUF) { 2208 ret = -ENOBUFS; 2209 goto out; 2210 } 2211 ic_opmode = conf->type; 2212 break; 2213 default: 2214 DPRINTF(sc, ATH_DBG_FATAL, 2215 "Interface type %d not yet supported\n", conf->type); 2216 ret = -EOPNOTSUPP; 2217 goto out; 2218 } 2219 2220 DPRINTF(sc, ATH_DBG_CONFIG, "Attach a VIF of type: %d\n", ic_opmode); 2221 2222 /* Set the VIF opmode */ 2223 avp->av_opmode = ic_opmode; 2224 avp->av_bslot = -1; 2225 2226 sc->nvifs++; 2227 2228 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 2229 ath9k_set_bssid_mask(hw); 2230 2231 if (sc->nvifs > 1) 2232 goto out; /* skip global settings for secondary vif */ 2233 2234 if (ic_opmode == NL80211_IFTYPE_AP) { 2235 ath9k_hw_set_tsfadjust(sc->sc_ah, 1); 2236 sc->sc_flags |= SC_OP_TSF_RESET; 2237 } 2238 2239 /* Set the device opmode */ 2240 sc->sc_ah->opmode = ic_opmode; 2241 2242 /* 2243 * Enable MIB interrupts when there are hardware phy counters. 2244 * Note we only do this (at the moment) for station mode. 
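	 * (The check below also covers ad-hoc and mesh point interfaces.)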
2245 */ 2246 if ((conf->type == NL80211_IFTYPE_STATION) || 2247 (conf->type == NL80211_IFTYPE_ADHOC) || 2248 (conf->type == NL80211_IFTYPE_MESH_POINT)) { 2249 sc->imask |= ATH9K_INT_MIB; 2250 sc->imask |= ATH9K_INT_TSFOOR; 2251 } 2252 2253 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 2254 2255 if (conf->type == NL80211_IFTYPE_AP || 2256 conf->type == NL80211_IFTYPE_ADHOC || 2257 conf->type == NL80211_IFTYPE_MONITOR) 2258 ath_start_ani(sc); 2259 2260 out: 2261 mutex_unlock(&sc->mutex); 2262 return ret; 2263 } 2264 2265 static void ath9k_remove_interface(struct ieee80211_hw *hw, 2266 struct ieee80211_if_init_conf *conf) 2267 { 2268 struct ath_wiphy *aphy = hw->priv; 2269 struct ath_softc *sc = aphy->sc; 2270 struct ath_vif *avp = (void *)conf->vif->drv_priv; 2271 int i; 2272 2273 DPRINTF(sc, ATH_DBG_CONFIG, "Detach Interface\n"); 2274 2275 mutex_lock(&sc->mutex); 2276 2277 /* Stop ANI */ 2278 del_timer_sync(&sc->ani.timer); 2279 2280 /* Reclaim beacon resources */ 2281 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || 2282 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || 2283 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) { 2284 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 2285 ath_beacon_return(sc, avp); 2286 } 2287 2288 sc->sc_flags &= ~SC_OP_BEACONS; 2289 2290 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) { 2291 if (sc->beacon.bslot[i] == conf->vif) { 2292 printk(KERN_DEBUG "%s: vif had allocated beacon " 2293 "slot\n", __func__); 2294 sc->beacon.bslot[i] = NULL; 2295 sc->beacon.bslot_aphy[i] = NULL; 2296 } 2297 } 2298 2299 sc->nvifs--; 2300 2301 mutex_unlock(&sc->mutex); 2302 } 2303 2304 static int ath9k_config(struct ieee80211_hw *hw, u32 changed) 2305 { 2306 struct ath_wiphy *aphy = hw->priv; 2307 struct ath_softc *sc = aphy->sc; 2308 struct ieee80211_conf *conf = &hw->conf; 2309 struct ath_hw *ah = sc->sc_ah; 2310 bool all_wiphys_idle = false, disable_radio = false; 2311 2312 mutex_lock(&sc->mutex); 2313 2314 /* Leave this as the first check */ 2315 if (changed & IEEE80211_CONF_CHANGE_IDLE) { 2316 2317 spin_lock_bh(&sc->wiphy_lock); 2318 all_wiphys_idle = ath9k_all_wiphys_idle(sc); 2319 spin_unlock_bh(&sc->wiphy_lock); 2320 2321 if (conf->flags & IEEE80211_CONF_IDLE){ 2322 if (all_wiphys_idle) 2323 disable_radio = true; 2324 } 2325 else if (all_wiphys_idle) { 2326 ath_radio_enable(sc); 2327 DPRINTF(sc, ATH_DBG_CONFIG, 2328 "not-idle: enabling radio\n"); 2329 } 2330 } 2331 2332 if (changed & IEEE80211_CONF_CHANGE_PS) { 2333 if (conf->flags & IEEE80211_CONF_PS) { 2334 if (!(ah->caps.hw_caps & 2335 ATH9K_HW_CAP_AUTOSLEEP)) { 2336 if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) { 2337 sc->imask |= ATH9K_INT_TIM_TIMER; 2338 ath9k_hw_set_interrupts(sc->sc_ah, 2339 sc->imask); 2340 } 2341 ath9k_hw_setrxabort(sc->sc_ah, 1); 2342 } 2343 sc->ps_enabled = true; 2344 } else { 2345 sc->ps_enabled = false; 2346 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); 2347 if (!(ah->caps.hw_caps & 2348 ATH9K_HW_CAP_AUTOSLEEP)) { 2349 ath9k_hw_setrxabort(sc->sc_ah, 0); 2350 sc->sc_flags &= ~(SC_OP_WAIT_FOR_BEACON | 2351 SC_OP_WAIT_FOR_CAB | 2352 SC_OP_WAIT_FOR_PSPOLL_DATA | 2353 SC_OP_WAIT_FOR_TX_ACK); 2354 if (sc->imask & ATH9K_INT_TIM_TIMER) { 2355 sc->imask &= ~ATH9K_INT_TIM_TIMER; 2356 ath9k_hw_set_interrupts(sc->sc_ah, 2357 sc->imask); 2358 } 2359 } 2360 } 2361 } 2362 2363 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 2364 struct ieee80211_channel *curchan = hw->conf.channel; 2365 int pos = curchan->hw_value; 2366 2367 aphy->chan_idx = pos; 2368 aphy->chan_is_ht = conf_is_ht(conf); 2369 2370 if 
(aphy->state == ATH_WIPHY_SCAN ||
		    aphy->state == ATH_WIPHY_ACTIVE)
			ath9k_wiphy_pause_all_forced(sc, aphy);
		else {
			/*
			 * Do not change the operational channel based on
			 * changes to a paused wiphy.
			 */
			goto skip_chan_change;
		}

		DPRINTF(sc, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
			curchan->center_freq);

		/* XXX: remove me eventually */
		ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]);

		ath_update_chainmask(sc, conf_is_ht(conf));

		if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
			DPRINTF(sc, ATH_DBG_FATAL, "Unable to set channel\n");
			mutex_unlock(&sc->mutex);
			return -EINVAL;
		}
	}

skip_chan_change:
	if (changed & IEEE80211_CONF_CHANGE_POWER)
		sc->config.txpowlimit = 2 * conf->power_level;

	if (disable_radio) {
		DPRINTF(sc, ATH_DBG_CONFIG, "idle: disabling radio\n");
		ath_radio_disable(sc);
	}

	mutex_unlock(&sc->mutex);

	return 0;
}

#define SUPPORTED_FILTERS			\
	(FIF_PROMISC_IN_BSS |			\
	FIF_ALLMULTI |				\
	FIF_CONTROL |				\
	FIF_PSPOLL |				\
	FIF_OTHER_BSS |				\
	FIF_BCN_PRBRESP_PROMISC |		\
	FIF_FCSFAIL)

/* FIXME: sc->sc_full_reset ? */
static void ath9k_configure_filter(struct ieee80211_hw *hw,
				   unsigned int changed_flags,
				   unsigned int *total_flags,
				   u64 multicast)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	u32 rfilt;

	changed_flags &= SUPPORTED_FILTERS;
	*total_flags &= SUPPORTED_FILTERS;

	sc->rx.rxfilter = *total_flags;
	ath9k_ps_wakeup(sc);
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
	ath9k_ps_restore(sc);

	DPRINTF(sc, ATH_DBG_CONFIG, "Set HW RX filter: 0x%x\n", rfilt);
}

static void ath9k_sta_notify(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     enum sta_notify_cmd cmd,
			     struct ieee80211_sta *sta)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;

	switch (cmd) {
	case STA_NOTIFY_ADD:
		ath_node_attach(sc, sta);
		break;
	case STA_NOTIFY_REMOVE:
		ath_node_detach(sc, sta);
		break;
	default:
		break;
	}
}

static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
			 const struct ieee80211_tx_queue_params *params)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath9k_tx_queue_info qi;
	int ret = 0, qnum;

	if (queue >= WME_NUM_AC)
		return 0;

	mutex_lock(&sc->mutex);

	memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));

	qi.tqi_aifs = params->aifs;
	qi.tqi_cwmin = params->cw_min;
	qi.tqi_cwmax = params->cw_max;
	qi.tqi_burstTime = params->txop;

	qnum = ath_get_hal_qnum(queue, sc);

	DPRINTF(sc, ATH_DBG_CONFIG,
		"Configure tx [queue/halq] [%d/%d], "
		"aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
		queue, qnum, params->aifs, params->cw_min,
		params->cw_max, params->txop);

	ret = ath_txq_update(sc, qnum, &qi);
	if (ret)
		DPRINTF(sc, ATH_DBG_FATAL, "TXQ Update failed\n");

	mutex_unlock(&sc->mutex);

	return ret;
}

static int ath9k_set_key(struct ieee80211_hw *hw,
			 enum set_key_cmd cmd,
			 struct ieee80211_vif *vif,
			 struct ieee80211_sta *sta,
			 struct ieee80211_key_conf *key)
{
	struct ath_wiphy *aphy = hw->priv;
2504 struct ath_softc *sc = aphy->sc; 2505 int ret = 0; 2506 2507 if (modparam_nohwcrypt) 2508 return -ENOSPC; 2509 2510 mutex_lock(&sc->mutex); 2511 ath9k_ps_wakeup(sc); 2512 DPRINTF(sc, ATH_DBG_CONFIG, "Set HW Key\n"); 2513 2514 switch (cmd) { 2515 case SET_KEY: 2516 ret = ath_key_config(sc, vif, sta, key); 2517 if (ret >= 0) { 2518 key->hw_key_idx = ret; 2519 /* push IV and Michael MIC generation to stack */ 2520 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 2521 if (key->alg == ALG_TKIP) 2522 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 2523 if (sc->sc_ah->sw_mgmt_crypto && key->alg == ALG_CCMP) 2524 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT; 2525 ret = 0; 2526 } 2527 break; 2528 case DISABLE_KEY: 2529 ath_key_delete(sc, key); 2530 break; 2531 default: 2532 ret = -EINVAL; 2533 } 2534 2535 ath9k_ps_restore(sc); 2536 mutex_unlock(&sc->mutex); 2537 2538 return ret; 2539 } 2540 2541 static void ath9k_bss_info_changed(struct ieee80211_hw *hw, 2542 struct ieee80211_vif *vif, 2543 struct ieee80211_bss_conf *bss_conf, 2544 u32 changed) 2545 { 2546 struct ath_wiphy *aphy = hw->priv; 2547 struct ath_softc *sc = aphy->sc; 2548 struct ath_hw *ah = sc->sc_ah; 2549 struct ath_vif *avp = (void *)vif->drv_priv; 2550 u32 rfilt = 0; 2551 int error, i; 2552 2553 mutex_lock(&sc->mutex); 2554 2555 /* 2556 * TODO: Need to decide which hw opmode to use for 2557 * multi-interface cases 2558 * XXX: This belongs into add_interface! 2559 */ 2560 if (vif->type == NL80211_IFTYPE_AP && 2561 ah->opmode != NL80211_IFTYPE_AP) { 2562 ah->opmode = NL80211_IFTYPE_STATION; 2563 ath9k_hw_setopmode(ah); 2564 memcpy(sc->curbssid, sc->sc_ah->macaddr, ETH_ALEN); 2565 sc->curaid = 0; 2566 ath9k_hw_write_associd(sc); 2567 /* Request full reset to get hw opmode changed properly */ 2568 sc->sc_flags |= SC_OP_FULL_RESET; 2569 } 2570 2571 if ((changed & BSS_CHANGED_BSSID) && 2572 !is_zero_ether_addr(bss_conf->bssid)) { 2573 switch (vif->type) { 2574 case NL80211_IFTYPE_STATION: 2575 case NL80211_IFTYPE_ADHOC: 2576 case NL80211_IFTYPE_MESH_POINT: 2577 /* Set BSSID */ 2578 memcpy(sc->curbssid, bss_conf->bssid, ETH_ALEN); 2579 memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN); 2580 sc->curaid = 0; 2581 ath9k_hw_write_associd(sc); 2582 2583 /* Set aggregation protection mode parameters */ 2584 sc->config.ath_aggr_prot = 0; 2585 2586 DPRINTF(sc, ATH_DBG_CONFIG, 2587 "RX filter 0x%x bssid %pM aid 0x%x\n", 2588 rfilt, sc->curbssid, sc->curaid); 2589 2590 /* need to reconfigure the beacon */ 2591 sc->sc_flags &= ~SC_OP_BEACONS ; 2592 2593 break; 2594 default: 2595 break; 2596 } 2597 } 2598 2599 if ((vif->type == NL80211_IFTYPE_ADHOC) || 2600 (vif->type == NL80211_IFTYPE_AP) || 2601 (vif->type == NL80211_IFTYPE_MESH_POINT)) { 2602 if ((changed & BSS_CHANGED_BEACON) || 2603 (changed & BSS_CHANGED_BEACON_ENABLED && 2604 bss_conf->enable_beacon)) { 2605 /* 2606 * Allocate and setup the beacon frame. 2607 * 2608 * Stop any previous beacon DMA. This may be 2609 * necessary, for example, when an ibss merge 2610 * causes reconfiguration; we may be called 2611 * with beacon transmission active. 2612 */ 2613 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); 2614 2615 error = ath_beacon_alloc(aphy, vif); 2616 if (!error) 2617 ath_beacon_config(sc, vif); 2618 } 2619 } 2620 2621 /* Check for WLAN_CAPABILITY_PRIVACY ? 
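	 * For non-station interfaces, re-associate any valid default key
	 * cache entries with the current BSSID.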
*/ 2622 if ((avp->av_opmode != NL80211_IFTYPE_STATION)) { 2623 for (i = 0; i < IEEE80211_WEP_NKID; i++) 2624 if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i)) 2625 ath9k_hw_keysetmac(sc->sc_ah, 2626 (u16)i, 2627 sc->curbssid); 2628 } 2629 2630 /* Only legacy IBSS for now */ 2631 if (vif->type == NL80211_IFTYPE_ADHOC) 2632 ath_update_chainmask(sc, 0); 2633 2634 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 2635 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n", 2636 bss_conf->use_short_preamble); 2637 if (bss_conf->use_short_preamble) 2638 sc->sc_flags |= SC_OP_PREAMBLE_SHORT; 2639 else 2640 sc->sc_flags &= ~SC_OP_PREAMBLE_SHORT; 2641 } 2642 2643 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 2644 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n", 2645 bss_conf->use_cts_prot); 2646 if (bss_conf->use_cts_prot && 2647 hw->conf.channel->band != IEEE80211_BAND_5GHZ) 2648 sc->sc_flags |= SC_OP_PROTECT_ENABLE; 2649 else 2650 sc->sc_flags &= ~SC_OP_PROTECT_ENABLE; 2651 } 2652 2653 if (changed & BSS_CHANGED_ASSOC) { 2654 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n", 2655 bss_conf->assoc); 2656 ath9k_bss_assoc_info(sc, vif, bss_conf); 2657 } 2658 2659 /* 2660 * The HW TSF has to be reset when the beacon interval changes. 2661 * We set the flag here, and ath_beacon_config_ap() would take this 2662 * into account when it gets called through the subsequent 2663 * config_interface() call - with IFCC_BEACON in the changed field. 2664 */ 2665 2666 if (changed & BSS_CHANGED_BEACON_INT) { 2667 sc->sc_flags |= SC_OP_TSF_RESET; 2668 sc->beacon_interval = bss_conf->beacon_int; 2669 } 2670 2671 mutex_unlock(&sc->mutex); 2672 } 2673 2674 static u64 ath9k_get_tsf(struct ieee80211_hw *hw) 2675 { 2676 u64 tsf; 2677 struct ath_wiphy *aphy = hw->priv; 2678 struct ath_softc *sc = aphy->sc; 2679 2680 mutex_lock(&sc->mutex); 2681 tsf = ath9k_hw_gettsf64(sc->sc_ah); 2682 mutex_unlock(&sc->mutex); 2683 2684 return tsf; 2685 } 2686 2687 static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf) 2688 { 2689 struct ath_wiphy *aphy = hw->priv; 2690 struct ath_softc *sc = aphy->sc; 2691 2692 mutex_lock(&sc->mutex); 2693 ath9k_hw_settsf64(sc->sc_ah, tsf); 2694 mutex_unlock(&sc->mutex); 2695 } 2696 2697 static void ath9k_reset_tsf(struct ieee80211_hw *hw) 2698 { 2699 struct ath_wiphy *aphy = hw->priv; 2700 struct ath_softc *sc = aphy->sc; 2701 2702 mutex_lock(&sc->mutex); 2703 ath9k_hw_reset_tsf(sc->sc_ah); 2704 mutex_unlock(&sc->mutex); 2705 } 2706 2707 static int ath9k_ampdu_action(struct ieee80211_hw *hw, 2708 enum ieee80211_ampdu_mlme_action action, 2709 struct ieee80211_sta *sta, 2710 u16 tid, u16 *ssn) 2711 { 2712 struct ath_wiphy *aphy = hw->priv; 2713 struct ath_softc *sc = aphy->sc; 2714 int ret = 0; 2715 2716 switch (action) { 2717 case IEEE80211_AMPDU_RX_START: 2718 if (!(sc->sc_flags & SC_OP_RXAGGR)) 2719 ret = -ENOTSUPP; 2720 break; 2721 case IEEE80211_AMPDU_RX_STOP: 2722 break; 2723 case IEEE80211_AMPDU_TX_START: 2724 ath_tx_aggr_start(sc, sta, tid, ssn); 2725 ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid); 2726 break; 2727 case IEEE80211_AMPDU_TX_STOP: 2728 ath_tx_aggr_stop(sc, sta, tid); 2729 ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid); 2730 break; 2731 case IEEE80211_AMPDU_TX_OPERATIONAL: 2732 ath_tx_aggr_resume(sc, sta, tid); 2733 break; 2734 default: 2735 DPRINTF(sc, ATH_DBG_FATAL, "Unknown AMPDU action\n"); 2736 } 2737 2738 return ret; 2739 } 2740 2741 static void ath9k_sw_scan_start(struct ieee80211_hw *hw) 2742 { 2743 struct ath_wiphy *aphy = hw->priv; 2744 struct ath_softc *sc = 
aphy->sc; 2745 2746 mutex_lock(&sc->mutex); 2747 if (ath9k_wiphy_scanning(sc)) { 2748 printk(KERN_DEBUG "ath9k: Two wiphys trying to scan at the " 2749 "same time\n"); 2750 /* 2751 * Do not allow the concurrent scanning state for now. This 2752 * could be improved with scanning control moved into ath9k. 2753 */ 2754 mutex_unlock(&sc->mutex); 2755 return; 2756 } 2757 2758 aphy->state = ATH_WIPHY_SCAN; 2759 ath9k_wiphy_pause_all_forced(sc, aphy); 2760 2761 spin_lock_bh(&sc->ani_lock); 2762 sc->sc_flags |= SC_OP_SCANNING; 2763 spin_unlock_bh(&sc->ani_lock); 2764 mutex_unlock(&sc->mutex); 2765 } 2766 2767 static void ath9k_sw_scan_complete(struct ieee80211_hw *hw) 2768 { 2769 struct ath_wiphy *aphy = hw->priv; 2770 struct ath_softc *sc = aphy->sc; 2771 2772 mutex_lock(&sc->mutex); 2773 spin_lock_bh(&sc->ani_lock); 2774 aphy->state = ATH_WIPHY_ACTIVE; 2775 sc->sc_flags &= ~SC_OP_SCANNING; 2776 sc->sc_flags |= SC_OP_FULL_RESET; 2777 spin_unlock_bh(&sc->ani_lock); 2778 ath_beacon_config(sc, NULL); 2779 mutex_unlock(&sc->mutex); 2780 } 2781 2782 struct ieee80211_ops ath9k_ops = { 2783 .tx = ath9k_tx, 2784 .start = ath9k_start, 2785 .stop = ath9k_stop, 2786 .add_interface = ath9k_add_interface, 2787 .remove_interface = ath9k_remove_interface, 2788 .config = ath9k_config, 2789 .configure_filter = ath9k_configure_filter, 2790 .sta_notify = ath9k_sta_notify, 2791 .conf_tx = ath9k_conf_tx, 2792 .bss_info_changed = ath9k_bss_info_changed, 2793 .set_key = ath9k_set_key, 2794 .get_tsf = ath9k_get_tsf, 2795 .set_tsf = ath9k_set_tsf, 2796 .reset_tsf = ath9k_reset_tsf, 2797 .ampdu_action = ath9k_ampdu_action, 2798 .sw_scan_start = ath9k_sw_scan_start, 2799 .sw_scan_complete = ath9k_sw_scan_complete, 2800 .rfkill_poll = ath9k_rfkill_poll_state, 2801 }; 2802 2803 static struct { 2804 u32 version; 2805 const char * name; 2806 } ath_mac_bb_names[] = { 2807 { AR_SREV_VERSION_5416_PCI, "5416" }, 2808 { AR_SREV_VERSION_5416_PCIE, "5418" }, 2809 { AR_SREV_VERSION_9100, "9100" }, 2810 { AR_SREV_VERSION_9160, "9160" }, 2811 { AR_SREV_VERSION_9280, "9280" }, 2812 { AR_SREV_VERSION_9285, "9285" }, 2813 { AR_SREV_VERSION_9287, "9287" } 2814 }; 2815 2816 static struct { 2817 u16 version; 2818 const char * name; 2819 } ath_rf_names[] = { 2820 { 0, "5133" }, 2821 { AR_RAD5133_SREV_MAJOR, "5133" }, 2822 { AR_RAD5122_SREV_MAJOR, "5122" }, 2823 { AR_RAD2133_SREV_MAJOR, "2133" }, 2824 { AR_RAD2122_SREV_MAJOR, "2122" } 2825 }; 2826 2827 /* 2828 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown. 2829 */ 2830 const char * 2831 ath_mac_bb_name(u32 mac_bb_version) 2832 { 2833 int i; 2834 2835 for (i=0; i<ARRAY_SIZE(ath_mac_bb_names); i++) { 2836 if (ath_mac_bb_names[i].version == mac_bb_version) { 2837 return ath_mac_bb_names[i].name; 2838 } 2839 } 2840 2841 return "????"; 2842 } 2843 2844 /* 2845 * Return the RF name. "????" is returned if the RF is unknown. 
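 * (The lookup is by major radio revision; see ath_rf_names above.)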
2846 */ 2847 const char * 2848 ath_rf_name(u16 rf_version) 2849 { 2850 int i; 2851 2852 for (i=0; i<ARRAY_SIZE(ath_rf_names); i++) { 2853 if (ath_rf_names[i].version == rf_version) { 2854 return ath_rf_names[i].name; 2855 } 2856 } 2857 2858 return "????"; 2859 } 2860 2861 static int __init ath9k_init(void) 2862 { 2863 int error; 2864 2865 /* Register rate control algorithm */ 2866 error = ath_rate_control_register(); 2867 if (error != 0) { 2868 printk(KERN_ERR 2869 "ath9k: Unable to register rate control " 2870 "algorithm: %d\n", 2871 error); 2872 goto err_out; 2873 } 2874 2875 error = ath9k_debug_create_root(); 2876 if (error) { 2877 printk(KERN_ERR 2878 "ath9k: Unable to create debugfs root: %d\n", 2879 error); 2880 goto err_rate_unregister; 2881 } 2882 2883 error = ath_pci_init(); 2884 if (error < 0) { 2885 printk(KERN_ERR 2886 "ath9k: No PCI devices found, driver not installed.\n"); 2887 error = -ENODEV; 2888 goto err_remove_root; 2889 } 2890 2891 error = ath_ahb_init(); 2892 if (error < 0) { 2893 error = -ENODEV; 2894 goto err_pci_exit; 2895 } 2896 2897 return 0; 2898 2899 err_pci_exit: 2900 ath_pci_exit(); 2901 2902 err_remove_root: 2903 ath9k_debug_remove_root(); 2904 err_rate_unregister: 2905 ath_rate_control_unregister(); 2906 err_out: 2907 return error; 2908 } 2909 module_init(ath9k_init); 2910 2911 static void __exit ath9k_exit(void) 2912 { 2913 ath_ahb_exit(); 2914 ath_pci_exit(); 2915 ath9k_debug_remove_root(); 2916 ath_rate_control_unregister(); 2917 printk(KERN_INFO "%s: Driver unloaded\n", dev_info); 2918 } 2919 module_exit(ath9k_exit); 2920