/*-
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hardirq.h>
#include <linux/if.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/cache.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/nl80211.h>

#include <net/ieee80211_radiotap.h>

#include <asm/unaligned.h>

#include "base.h"
#include "reg.h"
#include "debug.h"
#include "ani.h"
#include "ath5k.h"
#include "../regd.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

bool ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");

static bool modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");

static bool modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");

static bool ath5k_modparam_no_hw_rfkill_switch;
module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch,
		   bool, S_IRUGO);
MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");


/* Module info */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static int ath5k_init(struct ieee80211_hw *hw);
static int ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
		       bool skip_pcu);

/* Known SREVs */
static const struct ath5k_srev_name srev_names[] = {
#ifdef CONFIG_ATHEROS_AR231X
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R2 },
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R7 },
	{ "2313",	AR5K_VERSION_MAC,	AR5K_SREV_AR2313_R8 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R6 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R7 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R1 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R2 },
#else
	{ "5210",	AR5K_VERSION_MAC,	AR5K_SREV_AR5210 },
	{ "5311",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311 },
	{ "5311A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311A },
	{ "5311B",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311B },
	{ "5211",	AR5K_VERSION_MAC,	AR5K_SREV_AR5211 },
	{ "5212",	AR5K_VERSION_MAC,	AR5K_SREV_AR5212 },
	{ "5213",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213 },
	{ "5213A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213A },
	{ "2413",	AR5K_VERSION_MAC,	AR5K_SREV_AR2413 },
	{ "2414",	AR5K_VERSION_MAC,	AR5K_SREV_AR2414 },
	{ "5424",	AR5K_VERSION_MAC,	AR5K_SREV_AR5424 },
	{ "5413",	AR5K_VERSION_MAC,	AR5K_SREV_AR5413 },
	{ "5414",	AR5K_VERSION_MAC,	AR5K_SREV_AR5414 },
	{ "2415",	AR5K_VERSION_MAC,	AR5K_SREV_AR2415 },
	{ "5416",	AR5K_VERSION_MAC,	AR5K_SREV_AR5416 },
	{ "5418",	AR5K_VERSION_MAC,	AR5K_SREV_AR5418 },
	{ "2425",	AR5K_VERSION_MAC,	AR5K_SREV_AR2425 },
	{ "2417",	AR5K_VERSION_MAC,	AR5K_SREV_AR2417 },
#endif
	{ "xxxxx",	AR5K_VERSION_MAC,	AR5K_SREV_UNKNOWN },
	{ "5110",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5110 },
	{ "5111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111 },
	{ "5111A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111A },
	{ "2111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2111 },
	{ "5112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112 },
	{ "5112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112A },
	{ "5112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112B },
	{ "2112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112 },
	{ "2112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112A },
	{ "2112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112B },
	{ "2413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2413 },
	{ "5413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5413 },
	{ "5424",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5424 },
	{ "5133",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5133 },
#ifdef CONFIG_ATHEROS_AR231X
	{ "2316",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2316 },
	{ "2317",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2317 },
#endif
	{ "xxxxx",	AR5K_VERSION_RAD,	AR5K_SREV_UNKNOWN },
};

static const struct ieee80211_rate ath5k_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ATH5K_RATE_CODE_1M, },
	{ .bitrate = 20,
	  .hw_value = ATH5K_RATE_CODE_2M,
	  .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH5K_RATE_CODE_5_5M,
	  .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH5K_RATE_CODE_11M,
	  .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = ATH5K_RATE_CODE_6M,
	  .flags = 0 },
	{ .bitrate = 90,
	  .hw_value = ATH5K_RATE_CODE_9M,
	  .flags = 0 },
	{ .bitrate = 120,
	  .hw_value = ATH5K_RATE_CODE_12M,
	  .flags = 0 },
	{ .bitrate = 180,
	  .hw_value = ATH5K_RATE_CODE_18M,
	  .flags = 0 },
	{ .bitrate = 240,
	  .hw_value = ATH5K_RATE_CODE_24M,
	  .flags = 0 },
	{ .bitrate = 360,
	  .hw_value = ATH5K_RATE_CODE_36M,
	  .flags = 0 },
	{ .bitrate = 480,
	  .hw_value = ATH5K_RATE_CODE_48M,
	  .flags = 0 },
	{ .bitrate = 540,
	  .hw_value = ATH5K_RATE_CODE_54M,
	  .flags = 0 },
};

/*
 * Extend the 15 bit rx timestamp from the hardware to a full 64 bit TSF:
 * take the current TSF and replace its low 15 bits with rstamp; if those
 * bits have already wrapped past rstamp, step back one 0x8000 period.
 */
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
	u64 tsf = ath5k_hw_get_tsf64(ah);

	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;

	return (tsf & ~0x7fff) | rstamp;
}

const char *
ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;

		/* keep a partial (high nibble) match, but prefer and
		 * stop at an exact one */
		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		if ((val & 0xff) == srev_names[i].sr_val) {
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}

static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	return ath5k_hw_reg_read(ah, reg_offset);
}

static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	ath5k_hw_reg_write(ah, val, reg_offset);
}

static const struct ath_ops ath5k_common_ops = {
	.read = ath5k_ioread32,
	.write = ath5k_iowrite32,
};

/***********************\
* Driver Initialization *
\***********************/

static int ath5k_reg_notifier(struct wiphy *wiphy,
			      struct regulatory_request *request)
{
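	/* Pass the regulatory request on to the common ath regulatory
	 * code, which applies it to our wiphy. */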
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath5k_hw *ah = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);

	return ath_reg_notifier_apply(wiphy, request, regulatory);
}

/********************\
* Channel/mode setup *
\********************/

/*
 * Returns true for the channel numbers used without all_channels modparam.
 */
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
{
	if (band == IEEE80211_BAND_2GHZ && chan <= 14)
		return true;

	return	/* UNII 1,2 */
		(((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
		/* midband */
		((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
		/* UNII-3 */
		((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
		/* 802.11j 5.030-5.080 GHz (20MHz) */
		(chan == 8 || chan == 12 || chan == 16) ||
		/* 802.11j 4.9GHz (20MHz) */
		(chan == 184 || chan == 188 || chan == 192 || chan == 196));
}

static unsigned int
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
		unsigned int mode, unsigned int max)
{
	unsigned int count, size, freq, ch;
	enum ieee80211_band band;

	switch (mode) {
	case AR5K_MODE_11A:
		/* 1..220, but 2GHz frequencies are filtered by check_channel */
		size = 220;
		band = IEEE80211_BAND_5GHZ;
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
		size = 26;
		band = IEEE80211_BAND_2GHZ;
		break;
	default:
		ATH5K_WARN(ah, "bad mode, not copying channels\n");
		return 0;
	}

	count = 0;
	for (ch = 1; ch <= size && count < max; ch++) {
		freq = ieee80211_channel_to_frequency(ch, band);

		if (freq == 0) /* mapping failed - not a standard channel */
			continue;

		/* Write channel info, needed for ath5k_channel_ok() */
		channels[count].center_freq = freq;
		channels[count].band = band;
		channels[count].hw_value = mode;

		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, &channels[count]))
			continue;

		if (!modparam_all_channels &&
		    !ath5k_is_standard_channel(ch, band))
			continue;

		count++;
	}

	return count;
}

static void
ath5k_setup_rate_idx(struct ath5k_hw *ah, struct ieee80211_supported_band *b)
{
	u8 i;

	for (i = 0; i < AR5K_MAX_RATES; i++)
		ah->rate_idx[b->band][i] = -1;

	for (i = 0; i < b->n_bitrates; i++) {
		ah->rate_idx[b->band][b->bitrates[i].hw_value] = i;
		if (b->bitrates[i].hw_value_short)
			ah->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
	}
}

static int
ath5k_setup_bands(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	struct ieee80211_supported_band *sband;
	int max_c, count_c = 0;
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS);
	max_c = ARRAY_SIZE(ah->channels);

	/* 2GHz band */
	sband = &ah->sbands[IEEE80211_BAND_2GHZ];
	sband->band = IEEE80211_BAND_2GHZ;
	sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0];

	if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) {
		/* G mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 12);
		sband->n_bitrates = 12;

		sband->channels = ah->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11G, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	} else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) {
		/* B mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 4);
		sband->n_bitrates = 4;

		/* 5211 only supports B rates and uses 4bit rate codes
		 * (e.g. normally we have 0x1B for 1M, but on 5211 we have
		 * 0x0B), fix them up here:
		 */
		if (ah->ah_version == AR5K_AR5211) {
			for (i = 0; i < 4; i++) {
				sband->bitrates[i].hw_value =
					sband->bitrates[i].hw_value & 0xF;
				sband->bitrates[i].hw_value_short =
					sband->bitrates[i].hw_value_short & 0xF;
			}
		}

		sband->channels = ah->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11B, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	}
	ath5k_setup_rate_idx(ah, sband);

	/* 5GHz band, A mode */
	if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
		sband = &ah->sbands[IEEE80211_BAND_5GHZ];
		sband->band = IEEE80211_BAND_5GHZ;
		sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0];

		memcpy(sband->bitrates, &ath5k_rates[4],
		       sizeof(struct ieee80211_rate) * 8);
		sband->n_bitrates = 8;

		sband->channels = &ah->channels[count_c];
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11A, max_c);

		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
	}
	ath5k_setup_rate_idx(ah, sband);

	ath5k_debug_dump_bands(ah);

	return 0;
}

/*
 * Set/change channels. We always reset the chip.
 * To accomplish this we must first cleanup any pending DMA,
 * then restart things, a la ath5k_init.
 *
 * Called with ah->lock.
 */
int
ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan)
{
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
		  "channel set, resetting (%u -> %u MHz)\n",
		  ah->curchan->center_freq, chan->center_freq);

	/*
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
	 */
	return ath5k_reset(ah, chan, true);
}

void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath5k_vif_iter_data *iter_data = data;
	int i;
	struct ath5k_vif *avf = (void *)vif->drv_priv;

	if (iter_data->hw_macaddr)
		for (i = 0; i < ETH_ALEN; i++)
			iter_data->mask[i] &=
				~(iter_data->hw_macaddr[i] ^ mac[i]);

	if (!iter_data->found_active) {
		iter_data->found_active = true;
		memcpy(iter_data->active_mac, mac, ETH_ALEN);
	}

	if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
		if (ether_addr_equal(iter_data->hw_macaddr, mac))
			iter_data->need_set_hw_addr = false;

	if (!iter_data->any_assoc) {
		if (avf->assoc)
			iter_data->any_assoc = true;
	}

	/* Calculate combined mode - when APs are active, operate in AP mode.
	 * Otherwise use the mode of the new interface. This can currently
	 * only deal with combinations of APs and STAs. Only one ad-hoc
	 * interface is allowed.
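	 * For example, one AP plus one STA yields AP mode, while several
	 * STAs and no AP yield STA mode; n_stas is counted below so that
	 * the caller can enable promiscuous RX for the multi-STA case.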
	 */
	if (avf->opmode == NL80211_IFTYPE_AP)
		iter_data->opmode = NL80211_IFTYPE_AP;
	else {
		if (avf->opmode == NL80211_IFTYPE_STATION)
			iter_data->n_stas++;
		if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
			iter_data->opmode = avf->opmode;
	}
}

void
ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
				   struct ieee80211_vif *vif)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_vif_iter_data iter_data;
	u32 rfilt;

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);
	iter_data.found_active = false;
	iter_data.need_set_hw_addr = true;
	iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
	iter_data.n_stas = 0;

	if (vif)
		ath5k_vif_iter(&iter_data, vif->addr, vif);

	/* Get list of all active MAC addresses */
	ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
						   &iter_data);
	memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN);

	ah->opmode = iter_data.opmode;
	if (ah->opmode == NL80211_IFTYPE_UNSPECIFIED)
		/* Nothing active, default to station mode */
		ah->opmode = NL80211_IFTYPE_STATION;

	ath5k_hw_set_opmode(ah, ah->opmode);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
		  ah->opmode, ath_opmode_to_string(ah->opmode));

	if (iter_data.need_set_hw_addr && iter_data.found_active)
		ath5k_hw_set_lladdr(ah, iter_data.active_mac);

	if (ath5k_hw_hasbssidmask(ah))
		ath5k_hw_set_bssid_mask(ah, ah->bssidmask);

	/* Set up RX Filter */
	if (iter_data.n_stas > 1) {
		/* If you have multiple STA interfaces connected to
		 * different APs, ARPs are not received (most of the time?)
		 * Enabling PROMISC appears to fix that problem.
		 */
		ah->filter_flags |= AR5K_RX_FILTER_PROM;
	}

	rfilt = ah->filter_flags;
	ath5k_hw_set_rx_filter(ah, rfilt);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}

static inline int
ath5k_hw_to_driver_rix(struct ath5k_hw *ah, int hw_rix)
{
	int rix;

	/* return base rate on errors */
	if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
		 "hw_rix out of bounds: %x\n", hw_rix))
		return 0;

	rix = ah->rate_idx[ah->curchan->band][hw_rix];
	if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
		rix = 0;

	return rix;
}

/***************\
* Buffers setup *
\***************/

static
struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_hw *ah, dma_addr_t *skb_addr)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct sk_buff *skb;

	/*
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
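	 * GFP_ATOMIC is used since this is also called from the RX
	 * tasklet (ath5k_tasklet_rx) when replacing a filled buffer.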
	 */
	skb = ath_rxbuf_alloc(common,
			      common->rx_bufsize,
			      GFP_ATOMIC);

	if (!skb) {
		ATH5K_ERR(ah, "can't alloc skbuff of size %u\n",
			  common->rx_bufsize);
		return NULL;
	}

	*skb_addr = dma_map_single(ah->dev,
				   skb->data, common->rx_bufsize,
				   DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(ah->dev, *skb_addr))) {
		ATH5K_ERR(ah, "%s: DMA mapping failed\n", __func__);
		dev_kfree_skb(skb);
		return NULL;
	}
	return skb;
}

static int
ath5k_rxbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct sk_buff *skb = bf->skb;
	struct ath5k_desc *ds;
	int ret;

	if (!skb) {
		skb = ath5k_rx_skb_alloc(ah, &bf->skbaddr);
		if (!skb)
			return -ENOMEM;
		bf->skb = skb;
	}

	/*
	 * Setup descriptors. For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end. As
	 * each additional descriptor is added the previous self-linked
	 * entry is "fixed" naturally. This should be safe even
	 * if DMA is happening. When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list. This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr;	/* link to self */
	ds->ds_data = bf->skbaddr;
	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
	if (ret) {
		ATH5K_ERR(ah, "%s: could not setup RX desc\n", __func__);
		return ret;
	}

	if (ah->rxlink != NULL)
		*ah->rxlink = bf->daddr;
	ah->rxlink = &ds->ds_link;
	return 0;
}

static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath5k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = AR5K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = AR5K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = AR5K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = AR5K_PKT_TYPE_PSPOLL;
	else
		htype = AR5K_PKT_TYPE_NORMAL;

	return htype;
}

static int
ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
		  struct ath5k_txq *txq, int padsize)
{
	struct ath5k_desc *ds = bf->desc;
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
	struct ieee80211_rate *rate;
	unsigned int mrr_rate[3], mrr_tries[3];
	int i, ret;
	u16 hw_rate;
	u16 cts_rate = 0;
	u16 duration = 0;
	u8 rc_flags;

	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;

	/* XXX endianness */
	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
				     DMA_TO_DEVICE);

	rate = ieee80211_get_tx_rate(ah->hw, info);
	if (!rate) {
		ret = -EINVAL;
		goto err_unmap;
	}

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= AR5K_TXDESC_NOACK;

	rc_flags = info->control.rates[0].flags;
	hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
		rate->hw_value_short : rate->hw_value;

	pktlen = skb->len;

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (info->control.hw_key) {
		keyidx = info->control.hw_key->hw_key_idx;
		pktlen += info->control.hw_key->icv_len;
	}
	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		flags |= AR5K_TXDESC_RTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_rts_duration(ah->hw,
			info->control.vif, pktlen, info));
	}
	if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		flags |= AR5K_TXDESC_CTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw,
			info->control.vif, pktlen, info));
	}
	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
		ieee80211_get_hdrlen_from_skb(skb), padsize,
		get_hw_packet_type(skb),
		(ah->power_level * 2),
		hw_rate,
		info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
		cts_rate, duration);
	if (ret)
		goto err_unmap;

	/* Set up MRR descriptor */
	if (ah->ah_capabilities.cap_has_mrr_support) {
		memset(mrr_rate, 0, sizeof(mrr_rate));
		memset(mrr_tries, 0, sizeof(mrr_tries));
		for (i = 0; i < 3; i++) {
			rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
			if (!rate)
				break;

			mrr_rate[i] = rate->hw_value;
			mrr_tries[i] = info->control.rates[i + 1].count;
		}

		ath5k_hw_setup_mrr_tx_desc(ah, ds,
			mrr_rate[0], mrr_tries[0],
			mrr_rate[1], mrr_tries[1],
			mrr_rate[2], mrr_tries[2]);
	}

	ds->ds_link = 0;
	ds->ds_data = bf->skbaddr;

	spin_lock_bh(&txq->lock);
	list_add_tail(&bf->list, &txq->q);
	txq->txq_len++;
	if (txq->link == NULL)
		/* is this first packet?
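		 * If so, the hardware TXDP register must be pointed
		 * at our descriptor; otherwise it is chained to the
		 * previous descriptor via *txq->link (its ds_link).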
		 */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;

	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	mmiowb();
	spin_unlock_bh(&txq->lock);

	return 0;
err_unmap:
	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}

/*******************\
* Descriptors setup *
\*******************/

static int
ath5k_desc_alloc(struct ath5k_hw *ah)
{
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	dma_addr_t da;
	unsigned int i;
	int ret;

	/* allocate descriptors */
	ah->desc_len = sizeof(struct ath5k_desc) *
			(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);

	ah->desc = dma_alloc_coherent(ah->dev, ah->desc_len,
				&ah->desc_daddr, GFP_KERNEL);
	if (ah->desc == NULL) {
		ATH5K_ERR(ah, "can't allocate descriptors\n");
		ret = -ENOMEM;
		goto err;
	}
	ds = ah->desc;
	da = ah->desc_daddr;
	ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
		ds, ah->desc_len, (unsigned long long)ah->desc_daddr);

	bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
			sizeof(struct ath5k_buf), GFP_KERNEL);
	if (bf == NULL) {
		ATH5K_ERR(ah, "can't allocate bufptr\n");
		ret = -ENOMEM;
		goto err_free;
	}
	ah->bufptr = bf;

	INIT_LIST_HEAD(&ah->rxbuf);
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->rxbuf);
	}

	INIT_LIST_HEAD(&ah->txbuf);
	ah->txbuf_len = ATH_TXBUF;
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->txbuf);
	}

	/* beacon buffers */
	INIT_LIST_HEAD(&ah->bcbuf);
	for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->bcbuf);
	}

	return 0;
err_free:
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
err:
	ah->desc = NULL;
	return ret;
}

void
ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
			DMA_TO_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

void
ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct ath_common *common = ath5k_hw_common(ah);

	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(ah->dev, bf->skbaddr, common->rx_bufsize,
			DMA_FROM_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

static void
ath5k_desc_free(struct ath5k_hw *ah)
{
	struct ath5k_buf *bf;

	list_for_each_entry(bf, &ah->txbuf, list)
		ath5k_txbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->rxbuf, list)
		ath5k_rxbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->bcbuf, list)
		ath5k_txbuf_free_skb(ah, bf);

	/* Free memory associated with all descriptors */
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
	ah->desc = NULL;
	ah->desc_daddr = 0;

	kfree(ah->bufptr);
	ah->bufptr = NULL;
}


/**************\
* Queues setup *
\**************/

static struct ath5k_txq *
ath5k_txq_setup(struct ath5k_hw *ah, int qtype, int subtype)
{
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX
	};
	int qnum;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
			AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return ERR_PTR(qnum);
	}
	txq = &ah->txqs[qnum];
	if (!txq->setup) {
		txq->qnum = qnum;
		txq->link = NULL;
		INIT_LIST_HEAD(&txq->q);
		spin_lock_init(&txq->lock);
		txq->setup = true;
		txq->txq_len = 0;
		txq->txq_max = ATH5K_TXQ_LEN_MAX;
		txq->txq_poll_mark = false;
		txq->txq_stuck = 0;
	}
	return &ah->txqs[qnum];
}

static int
ath5k_beaconq_setup(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi = {
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX,
		/* NB: for dynamic turbo, don't enable any other interrupts */
		.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
	};

	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
}

static int
ath5k_beaconq_config(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi;
	int ret;

	ret = ath5k_hw_get_tx_queueprops(ah, ah->bhalq, &qi);
	if (ret)
		goto err;

	if (ah->opmode == NL80211_IFTYPE_AP ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		/*
		 * Always burst out beacon and CAB traffic
		 * (aifs = cwmin = cwmax = 0)
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 0;
	} else if (ah->opmode == NL80211_IFTYPE_ADHOC) {
		/*
		 * Adhoc mode; backoff between 0 and (2 * cw_min).
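		 * A random backoff presumably keeps the IBSS members
		 * from all transmitting their beacons at the same TBTT.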
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
	}

	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
		"beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
		qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);

	ret = ath5k_hw_set_tx_queueprops(ah, ah->bhalq, &qi);
	if (ret) {
		ATH5K_ERR(ah, "%s: unable to update parameters for beacon "
			"hardware queue!\n", __func__);
		goto err;
	}
	ret = ath5k_hw_reset_tx_queue(ah, ah->bhalq); /* push to h/w */
	if (ret)
		goto err;

	/* reconfigure cabq with ready time to 80% of beacon_interval */
	ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	qi.tqi_ready_time = (ah->bintval * 80) / 100;
	ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
err:
	return ret;
}

/**
 * ath5k_drain_tx_buffs - Empty tx buffers
 *
 * @ah: The &struct ath5k_hw
 *
 * Empty tx buffers from all queues in preparation
 * for a reset or during shutdown.
 *
 * NB: this assumes output has been stopped and
 * we do not need to block ath5k_tx_tasklet
 */
static void
ath5k_drain_tx_buffs(struct ath5k_hw *ah)
{
	struct ath5k_txq *txq;
	struct ath5k_buf *bf, *bf0;
	int i;

	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
		if (ah->txqs[i].setup) {
			txq = &ah->txqs[i];
			spin_lock_bh(&txq->lock);
			list_for_each_entry_safe(bf, bf0, &txq->q, list) {
				ath5k_debug_printtxbuf(ah, bf);

				ath5k_txbuf_free_skb(ah, bf);

				spin_lock_bh(&ah->txbuflock);
				list_move_tail(&bf->list, &ah->txbuf);
				ah->txbuf_len++;
				txq->txq_len--;
				spin_unlock_bh(&ah->txbuflock);
			}
			txq->link = NULL;
			txq->txq_poll_mark = false;
			spin_unlock_bh(&txq->lock);
		}
	}
}

static void
ath5k_txq_release(struct ath5k_hw *ah)
{
	struct ath5k_txq *txq = ah->txqs;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ah->txqs); i++, txq++)
		if (txq->setup) {
			ath5k_hw_release_tx_queue(ah, txq->qnum);
			txq->setup = false;
		}
}


/*************\
* RX Handling *
\*************/

/*
 * Enable the receive h/w following a reset.
 */
static int
ath5k_rx_start(struct ath5k_hw *ah)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	int ret;

	common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);

	ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
		  common->cachelsz, common->rx_bufsize);

	spin_lock_bh(&ah->rxbuflock);
	ah->rxlink = NULL;
	list_for_each_entry(bf, &ah->rxbuf, list) {
		ret = ath5k_rxbuf_setup(ah, bf);
		if (ret != 0) {
			spin_unlock_bh(&ah->rxbuflock);
			goto err;
		}
	}
	bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
	ath5k_hw_set_rxdp(ah, bf->daddr);
	spin_unlock_bh(&ah->rxbuflock);

	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
	ath5k_update_bssid_mask_and_opmode(ah, NULL); /* set filters, etc. */
	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */

	return 0;
err:
	return ret;
}

/*
 * Disable the receive logic on PCU (DRU)
 * In preparation for a shutdown.
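 * The RX filter is cleared first so that no new frames are
 * accepted while the PCU is being stopped.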
 *
 * Note: Doesn't stop rx DMA, ath5k_hw_dma_stop
 * does.
 */
static void
ath5k_rx_stop(struct ath5k_hw *ah)
{

	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */

	ath5k_debug_printrxbuffs(ah);
}

static unsigned int
ath5k_rx_decrypted(struct ath5k_hw *ah, struct sk_buff *skb,
		   struct ath5k_rx_status *rs)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int keyix, hlen;

	if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
	    rs->rs_keyix != AR5K_RXKEYIX_INVALID)
		return RX_FLAG_DECRYPTED;

	/* Apparently when a default key is used to decrypt the packet
	   the hw does not set the index used to decrypt. In such cases
	   get the index from the packet. */
	hlen = ieee80211_hdrlen(hdr->frame_control);
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
	    skb->len >= hlen + 4) {
		keyix = skb->data[hlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			return RX_FLAG_DECRYPTED;
	}

	return 0;
}


static void
ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
		     struct ieee80211_rx_status *rxs)
{
	struct ath_common *common = ath5k_hw_common(ah);
	u64 tsf, bc_tstamp;
	u32 hw_tu;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;

	if (ieee80211_is_beacon(mgmt->frame_control) &&
	    le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
	    ether_addr_equal(mgmt->bssid, common->curbssid)) {
		/*
		 * Received an IBSS beacon with the same BSSID. Hardware *must*
		 * have updated the local TSF. We have to work around various
		 * hardware bugs, though...
		 */
		tsf = ath5k_hw_get_tsf64(ah);
		bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
		hw_tu = TSF_TO_TU(tsf);

		ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
			"beacon %llx mactime %llx (diff %lld) tsf now %llx\n",
			(unsigned long long)bc_tstamp,
			(unsigned long long)rxs->mactime,
			(unsigned long long)(rxs->mactime - bc_tstamp),
			(unsigned long long)tsf);

		/*
		 * Sometimes the HW will give us a wrong tstamp in the rx
		 * status, causing the timestamp extension to go wrong.
		 * (This seems to happen especially with beacon frames bigger
		 * than 78 bytes (incl. FCS))
		 * But we know that the receive timestamp must be later than the
		 * timestamp of the beacon since HW must have synced to that.
		 *
		 * NOTE: here we assume mactime to be after the frame was
		 * received, not like mac80211 which defines it at the start.
		 */
		if (bc_tstamp > rxs->mactime) {
			ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
				"fixing mactime from %llx to %llx\n",
				(unsigned long long)rxs->mactime,
				(unsigned long long)tsf);
			rxs->mactime = tsf;
		}

		/*
		 * Local TSF might have moved higher than our beacon timers,
		 * in that case we have to update them to continue sending
		 * beacons. This also takes care of synchronizing beacon sending
		 * times with other stations.
		 */
		if (hw_tu >= ah->nexttbtt)
			ath5k_beacon_update_timers(ah, bc_tstamp);

		/* Check if the beacon timers are still correct, because a TSF
		 * update might have created a window between them - for a
		 * longer description see the comment of this function: */
		if (!ath5k_hw_check_beacon_timers(ah, ah->bintval)) {
			ath5k_beacon_update_timers(ah, bc_tstamp);
			ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
				"fixed beacon timers after beacon receive\n");
		}
	}
}

static void
ath5k_update_beacon_rssi(struct ath5k_hw *ah, struct sk_buff *skb, int rssi)
{
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ath_common *common = ath5k_hw_common(ah);

	/* only beacons from our BSSID */
	if (!ieee80211_is_beacon(mgmt->frame_control) ||
	    !ether_addr_equal(mgmt->bssid, common->curbssid))
		return;

	ewma_add(&ah->ah_beacon_rssi_avg, rssi);

	/* in IBSS mode we should keep RSSI statistics per neighbour */
	/* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
}

/*
 * Compute padding position. skb must contain an IEEE 802.11 frame
 */
static int ath5k_common_padpos(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 frame_control = hdr->frame_control;
	int padpos = 24;

	if (ieee80211_has_a4(frame_control))
		padpos += ETH_ALEN;

	if (ieee80211_is_data_qos(frame_control))
		padpos += IEEE80211_QOS_CTL_LEN;

	return padpos;
}

/*
 * This function expects an 802.11 frame and returns the number of
 * bytes added, or -1 if we don't have enough header room.
 */
static int ath5k_add_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;

	if (padsize && skb->len > padpos) {

		if (skb_headroom(skb) < padsize)
			return -1;

		skb_push(skb, padsize);
		memmove(skb->data, skb->data + padsize, padpos);
		return padsize;
	}

	return 0;
}

/*
 * The MAC header is padded to a 32-bit boundary if the
 * packet payload is non-zero. The general calculation for
 * padsize would take into account odd header lengths:
 * padsize = 4 - (hdrlen & 3); however, since only
 * even-length headers are used, padding can only be 0 or 2
 * bytes and we can optimize this a bit. We must not try to
 * remove padding from short control frames that do not have a
 * payload.
 *
 * This function expects an 802.11 frame and returns the number of
 * bytes removed.
 */
static int ath5k_remove_padding(struct sk_buff *skb)
{
	int padpos = ath5k_common_padpos(skb);
	int padsize = padpos & 3;

	if (padsize && skb->len >= padpos + padsize) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
		return padsize;
	}

	return 0;
}

static void
ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
		    struct ath5k_rx_status *rs)
{
	struct ieee80211_rx_status *rxs;

	ath5k_remove_padding(skb);

	rxs = IEEE80211_SKB_RXCB(skb);

	rxs->flag = 0;
	if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	/*
	 * always extend the mac timestamp, since this information is
	 * also needed for proper IBSS merging.
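	 * (see ath5k_extend_tsf() for how the 15 bit rs_tstamp is
	 * merged into the full 64 bit TSF)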
	 *
	 * XXX: it might be too late to do it here, since rs_tstamp is
	 * 15bit only. that means TSF extension has to be done within
	 * 32768usec (about 32ms). it might be necessary to move this to
	 * the interrupt handler, like it is done in madwifi.
	 *
	 * Unfortunately we don't know when the hardware takes the rx
	 * timestamp (beginning of phy frame, data frame, end of rx?).
	 * The only thing we know is that it is hardware specific...
	 * On AR5213 it seems the rx timestamp is at the end of the
	 * frame, but I'm not sure.
	 *
	 * NOTE: mac80211 defines mactime at the beginning of the first
	 * data symbol. Since we don't have any time references it's
	 * impossible to comply with that. This affects IBSS merge only
	 * right now, so it's not too bad...
	 */
	rxs->mactime = ath5k_extend_tsf(ah, rs->rs_tstamp);
	rxs->flag |= RX_FLAG_MACTIME_MPDU;

	rxs->freq = ah->curchan->center_freq;
	rxs->band = ah->curchan->band;

	rxs->signal = ah->ah_noise_floor + rs->rs_rssi;

	rxs->antenna = rs->rs_antenna;

	if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
		ah->stats.antenna_rx[rs->rs_antenna]++;
	else
		ah->stats.antenna_rx[0]++; /* invalid */

	rxs->rate_idx = ath5k_hw_to_driver_rix(ah, rs->rs_rate);
	rxs->flag |= ath5k_rx_decrypted(ah, skb, rs);

	if (rxs->rate_idx >= 0 && rs->rs_rate ==
	    ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
		rxs->flag |= RX_FLAG_SHORTPRE;

	trace_ath5k_rx(ah, skb);

	ath5k_update_beacon_rssi(ah, skb, rs->rs_rssi);

	/* check beacons in IBSS mode */
	if (ah->opmode == NL80211_IFTYPE_ADHOC)
		ath5k_check_ibss_tsf(ah, skb, rxs);

	ieee80211_rx(ah->hw, skb);
}

/** ath5k_receive_frame_ok() - Do we want to receive this frame or not?
 *
 * Check if we want to further process this frame or not. Also update
 * statistics. Return true if we want this frame, false if not.
 */
static bool
ath5k_receive_frame_ok(struct ath5k_hw *ah, struct ath5k_rx_status *rs)
{
	ah->stats.rx_all_count++;
	ah->stats.rx_bytes_count += rs->rs_datalen;

	if (unlikely(rs->rs_status)) {
		if (rs->rs_status & AR5K_RXERR_CRC)
			ah->stats.rxerr_crc++;
		if (rs->rs_status & AR5K_RXERR_FIFO)
			ah->stats.rxerr_fifo++;
		if (rs->rs_status & AR5K_RXERR_PHY) {
			ah->stats.rxerr_phy++;
			if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
				ah->stats.rxerr_phy_code[rs->rs_phyerr]++;
			return false;
		}
		if (rs->rs_status & AR5K_RXERR_DECRYPT) {
			/*
			 * Decrypt error. If the error occurred
			 * because there was no hardware key, then
			 * let the frame through so the upper layers
			 * can process it. This is necessary for 5210
			 * parts which have no way to setup a ``clear''
			 * key cache entry.
			 *
			 * XXX do key cache faulting
			 */
			ah->stats.rxerr_decrypt++;
			if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
			    !(rs->rs_status & AR5K_RXERR_CRC))
				return true;
		}
		if (rs->rs_status & AR5K_RXERR_MIC) {
			ah->stats.rxerr_mic++;
			return true;
		}

		/* reject any frames with non-crypto errors */
		if (rs->rs_status & ~(AR5K_RXERR_DECRYPT))
			return false;
	}

	if (unlikely(rs->rs_more)) {
		ah->stats.rxerr_jumbo++;
		return false;
	}
	return true;
}

static void
ath5k_set_current_imask(struct ath5k_hw *ah)
{
	enum ath5k_int imask;
	unsigned long flags;

	spin_lock_irqsave(&ah->irqlock, flags);
	imask = ah->imask;
	if (ah->rx_pending)
		imask &= ~AR5K_INT_RX_ALL;
	if (ah->tx_pending)
		imask &= ~AR5K_INT_TX_ALL;
	ath5k_hw_set_imr(ah, imask);
	spin_unlock_irqrestore(&ah->irqlock, flags);
}

static void
ath5k_tasklet_rx(unsigned long data)
{
	struct ath5k_rx_status rs = {};
	struct sk_buff *skb, *next_skb;
	dma_addr_t next_skb_addr;
	struct ath5k_hw *ah = (void *)data;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	struct ath5k_desc *ds;
	int ret;

	spin_lock(&ah->rxbuflock);
	if (list_empty(&ah->rxbuf)) {
		ATH5K_WARN(ah, "empty rx buf pool\n");
		goto unlock;
	}
	do {
		bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
		BUG_ON(bf->skb == NULL);
		skb = bf->skb;
		ds = bf->desc;

		/* bail if HW is still using self-linked descriptor */
		if (ath5k_hw_get_rxdp(ah) == bf->daddr)
			break;

		ret = ah->ah_proc_rx_desc(ah, ds, &rs);
		if (unlikely(ret == -EINPROGRESS))
			break;
		else if (unlikely(ret)) {
			ATH5K_ERR(ah, "error in processing rx descriptor\n");
			ah->stats.rxerr_proc++;
			break;
		}

		if (ath5k_receive_frame_ok(ah, &rs)) {
			next_skb = ath5k_rx_skb_alloc(ah, &next_skb_addr);

			/*
			 * If we can't replace bf->skb with a new skb under
			 * memory pressure, just skip this packet
			 */
			if (!next_skb)
				goto next;

			dma_unmap_single(ah->dev, bf->skbaddr,
					 common->rx_bufsize,
					 DMA_FROM_DEVICE);

			skb_put(skb, rs.rs_datalen);

			ath5k_receive_frame(ah, skb, &rs);

			bf->skb = next_skb;
			bf->skbaddr = next_skb_addr;
		}
next:
		list_move_tail(&bf->list, &ah->rxbuf);
	} while (ath5k_rxbuf_setup(ah, bf) == 0);
unlock:
	spin_unlock(&ah->rxbuflock);
	ah->rx_pending = false;
	ath5k_set_current_imask(ah);
}


/*************\
* TX Handling *
\*************/

void
ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
	       struct ath5k_txq *txq)
{
	struct ath5k_hw *ah = hw->priv;
	struct ath5k_buf *bf;
	unsigned long flags;
	int padsize;

	trace_ath5k_tx(ah, skb, txq);

	/*
	 * The hardware expects the header padded to 4 byte boundaries.
	 * If this is not the case, we add the padding after the header.
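	 * (e.g. a QoS data frame has a 26 byte header, so 2 bytes of
	 * padding are inserted to align the payload on 4 bytes)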
	 */
	padsize = ath5k_add_padding(skb);
	if (padsize < 0) {
		ATH5K_ERR(ah, "tx hdrlen not %%4: not enough"
			  " headroom to pad");
		goto drop_packet;
	}

	if (txq->txq_len >= txq->txq_max &&
	    txq->qnum <= AR5K_TX_QUEUE_ID_DATA_MAX)
		ieee80211_stop_queue(hw, txq->qnum);

	spin_lock_irqsave(&ah->txbuflock, flags);
	if (list_empty(&ah->txbuf)) {
		ATH5K_ERR(ah, "no further txbuf available, dropping packet\n");
		spin_unlock_irqrestore(&ah->txbuflock, flags);
		ieee80211_stop_queues(hw);
		goto drop_packet;
	}
	bf = list_first_entry(&ah->txbuf, struct ath5k_buf, list);
	list_del(&bf->list);
	ah->txbuf_len--;
	if (list_empty(&ah->txbuf))
		ieee80211_stop_queues(hw);
	spin_unlock_irqrestore(&ah->txbuflock, flags);

	bf->skb = skb;

	if (ath5k_txbuf_setup(ah, bf, txq, padsize)) {
		bf->skb = NULL;
		spin_lock_irqsave(&ah->txbuflock, flags);
		list_add_tail(&bf->list, &ah->txbuf);
		ah->txbuf_len++;
		spin_unlock_irqrestore(&ah->txbuflock, flags);
		goto drop_packet;
	}
	return;

drop_packet:
	dev_kfree_skb_any(skb);
}

static void
ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
			 struct ath5k_txq *txq, struct ath5k_tx_status *ts)
{
	struct ieee80211_tx_info *info;
	u8 tries[3];
	int i;

	ah->stats.tx_all_count++;
	ah->stats.tx_bytes_count += skb->len;
	info = IEEE80211_SKB_CB(skb);

	tries[0] = info->status.rates[0].count;
	tries[1] = info->status.rates[1].count;
	tries[2] = info->status.rates[2].count;

	ieee80211_tx_info_clear_status(info);

	for (i = 0; i < ts->ts_final_idx; i++) {
		struct ieee80211_tx_rate *r =
			&info->status.rates[i];

		r->count = tries[i];
	}

	info->status.rates[ts->ts_final_idx].count = ts->ts_final_retry;
	info->status.rates[ts->ts_final_idx + 1].idx = -1;

	if (unlikely(ts->ts_status)) {
		ah->stats.ack_fail++;
		if (ts->ts_status & AR5K_TXERR_FILT) {
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			ah->stats.txerr_filt++;
		}
		if (ts->ts_status & AR5K_TXERR_XRETRY)
			ah->stats.txerr_retry++;
		if (ts->ts_status & AR5K_TXERR_FIFO)
			ah->stats.txerr_fifo++;
	} else {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ts->ts_rssi;

		/* count the successful attempt as well */
		info->status.rates[ts->ts_final_idx].count++;
	}

	/*
	 * Remove MAC header padding before giving the frame
	 * back to mac80211.
	 */
	ath5k_remove_padding(skb);

	if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
		ah->stats.antenna_tx[ts->ts_antenna]++;
	else
		ah->stats.antenna_tx[0]++; /* invalid */

	trace_ath5k_tx_complete(ah, skb, txq, ts);
	ieee80211_tx_status(ah->hw, skb);
}

static void
ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq)
{
	struct ath5k_tx_status ts = {};
	struct ath5k_buf *bf, *bf0;
	struct ath5k_desc *ds;
	struct sk_buff *skb;
	int ret;

	spin_lock(&txq->lock);
	list_for_each_entry_safe(bf, bf0, &txq->q, list) {

		txq->txq_poll_mark = false;

		/* skb might already have been processed last time.
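		 * We always keep the last descriptor on the queue to
		 * avoid a race with the hardware (see below), so a buf
		 * can show up here again with its skb already reaped.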
		 */
		if (bf->skb != NULL) {
			ds = bf->desc;

			ret = ah->ah_proc_tx_desc(ah, ds, &ts);
			if (unlikely(ret == -EINPROGRESS))
				break;
			else if (unlikely(ret)) {
				ATH5K_ERR(ah,
					"error %d while processing "
					"queue %u\n", ret, txq->qnum);
				break;
			}

			skb = bf->skb;
			bf->skb = NULL;

			dma_unmap_single(ah->dev, bf->skbaddr, skb->len,
					DMA_TO_DEVICE);
			ath5k_tx_frame_completed(ah, skb, txq, &ts);
		}

		/*
		 * It's possible that the hardware can say the buffer is
		 * completed when it hasn't yet loaded the ds_link from
		 * host memory and moved on.
		 * Always keep the last descriptor to avoid HW races...
		 */
		if (ath5k_hw_get_txdp(ah, txq->qnum) != bf->daddr) {
			spin_lock(&ah->txbuflock);
			list_move_tail(&bf->list, &ah->txbuf);
			ah->txbuf_len++;
			txq->txq_len--;
			spin_unlock(&ah->txbuflock);
		}
	}
	spin_unlock(&txq->lock);
	if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)
		ieee80211_wake_queue(ah->hw, txq->qnum);
}

static void
ath5k_tasklet_tx(unsigned long data)
{
	int i;
	struct ath5k_hw *ah = (void *)data;

	for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
		if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
			ath5k_tx_processq(ah, &ah->txqs[i]);

	ah->tx_pending = false;
	ath5k_set_current_imask(ah);
}


/*****************\
* Beacon handling *
\*****************/

/*
 * Setup the beacon frame for transmit.
 */
static int
ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath5k_desc *ds;
	int ret = 0;
	u8 antenna;
	u32 flags;
	const int padsize = 0;

	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
			DMA_TO_DEVICE);
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
			"skbaddr %llx\n", skb, skb->data, skb->len,
			(unsigned long long)bf->skbaddr);

	if (dma_mapping_error(ah->dev, bf->skbaddr)) {
		ATH5K_ERR(ah, "beacon DMA mapping failed\n");
		dev_kfree_skb_any(skb);
		bf->skb = NULL;
		return -EIO;
	}

	ds = bf->desc;
	antenna = ah->ah_tx_ant;

	flags = AR5K_TXDESC_NOACK;
	if (ah->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
		ds->ds_link = bf->daddr;	/* self-linked */
		flags |= AR5K_TXDESC_VEOL;
	} else
		ds->ds_link = 0;

	/*
	 * If we use multiple antennas on AP and use
	 * the Sectored AP scenario, switch antenna every
	 * 4 beacons to make sure everybody hears our AP.
	 * When a client tries to associate, hw will keep
	 * track of the tx antenna to be used for this client
	 * automatically, based on ACKed packets.
	 *
	 * Note: AP still listens and transmits RTS on the
	 * default antenna which is supposed to be an omni.
	 *
	 * Note2: On sectored scenarios it's possible to have
	 * multiple antennas (1 omni -- the default -- and 14
	 * sectors), so if we choose to actually support this
	 * mode, we need to allow the user to set how many antennas
	 * we have and tweak the code below to send beacons
	 * on all of them.
	 */
	if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
		antenna = ah->bsent & 4 ? 2 : 1;


	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	ds->ds_data = bf->skbaddr;
	ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
			ieee80211_get_hdrlen_from_skb(skb), padsize,
			AR5K_PKT_TYPE_BEACON, (ah->power_level * 2),
			ieee80211_get_tx_rate(ah->hw, info)->hw_value,
			1, AR5K_TXKEYIX_INVALID,
			antenna, flags, 0, 0);
	if (ret)
		goto err_unmap;

	return 0;
err_unmap:
	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}

/*
 * Updates the beacon that is sent by ath5k_beacon_send. For adhoc,
 * this is called only once at config_bss time, for AP we do it every
 * SWBA interrupt so that the TIM will reflect buffered frames.
 *
 * Called with the beacon lock.
 */
int
ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	int ret;
	struct ath5k_hw *ah = hw->priv;
	struct ath5k_vif *avf = (void *)vif->drv_priv;
	struct sk_buff *skb;

	if (WARN_ON(!vif)) {
		ret = -EINVAL;
		goto out;
	}

	skb = ieee80211_beacon_get(hw, vif);

	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	ath5k_txbuf_free_skb(ah, avf->bbuf);
	avf->bbuf->skb = skb;
	ret = ath5k_beacon_setup(ah, avf->bbuf);
out:
	return ret;
}

/*
 * Transmit a beacon frame at SWBA. Dynamic updates to the
 * frame contents are done as needed and the slot time is
 * also adjusted based on current state.
 *
 * This is called from software irq context (beacontq tasklets)
 * or user context from ath5k_beacon_config.
 */
static void
ath5k_beacon_send(struct ath5k_hw *ah)
{
	struct ieee80211_vif *vif;
	struct ath5k_vif *avf;
	struct ath5k_buf *bf;
	struct sk_buff *skb;
	int err;

	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n");

	/*
	 * Check if the previous beacon has gone out. If
	 * not, don't try to post another: skip this
	 * period and wait for the next. Missed beacons
	 * indicate a problem and should not occur. If we
	 * miss too many consecutive beacons reset the device.
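	 * (the threshold used below is 10 misses, admittedly a guess)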
	 */
	if (unlikely(ath5k_hw_num_tx_pending(ah, ah->bhalq) != 0)) {
		ah->bmisscount++;
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"missed %u consecutive beacons\n", ah->bmisscount);
		if (ah->bmisscount > 10) {	/* NB: 10 is a guess */
			ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
				"stuck beacon time (%u missed)\n",
				ah->bmisscount);
			ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
				  "stuck beacon, resetting\n");
			ieee80211_queue_work(ah->hw, &ah->reset_work);
		}
		return;
	}
	if (unlikely(ah->bmisscount != 0)) {
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"resume beacon xmit after %u misses\n",
			ah->bmisscount);
		ah->bmisscount = 0;
	}

	if ((ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs +
			ah->num_mesh_vifs > 1) ||
			ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		u64 tsf = ath5k_hw_get_tsf64(ah);
		u32 tsftu = TSF_TO_TU(tsf);
		int slot = ((tsftu % ah->bintval) * ATH_BCBUF) / ah->bintval;
		vif = ah->bslot[(slot + 1) % ATH_BCBUF];
		ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
			"tsf %llx tsftu %x intval %u slot %u vif %p\n",
			(unsigned long long)tsf, tsftu, ah->bintval, slot, vif);
	} else /* only one interface */
		vif = ah->bslot[0];

	if (!vif)
		return;

	avf = (void *)vif->drv_priv;
	bf = avf->bbuf;

	/*
	 * Stop any current dma and put the new frame on the queue.
	 * This should never fail since we check above that no frames
	 * are still pending on the queue.
	 */
	if (unlikely(ath5k_hw_stop_beacon_queue(ah, ah->bhalq))) {
		ATH5K_WARN(ah, "beacon queue %u didn't start/stop ?\n", ah->bhalq);
		/* NB: hw still stops DMA, so proceed */
	}

	/* refresh the beacon for AP or MESH mode */
	if (ah->opmode == NL80211_IFTYPE_AP ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
		err = ath5k_beacon_update(ah->hw, vif);
		if (err)
			return;
	}

	if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
		     ah->opmode == NL80211_IFTYPE_MONITOR)) {
		ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf->skb);
		return;
	}

	trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]);

	ath5k_hw_set_txdp(ah, ah->bhalq, bf->daddr);
	ath5k_hw_start_tx_dma(ah, ah->bhalq);
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
		ah->bhalq, (unsigned long long)bf->daddr, bf->desc);

	skb = ieee80211_get_buffered_bc(ah->hw, vif);
	while (skb) {
		ath5k_tx_queue(ah->hw, skb, ah->cabq);

		if (ah->cabq->txq_len >= ah->cabq->txq_max)
			break;

		skb = ieee80211_get_buffered_bc(ah->hw, vif);
	}

	ah->bsent++;
}

/**
 * ath5k_beacon_update_timers - update beacon timers
 *
 * @ah: struct ath5k_hw pointer we are operating on
 * @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a
 *          beacon timer update based on the current HW TSF.
 *
 * Calculate the next target beacon transmit time (TBTT) based on the timestamp
 * of a received beacon or the current local hardware TSF and write it to the
 * beacon timer registers.
 *
 * This is called in a variety of situations, e.g. when a beacon is received,
 * when a TSF update has been detected, but also when a new IBSS is created or
 * when we otherwise know we have to update the timers, but we keep it in this
 * function to have it all together in one place.
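 *
 * For example, with bc_tsf == -1 (no beacon received), a beacon interval of
 * 100 TU and a current hw_tu of 1234 TU, the next TBTT is placed at
 * roundup(1234 + FUDGE, 100) = 1300 TU, where FUDGE is 13 given the
 * AR5K_TUNE_SW_BEACON_RESP of 10 noted below.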
1950 */ 1951 void 1952 ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf) 1953 { 1954 u32 nexttbtt, intval, hw_tu, bc_tu; 1955 u64 hw_tsf; 1956 1957 intval = ah->bintval & AR5K_BEACON_PERIOD; 1958 if (ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs 1959 + ah->num_mesh_vifs > 1) { 1960 intval /= ATH_BCBUF; /* staggered multi-bss beacons */ 1961 if (intval < 15) 1962 ATH5K_WARN(ah, "intval %u is too low, min 15\n", 1963 intval); 1964 } 1965 if (WARN_ON(!intval)) 1966 return; 1967 1968 /* beacon TSF converted to TU */ 1969 bc_tu = TSF_TO_TU(bc_tsf); 1970 1971 /* current TSF converted to TU */ 1972 hw_tsf = ath5k_hw_get_tsf64(ah); 1973 hw_tu = TSF_TO_TU(hw_tsf); 1974 1975 #define FUDGE (AR5K_TUNE_SW_BEACON_RESP + 3) 1976 /* We use FUDGE to make sure the next TBTT is ahead of the current TU. 1977 * Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer 1978 * configuration, we need to make sure it is bigger than that. */ 1979 1980 if (bc_tsf == -1) { 1981 /* 1982 * no beacons received, called internally. 1983 * just need to refresh timers based on HW TSF. 1984 */ 1985 nexttbtt = roundup(hw_tu + FUDGE, intval); 1986 } else if (bc_tsf == 0) { 1987 /* 1988 * no beacon received, probably called by ath5k_reset_tsf(). 1989 * reset TSF to start with 0. 1990 */ 1991 nexttbtt = intval; 1992 intval |= AR5K_BEACON_RESET_TSF; 1993 } else if (bc_tsf > hw_tsf) { 1994 /* 1995 * beacon received, SW merge happened but HW TSF not yet updated. 1996 * not possible to reconfigure timers yet, but next time we 1997 * receive a beacon with the same BSSID, the hardware will 1998 * automatically update the TSF and then we need to reconfigure 1999 * the timers. 2000 */ 2001 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, 2002 "need to wait for HW TSF sync\n"); 2003 return; 2004 } else { 2005 /* 2006 * most important case for beacon synchronization between STAs. 2007 * 2008 * beacon received and HW TSF has been already updated by HW. 2009 * update next TBTT based on the TSF of the beacon, but make 2010 * sure it is ahead of our local TSF timer. 2011 */ 2012 nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval); 2013 } 2014 #undef FUDGE 2015 2016 ah->nexttbtt = nexttbtt; 2017 2018 intval |= AR5K_BEACON_ENA; 2019 ath5k_hw_init_beacon_timers(ah, nexttbtt, intval); 2020 2021 /* 2022 * debugging output last in order to preserve the time critical aspect 2023 * of this function 2024 */ 2025 if (bc_tsf == -1) 2026 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, 2027 "reconfigured timers based on HW TSF\n"); 2028 else if (bc_tsf == 0) 2029 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, 2030 "reset HW TSF and timers\n"); 2031 else 2032 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, 2033 "updated timers based on beacon TSF\n"); 2034 2035 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, 2036 "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n", 2037 (unsigned long long) bc_tsf, 2038 (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt); 2039 ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "intval %u %s %s\n", 2040 intval & AR5K_BEACON_PERIOD, 2041 intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "", 2042 intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : ""); 2043 } 2044 2045 /** 2046 * ath5k_beacon_config - Configure the beacon queues and interrupts 2047 * 2048 * @ah: struct ath5k_hw pointer we are operating on 2049 * 2050 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA 2051 * interrupts to detect TSF updates only.
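 * In AP and mesh point mode the SWBA interrupt instead triggers
 * ath5k_beacon_send() from the beacon tasklet on every beacon interval.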
2052 */ 2053 void 2054 ath5k_beacon_config(struct ath5k_hw *ah) 2055 { 2056 unsigned long flags; 2057 2058 spin_lock_irqsave(&ah->block, flags); 2059 ah->bmisscount = 0; 2060 ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA); 2061 2062 if (ah->enable_beacon) { 2063 /* 2064 * In IBSS mode we use a self-linked tx descriptor and let the 2065 * hardware send the beacons automatically. We have to load it 2066 * only once here. 2067 * We use the SWBA interrupt only to keep track of the beacon 2068 * timers in order to detect automatic TSF updates. 2069 */ 2070 ath5k_beaconq_config(ah); 2071 2072 ah->imask |= AR5K_INT_SWBA; 2073 2074 if (ah->opmode == NL80211_IFTYPE_ADHOC) { 2075 if (ath5k_hw_hasveol(ah)) 2076 ath5k_beacon_send(ah); 2077 } else 2078 ath5k_beacon_update_timers(ah, -1); 2079 } else { 2080 ath5k_hw_stop_beacon_queue(ah, ah->bhalq); 2081 } 2082 2083 ath5k_hw_set_imr(ah, ah->imask); 2084 mmiowb(); 2085 spin_unlock_irqrestore(&ah->block, flags); 2086 } 2087 2088 static void ath5k_tasklet_beacon(unsigned long data) 2089 { 2090 struct ath5k_hw *ah = (struct ath5k_hw *) data; 2091 2092 /* 2093 * Software beacon alert--time to send a beacon. 2094 * 2095 * In IBSS mode we use this interrupt just to 2096 * keep track of the next TBTT (target beacon 2097 * transmission time) in order to detect whether 2098 * automatic TSF updates happened. 2099 */ 2100 if (ah->opmode == NL80211_IFTYPE_ADHOC) { 2101 /* XXX: only if VEOL supported */ 2102 u64 tsf = ath5k_hw_get_tsf64(ah); 2103 ah->nexttbtt += ah->bintval; 2104 ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, 2105 "SWBA nexttbtt: %x hw_tu: %x " 2106 "TSF: %llx\n", 2107 ah->nexttbtt, 2108 TSF_TO_TU(tsf), 2109 (unsigned long long) tsf); 2110 } else { 2111 spin_lock(&ah->block); 2112 ath5k_beacon_send(ah); 2113 spin_unlock(&ah->block); 2114 } 2115 } 2116 2117 2118 /********************\ 2119 * Interrupt handling * 2120 \********************/ 2121 2122 static void 2123 ath5k_intr_calibration_poll(struct ath5k_hw *ah) 2124 { 2125 if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) && 2126 !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) && 2127 !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) { 2128 2129 /* Run ANI only when calibration is not active */ 2130 2131 ah->ah_cal_next_ani = jiffies + 2132 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI); 2133 tasklet_schedule(&ah->ani_tasklet); 2134 2135 } else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) && 2136 !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) && 2137 !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) { 2138 2139 /* Run calibration only when another calibration 2140 * is not running. 2141 * 2142 * Note: This is for both full/short calibration, 2143 * if it's time for a full one, ath5k_calibrate_work will deal 2144 * with it. 
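 * (ath5k_calibrate_work escalates to a full calibration once
 * ah->ah_cal_next_full has expired; see below.)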
*/ 2145 2146 ah->ah_cal_next_short = jiffies + 2147 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT); 2148 ieee80211_queue_work(ah->hw, &ah->calib_work); 2149 } 2150 /* we could use SWI to generate enough interrupts to meet our 2151 * calibration interval requirements, if necessary: 2152 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */ 2153 } 2154 2155 static void 2156 ath5k_schedule_rx(struct ath5k_hw *ah) 2157 { 2158 ah->rx_pending = true; 2159 tasklet_schedule(&ah->rxtq); 2160 } 2161 2162 static void 2163 ath5k_schedule_tx(struct ath5k_hw *ah) 2164 { 2165 ah->tx_pending = true; 2166 tasklet_schedule(&ah->txtq); 2167 } 2168 2169 static irqreturn_t 2170 ath5k_intr(int irq, void *dev_id) 2171 { 2172 struct ath5k_hw *ah = dev_id; 2173 enum ath5k_int status; 2174 unsigned int counter = 1000; 2175 2176 2177 /* 2178 * If hw is not ready (or detached) and we get an 2179 * interrupt, or if we have no interrupts pending 2180 * (that means it's not for us), skip it. 2181 * 2182 * NOTE: Group 0/1 PCI interface registers are not 2183 * supported on WiSOCs, so we can't check for pending 2184 * interrupts (ISR belongs to another register group, 2185 * so we are ok). 2186 */ 2187 if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) || 2188 ((ath5k_get_bus_type(ah) != ATH_AHB) && 2189 !ath5k_hw_is_intr_pending(ah)))) 2190 return IRQ_NONE; 2191 2192 /** Main loop **/ 2193 do { 2194 ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */ 2195 2196 ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n", 2197 status, ah->imask); 2198 2199 /* 2200 * Fatal hw error -> Log and reset 2201 * 2202 * Fatal errors are unrecoverable, so we have to 2203 * reset the card. These errors include bus and 2204 * dma errors. 2205 */ 2206 if (unlikely(status & AR5K_INT_FATAL)) { 2207 2208 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, 2209 "fatal int, resetting\n"); 2210 ieee80211_queue_work(ah->hw, &ah->reset_work); 2211 2212 /* 2213 * RX Overrun -> Count and reset if needed 2214 * 2215 * Receive buffers are full. Either the bus is busy or 2216 * the CPU is not fast enough to process all received 2217 * frames. 2218 */ 2219 } else if (unlikely(status & AR5K_INT_RXORN)) { 2220 2221 /* 2222 * Older chipsets need a reset to come out of this 2223 * condition, but we treat it as RX for newer chips. 2224 * We don't know exactly which versions need a reset; 2225 * this guess is copied from the HAL. 2226 */ 2227 ah->stats.rxorn_intr++; 2228 2229 if (ah->ah_mac_srev < AR5K_SREV_AR5212) { 2230 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, 2231 "rx overrun, resetting\n"); 2232 ieee80211_queue_work(ah->hw, &ah->reset_work); 2233 } else 2234 ath5k_schedule_rx(ah); 2235 2236 } else { 2237 2238 /* Software Beacon Alert -> Schedule beacon tasklet */ 2239 if (status & AR5K_INT_SWBA) 2240 tasklet_hi_schedule(&ah->beacontq); 2241 2242 /* 2243 * No more RX descriptors -> Just count 2244 * 2245 * NB: the hardware should re-read the link when the 2246 * RXE bit is written, but it doesn't work, at 2247 * least on older hardware revs.
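 * Here we just count the event in stats.rxeol_intr.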
2248 */ 2249 if (status & AR5K_INT_RXEOL) 2250 ah->stats.rxeol_intr++; 2251 2252 2253 /* TX Underrun -> Bump tx trigger level */ 2254 if (status & AR5K_INT_TXURN) 2255 ath5k_hw_update_tx_triglevel(ah, true); 2256 2257 /* RX -> Schedule rx tasklet */ 2258 if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR)) 2259 ath5k_schedule_rx(ah); 2260 2261 /* TX -> Schedule tx tasklet */ 2262 if (status & (AR5K_INT_TXOK 2263 | AR5K_INT_TXDESC 2264 | AR5K_INT_TXERR 2265 | AR5K_INT_TXEOL)) 2266 ath5k_schedule_tx(ah); 2267 2268 /* Missed beacon -> TODO 2269 if (status & AR5K_INT_BMISS) 2270 */ 2271 2272 /* MIB event -> Update counters and notify ANI */ 2273 if (status & AR5K_INT_MIB) { 2274 ah->stats.mib_intr++; 2275 ath5k_hw_update_mib_counters(ah); 2276 ath5k_ani_mib_intr(ah); 2277 } 2278 2279 /* GPIO -> Notify RFKill layer */ 2280 if (status & AR5K_INT_GPIO) 2281 tasklet_schedule(&ah->rf_kill.toggleq); 2282 2283 } 2284 2285 if (ath5k_get_bus_type(ah) == ATH_AHB) 2286 break; 2287 2288 } while (ath5k_hw_is_intr_pending(ah) && --counter > 0); 2289 2290 /* 2291 * Until we handle rx/tx interrupts, mask them on IMR 2292 * 2293 * NOTE: ah->(rx/tx)_pending are set when scheduling the tasklets 2294 * and unset after we've handled the interrupts. 2295 */ 2296 if (ah->rx_pending || ah->tx_pending) 2297 ath5k_set_current_imask(ah); 2298 2299 if (unlikely(!counter)) 2300 ATH5K_WARN(ah, "too many interrupts, giving up for now\n"); 2301 2302 /* Fire up calibration poll */ 2303 ath5k_intr_calibration_poll(ah); 2304 2305 return IRQ_HANDLED; 2306 } 2307 2308 /* 2309 * Periodically recalibrate the PHY to account 2310 * for temperature/environment changes. 2311 */ 2312 static void 2313 ath5k_calibrate_work(struct work_struct *work) 2314 { 2315 struct ath5k_hw *ah = container_of(work, struct ath5k_hw, 2316 calib_work); 2317 2318 /* Should we run a full calibration? */ 2319 if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) { 2320 2321 ah->ah_cal_next_full = jiffies + 2322 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL); 2323 ah->ah_cal_mask |= AR5K_CALIBRATION_FULL; 2324 2325 ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, 2326 "running full calibration\n"); 2327 2328 if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) { 2329 /* 2330 * Rfgain is out of bounds, reset the chip 2331 * to load new gain values.
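 * The actual reset is deferred to ah->reset_work, which is queued
 * right below.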
2332 */ 2333 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, 2334 "got new rfgain, resetting\n"); 2335 ieee80211_queue_work(ah->hw, &ah->reset_work); 2336 } 2337 } else 2338 ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT; 2339 2340 2341 ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n", 2342 ieee80211_frequency_to_channel(ah->curchan->center_freq), 2343 ah->curchan->hw_value); 2344 2345 if (ath5k_hw_phy_calibrate(ah, ah->curchan)) 2346 ATH5K_ERR(ah, "calibration of channel %u failed\n", 2347 ieee80211_frequency_to_channel( 2348 ah->curchan->center_freq)); 2349 2350 /* Clear calibration flags */ 2351 if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) 2352 ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL; 2353 else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT) 2354 ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT; 2355 } 2356 2357 2358 static void 2359 ath5k_tasklet_ani(unsigned long data) 2360 { 2361 struct ath5k_hw *ah = (void *)data; 2362 2363 ah->ah_cal_mask |= AR5K_CALIBRATION_ANI; 2364 ath5k_ani_calibration(ah); 2365 ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI; 2366 } 2367 2368 2369 static void 2370 ath5k_tx_complete_poll_work(struct work_struct *work) 2371 { 2372 struct ath5k_hw *ah = container_of(work, struct ath5k_hw, 2373 tx_complete_work.work); 2374 struct ath5k_txq *txq; 2375 int i; 2376 bool needreset = false; 2377 2378 mutex_lock(&ah->lock); 2379 2380 for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) { 2381 if (ah->txqs[i].setup) { 2382 txq = &ah->txqs[i]; 2383 spin_lock_bh(&txq->lock); 2384 if (txq->txq_len > 1) { 2385 if (txq->txq_poll_mark) { 2386 ATH5K_DBG(ah, ATH5K_DEBUG_XMIT, 2387 "TX queue stuck %d\n", 2388 txq->qnum); 2389 needreset = true; 2390 txq->txq_stuck++; 2391 spin_unlock_bh(&txq->lock); 2392 break; 2393 } else { 2394 txq->txq_poll_mark = true; 2395 } 2396 } 2397 spin_unlock_bh(&txq->lock); 2398 } 2399 } 2400 2401 if (needreset) { 2402 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, 2403 "TX queues stuck, resetting\n"); 2404 ath5k_reset(ah, NULL, true); 2405 } 2406 2407 mutex_unlock(&ah->lock); 2408 2409 ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work, 2410 msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT)); 2411 } 2412 2413 2414 /*************************\ 2415 * Initialization routines * 2416 \*************************/ 2417 2418 int __devinit 2419 ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops) 2420 { 2421 struct ieee80211_hw *hw = ah->hw; 2422 struct ath_common *common; 2423 int ret; 2424 int csz; 2425 2426 /* Initialize driver private data */ 2427 SET_IEEE80211_DEV(hw, ah->dev); 2428 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 2429 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 2430 IEEE80211_HW_SIGNAL_DBM | 2431 IEEE80211_HW_REPORTS_TX_ACK_STATUS; 2432 2433 hw->wiphy->interface_modes = 2434 BIT(NL80211_IFTYPE_AP) | 2435 BIT(NL80211_IFTYPE_STATION) | 2436 BIT(NL80211_IFTYPE_ADHOC) | 2437 BIT(NL80211_IFTYPE_MESH_POINT); 2438 2439 /* SW support for IBSS_RSN is provided by mac80211 */ 2440 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 2441 2442 /* both antennas can be configured as RX or TX */ 2443 hw->wiphy->available_antennas_tx = 0x3; 2444 hw->wiphy->available_antennas_rx = 0x3; 2445 2446 hw->extra_tx_headroom = 2; 2447 hw->channel_change_time = 5000; 2448 2449 /* 2450 * Mark the device as detached to avoid processing 2451 * interrupts until setup is complete. 
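 * ATH_STAT_INVALID is cleared again near the end of this function,
 * once the hardware and mac80211 state have been set up.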
2452 */ 2453 __set_bit(ATH_STAT_INVALID, ah->status); 2454 2455 ah->opmode = NL80211_IFTYPE_STATION; 2456 ah->bintval = 1000; 2457 mutex_init(&ah->lock); 2458 spin_lock_init(&ah->rxbuflock); 2459 spin_lock_init(&ah->txbuflock); 2460 spin_lock_init(&ah->block); 2461 spin_lock_init(&ah->irqlock); 2462 2463 /* Setup interrupt handler */ 2464 ret = request_irq(ah->irq, ath5k_intr, IRQF_SHARED, "ath", ah); 2465 if (ret) { 2466 ATH5K_ERR(ah, "request_irq failed\n"); 2467 goto err; 2468 } 2469 2470 common = ath5k_hw_common(ah); 2471 common->ops = &ath5k_common_ops; 2472 common->bus_ops = bus_ops; 2473 common->ah = ah; 2474 common->hw = hw; 2475 common->priv = ah; 2476 common->clockrate = 40; 2477 2478 /* 2479 * Cache line size is used to size and align various 2480 * structures used to communicate with the hardware. 2481 */ 2482 ath5k_read_cachesize(common, &csz); 2483 common->cachelsz = csz << 2; /* convert to bytes */ 2484 2485 spin_lock_init(&common->cc_lock); 2486 2487 /* Initialize device */ 2488 ret = ath5k_hw_init(ah); 2489 if (ret) 2490 goto err_irq; 2491 2492 /* Set up multi-rate retry capabilities */ 2493 if (ah->ah_capabilities.cap_has_mrr_support) { 2494 hw->max_rates = 4; 2495 hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT, 2496 AR5K_INIT_RETRY_LONG); 2497 } 2498 2499 hw->vif_data_size = sizeof(struct ath5k_vif); 2500 2501 /* Finish private driver data initialization */ 2502 ret = ath5k_init(hw); 2503 if (ret) 2504 goto err_ah; 2505 2506 ATH5K_INFO(ah, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n", 2507 ath5k_chip_name(AR5K_VERSION_MAC, ah->ah_mac_srev), 2508 ah->ah_mac_srev, 2509 ah->ah_phy_revision); 2510 2511 if (!ah->ah_single_chip) { 2512 /* Single chip radio (!RF5111) */ 2513 if (ah->ah_radio_5ghz_revision && 2514 !ah->ah_radio_2ghz_revision) { 2515 /* No 5GHz support -> report 2GHz radio */ 2516 if (!test_bit(AR5K_MODE_11A, 2517 ah->ah_capabilities.cap_mode)) { 2518 ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n", 2519 ath5k_chip_name(AR5K_VERSION_RAD, 2520 ah->ah_radio_5ghz_revision), 2521 ah->ah_radio_5ghz_revision); 2522 /* No 2GHz support (5110 and some 2523 * 5GHz only cards) -> report 5GHz radio */ 2524 } else if (!test_bit(AR5K_MODE_11B, 2525 ah->ah_capabilities.cap_mode)) { 2526 ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n", 2527 ath5k_chip_name(AR5K_VERSION_RAD, 2528 ah->ah_radio_5ghz_revision), 2529 ah->ah_radio_5ghz_revision); 2530 /* Multiband radio */ 2531 } else { 2532 ATH5K_INFO(ah, "RF%s multiband radio found" 2533 " (0x%x)\n", 2534 ath5k_chip_name(AR5K_VERSION_RAD, 2535 ah->ah_radio_5ghz_revision), 2536 ah->ah_radio_5ghz_revision); 2537 } 2538 } 2539 /* Multi chip radio (RF5111 - RF2111) -> 2540 * report both 2GHz/5GHz radios */ 2541 else if (ah->ah_radio_5ghz_revision && 2542 ah->ah_radio_2ghz_revision) { 2543 ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n", 2544 ath5k_chip_name(AR5K_VERSION_RAD, 2545 ah->ah_radio_5ghz_revision), 2546 ah->ah_radio_5ghz_revision); 2547 ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n", 2548 ath5k_chip_name(AR5K_VERSION_RAD, 2549 ah->ah_radio_2ghz_revision), 2550 ah->ah_radio_2ghz_revision); 2551 } 2552 } 2553 2554 ath5k_debug_init_device(ah); 2555 2556 /* ready to process interrupts */ 2557 __clear_bit(ATH_STAT_INVALID, ah->status); 2558 2559 return 0; 2560 err_ah: 2561 ath5k_hw_deinit(ah); 2562 err_irq: 2563 free_irq(ah->irq, ah); 2564 err: 2565 return ret; 2566 } 2567 2568 static int 2569 ath5k_stop_locked(struct ath5k_hw *ah) 2570 { 2571 2572 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "invalid %u\n", 2573 
test_bit(ATH_STAT_INVALID, ah->status)); 2574 2575 /* 2576 * Shutdown the hardware and driver: 2577 * stop output from above 2578 * disable interrupts 2579 * turn off timers 2580 * turn off the radio 2581 * clear transmit machinery 2582 * clear receive machinery 2583 * drain and release tx queues 2584 * reclaim beacon resources 2585 * power down hardware 2586 * 2587 * Note that some of this work is not possible if the 2588 * hardware is gone (invalid). 2589 */ 2590 ieee80211_stop_queues(ah->hw); 2591 2592 if (!test_bit(ATH_STAT_INVALID, ah->status)) { 2593 ath5k_led_off(ah); 2594 ath5k_hw_set_imr(ah, 0); 2595 synchronize_irq(ah->irq); 2596 ath5k_rx_stop(ah); 2597 ath5k_hw_dma_stop(ah); 2598 ath5k_drain_tx_buffs(ah); 2599 ath5k_hw_phy_disable(ah); 2600 } 2601 2602 return 0; 2603 } 2604 2605 int ath5k_start(struct ieee80211_hw *hw) 2606 { 2607 struct ath5k_hw *ah = hw->priv; 2608 struct ath_common *common = ath5k_hw_common(ah); 2609 int ret, i; 2610 2611 mutex_lock(&ah->lock); 2612 2613 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "mode %d\n", ah->opmode); 2614 2615 /* 2616 * Stop anything previously set up. This is safe 2617 * whether or not this is the first time through. 2618 */ 2619 ath5k_stop_locked(ah); 2620 2621 /* 2622 * The basic interface to setting the hardware in a good 2623 * state is ``reset''. On return the hardware is known to 2624 * be powered up with interrupts disabled. This must 2625 * be followed by initialization of the appropriate bits 2626 * and then setup of the interrupt mask. 2627 */ 2628 ah->curchan = ah->hw->conf.channel; 2629 ah->imask = AR5K_INT_RXOK 2630 | AR5K_INT_RXERR 2631 | AR5K_INT_RXEOL 2632 | AR5K_INT_RXORN 2633 | AR5K_INT_TXDESC 2634 | AR5K_INT_TXEOL 2635 | AR5K_INT_FATAL 2636 | AR5K_INT_GLOBAL 2637 | AR5K_INT_MIB; 2638 2639 ret = ath5k_reset(ah, NULL, false); 2640 if (ret) 2641 goto done; 2642 2643 if (!ath5k_modparam_no_hw_rfkill_switch) 2644 ath5k_rfkill_hw_start(ah); 2645 2646 /* 2647 * Reset the key cache since some parts do not reset the 2648 * contents on initial power up or resume from suspend. 2649 */ 2650 for (i = 0; i < common->keymax; i++) 2651 ath_hw_keyreset(common, (u16) i); 2652 2653 /* Use higher rates for acks instead of base 2654 * rate */ 2655 ah->ah_ack_bitrate_high = true; 2656 2657 for (i = 0; i < ARRAY_SIZE(ah->bslot); i++) 2658 ah->bslot[i] = NULL; 2659 2660 ret = 0; 2661 done: 2662 mmiowb(); 2663 mutex_unlock(&ah->lock); 2664 2665 ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work, 2666 msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT)); 2667 2668 return ret; 2669 } 2670 2671 static void ath5k_stop_tasklets(struct ath5k_hw *ah) 2672 { 2673 ah->rx_pending = false; 2674 ah->tx_pending = false; 2675 tasklet_kill(&ah->rxtq); 2676 tasklet_kill(&ah->txtq); 2677 tasklet_kill(&ah->beacontq); 2678 tasklet_kill(&ah->ani_tasklet); 2679 } 2680 2681 /* 2682 * Stop the device, grabbing the top-level lock to protect 2683 * against concurrent entry through ath5k_init (which can happen 2684 * if another thread does a system call and the thread doing the 2685 * stop is preempted). 2686 */ 2687 void ath5k_stop(struct ieee80211_hw *hw) 2688 { 2689 struct ath5k_hw *ah = hw->priv; 2690 int ret; 2691 2692 mutex_lock(&ah->lock); 2693 ret = ath5k_stop_locked(ah); 2694 if (ret == 0 && !test_bit(ATH_STAT_INVALID, ah->status)) { 2695 /* 2696 * Don't put the card in full sleep mode! 2697 * 2698 * a) When the device is in this state it must be carefully 2699 * woken up or references to registers in the PCI clock 2700 * domain may freeze the bus (and system).
This varies 2701 * by chip and is mostly an issue with newer parts 2702 * (madwifi sources mentioned srev >= 0x78) that go to 2703 * sleep more quickly. 2704 * 2705 * b) On older chips full sleep results in weird behaviour 2706 * during wakeup. I tested various cards with srev < 0x78 2707 * and they don't wake up after module reload; a second 2708 * module reload is needed to bring the card up again. 2709 * 2710 * Until we figure out what's going on, don't enable 2711 * full chip reset on any chip (this is what Legacy HAL 2712 * and Sam's HAL do anyway). Instead, perform a full reset 2713 * on the device (same as initial state after attach) and 2714 * leave it idle (keep MAC/BB on warm reset). */ 2715 ret = ath5k_hw_on_hold(ah); 2716 2717 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, 2718 "putting device to sleep\n"); 2719 } 2720 2721 mmiowb(); 2722 mutex_unlock(&ah->lock); 2723 2724 ath5k_stop_tasklets(ah); 2725 2726 cancel_delayed_work_sync(&ah->tx_complete_work); 2727 2728 if (!ath5k_modparam_no_hw_rfkill_switch) 2729 ath5k_rfkill_hw_stop(ah); 2730 } 2731 2732 /* 2733 * Reset the hardware. If chan is not NULL, then also pause rx/tx 2734 * and change to the given channel. 2735 * 2736 * This should be called with ah->lock. 2737 */ 2738 static int 2739 ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan, 2740 bool skip_pcu) 2741 { 2742 struct ath_common *common = ath5k_hw_common(ah); 2743 int ret, ani_mode; 2744 bool fast; 2745 2746 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "resetting\n"); 2747 2748 ath5k_hw_set_imr(ah, 0); 2749 synchronize_irq(ah->irq); 2750 ath5k_stop_tasklets(ah); 2751 2752 /* Save ani mode and disable ANI during 2753 * reset. If we don't, we might get false 2754 * PHY error interrupts. */ 2755 ani_mode = ah->ani_state.ani_mode; 2756 ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF); 2757 2758 /* We are going to empty hw queues, 2759 * so we should also free any remaining 2760 * tx buffers */ 2761 ath5k_drain_tx_buffs(ah); 2762 if (chan) 2763 ah->curchan = chan; 2764 2765 fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0; 2766 2767 ret = ath5k_hw_reset(ah, ah->opmode, ah->curchan, fast, skip_pcu); 2768 if (ret) { 2769 ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret); 2770 goto err; 2771 } 2772 2773 ret = ath5k_rx_start(ah); 2774 if (ret) { 2775 ATH5K_ERR(ah, "can't start recv logic\n"); 2776 goto err; 2777 } 2778 2779 ath5k_ani_init(ah, ani_mode); 2780 2781 /* 2782 * Set calibration intervals 2783 * 2784 * Note: We don't need to run calibration immediately 2785 * since some initial calibration is done on reset 2786 * even for fast channel switching. Also, on scanning 2787 * this will get set again and again, and it won't get 2788 * executed unless we connect somewhere and spend some 2789 * time on the channel (that's what calibration needs 2790 * anyway to be accurate).
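 * Each deadline below is simply pushed the corresponding
 * ATH5K_TUNE_CALIBRATION_INTERVAL_* milliseconds into the future.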
2791 */ 2792 ah->ah_cal_next_full = jiffies + 2793 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL); 2794 ah->ah_cal_next_ani = jiffies + 2795 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI); 2796 ah->ah_cal_next_short = jiffies + 2797 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT); 2798 2799 ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8); 2800 2801 /* clear survey data and cycle counters */ 2802 memset(&ah->survey, 0, sizeof(ah->survey)); 2803 spin_lock_bh(&common->cc_lock); 2804 ath_hw_cycle_counters_update(common); 2805 memset(&common->cc_survey, 0, sizeof(common->cc_survey)); 2806 memset(&common->cc_ani, 0, sizeof(common->cc_ani)); 2807 spin_unlock_bh(&common->cc_lock); 2808 2809 /* 2810 * Change channels and update the h/w rate map if we're switching; 2811 * e.g. 11a to 11b/g. 2812 * 2813 * We may be doing a reset in response to an ioctl that changes the 2814 * channel so update any state that might change as a result. 2815 * 2816 * XXX needed? 2817 */ 2818 /* ath5k_chan_change(ah, c); */ 2819 2820 ath5k_beacon_config(ah); 2821 /* intrs are enabled by ath5k_beacon_config */ 2822 2823 ieee80211_wake_queues(ah->hw); 2824 2825 return 0; 2826 err: 2827 return ret; 2828 } 2829 2830 static void ath5k_reset_work(struct work_struct *work) 2831 { 2832 struct ath5k_hw *ah = container_of(work, struct ath5k_hw, 2833 reset_work); 2834 2835 mutex_lock(&ah->lock); 2836 ath5k_reset(ah, NULL, true); 2837 mutex_unlock(&ah->lock); 2838 } 2839 2840 static int __devinit 2841 ath5k_init(struct ieee80211_hw *hw) 2842 { 2843 2844 struct ath5k_hw *ah = hw->priv; 2845 struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah); 2846 struct ath5k_txq *txq; 2847 u8 mac[ETH_ALEN] = {}; 2848 int ret; 2849 2850 2851 /* 2852 * Collect the channel list. The 802.11 layer 2853 * is responsible for filtering this list based 2854 * on settings like the phy mode and regulatory 2855 * domain restrictions. 2856 */ 2857 ret = ath5k_setup_bands(hw); 2858 if (ret) { 2859 ATH5K_ERR(ah, "can't get channels\n"); 2860 goto err; 2861 } 2862 2863 /* 2864 * Allocate tx+rx descriptors and populate the lists. 2865 */ 2866 ret = ath5k_desc_alloc(ah); 2867 if (ret) { 2868 ATH5K_ERR(ah, "can't allocate descriptors\n"); 2869 goto err; 2870 } 2871 2872 /* 2873 * Allocate hardware transmit queues: one queue for 2874 * beacon frames and one data queue for each QoS 2875 * priority. Note that hw functions handle resetting 2876 * these queues at the needed time. 
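 * The beacon queue is set up first so that its hardware queue number
 * can be kept in ah->bhalq; the CAB queue that follows carries frames
 * buffered for delivery right after each beacon.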
2877 */ 2878 ret = ath5k_beaconq_setup(ah); 2879 if (ret < 0) { 2880 ATH5K_ERR(ah, "can't setup a beacon xmit queue\n"); 2881 goto err_desc; 2882 } 2883 ah->bhalq = ret; 2884 ah->cabq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_CAB, 0); 2885 if (IS_ERR(ah->cabq)) { 2886 ATH5K_ERR(ah, "can't setup cab queue\n"); 2887 ret = PTR_ERR(ah->cabq); 2888 goto err_bhal; 2889 } 2890 2891 /* 5211 and 5212 usually support 10 queues but we better rely on the 2892 * capability information */ 2893 if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) { 2894 /* This order matches mac80211's queue priority, so we can 2895 * directly use the mac80211 queue number without any mapping */ 2896 txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO); 2897 if (IS_ERR(txq)) { 2898 ATH5K_ERR(ah, "can't setup xmit queue\n"); 2899 ret = PTR_ERR(txq); 2900 goto err_queues; 2901 } 2902 txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI); 2903 if (IS_ERR(txq)) { 2904 ATH5K_ERR(ah, "can't setup xmit queue\n"); 2905 ret = PTR_ERR(txq); 2906 goto err_queues; 2907 } 2908 txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE); 2909 if (IS_ERR(txq)) { 2910 ATH5K_ERR(ah, "can't setup xmit queue\n"); 2911 ret = PTR_ERR(txq); 2912 goto err_queues; 2913 } 2914 txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK); 2915 if (IS_ERR(txq)) { 2916 ATH5K_ERR(ah, "can't setup xmit queue\n"); 2917 ret = PTR_ERR(txq); 2918 goto err_queues; 2919 } 2920 hw->queues = 4; 2921 } else { 2922 /* older hardware (5210) can only support one data queue */ 2923 txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE); 2924 if (IS_ERR(txq)) { 2925 ATH5K_ERR(ah, "can't setup xmit queue\n"); 2926 ret = PTR_ERR(txq); 2927 goto err_queues; 2928 } 2929 hw->queues = 1; 2930 } 2931 2932 tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah); 2933 tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah); 2934 tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah); 2935 tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah); 2936 2937 INIT_WORK(&ah->reset_work, ath5k_reset_work); 2938 INIT_WORK(&ah->calib_work, ath5k_calibrate_work); 2939 INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work); 2940 2941 ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac); 2942 if (ret) { 2943 ATH5K_ERR(ah, "unable to read address from EEPROM\n"); 2944 goto err_queues; 2945 } 2946 2947 SET_IEEE80211_PERM_ADDR(hw, mac); 2948 /* All MAC address bits matter for ACKs */ 2949 ath5k_update_bssid_mask_and_opmode(ah, NULL); 2950 2951 regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain; 2952 ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier); 2953 if (ret) { 2954 ATH5K_ERR(ah, "can't initialize regulatory system\n"); 2955 goto err_queues; 2956 } 2957 2958 ret = ieee80211_register_hw(hw); 2959 if (ret) { 2960 ATH5K_ERR(ah, "can't register ieee80211 hw\n"); 2961 goto err_queues; 2962 } 2963 2964 if (!ath_is_world_regd(regulatory)) 2965 regulatory_hint(hw->wiphy, regulatory->alpha2); 2966 2967 ath5k_init_leds(ah); 2968 2969 ath5k_sysfs_register(ah); 2970 2971 return 0; 2972 err_queues: 2973 ath5k_txq_release(ah); 2974 err_bhal: 2975 ath5k_hw_release_tx_queue(ah, ah->bhalq); 2976 err_desc: 2977 ath5k_desc_free(ah); 2978 err: 2979 return ret; 2980 } 2981 2982 void 2983 ath5k_deinit_ah(struct ath5k_hw *ah) 2984 { 2985 struct ieee80211_hw *hw = ah->hw; 2986 2987 /* 2988 * NB: the order of these is important: 2989 * o call the 802.11 layer before detaching ath5k_hw to 2990 * ensure 
callbacks into the driver to delete global 2991 * key cache entries can be handled 2992 * o reclaim the tx queue data structures after calling 2993 * the 802.11 layer as we'll get called back to reclaim 2994 * node state and potentially want to use them 2995 * o to cleanup the tx queues the hal is called, so detach 2996 * it last 2997 * XXX: ??? detach ath5k_hw ??? 2998 * Other than that, it's straightforward... 2999 */ 3000 ieee80211_unregister_hw(hw); 3001 ath5k_desc_free(ah); 3002 ath5k_txq_release(ah); 3003 ath5k_hw_release_tx_queue(ah, ah->bhalq); 3004 ath5k_unregister_leds(ah); 3005 3006 ath5k_sysfs_unregister(ah); 3007 /* 3008 * NB: can't reclaim these until after ieee80211_ifdetach 3009 * returns because we'll get called back to reclaim node 3010 * state and potentially want to use them. 3011 */ 3012 ath5k_hw_deinit(ah); 3013 free_irq(ah->irq, ah); 3014 } 3015 3016 bool 3017 ath5k_any_vif_assoc(struct ath5k_hw *ah) 3018 { 3019 struct ath5k_vif_iter_data iter_data; 3020 iter_data.hw_macaddr = NULL; 3021 iter_data.any_assoc = false; 3022 iter_data.need_set_hw_addr = false; 3023 iter_data.found_active = true; 3024 3025 ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter, 3026 &iter_data); 3027 return iter_data.any_assoc; 3028 } 3029 3030 void 3031 ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable) 3032 { 3033 struct ath5k_hw *ah = hw->priv; 3034 u32 rfilt; 3035 rfilt = ath5k_hw_get_rx_filter(ah); 3036 if (enable) 3037 rfilt |= AR5K_RX_FILTER_BEACON; 3038 else 3039 rfilt &= ~AR5K_RX_FILTER_BEACON; 3040 ath5k_hw_set_rx_filter(ah, rfilt); 3041 ah->filter_flags = rfilt; 3042 } 3043 3044 void _ath5k_printk(const struct ath5k_hw *ah, const char *level, 3045 const char *fmt, ...) 3046 { 3047 struct va_format vaf; 3048 va_list args; 3049 3050 va_start(args, fmt); 3051 3052 vaf.fmt = fmt; 3053 vaf.va = &args; 3054 3055 if (ah && ah->hw) 3056 printk("%s" pr_fmt("%s: %pV"), 3057 level, wiphy_name(ah->hw->wiphy), &vaf); 3058 else 3059 printk("%s" pr_fmt("%pV"), level, &vaf); 3060 3061 va_end(args); 3062 } 3063