1 /*- 2 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting 3 * Copyright (c) 2004-2005 Atheros Communications, Inc. 4 * Copyright (c) 2006 Devicescape Software, Inc. 5 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com> 6 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu> 7 * 8 * All rights reserved. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer, 15 * without modification. 16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 17 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any 18 * redistribution must be conditioned upon including a substantially 19 * similar Disclaimer requirement for further binary redistribution. 20 * 3. Neither the names of the above-listed copyright holders nor the names 21 * of any contributors may be used to endorse or promote products derived 22 * from this software without specific prior written permission. 23 * 24 * Alternatively, this software may be distributed under the terms of the 25 * GNU General Public License ("GPL") version 2 as published by the Free 26 * Software Foundation. 27 * 28 * NO WARRANTY 29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 30 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 31 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY 32 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL 33 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, 34 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 36 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER 37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 39 * THE POSSIBILITY OF SUCH DAMAGES. 40 * 41 */ 42 43 #include <linux/module.h> 44 #include <linux/delay.h> 45 #include <linux/hardirq.h> 46 #include <linux/if.h> 47 #include <linux/io.h> 48 #include <linux/netdevice.h> 49 #include <linux/cache.h> 50 #include <linux/pci.h> 51 #include <linux/ethtool.h> 52 #include <linux/uaccess.h> 53 #include <linux/slab.h> 54 55 #include <net/ieee80211_radiotap.h> 56 57 #include <asm/unaligned.h> 58 59 #include "base.h" 60 #include "reg.h" 61 #include "debug.h" 62 #include "ani.h" 63 64 static int modparam_nohwcrypt; 65 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); 66 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 67 68 static int modparam_all_channels; 69 module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO); 70 MODULE_PARM_DESC(all_channels, "Expose all channels the device can use."); 71 72 73 /******************\ 74 * Internal defines * 75 \******************/ 76 77 /* Module info */ 78 MODULE_AUTHOR("Jiri Slaby"); 79 MODULE_AUTHOR("Nick Kossifidis"); 80 MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards."); 81 MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards"); 82 MODULE_LICENSE("Dual BSD/GPL"); 83 MODULE_VERSION("0.6.0 (EXPERIMENTAL)"); 84 85 86 /* Known PCI ids */ 87 static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = { 88 { PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */ 89 { PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */ 90 { PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/ 91 { PCI_VDEVICE(ATHEROS, 0x0012) }, /* 5211 */ 92 { PCI_VDEVICE(ATHEROS, 0x0013) }, /* 5212 */ 93 { PCI_VDEVICE(3COM_2, 0x0013) }, /* 3com 5212 */ 94 { PCI_VDEVICE(3COM, 0x0013) }, /* 3com 3CRDAG675 5212 */ 95 { PCI_VDEVICE(ATHEROS, 0x1014) }, /* IBM minipci 5212 */ 96 { PCI_VDEVICE(ATHEROS, 0x0014) }, /* 5212 combatible */ 97 { PCI_VDEVICE(ATHEROS, 0x0015) }, /* 5212 combatible */ 98 { PCI_VDEVICE(ATHEROS, 0x0016) }, /* 5212 combatible */ 99 { PCI_VDEVICE(ATHEROS, 0x0017) }, /* 5212 combatible */ 100 { PCI_VDEVICE(ATHEROS, 0x0018) }, /* 5212 combatible */ 101 { PCI_VDEVICE(ATHEROS, 0x0019) }, /* 5212 combatible */ 102 { PCI_VDEVICE(ATHEROS, 0x001a) }, /* 2413 Griffin-lite */ 103 { PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */ 104 { PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */ 105 { PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */ 106 { 0 } 107 }; 108 MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table); 109 110 /* Known SREVs */ 111 static const struct ath5k_srev_name srev_names[] = { 112 { "5210", AR5K_VERSION_MAC, AR5K_SREV_AR5210 }, 113 { "5311", AR5K_VERSION_MAC, AR5K_SREV_AR5311 }, 114 { "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A }, 115 { "5311B", AR5K_VERSION_MAC, AR5K_SREV_AR5311B }, 116 { "5211", AR5K_VERSION_MAC, AR5K_SREV_AR5211 }, 117 { "5212", AR5K_VERSION_MAC, AR5K_SREV_AR5212 }, 118 { "5213", AR5K_VERSION_MAC, AR5K_SREV_AR5213 }, 119 { "5213A", AR5K_VERSION_MAC, AR5K_SREV_AR5213A }, 120 { "2413", AR5K_VERSION_MAC, AR5K_SREV_AR2413 }, 121 { "2414", AR5K_VERSION_MAC, AR5K_SREV_AR2414 }, 122 { "5424", AR5K_VERSION_MAC, AR5K_SREV_AR5424 }, 123 { "5413", AR5K_VERSION_MAC, AR5K_SREV_AR5413 }, 124 { "5414", AR5K_VERSION_MAC, AR5K_SREV_AR5414 }, 125 { "2415", AR5K_VERSION_MAC, AR5K_SREV_AR2415 }, 126 { "5416", AR5K_VERSION_MAC, AR5K_SREV_AR5416 }, 127 { "5418", AR5K_VERSION_MAC, AR5K_SREV_AR5418 }, 128 { "2425", AR5K_VERSION_MAC, AR5K_SREV_AR2425 }, 129 { "2417", AR5K_VERSION_MAC, AR5K_SREV_AR2417 }, 130 { "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN }, 131 { "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 }, 132 { "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 }, 133 { "5111A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111A }, 134 { "2111", AR5K_VERSION_RAD, AR5K_SREV_RAD_2111 }, 135 { "5112", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112 }, 136 { "5112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112A }, 137 { "5112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112B }, 138 { "2112", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112 }, 139 { "2112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112A }, 140 { "2112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112B }, 141 { "2413", AR5K_VERSION_RAD, AR5K_SREV_RAD_2413 }, 142 { "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 }, 143 { "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 }, 144 { "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 }, 145 { "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 }, 146 { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 }, 147 { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN }, 148 }; 149 150 static const struct ieee80211_rate ath5k_rates[] = { 151 { .bitrate = 10, 152 .hw_value = ATH5K_RATE_CODE_1M, }, 153 { .bitrate = 20, 154 .hw_value = ATH5K_RATE_CODE_2M, 155 .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE, 156 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 157 { .bitrate = 55, 158 .hw_value = ATH5K_RATE_CODE_5_5M, 159 .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE, 160 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 161 { .bitrate = 110, 162 .hw_value = ATH5K_RATE_CODE_11M, 163 .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE, 164 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 165 { .bitrate = 60, 166 .hw_value = ATH5K_RATE_CODE_6M, 167 .flags = 0 }, 168 { .bitrate = 90, 169 .hw_value = ATH5K_RATE_CODE_9M, 170 .flags = 0 }, 171 { .bitrate = 120, 172 .hw_value = ATH5K_RATE_CODE_12M, 173 .flags = 0 }, 174 { .bitrate = 180, 175 .hw_value = ATH5K_RATE_CODE_18M, 176 .flags = 0 }, 177 { .bitrate = 240, 178 .hw_value = ATH5K_RATE_CODE_24M, 179 .flags = 0 }, 180 { .bitrate = 360, 181 .hw_value = ATH5K_RATE_CODE_36M, 182 .flags = 0 }, 183 { .bitrate = 480, 184 .hw_value = ATH5K_RATE_CODE_48M, 185 .flags = 0 }, 186 { .bitrate = 540, 187 .hw_value = ATH5K_RATE_CODE_54M, 188 .flags = 0 }, 189 /* XR missing */ 190 }; 191 192 /* 193 * Prototypes - PCI stack related functions 194 */ 195 static int __devinit ath5k_pci_probe(struct pci_dev *pdev, 196 const struct pci_device_id *id); 197 static void __devexit ath5k_pci_remove(struct pci_dev *pdev); 198 #ifdef CONFIG_PM_SLEEP 199 static int ath5k_pci_suspend(struct device *dev); 200 static int ath5k_pci_resume(struct device *dev); 201 202 static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume); 203 #define ATH5K_PM_OPS (&ath5k_pm_ops) 204 #else 205 #define ATH5K_PM_OPS NULL 206 #endif /* CONFIG_PM_SLEEP */ 207 208 static struct pci_driver ath5k_pci_driver = { 209 .name = KBUILD_MODNAME, 210 .id_table = ath5k_pci_id_table, 211 .probe = ath5k_pci_probe, 212 .remove = __devexit_p(ath5k_pci_remove), 213 .driver.pm = ATH5K_PM_OPS, 214 }; 215 216 217 218 /* 219 * Prototypes - MAC 802.11 stack related functions 220 */ 221 static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb); 222 static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, 223 struct ath5k_txq *txq); 224 static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan); 225 static int ath5k_start(struct ieee80211_hw *hw); 226 static void ath5k_stop(struct ieee80211_hw *hw); 227 static int ath5k_add_interface(struct ieee80211_hw *hw, 228 struct ieee80211_vif *vif); 229 static void ath5k_remove_interface(struct ieee80211_hw *hw, 230 struct ieee80211_vif *vif); 231 static int ath5k_config(struct ieee80211_hw *hw, u32 changed); 232 static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw, 233 struct netdev_hw_addr_list *mc_list); 234 static void ath5k_configure_filter(struct ieee80211_hw *hw, 235 unsigned int changed_flags, 236 unsigned int *new_flags, 237 u64 multicast); 238 static int ath5k_set_key(struct ieee80211_hw *hw, 239 enum set_key_cmd cmd, 240 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 241 struct ieee80211_key_conf *key); 242 static int ath5k_get_stats(struct ieee80211_hw *hw, 243 struct ieee80211_low_level_stats *stats); 244 static int ath5k_get_survey(struct ieee80211_hw *hw, 245 int idx, struct survey_info *survey); 246 static u64 ath5k_get_tsf(struct ieee80211_hw *hw); 247 static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf); 248 static void ath5k_reset_tsf(struct ieee80211_hw *hw); 249 static int ath5k_beacon_update(struct ieee80211_hw *hw, 250 struct ieee80211_vif *vif); 251 static void ath5k_bss_info_changed(struct ieee80211_hw *hw, 252 struct ieee80211_vif *vif, 253 struct ieee80211_bss_conf *bss_conf, 254 u32 changes); 255 static void ath5k_sw_scan_start(struct ieee80211_hw *hw); 256 static void ath5k_sw_scan_complete(struct ieee80211_hw *hw); 257 static void ath5k_set_coverage_class(struct ieee80211_hw *hw, 258 u8 coverage_class); 259 260 static const struct ieee80211_ops ath5k_hw_ops = { 261 .tx = ath5k_tx, 262 .start = ath5k_start, 263 .stop = ath5k_stop, 264 .add_interface = ath5k_add_interface, 265 .remove_interface = ath5k_remove_interface, 266 .config = ath5k_config, 267 .prepare_multicast = ath5k_prepare_multicast, 268 .configure_filter = ath5k_configure_filter, 269 .set_key = ath5k_set_key, 270 .get_stats = ath5k_get_stats, 271 .get_survey = ath5k_get_survey, 272 .conf_tx = NULL, 273 .get_tsf = ath5k_get_tsf, 274 .set_tsf = ath5k_set_tsf, 275 .reset_tsf = ath5k_reset_tsf, 276 .bss_info_changed = ath5k_bss_info_changed, 277 .sw_scan_start = ath5k_sw_scan_start, 278 .sw_scan_complete = ath5k_sw_scan_complete, 279 .set_coverage_class = ath5k_set_coverage_class, 280 }; 281 282 /* 283 * Prototypes - Internal functions 284 */ 285 /* Attach detach */ 286 static int ath5k_attach(struct pci_dev *pdev, 287 struct ieee80211_hw *hw); 288 static void ath5k_detach(struct pci_dev *pdev, 289 struct ieee80211_hw *hw); 290 /* Channel/mode setup */ 291 static inline short ath5k_ieee2mhz(short chan); 292 static unsigned int ath5k_copy_channels(struct ath5k_hw *ah, 293 struct ieee80211_channel *channels, 294 unsigned int mode, 295 unsigned int max); 296 static int ath5k_setup_bands(struct ieee80211_hw *hw); 297 static int ath5k_chan_set(struct ath5k_softc *sc, 298 struct ieee80211_channel *chan); 299 static void ath5k_setcurmode(struct ath5k_softc *sc, 300 unsigned int mode); 301 static void ath5k_mode_setup(struct ath5k_softc *sc); 302 303 /* Descriptor setup */ 304 static int ath5k_desc_alloc(struct ath5k_softc *sc, 305 struct pci_dev *pdev); 306 static void ath5k_desc_free(struct ath5k_softc *sc, 307 struct pci_dev *pdev); 308 /* Buffers setup */ 309 static int ath5k_rxbuf_setup(struct ath5k_softc *sc, 310 struct ath5k_buf *bf); 311 static int ath5k_txbuf_setup(struct ath5k_softc *sc, 312 struct ath5k_buf *bf, 313 struct ath5k_txq *txq, int padsize); 314 315 static inline void ath5k_txbuf_free_skb(struct ath5k_softc *sc, 316 struct ath5k_buf *bf) 317 { 318 BUG_ON(!bf); 319 if (!bf->skb) 320 return; 321 pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len, 322 PCI_DMA_TODEVICE); 323 dev_kfree_skb_any(bf->skb); 324 bf->skb = NULL; 325 bf->skbaddr = 0; 326 bf->desc->ds_data = 0; 327 } 328 329 static inline void ath5k_rxbuf_free_skb(struct ath5k_softc *sc, 330 struct ath5k_buf *bf) 331 { 332 struct ath5k_hw *ah = sc->ah; 333 struct ath_common *common = ath5k_hw_common(ah); 334 335 BUG_ON(!bf); 336 if (!bf->skb) 337 return; 338 pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize, 339 PCI_DMA_FROMDEVICE); 340 dev_kfree_skb_any(bf->skb); 341 bf->skb = NULL; 342 bf->skbaddr = 0; 343 bf->desc->ds_data = 0; 344 } 345 346 347 /* Queues setup */ 348 static struct ath5k_txq *ath5k_txq_setup(struct ath5k_softc *sc, 349 int qtype, int subtype); 350 static int ath5k_beaconq_setup(struct ath5k_hw *ah); 351 static int ath5k_beaconq_config(struct ath5k_softc *sc); 352 static void ath5k_txq_drainq(struct ath5k_softc *sc, 353 struct ath5k_txq *txq); 354 static void ath5k_txq_cleanup(struct ath5k_softc *sc); 355 static void ath5k_txq_release(struct ath5k_softc *sc); 356 /* Rx handling */ 357 static int ath5k_rx_start(struct ath5k_softc *sc); 358 static void ath5k_rx_stop(struct ath5k_softc *sc); 359 static unsigned int ath5k_rx_decrypted(struct ath5k_softc *sc, 360 struct sk_buff *skb, 361 struct ath5k_rx_status *rs); 362 static void ath5k_tasklet_rx(unsigned long data); 363 /* Tx handling */ 364 static void ath5k_tx_processq(struct ath5k_softc *sc, 365 struct ath5k_txq *txq); 366 static void ath5k_tasklet_tx(unsigned long data); 367 /* Beacon handling */ 368 static int ath5k_beacon_setup(struct ath5k_softc *sc, 369 struct ath5k_buf *bf); 370 static void ath5k_beacon_send(struct ath5k_softc *sc); 371 static void ath5k_beacon_config(struct ath5k_softc *sc); 372 static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf); 373 static void ath5k_tasklet_beacon(unsigned long data); 374 static void ath5k_tasklet_ani(unsigned long data); 375 376 static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp) 377 { 378 u64 tsf = ath5k_hw_get_tsf64(ah); 379 380 if ((tsf & 0x7fff) < rstamp) 381 tsf -= 0x8000; 382 383 return (tsf & ~0x7fff) | rstamp; 384 } 385 386 /* Interrupt handling */ 387 static int ath5k_init(struct ath5k_softc *sc); 388 static int ath5k_stop_locked(struct ath5k_softc *sc); 389 static int ath5k_stop_hw(struct ath5k_softc *sc); 390 static irqreturn_t ath5k_intr(int irq, void *dev_id); 391 static void ath5k_reset_work(struct work_struct *work); 392 393 static void ath5k_tasklet_calibrate(unsigned long data); 394 395 /* 396 * Module init/exit functions 397 */ 398 static int __init 399 init_ath5k_pci(void) 400 { 401 int ret; 402 403 ath5k_debug_init(); 404 405 ret = pci_register_driver(&ath5k_pci_driver); 406 if (ret) { 407 printk(KERN_ERR "ath5k_pci: can't register pci driver\n"); 408 return ret; 409 } 410 411 return 0; 412 } 413 414 static void __exit 415 exit_ath5k_pci(void) 416 { 417 pci_unregister_driver(&ath5k_pci_driver); 418 419 ath5k_debug_finish(); 420 } 421 422 module_init(init_ath5k_pci); 423 module_exit(exit_ath5k_pci); 424 425 426 /********************\ 427 * PCI Initialization * 428 \********************/ 429 430 static const char * 431 ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val) 432 { 433 const char *name = "xxxxx"; 434 unsigned int i; 435 436 for (i = 0; i < ARRAY_SIZE(srev_names); i++) { 437 if (srev_names[i].sr_type != type) 438 continue; 439 440 if ((val & 0xf0) == srev_names[i].sr_val) 441 name = srev_names[i].sr_name; 442 443 if ((val & 0xff) == srev_names[i].sr_val) { 444 name = srev_names[i].sr_name; 445 break; 446 } 447 } 448 449 return name; 450 } 451 static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset) 452 { 453 struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv; 454 return ath5k_hw_reg_read(ah, reg_offset); 455 } 456 457 static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset) 458 { 459 struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv; 460 ath5k_hw_reg_write(ah, val, reg_offset); 461 } 462 463 static const struct ath_ops ath5k_common_ops = { 464 .read = ath5k_ioread32, 465 .write = ath5k_iowrite32, 466 }; 467 468 static int __devinit 469 ath5k_pci_probe(struct pci_dev *pdev, 470 const struct pci_device_id *id) 471 { 472 void __iomem *mem; 473 struct ath5k_softc *sc; 474 struct ath_common *common; 475 struct ieee80211_hw *hw; 476 int ret; 477 u8 csz; 478 479 ret = pci_enable_device(pdev); 480 if (ret) { 481 dev_err(&pdev->dev, "can't enable device\n"); 482 goto err; 483 } 484 485 /* XXX 32-bit addressing only */ 486 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 487 if (ret) { 488 dev_err(&pdev->dev, "32-bit DMA not available\n"); 489 goto err_dis; 490 } 491 492 /* 493 * Cache line size is used to size and align various 494 * structures used to communicate with the hardware. 495 */ 496 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz); 497 if (csz == 0) { 498 /* 499 * Linux 2.4.18 (at least) writes the cache line size 500 * register as a 16-bit wide register which is wrong. 501 * We must have this setup properly for rx buffer 502 * DMA to work so force a reasonable value here if it 503 * comes up zero. 504 */ 505 csz = L1_CACHE_BYTES >> 2; 506 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz); 507 } 508 /* 509 * The default setting of latency timer yields poor results, 510 * set it to the value used by other systems. It may be worth 511 * tweaking this setting more. 512 */ 513 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8); 514 515 /* Enable bus mastering */ 516 pci_set_master(pdev); 517 518 /* 519 * Disable the RETRY_TIMEOUT register (0x41) to keep 520 * PCI Tx retries from interfering with C3 CPU state. 521 */ 522 pci_write_config_byte(pdev, 0x41, 0); 523 524 ret = pci_request_region(pdev, 0, "ath5k"); 525 if (ret) { 526 dev_err(&pdev->dev, "cannot reserve PCI memory region\n"); 527 goto err_dis; 528 } 529 530 mem = pci_iomap(pdev, 0, 0); 531 if (!mem) { 532 dev_err(&pdev->dev, "cannot remap PCI memory region\n") ; 533 ret = -EIO; 534 goto err_reg; 535 } 536 537 /* 538 * Allocate hw (mac80211 main struct) 539 * and hw->priv (driver private data) 540 */ 541 hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops); 542 if (hw == NULL) { 543 dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n"); 544 ret = -ENOMEM; 545 goto err_map; 546 } 547 548 dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy)); 549 550 /* Initialize driver private data */ 551 SET_IEEE80211_DEV(hw, &pdev->dev); 552 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 553 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 554 IEEE80211_HW_SIGNAL_DBM; 555 556 hw->wiphy->interface_modes = 557 BIT(NL80211_IFTYPE_AP) | 558 BIT(NL80211_IFTYPE_STATION) | 559 BIT(NL80211_IFTYPE_ADHOC) | 560 BIT(NL80211_IFTYPE_MESH_POINT); 561 562 hw->extra_tx_headroom = 2; 563 hw->channel_change_time = 5000; 564 sc = hw->priv; 565 sc->hw = hw; 566 sc->pdev = pdev; 567 568 ath5k_debug_init_device(sc); 569 570 /* 571 * Mark the device as detached to avoid processing 572 * interrupts until setup is complete. 573 */ 574 __set_bit(ATH_STAT_INVALID, sc->status); 575 576 sc->iobase = mem; /* So we can unmap it on detach */ 577 sc->opmode = NL80211_IFTYPE_STATION; 578 sc->bintval = 1000; 579 mutex_init(&sc->lock); 580 spin_lock_init(&sc->rxbuflock); 581 spin_lock_init(&sc->txbuflock); 582 spin_lock_init(&sc->block); 583 584 /* Set private data */ 585 pci_set_drvdata(pdev, sc); 586 587 /* Setup interrupt handler */ 588 ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); 589 if (ret) { 590 ATH5K_ERR(sc, "request_irq failed\n"); 591 goto err_free; 592 } 593 594 /*If we passed the test malloc a ath5k_hw struct*/ 595 sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL); 596 if (!sc->ah) { 597 ret = -ENOMEM; 598 ATH5K_ERR(sc, "out of memory\n"); 599 goto err_irq; 600 } 601 602 sc->ah->ah_sc = sc; 603 sc->ah->ah_iobase = sc->iobase; 604 common = ath5k_hw_common(sc->ah); 605 common->ops = &ath5k_common_ops; 606 common->ah = sc->ah; 607 common->hw = hw; 608 common->cachelsz = csz << 2; /* convert to bytes */ 609 610 /* Initialize device */ 611 ret = ath5k_hw_attach(sc); 612 if (ret) { 613 goto err_free_ah; 614 } 615 616 /* set up multi-rate retry capabilities */ 617 if (sc->ah->ah_version == AR5K_AR5212) { 618 hw->max_rates = 4; 619 hw->max_rate_tries = 11; 620 } 621 622 /* Finish private driver data initialization */ 623 ret = ath5k_attach(pdev, hw); 624 if (ret) 625 goto err_ah; 626 627 ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n", 628 ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev), 629 sc->ah->ah_mac_srev, 630 sc->ah->ah_phy_revision); 631 632 if (!sc->ah->ah_single_chip) { 633 /* Single chip radio (!RF5111) */ 634 if (sc->ah->ah_radio_5ghz_revision && 635 !sc->ah->ah_radio_2ghz_revision) { 636 /* No 5GHz support -> report 2GHz radio */ 637 if (!test_bit(AR5K_MODE_11A, 638 sc->ah->ah_capabilities.cap_mode)) { 639 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n", 640 ath5k_chip_name(AR5K_VERSION_RAD, 641 sc->ah->ah_radio_5ghz_revision), 642 sc->ah->ah_radio_5ghz_revision); 643 /* No 2GHz support (5110 and some 644 * 5Ghz only cards) -> report 5Ghz radio */ 645 } else if (!test_bit(AR5K_MODE_11B, 646 sc->ah->ah_capabilities.cap_mode)) { 647 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n", 648 ath5k_chip_name(AR5K_VERSION_RAD, 649 sc->ah->ah_radio_5ghz_revision), 650 sc->ah->ah_radio_5ghz_revision); 651 /* Multiband radio */ 652 } else { 653 ATH5K_INFO(sc, "RF%s multiband radio found" 654 " (0x%x)\n", 655 ath5k_chip_name(AR5K_VERSION_RAD, 656 sc->ah->ah_radio_5ghz_revision), 657 sc->ah->ah_radio_5ghz_revision); 658 } 659 } 660 /* Multi chip radio (RF5111 - RF2111) -> 661 * report both 2GHz/5GHz radios */ 662 else if (sc->ah->ah_radio_5ghz_revision && 663 sc->ah->ah_radio_2ghz_revision){ 664 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n", 665 ath5k_chip_name(AR5K_VERSION_RAD, 666 sc->ah->ah_radio_5ghz_revision), 667 sc->ah->ah_radio_5ghz_revision); 668 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n", 669 ath5k_chip_name(AR5K_VERSION_RAD, 670 sc->ah->ah_radio_2ghz_revision), 671 sc->ah->ah_radio_2ghz_revision); 672 } 673 } 674 675 676 /* ready to process interrupts */ 677 __clear_bit(ATH_STAT_INVALID, sc->status); 678 679 return 0; 680 err_ah: 681 ath5k_hw_detach(sc->ah); 682 err_irq: 683 free_irq(pdev->irq, sc); 684 err_free_ah: 685 kfree(sc->ah); 686 err_free: 687 ieee80211_free_hw(hw); 688 err_map: 689 pci_iounmap(pdev, mem); 690 err_reg: 691 pci_release_region(pdev, 0); 692 err_dis: 693 pci_disable_device(pdev); 694 err: 695 return ret; 696 } 697 698 static void __devexit 699 ath5k_pci_remove(struct pci_dev *pdev) 700 { 701 struct ath5k_softc *sc = pci_get_drvdata(pdev); 702 703 ath5k_debug_finish_device(sc); 704 ath5k_detach(pdev, sc->hw); 705 ath5k_hw_detach(sc->ah); 706 kfree(sc->ah); 707 free_irq(pdev->irq, sc); 708 pci_iounmap(pdev, sc->iobase); 709 pci_release_region(pdev, 0); 710 pci_disable_device(pdev); 711 ieee80211_free_hw(sc->hw); 712 } 713 714 #ifdef CONFIG_PM_SLEEP 715 static int ath5k_pci_suspend(struct device *dev) 716 { 717 struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev)); 718 719 ath5k_led_off(sc); 720 return 0; 721 } 722 723 static int ath5k_pci_resume(struct device *dev) 724 { 725 struct pci_dev *pdev = to_pci_dev(dev); 726 struct ath5k_softc *sc = pci_get_drvdata(pdev); 727 728 /* 729 * Suspend/Resume resets the PCI configuration space, so we have to 730 * re-disable the RETRY_TIMEOUT register (0x41) to keep 731 * PCI Tx retries from interfering with C3 CPU state 732 */ 733 pci_write_config_byte(pdev, 0x41, 0); 734 735 ath5k_led_enable(sc); 736 return 0; 737 } 738 #endif /* CONFIG_PM_SLEEP */ 739 740 741 /***********************\ 742 * Driver Initialization * 743 \***********************/ 744 745 static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) 746 { 747 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 748 struct ath5k_softc *sc = hw->priv; 749 struct ath_regulatory *regulatory = ath5k_hw_regulatory(sc->ah); 750 751 return ath_reg_notifier_apply(wiphy, request, regulatory); 752 } 753 754 static int 755 ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw) 756 { 757 struct ath5k_softc *sc = hw->priv; 758 struct ath5k_hw *ah = sc->ah; 759 struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah); 760 u8 mac[ETH_ALEN] = {}; 761 int ret; 762 763 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device); 764 765 /* 766 * Check if the MAC has multi-rate retry support. 767 * We do this by trying to setup a fake extended 768 * descriptor. MAC's that don't have support will 769 * return false w/o doing anything. MAC's that do 770 * support it will return true w/o doing anything. 771 */ 772 ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0); 773 774 if (ret < 0) 775 goto err; 776 if (ret > 0) 777 __set_bit(ATH_STAT_MRRETRY, sc->status); 778 779 /* 780 * Collect the channel list. The 802.11 layer 781 * is resposible for filtering this list based 782 * on settings like the phy mode and regulatory 783 * domain restrictions. 784 */ 785 ret = ath5k_setup_bands(hw); 786 if (ret) { 787 ATH5K_ERR(sc, "can't get channels\n"); 788 goto err; 789 } 790 791 /* NB: setup here so ath5k_rate_update is happy */ 792 if (test_bit(AR5K_MODE_11A, ah->ah_modes)) 793 ath5k_setcurmode(sc, AR5K_MODE_11A); 794 else 795 ath5k_setcurmode(sc, AR5K_MODE_11B); 796 797 /* 798 * Allocate tx+rx descriptors and populate the lists. 799 */ 800 ret = ath5k_desc_alloc(sc, pdev); 801 if (ret) { 802 ATH5K_ERR(sc, "can't allocate descriptors\n"); 803 goto err; 804 } 805 806 /* 807 * Allocate hardware transmit queues: one queue for 808 * beacon frames and one data queue for each QoS 809 * priority. Note that hw functions handle reseting 810 * these queues at the needed time. 811 */ 812 ret = ath5k_beaconq_setup(ah); 813 if (ret < 0) { 814 ATH5K_ERR(sc, "can't setup a beacon xmit queue\n"); 815 goto err_desc; 816 } 817 sc->bhalq = ret; 818 sc->cabq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0); 819 if (IS_ERR(sc->cabq)) { 820 ATH5K_ERR(sc, "can't setup cab queue\n"); 821 ret = PTR_ERR(sc->cabq); 822 goto err_bhal; 823 } 824 825 sc->txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK); 826 if (IS_ERR(sc->txq)) { 827 ATH5K_ERR(sc, "can't setup xmit queue\n"); 828 ret = PTR_ERR(sc->txq); 829 goto err_queues; 830 } 831 832 tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc); 833 tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc); 834 tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc); 835 tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc); 836 tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc); 837 838 INIT_WORK(&sc->reset_work, ath5k_reset_work); 839 840 ret = ath5k_eeprom_read_mac(ah, mac); 841 if (ret) { 842 ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n", 843 sc->pdev->device); 844 goto err_queues; 845 } 846 847 SET_IEEE80211_PERM_ADDR(hw, mac); 848 /* All MAC address bits matter for ACKs */ 849 memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN); 850 ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask); 851 852 regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain; 853 ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier); 854 if (ret) { 855 ATH5K_ERR(sc, "can't initialize regulatory system\n"); 856 goto err_queues; 857 } 858 859 ret = ieee80211_register_hw(hw); 860 if (ret) { 861 ATH5K_ERR(sc, "can't register ieee80211 hw\n"); 862 goto err_queues; 863 } 864 865 if (!ath_is_world_regd(regulatory)) 866 regulatory_hint(hw->wiphy, regulatory->alpha2); 867 868 ath5k_init_leds(sc); 869 870 ath5k_sysfs_register(sc); 871 872 return 0; 873 err_queues: 874 ath5k_txq_release(sc); 875 err_bhal: 876 ath5k_hw_release_tx_queue(ah, sc->bhalq); 877 err_desc: 878 ath5k_desc_free(sc, pdev); 879 err: 880 return ret; 881 } 882 883 static void 884 ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw) 885 { 886 struct ath5k_softc *sc = hw->priv; 887 888 /* 889 * NB: the order of these is important: 890 * o call the 802.11 layer before detaching ath5k_hw to 891 * insure callbacks into the driver to delete global 892 * key cache entries can be handled 893 * o reclaim the tx queue data structures after calling 894 * the 802.11 layer as we'll get called back to reclaim 895 * node state and potentially want to use them 896 * o to cleanup the tx queues the hal is called, so detach 897 * it last 898 * XXX: ??? detach ath5k_hw ??? 899 * Other than that, it's straightforward... 900 */ 901 ieee80211_unregister_hw(hw); 902 ath5k_desc_free(sc, pdev); 903 ath5k_txq_release(sc); 904 ath5k_hw_release_tx_queue(sc->ah, sc->bhalq); 905 ath5k_unregister_leds(sc); 906 907 ath5k_sysfs_unregister(sc); 908 /* 909 * NB: can't reclaim these until after ieee80211_ifdetach 910 * returns because we'll get called back to reclaim node 911 * state and potentially want to use them. 912 */ 913 } 914 915 916 917 918 /********************\ 919 * Channel/mode setup * 920 \********************/ 921 922 /* 923 * Convert IEEE channel number to MHz frequency. 924 */ 925 static inline short 926 ath5k_ieee2mhz(short chan) 927 { 928 if (chan <= 14 || chan >= 27) 929 return ieee80211chan2mhz(chan); 930 else 931 return 2212 + chan * 20; 932 } 933 934 /* 935 * Returns true for the channel numbers used without all_channels modparam. 936 */ 937 static bool ath5k_is_standard_channel(short chan) 938 { 939 return ((chan <= 14) || 940 /* UNII 1,2 */ 941 ((chan & 3) == 0 && chan >= 36 && chan <= 64) || 942 /* midband */ 943 ((chan & 3) == 0 && chan >= 100 && chan <= 140) || 944 /* UNII-3 */ 945 ((chan & 3) == 1 && chan >= 149 && chan <= 165)); 946 } 947 948 static unsigned int 949 ath5k_copy_channels(struct ath5k_hw *ah, 950 struct ieee80211_channel *channels, 951 unsigned int mode, 952 unsigned int max) 953 { 954 unsigned int i, count, size, chfreq, freq, ch; 955 956 if (!test_bit(mode, ah->ah_modes)) 957 return 0; 958 959 switch (mode) { 960 case AR5K_MODE_11A: 961 case AR5K_MODE_11A_TURBO: 962 /* 1..220, but 2GHz frequencies are filtered by check_channel */ 963 size = 220 ; 964 chfreq = CHANNEL_5GHZ; 965 break; 966 case AR5K_MODE_11B: 967 case AR5K_MODE_11G: 968 case AR5K_MODE_11G_TURBO: 969 size = 26; 970 chfreq = CHANNEL_2GHZ; 971 break; 972 default: 973 ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n"); 974 return 0; 975 } 976 977 for (i = 0, count = 0; i < size && max > 0; i++) { 978 ch = i + 1 ; 979 freq = ath5k_ieee2mhz(ch); 980 981 /* Check if channel is supported by the chipset */ 982 if (!ath5k_channel_ok(ah, freq, chfreq)) 983 continue; 984 985 if (!modparam_all_channels && !ath5k_is_standard_channel(ch)) 986 continue; 987 988 /* Write channel info and increment counter */ 989 channels[count].center_freq = freq; 990 channels[count].band = (chfreq == CHANNEL_2GHZ) ? 991 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 992 switch (mode) { 993 case AR5K_MODE_11A: 994 case AR5K_MODE_11G: 995 channels[count].hw_value = chfreq | CHANNEL_OFDM; 996 break; 997 case AR5K_MODE_11A_TURBO: 998 case AR5K_MODE_11G_TURBO: 999 channels[count].hw_value = chfreq | 1000 CHANNEL_OFDM | CHANNEL_TURBO; 1001 break; 1002 case AR5K_MODE_11B: 1003 channels[count].hw_value = CHANNEL_B; 1004 } 1005 1006 count++; 1007 max--; 1008 } 1009 1010 return count; 1011 } 1012 1013 static void 1014 ath5k_setup_rate_idx(struct ath5k_softc *sc, struct ieee80211_supported_band *b) 1015 { 1016 u8 i; 1017 1018 for (i = 0; i < AR5K_MAX_RATES; i++) 1019 sc->rate_idx[b->band][i] = -1; 1020 1021 for (i = 0; i < b->n_bitrates; i++) { 1022 sc->rate_idx[b->band][b->bitrates[i].hw_value] = i; 1023 if (b->bitrates[i].hw_value_short) 1024 sc->rate_idx[b->band][b->bitrates[i].hw_value_short] = i; 1025 } 1026 } 1027 1028 static int 1029 ath5k_setup_bands(struct ieee80211_hw *hw) 1030 { 1031 struct ath5k_softc *sc = hw->priv; 1032 struct ath5k_hw *ah = sc->ah; 1033 struct ieee80211_supported_band *sband; 1034 int max_c, count_c = 0; 1035 int i; 1036 1037 BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS); 1038 max_c = ARRAY_SIZE(sc->channels); 1039 1040 /* 2GHz band */ 1041 sband = &sc->sbands[IEEE80211_BAND_2GHZ]; 1042 sband->band = IEEE80211_BAND_2GHZ; 1043 sband->bitrates = &sc->rates[IEEE80211_BAND_2GHZ][0]; 1044 1045 if (test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) { 1046 /* G mode */ 1047 memcpy(sband->bitrates, &ath5k_rates[0], 1048 sizeof(struct ieee80211_rate) * 12); 1049 sband->n_bitrates = 12; 1050 1051 sband->channels = sc->channels; 1052 sband->n_channels = ath5k_copy_channels(ah, sband->channels, 1053 AR5K_MODE_11G, max_c); 1054 1055 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; 1056 count_c = sband->n_channels; 1057 max_c -= count_c; 1058 } else if (test_bit(AR5K_MODE_11B, sc->ah->ah_capabilities.cap_mode)) { 1059 /* B mode */ 1060 memcpy(sband->bitrates, &ath5k_rates[0], 1061 sizeof(struct ieee80211_rate) * 4); 1062 sband->n_bitrates = 4; 1063 1064 /* 5211 only supports B rates and uses 4bit rate codes 1065 * (e.g normally we have 0x1B for 1M, but on 5211 we have 0x0B) 1066 * fix them up here: 1067 */ 1068 if (ah->ah_version == AR5K_AR5211) { 1069 for (i = 0; i < 4; i++) { 1070 sband->bitrates[i].hw_value = 1071 sband->bitrates[i].hw_value & 0xF; 1072 sband->bitrates[i].hw_value_short = 1073 sband->bitrates[i].hw_value_short & 0xF; 1074 } 1075 } 1076 1077 sband->channels = sc->channels; 1078 sband->n_channels = ath5k_copy_channels(ah, sband->channels, 1079 AR5K_MODE_11B, max_c); 1080 1081 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; 1082 count_c = sband->n_channels; 1083 max_c -= count_c; 1084 } 1085 ath5k_setup_rate_idx(sc, sband); 1086 1087 /* 5GHz band, A mode */ 1088 if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) { 1089 sband = &sc->sbands[IEEE80211_BAND_5GHZ]; 1090 sband->band = IEEE80211_BAND_5GHZ; 1091 sband->bitrates = &sc->rates[IEEE80211_BAND_5GHZ][0]; 1092 1093 memcpy(sband->bitrates, &ath5k_rates[4], 1094 sizeof(struct ieee80211_rate) * 8); 1095 sband->n_bitrates = 8; 1096 1097 sband->channels = &sc->channels[count_c]; 1098 sband->n_channels = ath5k_copy_channels(ah, sband->channels, 1099 AR5K_MODE_11A, max_c); 1100 1101 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; 1102 } 1103 ath5k_setup_rate_idx(sc, sband); 1104 1105 ath5k_debug_dump_bands(sc); 1106 1107 return 0; 1108 } 1109 1110 /* 1111 * Set/change channels. We always reset the chip. 1112 * To accomplish this we must first cleanup any pending DMA, 1113 * then restart stuff after a la ath5k_init. 1114 * 1115 * Called with sc->lock. 1116 */ 1117 static int 1118 ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan) 1119 { 1120 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, 1121 "channel set, resetting (%u -> %u MHz)\n", 1122 sc->curchan->center_freq, chan->center_freq); 1123 1124 /* 1125 * To switch channels clear any pending DMA operations; 1126 * wait long enough for the RX fifo to drain, reset the 1127 * hardware at the new frequency, and then re-enable 1128 * the relevant bits of the h/w. 1129 */ 1130 return ath5k_reset(sc, chan); 1131 } 1132 1133 static void 1134 ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode) 1135 { 1136 sc->curmode = mode; 1137 1138 if (mode == AR5K_MODE_11A) { 1139 sc->curband = &sc->sbands[IEEE80211_BAND_5GHZ]; 1140 } else { 1141 sc->curband = &sc->sbands[IEEE80211_BAND_2GHZ]; 1142 } 1143 } 1144 1145 static void 1146 ath5k_mode_setup(struct ath5k_softc *sc) 1147 { 1148 struct ath5k_hw *ah = sc->ah; 1149 u32 rfilt; 1150 1151 /* configure rx filter */ 1152 rfilt = sc->filter_flags; 1153 ath5k_hw_set_rx_filter(ah, rfilt); 1154 1155 if (ath5k_hw_hasbssidmask(ah)) 1156 ath5k_hw_set_bssid_mask(ah, sc->bssidmask); 1157 1158 /* configure operational mode */ 1159 ath5k_hw_set_opmode(ah, sc->opmode); 1160 1161 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d\n", sc->opmode); 1162 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt); 1163 } 1164 1165 static inline int 1166 ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix) 1167 { 1168 int rix; 1169 1170 /* return base rate on errors */ 1171 if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES, 1172 "hw_rix out of bounds: %x\n", hw_rix)) 1173 return 0; 1174 1175 rix = sc->rate_idx[sc->curband->band][hw_rix]; 1176 if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix)) 1177 rix = 0; 1178 1179 return rix; 1180 } 1181 1182 /***************\ 1183 * Buffers setup * 1184 \***************/ 1185 1186 static 1187 struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr) 1188 { 1189 struct ath_common *common = ath5k_hw_common(sc->ah); 1190 struct sk_buff *skb; 1191 1192 /* 1193 * Allocate buffer with headroom_needed space for the 1194 * fake physical layer header at the start. 1195 */ 1196 skb = ath_rxbuf_alloc(common, 1197 common->rx_bufsize, 1198 GFP_ATOMIC); 1199 1200 if (!skb) { 1201 ATH5K_ERR(sc, "can't alloc skbuff of size %u\n", 1202 common->rx_bufsize); 1203 return NULL; 1204 } 1205 1206 *skb_addr = pci_map_single(sc->pdev, 1207 skb->data, common->rx_bufsize, 1208 PCI_DMA_FROMDEVICE); 1209 if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) { 1210 ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__); 1211 dev_kfree_skb(skb); 1212 return NULL; 1213 } 1214 return skb; 1215 } 1216 1217 static int 1218 ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) 1219 { 1220 struct ath5k_hw *ah = sc->ah; 1221 struct sk_buff *skb = bf->skb; 1222 struct ath5k_desc *ds; 1223 int ret; 1224 1225 if (!skb) { 1226 skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr); 1227 if (!skb) 1228 return -ENOMEM; 1229 bf->skb = skb; 1230 } 1231 1232 /* 1233 * Setup descriptors. For receive we always terminate 1234 * the descriptor list with a self-linked entry so we'll 1235 * not get overrun under high load (as can happen with a 1236 * 5212 when ANI processing enables PHY error frames). 1237 * 1238 * To ensure the last descriptor is self-linked we create 1239 * each descriptor as self-linked and add it to the end. As 1240 * each additional descriptor is added the previous self-linked 1241 * entry is "fixed" naturally. This should be safe even 1242 * if DMA is happening. When processing RX interrupts we 1243 * never remove/process the last, self-linked, entry on the 1244 * descriptor list. This ensures the hardware always has 1245 * someplace to write a new frame. 1246 */ 1247 ds = bf->desc; 1248 ds->ds_link = bf->daddr; /* link to self */ 1249 ds->ds_data = bf->skbaddr; 1250 ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0); 1251 if (ret) { 1252 ATH5K_ERR(sc, "%s: could not setup RX desc\n", __func__); 1253 return ret; 1254 } 1255 1256 if (sc->rxlink != NULL) 1257 *sc->rxlink = bf->daddr; 1258 sc->rxlink = &ds->ds_link; 1259 return 0; 1260 } 1261 1262 static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb) 1263 { 1264 struct ieee80211_hdr *hdr; 1265 enum ath5k_pkt_type htype; 1266 __le16 fc; 1267 1268 hdr = (struct ieee80211_hdr *)skb->data; 1269 fc = hdr->frame_control; 1270 1271 if (ieee80211_is_beacon(fc)) 1272 htype = AR5K_PKT_TYPE_BEACON; 1273 else if (ieee80211_is_probe_resp(fc)) 1274 htype = AR5K_PKT_TYPE_PROBE_RESP; 1275 else if (ieee80211_is_atim(fc)) 1276 htype = AR5K_PKT_TYPE_ATIM; 1277 else if (ieee80211_is_pspoll(fc)) 1278 htype = AR5K_PKT_TYPE_PSPOLL; 1279 else 1280 htype = AR5K_PKT_TYPE_NORMAL; 1281 1282 return htype; 1283 } 1284 1285 static int 1286 ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf, 1287 struct ath5k_txq *txq, int padsize) 1288 { 1289 struct ath5k_hw *ah = sc->ah; 1290 struct ath5k_desc *ds = bf->desc; 1291 struct sk_buff *skb = bf->skb; 1292 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1293 unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID; 1294 struct ieee80211_rate *rate; 1295 unsigned int mrr_rate[3], mrr_tries[3]; 1296 int i, ret; 1297 u16 hw_rate; 1298 u16 cts_rate = 0; 1299 u16 duration = 0; 1300 u8 rc_flags; 1301 1302 flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK; 1303 1304 /* XXX endianness */ 1305 bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len, 1306 PCI_DMA_TODEVICE); 1307 1308 rate = ieee80211_get_tx_rate(sc->hw, info); 1309 1310 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 1311 flags |= AR5K_TXDESC_NOACK; 1312 1313 rc_flags = info->control.rates[0].flags; 1314 hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ? 1315 rate->hw_value_short : rate->hw_value; 1316 1317 pktlen = skb->len; 1318 1319 /* FIXME: If we are in g mode and rate is a CCK rate 1320 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta 1321 * from tx power (value is in dB units already) */ 1322 if (info->control.hw_key) { 1323 keyidx = info->control.hw_key->hw_key_idx; 1324 pktlen += info->control.hw_key->icv_len; 1325 } 1326 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) { 1327 flags |= AR5K_TXDESC_RTSENA; 1328 cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value; 1329 duration = le16_to_cpu(ieee80211_rts_duration(sc->hw, 1330 sc->vif, pktlen, info)); 1331 } 1332 if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { 1333 flags |= AR5K_TXDESC_CTSENA; 1334 cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value; 1335 duration = le16_to_cpu(ieee80211_ctstoself_duration(sc->hw, 1336 sc->vif, pktlen, info)); 1337 } 1338 ret = ah->ah_setup_tx_desc(ah, ds, pktlen, 1339 ieee80211_get_hdrlen_from_skb(skb), padsize, 1340 get_hw_packet_type(skb), 1341 (sc->power_level * 2), 1342 hw_rate, 1343 info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags, 1344 cts_rate, duration); 1345 if (ret) 1346 goto err_unmap; 1347 1348 memset(mrr_rate, 0, sizeof(mrr_rate)); 1349 memset(mrr_tries, 0, sizeof(mrr_tries)); 1350 for (i = 0; i < 3; i++) { 1351 rate = ieee80211_get_alt_retry_rate(sc->hw, info, i); 1352 if (!rate) 1353 break; 1354 1355 mrr_rate[i] = rate->hw_value; 1356 mrr_tries[i] = info->control.rates[i + 1].count; 1357 } 1358 1359 ath5k_hw_setup_mrr_tx_desc(ah, ds, 1360 mrr_rate[0], mrr_tries[0], 1361 mrr_rate[1], mrr_tries[1], 1362 mrr_rate[2], mrr_tries[2]); 1363 1364 ds->ds_link = 0; 1365 ds->ds_data = bf->skbaddr; 1366 1367 spin_lock_bh(&txq->lock); 1368 list_add_tail(&bf->list, &txq->q); 1369 if (txq->link == NULL) /* is this first packet? */ 1370 ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr); 1371 else /* no, so only link it */ 1372 *txq->link = bf->daddr; 1373 1374 txq->link = &ds->ds_link; 1375 ath5k_hw_start_tx_dma(ah, txq->qnum); 1376 mmiowb(); 1377 spin_unlock_bh(&txq->lock); 1378 1379 return 0; 1380 err_unmap: 1381 pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE); 1382 return ret; 1383 } 1384 1385 /*******************\ 1386 * Descriptors setup * 1387 \*******************/ 1388 1389 static int 1390 ath5k_desc_alloc(struct ath5k_softc *sc, struct pci_dev *pdev) 1391 { 1392 struct ath5k_desc *ds; 1393 struct ath5k_buf *bf; 1394 dma_addr_t da; 1395 unsigned int i; 1396 int ret; 1397 1398 /* allocate descriptors */ 1399 sc->desc_len = sizeof(struct ath5k_desc) * 1400 (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1); 1401 sc->desc = pci_alloc_consistent(pdev, sc->desc_len, &sc->desc_daddr); 1402 if (sc->desc == NULL) { 1403 ATH5K_ERR(sc, "can't allocate descriptors\n"); 1404 ret = -ENOMEM; 1405 goto err; 1406 } 1407 ds = sc->desc; 1408 da = sc->desc_daddr; 1409 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n", 1410 ds, sc->desc_len, (unsigned long long)sc->desc_daddr); 1411 1412 bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF, 1413 sizeof(struct ath5k_buf), GFP_KERNEL); 1414 if (bf == NULL) { 1415 ATH5K_ERR(sc, "can't allocate bufptr\n"); 1416 ret = -ENOMEM; 1417 goto err_free; 1418 } 1419 sc->bufptr = bf; 1420 1421 INIT_LIST_HEAD(&sc->rxbuf); 1422 for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) { 1423 bf->desc = ds; 1424 bf->daddr = da; 1425 list_add_tail(&bf->list, &sc->rxbuf); 1426 } 1427 1428 INIT_LIST_HEAD(&sc->txbuf); 1429 sc->txbuf_len = ATH_TXBUF; 1430 for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, 1431 da += sizeof(*ds)) { 1432 bf->desc = ds; 1433 bf->daddr = da; 1434 list_add_tail(&bf->list, &sc->txbuf); 1435 } 1436 1437 /* beacon buffer */ 1438 bf->desc = ds; 1439 bf->daddr = da; 1440 sc->bbuf = bf; 1441 1442 return 0; 1443 err_free: 1444 pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr); 1445 err: 1446 sc->desc = NULL; 1447 return ret; 1448 } 1449 1450 static void 1451 ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev) 1452 { 1453 struct ath5k_buf *bf; 1454 1455 ath5k_txbuf_free_skb(sc, sc->bbuf); 1456 list_for_each_entry(bf, &sc->txbuf, list) 1457 ath5k_txbuf_free_skb(sc, bf); 1458 list_for_each_entry(bf, &sc->rxbuf, list) 1459 ath5k_rxbuf_free_skb(sc, bf); 1460 1461 /* Free memory associated with all descriptors */ 1462 pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr); 1463 sc->desc = NULL; 1464 sc->desc_daddr = 0; 1465 1466 kfree(sc->bufptr); 1467 sc->bufptr = NULL; 1468 sc->bbuf = NULL; 1469 } 1470 1471 1472 1473 1474 1475 /**************\ 1476 * Queues setup * 1477 \**************/ 1478 1479 static struct ath5k_txq * 1480 ath5k_txq_setup(struct ath5k_softc *sc, 1481 int qtype, int subtype) 1482 { 1483 struct ath5k_hw *ah = sc->ah; 1484 struct ath5k_txq *txq; 1485 struct ath5k_txq_info qi = { 1486 .tqi_subtype = subtype, 1487 .tqi_aifs = AR5K_TXQ_USEDEFAULT, 1488 .tqi_cw_min = AR5K_TXQ_USEDEFAULT, 1489 .tqi_cw_max = AR5K_TXQ_USEDEFAULT 1490 }; 1491 int qnum; 1492 1493 /* 1494 * Enable interrupts only for EOL and DESC conditions. 1495 * We mark tx descriptors to receive a DESC interrupt 1496 * when a tx queue gets deep; otherwise waiting for the 1497 * EOL to reap descriptors. Note that this is done to 1498 * reduce interrupt load and this only defers reaping 1499 * descriptors, never transmitting frames. Aside from 1500 * reducing interrupts this also permits more concurrency. 1501 * The only potential downside is if the tx queue backs 1502 * up in which case the top half of the kernel may backup 1503 * due to a lack of tx descriptors. 1504 */ 1505 qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE | 1506 AR5K_TXQ_FLAG_TXDESCINT_ENABLE; 1507 qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi); 1508 if (qnum < 0) { 1509 /* 1510 * NB: don't print a message, this happens 1511 * normally on parts with too few tx queues 1512 */ 1513 return ERR_PTR(qnum); 1514 } 1515 if (qnum >= ARRAY_SIZE(sc->txqs)) { 1516 ATH5K_ERR(sc, "hw qnum %u out of range, max %tu!\n", 1517 qnum, ARRAY_SIZE(sc->txqs)); 1518 ath5k_hw_release_tx_queue(ah, qnum); 1519 return ERR_PTR(-EINVAL); 1520 } 1521 txq = &sc->txqs[qnum]; 1522 if (!txq->setup) { 1523 txq->qnum = qnum; 1524 txq->link = NULL; 1525 INIT_LIST_HEAD(&txq->q); 1526 spin_lock_init(&txq->lock); 1527 txq->setup = true; 1528 } 1529 return &sc->txqs[qnum]; 1530 } 1531 1532 static int 1533 ath5k_beaconq_setup(struct ath5k_hw *ah) 1534 { 1535 struct ath5k_txq_info qi = { 1536 .tqi_aifs = AR5K_TXQ_USEDEFAULT, 1537 .tqi_cw_min = AR5K_TXQ_USEDEFAULT, 1538 .tqi_cw_max = AR5K_TXQ_USEDEFAULT, 1539 /* NB: for dynamic turbo, don't enable any other interrupts */ 1540 .tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE 1541 }; 1542 1543 return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi); 1544 } 1545 1546 static int 1547 ath5k_beaconq_config(struct ath5k_softc *sc) 1548 { 1549 struct ath5k_hw *ah = sc->ah; 1550 struct ath5k_txq_info qi; 1551 int ret; 1552 1553 ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi); 1554 if (ret) 1555 goto err; 1556 1557 if (sc->opmode == NL80211_IFTYPE_AP || 1558 sc->opmode == NL80211_IFTYPE_MESH_POINT) { 1559 /* 1560 * Always burst out beacon and CAB traffic 1561 * (aifs = cwmin = cwmax = 0) 1562 */ 1563 qi.tqi_aifs = 0; 1564 qi.tqi_cw_min = 0; 1565 qi.tqi_cw_max = 0; 1566 } else if (sc->opmode == NL80211_IFTYPE_ADHOC) { 1567 /* 1568 * Adhoc mode; backoff between 0 and (2 * cw_min). 1569 */ 1570 qi.tqi_aifs = 0; 1571 qi.tqi_cw_min = 0; 1572 qi.tqi_cw_max = 2 * ah->ah_cw_min; 1573 } 1574 1575 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, 1576 "beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n", 1577 qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max); 1578 1579 ret = ath5k_hw_set_tx_queueprops(ah, sc->bhalq, &qi); 1580 if (ret) { 1581 ATH5K_ERR(sc, "%s: unable to update parameters for beacon " 1582 "hardware queue!\n", __func__); 1583 goto err; 1584 } 1585 ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */ 1586 if (ret) 1587 goto err; 1588 1589 /* reconfigure cabq with ready time to 80% of beacon_interval */ 1590 ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi); 1591 if (ret) 1592 goto err; 1593 1594 qi.tqi_ready_time = (sc->bintval * 80) / 100; 1595 ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi); 1596 if (ret) 1597 goto err; 1598 1599 ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB); 1600 err: 1601 return ret; 1602 } 1603 1604 static void 1605 ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq) 1606 { 1607 struct ath5k_buf *bf, *bf0; 1608 1609 /* 1610 * NB: this assumes output has been stopped and 1611 * we do not need to block ath5k_tx_tasklet 1612 */ 1613 spin_lock_bh(&txq->lock); 1614 list_for_each_entry_safe(bf, bf0, &txq->q, list) { 1615 ath5k_debug_printtxbuf(sc, bf); 1616 1617 ath5k_txbuf_free_skb(sc, bf); 1618 1619 spin_lock_bh(&sc->txbuflock); 1620 list_move_tail(&bf->list, &sc->txbuf); 1621 sc->txbuf_len++; 1622 spin_unlock_bh(&sc->txbuflock); 1623 } 1624 txq->link = NULL; 1625 spin_unlock_bh(&txq->lock); 1626 } 1627 1628 /* 1629 * Drain the transmit queues and reclaim resources. 1630 */ 1631 static void 1632 ath5k_txq_cleanup(struct ath5k_softc *sc) 1633 { 1634 struct ath5k_hw *ah = sc->ah; 1635 unsigned int i; 1636 1637 /* XXX return value */ 1638 if (likely(!test_bit(ATH_STAT_INVALID, sc->status))) { 1639 /* don't touch the hardware if marked invalid */ 1640 ath5k_hw_stop_tx_dma(ah, sc->bhalq); 1641 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "beacon queue %x\n", 1642 ath5k_hw_get_txdp(ah, sc->bhalq)); 1643 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) 1644 if (sc->txqs[i].setup) { 1645 ath5k_hw_stop_tx_dma(ah, sc->txqs[i].qnum); 1646 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "txq [%u] %x, " 1647 "link %p\n", 1648 sc->txqs[i].qnum, 1649 ath5k_hw_get_txdp(ah, 1650 sc->txqs[i].qnum), 1651 sc->txqs[i].link); 1652 } 1653 } 1654 1655 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) 1656 if (sc->txqs[i].setup) 1657 ath5k_txq_drainq(sc, &sc->txqs[i]); 1658 } 1659 1660 static void 1661 ath5k_txq_release(struct ath5k_softc *sc) 1662 { 1663 struct ath5k_txq *txq = sc->txqs; 1664 unsigned int i; 1665 1666 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++, txq++) 1667 if (txq->setup) { 1668 ath5k_hw_release_tx_queue(sc->ah, txq->qnum); 1669 txq->setup = false; 1670 } 1671 } 1672 1673 1674 1675 1676 /*************\ 1677 * RX Handling * 1678 \*************/ 1679 1680 /* 1681 * Enable the receive h/w following a reset. 1682 */ 1683 static int 1684 ath5k_rx_start(struct ath5k_softc *sc) 1685 { 1686 struct ath5k_hw *ah = sc->ah; 1687 struct ath_common *common = ath5k_hw_common(ah); 1688 struct ath5k_buf *bf; 1689 int ret; 1690 1691 common->rx_bufsize = roundup(IEEE80211_MAX_LEN, common->cachelsz); 1692 1693 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n", 1694 common->cachelsz, common->rx_bufsize); 1695 1696 spin_lock_bh(&sc->rxbuflock); 1697 sc->rxlink = NULL; 1698 list_for_each_entry(bf, &sc->rxbuf, list) { 1699 ret = ath5k_rxbuf_setup(sc, bf); 1700 if (ret != 0) { 1701 spin_unlock_bh(&sc->rxbuflock); 1702 goto err; 1703 } 1704 } 1705 bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list); 1706 ath5k_hw_set_rxdp(ah, bf->daddr); 1707 spin_unlock_bh(&sc->rxbuflock); 1708 1709 ath5k_hw_start_rx_dma(ah); /* enable recv descriptors */ 1710 ath5k_mode_setup(sc); /* set filters, etc. */ 1711 ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */ 1712 1713 return 0; 1714 err: 1715 return ret; 1716 } 1717 1718 /* 1719 * Disable the receive h/w in preparation for a reset. 1720 */ 1721 static void 1722 ath5k_rx_stop(struct ath5k_softc *sc) 1723 { 1724 struct ath5k_hw *ah = sc->ah; 1725 1726 ath5k_hw_stop_rx_pcu(ah); /* disable PCU */ 1727 ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */ 1728 ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */ 1729 1730 ath5k_debug_printrxbuffs(sc, ah); 1731 } 1732 1733 static unsigned int 1734 ath5k_rx_decrypted(struct ath5k_softc *sc, struct sk_buff *skb, 1735 struct ath5k_rx_status *rs) 1736 { 1737 struct ath5k_hw *ah = sc->ah; 1738 struct ath_common *common = ath5k_hw_common(ah); 1739 struct ieee80211_hdr *hdr = (void *)skb->data; 1740 unsigned int keyix, hlen; 1741 1742 if (!(rs->rs_status & AR5K_RXERR_DECRYPT) && 1743 rs->rs_keyix != AR5K_RXKEYIX_INVALID) 1744 return RX_FLAG_DECRYPTED; 1745 1746 /* Apparently when a default key is used to decrypt the packet 1747 the hw does not set the index used to decrypt. In such cases 1748 get the index from the packet. */ 1749 hlen = ieee80211_hdrlen(hdr->frame_control); 1750 if (ieee80211_has_protected(hdr->frame_control) && 1751 !(rs->rs_status & AR5K_RXERR_DECRYPT) && 1752 skb->len >= hlen + 4) { 1753 keyix = skb->data[hlen + 3] >> 6; 1754 1755 if (test_bit(keyix, common->keymap)) 1756 return RX_FLAG_DECRYPTED; 1757 } 1758 1759 return 0; 1760 } 1761 1762 1763 static void 1764 ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb, 1765 struct ieee80211_rx_status *rxs) 1766 { 1767 struct ath_common *common = ath5k_hw_common(sc->ah); 1768 u64 tsf, bc_tstamp; 1769 u32 hw_tu; 1770 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; 1771 1772 if (ieee80211_is_beacon(mgmt->frame_control) && 1773 le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS && 1774 memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) == 0) { 1775 /* 1776 * Received an IBSS beacon with the same BSSID. Hardware *must* 1777 * have updated the local TSF. We have to work around various 1778 * hardware bugs, though... 1779 */ 1780 tsf = ath5k_hw_get_tsf64(sc->ah); 1781 bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp); 1782 hw_tu = TSF_TO_TU(tsf); 1783 1784 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, 1785 "beacon %llx mactime %llx (diff %lld) tsf now %llx\n", 1786 (unsigned long long)bc_tstamp, 1787 (unsigned long long)rxs->mactime, 1788 (unsigned long long)(rxs->mactime - bc_tstamp), 1789 (unsigned long long)tsf); 1790 1791 /* 1792 * Sometimes the HW will give us a wrong tstamp in the rx 1793 * status, causing the timestamp extension to go wrong. 1794 * (This seems to happen especially with beacon frames bigger 1795 * than 78 byte (incl. FCS)) 1796 * But we know that the receive timestamp must be later than the 1797 * timestamp of the beacon since HW must have synced to that. 1798 * 1799 * NOTE: here we assume mactime to be after the frame was 1800 * received, not like mac80211 which defines it at the start. 1801 */ 1802 if (bc_tstamp > rxs->mactime) { 1803 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, 1804 "fixing mactime from %llx to %llx\n", 1805 (unsigned long long)rxs->mactime, 1806 (unsigned long long)tsf); 1807 rxs->mactime = tsf; 1808 } 1809 1810 /* 1811 * Local TSF might have moved higher than our beacon timers, 1812 * in that case we have to update them to continue sending 1813 * beacons. This also takes care of synchronizing beacon sending 1814 * times with other stations. 1815 */ 1816 if (hw_tu >= sc->nexttbtt) 1817 ath5k_beacon_update_timers(sc, bc_tstamp); 1818 } 1819 } 1820 1821 static void 1822 ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi) 1823 { 1824 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; 1825 struct ath5k_hw *ah = sc->ah; 1826 struct ath_common *common = ath5k_hw_common(ah); 1827 1828 /* only beacons from our BSSID */ 1829 if (!ieee80211_is_beacon(mgmt->frame_control) || 1830 memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0) 1831 return; 1832 1833 ah->ah_beacon_rssi_avg = ath5k_moving_average(ah->ah_beacon_rssi_avg, 1834 rssi); 1835 1836 /* in IBSS mode we should keep RSSI statistics per neighbour */ 1837 /* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */ 1838 } 1839 1840 /* 1841 * Compute padding position. skb must contains an IEEE 802.11 frame 1842 */ 1843 static int ath5k_common_padpos(struct sk_buff *skb) 1844 { 1845 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; 1846 __le16 frame_control = hdr->frame_control; 1847 int padpos = 24; 1848 1849 if (ieee80211_has_a4(frame_control)) { 1850 padpos += ETH_ALEN; 1851 } 1852 if (ieee80211_is_data_qos(frame_control)) { 1853 padpos += IEEE80211_QOS_CTL_LEN; 1854 } 1855 1856 return padpos; 1857 } 1858 1859 /* 1860 * This function expects a 802.11 frame and returns the number of 1861 * bytes added, or -1 if we don't have enought header room. 1862 */ 1863 1864 static int ath5k_add_padding(struct sk_buff *skb) 1865 { 1866 int padpos = ath5k_common_padpos(skb); 1867 int padsize = padpos & 3; 1868 1869 if (padsize && skb->len>padpos) { 1870 1871 if (skb_headroom(skb) < padsize) 1872 return -1; 1873 1874 skb_push(skb, padsize); 1875 memmove(skb->data, skb->data+padsize, padpos); 1876 return padsize; 1877 } 1878 1879 return 0; 1880 } 1881 1882 /* 1883 * This function expects a 802.11 frame and returns the number of 1884 * bytes removed 1885 */ 1886 1887 static int ath5k_remove_padding(struct sk_buff *skb) 1888 { 1889 int padpos = ath5k_common_padpos(skb); 1890 int padsize = padpos & 3; 1891 1892 if (padsize && skb->len>=padpos+padsize) { 1893 memmove(skb->data + padsize, skb->data, padpos); 1894 skb_pull(skb, padsize); 1895 return padsize; 1896 } 1897 1898 return 0; 1899 } 1900 1901 static void 1902 ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb, 1903 struct ath5k_rx_status *rs) 1904 { 1905 struct ieee80211_rx_status *rxs; 1906 1907 /* The MAC header is padded to have 32-bit boundary if the 1908 * packet payload is non-zero. The general calculation for 1909 * padsize would take into account odd header lengths: 1910 * padsize = (4 - hdrlen % 4) % 4; However, since only 1911 * even-length headers are used, padding can only be 0 or 2 1912 * bytes and we can optimize this a bit. In addition, we must 1913 * not try to remove padding from short control frames that do 1914 * not have payload. */ 1915 ath5k_remove_padding(skb); 1916 1917 rxs = IEEE80211_SKB_RXCB(skb); 1918 1919 rxs->flag = 0; 1920 if (unlikely(rs->rs_status & AR5K_RXERR_MIC)) 1921 rxs->flag |= RX_FLAG_MMIC_ERROR; 1922 1923 /* 1924 * always extend the mac timestamp, since this information is 1925 * also needed for proper IBSS merging. 1926 * 1927 * XXX: it might be too late to do it here, since rs_tstamp is 1928 * 15bit only. that means TSF extension has to be done within 1929 * 32768usec (about 32ms). it might be necessary to move this to 1930 * the interrupt handler, like it is done in madwifi. 1931 * 1932 * Unfortunately we don't know when the hardware takes the rx 1933 * timestamp (beginning of phy frame, data frame, end of rx?). 1934 * The only thing we know is that it is hardware specific... 1935 * On AR5213 it seems the rx timestamp is at the end of the 1936 * frame, but i'm not sure. 1937 * 1938 * NOTE: mac80211 defines mactime at the beginning of the first 1939 * data symbol. Since we don't have any time references it's 1940 * impossible to comply to that. This affects IBSS merge only 1941 * right now, so it's not too bad... 1942 */ 1943 rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp); 1944 rxs->flag |= RX_FLAG_TSFT; 1945 1946 rxs->freq = sc->curchan->center_freq; 1947 rxs->band = sc->curband->band; 1948 1949 rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi; 1950 1951 rxs->antenna = rs->rs_antenna; 1952 1953 if (rs->rs_antenna > 0 && rs->rs_antenna < 5) 1954 sc->stats.antenna_rx[rs->rs_antenna]++; 1955 else 1956 sc->stats.antenna_rx[0]++; /* invalid */ 1957 1958 rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs->rs_rate); 1959 rxs->flag |= ath5k_rx_decrypted(sc, skb, rs); 1960 1961 if (rxs->rate_idx >= 0 && rs->rs_rate == 1962 sc->curband->bitrates[rxs->rate_idx].hw_value_short) 1963 rxs->flag |= RX_FLAG_SHORTPRE; 1964 1965 ath5k_debug_dump_skb(sc, skb, "RX ", 0); 1966 1967 ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi); 1968 1969 /* check beacons in IBSS mode */ 1970 if (sc->opmode == NL80211_IFTYPE_ADHOC) 1971 ath5k_check_ibss_tsf(sc, skb, rxs); 1972 1973 ieee80211_rx(sc->hw, skb); 1974 } 1975 1976 /** ath5k_frame_receive_ok() - Do we want to receive this frame or not? 1977 * 1978 * Check if we want to further process this frame or not. Also update 1979 * statistics. Return true if we want this frame, false if not. 1980 */ 1981 static bool 1982 ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs) 1983 { 1984 sc->stats.rx_all_count++; 1985 1986 if (unlikely(rs->rs_status)) { 1987 if (rs->rs_status & AR5K_RXERR_CRC) 1988 sc->stats.rxerr_crc++; 1989 if (rs->rs_status & AR5K_RXERR_FIFO) 1990 sc->stats.rxerr_fifo++; 1991 if (rs->rs_status & AR5K_RXERR_PHY) { 1992 sc->stats.rxerr_phy++; 1993 if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32) 1994 sc->stats.rxerr_phy_code[rs->rs_phyerr]++; 1995 return false; 1996 } 1997 if (rs->rs_status & AR5K_RXERR_DECRYPT) { 1998 /* 1999 * Decrypt error. If the error occurred 2000 * because there was no hardware key, then 2001 * let the frame through so the upper layers 2002 * can process it. This is necessary for 5210 2003 * parts which have no way to setup a ``clear'' 2004 * key cache entry. 2005 * 2006 * XXX do key cache faulting 2007 */ 2008 sc->stats.rxerr_decrypt++; 2009 if (rs->rs_keyix == AR5K_RXKEYIX_INVALID && 2010 !(rs->rs_status & AR5K_RXERR_CRC)) 2011 return true; 2012 } 2013 if (rs->rs_status & AR5K_RXERR_MIC) { 2014 sc->stats.rxerr_mic++; 2015 return true; 2016 } 2017 2018 /* let crypto-error packets fall through in MNTR */ 2019 if ((rs->rs_status & ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) || 2020 sc->opmode != NL80211_IFTYPE_MONITOR) 2021 return false; 2022 } 2023 2024 if (unlikely(rs->rs_more)) { 2025 sc->stats.rxerr_jumbo++; 2026 return false; 2027 } 2028 return true; 2029 } 2030 2031 static void 2032 ath5k_tasklet_rx(unsigned long data) 2033 { 2034 struct ath5k_rx_status rs = {}; 2035 struct sk_buff *skb, *next_skb; 2036 dma_addr_t next_skb_addr; 2037 struct ath5k_softc *sc = (void *)data; 2038 struct ath5k_hw *ah = sc->ah; 2039 struct ath_common *common = ath5k_hw_common(ah); 2040 struct ath5k_buf *bf; 2041 struct ath5k_desc *ds; 2042 int ret; 2043 2044 spin_lock(&sc->rxbuflock); 2045 if (list_empty(&sc->rxbuf)) { 2046 ATH5K_WARN(sc, "empty rx buf pool\n"); 2047 goto unlock; 2048 } 2049 do { 2050 bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list); 2051 BUG_ON(bf->skb == NULL); 2052 skb = bf->skb; 2053 ds = bf->desc; 2054 2055 /* bail if HW is still using self-linked descriptor */ 2056 if (ath5k_hw_get_rxdp(sc->ah) == bf->daddr) 2057 break; 2058 2059 ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs); 2060 if (unlikely(ret == -EINPROGRESS)) 2061 break; 2062 else if (unlikely(ret)) { 2063 ATH5K_ERR(sc, "error in processing rx descriptor\n"); 2064 sc->stats.rxerr_proc++; 2065 break; 2066 } 2067 2068 if (ath5k_receive_frame_ok(sc, &rs)) { 2069 next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr); 2070 2071 /* 2072 * If we can't replace bf->skb with a new skb under 2073 * memory pressure, just skip this packet 2074 */ 2075 if (!next_skb) 2076 goto next; 2077 2078 pci_unmap_single(sc->pdev, bf->skbaddr, 2079 common->rx_bufsize, 2080 PCI_DMA_FROMDEVICE); 2081 2082 skb_put(skb, rs.rs_datalen); 2083 2084 ath5k_receive_frame(sc, skb, &rs); 2085 2086 bf->skb = next_skb; 2087 bf->skbaddr = next_skb_addr; 2088 } 2089 next: 2090 list_move_tail(&bf->list, &sc->rxbuf); 2091 } while (ath5k_rxbuf_setup(sc, bf) == 0); 2092 unlock: 2093 spin_unlock(&sc->rxbuflock); 2094 } 2095 2096 2097 /*************\ 2098 * TX Handling * 2099 \*************/ 2100 2101 static void 2102 ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq) 2103 { 2104 struct ath5k_tx_status ts = {}; 2105 struct ath5k_buf *bf, *bf0; 2106 struct ath5k_desc *ds; 2107 struct sk_buff *skb; 2108 struct ieee80211_tx_info *info; 2109 int i, ret; 2110 2111 spin_lock(&txq->lock); 2112 list_for_each_entry_safe(bf, bf0, &txq->q, list) { 2113 ds = bf->desc; 2114 2115 /* 2116 * It's possible that the hardware can say the buffer is 2117 * completed when it hasn't yet loaded the ds_link from 2118 * host memory and moved on. If there are more TX 2119 * descriptors in the queue, wait for TXDP to change 2120 * before processing this one. 2121 */ 2122 if (ath5k_hw_get_txdp(sc->ah, txq->qnum) == bf->daddr && 2123 !list_is_last(&bf->list, &txq->q)) 2124 break; 2125 2126 ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts); 2127 if (unlikely(ret == -EINPROGRESS)) 2128 break; 2129 else if (unlikely(ret)) { 2130 ATH5K_ERR(sc, "error %d while processing queue %u\n", 2131 ret, txq->qnum); 2132 break; 2133 } 2134 2135 sc->stats.tx_all_count++; 2136 skb = bf->skb; 2137 info = IEEE80211_SKB_CB(skb); 2138 bf->skb = NULL; 2139 2140 pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, 2141 PCI_DMA_TODEVICE); 2142 2143 ieee80211_tx_info_clear_status(info); 2144 for (i = 0; i < 4; i++) { 2145 struct ieee80211_tx_rate *r = 2146 &info->status.rates[i]; 2147 2148 if (ts.ts_rate[i]) { 2149 r->idx = ath5k_hw_to_driver_rix(sc, ts.ts_rate[i]); 2150 r->count = ts.ts_retry[i]; 2151 } else { 2152 r->idx = -1; 2153 r->count = 0; 2154 } 2155 } 2156 2157 /* count the successful attempt as well */ 2158 info->status.rates[ts.ts_final_idx].count++; 2159 2160 if (unlikely(ts.ts_status)) { 2161 sc->stats.ack_fail++; 2162 if (ts.ts_status & AR5K_TXERR_FILT) { 2163 info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 2164 sc->stats.txerr_filt++; 2165 } 2166 if (ts.ts_status & AR5K_TXERR_XRETRY) 2167 sc->stats.txerr_retry++; 2168 if (ts.ts_status & AR5K_TXERR_FIFO) 2169 sc->stats.txerr_fifo++; 2170 } else { 2171 info->flags |= IEEE80211_TX_STAT_ACK; 2172 info->status.ack_signal = ts.ts_rssi; 2173 } 2174 2175 /* 2176 * Remove MAC header padding before giving the frame 2177 * back to mac80211. 2178 */ 2179 ath5k_remove_padding(skb); 2180 2181 if (ts.ts_antenna > 0 && ts.ts_antenna < 5) 2182 sc->stats.antenna_tx[ts.ts_antenna]++; 2183 else 2184 sc->stats.antenna_tx[0]++; /* invalid */ 2185 2186 ieee80211_tx_status(sc->hw, skb); 2187 2188 spin_lock(&sc->txbuflock); 2189 list_move_tail(&bf->list, &sc->txbuf); 2190 sc->txbuf_len++; 2191 spin_unlock(&sc->txbuflock); 2192 } 2193 if (likely(list_empty(&txq->q))) 2194 txq->link = NULL; 2195 spin_unlock(&txq->lock); 2196 if (sc->txbuf_len > ATH_TXBUF / 5) 2197 ieee80211_wake_queues(sc->hw); 2198 } 2199 2200 static void 2201 ath5k_tasklet_tx(unsigned long data) 2202 { 2203 int i; 2204 struct ath5k_softc *sc = (void *)data; 2205 2206 for (i=0; i < AR5K_NUM_TX_QUEUES; i++) 2207 if (sc->txqs[i].setup && (sc->ah->ah_txq_isr & BIT(i))) 2208 ath5k_tx_processq(sc, &sc->txqs[i]); 2209 } 2210 2211 2212 /*****************\ 2213 * Beacon handling * 2214 \*****************/ 2215 2216 /* 2217 * Setup the beacon frame for transmit. 2218 */ 2219 static int 2220 ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) 2221 { 2222 struct sk_buff *skb = bf->skb; 2223 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2224 struct ath5k_hw *ah = sc->ah; 2225 struct ath5k_desc *ds; 2226 int ret = 0; 2227 u8 antenna; 2228 u32 flags; 2229 const int padsize = 0; 2230 2231 bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len, 2232 PCI_DMA_TODEVICE); 2233 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] " 2234 "skbaddr %llx\n", skb, skb->data, skb->len, 2235 (unsigned long long)bf->skbaddr); 2236 if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) { 2237 ATH5K_ERR(sc, "beacon DMA mapping failed\n"); 2238 return -EIO; 2239 } 2240 2241 ds = bf->desc; 2242 antenna = ah->ah_tx_ant; 2243 2244 flags = AR5K_TXDESC_NOACK; 2245 if (sc->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) { 2246 ds->ds_link = bf->daddr; /* self-linked */ 2247 flags |= AR5K_TXDESC_VEOL; 2248 } else 2249 ds->ds_link = 0; 2250 2251 /* 2252 * If we use multiple antennas on AP and use 2253 * the Sectored AP scenario, switch antenna every 2254 * 4 beacons to make sure everybody hears our AP. 2255 * When a client tries to associate, hw will keep 2256 * track of the tx antenna to be used for this client 2257 * automaticaly, based on ACKed packets. 2258 * 2259 * Note: AP still listens and transmits RTS on the 2260 * default antenna which is supposed to be an omni. 2261 * 2262 * Note2: On sectored scenarios it's possible to have 2263 * multiple antennas (1omni -the default- and 14 sectors) 2264 * so if we choose to actually support this mode we need 2265 * to allow user to set how many antennas we have and tweak 2266 * the code below to send beacons on all of them. 2267 */ 2268 if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP) 2269 antenna = sc->bsent & 4 ? 2 : 1; 2270 2271 2272 /* FIXME: If we are in g mode and rate is a CCK rate 2273 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta 2274 * from tx power (value is in dB units already) */ 2275 ds->ds_data = bf->skbaddr; 2276 ret = ah->ah_setup_tx_desc(ah, ds, skb->len, 2277 ieee80211_get_hdrlen_from_skb(skb), padsize, 2278 AR5K_PKT_TYPE_BEACON, (sc->power_level * 2), 2279 ieee80211_get_tx_rate(sc->hw, info)->hw_value, 2280 1, AR5K_TXKEYIX_INVALID, 2281 antenna, flags, 0, 0); 2282 if (ret) 2283 goto err_unmap; 2284 2285 return 0; 2286 err_unmap: 2287 pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE); 2288 return ret; 2289 } 2290 2291 /* 2292 * Transmit a beacon frame at SWBA. Dynamic updates to the 2293 * frame contents are done as needed and the slot time is 2294 * also adjusted based on current state. 2295 * 2296 * This is called from software irq context (beacontq tasklets) 2297 * or user context from ath5k_beacon_config. 2298 */ 2299 static void 2300 ath5k_beacon_send(struct ath5k_softc *sc) 2301 { 2302 struct ath5k_buf *bf = sc->bbuf; 2303 struct ath5k_hw *ah = sc->ah; 2304 struct sk_buff *skb; 2305 2306 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n"); 2307 2308 if (unlikely(bf->skb == NULL || sc->opmode == NL80211_IFTYPE_STATION || 2309 sc->opmode == NL80211_IFTYPE_MONITOR)) { 2310 ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL); 2311 return; 2312 } 2313 /* 2314 * Check if the previous beacon has gone out. If 2315 * not don't don't try to post another, skip this 2316 * period and wait for the next. Missed beacons 2317 * indicate a problem and should not occur. If we 2318 * miss too many consecutive beacons reset the device. 2319 */ 2320 if (unlikely(ath5k_hw_num_tx_pending(ah, sc->bhalq) != 0)) { 2321 sc->bmisscount++; 2322 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, 2323 "missed %u consecutive beacons\n", sc->bmisscount); 2324 if (sc->bmisscount > 10) { /* NB: 10 is a guess */ 2325 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, 2326 "stuck beacon time (%u missed)\n", 2327 sc->bmisscount); 2328 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, 2329 "stuck beacon, resetting\n"); 2330 ieee80211_queue_work(sc->hw, &sc->reset_work); 2331 } 2332 return; 2333 } 2334 if (unlikely(sc->bmisscount != 0)) { 2335 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, 2336 "resume beacon xmit after %u misses\n", 2337 sc->bmisscount); 2338 sc->bmisscount = 0; 2339 } 2340 2341 /* 2342 * Stop any current dma and put the new frame on the queue. 2343 * This should never fail since we check above that no frames 2344 * are still pending on the queue. 2345 */ 2346 if (unlikely(ath5k_hw_stop_tx_dma(ah, sc->bhalq))) { 2347 ATH5K_WARN(sc, "beacon queue %u didn't start/stop ?\n", sc->bhalq); 2348 /* NB: hw still stops DMA, so proceed */ 2349 } 2350 2351 /* refresh the beacon for AP mode */ 2352 if (sc->opmode == NL80211_IFTYPE_AP) 2353 ath5k_beacon_update(sc->hw, sc->vif); 2354 2355 ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr); 2356 ath5k_hw_start_tx_dma(ah, sc->bhalq); 2357 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n", 2358 sc->bhalq, (unsigned long long)bf->daddr, bf->desc); 2359 2360 skb = ieee80211_get_buffered_bc(sc->hw, sc->vif); 2361 while (skb) { 2362 ath5k_tx_queue(sc->hw, skb, sc->cabq); 2363 skb = ieee80211_get_buffered_bc(sc->hw, sc->vif); 2364 } 2365 2366 sc->bsent++; 2367 } 2368 2369 2370 /** 2371 * ath5k_beacon_update_timers - update beacon timers 2372 * 2373 * @sc: struct ath5k_softc pointer we are operating on 2374 * @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a 2375 * beacon timer update based on the current HW TSF. 2376 * 2377 * Calculate the next target beacon transmit time (TBTT) based on the timestamp 2378 * of a received beacon or the current local hardware TSF and write it to the 2379 * beacon timer registers. 2380 * 2381 * This is called in a variety of situations, e.g. when a beacon is received, 2382 * when a TSF update has been detected, but also when an new IBSS is created or 2383 * when we otherwise know we have to update the timers, but we keep it in this 2384 * function to have it all together in one place. 2385 */ 2386 static void 2387 ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf) 2388 { 2389 struct ath5k_hw *ah = sc->ah; 2390 u32 nexttbtt, intval, hw_tu, bc_tu; 2391 u64 hw_tsf; 2392 2393 intval = sc->bintval & AR5K_BEACON_PERIOD; 2394 if (WARN_ON(!intval)) 2395 return; 2396 2397 /* beacon TSF converted to TU */ 2398 bc_tu = TSF_TO_TU(bc_tsf); 2399 2400 /* current TSF converted to TU */ 2401 hw_tsf = ath5k_hw_get_tsf64(ah); 2402 hw_tu = TSF_TO_TU(hw_tsf); 2403 2404 #define FUDGE 3 2405 /* we use FUDGE to make sure the next TBTT is ahead of the current TU */ 2406 if (bc_tsf == -1) { 2407 /* 2408 * no beacons received, called internally. 2409 * just need to refresh timers based on HW TSF. 2410 */ 2411 nexttbtt = roundup(hw_tu + FUDGE, intval); 2412 } else if (bc_tsf == 0) { 2413 /* 2414 * no beacon received, probably called by ath5k_reset_tsf(). 2415 * reset TSF to start with 0. 2416 */ 2417 nexttbtt = intval; 2418 intval |= AR5K_BEACON_RESET_TSF; 2419 } else if (bc_tsf > hw_tsf) { 2420 /* 2421 * beacon received, SW merge happend but HW TSF not yet updated. 2422 * not possible to reconfigure timers yet, but next time we 2423 * receive a beacon with the same BSSID, the hardware will 2424 * automatically update the TSF and then we need to reconfigure 2425 * the timers. 2426 */ 2427 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, 2428 "need to wait for HW TSF sync\n"); 2429 return; 2430 } else { 2431 /* 2432 * most important case for beacon synchronization between STA. 2433 * 2434 * beacon received and HW TSF has been already updated by HW. 2435 * update next TBTT based on the TSF of the beacon, but make 2436 * sure it is ahead of our local TSF timer. 2437 */ 2438 nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval); 2439 } 2440 #undef FUDGE 2441 2442 sc->nexttbtt = nexttbtt; 2443 2444 intval |= AR5K_BEACON_ENA; 2445 ath5k_hw_init_beacon(ah, nexttbtt, intval); 2446 2447 /* 2448 * debugging output last in order to preserve the time critical aspect 2449 * of this function 2450 */ 2451 if (bc_tsf == -1) 2452 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, 2453 "reconfigured timers based on HW TSF\n"); 2454 else if (bc_tsf == 0) 2455 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, 2456 "reset HW TSF and timers\n"); 2457 else 2458 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, 2459 "updated timers based on beacon TSF\n"); 2460 2461 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, 2462 "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n", 2463 (unsigned long long) bc_tsf, 2464 (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt); 2465 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "intval %u %s %s\n", 2466 intval & AR5K_BEACON_PERIOD, 2467 intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "", 2468 intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : ""); 2469 } 2470 2471 2472 /** 2473 * ath5k_beacon_config - Configure the beacon queues and interrupts 2474 * 2475 * @sc: struct ath5k_softc pointer we are operating on 2476 * 2477 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA 2478 * interrupts to detect TSF updates only. 2479 */ 2480 static void 2481 ath5k_beacon_config(struct ath5k_softc *sc) 2482 { 2483 struct ath5k_hw *ah = sc->ah; 2484 unsigned long flags; 2485 2486 spin_lock_irqsave(&sc->block, flags); 2487 sc->bmisscount = 0; 2488 sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA); 2489 2490 if (sc->enable_beacon) { 2491 /* 2492 * In IBSS mode we use a self-linked tx descriptor and let the 2493 * hardware send the beacons automatically. We have to load it 2494 * only once here. 2495 * We use the SWBA interrupt only to keep track of the beacon 2496 * timers in order to detect automatic TSF updates. 2497 */ 2498 ath5k_beaconq_config(sc); 2499 2500 sc->imask |= AR5K_INT_SWBA; 2501 2502 if (sc->opmode == NL80211_IFTYPE_ADHOC) { 2503 if (ath5k_hw_hasveol(ah)) 2504 ath5k_beacon_send(sc); 2505 } else 2506 ath5k_beacon_update_timers(sc, -1); 2507 } else { 2508 ath5k_hw_stop_tx_dma(sc->ah, sc->bhalq); 2509 } 2510 2511 ath5k_hw_set_imr(ah, sc->imask); 2512 mmiowb(); 2513 spin_unlock_irqrestore(&sc->block, flags); 2514 } 2515 2516 static void ath5k_tasklet_beacon(unsigned long data) 2517 { 2518 struct ath5k_softc *sc = (struct ath5k_softc *) data; 2519 2520 /* 2521 * Software beacon alert--time to send a beacon. 2522 * 2523 * In IBSS mode we use this interrupt just to 2524 * keep track of the next TBTT (target beacon 2525 * transmission time) in order to detect wether 2526 * automatic TSF updates happened. 2527 */ 2528 if (sc->opmode == NL80211_IFTYPE_ADHOC) { 2529 /* XXX: only if VEOL suppported */ 2530 u64 tsf = ath5k_hw_get_tsf64(sc->ah); 2531 sc->nexttbtt += sc->bintval; 2532 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, 2533 "SWBA nexttbtt: %x hw_tu: %x " 2534 "TSF: %llx\n", 2535 sc->nexttbtt, 2536 TSF_TO_TU(tsf), 2537 (unsigned long long) tsf); 2538 } else { 2539 spin_lock(&sc->block); 2540 ath5k_beacon_send(sc); 2541 spin_unlock(&sc->block); 2542 } 2543 } 2544 2545 2546 /********************\ 2547 * Interrupt handling * 2548 \********************/ 2549 2550 static int 2551 ath5k_init(struct ath5k_softc *sc) 2552 { 2553 struct ath5k_hw *ah = sc->ah; 2554 int ret, i; 2555 2556 mutex_lock(&sc->lock); 2557 2558 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode); 2559 2560 /* 2561 * Stop anything previously setup. This is safe 2562 * no matter this is the first time through or not. 2563 */ 2564 ath5k_stop_locked(sc); 2565 2566 /* 2567 * The basic interface to setting the hardware in a good 2568 * state is ``reset''. On return the hardware is known to 2569 * be powered up and with interrupts disabled. This must 2570 * be followed by initialization of the appropriate bits 2571 * and then setup of the interrupt mask. 2572 */ 2573 sc->curchan = sc->hw->conf.channel; 2574 sc->curband = &sc->sbands[sc->curchan->band]; 2575 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL | 2576 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL | 2577 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB; 2578 2579 ret = ath5k_reset(sc, NULL); 2580 if (ret) 2581 goto done; 2582 2583 ath5k_rfkill_hw_start(ah); 2584 2585 /* 2586 * Reset the key cache since some parts do not reset the 2587 * contents on initial power up or resume from suspend. 2588 */ 2589 for (i = 0; i < AR5K_KEYTABLE_SIZE; i++) 2590 ath5k_hw_reset_key(ah, i); 2591 2592 ath5k_hw_set_ack_bitrate_high(ah, true); 2593 ret = 0; 2594 done: 2595 mmiowb(); 2596 mutex_unlock(&sc->lock); 2597 return ret; 2598 } 2599 2600 static int 2601 ath5k_stop_locked(struct ath5k_softc *sc) 2602 { 2603 struct ath5k_hw *ah = sc->ah; 2604 2605 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n", 2606 test_bit(ATH_STAT_INVALID, sc->status)); 2607 2608 /* 2609 * Shutdown the hardware and driver: 2610 * stop output from above 2611 * disable interrupts 2612 * turn off timers 2613 * turn off the radio 2614 * clear transmit machinery 2615 * clear receive machinery 2616 * drain and release tx queues 2617 * reclaim beacon resources 2618 * power down hardware 2619 * 2620 * Note that some of this work is not possible if the 2621 * hardware is gone (invalid). 2622 */ 2623 ieee80211_stop_queues(sc->hw); 2624 2625 if (!test_bit(ATH_STAT_INVALID, sc->status)) { 2626 ath5k_led_off(sc); 2627 ath5k_hw_set_imr(ah, 0); 2628 synchronize_irq(sc->pdev->irq); 2629 } 2630 ath5k_txq_cleanup(sc); 2631 if (!test_bit(ATH_STAT_INVALID, sc->status)) { 2632 ath5k_rx_stop(sc); 2633 ath5k_hw_phy_disable(ah); 2634 } 2635 2636 return 0; 2637 } 2638 2639 static void stop_tasklets(struct ath5k_softc *sc) 2640 { 2641 tasklet_kill(&sc->rxtq); 2642 tasklet_kill(&sc->txtq); 2643 tasklet_kill(&sc->calib); 2644 tasklet_kill(&sc->beacontq); 2645 tasklet_kill(&sc->ani_tasklet); 2646 } 2647 2648 /* 2649 * Stop the device, grabbing the top-level lock to protect 2650 * against concurrent entry through ath5k_init (which can happen 2651 * if another thread does a system call and the thread doing the 2652 * stop is preempted). 2653 */ 2654 static int 2655 ath5k_stop_hw(struct ath5k_softc *sc) 2656 { 2657 int ret; 2658 2659 mutex_lock(&sc->lock); 2660 ret = ath5k_stop_locked(sc); 2661 if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) { 2662 /* 2663 * Don't set the card in full sleep mode! 2664 * 2665 * a) When the device is in this state it must be carefully 2666 * woken up or references to registers in the PCI clock 2667 * domain may freeze the bus (and system). This varies 2668 * by chip and is mostly an issue with newer parts 2669 * (madwifi sources mentioned srev >= 0x78) that go to 2670 * sleep more quickly. 2671 * 2672 * b) On older chips full sleep results a weird behaviour 2673 * during wakeup. I tested various cards with srev < 0x78 2674 * and they don't wake up after module reload, a second 2675 * module reload is needed to bring the card up again. 2676 * 2677 * Until we figure out what's going on don't enable 2678 * full chip reset on any chip (this is what Legacy HAL 2679 * and Sam's HAL do anyway). Instead Perform a full reset 2680 * on the device (same as initial state after attach) and 2681 * leave it idle (keep MAC/BB on warm reset) */ 2682 ret = ath5k_hw_on_hold(sc->ah); 2683 2684 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, 2685 "putting device to sleep\n"); 2686 } 2687 ath5k_txbuf_free_skb(sc, sc->bbuf); 2688 2689 mmiowb(); 2690 mutex_unlock(&sc->lock); 2691 2692 stop_tasklets(sc); 2693 2694 ath5k_rfkill_hw_stop(sc->ah); 2695 2696 return ret; 2697 } 2698 2699 static void 2700 ath5k_intr_calibration_poll(struct ath5k_hw *ah) 2701 { 2702 if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) && 2703 !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) { 2704 /* run ANI only when full calibration is not active */ 2705 ah->ah_cal_next_ani = jiffies + 2706 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI); 2707 tasklet_schedule(&ah->ah_sc->ani_tasklet); 2708 2709 } else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) { 2710 ah->ah_cal_next_full = jiffies + 2711 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL); 2712 tasklet_schedule(&ah->ah_sc->calib); 2713 } 2714 /* we could use SWI to generate enough interrupts to meet our 2715 * calibration interval requirements, if necessary: 2716 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */ 2717 } 2718 2719 static irqreturn_t 2720 ath5k_intr(int irq, void *dev_id) 2721 { 2722 struct ath5k_softc *sc = dev_id; 2723 struct ath5k_hw *ah = sc->ah; 2724 enum ath5k_int status; 2725 unsigned int counter = 1000; 2726 2727 if (unlikely(test_bit(ATH_STAT_INVALID, sc->status) || 2728 !ath5k_hw_is_intr_pending(ah))) 2729 return IRQ_NONE; 2730 2731 do { 2732 ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */ 2733 ATH5K_DBG(sc, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n", 2734 status, sc->imask); 2735 if (unlikely(status & AR5K_INT_FATAL)) { 2736 /* 2737 * Fatal errors are unrecoverable. 2738 * Typically these are caused by DMA errors. 2739 */ 2740 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, 2741 "fatal int, resetting\n"); 2742 ieee80211_queue_work(sc->hw, &sc->reset_work); 2743 } else if (unlikely(status & AR5K_INT_RXORN)) { 2744 /* 2745 * Receive buffers are full. Either the bus is busy or 2746 * the CPU is not fast enough to process all received 2747 * frames. 2748 * Older chipsets need a reset to come out of this 2749 * condition, but we treat it as RX for newer chips. 2750 * We don't know exactly which versions need a reset - 2751 * this guess is copied from the HAL. 2752 */ 2753 sc->stats.rxorn_intr++; 2754 if (ah->ah_mac_srev < AR5K_SREV_AR5212) { 2755 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, 2756 "rx overrun, resetting\n"); 2757 ieee80211_queue_work(sc->hw, &sc->reset_work); 2758 } 2759 else 2760 tasklet_schedule(&sc->rxtq); 2761 } else { 2762 if (status & AR5K_INT_SWBA) { 2763 tasklet_hi_schedule(&sc->beacontq); 2764 } 2765 if (status & AR5K_INT_RXEOL) { 2766 /* 2767 * NB: the hardware should re-read the link when 2768 * RXE bit is written, but it doesn't work at 2769 * least on older hardware revs. 2770 */ 2771 sc->stats.rxeol_intr++; 2772 } 2773 if (status & AR5K_INT_TXURN) { 2774 /* bump tx trigger level */ 2775 ath5k_hw_update_tx_triglevel(ah, true); 2776 } 2777 if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR)) 2778 tasklet_schedule(&sc->rxtq); 2779 if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC 2780 | AR5K_INT_TXERR | AR5K_INT_TXEOL)) 2781 tasklet_schedule(&sc->txtq); 2782 if (status & AR5K_INT_BMISS) { 2783 /* TODO */ 2784 } 2785 if (status & AR5K_INT_MIB) { 2786 sc->stats.mib_intr++; 2787 ath5k_hw_update_mib_counters(ah); 2788 ath5k_ani_mib_intr(ah); 2789 } 2790 if (status & AR5K_INT_GPIO) 2791 tasklet_schedule(&sc->rf_kill.toggleq); 2792 2793 } 2794 } while (ath5k_hw_is_intr_pending(ah) && --counter > 0); 2795 2796 if (unlikely(!counter)) 2797 ATH5K_WARN(sc, "too many interrupts, giving up for now\n"); 2798 2799 ath5k_intr_calibration_poll(ah); 2800 2801 return IRQ_HANDLED; 2802 } 2803 2804 /* 2805 * Periodically recalibrate the PHY to account 2806 * for temperature/environment changes. 2807 */ 2808 static void 2809 ath5k_tasklet_calibrate(unsigned long data) 2810 { 2811 struct ath5k_softc *sc = (void *)data; 2812 struct ath5k_hw *ah = sc->ah; 2813 2814 /* Only full calibration for now */ 2815 ah->ah_cal_mask |= AR5K_CALIBRATION_FULL; 2816 2817 ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n", 2818 ieee80211_frequency_to_channel(sc->curchan->center_freq), 2819 sc->curchan->hw_value); 2820 2821 if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) { 2822 /* 2823 * Rfgain is out of bounds, reset the chip 2824 * to load new gain values. 2825 */ 2826 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n"); 2827 ieee80211_queue_work(sc->hw, &sc->reset_work); 2828 } 2829 if (ath5k_hw_phy_calibrate(ah, sc->curchan)) 2830 ATH5K_ERR(sc, "calibration of channel %u failed\n", 2831 ieee80211_frequency_to_channel( 2832 sc->curchan->center_freq)); 2833 2834 /* Noise floor calibration interrupts rx/tx path while I/Q calibration 2835 * doesn't. We stop the queues so that calibration doesn't interfere 2836 * with TX and don't run it as often */ 2837 if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) { 2838 ah->ah_cal_next_nf = jiffies + 2839 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF); 2840 ieee80211_stop_queues(sc->hw); 2841 ath5k_hw_update_noise_floor(ah); 2842 ieee80211_wake_queues(sc->hw); 2843 } 2844 2845 ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL; 2846 } 2847 2848 2849 static void 2850 ath5k_tasklet_ani(unsigned long data) 2851 { 2852 struct ath5k_softc *sc = (void *)data; 2853 struct ath5k_hw *ah = sc->ah; 2854 2855 ah->ah_cal_mask |= AR5K_CALIBRATION_ANI; 2856 ath5k_ani_calibration(ah); 2857 ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI; 2858 } 2859 2860 2861 /********************\ 2862 * Mac80211 functions * 2863 \********************/ 2864 2865 static int 2866 ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 2867 { 2868 struct ath5k_softc *sc = hw->priv; 2869 2870 return ath5k_tx_queue(hw, skb, sc->txq); 2871 } 2872 2873 static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, 2874 struct ath5k_txq *txq) 2875 { 2876 struct ath5k_softc *sc = hw->priv; 2877 struct ath5k_buf *bf; 2878 unsigned long flags; 2879 int padsize; 2880 2881 ath5k_debug_dump_skb(sc, skb, "TX ", 1); 2882 2883 if (sc->opmode == NL80211_IFTYPE_MONITOR) 2884 ATH5K_DBG(sc, ATH5K_DEBUG_XMIT, "tx in monitor (scan?)\n"); 2885 2886 /* 2887 * the hardware expects the header padded to 4 byte boundaries 2888 * if this is not the case we add the padding after the header 2889 */ 2890 padsize = ath5k_add_padding(skb); 2891 if (padsize < 0) { 2892 ATH5K_ERR(sc, "tx hdrlen not %%4: not enough" 2893 " headroom to pad"); 2894 goto drop_packet; 2895 } 2896 2897 spin_lock_irqsave(&sc->txbuflock, flags); 2898 if (list_empty(&sc->txbuf)) { 2899 ATH5K_ERR(sc, "no further txbuf available, dropping packet\n"); 2900 spin_unlock_irqrestore(&sc->txbuflock, flags); 2901 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb)); 2902 goto drop_packet; 2903 } 2904 bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list); 2905 list_del(&bf->list); 2906 sc->txbuf_len--; 2907 if (list_empty(&sc->txbuf)) 2908 ieee80211_stop_queues(hw); 2909 spin_unlock_irqrestore(&sc->txbuflock, flags); 2910 2911 bf->skb = skb; 2912 2913 if (ath5k_txbuf_setup(sc, bf, txq, padsize)) { 2914 bf->skb = NULL; 2915 spin_lock_irqsave(&sc->txbuflock, flags); 2916 list_add_tail(&bf->list, &sc->txbuf); 2917 sc->txbuf_len++; 2918 spin_unlock_irqrestore(&sc->txbuflock, flags); 2919 goto drop_packet; 2920 } 2921 return NETDEV_TX_OK; 2922 2923 drop_packet: 2924 dev_kfree_skb_any(skb); 2925 return NETDEV_TX_OK; 2926 } 2927 2928 /* 2929 * Reset the hardware. If chan is not NULL, then also pause rx/tx 2930 * and change to the given channel. 2931 * 2932 * This should be called with sc->lock. 2933 */ 2934 static int 2935 ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan) 2936 { 2937 struct ath5k_hw *ah = sc->ah; 2938 int ret; 2939 2940 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n"); 2941 2942 ath5k_hw_set_imr(ah, 0); 2943 synchronize_irq(sc->pdev->irq); 2944 stop_tasklets(sc); 2945 2946 if (chan) { 2947 ath5k_txq_cleanup(sc); 2948 ath5k_rx_stop(sc); 2949 2950 sc->curchan = chan; 2951 sc->curband = &sc->sbands[chan->band]; 2952 } 2953 ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL); 2954 if (ret) { 2955 ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret); 2956 goto err; 2957 } 2958 2959 ret = ath5k_rx_start(sc); 2960 if (ret) { 2961 ATH5K_ERR(sc, "can't start recv logic\n"); 2962 goto err; 2963 } 2964 2965 ath5k_ani_init(ah, ah->ah_sc->ani_state.ani_mode); 2966 2967 ah->ah_cal_next_full = jiffies; 2968 ah->ah_cal_next_ani = jiffies; 2969 ah->ah_cal_next_nf = jiffies; 2970 2971 /* 2972 * Change channels and update the h/w rate map if we're switching; 2973 * e.g. 11a to 11b/g. 2974 * 2975 * We may be doing a reset in response to an ioctl that changes the 2976 * channel so update any state that might change as a result. 2977 * 2978 * XXX needed? 2979 */ 2980 /* ath5k_chan_change(sc, c); */ 2981 2982 ath5k_beacon_config(sc); 2983 /* intrs are enabled by ath5k_beacon_config */ 2984 2985 ieee80211_wake_queues(sc->hw); 2986 2987 return 0; 2988 err: 2989 return ret; 2990 } 2991 2992 static void ath5k_reset_work(struct work_struct *work) 2993 { 2994 struct ath5k_softc *sc = container_of(work, struct ath5k_softc, 2995 reset_work); 2996 2997 mutex_lock(&sc->lock); 2998 ath5k_reset(sc, sc->curchan); 2999 mutex_unlock(&sc->lock); 3000 } 3001 3002 static int ath5k_start(struct ieee80211_hw *hw) 3003 { 3004 return ath5k_init(hw->priv); 3005 } 3006 3007 static void ath5k_stop(struct ieee80211_hw *hw) 3008 { 3009 ath5k_stop_hw(hw->priv); 3010 } 3011 3012 static int ath5k_add_interface(struct ieee80211_hw *hw, 3013 struct ieee80211_vif *vif) 3014 { 3015 struct ath5k_softc *sc = hw->priv; 3016 int ret; 3017 3018 mutex_lock(&sc->lock); 3019 if (sc->vif) { 3020 ret = 0; 3021 goto end; 3022 } 3023 3024 sc->vif = vif; 3025 3026 switch (vif->type) { 3027 case NL80211_IFTYPE_AP: 3028 case NL80211_IFTYPE_STATION: 3029 case NL80211_IFTYPE_ADHOC: 3030 case NL80211_IFTYPE_MESH_POINT: 3031 case NL80211_IFTYPE_MONITOR: 3032 sc->opmode = vif->type; 3033 break; 3034 default: 3035 ret = -EOPNOTSUPP; 3036 goto end; 3037 } 3038 3039 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "add interface mode %d\n", sc->opmode); 3040 3041 ath5k_hw_set_lladdr(sc->ah, vif->addr); 3042 ath5k_mode_setup(sc); 3043 3044 ret = 0; 3045 end: 3046 mutex_unlock(&sc->lock); 3047 return ret; 3048 } 3049 3050 static void 3051 ath5k_remove_interface(struct ieee80211_hw *hw, 3052 struct ieee80211_vif *vif) 3053 { 3054 struct ath5k_softc *sc = hw->priv; 3055 u8 mac[ETH_ALEN] = {}; 3056 3057 mutex_lock(&sc->lock); 3058 if (sc->vif != vif) 3059 goto end; 3060 3061 ath5k_hw_set_lladdr(sc->ah, mac); 3062 sc->vif = NULL; 3063 end: 3064 mutex_unlock(&sc->lock); 3065 } 3066 3067 /* 3068 * TODO: Phy disable/diversity etc 3069 */ 3070 static int 3071 ath5k_config(struct ieee80211_hw *hw, u32 changed) 3072 { 3073 struct ath5k_softc *sc = hw->priv; 3074 struct ath5k_hw *ah = sc->ah; 3075 struct ieee80211_conf *conf = &hw->conf; 3076 int ret = 0; 3077 3078 mutex_lock(&sc->lock); 3079 3080 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 3081 ret = ath5k_chan_set(sc, conf->channel); 3082 if (ret < 0) 3083 goto unlock; 3084 } 3085 3086 if ((changed & IEEE80211_CONF_CHANGE_POWER) && 3087 (sc->power_level != conf->power_level)) { 3088 sc->power_level = conf->power_level; 3089 3090 /* Half dB steps */ 3091 ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2)); 3092 } 3093 3094 /* TODO: 3095 * 1) Move this on config_interface and handle each case 3096 * separately eg. when we have only one STA vif, use 3097 * AR5K_ANTMODE_SINGLE_AP 3098 * 3099 * 2) Allow the user to change antenna mode eg. when only 3100 * one antenna is present 3101 * 3102 * 3) Allow the user to set default/tx antenna when possible 3103 * 3104 * 4) Default mode should handle 90% of the cases, together 3105 * with fixed a/b and single AP modes we should be able to 3106 * handle 99%. Sectored modes are extreme cases and i still 3107 * haven't found a usage for them. If we decide to support them, 3108 * then we must allow the user to set how many tx antennas we 3109 * have available 3110 */ 3111 ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode); 3112 3113 unlock: 3114 mutex_unlock(&sc->lock); 3115 return ret; 3116 } 3117 3118 static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw, 3119 struct netdev_hw_addr_list *mc_list) 3120 { 3121 u32 mfilt[2], val; 3122 u8 pos; 3123 struct netdev_hw_addr *ha; 3124 3125 mfilt[0] = 0; 3126 mfilt[1] = 1; 3127 3128 netdev_hw_addr_list_for_each(ha, mc_list) { 3129 /* calculate XOR of eight 6-bit values */ 3130 val = get_unaligned_le32(ha->addr + 0); 3131 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 3132 val = get_unaligned_le32(ha->addr + 3); 3133 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 3134 pos &= 0x3f; 3135 mfilt[pos / 32] |= (1 << (pos % 32)); 3136 /* XXX: we might be able to just do this instead, 3137 * but not sure, needs testing, if we do use this we'd 3138 * neet to inform below to not reset the mcast */ 3139 /* ath5k_hw_set_mcast_filterindex(ah, 3140 * ha->addr[5]); */ 3141 } 3142 3143 return ((u64)(mfilt[1]) << 32) | mfilt[0]; 3144 } 3145 3146 #define SUPPORTED_FIF_FLAGS \ 3147 FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | \ 3148 FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \ 3149 FIF_BCN_PRBRESP_PROMISC 3150 /* 3151 * o always accept unicast, broadcast, and multicast traffic 3152 * o multicast traffic for all BSSIDs will be enabled if mac80211 3153 * says it should be 3154 * o maintain current state of phy ofdm or phy cck error reception. 3155 * If the hardware detects any of these type of errors then 3156 * ath5k_hw_get_rx_filter() will pass to us the respective 3157 * hardware filters to be able to receive these type of frames. 3158 * o probe request frames are accepted only when operating in 3159 * hostap, adhoc, or monitor modes 3160 * o enable promiscuous mode according to the interface state 3161 * o accept beacons: 3162 * - when operating in adhoc mode so the 802.11 layer creates 3163 * node table entries for peers, 3164 * - when operating in station mode for collecting rssi data when 3165 * the station is otherwise quiet, or 3166 * - when scanning 3167 */ 3168 static void ath5k_configure_filter(struct ieee80211_hw *hw, 3169 unsigned int changed_flags, 3170 unsigned int *new_flags, 3171 u64 multicast) 3172 { 3173 struct ath5k_softc *sc = hw->priv; 3174 struct ath5k_hw *ah = sc->ah; 3175 u32 mfilt[2], rfilt; 3176 3177 mutex_lock(&sc->lock); 3178 3179 mfilt[0] = multicast; 3180 mfilt[1] = multicast >> 32; 3181 3182 /* Only deal with supported flags */ 3183 changed_flags &= SUPPORTED_FIF_FLAGS; 3184 *new_flags &= SUPPORTED_FIF_FLAGS; 3185 3186 /* If HW detects any phy or radar errors, leave those filters on. 3187 * Also, always enable Unicast, Broadcasts and Multicast 3188 * XXX: move unicast, bssid broadcasts and multicast to mac80211 */ 3189 rfilt = (ath5k_hw_get_rx_filter(ah) & (AR5K_RX_FILTER_PHYERR)) | 3190 (AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST | 3191 AR5K_RX_FILTER_MCAST); 3192 3193 if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) { 3194 if (*new_flags & FIF_PROMISC_IN_BSS) { 3195 __set_bit(ATH_STAT_PROMISC, sc->status); 3196 } else { 3197 __clear_bit(ATH_STAT_PROMISC, sc->status); 3198 } 3199 } 3200 3201 if (test_bit(ATH_STAT_PROMISC, sc->status)) 3202 rfilt |= AR5K_RX_FILTER_PROM; 3203 3204 /* Note, AR5K_RX_FILTER_MCAST is already enabled */ 3205 if (*new_flags & FIF_ALLMULTI) { 3206 mfilt[0] = ~0; 3207 mfilt[1] = ~0; 3208 } 3209 3210 /* This is the best we can do */ 3211 if (*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)) 3212 rfilt |= AR5K_RX_FILTER_PHYERR; 3213 3214 /* FIF_BCN_PRBRESP_PROMISC really means to enable beacons 3215 * and probes for any BSSID, this needs testing */ 3216 if (*new_flags & FIF_BCN_PRBRESP_PROMISC) 3217 rfilt |= AR5K_RX_FILTER_BEACON | AR5K_RX_FILTER_PROBEREQ; 3218 3219 /* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not 3220 * set we should only pass on control frames for this 3221 * station. This needs testing. I believe right now this 3222 * enables *all* control frames, which is OK.. but 3223 * but we should see if we can improve on granularity */ 3224 if (*new_flags & FIF_CONTROL) 3225 rfilt |= AR5K_RX_FILTER_CONTROL; 3226 3227 /* Additional settings per mode -- this is per ath5k */ 3228 3229 /* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */ 3230 3231 switch (sc->opmode) { 3232 case NL80211_IFTYPE_MESH_POINT: 3233 case NL80211_IFTYPE_MONITOR: 3234 rfilt |= AR5K_RX_FILTER_CONTROL | 3235 AR5K_RX_FILTER_BEACON | 3236 AR5K_RX_FILTER_PROBEREQ | 3237 AR5K_RX_FILTER_PROM; 3238 break; 3239 case NL80211_IFTYPE_AP: 3240 case NL80211_IFTYPE_ADHOC: 3241 rfilt |= AR5K_RX_FILTER_PROBEREQ | 3242 AR5K_RX_FILTER_BEACON; 3243 break; 3244 case NL80211_IFTYPE_STATION: 3245 if (sc->assoc) 3246 rfilt |= AR5K_RX_FILTER_BEACON; 3247 default: 3248 break; 3249 } 3250 3251 /* Set filters */ 3252 ath5k_hw_set_rx_filter(ah, rfilt); 3253 3254 /* Set multicast bits */ 3255 ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]); 3256 /* Set the cached hw filter flags, this will alter actually 3257 * be set in HW */ 3258 sc->filter_flags = rfilt; 3259 3260 mutex_unlock(&sc->lock); 3261 } 3262 3263 static int 3264 ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 3265 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 3266 struct ieee80211_key_conf *key) 3267 { 3268 struct ath5k_softc *sc = hw->priv; 3269 struct ath5k_hw *ah = sc->ah; 3270 struct ath_common *common = ath5k_hw_common(ah); 3271 int ret = 0; 3272 3273 if (modparam_nohwcrypt) 3274 return -EOPNOTSUPP; 3275 3276 if (sc->opmode == NL80211_IFTYPE_AP) 3277 return -EOPNOTSUPP; 3278 3279 switch (key->alg) { 3280 case ALG_WEP: 3281 case ALG_TKIP: 3282 break; 3283 case ALG_CCMP: 3284 if (sc->ah->ah_aes_support) 3285 break; 3286 3287 return -EOPNOTSUPP; 3288 default: 3289 WARN_ON(1); 3290 return -EINVAL; 3291 } 3292 3293 mutex_lock(&sc->lock); 3294 3295 switch (cmd) { 3296 case SET_KEY: 3297 ret = ath5k_hw_set_key(sc->ah, key->keyidx, key, 3298 sta ? sta->addr : NULL); 3299 if (ret) { 3300 ATH5K_ERR(sc, "can't set the key\n"); 3301 goto unlock; 3302 } 3303 __set_bit(key->keyidx, common->keymap); 3304 key->hw_key_idx = key->keyidx; 3305 key->flags |= (IEEE80211_KEY_FLAG_GENERATE_IV | 3306 IEEE80211_KEY_FLAG_GENERATE_MMIC); 3307 break; 3308 case DISABLE_KEY: 3309 ath5k_hw_reset_key(sc->ah, key->keyidx); 3310 __clear_bit(key->keyidx, common->keymap); 3311 break; 3312 default: 3313 ret = -EINVAL; 3314 goto unlock; 3315 } 3316 3317 unlock: 3318 mmiowb(); 3319 mutex_unlock(&sc->lock); 3320 return ret; 3321 } 3322 3323 static int 3324 ath5k_get_stats(struct ieee80211_hw *hw, 3325 struct ieee80211_low_level_stats *stats) 3326 { 3327 struct ath5k_softc *sc = hw->priv; 3328 3329 /* Force update */ 3330 ath5k_hw_update_mib_counters(sc->ah); 3331 3332 stats->dot11ACKFailureCount = sc->stats.ack_fail; 3333 stats->dot11RTSFailureCount = sc->stats.rts_fail; 3334 stats->dot11RTSSuccessCount = sc->stats.rts_ok; 3335 stats->dot11FCSErrorCount = sc->stats.fcs_error; 3336 3337 return 0; 3338 } 3339 3340 static int ath5k_get_survey(struct ieee80211_hw *hw, int idx, 3341 struct survey_info *survey) 3342 { 3343 struct ath5k_softc *sc = hw->priv; 3344 struct ieee80211_conf *conf = &hw->conf; 3345 3346 if (idx != 0) 3347 return -ENOENT; 3348 3349 survey->channel = conf->channel; 3350 survey->filled = SURVEY_INFO_NOISE_DBM; 3351 survey->noise = sc->ah->ah_noise_floor; 3352 3353 return 0; 3354 } 3355 3356 static u64 3357 ath5k_get_tsf(struct ieee80211_hw *hw) 3358 { 3359 struct ath5k_softc *sc = hw->priv; 3360 3361 return ath5k_hw_get_tsf64(sc->ah); 3362 } 3363 3364 static void 3365 ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf) 3366 { 3367 struct ath5k_softc *sc = hw->priv; 3368 3369 ath5k_hw_set_tsf64(sc->ah, tsf); 3370 } 3371 3372 static void 3373 ath5k_reset_tsf(struct ieee80211_hw *hw) 3374 { 3375 struct ath5k_softc *sc = hw->priv; 3376 3377 /* 3378 * in IBSS mode we need to update the beacon timers too. 3379 * this will also reset the TSF if we call it with 0 3380 */ 3381 if (sc->opmode == NL80211_IFTYPE_ADHOC) 3382 ath5k_beacon_update_timers(sc, 0); 3383 else 3384 ath5k_hw_reset_tsf(sc->ah); 3385 } 3386 3387 /* 3388 * Updates the beacon that is sent by ath5k_beacon_send. For adhoc, 3389 * this is called only once at config_bss time, for AP we do it every 3390 * SWBA interrupt so that the TIM will reflect buffered frames. 3391 * 3392 * Called with the beacon lock. 3393 */ 3394 static int 3395 ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 3396 { 3397 int ret; 3398 struct ath5k_softc *sc = hw->priv; 3399 struct sk_buff *skb; 3400 3401 if (WARN_ON(!vif)) { 3402 ret = -EINVAL; 3403 goto out; 3404 } 3405 3406 skb = ieee80211_beacon_get(hw, vif); 3407 3408 if (!skb) { 3409 ret = -ENOMEM; 3410 goto out; 3411 } 3412 3413 ath5k_debug_dump_skb(sc, skb, "BC ", 1); 3414 3415 ath5k_txbuf_free_skb(sc, sc->bbuf); 3416 sc->bbuf->skb = skb; 3417 ret = ath5k_beacon_setup(sc, sc->bbuf); 3418 if (ret) 3419 sc->bbuf->skb = NULL; 3420 out: 3421 return ret; 3422 } 3423 3424 static void 3425 set_beacon_filter(struct ieee80211_hw *hw, bool enable) 3426 { 3427 struct ath5k_softc *sc = hw->priv; 3428 struct ath5k_hw *ah = sc->ah; 3429 u32 rfilt; 3430 rfilt = ath5k_hw_get_rx_filter(ah); 3431 if (enable) 3432 rfilt |= AR5K_RX_FILTER_BEACON; 3433 else 3434 rfilt &= ~AR5K_RX_FILTER_BEACON; 3435 ath5k_hw_set_rx_filter(ah, rfilt); 3436 sc->filter_flags = rfilt; 3437 } 3438 3439 static void ath5k_bss_info_changed(struct ieee80211_hw *hw, 3440 struct ieee80211_vif *vif, 3441 struct ieee80211_bss_conf *bss_conf, 3442 u32 changes) 3443 { 3444 struct ath5k_softc *sc = hw->priv; 3445 struct ath5k_hw *ah = sc->ah; 3446 struct ath_common *common = ath5k_hw_common(ah); 3447 unsigned long flags; 3448 3449 mutex_lock(&sc->lock); 3450 if (WARN_ON(sc->vif != vif)) 3451 goto unlock; 3452 3453 if (changes & BSS_CHANGED_BSSID) { 3454 /* Cache for later use during resets */ 3455 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); 3456 common->curaid = 0; 3457 ath5k_hw_set_associd(ah); 3458 mmiowb(); 3459 } 3460 3461 if (changes & BSS_CHANGED_BEACON_INT) 3462 sc->bintval = bss_conf->beacon_int; 3463 3464 if (changes & BSS_CHANGED_ASSOC) { 3465 sc->assoc = bss_conf->assoc; 3466 if (sc->opmode == NL80211_IFTYPE_STATION) 3467 set_beacon_filter(hw, sc->assoc); 3468 ath5k_hw_set_ledstate(sc->ah, sc->assoc ? 3469 AR5K_LED_ASSOC : AR5K_LED_INIT); 3470 if (bss_conf->assoc) { 3471 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, 3472 "Bss Info ASSOC %d, bssid: %pM\n", 3473 bss_conf->aid, common->curbssid); 3474 common->curaid = bss_conf->aid; 3475 ath5k_hw_set_associd(ah); 3476 /* Once ANI is available you would start it here */ 3477 } 3478 } 3479 3480 if (changes & BSS_CHANGED_BEACON) { 3481 spin_lock_irqsave(&sc->block, flags); 3482 ath5k_beacon_update(hw, vif); 3483 spin_unlock_irqrestore(&sc->block, flags); 3484 } 3485 3486 if (changes & BSS_CHANGED_BEACON_ENABLED) 3487 sc->enable_beacon = bss_conf->enable_beacon; 3488 3489 if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED | 3490 BSS_CHANGED_BEACON_INT)) 3491 ath5k_beacon_config(sc); 3492 3493 unlock: 3494 mutex_unlock(&sc->lock); 3495 } 3496 3497 static void ath5k_sw_scan_start(struct ieee80211_hw *hw) 3498 { 3499 struct ath5k_softc *sc = hw->priv; 3500 if (!sc->assoc) 3501 ath5k_hw_set_ledstate(sc->ah, AR5K_LED_SCAN); 3502 } 3503 3504 static void ath5k_sw_scan_complete(struct ieee80211_hw *hw) 3505 { 3506 struct ath5k_softc *sc = hw->priv; 3507 ath5k_hw_set_ledstate(sc->ah, sc->assoc ? 3508 AR5K_LED_ASSOC : AR5K_LED_INIT); 3509 } 3510 3511 /** 3512 * ath5k_set_coverage_class - Set IEEE 802.11 coverage class 3513 * 3514 * @hw: struct ieee80211_hw pointer 3515 * @coverage_class: IEEE 802.11 coverage class number 3516 * 3517 * Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for given 3518 * coverage class. The values are persistent, they are restored after device 3519 * reset. 3520 */ 3521 static void ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class) 3522 { 3523 struct ath5k_softc *sc = hw->priv; 3524 3525 mutex_lock(&sc->lock); 3526 ath5k_hw_set_coverage_class(sc->ah, coverage_class); 3527 mutex_unlock(&sc->lock); 3528 } 3529