/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/ath9k_platform.h>
#include <linux/module.h>

#include "ath9k.h"

struct ath9k_eeprom_ctx {
        struct completion complete;
        struct ath_hw *ah;
};

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int ath9k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

int led_blink;
module_param_named(blink, led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");

static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");

static int ath9k_enable_diversity;
module_param_named(enable_diversity, ath9k_enable_diversity, int, 0444);
MODULE_PARM_DESC(enable_diversity, "Enable Antenna diversity for AR9565");

bool is_ath9k_unloaded;

/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx) { \
        .band = IEEE80211_BAND_2GHZ, \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
        .band = IEEE80211_BAND_5GHZ, \
        .center_freq = (_freq), \
        .hw_value = (_idx), \
        .max_power = 20, \
}

/* Some 2 GHz radios are actually tunable from 2312-2732 MHz
 * in 5 MHz steps; we support only the channels we know we have
 * calibration data for on all cards, which keeps this table static. */
static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
        CHAN2G(2412, 0), /* Channel 1 */
        CHAN2G(2417, 1), /* Channel 2 */
        CHAN2G(2422, 2), /* Channel 3 */
        CHAN2G(2427, 3), /* Channel 4 */
        CHAN2G(2432, 4), /* Channel 5 */
        CHAN2G(2437, 5), /* Channel 6 */
        CHAN2G(2442, 6), /* Channel 7 */
        CHAN2G(2447, 7), /* Channel 8 */
        CHAN2G(2452, 8), /* Channel 9 */
        CHAN2G(2457, 9), /* Channel 10 */
        CHAN2G(2462, 10), /* Channel 11 */
        CHAN2G(2467, 11), /* Channel 12 */
        CHAN2G(2472, 12), /* Channel 13 */
        CHAN2G(2484, 13), /* Channel 14 */
};
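
/*
 * Note: the hw_value indices continue across both tables (the 5 GHz
 * entries below start at 14), so the combined size of the two tables
 * must equal ATH9K_NUM_CHANNELS; ath9k_init_channels_rates() enforces
 * this with a BUILD_BUG_ON().
 */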
/* Some 5 GHz radios are actually tunable from XXXX-YYYY MHz
 * in 5 MHz steps; we support only the channels we know we have
 * calibration data for on all cards, which keeps this table static. */
static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
        /* _We_ call this UNII 1 */
        CHAN5G(5180, 14), /* Channel 36 */
        CHAN5G(5200, 15), /* Channel 40 */
        CHAN5G(5220, 16), /* Channel 44 */
        CHAN5G(5240, 17), /* Channel 48 */
        /* _We_ call this UNII 2 */
        CHAN5G(5260, 18), /* Channel 52 */
        CHAN5G(5280, 19), /* Channel 56 */
        CHAN5G(5300, 20), /* Channel 60 */
        CHAN5G(5320, 21), /* Channel 64 */
        /* _We_ call this "Middle band" */
        CHAN5G(5500, 22), /* Channel 100 */
        CHAN5G(5520, 23), /* Channel 104 */
        CHAN5G(5540, 24), /* Channel 108 */
        CHAN5G(5560, 25), /* Channel 112 */
        CHAN5G(5580, 26), /* Channel 116 */
        CHAN5G(5600, 27), /* Channel 120 */
        CHAN5G(5620, 28), /* Channel 124 */
        CHAN5G(5640, 29), /* Channel 128 */
        CHAN5G(5660, 30), /* Channel 132 */
        CHAN5G(5680, 31), /* Channel 136 */
        CHAN5G(5700, 32), /* Channel 140 */
        /* _We_ call this UNII 3 */
        CHAN5G(5745, 33), /* Channel 149 */
        CHAN5G(5765, 34), /* Channel 153 */
        CHAN5G(5785, 35), /* Channel 157 */
        CHAN5G(5805, 36), /* Channel 161 */
        CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
        ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) { \
        .bitrate = (_bitrate), \
        .flags = (_flags), \
        .hw_value = (_hw_rate), \
        .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

static struct ieee80211_rate ath9k_legacy_rates[] = {
        RATE(10, 0x1b, 0),
        RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
        RATE(60, 0x0b, 0),
        RATE(90, 0x0f, 0),
        RATE(120, 0x0a, 0),
        RATE(180, 0x0e, 0),
        RATE(240, 0x09, 0),
        RATE(360, 0x0d, 0),
        RATE(480, 0x08, 0),
        RATE(540, 0x0c, 0),
};

#ifdef CONFIG_MAC80211_LEDS
static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
        { .throughput = 0 * 1024, .blink_time = 334 },
        { .throughput = 1 * 1024, .blink_time = 260 },
        { .throughput = 5 * 1024, .blink_time = 220 },
        { .throughput = 10 * 1024, .blink_time = 190 },
        { .throughput = 20 * 1024, .blink_time = 170 },
        { .throughput = 50 * 1024, .blink_time = 150 },
        { .throughput = 70 * 1024, .blink_time = 130 },
        { .throughput = 100 * 1024, .blink_time = 110 },
        { .throughput = 200 * 1024, .blink_time = 80 },
        { .throughput = 300 * 1024, .blink_time = 50 },
};
#endif

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Reads and writes share the same lock. We do this to serialize
 * register accesses on Atheros 802.11n PCI devices only. This is
 * required as the FIFO on these devices can only sanely accept two
 * requests at a time.
 */
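
/*
 * These accessors are installed as ah->reg_ops.{read,write,rmw} in
 * ath9k_init_softc() below; the spinlock path is only taken on SMP
 * builds when the HW configuration selects SER_REG_MODE_ON.
 */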
static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;

        if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
                unsigned long flags;
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                iowrite32(val, sc->mem + reg_offset);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;
        u32 val;

        if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
                unsigned long flags;
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                val = ioread32(sc->mem + reg_offset);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                val = ioread32(sc->mem + reg_offset);
        return val;
}

static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
                                    u32 set, u32 clr)
{
        u32 val;

        val = ioread32(sc->mem + reg_offset);
        val &= ~clr;
        val |= set;
        iowrite32(val, sc->mem + reg_offset);

        return val;
}

static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
{
        struct ath_hw *ah = (struct ath_hw *) hw_priv;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_softc *sc = (struct ath_softc *) common->priv;
        unsigned long uninitialized_var(flags);
        u32 val;

        if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
                spin_lock_irqsave(&sc->sc_serial_rw, flags);
                val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
                spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
        } else
                val = __ath9k_reg_rmw(sc, reg_offset, set, clr);

        return val;
}

/**************************/
/*     Initialization     */
/**************************/

static void setup_ht_cap(struct ath_softc *sc,
                         struct ieee80211_sta_ht_cap *ht_info)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        u8 tx_streams, rx_streams;
        int i, max_streams;

        ht_info->ht_supported = true;
        ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
                       IEEE80211_HT_CAP_SM_PS |
                       IEEE80211_HT_CAP_SGI_40 |
                       IEEE80211_HT_CAP_DSSSCCK40;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
                ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
                ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

        ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
        ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

        if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
                max_streams = 1;
        else if (AR_SREV_9462(ah))
                max_streams = 2;
        else if (AR_SREV_9300_20_OR_LATER(ah))
                max_streams = 3;
        else
                max_streams = 2;

        if (AR_SREV_9280_20_OR_LATER(ah)) {
                if (max_streams >= 2)
                        ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
                ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
        }

        /* set up supported mcs set */
        memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
        tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
        rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);
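
        /*
         * The rx_mask filled in below advertises MCS 0-7 for every RX
         * chain; when the TX and RX chain counts differ, mac80211 also
         * needs the TX stream count encoded into tx_params.
         */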
        ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
                tx_streams, rx_streams);

        if (tx_streams != rx_streams) {
                ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
                ht_info->mcs.tx_params |= ((tx_streams - 1) <<
                                IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
        }

        for (i = 0; i < rx_streams; i++)
                ht_info->mcs.rx_mask[i] = 0xff;

        ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}

static int ath9k_reg_notifier(struct wiphy *wiphy,
                              struct regulatory_request *request)
{
        struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
        struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
        int ret;

        ret = ath_reg_notifier_apply(wiphy, request, reg);

        /* Set tx power */
        if (ah->curchan) {
                sc->config.txpowlimit = 2 * ah->curchan->chan->max_power;
                ath9k_ps_wakeup(sc);
                ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
                sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
                ath9k_ps_restore(sc);
        }

        return ret;
}

/*
 * This function will allocate both the DMA descriptor structure, and the
 * buffers it contains. These are used to contain the descriptors used
 * by the system.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
                      struct list_head *head, const char *name,
                      int nbuf, int ndesc, bool is_tx)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        u8 *ds;
        struct ath_buf *bf;
        int i, bsize, error, desc_len;

        ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
                name, nbuf, ndesc);

        INIT_LIST_HEAD(head);

        if (is_tx)
                desc_len = sc->sc_ah->caps.tx_desc_len;
        else
                desc_len = sizeof(struct ath_desc);

        /* ath_desc must be a multiple of DWORDs */
        if ((desc_len % 4) != 0) {
                ath_err(common, "ath_desc not DWORD aligned\n");
                BUG_ON((desc_len % 4) != 0);
                error = -ENOMEM;
                goto fail;
        }

        dd->dd_desc_len = desc_len * nbuf * ndesc;

        /*
         * Need additional DMA memory because we can't use
         * descriptors that cross the 4K page boundary. Assume
         * one skipped descriptor per 4K page.
         */
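        /*
         * Each pass of the loop below adds padding for the descriptors
         * assumed skipped so far; since that padding can itself introduce
         * further 4K boundary crossings, iterate until no new skips are
         * required.
         */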
        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                u32 ndesc_skipped =
                        ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
                u32 dma_len;

                while (ndesc_skipped) {
                        dma_len = ndesc_skipped * desc_len;
                        dd->dd_desc_len += dma_len;

                        ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
                }
        }

        /* allocate descriptors */
        dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
                                         &dd->dd_desc_paddr, GFP_KERNEL);
        if (dd->dd_desc == NULL) {
                error = -ENOMEM;
                goto fail;
        }
        ds = (u8 *) dd->dd_desc;
        ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
                name, ds, (u32) dd->dd_desc_len,
                ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

        /* allocate buffers */
        bsize = sizeof(struct ath_buf) * nbuf;
        bf = kzalloc(bsize, GFP_KERNEL);
        if (bf == NULL) {
                error = -ENOMEM;
                goto fail2;
        }
        dd->dd_bufptr = bf;

        for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
                bf->bf_desc = ds;
                bf->bf_daddr = DS2PHYS(dd, ds);

                if (!(sc->sc_ah->caps.hw_caps &
                      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                        /*
                         * Skip descriptor addresses which can cause 4KB
                         * boundary crossing (addr + length) with a 32 dword
                         * descriptor fetch.
                         */
                        while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
                                BUG_ON((caddr_t) bf->bf_desc >=
                                       ((caddr_t) dd->dd_desc +
                                        dd->dd_desc_len));

                                ds += (desc_len * ndesc);
                                bf->bf_desc = ds;
                                bf->bf_daddr = DS2PHYS(dd, ds);
                        }
                }
                list_add_tail(&bf->list, head);
        }
        return 0;
fail2:
        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);
fail:
        memset(dd, 0, sizeof(*dd));
        return error;
}

static int ath9k_init_queues(struct ath_softc *sc)
{
        int i = 0;

        sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
        sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);

        sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
        ath_cabq_update(sc);

        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
                sc->tx.txq_map[i]->mac80211_qnum = i;
                sc->tx.txq_max_pending[i] = ATH_MAX_QDEPTH;
        }
        return 0;
}

static int ath9k_init_channels_rates(struct ath_softc *sc)
{
        void *channels;

        BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
                     ARRAY_SIZE(ath9k_5ghz_chantable) !=
                     ATH9K_NUM_CHANNELS);

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
                channels = kmemdup(ath9k_2ghz_chantable,
                                   sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
                if (!channels)
                        return -ENOMEM;

                sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
                sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
                sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
                        ARRAY_SIZE(ath9k_2ghz_chantable);
                sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
                sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
                        ARRAY_SIZE(ath9k_legacy_rates);
        }

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
                channels = kmemdup(ath9k_5ghz_chantable,
                                   sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
                if (!channels) {
                        if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
                                kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
                        return -ENOMEM;
                }

                sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
                sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
                sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
                        ARRAY_SIZE(ath9k_5ghz_chantable);
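                /*
                 * The first four entries of ath9k_legacy_rates are the
                 * 11b CCK rates (1, 2, 5.5 and 11 Mbps), which do not
                 * exist in 5 GHz; hence the +4/-4 offsets below.
                 */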
                sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
                        ath9k_legacy_rates + 4;
                sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
                        ARRAY_SIZE(ath9k_legacy_rates) - 4;
        }
        return 0;
}

static void ath9k_init_misc(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        int i = 0;

        setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

        sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
        sc->config.txpowlimit = ATH_TXPOWER_MAX;
        memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
        sc->beacon.slottime = ATH9K_SLOT_TIME_9;

        for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
                sc->beacon.bslot[i] = NULL;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
                sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
}

static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
                                    void *ctx)
{
        struct ath9k_eeprom_ctx *ec = ctx;

        if (eeprom_blob)
                ec->ah->eeprom_blob = eeprom_blob;

        complete(&ec->complete);
}
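
/*
 * Load the EEPROM blob named in the platform data through the firmware
 * loader. request_firmware_nowait() is paired with a completion, so the
 * call is effectively synchronous: the callback above merely stashes the
 * blob in ah->eeprom_blob and signals completion.
 */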
static int ath9k_eeprom_request(struct ath_softc *sc, const char *name)
{
        struct ath9k_eeprom_ctx ec;
        struct ath_hw *ah = sc->sc_ah;
        int err;

        /* try to load the EEPROM content asynchronously */
        init_completion(&ec.complete);
        ec.ah = sc->sc_ah;

        err = request_firmware_nowait(THIS_MODULE, 1, name, sc->dev, GFP_KERNEL,
                                      &ec, ath9k_eeprom_request_cb);
        if (err < 0) {
                ath_err(ath9k_hw_common(ah),
                        "EEPROM request failed\n");
                return err;
        }

        wait_for_completion(&ec.complete);

        if (!ah->eeprom_blob) {
                ath_err(ath9k_hw_common(ah),
                        "Unable to load EEPROM file %s\n", name);
                return -EINVAL;
        }

        return 0;
}

static void ath9k_eeprom_release(struct ath_softc *sc)
{
        release_firmware(sc->sc_ah->eeprom_blob);
}

static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
                            const struct ath_bus_ops *bus_ops)
{
        struct ath9k_platform_data *pdata = sc->dev->platform_data;
        struct ath_hw *ah = NULL;
        struct ath_common *common;
        int ret = 0, i;
        int csz = 0;

        ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
        if (!ah)
                return -ENOMEM;

        ah->hw = sc->hw;
        ah->hw_version.devid = devid;
        ah->reg_ops.read = ath9k_ioread32;
        ah->reg_ops.write = ath9k_iowrite32;
        ah->reg_ops.rmw = ath9k_reg_rmw;
        atomic_set(&ah->intr_ref_cnt, -1);
        sc->sc_ah = ah;

        sc->dfs_detector = dfs_pattern_detector_init(NL80211_DFS_UNSET);

        if (!pdata) {
                ah->ah_flags |= AH_USE_EEPROM;
                sc->sc_ah->led_pin = -1;
        } else {
                sc->sc_ah->gpio_mask = pdata->gpio_mask;
                sc->sc_ah->gpio_val = pdata->gpio_val;
                sc->sc_ah->led_pin = pdata->led_pin;
                ah->is_clk_25mhz = pdata->is_clk_25mhz;
                ah->get_mac_revision = pdata->get_mac_revision;
                ah->external_reset = pdata->external_reset;
        }

        common = ath9k_hw_common(ah);
        common->ops = &ah->reg_ops;
        common->bus_ops = bus_ops;
        common->ah = ah;
        common->hw = sc->hw;
        common->priv = sc;
        common->debug_mask = ath9k_debug;
        common->btcoex_enabled = ath9k_btcoex_enable == 1;
        common->disable_ani = false;

        /*
         * Enable antenna diversity only when BTCOEX is disabled
         * and the user manually requests the feature.
         */
        if (!common->btcoex_enabled && ath9k_enable_diversity)
                common->antenna_diversity = 1;

        spin_lock_init(&common->cc_lock);

        spin_lock_init(&sc->sc_serial_rw);
        spin_lock_init(&sc->sc_pm_lock);
        mutex_init(&sc->mutex);
#ifdef CONFIG_ATH9K_MAC_DEBUG
        spin_lock_init(&sc->debug.samp_lock);
#endif
        tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
        tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
                     (unsigned long)sc);

        INIT_WORK(&sc->hw_reset_work, ath_reset_work);
        INIT_WORK(&sc->hw_check_work, ath_hw_check);
        INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
        INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
        setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);

        /*
         * Cache line size is used to size and align various
         * structures used to communicate with the hardware.
         */
        ath_read_cachesize(common, &csz);
        common->cachelsz = csz << 2; /* convert to bytes */

        if (pdata && pdata->eeprom_name) {
                ret = ath9k_eeprom_request(sc, pdata->eeprom_name);
                if (ret)
                        goto err_eeprom;
        }

        /* Initializes the hardware for all supported chipsets */
        ret = ath9k_hw_init(ah);
        if (ret)
                goto err_hw;

        if (pdata && pdata->macaddr)
                memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);

        ret = ath9k_init_queues(sc);
        if (ret)
                goto err_queues;

        ret = ath9k_init_btcoex(sc);
        if (ret)
                goto err_btcoex;

        ret = ath9k_init_channels_rates(sc);
        if (ret)
                goto err_btcoex;

        ath9k_cmn_init_crypto(sc->sc_ah);
        ath9k_init_misc(sc);
        ath_fill_led_pin(sc);

        if (common->bus_ops->aspm_init)
                common->bus_ops->aspm_init(common);

        return 0;

err_btcoex:
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
        ath9k_hw_deinit(ah);
err_hw:
        ath9k_eeprom_release(sc);
err_eeprom:
        kfree(ah);
        sc->sc_ah = NULL;

        return ret;
}

static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
{
        struct ieee80211_supported_band *sband;
        struct ieee80211_channel *chan;
        struct ath_hw *ah = sc->sc_ah;
        int i;

        sband = &sc->sbands[band];
        for (i = 0; i < sband->n_channels; i++) {
                chan = &sband->channels[i];
                ah->curchan = &ah->channels[chan->hw_value];
                ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
                ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
        }
}

static void ath9k_init_txpower_limits(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath9k_channel *curchan = ah->curchan;

        if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
                ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
        if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
                ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);

        ah->curchan = curchan;
}

void ath9k_reload_chainmask_settings(struct ath_softc *sc)
{
        if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT))
                return;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
                setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
                setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
}
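
/*
 * Interface limits advertised to mac80211: up to 2048 station, P2P-client
 * and WDS interfaces, and at most 8 AP/P2P-GO (plus mesh, when enabled)
 * interfaces, all sharing a single channel.
 */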
static const struct ieee80211_iface_limit if_limits[] = {
        { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
                                BIT(NL80211_IFTYPE_P2P_CLIENT) |
                                BIT(NL80211_IFTYPE_WDS) },
        { .max = 8,    .types =
#ifdef CONFIG_MAC80211_MESH
                                BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
                                BIT(NL80211_IFTYPE_AP) |
                                BIT(NL80211_IFTYPE_P2P_GO) },
};

static const struct ieee80211_iface_combination if_comb = {
        .limits = if_limits,
        .n_limits = ARRAY_SIZE(if_limits),
        .max_interfaces = 2048,
        .num_different_channels = 1,
        .beacon_int_infra_match = true,
};

void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);

        hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
                IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
                IEEE80211_HW_SIGNAL_DBM |
                IEEE80211_HW_SUPPORTS_PS |
                IEEE80211_HW_PS_NULLFUNC_STACK |
                IEEE80211_HW_SPECTRUM_MGMT |
                IEEE80211_HW_REPORTS_TX_ACK_STATUS;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
                hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

        if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
                hw->flags |= IEEE80211_HW_MFP_CAPABLE;

        hw->wiphy->interface_modes =
                BIT(NL80211_IFTYPE_P2P_GO) |
                BIT(NL80211_IFTYPE_P2P_CLIENT) |
                BIT(NL80211_IFTYPE_AP) |
                BIT(NL80211_IFTYPE_WDS) |
                BIT(NL80211_IFTYPE_STATION) |
                BIT(NL80211_IFTYPE_ADHOC) |
                BIT(NL80211_IFTYPE_MESH_POINT);

        hw->wiphy->iface_combinations = &if_comb;
        hw->wiphy->n_iface_combinations = 1;

        if (AR_SREV_5416(sc->sc_ah))
                hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

        hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
        hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
        hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;

#ifdef CONFIG_PM_SLEEP

        if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
            device_can_wakeup(sc->dev)) {

                hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
                                          WIPHY_WOWLAN_DISCONNECT;
                hw->wiphy->wowlan.n_patterns = MAX_NUM_USER_PATTERN;
                hw->wiphy->wowlan.pattern_min_len = 1;
                hw->wiphy->wowlan.pattern_max_len = MAX_PATTERN_SIZE;

        }

        atomic_set(&sc->wow_sleep_proc_intr, -1);
        atomic_set(&sc->wow_got_bmiss_intr, -1);

#endif

        hw->queues = 4;
        hw->max_rates = 4;
        hw->channel_change_time = 5000;
        hw->max_listen_interval = 1;
        hw->max_rate_tries = 10;
        hw->sta_data_size = sizeof(struct ath_node);
        hw->vif_data_size = sizeof(struct ath_vif);

        hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
        hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;

        /* single chain devices with rx diversity */
        if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
                hw->wiphy->available_antennas_rx = BIT(0) | BIT(1);

        sc->ant_rx = hw->wiphy->available_antennas_rx;
        sc->ant_tx = hw->wiphy->available_antennas_tx;

#ifdef CONFIG_ATH9K_RATE_CONTROL
        hw->rate_control_algorithm = "ath9k_rate_control";
#endif

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
                hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
                        &sc->sbands[IEEE80211_BAND_2GHZ];
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
                hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
                        &sc->sbands[IEEE80211_BAND_5GHZ];

        ath9k_reload_chainmask_settings(sc);

        SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}
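
/*
 * Common device bring-up entry point, called by the bus-specific
 * (PCI/AHB) probe code: initialize the softc and hardware, register
 * with mac80211 and wire up regulatory, LED and rfkill support.
 */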
int ath9k_init_device(u16 devid, struct ath_softc *sc,
                      const struct ath_bus_ops *bus_ops)
{
        struct ieee80211_hw *hw = sc->hw;
        struct ath_common *common;
        struct ath_hw *ah;
        int error = 0;
        struct ath_regulatory *reg;

        /* Bring up device */
        error = ath9k_init_softc(devid, sc, bus_ops);
        if (error != 0)
                goto error_init;

        ah = sc->sc_ah;
        common = ath9k_hw_common(ah);
        ath9k_set_hw_capab(sc, hw);

        /* Initialize regulatory */
        error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
                              ath9k_reg_notifier);
        if (error)
                goto error_regd;

        reg = &common->regulatory;

        /* Setup TX DMA */
        error = ath_tx_init(sc, ATH_TXBUF);
        if (error != 0)
                goto error_tx;

        /* Setup RX DMA */
        error = ath_rx_init(sc, ATH_RXBUF);
        if (error != 0)
                goto error_rx;

        ath9k_init_txpower_limits(sc);

#ifdef CONFIG_MAC80211_LEDS
        /* must be initialized before ieee80211_register_hw */
        sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
                IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
                ARRAY_SIZE(ath9k_tpt_blink));
#endif

        /* Register with mac80211 */
        error = ieee80211_register_hw(hw);
        if (error)
                goto error_register;

        error = ath9k_init_debug(ah);
        if (error) {
                ath_err(common, "Unable to create debugfs files\n");
                goto error_world;
        }

        /* Handle world regulatory */
        if (!ath_is_world_regd(reg)) {
                error = regulatory_hint(hw->wiphy, reg->alpha2);
                if (error)
                        goto error_world;
        }

        ath_init_leds(sc);
        ath_start_rfkill_poll(sc);

        return 0;

error_world:
        ieee80211_unregister_hw(hw);
error_register:
        ath_rx_cleanup(sc);
error_rx:
        ath_tx_cleanup(sc);
error_tx:
        /* Nothing */
error_regd:
        ath9k_deinit_softc(sc);
error_init:
        return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

static void ath9k_deinit_softc(struct ath_softc *sc)
{
        int i = 0;

        if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
                kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);

        if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
                kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);

        ath9k_deinit_btcoex(sc);

        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->tx.txq[i]);

        ath9k_hw_deinit(sc->sc_ah);
        if (sc->dfs_detector != NULL)
                sc->dfs_detector->exit(sc->dfs_detector);

        ath9k_eeprom_release(sc);
        kfree(sc->sc_ah);
        sc->sc_ah = NULL;
}

void ath9k_deinit_device(struct ath_softc *sc)
{
        struct ieee80211_hw *hw = sc->hw;

        ath9k_ps_wakeup(sc);

        wiphy_rfkill_stop_polling(sc->hw->wiphy);
        ath_deinit_leds(sc);

        ath9k_ps_restore(sc);

        ieee80211_unregister_hw(hw);
        ath_rx_cleanup(sc);
        ath_tx_cleanup(sc);
        ath9k_deinit_softc(sc);
}

void ath_descdma_cleanup(struct ath_softc *sc,
                         struct ath_descdma *dd,
                         struct list_head *head)
{
        dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
                          dd->dd_desc_paddr);

        INIT_LIST_HEAD(head);
        kfree(dd->dd_bufptr);
        memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/
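
/*
 * Registration order matters: with CONFIG_ATH9K_RATE_CONTROL,
 * ieee80211_register_hw() (called from ath9k_init_device()) looks up
 * "ath9k_rate_control" by name, so the rate control algorithm is
 * registered before the PCI/AHB bus code can probe any device.
 * Failure unwinding here, and ath9k_exit() below, run in reverse order.
 */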
installed\n"); 995 error = -ENODEV; 996 goto err_rate_unregister; 997 } 998 999 error = ath_ahb_init(); 1000 if (error < 0) { 1001 error = -ENODEV; 1002 goto err_pci_exit; 1003 } 1004 1005 return 0; 1006 1007 err_pci_exit: 1008 ath_pci_exit(); 1009 1010 err_rate_unregister: 1011 ath_rate_control_unregister(); 1012 err_out: 1013 return error; 1014 } 1015 module_init(ath9k_init); 1016 1017 static void __exit ath9k_exit(void) 1018 { 1019 is_ath9k_unloaded = true; 1020 ath_ahb_exit(); 1021 ath_pci_exit(); 1022 ath_rate_control_unregister(); 1023 pr_info("%s: Driver unloaded\n", dev_info); 1024 } 1025 module_exit(ath9k_exit); 1026