1 /* 2 * Copyright (c) 2008-2009 Atheros Communications Inc. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for any 5 * purpose with or without fee is hereby granted, provided that the above 6 * copyright notice and this permission notice appear in all copies. 7 * 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 */ 16 17 #include <linux/slab.h> 18 19 #include "ath9k.h" 20 21 static char *dev_info = "ath9k"; 22 23 MODULE_AUTHOR("Atheros Communications"); 24 MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards."); 25 MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards"); 26 MODULE_LICENSE("Dual BSD/GPL"); 27 28 static unsigned int ath9k_debug = ATH_DBG_DEFAULT; 29 module_param_named(debug, ath9k_debug, uint, 0); 30 MODULE_PARM_DESC(debug, "Debugging mask"); 31 32 int modparam_nohwcrypt; 33 module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444); 34 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption"); 35 36 int led_blink = 1; 37 module_param_named(blink, led_blink, int, 0444); 38 MODULE_PARM_DESC(blink, "Enable LED blink on activity"); 39 40 /* We use the hw_value as an index into our private channel structure */ 41 42 #define CHAN2G(_freq, _idx) { \ 43 .center_freq = (_freq), \ 44 .hw_value = (_idx), \ 45 .max_power = 20, \ 46 } 47 48 #define CHAN5G(_freq, _idx) { \ 49 .band = IEEE80211_BAND_5GHZ, \ 50 .center_freq = (_freq), \ 51 .hw_value = (_idx), \ 52 .max_power = 20, \ 53 } 54 55 /* Some 2 GHz radios are actually tunable on 2312-2732 56 * on 5 MHz 
steps, we support the channels which we know 57 * we have calibration data for all cards though to make 58 * this static */ 59 static struct ieee80211_channel ath9k_2ghz_chantable[] = { 60 CHAN2G(2412, 0), /* Channel 1 */ 61 CHAN2G(2417, 1), /* Channel 2 */ 62 CHAN2G(2422, 2), /* Channel 3 */ 63 CHAN2G(2427, 3), /* Channel 4 */ 64 CHAN2G(2432, 4), /* Channel 5 */ 65 CHAN2G(2437, 5), /* Channel 6 */ 66 CHAN2G(2442, 6), /* Channel 7 */ 67 CHAN2G(2447, 7), /* Channel 8 */ 68 CHAN2G(2452, 8), /* Channel 9 */ 69 CHAN2G(2457, 9), /* Channel 10 */ 70 CHAN2G(2462, 10), /* Channel 11 */ 71 CHAN2G(2467, 11), /* Channel 12 */ 72 CHAN2G(2472, 12), /* Channel 13 */ 73 CHAN2G(2484, 13), /* Channel 14 */ 74 }; 75 76 /* Some 5 GHz radios are actually tunable on XXXX-YYYY 77 * on 5 MHz steps, we support the channels which we know 78 * we have calibration data for all cards though to make 79 * this static */ 80 static struct ieee80211_channel ath9k_5ghz_chantable[] = { 81 /* _We_ call this UNII 1 */ 82 CHAN5G(5180, 14), /* Channel 36 */ 83 CHAN5G(5200, 15), /* Channel 40 */ 84 CHAN5G(5220, 16), /* Channel 44 */ 85 CHAN5G(5240, 17), /* Channel 48 */ 86 /* _We_ call this UNII 2 */ 87 CHAN5G(5260, 18), /* Channel 52 */ 88 CHAN5G(5280, 19), /* Channel 56 */ 89 CHAN5G(5300, 20), /* Channel 60 */ 90 CHAN5G(5320, 21), /* Channel 64 */ 91 /* _We_ call this "Middle band" */ 92 CHAN5G(5500, 22), /* Channel 100 */ 93 CHAN5G(5520, 23), /* Channel 104 */ 94 CHAN5G(5540, 24), /* Channel 108 */ 95 CHAN5G(5560, 25), /* Channel 112 */ 96 CHAN5G(5580, 26), /* Channel 116 */ 97 CHAN5G(5600, 27), /* Channel 120 */ 98 CHAN5G(5620, 28), /* Channel 124 */ 99 CHAN5G(5640, 29), /* Channel 128 */ 100 CHAN5G(5660, 30), /* Channel 132 */ 101 CHAN5G(5680, 31), /* Channel 136 */ 102 CHAN5G(5700, 32), /* Channel 140 */ 103 /* _We_ call this UNII 3 */ 104 CHAN5G(5745, 33), /* Channel 149 */ 105 CHAN5G(5765, 34), /* Channel 153 */ 106 CHAN5G(5785, 35), /* Channel 157 */ 107 CHAN5G(5805, 36), /* Channel 161 */ 108 
CHAN5G(5825, 37), /* Channel 165 */ 109 }; 110 111 /* Atheros hardware rate code addition for short premble */ 112 #define SHPCHECK(__hw_rate, __flags) \ 113 ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0) 114 115 #define RATE(_bitrate, _hw_rate, _flags) { \ 116 .bitrate = (_bitrate), \ 117 .flags = (_flags), \ 118 .hw_value = (_hw_rate), \ 119 .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \ 120 } 121 122 static struct ieee80211_rate ath9k_legacy_rates[] = { 123 RATE(10, 0x1b, 0), 124 RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE), 125 RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE), 126 RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE), 127 RATE(60, 0x0b, 0), 128 RATE(90, 0x0f, 0), 129 RATE(120, 0x0a, 0), 130 RATE(180, 0x0e, 0), 131 RATE(240, 0x09, 0), 132 RATE(360, 0x0d, 0), 133 RATE(480, 0x08, 0), 134 RATE(540, 0x0c, 0), 135 }; 136 137 static void ath9k_deinit_softc(struct ath_softc *sc); 138 139 /* 140 * Read and write, they both share the same lock. We do this to serialize 141 * reads and writes on Atheros 802.11n PCI devices only. This is required 142 * as the FIFO on these devices can only accept sanely 2 requests. 
 */

/* Serialized (or plain, depending on hw config) MMIO register write. */
static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		iowrite32(val, sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		iowrite32(val, sc->mem + reg_offset);
}

/* Serialized (or plain) MMIO register read; shares sc_serial_rw with
 * ath9k_iowrite32() so reads and writes cannot interleave. */
static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	u32 val;

	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = ioread32(sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = ioread32(sc->mem + reg_offset);
	return val;
}

/* Register access ops handed to the ath common layer. */
static const struct ath_ops ath9k_common_ops = {
	.read = ath9k_ioread32,
	.write = ath9k_iowrite32,
};

/**************************/
/*     Initialization     */
/**************************/

/*
 * Fill in the HT capabilities advertised to mac80211 for one band,
 * based on chip revision and the configured TX/RX chainmasks.
 */
static void setup_ht_cap(struct ath_softc *sc,
			 struct ieee80211_sta_ht_cap *ht_info)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u8 tx_streams, rx_streams;
	int i, max_streams;

	ht_info->ht_supported = true;
	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_SM_PS |
		       IEEE80211_HT_CAP_SGI_40 |
		       IEEE80211_HT_CAP_DSSSCCK40;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

	/* AR9300 family supports up to 3 spatial streams, older 2. */
	if (AR_SREV_9300_20_OR_LATER(ah))
		max_streams = 3;
	else
		max_streams = 2;

	/* STBC is only advertised on AR9280 and later. */
	if (AR_SREV_9280_10_OR_LATER(ah)) {
		if (max_streams >= 2)
			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
	}

	/* set up supported mcs set */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
	rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);

	ath_print(common, ATH_DBG_CONFIG,
		  "TX streams %d, RX streams: %d\n",
		  tx_streams, rx_streams);

	/* Asymmetric chain configs must be declared explicitly per 802.11n. */
	if (tx_streams != rx_streams) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
					   IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}

	/* All MCS rates of each supported RX stream are advertised. */
	for (i = 0; i < rx_streams; i++)
		ht_info->mcs.rx_mask[i] = 0xff;

	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}

/* cfg80211 regulatory notifier: apply regulatory changes to our hw. */
static int ath9k_reg_notifier(struct wiphy *wiphy,
			      struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);

	return ath_reg_notifier_apply(wiphy, request, reg);
}

/*
 * This function will allocate both the DMA descriptor structure, and the
 * buffers it contains. These are used to contain the descriptors used
 * by the system.
 *
 * Returns 0 on success or a negative errno; on failure *dd is zeroed and
 * any coherent memory already allocated is released.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
#define DS2PHYS(_dd, _ds)						\
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		  name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	/* TX descriptor size is chip-dependent; RX uses the generic size. */
	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "ath_desc not DWORD aligned\n");
		/* NOTE(review): BUG_ON() fires here, so the error return
		 * below is effectively unreachable; -ENOMEM is also an odd
		 * code for an alignment problem (-EINVAL would fit better). */
		BUG_ON((desc_len % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		/* The padding itself spans pages, so iterate until the
		 * extra allocation no longer introduces new skips. */
		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = (u8 *) dd->dd_desc;
	ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		  name, ds, (u32) dd->dd_desc_len,
		  ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	/* Point each ath_buf at its descriptor slice and record the
	 * matching bus address. */
	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}

/* Size and clear the hardware key cache; detect split-MIC operation. */
static void ath9k_init_crypto(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	/* Get the hardware key cache size. */
	common->keymax = sc->sc_ah->caps.keycache_size;
	if (common->keymax > ATH_KEYMAX) {
		ath_print(common, ATH_DBG_ANY,
			  "Warning, using only %u entries in %u key cache\n",
			  ATH_KEYMAX, common->keymax);
		common->keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < common->keymax; i++)
		ath9k_hw_keyreset(sc->sc_ah, (u16) i);

	/*
	 * Check whether the separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27 otherwise 59.
	 */
	if (!(sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA))
		common->splitmic = 1;
}

/*
 * Bring up Bluetooth coexistence for the scheme the hardware supports.
 * NOTE(review): returns -1 rather than a -errno on timer-init failure;
 * the sole caller only tests for non-zero, but a proper errno would be
 * cleaner.
 */
static int ath9k_init_btcoex(struct ath_softc *sc)
{
	int r, qnum;

	switch (sc->sc_ah->btcoex_hw.scheme) {
	case ATH_BTCOEX_CFG_NONE:
		break;
	case ATH_BTCOEX_CFG_2WIRE:
		ath9k_hw_btcoex_init_2wire(sc->sc_ah);
		break;
	case ATH_BTCOEX_CFG_3WIRE:
		ath9k_hw_btcoex_init_3wire(sc->sc_ah);
		r = ath_init_btcoex_timer(sc);
		if (r)
			return -1;
		qnum = sc->tx.hwq_map[WME_AC_BE];
		ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
		break;
	default:
		WARN_ON(1);
		break;
	}

	return 0;
}

/*
 * Set up the beacon, CAB and the four WMM AC transmit queues.
 * Returns 0 on success; on any failure every queue set up so far is
 * torn down and -EIO is returned.
 */
static int ath9k_init_queues(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
		sc->tx.hwq_map[i] = -1;

	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
	if (sc->beacon.beaconq == -1) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup a beacon xmit queue\n");
		goto err;
	}

	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->beacon.cabq == NULL) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup CAB xmit queue\n");
		goto err;
	}

	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	if (!ath_tx_setup(sc, WME_AC_BK)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for BK traffic\n");
		goto err;
	}

	if (!ath_tx_setup(sc, WME_AC_BE)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for BE traffic\n");
		goto err;
	}
	if (!ath_tx_setup(sc, WME_AC_VI)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for VI traffic\n");
		goto err;
	}
	if (!ath_tx_setup(sc, WME_AC_VO)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for VO traffic\n");
		goto err;
	}

	return 0;

err:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	return -EIO;
}

/*
 * Attach the static channel and rate tables to the supported bands.
 * The 5 GHz band skips the first four entries of ath9k_legacy_rates
 * (the CCK rates, which are 2.4 GHz only).
 */
static void ath9k_init_channels_rates(struct ath_softc *sc)
{
	if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
		sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
			ARRAY_SIZE(ath9k_2ghz_chantable);
		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates);
	}

	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
		sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
			ARRAY_SIZE(ath9k_5ghz_chantable);
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			ath9k_legacy_rates + 4;
		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates) - 4;
	}
}

/* Miscellaneous software state: ANI timer, power limits, aggregation
 * flags, chainmasks, antenna diversity and beacon slots. */
static void ath9k_init_misc(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->config.txpowlimit = ATH_TXPOWER_MAX;

	/* Enable A-MPDU aggregation in both directions on HT hardware. */
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
	common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;

	ath9k_hw_set_diversity(sc->sc_ah, true);
	sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

	/* Start with the broadcast mask; refined when interfaces appear. */
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

	sc->beacon.slottime = ATH9K_SLOT_TIME_9;

	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
		sc->beacon.bslot[i] = NULL;
		sc->beacon.bslot_aphy[i] = NULL;
	}
}

/*
 * Allocate and initialize the ath_hw instance and all driver software
 * state (locks, tasklets, debugfs, queues, btcoex, crypto, channels).
 * On failure everything set up so far is unwound in reverse order and
 * sc->sc_ah is left NULL.
 */
static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw_version.devid = devid;
	ah->hw_version.subsysid = subsysid;
	sc->sc_ah = ah;

	common = ath9k_hw_common(ah);
	common->ops = &ath9k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;

	spin_lock_init(&sc->wiphy_lock);
	spin_lock_init(&sc->sc_resetlock);
	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	/* Initializes the hardware for all supported chipsets */
	ret = ath9k_hw_init(ah);
	if (ret)
		goto err_hw;

	ret = ath9k_init_debug(ah);
	if (ret) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to create debugfs files\n");
		goto err_debug;
	}

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret = ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ath9k_init_crypto(sc);
	ath9k_init_channels_rates(sc);
	ath9k_init_misc(sc);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_exit_debug(ah);
err_debug:
	ath9k_hw_deinit(ah);
err_hw:
	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	kfree(ah);
	sc->sc_ah = NULL;

	return ret;
}

/* Advertise hardware/driver capabilities to mac80211 before
 * ieee80211_register_hw(). */
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
		IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_PS_NULLFUNC_STACK |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

	/* MFP needs either hardware support (AR9160+) or software crypto. */
	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->queues = 4;
	hw->max_rates = 4;
	hw->channel_change_time = 5000;
	hw->max_listen_interval = 10;
	hw->max_rate_tries = 10;
	hw->sta_data_size = sizeof(struct ath_node);
	hw->vif_data_size = sizeof(struct ath_vif);

	hw->rate_control_algorithm = "ath9k_rate_control";

	if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&sc->sbands[IEEE80211_BAND_2GHZ];
	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&sc->sbands[IEEE80211_BAND_5GHZ];

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
		if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
	}

	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

/*
 * Full device bring-up: softc init, capability setup, regulatory,
 * TX/RX DMA, mac80211 registration, LEDs and rfkill. Unwinds in
 * reverse order on any failure and returns 0 or a negative errno.
 */
int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
		      const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
	if (error != 0)
		goto error_init;

	ah = sc->sc_ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(sc, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto error_regd;

	reg = &common->regulatory;

	/* Setup TX DMA */
	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto error_tx;

	/* Setup RX DMA */
	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto error_rx;

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto error_register;

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto error_world;
	}

	INIT_WORK(&sc->hw_check_work, ath_hw_check);
	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
	INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
	INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
	sc->wiphy_scheduler_int = msecs_to_jiffies(500);

	ath_init_leds(sc);
	ath_start_rfkill_poll(sc);

	return 0;

error_world:
	ieee80211_unregister_hw(hw);
error_register:
	ath_rx_cleanup(sc);
error_rx:
	ath_tx_cleanup(sc);
error_tx:
	/* Nothing */
error_regd:
	ath9k_deinit_softc(sc);
error_init:
	return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

/* Reverse of ath9k_init_softc(): free btcoex timer, TX queues, debugfs,
 * hardware state, tasklets and the ath_hw instance. */
static void ath9k_deinit_softc(struct ath_softc *sc)
{
	int i = 0;

	if ((sc->btcoex.no_stomp_timer) &&
	    sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	ath9k_exit_debug(sc->sc_ah);
	ath9k_hw_deinit(sc->sc_ah);

	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	kfree(sc->sc_ah);
	sc->sc_ah = NULL;
}

/* Full device teardown: reverse of ath9k_init_device(), including any
 * secondary (virtual) wiphys created at runtime. */
void ath9k_deinit_device(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;
	int i = 0;

	ath9k_ps_wakeup(sc);

	wiphy_rfkill_stop_polling(sc->hw->wiphy);
	ath_deinit_leds(sc);

	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		sc->sec_wiphy[i] = NULL;
		ieee80211_unregister_hw(aphy->hw);
		ieee80211_free_hw(aphy->hw);
	}

	ieee80211_unregister_hw(hw);
	ath_rx_cleanup(sc);
	ath_tx_cleanup(sc);
	ath9k_deinit_softc(sc);
	kfree(sc->sec_wiphy);
}

/* Release the coherent descriptor memory and buffer array allocated by
 * ath_descdma_setup(); leaves *dd zeroed and *head empty. */
void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}

/************************/
/*     Module Hooks     */
/************************/

/* Module entry point: register the rate-control algorithm and debugfs
 * root, then probe the PCI and AHB buses, unwinding on failure. */
static int __init ath9k_init(void)
{
	int error;

	/* Register rate control algorithm */
	error = ath_rate_control_register();
	if (error != 0) {
		printk(KERN_ERR
			"ath9k: Unable to register rate control "
			"algorithm: %d\n",
			error);
		goto err_out;
	}

	error = ath9k_debug_create_root();
	if (error) {
		printk(KERN_ERR
			"ath9k: Unable to create debugfs root: %d\n",
			error);
		goto err_rate_unregister;
	}

	error = ath_pci_init();
	if (error < 0) {
		printk(KERN_ERR
			"ath9k: No PCI devices found, driver not installed.\n");
		error = -ENODEV;
		goto err_remove_root;
	}

	error = ath_ahb_init();
	if (error < 0) {
		error = -ENODEV;
		goto err_pci_exit;
	}

	return 0;

 err_pci_exit:
	ath_pci_exit();

 err_remove_root:
	ath9k_debug_remove_root();
 err_rate_unregister:
	ath_rate_control_unregister();
 err_out:
	return error;
}
module_init(ath9k_init);

/* Module exit: undo ath9k_init() in reverse order. */
static void __exit ath9k_exit(void)
{
	ath_ahb_exit();
	ath_pci_exit();
	ath9k_debug_remove_root();
	ath_rate_control_unregister();
	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);