xref: /linux/drivers/net/wireless/ath/ath9k/init.c (revision a508da6cc0093171833efb8376b00473f24221b9)
1 /*
2  * Copyright (c) 2008-2011 Atheros Communications Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 
19 #include <linux/dma-mapping.h>
20 #include <linux/slab.h>
21 #include <linux/ath9k_platform.h>
22 #include <linux/module.h>
23 
24 #include "ath9k.h"
25 
26 static char *dev_info = "ath9k";
27 
28 MODULE_AUTHOR("Atheros Communications");
29 MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
30 MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
31 MODULE_LICENSE("Dual BSD/GPL");
32 
33 static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
34 module_param_named(debug, ath9k_debug, uint, 0);
35 MODULE_PARM_DESC(debug, "Debugging mask");
36 
37 int ath9k_modparam_nohwcrypt;
38 module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
39 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
40 
41 int led_blink;
42 module_param_named(blink, led_blink, int, 0444);
43 MODULE_PARM_DESC(blink, "Enable LED blink on activity");
44 
45 static int ath9k_btcoex_enable;
46 module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
47 MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
48 
49 bool is_ath9k_unloaded;
50 /* We use the hw_value as an index into our private channel structure */
51 
/*
 * Channel table entry constructors. hw_value is our private index
 * (see the comment above); max_power is a conservative default —
 * presumably dBm, per mac80211 convention — TODO confirm.
 */
#define CHAN2G(_freq, _idx)  { \
	.band = IEEE80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}
65 
66 /* Some 2 GHz radios are actually tunable on 2312-2732
67  * on 5 MHz steps, we support the channels which we know
68  * we have calibration data for all cards though to make
69  * this static */
/*
 * hw_value doubles as the index into ah->channels (see
 * ath9k_init_band_txpower()); the 5 GHz table continues the numbering,
 * and the combined size must equal ATH9K_NUM_CHANNELS (checked by the
 * BUILD_BUG_ON() in ath9k_init_channels_rates()).
 */
static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};
86 
87 /* Some 5 GHz radios are actually tunable on XXXX-YYYY
88  * on 5 MHz steps, we support the channels which we know
89  * we have calibration data for all cards though to make
90  * this static */
/* hw_value continues the 2 GHz table's private index numbering (14..37). */
static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};
121 
/* Atheros hardware rate code addition for short preamble.
 * For rates flagged short-preamble-capable, hw_value_short is the hardware
 * rate code with bit 0x04 set; otherwise it is left as 0. */
#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)

#define RATE(_bitrate, _hw_rate, _flags) {              \
	.bitrate        = (_bitrate),                   \
	.flags          = (_flags),                     \
	.hw_value       = (_hw_rate),                   \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags))  \
}
132 
/*
 * Legacy (non-HT) bitrate table, in units of 100 kbps.
 * The first 4 entries are the 2 GHz-only rates; the 5 GHz band reuses
 * this table starting at offset +4 (see ath9k_init_channels_rates()),
 * so do not reorder the entries.
 */
static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0x0b, 0),
	RATE(90, 0x0f, 0),
	RATE(120, 0x0a, 0),
	RATE(180, 0x0e, 0),
	RATE(240, 0x09, 0),
	RATE(360, 0x0d, 0),
	RATE(480, 0x08, 0),
	RATE(540, 0x0c, 0),
};
147 
#ifdef CONFIG_MAC80211_LEDS
/*
 * Throughput-to-blink-rate map handed to
 * ieee80211_create_tpt_led_trigger() in ath9k_init_device():
 * higher throughput -> shorter blink time. Units are presumably
 * kbit/s thresholds and milliseconds — verify against mac80211.
 */
static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};
#endif
162 
163 static void ath9k_deinit_softc(struct ath_softc *sc);
164 
165 /*
166  * Read and write, they both share the same lock. We do this to serialize
167  * reads and writes on Atheros 802.11n PCI devices only. This is required
168  * as the FIFO on these devices can only accept sanely 2 requests.
169  */
170 
171 static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
172 {
173 	struct ath_hw *ah = (struct ath_hw *) hw_priv;
174 	struct ath_common *common = ath9k_hw_common(ah);
175 	struct ath_softc *sc = (struct ath_softc *) common->priv;
176 
177 	if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
178 		unsigned long flags;
179 		spin_lock_irqsave(&sc->sc_serial_rw, flags);
180 		iowrite32(val, sc->mem + reg_offset);
181 		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
182 	} else
183 		iowrite32(val, sc->mem + reg_offset);
184 }
185 
186 static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
187 {
188 	struct ath_hw *ah = (struct ath_hw *) hw_priv;
189 	struct ath_common *common = ath9k_hw_common(ah);
190 	struct ath_softc *sc = (struct ath_softc *) common->priv;
191 	u32 val;
192 
193 	if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
194 		unsigned long flags;
195 		spin_lock_irqsave(&sc->sc_serial_rw, flags);
196 		val = ioread32(sc->mem + reg_offset);
197 		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
198 	} else
199 		val = ioread32(sc->mem + reg_offset);
200 	return val;
201 }
202 
203 static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
204 				    u32 set, u32 clr)
205 {
206 	u32 val;
207 
208 	val = ioread32(sc->mem + reg_offset);
209 	val &= ~clr;
210 	val |= set;
211 	iowrite32(val, sc->mem + reg_offset);
212 
213 	return val;
214 }
215 
216 static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
217 {
218 	struct ath_hw *ah = (struct ath_hw *) hw_priv;
219 	struct ath_common *common = ath9k_hw_common(ah);
220 	struct ath_softc *sc = (struct ath_softc *) common->priv;
221 	unsigned long uninitialized_var(flags);
222 	u32 val;
223 
224 	if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
225 		spin_lock_irqsave(&sc->sc_serial_rw, flags);
226 		val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
227 		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
228 	} else
229 		val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
230 
231 	return val;
232 }
233 
234 /**************************/
235 /*     Initialization     */
236 /**************************/
237 
/*
 * Fill in the HT (802.11n) capabilities advertised to mac80211 for one
 * band, based on the chip revision and configured chainmasks.
 */
static void setup_ht_cap(struct ath_softc *sc,
			 struct ieee80211_sta_ht_cap *ht_info)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u8 tx_streams, rx_streams;
	int i, max_streams;

	/* Capabilities common to all supported chips */
	ht_info->ht_supported = true;
	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_SM_PS |
		       IEEE80211_HT_CAP_SGI_40 |
		       IEEE80211_HT_CAP_DSSSCCK40;

	/* Optional capabilities gated on hardware support */
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

	/* Maximum spatial streams depends on the chip revision */
	if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
		max_streams = 1;
	else if (AR_SREV_9462(ah))
		max_streams = 2;
	else if (AR_SREV_9300_20_OR_LATER(ah))
		max_streams = 3;
	else
		max_streams = 2;

	/* STBC: TX needs at least 2 chains; RX STBC advertised as 1 stream */
	if (AR_SREV_9280_20_OR_LATER(ah)) {
		if (max_streams >= 2)
			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
	}

	/* set up supported mcs set */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
	rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);

	ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
		tx_streams, rx_streams);

	/* Asymmetric chainmasks need the TX stream count spelled out */
	if (tx_streams != rx_streams) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}

	/* One full MCS byte (rates 0-7) per RX stream */
	for (i = 0; i < rx_streams; i++)
		ht_info->mcs.rx_mask[i] = 0xff;

	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}
295 
/*
 * cfg80211 regulatory notifier: apply the regulatory request and, if a
 * channel is currently tuned, re-clamp the TX power to the (possibly
 * changed) per-channel limit.
 */
static int ath9k_reg_notifier(struct wiphy *wiphy,
			      struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
	int ret;

	ret = ath_reg_notifier_apply(wiphy, request, reg);

	/* Set tx power */
	if (ah->curchan) {
		/* txpowlimit is in half-dBm units, hence the factor 2 */
		sc->config.txpowlimit = 2 * ah->curchan->chan->max_power;
		ath9k_ps_wakeup(sc);
		ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
		/* Hardware may have clamped further; record the actual limit */
		sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
		ath9k_ps_restore(sc);
	}

	return ret;
}
318 
319 /*
320  *  This function will allocate both the DMA descriptor structure, and the
321  *  buffers it contains.  These are used to contain the descriptors used
322  *  by the system.
323 */
/*
 *  This function will allocate both the DMA descriptor structure, and the
 *  buffers it contains.  These are used to contain the descriptors used
 *  by the system.
 *
 *  @sc:    driver state
 *  @dd:    descriptor-DMA bookkeeping to fill in (zeroed on failure)
 *  @head:  list that receives the allocated ath_buf entries
 *  @name:  label used only for debug output
 *  @nbuf:  number of buffers
 *  @ndesc: descriptors per buffer
 *  @is_tx: TX queues use the hardware-specific descriptor length
 *
 *  Returns 0 on success or -ENOMEM; on failure *dd is cleared and no
 *  memory is leaked.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, error, desc_len;

	ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_err(common, "ath_desc not DWORD aligned\n");
		BUG_ON((desc_len % 4) != 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		/* Each round of padding may itself cross new 4K pages,
		 * so iterate until no further descriptors are skipped. */
		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
					 &dd->dd_desc_paddr, GFP_KERNEL);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = (u8 *) dd->dd_desc;
	ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kzalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	dd->dd_bufptr = bf;

	/* Point each ath_buf at its slice of the coherent block and record
	 * the matching bus address. */
	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				/* The padding computed above guarantees we
				 * never run off the end of the allocation. */
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);
fail:
	memset(dd, 0, sizeof(*dd));
	return error;
}
423 
424 static int ath9k_init_queues(struct ath_softc *sc)
425 {
426 	int i = 0;
427 
428 	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
429 	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
430 
431 	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
432 	ath_cabq_update(sc);
433 
434 	for (i = 0; i < WME_NUM_AC; i++) {
435 		sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
436 		sc->tx.txq_map[i]->mac80211_qnum = i;
437 	}
438 	return 0;
439 }
440 
441 static int ath9k_init_channels_rates(struct ath_softc *sc)
442 {
443 	void *channels;
444 
445 	BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
446 		     ARRAY_SIZE(ath9k_5ghz_chantable) !=
447 		     ATH9K_NUM_CHANNELS);
448 
449 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
450 		channels = kmemdup(ath9k_2ghz_chantable,
451 			sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
452 		if (!channels)
453 		    return -ENOMEM;
454 
455 		sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
456 		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
457 		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
458 			ARRAY_SIZE(ath9k_2ghz_chantable);
459 		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
460 		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
461 			ARRAY_SIZE(ath9k_legacy_rates);
462 	}
463 
464 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
465 		channels = kmemdup(ath9k_5ghz_chantable,
466 			sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
467 		if (!channels) {
468 			if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
469 				kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
470 			return -ENOMEM;
471 		}
472 
473 		sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
474 		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
475 		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
476 			ARRAY_SIZE(ath9k_5ghz_chantable);
477 		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
478 			ath9k_legacy_rates + 4;
479 		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
480 			ARRAY_SIZE(ath9k_legacy_rates) - 4;
481 	}
482 	return 0;
483 }
484 
485 static void ath9k_init_misc(struct ath_softc *sc)
486 {
487 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
488 	int i = 0;
489 
490 	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
491 
492 	sc->config.txpowlimit = ATH_TXPOWER_MAX;
493 	memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
494 	sc->beacon.slottime = ATH9K_SLOT_TIME_9;
495 
496 	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
497 		sc->beacon.bslot[i] = NULL;
498 
499 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
500 		sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
501 }
502 
503 static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
504 			    const struct ath_bus_ops *bus_ops)
505 {
506 	struct ath9k_platform_data *pdata = sc->dev->platform_data;
507 	struct ath_hw *ah = NULL;
508 	struct ath_common *common;
509 	int ret = 0, i;
510 	int csz = 0;
511 
512 	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
513 	if (!ah)
514 		return -ENOMEM;
515 
516 	ah->hw = sc->hw;
517 	ah->hw_version.devid = devid;
518 	ah->reg_ops.read = ath9k_ioread32;
519 	ah->reg_ops.write = ath9k_iowrite32;
520 	ah->reg_ops.rmw = ath9k_reg_rmw;
521 	atomic_set(&ah->intr_ref_cnt, -1);
522 	sc->sc_ah = ah;
523 
524 	sc->dfs_detector = dfs_pattern_detector_init(NL80211_DFS_UNSET);
525 
526 	if (!pdata) {
527 		ah->ah_flags |= AH_USE_EEPROM;
528 		sc->sc_ah->led_pin = -1;
529 	} else {
530 		sc->sc_ah->gpio_mask = pdata->gpio_mask;
531 		sc->sc_ah->gpio_val = pdata->gpio_val;
532 		sc->sc_ah->led_pin = pdata->led_pin;
533 		ah->is_clk_25mhz = pdata->is_clk_25mhz;
534 		ah->get_mac_revision = pdata->get_mac_revision;
535 		ah->external_reset = pdata->external_reset;
536 	}
537 
538 	common = ath9k_hw_common(ah);
539 	common->ops = &ah->reg_ops;
540 	common->bus_ops = bus_ops;
541 	common->ah = ah;
542 	common->hw = sc->hw;
543 	common->priv = sc;
544 	common->debug_mask = ath9k_debug;
545 	common->btcoex_enabled = ath9k_btcoex_enable == 1;
546 	common->disable_ani = false;
547 	spin_lock_init(&common->cc_lock);
548 
549 	spin_lock_init(&sc->sc_serial_rw);
550 	spin_lock_init(&sc->sc_pm_lock);
551 	mutex_init(&sc->mutex);
552 #ifdef CONFIG_ATH9K_DEBUGFS
553 	spin_lock_init(&sc->nodes_lock);
554 	INIT_LIST_HEAD(&sc->nodes);
555 #endif
556 #ifdef CONFIG_ATH9K_MAC_DEBUG
557 	spin_lock_init(&sc->debug.samp_lock);
558 #endif
559 	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
560 	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
561 		     (unsigned long)sc);
562 
563 	/*
564 	 * Cache line size is used to size and align various
565 	 * structures used to communicate with the hardware.
566 	 */
567 	ath_read_cachesize(common, &csz);
568 	common->cachelsz = csz << 2; /* convert to bytes */
569 
570 	/* Initializes the hardware for all supported chipsets */
571 	ret = ath9k_hw_init(ah);
572 	if (ret)
573 		goto err_hw;
574 
575 	if (pdata && pdata->macaddr)
576 		memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);
577 
578 	ret = ath9k_init_queues(sc);
579 	if (ret)
580 		goto err_queues;
581 
582 	ret =  ath9k_init_btcoex(sc);
583 	if (ret)
584 		goto err_btcoex;
585 
586 	ret = ath9k_init_channels_rates(sc);
587 	if (ret)
588 		goto err_btcoex;
589 
590 	ath9k_cmn_init_crypto(sc->sc_ah);
591 	ath9k_init_misc(sc);
592 
593 	return 0;
594 
595 err_btcoex:
596 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
597 		if (ATH_TXQ_SETUP(sc, i))
598 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
599 err_queues:
600 	ath9k_hw_deinit(ah);
601 err_hw:
602 
603 	kfree(ah);
604 	sc->sc_ah = NULL;
605 
606 	return ret;
607 }
608 
609 static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
610 {
611 	struct ieee80211_supported_band *sband;
612 	struct ieee80211_channel *chan;
613 	struct ath_hw *ah = sc->sc_ah;
614 	int i;
615 
616 	sband = &sc->sbands[band];
617 	for (i = 0; i < sband->n_channels; i++) {
618 		chan = &sband->channels[i];
619 		ah->curchan = &ah->channels[chan->hw_value];
620 		ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
621 		ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
622 	}
623 }
624 
625 static void ath9k_init_txpower_limits(struct ath_softc *sc)
626 {
627 	struct ath_hw *ah = sc->sc_ah;
628 	struct ath9k_channel *curchan = ah->curchan;
629 
630 	if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
631 		ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
632 	if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
633 		ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);
634 
635 	ah->curchan = curchan;
636 }
637 
638 void ath9k_reload_chainmask_settings(struct ath_softc *sc)
639 {
640 	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT))
641 		return;
642 
643 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
644 		setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
645 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
646 		setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
647 }
648 
/*
 * Interface combination advertised to mac80211/cfg80211: up to 2048
 * client-type interfaces plus up to 8 AP-type interfaces, all on a
 * single channel.
 */
static const struct ieee80211_iface_limit if_limits[] = {
	{ .max = 2048,	.types = BIT(NL80211_IFTYPE_STATION) |
				 BIT(NL80211_IFTYPE_P2P_CLIENT) |
				 BIT(NL80211_IFTYPE_WDS) },
	{ .max = 8,	.types =
#ifdef CONFIG_MAC80211_MESH
				 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
				 BIT(NL80211_IFTYPE_AP) |
				 BIT(NL80211_IFTYPE_P2P_GO) },
};

static const struct ieee80211_iface_combination if_comb = {
	.limits = if_limits,
	.n_limits = ARRAY_SIZE(if_limits),
	.max_interfaces = 2048,
	.num_different_channels = 1,
};
667 
/*
 * Advertise the driver's capabilities to mac80211: hardware flags,
 * supported interface modes/combinations, queue counts, antenna
 * configuration, supported bands and the permanent MAC address.
 * Must run after ath9k_init_softc() has populated sc->sbands.
 */
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
		IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_PS_NULLFUNC_STACK |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

	/* MFP needs either hardware support or software crypto */
	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_WDS) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	hw->wiphy->iface_combinations = &if_comb;
	hw->wiphy->n_iface_combinations = 1;

	if (AR_SREV_5416(sc->sc_ah))
		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
	hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;

	hw->queues = 4;
	hw->max_rates = 4;
	hw->channel_change_time = 5000;
	hw->max_listen_interval = 1;
	hw->max_rate_tries = 10;
	hw->sta_data_size = sizeof(struct ath_node);
	hw->vif_data_size = sizeof(struct ath_vif);

	/* All-chains bitmask derived from the chain count */
	hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
	hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;

	/* single chain devices with rx diversity */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
		hw->wiphy->available_antennas_rx = BIT(0) | BIT(1);

	sc->ant_rx = hw->wiphy->available_antennas_rx;
	sc->ant_tx = hw->wiphy->available_antennas_tx;

#ifdef CONFIG_ATH9K_RATE_CONTROL
	hw->rate_control_algorithm = "ath9k_rate_control";
#endif

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&sc->sbands[IEEE80211_BAND_2GHZ];
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&sc->sbands[IEEE80211_BAND_5GHZ];

	ath9k_reload_chainmask_settings(sc);

	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}
739 
/*
 * Top-level device bring-up called by the bus glue (PCI/AHB): softc
 * init, capability advertisement, regulatory setup, TX/RX DMA, LED
 * trigger, workqueue items and final registration with mac80211.
 *
 * Returns 0 on success or a negative error. On failure the goto chain
 * unwinds exactly what was set up — note that error_init deliberately
 * skips ath9k_deinit_softc(): ath9k_init_softc() cleans up after
 * itself when it fails.
 */
int ath9k_init_device(u16 devid, struct ath_softc *sc,
		    const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_softc(devid, sc, bus_ops);
	if (error != 0)
		goto error_init;

	ah = sc->sc_ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(sc, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto error_regd;

	reg = &common->regulatory;

	/* Setup TX DMA */
	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto error_tx;

	/* Setup RX DMA */
	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto error_rx;

	ath9k_init_txpower_limits(sc);

#ifdef CONFIG_MAC80211_LEDS
	/* must be initialized before ieee80211_register_hw */
	sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
		IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
		ARRAY_SIZE(ath9k_tpt_blink));
#endif

	INIT_WORK(&sc->hw_reset_work, ath_reset_work);
	INIT_WORK(&sc->hw_check_work, ath_hw_check);
	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
	INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto error_register;

	/* Debugfs failure past this point must unregister the hw again */
	error = ath9k_init_debug(ah);
	if (error) {
		ath_err(common, "Unable to create debugfs files\n");
		goto error_world;
	}

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto error_world;
	}

	setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);
	sc->last_rssi = ATH_RSSI_DUMMY_MARKER;

	ath_init_leds(sc);
	ath_start_rfkill_poll(sc);

	return 0;

error_world:
	ieee80211_unregister_hw(hw);
error_register:
	ath_rx_cleanup(sc);
error_rx:
	ath_tx_cleanup(sc);
error_tx:
	/* Nothing */
error_regd:
	ath9k_deinit_softc(sc);
error_init:
	return error;
}
829 
830 /*****************************/
831 /*     De-Initialization     */
832 /*****************************/
833 
834 static void ath9k_deinit_softc(struct ath_softc *sc)
835 {
836 	int i = 0;
837 
838 	if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
839 		kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
840 
841 	if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
842 		kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);
843 
844 	ath9k_deinit_btcoex(sc);
845 
846 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
847 		if (ATH_TXQ_SETUP(sc, i))
848 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
849 
850 	ath9k_hw_deinit(sc->sc_ah);
851 	if (sc->dfs_detector != NULL)
852 		sc->dfs_detector->exit(sc->dfs_detector);
853 
854 	kfree(sc->sc_ah);
855 	sc->sc_ah = NULL;
856 }
857 
858 void ath9k_deinit_device(struct ath_softc *sc)
859 {
860 	struct ieee80211_hw *hw = sc->hw;
861 
862 	ath9k_ps_wakeup(sc);
863 
864 	wiphy_rfkill_stop_polling(sc->hw->wiphy);
865 	ath_deinit_leds(sc);
866 
867 	ath9k_ps_restore(sc);
868 
869 	ieee80211_unregister_hw(hw);
870 	ath_rx_cleanup(sc);
871 	ath_tx_cleanup(sc);
872 	ath9k_deinit_softc(sc);
873 }
874 
/*
 * Release everything ath_descdma_setup() allocated: the coherent DMA
 * descriptor block and the ath_buf array. The list head is re-initialized
 * and *dd is cleared so a stale pointer cannot be reused.
 */
void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}
886 
887 /************************/
888 /*     Module Hooks     */
889 /************************/
890 
891 static int __init ath9k_init(void)
892 {
893 	int error;
894 
895 	/* Register rate control algorithm */
896 	error = ath_rate_control_register();
897 	if (error != 0) {
898 		pr_err("Unable to register rate control algorithm: %d\n",
899 		       error);
900 		goto err_out;
901 	}
902 
903 	error = ath_pci_init();
904 	if (error < 0) {
905 		pr_err("No PCI devices found, driver not installed\n");
906 		error = -ENODEV;
907 		goto err_rate_unregister;
908 	}
909 
910 	error = ath_ahb_init();
911 	if (error < 0) {
912 		error = -ENODEV;
913 		goto err_pci_exit;
914 	}
915 
916 	return 0;
917 
918  err_pci_exit:
919 	ath_pci_exit();
920 
921  err_rate_unregister:
922 	ath_rate_control_unregister();
923  err_out:
924 	return error;
925 }
926 module_init(ath9k_init);
927 
928 static void __exit ath9k_exit(void)
929 {
930 	is_ath9k_unloaded = true;
931 	ath_ahb_exit();
932 	ath_pci_exit();
933 	ath_rate_control_unregister();
934 	pr_info("%s: Driver unloaded\n", dev_info);
935 }
936 module_exit(ath9k_exit);
937