xref: /linux/drivers/net/wireless/ath/ath9k/init.c (revision a1087ef6abedf0bfd60e5e3fddf33192cb2c1325)
1 /*
2  * Copyright (c) 2008-2009 Atheros Communications Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include <linux/slab.h>
18 
19 #include "ath9k.h"
20 
/* Name printed in the unload message from ath9k_exit(). */
static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

/* Debug mask copied into ath_common->debug_mask in ath9k_init_softc(). */
static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

/* Non-zero forces software crypto; also enables MFP in ath9k_set_hw_capab(). */
int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

/* Non-zero enables LED activity blinking (consumed elsewhere in the driver). */
int led_blink;
module_param_named(blink, led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");
39 
40 /* We use the hw_value as an index into our private channel structure */
41 
/*
 * Set .band explicitly for consistency with CHAN5G below.  The previous
 * definition relied on the implicit zero-initialization of .band, which
 * only works because IEEE80211_BAND_2GHZ is the first enumerator (0) in
 * mac80211's enum ieee80211_band; the generated initializers are identical.
 */
#define CHAN2G(_freq, _idx)  { \
	.band = IEEE80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}
47 
/*
 * Initializer for a 5 GHz struct ieee80211_channel entry; _idx is the
 * driver-private hw_value index (continues the numbering started by the
 * 2 GHz table).  max_power is a fixed 20 dBm cap for every channel.
 */
#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}
54 
/* Some 2 GHz radios are actually tunable on 2312-2732
 * on 5 MHz steps, we support the channels which we know
 * we have calibration data for all cards though to make
 * this static.
 *
 * hw_value indices 0-13 map channels 1-14; the 5 GHz table below
 * continues the index numbering at 14.  ath9k_init_channels_rates()
 * kmemdup()s this table into sc->sbands, so entries here are never
 * modified in place.
 */
static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};
75 
/* Some 5 GHz radios are actually tunable over a wider range on
 * 5 MHz steps; we support only the channels we know we have
 * calibration data for on all cards, to keep this table static.
 *
 * hw_value indices continue from the 2 GHz table (14-37).  Like the
 * 2 GHz table, this is kmemdup()ed into sc->sbands at init time.
 */
static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};
110 
/* Atheros hardware rate code addition for short preamble:
 * when the rate advertises IEEE80211_RATE_SHORT_PREAMBLE, the
 * short-preamble hw code is the long-preamble code with bit 0x04 set;
 * otherwise hw_value_short stays 0 (no short-preamble variant). */
#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)

/* Initializer for a struct ieee80211_rate entry; _bitrate is in
 * units of 100 kbps per mac80211 convention. */
#define RATE(_bitrate, _hw_rate, _flags) {              \
	.bitrate        = (_bitrate),                   \
	.flags          = (_flags),                     \
	.hw_value       = (_hw_rate),                   \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags))  \
}
121 
/*
 * Legacy (non-HT) rate table: the first four entries are the 2.4 GHz
 * CCK rates, the remaining eight are OFDM rates shared by both bands.
 * ath9k_init_channels_rates() points the 5 GHz band at this table + 4
 * to skip the CCK entries.  Bitrates are in 100 kbps units.
 */
static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0x0b, 0),
	RATE(90, 0x0f, 0),
	RATE(120, 0x0a, 0),
	RATE(180, 0x0e, 0),
	RATE(240, 0x09, 0),
	RATE(360, 0x0d, 0),
	RATE(480, 0x08, 0),
	RATE(540, 0x0c, 0),
};
136 
137 static void ath9k_deinit_softc(struct ath_softc *sc);
138 
139 /*
140  * Read and write, they both share the same lock. We do this to serialize
141  * reads and writes on Atheros 802.11n PCI devices only. This is required
142  * as the FIFO on these devices can only accept sanely 2 requests.
143  */
144 
145 static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
146 {
147 	struct ath_hw *ah = (struct ath_hw *) hw_priv;
148 	struct ath_common *common = ath9k_hw_common(ah);
149 	struct ath_softc *sc = (struct ath_softc *) common->priv;
150 
151 	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
152 		unsigned long flags;
153 		spin_lock_irqsave(&sc->sc_serial_rw, flags);
154 		iowrite32(val, sc->mem + reg_offset);
155 		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
156 	} else
157 		iowrite32(val, sc->mem + reg_offset);
158 }
159 
160 static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
161 {
162 	struct ath_hw *ah = (struct ath_hw *) hw_priv;
163 	struct ath_common *common = ath9k_hw_common(ah);
164 	struct ath_softc *sc = (struct ath_softc *) common->priv;
165 	u32 val;
166 
167 	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
168 		unsigned long flags;
169 		spin_lock_irqsave(&sc->sc_serial_rw, flags);
170 		val = ioread32(sc->mem + reg_offset);
171 		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
172 	} else
173 		val = ioread32(sc->mem + reg_offset);
174 	return val;
175 }
176 
/* Register access ops handed to ath_common; all REG_READ/REG_WRITE
 * traffic from the shared ath code funnels through these. */
static const struct ath_ops ath9k_common_ops = {
	.read = ath9k_ioread32,
	.write = ath9k_iowrite32,
};
181 
182 /**************************/
183 /*     Initialization     */
184 /**************************/
185 
/*
 * Populate mac80211 HT capabilities for one band from the hardware caps:
 * base cap flags, A-MPDU parameters, STBC, and the MCS set derived from
 * the configured TX/RX chainmasks.
 */
static void setup_ht_cap(struct ath_softc *sc,
			 struct ieee80211_sta_ht_cap *ht_info)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u8 tx_streams, rx_streams;
	int i, max_streams;

	ht_info->ht_supported = true;
	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_SM_PS |
		       IEEE80211_HT_CAP_SGI_40 |
		       IEEE80211_HT_CAP_DSSSCCK40;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

	/* AR9300 family supports up to 3 spatial streams, older chips 2 */
	if (AR_SREV_9300_20_OR_LATER(ah))
		max_streams = 3;
	else
		max_streams = 2;

	/* STBC is only advertised on AR9280 and later */
	if (AR_SREV_9280_20_OR_LATER(ah)) {
		if (max_streams >= 2)
			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
		/* one RX STBC stream */
		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
	}

	/* set up supported mcs set */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
	rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);

	ath_print(common, ATH_DBG_CONFIG,
		  "TX streams %d, RX streams: %d\n",
		  tx_streams, rx_streams);

	/* asymmetric chainmasks need the TX stream count spelled out */
	if (tx_streams != rx_streams) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}

	/* advertise MCS 0-7 per supported RX stream */
	for (i = 0; i < rx_streams; i++)
		ht_info->mcs.rx_mask[i] = 0xff;

	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}
240 
241 static int ath9k_reg_notifier(struct wiphy *wiphy,
242 			      struct regulatory_request *request)
243 {
244 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
245 	struct ath_wiphy *aphy = hw->priv;
246 	struct ath_softc *sc = aphy->sc;
247 	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
248 
249 	return ath_reg_notifier_apply(wiphy, request, reg);
250 }
251 
252 /*
253  *  This function will allocate both the DMA descriptor structure, and the
254  *  buffers it contains.  These are used to contain the descriptors used
255  *  by the system.
256 */
257 int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
258 		      struct list_head *head, const char *name,
259 		      int nbuf, int ndesc, bool is_tx)
260 {
261 #define	DS2PHYS(_dd, _ds)						\
262 	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
263 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
264 #define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
265 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
266 	u8 *ds;
267 	struct ath_buf *bf;
268 	int i, bsize, error, desc_len;
269 
270 	ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
271 		  name, nbuf, ndesc);
272 
273 	INIT_LIST_HEAD(head);
274 
275 	if (is_tx)
276 		desc_len = sc->sc_ah->caps.tx_desc_len;
277 	else
278 		desc_len = sizeof(struct ath_desc);
279 
280 	/* ath_desc must be a multiple of DWORDs */
281 	if ((desc_len % 4) != 0) {
282 		ath_print(common, ATH_DBG_FATAL,
283 			  "ath_desc not DWORD aligned\n");
284 		BUG_ON((desc_len % 4) != 0);
285 		error = -ENOMEM;
286 		goto fail;
287 	}
288 
289 	dd->dd_desc_len = desc_len * nbuf * ndesc;
290 
291 	/*
292 	 * Need additional DMA memory because we can't use
293 	 * descriptors that cross the 4K page boundary. Assume
294 	 * one skipped descriptor per 4K page.
295 	 */
296 	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
297 		u32 ndesc_skipped =
298 			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
299 		u32 dma_len;
300 
301 		while (ndesc_skipped) {
302 			dma_len = ndesc_skipped * desc_len;
303 			dd->dd_desc_len += dma_len;
304 
305 			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
306 		}
307 	}
308 
309 	/* allocate descriptors */
310 	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
311 					 &dd->dd_desc_paddr, GFP_KERNEL);
312 	if (dd->dd_desc == NULL) {
313 		error = -ENOMEM;
314 		goto fail;
315 	}
316 	ds = (u8 *) dd->dd_desc;
317 	ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
318 		  name, ds, (u32) dd->dd_desc_len,
319 		  ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
320 
321 	/* allocate buffers */
322 	bsize = sizeof(struct ath_buf) * nbuf;
323 	bf = kzalloc(bsize, GFP_KERNEL);
324 	if (bf == NULL) {
325 		error = -ENOMEM;
326 		goto fail2;
327 	}
328 	dd->dd_bufptr = bf;
329 
330 	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
331 		bf->bf_desc = ds;
332 		bf->bf_daddr = DS2PHYS(dd, ds);
333 
334 		if (!(sc->sc_ah->caps.hw_caps &
335 		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
336 			/*
337 			 * Skip descriptor addresses which can cause 4KB
338 			 * boundary crossing (addr + length) with a 32 dword
339 			 * descriptor fetch.
340 			 */
341 			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
342 				BUG_ON((caddr_t) bf->bf_desc >=
343 				       ((caddr_t) dd->dd_desc +
344 					dd->dd_desc_len));
345 
346 				ds += (desc_len * ndesc);
347 				bf->bf_desc = ds;
348 				bf->bf_daddr = DS2PHYS(dd, ds);
349 			}
350 		}
351 		list_add_tail(&bf->list, head);
352 	}
353 	return 0;
354 fail2:
355 	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
356 			  dd->dd_desc_paddr);
357 fail:
358 	memset(dd, 0, sizeof(*dd));
359 	return error;
360 #undef ATH_DESC_4KB_BOUND_CHECK
361 #undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
362 #undef DS2PHYS
363 }
364 
/*
 * Initialize the hardware key cache: clamp the advertised cache size,
 * wipe every entry, and record whether TX/RX MIC keys can share a
 * single cache slot.
 */
static void ath9k_init_crypto(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	/* Get the hardware key cache size. */
	common->keymax = sc->sc_ah->caps.keycache_size;
	if (common->keymax > ATH_KEYMAX) {
		ath_print(common, ATH_DBG_ANY,
			  "Warning, using only %u entries in %u key cache\n",
			  ATH_KEYMAX, common->keymax);
		common->keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < common->keymax; i++)
		ath_hw_keyreset(common, (u16) i);

	/*
	 * Check whether the separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27 otherwise 59.
	 */
	if (sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
		common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
}
395 
396 static int ath9k_init_btcoex(struct ath_softc *sc)
397 {
398 	int r, qnum;
399 
400 	switch (sc->sc_ah->btcoex_hw.scheme) {
401 	case ATH_BTCOEX_CFG_NONE:
402 		break;
403 	case ATH_BTCOEX_CFG_2WIRE:
404 		ath9k_hw_btcoex_init_2wire(sc->sc_ah);
405 		break;
406 	case ATH_BTCOEX_CFG_3WIRE:
407 		ath9k_hw_btcoex_init_3wire(sc->sc_ah);
408 		r = ath_init_btcoex_timer(sc);
409 		if (r)
410 			return -1;
411 		qnum = sc->tx.hwq_map[WME_AC_BE];
412 		ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
413 		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
414 		break;
415 	default:
416 		WARN_ON(1);
417 		break;
418 	}
419 
420 	return 0;
421 }
422 
/*
 * Set up the hardware transmit queues: beacon, CAB, and one data queue
 * per WME access category.  On any failure, every queue created so far
 * is torn down and -EIO is returned.
 */
static int ath9k_init_queues(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	/* mark all AC->hw-queue mappings as unassigned */
	for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
		sc->tx.hwq_map[i] = -1;

	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
	if (sc->beacon.beaconq == -1) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup a beacon xmit queue\n");
		goto err;
	}

	/* CAB queue carries buffered multicast traffic after DTIM beacons */
	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->beacon.cabq == NULL) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup CAB xmit queue\n");
		goto err;
	}

	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	if (!ath_tx_setup(sc, WME_AC_BK)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for BK traffic\n");
		goto err;
	}

	if (!ath_tx_setup(sc, WME_AC_BE)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for BE traffic\n");
		goto err;
	}
	if (!ath_tx_setup(sc, WME_AC_VI)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for VI traffic\n");
		goto err;
	}
	if (!ath_tx_setup(sc, WME_AC_VO)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for VO traffic\n");
		goto err;
	}

	return 0;

err:
	/* unwind any txqs that were successfully created */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	return -EIO;
}
479 
480 static int ath9k_init_channels_rates(struct ath_softc *sc)
481 {
482 	void *channels;
483 
484 	BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
485 		     ARRAY_SIZE(ath9k_5ghz_chantable) !=
486 		     ATH9K_NUM_CHANNELS);
487 
488 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
489 		channels = kmemdup(ath9k_2ghz_chantable,
490 			sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
491 		if (!channels)
492 		    return -ENOMEM;
493 
494 		sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
495 		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
496 		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
497 			ARRAY_SIZE(ath9k_2ghz_chantable);
498 		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
499 		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
500 			ARRAY_SIZE(ath9k_legacy_rates);
501 	}
502 
503 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
504 		channels = kmemdup(ath9k_5ghz_chantable,
505 			sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
506 		if (!channels) {
507 			if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
508 				kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
509 			return -ENOMEM;
510 		}
511 
512 		sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
513 		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
514 		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
515 			ARRAY_SIZE(ath9k_5ghz_chantable);
516 		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
517 			ath9k_legacy_rates + 4;
518 		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
519 			ARRAY_SIZE(ath9k_legacy_rates) - 4;
520 	}
521 	return 0;
522 }
523 
/*
 * Miscellaneous software-state initialization: ANI timer, TX power
 * limit, aggregation flags, chainmasks, antenna diversity, BSSID mask,
 * beacon slots, and antenna-combining state.
 */
static void ath9k_init_misc(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->config.txpowlimit = ATH_TXPOWER_MAX;

	/* enable both TX and RX aggregation on HT-capable hardware */
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
	common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;

	ath9k_hw_set_diversity(sc->sc_ah, true);
	sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

	/* start with a broadcast BSSID mask; refined once vifs are added */
	memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

	sc->beacon.slottime = ATH9K_SLOT_TIME_9;

	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
		sc->beacon.bslot[i] = NULL;
		sc->beacon.bslot_aphy[i] = NULL;
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
		sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
}
556 
/*
 * Allocate and initialize the per-device software state: the ath_hw
 * instance, the shared ath_common fields, locks/tasklets, and then the
 * hardware, debugfs, queues, btcoex, channels/rates, crypto and misc
 * state.  On failure everything set up so far is unwound via the goto
 * ladder and a negative errno is returned.
 */
static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw_version.devid = devid;
	ah->hw_version.subsysid = subsysid;
	sc->sc_ah = ah;

	/* wire up the shared state used by the common ath code */
	common = ath9k_hw_common(ah);
	common->ops = &ath9k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;
	spin_lock_init(&common->cc_lock);

	spin_lock_init(&sc->wiphy_lock);
	spin_lock_init(&sc->sc_resetlock);
	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	/* Initializes the hardware for all supported chipsets */
	ret = ath9k_hw_init(ah);
	if (ret)
		goto err_hw;

	ret = ath9k_init_debug(ah);
	if (ret) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to create debugfs files\n");
		goto err_debug;
	}

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret =  ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ret = ath9k_init_channels_rates(sc);
	if (ret)
		goto err_btcoex;

	/* the remaining steps cannot fail */
	ath9k_init_crypto(sc);
	ath9k_init_misc(sc);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_exit_debug(ah);
err_debug:
	ath9k_hw_deinit(ah);
err_hw:
	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	kfree(ah);
	sc->sc_ah = NULL;

	return ret;
}
644 
/*
 * Advertise the driver's capabilities to mac80211: hw flags, supported
 * interface modes, queue counts, rate-control algorithm, per-band
 * channel/rate tables and (when HT-capable) HT capabilities, plus the
 * permanent MAC address.
 */
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
		IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_PS_NULLFUNC_STACK |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

	/* MFP via hw crypto on newer chips, or always with sw crypto */
	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_WDS) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	/* AR5416 does not default to powersave */
	if (AR_SREV_5416(sc->sc_ah))
		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->queues = 4;
	hw->max_rates = 4;
	hw->channel_change_time = 5000;
	hw->max_listen_interval = 10;
	hw->max_rate_tries = 10;
	hw->sta_data_size = sizeof(struct ath_node);
	hw->vif_data_size = sizeof(struct ath_vif);

#ifdef CONFIG_ATH9K_RATE_CONTROL
	hw->rate_control_algorithm = "ath9k_rate_control";
#endif

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&sc->sbands[IEEE80211_BAND_2GHZ];
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&sc->sbands[IEEE80211_BAND_5GHZ];

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
	}

	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}
701 
/*
 * Top-level device bring-up called by the bus glue (PCI/AHB): software
 * state, capabilities, regulatory, TX/RX DMA, mac80211 registration,
 * world-regulatory hint, work items, LEDs and rfkill.  On failure the
 * goto ladder unwinds in reverse order; returns 0 or a negative errno.
 */
int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
		    const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
	if (error != 0)
		goto error_init;

	ah = sc->sc_ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(sc, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto error_regd;

	reg = &common->regulatory;

	/* Setup TX DMA */
	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto error_tx;

	/* Setup RX DMA */
	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto error_rx;

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto error_register;

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto error_world;
	}

	INIT_WORK(&sc->hw_check_work, ath_hw_check);
	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
	INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
	INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
	sc->wiphy_scheduler_int = msecs_to_jiffies(500);

	ath_init_leds(sc);
	ath_start_rfkill_poll(sc);

	return 0;

error_world:
	ieee80211_unregister_hw(hw);
error_register:
	ath_rx_cleanup(sc);
error_rx:
	ath_tx_cleanup(sc);
error_tx:
	/* Nothing */
error_regd:
	ath9k_deinit_softc(sc);
error_init:
	return error;
}
774 
775 /*****************************/
776 /*     De-Initialization     */
777 /*****************************/
778 
779 static void ath9k_deinit_softc(struct ath_softc *sc)
780 {
781 	int i = 0;
782 
783 	if (sc->sbands[IEEE80211_BAND_2GHZ].channels)
784 		kfree(sc->sbands[IEEE80211_BAND_2GHZ].channels);
785 
786 	if (sc->sbands[IEEE80211_BAND_5GHZ].channels)
787 		kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);
788 
789         if ((sc->btcoex.no_stomp_timer) &&
790 	    sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
791 		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
792 
793 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
794 		if (ATH_TXQ_SETUP(sc, i))
795 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
796 
797 	ath9k_exit_debug(sc->sc_ah);
798 	ath9k_hw_deinit(sc->sc_ah);
799 
800 	tasklet_kill(&sc->intr_tq);
801 	tasklet_kill(&sc->bcon_tasklet);
802 
803 	kfree(sc->sc_ah);
804 	sc->sc_ah = NULL;
805 }
806 
/*
 * Full device teardown, the inverse of ath9k_init_device(): stop
 * rfkill/LEDs, unregister and free all secondary wiphys, unregister
 * the primary hw, release RX/TX DMA, and free the software state.
 */
void ath9k_deinit_device(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;
	int i = 0;

	/* make sure the chip is awake for the teardown register accesses */
	ath9k_ps_wakeup(sc);

	wiphy_rfkill_stop_polling(sc->hw->wiphy);
	ath_deinit_leds(sc);

	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (aphy == NULL)
			continue;
		sc->sec_wiphy[i] = NULL;
		ieee80211_unregister_hw(aphy->hw);
		ieee80211_free_hw(aphy->hw);
	}

	ieee80211_unregister_hw(hw);
	ath_rx_cleanup(sc);
	ath_tx_cleanup(sc);
	ath9k_deinit_softc(sc);
	kfree(sc->sec_wiphy);
}
832 
/*
 * Release everything allocated by ath_descdma_setup(): the coherent
 * descriptor block and the ath_buf array.  The list head is re-inited
 * and *dd zeroed so the state can be safely reused or re-freed.
 */
void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
			  dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memset(dd, 0, sizeof(*dd));
}
844 
845 /************************/
846 /*     Module Hooks     */
847 /************************/
848 
/*
 * Module entry point: register the rate-control algorithm, create the
 * debugfs root, then probe the PCI and AHB buses.  Each failure unwinds
 * the earlier steps via the goto ladder.
 */
static int __init ath9k_init(void)
{
	int error;

	/* Register rate control algorithm */
	error = ath_rate_control_register();
	if (error != 0) {
		printk(KERN_ERR
			"ath9k: Unable to register rate control "
			"algorithm: %d\n",
			error);
		goto err_out;
	}

	error = ath9k_debug_create_root();
	if (error) {
		printk(KERN_ERR
			"ath9k: Unable to create debugfs root: %d\n",
			error);
		goto err_rate_unregister;
	}

	error = ath_pci_init();
	if (error < 0) {
		printk(KERN_ERR
			"ath9k: No PCI devices found, driver not installed.\n");
		error = -ENODEV;
		goto err_remove_root;
	}

	error = ath_ahb_init();
	if (error < 0) {
		error = -ENODEV;
		goto err_pci_exit;
	}

	return 0;

 err_pci_exit:
	ath_pci_exit();

 err_remove_root:
	ath9k_debug_remove_root();
 err_rate_unregister:
	ath_rate_control_unregister();
 err_out:
	return error;
}
897 module_init(ath9k_init);
898 
/* Module exit: unwind ath9k_init() in reverse order. */
static void __exit ath9k_exit(void)
{
	ath_ahb_exit();
	ath_pci_exit();
	ath9k_debug_remove_root();
	ath_rate_control_unregister();
	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
907 module_exit(ath9k_exit);
908