xref: /linux/drivers/net/wireless/ath/ath9k/init.c (revision c145211d1f9e2ef19e7b4c2b943f68366daa97af)
1 /*
2  * Copyright (c) 2008-2009 Atheros Communications Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include <linux/slab.h>
18 
19 #include "ath9k.h"
20 
/* Device name used in log messages. */
static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

/* Debug mask; permission 0 means it is settable only at load time. */
static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

/* Not static: also consulted by ath9k_set_hw_capab() below. */
int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
35 
36 /* We use the hw_value as an index into our private channel structure */
37 
/* 2 GHz channel entry; hw_value indexes the driver's private channel
 * state.  Set .band explicitly, matching CHAN5G, rather than relying
 * on IEEE80211_BAND_2GHZ happening to be the zero-initialized value. */
#define CHAN2G(_freq, _idx)  { \
	.band = IEEE80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}
43 
/* 5 GHz channel entry; max_power is a conservative default (dBm). */
#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}
50 
/* Some 2 GHz radios are actually tunable from 2312 to 2732 MHz
 * in 5 MHz steps; to keep this table static we only list the
 * channels for which we know all cards have calibration data. */
/* 2.4 GHz channel table; hw_value doubles as the index into the
 * driver's private per-channel state (see CHAN2G). */
static struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};
71 
/* Some 5 GHz radios are actually tunable over a wider range in
 * 5 MHz steps; to keep this table static we only list the
 * channels for which we know all cards have calibration data. */
/* 5 GHz channel table; hw_value continues the index sequence started
 * by ath9k_2ghz_chantable above. */
static struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};
106 
/* Atheros hardware rate code addition for short preamble */
/* Short-preamble hardware rate code: the Atheros rate code with bit
 * 0x04 set, or 0 if the rate has no short-preamble variant.  Macro
 * arguments are fully parenthesized to avoid precedence surprises
 * if a compound expression is ever passed in. */
#define SHPCHECK(__hw_rate, __flags) \
	(((__flags) & IEEE80211_RATE_SHORT_PREAMBLE) ? ((__hw_rate) | 0x04) : 0)

/* Build one struct ieee80211_rate entry; bitrate is in 100 kbps units. */
#define RATE(_bitrate, _hw_rate, _flags) {              \
	.bitrate        = (_bitrate),                   \
	.flags          = (_flags),                     \
	.hw_value       = (_hw_rate),                   \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags))  \
}
117 
/*
 * Legacy (non-HT) rate table shared by both bands; bitrate is in
 * 100 kbps units (mac80211 convention), hw_value is the Atheros
 * hardware rate code.  The first four entries are the 11b CCK
 * rates; ath9k_init_channels_rates() skips them on 5 GHz.
 */
static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0x0b, 0),
	RATE(90, 0x0f, 0),
	RATE(120, 0x0a, 0),
	RATE(180, 0x0e, 0),
	RATE(240, 0x09, 0),
	RATE(360, 0x0d, 0),
	RATE(480, 0x08, 0),
	RATE(540, 0x0c, 0),
};
132 
133 static void ath9k_deinit_softc(struct ath_softc *sc);
134 
/*
 * Read and write share the same lock. We do this to serialize
 * reads and writes on Atheros 802.11n PCI devices only. This is
 * required because the FIFO on these devices can sanely accept
 * only two requests at a time.
 */
140 
141 static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
142 {
143 	struct ath_hw *ah = (struct ath_hw *) hw_priv;
144 	struct ath_common *common = ath9k_hw_common(ah);
145 	struct ath_softc *sc = (struct ath_softc *) common->priv;
146 
147 	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
148 		unsigned long flags;
149 		spin_lock_irqsave(&sc->sc_serial_rw, flags);
150 		iowrite32(val, sc->mem + reg_offset);
151 		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
152 	} else
153 		iowrite32(val, sc->mem + reg_offset);
154 }
155 
156 static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
157 {
158 	struct ath_hw *ah = (struct ath_hw *) hw_priv;
159 	struct ath_common *common = ath9k_hw_common(ah);
160 	struct ath_softc *sc = (struct ath_softc *) common->priv;
161 	u32 val;
162 
163 	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
164 		unsigned long flags;
165 		spin_lock_irqsave(&sc->sc_serial_rw, flags);
166 		val = ioread32(sc->mem + reg_offset);
167 		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
168 	} else
169 		val = ioread32(sc->mem + reg_offset);
170 	return val;
171 }
172 
/* Register-access ops handed to the shared ath layer; both route
 * through the (optionally serialized) helpers above. */
static const struct ath_ops ath9k_common_ops = {
	.read = ath9k_ioread32,
	.write = ath9k_iowrite32,
};
177 
178 /**************************/
179 /*     Initialization     */
180 /**************************/
181 
/* Fill in the HT (802.11n) station capabilities advertised for a band. */
static void setup_ht_cap(struct ath_softc *sc,
			 struct ieee80211_sta_ht_cap *ht_info)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 tx_streams, rx_streams;

	ht_info->ht_supported = true;
	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_SM_PS |
		       IEEE80211_HT_CAP_SGI_40 |
		       IEEE80211_HT_CAP_DSSSCCK40;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

	/* set up supported mcs set */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	/*
	 * x & (x - 1) clears the lowest set bit, so it is zero exactly
	 * when the chainmask has a single bit set (one chain).  Any
	 * multi-chain mask is counted as two streams here.
	 */
	tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
		     1 : 2;
	rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
		     1 : 2;

	if (tx_streams != rx_streams) {
		ath_print(common, ATH_DBG_CONFIG,
			  "TX streams %d, RX streams: %d\n",
			  tx_streams, rx_streams);
		/* Asymmetric chains: advertise TX stream count separately. */
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}

	/* MCS 0-7 always; MCS 8-15 only with two RX streams. */
	ht_info->mcs.rx_mask[0] = 0xff;
	if (rx_streams >= 2)
		ht_info->mcs.rx_mask[1] = 0xff;

	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}
219 
220 static int ath9k_reg_notifier(struct wiphy *wiphy,
221 			      struct regulatory_request *request)
222 {
223 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
224 	struct ath_wiphy *aphy = hw->priv;
225 	struct ath_softc *sc = aphy->sc;
226 	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
227 
228 	return ath_reg_notifier_apply(wiphy, request, reg);
229 }
230 
/*
 * This function allocates both the DMA descriptor structure and the
 * buffers it contains.  These hold the descriptors used by the
 * system.
 */
236 int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
237 		      struct list_head *head, const char *name,
238 		      int nbuf, int ndesc)
239 {
240 #define	DS2PHYS(_dd, _ds)						\
241 	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
242 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
243 #define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
244 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
245 	struct ath_desc *ds;
246 	struct ath_buf *bf;
247 	int i, bsize, error;
248 
249 	ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
250 		  name, nbuf, ndesc);
251 
252 	INIT_LIST_HEAD(head);
253 	/* ath_desc must be a multiple of DWORDs */
254 	if ((sizeof(struct ath_desc) % 4) != 0) {
255 		ath_print(common, ATH_DBG_FATAL,
256 			  "ath_desc not DWORD aligned\n");
257 		BUG_ON((sizeof(struct ath_desc) % 4) != 0);
258 		error = -ENOMEM;
259 		goto fail;
260 	}
261 
262 	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
263 
264 	/*
265 	 * Need additional DMA memory because we can't use
266 	 * descriptors that cross the 4K page boundary. Assume
267 	 * one skipped descriptor per 4K page.
268 	 */
269 	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
270 		u32 ndesc_skipped =
271 			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
272 		u32 dma_len;
273 
274 		while (ndesc_skipped) {
275 			dma_len = ndesc_skipped * sizeof(struct ath_desc);
276 			dd->dd_desc_len += dma_len;
277 
278 			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
279 		};
280 	}
281 
282 	/* allocate descriptors */
283 	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
284 					 &dd->dd_desc_paddr, GFP_KERNEL);
285 	if (dd->dd_desc == NULL) {
286 		error = -ENOMEM;
287 		goto fail;
288 	}
289 	ds = dd->dd_desc;
290 	ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
291 		  name, ds, (u32) dd->dd_desc_len,
292 		  ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
293 
294 	/* allocate buffers */
295 	bsize = sizeof(struct ath_buf) * nbuf;
296 	bf = kzalloc(bsize, GFP_KERNEL);
297 	if (bf == NULL) {
298 		error = -ENOMEM;
299 		goto fail2;
300 	}
301 	dd->dd_bufptr = bf;
302 
303 	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
304 		bf->bf_desc = ds;
305 		bf->bf_daddr = DS2PHYS(dd, ds);
306 
307 		if (!(sc->sc_ah->caps.hw_caps &
308 		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
309 			/*
310 			 * Skip descriptor addresses which can cause 4KB
311 			 * boundary crossing (addr + length) with a 32 dword
312 			 * descriptor fetch.
313 			 */
314 			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
315 				BUG_ON((caddr_t) bf->bf_desc >=
316 				       ((caddr_t) dd->dd_desc +
317 					dd->dd_desc_len));
318 
319 				ds += ndesc;
320 				bf->bf_desc = ds;
321 				bf->bf_daddr = DS2PHYS(dd, ds);
322 			}
323 		}
324 		list_add_tail(&bf->list, head);
325 	}
326 	return 0;
327 fail2:
328 	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
329 			  dd->dd_desc_paddr);
330 fail:
331 	memset(dd, 0, sizeof(*dd));
332 	return error;
333 #undef ATH_DESC_4KB_BOUND_CHECK
334 #undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
335 #undef DS2PHYS
336 }
337 
/*
 * Initialize the hardware key cache and detect cipher-related
 * capabilities: TKIP MIC, split MIC key entries, multicast key search.
 */
static void ath9k_init_crypto(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	/* Get the hardware key cache size. */
	common->keymax = sc->sc_ah->caps.keycache_size;
	/* Clamp to the driver's software limit if the hardware has more. */
	if (common->keymax > ATH_KEYMAX) {
		ath_print(common, ATH_DBG_ANY,
			  "Warning, using only %u entries in %u key cache\n",
			  ATH_KEYMAX, common->keymax);
		common->keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < common->keymax; i++)
		ath9k_hw_keyreset(sc->sc_ah, (u16) i);

	if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)) {
		/*
		 * Whether we should enable h/w TKIP MIC.
		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
		 * report WMM capable, so it's always safe to turn on
		 * TKIP MIC in this case.
		 */
		ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
	}

	/*
	 * Check whether the separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27 otherwise 59.
	 */
	if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)
	    && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
				      ATH9K_CIPHER_MIC, NULL)
	    && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT,
				      0, NULL))
		common->splitmic = 1;

	/* turn on mcast key search if possible */
	if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
		(void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH,
					     1, 1, NULL);

}
390 
391 static int ath9k_init_btcoex(struct ath_softc *sc)
392 {
393 	int r, qnum;
394 
395 	switch (sc->sc_ah->btcoex_hw.scheme) {
396 	case ATH_BTCOEX_CFG_NONE:
397 		break;
398 	case ATH_BTCOEX_CFG_2WIRE:
399 		ath9k_hw_btcoex_init_2wire(sc->sc_ah);
400 		break;
401 	case ATH_BTCOEX_CFG_3WIRE:
402 		ath9k_hw_btcoex_init_3wire(sc->sc_ah);
403 		r = ath_init_btcoex_timer(sc);
404 		if (r)
405 			return -1;
406 		qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
407 		ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
408 		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
409 		break;
410 	default:
411 		WARN_ON(1);
412 		break;
413 	}
414 
415 	return 0;
416 }
417 
/*
 * Set up the beacon, CAB and per-AC data transmit queues.  On any
 * failure every TX queue created so far is torn down and -EIO is
 * returned.
 */
static int ath9k_init_queues(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	/* No WMM AC has been mapped onto a hardware queue yet. */
	for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
		sc->tx.hwq_map[i] = -1;

	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
	if (sc->beacon.beaconq == -1) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup a beacon xmit queue\n");
		goto err;
	}

	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->beacon.cabq == NULL) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup CAB xmit queue\n");
		goto err;
	}

	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for BK traffic\n");
		goto err;
	}

	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for BE traffic\n");
		goto err;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for VI traffic\n");
		goto err;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to setup xmit queue for VO traffic\n");
		goto err;
	}

	return 0;

err:
	/* Tear down every TX queue that was created before the failure. */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	return -EIO;
}
474 
475 static void ath9k_init_channels_rates(struct ath_softc *sc)
476 {
477 	if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
478 		sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
479 		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
480 		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
481 			ARRAY_SIZE(ath9k_2ghz_chantable);
482 		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
483 		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
484 			ARRAY_SIZE(ath9k_legacy_rates);
485 	}
486 
487 	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
488 		sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
489 		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
490 		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
491 			ARRAY_SIZE(ath9k_5ghz_chantable);
492 		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
493 			ath9k_legacy_rates + 4;
494 		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
495 			ARRAY_SIZE(ath9k_legacy_rates) - 4;
496 	}
497 }
498 
/*
 * Miscellaneous one-time state: ANI calibration timer, TX power
 * limit, aggregation flags, chainmasks, antenna, BSSID mask and
 * beacon slots.
 */
static void ath9k_init_misc(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->config.txpowlimit = ATH_TXPOWER_MAX;

	/* Enable A-MPDU aggregation in both directions on HT hardware. */
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
	common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;

	ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
	sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);

	/* Start with an all-ones (broadcast) BSSID mask where supported. */
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

	sc->beacon.slottime = ATH9K_SLOT_TIME_9;

	/* No beacon slots are occupied yet. */
	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
		sc->beacon.bslot[i] = NULL;
		sc->beacon.bslot_aphy[i] = NULL;
	}
}
530 
/*
 * Allocate the ath_hw instance and initialize all driver-internal
 * state: shared ath layer hookup, locks, tasklets, hardware init,
 * debugfs, TX queues, btcoex, crypto, channel/rate tables.  On any
 * failure everything set up so far is unwound in reverse order,
 * sc->sc_ah is reset to NULL and the error code is returned.
 */
static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw_version.devid = devid;
	ah->hw_version.subsysid = subsysid;
	sc->sc_ah = ah;

	/* Wire the shared ath layer to this device. */
	common = ath9k_hw_common(ah);
	common->ops = &ath9k_common_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;

	spin_lock_init(&sc->wiphy_lock);
	spin_lock_init(&sc->sc_resetlock);
	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	ret = ath9k_hw_init(ah);
	if (ret) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to initialize hardware; "
			  "initialization status: %d\n", ret);
		goto err_hw;
	}

	ret = ath9k_init_debug(ah);
	if (ret) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to create debugfs files\n");
		goto err_debug;
	}

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret =  ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ath9k_init_crypto(sc);
	ath9k_init_channels_rates(sc);
	ath9k_init_misc(sc);

	return 0;

	/* Error unwinding: each label undoes the steps that succeeded
	 * before the corresponding failure, in reverse order. */
err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_exit_debug(ah);
err_debug:
	ath9k_hw_deinit(ah);
err_hw:
	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	kfree(ah);
	sc->sc_ah = NULL;

	return ret;
}
617 
618 void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
619 {
620 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
621 
622 	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
623 		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
624 		IEEE80211_HW_SIGNAL_DBM |
625 		IEEE80211_HW_SUPPORTS_PS |
626 		IEEE80211_HW_PS_NULLFUNC_STACK |
627 		IEEE80211_HW_SPECTRUM_MGMT |
628 		IEEE80211_HW_REPORTS_TX_ACK_STATUS;
629 
630 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
631 		 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
632 
633 	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
634 		hw->flags |= IEEE80211_HW_MFP_CAPABLE;
635 
636 	hw->wiphy->interface_modes =
637 		BIT(NL80211_IFTYPE_AP) |
638 		BIT(NL80211_IFTYPE_STATION) |
639 		BIT(NL80211_IFTYPE_ADHOC) |
640 		BIT(NL80211_IFTYPE_MESH_POINT);
641 
642 	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
643 
644 	hw->queues = 4;
645 	hw->max_rates = 4;
646 	hw->channel_change_time = 5000;
647 	hw->max_listen_interval = 10;
648 	hw->max_rate_tries = 10;
649 	hw->sta_data_size = sizeof(struct ath_node);
650 	hw->vif_data_size = sizeof(struct ath_vif);
651 
652 	hw->rate_control_algorithm = "ath9k_rate_control";
653 
654 	if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
655 		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
656 			&sc->sbands[IEEE80211_BAND_2GHZ];
657 	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
658 		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
659 			&sc->sbands[IEEE80211_BAND_5GHZ];
660 
661 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
662 		if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
663 			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
664 		if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
665 			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
666 	}
667 
668 	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
669 }
670 
/*
 * Bring up the whole device: softc/hardware init, mac80211
 * capabilities, regulatory, TX/RX DMA, mac80211 registration, LEDs
 * and rfkill polling.  On failure everything that succeeded is
 * unwound in reverse order via the error labels.
 */
int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
		    const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
	if (error != 0)
		goto error_init;

	ah = sc->sc_ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(sc, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto error_regd;

	reg = &common->regulatory;

	/* Setup TX DMA */
	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto error_tx;

	/* Setup RX DMA */
	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto error_rx;

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto error_register;

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto error_world;
	}

	INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
	INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
	sc->wiphy_scheduler_int = msecs_to_jiffies(500);

	ath_init_leds(sc);
	ath_start_rfkill_poll(sc);

	return 0;

error_world:
	ieee80211_unregister_hw(hw);
error_register:
	ath_rx_cleanup(sc);
error_rx:
	ath_tx_cleanup(sc);
error_tx:
	/* Nothing */
error_regd:
	ath9k_deinit_softc(sc);
error_init:
	return error;
}
741 
742 /*****************************/
743 /*     De-Initialization     */
744 /*****************************/
745 
746 static void ath9k_deinit_softc(struct ath_softc *sc)
747 {
748 	int i = 0;
749 
750         if ((sc->btcoex.no_stomp_timer) &&
751 	    sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
752 		ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
753 
754 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
755 		if (ATH_TXQ_SETUP(sc, i))
756 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
757 
758 	ath9k_exit_debug(sc->sc_ah);
759 	ath9k_hw_deinit(sc->sc_ah);
760 
761 	tasklet_kill(&sc->intr_tq);
762 	tasklet_kill(&sc->bcon_tasklet);
763 }
764 
765 void ath9k_deinit_device(struct ath_softc *sc)
766 {
767 	struct ieee80211_hw *hw = sc->hw;
768 	int i = 0;
769 
770 	ath9k_ps_wakeup(sc);
771 
772 	wiphy_rfkill_stop_polling(sc->hw->wiphy);
773 	ath_deinit_leds(sc);
774 
775 	for (i = 0; i < sc->num_sec_wiphy; i++) {
776 		struct ath_wiphy *aphy = sc->sec_wiphy[i];
777 		if (aphy == NULL)
778 			continue;
779 		sc->sec_wiphy[i] = NULL;
780 		ieee80211_unregister_hw(aphy->hw);
781 		ieee80211_free_hw(aphy->hw);
782 	}
783 	kfree(sc->sec_wiphy);
784 
785 	ieee80211_unregister_hw(hw);
786 	ath_rx_cleanup(sc);
787 	ath_tx_cleanup(sc);
788 	ath9k_deinit_softc(sc);
789 }
790 
791 void ath_descdma_cleanup(struct ath_softc *sc,
792 			 struct ath_descdma *dd,
793 			 struct list_head *head)
794 {
795 	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
796 			  dd->dd_desc_paddr);
797 
798 	INIT_LIST_HEAD(head);
799 	kfree(dd->dd_bufptr);
800 	memset(dd, 0, sizeof(*dd));
801 }
802 
803 /************************/
804 /*     Module Hooks     */
805 /************************/
806 
/*
 * Module load: register the rate control algorithm, create the
 * debugfs root, then probe the PCI and AHB buses.  Each failure
 * label unwinds the steps that succeeded, in reverse order.
 */
static int __init ath9k_init(void)
{
	int error;

	/* Register rate control algorithm */
	error = ath_rate_control_register();
	if (error != 0) {
		printk(KERN_ERR
			"ath9k: Unable to register rate control "
			"algorithm: %d\n",
			error);
		goto err_out;
	}

	error = ath9k_debug_create_root();
	if (error) {
		printk(KERN_ERR
			"ath9k: Unable to create debugfs root: %d\n",
			error);
		goto err_rate_unregister;
	}

	error = ath_pci_init();
	if (error < 0) {
		printk(KERN_ERR
			"ath9k: No PCI devices found, driver not installed.\n");
		error = -ENODEV;
		goto err_remove_root;
	}

	error = ath_ahb_init();
	if (error < 0) {
		error = -ENODEV;
		goto err_pci_exit;
	}

	return 0;

 err_pci_exit:
	ath_pci_exit();

 err_remove_root:
	ath9k_debug_remove_root();
 err_rate_unregister:
	ath_rate_control_unregister();
 err_out:
	return error;
}
module_init(ath9k_init);
856 
/* Module unload: tear down in exact reverse order of ath9k_init(). */
static void __exit ath9k_exit(void)
{
	ath_ahb_exit();
	ath_pci_exit();
	ath9k_debug_remove_root();
	ath_rate_control_unregister();
	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);
866