xref: /linux/drivers/net/wireless/ath/ath9k/main.c (revision 2277ab4a1df50e05bc732fe9488d4e902bb8399a)
1 /*
2  * Copyright (c) 2008-2009 Atheros Communications Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include <linux/nl80211.h>
18 #include "ath9k.h"
19 
20 #define ATH_PCI_VERSION "0.1"
21 
22 static char *dev_info = "ath9k";
23 
24 MODULE_AUTHOR("Atheros Communications");
25 MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
26 MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
27 MODULE_LICENSE("Dual BSD/GPL");
28 
29 static int modparam_nohwcrypt;
30 module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
31 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
32 
33 /* We use the hw_value as an index into our private channel structure */
34 
35 #define CHAN2G(_freq, _idx)  { \
36 	.center_freq = (_freq), \
37 	.hw_value = (_idx), \
38 	.max_power = 20, \
39 }
40 
41 #define CHAN5G(_freq, _idx) { \
42 	.band = IEEE80211_BAND_5GHZ, \
43 	.center_freq = (_freq), \
44 	.hw_value = (_idx), \
45 	.max_power = 20, \
46 }
47 
48 /* Some 2 GHz radios are actually tunable from 2312-2732 MHz
49  * in 5 MHz steps. To keep this table static, we only list
50  * the channels for which we know all cards have calibration
51  * data. */
52 static struct ieee80211_channel ath9k_2ghz_chantable[] = {
53 	CHAN2G(2412, 0), /* Channel 1 */
54 	CHAN2G(2417, 1), /* Channel 2 */
55 	CHAN2G(2422, 2), /* Channel 3 */
56 	CHAN2G(2427, 3), /* Channel 4 */
57 	CHAN2G(2432, 4), /* Channel 5 */
58 	CHAN2G(2437, 5), /* Channel 6 */
59 	CHAN2G(2442, 6), /* Channel 7 */
60 	CHAN2G(2447, 7), /* Channel 8 */
61 	CHAN2G(2452, 8), /* Channel 9 */
62 	CHAN2G(2457, 9), /* Channel 10 */
63 	CHAN2G(2462, 10), /* Channel 11 */
64 	CHAN2G(2467, 11), /* Channel 12 */
65 	CHAN2G(2472, 12), /* Channel 13 */
66 	CHAN2G(2484, 13), /* Channel 14 */
67 };
68 
69 /* Some 5 GHz radios are actually tunable on XXXX-YYYY
70  * in 5 MHz steps. To keep this table static, we only list
71  * the channels for which we know all cards have calibration
72  * data. */
73 static struct ieee80211_channel ath9k_5ghz_chantable[] = {
74 	/* _We_ call this UNII 1 */
75 	CHAN5G(5180, 14), /* Channel 36 */
76 	CHAN5G(5200, 15), /* Channel 40 */
77 	CHAN5G(5220, 16), /* Channel 44 */
78 	CHAN5G(5240, 17), /* Channel 48 */
79 	/* _We_ call this UNII 2 */
80 	CHAN5G(5260, 18), /* Channel 52 */
81 	CHAN5G(5280, 19), /* Channel 56 */
82 	CHAN5G(5300, 20), /* Channel 60 */
83 	CHAN5G(5320, 21), /* Channel 64 */
84 	/* _We_ call this "Middle band" */
85 	CHAN5G(5500, 22), /* Channel 100 */
86 	CHAN5G(5520, 23), /* Channel 104 */
87 	CHAN5G(5540, 24), /* Channel 108 */
88 	CHAN5G(5560, 25), /* Channel 112 */
89 	CHAN5G(5580, 26), /* Channel 116 */
90 	CHAN5G(5600, 27), /* Channel 120 */
91 	CHAN5G(5620, 28), /* Channel 124 */
92 	CHAN5G(5640, 29), /* Channel 128 */
93 	CHAN5G(5660, 30), /* Channel 132 */
94 	CHAN5G(5680, 31), /* Channel 136 */
95 	CHAN5G(5700, 32), /* Channel 140 */
96 	/* _We_ call this UNII 3 */
97 	CHAN5G(5745, 33), /* Channel 149 */
98 	CHAN5G(5765, 34), /* Channel 153 */
99 	CHAN5G(5785, 35), /* Channel 157 */
100 	CHAN5G(5805, 36), /* Channel 161 */
101 	CHAN5G(5825, 37), /* Channel 165 */
102 };
103 
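/*
 * Cache a pointer to the rate table that matches the current band and
 * HT channel type, so the rate control and transmit paths do not have
 * to recompute it for every frame.
 */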
104 static void ath_cache_conf_rate(struct ath_softc *sc,
105 				struct ieee80211_conf *conf)
106 {
107 	switch (conf->channel->band) {
108 	case IEEE80211_BAND_2GHZ:
109 		if (conf_is_ht20(conf))
110 			sc->cur_rate_table =
111 			  sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
112 		else if (conf_is_ht40_minus(conf))
113 			sc->cur_rate_table =
114 			  sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
115 		else if (conf_is_ht40_plus(conf))
116 			sc->cur_rate_table =
117 			  sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
118 		else
119 			sc->cur_rate_table =
120 			  sc->hw_rate_table[ATH9K_MODE_11G];
121 		break;
122 	case IEEE80211_BAND_5GHZ:
123 		if (conf_is_ht20(conf))
124 			sc->cur_rate_table =
125 			  sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
126 		else if (conf_is_ht40_minus(conf))
127 			sc->cur_rate_table =
128 			  sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
129 		else if (conf_is_ht40_plus(conf))
130 			sc->cur_rate_table =
131 			  sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
132 		else
133 			sc->cur_rate_table =
134 			  sc->hw_rate_table[ATH9K_MODE_11A];
135 		break;
136 	default:
137 		BUG_ON(1);
138 		break;
139 	}
140 }
141 
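/*
 * Program the transmit power limit from the driver configuration and
 * read the value back, since the hardware may clamp it according to
 * regulatory and calibration limits.
 */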
142 static void ath_update_txpow(struct ath_softc *sc)
143 {
144 	struct ath_hw *ah = sc->sc_ah;
145 	u32 txpow;
146 
147 	if (sc->curtxpow != sc->config.txpowlimit) {
148 		ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit);
149 		/* read back in case value is clamped */
150 		ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
151 		sc->curtxpow = txpow;
152 	}
153 }
154 
155 static u8 parse_mpdudensity(u8 mpdudensity)
156 {
157 	/*
158 	 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
159 	 *   0 for no restriction
160 	 *   1 for 1/4 us
161 	 *   2 for 1/2 us
162 	 *   3 for 1 us
163 	 *   4 for 2 us
164 	 *   5 for 4 us
165 	 *   6 for 8 us
166 	 *   7 for 16 us
167 	 */
168 	switch (mpdudensity) {
169 	case 0:
170 		return 0;
171 	case 1:
172 	case 2:
173 	case 3:
174 		/* Our lower layer calculations limit our precision to
175 		   1 microsecond */
176 		return 1;
177 	case 4:
178 		return 2;
179 	case 5:
180 		return 4;
181 	case 6:
182 		return 8;
183 	case 7:
184 		return 16;
185 	default:
186 		return 0;
187 	}
188 }
189 
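/*
 * Populate the mac80211 bitrate table for the given band from the
 * driver's hardware rate table.  ratekbps is stored in kbps while
 * mac80211 expects units of 100 kbps, hence the division by 100.
 */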
190 static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
191 {
192 	const struct ath_rate_table *rate_table = NULL;
193 	struct ieee80211_supported_band *sband;
194 	struct ieee80211_rate *rate;
195 	int i, maxrates;
196 
197 	switch (band) {
198 	case IEEE80211_BAND_2GHZ:
199 		rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
200 		break;
201 	case IEEE80211_BAND_5GHZ:
202 		rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
203 		break;
204 	default:
205 		break;
206 	}
207 
208 	if (rate_table == NULL)
209 		return;
210 
211 	sband = &sc->sbands[band];
212 	rate = sc->rates[band];
213 
214 	if (rate_table->rate_cnt > ATH_RATE_MAX)
215 		maxrates = ATH_RATE_MAX;
216 	else
217 		maxrates = rate_table->rate_cnt;
218 
219 	for (i = 0; i < maxrates; i++) {
220 		rate[i].bitrate = rate_table->info[i].ratekbps / 100;
221 		rate[i].hw_value = rate_table->info[i].ratecode;
222 		if (rate_table->info[i].short_preamble) {
223 			rate[i].hw_value_short = rate_table->info[i].ratecode |
224 				rate_table->info[i].short_preamble;
225 			rate[i].flags = IEEE80211_RATE_SHORT_PREAMBLE;
226 		}
227 		sband->n_bitrates++;
228 
229 		DPRINTF(sc, ATH_DBG_CONFIG, "Rate: %2dMbps, ratecode: %2d\n",
230 			rate[i].bitrate / 10, rate[i].hw_value);
231 	}
232 }
233 
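/*
 * Look up the driver's ath9k_channel that corresponds to the channel
 * currently configured in mac80211 (hw->conf.channel) and refresh the
 * fields that are duplicated in it.
 */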
234 static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
235 						struct ieee80211_hw *hw)
236 {
237 	struct ieee80211_channel *curchan = hw->conf.channel;
238 	struct ath9k_channel *channel;
239 	u8 chan_idx;
240 
241 	chan_idx = curchan->hw_value;
242 	channel = &sc->sc_ah->channels[chan_idx];
243 	ath9k_update_ichannel(sc, hw, channel);
244 	return channel;
245 }
246 
247 /*
248  * Set/change channels.  If the channel is really being changed, it's done
249  * by resetting the chip.  To accomplish this we must first clean up any pending
250  * DMA, then restart stuff.
251 */
252 int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
253 		    struct ath9k_channel *hchan)
254 {
255 	struct ath_hw *ah = sc->sc_ah;
256 	bool fastcc = true, stopped;
257 	struct ieee80211_channel *channel = hw->conf.channel;
258 	int r;
259 
260 	if (sc->sc_flags & SC_OP_INVALID)
261 		return -EIO;
262 
263 	ath9k_ps_wakeup(sc);
264 
265 	/*
266 	 * This is only performed if the channel settings have
267 	 * actually changed.
268 	 *
269 	 * To switch channels clear any pending DMA operations;
270 	 * wait long enough for the RX fifo to drain, reset the
271 	 * hardware at the new frequency, and then re-enable
272 	 * the relevant bits of the h/w.
273 	 */
274 	ath9k_hw_set_interrupts(ah, 0);
275 	ath_drain_all_txq(sc, false);
276 	stopped = ath_stoprecv(sc);
277 
278 	/* XXX: do not flush the receive queue here. We don't want
279 	 * to flush data frames already queued just because the
280 	 * channel is changing. */
281 
282 	if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
283 		fastcc = false;
284 
285 	DPRINTF(sc, ATH_DBG_CONFIG,
286 		"(%u MHz) -> (%u MHz), chanwidth: %d\n",
287 		sc->sc_ah->curchan->channel,
288 		channel->center_freq, sc->tx_chan_width);
289 
290 	spin_lock_bh(&sc->sc_resetlock);
291 
292 	r = ath9k_hw_reset(ah, hchan, fastcc);
293 	if (r) {
294 		DPRINTF(sc, ATH_DBG_FATAL,
295 			"Unable to reset channel (%u MHz) "
296 			"reset status %d\n",
297 			channel->center_freq, r);
298 		spin_unlock_bh(&sc->sc_resetlock);
299 		goto ps_restore;
300 	}
301 	spin_unlock_bh(&sc->sc_resetlock);
302 
303 	sc->sc_flags &= ~SC_OP_FULL_RESET;
304 
305 	if (ath_startrecv(sc) != 0) {
306 		DPRINTF(sc, ATH_DBG_FATAL,
307 			"Unable to restart recv logic\n");
308 		r = -EIO;
309 		goto ps_restore;
310 	}
311 
312 	ath_cache_conf_rate(sc, &hw->conf);
313 	ath_update_txpow(sc);
314 	ath9k_hw_set_interrupts(ah, sc->imask);
315 
316  ps_restore:
317 	ath9k_ps_restore(sc);
318 	return r;
319 }
320 
321 /*
322  *  This routine performs the periodic noise floor calibration function
323  *  that is used to adjust and optimize the chip performance.  This
324  *  takes environmental changes (location, temperature) into account.
325  *  When the task is complete, it reschedules itself depending on the
326  *  appropriate interval that was calculated.
327  */
328 static void ath_ani_calibrate(unsigned long data)
329 {
330 	struct ath_softc *sc = (struct ath_softc *)data;
331 	struct ath_hw *ah = sc->sc_ah;
332 	bool longcal = false;
333 	bool shortcal = false;
334 	bool aniflag = false;
335 	unsigned int timestamp = jiffies_to_msecs(jiffies);
336 	u32 cal_interval, short_cal_interval;
337 
338 	short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
339 		ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
340 
341 	/*
342 	 * Don't calibrate when we're scanning;
343 	 * we are most likely not on our home channel.
344 	 */
345 	if (sc->sc_flags & SC_OP_SCANNING)
346 		goto set_timer;
347 
348 	/* Only calibrate if awake */
349 	if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
350 		goto set_timer;
351 
352 	ath9k_ps_wakeup(sc);
353 
354 	/* Long calibration runs independently of short calibration. */
355 	if ((timestamp - sc->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
356 		longcal = true;
357 		DPRINTF(sc, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
358 		sc->ani.longcal_timer = timestamp;
359 	}
360 
361 	/* Short calibration applies only while caldone is false */
362 	if (!sc->ani.caldone) {
363 		if ((timestamp - sc->ani.shortcal_timer) >= short_cal_interval) {
364 			shortcal = true;
365 			DPRINTF(sc, ATH_DBG_ANI, "shortcal @%lu\n", jiffies);
366 			sc->ani.shortcal_timer = timestamp;
367 			sc->ani.resetcal_timer = timestamp;
368 		}
369 	} else {
370 		if ((timestamp - sc->ani.resetcal_timer) >=
371 		    ATH_RESTART_CALINTERVAL) {
372 			sc->ani.caldone = ath9k_hw_reset_calvalid(ah);
373 			if (sc->ani.caldone)
374 				sc->ani.resetcal_timer = timestamp;
375 		}
376 	}
377 
378 	/* Verify whether we must check ANI */
379 	if ((timestamp - sc->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
380 		aniflag = true;
381 		sc->ani.checkani_timer = timestamp;
382 	}
383 
384 	/* Skip all processing if there's nothing to do. */
385 	if (longcal || shortcal || aniflag) {
386 		/* Call ANI routine if necessary */
387 		if (aniflag)
388 			ath9k_hw_ani_monitor(ah, &sc->nodestats, ah->curchan);
389 
390 		/* Perform calibration if necessary */
391 		if (longcal || shortcal) {
392 			sc->ani.caldone = ath9k_hw_calibrate(ah, ah->curchan,
393 						     sc->rx_chainmask, longcal);
394 
395 			if (longcal)
396 				sc->ani.noise_floor = ath9k_hw_getchan_noise(ah,
397 								     ah->curchan);
398 
399 			DPRINTF(sc, ATH_DBG_ANI, "calibrate chan %u/%x nf: %d\n",
400 				ah->curchan->channel, ah->curchan->channelFlags,
401 				sc->ani.noise_floor);
402 		}
403 	}
404 
405 	ath9k_ps_restore(sc);
406 
407 set_timer:
408 	/*
409 	 * Set the timer interval based on previous results.
410 	 * The interval must be the shortest necessary to satisfy ANI,
411 	 * short calibration and long calibration.
412 	 */
413 	cal_interval = ATH_LONG_CALINTERVAL;
414 	if (sc->sc_ah->config.enable_ani)
415 		cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
416 	if (!sc->ani.caldone)
417 		cal_interval = min(cal_interval, (u32)short_cal_interval);
418 
419 	mod_timer(&sc->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
420 }
421 
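/*
 * (Re)arm the ANI/calibration timer and reset the per-type timestamps
 * so the first poll happens one ATH_ANI_POLLINTERVAL from now.
 */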
422 static void ath_start_ani(struct ath_softc *sc)
423 {
424 	unsigned long timestamp = jiffies_to_msecs(jiffies);
425 
426 	sc->ani.longcal_timer = timestamp;
427 	sc->ani.shortcal_timer = timestamp;
428 	sc->ani.checkani_timer = timestamp;
429 
430 	mod_timer(&sc->ani.timer,
431 		  jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
432 }
433 
434 /*
435  * Update the tx/rx chainmask. For a legacy association,
436  * hard code the chainmask to 1x1; for an 11n association,
437  * use the chainmask configuration; for bt coexistence, use
438  * the chainmask configuration even in legacy mode.
439  */
440 void ath_update_chainmask(struct ath_softc *sc, int is_ht)
441 {
442 	if (is_ht ||
443 	    (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BT_COEX)) {
444 		sc->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
445 		sc->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
446 	} else {
447 		sc->tx_chainmask = 1;
448 		sc->rx_chainmask = 1;
449 	}
450 
451 	DPRINTF(sc, ATH_DBG_CONFIG, "tx chmask: %d, rx chmask: %d\n",
452 		sc->tx_chainmask, sc->rx_chainmask);
453 }
454 
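/*
 * Per-station initialization.  When TX aggregation is enabled, derive
 * the peer's A-MPDU limits from its advertised HT capabilities, e.g.
 * an ampdu_factor of 3 yields a maximum A-MPDU length of
 * 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR + 3) bytes.
 */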
455 static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
456 {
457 	struct ath_node *an;
458 
459 	an = (struct ath_node *)sta->drv_priv;
460 
461 	if (sc->sc_flags & SC_OP_TXAGGR) {
462 		ath_tx_node_init(sc, an);
463 		an->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
464 				     sta->ht_cap.ampdu_factor);
465 		an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
466 	}
467 }
468 
469 static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
470 {
471 	struct ath_node *an = (struct ath_node *)sta->drv_priv;
472 
473 	if (sc->sc_flags & SC_OP_TXAGGR)
474 		ath_tx_node_cleanup(sc, an);
475 }
476 
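/*
 * Bottom half of the interrupt handler: services the RX and TX
 * completions recorded in sc->intrstatus, performs a full reset on
 * fatal errors and re-enables the interrupts masked off in ath_isr().
 */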
477 static void ath9k_tasklet(unsigned long data)
478 {
479 	struct ath_softc *sc = (struct ath_softc *)data;
480 	u32 status = sc->intrstatus;
481 
482 	ath9k_ps_wakeup(sc);
483 
484 	if (status & ATH9K_INT_FATAL) {
485 		ath_reset(sc, false);
486 		ath9k_ps_restore(sc);
487 		return;
488 	}
489 
490 	if (status & (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
491 		spin_lock_bh(&sc->rx.rxflushlock);
492 		ath_rx_tasklet(sc, 0);
493 		spin_unlock_bh(&sc->rx.rxflushlock);
494 	}
495 
496 	if (status & ATH9K_INT_TX)
497 		ath_tx_tasklet(sc);
498 
499 	if ((status & ATH9K_INT_TSFOOR) &&
500 	    (sc->hw->conf.flags & IEEE80211_CONF_PS)) {
501 		/*
502 		 * TSF sync does not look correct; remain awake to sync with
503 		 * the next Beacon.
504 		 */
505 		DPRINTF(sc, ATH_DBG_PS, "TSFOOR - Sync with next Beacon\n");
506 		sc->sc_flags |= SC_OP_WAIT_FOR_BEACON | SC_OP_BEACON_SYNC;
507 	}
508 
509 	/* re-enable hardware interrupt */
510 	ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
511 	ath9k_ps_restore(sc);
512 }
513 
514 irqreturn_t ath_isr(int irq, void *dev)
515 {
516 #define SCHED_INTR (				\
517 		ATH9K_INT_FATAL |		\
518 		ATH9K_INT_RXORN |		\
519 		ATH9K_INT_RXEOL |		\
520 		ATH9K_INT_RX |			\
521 		ATH9K_INT_TX |			\
522 		ATH9K_INT_BMISS |		\
523 		ATH9K_INT_CST |			\
524 		ATH9K_INT_TSFOOR)
525 
526 	struct ath_softc *sc = dev;
527 	struct ath_hw *ah = sc->sc_ah;
528 	enum ath9k_int status;
529 	bool sched = false;
530 
531 	/*
532 	 * The hardware is not ready/present, don't
533 	 * touch anything. Note this can happen early
534 	 * on if the IRQ is shared.
535 	 */
536 	if (sc->sc_flags & SC_OP_INVALID)
537 		return IRQ_NONE;
538 
539 
540 	/* shared irq, not for us */
541 
542 	if (!ath9k_hw_intrpend(ah))
543 		return IRQ_NONE;
544 
545 	/*
546 	 * Figure out the reason(s) for the interrupt.  Note
547 	 * that the hal returns a pseudo-ISR that may include
548 	 * bits we haven't explicitly enabled so we mask the
549 	 * value to ensure we only process bits we requested.
550 	 */
551 	ath9k_hw_getisr(ah, &status);	/* NB: clears ISR too */
552 	status &= sc->imask;	/* discard unasked-for bits */
553 
554 	/*
555 	 * If there are no status bits set, then this interrupt was not
556 	 * for us (it should have been caught above).
557 	 */
558 	if (!status)
559 		return IRQ_NONE;
560 
561 	/* Cache the status */
562 	sc->intrstatus = status;
563 
564 	if (status & SCHED_INTR)
565 		sched = true;
566 
567 	/*
568 	 * If a FATAL or RXORN interrupt is received, we have to reset the
569 	 * chip immediately.
570 	 */
571 	if (status & (ATH9K_INT_FATAL | ATH9K_INT_RXORN))
572 		goto chip_reset;
573 
574 	if (status & ATH9K_INT_SWBA)
575 		tasklet_schedule(&sc->bcon_tasklet);
576 
577 	if (status & ATH9K_INT_TXURN)
578 		ath9k_hw_updatetxtriglevel(ah, true);
579 
580 	if (status & ATH9K_INT_MIB) {
581 		/*
582 		 * Disable interrupts until we service the MIB
583 		 * interrupt; otherwise it will continue to
584 		 * fire.
585 		 */
586 		ath9k_hw_set_interrupts(ah, 0);
587 		/*
588 		 * Let the hal handle the event. We assume
589 		 * it will clear whatever condition caused
590 		 * the interrupt.
591 		 */
592 		ath9k_hw_procmibevent(ah, &sc->nodestats);
593 		ath9k_hw_set_interrupts(ah, sc->imask);
594 	}
595 
596 	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
597 		if (status & ATH9K_INT_TIM_TIMER) {
598 			/* Clear RxAbort bit so that we can
599 			 * receive frames */
600 			ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
601 			ath9k_hw_setrxabort(sc->sc_ah, 0);
602 			sc->sc_flags |= SC_OP_WAIT_FOR_BEACON;
603 		}
604 
605 chip_reset:
606 
607 	ath_debug_stat_interrupt(sc, status);
608 
609 	if (sched) {
610 		/* turn off every interrupt except SWBA */
611 		ath9k_hw_set_interrupts(ah, (sc->imask & ATH9K_INT_SWBA));
612 		tasklet_schedule(&sc->intr_tq);
613 	}
614 
615 	return IRQ_HANDLED;
616 
617 #undef SCHED_INTR
618 }
619 
620 static u32 ath_get_extchanmode(struct ath_softc *sc,
621 			       struct ieee80211_channel *chan,
622 			       enum nl80211_channel_type channel_type)
623 {
624 	u32 chanmode = 0;
625 
626 	switch (chan->band) {
627 	case IEEE80211_BAND_2GHZ:
628 		switch(channel_type) {
629 		case NL80211_CHAN_NO_HT:
630 		case NL80211_CHAN_HT20:
631 			chanmode = CHANNEL_G_HT20;
632 			break;
633 		case NL80211_CHAN_HT40PLUS:
634 			chanmode = CHANNEL_G_HT40PLUS;
635 			break;
636 		case NL80211_CHAN_HT40MINUS:
637 			chanmode = CHANNEL_G_HT40MINUS;
638 			break;
639 		}
640 		break;
641 	case IEEE80211_BAND_5GHZ:
642 		switch(channel_type) {
643 		case NL80211_CHAN_NO_HT:
644 		case NL80211_CHAN_HT20:
645 			chanmode = CHANNEL_A_HT20;
646 			break;
647 		case NL80211_CHAN_HT40PLUS:
648 			chanmode = CHANNEL_A_HT40PLUS;
649 			break;
650 		case NL80211_CHAN_HT40MINUS:
651 			chanmode = CHANNEL_A_HT40MINUS;
652 			break;
653 		}
654 		break;
655 	default:
656 		break;
657 	}
658 
659 	return chanmode;
660 }
661 
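/*
 * Install a TKIP key.  The TX and RX MIC halves either share a single
 * key cache entry or, on "split mic" hardware, occupy two entries 32
 * slots apart; group keys always use a single entry since they are
 * used in one direction only.
 */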
662 static int ath_setkey_tkip(struct ath_softc *sc, u16 keyix, const u8 *key,
663 			   struct ath9k_keyval *hk, const u8 *addr,
664 			   bool authenticator)
665 {
666 	const u8 *key_rxmic;
667 	const u8 *key_txmic;
668 
669 	key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
670 	key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
671 
672 	if (addr == NULL) {
673 		/*
674 		 * Group key installation - only two key cache entries are used
675 		 * regardless of splitmic capability since group key is only
676 		 * used either for TX or RX.
677 		 */
678 		if (authenticator) {
679 			memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
680 			memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
681 		} else {
682 			memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
683 			memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
684 		}
685 		return ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, addr);
686 	}
687 	if (!sc->splitmic) {
688 		/* TX and RX keys share the same key cache entry. */
689 		memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
690 		memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
691 		return ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, addr);
692 	}
693 
694 	/* Separate key cache entries for TX and RX */
695 
696 	/* TX key goes at first index, RX key at +32. */
697 	memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
698 	if (!ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, NULL)) {
699 		/* TX MIC entry failed. No need to proceed further */
700 		DPRINTF(sc, ATH_DBG_FATAL,
701 			"Setting TX MIC Key Failed\n");
702 		return 0;
703 	}
704 
705 	memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
706 	/* XXX delete tx key on failure? */
707 	return ath9k_hw_set_keycache_entry(sc->sc_ah, keyix + 32, hk, addr);
708 }
709 
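/*
 * Find a key cache slot usable for a TKIP key.  A TKIP key occupies
 * the pair of entries i and i + 64 (plus i + 32 and i + 96 when split
 * mic is in use), so all of those entries must be free.
 */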
710 static int ath_reserve_key_cache_slot_tkip(struct ath_softc *sc)
711 {
712 	int i;
713 
714 	for (i = IEEE80211_WEP_NKID; i < sc->keymax / 2; i++) {
715 		if (test_bit(i, sc->keymap) ||
716 		    test_bit(i + 64, sc->keymap))
717 			continue; /* At least one part of TKIP key allocated */
718 		if (sc->splitmic &&
719 		    (test_bit(i + 32, sc->keymap) ||
720 		     test_bit(i + 64 + 32, sc->keymap)))
721 			continue; /* At least one part of TKIP key allocated */
722 
723 		/* Found a free slot for a TKIP key */
724 		return i;
725 	}
726 	return -1;
727 }
728 
729 static int ath_reserve_key_cache_slot(struct ath_softc *sc)
730 {
731 	int i;
732 
733 	/* First, try to find slots that would not be available for TKIP. */
734 	if (sc->splitmic) {
735 		for (i = IEEE80211_WEP_NKID; i < sc->keymax / 4; i++) {
736 			if (!test_bit(i, sc->keymap) &&
737 			    (test_bit(i + 32, sc->keymap) ||
738 			     test_bit(i + 64, sc->keymap) ||
739 			     test_bit(i + 64 + 32, sc->keymap)))
740 				return i;
741 			if (!test_bit(i + 32, sc->keymap) &&
742 			    (test_bit(i, sc->keymap) ||
743 			     test_bit(i + 64, sc->keymap) ||
744 			     test_bit(i + 64 + 32, sc->keymap)))
745 				return i + 32;
746 			if (!test_bit(i + 64, sc->keymap) &&
747 			    (test_bit(i, sc->keymap) ||
748 			     test_bit(i + 32, sc->keymap) ||
749 			     test_bit(i + 64 + 32, sc->keymap)))
750 				return i + 64;
751 			if (!test_bit(i + 64 + 32, sc->keymap) &&
752 			    (test_bit(i, sc->keymap) ||
753 			     test_bit(i + 32, sc->keymap) ||
754 			     test_bit(i + 64, sc->keymap)))
755 				return i + 64 + 32;
756 		}
757 	} else {
758 		for (i = IEEE80211_WEP_NKID; i < sc->keymax / 2; i++) {
759 			if (!test_bit(i, sc->keymap) &&
760 			    test_bit(i + 64, sc->keymap))
761 				return i;
762 			if (test_bit(i, sc->keymap) &&
763 			    !test_bit(i + 64, sc->keymap))
764 				return i + 64;
765 		}
766 	}
767 
768 	/* No partially used TKIP slots, pick any available slot */
769 	for (i = IEEE80211_WEP_NKID; i < sc->keymax; i++) {
770 		/* Do not allow slots that could be needed for TKIP group keys
771 		 * to be used. This limitation could be removed if we know that
772 		 * TKIP will not be used. */
773 		if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
774 			continue;
775 		if (sc->splitmic) {
776 			if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
777 				continue;
778 			if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
779 				continue;
780 		}
781 
782 		if (!test_bit(i, sc->keymap))
783 			return i; /* Found a free slot for a key */
784 	}
785 
786 	/* No free slot found */
787 	return -1;
788 }
789 
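/*
 * Translate a mac80211 key into the hardware key cache format, pick a
 * slot (the default key indices for group keys, a reserved slot for
 * pairwise keys) and program it.  Returns the slot index on success or
 * a negative errno on failure.
 */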
790 static int ath_key_config(struct ath_softc *sc,
791 			  struct ieee80211_vif *vif,
792 			  struct ieee80211_sta *sta,
793 			  struct ieee80211_key_conf *key)
794 {
795 	struct ath9k_keyval hk;
796 	const u8 *mac = NULL;
797 	int ret = 0;
798 	int idx;
799 
800 	memset(&hk, 0, sizeof(hk));
801 
802 	switch (key->alg) {
803 	case ALG_WEP:
804 		hk.kv_type = ATH9K_CIPHER_WEP;
805 		break;
806 	case ALG_TKIP:
807 		hk.kv_type = ATH9K_CIPHER_TKIP;
808 		break;
809 	case ALG_CCMP:
810 		hk.kv_type = ATH9K_CIPHER_AES_CCM;
811 		break;
812 	default:
813 		return -EOPNOTSUPP;
814 	}
815 
816 	hk.kv_len = key->keylen;
817 	memcpy(hk.kv_val, key->key, key->keylen);
818 
819 	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
820 		/* For now, use the default keys for broadcast keys. This may
821 		 * need to change with virtual interfaces. */
822 		idx = key->keyidx;
823 	} else if (key->keyidx) {
824 		if (WARN_ON(!sta))
825 			return -EOPNOTSUPP;
826 		mac = sta->addr;
827 
828 		if (vif->type != NL80211_IFTYPE_AP) {
829 			/* Only keyidx 0 should be used with unicast key, but
830 			 * allow this for client mode for now. */
831 			idx = key->keyidx;
832 		} else
833 			return -EIO;
834 	} else {
835 		if (WARN_ON(!sta))
836 			return -EOPNOTSUPP;
837 		mac = sta->addr;
838 
839 		if (key->alg == ALG_TKIP)
840 			idx = ath_reserve_key_cache_slot_tkip(sc);
841 		else
842 			idx = ath_reserve_key_cache_slot(sc);
843 		if (idx < 0)
844 			return -ENOSPC; /* no free key cache entries */
845 	}
846 
847 	if (key->alg == ALG_TKIP)
848 		ret = ath_setkey_tkip(sc, idx, key->key, &hk, mac,
849 				      vif->type == NL80211_IFTYPE_AP);
850 	else
851 		ret = ath9k_hw_set_keycache_entry(sc->sc_ah, idx, &hk, mac);
852 
853 	if (!ret)
854 		return -EIO;
855 
856 	set_bit(idx, sc->keymap);
857 	if (key->alg == ALG_TKIP) {
858 		set_bit(idx + 64, sc->keymap);
859 		if (sc->splitmic) {
860 			set_bit(idx + 32, sc->keymap);
861 			set_bit(idx + 64 + 32, sc->keymap);
862 		}
863 	}
864 
865 	return idx;
866 }
867 
868 static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key)
869 {
870 	ath9k_hw_keyreset(sc->sc_ah, key->hw_key_idx);
871 	if (key->hw_key_idx < IEEE80211_WEP_NKID)
872 		return;
873 
874 	clear_bit(key->hw_key_idx, sc->keymap);
875 	if (key->alg != ALG_TKIP)
876 		return;
877 
878 	clear_bit(key->hw_key_idx + 64, sc->keymap);
879 	if (sc->splitmic) {
880 		clear_bit(key->hw_key_idx + 32, sc->keymap);
881 		clear_bit(key->hw_key_idx + 64 + 32, sc->keymap);
882 	}
883 }
884 
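/*
 * Advertise the driver's HT capabilities to mac80211: 20/40 MHz
 * operation, short GI for 40 MHz, the supported A-MPDU parameters and
 * an MCS mask matching the RX chainmask (one or two spatial streams).
 */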
885 static void setup_ht_cap(struct ath_softc *sc,
886 			 struct ieee80211_sta_ht_cap *ht_info)
887 {
888 #define	ATH9K_HT_CAP_MAXRXAMPDU_65536 0x3	/* 2 ^ 16 */
889 #define	ATH9K_HT_CAP_MPDUDENSITY_8 0x6		/* 8 usec */
890 
891 	ht_info->ht_supported = true;
892 	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
893 		       IEEE80211_HT_CAP_SM_PS |
894 		       IEEE80211_HT_CAP_SGI_40 |
895 		       IEEE80211_HT_CAP_DSSSCCK40;
896 
897 	ht_info->ampdu_factor = ATH9K_HT_CAP_MAXRXAMPDU_65536;
898 	ht_info->ampdu_density = ATH9K_HT_CAP_MPDUDENSITY_8;
899 
900 	/* set up supported mcs set */
901 	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
902 
903 	switch(sc->rx_chainmask) {
904 	case 1:
905 		ht_info->mcs.rx_mask[0] = 0xff;
906 		break;
907 	case 3:
908 	case 5:
909 	case 7:
910 	default:
911 		ht_info->mcs.rx_mask[0] = 0xff;
912 		ht_info->mcs.rx_mask[1] = 0xff;
913 		break;
914 	}
915 
916 	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
917 }
918 
919 static void ath9k_bss_assoc_info(struct ath_softc *sc,
920 				 struct ieee80211_vif *vif,
921 				 struct ieee80211_bss_conf *bss_conf)
922 {
923 	struct ath_vif *avp = (void *)vif->drv_priv;
924 
925 	if (bss_conf->assoc) {
926 		DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info ASSOC %d, bssid: %pM\n",
927 			bss_conf->aid, sc->curbssid);
928 
929 		/* New association, store aid */
930 		if (avp->av_opmode == NL80211_IFTYPE_STATION) {
931 			sc->curaid = bss_conf->aid;
932 			ath9k_hw_write_associd(sc);
933 
934 			/*
935 			 * Request a re-configuration of Beacon related timers
936 			 * on the receipt of the first Beacon frame (i.e.,
937 			 * after time sync with the AP).
938 			 */
939 			sc->sc_flags |= SC_OP_BEACON_SYNC;
940 		}
941 
942 		/* Configure the beacon */
943 		ath_beacon_config(sc, vif);
944 
945 		/* Reset rssi stats */
946 		sc->nodestats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
947 		sc->nodestats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
948 		sc->nodestats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
949 		sc->nodestats.ns_avgtxrate = ATH_RATE_DUMMY_MARKER;
950 
951 		ath_start_ani(sc);
952 	} else {
953 		DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info DISASSOC\n");
954 		sc->curaid = 0;
955 	}
956 }
957 
958 /********************************/
959 /*	 LED functions		*/
960 /********************************/
961 
962 static void ath_led_blink_work(struct work_struct *work)
963 {
964 	struct ath_softc *sc = container_of(work, struct ath_softc,
965 					    ath_led_blink_work.work);
966 
967 	if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
968 		return;
969 
970 	if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
971 	    (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
972 		ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 0);
973 	else
974 		ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN,
975 				  (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
976 
977 	queue_delayed_work(sc->hw->workqueue, &sc->ath_led_blink_work,
978 			   (sc->sc_flags & SC_OP_LED_ON) ?
979 			   msecs_to_jiffies(sc->led_off_duration) :
980 			   msecs_to_jiffies(sc->led_on_duration));
981 
982 	sc->led_on_duration = sc->led_on_cnt ?
983 			max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
984 			ATH_LED_ON_DURATION_IDLE;
985 	sc->led_off_duration = sc->led_off_cnt ?
986 			max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
987 			ATH_LED_OFF_DURATION_IDLE;
988 	sc->led_on_cnt = sc->led_off_cnt = 0;
989 	if (sc->sc_flags & SC_OP_LED_ON)
990 		sc->sc_flags &= ~SC_OP_LED_ON;
991 	else
992 		sc->sc_flags |= SC_OP_LED_ON;
993 }
994 
995 static void ath_led_brightness(struct led_classdev *led_cdev,
996 			       enum led_brightness brightness)
997 {
998 	struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
999 	struct ath_softc *sc = led->sc;
1000 
1001 	switch (brightness) {
1002 	case LED_OFF:
1003 		if (led->led_type == ATH_LED_ASSOC ||
1004 		    led->led_type == ATH_LED_RADIO) {
1005 			ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN,
1006 				(led->led_type == ATH_LED_RADIO));
1007 			sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
1008 			if (led->led_type == ATH_LED_RADIO)
1009 				sc->sc_flags &= ~SC_OP_LED_ON;
1010 		} else {
1011 			sc->led_off_cnt++;
1012 		}
1013 		break;
1014 	case LED_FULL:
1015 		if (led->led_type == ATH_LED_ASSOC) {
1016 			sc->sc_flags |= SC_OP_LED_ASSOCIATED;
1017 			queue_delayed_work(sc->hw->workqueue,
1018 					   &sc->ath_led_blink_work, 0);
1019 		} else if (led->led_type == ATH_LED_RADIO) {
1020 			ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 0);
1021 			sc->sc_flags |= SC_OP_LED_ON;
1022 		} else {
1023 			sc->led_on_cnt++;
1024 		}
1025 		break;
1026 	default:
1027 		break;
1028 	}
1029 }
1030 
1031 static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
1032 			    char *trigger)
1033 {
1034 	int ret;
1035 
1036 	led->sc = sc;
1037 	led->led_cdev.name = led->name;
1038 	led->led_cdev.default_trigger = trigger;
1039 	led->led_cdev.brightness_set = ath_led_brightness;
1040 
1041 	ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
1042 	if (ret)
1043 		DPRINTF(sc, ATH_DBG_FATAL,
1044 			"Failed to register LED: %s\n", led->name);
1045 	else
1046 		led->registered = 1;
1047 	return ret;
1048 }
1049 
1050 static void ath_unregister_led(struct ath_led *led)
1051 {
1052 	if (led->registered) {
1053 		led_classdev_unregister(&led->led_cdev);
1054 		led->registered = 0;
1055 	}
1056 }
1057 
1058 static void ath_deinit_leds(struct ath_softc *sc)
1059 {
1060 	cancel_delayed_work_sync(&sc->ath_led_blink_work);
1061 	ath_unregister_led(&sc->assoc_led);
1062 	sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
1063 	ath_unregister_led(&sc->tx_led);
1064 	ath_unregister_led(&sc->rx_led);
1065 	ath_unregister_led(&sc->radio_led);
1066 	ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
1067 }
1068 
1069 static void ath_init_leds(struct ath_softc *sc)
1070 {
1071 	char *trigger;
1072 	int ret;
1073 
1074 	/* Configure gpio 1 for output */
1075 	ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
1076 			    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1077 	/* LED off, active low */
1078 	ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
1079 
1080 	INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
1081 
1082 	trigger = ieee80211_get_radio_led_name(sc->hw);
1083 	snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
1084 		"ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
1085 	ret = ath_register_led(sc, &sc->radio_led, trigger);
1086 	sc->radio_led.led_type = ATH_LED_RADIO;
1087 	if (ret)
1088 		goto fail;
1089 
1090 	trigger = ieee80211_get_assoc_led_name(sc->hw);
1091 	snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
1092 		"ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
1093 	ret = ath_register_led(sc, &sc->assoc_led, trigger);
1094 	sc->assoc_led.led_type = ATH_LED_ASSOC;
1095 	if (ret)
1096 		goto fail;
1097 
1098 	trigger = ieee80211_get_tx_led_name(sc->hw);
1099 	snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
1100 		"ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
1101 	ret = ath_register_led(sc, &sc->tx_led, trigger);
1102 	sc->tx_led.led_type = ATH_LED_TX;
1103 	if (ret)
1104 		goto fail;
1105 
1106 	trigger = ieee80211_get_rx_led_name(sc->hw);
1107 	snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
1108 		"ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
1109 	ret = ath_register_led(sc, &sc->rx_led, trigger);
1110 	sc->rx_led.led_type = ATH_LED_RX;
1111 	if (ret)
1112 		goto fail;
1113 
1114 	return;
1115 
1116 fail:
1117 	ath_deinit_leds(sc);
1118 }
1119 
1120 void ath_radio_enable(struct ath_softc *sc)
1121 {
1122 	struct ath_hw *ah = sc->sc_ah;
1123 	struct ieee80211_channel *channel = sc->hw->conf.channel;
1124 	int r;
1125 
1126 	ath9k_ps_wakeup(sc);
1127 	ath9k_hw_configpcipowersave(ah, 0);
1128 
1129 	if (!ah->curchan)
1130 		ah->curchan = ath_get_curchannel(sc, sc->hw);
1131 
1132 	spin_lock_bh(&sc->sc_resetlock);
1133 	r = ath9k_hw_reset(ah, ah->curchan, false);
1134 	if (r) {
1135 		DPRINTF(sc, ATH_DBG_FATAL,
1136 			"Unable to reset channel (%u MHz), "
1137 			"reset status %d\n",
1138 			channel->center_freq, r);
1139 	}
1140 	spin_unlock_bh(&sc->sc_resetlock);
1141 
1142 	ath_update_txpow(sc);
1143 	if (ath_startrecv(sc) != 0) {
1144 		DPRINTF(sc, ATH_DBG_FATAL,
1145 			"Unable to restart recv logic\n");
1146 		return;
1147 	}
1148 
1149 	if (sc->sc_flags & SC_OP_BEACONS)
1150 		ath_beacon_config(sc, NULL);	/* restart beacons */
1151 
1152 	/* Re-enable interrupts */
1153 	ath9k_hw_set_interrupts(ah, sc->imask);
1154 
1155 	/* Enable LED */
1156 	ath9k_hw_cfg_output(ah, ATH_LED_PIN,
1157 			    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1158 	ath9k_hw_set_gpio(ah, ATH_LED_PIN, 0);
1159 
1160 	ieee80211_wake_queues(sc->hw);
1161 	ath9k_ps_restore(sc);
1162 }
1163 
1164 void ath_radio_disable(struct ath_softc *sc)
1165 {
1166 	struct ath_hw *ah = sc->sc_ah;
1167 	struct ieee80211_channel *channel = sc->hw->conf.channel;
1168 	int r;
1169 
1170 	ath9k_ps_wakeup(sc);
1171 	ieee80211_stop_queues(sc->hw);
1172 
1173 	/* Disable LED */
1174 	ath9k_hw_set_gpio(ah, ATH_LED_PIN, 1);
1175 	ath9k_hw_cfg_gpio_input(ah, ATH_LED_PIN);
1176 
1177 	/* Disable interrupts */
1178 	ath9k_hw_set_interrupts(ah, 0);
1179 
1180 	ath_drain_all_txq(sc, false);	/* clear pending tx frames */
1181 	ath_stoprecv(sc);		/* turn off frame recv */
1182 	ath_flushrecv(sc);		/* flush recv queue */
1183 
1184 	if (!ah->curchan)
1185 		ah->curchan = ath_get_curchannel(sc, sc->hw);
1186 
1187 	spin_lock_bh(&sc->sc_resetlock);
1188 	r = ath9k_hw_reset(ah, ah->curchan, false);
1189 	if (r) {
1190 		DPRINTF(sc, ATH_DBG_FATAL,
1191 			"Unable to reset channel (%u MHz), "
1192 			"reset status %d\n",
1193 			channel->center_freq, r);
1194 	}
1195 	spin_unlock_bh(&sc->sc_resetlock);
1196 
1197 	ath9k_hw_phy_disable(ah);
1198 	ath9k_hw_configpcipowersave(ah, 1);
1199 	ath9k_ps_restore(sc);
1200 	ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
1201 }
1202 
1203 /*******************/
1204 /*	Rfkill	   */
1205 /*******************/
1206 
1207 static bool ath_is_rfkill_set(struct ath_softc *sc)
1208 {
1209 	struct ath_hw *ah = sc->sc_ah;
1210 
1211 	return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
1212 				  ah->rfkill_polarity;
1213 }
1214 
1215 static void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
1216 {
1217 	struct ath_wiphy *aphy = hw->priv;
1218 	struct ath_softc *sc = aphy->sc;
1219 	bool blocked = !!ath_is_rfkill_set(sc);
1220 
1221 	wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
1222 
1223 	if (blocked)
1224 		ath_radio_disable(sc);
1225 	else
1226 		ath_radio_enable(sc);
1227 }
1228 
1229 static void ath_start_rfkill_poll(struct ath_softc *sc)
1230 {
1231 	struct ath_hw *ah = sc->sc_ah;
1232 
1233 	if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1234 		wiphy_rfkill_start_polling(sc->hw->wiphy);
1235 }
1236 
1237 void ath_cleanup(struct ath_softc *sc)
1238 {
1239 	ath_detach(sc);
1240 	free_irq(sc->irq, sc);
1241 	ath_bus_cleanup(sc);
1242 	kfree(sc->sec_wiphy);
1243 	ieee80211_free_hw(sc->hw);
1244 }
1245 
1246 void ath_detach(struct ath_softc *sc)
1247 {
1248 	struct ieee80211_hw *hw = sc->hw;
1249 	int i = 0;
1250 
1251 	ath9k_ps_wakeup(sc);
1252 
1253 	DPRINTF(sc, ATH_DBG_CONFIG, "Detach ATH hw\n");
1254 
1255 	ath_deinit_leds(sc);
1256 	cancel_work_sync(&sc->chan_work);
1257 	cancel_delayed_work_sync(&sc->wiphy_work);
1258 
1259 	for (i = 0; i < sc->num_sec_wiphy; i++) {
1260 		struct ath_wiphy *aphy = sc->sec_wiphy[i];
1261 		if (aphy == NULL)
1262 			continue;
1263 		sc->sec_wiphy[i] = NULL;
1264 		ieee80211_unregister_hw(aphy->hw);
1265 		ieee80211_free_hw(aphy->hw);
1266 	}
1267 	ieee80211_unregister_hw(hw);
1268 	ath_rx_cleanup(sc);
1269 	ath_tx_cleanup(sc);
1270 
1271 	tasklet_kill(&sc->intr_tq);
1272 	tasklet_kill(&sc->bcon_tasklet);
1273 
1274 	if (!(sc->sc_flags & SC_OP_INVALID))
1275 		ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
1276 
1277 	/* cleanup tx queues */
1278 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1279 		if (ATH_TXQ_SETUP(sc, i))
1280 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1281 
1282 	ath9k_hw_detach(sc->sc_ah);
1283 	ath9k_exit_debug(sc);
1284 	ath9k_ps_restore(sc);
1285 }
1286 
1287 static int ath9k_reg_notifier(struct wiphy *wiphy,
1288 			      struct regulatory_request *request)
1289 {
1290 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1291 	struct ath_wiphy *aphy = hw->priv;
1292 	struct ath_softc *sc = aphy->sc;
1293 	struct ath_regulatory *reg = &sc->sc_ah->regulatory;
1294 
1295 	return ath_reg_notifier_apply(wiphy, request, reg);
1296 }
1297 
1298 static int ath_init(u16 devid, struct ath_softc *sc)
1299 {
1300 	struct ath_hw *ah = NULL;
1301 	int status;
1302 	int error = 0, i;
1303 	int csz = 0;
1304 
1305 	/* XXX: hardware will not be ready until ath_open() is called */
1306 	sc->sc_flags |= SC_OP_INVALID;
1307 
1308 	if (ath9k_init_debug(sc) < 0)
1309 		printk(KERN_ERR "Unable to create debugfs files\n");
1310 
1311 	spin_lock_init(&sc->wiphy_lock);
1312 	spin_lock_init(&sc->sc_resetlock);
1313 	spin_lock_init(&sc->sc_serial_rw);
1314 	mutex_init(&sc->mutex);
1315 	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
1316 	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
1317 		     (unsigned long)sc);
1318 
1319 	/*
1320 	 * Cache line size is used to size and align various
1321 	 * structures used to communicate with the hardware.
1322 	 */
1323 	ath_read_cachesize(sc, &csz);
1324 	/* XXX assert csz is non-zero */
1325 	sc->cachelsz = csz << 2;	/* convert to bytes */
1326 
1327 	ah = ath9k_hw_attach(devid, sc, &status);
1328 	if (ah == NULL) {
1329 		DPRINTF(sc, ATH_DBG_FATAL,
1330 			"Unable to attach hardware; HAL status %d\n", status);
1331 		error = -ENXIO;
1332 		goto bad;
1333 	}
1334 	sc->sc_ah = ah;
1335 
1336 	/* Get the hardware key cache size. */
1337 	sc->keymax = ah->caps.keycache_size;
1338 	if (sc->keymax > ATH_KEYMAX) {
1339 		DPRINTF(sc, ATH_DBG_ANY,
1340 			"Warning, using only %u entries in %u key cache\n",
1341 			ATH_KEYMAX, sc->keymax);
1342 		sc->keymax = ATH_KEYMAX;
1343 	}
1344 
1345 	/*
1346 	 * Reset the key cache since some parts do not
1347 	 * reset the contents on initial power up.
1348 	 */
1349 	for (i = 0; i < sc->keymax; i++)
1350 		ath9k_hw_keyreset(ah, (u16) i);
1351 
1352 	if (error)
1353 		goto bad;
1354 
1355 	/* default to MONITOR mode */
1356 	sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
1357 
1358 	/* Setup rate tables */
1359 
1360 	ath_rate_attach(sc);
1361 	ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
1362 	ath_setup_rates(sc, IEEE80211_BAND_5GHZ);
1363 
1364 	/*
1365 	 * Allocate hardware transmit queues: one queue for
1366 	 * beacon frames and one data queue for each QoS
1367 	 * priority.  Note that the hal handles resetting
1368 	 * these queues at the needed time.
1369 	 */
1370 	sc->beacon.beaconq = ath_beaconq_setup(ah);
1371 	if (sc->beacon.beaconq == -1) {
1372 		DPRINTF(sc, ATH_DBG_FATAL,
1373 			"Unable to setup a beacon xmit queue\n");
1374 		error = -EIO;
1375 		goto bad2;
1376 	}
1377 	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
1378 	if (sc->beacon.cabq == NULL) {
1379 		DPRINTF(sc, ATH_DBG_FATAL,
1380 			"Unable to setup CAB xmit queue\n");
1381 		error = -EIO;
1382 		goto bad2;
1383 	}
1384 
1385 	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
1386 	ath_cabq_update(sc);
1387 
1388 	for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
1389 		sc->tx.hwq_map[i] = -1;
1390 
1391 	/* Setup data queues */
1392 	/* NB: ensure BK queue is the lowest priority h/w queue */
1393 	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
1394 		DPRINTF(sc, ATH_DBG_FATAL,
1395 			"Unable to setup xmit queue for BK traffic\n");
1396 		error = -EIO;
1397 		goto bad2;
1398 	}
1399 
1400 	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
1401 		DPRINTF(sc, ATH_DBG_FATAL,
1402 			"Unable to setup xmit queue for BE traffic\n");
1403 		error = -EIO;
1404 		goto bad2;
1405 	}
1406 	if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
1407 		DPRINTF(sc, ATH_DBG_FATAL,
1408 			"Unable to setup xmit queue for VI traffic\n");
1409 		error = -EIO;
1410 		goto bad2;
1411 	}
1412 	if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
1413 		DPRINTF(sc, ATH_DBG_FATAL,
1414 			"Unable to setup xmit queue for VO traffic\n");
1415 		error = -EIO;
1416 		goto bad2;
1417 	}
1418 
1419 	/* Initialize the noise floor to a reasonable default value;
1420 	 * it will be updated later during ANI processing. */
1421 
1422 	sc->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
1423 	setup_timer(&sc->ani.timer, ath_ani_calibrate, (unsigned long)sc);
1424 
1425 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1426 				   ATH9K_CIPHER_TKIP, NULL)) {
1427 		/*
1428 		 * Whether we should enable h/w TKIP MIC.
1429 		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
1430 		 * report WMM capable, so it's always safe to turn on
1431 		 * TKIP MIC in this case.
1432 		 */
1433 		ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
1434 				       0, 1, NULL);
1435 	}
1436 
1437 	/*
1438 	 * Check whether separate key cache entries are
1439 	 * required to handle both tx and rx MIC keys.
1440 	 * With split mic keys the number of stations is limited
1441 	 * to 27; otherwise it is 59.
1442 	 */
1443 	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1444 				   ATH9K_CIPHER_TKIP, NULL)
1445 	    && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1446 				      ATH9K_CIPHER_MIC, NULL)
1447 	    && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
1448 				      0, NULL))
1449 		sc->splitmic = 1;
1450 
1451 	/* turn on mcast key search if possible */
1452 	if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
1453 		(void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
1454 					     1, NULL);
1455 
1456 	sc->config.txpowlimit = ATH_TXPOWER_MAX;
1457 
1458 	/* 11n Capabilities */
1459 	if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1460 		sc->sc_flags |= SC_OP_TXAGGR;
1461 		sc->sc_flags |= SC_OP_RXAGGR;
1462 	}
1463 
1464 	sc->tx_chainmask = ah->caps.tx_chainmask;
1465 	sc->rx_chainmask = ah->caps.rx_chainmask;
1466 
1467 	ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
1468 	sc->rx.defant = ath9k_hw_getdefantenna(ah);
1469 
1470 	if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
1471 		memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN);
1472 
1473 	sc->beacon.slottime = ATH9K_SLOT_TIME_9;	/* default to short slot time */
1474 
1475 	/* initialize beacon slots */
1476 	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
1477 		sc->beacon.bslot[i] = NULL;
1478 		sc->beacon.bslot_aphy[i] = NULL;
1479 	}
1480 
1481 	/* setup channels and rates */
1482 
1483 	sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
1484 	sc->sbands[IEEE80211_BAND_2GHZ].bitrates =
1485 		sc->rates[IEEE80211_BAND_2GHZ];
1486 	sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
1487 	sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
1488 		ARRAY_SIZE(ath9k_2ghz_chantable);
1489 
1490 	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
1491 		sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
1492 		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
1493 			sc->rates[IEEE80211_BAND_5GHZ];
1494 		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
1495 		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
1496 			ARRAY_SIZE(ath9k_5ghz_chantable);
1497 	}
1498 
1499 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BT_COEX)
1500 		ath9k_hw_btcoex_enable(sc->sc_ah);
1501 
1502 	return 0;
1503 bad2:
1504 	/* cleanup tx queues */
1505 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1506 		if (ATH_TXQ_SETUP(sc, i))
1507 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1508 bad:
1509 	if (ah)
1510 		ath9k_hw_detach(ah);
1511 	ath9k_exit_debug(sc);
1512 
1513 	return error;
1514 }
1515 
1516 void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1517 {
1518 	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1519 		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1520 		IEEE80211_HW_SIGNAL_DBM |
1521 		IEEE80211_HW_AMPDU_AGGREGATION |
1522 		IEEE80211_HW_SUPPORTS_PS |
1523 		IEEE80211_HW_PS_NULLFUNC_STACK |
1524 		IEEE80211_HW_SPECTRUM_MGMT;
1525 
1526 	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
1527 		hw->flags |= IEEE80211_HW_MFP_CAPABLE;
1528 
1529 	hw->wiphy->interface_modes =
1530 		BIT(NL80211_IFTYPE_AP) |
1531 		BIT(NL80211_IFTYPE_STATION) |
1532 		BIT(NL80211_IFTYPE_ADHOC) |
1533 		BIT(NL80211_IFTYPE_MESH_POINT);
1534 
1535 	hw->queues = 4;
1536 	hw->max_rates = 4;
1537 	hw->channel_change_time = 5000;
1538 	hw->max_listen_interval = 10;
1539 	hw->max_rate_tries = ATH_11N_TXMAXTRY;
1540 	hw->sta_data_size = sizeof(struct ath_node);
1541 	hw->vif_data_size = sizeof(struct ath_vif);
1542 
1543 	hw->rate_control_algorithm = "ath9k_rate_control";
1544 
1545 	hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1546 		&sc->sbands[IEEE80211_BAND_2GHZ];
1547 	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
1548 		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1549 			&sc->sbands[IEEE80211_BAND_5GHZ];
1550 }
1551 
1552 int ath_attach(u16 devid, struct ath_softc *sc)
1553 {
1554 	struct ieee80211_hw *hw = sc->hw;
1555 	int error = 0, i;
1556 	struct ath_regulatory *reg;
1557 
1558 	DPRINTF(sc, ATH_DBG_CONFIG, "Attach ATH hw\n");
1559 
1560 	error = ath_init(devid, sc);
1561 	if (error != 0)
1562 		return error;
1563 
1564 	/* get mac address from hardware and set in mac80211 */
1565 
1566 	SET_IEEE80211_PERM_ADDR(hw, sc->sc_ah->macaddr);
1567 
1568 	ath_set_hw_capab(sc, hw);
1569 
1570 	error = ath_regd_init(&sc->sc_ah->regulatory, sc->hw->wiphy,
1571 			      ath9k_reg_notifier);
1572 	if (error)
1573 		return error;
1574 
1575 	reg = &sc->sc_ah->regulatory;
1576 
1577 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1578 		setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
1579 		if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
1580 			setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
1581 	}
1582 
1583 	/* initialize tx/rx engine */
1584 	error = ath_tx_init(sc, ATH_TXBUF);
1585 	if (error != 0)
1586 		goto error_attach;
1587 
1588 	error = ath_rx_init(sc, ATH_RXBUF);
1589 	if (error != 0)
1590 		goto error_attach;
1591 
1592 	INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
1593 	INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
1594 	sc->wiphy_scheduler_int = msecs_to_jiffies(500);
1595 
1596 	error = ieee80211_register_hw(hw);
	if (error != 0)		/* bail out if mac80211 registration failed */
		goto error_attach;
1597 
1598 	if (!ath_is_world_regd(reg)) {
1599 		error = regulatory_hint(hw->wiphy, reg->alpha2);
1600 		if (error)
1601 			goto error_attach;
1602 	}
1603 
1604 	/* Initialize LED control */
1605 	ath_init_leds(sc);
1606 
1607 	ath_start_rfkill_poll(sc);
1608 
1609 	return 0;
1610 
1611 error_attach:
1612 	/* cleanup tx queues */
1613 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1614 		if (ATH_TXQ_SETUP(sc, i))
1615 			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1616 
1617 	ath9k_hw_detach(sc->sc_ah);
1618 	ath9k_exit_debug(sc);
1619 
1620 	return error;
1621 }
1622 
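/*
 * Full chip reset on the current channel: drain TX, stop and flush RX,
 * reset the hardware and then restart receive, beacons and interrupts.
 * With retry_tx, frames left on the TX queues are rescheduled afterwards.
 */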
1623 int ath_reset(struct ath_softc *sc, bool retry_tx)
1624 {
1625 	struct ath_hw *ah = sc->sc_ah;
1626 	struct ieee80211_hw *hw = sc->hw;
1627 	int r;
1628 
1629 	ath9k_hw_set_interrupts(ah, 0);
1630 	ath_drain_all_txq(sc, retry_tx);
1631 	ath_stoprecv(sc);
1632 	ath_flushrecv(sc);
1633 
1634 	spin_lock_bh(&sc->sc_resetlock);
1635 	r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
1636 	if (r)
1637 		DPRINTF(sc, ATH_DBG_FATAL,
1638 			"Unable to reset hardware; reset status %d\n", r);
1639 	spin_unlock_bh(&sc->sc_resetlock);
1640 
1641 	if (ath_startrecv(sc) != 0)
1642 		DPRINTF(sc, ATH_DBG_FATAL, "Unable to start recv logic\n");
1643 
1644 	/*
1645 	 * We may be doing a reset in response to a request
1646 	 * that changes the channel so update any state that
1647 	 * might change as a result.
1648 	 */
1649 	ath_cache_conf_rate(sc, &hw->conf);
1650 
1651 	ath_update_txpow(sc);
1652 
1653 	if (sc->sc_flags & SC_OP_BEACONS)
1654 		ath_beacon_config(sc, NULL);	/* restart beacons */
1655 
1656 	ath9k_hw_set_interrupts(ah, sc->imask);
1657 
1658 	if (retry_tx) {
1659 		int i;
1660 		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1661 			if (ATH_TXQ_SETUP(sc, i)) {
1662 				spin_lock_bh(&sc->tx.txq[i].axq_lock);
1663 				ath_txq_schedule(sc, &sc->tx.txq[i]);
1664 				spin_unlock_bh(&sc->tx.txq[i].axq_lock);
1665 			}
1666 		}
1667 	}
1668 
1669 	return r;
1670 }
1671 
1672 /*
1673  *  This function allocates both the DMA descriptor structure and the
1674  *  buffers it contains.  These hold the descriptors that are handed
1675  *  to the hardware.
1676 */
1677 int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
1678 		      struct list_head *head, const char *name,
1679 		      int nbuf, int ndesc)
1680 {
1681 #define	DS2PHYS(_dd, _ds)						\
1682 	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
1683 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
1684 #define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
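/*
 * Example: a descriptor whose bus address ends in 0xF80 would cross a
 * 4 KB page boundary during a 128-byte (32 dword) fetch, which is why
 * ATH_DESC_4KB_BOUND_CHECK flags any in-page offset above 0xF7F.
 */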
1685 
1686 	struct ath_desc *ds;
1687 	struct ath_buf *bf;
1688 	int i, bsize, error;
1689 
1690 	DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
1691 		name, nbuf, ndesc);
1692 
1693 	INIT_LIST_HEAD(head);
1694 	/* ath_desc must be a multiple of DWORDs */
1695 	if ((sizeof(struct ath_desc) % 4) != 0) {
1696 		DPRINTF(sc, ATH_DBG_FATAL, "ath_desc not DWORD aligned\n");
1697 		ASSERT((sizeof(struct ath_desc) % 4) == 0);
1698 		error = -ENOMEM;
1699 		goto fail;
1700 	}
1701 
1702 	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
1703 
1704 	/*
1705 	 * Need additional DMA memory because we can't use
1706 	 * descriptors that cross the 4K page boundary. Assume
1707 	 * one skipped descriptor per 4K page.
1708 	 */
1709 	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1710 		u32 ndesc_skipped =
1711 			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
1712 		u32 dma_len;
1713 
1714 		while (ndesc_skipped) {
1715 			dma_len = ndesc_skipped * sizeof(struct ath_desc);
1716 			dd->dd_desc_len += dma_len;
1717 
1718 			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
1719 		}
1720 	}
1721 
1722 	/* allocate descriptors */
1723 	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
1724 					 &dd->dd_desc_paddr, GFP_KERNEL);
1725 	if (dd->dd_desc == NULL) {
1726 		error = -ENOMEM;
1727 		goto fail;
1728 	}
1729 	ds = dd->dd_desc;
1730 	DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
1731 		name, ds, (u32) dd->dd_desc_len,
1732 		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
1733 
1734 	/* allocate buffers */
1735 	bsize = sizeof(struct ath_buf) * nbuf;
1736 	bf = kzalloc(bsize, GFP_KERNEL);
1737 	if (bf == NULL) {
1738 		error = -ENOMEM;
1739 		goto fail2;
1740 	}
1741 	dd->dd_bufptr = bf;
1742 
1743 	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
1744 		bf->bf_desc = ds;
1745 		bf->bf_daddr = DS2PHYS(dd, ds);
1746 
1747 		if (!(sc->sc_ah->caps.hw_caps &
1748 		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1749 			/*
1750 			 * Skip descriptor addresses which can cause 4KB
1751 			 * boundary crossing (addr + length) with a 32 dword
1752 			 * descriptor fetch.
1753 			 */
1754 			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
1755 				ASSERT((caddr_t) bf->bf_desc <
1756 				       ((caddr_t) dd->dd_desc +
1757 					dd->dd_desc_len));
1758 
1759 				ds += ndesc;
1760 				bf->bf_desc = ds;
1761 				bf->bf_daddr = DS2PHYS(dd, ds);
1762 			}
1763 		}
1764 		list_add_tail(&bf->list, head);
1765 	}
1766 	return 0;
1767 fail2:
1768 	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
1769 			  dd->dd_desc_paddr);
1770 fail:
1771 	memset(dd, 0, sizeof(*dd));
1772 	return error;
1773 #undef ATH_DESC_4KB_BOUND_CHECK
1774 #undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
1775 #undef DS2PHYS
1776 }
1777 
1778 void ath_descdma_cleanup(struct ath_softc *sc,
1779 			 struct ath_descdma *dd,
1780 			 struct list_head *head)
1781 {
1782 	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
1783 			  dd->dd_desc_paddr);
1784 
1785 	INIT_LIST_HEAD(head);
1786 	kfree(dd->dd_bufptr);
1787 	memset(dd, 0, sizeof(*dd));
1788 }
1789 
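/*
 * Map a mac80211 queue number (0 = highest priority, i.e. VO) to the
 * hardware TX queue set up for the corresponding WME access category,
 * falling back to the BE queue for anything unexpected.
 */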
1790 int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
1791 {
1792 	int qnum;
1793 
1794 	switch (queue) {
1795 	case 0:
1796 		qnum = sc->tx.hwq_map[ATH9K_WME_AC_VO];
1797 		break;
1798 	case 1:
1799 		qnum = sc->tx.hwq_map[ATH9K_WME_AC_VI];
1800 		break;
1801 	case 2:
1802 		qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
1803 		break;
1804 	case 3:
1805 		qnum = sc->tx.hwq_map[ATH9K_WME_AC_BK];
1806 		break;
1807 	default:
1808 		qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
1809 		break;
1810 	}
1811 
1812 	return qnum;
1813 }
1814 
1815 int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
1816 {
1817 	int qnum;
1818 
1819 	switch (queue) {
1820 	case ATH9K_WME_AC_VO:
1821 		qnum = 0;
1822 		break;
1823 	case ATH9K_WME_AC_VI:
1824 		qnum = 1;
1825 		break;
1826 	case ATH9K_WME_AC_BE:
1827 		qnum = 2;
1828 		break;
1829 	case ATH9K_WME_AC_BK:
1830 		qnum = 3;
1831 		break;
1832 	default:
1833 		qnum = -1;
1834 		break;
1835 	}
1836 
1837 	return qnum;
1838 }
1839 
1840 /* XXX: Remove me once we don't depend on ath9k_channel for all
1841  * this redundant data */
1842 void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
1843 			   struct ath9k_channel *ichan)
1844 {
1845 	struct ieee80211_channel *chan = hw->conf.channel;
1846 	struct ieee80211_conf *conf = &hw->conf;
1847 
1848 	ichan->channel = chan->center_freq;
1849 	ichan->chan = chan;
1850 
1851 	if (chan->band == IEEE80211_BAND_2GHZ) {
1852 		ichan->chanmode = CHANNEL_G;
1853 		ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM;
1854 	} else {
1855 		ichan->chanmode = CHANNEL_A;
1856 		ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
1857 	}
1858 
1859 	sc->tx_chan_width = ATH9K_HT_MACMODE_20;
1860 
1861 	if (conf_is_ht(conf)) {
1862 		if (conf_is_ht40(conf))
1863 			sc->tx_chan_width = ATH9K_HT_MACMODE_2040;
1864 
1865 		ichan->chanmode = ath_get_extchanmode(sc, chan,
1866 					    conf->channel_type);
1867 	}
1868 }
1869 
1870 /**********************/
1871 /* mac80211 callbacks */
1872 /**********************/
1873 
1874 static int ath9k_start(struct ieee80211_hw *hw)
1875 {
1876 	struct ath_wiphy *aphy = hw->priv;
1877 	struct ath_softc *sc = aphy->sc;
1878 	struct ieee80211_channel *curchan = hw->conf.channel;
1879 	struct ath9k_channel *init_channel;
1880 	int r;
1881 
1882 	DPRINTF(sc, ATH_DBG_CONFIG, "Starting driver with "
1883 		"initial channel: %d MHz\n", curchan->center_freq);
1884 
1885 	mutex_lock(&sc->mutex);
1886 
1887 	if (ath9k_wiphy_started(sc)) {
1888 		if (sc->chan_idx == curchan->hw_value) {
1889 			/*
1890 			 * Already on the operational channel, the new wiphy
1891 			 * can be marked active.
1892 			 */
1893 			aphy->state = ATH_WIPHY_ACTIVE;
1894 			ieee80211_wake_queues(hw);
1895 		} else {
1896 			/*
1897 			 * Another wiphy is on another channel, start the new
1898 			 * wiphy in paused state.
1899 			 */
1900 			aphy->state = ATH_WIPHY_PAUSED;
1901 			ieee80211_stop_queues(hw);
1902 		}
1903 		mutex_unlock(&sc->mutex);
1904 		return 0;
1905 	}
1906 	aphy->state = ATH_WIPHY_ACTIVE;
1907 
1908 	/* setup initial channel */
1909 
1910 	sc->chan_idx = curchan->hw_value;
1911 
1912 	init_channel = ath_get_curchannel(sc, hw);
1913 
1914 	/* Reset SERDES registers */
1915 	ath9k_hw_configpcipowersave(sc->sc_ah, 0);
1916 
1917 	/*
1918 	 * The basic interface to setting the hardware in a good
1919 	 * state is ``reset''.  On return the hardware is known to
1920 	 * be powered up and with interrupts disabled.  This must
1921 	 * be followed by initialization of the appropriate bits
1922 	 * and then setup of the interrupt mask.
1923 	 */
1924 	spin_lock_bh(&sc->sc_resetlock);
1925 	r = ath9k_hw_reset(sc->sc_ah, init_channel, false);
1926 	if (r) {
1927 		DPRINTF(sc, ATH_DBG_FATAL,
1928 			"Unable to reset hardware; reset status %d "
1929 			"(freq %u MHz)\n", r,
1930 			curchan->center_freq);
1931 		spin_unlock_bh(&sc->sc_resetlock);
1932 		goto mutex_unlock;
1933 	}
1934 	spin_unlock_bh(&sc->sc_resetlock);
1935 
1936 	/*
1937 	 * This is needed only to setup initial state
1938 	 * but it's best done after a reset.
1939 	 */
1940 	ath_update_txpow(sc);
1941 
1942 	/*
1943 	 * Setup the hardware after reset:
1944 	 * The receive engine is started.
1945 	 * Frame transmit is handled entirely
1946 	 * in the frame output path; there's nothing to do
1947 	 * here except setup the interrupt mask.
1948 	 */
1949 	if (ath_startrecv(sc) != 0) {
1950 		DPRINTF(sc, ATH_DBG_FATAL, "Unable to start recv logic\n");
1951 		r = -EIO;
1952 		goto mutex_unlock;
1953 	}
1954 
1955 	/* Setup our intr mask. */
1956 	sc->imask = ATH9K_INT_RX | ATH9K_INT_TX
1957 		| ATH9K_INT_RXEOL | ATH9K_INT_RXORN
1958 		| ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
1959 
1960 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_GTT)
1961 		sc->imask |= ATH9K_INT_GTT;
1962 
1963 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
1964 		sc->imask |= ATH9K_INT_CST;
1965 
1966 	ath_cache_conf_rate(sc, &hw->conf);
1967 
1968 	sc->sc_flags &= ~SC_OP_INVALID;
1969 
1970 	/* Disable SWBA and BMISS interrupts when we're not associated */
1971 	sc->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1972 	ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
1973 
1974 	ieee80211_wake_queues(hw);
1975 
1976 mutex_unlock:
1977 	mutex_unlock(&sc->mutex);
1978 
1979 	return r;
1980 }
1981 
1982 static int ath9k_tx(struct ieee80211_hw *hw,
1983 		    struct sk_buff *skb)
1984 {
1985 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1986 	struct ath_wiphy *aphy = hw->priv;
1987 	struct ath_softc *sc = aphy->sc;
1988 	struct ath_tx_control txctl;
1989 	int hdrlen, padsize;
1990 
1991 	if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
1992 		printk(KERN_DEBUG "ath9k: %s: TX in unexpected wiphy state "
1993 		       "%d\n", wiphy_name(hw->wiphy), aphy->state);
1994 		goto exit;
1995 	}
1996 
1997 	if (sc->hw->conf.flags & IEEE80211_CONF_PS) {
1998 		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1999 		/*
2000 		 * mac80211 does not set PM field for normal data frames, so we
2001 		 * need to update that based on the current PS mode.
2002 		 */
2003 		if (ieee80211_is_data(hdr->frame_control) &&
2004 		    !ieee80211_is_nullfunc(hdr->frame_control) &&
2005 		    !ieee80211_has_pm(hdr->frame_control)) {
2006 			DPRINTF(sc, ATH_DBG_PS, "Add PM=1 for a TX frame "
2007 				"while in PS mode\n");
2008 			hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
2009 		}
2010 	}
2011 
2012 	if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) {
2013 		/*
2014 		 * We are using PS-Poll and mac80211 can request TX while in
2015 		 * power save mode. Need to wake up hardware for the TX to be
2016 		 * completed and if needed, also for RX of buffered frames.
2017 		 */
2018 		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2019 		ath9k_ps_wakeup(sc);
2020 		ath9k_hw_setrxabort(sc->sc_ah, 0);
2021 		if (ieee80211_is_pspoll(hdr->frame_control)) {
2022 			DPRINTF(sc, ATH_DBG_PS, "Sending PS-Poll to pick a "
2023 				"buffered frame\n");
2024 			sc->sc_flags |= SC_OP_WAIT_FOR_PSPOLL_DATA;
2025 		} else {
2026 			DPRINTF(sc, ATH_DBG_PS, "Wake up to complete TX\n");
2027 			sc->sc_flags |= SC_OP_WAIT_FOR_TX_ACK;
2028 		}
2029 		/*
2030 		 * The actual restore operation will happen only after
2031 		 * the sc_flags bit is cleared. We are just dropping
2032 		 * the ps_usecount here.
2033 		 */
2034 		ath9k_ps_restore(sc);
2035 	}
2036 
2037 	memset(&txctl, 0, sizeof(struct ath_tx_control));
2038 
2039 	/*
2040 	 * As a temporary workaround, assign seq# here; this will likely need
2041 	 * to be cleaned up to work better with Beacon transmission and virtual
2042 	 * BSSes.
2043 	 */
2044 	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
2045 		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2046 		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
2047 			sc->tx.seq_no += 0x10;
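		/* Keep the fragment number, replace the sequence number. */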
2048 		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
2049 		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
2050 	}
2051 
2052 	/* Add the padding after the header if this is not already done */
2053 	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
2054 	if (hdrlen & 3) {
2055 		padsize = hdrlen % 4;
2056 		if (skb_headroom(skb) < padsize)
2057 			return -1;
2058 		skb_push(skb, padsize);
2059 		memmove(skb->data, skb->data + padsize, hdrlen);
2060 	}
2061 
2062 	/* Check if a tx queue is available */
2063 
2064 	txctl.txq = ath_test_get_txq(sc, skb);
2065 	if (!txctl.txq)
2066 		goto exit;
2067 
2068 	DPRINTF(sc, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
2069 
2070 	if (ath_tx_start(hw, skb, &txctl) != 0) {
2071 		DPRINTF(sc, ATH_DBG_XMIT, "TX failed\n");
2072 		goto exit;
2073 	}
2074 
2075 	return 0;
2076 exit:
2077 	dev_kfree_skb_any(skb);
2078 	return 0;
2079 }
2080 
2081 static void ath9k_stop(struct ieee80211_hw *hw)
2082 {
2083 	struct ath_wiphy *aphy = hw->priv;
2084 	struct ath_softc *sc = aphy->sc;
2085 
2086 	aphy->state = ATH_WIPHY_INACTIVE;
2087 
2088 	if (sc->sc_flags & SC_OP_INVALID) {
2089 		DPRINTF(sc, ATH_DBG_ANY, "Device not present\n");
2090 		return;
2091 	}
2092 
2093 	mutex_lock(&sc->mutex);
2094 
2095 	ieee80211_stop_queues(hw);
2096 
2097 	if (ath9k_wiphy_started(sc)) {
2098 		mutex_unlock(&sc->mutex);
2099 		return; /* another wiphy still in use */
2100 	}
2101 
2102 	/* make sure h/w will not generate any interrupt
2103 	 * before setting the invalid flag. */
2104 	ath9k_hw_set_interrupts(sc->sc_ah, 0);
2105 
2106 	if (!(sc->sc_flags & SC_OP_INVALID)) {
2107 		ath_drain_all_txq(sc, false);
2108 		ath_stoprecv(sc);
2109 		ath9k_hw_phy_disable(sc->sc_ah);
2110 	} else
2111 		sc->rx.rxlink = NULL;
2112 
2113 	wiphy_rfkill_stop_polling(sc->hw->wiphy);
2114 
2115 	/* disable HAL and put h/w to sleep */
2116 	ath9k_hw_disable(sc->sc_ah);
2117 	ath9k_hw_configpcipowersave(sc->sc_ah, 1);
2118 
2119 	sc->sc_flags |= SC_OP_INVALID;
2120 
2121 	mutex_unlock(&sc->mutex);
2122 
2123 	DPRINTF(sc, ATH_DBG_CONFIG, "Driver halt\n");
2124 }
2125 
2126 static int ath9k_add_interface(struct ieee80211_hw *hw,
2127 			       struct ieee80211_if_init_conf *conf)
2128 {
2129 	struct ath_wiphy *aphy = hw->priv;
2130 	struct ath_softc *sc = aphy->sc;
2131 	struct ath_vif *avp = (void *)conf->vif->drv_priv;
2132 	enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
2133 	int ret = 0;
2134 
2135 	mutex_lock(&sc->mutex);
2136 
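	/*
	 * Without hardware BSSID mask support only a single interface
	 * can be active at a time.
	 */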
2137 	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) &&
2138 	    sc->nvifs > 0) {
2139 		ret = -ENOBUFS;
2140 		goto out;
2141 	}
2142 
2143 	switch (conf->type) {
2144 	case NL80211_IFTYPE_STATION:
2145 		ic_opmode = NL80211_IFTYPE_STATION;
2146 		break;
2147 	case NL80211_IFTYPE_ADHOC:
2148 	case NL80211_IFTYPE_AP:
2149 	case NL80211_IFTYPE_MESH_POINT:
2150 		if (sc->nbcnvifs >= ATH_BCBUF) {
2151 			ret = -ENOBUFS;
2152 			goto out;
2153 		}
2154 		ic_opmode = conf->type;
2155 		break;
2156 	default:
2157 		DPRINTF(sc, ATH_DBG_FATAL,
2158 			"Interface type %d not yet supported\n", conf->type);
2159 		ret = -EOPNOTSUPP;
2160 		goto out;
2161 	}
2162 
2163 	DPRINTF(sc, ATH_DBG_CONFIG, "Attach a VIF of type: %d\n", ic_opmode);
2164 
2165 	/* Set the VIF opmode */
2166 	avp->av_opmode = ic_opmode;
2167 	avp->av_bslot = -1;
2168 
2169 	sc->nvifs++;
2170 
2171 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
2172 		ath9k_set_bssid_mask(hw);
2173 
2174 	if (sc->nvifs > 1)
2175 		goto out; /* skip global settings for secondary vif */
2176 
2177 	if (ic_opmode == NL80211_IFTYPE_AP) {
2178 		ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
2179 		sc->sc_flags |= SC_OP_TSF_RESET;
2180 	}
2181 
2182 	/* Set the device opmode */
2183 	sc->sc_ah->opmode = ic_opmode;
2184 
2185 	/*
2186 	 * Enable MIB interrupts when there are hardware phy counters.
2187 	 * Note we only do this (at the moment) for station, adhoc and mesh modes.
2188 	 */
2189 	if ((conf->type == NL80211_IFTYPE_STATION) ||
2190 	    (conf->type == NL80211_IFTYPE_ADHOC) ||
2191 	    (conf->type == NL80211_IFTYPE_MESH_POINT)) {
2192 		if (ath9k_hw_phycounters(sc->sc_ah))
2193 			sc->imask |= ATH9K_INT_MIB;
2194 		sc->imask |= ATH9K_INT_TSFOOR;
2195 	}
2196 
2197 	ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
2198 
2199 	if (conf->type == NL80211_IFTYPE_AP)
2200 		ath_start_ani(sc);
2201 
2202 out:
2203 	mutex_unlock(&sc->mutex);
2204 	return ret;
2205 }
2206 
2207 static void ath9k_remove_interface(struct ieee80211_hw *hw,
2208 				   struct ieee80211_if_init_conf *conf)
2209 {
2210 	struct ath_wiphy *aphy = hw->priv;
2211 	struct ath_softc *sc = aphy->sc;
2212 	struct ath_vif *avp = (void *)conf->vif->drv_priv;
2213 	int i;
2214 
2215 	DPRINTF(sc, ATH_DBG_CONFIG, "Detach Interface\n");
2216 
2217 	mutex_lock(&sc->mutex);
2218 
2219 	/* Stop ANI */
2220 	del_timer_sync(&sc->ani.timer);
2221 
2222 	/* Reclaim beacon resources */
2223 	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
2224 	    (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
2225 	    (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
2226 		ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2227 		ath_beacon_return(sc, avp);
2228 	}
2229 
2230 	sc->sc_flags &= ~SC_OP_BEACONS;
2231 
2232 	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
2233 		if (sc->beacon.bslot[i] == conf->vif) {
2234 			printk(KERN_DEBUG "%s: vif had allocated beacon "
2235 			       "slot\n", __func__);
2236 			sc->beacon.bslot[i] = NULL;
2237 			sc->beacon.bslot_aphy[i] = NULL;
2238 		}
2239 	}
2240 
2241 	sc->nvifs--;
2242 
2243 	mutex_unlock(&sc->mutex);
2244 }
2245 
2246 static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2247 {
2248 	struct ath_wiphy *aphy = hw->priv;
2249 	struct ath_softc *sc = aphy->sc;
2250 	struct ieee80211_conf *conf = &hw->conf;
2251 	struct ath_hw *ah = sc->sc_ah;
2252 
2253 	mutex_lock(&sc->mutex);
2254 
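	/*
	 * When entering power save without hardware auto-sleep support,
	 * the TIM timer interrupt is enabled and RX is aborted before
	 * the chip drops into network sleep; leaving power save undoes
	 * both steps.
	 */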
2255 	if (changed & IEEE80211_CONF_CHANGE_PS) {
2256 		if (conf->flags & IEEE80211_CONF_PS) {
2257 			if (!(ah->caps.hw_caps &
2258 			      ATH9K_HW_CAP_AUTOSLEEP)) {
2259 				if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
2260 					sc->imask |= ATH9K_INT_TIM_TIMER;
2261 					ath9k_hw_set_interrupts(sc->sc_ah,
2262 							sc->imask);
2263 				}
2264 				ath9k_hw_setrxabort(sc->sc_ah, 1);
2265 			}
2266 			ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
2267 		} else {
2268 			ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
2269 			if (!(ah->caps.hw_caps &
2270 			      ATH9K_HW_CAP_AUTOSLEEP)) {
2271 				ath9k_hw_setrxabort(sc->sc_ah, 0);
2272 				sc->sc_flags &= ~(SC_OP_WAIT_FOR_BEACON |
2273 						  SC_OP_WAIT_FOR_CAB |
2274 						  SC_OP_WAIT_FOR_PSPOLL_DATA |
2275 						  SC_OP_WAIT_FOR_TX_ACK);
2276 				if (sc->imask & ATH9K_INT_TIM_TIMER) {
2277 					sc->imask &= ~ATH9K_INT_TIM_TIMER;
2278 					ath9k_hw_set_interrupts(sc->sc_ah,
2279 							sc->imask);
2280 				}
2281 			}
2282 		}
2283 	}
2284 
2285 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2286 		struct ieee80211_channel *curchan = hw->conf.channel;
2287 		int pos = curchan->hw_value;
2288 
2289 		aphy->chan_idx = pos;
2290 		aphy->chan_is_ht = conf_is_ht(conf);
2291 
2292 		if (aphy->state == ATH_WIPHY_SCAN ||
2293 		    aphy->state == ATH_WIPHY_ACTIVE)
2294 			ath9k_wiphy_pause_all_forced(sc, aphy);
2295 		else {
2296 			/*
2297 			 * Do not change the operational channel based on
2298 			 * changes from a paused wiphy.
2299 			 */
2300 			goto skip_chan_change;
2301 		}
2302 
2303 		DPRINTF(sc, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
2304 			curchan->center_freq);
2305 
2306 		/* XXX: remove me eventually */
2307 		ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]);
2308 
2309 		ath_update_chainmask(sc, conf_is_ht(conf));
2310 
2311 		if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
2312 			DPRINTF(sc, ATH_DBG_FATAL, "Unable to set channel\n");
2313 			mutex_unlock(&sc->mutex);
2314 			return -EINVAL;
2315 		}
2316 	}
2317 
2318 skip_chan_change:
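	/* The tx power limit is tracked in half-dBm units, hence the doubling. */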
2319 	if (changed & IEEE80211_CONF_CHANGE_POWER)
2320 		sc->config.txpowlimit = 2 * conf->power_level;
2321 
2322 	mutex_unlock(&sc->mutex);
2323 
2324 	return 0;
2325 }
2326 
2327 #define SUPPORTED_FILTERS			\
2328 	(FIF_PROMISC_IN_BSS |			\
2329 	FIF_ALLMULTI |				\
2330 	FIF_CONTROL |				\
2331 	FIF_OTHER_BSS |				\
2332 	FIF_BCN_PRBRESP_PROMISC |		\
2333 	FIF_FCSFAIL)
2334 
2335 /* FIXME: sc->sc_full_reset ? */
2336 static void ath9k_configure_filter(struct ieee80211_hw *hw,
2337 				   unsigned int changed_flags,
2338 				   unsigned int *total_flags,
2339 				   int mc_count,
2340 				   struct dev_mc_list *mclist)
2341 {
2342 	struct ath_wiphy *aphy = hw->priv;
2343 	struct ath_softc *sc = aphy->sc;
2344 	u32 rfilt;
2345 
2346 	changed_flags &= SUPPORTED_FILTERS;
2347 	*total_flags &= SUPPORTED_FILTERS;
2348 
2349 	sc->rx.rxfilter = *total_flags;
2350 	ath9k_ps_wakeup(sc);
2351 	rfilt = ath_calcrxfilter(sc);
2352 	ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
2353 	ath9k_ps_restore(sc);
2354 
2355 	DPRINTF(sc, ATH_DBG_CONFIG, "Set HW RX filter: 0x%x\n", sc->rx.rxfilter);
2356 }
2357 
2358 static void ath9k_sta_notify(struct ieee80211_hw *hw,
2359 			     struct ieee80211_vif *vif,
2360 			     enum sta_notify_cmd cmd,
2361 			     struct ieee80211_sta *sta)
2362 {
2363 	struct ath_wiphy *aphy = hw->priv;
2364 	struct ath_softc *sc = aphy->sc;
2365 
2366 	switch (cmd) {
2367 	case STA_NOTIFY_ADD:
2368 		ath_node_attach(sc, sta);
2369 		break;
2370 	case STA_NOTIFY_REMOVE:
2371 		ath_node_detach(sc, sta);
2372 		break;
2373 	default:
2374 		break;
2375 	}
2376 }
2377 
2378 static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
2379 			 const struct ieee80211_tx_queue_params *params)
2380 {
2381 	struct ath_wiphy *aphy = hw->priv;
2382 	struct ath_softc *sc = aphy->sc;
2383 	struct ath9k_tx_queue_info qi;
2384 	int ret = 0, qnum;
2385 
2386 	if (queue >= WME_NUM_AC)
2387 		return 0;
2388 
2389 	mutex_lock(&sc->mutex);
2390 
2391 	memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
2392 
2393 	qi.tqi_aifs = params->aifs;
2394 	qi.tqi_cwmin = params->cw_min;
2395 	qi.tqi_cwmax = params->cw_max;
2396 	qi.tqi_burstTime = params->txop;
2397 	qnum = ath_get_hal_qnum(queue, sc);
2398 
2399 	DPRINTF(sc, ATH_DBG_CONFIG,
2400 		"Configure tx [queue/halq] [%d/%d],  "
2401 		"aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
2402 		queue, qnum, params->aifs, params->cw_min,
2403 		params->cw_max, params->txop);
2404 
2405 	ret = ath_txq_update(sc, qnum, &qi);
2406 	if (ret)
2407 		DPRINTF(sc, ATH_DBG_FATAL, "TXQ Update failed\n");
2408 
2409 	mutex_unlock(&sc->mutex);
2410 
2411 	return ret;
2412 }
2413 
2414 static int ath9k_set_key(struct ieee80211_hw *hw,
2415 			 enum set_key_cmd cmd,
2416 			 struct ieee80211_vif *vif,
2417 			 struct ieee80211_sta *sta,
2418 			 struct ieee80211_key_conf *key)
2419 {
2420 	struct ath_wiphy *aphy = hw->priv;
2421 	struct ath_softc *sc = aphy->sc;
2422 	int ret = 0;
2423 
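	/* Refusing the key makes mac80211 fall back to software crypto. */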
2424 	if (modparam_nohwcrypt)
2425 		return -ENOSPC;
2426 
2427 	mutex_lock(&sc->mutex);
2428 	ath9k_ps_wakeup(sc);
2429 	DPRINTF(sc, ATH_DBG_CONFIG, "Set HW Key\n");
2430 
2431 	switch (cmd) {
2432 	case SET_KEY:
2433 		ret = ath_key_config(sc, vif, sta, key);
2434 		if (ret >= 0) {
2435 			key->hw_key_idx = ret;
2436 			/* push IV and Michael MIC generation to stack */
2437 			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
2438 			if (key->alg == ALG_TKIP)
2439 				key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2440 			if (sc->sc_ah->sw_mgmt_crypto && key->alg == ALG_CCMP)
2441 				key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
2442 			ret = 0;
2443 		}
2444 		break;
2445 	case DISABLE_KEY:
2446 		ath_key_delete(sc, key);
2447 		break;
2448 	default:
2449 		ret = -EINVAL;
2450 	}
2451 
2452 	ath9k_ps_restore(sc);
2453 	mutex_unlock(&sc->mutex);
2454 
2455 	return ret;
2456 }
2457 
2458 static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2459 				   struct ieee80211_vif *vif,
2460 				   struct ieee80211_bss_conf *bss_conf,
2461 				   u32 changed)
2462 {
2463 	struct ath_wiphy *aphy = hw->priv;
2464 	struct ath_softc *sc = aphy->sc;
2465 	struct ath_hw *ah = sc->sc_ah;
2466 	struct ath_vif *avp = (void *)vif->drv_priv;
2467 	u32 rfilt = 0;
2468 	int error, i;
2469 
2470 	mutex_lock(&sc->mutex);
2471 
2472 	/*
2473 	 * TODO: Need to decide which hw opmode to use for
2474 	 *       multi-interface cases
2475 	 * XXX: This belongs into add_interface!
2476 	 */
2477 	if (vif->type == NL80211_IFTYPE_AP &&
2478 	    ah->opmode != NL80211_IFTYPE_AP) {
2479 		ah->opmode = NL80211_IFTYPE_STATION;
2480 		ath9k_hw_setopmode(ah);
2481 		memcpy(sc->curbssid, sc->sc_ah->macaddr, ETH_ALEN);
2482 		sc->curaid = 0;
2483 		ath9k_hw_write_associd(sc);
2484 		/* Request full reset to get hw opmode changed properly */
2485 		sc->sc_flags |= SC_OP_FULL_RESET;
2486 	}
2487 
2488 	if ((changed & BSS_CHANGED_BSSID) &&
2489 	    !is_zero_ether_addr(bss_conf->bssid)) {
2490 		switch (vif->type) {
2491 		case NL80211_IFTYPE_STATION:
2492 		case NL80211_IFTYPE_ADHOC:
2493 		case NL80211_IFTYPE_MESH_POINT:
2494 			/* Set BSSID */
2495 			memcpy(sc->curbssid, bss_conf->bssid, ETH_ALEN);
2496 			memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN);
2497 			sc->curaid = 0;
2498 			ath9k_hw_write_associd(sc);
2499 
2500 			/* Set aggregation protection mode parameters */
2501 			sc->config.ath_aggr_prot = 0;
2502 
2503 			DPRINTF(sc, ATH_DBG_CONFIG,
2504 				"RX filter 0x%x bssid %pM aid 0x%x\n",
2505 				rfilt, sc->curbssid, sc->curaid);
2506 
2507 			/* need to reconfigure the beacon */
2508 			sc->sc_flags &= ~SC_OP_BEACONS;
2509 
2510 			break;
2511 		default:
2512 			break;
2513 		}
2514 	}
2515 
2516 	if ((vif->type == NL80211_IFTYPE_ADHOC) ||
2517 	    (vif->type == NL80211_IFTYPE_AP) ||
2518 	    (vif->type == NL80211_IFTYPE_MESH_POINT)) {
2519 		if ((changed & BSS_CHANGED_BEACON) ||
2520 		    (changed & BSS_CHANGED_BEACON_ENABLED &&
2521 		     bss_conf->enable_beacon)) {
2522 			/*
2523 			 * Allocate and setup the beacon frame.
2524 			 *
2525 			 * Stop any previous beacon DMA.  This may be
2526 			 * necessary, for example, when an ibss merge
2527 			 * causes reconfiguration; we may be called
2528 			 * with beacon transmission active.
2529 			 */
2530 			ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2531 
2532 			error = ath_beacon_alloc(aphy, vif);
2533 			if (!error)
2534 				ath_beacon_config(sc, vif);
2535 		}
2536 	}
2537 
2538 	/* Check for WLAN_CAPABILITY_PRIVACY ? */
2539 	if (avp->av_opmode != NL80211_IFTYPE_STATION) {
2540 		for (i = 0; i < IEEE80211_WEP_NKID; i++)
2541 			if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i))
2542 				ath9k_hw_keysetmac(sc->sc_ah,
2543 						   (u16)i,
2544 						   sc->curbssid);
2545 	}
2546 
2547 	/* Only legacy IBSS for now */
2548 	if (vif->type == NL80211_IFTYPE_ADHOC)
2549 		ath_update_chainmask(sc, 0);
2550 
2551 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
2552 		DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
2553 			bss_conf->use_short_preamble);
2554 		if (bss_conf->use_short_preamble)
2555 			sc->sc_flags |= SC_OP_PREAMBLE_SHORT;
2556 		else
2557 			sc->sc_flags &= ~SC_OP_PREAMBLE_SHORT;
2558 	}
2559 
2560 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
2561 		DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
2562 			bss_conf->use_cts_prot);
2563 		if (bss_conf->use_cts_prot &&
2564 		    hw->conf.channel->band != IEEE80211_BAND_5GHZ)
2565 			sc->sc_flags |= SC_OP_PROTECT_ENABLE;
2566 		else
2567 			sc->sc_flags &= ~SC_OP_PROTECT_ENABLE;
2568 	}
2569 
2570 	if (changed & BSS_CHANGED_ASSOC) {
2571 		DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
2572 			bss_conf->assoc);
2573 		ath9k_bss_assoc_info(sc, vif, bss_conf);
2574 	}
2575 
2576 	/*
2577 	 * The HW TSF has to be reset when the beacon interval changes.
2578 	 * We set the flag here, and ath_beacon_config_ap() will take it
2579 	 * into account when it gets called through the subsequent
2580 	 * beacon configuration.
2581 	 */
2582 
2583 	if (changed & BSS_CHANGED_BEACON_INT) {
2584 		sc->sc_flags |= SC_OP_TSF_RESET;
2585 		sc->beacon_interval = bss_conf->beacon_int;
2586 	}
2587 
2588 	mutex_unlock(&sc->mutex);
2589 }
2590 
2591 static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
2592 {
2593 	u64 tsf;
2594 	struct ath_wiphy *aphy = hw->priv;
2595 	struct ath_softc *sc = aphy->sc;
2596 
2597 	mutex_lock(&sc->mutex);
2598 	tsf = ath9k_hw_gettsf64(sc->sc_ah);
2599 	mutex_unlock(&sc->mutex);
2600 
2601 	return tsf;
2602 }
2603 
2604 static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
2605 {
2606 	struct ath_wiphy *aphy = hw->priv;
2607 	struct ath_softc *sc = aphy->sc;
2608 
2609 	mutex_lock(&sc->mutex);
2610 	ath9k_hw_settsf64(sc->sc_ah, tsf);
2611 	mutex_unlock(&sc->mutex);
2612 }
2613 
2614 static void ath9k_reset_tsf(struct ieee80211_hw *hw)
2615 {
2616 	struct ath_wiphy *aphy = hw->priv;
2617 	struct ath_softc *sc = aphy->sc;
2618 
2619 	mutex_lock(&sc->mutex);
2620 	ath9k_hw_reset_tsf(sc->sc_ah);
2621 	mutex_unlock(&sc->mutex);
2622 }
2623 
2624 static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2625 			      enum ieee80211_ampdu_mlme_action action,
2626 			      struct ieee80211_sta *sta,
2627 			      u16 tid, u16 *ssn)
2628 {
2629 	struct ath_wiphy *aphy = hw->priv;
2630 	struct ath_softc *sc = aphy->sc;
2631 	int ret = 0;
2632 
2633 	switch (action) {
2634 	case IEEE80211_AMPDU_RX_START:
2635 		if (!(sc->sc_flags & SC_OP_RXAGGR))
2636 			ret = -ENOTSUPP;
2637 		break;
2638 	case IEEE80211_AMPDU_RX_STOP:
2639 		break;
2640 	case IEEE80211_AMPDU_TX_START:
2641 		ret = ath_tx_aggr_start(sc, sta, tid, ssn);
2642 		if (ret < 0)
2643 			DPRINTF(sc, ATH_DBG_FATAL,
2644 				"Unable to start TX aggregation\n");
2645 		else
2646 			ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
2647 		break;
2648 	case IEEE80211_AMPDU_TX_STOP:
2649 		ret = ath_tx_aggr_stop(sc, sta, tid);
2650 		if (ret < 0)
2651 			DPRINTF(sc, ATH_DBG_FATAL,
2652 				"Unable to stop TX aggregation\n");
2653 
2654 		ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
2655 		break;
2656 	case IEEE80211_AMPDU_TX_OPERATIONAL:
2657 		ath_tx_aggr_resume(sc, sta, tid);
2658 		break;
2659 	default:
2660 		DPRINTF(sc, ATH_DBG_FATAL, "Unknown AMPDU action\n");
2661 	}
2662 
2663 	return ret;
2664 }
2665 
2666 static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
2667 {
2668 	struct ath_wiphy *aphy = hw->priv;
2669 	struct ath_softc *sc = aphy->sc;
2670 
2671 	if (ath9k_wiphy_scanning(sc)) {
2672 		printk(KERN_DEBUG "ath9k: Two wiphys trying to scan at the "
2673 		       "same time\n");
2674 		/*
2675 		 * Do not allow the concurrent scanning state for now. This
2676 		 * could be improved with scanning control moved into ath9k.
2677 		 */
2678 		return;
2679 	}
2680 
2681 	aphy->state = ATH_WIPHY_SCAN;
2682 	ath9k_wiphy_pause_all_forced(sc, aphy);
2683 
2684 	mutex_lock(&sc->mutex);
2685 	sc->sc_flags |= SC_OP_SCANNING;
2686 	mutex_unlock(&sc->mutex);
2687 }
2688 
2689 static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
2690 {
2691 	struct ath_wiphy *aphy = hw->priv;
2692 	struct ath_softc *sc = aphy->sc;
2693 
2694 	mutex_lock(&sc->mutex);
2695 	aphy->state = ATH_WIPHY_ACTIVE;
2696 	sc->sc_flags &= ~SC_OP_SCANNING;
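	/* Request a full (non-fast) reset on the next channel change. */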
2697 	sc->sc_flags |= SC_OP_FULL_RESET;
2698 	mutex_unlock(&sc->mutex);
2699 }
2700 
2701 struct ieee80211_ops ath9k_ops = {
2702 	.tx 		    = ath9k_tx,
2703 	.start 		    = ath9k_start,
2704 	.stop 		    = ath9k_stop,
2705 	.add_interface 	    = ath9k_add_interface,
2706 	.remove_interface   = ath9k_remove_interface,
2707 	.config 	    = ath9k_config,
2708 	.configure_filter   = ath9k_configure_filter,
2709 	.sta_notify         = ath9k_sta_notify,
2710 	.conf_tx 	    = ath9k_conf_tx,
2711 	.bss_info_changed   = ath9k_bss_info_changed,
2712 	.set_key            = ath9k_set_key,
2713 	.get_tsf 	    = ath9k_get_tsf,
2714 	.set_tsf 	    = ath9k_set_tsf,
2715 	.reset_tsf 	    = ath9k_reset_tsf,
2716 	.ampdu_action       = ath9k_ampdu_action,
2717 	.sw_scan_start      = ath9k_sw_scan_start,
2718 	.sw_scan_complete   = ath9k_sw_scan_complete,
2719 	.rfkill_poll        = ath9k_rfkill_poll_state,
2720 };
2721 
2722 static struct {
2723 	u32 version;
2724 	const char *name;
2725 } ath_mac_bb_names[] = {
2726 	{ AR_SREV_VERSION_5416_PCI,	"5416" },
2727 	{ AR_SREV_VERSION_5416_PCIE,	"5418" },
2728 	{ AR_SREV_VERSION_9100,		"9100" },
2729 	{ AR_SREV_VERSION_9160,		"9160" },
2730 	{ AR_SREV_VERSION_9280,		"9280" },
2731 	{ AR_SREV_VERSION_9285,		"9285" }
2732 };
2733 
2734 static struct {
2735 	u16 version;
2736 	const char *name;
2737 } ath_rf_names[] = {
2738 	{ 0,				"5133" },
2739 	{ AR_RAD5133_SREV_MAJOR,	"5133" },
2740 	{ AR_RAD5122_SREV_MAJOR,	"5122" },
2741 	{ AR_RAD2133_SREV_MAJOR,	"2133" },
2742 	{ AR_RAD2122_SREV_MAJOR,	"2122" }
2743 };
2744 
2745 /*
2746  * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
2747  */
2748 const char *
2749 ath_mac_bb_name(u32 mac_bb_version)
2750 {
2751 	int i;
2752 
2753 	for (i = 0; i < ARRAY_SIZE(ath_mac_bb_names); i++) {
2754 		if (ath_mac_bb_names[i].version == mac_bb_version) {
2755 			return ath_mac_bb_names[i].name;
2756 		}
2757 	}
2758 
2759 	return "????";
2760 }
2761 
2762 /*
2763  * Return the RF name. "????" is returned if the RF is unknown.
2764  */
2765 const char *
2766 ath_rf_name(u16 rf_version)
2767 {
2768 	int i;
2769 
2770 	for (i=0; i<ARRAY_SIZE(ath_rf_names); i++) {
2771 		if (ath_rf_names[i].version == rf_version) {
2772 			return ath_rf_names[i].name;
2773 		}
2774 	}
2775 
2776 	return "????";
2777 }
2778 
2779 static int __init ath9k_init(void)
2780 {
2781 	int error;
2782 
2783 	/* Register rate control algorithm */
2784 	error = ath_rate_control_register();
2785 	if (error != 0) {
2786 		printk(KERN_ERR
2787 			"ath9k: Unable to register rate control "
2788 			"algorithm: %d\n",
2789 			error);
2790 		goto err_out;
2791 	}
2792 
2793 	error = ath9k_debug_create_root();
2794 	if (error) {
2795 		printk(KERN_ERR
2796 			"ath9k: Unable to create debugfs root: %d\n",
2797 			error);
2798 		goto err_rate_unregister;
2799 	}
2800 
2801 	error = ath_pci_init();
2802 	if (error < 0) {
2803 		printk(KERN_ERR
2804 			"ath9k: No PCI devices found, driver not installed.\n");
2805 		error = -ENODEV;
2806 		goto err_remove_root;
2807 	}
2808 
2809 	error = ath_ahb_init();
2810 	if (error < 0) {
2811 		error = -ENODEV;
2812 		goto err_pci_exit;
2813 	}
2814 
2815 	return 0;
2816 
2817  err_pci_exit:
2818 	ath_pci_exit();
2819 
2820  err_remove_root:
2821 	ath9k_debug_remove_root();
2822  err_rate_unregister:
2823 	ath_rate_control_unregister();
2824  err_out:
2825 	return error;
2826 }
2827 module_init(ath9k_init);
2828 
2829 static void __exit ath9k_exit(void)
2830 {
2831 	ath_ahb_exit();
2832 	ath_pci_exit();
2833 	ath9k_debug_remove_root();
2834 	ath_rate_control_unregister();
2835 	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
2836 }
2837 module_exit(ath9k_exit);
2838