xref: /linux/drivers/net/wireless/ath/ath5k/base.c (revision d39d0ed196aa1685bb24771e92f78633c66ac9cb)
1 /*-
2  * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
3  * Copyright (c) 2004-2005 Atheros Communications, Inc.
4  * Copyright (c) 2006 Devicescape Software, Inc.
5  * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
6  * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
7  *
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer,
15  *    without modification.
16  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
17  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
18  *    redistribution must be conditioned upon including a substantially
19  *    similar Disclaimer requirement for further binary redistribution.
20  * 3. Neither the names of the above-listed copyright holders nor the names
21  *    of any contributors may be used to endorse or promote products derived
22  *    from this software without specific prior written permission.
23  *
24  * Alternatively, this software may be distributed under the terms of the
25  * GNU General Public License ("GPL") version 2 as published by the Free
26  * Software Foundation.
27  *
28  * NO WARRANTY
29  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
32  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
33  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
34  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
36  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
39  * THE POSSIBILITY OF SUCH DAMAGES.
40  *
41  */
42 
43 #include <linux/module.h>
44 #include <linux/delay.h>
45 #include <linux/hardirq.h>
46 #include <linux/if.h>
47 #include <linux/io.h>
48 #include <linux/netdevice.h>
49 #include <linux/cache.h>
50 #include <linux/pci.h>
51 #include <linux/pci-aspm.h>
52 #include <linux/ethtool.h>
53 #include <linux/uaccess.h>
54 #include <linux/slab.h>
55 
56 #include <net/ieee80211_radiotap.h>
57 
58 #include <asm/unaligned.h>
59 
60 #include "base.h"
61 #include "reg.h"
62 #include "debug.h"
63 #include "ani.h"
64 
65 static int modparam_nohwcrypt;
66 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
67 MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
68 
69 static int modparam_all_channels;
70 module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
71 MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
72 
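/*
 * Usage sketch (editor's note, not part of the original source): both
 * knobs above are module load-time parameters, e.g.:
 *
 *   modprobe ath5k nohwcrypt=1 all_channels=1
 *
 * S_IRUGO makes the current values readable (but not writable) under
 * /sys/module/ath5k/parameters/.
 */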
73 
74 /******************\
75 * Internal defines *
76 \******************/
77 
78 /* Module info */
79 MODULE_AUTHOR("Jiri Slaby");
80 MODULE_AUTHOR("Nick Kossifidis");
81 MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
82 MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
83 MODULE_LICENSE("Dual BSD/GPL");
84 MODULE_VERSION("0.6.0 (EXPERIMENTAL)");
85 
86 
87 /* Known PCI ids */
88 static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
89 	{ PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
90 	{ PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
91 	{ PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus! */
92 	{ PCI_VDEVICE(ATHEROS, 0x0012) }, /* 5211 */
93 	{ PCI_VDEVICE(ATHEROS, 0x0013) }, /* 5212 */
94 	{ PCI_VDEVICE(3COM_2,  0x0013) }, /* 3com 5212 */
95 	{ PCI_VDEVICE(3COM,    0x0013) }, /* 3com 3CRDAG675 5212 */
96 	{ PCI_VDEVICE(ATHEROS, 0x1014) }, /* IBM minipci 5212 */
97 	{ PCI_VDEVICE(ATHEROS, 0x0014) }, /* 5212 compatible */
98 	{ PCI_VDEVICE(ATHEROS, 0x0015) }, /* 5212 compatible */
99 	{ PCI_VDEVICE(ATHEROS, 0x0016) }, /* 5212 compatible */
100 	{ PCI_VDEVICE(ATHEROS, 0x0017) }, /* 5212 compatible */
101 	{ PCI_VDEVICE(ATHEROS, 0x0018) }, /* 5212 compatible */
102 	{ PCI_VDEVICE(ATHEROS, 0x0019) }, /* 5212 compatible */
103 	{ PCI_VDEVICE(ATHEROS, 0x001a) }, /* 2413 Griffin-lite */
104 	{ PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */
105 	{ PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */
106 	{ PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */
107 	{ 0 }
108 };
109 MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
110 
111 /* Known SREVs */
112 static const struct ath5k_srev_name srev_names[] = {
113 	{ "5210",	AR5K_VERSION_MAC,	AR5K_SREV_AR5210 },
114 	{ "5311",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311 },
115 	{ "5311A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311A },
116 	{ "5311B",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311B },
117 	{ "5211",	AR5K_VERSION_MAC,	AR5K_SREV_AR5211 },
118 	{ "5212",	AR5K_VERSION_MAC,	AR5K_SREV_AR5212 },
119 	{ "5213",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213 },
120 	{ "5213A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213A },
121 	{ "2413",	AR5K_VERSION_MAC,	AR5K_SREV_AR2413 },
122 	{ "2414",	AR5K_VERSION_MAC,	AR5K_SREV_AR2414 },
123 	{ "5424",	AR5K_VERSION_MAC,	AR5K_SREV_AR5424 },
124 	{ "5413",	AR5K_VERSION_MAC,	AR5K_SREV_AR5413 },
125 	{ "5414",	AR5K_VERSION_MAC,	AR5K_SREV_AR5414 },
126 	{ "2415",	AR5K_VERSION_MAC,	AR5K_SREV_AR2415 },
127 	{ "5416",	AR5K_VERSION_MAC,	AR5K_SREV_AR5416 },
128 	{ "5418",	AR5K_VERSION_MAC,	AR5K_SREV_AR5418 },
129 	{ "2425",	AR5K_VERSION_MAC,	AR5K_SREV_AR2425 },
130 	{ "2417",	AR5K_VERSION_MAC,	AR5K_SREV_AR2417 },
131 	{ "xxxxx",	AR5K_VERSION_MAC,	AR5K_SREV_UNKNOWN },
132 	{ "5110",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5110 },
133 	{ "5111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111 },
134 	{ "5111A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111A },
135 	{ "2111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2111 },
136 	{ "5112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112 },
137 	{ "5112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112A },
138 	{ "5112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112B },
139 	{ "2112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112 },
140 	{ "2112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112A },
141 	{ "2112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112B },
142 	{ "2413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2413 },
143 	{ "5413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5413 },
144 	{ "2316",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2316 },
145 	{ "2317",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2317 },
146 	{ "5424",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5424 },
147 	{ "5133",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5133 },
148 	{ "xxxxx",	AR5K_VERSION_RAD,	AR5K_SREV_UNKNOWN },
149 };
150 
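/*
 * Note (editor's addition): mac80211 expresses ieee80211_rate.bitrate in
 * units of 100 kbit/s, so e.g. .bitrate = 55 below means 5.5 Mbit/s and
 * .bitrate = 540 means 54 Mbit/s.
 */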
151 static const struct ieee80211_rate ath5k_rates[] = {
152 	{ .bitrate = 10,
153 	  .hw_value = ATH5K_RATE_CODE_1M, },
154 	{ .bitrate = 20,
155 	  .hw_value = ATH5K_RATE_CODE_2M,
156 	  .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
157 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
158 	{ .bitrate = 55,
159 	  .hw_value = ATH5K_RATE_CODE_5_5M,
160 	  .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
161 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
162 	{ .bitrate = 110,
163 	  .hw_value = ATH5K_RATE_CODE_11M,
164 	  .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
165 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
166 	{ .bitrate = 60,
167 	  .hw_value = ATH5K_RATE_CODE_6M,
168 	  .flags = 0 },
169 	{ .bitrate = 90,
170 	  .hw_value = ATH5K_RATE_CODE_9M,
171 	  .flags = 0 },
172 	{ .bitrate = 120,
173 	  .hw_value = ATH5K_RATE_CODE_12M,
174 	  .flags = 0 },
175 	{ .bitrate = 180,
176 	  .hw_value = ATH5K_RATE_CODE_18M,
177 	  .flags = 0 },
178 	{ .bitrate = 240,
179 	  .hw_value = ATH5K_RATE_CODE_24M,
180 	  .flags = 0 },
181 	{ .bitrate = 360,
182 	  .hw_value = ATH5K_RATE_CODE_36M,
183 	  .flags = 0 },
184 	{ .bitrate = 480,
185 	  .hw_value = ATH5K_RATE_CODE_48M,
186 	  .flags = 0 },
187 	{ .bitrate = 540,
188 	  .hw_value = ATH5K_RATE_CODE_54M,
189 	  .flags = 0 },
190 	/* XR missing */
191 };
192 
193 /*
194  * Prototypes - PCI stack related functions
195  */
196 static int __devinit	ath5k_pci_probe(struct pci_dev *pdev,
197 				const struct pci_device_id *id);
198 static void __devexit	ath5k_pci_remove(struct pci_dev *pdev);
199 #ifdef CONFIG_PM_SLEEP
200 static int		ath5k_pci_suspend(struct device *dev);
201 static int		ath5k_pci_resume(struct device *dev);
202 
203 static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
204 #define ATH5K_PM_OPS	(&ath5k_pm_ops)
205 #else
206 #define ATH5K_PM_OPS	NULL
207 #endif /* CONFIG_PM_SLEEP */
208 
209 static struct pci_driver ath5k_pci_driver = {
210 	.name		= KBUILD_MODNAME,
211 	.id_table	= ath5k_pci_id_table,
212 	.probe		= ath5k_pci_probe,
213 	.remove		= __devexit_p(ath5k_pci_remove),
214 	.driver.pm	= ATH5K_PM_OPS,
215 };
216 
217 
218 
219 /*
220  * Prototypes - MAC 802.11 stack related functions
221  */
222 static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
223 static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
224 		struct ath5k_txq *txq);
225 static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan);
226 static int ath5k_start(struct ieee80211_hw *hw);
227 static void ath5k_stop(struct ieee80211_hw *hw);
228 static int ath5k_add_interface(struct ieee80211_hw *hw,
229 		struct ieee80211_vif *vif);
230 static void ath5k_remove_interface(struct ieee80211_hw *hw,
231 		struct ieee80211_vif *vif);
232 static int ath5k_config(struct ieee80211_hw *hw, u32 changed);
233 static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
234 				   struct netdev_hw_addr_list *mc_list);
235 static void ath5k_configure_filter(struct ieee80211_hw *hw,
236 		unsigned int changed_flags,
237 		unsigned int *new_flags,
238 		u64 multicast);
239 static int ath5k_set_key(struct ieee80211_hw *hw,
240 		enum set_key_cmd cmd,
241 		struct ieee80211_vif *vif, struct ieee80211_sta *sta,
242 		struct ieee80211_key_conf *key);
243 static int ath5k_get_stats(struct ieee80211_hw *hw,
244 		struct ieee80211_low_level_stats *stats);
245 static int ath5k_get_survey(struct ieee80211_hw *hw,
246 		int idx, struct survey_info *survey);
247 static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
248 static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf);
249 static void ath5k_reset_tsf(struct ieee80211_hw *hw);
250 static int ath5k_beacon_update(struct ieee80211_hw *hw,
251 		struct ieee80211_vif *vif);
252 static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
253 		struct ieee80211_vif *vif,
254 		struct ieee80211_bss_conf *bss_conf,
255 		u32 changes);
256 static void ath5k_sw_scan_start(struct ieee80211_hw *hw);
257 static void ath5k_sw_scan_complete(struct ieee80211_hw *hw);
258 static void ath5k_set_coverage_class(struct ieee80211_hw *hw,
259 		u8 coverage_class);
260 
261 static const struct ieee80211_ops ath5k_hw_ops = {
262 	.tx 		= ath5k_tx,
263 	.start 		= ath5k_start,
264 	.stop 		= ath5k_stop,
265 	.add_interface 	= ath5k_add_interface,
266 	.remove_interface = ath5k_remove_interface,
267 	.config 	= ath5k_config,
268 	.prepare_multicast = ath5k_prepare_multicast,
269 	.configure_filter = ath5k_configure_filter,
270 	.set_key 	= ath5k_set_key,
271 	.get_stats 	= ath5k_get_stats,
272 	.get_survey	= ath5k_get_survey,
273 	.conf_tx 	= NULL,
274 	.get_tsf 	= ath5k_get_tsf,
275 	.set_tsf 	= ath5k_set_tsf,
276 	.reset_tsf 	= ath5k_reset_tsf,
277 	.bss_info_changed = ath5k_bss_info_changed,
278 	.sw_scan_start	= ath5k_sw_scan_start,
279 	.sw_scan_complete = ath5k_sw_scan_complete,
280 	.set_coverage_class = ath5k_set_coverage_class,
281 };
282 
283 /*
284  * Prototypes - Internal functions
285  */
286 /* Attach detach */
287 static int 	ath5k_attach(struct pci_dev *pdev,
288 			struct ieee80211_hw *hw);
289 static void 	ath5k_detach(struct pci_dev *pdev,
290 			struct ieee80211_hw *hw);
291 /* Channel/mode setup */
292 static inline short ath5k_ieee2mhz(short chan);
293 static unsigned int ath5k_copy_channels(struct ath5k_hw *ah,
294 				struct ieee80211_channel *channels,
295 				unsigned int mode,
296 				unsigned int max);
297 static int 	ath5k_setup_bands(struct ieee80211_hw *hw);
298 static int 	ath5k_chan_set(struct ath5k_softc *sc,
299 				struct ieee80211_channel *chan);
300 static void	ath5k_setcurmode(struct ath5k_softc *sc,
301 				unsigned int mode);
302 static void	ath5k_mode_setup(struct ath5k_softc *sc);
303 
304 /* Descriptor setup */
305 static int	ath5k_desc_alloc(struct ath5k_softc *sc,
306 				struct pci_dev *pdev);
307 static void	ath5k_desc_free(struct ath5k_softc *sc,
308 				struct pci_dev *pdev);
309 /* Buffers setup */
310 static int 	ath5k_rxbuf_setup(struct ath5k_softc *sc,
311 				struct ath5k_buf *bf);
312 static int 	ath5k_txbuf_setup(struct ath5k_softc *sc,
313 				struct ath5k_buf *bf,
314 				struct ath5k_txq *txq, int padsize);
315 
316 static inline void ath5k_txbuf_free_skb(struct ath5k_softc *sc,
317 				struct ath5k_buf *bf)
318 {
319 	BUG_ON(!bf);
320 	if (!bf->skb)
321 		return;
322 	pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len,
323 			PCI_DMA_TODEVICE);
324 	dev_kfree_skb_any(bf->skb);
325 	bf->skb = NULL;
326 	bf->skbaddr = 0;
327 	bf->desc->ds_data = 0;
328 }
329 
330 static inline void ath5k_rxbuf_free_skb(struct ath5k_softc *sc,
331 				struct ath5k_buf *bf)
332 {
333 	struct ath5k_hw *ah = sc->ah;
334 	struct ath_common *common = ath5k_hw_common(ah);
335 
336 	BUG_ON(!bf);
337 	if (!bf->skb)
338 		return;
339 	pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
340 			PCI_DMA_FROMDEVICE);
341 	dev_kfree_skb_any(bf->skb);
342 	bf->skb = NULL;
343 	bf->skbaddr = 0;
344 	bf->desc->ds_data = 0;
345 }
346 
347 
348 /* Queues setup */
349 static struct 	ath5k_txq *ath5k_txq_setup(struct ath5k_softc *sc,
350 				int qtype, int subtype);
351 static int 	ath5k_beaconq_setup(struct ath5k_hw *ah);
352 static int 	ath5k_beaconq_config(struct ath5k_softc *sc);
353 static void 	ath5k_txq_drainq(struct ath5k_softc *sc,
354 				struct ath5k_txq *txq);
355 static void 	ath5k_txq_cleanup(struct ath5k_softc *sc);
356 static void 	ath5k_txq_release(struct ath5k_softc *sc);
357 /* Rx handling */
358 static int 	ath5k_rx_start(struct ath5k_softc *sc);
359 static void 	ath5k_rx_stop(struct ath5k_softc *sc);
360 static unsigned int ath5k_rx_decrypted(struct ath5k_softc *sc,
361 					struct sk_buff *skb,
362 					struct ath5k_rx_status *rs);
363 static void 	ath5k_tasklet_rx(unsigned long data);
364 /* Tx handling */
365 static void 	ath5k_tx_processq(struct ath5k_softc *sc,
366 				struct ath5k_txq *txq);
367 static void 	ath5k_tasklet_tx(unsigned long data);
368 /* Beacon handling */
369 static int 	ath5k_beacon_setup(struct ath5k_softc *sc,
370 					struct ath5k_buf *bf);
371 static void 	ath5k_beacon_send(struct ath5k_softc *sc);
372 static void 	ath5k_beacon_config(struct ath5k_softc *sc);
373 static void	ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
374 static void	ath5k_tasklet_beacon(unsigned long data);
375 static void	ath5k_tasklet_ani(unsigned long data);
376 
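/*
 * Editor's note, worked example with made-up numbers: the RX status only
 * carries the low 15 bits of the TSF.  Say the full TSF now reads
 * 0x10008010 and rstamp is 0x7ff0: (tsf & 0x7fff) = 0x0010 < 0x7ff0, so
 * the 15-bit counter wrapped after the frame was stamped and we step back
 * one 0x8000 period before splicing: (0x10000010 & ~0x7fff) | 0x7ff0 =
 * 0x10007ff0, i.e. 0x20 usec before the TSF read, as expected.
 */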
377 static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
378 {
379 	u64 tsf = ath5k_hw_get_tsf64(ah);
380 
381 	if ((tsf & 0x7fff) < rstamp)
382 		tsf -= 0x8000;
383 
384 	return (tsf & ~0x7fff) | rstamp;
385 }
386 
387 /* Interrupt handling */
388 static int 	ath5k_init(struct ath5k_softc *sc);
389 static int 	ath5k_stop_locked(struct ath5k_softc *sc);
390 static int 	ath5k_stop_hw(struct ath5k_softc *sc);
391 static irqreturn_t ath5k_intr(int irq, void *dev_id);
392 static void ath5k_reset_work(struct work_struct *work);
393 
394 static void 	ath5k_tasklet_calibrate(unsigned long data);
395 
396 /*
397  * Module init/exit functions
398  */
399 static int __init
400 init_ath5k_pci(void)
401 {
402 	int ret;
403 
404 	ath5k_debug_init();
405 
406 	ret = pci_register_driver(&ath5k_pci_driver);
407 	if (ret) {
408 		printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
409 		return ret;
410 	}
411 
412 	return 0;
413 }
414 
415 static void __exit
416 exit_ath5k_pci(void)
417 {
418 	pci_unregister_driver(&ath5k_pci_driver);
419 
420 	ath5k_debug_finish();
421 }
422 
423 module_init(init_ath5k_pci);
424 module_exit(exit_ath5k_pci);
425 
426 
427 /********************\
428 * PCI Initialization *
429 \********************/
430 
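/*
 * Editor's note on the lookup below: a match on the high nibble,
 * (val & 0xf0), records a family name but keeps scanning; only an exact
 * (val & 0xff) match stops the loop.  E.g. a hypothetical srev 0xa7 with
 * table entries 0xa0 and 0xa5 would report the 0xa0 family name.
 */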
431 static const char *
432 ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
433 {
434 	const char *name = "xxxxx";
435 	unsigned int i;
436 
437 	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
438 		if (srev_names[i].sr_type != type)
439 			continue;
440 
441 		if ((val & 0xf0) == srev_names[i].sr_val)
442 			name = srev_names[i].sr_name;
443 
444 		if ((val & 0xff) == srev_names[i].sr_val) {
445 			name = srev_names[i].sr_name;
446 			break;
447 		}
448 	}
449 
450 	return name;
451 }
452 static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
453 {
454 	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
455 	return ath5k_hw_reg_read(ah, reg_offset);
456 }
457 
458 static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
459 {
460 	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
461 	ath5k_hw_reg_write(ah, val, reg_offset);
462 }
463 
464 static const struct ath_ops ath5k_common_ops = {
465 	.read = ath5k_ioread32,
466 	.write = ath5k_iowrite32,
467 };
468 
469 static int __devinit
470 ath5k_pci_probe(struct pci_dev *pdev,
471 		const struct pci_device_id *id)
472 {
473 	void __iomem *mem;
474 	struct ath5k_softc *sc;
475 	struct ath_common *common;
476 	struct ieee80211_hw *hw;
477 	int ret;
478 	u8 csz;
479 
480 	/*
481 	 * L0s needs to be disabled on all ath5k cards.
482 	 *
483 	 * For distributions shipping with CONFIG_PCIEASPM (this will be enabled
484 	 * by default starting with 2.6.36) this will also mean both L1 and
485 	 * L0s will be disabled when a pre 1.1 PCIe device is detected. We do
486 	 * know L1 works correctly even for all ath5k pre 1.1 PCIe devices,
487 	 * but we cannot currently undo the effect of a blacklist; for
488 	 * details you can read pcie_aspm_sanity_check() and see how it adjusts
489 	 * the device link capability.
490 	 *
491 	 * It may be possible in the future to implement some PCI API to allow
492 	 * drivers to override blacklists for pre 1.1 PCIe but for now it is
493 	 * best to accept that both L0s and L1 will be disabled completely for
494 	 * distributions shipping with CONFIG_PCIEASPM rather than having this
495 	 * issue present. Motivation for adding this new API will be to help
496 	 * with power consumption for some of these devices.
497 	 */
498 	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
499 
500 	ret = pci_enable_device(pdev);
501 	if (ret) {
502 		dev_err(&pdev->dev, "can't enable device\n");
503 		goto err;
504 	}
505 
506 	/* XXX 32-bit addressing only */
507 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
508 	if (ret) {
509 		dev_err(&pdev->dev, "32-bit DMA not available\n");
510 		goto err_dis;
511 	}
512 
513 	/*
514 	 * Cache line size is used to size and align various
515 	 * structures used to communicate with the hardware.
516 	 */
517 	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
518 	if (csz == 0) {
519 		/*
520 		 * Linux 2.4.18 (at least) writes the cache line size
521 		 * register as a 16-bit wide register which is wrong.
522 		 * We must have this setup properly for rx buffer
523 		 * DMA to work so force a reasonable value here if it
524 		 * comes up zero.
525 		 */
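		/*
		 * PCI_CACHE_LINE_SIZE is in units of 32-bit words, hence
		 * the ">> 2": e.g. 32-byte cache lines give csz = 8.
		 */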
526 		csz = L1_CACHE_BYTES >> 2;
527 		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
528 	}
529 	/*
530 	 * The default setting of latency timer yields poor results,
531 	 * set it to the value used by other systems.  It may be worth
532 	 * tweaking this setting more.
533 	 */
534 	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
535 
536 	/* Enable bus mastering */
537 	pci_set_master(pdev);
538 
539 	/*
540 	 * Disable the RETRY_TIMEOUT register (0x41) to keep
541 	 * PCI Tx retries from interfering with C3 CPU state.
542 	 */
543 	pci_write_config_byte(pdev, 0x41, 0);
544 
545 	ret = pci_request_region(pdev, 0, "ath5k");
546 	if (ret) {
547 		dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
548 		goto err_dis;
549 	}
550 
551 	mem = pci_iomap(pdev, 0, 0);
552 	if (!mem) {
553 		dev_err(&pdev->dev, "cannot remap PCI memory region\n");
554 		ret = -EIO;
555 		goto err_reg;
556 	}
557 
558 	/*
559 	 * Allocate hw (mac80211 main struct)
560 	 * and hw->priv (driver private data)
561 	 */
562 	hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops);
563 	if (hw == NULL) {
564 		dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
565 		ret = -ENOMEM;
566 		goto err_map;
567 	}
568 
569 	dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));
570 
571 	/* Initialize driver private data */
572 	SET_IEEE80211_DEV(hw, &pdev->dev);
573 	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
574 		    IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
575 		    IEEE80211_HW_SIGNAL_DBM;
576 
577 	hw->wiphy->interface_modes =
578 		BIT(NL80211_IFTYPE_AP) |
579 		BIT(NL80211_IFTYPE_STATION) |
580 		BIT(NL80211_IFTYPE_ADHOC) |
581 		BIT(NL80211_IFTYPE_MESH_POINT);
582 
583 	hw->extra_tx_headroom = 2;
584 	hw->channel_change_time = 5000;
585 	sc = hw->priv;
586 	sc->hw = hw;
587 	sc->pdev = pdev;
588 
589 	ath5k_debug_init_device(sc);
590 
591 	/*
592 	 * Mark the device as detached to avoid processing
593 	 * interrupts until setup is complete.
594 	 */
595 	__set_bit(ATH_STAT_INVALID, sc->status);
596 
597 	sc->iobase = mem; /* So we can unmap it on detach */
598 	sc->opmode = NL80211_IFTYPE_STATION;
599 	sc->bintval = 1000;
600 	mutex_init(&sc->lock);
601 	spin_lock_init(&sc->rxbuflock);
602 	spin_lock_init(&sc->txbuflock);
603 	spin_lock_init(&sc->block);
604 
605 	/* Set private data */
606 	pci_set_drvdata(pdev, sc);
607 
608 	/* Setup interrupt handler */
609 	ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
610 	if (ret) {
611 		ATH5K_ERR(sc, "request_irq failed\n");
612 		goto err_free;
613 	}
614 
615 	/* If we passed the test, allocate an ath5k_hw struct */
616 	sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
617 	if (!sc->ah) {
618 		ret = -ENOMEM;
619 		ATH5K_ERR(sc, "out of memory\n");
620 		goto err_irq;
621 	}
622 
623 	sc->ah->ah_sc = sc;
624 	sc->ah->ah_iobase = sc->iobase;
625 	common = ath5k_hw_common(sc->ah);
626 	common->ops = &ath5k_common_ops;
627 	common->ah = sc->ah;
628 	common->hw = hw;
629 	common->cachelsz = csz << 2; /* convert to bytes */
630 
631 	/* Initialize device */
632 	ret = ath5k_hw_attach(sc);
633 	if (ret) {
634 		goto err_free_ah;
635 	}
636 
637 	/* set up multi-rate retry capabilities */
638 	if (sc->ah->ah_version == AR5K_AR5212) {
639 		hw->max_rates = 4;
640 		hw->max_rate_tries = 11;
641 	}
642 
643 	/* Finish private driver data initialization */
644 	ret = ath5k_attach(pdev, hw);
645 	if (ret)
646 		goto err_ah;
647 
648 	ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
649 			ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
650 					sc->ah->ah_mac_srev,
651 					sc->ah->ah_phy_revision);
652 
653 	if (!sc->ah->ah_single_chip) {
654 		/* Single chip radio (!RF5111) */
655 		if (sc->ah->ah_radio_5ghz_revision &&
656 			!sc->ah->ah_radio_2ghz_revision) {
657 			/* No 5GHz support -> report 2GHz radio */
658 			if (!test_bit(AR5K_MODE_11A,
659 				sc->ah->ah_capabilities.cap_mode)) {
660 				ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
661 					ath5k_chip_name(AR5K_VERSION_RAD,
662 						sc->ah->ah_radio_5ghz_revision),
663 						sc->ah->ah_radio_5ghz_revision);
664 			/* No 2GHz support (5110 and some
665 			 * 5GHz only cards) -> report 5GHz radio */
666 			} else if (!test_bit(AR5K_MODE_11B,
667 				sc->ah->ah_capabilities.cap_mode)) {
668 				ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
669 					ath5k_chip_name(AR5K_VERSION_RAD,
670 						sc->ah->ah_radio_5ghz_revision),
671 						sc->ah->ah_radio_5ghz_revision);
672 			/* Multiband radio */
673 			} else {
674 				ATH5K_INFO(sc, "RF%s multiband radio found"
675 					" (0x%x)\n",
676 					ath5k_chip_name(AR5K_VERSION_RAD,
677 						sc->ah->ah_radio_5ghz_revision),
678 						sc->ah->ah_radio_5ghz_revision);
679 			}
680 		}
681 		/* Multi chip radio (RF5111 - RF2111) ->
682 		 * report both 2GHz/5GHz radios */
683 		else if (sc->ah->ah_radio_5ghz_revision &&
684 				sc->ah->ah_radio_2ghz_revision){
685 			ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
686 				ath5k_chip_name(AR5K_VERSION_RAD,
687 					sc->ah->ah_radio_5ghz_revision),
688 					sc->ah->ah_radio_5ghz_revision);
689 			ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
690 				ath5k_chip_name(AR5K_VERSION_RAD,
691 					sc->ah->ah_radio_2ghz_revision),
692 					sc->ah->ah_radio_2ghz_revision);
693 		}
694 	}
695 
696 
697 	/* ready to process interrupts */
698 	__clear_bit(ATH_STAT_INVALID, sc->status);
699 
700 	return 0;
701 err_ah:
702 	ath5k_hw_detach(sc->ah);
703 err_irq:
704 	free_irq(pdev->irq, sc);
705 err_free_ah:
706 	kfree(sc->ah);
707 err_free:
708 	ieee80211_free_hw(hw);
709 err_map:
710 	pci_iounmap(pdev, mem);
711 err_reg:
712 	pci_release_region(pdev, 0);
713 err_dis:
714 	pci_disable_device(pdev);
715 err:
716 	return ret;
717 }
718 
719 static void __devexit
720 ath5k_pci_remove(struct pci_dev *pdev)
721 {
722 	struct ath5k_softc *sc = pci_get_drvdata(pdev);
723 
724 	ath5k_debug_finish_device(sc);
725 	ath5k_detach(pdev, sc->hw);
726 	ath5k_hw_detach(sc->ah);
727 	kfree(sc->ah);
728 	free_irq(pdev->irq, sc);
729 	pci_iounmap(pdev, sc->iobase);
730 	pci_release_region(pdev, 0);
731 	pci_disable_device(pdev);
732 	ieee80211_free_hw(sc->hw);
733 }
734 
735 #ifdef CONFIG_PM_SLEEP
736 static int ath5k_pci_suspend(struct device *dev)
737 {
738 	struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
739 
740 	ath5k_led_off(sc);
741 	return 0;
742 }
743 
744 static int ath5k_pci_resume(struct device *dev)
745 {
746 	struct pci_dev *pdev = to_pci_dev(dev);
747 	struct ath5k_softc *sc = pci_get_drvdata(pdev);
748 
749 	/*
750 	 * Suspend/Resume resets the PCI configuration space, so we have to
751 	 * re-disable the RETRY_TIMEOUT register (0x41) to keep
752 	 * PCI Tx retries from interfering with C3 CPU state
753 	 */
754 	pci_write_config_byte(pdev, 0x41, 0);
755 
756 	ath5k_led_enable(sc);
757 	return 0;
758 }
759 #endif /* CONFIG_PM_SLEEP */
760 
761 
762 /***********************\
763 * Driver Initialization *
764 \***********************/
765 
766 static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
767 {
768 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
769 	struct ath5k_softc *sc = hw->priv;
770 	struct ath_regulatory *regulatory = ath5k_hw_regulatory(sc->ah);
771 
772 	return ath_reg_notifier_apply(wiphy, request, regulatory);
773 }
774 
775 static int
776 ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
777 {
778 	struct ath5k_softc *sc = hw->priv;
779 	struct ath5k_hw *ah = sc->ah;
780 	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
781 	u8 mac[ETH_ALEN] = {};
782 	int ret;
783 
784 	ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device);
785 
786 	/*
787 	 * Check if the MAC has multi-rate retry support.
788 	 * We do this by trying to setup a fake extended
789 	 * descriptor.  MACs that don't have support will
790 	 * return false w/o doing anything.  MACs that do
791 	 * support it will return true w/o doing anything.
792 	 */
793 	ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
794 
795 	if (ret < 0)
796 		goto err;
797 	if (ret > 0)
798 		__set_bit(ATH_STAT_MRRETRY, sc->status);
799 
800 	/*
801 	 * Collect the channel list.  The 802.11 layer
802 	 * is responsible for filtering this list based
803 	 * on settings like the phy mode and regulatory
804 	 * domain restrictions.
805 	 */
806 	ret = ath5k_setup_bands(hw);
807 	if (ret) {
808 		ATH5K_ERR(sc, "can't get channels\n");
809 		goto err;
810 	}
811 
812 	/* NB: setup here so ath5k_rate_update is happy */
813 	if (test_bit(AR5K_MODE_11A, ah->ah_modes))
814 		ath5k_setcurmode(sc, AR5K_MODE_11A);
815 	else
816 		ath5k_setcurmode(sc, AR5K_MODE_11B);
817 
818 	/*
819 	 * Allocate tx+rx descriptors and populate the lists.
820 	 */
821 	ret = ath5k_desc_alloc(sc, pdev);
822 	if (ret) {
823 		ATH5K_ERR(sc, "can't allocate descriptors\n");
824 		goto err;
825 	}
826 
827 	/*
828 	 * Allocate hardware transmit queues: one queue for
829 	 * beacon frames and one data queue for each QoS
830 	 * priority.  Note that hw functions handle resetting
831 	 * these queues at the needed time.
832 	 */
833 	ret = ath5k_beaconq_setup(ah);
834 	if (ret < 0) {
835 		ATH5K_ERR(sc, "can't setup a beacon xmit queue\n");
836 		goto err_desc;
837 	}
838 	sc->bhalq = ret;
839 	sc->cabq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0);
840 	if (IS_ERR(sc->cabq)) {
841 		ATH5K_ERR(sc, "can't setup cab queue\n");
842 		ret = PTR_ERR(sc->cabq);
843 		goto err_bhal;
844 	}
845 
846 	sc->txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
847 	if (IS_ERR(sc->txq)) {
848 		ATH5K_ERR(sc, "can't setup xmit queue\n");
849 		ret = PTR_ERR(sc->txq);
850 		goto err_queues;
851 	}
852 
853 	tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
854 	tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
855 	tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
856 	tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
857 	tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
858 
859 	INIT_WORK(&sc->reset_work, ath5k_reset_work);
860 
861 	ret = ath5k_eeprom_read_mac(ah, mac);
862 	if (ret) {
863 		ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n",
864 			sc->pdev->device);
865 		goto err_queues;
866 	}
867 
868 	SET_IEEE80211_PERM_ADDR(hw, mac);
869 	/* All MAC address bits matter for ACKs */
870 	memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN);
871 	ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
872 
873 	regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
874 	ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
875 	if (ret) {
876 		ATH5K_ERR(sc, "can't initialize regulatory system\n");
877 		goto err_queues;
878 	}
879 
880 	ret = ieee80211_register_hw(hw);
881 	if (ret) {
882 		ATH5K_ERR(sc, "can't register ieee80211 hw\n");
883 		goto err_queues;
884 	}
885 
886 	if (!ath_is_world_regd(regulatory))
887 		regulatory_hint(hw->wiphy, regulatory->alpha2);
888 
889 	ath5k_init_leds(sc);
890 
891 	ath5k_sysfs_register(sc);
892 
893 	return 0;
894 err_queues:
895 	ath5k_txq_release(sc);
896 err_bhal:
897 	ath5k_hw_release_tx_queue(ah, sc->bhalq);
898 err_desc:
899 	ath5k_desc_free(sc, pdev);
900 err:
901 	return ret;
902 }
903 
904 static void
905 ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw)
906 {
907 	struct ath5k_softc *sc = hw->priv;
908 
909 	/*
910 	 * NB: the order of these is important:
911 	 * o call the 802.11 layer before detaching ath5k_hw to
912 	 *   ensure callbacks into the driver to delete global
913 	 *   key cache entries can be handled
914 	 * o reclaim the tx queue data structures after calling
915 	 *   the 802.11 layer as we'll get called back to reclaim
916 	 *   node state and potentially want to use them
917 	 * o the hal is called to clean up the tx queues, so detach
918 	 *   it last
919 	 * XXX: ??? detach ath5k_hw ???
920 	 * Other than that, it's straightforward...
921 	 */
922 	ieee80211_unregister_hw(hw);
923 	ath5k_desc_free(sc, pdev);
924 	ath5k_txq_release(sc);
925 	ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
926 	ath5k_unregister_leds(sc);
927 
928 	ath5k_sysfs_unregister(sc);
929 	/*
930 	 * NB: can't reclaim these until after ieee80211_ifdetach
931 	 * returns because we'll get called back to reclaim node
932 	 * state and potentially want to use them.
933 	 */
934 }
935 
936 
937 
938 
939 /********************\
940 * Channel/mode setup *
941 \********************/
942 
943 /*
944  * Convert IEEE channel number to MHz frequency.
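 * E.g. channel 6 -> 2437 MHz and channel 36 -> 5180 MHz via
 * ieee80211chan2mhz(); the "2212 + chan * 20" branch covers the
 * nonstandard numbers 15-26 (e.g. channel 16 -> 2532 MHz).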
945  */
946 static inline short
947 ath5k_ieee2mhz(short chan)
948 {
949 	if (chan <= 14 || chan >= 27)
950 		return ieee80211chan2mhz(chan);
951 	else
952 		return 2212 + chan * 20;
953 }
954 
955 /*
956  * Returns true for the channel numbers used without all_channels modparam.
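 * E.g. the (chan & 3) tests select the usual 20 MHz grid: channels
 * 1-14 at 2.4 GHz, then 36, 40, ..., 64, 100, 104, ..., 140 and
 * 149, 153, ..., 165 at 5 GHz.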
957  */
958 static bool ath5k_is_standard_channel(short chan)
959 {
960 	return ((chan <= 14) ||
961 		/* UNII 1,2 */
962 		((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
963 		/* midband */
964 		((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
965 		/* UNII-3 */
966 		((chan & 3) == 1 && chan >= 149 && chan <= 165));
967 }
968 
969 static unsigned int
970 ath5k_copy_channels(struct ath5k_hw *ah,
971 		struct ieee80211_channel *channels,
972 		unsigned int mode,
973 		unsigned int max)
974 {
975 	unsigned int i, count, size, chfreq, freq, ch;
976 
977 	if (!test_bit(mode, ah->ah_modes))
978 		return 0;
979 
980 	switch (mode) {
981 	case AR5K_MODE_11A:
982 	case AR5K_MODE_11A_TURBO:
983 		/* 1..220, but 2GHz frequencies are filtered by check_channel */
984 		size = 220;
985 		chfreq = CHANNEL_5GHZ;
986 		break;
987 	case AR5K_MODE_11B:
988 	case AR5K_MODE_11G:
989 	case AR5K_MODE_11G_TURBO:
990 		size = 26;
991 		chfreq = CHANNEL_2GHZ;
992 		break;
993 	default:
994 		ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n");
995 		return 0;
996 	}
997 
998 	for (i = 0, count = 0; i < size && max > 0; i++) {
999 		ch = i + 1;
1000 		freq = ath5k_ieee2mhz(ch);
1001 
1002 		/* Check if channel is supported by the chipset */
1003 		if (!ath5k_channel_ok(ah, freq, chfreq))
1004 			continue;
1005 
1006 		if (!modparam_all_channels && !ath5k_is_standard_channel(ch))
1007 			continue;
1008 
1009 		/* Write channel info and increment counter */
1010 		channels[count].center_freq = freq;
1011 		channels[count].band = (chfreq == CHANNEL_2GHZ) ?
1012 			IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
1013 		switch (mode) {
1014 		case AR5K_MODE_11A:
1015 		case AR5K_MODE_11G:
1016 			channels[count].hw_value = chfreq | CHANNEL_OFDM;
1017 			break;
1018 		case AR5K_MODE_11A_TURBO:
1019 		case AR5K_MODE_11G_TURBO:
1020 			channels[count].hw_value = chfreq |
1021 				CHANNEL_OFDM | CHANNEL_TURBO;
1022 			break;
1023 		case AR5K_MODE_11B:
1024 			channels[count].hw_value = CHANNEL_B;
1025 		}
1026 
1027 		count++;
1028 		max--;
1029 	}
1030 
1031 	return count;
1032 }
1033 
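/*
 * Editor's note: ath5k_setup_rate_idx() below builds the reverse map from
 * hardware rate codes back to indices into ath5k_rates, so status
 * processing can do O(1) lookups; e.g. ATH5K_RATE_CODE_11M and its
 * short-preamble variant both map back to bitrate index 3.
 */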
1034 static void
1035 ath5k_setup_rate_idx(struct ath5k_softc *sc, struct ieee80211_supported_band *b)
1036 {
1037 	u8 i;
1038 
1039 	for (i = 0; i < AR5K_MAX_RATES; i++)
1040 		sc->rate_idx[b->band][i] = -1;
1041 
1042 	for (i = 0; i < b->n_bitrates; i++) {
1043 		sc->rate_idx[b->band][b->bitrates[i].hw_value] = i;
1044 		if (b->bitrates[i].hw_value_short)
1045 			sc->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
1046 	}
1047 }
1048 
1049 static int
1050 ath5k_setup_bands(struct ieee80211_hw *hw)
1051 {
1052 	struct ath5k_softc *sc = hw->priv;
1053 	struct ath5k_hw *ah = sc->ah;
1054 	struct ieee80211_supported_band *sband;
1055 	int max_c, count_c = 0;
1056 	int i;
1057 
1058 	BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS);
1059 	max_c = ARRAY_SIZE(sc->channels);
1060 
1061 	/* 2GHz band */
1062 	sband = &sc->sbands[IEEE80211_BAND_2GHZ];
1063 	sband->band = IEEE80211_BAND_2GHZ;
1064 	sband->bitrates = &sc->rates[IEEE80211_BAND_2GHZ][0];
1065 
1066 	if (test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) {
1067 		/* G mode */
1068 		memcpy(sband->bitrates, &ath5k_rates[0],
1069 		       sizeof(struct ieee80211_rate) * 12);
1070 		sband->n_bitrates = 12;
1071 
1072 		sband->channels = sc->channels;
1073 		sband->n_channels = ath5k_copy_channels(ah, sband->channels,
1074 					AR5K_MODE_11G, max_c);
1075 
1076 		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
1077 		count_c = sband->n_channels;
1078 		max_c -= count_c;
1079 	} else if (test_bit(AR5K_MODE_11B, sc->ah->ah_capabilities.cap_mode)) {
1080 		/* B mode */
1081 		memcpy(sband->bitrates, &ath5k_rates[0],
1082 		       sizeof(struct ieee80211_rate) * 4);
1083 		sband->n_bitrates = 4;
1084 
1085 		/* 5211 only supports B rates and uses 4-bit rate codes
1086 		 * (e.g. normally we have 0x1B for 1M, but on 5211 we have 0x0B)
1087 		 * fix them up here:
1088 		 */
1089 		if (ah->ah_version == AR5K_AR5211) {
1090 			for (i = 0; i < 4; i++) {
1091 				sband->bitrates[i].hw_value =
1092 					sband->bitrates[i].hw_value & 0xF;
1093 				sband->bitrates[i].hw_value_short =
1094 					sband->bitrates[i].hw_value_short & 0xF;
1095 			}
1096 		}
1097 
1098 		sband->channels = sc->channels;
1099 		sband->n_channels = ath5k_copy_channels(ah, sband->channels,
1100 					AR5K_MODE_11B, max_c);
1101 
1102 		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
1103 		count_c = sband->n_channels;
1104 		max_c -= count_c;
1105 	}
1106 	ath5k_setup_rate_idx(sc, sband);
1107 
1108 	/* 5GHz band, A mode */
1109 	if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) {
1110 		sband = &sc->sbands[IEEE80211_BAND_5GHZ];
1111 		sband->band = IEEE80211_BAND_5GHZ;
1112 		sband->bitrates = &sc->rates[IEEE80211_BAND_5GHZ][0];
1113 
1114 		memcpy(sband->bitrates, &ath5k_rates[4],
1115 		       sizeof(struct ieee80211_rate) * 8);
1116 		sband->n_bitrates = 8;
1117 
1118 		sband->channels = &sc->channels[count_c];
1119 		sband->n_channels = ath5k_copy_channels(ah, sband->channels,
1120 					AR5K_MODE_11A, max_c);
1121 
1122 		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
1123 	}
1124 	ath5k_setup_rate_idx(sc, sband);
1125 
1126 	ath5k_debug_dump_bands(sc);
1127 
1128 	return 0;
1129 }
1130 
1131 /*
1132  * Set/change channels. We always reset the chip.
1133  * To accomplish this we must first clean up any pending DMA,
1134  * then restart things as in ath5k_init.
1135  *
1136  * Called with sc->lock held.
1137  */
1138 static int
1139 ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
1140 {
1141 	ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
1142 		  "channel set, resetting (%u -> %u MHz)\n",
1143 		  sc->curchan->center_freq, chan->center_freq);
1144 
1145 	/*
1146 	 * To switch channels clear any pending DMA operations;
1147 	 * wait long enough for the RX fifo to drain, reset the
1148 	 * hardware at the new frequency, and then re-enable
1149 	 * the relevant bits of the h/w.
1150 	 */
1151 	return ath5k_reset(sc, chan);
1152 }
1153 
1154 static void
1155 ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
1156 {
1157 	sc->curmode = mode;
1158 
1159 	if (mode == AR5K_MODE_11A) {
1160 		sc->curband = &sc->sbands[IEEE80211_BAND_5GHZ];
1161 	} else {
1162 		sc->curband = &sc->sbands[IEEE80211_BAND_2GHZ];
1163 	}
1164 }
1165 
1166 static void
1167 ath5k_mode_setup(struct ath5k_softc *sc)
1168 {
1169 	struct ath5k_hw *ah = sc->ah;
1170 	u32 rfilt;
1171 
1172 	/* configure rx filter */
1173 	rfilt = sc->filter_flags;
1174 	ath5k_hw_set_rx_filter(ah, rfilt);
1175 
1176 	if (ath5k_hw_hasbssidmask(ah))
1177 		ath5k_hw_set_bssid_mask(ah, sc->bssidmask);
1178 
1179 	/* configure operational mode */
1180 	ath5k_hw_set_opmode(ah, sc->opmode);
1181 
1182 	ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d\n", sc->opmode);
1183 	ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
1184 }
1185 
1186 static inline int
1187 ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
1188 {
1189 	int rix;
1190 
1191 	/* return base rate on errors */
1192 	if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
1193 			"hw_rix out of bounds: %x\n", hw_rix))
1194 		return 0;
1195 
1196 	rix = sc->rate_idx[sc->curband->band][hw_rix];
1197 	if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
1198 		rix = 0;
1199 
1200 	return rix;
1201 }
1202 
1203 /***************\
1204 * Buffers setup *
1205 \***************/
1206 
1207 static
1208 struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
1209 {
1210 	struct ath_common *common = ath5k_hw_common(sc->ah);
1211 	struct sk_buff *skb;
1212 
1213 	/*
1214 	 * Allocate buffer with headroom_needed space for the
1215 	 * fake physical layer header at the start.
1216 	 */
1217 	skb = ath_rxbuf_alloc(common,
1218 			      common->rx_bufsize,
1219 			      GFP_ATOMIC);
1220 
1221 	if (!skb) {
1222 		ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
1223 				common->rx_bufsize);
1224 		return NULL;
1225 	}
1226 
1227 	*skb_addr = pci_map_single(sc->pdev,
1228 				   skb->data, common->rx_bufsize,
1229 				   PCI_DMA_FROMDEVICE);
1230 	if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) {
1231 		ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
1232 		dev_kfree_skb(skb);
1233 		return NULL;
1234 	}
1235 	return skb;
1236 }
1237 
1238 static int
1239 ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1240 {
1241 	struct ath5k_hw *ah = sc->ah;
1242 	struct sk_buff *skb = bf->skb;
1243 	struct ath5k_desc *ds;
1244 	int ret;
1245 
1246 	if (!skb) {
1247 		skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
1248 		if (!skb)
1249 			return -ENOMEM;
1250 		bf->skb = skb;
1251 	}
1252 
1253 	/*
1254 	 * Setup descriptors.  For receive we always terminate
1255 	 * the descriptor list with a self-linked entry so we'll
1256 	 * not get overrun under high load (as can happen with a
1257 	 * 5212 when ANI processing enables PHY error frames).
1258 	 *
1259 	 * To ensure the last descriptor is self-linked we create
1260 	 * each descriptor as self-linked and add it to the end.  As
1261 	 * each additional descriptor is added the previous self-linked
1262 	 * entry is "fixed" naturally.  This should be safe even
1263 	 * if DMA is happening.  When processing RX interrupts we
1264 	 * never remove/process the last, self-linked, entry on the
1265 	 * descriptor list.  This ensures the hardware always has
1266 	 * someplace to write a new frame.
1267 	 */
1268 	ds = bf->desc;
1269 	ds->ds_link = bf->daddr;	/* link to self */
1270 	ds->ds_data = bf->skbaddr;
1271 	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
1272 	if (ret) {
1273 		ATH5K_ERR(sc, "%s: could not setup RX desc\n", __func__);
1274 		return ret;
1275 	}
1276 
1277 	if (sc->rxlink != NULL)
1278 		*sc->rxlink = bf->daddr;
1279 	sc->rxlink = &ds->ds_link;
1280 	return 0;
1281 }
1282 
1283 static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1284 {
1285 	struct ieee80211_hdr *hdr;
1286 	enum ath5k_pkt_type htype;
1287 	__le16 fc;
1288 
1289 	hdr = (struct ieee80211_hdr *)skb->data;
1290 	fc = hdr->frame_control;
1291 
1292 	if (ieee80211_is_beacon(fc))
1293 		htype = AR5K_PKT_TYPE_BEACON;
1294 	else if (ieee80211_is_probe_resp(fc))
1295 		htype = AR5K_PKT_TYPE_PROBE_RESP;
1296 	else if (ieee80211_is_atim(fc))
1297 		htype = AR5K_PKT_TYPE_ATIM;
1298 	else if (ieee80211_is_pspoll(fc))
1299 		htype = AR5K_PKT_TYPE_PSPOLL;
1300 	else
1301 		htype = AR5K_PKT_TYPE_NORMAL;
1302 
1303 	return htype;
1304 }
1305 
1306 static int
1307 ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
1308 		  struct ath5k_txq *txq, int padsize)
1309 {
1310 	struct ath5k_hw *ah = sc->ah;
1311 	struct ath5k_desc *ds = bf->desc;
1312 	struct sk_buff *skb = bf->skb;
1313 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1314 	unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
1315 	struct ieee80211_rate *rate;
1316 	unsigned int mrr_rate[3], mrr_tries[3];
1317 	int i, ret;
1318 	u16 hw_rate;
1319 	u16 cts_rate = 0;
1320 	u16 duration = 0;
1321 	u8 rc_flags;
1322 
1323 	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
1324 
1325 	/* XXX endianness */
1326 	bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len,
1327 			PCI_DMA_TODEVICE);
1328 
1329 	rate = ieee80211_get_tx_rate(sc->hw, info);
1330 
1331 	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
1332 		flags |= AR5K_TXDESC_NOACK;
1333 
1334 	rc_flags = info->control.rates[0].flags;
1335 	hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
1336 		rate->hw_value_short : rate->hw_value;
1337 
1338 	pktlen = skb->len;
1339 
1340 	/* FIXME: If we are in g mode and rate is a CCK rate
1341 	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
1342 	 * from tx power (value is in dB units already) */
1343 	if (info->control.hw_key) {
1344 		keyidx = info->control.hw_key->hw_key_idx;
1345 		pktlen += info->control.hw_key->icv_len;
1346 	}
1347 	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
1348 		flags |= AR5K_TXDESC_RTSENA;
1349 		cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
1350 		duration = le16_to_cpu(ieee80211_rts_duration(sc->hw,
1351 			sc->vif, pktlen, info));
1352 	}
1353 	if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
1354 		flags |= AR5K_TXDESC_CTSENA;
1355 		cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
1356 		duration = le16_to_cpu(ieee80211_ctstoself_duration(sc->hw,
1357 			sc->vif, pktlen, info));
1358 	}
1359 	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
1360 		ieee80211_get_hdrlen_from_skb(skb), padsize,
1361 		get_hw_packet_type(skb),
1362 		(sc->power_level * 2),
1363 		hw_rate,
1364 		info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
1365 		cts_rate, duration);
1366 	if (ret)
1367 		goto err_unmap;
1368 
1369 	memset(mrr_rate, 0, sizeof(mrr_rate));
1370 	memset(mrr_tries, 0, sizeof(mrr_tries));
1371 	for (i = 0; i < 3; i++) {
1372 		rate = ieee80211_get_alt_retry_rate(sc->hw, info, i);
1373 		if (!rate)
1374 			break;
1375 
1376 		mrr_rate[i] = rate->hw_value;
1377 		mrr_tries[i] = info->control.rates[i + 1].count;
1378 	}
1379 
1380 	ath5k_hw_setup_mrr_tx_desc(ah, ds,
1381 		mrr_rate[0], mrr_tries[0],
1382 		mrr_rate[1], mrr_tries[1],
1383 		mrr_rate[2], mrr_tries[2]);
1384 
1385 	ds->ds_link = 0;
1386 	ds->ds_data = bf->skbaddr;
1387 
1388 	spin_lock_bh(&txq->lock);
1389 	list_add_tail(&bf->list, &txq->q);
1390 	if (txq->link == NULL) /* is this first packet? */
1391 		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
1392 	else /* no, so only link it */
1393 		*txq->link = bf->daddr;
1394 
1395 	txq->link = &ds->ds_link;
1396 	ath5k_hw_start_tx_dma(ah, txq->qnum);
1397 	mmiowb();
1398 	spin_unlock_bh(&txq->lock);
1399 
1400 	return 0;
1401 err_unmap:
1402 	pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE);
1403 	return ret;
1404 }
1405 
1406 /*******************\
1407 * Descriptors setup *
1408 \*******************/
1409 
1410 static int
1411 ath5k_desc_alloc(struct ath5k_softc *sc, struct pci_dev *pdev)
1412 {
1413 	struct ath5k_desc *ds;
1414 	struct ath5k_buf *bf;
1415 	dma_addr_t da;
1416 	unsigned int i;
1417 	int ret;
1418 
1419 	/* allocate descriptors */
1420 	sc->desc_len = sizeof(struct ath5k_desc) *
1421 			(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);
1422 	sc->desc = pci_alloc_consistent(pdev, sc->desc_len, &sc->desc_daddr);
1423 	if (sc->desc == NULL) {
1424 		ATH5K_ERR(sc, "can't allocate descriptors\n");
1425 		ret = -ENOMEM;
1426 		goto err;
1427 	}
1428 	ds = sc->desc;
1429 	da = sc->desc_daddr;
1430 	ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
1431 		ds, sc->desc_len, (unsigned long long)sc->desc_daddr);
1432 
1433 	bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
1434 			sizeof(struct ath5k_buf), GFP_KERNEL);
1435 	if (bf == NULL) {
1436 		ATH5K_ERR(sc, "can't allocate bufptr\n");
1437 		ret = -ENOMEM;
1438 		goto err_free;
1439 	}
1440 	sc->bufptr = bf;
1441 
1442 	INIT_LIST_HEAD(&sc->rxbuf);
1443 	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
1444 		bf->desc = ds;
1445 		bf->daddr = da;
1446 		list_add_tail(&bf->list, &sc->rxbuf);
1447 	}
1448 
1449 	INIT_LIST_HEAD(&sc->txbuf);
1450 	sc->txbuf_len = ATH_TXBUF;
1451 	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++,
1452 			da += sizeof(*ds)) {
1453 		bf->desc = ds;
1454 		bf->daddr = da;
1455 		list_add_tail(&bf->list, &sc->txbuf);
1456 	}
1457 
1458 	/* beacon buffer */
1459 	bf->desc = ds;
1460 	bf->daddr = da;
1461 	sc->bbuf = bf;
1462 
1463 	return 0;
1464 err_free:
1465 	pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
1466 err:
1467 	sc->desc = NULL;
1468 	return ret;
1469 }
1470 
1471 static void
1472 ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev)
1473 {
1474 	struct ath5k_buf *bf;
1475 
1476 	ath5k_txbuf_free_skb(sc, sc->bbuf);
1477 	list_for_each_entry(bf, &sc->txbuf, list)
1478 		ath5k_txbuf_free_skb(sc, bf);
1479 	list_for_each_entry(bf, &sc->rxbuf, list)
1480 		ath5k_rxbuf_free_skb(sc, bf);
1481 
1482 	/* Free memory associated with all descriptors */
1483 	pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
1484 	sc->desc = NULL;
1485 	sc->desc_daddr = 0;
1486 
1487 	kfree(sc->bufptr);
1488 	sc->bufptr = NULL;
1489 	sc->bbuf = NULL;
1490 }
1491 
1492 
1493 
1494 
1495 
1496 /**************\
1497 * Queues setup *
1498 \**************/
1499 
1500 static struct ath5k_txq *
1501 ath5k_txq_setup(struct ath5k_softc *sc,
1502 		int qtype, int subtype)
1503 {
1504 	struct ath5k_hw *ah = sc->ah;
1505 	struct ath5k_txq *txq;
1506 	struct ath5k_txq_info qi = {
1507 		.tqi_subtype = subtype,
1508 		.tqi_aifs = AR5K_TXQ_USEDEFAULT,
1509 		.tqi_cw_min = AR5K_TXQ_USEDEFAULT,
1510 		.tqi_cw_max = AR5K_TXQ_USEDEFAULT
1511 	};
1512 	int qnum;
1513 
1514 	/*
1515 	 * Enable interrupts only for EOL and DESC conditions.
1516 	 * We mark tx descriptors to receive a DESC interrupt
1517 	 * when a tx queue gets deep; otherwise waiting for the
1518 	 * when a tx queue gets deep; otherwise we wait for the
1519 	 * EOL to reap descriptors.  Note that this is done to
1520 	 * descriptors, never transmitting frames.  Aside from
1521 	 * reducing interrupts this also permits more concurrency.
1522 	 * The only potential downside is if the tx queue backs
1523 	 * up, in which case the top half of the kernel may back up
1524 	 * due to a lack of tx descriptors.
1525 	 */
1526 	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
1527 				AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
1528 	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
1529 	if (qnum < 0) {
1530 		/*
1531 		 * NB: don't print a message, this happens
1532 		 * normally on parts with too few tx queues
1533 		 */
1534 		return ERR_PTR(qnum);
1535 	}
1536 	if (qnum >= ARRAY_SIZE(sc->txqs)) {
1537 		ATH5K_ERR(sc, "hw qnum %u out of range, max %tu!\n",
1538 			qnum, ARRAY_SIZE(sc->txqs));
1539 		ath5k_hw_release_tx_queue(ah, qnum);
1540 		return ERR_PTR(-EINVAL);
1541 	}
1542 	txq = &sc->txqs[qnum];
1543 	if (!txq->setup) {
1544 		txq->qnum = qnum;
1545 		txq->link = NULL;
1546 		INIT_LIST_HEAD(&txq->q);
1547 		spin_lock_init(&txq->lock);
1548 		txq->setup = true;
1549 	}
1550 	return &sc->txqs[qnum];
1551 }
1552 
1553 static int
1554 ath5k_beaconq_setup(struct ath5k_hw *ah)
1555 {
1556 	struct ath5k_txq_info qi = {
1557 		.tqi_aifs = AR5K_TXQ_USEDEFAULT,
1558 		.tqi_cw_min = AR5K_TXQ_USEDEFAULT,
1559 		.tqi_cw_max = AR5K_TXQ_USEDEFAULT,
1560 		/* NB: for dynamic turbo, don't enable any other interrupts */
1561 		.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
1562 	};
1563 
1564 	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
1565 }
1566 
1567 static int
1568 ath5k_beaconq_config(struct ath5k_softc *sc)
1569 {
1570 	struct ath5k_hw *ah = sc->ah;
1571 	struct ath5k_txq_info qi;
1572 	int ret;
1573 
1574 	ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
1575 	if (ret)
1576 		goto err;
1577 
1578 	if (sc->opmode == NL80211_IFTYPE_AP ||
1579 		sc->opmode == NL80211_IFTYPE_MESH_POINT) {
1580 		/*
1581 		 * Always burst out beacon and CAB traffic
1582 		 * (aifs = cwmin = cwmax = 0)
1583 		 */
1584 		qi.tqi_aifs = 0;
1585 		qi.tqi_cw_min = 0;
1586 		qi.tqi_cw_max = 0;
1587 	} else if (sc->opmode == NL80211_IFTYPE_ADHOC) {
1588 		/*
1589 		 * Adhoc mode; backoff between 0 and (2 * cw_min).
1590 		 */
1591 		qi.tqi_aifs = 0;
1592 		qi.tqi_cw_min = 0;
1593 		qi.tqi_cw_max = 2 * ah->ah_cw_min;
1594 	}
1595 
1596 	ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
1597 		"beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
1598 		qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);
1599 
1600 	ret = ath5k_hw_set_tx_queueprops(ah, sc->bhalq, &qi);
1601 	if (ret) {
1602 		ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
1603 			"hardware queue!\n", __func__);
1604 		goto err;
1605 	}
1606 	ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */
1607 	if (ret)
1608 		goto err;
1609 
1610 	/* reconfigure cabq with ready time to 80% of beacon_interval */
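	/* e.g. the default bintval of 1000 set in ath5k_pci_probe() gives 800 */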
1611 	ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
1612 	if (ret)
1613 		goto err;
1614 
1615 	qi.tqi_ready_time = (sc->bintval * 80) / 100;
1616 	ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
1617 	if (ret)
1618 		goto err;
1619 
1620 	ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
1621 err:
1622 	return ret;
1623 }
1624 
1625 static void
1626 ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1627 {
1628 	struct ath5k_buf *bf, *bf0;
1629 
1630 	/*
1631 	 * NB: this assumes output has been stopped and
1632 	 *     we do not need to block ath5k_tx_tasklet
1633 	 */
1634 	spin_lock_bh(&txq->lock);
1635 	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
1636 		ath5k_debug_printtxbuf(sc, bf);
1637 
1638 		ath5k_txbuf_free_skb(sc, bf);
1639 
1640 		spin_lock_bh(&sc->txbuflock);
1641 		list_move_tail(&bf->list, &sc->txbuf);
1642 		sc->txbuf_len++;
1643 		spin_unlock_bh(&sc->txbuflock);
1644 	}
1645 	txq->link = NULL;
1646 	spin_unlock_bh(&txq->lock);
1647 }
1648 
1649 /*
1650  * Drain the transmit queues and reclaim resources.
1651  */
1652 static void
1653 ath5k_txq_cleanup(struct ath5k_softc *sc)
1654 {
1655 	struct ath5k_hw *ah = sc->ah;
1656 	unsigned int i;
1657 
1658 	/* XXX return value */
1659 	if (likely(!test_bit(ATH_STAT_INVALID, sc->status))) {
1660 		/* don't touch the hardware if marked invalid */
1661 		ath5k_hw_stop_tx_dma(ah, sc->bhalq);
1662 		ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "beacon queue %x\n",
1663 			ath5k_hw_get_txdp(ah, sc->bhalq));
1664 		for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
1665 			if (sc->txqs[i].setup) {
1666 				ath5k_hw_stop_tx_dma(ah, sc->txqs[i].qnum);
1667 				ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "txq [%u] %x, "
1668 					"link %p\n",
1669 					sc->txqs[i].qnum,
1670 					ath5k_hw_get_txdp(ah,
1671 							sc->txqs[i].qnum),
1672 					sc->txqs[i].link);
1673 			}
1674 	}
1675 
1676 	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
1677 		if (sc->txqs[i].setup)
1678 			ath5k_txq_drainq(sc, &sc->txqs[i]);
1679 }
1680 
1681 static void
1682 ath5k_txq_release(struct ath5k_softc *sc)
1683 {
1684 	struct ath5k_txq *txq = sc->txqs;
1685 	unsigned int i;
1686 
1687 	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++, txq++)
1688 		if (txq->setup) {
1689 			ath5k_hw_release_tx_queue(sc->ah, txq->qnum);
1690 			txq->setup = false;
1691 		}
1692 }
1693 
1694 
1695 
1696 
1697 /*************\
1698 * RX Handling *
1699 \*************/
1700 
1701 /*
1702  * Enable the receive h/w following a reset.
1703  */
1704 static int
1705 ath5k_rx_start(struct ath5k_softc *sc)
1706 {
1707 	struct ath5k_hw *ah = sc->ah;
1708 	struct ath_common *common = ath5k_hw_common(ah);
1709 	struct ath5k_buf *bf;
1710 	int ret;
1711 
1712 	common->rx_bufsize = roundup(IEEE80211_MAX_LEN, common->cachelsz);
1713 
1714 	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
1715 		  common->cachelsz, common->rx_bufsize);
1716 
1717 	spin_lock_bh(&sc->rxbuflock);
1718 	sc->rxlink = NULL;
1719 	list_for_each_entry(bf, &sc->rxbuf, list) {
1720 		ret = ath5k_rxbuf_setup(sc, bf);
1721 		if (ret != 0) {
1722 			spin_unlock_bh(&sc->rxbuflock);
1723 			goto err;
1724 		}
1725 	}
1726 	bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
1727 	ath5k_hw_set_rxdp(ah, bf->daddr);
1728 	spin_unlock_bh(&sc->rxbuflock);
1729 
1730 	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
1731 	ath5k_mode_setup(sc);		/* set filters, etc. */
1732 	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */
1733 
1734 	return 0;
1735 err:
1736 	return ret;
1737 }
1738 
1739 /*
1740  * Disable the receive h/w in preparation for a reset.
1741  */
1742 static void
1743 ath5k_rx_stop(struct ath5k_softc *sc)
1744 {
1745 	struct ath5k_hw *ah = sc->ah;
1746 
1747 	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */
1748 	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
1749 	ath5k_hw_stop_rx_dma(ah);	/* disable DMA engine */
1750 
1751 	ath5k_debug_printrxbuffs(sc, ah);
1752 }
1753 
1754 static unsigned int
1755 ath5k_rx_decrypted(struct ath5k_softc *sc, struct sk_buff *skb,
1756 		   struct ath5k_rx_status *rs)
1757 {
1758 	struct ath5k_hw *ah = sc->ah;
1759 	struct ath_common *common = ath5k_hw_common(ah);
1760 	struct ieee80211_hdr *hdr = (void *)skb->data;
1761 	unsigned int keyix, hlen;
1762 
1763 	if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
1764 			rs->rs_keyix != AR5K_RXKEYIX_INVALID)
1765 		return RX_FLAG_DECRYPTED;
1766 
1767 	/* Apparently when a default key is used to decrypt the packet
1768 	   the hw does not set the index used to decrypt.  In such cases
1769 	   get the index from the packet. */
1770 	hlen = ieee80211_hdrlen(hdr->frame_control);
1771 	if (ieee80211_has_protected(hdr->frame_control) &&
1772 	    !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
1773 	    skb->len >= hlen + 4) {
1774 		keyix = skb->data[hlen + 3] >> 6;
1775 
1776 		if (test_bit(keyix, common->keymap))
1777 			return RX_FLAG_DECRYPTED;
1778 	}
1779 
1780 	return 0;
1781 }
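
/*
 * Worked example for the default-key lookup above (illustrative):
 * per IEEE 802.11 the key ID occupies the two most significant bits
 * of the fourth IV octet, which is what the shift above extracts.
 * For an IV whose fourth octet is 0x40 (binary 01000000) we get
 *
 *	keyix = 0x40 >> 6 = 1
 *
 * and default key slot 1 is checked in common->keymap.
 */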
1782 
1783 
1784 static void
1785 ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
1786 		     struct ieee80211_rx_status *rxs)
1787 {
1788 	struct ath_common *common = ath5k_hw_common(sc->ah);
1789 	u64 tsf, bc_tstamp;
1790 	u32 hw_tu;
1791 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
1792 
1793 	if (ieee80211_is_beacon(mgmt->frame_control) &&
1794 	    le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
1795 	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) == 0) {
1796 		/*
1797 		 * Received an IBSS beacon with the same BSSID. Hardware *must*
1798 		 * have updated the local TSF. We have to work around various
1799 		 * hardware bugs, though...
1800 		 */
1801 		tsf = ath5k_hw_get_tsf64(sc->ah);
1802 		bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
1803 		hw_tu = TSF_TO_TU(tsf);
1804 
1805 		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
1806 			"beacon %llx mactime %llx (diff %lld) tsf now %llx\n",
1807 			(unsigned long long)bc_tstamp,
1808 			(unsigned long long)rxs->mactime,
1809 			(unsigned long long)(rxs->mactime - bc_tstamp),
1810 			(unsigned long long)tsf);
1811 
1812 		/*
1813 		 * Sometimes the HW will give us a wrong tstamp in the rx
1814 		 * status, causing the timestamp extension to go wrong.
1815 		 * (This seems to happen especially with beacon frames bigger
1816 		 * than 78 bytes (incl. FCS).)
1817 		 * But we know that the receive timestamp must be later than the
1818 		 * timestamp of the beacon since HW must have synced to that.
1819 		 *
1820 		 * NOTE: here we assume mactime to be at the end of the frame,
1821 		 * unlike mac80211, which defines it at the start.
1822 		 */
1823 		if (bc_tstamp > rxs->mactime) {
1824 			ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
1825 				"fixing mactime from %llx to %llx\n",
1826 				(unsigned long long)rxs->mactime,
1827 				(unsigned long long)tsf);
1828 			rxs->mactime = tsf;
1829 		}
1830 
1831 		/*
1832 		 * Local TSF might have moved higher than our beacon timers,
1833 		 * in that case we have to update them to continue sending
1834 		 * beacons. This also takes care of synchronizing beacon sending
1835 		 * times with other stations.
1836 		 */
1837 		if (hw_tu >= sc->nexttbtt)
1838 			ath5k_beacon_update_timers(sc, bc_tstamp);
1839 	}
1840 }
1841 
1842 static void
1843 ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
1844 {
1845 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
1846 	struct ath5k_hw *ah = sc->ah;
1847 	struct ath_common *common = ath5k_hw_common(ah);
1848 
1849 	/* only beacons from our BSSID */
1850 	if (!ieee80211_is_beacon(mgmt->frame_control) ||
1851 	    memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
1852 		return;
1853 
1854 	ah->ah_beacon_rssi_avg = ath5k_moving_average(ah->ah_beacon_rssi_avg,
1855 						      rssi);
1856 
1857 	/* in IBSS mode we should keep RSSI statistics per neighbour */
1858 	/* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
1859 }
1860 
1861 /*
1862  * Compute padding position. skb must contain an IEEE 802.11 frame.
1863  */
1864 static int ath5k_common_padpos(struct sk_buff *skb)
1865 {
1866 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1867 	__le16 frame_control = hdr->frame_control;
1868 	int padpos = 24;
1869 
1870 	if (ieee80211_has_a4(frame_control)) {
1871 		padpos += ETH_ALEN;
1872 	}
1873 	if (ieee80211_is_data_qos(frame_control)) {
1874 		padpos += IEEE80211_QOS_CTL_LEN;
1875 	}
1876 
1877 	return padpos;
1878 }
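
/*
 * Worked example (illustrative): a three-address QoS data frame has a
 * 24 + 2 = 26 byte header, so padpos = 26 and the padding computed by
 * the callers below is 26 & 3 = 2 bytes.  A four-address (WDS) QoS
 * frame has padpos = 24 + 6 + 2 = 32, which is already 4-byte aligned,
 * so no padding is needed.
 */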
1879 
1880 /*
1881  * This function expects an 802.11 frame and returns the number of
1882  * bytes added, or -1 if we don't have enough headroom.
1883  */
1884 
1885 static int ath5k_add_padding(struct sk_buff *skb)
1886 {
1887 	int padpos = ath5k_common_padpos(skb);
1888 	int padsize = padpos & 3;
1889 
1890 	if (padsize && skb->len > padpos) {
1891 
1892 		if (skb_headroom(skb) < padsize)
1893 			return -1;
1894 
1895 		skb_push(skb, padsize);
1896 		memmove(skb->data, skb->data + padsize, padpos);
1897 		return padsize;
1898 	}
1899 
1900 	return 0;
1901 }
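
/*
 * Sketch of the buffer transformation above (illustrative): with
 * padpos = 26 and padsize = 2, skb_push() grows the skb by two bytes
 * at the front and memmove() copies the 26 header bytes back to the
 * new start, leaving a two-byte gap between header and payload:
 *
 *	before:	[hdr (26)][payload]
 *	after:	[hdr (26)][pad (2)][payload]
 *
 * ath5k_remove_padding() below performs the inverse operation.
 */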
1902 
1903 /*
1904  * This function expects an 802.11 frame and returns the number of
1905  * bytes removed.
1906  */
1907 
1908 static int ath5k_remove_padding(struct sk_buff *skb)
1909 {
1910 	int padpos = ath5k_common_padpos(skb);
1911 	int padsize = padpos & 3;
1912 
1913 	if (padsize && skb->len >= padpos + padsize) {
1914 		memmove(skb->data + padsize, skb->data, padpos);
1915 		skb_pull(skb, padsize);
1916 		return padsize;
1917 	}
1918 
1919 	return 0;
1920 }
1921 
1922 static void
1923 ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
1924 		    struct ath5k_rx_status *rs)
1925 {
1926 	struct ieee80211_rx_status *rxs;
1927 
1928 	/* The MAC header is padded to have 32-bit boundary if the
1929 	 * packet payload is non-zero. The general calculation for
1930 	 * padsize would take into account odd header lengths:
1931 	 * padsize = (4 - hdrlen % 4) % 4; However, since only
1932 	 * even-length headers are used, padding can only be 0 or 2
1933 	 * bytes and we can optimize this a bit. In addition, we must
1934 	 * not try to remove padding from short control frames that do
1935 	 * not have payload. */
1936 	ath5k_remove_padding(skb);
1937 
1938 	rxs = IEEE80211_SKB_RXCB(skb);
1939 
1940 	rxs->flag = 0;
1941 	if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
1942 		rxs->flag |= RX_FLAG_MMIC_ERROR;
1943 
1944 	/*
1945 	 * always extend the mac timestamp, since this information is
1946 	 * also needed for proper IBSS merging.
1947 	 *
1948 	 * XXX: it might be too late to do it here, since rs_tstamp is
1949 	 * only 15 bits wide. That means TSF extension has to be done within
1950 	 * 32768usec (about 32ms). It might be necessary to move this to
1951 	 * the interrupt handler, like it is done in madwifi.
1952 	 *
1953 	 * Unfortunately we don't know when the hardware takes the rx
1954 	 * timestamp (beginning of phy frame, data frame, end of rx?).
1955 	 * The only thing we know is that it is hardware specific...
1956 	 * On AR5213 it seems the rx timestamp is at the end of the
1957 	 * frame, but I'm not sure.
1958 	 *
1959 	 * NOTE: mac80211 defines mactime at the beginning of the first
1960 	 * data symbol. Since we don't have any time references it's
1961 	 * impossible to comply to that. This affects IBSS merge only
1962 	 * right now, so it's not too bad...
1963 	 */
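	/*
	 * A sketch of the extension logic (assumed to match the
	 * ath5k_extend_tsf() helper): splice the 15-bit rx timestamp into
	 * the low bits of the current 64-bit TSF, stepping the TSF back by
	 * one 15-bit period if its low bits have already wrapped past the
	 * frame's timestamp:
	 *
	 *	if ((tsf & 0x7fff) < rstamp)
	 *		tsf -= 0x8000;
	 *	return (tsf & ~0x7fff) | rstamp;
	 */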
1964 	rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp);
1965 	rxs->flag |= RX_FLAG_TSFT;
1966 
1967 	rxs->freq = sc->curchan->center_freq;
1968 	rxs->band = sc->curband->band;
1969 
1970 	rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi;
1971 
1972 	rxs->antenna = rs->rs_antenna;
1973 
1974 	if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
1975 		sc->stats.antenna_rx[rs->rs_antenna]++;
1976 	else
1977 		sc->stats.antenna_rx[0]++; /* invalid */
1978 
1979 	rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs->rs_rate);
1980 	rxs->flag |= ath5k_rx_decrypted(sc, skb, rs);
1981 
1982 	if (rxs->rate_idx >= 0 && rs->rs_rate ==
1983 	    sc->curband->bitrates[rxs->rate_idx].hw_value_short)
1984 		rxs->flag |= RX_FLAG_SHORTPRE;
1985 
1986 	ath5k_debug_dump_skb(sc, skb, "RX  ", 0);
1987 
1988 	ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi);
1989 
1990 	/* check beacons in IBSS mode */
1991 	if (sc->opmode == NL80211_IFTYPE_ADHOC)
1992 		ath5k_check_ibss_tsf(sc, skb, rxs);
1993 
1994 	ieee80211_rx(sc->hw, skb);
1995 }
1996 
1997 /** ath5k_frame_receive_ok() - Do we want to receive this frame or not?
1998  *
1999  * Check if we want to further process this frame or not. Also update
2000  * statistics. Return true if we want this frame, false if not.
2001  */
2002 static bool
2003 ath5k_receive_frame_ok(struct ath5k_softc *sc, struct ath5k_rx_status *rs)
2004 {
2005 	sc->stats.rx_all_count++;
2006 
2007 	if (unlikely(rs->rs_status)) {
2008 		if (rs->rs_status & AR5K_RXERR_CRC)
2009 			sc->stats.rxerr_crc++;
2010 		if (rs->rs_status & AR5K_RXERR_FIFO)
2011 			sc->stats.rxerr_fifo++;
2012 		if (rs->rs_status & AR5K_RXERR_PHY) {
2013 			sc->stats.rxerr_phy++;
2014 			if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
2015 				sc->stats.rxerr_phy_code[rs->rs_phyerr]++;
2016 			return false;
2017 		}
2018 		if (rs->rs_status & AR5K_RXERR_DECRYPT) {
2019 			/*
2020 			 * Decrypt error.  If the error occurred
2021 			 * because there was no hardware key, then
2022 			 * let the frame through so the upper layers
2023 			 * can process it.  This is necessary for 5210
2024 			 * parts which have no way to setup a ``clear''
2025 			 * key cache entry.
2026 			 *
2027 			 * XXX do key cache faulting
2028 			 */
2029 			sc->stats.rxerr_decrypt++;
2030 			if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
2031 			    !(rs->rs_status & AR5K_RXERR_CRC))
2032 				return true;
2033 		}
2034 		if (rs->rs_status & AR5K_RXERR_MIC) {
2035 			sc->stats.rxerr_mic++;
2036 			return true;
2037 		}
2038 
2039 		/* let crypto-error packets fall through in MNTR */
2040 		if ((rs->rs_status & ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) ||
2041 		    sc->opmode != NL80211_IFTYPE_MONITOR)
2042 			return false;
2043 	}
2044 
2045 	if (unlikely(rs->rs_more)) {
2046 		sc->stats.rxerr_jumbo++;
2047 		return false;
2048 	}
2049 	return true;
2050 }
2051 
2052 static void
2053 ath5k_tasklet_rx(unsigned long data)
2054 {
2055 	struct ath5k_rx_status rs = {};
2056 	struct sk_buff *skb, *next_skb;
2057 	dma_addr_t next_skb_addr;
2058 	struct ath5k_softc *sc = (void *)data;
2059 	struct ath5k_hw *ah = sc->ah;
2060 	struct ath_common *common = ath5k_hw_common(ah);
2061 	struct ath5k_buf *bf;
2062 	struct ath5k_desc *ds;
2063 	int ret;
2064 
2065 	spin_lock(&sc->rxbuflock);
2066 	if (list_empty(&sc->rxbuf)) {
2067 		ATH5K_WARN(sc, "empty rx buf pool\n");
2068 		goto unlock;
2069 	}
2070 	do {
2071 		bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
2072 		BUG_ON(bf->skb == NULL);
2073 		skb = bf->skb;
2074 		ds = bf->desc;
2075 
2076 		/* bail if HW is still using self-linked descriptor */
2077 		if (ath5k_hw_get_rxdp(sc->ah) == bf->daddr)
2078 			break;
2079 
2080 		ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs);
2081 		if (unlikely(ret == -EINPROGRESS))
2082 			break;
2083 		else if (unlikely(ret)) {
2084 			ATH5K_ERR(sc, "error in processing rx descriptor\n");
2085 			sc->stats.rxerr_proc++;
2086 			break;
2087 		}
2088 
2089 		if (ath5k_receive_frame_ok(sc, &rs)) {
2090 			next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
2091 
2092 			/*
2093 			 * If we can't replace bf->skb with a new skb under
2094 			 * memory pressure, just skip this packet
2095 			 */
2096 			if (!next_skb)
2097 				goto next;
2098 
2099 			pci_unmap_single(sc->pdev, bf->skbaddr,
2100 					 common->rx_bufsize,
2101 					 PCI_DMA_FROMDEVICE);
2102 
2103 			skb_put(skb, rs.rs_datalen);
2104 
2105 			ath5k_receive_frame(sc, skb, &rs);
2106 
2107 			bf->skb = next_skb;
2108 			bf->skbaddr = next_skb_addr;
2109 		}
2110 next:
2111 		list_move_tail(&bf->list, &sc->rxbuf);
2112 	} while (ath5k_rxbuf_setup(sc, bf) == 0);
2113 unlock:
2114 	spin_unlock(&sc->rxbuflock);
2115 }
2116 
2117 
2118 /*************\
2119 * TX Handling *
2120 \*************/
2121 
2122 static void
2123 ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
2124 {
2125 	struct ath5k_tx_status ts = {};
2126 	struct ath5k_buf *bf, *bf0;
2127 	struct ath5k_desc *ds;
2128 	struct sk_buff *skb;
2129 	struct ieee80211_tx_info *info;
2130 	int i, ret;
2131 
2132 	spin_lock(&txq->lock);
2133 	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
2134 		ds = bf->desc;
2135 
2136 		/*
2137 		 * It's possible that the hardware can say the buffer is
2138 		 * completed when it hasn't yet loaded the ds_link from
2139 		 * host memory and moved on.  If there are more TX
2140 		 * descriptors in the queue, wait for TXDP to change
2141 		 * before processing this one.
2142 		 */
2143 		if (ath5k_hw_get_txdp(sc->ah, txq->qnum) == bf->daddr &&
2144 		    !list_is_last(&bf->list, &txq->q))
2145 			break;
2146 
2147 		ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
2148 		if (unlikely(ret == -EINPROGRESS))
2149 			break;
2150 		else if (unlikely(ret)) {
2151 			ATH5K_ERR(sc, "error %d while processing queue %u\n",
2152 				ret, txq->qnum);
2153 			break;
2154 		}
2155 
2156 		sc->stats.tx_all_count++;
2157 		skb = bf->skb;
2158 		info = IEEE80211_SKB_CB(skb);
2159 		bf->skb = NULL;
2160 
2161 		pci_unmap_single(sc->pdev, bf->skbaddr, skb->len,
2162 				PCI_DMA_TODEVICE);
2163 
2164 		ieee80211_tx_info_clear_status(info);
2165 		for (i = 0; i < 4; i++) {
2166 			struct ieee80211_tx_rate *r =
2167 				&info->status.rates[i];
2168 
2169 			if (ts.ts_rate[i]) {
2170 				r->idx = ath5k_hw_to_driver_rix(sc, ts.ts_rate[i]);
2171 				r->count = ts.ts_retry[i];
2172 			} else {
2173 				r->idx = -1;
2174 				r->count = 0;
2175 			}
2176 		}
2177 
2178 		/* count the successful attempt as well */
2179 		info->status.rates[ts.ts_final_idx].count++;
2180 
2181 		if (unlikely(ts.ts_status)) {
2182 			sc->stats.ack_fail++;
2183 			if (ts.ts_status & AR5K_TXERR_FILT) {
2184 				info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2185 				sc->stats.txerr_filt++;
2186 			}
2187 			if (ts.ts_status & AR5K_TXERR_XRETRY)
2188 				sc->stats.txerr_retry++;
2189 			if (ts.ts_status & AR5K_TXERR_FIFO)
2190 				sc->stats.txerr_fifo++;
2191 		} else {
2192 			info->flags |= IEEE80211_TX_STAT_ACK;
2193 			info->status.ack_signal = ts.ts_rssi;
2194 		}
2195 
2196 		/*
2197 		 * Remove MAC header padding before giving the frame
2198 		 * back to mac80211.
2199 		 */
2200 		ath5k_remove_padding(skb);
2201 
2202 		if (ts.ts_antenna > 0 && ts.ts_antenna < 5)
2203 			sc->stats.antenna_tx[ts.ts_antenna]++;
2204 		else
2205 			sc->stats.antenna_tx[0]++; /* invalid */
2206 
2207 		ieee80211_tx_status(sc->hw, skb);
2208 
2209 		spin_lock(&sc->txbuflock);
2210 		list_move_tail(&bf->list, &sc->txbuf);
2211 		sc->txbuf_len++;
2212 		spin_unlock(&sc->txbuflock);
2213 	}
2214 	if (likely(list_empty(&txq->q)))
2215 		txq->link = NULL;
2216 	spin_unlock(&txq->lock);
2217 	if (sc->txbuf_len > ATH_TXBUF / 5)
2218 		ieee80211_wake_queues(sc->hw);
2219 }
2220 
2221 static void
2222 ath5k_tasklet_tx(unsigned long data)
2223 {
2224 	int i;
2225 	struct ath5k_softc *sc = (void *)data;
2226 
2227 	for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
2228 		if (sc->txqs[i].setup && (sc->ah->ah_txq_isr & BIT(i)))
2229 			ath5k_tx_processq(sc, &sc->txqs[i]);
2230 }
2231 
2232 
2233 /*****************\
2234 * Beacon handling *
2235 \*****************/
2236 
2237 /*
2238  * Setup the beacon frame for transmit.
2239  */
2240 static int
2241 ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
2242 {
2243 	struct sk_buff *skb = bf->skb;
2244 	struct	ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2245 	struct ath5k_hw *ah = sc->ah;
2246 	struct ath5k_desc *ds;
2247 	int ret = 0;
2248 	u8 antenna;
2249 	u32 flags;
2250 	const int padsize = 0;
2251 
2252 	bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len,
2253 			PCI_DMA_TODEVICE);
2254 	ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
2255 			"skbaddr %llx\n", skb, skb->data, skb->len,
2256 			(unsigned long long)bf->skbaddr);
2257 	if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) {
2258 		ATH5K_ERR(sc, "beacon DMA mapping failed\n");
2259 		return -EIO;
2260 	}
2261 
2262 	ds = bf->desc;
2263 	antenna = ah->ah_tx_ant;
2264 
2265 	flags = AR5K_TXDESC_NOACK;
2266 	if (sc->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
2267 		ds->ds_link = bf->daddr;	/* self-linked */
2268 		flags |= AR5K_TXDESC_VEOL;
2269 	} else
2270 		ds->ds_link = 0;
2271 
2272 	/*
2273 	 * If we use multiple antennas on AP and use
2274 	 * the Sectored AP scenario, switch antenna every
2275 	 * 4 beacons to make sure everybody hears our AP.
2276 	 * When a client tries to associate, hw will keep
2277 	 * track of the tx antenna to be used for this client
2278 	 * automatically, based on ACKed packets.
2279 	 *
2280 	 * Note: AP still listens and transmits RTS on the
2281 	 * default antenna which is supposed to be an omni.
2282 	 *
2283 	 * Note2: On sectored scenarios it's possible to have
2284 	 * multiple antennas (1 omni, the default, plus 14 sectors),
2285 	 * so if we choose to actually support this mode we need
2286 	 * to allow the user to set how many antennas we have and tweak
2287 	 * the code below to send beacons on all of them.
2288 	 */
2289 	if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
2290 		antenna = sc->bsent & 4 ? 2 : 1;
2291 
2292 
2293 	/* FIXME: If we are in g mode and rate is a CCK rate
2294 	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
2295 	 * from tx power (value is in dB units already) */
2296 	ds->ds_data = bf->skbaddr;
2297 	ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
2298 			ieee80211_get_hdrlen_from_skb(skb), padsize,
2299 			AR5K_PKT_TYPE_BEACON, (sc->power_level * 2),
2300 			ieee80211_get_tx_rate(sc->hw, info)->hw_value,
2301 			1, AR5K_TXKEYIX_INVALID,
2302 			antenna, flags, 0, 0);
2303 	if (ret)
2304 		goto err_unmap;
2305 
2306 	return 0;
2307 err_unmap:
2308 	pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE);
2309 	return ret;
2310 }
2311 
2312 /*
2313  * Transmit a beacon frame at SWBA.  Dynamic updates to the
2314  * frame contents are done as needed and the slot time is
2315  * also adjusted based on current state.
2316  *
2317  * This is called from software irq context (beacontq tasklets)
2318  * or user context from ath5k_beacon_config.
2319  */
2320 static void
2321 ath5k_beacon_send(struct ath5k_softc *sc)
2322 {
2323 	struct ath5k_buf *bf = sc->bbuf;
2324 	struct ath5k_hw *ah = sc->ah;
2325 	struct sk_buff *skb;
2326 
2327 	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n");
2328 
2329 	if (unlikely(bf->skb == NULL || sc->opmode == NL80211_IFTYPE_STATION ||
2330 			sc->opmode == NL80211_IFTYPE_MONITOR)) {
2331 		ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL);
2332 		return;
2333 	}
2334 	/*
2335 	 * Check if the previous beacon has gone out.  If
2336 	 * not, don't try to post another; skip this
2337 	 * period and wait for the next.  Missed beacons
2338 	 * indicate a problem and should not occur.  If we
2339 	 * miss too many consecutive beacons reset the device.
2340 	 */
2341 	if (unlikely(ath5k_hw_num_tx_pending(ah, sc->bhalq) != 0)) {
2342 		sc->bmisscount++;
2343 		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
2344 			"missed %u consecutive beacons\n", sc->bmisscount);
2345 		if (sc->bmisscount > 10) {	/* NB: 10 is a guess */
2346 			ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
2347 				"stuck beacon time (%u missed)\n",
2348 				sc->bmisscount);
2349 			ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
2350 				  "stuck beacon, resetting\n");
2351 			ieee80211_queue_work(sc->hw, &sc->reset_work);
2352 		}
2353 		return;
2354 	}
2355 	if (unlikely(sc->bmisscount != 0)) {
2356 		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
2357 			"resume beacon xmit after %u misses\n",
2358 			sc->bmisscount);
2359 		sc->bmisscount = 0;
2360 	}
2361 
2362 	/*
2363 	 * Stop any current dma and put the new frame on the queue.
2364 	 * This should never fail since we check above that no frames
2365 	 * are still pending on the queue.
2366 	 */
2367 	if (unlikely(ath5k_hw_stop_tx_dma(ah, sc->bhalq))) {
2368 		ATH5K_WARN(sc, "beacon queue %u didn't start/stop ?\n", sc->bhalq);
2369 		/* NB: hw still stops DMA, so proceed */
2370 	}
2371 
2372 	/* refresh the beacon for AP mode */
2373 	if (sc->opmode == NL80211_IFTYPE_AP)
2374 		ath5k_beacon_update(sc->hw, sc->vif);
2375 
2376 	ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
2377 	ath5k_hw_start_tx_dma(ah, sc->bhalq);
2378 	ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
2379 		sc->bhalq, (unsigned long long)bf->daddr, bf->desc);
2380 
2381 	skb = ieee80211_get_buffered_bc(sc->hw, sc->vif);
2382 	while (skb) {
2383 		ath5k_tx_queue(sc->hw, skb, sc->cabq);
2384 		skb = ieee80211_get_buffered_bc(sc->hw, sc->vif);
2385 	}
2386 
2387 	sc->bsent++;
2388 }
2389 
2390 
2391 /**
2392  * ath5k_beacon_update_timers - update beacon timers
2393  *
2394  * @sc: struct ath5k_softc pointer we are operating on
2395  * @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a
2396  *          beacon timer update based on the current HW TSF.
2397  *
2398  * Calculate the next target beacon transmit time (TBTT) based on the timestamp
2399  * of a received beacon or the current local hardware TSF and write it to the
2400  * beacon timer registers.
2401  *
2402  * This is called in a variety of situations, e.g. when a beacon is received,
2403  * when a TSF update has been detected, but also when a new IBSS is created or
2404  * when we otherwise know we have to update the timers, but we keep it in this
2405  * function to have it all together in one place.
2406  */
2407 static void
2408 ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
2409 {
2410 	struct ath5k_hw *ah = sc->ah;
2411 	u32 nexttbtt, intval, hw_tu, bc_tu;
2412 	u64 hw_tsf;
2413 
2414 	intval = sc->bintval & AR5K_BEACON_PERIOD;
2415 	if (WARN_ON(!intval))
2416 		return;
2417 
2418 	/* beacon TSF converted to TU */
2419 	bc_tu = TSF_TO_TU(bc_tsf);
2420 
2421 	/* current TSF converted to TU */
2422 	hw_tsf = ath5k_hw_get_tsf64(ah);
2423 	hw_tu = TSF_TO_TU(hw_tsf);
2424 
2425 #define FUDGE 3
2426 	/* we use FUDGE to make sure the next TBTT is ahead of the current TU */
2427 	if (bc_tsf == -1) {
2428 		/*
2429 		 * no beacons received, called internally.
2430 		 * just need to refresh timers based on HW TSF.
2431 		 */
2432 		nexttbtt = roundup(hw_tu + FUDGE, intval);
2433 	} else if (bc_tsf == 0) {
2434 		/*
2435 		 * no beacon received, probably called by ath5k_reset_tsf().
2436 		 * reset TSF to start with 0.
2437 		 */
2438 		nexttbtt = intval;
2439 		intval |= AR5K_BEACON_RESET_TSF;
2440 	} else if (bc_tsf > hw_tsf) {
2441 		/*
2442 		 * beacon received, SW merge happened but HW TSF not yet updated.
2443 		 * not possible to reconfigure timers yet, but next time we
2444 		 * receive a beacon with the same BSSID, the hardware will
2445 		 * automatically update the TSF and then we need to reconfigure
2446 		 * the timers.
2447 		 */
2448 		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
2449 			"need to wait for HW TSF sync\n");
2450 		return;
2451 	} else {
2452 		 * most important case for beacon synchronization between STAs.
2453 		 * most important case for beacon synchronization between STA.
2454 		 *
2455 		 * beacon received and HW TSF has been already updated by HW.
2456 		 * update next TBTT based on the TSF of the beacon, but make
2457 		 * sure it is ahead of our local TSF timer.
2458 		 */
2459 		nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval);
2460 	}
2461 #undef FUDGE
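
	/*
	 * Worked example for the last branch above (illustrative): with
	 * intval = 100 TU, hw_tu = 12345 and a beacon received at
	 * bc_tu = 12000, we get
	 *
	 *	nexttbtt = 12000 + roundup(12345 + 3 - 12000, 100)
	 *	         = 12000 + 400 = 12400
	 *
	 * i.e. the first TBTT that is both aligned to the sender's beacon
	 * grid and ahead of our local TU counter.
	 */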
2462 
2463 	sc->nexttbtt = nexttbtt;
2464 
2465 	intval |= AR5K_BEACON_ENA;
2466 	ath5k_hw_init_beacon(ah, nexttbtt, intval);
2467 
2468 	/*
2469 	 * debugging output last in order to preserve the time critical aspect
2470 	 * of this function
2471 	 */
2472 	if (bc_tsf == -1)
2473 		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
2474 			"reconfigured timers based on HW TSF\n");
2475 	else if (bc_tsf == 0)
2476 		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
2477 			"reset HW TSF and timers\n");
2478 	else
2479 		ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
2480 			"updated timers based on beacon TSF\n");
2481 
2482 	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
2483 			  "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n",
2484 			  (unsigned long long) bc_tsf,
2485 			  (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt);
2486 	ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "intval %u %s %s\n",
2487 		intval & AR5K_BEACON_PERIOD,
2488 		intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "",
2489 		intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : "");
2490 }
2491 
2492 
2493 /**
2494  * ath5k_beacon_config - Configure the beacon queues and interrupts
2495  *
2496  * @sc: struct ath5k_softc pointer we are operating on
2497  *
2498  * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
2499  * interrupts to detect TSF updates only.
2500  */
2501 static void
2502 ath5k_beacon_config(struct ath5k_softc *sc)
2503 {
2504 	struct ath5k_hw *ah = sc->ah;
2505 	unsigned long flags;
2506 
2507 	spin_lock_irqsave(&sc->block, flags);
2508 	sc->bmisscount = 0;
2509 	sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
2510 
2511 	if (sc->enable_beacon) {
2512 		/*
2513 		 * In IBSS mode we use a self-linked tx descriptor and let the
2514 		 * hardware send the beacons automatically. We have to load it
2515 		 * only once here.
2516 		 * We use the SWBA interrupt only to keep track of the beacon
2517 		 * timers in order to detect automatic TSF updates.
2518 		 */
2519 		ath5k_beaconq_config(sc);
2520 
2521 		sc->imask |= AR5K_INT_SWBA;
2522 
2523 		if (sc->opmode == NL80211_IFTYPE_ADHOC) {
2524 			if (ath5k_hw_hasveol(ah))
2525 				ath5k_beacon_send(sc);
2526 		} else
2527 			ath5k_beacon_update_timers(sc, -1);
2528 	} else {
2529 		ath5k_hw_stop_tx_dma(sc->ah, sc->bhalq);
2530 	}
2531 
2532 	ath5k_hw_set_imr(ah, sc->imask);
2533 	mmiowb();
2534 	spin_unlock_irqrestore(&sc->block, flags);
2535 }
2536 
2537 static void ath5k_tasklet_beacon(unsigned long data)
2538 {
2539 	struct ath5k_softc *sc = (struct ath5k_softc *) data;
2540 
2541 	/*
2542 	 * Software beacon alert--time to send a beacon.
2543 	 *
2544 	 * In IBSS mode we use this interrupt just to
2545 	 * keep track of the next TBTT (target beacon
2546 	 * transmission time) in order to detect whether
2547 	 * automatic TSF updates happened.
2548 	 */
2549 	if (sc->opmode == NL80211_IFTYPE_ADHOC) {
2550 		/* XXX: only if VEOL supported */
2551 		u64 tsf = ath5k_hw_get_tsf64(sc->ah);
2552 		sc->nexttbtt += sc->bintval;
2553 		ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
2554 				"SWBA nexttbtt: %x hw_tu: %x "
2555 				"TSF: %llx\n",
2556 				sc->nexttbtt,
2557 				TSF_TO_TU(tsf),
2558 				(unsigned long long) tsf);
2559 	} else {
2560 		spin_lock(&sc->block);
2561 		ath5k_beacon_send(sc);
2562 		spin_unlock(&sc->block);
2563 	}
2564 }
2565 
2566 
2567 /********************\
2568 * Interrupt handling *
2569 \********************/
2570 
2571 static int
2572 ath5k_init(struct ath5k_softc *sc)
2573 {
2574 	struct ath5k_hw *ah = sc->ah;
2575 	int ret, i;
2576 
2577 	mutex_lock(&sc->lock);
2578 
2579 	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode);
2580 
2581 	/*
2582 	 * Stop anything previously set up.  This is safe
2583 	 * whether this is the first time through or not.
2584 	 */
2585 	ath5k_stop_locked(sc);
2586 
2587 	/*
2588 	 * The basic interface to setting the hardware in a good
2589 	 * state is ``reset''.  On return the hardware is known to
2590 	 * be powered up and with interrupts disabled.  This must
2591 	 * be followed by initialization of the appropriate bits
2592 	 * and then setup of the interrupt mask.
2593 	 */
2594 	sc->curchan = sc->hw->conf.channel;
2595 	sc->curband = &sc->sbands[sc->curchan->band];
2596 	sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
2597 		AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
2598 		AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
2599 
2600 	ret = ath5k_reset(sc, NULL);
2601 	if (ret)
2602 		goto done;
2603 
2604 	ath5k_rfkill_hw_start(ah);
2605 
2606 	/*
2607 	 * Reset the key cache since some parts do not reset the
2608 	 * contents on initial power up or resume from suspend.
2609 	 */
2610 	for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
2611 		ath5k_hw_reset_key(ah, i);
2612 
2613 	ath5k_hw_set_ack_bitrate_high(ah, true);
2614 	ret = 0;
2615 done:
2616 	mmiowb();
2617 	mutex_unlock(&sc->lock);
2618 	return ret;
2619 }
2620 
2621 static int
2622 ath5k_stop_locked(struct ath5k_softc *sc)
2623 {
2624 	struct ath5k_hw *ah = sc->ah;
2625 
2626 	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n",
2627 			test_bit(ATH_STAT_INVALID, sc->status));
2628 
2629 	/*
2630 	 * Shutdown the hardware and driver:
2631 	 *    stop output from above
2632 	 *    disable interrupts
2633 	 *    turn off timers
2634 	 *    turn off the radio
2635 	 *    clear transmit machinery
2636 	 *    clear receive machinery
2637 	 *    drain and release tx queues
2638 	 *    reclaim beacon resources
2639 	 *    power down hardware
2640 	 *
2641 	 * Note that some of this work is not possible if the
2642 	 * hardware is gone (invalid).
2643 	 */
2644 	ieee80211_stop_queues(sc->hw);
2645 
2646 	if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2647 		ath5k_led_off(sc);
2648 		ath5k_hw_set_imr(ah, 0);
2649 		synchronize_irq(sc->pdev->irq);
2650 	}
2651 	ath5k_txq_cleanup(sc);
2652 	if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2653 		ath5k_rx_stop(sc);
2654 		ath5k_hw_phy_disable(ah);
2655 	}
2656 
2657 	return 0;
2658 }
2659 
2660 static void stop_tasklets(struct ath5k_softc *sc)
2661 {
2662 	tasklet_kill(&sc->rxtq);
2663 	tasklet_kill(&sc->txtq);
2664 	tasklet_kill(&sc->calib);
2665 	tasklet_kill(&sc->beacontq);
2666 	tasklet_kill(&sc->ani_tasklet);
2667 }
2668 
2669 /*
2670  * Stop the device, grabbing the top-level lock to protect
2671  * against concurrent entry through ath5k_init (which can happen
2672  * if another thread does a system call and the thread doing the
2673  * stop is preempted).
2674  */
2675 static int
2676 ath5k_stop_hw(struct ath5k_softc *sc)
2677 {
2678 	int ret;
2679 
2680 	mutex_lock(&sc->lock);
2681 	ret = ath5k_stop_locked(sc);
2682 	if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) {
2683 		/*
2684 		 * Don't set the card in full sleep mode!
2685 		 *
2686 		 * a) When the device is in this state it must be carefully
2687 		 * woken up or references to registers in the PCI clock
2688 		 * domain may freeze the bus (and system).  This varies
2689 		 * by chip and is mostly an issue with newer parts
2690 		 * (madwifi sources mentioned srev >= 0x78) that go to
2691 		 * sleep more quickly.
2692 		 *
2693 		 * b) On older chips full sleep results in weird behaviour
2694 		 * during wakeup. I tested various cards with srev < 0x78
2695 		 * and they don't wake up after module reload; a second
2696 		 * module reload is needed to bring the card up again.
2697 		 *
2698 		 * Until we figure out what's going on, don't enable
2699 		 * full chip reset on any chip (this is what Legacy HAL
2700 		 * and Sam's HAL do anyway). Instead, perform a full reset
2701 		 * on the device (same as initial state after attach) and
2702 		 * leave it idle (keep MAC/BB on warm reset) */
2703 		ret = ath5k_hw_on_hold(sc->ah);
2704 
2705 		ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
2706 				"putting device to sleep\n");
2707 	}
2708 	ath5k_txbuf_free_skb(sc, sc->bbuf);
2709 
2710 	mmiowb();
2711 	mutex_unlock(&sc->lock);
2712 
2713 	stop_tasklets(sc);
2714 
2715 	ath5k_rfkill_hw_stop(sc->ah);
2716 
2717 	return ret;
2718 }
2719 
2720 static void
2721 ath5k_intr_calibration_poll(struct ath5k_hw *ah)
2722 {
2723 	if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
2724 	    !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
2725 		/* run ANI only when full calibration is not active */
2726 		ah->ah_cal_next_ani = jiffies +
2727 			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
2728 		tasklet_schedule(&ah->ah_sc->ani_tasklet);
2729 
2730 	} else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
2731 		ah->ah_cal_next_full = jiffies +
2732 			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
2733 		tasklet_schedule(&ah->ah_sc->calib);
2734 	}
2735 	/* we could use SWI to generate enough interrupts to meet our
2736 	 * calibration interval requirements, if necessary:
2737 	 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
2738 }
2739 
2740 static irqreturn_t
2741 ath5k_intr(int irq, void *dev_id)
2742 {
2743 	struct ath5k_softc *sc = dev_id;
2744 	struct ath5k_hw *ah = sc->ah;
2745 	enum ath5k_int status;
2746 	unsigned int counter = 1000;
2747 
2748 	if (unlikely(test_bit(ATH_STAT_INVALID, sc->status) ||
2749 				!ath5k_hw_is_intr_pending(ah)))
2750 		return IRQ_NONE;
2751 
2752 	do {
2753 		ath5k_hw_get_isr(ah, &status);		/* NB: clears IRQ too */
2754 		ATH5K_DBG(sc, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
2755 				status, sc->imask);
2756 		if (unlikely(status & AR5K_INT_FATAL)) {
2757 			/*
2758 			 * Fatal errors are unrecoverable.
2759 			 * Typically these are caused by DMA errors.
2760 			 */
2761 			ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
2762 				  "fatal int, resetting\n");
2763 			ieee80211_queue_work(sc->hw, &sc->reset_work);
2764 		} else if (unlikely(status & AR5K_INT_RXORN)) {
2765 			/*
2766 			 * Receive buffers are full. Either the bus is busy or
2767 			 * the CPU is not fast enough to process all received
2768 			 * frames.
2769 			 * Older chipsets need a reset to come out of this
2770 			 * condition, but we treat it as RX for newer chips.
2771 			 * We don't know exactly which versions need a reset -
2772 			 * this guess is copied from the HAL.
2773 			 */
2774 			sc->stats.rxorn_intr++;
2775 			if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
2776 				ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
2777 					  "rx overrun, resetting\n");
2778 				ieee80211_queue_work(sc->hw, &sc->reset_work);
2779 			} else
2780 				tasklet_schedule(&sc->rxtq);
2782 		} else {
2783 			if (status & AR5K_INT_SWBA) {
2784 				tasklet_hi_schedule(&sc->beacontq);
2785 			}
2786 			if (status & AR5K_INT_RXEOL) {
2787 				/*
2788 				 * NB: the hardware should re-read the link when
2789 				 *     RXE bit is written, but it doesn't work
2790 				 *     at least on older hardware revs.
2791 				 */
2792 				sc->stats.rxeol_intr++;
2793 			}
2794 			if (status & AR5K_INT_TXURN) {
2795 				/* bump tx trigger level */
2796 				ath5k_hw_update_tx_triglevel(ah, true);
2797 			}
2798 			if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
2799 				tasklet_schedule(&sc->rxtq);
2800 			if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
2801 					| AR5K_INT_TXERR | AR5K_INT_TXEOL))
2802 				tasklet_schedule(&sc->txtq);
2803 			if (status & AR5K_INT_BMISS) {
2804 				/* TODO */
2805 			}
2806 			if (status & AR5K_INT_MIB) {
2807 				sc->stats.mib_intr++;
2808 				ath5k_hw_update_mib_counters(ah);
2809 				ath5k_ani_mib_intr(ah);
2810 			}
2811 			if (status & AR5K_INT_GPIO)
2812 				tasklet_schedule(&sc->rf_kill.toggleq);
2813 
2814 		}
2815 	} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
2816 
2817 	if (unlikely(!counter))
2818 		ATH5K_WARN(sc, "too many interrupts, giving up for now\n");
2819 
2820 	ath5k_intr_calibration_poll(ah);
2821 
2822 	return IRQ_HANDLED;
2823 }
2824 
2825 /*
2826  * Periodically recalibrate the PHY to account
2827  * for temperature/environment changes.
2828  */
2829 static void
2830 ath5k_tasklet_calibrate(unsigned long data)
2831 {
2832 	struct ath5k_softc *sc = (void *)data;
2833 	struct ath5k_hw *ah = sc->ah;
2834 
2835 	/* Only full calibration for now */
2836 	ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
2837 
2838 	ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
2839 		ieee80211_frequency_to_channel(sc->curchan->center_freq),
2840 		sc->curchan->hw_value);
2841 
2842 	if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
2843 		/*
2844 		 * Rfgain is out of bounds, reset the chip
2845 		 * to load new gain values.
2846 		 */
2847 		ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
2848 		ieee80211_queue_work(sc->hw, &sc->reset_work);
2849 	}
2850 	if (ath5k_hw_phy_calibrate(ah, sc->curchan))
2851 		ATH5K_ERR(sc, "calibration of channel %u failed\n",
2852 			ieee80211_frequency_to_channel(
2853 				sc->curchan->center_freq));
2854 
2855 	/* Noise floor calibration interrupts the rx/tx path while I/Q
2856 	 * calibration doesn't. We stop the queues so that calibration doesn't
2857 	 * interfere with TX, and we don't run it as often */
2858 	if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
2859 		ah->ah_cal_next_nf = jiffies +
2860 			msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
2861 		ieee80211_stop_queues(sc->hw);
2862 		ath5k_hw_update_noise_floor(ah);
2863 		ieee80211_wake_queues(sc->hw);
2864 	}
2865 
2866 	ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
2867 }
2868 
2869 
2870 static void
2871 ath5k_tasklet_ani(unsigned long data)
2872 {
2873 	struct ath5k_softc *sc = (void *)data;
2874 	struct ath5k_hw *ah = sc->ah;
2875 
2876 	ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
2877 	ath5k_ani_calibration(ah);
2878 	ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
2879 }
2880 
2881 
2882 /********************\
2883 * Mac80211 functions *
2884 \********************/
2885 
2886 static int
2887 ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2888 {
2889 	struct ath5k_softc *sc = hw->priv;
2890 
2891 	return ath5k_tx_queue(hw, skb, sc->txq);
2892 }
2893 
2894 static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
2895 			  struct ath5k_txq *txq)
2896 {
2897 	struct ath5k_softc *sc = hw->priv;
2898 	struct ath5k_buf *bf;
2899 	unsigned long flags;
2900 	int padsize;
2901 
2902 	ath5k_debug_dump_skb(sc, skb, "TX  ", 1);
2903 
2904 	if (sc->opmode == NL80211_IFTYPE_MONITOR)
2905 		ATH5K_DBG(sc, ATH5K_DEBUG_XMIT, "tx in monitor (scan?)\n");
2906 
2907 	/*
2908 	 * the hardware expects the header padded to 4 byte boundaries;
2909 	 * if this is not the case, we add the padding after the header
2910 	 */
2911 	padsize = ath5k_add_padding(skb);
2912 	if (padsize < 0) {
2913 		ATH5K_ERR(sc, "tx hdrlen not %%4: not enough"
2914 			  " headroom to pad");
2915 		goto drop_packet;
2916 	}
2917 
2918 	spin_lock_irqsave(&sc->txbuflock, flags);
2919 	if (list_empty(&sc->txbuf)) {
2920 		ATH5K_ERR(sc, "no further txbuf available, dropping packet\n");
2921 		spin_unlock_irqrestore(&sc->txbuflock, flags);
2922 		ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
2923 		goto drop_packet;
2924 	}
2925 	bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list);
2926 	list_del(&bf->list);
2927 	sc->txbuf_len--;
2928 	if (list_empty(&sc->txbuf))
2929 		ieee80211_stop_queues(hw);
2930 	spin_unlock_irqrestore(&sc->txbuflock, flags);
2931 
2932 	bf->skb = skb;
2933 
2934 	if (ath5k_txbuf_setup(sc, bf, txq, padsize)) {
2935 		bf->skb = NULL;
2936 		spin_lock_irqsave(&sc->txbuflock, flags);
2937 		list_add_tail(&bf->list, &sc->txbuf);
2938 		sc->txbuf_len++;
2939 		spin_unlock_irqrestore(&sc->txbuflock, flags);
2940 		goto drop_packet;
2941 	}
2942 	return NETDEV_TX_OK;
2943 
2944 drop_packet:
2945 	dev_kfree_skb_any(skb);
2946 	return NETDEV_TX_OK;
2947 }
2948 
2949 /*
2950  * Reset the hardware.  If chan is not NULL, then also pause rx/tx
2951  * and change to the given channel.
2952  *
2953  * This should be called with sc->lock held.
2954  */
2955 static int
2956 ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
2957 {
2958 	struct ath5k_hw *ah = sc->ah;
2959 	int ret;
2960 
2961 	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");
2962 
2963 	ath5k_hw_set_imr(ah, 0);
2964 	synchronize_irq(sc->pdev->irq);
2965 	stop_tasklets(sc);
2966 
2967 	if (chan) {
2968 		ath5k_txq_cleanup(sc);
2969 		ath5k_rx_stop(sc);
2970 
2971 		sc->curchan = chan;
2972 		sc->curband = &sc->sbands[chan->band];
2973 	}
2974 	ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL);
2975 	if (ret) {
2976 		ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
2977 		goto err;
2978 	}
2979 
2980 	ret = ath5k_rx_start(sc);
2981 	if (ret) {
2982 		ATH5K_ERR(sc, "can't start recv logic\n");
2983 		goto err;
2984 	}
2985 
2986 	ath5k_ani_init(ah, ah->ah_sc->ani_state.ani_mode);
2987 
2988 	ah->ah_cal_next_full = jiffies;
2989 	ah->ah_cal_next_ani = jiffies;
2990 	ah->ah_cal_next_nf = jiffies;
2991 
2992 	/*
2993 	 * Change channels and update the h/w rate map if we're switching;
2994 	 * e.g. 11a to 11b/g.
2995 	 *
2996 	 * We may be doing a reset in response to an ioctl that changes the
2997 	 * channel so update any state that might change as a result.
2998 	 *
2999 	 * XXX needed?
3000 	 */
3001 /*	ath5k_chan_change(sc, c); */
3002 
3003 	ath5k_beacon_config(sc);
3004 	/* intrs are enabled by ath5k_beacon_config */
3005 
3006 	ieee80211_wake_queues(sc->hw);
3007 
3008 	return 0;
3009 err:
3010 	return ret;
3011 }
3012 
3013 static void ath5k_reset_work(struct work_struct *work)
3014 {
3015 	struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
3016 		reset_work);
3017 
3018 	mutex_lock(&sc->lock);
3019 	ath5k_reset(sc, sc->curchan);
3020 	mutex_unlock(&sc->lock);
3021 }
3022 
3023 static int ath5k_start(struct ieee80211_hw *hw)
3024 {
3025 	return ath5k_init(hw->priv);
3026 }
3027 
3028 static void ath5k_stop(struct ieee80211_hw *hw)
3029 {
3030 	ath5k_stop_hw(hw->priv);
3031 }
3032 
3033 static int ath5k_add_interface(struct ieee80211_hw *hw,
3034 		struct ieee80211_vif *vif)
3035 {
3036 	struct ath5k_softc *sc = hw->priv;
3037 	int ret;
3038 
3039 	mutex_lock(&sc->lock);
3040 	if (sc->vif) {
3041 		ret = 0;
3042 		goto end;
3043 	}
3044 
3045 	sc->vif = vif;
3046 
3047 	switch (vif->type) {
3048 	case NL80211_IFTYPE_AP:
3049 	case NL80211_IFTYPE_STATION:
3050 	case NL80211_IFTYPE_ADHOC:
3051 	case NL80211_IFTYPE_MESH_POINT:
3052 	case NL80211_IFTYPE_MONITOR:
3053 		sc->opmode = vif->type;
3054 		break;
3055 	default:
3056 		ret = -EOPNOTSUPP;
3057 		goto end;
3058 	}
3059 
3060 	ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "add interface mode %d\n", sc->opmode);
3061 
3062 	ath5k_hw_set_lladdr(sc->ah, vif->addr);
3063 	ath5k_mode_setup(sc);
3064 
3065 	ret = 0;
3066 end:
3067 	mutex_unlock(&sc->lock);
3068 	return ret;
3069 }
3070 
3071 static void
3072 ath5k_remove_interface(struct ieee80211_hw *hw,
3073 			struct ieee80211_vif *vif)
3074 {
3075 	struct ath5k_softc *sc = hw->priv;
3076 	u8 mac[ETH_ALEN] = {};
3077 
3078 	mutex_lock(&sc->lock);
3079 	if (sc->vif != vif)
3080 		goto end;
3081 
3082 	ath5k_hw_set_lladdr(sc->ah, mac);
3083 	sc->vif = NULL;
3084 end:
3085 	mutex_unlock(&sc->lock);
3086 }
3087 
3088 /*
3089  * TODO: Phy disable/diversity etc
3090  */
3091 static int
3092 ath5k_config(struct ieee80211_hw *hw, u32 changed)
3093 {
3094 	struct ath5k_softc *sc = hw->priv;
3095 	struct ath5k_hw *ah = sc->ah;
3096 	struct ieee80211_conf *conf = &hw->conf;
3097 	int ret = 0;
3098 
3099 	mutex_lock(&sc->lock);
3100 
3101 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
3102 		ret = ath5k_chan_set(sc, conf->channel);
3103 		if (ret < 0)
3104 			goto unlock;
3105 	}
3106 
3107 	if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
3108 	    (sc->power_level != conf->power_level)) {
3109 		sc->power_level = conf->power_level;
3110 
3111 		/* Half dB steps */
3112 		ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
3113 	}
3114 
3115 	/* TODO:
3116 	 * 1) Move this to config_interface and handle each case
3117 	 * separately, e.g. when we have only one STA vif, use
3118 	 * AR5K_ANTMODE_SINGLE_AP
3119 	 *
3120 	 * 2) Allow the user to change antenna mode, e.g. when only
3121 	 * one antenna is present
3122 	 *
3123 	 * 3) Allow the user to set default/tx antenna when possible
3124 	 *
3125 	 * 4) Default mode should handle 90% of the cases, together
3126 	 * with fixed a/b and single AP modes we should be able to
3127 	 * handle 99%. Sectored modes are extreme cases and I still
3128 	 * haven't found a usage for them. If we decide to support them,
3129 	 * then we must allow the user to set how many tx antennas we
3130 	 * have available
3131 	 */
3132 	ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
3133 
3134 unlock:
3135 	mutex_unlock(&sc->lock);
3136 	return ret;
3137 }
3138 
3139 static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
3140 				   struct netdev_hw_addr_list *mc_list)
3141 {
3142 	u32 mfilt[2], val;
3143 	u8 pos;
3144 	struct netdev_hw_addr *ha;
3145 
3146 	mfilt[0] = 0;
3147 	mfilt[1] = 1;
3148 
3149 	netdev_hw_addr_list_for_each(ha, mc_list) {
3150 		/* calculate XOR of eight 6-bit values */
3151 		val = get_unaligned_le32(ha->addr + 0);
3152 		pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
3153 		val = get_unaligned_le32(ha->addr + 3);
3154 		pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
3155 		pos &= 0x3f;
3156 		mfilt[pos / 32] |= (1 << (pos % 32));
3157 		/* XXX: we might be able to just do this instead,
3158 		 * but not sure, needs testing; if we do use this we'd
3159 		 * need to inform the code below to not reset the mcast */
3160 		/* ath5k_hw_set_mcast_filterindex(ah,
3161 		 *      ha->addr[5]); */
3162 	}
3163 
3164 	return ((u64)(mfilt[1]) << 32) | mfilt[0];
3165 }
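
/*
 * Usage note (illustrative): each multicast address is hashed (XOR of
 * its eight 6-bit slices) to one of 64 filter bits; a set bit lets
 * frames whose address hashes to it pass.  The u64 returned here is
 * split back into the two 32-bit hash words in ath5k_configure_filter()
 * below:
 *
 *	mfilt[0] = multicast;
 *	mfilt[1] = multicast >> 32;
 */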
3166 
3167 #define SUPPORTED_FIF_FLAGS \
3168 	FIF_PROMISC_IN_BSS |  FIF_ALLMULTI | FIF_FCSFAIL | \
3169 	FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \
3170 	FIF_BCN_PRBRESP_PROMISC
3171 /*
3172  * o always accept unicast, broadcast, and multicast traffic
3173  * o multicast traffic for all BSSIDs will be enabled if mac80211
3174  *   says it should be
3175  * o maintain current state of phy ofdm or phy cck error reception.
3176  *   If the hardware detects any of these types of errors then
3177  *   ath5k_hw_get_rx_filter() will pass to us the respective
3178  *   hardware filters to be able to receive these type of frames.
3179  * o probe request frames are accepted only when operating in
3180  *   hostap, adhoc, or monitor modes
3181  * o enable promiscuous mode according to the interface state
3182  * o accept beacons:
3183  *   - when operating in adhoc mode so the 802.11 layer creates
3184  *     node table entries for peers,
3185  *   - when operating in station mode for collecting rssi data when
3186  *     the station is otherwise quiet, or
3187  *   - when scanning
3188  */
3189 static void ath5k_configure_filter(struct ieee80211_hw *hw,
3190 		unsigned int changed_flags,
3191 		unsigned int *new_flags,
3192 		u64 multicast)
3193 {
3194 	struct ath5k_softc *sc = hw->priv;
3195 	struct ath5k_hw *ah = sc->ah;
3196 	u32 mfilt[2], rfilt;
3197 
3198 	mutex_lock(&sc->lock);
3199 
3200 	mfilt[0] = multicast;
3201 	mfilt[1] = multicast >> 32;
3202 
3203 	/* Only deal with supported flags */
3204 	changed_flags &= SUPPORTED_FIF_FLAGS;
3205 	*new_flags &= SUPPORTED_FIF_FLAGS;
3206 
3207 	/* If HW detects any phy or radar errors, leave those filters on.
3208 	 * Also, always enable Unicast, Broadcast and Multicast
3209 	 * XXX: move unicast, bssid broadcasts and multicast to mac80211 */
3210 	rfilt = (ath5k_hw_get_rx_filter(ah) & (AR5K_RX_FILTER_PHYERR)) |
3211 		(AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST |
3212 		AR5K_RX_FILTER_MCAST);
3213 
3214 	if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
3215 		if (*new_flags & FIF_PROMISC_IN_BSS) {
3216 			__set_bit(ATH_STAT_PROMISC, sc->status);
3217 		} else {
3218 			__clear_bit(ATH_STAT_PROMISC, sc->status);
3219 		}
3220 	}
3221 
3222 	if (test_bit(ATH_STAT_PROMISC, sc->status))
3223 		rfilt |= AR5K_RX_FILTER_PROM;
3224 
3225 	/* Note, AR5K_RX_FILTER_MCAST is already enabled */
3226 	if (*new_flags & FIF_ALLMULTI) {
3227 		mfilt[0] =  ~0;
3228 		mfilt[1] =  ~0;
3229 	}
3230 
3231 	/* This is the best we can do */
3232 	if (*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL))
3233 		rfilt |= AR5K_RX_FILTER_PHYERR;
3234 
3235 	/* FIF_BCN_PRBRESP_PROMISC really means to enable beacons
3236 	 * and probes for any BSSID; this needs testing */
3237 	if (*new_flags & FIF_BCN_PRBRESP_PROMISC)
3238 		rfilt |= AR5K_RX_FILTER_BEACON | AR5K_RX_FILTER_PROBEREQ;
3239 
3240 	/* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not
3241 	 * set we should only pass on control frames for this
3242 	 * station. This needs testing. I believe right now this
3243 	 * enables *all* control frames, which is OK... but
3244 	 * we should see if we can improve on granularity */
3245 	if (*new_flags & FIF_CONTROL)
3246 		rfilt |= AR5K_RX_FILTER_CONTROL;
3247 
3248 	/* Additional settings per mode -- this is per ath5k */
3249 
3250 	/* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */
3251 
3252 	switch (sc->opmode) {
3253 	case NL80211_IFTYPE_MESH_POINT:
3254 	case NL80211_IFTYPE_MONITOR:
3255 		rfilt |= AR5K_RX_FILTER_CONTROL |
3256 			 AR5K_RX_FILTER_BEACON |
3257 			 AR5K_RX_FILTER_PROBEREQ |
3258 			 AR5K_RX_FILTER_PROM;
3259 		break;
3260 	case NL80211_IFTYPE_AP:
3261 	case NL80211_IFTYPE_ADHOC:
3262 		rfilt |= AR5K_RX_FILTER_PROBEREQ |
3263 			 AR5K_RX_FILTER_BEACON;
3264 		break;
3265 	case NL80211_IFTYPE_STATION:
3266 		if (sc->assoc)
3267 			rfilt |= AR5K_RX_FILTER_BEACON;
3268 	default:
3269 		break;
3270 	}
3271 
3272 	/* Set filters */
3273 	ath5k_hw_set_rx_filter(ah, rfilt);
3274 
3275 	/* Set multicast bits */
3276 	ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);
3277 	/* Set the cached hw filter flags; these will later actually
3278 	 * be set in HW */
3279 	sc->filter_flags = rfilt;
3280 
3281 	mutex_unlock(&sc->lock);
3282 }
3283 
3284 static int
3285 ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3286 	      struct ieee80211_vif *vif, struct ieee80211_sta *sta,
3287 	      struct ieee80211_key_conf *key)
3288 {
3289 	struct ath5k_softc *sc = hw->priv;
3290 	struct ath5k_hw *ah = sc->ah;
3291 	struct ath_common *common = ath5k_hw_common(ah);
3292 	int ret = 0;
3293 
3294 	if (modparam_nohwcrypt)
3295 		return -EOPNOTSUPP;
3296 
3297 	if (sc->opmode == NL80211_IFTYPE_AP)
3298 		return -EOPNOTSUPP;
3299 
3300 	switch (key->alg) {
3301 	case ALG_WEP:
3302 	case ALG_TKIP:
3303 		break;
3304 	case ALG_CCMP:
3305 		if (sc->ah->ah_aes_support)
3306 			break;
3307 
3308 		return -EOPNOTSUPP;
3309 	default:
3310 		WARN_ON(1);
3311 		return -EINVAL;
3312 	}
3313 
3314 	mutex_lock(&sc->lock);
3315 
3316 	switch (cmd) {
3317 	case SET_KEY:
3318 		ret = ath5k_hw_set_key(sc->ah, key->keyidx, key,
3319 				       sta ? sta->addr : NULL);
3320 		if (ret) {
3321 			ATH5K_ERR(sc, "can't set the key\n");
3322 			goto unlock;
3323 		}
3324 		__set_bit(key->keyidx, common->keymap);
3325 		key->hw_key_idx = key->keyidx;
3326 		key->flags |= (IEEE80211_KEY_FLAG_GENERATE_IV |
3327 			       IEEE80211_KEY_FLAG_GENERATE_MMIC);
3328 		break;
3329 	case DISABLE_KEY:
3330 		ath5k_hw_reset_key(sc->ah, key->keyidx);
3331 		__clear_bit(key->keyidx, common->keymap);
3332 		break;
3333 	default:
3334 		ret = -EINVAL;
3335 		goto unlock;
3336 	}
3337 
3338 unlock:
3339 	mmiowb();
3340 	mutex_unlock(&sc->lock);
3341 	return ret;
3342 }
3343 
3344 static int
3345 ath5k_get_stats(struct ieee80211_hw *hw,
3346 		struct ieee80211_low_level_stats *stats)
3347 {
3348 	struct ath5k_softc *sc = hw->priv;
3349 
3350 	/* Force update */
3351 	ath5k_hw_update_mib_counters(sc->ah);
3352 
3353 	stats->dot11ACKFailureCount = sc->stats.ack_fail;
3354 	stats->dot11RTSFailureCount = sc->stats.rts_fail;
3355 	stats->dot11RTSSuccessCount = sc->stats.rts_ok;
3356 	stats->dot11FCSErrorCount = sc->stats.fcs_error;
3357 
3358 	return 0;
3359 }
3360 
3361 static int ath5k_get_survey(struct ieee80211_hw *hw, int idx,
3362 		struct survey_info *survey)
3363 {
3364 	struct ath5k_softc *sc = hw->priv;
3365 	struct ieee80211_conf *conf = &hw->conf;
3366 
3367 	if (idx != 0)
3368 		return -ENOENT;
3369 
3370 	survey->channel = conf->channel;
3371 	survey->filled = SURVEY_INFO_NOISE_DBM;
3372 	survey->noise = sc->ah->ah_noise_floor;
3373 
3374 	return 0;
3375 }
3376 
3377 static u64
3378 ath5k_get_tsf(struct ieee80211_hw *hw)
3379 {
3380 	struct ath5k_softc *sc = hw->priv;
3381 
3382 	return ath5k_hw_get_tsf64(sc->ah);
3383 }
3384 
3385 static void
3386 ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
3387 {
3388 	struct ath5k_softc *sc = hw->priv;
3389 
3390 	ath5k_hw_set_tsf64(sc->ah, tsf);
3391 }
3392 
3393 static void
3394 ath5k_reset_tsf(struct ieee80211_hw *hw)
3395 {
3396 	struct ath5k_softc *sc = hw->priv;
3397 
3398 	/*
3399 	 * In IBSS mode we need to update the beacon timers too.
3400 	 * This will also reset the TSF if we call it with 0.
3401 	 */
3402 	if (sc->opmode == NL80211_IFTYPE_ADHOC)
3403 		ath5k_beacon_update_timers(sc, 0);
3404 	else
3405 		ath5k_hw_reset_tsf(sc->ah);
3406 }
3407 
3408 /*
3409  * Updates the beacon that is sent by ath5k_beacon_send.  For adhoc,
3410  * this is called only once at config_bss time; for AP we do it every
3411  * SWBA interrupt so that the TIM will reflect buffered frames.
3412  *
3413  * Called with the beacon lock.
3414  */
3415 static int
3416 ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
3417 {
3418 	int ret;
3419 	struct ath5k_softc *sc = hw->priv;
3420 	struct sk_buff *skb;
3421 
3422 	if (WARN_ON(!vif)) {
3423 		ret = -EINVAL;
3424 		goto out;
3425 	}
3426 
3427 	skb = ieee80211_beacon_get(hw, vif);
3428 
3429 	if (!skb) {
3430 		ret = -ENOMEM;
3431 		goto out;
3432 	}
3433 
3434 	ath5k_debug_dump_skb(sc, skb, "BC  ", 1);
3435 
3436 	ath5k_txbuf_free_skb(sc, sc->bbuf);
3437 	sc->bbuf->skb = skb;
3438 	ret = ath5k_beacon_setup(sc, sc->bbuf);
3439 	if (ret)
3440 		sc->bbuf->skb = NULL;
3441 out:
3442 	return ret;
3443 }
3444 
3445 static void
3446 set_beacon_filter(struct ieee80211_hw *hw, bool enable)
3447 {
3448 	struct ath5k_softc *sc = hw->priv;
3449 	struct ath5k_hw *ah = sc->ah;
3450 	u32 rfilt;
3451 	rfilt = ath5k_hw_get_rx_filter(ah);
3452 	if (enable)
3453 		rfilt |= AR5K_RX_FILTER_BEACON;
3454 	else
3455 		rfilt &= ~AR5K_RX_FILTER_BEACON;
3456 	ath5k_hw_set_rx_filter(ah, rfilt);
3457 	sc->filter_flags = rfilt;
3458 }
3459 
3460 static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
3461 				    struct ieee80211_vif *vif,
3462 				    struct ieee80211_bss_conf *bss_conf,
3463 				    u32 changes)
3464 {
3465 	struct ath5k_softc *sc = hw->priv;
3466 	struct ath5k_hw *ah = sc->ah;
3467 	struct ath_common *common = ath5k_hw_common(ah);
3468 	unsigned long flags;
3469 
3470 	mutex_lock(&sc->lock);
3471 	if (WARN_ON(sc->vif != vif))
3472 		goto unlock;
3473 
3474 	if (changes & BSS_CHANGED_BSSID) {
3475 		/* Cache for later use during resets */
3476 		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
3477 		common->curaid = 0;
3478 		ath5k_hw_set_associd(ah);
3479 		mmiowb();
3480 	}
3481 
3482 	if (changes & BSS_CHANGED_BEACON_INT)
3483 		sc->bintval = bss_conf->beacon_int;
3484 
3485 	if (changes & BSS_CHANGED_ASSOC) {
3486 		sc->assoc = bss_conf->assoc;
3487 		if (sc->opmode == NL80211_IFTYPE_STATION)
3488 			set_beacon_filter(hw, sc->assoc);
3489 		ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
3490 			AR5K_LED_ASSOC : AR5K_LED_INIT);
3491 		if (bss_conf->assoc) {
3492 			ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
3493 				  "Bss Info ASSOC %d, bssid: %pM\n",
3494 				  bss_conf->aid, common->curbssid);
3495 			common->curaid = bss_conf->aid;
3496 			ath5k_hw_set_associd(ah);
3497 			/* Once ANI is available you would start it here */
3498 		}
3499 	}
3500 
3501 	if (changes & BSS_CHANGED_BEACON) {
3502 		spin_lock_irqsave(&sc->block, flags);
3503 		ath5k_beacon_update(hw, vif);
3504 		spin_unlock_irqrestore(&sc->block, flags);
3505 	}
3506 
3507 	if (changes & BSS_CHANGED_BEACON_ENABLED)
3508 		sc->enable_beacon = bss_conf->enable_beacon;
3509 
3510 	if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED |
3511 		       BSS_CHANGED_BEACON_INT))
3512 		ath5k_beacon_config(sc);
3513 
3514  unlock:
3515 	mutex_unlock(&sc->lock);
3516 }
3517 
3518 static void ath5k_sw_scan_start(struct ieee80211_hw *hw)
3519 {
3520 	struct ath5k_softc *sc = hw->priv;
3521 	if (!sc->assoc)
3522 		ath5k_hw_set_ledstate(sc->ah, AR5K_LED_SCAN);
3523 }
3524 
3525 static void ath5k_sw_scan_complete(struct ieee80211_hw *hw)
3526 {
3527 	struct ath5k_softc *sc = hw->priv;
3528 	ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
3529 		AR5K_LED_ASSOC : AR5K_LED_INIT);
3530 }
3531 
3532 /**
3533  * ath5k_set_coverage_class - Set IEEE 802.11 coverage class
3534  *
3535  * @hw: struct ieee80211_hw pointer
3536  * @coverage_class: IEEE 802.11 coverage class number
3537  *
3538  * Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for given
3539  * coverage class. The values are persistent, they are restored after device
3540  * reset.
3541  */
3542 static void ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
3543 {
3544 	struct ath5k_softc *sc = hw->priv;
3545 
3546 	mutex_lock(&sc->lock);
3547 	ath5k_hw_set_coverage_class(sc->ah, coverage_class);
3548 	mutex_unlock(&sc->lock);
3549 }
3550