base.c (bc030d6cb9532877c1c5a3f5e7123344fa24a285) base.c (cd2c5486526b744fb505e18c9d981b35feaf283a)
1/*-
2 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
3 * Copyright (c) 2004-2005 Atheros Communications, Inc.
4 * Copyright (c) 2006 Devicescape Software, Inc.
5 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
6 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
7 *
8 * All rights reserved.

--- 33 unchanged lines hidden (view full) ---

42
43#include <linux/module.h>
44#include <linux/delay.h>
45#include <linux/hardirq.h>
46#include <linux/if.h>
47#include <linux/io.h>
48#include <linux/netdevice.h>
49#include <linux/cache.h>
1/*-
2 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
3 * Copyright (c) 2004-2005 Atheros Communications, Inc.
4 * Copyright (c) 2006 Devicescape Software, Inc.
5 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
6 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
7 *
8 * All rights reserved.

--- 33 unchanged lines hidden (view full) ---

42
43#include <linux/module.h>
44#include <linux/delay.h>
45#include <linux/hardirq.h>
46#include <linux/if.h>
47#include <linux/io.h>
48#include <linux/netdevice.h>
49#include <linux/cache.h>
50#include <linux/pci.h>
51#include <linux/pci-aspm.h>
52#include <linux/ethtool.h>
53#include <linux/uaccess.h>
54#include <linux/slab.h>
55#include <linux/etherdevice.h>
56
57#include <net/ieee80211_radiotap.h>
58
59#include <asm/unaligned.h>
60
61#include "base.h"
62#include "reg.h"
63#include "debug.h"
64#include "ani.h"
50#include <linux/ethtool.h>
51#include <linux/uaccess.h>
52#include <linux/slab.h>
53#include <linux/etherdevice.h>
54
55#include <net/ieee80211_radiotap.h>
56
57#include <asm/unaligned.h>
58
59#include "base.h"
60#include "reg.h"
61#include "debug.h"
62#include "ani.h"
65#include "../debug.h"
66
63
67static int modparam_nohwcrypt;
64int modparam_nohwcrypt;
68module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
69MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
70
71static int modparam_all_channels;
72module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
73MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
74
75/* Module info */
76MODULE_AUTHOR("Jiri Slaby");
77MODULE_AUTHOR("Nick Kossifidis");
78MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
79MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
80MODULE_LICENSE("Dual BSD/GPL");
65module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
66MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
67
68static int modparam_all_channels;
69module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
70MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
71
72/* Module info */
73MODULE_AUTHOR("Jiri Slaby");
74MODULE_AUTHOR("Nick Kossifidis");
75MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
76MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
77MODULE_LICENSE("Dual BSD/GPL");
81MODULE_VERSION("0.6.0 (EXPERIMENTAL)");
82
78
83static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan);
84static int ath5k_beacon_update(struct ieee80211_hw *hw,
85 struct ieee80211_vif *vif);
86static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
79static int ath5k_init(struct ieee80211_hw *hw);
80static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
81 bool skip_pcu);
82int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
83void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
87
84
88/* Known PCI ids */
89static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
90 { PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
91 { PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
92 { PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/
93 { PCI_VDEVICE(ATHEROS, 0x0012) }, /* 5211 */
94 { PCI_VDEVICE(ATHEROS, 0x0013) }, /* 5212 */
95 { PCI_VDEVICE(3COM_2, 0x0013) }, /* 3com 5212 */
96 { PCI_VDEVICE(3COM, 0x0013) }, /* 3com 3CRDAG675 5212 */
97 { PCI_VDEVICE(ATHEROS, 0x1014) }, /* IBM minipci 5212 */
98 { PCI_VDEVICE(ATHEROS, 0x0014) }, /* 5212 combatible */
99 { PCI_VDEVICE(ATHEROS, 0x0015) }, /* 5212 combatible */
100 { PCI_VDEVICE(ATHEROS, 0x0016) }, /* 5212 combatible */
101 { PCI_VDEVICE(ATHEROS, 0x0017) }, /* 5212 combatible */
102 { PCI_VDEVICE(ATHEROS, 0x0018) }, /* 5212 combatible */
103 { PCI_VDEVICE(ATHEROS, 0x0019) }, /* 5212 combatible */
104 { PCI_VDEVICE(ATHEROS, 0x001a) }, /* 2413 Griffin-lite */
105 { PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */
106 { PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */
107 { PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */
108 { 0 }
109};
110MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
111
112/* Known SREVs */
113static const struct ath5k_srev_name srev_names[] = {
85/* Known SREVs */
86static const struct ath5k_srev_name srev_names[] = {
87#ifdef CONFIG_ATHEROS_AR231X
88 { "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R2 },
89 { "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R7 },
90 { "2313", AR5K_VERSION_MAC, AR5K_SREV_AR2313_R8 },
91 { "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R6 },
92 { "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R7 },
93 { "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R1 },
94 { "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R2 },
95#else
114 { "5210", AR5K_VERSION_MAC, AR5K_SREV_AR5210 },
115 { "5311", AR5K_VERSION_MAC, AR5K_SREV_AR5311 },
116 { "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A },
117 { "5311B", AR5K_VERSION_MAC, AR5K_SREV_AR5311B },
118 { "5211", AR5K_VERSION_MAC, AR5K_SREV_AR5211 },
119 { "5212", AR5K_VERSION_MAC, AR5K_SREV_AR5212 },
120 { "5213", AR5K_VERSION_MAC, AR5K_SREV_AR5213 },
121 { "5213A", AR5K_VERSION_MAC, AR5K_SREV_AR5213A },
122 { "2413", AR5K_VERSION_MAC, AR5K_SREV_AR2413 },
123 { "2414", AR5K_VERSION_MAC, AR5K_SREV_AR2414 },
124 { "5424", AR5K_VERSION_MAC, AR5K_SREV_AR5424 },
125 { "5413", AR5K_VERSION_MAC, AR5K_SREV_AR5413 },
126 { "5414", AR5K_VERSION_MAC, AR5K_SREV_AR5414 },
127 { "2415", AR5K_VERSION_MAC, AR5K_SREV_AR2415 },
128 { "5416", AR5K_VERSION_MAC, AR5K_SREV_AR5416 },
129 { "5418", AR5K_VERSION_MAC, AR5K_SREV_AR5418 },
130 { "2425", AR5K_VERSION_MAC, AR5K_SREV_AR2425 },
131 { "2417", AR5K_VERSION_MAC, AR5K_SREV_AR2417 },
96 { "5210", AR5K_VERSION_MAC, AR5K_SREV_AR5210 },
97 { "5311", AR5K_VERSION_MAC, AR5K_SREV_AR5311 },
98 { "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A },
99 { "5311B", AR5K_VERSION_MAC, AR5K_SREV_AR5311B },
100 { "5211", AR5K_VERSION_MAC, AR5K_SREV_AR5211 },
101 { "5212", AR5K_VERSION_MAC, AR5K_SREV_AR5212 },
102 { "5213", AR5K_VERSION_MAC, AR5K_SREV_AR5213 },
103 { "5213A", AR5K_VERSION_MAC, AR5K_SREV_AR5213A },
104 { "2413", AR5K_VERSION_MAC, AR5K_SREV_AR2413 },
105 { "2414", AR5K_VERSION_MAC, AR5K_SREV_AR2414 },
106 { "5424", AR5K_VERSION_MAC, AR5K_SREV_AR5424 },
107 { "5413", AR5K_VERSION_MAC, AR5K_SREV_AR5413 },
108 { "5414", AR5K_VERSION_MAC, AR5K_SREV_AR5414 },
109 { "2415", AR5K_VERSION_MAC, AR5K_SREV_AR2415 },
110 { "5416", AR5K_VERSION_MAC, AR5K_SREV_AR5416 },
111 { "5418", AR5K_VERSION_MAC, AR5K_SREV_AR5418 },
112 { "2425", AR5K_VERSION_MAC, AR5K_SREV_AR2425 },
113 { "2417", AR5K_VERSION_MAC, AR5K_SREV_AR2417 },
114#endif
132 { "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN },
133 { "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 },
134 { "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 },
135 { "5111A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111A },
136 { "2111", AR5K_VERSION_RAD, AR5K_SREV_RAD_2111 },
137 { "5112", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112 },
138 { "5112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112A },
139 { "5112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112B },
140 { "2112", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112 },
141 { "2112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112A },
142 { "2112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112B },
143 { "2413", AR5K_VERSION_RAD, AR5K_SREV_RAD_2413 },
144 { "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 },
115 { "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN },
116 { "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 },
117 { "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 },
118 { "5111A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111A },
119 { "2111", AR5K_VERSION_RAD, AR5K_SREV_RAD_2111 },
120 { "5112", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112 },
121 { "5112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112A },
122 { "5112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112B },
123 { "2112", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112 },
124 { "2112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112A },
125 { "2112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112B },
126 { "2413", AR5K_VERSION_RAD, AR5K_SREV_RAD_2413 },
127 { "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 },
145 { "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 },
146 { "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 },
147 { "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 },
148 { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 },
128 { "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 },
129 { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 },
130#ifdef CONFIG_ATHEROS_AR231X
131 { "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 },
132 { "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 },
133#endif
149 { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN },
150};
151
152static const struct ieee80211_rate ath5k_rates[] = {
153 { .bitrate = 10,
154 .hw_value = ATH5K_RATE_CODE_1M, },
155 { .bitrate = 20,
156 .hw_value = ATH5K_RATE_CODE_2M,

--- 29 unchanged lines hidden (view full) ---

186 .hw_value = ATH5K_RATE_CODE_48M,
187 .flags = 0 },
188 { .bitrate = 540,
189 .hw_value = ATH5K_RATE_CODE_54M,
190 .flags = 0 },
191 /* XR missing */
192};
193
134 { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN },
135};
136
137static const struct ieee80211_rate ath5k_rates[] = {
138 { .bitrate = 10,
139 .hw_value = ATH5K_RATE_CODE_1M, },
140 { .bitrate = 20,
141 .hw_value = ATH5K_RATE_CODE_2M,

--- 29 unchanged lines hidden (view full) ---

171 .hw_value = ATH5K_RATE_CODE_48M,
172 .flags = 0 },
173 { .bitrate = 540,
174 .hw_value = ATH5K_RATE_CODE_54M,
175 .flags = 0 },
176 /* XR missing */
177};
178
194static inline void ath5k_txbuf_free_skb(struct ath5k_softc *sc,
195 struct ath5k_buf *bf)
196{
197 BUG_ON(!bf);
198 if (!bf->skb)
199 return;
200 pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len,
201 PCI_DMA_TODEVICE);
202 dev_kfree_skb_any(bf->skb);
203 bf->skb = NULL;
204 bf->skbaddr = 0;
205 bf->desc->ds_data = 0;
206}
207
208static inline void ath5k_rxbuf_free_skb(struct ath5k_softc *sc,
209 struct ath5k_buf *bf)
210{
211 struct ath5k_hw *ah = sc->ah;
212 struct ath_common *common = ath5k_hw_common(ah);
213
214 BUG_ON(!bf);
215 if (!bf->skb)
216 return;
217 pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
218 PCI_DMA_FROMDEVICE);
219 dev_kfree_skb_any(bf->skb);
220 bf->skb = NULL;
221 bf->skbaddr = 0;
222 bf->desc->ds_data = 0;
223}
224
225
226static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
227{
228 u64 tsf = ath5k_hw_get_tsf64(ah);
229
230 if ((tsf & 0x7fff) < rstamp)
231 tsf -= 0x8000;
232
233 return (tsf & ~0x7fff) | rstamp;
234}
235
179static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
180{
181 u64 tsf = ath5k_hw_get_tsf64(ah);
182
183 if ((tsf & 0x7fff) < rstamp)
184 tsf -= 0x8000;
185
186 return (tsf & ~0x7fff) | rstamp;
187}
188
236static const char *
189const char *
237ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
238{
239 const char *name = "xxxxx";
240 unsigned int i;
241
242 for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
243 if (srev_names[i].sr_type != type)
244 continue;

--- 77 unchanged lines hidden (view full) ---

322{
323 unsigned int i, count, size, chfreq, freq, ch;
324
325 if (!test_bit(mode, ah->ah_modes))
326 return 0;
327
328 switch (mode) {
329 case AR5K_MODE_11A:
190ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
191{
192 const char *name = "xxxxx";
193 unsigned int i;
194
195 for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
196 if (srev_names[i].sr_type != type)
197 continue;

--- 77 unchanged lines hidden (view full) ---

275{
276 unsigned int i, count, size, chfreq, freq, ch;
277
278 if (!test_bit(mode, ah->ah_modes))
279 return 0;
280
281 switch (mode) {
282 case AR5K_MODE_11A:
330 case AR5K_MODE_11A_TURBO:
331 /* 1..220, but 2GHz frequencies are filtered by check_channel */
332 size = 220 ;
333 chfreq = CHANNEL_5GHZ;
334 break;
335 case AR5K_MODE_11B:
336 case AR5K_MODE_11G:
283 /* 1..220, but 2GHz frequencies are filtered by check_channel */
284 size = 220 ;
285 chfreq = CHANNEL_5GHZ;
286 break;
287 case AR5K_MODE_11B:
288 case AR5K_MODE_11G:
337 case AR5K_MODE_11G_TURBO:
338 size = 26;
339 chfreq = CHANNEL_2GHZ;
340 break;
341 default:
342 ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n");
343 return 0;
344 }
345

--- 12 unchanged lines hidden (view full) ---

358 channels[count].center_freq = freq;
359 channels[count].band = (chfreq == CHANNEL_2GHZ) ?
360 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
361 switch (mode) {
362 case AR5K_MODE_11A:
363 case AR5K_MODE_11G:
364 channels[count].hw_value = chfreq | CHANNEL_OFDM;
365 break;
289 size = 26;
290 chfreq = CHANNEL_2GHZ;
291 break;
292 default:
293 ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n");
294 return 0;
295 }
296

--- 12 unchanged lines hidden (view full) ---

309 channels[count].center_freq = freq;
310 channels[count].band = (chfreq == CHANNEL_2GHZ) ?
311 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
312 switch (mode) {
313 case AR5K_MODE_11A:
314 case AR5K_MODE_11G:
315 channels[count].hw_value = chfreq | CHANNEL_OFDM;
316 break;
366 case AR5K_MODE_11A_TURBO:
367 case AR5K_MODE_11G_TURBO:
368 channels[count].hw_value = chfreq |
369 CHANNEL_OFDM | CHANNEL_TURBO;
370 break;
371 case AR5K_MODE_11B:
372 channels[count].hw_value = CHANNEL_B;
373 }
374
375 count++;
376 max--;
377 }
378

--- 99 unchanged lines hidden (view full) ---

478
479/*
480 * Set/change channels. We always reset the chip.
481 * To accomplish this we must first cleanup any pending DMA,
482 * then restart stuff after a la ath5k_init.
483 *
484 * Called with sc->lock.
485 */
317 case AR5K_MODE_11B:
318 channels[count].hw_value = CHANNEL_B;
319 }
320
321 count++;
322 max--;
323 }
324

--- 99 unchanged lines hidden (view full) ---

424
425/*
426 * Set/change channels. We always reset the chip.
427 * To accomplish this we must first cleanup any pending DMA,
428 * then restart stuff after a la ath5k_init.
429 *
430 * Called with sc->lock.
431 */
486static int
432int
487ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
488{
489 ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
490 "channel set, resetting (%u -> %u MHz)\n",
491 sc->curchan->center_freq, chan->center_freq);
492
493 /*
494 * To switch channels clear any pending DMA operations;
495 * wait long enough for the RX fifo to drain, reset the
496 * hardware at the new frequency, and then re-enable
497 * the relevant bits of the h/w.
498 */
433ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
434{
435 ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
436 "channel set, resetting (%u -> %u MHz)\n",
437 sc->curchan->center_freq, chan->center_freq);
438
439 /*
440 * To switch channels clear any pending DMA operations;
441 * wait long enough for the RX fifo to drain, reset the
442 * hardware at the new frequency, and then re-enable
443 * the relevant bits of the h/w.
444 */
499 return ath5k_reset(sc, chan);
445 return ath5k_reset(sc, chan, true);
500}
501
502static void
503ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
504{
505 sc->curmode = mode;
506
507 if (mode == AR5K_MODE_11A) {

--- 36 unchanged lines hidden (view full) ---

544 if (!iter_data->any_assoc) {
545 if (avf->assoc)
546 iter_data->any_assoc = true;
547 }
548
549 /* Calculate combined mode - when APs are active, operate in AP mode.
550 * Otherwise use the mode of the new interface. This can currently
551 * only deal with combinations of APs and STAs. Only one ad-hoc
446}
447
448static void
449ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
450{
451 sc->curmode = mode;
452
453 if (mode == AR5K_MODE_11A) {

--- 36 unchanged lines hidden (view full) ---

490 if (!iter_data->any_assoc) {
491 if (avf->assoc)
492 iter_data->any_assoc = true;
493 }
494
495 /* Calculate combined mode - when APs are active, operate in AP mode.
496 * Otherwise use the mode of the new interface. This can currently
497 * only deal with combinations of APs and STAs. Only one ad-hoc
552 * interfaces is allowed above.
498 * interfaces is allowed.
553 */
554 if (avf->opmode == NL80211_IFTYPE_AP)
555 iter_data->opmode = NL80211_IFTYPE_AP;
556 else
557 if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
558 iter_data->opmode = avf->opmode;
559}
560
499 */
500 if (avf->opmode == NL80211_IFTYPE_AP)
501 iter_data->opmode = NL80211_IFTYPE_AP;
502 else
503 if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
504 iter_data->opmode = avf->opmode;
505}
506
561static void ath_do_set_opmode(struct ath5k_softc *sc)
507void
508ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
509 struct ieee80211_vif *vif)
562{
510{
563 struct ath5k_hw *ah = sc->ah;
564 ath5k_hw_set_opmode(ah, sc->opmode);
565 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
566 sc->opmode, ath_opmode_to_string(sc->opmode));
567}
568
569void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
570 struct ieee80211_vif *vif)
571{
572 struct ath_common *common = ath5k_hw_common(sc->ah);
573 struct ath_vif_iter_data iter_data;
574
575 /*
576 * Use the hardware MAC address as reference, the hardware uses it
577 * together with the BSSID mask when matching addresses.
578 */
579 iter_data.hw_macaddr = common->macaddr;

--- 10 unchanged lines hidden (view full) ---

590 &iter_data);
591 memcpy(sc->bssidmask, iter_data.mask, ETH_ALEN);
592
593 sc->opmode = iter_data.opmode;
594 if (sc->opmode == NL80211_IFTYPE_UNSPECIFIED)
595 /* Nothing active, default to station mode */
596 sc->opmode = NL80211_IFTYPE_STATION;
597
511 struct ath_common *common = ath5k_hw_common(sc->ah);
512 struct ath_vif_iter_data iter_data;
513
514 /*
515 * Use the hardware MAC address as reference, the hardware uses it
516 * together with the BSSID mask when matching addresses.
517 */
518 iter_data.hw_macaddr = common->macaddr;

--- 10 unchanged lines hidden (view full) ---

529 &iter_data);
530 memcpy(sc->bssidmask, iter_data.mask, ETH_ALEN);
531
532 sc->opmode = iter_data.opmode;
533 if (sc->opmode == NL80211_IFTYPE_UNSPECIFIED)
534 /* Nothing active, default to station mode */
535 sc->opmode = NL80211_IFTYPE_STATION;
536
598 ath_do_set_opmode(sc);
537 ath5k_hw_set_opmode(sc->ah, sc->opmode);
538 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
539 sc->opmode, ath_opmode_to_string(sc->opmode));
599
600 if (iter_data.need_set_hw_addr && iter_data.found_active)
601 ath5k_hw_set_lladdr(sc->ah, iter_data.active_mac);
602
603 if (ath5k_hw_hasbssidmask(sc->ah))
604 ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
605}
606
540
541 if (iter_data.need_set_hw_addr && iter_data.found_active)
542 ath5k_hw_set_lladdr(sc->ah, iter_data.active_mac);
543
544 if (ath5k_hw_hasbssidmask(sc->ah))
545 ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
546}
547
607static void
548void
608ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif)
609{
610 struct ath5k_hw *ah = sc->ah;
611 u32 rfilt;
612
613 /* configure rx filter */
614 rfilt = sc->filter_flags;
615 ath5k_hw_set_rx_filter(ah, rfilt);

--- 38 unchanged lines hidden (view full) ---

654 GFP_ATOMIC);
655
656 if (!skb) {
657 ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
658 common->rx_bufsize);
659 return NULL;
660 }
661
549ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif)
550{
551 struct ath5k_hw *ah = sc->ah;
552 u32 rfilt;
553
554 /* configure rx filter */
555 rfilt = sc->filter_flags;
556 ath5k_hw_set_rx_filter(ah, rfilt);

--- 38 unchanged lines hidden (view full) ---

595 GFP_ATOMIC);
596
597 if (!skb) {
598 ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
599 common->rx_bufsize);
600 return NULL;
601 }
602
662 *skb_addr = pci_map_single(sc->pdev,
603 *skb_addr = dma_map_single(sc->dev,
663 skb->data, common->rx_bufsize,
604 skb->data, common->rx_bufsize,
664 PCI_DMA_FROMDEVICE);
665 if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) {
605 DMA_FROM_DEVICE);
606
607 if (unlikely(dma_mapping_error(sc->dev, *skb_addr))) {
666 ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
667 dev_kfree_skb(skb);
668 return NULL;
669 }
670 return skb;
671}
672
673static int

--- 79 unchanged lines hidden (view full) ---

753 u16 hw_rate;
754 u16 cts_rate = 0;
755 u16 duration = 0;
756 u8 rc_flags;
757
758 flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
759
760 /* XXX endianness */
608 ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
609 dev_kfree_skb(skb);
610 return NULL;
611 }
612 return skb;
613}
614
615static int

--- 79 unchanged lines hidden (view full) ---

695 u16 hw_rate;
696 u16 cts_rate = 0;
697 u16 duration = 0;
698 u8 rc_flags;
699
700 flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
701
702 /* XXX endianness */
761 bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len,
762 PCI_DMA_TODEVICE);
703 bf->skbaddr = dma_map_single(sc->dev, skb->data, skb->len,
704 DMA_TO_DEVICE);
763
764 rate = ieee80211_get_tx_rate(sc->hw, info);
765 if (!rate) {
766 ret = -EINVAL;
767 goto err_unmap;
768 }
769
770 if (info->flags & IEEE80211_TX_CTL_NO_ACK)

--- 63 unchanged lines hidden (view full) ---

834
835 txq->link = &ds->ds_link;
836 ath5k_hw_start_tx_dma(ah, txq->qnum);
837 mmiowb();
838 spin_unlock_bh(&txq->lock);
839
840 return 0;
841err_unmap:
705
706 rate = ieee80211_get_tx_rate(sc->hw, info);
707 if (!rate) {
708 ret = -EINVAL;
709 goto err_unmap;
710 }
711
712 if (info->flags & IEEE80211_TX_CTL_NO_ACK)

--- 63 unchanged lines hidden (view full) ---

776
777 txq->link = &ds->ds_link;
778 ath5k_hw_start_tx_dma(ah, txq->qnum);
779 mmiowb();
780 spin_unlock_bh(&txq->lock);
781
782 return 0;
783err_unmap:
842 pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE);
784 dma_unmap_single(sc->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
843 return ret;
844}
845
846/*******************\
847* Descriptors setup *
848\*******************/
849
850static int
785 return ret;
786}
787
788/*******************\
789* Descriptors setup *
790\*******************/
791
792static int
851ath5k_desc_alloc(struct ath5k_softc *sc, struct pci_dev *pdev)
793ath5k_desc_alloc(struct ath5k_softc *sc)
852{
853 struct ath5k_desc *ds;
854 struct ath5k_buf *bf;
855 dma_addr_t da;
856 unsigned int i;
857 int ret;
858
859 /* allocate descriptors */
860 sc->desc_len = sizeof(struct ath5k_desc) *
861 (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);
794{
795 struct ath5k_desc *ds;
796 struct ath5k_buf *bf;
797 dma_addr_t da;
798 unsigned int i;
799 int ret;
800
801 /* allocate descriptors */
802 sc->desc_len = sizeof(struct ath5k_desc) *
803 (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);
862 sc->desc = pci_alloc_consistent(pdev, sc->desc_len, &sc->desc_daddr);
804
805 sc->desc = dma_alloc_coherent(sc->dev, sc->desc_len,
806 &sc->desc_daddr, GFP_KERNEL);
863 if (sc->desc == NULL) {
864 ATH5K_ERR(sc, "can't allocate descriptors\n");
865 ret = -ENOMEM;
866 goto err;
867 }
868 ds = sc->desc;
869 da = sc->desc_daddr;
870 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",

--- 29 unchanged lines hidden (view full) ---

900 for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
901 bf->desc = ds;
902 bf->daddr = da;
903 list_add_tail(&bf->list, &sc->bcbuf);
904 }
905
906 return 0;
907err_free:
807 if (sc->desc == NULL) {
808 ATH5K_ERR(sc, "can't allocate descriptors\n");
809 ret = -ENOMEM;
810 goto err;
811 }
812 ds = sc->desc;
813 da = sc->desc_daddr;
814 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",

--- 29 unchanged lines hidden (view full) ---

844 for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
845 bf->desc = ds;
846 bf->daddr = da;
847 list_add_tail(&bf->list, &sc->bcbuf);
848 }
849
850 return 0;
851err_free:
908 pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
852 dma_free_coherent(sc->dev, sc->desc_len, sc->desc, sc->desc_daddr);
909err:
910 sc->desc = NULL;
911 return ret;
912}
913
853err:
854 sc->desc = NULL;
855 return ret;
856}
857
858void
859ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
860{
861 BUG_ON(!bf);
862 if (!bf->skb)
863 return;
864 dma_unmap_single(sc->dev, bf->skbaddr, bf->skb->len,
865 DMA_TO_DEVICE);
866 dev_kfree_skb_any(bf->skb);
867 bf->skb = NULL;
868 bf->skbaddr = 0;
869 bf->desc->ds_data = 0;
870}
871
872void
873ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
874{
875 struct ath5k_hw *ah = sc->ah;
876 struct ath_common *common = ath5k_hw_common(ah);
877
878 BUG_ON(!bf);
879 if (!bf->skb)
880 return;
881 dma_unmap_single(sc->dev, bf->skbaddr, common->rx_bufsize,
882 DMA_FROM_DEVICE);
883 dev_kfree_skb_any(bf->skb);
884 bf->skb = NULL;
885 bf->skbaddr = 0;
886 bf->desc->ds_data = 0;
887}
888
914static void
889static void
915ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev)
890ath5k_desc_free(struct ath5k_softc *sc)
916{
917 struct ath5k_buf *bf;
918
919 list_for_each_entry(bf, &sc->txbuf, list)
920 ath5k_txbuf_free_skb(sc, bf);
921 list_for_each_entry(bf, &sc->rxbuf, list)
922 ath5k_rxbuf_free_skb(sc, bf);
923 list_for_each_entry(bf, &sc->bcbuf, list)
924 ath5k_txbuf_free_skb(sc, bf);
925
926 /* Free memory associated with all descriptors */
891{
892 struct ath5k_buf *bf;
893
894 list_for_each_entry(bf, &sc->txbuf, list)
895 ath5k_txbuf_free_skb(sc, bf);
896 list_for_each_entry(bf, &sc->rxbuf, list)
897 ath5k_rxbuf_free_skb(sc, bf);
898 list_for_each_entry(bf, &sc->bcbuf, list)
899 ath5k_txbuf_free_skb(sc, bf);
900
901 /* Free memory associated with all descriptors */
927 pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr);
902 dma_free_coherent(sc->dev, sc->desc_len, sc->desc, sc->desc_daddr);
928 sc->desc = NULL;
929 sc->desc_daddr = 0;
930
931 kfree(sc->bufptr);
932 sc->bufptr = NULL;
933}
934
935

--- 128 unchanged lines hidden (view full) ---

1064 if (ret)
1065 goto err;
1066
1067 ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
1068err:
1069 return ret;
1070}
1071
903 sc->desc = NULL;
904 sc->desc_daddr = 0;
905
906 kfree(sc->bufptr);
907 sc->bufptr = NULL;
908}
909
910

--- 128 unchanged lines hidden (view full) ---

1039 if (ret)
1040 goto err;
1041
1042 ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
1043err:
1044 return ret;
1045}
1046
1047/**
1048 * ath5k_drain_tx_buffs - Empty tx buffers
1049 *
1050 * @sc The &struct ath5k_softc
1051 *
1052 * Empty tx buffers from all queues in preparation
1053 * of a reset or during shutdown.
1054 *
1055 * NB: this assumes output has been stopped and
1056 * we do not need to block ath5k_tx_tasklet
1057 */
1072static void
1058static void
1073ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1059ath5k_drain_tx_buffs(struct ath5k_softc *sc)
1074{
1060{
1061 struct ath5k_txq *txq;
1075 struct ath5k_buf *bf, *bf0;
1062 struct ath5k_buf *bf, *bf0;
1063 int i;
1076
1064
1077 /*
1078 * NB: this assumes output has been stopped and
1079 * we do not need to block ath5k_tx_tasklet
1080 */
1081 spin_lock_bh(&txq->lock);
1082 list_for_each_entry_safe(bf, bf0, &txq->q, list) {
1083 ath5k_debug_printtxbuf(sc, bf);
1065 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
1066 if (sc->txqs[i].setup) {
1067 txq = &sc->txqs[i];
1068 spin_lock_bh(&txq->lock);
1069 list_for_each_entry_safe(bf, bf0, &txq->q, list) {
1070 ath5k_debug_printtxbuf(sc, bf);
1084
1071
1085 ath5k_txbuf_free_skb(sc, bf);
1072 ath5k_txbuf_free_skb(sc, bf);
1086
1073
1087 spin_lock_bh(&sc->txbuflock);
1088 list_move_tail(&bf->list, &sc->txbuf);
1089 sc->txbuf_len++;
1090 txq->txq_len--;
1091 spin_unlock_bh(&sc->txbuflock);
1092 }
1093 txq->link = NULL;
1094 txq->txq_poll_mark = false;
1095 spin_unlock_bh(&txq->lock);
1096}
1097
1098/*
1099 * Drain the transmit queues and reclaim resources.
1100 */
1101static void
1102ath5k_txq_cleanup(struct ath5k_softc *sc)
1103{
1104 struct ath5k_hw *ah = sc->ah;
1105 unsigned int i;
1106
1107 /* XXX return value */
1108 if (likely(!test_bit(ATH_STAT_INVALID, sc->status))) {
1109 /* don't touch the hardware if marked invalid */
1110 ath5k_hw_stop_tx_dma(ah, sc->bhalq);
1111 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "beacon queue %x\n",
1112 ath5k_hw_get_txdp(ah, sc->bhalq));
1113 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
1114 if (sc->txqs[i].setup) {
1115 ath5k_hw_stop_tx_dma(ah, sc->txqs[i].qnum);
1116 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "txq [%u] %x, "
1117 "link %p\n",
1118 sc->txqs[i].qnum,
1119 ath5k_hw_get_txdp(ah,
1120 sc->txqs[i].qnum),
1121 sc->txqs[i].link);
1074 spin_lock_bh(&sc->txbuflock);
1075 list_move_tail(&bf->list, &sc->txbuf);
1076 sc->txbuf_len++;
1077 txq->txq_len--;
1078 spin_unlock_bh(&sc->txbuflock);
1122 }
1079 }
1080 txq->link = NULL;
1081 txq->txq_poll_mark = false;
1082 spin_unlock_bh(&txq->lock);
1083 }
1123 }
1084 }
1124
1125 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
1126 if (sc->txqs[i].setup)
1127 ath5k_txq_drainq(sc, &sc->txqs[i]);
1128}
1129
1130static void
1131ath5k_txq_release(struct ath5k_softc *sc)
1132{
1133 struct ath5k_txq *txq = sc->txqs;
1134 unsigned int i;
1135

--- 43 unchanged lines hidden (view full) ---

1179 ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */
1180
1181 return 0;
1182err:
1183 return ret;
1184}
1185
1186/*
1085}
1086
1087static void
1088ath5k_txq_release(struct ath5k_softc *sc)
1089{
1090 struct ath5k_txq *txq = sc->txqs;
1091 unsigned int i;
1092

--- 43 unchanged lines hidden (view full) ---

1136 ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */
1137
1138 return 0;
1139err:
1140 return ret;
1141}
1142
1143/*
1187 * Disable the receive h/w in preparation for a reset.
1144 * Disable the receive logic on PCU (DRU)
1145 * In preparation for a shutdown.
1146 *
1147 * Note: Doesn't stop rx DMA, ath5k_hw_dma_stop
1148 * does.
1188 */
1189static void
1190ath5k_rx_stop(struct ath5k_softc *sc)
1191{
1192 struct ath5k_hw *ah = sc->ah;
1193
1149 */
1150static void
1151ath5k_rx_stop(struct ath5k_softc *sc)
1152{
1153 struct ath5k_hw *ah = sc->ah;
1154
1194 ath5k_hw_stop_rx_pcu(ah); /* disable PCU */
1195 ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */
1155 ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */
1196 ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */
1156 ath5k_hw_stop_rx_pcu(ah); /* disable PCU */
1197
1198 ath5k_debug_printrxbuffs(sc, ah);
1199}
1200
1201static unsigned int
1202ath5k_rx_decrypted(struct ath5k_softc *sc, struct sk_buff *skb,
1203 struct ath5k_rx_status *rs)
1204{

--- 97 unchanged lines hidden (view full) ---

1302 struct ath5k_hw *ah = sc->ah;
1303 struct ath_common *common = ath5k_hw_common(ah);
1304
1305 /* only beacons from our BSSID */
1306 if (!ieee80211_is_beacon(mgmt->frame_control) ||
1307 memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
1308 return;
1309
1157
1158 ath5k_debug_printrxbuffs(sc, ah);
1159}
1160
1161static unsigned int
1162ath5k_rx_decrypted(struct ath5k_softc *sc, struct sk_buff *skb,
1163 struct ath5k_rx_status *rs)
1164{

--- 97 unchanged lines hidden (view full) ---

1262 struct ath5k_hw *ah = sc->ah;
1263 struct ath_common *common = ath5k_hw_common(ah);
1264
1265 /* only beacons from our BSSID */
1266 if (!ieee80211_is_beacon(mgmt->frame_control) ||
1267 memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
1268 return;
1269
1310 ah->ah_beacon_rssi_avg = ath5k_moving_average(ah->ah_beacon_rssi_avg,
1311 rssi);
1270 ewma_add(&ah->ah_beacon_rssi_avg, rssi);
1312
1313 /* in IBSS mode we should keep RSSI statistics per neighbour */
1314 /* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
1315}
1316
1317/*
1318 * Compute padding position. skb must contain an IEEE 802.11 frame
1319 */

--- 226 unchanged lines hidden (view full) ---

1546
1547 /*
1548 * If we can't replace bf->skb with a new skb under
1549 * memory pressure, just skip this packet
1550 */
1551 if (!next_skb)
1552 goto next;
1553
1271
1272 /* in IBSS mode we should keep RSSI statistics per neighbour */
1273 /* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
1274}
1275
1276/*
1277 * Compute padding position. skb must contain an IEEE 802.11 frame
1278 */

--- 226 unchanged lines hidden (view full) ---

1505
1506 /*
1507 * If we can't replace bf->skb with a new skb under
1508 * memory pressure, just skip this packet
1509 */
1510 if (!next_skb)
1511 goto next;
1512
1554 pci_unmap_single(sc->pdev, bf->skbaddr,
1513 dma_unmap_single(sc->dev, bf->skbaddr,
1555 common->rx_bufsize,
1514 common->rx_bufsize,
1556 PCI_DMA_FROMDEVICE);
1515 DMA_FROM_DEVICE);
1557
1558 skb_put(skb, rs.rs_datalen);
1559
1560 ath5k_receive_frame(sc, skb, &rs);
1561
1562 bf->skb = next_skb;
1563 bf->skbaddr = next_skb_addr;
1564 }

--- 4 unchanged lines hidden (view full) ---

1569 spin_unlock(&sc->rxbuflock);
1570}
1571
1572
1573/*************\
1574* TX Handling *
1575\*************/
1576
1516
1517 skb_put(skb, rs.rs_datalen);
1518
1519 ath5k_receive_frame(sc, skb, &rs);
1520
1521 bf->skb = next_skb;
1522 bf->skbaddr = next_skb_addr;
1523 }

--- 4 unchanged lines hidden (view full) ---

1528 spin_unlock(&sc->rxbuflock);
1529}
1530
1531
1532/*************\
1533* TX Handling *
1534\*************/
1535
1577static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
1578 struct ath5k_txq *txq)
1536int
1537ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
1538 struct ath5k_txq *txq)
1579{
1580 struct ath5k_softc *sc = hw->priv;
1581 struct ath5k_buf *bf;
1582 unsigned long flags;
1583 int padsize;
1584
1585 ath5k_debug_dump_skb(sc, skb, "TX ", 1);
1586

--- 124 unchanged lines hidden (view full) ---

1711 ATH5K_ERR(sc,
1712 "error %d while processing "
1713 "queue %u\n", ret, txq->qnum);
1714 break;
1715 }
1716
1717 skb = bf->skb;
1718 bf->skb = NULL;
1539{
1540 struct ath5k_softc *sc = hw->priv;
1541 struct ath5k_buf *bf;
1542 unsigned long flags;
1543 int padsize;
1544
1545 ath5k_debug_dump_skb(sc, skb, "TX ", 1);
1546

--- 124 unchanged lines hidden (view full) ---

1671 ATH5K_ERR(sc,
1672 "error %d while processing "
1673 "queue %u\n", ret, txq->qnum);
1674 break;
1675 }
1676
1677 skb = bf->skb;
1678 bf->skb = NULL;
1719 pci_unmap_single(sc->pdev, bf->skbaddr, skb->len,
1720 PCI_DMA_TODEVICE);
1679
1680 dma_unmap_single(sc->dev, bf->skbaddr, skb->len,
1681 DMA_TO_DEVICE);
1721 ath5k_tx_frame_completed(sc, skb, &ts);
1722 }
1723
1724 /*
1725 * It's possible that the hardware can say the buffer is
1726 * completed when it hasn't yet loaded the ds_link from
1727 * host memory and moved on.
1728 * Always keep the last descriptor to avoid HW races...

--- 37 unchanged lines hidden (view full) ---

1766 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1767 struct ath5k_hw *ah = sc->ah;
1768 struct ath5k_desc *ds;
1769 int ret = 0;
1770 u8 antenna;
1771 u32 flags;
1772 const int padsize = 0;
1773
1682 ath5k_tx_frame_completed(sc, skb, &ts);
1683 }
1684
1685 /*
1686 * It's possible that the hardware can say the buffer is
1687 * completed when it hasn't yet loaded the ds_link from
1688 * host memory and moved on.
1689 * Always keep the last descriptor to avoid HW races...

--- 37 unchanged lines hidden (view full) ---

1727 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1728 struct ath5k_hw *ah = sc->ah;
1729 struct ath5k_desc *ds;
1730 int ret = 0;
1731 u8 antenna;
1732 u32 flags;
1733 const int padsize = 0;
1734
1774 bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len,
1775 PCI_DMA_TODEVICE);
1735 bf->skbaddr = dma_map_single(sc->dev, skb->data, skb->len,
1736 DMA_TO_DEVICE);
1776 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
1777 "skbaddr %llx\n", skb, skb->data, skb->len,
1778 (unsigned long long)bf->skbaddr);
1737 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
1738 "skbaddr %llx\n", skb, skb->data, skb->len,
1739 (unsigned long long)bf->skbaddr);
1779 if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) {
1740
1741 if (dma_mapping_error(sc->dev, bf->skbaddr)) {
1780 ATH5K_ERR(sc, "beacon DMA mapping failed\n");
1781 return -EIO;
1782 }
1783
1784 ds = bf->desc;
1785 antenna = ah->ah_tx_ant;
1786
1787 flags = AR5K_TXDESC_NOACK;

--- 35 unchanged lines hidden (view full) ---

1823 ieee80211_get_tx_rate(sc->hw, info)->hw_value,
1824 1, AR5K_TXKEYIX_INVALID,
1825 antenna, flags, 0, 0);
1826 if (ret)
1827 goto err_unmap;
1828
1829 return 0;
1830err_unmap:
1742 ATH5K_ERR(sc, "beacon DMA mapping failed\n");
1743 return -EIO;
1744 }
1745
1746 ds = bf->desc;
1747 antenna = ah->ah_tx_ant;
1748
1749 flags = AR5K_TXDESC_NOACK;

--- 35 unchanged lines hidden (view full) ---

1785 ieee80211_get_tx_rate(sc->hw, info)->hw_value,
1786 1, AR5K_TXKEYIX_INVALID,
1787 antenna, flags, 0, 0);
1788 if (ret)
1789 goto err_unmap;
1790
1791 return 0;
1792err_unmap:
1831 pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE);
1793 dma_unmap_single(sc->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
1832 return ret;
1833}
1834
1835/*
1836 * Updates the beacon that is sent by ath5k_beacon_send. For adhoc,
1837 * this is called only once at config_bss time, for AP we do it every
1838 * SWBA interrupt so that the TIM will reflect buffered frames.
1839 *
1840 * Called with the beacon lock.
1841 */
1794 return ret;
1795}
1796
1797/*
1798 * Updates the beacon that is sent by ath5k_beacon_send. For adhoc,
1799 * this is called only once at config_bss time, for AP we do it every
1800 * SWBA interrupt so that the TIM will reflect buffered frames.
1801 *
1802 * Called with the beacon lock.
1803 */
1842static int
1804int
1843ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1844{
1845 int ret;
1846 struct ath5k_softc *sc = hw->priv;
1847 struct ath5k_vif *avf = (void *)vif->drv_priv;
1848 struct sk_buff *skb;
1849
1850 if (WARN_ON(!vif)) {

--- 89 unchanged lines hidden (view full) ---

1940 return;
1941 }
1942
1943 /*
1944 * Stop any current dma and put the new frame on the queue.
1945 * This should never fail since we check above that no frames
1946 * are still pending on the queue.
1947 */
1805ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1806{
1807 int ret;
1808 struct ath5k_softc *sc = hw->priv;
1809 struct ath5k_vif *avf = (void *)vif->drv_priv;
1810 struct sk_buff *skb;
1811
1812 if (WARN_ON(!vif)) {

--- 89 unchanged lines hidden (view full) ---

1902 return;
1903 }
1904
1905 /*
1906 * Stop any current dma and put the new frame on the queue.
1907 * This should never fail since we check above that no frames
1908 * are still pending on the queue.
1909 */
1948 if (unlikely(ath5k_hw_stop_tx_dma(ah, sc->bhalq))) {
1910 if (unlikely(ath5k_hw_stop_beacon_queue(ah, sc->bhalq))) {
1949 ATH5K_WARN(sc, "beacon queue %u didn't start/stop ?\n", sc->bhalq);
1950 /* NB: hw still stops DMA, so proceed */
1951 }
1952
1953 /* refresh the beacon for AP or MESH mode */
1954 if (sc->opmode == NL80211_IFTYPE_AP ||
1955 sc->opmode == NL80211_IFTYPE_MESH_POINT)
1956 ath5k_beacon_update(sc->hw, vif);

--- 23 unchanged lines hidden (view full) ---

1980 * of a received beacon or the current local hardware TSF and write it to the
1981 * beacon timer registers.
1982 *
1983 * This is called in a variety of situations, e.g. when a beacon is received,
1984 * when a TSF update has been detected, but also when an new IBSS is created or
1985 * when we otherwise know we have to update the timers, but we keep it in this
1986 * function to have it all together in one place.
1987 */
1911 ATH5K_WARN(sc, "beacon queue %u didn't start/stop ?\n", sc->bhalq);
1912 /* NB: hw still stops DMA, so proceed */
1913 }
1914
1915 /* refresh the beacon for AP or MESH mode */
1916 if (sc->opmode == NL80211_IFTYPE_AP ||
1917 sc->opmode == NL80211_IFTYPE_MESH_POINT)
1918 ath5k_beacon_update(sc->hw, vif);

--- 23 unchanged lines hidden (view full) ---

1942 * of a received beacon or the current local hardware TSF and write it to the
1943 * beacon timer registers.
1944 *
1945 * This is called in a variety of situations, e.g. when a beacon is received,
1946 * when a TSF update has been detected, but also when an new IBSS is created or
1947 * when we otherwise know we have to update the timers, but we keep it in this
1948 * function to have it all together in one place.
1949 */
1988static void
1950void
1989ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
1990{
1991 struct ath5k_hw *ah = sc->ah;
1992 u32 nexttbtt, intval, hw_tu, bc_tu;
1993 u64 hw_tsf;
1994
1995 intval = sc->bintval & AR5K_BEACON_PERIOD;
1996 if (sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) {

--- 85 unchanged lines hidden (view full) ---

2082/**
2083 * ath5k_beacon_config - Configure the beacon queues and interrupts
2084 *
2085 * @sc: struct ath5k_softc pointer we are operating on
2086 *
2087 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
2088 * interrupts to detect TSF updates only.
2089 */
1951ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
1952{
1953 struct ath5k_hw *ah = sc->ah;
1954 u32 nexttbtt, intval, hw_tu, bc_tu;
1955 u64 hw_tsf;
1956
1957 intval = sc->bintval & AR5K_BEACON_PERIOD;
1958 if (sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) {

--- 85 unchanged lines hidden (view full) ---

2044/**
2045 * ath5k_beacon_config - Configure the beacon queues and interrupts
2046 *
2047 * @sc: struct ath5k_softc pointer we are operating on
2048 *
2049 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
2050 * interrupts to detect TSF updates only.
2051 */
2090static void
2052void
2091ath5k_beacon_config(struct ath5k_softc *sc)
2092{
2093 struct ath5k_hw *ah = sc->ah;
2094 unsigned long flags;
2095
2096 spin_lock_irqsave(&sc->block, flags);
2097 sc->bmisscount = 0;
2098 sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);

--- 11 unchanged lines hidden (view full) ---

2110 sc->imask |= AR5K_INT_SWBA;
2111
2112 if (sc->opmode == NL80211_IFTYPE_ADHOC) {
2113 if (ath5k_hw_hasveol(ah))
2114 ath5k_beacon_send(sc);
2115 } else
2116 ath5k_beacon_update_timers(sc, -1);
2117 } else {
2053ath5k_beacon_config(struct ath5k_softc *sc)
2054{
2055 struct ath5k_hw *ah = sc->ah;
2056 unsigned long flags;
2057
2058 spin_lock_irqsave(&sc->block, flags);
2059 sc->bmisscount = 0;
2060 sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);

--- 11 unchanged lines hidden (view full) ---

2072 sc->imask |= AR5K_INT_SWBA;
2073
2074 if (sc->opmode == NL80211_IFTYPE_ADHOC) {
2075 if (ath5k_hw_hasveol(ah))
2076 ath5k_beacon_send(sc);
2077 } else
2078 ath5k_beacon_update_timers(sc, -1);
2079 } else {
2118 ath5k_hw_stop_tx_dma(sc->ah, sc->bhalq);
2080 ath5k_hw_stop_beacon_queue(sc->ah, sc->bhalq);
2119 }
2120
2121 ath5k_hw_set_imr(ah, sc->imask);
2122 mmiowb();
2123 spin_unlock_irqrestore(&sc->block, flags);
2124}
2125
2126static void ath5k_tasklet_beacon(unsigned long data)

--- 45 unchanged lines hidden (view full) ---

2172 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
2173 tasklet_schedule(&ah->ah_sc->calib);
2174 }
2175 /* we could use SWI to generate enough interrupts to meet our
2176 * calibration interval requirements, if necessary:
2177 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
2178}
2179
2081 }
2082
2083 ath5k_hw_set_imr(ah, sc->imask);
2084 mmiowb();
2085 spin_unlock_irqrestore(&sc->block, flags);
2086}
2087
2088static void ath5k_tasklet_beacon(unsigned long data)

--- 45 unchanged lines hidden (view full) ---

2134 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
2135 tasklet_schedule(&ah->ah_sc->calib);
2136 }
2137 /* we could use SWI to generate enough interrupts to meet our
2138 * calibration interval requirements, if necessary:
2139 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
2140}
2141
2180static irqreturn_t
2142irqreturn_t
2181ath5k_intr(int irq, void *dev_id)
2182{
2183 struct ath5k_softc *sc = dev_id;
2184 struct ath5k_hw *ah = sc->ah;
2185 enum ath5k_int status;
2186 unsigned int counter = 1000;
2187
2188 if (unlikely(test_bit(ATH_STAT_INVALID, sc->status) ||
2143ath5k_intr(int irq, void *dev_id)
2144{
2145 struct ath5k_softc *sc = dev_id;
2146 struct ath5k_hw *ah = sc->ah;
2147 enum ath5k_int status;
2148 unsigned int counter = 1000;
2149
2150 if (unlikely(test_bit(ATH_STAT_INVALID, sc->status) ||
2189 !ath5k_hw_is_intr_pending(ah)))
2151 ((ath5k_get_bus_type(ah) != ATH_AHB) &&
2152 !ath5k_hw_is_intr_pending(ah))))
2190 return IRQ_NONE;
2191
2192 do {
2193 ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
2194 ATH5K_DBG(sc, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
2195 status, sc->imask);
2196 if (unlikely(status & AR5K_INT_FATAL)) {
2197 /*

--- 49 unchanged lines hidden (view full) ---

2247 sc->stats.mib_intr++;
2248 ath5k_hw_update_mib_counters(ah);
2249 ath5k_ani_mib_intr(ah);
2250 }
2251 if (status & AR5K_INT_GPIO)
2252 tasklet_schedule(&sc->rf_kill.toggleq);
2253
2254 }
2153 return IRQ_NONE;
2154
2155 do {
2156 ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
2157 ATH5K_DBG(sc, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
2158 status, sc->imask);
2159 if (unlikely(status & AR5K_INT_FATAL)) {
2160 /*

--- 49 unchanged lines hidden (view full) ---

2210 sc->stats.mib_intr++;
2211 ath5k_hw_update_mib_counters(ah);
2212 ath5k_ani_mib_intr(ah);
2213 }
2214 if (status & AR5K_INT_GPIO)
2215 tasklet_schedule(&sc->rf_kill.toggleq);
2216
2217 }
2218
2219 if (ath5k_get_bus_type(ah) == ATH_AHB)
2220 break;
2221
2255 } while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
2256
2257 if (unlikely(!counter))
2258 ATH5K_WARN(sc, "too many interrupts, giving up for now\n");
2259
2260 ath5k_intr_calibration_poll(ah);
2261
2262 return IRQ_HANDLED;

--- 83 unchanged lines hidden (view full) ---

2346 }
2347 spin_unlock_bh(&txq->lock);
2348 }
2349 }
2350
2351 if (needreset) {
2352 ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
2353 "TX queues stuck, resetting\n");
2222 } while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
2223
2224 if (unlikely(!counter))
2225 ATH5K_WARN(sc, "too many interrupts, giving up for now\n");
2226
2227 ath5k_intr_calibration_poll(ah);
2228
2229 return IRQ_HANDLED;

--- 83 unchanged lines hidden (view full) ---

2313 }
2314 spin_unlock_bh(&txq->lock);
2315 }
2316 }
2317
2318 if (needreset) {
2319 ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
2320 "TX queues stuck, resetting\n");
2354 ath5k_reset(sc, sc->curchan);
2321 ath5k_reset(sc, NULL, true);
2355 }
2356
2357 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
2358 msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
2359}
2360
2361
2362/*************************\
2363* Initialization routines *
2364\*************************/
2365
2322 }
2323
2324 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
2325 msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
2326}
2327
2328
2329/*************************\
2330* Initialization routines *
2331\*************************/
2332
2333int
2334ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops)
2335{
2336 struct ieee80211_hw *hw = sc->hw;
2337 struct ath_common *common;
2338 int ret;
2339 int csz;
2340
2341 /* Initialize driver private data */
2342 SET_IEEE80211_DEV(hw, sc->dev);
2343 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
2344 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
2345 IEEE80211_HW_SIGNAL_DBM |
2346 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
2347
2348 hw->wiphy->interface_modes =
2349 BIT(NL80211_IFTYPE_AP) |
2350 BIT(NL80211_IFTYPE_STATION) |
2351 BIT(NL80211_IFTYPE_ADHOC) |
2352 BIT(NL80211_IFTYPE_MESH_POINT);
2353
2354 /* both antennas can be configured as RX or TX */
2355 hw->wiphy->available_antennas_tx = 0x3;
2356 hw->wiphy->available_antennas_rx = 0x3;
2357
2358 hw->extra_tx_headroom = 2;
2359 hw->channel_change_time = 5000;
2360
2361 /*
2362 * Mark the device as detached to avoid processing
2363 * interrupts until setup is complete.
2364 */
2365 __set_bit(ATH_STAT_INVALID, sc->status);
2366
2367 sc->opmode = NL80211_IFTYPE_STATION;
2368 sc->bintval = 1000;
2369 mutex_init(&sc->lock);
2370 spin_lock_init(&sc->rxbuflock);
2371 spin_lock_init(&sc->txbuflock);
2372 spin_lock_init(&sc->block);
2373
2374
2375 /* Setup interrupt handler */
2376 ret = request_irq(sc->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
2377 if (ret) {
2378 ATH5K_ERR(sc, "request_irq failed\n");
2379 goto err;
2380 }
2381
2382 /* If we passed the test, malloc an ath5k_hw struct */
2383 sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
2384 if (!sc->ah) {
2385 ret = -ENOMEM;
2386 ATH5K_ERR(sc, "out of memory\n");
2387 goto err_irq;
2388 }
2389
2390 sc->ah->ah_sc = sc;
2391 sc->ah->ah_iobase = sc->iobase;
2392 common = ath5k_hw_common(sc->ah);
2393 common->ops = &ath5k_common_ops;
2394 common->bus_ops = bus_ops;
2395 common->ah = sc->ah;
2396 common->hw = hw;
2397 common->priv = sc;
2398
2399 /*
2400 * Cache line size is used to size and align various
2401 * structures used to communicate with the hardware.
2402 */
2403 ath5k_read_cachesize(common, &csz);
2404 common->cachelsz = csz << 2; /* convert to bytes */
2405
2406 spin_lock_init(&common->cc_lock);
2407
2408 /* Initialize device */
2409 ret = ath5k_hw_init(sc);
2410 if (ret)
2411 goto err_free_ah;
2412
2413 /* set up multi-rate retry capabilities */
2414 if (sc->ah->ah_version == AR5K_AR5212) {
2415 hw->max_rates = 4;
2416 hw->max_rate_tries = 11;
2417 }
2418
2419 hw->vif_data_size = sizeof(struct ath5k_vif);
2420
2421 /* Finish private driver data initialization */
2422 ret = ath5k_init(hw);
2423 if (ret)
2424 goto err_ah;
2425
2426 ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
2427 ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
2428 sc->ah->ah_mac_srev,
2429 sc->ah->ah_phy_revision);
2430
2431 if (!sc->ah->ah_single_chip) {
2432 /* Single chip radio (!RF5111) */
2433 if (sc->ah->ah_radio_5ghz_revision &&
2434 !sc->ah->ah_radio_2ghz_revision) {
2435 /* No 5GHz support -> report 2GHz radio */
2436 if (!test_bit(AR5K_MODE_11A,
2437 sc->ah->ah_capabilities.cap_mode)) {
2438 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
2439 ath5k_chip_name(AR5K_VERSION_RAD,
2440 sc->ah->ah_radio_5ghz_revision),
2441 sc->ah->ah_radio_5ghz_revision);
2442 /* No 2GHz support (5110 and some
2443 * 5Ghz only cards) -> report 5Ghz radio */
2444 } else if (!test_bit(AR5K_MODE_11B,
2445 sc->ah->ah_capabilities.cap_mode)) {
2446 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
2447 ath5k_chip_name(AR5K_VERSION_RAD,
2448 sc->ah->ah_radio_5ghz_revision),
2449 sc->ah->ah_radio_5ghz_revision);
2450 /* Multiband radio */
2451 } else {
2452 ATH5K_INFO(sc, "RF%s multiband radio found"
2453 " (0x%x)\n",
2454 ath5k_chip_name(AR5K_VERSION_RAD,
2455 sc->ah->ah_radio_5ghz_revision),
2456 sc->ah->ah_radio_5ghz_revision);
2457 }
2458 }
2459 /* Multi chip radio (RF5111 - RF2111) ->
2460 * report both 2GHz/5GHz radios */
2461 else if (sc->ah->ah_radio_5ghz_revision &&
2462 sc->ah->ah_radio_2ghz_revision){
2463 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
2464 ath5k_chip_name(AR5K_VERSION_RAD,
2465 sc->ah->ah_radio_5ghz_revision),
2466 sc->ah->ah_radio_5ghz_revision);
2467 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
2468 ath5k_chip_name(AR5K_VERSION_RAD,
2469 sc->ah->ah_radio_2ghz_revision),
2470 sc->ah->ah_radio_2ghz_revision);
2471 }
2472 }
2473
2474 ath5k_debug_init_device(sc);
2475
2476 /* ready to process interrupts */
2477 __clear_bit(ATH_STAT_INVALID, sc->status);
2478
2479 return 0;
2480err_ah:
2481 ath5k_hw_deinit(sc->ah);
2482err_free_ah:
2483 kfree(sc->ah);
2484err_irq:
2485 free_irq(sc->irq, sc);
2486err:
2487 return ret;
2488}
2489
2366static int
2367ath5k_stop_locked(struct ath5k_softc *sc)
2368{
2369 struct ath5k_hw *ah = sc->ah;
2370
2371 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n",
2372 test_bit(ATH_STAT_INVALID, sc->status));
2373

--- 12 unchanged lines hidden (view full) ---

2386 * Note that some of this work is not possible if the
2387 * hardware is gone (invalid).
2388 */
2389 ieee80211_stop_queues(sc->hw);
2390
2391 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2392 ath5k_led_off(sc);
2393 ath5k_hw_set_imr(ah, 0);
2490static int
2491ath5k_stop_locked(struct ath5k_softc *sc)
2492{
2493 struct ath5k_hw *ah = sc->ah;
2494
2495 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n",
2496 test_bit(ATH_STAT_INVALID, sc->status));
2497

--- 12 unchanged lines hidden (view full) ---

2510 * Note that some of this work is not possible if the
2511 * hardware is gone (invalid).
2512 */
2513 ieee80211_stop_queues(sc->hw);
2514
2515 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2516 ath5k_led_off(sc);
2517 ath5k_hw_set_imr(ah, 0);
2394 synchronize_irq(sc->pdev->irq);
2395 }
2396 ath5k_txq_cleanup(sc);
2397 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2518 synchronize_irq(sc->irq);
2398 ath5k_rx_stop(sc);
2519 ath5k_rx_stop(sc);
2520 ath5k_hw_dma_stop(ah);
2521 ath5k_drain_tx_buffs(sc);
2399 ath5k_hw_phy_disable(ah);
2400 }
2401
2402 return 0;
2403}
2404
2522 ath5k_hw_phy_disable(ah);
2523 }
2524
2525 return 0;
2526}
2527
2405static int
2406ath5k_init(struct ath5k_softc *sc)
2528int
2529ath5k_init_hw(struct ath5k_softc *sc)
2407{
2408 struct ath5k_hw *ah = sc->ah;
2409 struct ath_common *common = ath5k_hw_common(ah);
2410 int ret, i;
2411
2412 mutex_lock(&sc->lock);
2413
2414 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode);

--- 12 unchanged lines hidden (view full) ---

2427 * and then setup of the interrupt mask.
2428 */
2429 sc->curchan = sc->hw->conf.channel;
2430 sc->curband = &sc->sbands[sc->curchan->band];
2431 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
2432 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
2433 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
2434
2530{
2531 struct ath5k_hw *ah = sc->ah;
2532 struct ath_common *common = ath5k_hw_common(ah);
2533 int ret, i;
2534
2535 mutex_lock(&sc->lock);
2536
2537 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode);

--- 12 unchanged lines hidden (view full) ---

2550 * and then setup of the interrupt mask.
2551 */
2552 sc->curchan = sc->hw->conf.channel;
2553 sc->curband = &sc->sbands[sc->curchan->band];
2554 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
2555 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
2556 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
2557
2435 ret = ath5k_reset(sc, NULL);
2558 ret = ath5k_reset(sc, NULL, false);
2436 if (ret)
2437 goto done;
2438
2439 ath5k_rfkill_hw_start(ah);
2440
2441 /*
2442 * Reset the key cache since some parts do not reset the
2443 * contents on initial power up or resume from suspend.
2444 */
2445 for (i = 0; i < common->keymax; i++)
2446 ath_hw_keyreset(common, (u16) i);
2447
2559 if (ret)
2560 goto done;
2561
2562 ath5k_rfkill_hw_start(ah);
2563
2564 /*
2565 * Reset the key cache since some parts do not reset the
2566 * contents on initial power up or resume from suspend.
2567 */
2568 for (i = 0; i < common->keymax; i++)
2569 ath_hw_keyreset(common, (u16) i);
2570
2448 ath5k_hw_set_ack_bitrate_high(ah, true);
2571 /* Use higher rates for acks instead of base
2572 * rate */
2573 ah->ah_ack_bitrate_high = true;
2449
2450 for (i = 0; i < ARRAY_SIZE(sc->bslot); i++)
2451 sc->bslot[i] = NULL;
2452
2453 ret = 0;
2454done:
2455 mmiowb();
2456 mutex_unlock(&sc->lock);

--- 14 unchanged lines hidden (view full) ---

2471}
2472
2473/*
2474 * Stop the device, grabbing the top-level lock to protect
2475 * against concurrent entry through ath5k_init (which can happen
2476 * if another thread does a system call and the thread doing the
2477 * stop is preempted).
2478 */
2574
2575 for (i = 0; i < ARRAY_SIZE(sc->bslot); i++)
2576 sc->bslot[i] = NULL;
2577
2578 ret = 0;
2579done:
2580 mmiowb();
2581 mutex_unlock(&sc->lock);

--- 14 unchanged lines hidden (view full) ---

2596}
2597
2598/*
2599 * Stop the device, grabbing the top-level lock to protect
2600 * against concurrent entry through ath5k_init (which can happen
2601 * if another thread does a system call and the thread doing the
2602 * stop is preempted).
2603 */
2479static int
2604int
2480ath5k_stop_hw(struct ath5k_softc *sc)
2481{
2482 int ret;
2483
2484 mutex_lock(&sc->lock);
2485 ret = ath5k_stop_locked(sc);
2486 if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) {
2487 /*

--- 36 unchanged lines hidden (view full) ---

2524
2525/*
2526 * Reset the hardware. If chan is not NULL, then also pause rx/tx
2527 * and change to the given channel.
2528 *
2529 * This should be called with sc->lock.
2530 */
2531static int
2605ath5k_stop_hw(struct ath5k_softc *sc)
2606{
2607 int ret;
2608
2609 mutex_lock(&sc->lock);
2610 ret = ath5k_stop_locked(sc);
2611 if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) {
2612 /*

--- 36 unchanged lines hidden (view full) ---

2649
2650/*
2651 * Reset the hardware. If chan is not NULL, then also pause rx/tx
2652 * and change to the given channel.
2653 *
2654 * This should be called with sc->lock.
2655 */
2656static int
2532ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
2657ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
2658 bool skip_pcu)
2533{
2534 struct ath5k_hw *ah = sc->ah;
2659{
2660 struct ath5k_hw *ah = sc->ah;
2535 int ret;
2661 struct ath_common *common = ath5k_hw_common(ah);
2662 int ret, ani_mode;
2536
2537 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");
2538
2539 ath5k_hw_set_imr(ah, 0);
2663
2664 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");
2665
2666 ath5k_hw_set_imr(ah, 0);
2540 synchronize_irq(sc->pdev->irq);
2667 synchronize_irq(sc->irq);
2541 stop_tasklets(sc);
2542
2668 stop_tasklets(sc);
2669
2543 if (chan) {
2544 ath5k_txq_cleanup(sc);
2545 ath5k_rx_stop(sc);
2670 /* Save ani mode and disable ANI durring
2671 * reset. If we don't we might get false
2672 * PHY error interrupts. */
2673 ani_mode = ah->ah_sc->ani_state.ani_mode;
2674 ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);
2546
2675
2676 /* We are going to empty hw queues
2677 * so we should also free any remaining
2678 * tx buffers */
2679 ath5k_drain_tx_buffs(sc);
2680 if (chan) {
2547 sc->curchan = chan;
2548 sc->curband = &sc->sbands[chan->band];
2549 }
2681 sc->curchan = chan;
2682 sc->curband = &sc->sbands[chan->band];
2683 }
2550 ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL);
2684 ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL,
2685 skip_pcu);
2551 if (ret) {
2552 ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
2553 goto err;
2554 }
2555
2556 ret = ath5k_rx_start(sc);
2557 if (ret) {
2558 ATH5K_ERR(sc, "can't start recv logic\n");
2559 goto err;
2560 }
2561
2686 if (ret) {
2687 ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
2688 goto err;
2689 }
2690
2691 ret = ath5k_rx_start(sc);
2692 if (ret) {
2693 ATH5K_ERR(sc, "can't start recv logic\n");
2694 goto err;
2695 }
2696
2562 ath5k_ani_init(ah, ah->ah_sc->ani_state.ani_mode);
2697 ath5k_ani_init(ah, ani_mode);
2563
2564 ah->ah_cal_next_full = jiffies;
2565 ah->ah_cal_next_ani = jiffies;
2566 ah->ah_cal_next_nf = jiffies;
2698
2699 ah->ah_cal_next_full = jiffies;
2700 ah->ah_cal_next_ani = jiffies;
2701 ah->ah_cal_next_nf = jiffies;
2702 ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
2567
2703
2704 /* clear survey data and cycle counters */
2705 memset(&sc->survey, 0, sizeof(sc->survey));
2706 spin_lock(&common->cc_lock);
2707 ath_hw_cycle_counters_update(common);
2708 memset(&common->cc_survey, 0, sizeof(common->cc_survey));
2709 memset(&common->cc_ani, 0, sizeof(common->cc_ani));
2710 spin_unlock(&common->cc_lock);
2711
2568 /*
2569 * Change channels and update the h/w rate map if we're switching;
2570 * e.g. 11a to 11b/g.
2571 *
2572 * We may be doing a reset in response to an ioctl that changes the
2573 * channel so update any state that might change as a result.
2574 *
2575 * XXX needed?

--- 11 unchanged lines hidden (view full) ---

2587}
2588
2589static void ath5k_reset_work(struct work_struct *work)
2590{
2591 struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
2592 reset_work);
2593
2594 mutex_lock(&sc->lock);
2712 /*
2713 * Change channels and update the h/w rate map if we're switching;
2714 * e.g. 11a to 11b/g.
2715 *
2716 * We may be doing a reset in response to an ioctl that changes the
2717 * channel so update any state that might change as a result.
2718 *
2719 * XXX needed?

--- 11 unchanged lines hidden (view full) ---

2731}
2732
2733static void ath5k_reset_work(struct work_struct *work)
2734{
2735 struct ath5k_softc *sc = container_of(work, struct ath5k_softc,
2736 reset_work);
2737
2738 mutex_lock(&sc->lock);
2595 ath5k_reset(sc, sc->curchan);
2739 ath5k_reset(sc, NULL, true);
2596 mutex_unlock(&sc->lock);
2597}
2598
2599static int
2740 mutex_unlock(&sc->lock);
2741}
2742
2743static int
2600ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
2744ath5k_init(struct ieee80211_hw *hw)
2601{
2745{
2746
2602 struct ath5k_softc *sc = hw->priv;
2603 struct ath5k_hw *ah = sc->ah;
2604 struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
2605 struct ath5k_txq *txq;
2606 u8 mac[ETH_ALEN] = {};
2607 int ret;
2608
2747 struct ath5k_softc *sc = hw->priv;
2748 struct ath5k_hw *ah = sc->ah;
2749 struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
2750 struct ath5k_txq *txq;
2751 u8 mac[ETH_ALEN] = {};
2752 int ret;
2753
2609 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device);
2610
2611 /*
2612 * Check if the MAC has multi-rate retry support.
2613 * We do this by trying to setup a fake extended
2614 * descriptor. MACs that don't have support will
2615 * return false w/o doing anything. MACs that do
2616 * support it will return true w/o doing anything.
2617 */

--- 20 unchanged lines hidden (view full) ---

2638 if (test_bit(AR5K_MODE_11A, ah->ah_modes))
2639 ath5k_setcurmode(sc, AR5K_MODE_11A);
2640 else
2641 ath5k_setcurmode(sc, AR5K_MODE_11B);
2642
2643 /*
2644 * Allocate tx+rx descriptors and populate the lists.
2645 */
2754
2755 /*
2756 * Check if the MAC has multi-rate retry support.
2757 * We do this by trying to setup a fake extended
2758 * descriptor. MACs that don't have support will
2759 * return false w/o doing anything. MACs that do
2760 * support it will return true w/o doing anything.
2761 */

--- 20 unchanged lines hidden (view full) ---

2782 if (test_bit(AR5K_MODE_11A, ah->ah_modes))
2783 ath5k_setcurmode(sc, AR5K_MODE_11A);
2784 else
2785 ath5k_setcurmode(sc, AR5K_MODE_11B);
2786
2787 /*
2788 * Allocate tx+rx descriptors and populate the lists.
2789 */
2646 ret = ath5k_desc_alloc(sc, pdev);
2790 ret = ath5k_desc_alloc(sc);
2647 if (ret) {
2648 ATH5K_ERR(sc, "can't allocate descriptors\n");
2649 goto err;
2650 }
2651
2652 /*
2653 * Allocate hardware transmit queues: one queue for
2654 * beacon frames and one data queue for each QoS

--- 8 unchanged lines hidden (view full) ---

2663 sc->bhalq = ret;
2664 sc->cabq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0);
2665 if (IS_ERR(sc->cabq)) {
2666 ATH5K_ERR(sc, "can't setup cab queue\n");
2667 ret = PTR_ERR(sc->cabq);
2668 goto err_bhal;
2669 }
2670
2791 if (ret) {
2792 ATH5K_ERR(sc, "can't allocate descriptors\n");
2793 goto err;
2794 }
2795
2796 /*
2797 * Allocate hardware transmit queues: one queue for
2798 * beacon frames and one data queue for each QoS

--- 8 unchanged lines hidden (view full) ---

2807 sc->bhalq = ret;
2808 sc->cabq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0);
2809 if (IS_ERR(sc->cabq)) {
2810 ATH5K_ERR(sc, "can't setup cab queue\n");
2811 ret = PTR_ERR(sc->cabq);
2812 goto err_bhal;
2813 }
2814
2671 /* This order matches mac80211's queue priority, so we can
2672 * directly use the mac80211 queue number without any mapping */
2673 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
2674 if (IS_ERR(txq)) {
2675 ATH5K_ERR(sc, "can't setup xmit queue\n");
2676 ret = PTR_ERR(txq);
2677 goto err_queues;
2815 /* 5211 and 5212 usually support 10 queues but we better rely on the
2816 * capability information */
2817 if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) {
2818 /* This order matches mac80211's queue priority, so we can
2819 * directly use the mac80211 queue number without any mapping */
2820 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
2821 if (IS_ERR(txq)) {
2822 ATH5K_ERR(sc, "can't setup xmit queue\n");
2823 ret = PTR_ERR(txq);
2824 goto err_queues;
2825 }
2826 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
2827 if (IS_ERR(txq)) {
2828 ATH5K_ERR(sc, "can't setup xmit queue\n");
2829 ret = PTR_ERR(txq);
2830 goto err_queues;
2831 }
2832 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
2833 if (IS_ERR(txq)) {
2834 ATH5K_ERR(sc, "can't setup xmit queue\n");
2835 ret = PTR_ERR(txq);
2836 goto err_queues;
2837 }
2838 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
2839 if (IS_ERR(txq)) {
2840 ATH5K_ERR(sc, "can't setup xmit queue\n");
2841 ret = PTR_ERR(txq);
2842 goto err_queues;
2843 }
2844 hw->queues = 4;
2845 } else {
2846 /* older hardware (5210) can only support one data queue */
2847 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
2848 if (IS_ERR(txq)) {
2849 ATH5K_ERR(sc, "can't setup xmit queue\n");
2850 ret = PTR_ERR(txq);
2851 goto err_queues;
2852 }
2853 hw->queues = 1;
2678 }
2854 }
2679 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
2680 if (IS_ERR(txq)) {
2681 ATH5K_ERR(sc, "can't setup xmit queue\n");
2682 ret = PTR_ERR(txq);
2683 goto err_queues;
2684 }
2685 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
2686 if (IS_ERR(txq)) {
2687 ATH5K_ERR(sc, "can't setup xmit queue\n");
2688 ret = PTR_ERR(txq);
2689 goto err_queues;
2690 }
2691 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
2692 if (IS_ERR(txq)) {
2693 ATH5K_ERR(sc, "can't setup xmit queue\n");
2694 ret = PTR_ERR(txq);
2695 goto err_queues;
2696 }
2697 hw->queues = 4;
2698
2699 tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
2700 tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
2701 tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
2702 tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
2703 tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
2704
2705 INIT_WORK(&sc->reset_work, ath5k_reset_work);
2706 INIT_DELAYED_WORK(&sc->tx_complete_work, ath5k_tx_complete_poll_work);
2707
2708 ret = ath5k_eeprom_read_mac(ah, mac);
2709 if (ret) {
2855
2856 tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
2857 tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
2858 tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
2859 tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
2860 tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
2861
2862 INIT_WORK(&sc->reset_work, ath5k_reset_work);
2863 INIT_DELAYED_WORK(&sc->tx_complete_work, ath5k_tx_complete_poll_work);
2864
2865 ret = ath5k_eeprom_read_mac(ah, mac);
2866 if (ret) {
2710 ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n",
2711 sc->pdev->device);
2867 ATH5K_ERR(sc, "unable to read address from EEPROM\n");
2712 goto err_queues;
2713 }
2714
2715 SET_IEEE80211_PERM_ADDR(hw, mac);
2716 memcpy(&sc->lladdr, mac, ETH_ALEN);
2717 /* All MAC address bits matter for ACKs */
2718 ath5k_update_bssid_mask_and_opmode(sc, NULL);
2719

--- 18 unchanged lines hidden (view full) ---

2738 ath5k_sysfs_register(sc);
2739
2740 return 0;
2741err_queues:
2742 ath5k_txq_release(sc);
2743err_bhal:
2744 ath5k_hw_release_tx_queue(ah, sc->bhalq);
2745err_desc:
2868 goto err_queues;
2869 }
2870
2871 SET_IEEE80211_PERM_ADDR(hw, mac);
2872 memcpy(&sc->lladdr, mac, ETH_ALEN);
2873 /* All MAC address bits matter for ACKs */
2874 ath5k_update_bssid_mask_and_opmode(sc, NULL);
2875

--- 18 unchanged lines hidden (view full) ---

2894 ath5k_sysfs_register(sc);
2895
2896 return 0;
2897err_queues:
2898 ath5k_txq_release(sc);
2899err_bhal:
2900 ath5k_hw_release_tx_queue(ah, sc->bhalq);
2901err_desc:
2746 ath5k_desc_free(sc, pdev);
2902 ath5k_desc_free(sc);
2747err:
2748 return ret;
2749}
2750
2903err:
2904 return ret;
2905}
2906
2751static void
2752ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw)
2907void
2908ath5k_deinit_softc(struct ath5k_softc *sc)
2753{
2909{
2754 struct ath5k_softc *sc = hw->priv;
2910 struct ieee80211_hw *hw = sc->hw;
2755
2756 /*
2757 * NB: the order of these is important:
2758 * o call the 802.11 layer before detaching ath5k_hw to
2759 * ensure callbacks into the driver to delete global
2760 * key cache entries can be handled
2761 * o reclaim the tx queue data structures after calling
2762 * the 802.11 layer as we'll get called back to reclaim
2763 * node state and potentially want to use them
2764 * o to cleanup the tx queues the hal is called, so detach
2765 * it last
2766 * XXX: ??? detach ath5k_hw ???
2767 * Other than that, it's straightforward...
2768 */
2911
2912 /*
2913 * NB: the order of these is important:
2914 * o call the 802.11 layer before detaching ath5k_hw to
2915 * ensure callbacks into the driver to delete global
2916 * key cache entries can be handled
2917 * o reclaim the tx queue data structures after calling
2918 * the 802.11 layer as we'll get called back to reclaim
2919 * node state and potentially want to use them
2920 * o to cleanup the tx queues the hal is called, so detach
2921 * it last
2922 * XXX: ??? detach ath5k_hw ???
2923 * Other than that, it's straightforward...
2924 */
2925 ath5k_debug_finish_device(sc);
2769 ieee80211_unregister_hw(hw);
2926 ieee80211_unregister_hw(hw);
2770 ath5k_desc_free(sc, pdev);
2927 ath5k_desc_free(sc);
2771 ath5k_txq_release(sc);
2772 ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
2773 ath5k_unregister_leds(sc);
2774
2775 ath5k_sysfs_unregister(sc);
2776 /*
2777 * NB: can't reclaim these until after ieee80211_ifdetach
2778 * returns because we'll get called back to reclaim node
2779 * state and potentially want to use them.
2780 */
2928 ath5k_txq_release(sc);
2929 ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
2930 ath5k_unregister_leds(sc);
2931
2932 ath5k_sysfs_unregister(sc);
2933 /*
2934 * NB: can't reclaim these until after ieee80211_ifdetach
2935 * returns because we'll get called back to reclaim node
2936 * state and potentially want to use them.
2937 */
2938 ath5k_hw_deinit(sc->ah);
2939 free_irq(sc->irq, sc);
2781}
2782
2940}
2941
2783/********************\
2784* Mac80211 functions *
2785\********************/
2786
/* NOTE(review): diff-render artifact — the ath5k_tx header below is
 * interleaved with the ath_any_vif_assoc header from the other revision
 * (lines tagged 2942-2944); the body that follows belongs to ath5k_tx. */
/*
 * mac80211 .tx callback: map the skb's mac80211 queue number directly to
 * the driver tx queue (queue order was set up to match mac80211 priority)
 * and hand the frame to ath5k_tx_queue(). An out-of-range queue index is
 * a driver bug: warn once and drop the frame.
 */
2787static int
2788ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2942bool
2943ath_any_vif_assoc(struct ath5k_softc *sc)
2789{
2944{
2790	struct ath5k_softc *sc = hw->priv;
2791	u16 qnum = skb_get_queue_mapping(skb);
2792
2793	if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) {
2794		dev_kfree_skb_any(skb);
2795		return 0;
2796	}
2797
2798	return ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
2799}
2800
/* mac80211 .start callback: thin wrapper around ath5k_init(). */
2801static int ath5k_start(struct ieee80211_hw *hw)
2802{
2803	return ath5k_init(hw->priv);
2804}
2805
/* mac80211 .stop callback: thin wrapper around ath5k_stop_hw(). */
2806static void ath5k_stop(struct ieee80211_hw *hw)
2807{
2808	ath5k_stop_hw(hw->priv);
2809}
2810
/*
 * mac80211 .add_interface callback.
 *
 * Enforces the driver's interface limits (at most ATH_BCBUF beaconing
 * AP/ad-hoc vifs; a single ad-hoc vif excludes all other interfaces),
 * records the vif's operating mode, assigns beaconing vifs a beacon
 * buffer and transmit slot, programs the vif's MAC address into the
 * hardware and refreshes the rx mode/filter setup.
 *
 * Returns 0 on success, -ELNRNG when the interface limits are exceeded,
 * or -EOPNOTSUPP for unsupported interface types.
 */
2811static int ath5k_add_interface(struct ieee80211_hw *hw,
2812			       struct ieee80211_vif *vif)
2813{
2814	struct ath5k_softc *sc = hw->priv;
2815	int ret;
2816	struct ath5k_vif *avf = (void *)vif->drv_priv;
2817
2818	mutex_lock(&sc->lock);
2819
	/* Beaconing vifs are limited by the number of beacon buffers. */
2820	if ((vif->type == NL80211_IFTYPE_AP ||
2821	    vif->type == NL80211_IFTYPE_ADHOC)
2822	    && (sc->num_ap_vifs + sc->num_adhoc_vifs) >= ATH_BCBUF) {
2823		ret = -ELNRNG;
2824		goto end;
2825	}
2826
2827	/* Don't allow other interfaces if one ad-hoc is configured.
2828	 * TODO: Fix the problems with ad-hoc and multiple other interfaces.
2829	 * We would need to operate the HW in ad-hoc mode to allow TSF updates
2830	 * for the IBSS, but this breaks with additional AP or STA interfaces
2831	 * at the moment. */
2832	if (sc->num_adhoc_vifs ||
2833	    (sc->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
2834		ATH5K_ERR(sc, "Only one single ad-hoc interface is allowed.\n");
2835		ret = -ELNRNG;
2836		goto end;
2837	}
2838
2839	switch (vif->type) {
2840	case NL80211_IFTYPE_AP:
2841	case NL80211_IFTYPE_STATION:
2842	case NL80211_IFTYPE_ADHOC:
2843	case NL80211_IFTYPE_MESH_POINT:
2844		avf->opmode = vif->type;
2845		break;
2846	default:
2847		ret = -EOPNOTSUPP;
2848		goto end;
2849	}
2850
2851	sc->nvifs++;
2852	ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "add interface mode %d\n", avf->opmode);
2853
2854	/* Assign the vap/adhoc to a beacon xmit slot. */
2855	if ((avf->opmode == NL80211_IFTYPE_AP) ||
2856	    (avf->opmode == NL80211_IFTYPE_ADHOC) ||
2857	    (avf->opmode == NL80211_IFTYPE_MESH_POINT)) {
2858		int slot;
2859
2860		WARN_ON(list_empty(&sc->bcbuf));
2861		avf->bbuf = list_first_entry(&sc->bcbuf, struct ath5k_buf,
2862					     list);
2863		list_del(&avf->bbuf->list);
2864
		/* First free slot wins; slot 0 is the fallback. */
2865		avf->bslot = 0;
2866		for (slot = 0; slot < ATH_BCBUF; slot++) {
2867			if (!sc->bslot[slot]) {
2868				avf->bslot = slot;
2869				break;
2870			}
2871		}
2872		BUG_ON(sc->bslot[avf->bslot] != NULL);
2873		sc->bslot[avf->bslot] = vif;
2874		if (avf->opmode == NL80211_IFTYPE_AP)
2875			sc->num_ap_vifs++;
2876		else if (avf->opmode == NL80211_IFTYPE_ADHOC)
2877			sc->num_adhoc_vifs++;
2878	}
2879
2880	/* Any MAC address is fine, all others are included through the
2881	 * filter.
2882	 */
2883	memcpy(&sc->lladdr, vif->addr, ETH_ALEN);
2884	ath5k_hw_set_lladdr(sc->ah, vif->addr);
2885
2886	memcpy(&avf->lladdr, vif->addr, ETH_ALEN);
2887
2888	ath5k_mode_setup(sc, vif);
2889
2890	ret = 0;
2891end:
2892	mutex_unlock(&sc->lock);
2893	return ret;
2894}
2895
/*
 * mac80211 .remove_interface callback.
 *
 * Undoes ath5k_add_interface(): returns the vif's beacon buffer to the
 * free list, clears its beacon slot, decrements the per-mode vif counts
 * and recomputes the BSSID mask/opmode for the remaining interfaces.
 */
2896static void
2897ath5k_remove_interface(struct ieee80211_hw *hw,
2898		       struct ieee80211_vif *vif)
2899{
2900	struct ath5k_softc *sc = hw->priv;
2901	struct ath5k_vif *avf = (void *)vif->drv_priv;
2902	unsigned int i;
2903
2904	mutex_lock(&sc->lock);
2905	sc->nvifs--;
2906
2907	if (avf->bbuf) {
		/* Release any beacon skb and give the buffer back. */
2908		ath5k_txbuf_free_skb(sc, avf->bbuf);
2909		list_add_tail(&avf->bbuf->list, &sc->bcbuf);
2910		for (i = 0; i < ATH_BCBUF; i++) {
2911			if (sc->bslot[i] == vif) {
2912				sc->bslot[i] = NULL;
2913				break;
2914			}
2915		}
2916		avf->bbuf = NULL;
2917	}
2918	if (avf->opmode == NL80211_IFTYPE_AP)
2919		sc->num_ap_vifs--;
2920	else if (avf->opmode == NL80211_IFTYPE_ADHOC)
2921		sc->num_adhoc_vifs--;
2922
2923	ath5k_update_bssid_mask_and_opmode(sc, NULL);
2924	mutex_unlock(&sc->lock);
2925}
2926
2927/*
2928 * TODO: Phy disable/diversity etc
2929 */
/*
 * mac80211 .config callback: applies channel changes (via
 * ath5k_chan_set), tx power changes (in half-dB steps) and re-applies
 * the current antenna mode. Returns 0 or the channel-set error.
 */
2930static int
2931ath5k_config(struct ieee80211_hw *hw, u32 changed)
2932{
2933	struct ath5k_softc *sc = hw->priv;
2934	struct ath5k_hw *ah = sc->ah;
2935	struct ieee80211_conf *conf = &hw->conf;
2936	int ret = 0;
2937
2938	mutex_lock(&sc->lock);
2939
2940	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2941		ret = ath5k_chan_set(sc, conf->channel);
2942		if (ret < 0)
2943			goto unlock;
2944	}
2945
	/* Only touch the hardware when the power level actually changed. */
2946	if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
2947	(sc->power_level != conf->power_level)) {
2948		sc->power_level = conf->power_level;
2949
2950		/* Half dB steps */
2951		ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
2952	}
2953
2954	/* TODO:
2955	 * 1) Move this on config_interface and handle each case
2956	 * separately eg. when we have only one STA vif, use
2957	 * AR5K_ANTMODE_SINGLE_AP
2958	 *
2959	 * 2) Allow the user to change antenna mode eg. when only
2960	 * one antenna is present
2961	 *
2962	 * 3) Allow the user to set default/tx antenna when possible
2963	 *
2964	 * 4) Default mode should handle 90% of the cases, together
2965	 * with fixed a/b and single AP modes we should be able to
2966	 * handle 99%. Sectored modes are extreme cases and i still
2967	 * haven't found a usage for them. If we decide to support them,
2968	 * then we must allow the user to set how many tx antennas we
2969	 * have available
2970	 */
2971	ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
2972
2973unlock:
2974	mutex_unlock(&sc->lock);
2975	return ret;
2976}
2977
/*
 * mac80211 .prepare_multicast callback.
 *
 * Hashes each multicast address (XOR of eight 6-bit fields of the
 * 48-bit address) into a 64-bit hardware multicast filter bitmap and
 * returns it packed as a u64 (low word in mfilt[0], high in mfilt[1])
 * for ath5k_configure_filter() to program.
 */
2978static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
2979				   struct netdev_hw_addr_list *mc_list)
2980{
2981	u32 mfilt[2], val;
2982	u8 pos;
2983	struct netdev_hw_addr *ha;
2984
2985	mfilt[0] = 0;
	/* NOTE(review): mfilt[1] starts at 1 rather than 0, so bit 32 of
	 * the filter is always set — verify this is intentional and not a
	 * typo for "mfilt[1] = 0". */
2986	mfilt[1] = 1;
2987
2988	netdev_hw_addr_list_for_each(ha, mc_list) {
2989		/* calculate XOR of eight 6-bit values */
2990		val = get_unaligned_le32(ha->addr + 0);
2991		pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2992		val = get_unaligned_le32(ha->addr + 3);
2993		pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2994		pos &= 0x3f;
2995		mfilt[pos / 32] |= (1 << (pos % 32));
2996		/* XXX: we might be able to just do this instead,
2997		* but not sure, needs testing, if we do use this we'd
2998		* neet to inform below to not reset the mcast */
2999		/* ath5k_hw_set_mcast_filterindex(ah,
3000		 * ha->addr[5]); */
3001	}
3002
3003	return ((u64)(mfilt[1]) << 32) | mfilt[0];
3004}
3005
/*
 * Returns true if any active interface is currently associated, by
 * iterating all active vifs with ath_vif_iter and reading back the
 * accumulated any_assoc flag. Used to decide the aggregate association
 * state (LED, beacon filter) when one vif disassociates.
 */
3006static bool ath_any_vif_assoc(struct ath5k_softc *sc)
3007{
3008	struct ath_vif_iter_data iter_data;
3009	iter_data.hw_macaddr = NULL;
3010	iter_data.any_assoc = false;
3011	iter_data.need_set_hw_addr = false;
3012	iter_data.found_active = true;
3013
3014	ieee80211_iterate_active_interfaces_atomic(sc->hw, ath_vif_iter,
3015						   &iter_data);
3016	return iter_data.any_assoc;
3017}
3018
/* NOTE(review): diff-render artifact — the lines below (tagged
 * 2945-2954) are the other revision's duplicate body of
 * ath_any_vif_assoc(); its opening brace sits further up where the
 * ath5k_tx header is interleaved. */
2945	struct ath_vif_iter_data iter_data;
2946	iter_data.hw_macaddr = NULL;
2947	iter_data.any_assoc = false;
2948	iter_data.need_set_hw_addr = false;
2949	iter_data.found_active = true;
2950
2951	ieee80211_iterate_active_interfaces_atomic(sc->hw, ath_vif_iter,
2952						   &iter_data);
2953	return iter_data.any_assoc;
2954}
2955
2955
/* Filter flags this driver can honour from mac80211. */
3019#define SUPPORTED_FIF_FLAGS \
3020	FIF_PROMISC_IN_BSS |  FIF_ALLMULTI | FIF_FCSFAIL | \
3021	FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \
3022	FIF_BCN_PRBRESP_PROMISC
3023/*
3024 * o always accept unicast, broadcast, and multicast traffic
3025 * o multicast traffic for all BSSIDs will be enabled if mac80211
3026 *   says it should be
3027 * o maintain current state of phy ofdm or phy cck error reception.
3028 *   If the hardware detects any of these type of errors then
3029 *   ath5k_hw_get_rx_filter() will pass to us the respective
3030 *   hardware filters to be able to receive these type of frames.
3031 * o probe request frames are accepted only when operating in
3032 *   hostap, adhoc, or monitor modes
3033 * o enable promiscuous mode according to the interface state
3034 * o accept beacons:
3035 *   - when operating in adhoc mode so the 802.11 layer creates
3036 *     node table entries for peers,
3037 *   - when operating in station mode for collecting rssi data when
3038 *     the station is otherwise quiet, or
3039 *   - when scanning
3040 */
/*
 * mac80211 .configure_filter callback: translates the mac80211 FIF_*
 * flags plus the multicast hash from ath5k_prepare_multicast() into
 * the AR5K rx filter register and multicast filter registers, with
 * per-opmode additions; caches the result in sc->filter_flags.
 */
3041static void ath5k_configure_filter(struct ieee80211_hw *hw,
3042		unsigned int changed_flags,
3043		unsigned int *new_flags,
3044		u64 multicast)
3045{
3046	struct ath5k_softc *sc = hw->priv;
3047	struct ath5k_hw *ah = sc->ah;
3048	u32 mfilt[2], rfilt;
3049
3050	mutex_lock(&sc->lock);
3051
	/* Unpack the 64-bit multicast hash into the two 32-bit words. */
3052	mfilt[0] = multicast;
3053	mfilt[1] = multicast >> 32;
3054
3055	/* Only deal with supported flags */
3056	changed_flags &= SUPPORTED_FIF_FLAGS;
3057	*new_flags &= SUPPORTED_FIF_FLAGS;
3058
3059	/* If HW detects any phy or radar errors, leave those filters on.
3060	 * Also, always enable Unicast, Broadcasts and Multicast
3061	 * XXX: move unicast, bssid broadcasts and multicast to mac80211 */
3062	rfilt = (ath5k_hw_get_rx_filter(ah) & (AR5K_RX_FILTER_PHYERR)) |
3063		(AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST |
3064		AR5K_RX_FILTER_MCAST);
3065
3066	if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
3067		if (*new_flags & FIF_PROMISC_IN_BSS) {
3068			__set_bit(ATH_STAT_PROMISC, sc->status);
3069		} else {
3070			__clear_bit(ATH_STAT_PROMISC, sc->status);
3071		}
3072	}
3073
3074	if (test_bit(ATH_STAT_PROMISC, sc->status))
3075		rfilt |= AR5K_RX_FILTER_PROM;
3076
3077	/* Note, AR5K_RX_FILTER_MCAST is already enabled */
3078	if (*new_flags & FIF_ALLMULTI) {
3079		mfilt[0] =  ~0;
3080		mfilt[1] =  ~0;
3081	}
3082
3083	/* This is the best we can do */
3084	if (*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL))
3085		rfilt |= AR5K_RX_FILTER_PHYERR;
3086
3087	/* FIF_BCN_PRBRESP_PROMISC really means to enable beacons
3088	* and probes for any BSSID */
3089	if ((*new_flags & FIF_BCN_PRBRESP_PROMISC) || (sc->nvifs > 1))
3090		rfilt |= AR5K_RX_FILTER_BEACON;
3091
3092	/* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not
3093	 * set we should only pass on control frames for this
3094	 * station. This needs testing. I believe right now this
3095	 * enables *all* control frames, which is OK.. but
3096	 * but we should see if we can improve on granularity */
3097	if (*new_flags & FIF_CONTROL)
3098		rfilt |= AR5K_RX_FILTER_CONTROL;
3099
3100	/* Additional settings per mode -- this is per ath5k */
3101
3102	/* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */
3103
3104	switch (sc->opmode) {
3105	case NL80211_IFTYPE_MESH_POINT:
3106		rfilt |= AR5K_RX_FILTER_CONTROL |
3107			 AR5K_RX_FILTER_BEACON |
3108			 AR5K_RX_FILTER_PROBEREQ |
3109			 AR5K_RX_FILTER_PROM;
3110		break;
3111	case NL80211_IFTYPE_AP:
3112	case NL80211_IFTYPE_ADHOC:
3113		rfilt |= AR5K_RX_FILTER_PROBEREQ |
3114			 AR5K_RX_FILTER_BEACON;
3115		break;
3116	case NL80211_IFTYPE_STATION:
3117		if (sc->assoc)
3118			rfilt |= AR5K_RX_FILTER_BEACON;
	/* fallthrough: STATION deliberately falls into default */
3119	default:
3120		break;
3121	}
3122
3123	/* Set filters */
3124	ath5k_hw_set_rx_filter(ah, rfilt);
3125
3126	/* Set multicast bits */
3127	ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);
3128	/* Set the cached hw filter flags, this will later actually
3129	 * be set in HW */
3130	sc->filter_flags = rfilt;
3131
3132	mutex_unlock(&sc->lock);
3133}
3134
/*
 * mac80211 .set_key callback: installs or removes a hardware crypto key
 * via the shared ath_key_config/ath_key_delete helpers.
 *
 * Supported ciphers: WEP40/WEP104/TKIP always; CCMP only when the
 * common crypt capabilities advertise AESCCM. Returns -EOPNOTSUPP when
 * hardware crypto is disabled by the nohwcrypt module parameter or the
 * cipher is unsupported, -EINVAL for unknown commands/ciphers.
 */
3135static int
3136ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3137	      struct ieee80211_vif *vif, struct ieee80211_sta *sta,
3138	      struct ieee80211_key_conf *key)
3139{
3140	struct ath5k_softc *sc = hw->priv;
3141	struct ath5k_hw *ah = sc->ah;
3142	struct ath_common *common = ath5k_hw_common(ah);
3143	int ret = 0;
3144
3145	if (modparam_nohwcrypt)
3146		return -EOPNOTSUPP;
3147
3148	switch (key->cipher) {
3149	case WLAN_CIPHER_SUITE_WEP40:
3150	case WLAN_CIPHER_SUITE_WEP104:
3151	case WLAN_CIPHER_SUITE_TKIP:
3152		break;
3153	case WLAN_CIPHER_SUITE_CCMP:
3154		if (common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)
3155			break;
3156		return -EOPNOTSUPP;
3157	default:
3158		WARN_ON(1);
3159		return -EINVAL;
3160	}
3161
3162	mutex_lock(&sc->lock);
3163
3164	switch (cmd) {
3165	case SET_KEY:
3166		ret = ath_key_config(common, vif, sta, key);
3167		if (ret >= 0) {
3168			key->hw_key_idx = ret;
3169			/* push IV and Michael MIC generation to stack */
3170			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
3171			if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
3172				key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
3173			if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
3174				key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
3175			ret = 0;
3176		}
3177		break;
3178	case DISABLE_KEY:
3179		ath_key_delete(common, key);
3180		break;
3181	default:
3182		ret = -EINVAL;
3183	}
3184
	/* Flush MMIO writes before releasing the lock. */
3185	mmiowb();
3186	mutex_unlock(&sc->lock);
3187	return ret;
3188}
3189
/*
 * mac80211 .get_stats callback: forces a MIB counter update from
 * hardware, then copies the cached ACK/RTS/FCS statistics into the
 * mac80211 low-level stats structure. Always returns 0.
 */
3190static int
3191ath5k_get_stats(struct ieee80211_hw *hw,
3192		struct ieee80211_low_level_stats *stats)
3193{
3194	struct ath5k_softc *sc = hw->priv;
3195
3196	/* Force update */
3197	ath5k_hw_update_mib_counters(sc->ah);
3198
3199	stats->dot11ACKFailureCount = sc->stats.ack_fail;
3200	stats->dot11RTSFailureCount = sc->stats.rts_fail;
3201	stats->dot11RTSSuccessCount = sc->stats.rts_ok;
3202	stats->dot11FCSErrorCount = sc->stats.fcs_error;
3203
3204	return 0;
3205}
3206
/*
 * mac80211 .get_survey callback: reports survey data only for the
 * current channel (idx 0), filling in the hardware noise floor as the
 * noise estimate. Returns -ENOENT for any other index.
 */
3207static int ath5k_get_survey(struct ieee80211_hw *hw, int idx,
3208		struct survey_info *survey)
3209{
3210	struct ath5k_softc *sc = hw->priv;
3211	struct ieee80211_conf *conf = &hw->conf;
3212
3213	 if (idx != 0)
3214		return -ENOENT;
3215
3216	survey->channel = conf->channel;
3217	survey->filled = SURVEY_INFO_NOISE_DBM;
3218	survey->noise = sc->ah->ah_noise_floor;
3219
3220	return 0;
3221}
3222
/* mac80211 .get_tsf callback: read the 64-bit hardware TSF. */
3223static u64
3224ath5k_get_tsf(struct ieee80211_hw *hw)
3225{
3226	struct ath5k_softc *sc = hw->priv;
3227
3228	return ath5k_hw_get_tsf64(sc->ah);
3229}
3230
/* mac80211 .set_tsf callback: write the 64-bit hardware TSF. */
3231static void
3232ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
3233{
3234	struct ath5k_softc *sc = hw->priv;
3235
3236	ath5k_hw_set_tsf64(sc->ah, tsf);
3237}
3238
/*
 * mac80211 .reset_tsf callback: zero the hardware TSF. In ad-hoc mode
 * this goes through ath5k_beacon_update_timers(sc, 0), which resets the
 * TSF and keeps the beacon timers consistent with it.
 */
3239static void
3240ath5k_reset_tsf(struct ieee80211_hw *hw)
3241{
3242	struct ath5k_softc *sc = hw->priv;
3243
3244	/*
3245	 * in IBSS mode we need to update the beacon timers too.
3246	 * this will also reset the TSF if we call it with 0
3247	 */
3248	if (sc->opmode == NL80211_IFTYPE_ADHOC)
3249		ath5k_beacon_update_timers(sc, 0);
3250	else
3251		ath5k_hw_reset_tsf(sc->ah);
3252}
3253
/* NOTE(review): diff-render artifact — the body of set_beacon_filter()
 * appears twice below, once per revision (tags 325x and 295x); both
 * copies are textually identical. */
/*
 * Enable or disable the beacon rx filter bit on top of the currently
 * programmed rx filter, and cache the result in sc->filter_flags.
 * Used from ath5k_bss_info_changed() to track association state in
 * station mode.
 */
3254static void
2956void
3255set_beacon_filter(struct ieee80211_hw *hw, bool enable)
3256{
3257	struct ath5k_softc *sc = hw->priv;
3258	struct ath5k_hw *ah = sc->ah;
3259	u32 rfilt;
3260	rfilt = ath5k_hw_get_rx_filter(ah);
3261	if (enable)
3262		rfilt |= AR5K_RX_FILTER_BEACON;
3263	else
3264		rfilt &= ~AR5K_RX_FILTER_BEACON;
3265	ath5k_hw_set_rx_filter(ah, rfilt);
3266	sc->filter_flags = rfilt;
3267}
2957set_beacon_filter(struct ieee80211_hw *hw, bool enable)
2958{
2959	struct ath5k_softc *sc = hw->priv;
2960	struct ath5k_hw *ah = sc->ah;
2961	u32 rfilt;
2962	rfilt = ath5k_hw_get_rx_filter(ah);
2963	if (enable)
2964		rfilt |= AR5K_RX_FILTER_BEACON;
2965	else
2966		rfilt &= ~AR5K_RX_FILTER_BEACON;
2967	ath5k_hw_set_rx_filter(ah, rfilt);
2968	sc->filter_flags = rfilt;
2969}
3268
/*
 * mac80211 .bss_info_changed callback.
 *
 * Propagates BSS configuration changes to the hardware: caches the
 * BSSID for resets, tracks the beacon interval, updates per-vif and
 * aggregate association state (beacon filter + LED in station mode),
 * rebuilds the beacon frame, and reconfigures beaconing when the
 * beacon, its enable state or its interval changed.
 */
3269static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
3270				    struct ieee80211_vif *vif,
3271				    struct ieee80211_bss_conf *bss_conf,
3272				    u32 changes)
3273{
3274	struct ath5k_vif *avf = (void *)vif->drv_priv;
3275	struct ath5k_softc *sc = hw->priv;
3276	struct ath5k_hw *ah = sc->ah;
3277	struct ath_common *common = ath5k_hw_common(ah);
3278	unsigned long flags;
3279
3280	mutex_lock(&sc->lock);
3281
3282	if (changes & BSS_CHANGED_BSSID) {
3283		/* Cache for later use during resets */
3284		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
3285		common->curaid = 0;
3286		ath5k_hw_set_bssid(ah);
3287		mmiowb();
3288	}
3289
3290	if (changes & BSS_CHANGED_BEACON_INT)
3291		sc->bintval = bss_conf->beacon_int;
3292
3293	if (changes & BSS_CHANGED_ASSOC) {
3294		avf->assoc = bss_conf->assoc;
		/* sc->assoc is the OR over all vifs; recompute it when
		 * this vif disassociates. */
3295		if (bss_conf->assoc)
3296			sc->assoc = bss_conf->assoc;
3297		else
3298			sc->assoc = ath_any_vif_assoc(sc);
3299
3300		if (sc->opmode == NL80211_IFTYPE_STATION)
3301			set_beacon_filter(hw, sc->assoc);
3302		ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
3303			AR5K_LED_ASSOC : AR5K_LED_INIT);
3304		if (bss_conf->assoc) {
3305			ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
3306				  "Bss Info ASSOC %d, bssid: %pM\n",
3307				  bss_conf->aid, common->curbssid);
3308			common->curaid = bss_conf->aid;
3309			ath5k_hw_set_bssid(ah);
3310			/* Once ANI is available you would start it here */
3311		}
3312	}
3313
3314	if (changes & BSS_CHANGED_BEACON) {
		/* Beacon update must not race the beacon tasklet. */
3315		spin_lock_irqsave(&sc->block, flags);
3316		ath5k_beacon_update(hw, vif);
3317		spin_unlock_irqrestore(&sc->block, flags);
3318	}
3319
3320	if (changes & BSS_CHANGED_BEACON_ENABLED)
3321		sc->enable_beacon = bss_conf->enable_beacon;
3322
3323	if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED |
3324	    BSS_CHANGED_BEACON_INT))
3325		ath5k_beacon_config(sc);
3326
3327	mutex_unlock(&sc->lock);
3328}
3329
/* mac80211 .sw_scan_start callback: show the scan LED state unless we
 * are associated (then the ASSOC LED state is kept). */
3330static void ath5k_sw_scan_start(struct ieee80211_hw *hw)
3331{
3332	struct ath5k_softc *sc = hw->priv;
3333	if (!sc->assoc)
3334		ath5k_hw_set_ledstate(sc->ah, AR5K_LED_SCAN);
3335}
3336
/* mac80211 .sw_scan_complete callback: restore the LED state that
 * matches the current association status. */
3337static void ath5k_sw_scan_complete(struct ieee80211_hw *hw)
3338{
3339	struct ath5k_softc *sc = hw->priv;
3340	ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
3341		AR5K_LED_ASSOC : AR5K_LED_INIT);
3342}
3343
3344/**
3345 * ath5k_set_coverage_class - Set IEEE 802.11 coverage class
3346 *
3347 * @hw: struct ieee80211_hw pointer
3348 * @coverage_class: IEEE 802.11 coverage class number
3349 *
3350 * Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for given
3351 * coverage class. The values are persistent, they are restored after device
3352 * reset.
3353 */
3354static void ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
3355{
3356	struct ath5k_softc *sc = hw->priv;
3357
	/* Serialize against other configuration paths. */
3358	mutex_lock(&sc->lock);
3359	ath5k_hw_set_coverage_class(sc->ah, coverage_class);
3360	mutex_unlock(&sc->lock);
3361}
3362
/*
 * mac80211 .conf_tx callback: applies per-queue WME parameters
 * (AIFS, CWmin/CWmax, TXOP burst time) to the given hardware tx queue
 * and resets the queue. Silently ignores queue numbers beyond the
 * hardware's tx queue count; returns -EIO on a hardware update failure.
 */
3363static int ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
3364		const struct ieee80211_tx_queue_params *params)
3365{
3366	struct ath5k_softc *sc = hw->priv;
3367	struct ath5k_hw *ah = sc->ah;
3368	struct ath5k_txq_info qi;
3369	int ret = 0;
3370
3371	if (queue >= ah->ah_capabilities.cap_queues.q_tx_num)
3372		return 0;
3373
3374	mutex_lock(&sc->lock);
3375
	/* Read-modify-write: keep the queue's other properties intact. */
3376	ath5k_hw_get_tx_queueprops(ah, queue, &qi);
3377
3378	qi.tqi_aifs = params->aifs;
3379	qi.tqi_cw_min = params->cw_min;
3380	qi.tqi_cw_max = params->cw_max;
3381	qi.tqi_burst_time = params->txop;
3382
3383	ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
3384		  "Configure tx [queue %d],  "
3385		  "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
3386		  queue, params->aifs, params->cw_min,
3387		  params->cw_max, params->txop);
3388
3389	if (ath5k_hw_set_tx_queueprops(ah, queue, &qi)) {
3390		ATH5K_ERR(sc,
3391			  "Unable to update hardware queue %u!\n", queue);
3392		ret = -EIO;
3393	} else
3394		ath5k_hw_reset_tx_queue(ah, queue);
3395
3396	mutex_unlock(&sc->lock);
3397
3398	return ret;
3399}
3400
/* mac80211 driver callback table for ath5k. */
3401static const struct ieee80211_ops ath5k_hw_ops = {
3402	.tx 		= ath5k_tx,
3403	.start 		= ath5k_start,
3404	.stop 		= ath5k_stop,
3405	.add_interface 	= ath5k_add_interface,
3406	.remove_interface = ath5k_remove_interface,
3407	.config 	= ath5k_config,
3408	.prepare_multicast = ath5k_prepare_multicast,
3409	.configure_filter = ath5k_configure_filter,
3410	.set_key 	= ath5k_set_key,
3411	.get_stats 	= ath5k_get_stats,
3412	.get_survey	= ath5k_get_survey,
3413	.conf_tx 	= ath5k_conf_tx,
3414	.get_tsf 	= ath5k_get_tsf,
3415	.set_tsf 	= ath5k_set_tsf,
3416	.reset_tsf 	= ath5k_reset_tsf,
3417	.bss_info_changed = ath5k_bss_info_changed,
3418	.sw_scan_start	= ath5k_sw_scan_start,
3419	.sw_scan_complete = ath5k_sw_scan_complete,
3420	.set_coverage_class = ath5k_set_coverage_class,
3421};
3422
3423/********************\
3424* PCI Initialization *
3425\********************/
3426
/*
 * PCI probe: bring up one ath5k device.
 *
 * Order of operations: disable ASPM L0s, enable the PCI device, set a
 * 32-bit DMA mask, fix up cache line size / latency timer / retry
 * timeout config-space registers, map BAR 0, allocate the ieee80211_hw
 * and driver softc, install the interrupt handler, attach the ath5k_hw
 * layer, then finish driver setup via ath5k_attach(). Errors unwind in
 * reverse through the err_* labels.
 */
3427static int __devinit
3428ath5k_pci_probe(struct pci_dev *pdev,
3429		const struct pci_device_id *id)
3430{
3431	void __iomem *mem;
3432	struct ath5k_softc *sc;
3433	struct ath_common *common;
3434	struct ieee80211_hw *hw;
3435	int ret;
3436	u8 csz;
3437
3438	/*
3439	 * L0s needs to be disabled on all ath5k cards.
3440	 *
3441	 * For distributions shipping with CONFIG_PCIEASPM (this will be enabled
3442	 * by default in the future in 2.6.36) this will also mean both L1 and
3443	 * L0s will be disabled when a pre 1.1 PCIe device is detected. We do
3444	 * know L1 works correctly even for all ath5k pre 1.1 PCIe devices
3445	 * though but cannot currently undue the effect of a blacklist, for
3446	 * details you can read pcie_aspm_sanity_check() and see how it adjusts
3447	 * the device link capability.
3448	 *
3449	 * It may be possible in the future to implement some PCI API to allow
3450	 * drivers to override blacklists for pre 1.1 PCIe but for now it is
3451	 * best to accept that both L0s and L1 will be disabled completely for
3452	 * distributions shipping with CONFIG_PCIEASPM rather than having this
3453	 * issue present. Motivation for adding this new API will be to help
3454	 * with power consumption for some of these devices.
3455	 */
3456	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
3457
3458	ret = pci_enable_device(pdev);
3459	if (ret) {
3460		dev_err(&pdev->dev, "can't enable device\n");
3461		goto err;
3462	}
3463
3464	/* XXX 32-bit addressing only */
3465	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3466	if (ret) {
3467		dev_err(&pdev->dev, "32-bit DMA not available\n");
3468		goto err_dis;
3469	}
3470
3471	/*
3472	 * Cache line size is used to size and align various
3473	 * structures used to communicate with the hardware.
3474	 */
3475	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
3476	if (csz == 0) {
3477		/*
3478		 * Linux 2.4.18 (at least) writes the cache line size
3479		 * register as a 16-bit wide register which is wrong.
3480		 * We must have this setup properly for rx buffer
3481		 * DMA to work so force a reasonable value here if it
3482		 * comes up zero.
3483		 */
3484		csz = L1_CACHE_BYTES >> 2;
3485		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
3486	}
3487	/*
3488	 * The default setting of latency timer yields poor results,
3489	 * set it to the value used by other systems. It may be worth
3490	 * tweaking this setting more.
3491	 */
3492	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
3493
3494	/* Enable bus mastering */
3495	pci_set_master(pdev);
3496
3497	/*
3498	 * Disable the RETRY_TIMEOUT register (0x41) to keep
3499	 * PCI Tx retries from interfering with C3 CPU state.
3500	 */
3501	pci_write_config_byte(pdev, 0x41, 0);
3502
3503	ret = pci_request_region(pdev, 0, "ath5k");
3504	if (ret) {
3505		dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
3506		goto err_dis;
3507	}
3508
3509	mem = pci_iomap(pdev, 0, 0);
3510	if (!mem) {
3511		dev_err(&pdev->dev, "cannot remap PCI memory region\n") ;
3512		ret = -EIO;
3513		goto err_reg;
3514	}
3515
3516	/*
3517	 * Allocate hw (mac80211 main struct)
3518	 * and hw->priv (driver private data)
3519	 */
3520	hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops);
3521	if (hw == NULL) {
3522		dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
3523		ret = -ENOMEM;
3524		goto err_map;
3525	}
3526
	/* NOTE(review): printed before ieee80211_register_hw() runs (that
	 * happens later in ath5k_attach) — the wiphy name is available
	 * from allocation, but "registered" is premature here; confirm. */
3527	dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));
3528
3529	/* Initialize driver private data */
3530	SET_IEEE80211_DEV(hw, &pdev->dev);
3531	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
3532		    IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
3533		    IEEE80211_HW_SIGNAL_DBM;
3534
3535	hw->wiphy->interface_modes =
3536		BIT(NL80211_IFTYPE_AP) |
3537		BIT(NL80211_IFTYPE_STATION) |
3538		BIT(NL80211_IFTYPE_ADHOC) |
3539		BIT(NL80211_IFTYPE_MESH_POINT);
3540
3541	hw->extra_tx_headroom = 2;
3542	hw->channel_change_time = 5000;
3543	sc = hw->priv;
3544	sc->hw = hw;
3545	sc->pdev = pdev;
3546
3547	/*
3548	 * Mark the device as detached to avoid processing
3549	 * interrupts until setup is complete.
3550	 */
3551	__set_bit(ATH_STAT_INVALID, sc->status);
3552
3553	sc->iobase = mem; /* So we can unmap it on detach */
3554	sc->opmode = NL80211_IFTYPE_STATION;
3555	sc->bintval = 1000;
3556	mutex_init(&sc->lock);
3557	spin_lock_init(&sc->rxbuflock);
3558	spin_lock_init(&sc->txbuflock);
3559	spin_lock_init(&sc->block);
3560
3561	/* Set private data */
3562	pci_set_drvdata(pdev, sc);
3563
3564	/* Setup interrupt handler */
3565	ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
3566	if (ret) {
3567		ATH5K_ERR(sc, "request_irq failed\n");
3568		goto err_free;
3569	}
3570
3571	/* If we passed the test, malloc an ath5k_hw struct */
3572	sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
3573	if (!sc->ah) {
3574		ret = -ENOMEM;
3575		ATH5K_ERR(sc, "out of memory\n");
3576		goto err_irq;
3577	}
3578
	/* Wire up the shared ath layer (common ops, register base,
	 * cache line size in bytes for rx buffer alignment). */
3579	sc->ah->ah_sc = sc;
3580	sc->ah->ah_iobase = sc->iobase;
3581	common = ath5k_hw_common(sc->ah);
3582	common->ops = &ath5k_common_ops;
3583	common->ah = sc->ah;
3584	common->hw = hw;
3585	common->cachelsz = csz << 2; /* convert to bytes */
3586	spin_lock_init(&common->cc_lock);
3587
3588	/* Initialize device */
3589	ret = ath5k_hw_attach(sc);
3590	if (ret) {
3591		goto err_free_ah;
3592	}
3593
3594	/* set up multi-rate retry capabilities */
3595	if (sc->ah->ah_version == AR5K_AR5212) {
3596		hw->max_rates = 4;
3597		hw->max_rate_tries = 11;
3598	}
3599
3600	hw->vif_data_size = sizeof(struct ath5k_vif);
3601
3602	/* Finish private driver data initialization */
3603	ret = ath5k_attach(pdev, hw);
3604	if (ret)
3605		goto err_ah;
3606
3607	ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
3608			ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
3609					sc->ah->ah_mac_srev,
3610					sc->ah->ah_phy_revision);
3611
3612	if (!sc->ah->ah_single_chip) {
3613		/* Single chip radio (!RF5111) */
3614		if (sc->ah->ah_radio_5ghz_revision &&
3615			!sc->ah->ah_radio_2ghz_revision) {
3616			/* No 5GHz support -> report 2GHz radio */
3617			if (!test_bit(AR5K_MODE_11A,
3618				sc->ah->ah_capabilities.cap_mode)) {
3619				ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
3620					ath5k_chip_name(AR5K_VERSION_RAD,
3621						sc->ah->ah_radio_5ghz_revision),
3622						sc->ah->ah_radio_5ghz_revision);
3623			/* No 2GHz support (5110 and some
3624			 * 5Ghz only cards) -> report 5Ghz radio */
3625			} else if (!test_bit(AR5K_MODE_11B,
3626				sc->ah->ah_capabilities.cap_mode)) {
3627				ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
3628					ath5k_chip_name(AR5K_VERSION_RAD,
3629						sc->ah->ah_radio_5ghz_revision),
3630						sc->ah->ah_radio_5ghz_revision);
3631			/* Multiband radio */
3632			} else {
3633				ATH5K_INFO(sc, "RF%s multiband radio found"
3634					" (0x%x)\n",
3635					ath5k_chip_name(AR5K_VERSION_RAD,
3636						sc->ah->ah_radio_5ghz_revision),
3637						sc->ah->ah_radio_5ghz_revision);
3638			}
3639		}
3640		/* Multi chip radio (RF5111 - RF2111) ->
3641		 * report both 2GHz/5GHz radios */
3642		else if (sc->ah->ah_radio_5ghz_revision &&
3643				sc->ah->ah_radio_2ghz_revision){
3644			ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
3645				ath5k_chip_name(AR5K_VERSION_RAD,
3646					sc->ah->ah_radio_5ghz_revision),
3647					sc->ah->ah_radio_5ghz_revision);
3648			ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
3649				ath5k_chip_name(AR5K_VERSION_RAD,
3650					sc->ah->ah_radio_2ghz_revision),
3651					sc->ah->ah_radio_2ghz_revision);
3652		}
3653	}
3654
3655	ath5k_debug_init_device(sc);
3656
3657	/* ready to process interrupts */
3658	__clear_bit(ATH_STAT_INVALID, sc->status);
3659
3660	return 0;
3661err_ah:
3662	ath5k_hw_detach(sc->ah);
3663err_free_ah:
3664	kfree(sc->ah);
3665err_irq:
3666	free_irq(pdev->irq, sc);
3667err_free:
3668	ieee80211_free_hw(hw);
3669err_map:
3670	pci_iounmap(pdev, mem);
3671err_reg:
3672	pci_release_region(pdev, 0);
3673err_dis:
3674	pci_disable_device(pdev);
3675err:
3676	return ret;
3677}
3678
/*
 * ath5k_pci_remove() - PCI device removal callback.
 *
 * Undoes everything the probe path set up, in reverse order of
 * acquisition: debugfs state, driver/mac80211 attach state, the HAL
 * (ath5k_hw), the IRQ, the register mapping, the PCI BAR and the
 * device enable, and finally the ieee80211_hw itself.  The order
 * matters -- the IRQ is freed before the iobase is unmapped, and
 * sc (which lives inside hw) is referenced up to the final
 * ieee80211_free_hw() call -- so do not reorder these statements.
 */
static void __devexit
ath5k_pci_remove(struct pci_dev *pdev)
{
	struct ath5k_softc *sc = pci_get_drvdata(pdev);

	ath5k_debug_finish_device(sc);
	ath5k_detach(pdev, sc->hw);
	ath5k_hw_detach(sc->ah);
	kfree(sc->ah);
	free_irq(pdev->irq, sc);
	pci_iounmap(pdev, sc->iobase);
	pci_release_region(pdev, 0);
	pci_disable_device(pdev);
	ieee80211_free_hw(sc->hw);
}
3694
3695#ifdef CONFIG_PM_SLEEP
/*
 * ath5k_pci_suspend() - system-sleep suspend hook.
 *
 * Only the activity LED needs to be turned off here; the PCI core
 * handles the rest of the power transition.  Always succeeds.
 */
static int ath5k_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct ath5k_softc *softc = pci_get_drvdata(pdev);

	ath5k_led_off(softc);
	return 0;
}
3703
/*
 * ath5k_pci_resume() - system-sleep resume hook.
 *
 * Restores the PCI quirk and LED state lost across suspend.
 * Always succeeds.
 */
static int ath5k_pci_resume(struct device *dev)
{
	struct ath5k_softc *softc = pci_get_drvdata(to_pci_dev(dev));

	/*
	 * The PCI configuration space is reset over a suspend/resume
	 * cycle, so the RETRY_TIMEOUT register (offset 0x41) must be
	 * disabled again to keep PCI Tx retries from interfering with
	 * the C3 CPU state.
	 */
	pci_write_config_byte(to_pci_dev(dev), 0x41, 0);

	ath5k_led_enable(softc);
	return 0;
}
3719
/* Build a dev_pm_ops structure from the suspend/resume callbacks;
 * without CONFIG_PM_SLEEP the driver advertises no PM ops at all. */
static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
#define ATH5K_PM_OPS (&ath5k_pm_ops)
#else
#define ATH5K_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
3725
/* PCI driver descriptor: binds devices listed in ath5k_pci_id_table
 * to the probe/remove callbacks and (when available) the sleep PM ops
 * selected by the ATH5K_PM_OPS macro above. */
static struct pci_driver ath5k_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= ath5k_pci_id_table,
	.probe		= ath5k_pci_probe,
	.remove		= __devexit_p(ath5k_pci_remove),
	.driver.pm	= ATH5K_PM_OPS,
};
3733
3734/*
3735 * Module init/exit functions
3736 */
3737static int __init
3738init_ath5k_pci(void)
3739{
3740 int ret;
3741
3742 ret = pci_register_driver(&ath5k_pci_driver);
3743 if (ret) {
3744 printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
3745 return ret;
3746 }
3747
3748 return 0;
3749}
3750
/*
 * exit_ath5k_pci() - module exit point.
 *
 * Unregisters the PCI driver; ath5k_pci_remove() is then run for any
 * still-bound devices by the PCI core.
 */
static void __exit
exit_ath5k_pci(void)
{
	pci_unregister_driver(&ath5k_pci_driver);
}
3756
module_init(init_ath5k_pci);	/* run at module load (or late boot if built in) */
module_exit(exit_ath5k_pci);	/* run at module unload */