xref: /linux/drivers/net/wireless/intel/ipw2x00/ipw2200.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
1 /******************************************************************************
2 
3   Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4 
5   802.11 status code portion of this file from ethereal-0.10.6:
6     Copyright 2000, Axis Communications AB
7     Ethereal - Network traffic analyzer
8     By Gerald Combs <gerald@ethereal.com>
9     Copyright 1998 Gerald Combs
10 
11   This program is free software; you can redistribute it and/or modify it
12   under the terms of version 2 of the GNU General Public License as
13   published by the Free Software Foundation.
14 
15   This program is distributed in the hope that it will be useful, but WITHOUT
16   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
18   more details.
19 
20   You should have received a copy of the GNU General Public License along with
21   this program; if not, write to the Free Software Foundation, Inc., 59
22   Temple Place - Suite 330, Boston, MA  02111-1307, USA.
23 
24   The full GNU General Public License is included in this distribution in the
25   file called LICENSE.
26 
27   Contact Information:
28   Intel Linux Wireless <ilw@linux.intel.com>
29   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 
31 ******************************************************************************/
32 
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <net/cfg80211-wext.h>
36 #include "ipw2200.h"
37 #include "ipw.h"
38 
39 
40 #ifndef KBUILD_EXTMOD
41 #define VK "k"
42 #else
43 #define VK
44 #endif
45 
46 #ifdef CONFIG_IPW2200_DEBUG
47 #define VD "d"
48 #else
49 #define VD
50 #endif
51 
52 #ifdef CONFIG_IPW2200_MONITOR
53 #define VM "m"
54 #else
55 #define VM
56 #endif
57 
58 #ifdef CONFIG_IPW2200_PROMISCUOUS
59 #define VP "p"
60 #else
61 #define VP
62 #endif
63 
64 #ifdef CONFIG_IPW2200_RADIOTAP
65 #define VR "r"
66 #else
67 #define VR
68 #endif
69 
70 #ifdef CONFIG_IPW2200_QOS
71 #define VQ "q"
72 #else
73 #define VQ
74 #endif
75 
76 #define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
77 #define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2200/2915 Network Driver"
78 #define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"
79 #define DRV_VERSION     IPW2200_VERSION
80 
81 #define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
82 
83 MODULE_DESCRIPTION(DRV_DESCRIPTION);
84 MODULE_VERSION(DRV_VERSION);
85 MODULE_AUTHOR(DRV_COPYRIGHT);
86 MODULE_LICENSE("GPL");
87 MODULE_FIRMWARE("ipw2200-ibss.fw");
88 #ifdef CONFIG_IPW2200_MONITOR
89 MODULE_FIRMWARE("ipw2200-sniffer.fw");
90 #endif
91 MODULE_FIRMWARE("ipw2200-bss.fw");
92 
93 static int cmdlog = 0;
94 static int debug = 0;
95 static int default_channel = 0;
96 static int network_mode = 0;
97 
98 static u32 ipw_debug_level;
99 static int associate;
100 static int auto_create = 1;
101 static int led_support = 1;
102 static int disable = 0;
103 static int bt_coexist = 0;
104 static int hwcrypto = 0;
105 static int roaming = 1;
106 static const char ipw_modes[] = {
107 	'a', 'b', 'g', '?'
108 };
109 static int antenna = CFG_SYS_ANTENNA_BOTH;
110 
111 #ifdef CONFIG_IPW2200_PROMISCUOUS
112 static int rtap_iface = 0;     /* def: 0 -- do not create rtap interface */
113 #endif
114 
115 static struct ieee80211_rate ipw2200_rates[] = {
116 	{ .bitrate = 10 },
117 	{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
118 	{ .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
119 	{ .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
120 	{ .bitrate = 60 },
121 	{ .bitrate = 90 },
122 	{ .bitrate = 120 },
123 	{ .bitrate = 180 },
124 	{ .bitrate = 240 },
125 	{ .bitrate = 360 },
126 	{ .bitrate = 480 },
127 	{ .bitrate = 540 }
128 };
129 
130 #define ipw2200_a_rates		(ipw2200_rates + 4)
131 #define ipw2200_num_a_rates	8
132 #define ipw2200_bg_rates	(ipw2200_rates + 0)
133 #define ipw2200_num_bg_rates	12
134 
135 /* Ugly macro to convert literal channel numbers into their mhz equivalents
136  * There are certianly some conditions that will break this (like feeding it '30')
137  * but they shouldn't arise since nothing talks on channel 30. */
138 #define ieee80211chan2mhz(x) \
139 	(((x) <= 14) ? \
140 	(((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
141 	((x) + 1000) * 5)
142 
143 #ifdef CONFIG_IPW2200_QOS
144 static int qos_enable = 0;
145 static int qos_burst_enable = 0;
146 static int qos_no_ack_mask = 0;
147 static int burst_duration_CCK = 0;
148 static int burst_duration_OFDM = 0;
149 
150 static struct libipw_qos_parameters def_qos_parameters_OFDM = {
151 	{QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
152 	 QOS_TX3_CW_MIN_OFDM},
153 	{QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
154 	 QOS_TX3_CW_MAX_OFDM},
155 	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
156 	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
157 	{QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
158 	 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
159 };
160 
161 static struct libipw_qos_parameters def_qos_parameters_CCK = {
162 	{QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
163 	 QOS_TX3_CW_MIN_CCK},
164 	{QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
165 	 QOS_TX3_CW_MAX_CCK},
166 	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
167 	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
168 	{QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
169 	 QOS_TX3_TXOP_LIMIT_CCK}
170 };
171 
172 static struct libipw_qos_parameters def_parameters_OFDM = {
173 	{DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
174 	 DEF_TX3_CW_MIN_OFDM},
175 	{DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
176 	 DEF_TX3_CW_MAX_OFDM},
177 	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
178 	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
179 	{DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
180 	 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
181 };
182 
183 static struct libipw_qos_parameters def_parameters_CCK = {
184 	{DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
185 	 DEF_TX3_CW_MIN_CCK},
186 	{DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
187 	 DEF_TX3_CW_MAX_CCK},
188 	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
189 	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
190 	{DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
191 	 DEF_TX3_TXOP_LIMIT_CCK}
192 };
193 
194 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
195 
196 static int from_priority_to_tx_queue[] = {
197 	IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
198 	IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
199 };
200 
201 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
202 
203 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
204 				       *qos_param);
205 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
206 				     *qos_param);
207 #endif				/* CONFIG_IPW2200_QOS */
208 
209 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
210 static void ipw_remove_current_network(struct ipw_priv *priv);
211 static void ipw_rx(struct ipw_priv *priv);
212 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
213 				struct clx2_tx_queue *txq, int qindex);
214 static int ipw_queue_reset(struct ipw_priv *priv);
215 
216 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
217 			     int len, int sync);
218 
219 static void ipw_tx_queue_free(struct ipw_priv *);
220 
221 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
222 static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
223 static void ipw_rx_queue_replenish(void *);
224 static int ipw_up(struct ipw_priv *);
225 static void ipw_bg_up(struct work_struct *work);
226 static void ipw_down(struct ipw_priv *);
227 static void ipw_bg_down(struct work_struct *work);
228 static int ipw_config(struct ipw_priv *);
229 static int init_supported_rates(struct ipw_priv *priv,
230 				struct ipw_supported_rates *prates);
231 static void ipw_set_hwcrypto_keys(struct ipw_priv *);
232 static void ipw_send_wep_keys(struct ipw_priv *, int);
233 
234 static int snprint_line(char *buf, size_t count,
235 			const u8 * data, u32 len, u32 ofs)
236 {
237 	int out, i, j, l;
238 	char c;
239 
240 	out = snprintf(buf, count, "%08X", ofs);
241 
242 	for (l = 0, i = 0; i < 2; i++) {
243 		out += snprintf(buf + out, count - out, " ");
244 		for (j = 0; j < 8 && l < len; j++, l++)
245 			out += snprintf(buf + out, count - out, "%02X ",
246 					data[(i * 8 + j)]);
247 		for (; j < 8; j++)
248 			out += snprintf(buf + out, count - out, "   ");
249 	}
250 
251 	out += snprintf(buf + out, count - out, " ");
252 	for (l = 0, i = 0; i < 2; i++) {
253 		out += snprintf(buf + out, count - out, " ");
254 		for (j = 0; j < 8 && l < len; j++, l++) {
255 			c = data[(i * 8 + j)];
256 			if (!isascii(c) || !isprint(c))
257 				c = '.';
258 
259 			out += snprintf(buf + out, count - out, "%c", c);
260 		}
261 
262 		for (; j < 8; j++)
263 			out += snprintf(buf + out, count - out, " ");
264 	}
265 
266 	return out;
267 }
268 
269 static void printk_buf(int level, const u8 * data, u32 len)
270 {
271 	char line[81];
272 	u32 ofs = 0;
273 	if (!(ipw_debug_level & level))
274 		return;
275 
276 	while (len) {
277 		snprint_line(line, sizeof(line), &data[ofs],
278 			     min(len, 16U), ofs);
279 		printk(KERN_DEBUG "%s\n", line);
280 		ofs += 16;
281 		len -= min(len, 16U);
282 	}
283 }
284 
285 static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
286 {
287 	size_t out = size;
288 	u32 ofs = 0;
289 	int total = 0;
290 
291 	while (size && len) {
292 		out = snprint_line(output, size, &data[ofs],
293 				   min_t(size_t, len, 16U), ofs);
294 
295 		ofs += 16;
296 		output += out;
297 		size -= out;
298 		len -= min_t(size_t, len, 16U);
299 		total += out;
300 	}
301 	return total;
302 }
303 
304 /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
305 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
306 #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
307 
308 /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
309 static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
310 #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
311 
312 /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
313 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
314 static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
315 {
316 	IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
317 		     __LINE__, (u32) (b), (u32) (c));
318 	_ipw_write_reg8(a, b, c);
319 }
320 
321 /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
322 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
323 static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
324 {
325 	IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
326 		     __LINE__, (u32) (b), (u32) (c));
327 	_ipw_write_reg16(a, b, c);
328 }
329 
330 /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
331 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
332 static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
333 {
334 	IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
335 		     __LINE__, (u32) (b), (u32) (c));
336 	_ipw_write_reg32(a, b, c);
337 }
338 
339 /* 8-bit direct write (low 4K) */
340 static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
341 		u8 val)
342 {
343 	writeb(val, ipw->hw_base + ofs);
344 }
345 
346 /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
347 #define ipw_write8(ipw, ofs, val) do { \
348 	IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
349 			__LINE__, (u32)(ofs), (u32)(val)); \
350 	_ipw_write8(ipw, ofs, val); \
351 } while (0)
352 
353 /* 16-bit direct write (low 4K) */
354 static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
355 		u16 val)
356 {
357 	writew(val, ipw->hw_base + ofs);
358 }
359 
360 /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
361 #define ipw_write16(ipw, ofs, val) do { \
362 	IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
363 			__LINE__, (u32)(ofs), (u32)(val)); \
364 	_ipw_write16(ipw, ofs, val); \
365 } while (0)
366 
367 /* 32-bit direct write (low 4K) */
368 static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
369 		u32 val)
370 {
371 	writel(val, ipw->hw_base + ofs);
372 }
373 
374 /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
375 #define ipw_write32(ipw, ofs, val) do { \
376 	IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
377 			__LINE__, (u32)(ofs), (u32)(val)); \
378 	_ipw_write32(ipw, ofs, val); \
379 } while (0)
380 
381 /* 8-bit direct read (low 4K) */
382 static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
383 {
384 	return readb(ipw->hw_base + ofs);
385 }
386 
387 /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
388 #define ipw_read8(ipw, ofs) ({ \
389 	IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
390 			(u32)(ofs)); \
391 	_ipw_read8(ipw, ofs); \
392 })
393 
394 /* 16-bit direct read (low 4K) */
395 static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
396 {
397 	return readw(ipw->hw_base + ofs);
398 }
399 
400 /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
401 #define ipw_read16(ipw, ofs) ({ \
402 	IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
403 			(u32)(ofs)); \
404 	_ipw_read16(ipw, ofs); \
405 })
406 
407 /* 32-bit direct read (low 4K) */
408 static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
409 {
410 	return readl(ipw->hw_base + ofs);
411 }
412 
413 /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
414 #define ipw_read32(ipw, ofs) ({ \
415 	IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
416 			(u32)(ofs)); \
417 	_ipw_read32(ipw, ofs); \
418 })
419 
420 static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
421 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
422 #define ipw_read_indirect(a, b, c, d) ({ \
423 	IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
424 			__LINE__, (u32)(b), (u32)(d)); \
425 	_ipw_read_indirect(a, b, c, d); \
426 })
427 
428 /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
429 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
430 				int num);
431 #define ipw_write_indirect(a, b, c, d) do { \
432 	IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
433 			__LINE__, (u32)(b), (u32)(d)); \
434 	_ipw_write_indirect(a, b, c, d); \
435 } while (0)
436 
437 /* 32-bit indirect write (above 4K) */
438 static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
439 {
440 	IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
441 	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
442 	_ipw_write32(priv, IPW_INDIRECT_DATA, value);
443 }
444 
445 /* 8-bit indirect write (above 4K) */
446 static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
447 {
448 	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
449 	u32 dif_len = reg - aligned_addr;
450 
451 	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
452 	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
453 	_ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
454 }
455 
456 /* 16-bit indirect write (above 4K) */
457 static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
458 {
459 	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
460 	u32 dif_len = (reg - aligned_addr) & (~0x1ul);
461 
462 	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
463 	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
464 	_ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
465 }
466 
467 /* 8-bit indirect read (above 4K) */
468 static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
469 {
470 	u32 word;
471 	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
472 	IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
473 	word = _ipw_read32(priv, IPW_INDIRECT_DATA);
474 	return (word >> ((reg & 0x3) * 8)) & 0xff;
475 }
476 
477 /* 32-bit indirect read (above 4K) */
478 static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
479 {
480 	u32 value;
481 
482 	IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
483 
484 	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
485 	value = _ipw_read32(priv, IPW_INDIRECT_DATA);
486 	IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
487 	return value;
488 }
489 
490 /* General purpose, no alignment requirement, iterative (multi-byte) read, */
491 /*    for area above 1st 4K of SRAM/reg space */
492 static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
493 			       int num)
494 {
495 	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
496 	u32 dif_len = addr - aligned_addr;
497 	u32 i;
498 
499 	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
500 
501 	if (num <= 0) {
502 		return;
503 	}
504 
505 	/* Read the first dword (or portion) byte by byte */
506 	if (unlikely(dif_len)) {
507 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
508 		/* Start reading at aligned_addr + dif_len */
509 		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
510 			*buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
511 		aligned_addr += 4;
512 	}
513 
514 	/* Read all of the middle dwords as dwords, with auto-increment */
515 	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
516 	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
517 		*(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
518 
519 	/* Read the last dword (or portion) byte by byte */
520 	if (unlikely(num)) {
521 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
522 		for (i = 0; num > 0; i++, num--)
523 			*buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
524 	}
525 }
526 
527 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
528 /*    for area above 1st 4K of SRAM/reg space */
529 static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
530 				int num)
531 {
532 	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
533 	u32 dif_len = addr - aligned_addr;
534 	u32 i;
535 
536 	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
537 
538 	if (num <= 0) {
539 		return;
540 	}
541 
542 	/* Write the first dword (or portion) byte by byte */
543 	if (unlikely(dif_len)) {
544 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
545 		/* Start writing at aligned_addr + dif_len */
546 		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
547 			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
548 		aligned_addr += 4;
549 	}
550 
551 	/* Write all of the middle dwords as dwords, with auto-increment */
552 	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
553 	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
554 		_ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
555 
556 	/* Write the last dword (or portion) byte by byte */
557 	if (unlikely(num)) {
558 		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
559 		for (i = 0; num > 0; i++, num--, buf++)
560 			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
561 	}
562 }
563 
564 /* General purpose, no alignment requirement, iterative (multi-byte) write, */
565 /*    for 1st 4K of SRAM/regs space */
566 static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
567 			     int num)
568 {
569 	memcpy_toio((priv->hw_base + addr), buf, num);
570 }
571 
572 /* Set bit(s) in low 4K of SRAM/regs */
573 static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
574 {
575 	ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
576 }
577 
578 /* Clear bit(s) in low 4K of SRAM/regs */
579 static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
580 {
581 	ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
582 }
583 
584 static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
585 {
586 	if (priv->status & STATUS_INT_ENABLED)
587 		return;
588 	priv->status |= STATUS_INT_ENABLED;
589 	ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
590 }
591 
592 static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
593 {
594 	if (!(priv->status & STATUS_INT_ENABLED))
595 		return;
596 	priv->status &= ~STATUS_INT_ENABLED;
597 	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
598 }
599 
600 static inline void ipw_enable_interrupts(struct ipw_priv *priv)
601 {
602 	unsigned long flags;
603 
604 	spin_lock_irqsave(&priv->irq_lock, flags);
605 	__ipw_enable_interrupts(priv);
606 	spin_unlock_irqrestore(&priv->irq_lock, flags);
607 }
608 
609 static inline void ipw_disable_interrupts(struct ipw_priv *priv)
610 {
611 	unsigned long flags;
612 
613 	spin_lock_irqsave(&priv->irq_lock, flags);
614 	__ipw_disable_interrupts(priv);
615 	spin_unlock_irqrestore(&priv->irq_lock, flags);
616 }
617 
618 static char *ipw_error_desc(u32 val)
619 {
620 	switch (val) {
621 	case IPW_FW_ERROR_OK:
622 		return "ERROR_OK";
623 	case IPW_FW_ERROR_FAIL:
624 		return "ERROR_FAIL";
625 	case IPW_FW_ERROR_MEMORY_UNDERFLOW:
626 		return "MEMORY_UNDERFLOW";
627 	case IPW_FW_ERROR_MEMORY_OVERFLOW:
628 		return "MEMORY_OVERFLOW";
629 	case IPW_FW_ERROR_BAD_PARAM:
630 		return "BAD_PARAM";
631 	case IPW_FW_ERROR_BAD_CHECKSUM:
632 		return "BAD_CHECKSUM";
633 	case IPW_FW_ERROR_NMI_INTERRUPT:
634 		return "NMI_INTERRUPT";
635 	case IPW_FW_ERROR_BAD_DATABASE:
636 		return "BAD_DATABASE";
637 	case IPW_FW_ERROR_ALLOC_FAIL:
638 		return "ALLOC_FAIL";
639 	case IPW_FW_ERROR_DMA_UNDERRUN:
640 		return "DMA_UNDERRUN";
641 	case IPW_FW_ERROR_DMA_STATUS:
642 		return "DMA_STATUS";
643 	case IPW_FW_ERROR_DINO_ERROR:
644 		return "DINO_ERROR";
645 	case IPW_FW_ERROR_EEPROM_ERROR:
646 		return "EEPROM_ERROR";
647 	case IPW_FW_ERROR_SYSASSERT:
648 		return "SYSASSERT";
649 	case IPW_FW_ERROR_FATAL_ERROR:
650 		return "FATAL_ERROR";
651 	default:
652 		return "UNKNOWN_ERROR";
653 	}
654 }
655 
656 static void ipw_dump_error_log(struct ipw_priv *priv,
657 			       struct ipw_fw_error *error)
658 {
659 	u32 i;
660 
661 	if (!error) {
662 		IPW_ERROR("Error allocating and capturing error log.  "
663 			  "Nothing to dump.\n");
664 		return;
665 	}
666 
667 	IPW_ERROR("Start IPW Error Log Dump:\n");
668 	IPW_ERROR("Status: 0x%08X, Config: %08X\n",
669 		  error->status, error->config);
670 
671 	for (i = 0; i < error->elem_len; i++)
672 		IPW_ERROR("%s %i 0x%08x  0x%08x  0x%08x  0x%08x  0x%08x\n",
673 			  ipw_error_desc(error->elem[i].desc),
674 			  error->elem[i].time,
675 			  error->elem[i].blink1,
676 			  error->elem[i].blink2,
677 			  error->elem[i].link1,
678 			  error->elem[i].link2, error->elem[i].data);
679 	for (i = 0; i < error->log_len; i++)
680 		IPW_ERROR("%i\t0x%08x\t%i\n",
681 			  error->log[i].time,
682 			  error->log[i].data, error->log[i].event);
683 }
684 
685 static inline int ipw_is_init(struct ipw_priv *priv)
686 {
687 	return (priv->status & STATUS_INIT) ? 1 : 0;
688 }
689 
690 static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
691 {
692 	u32 addr, field_info, field_len, field_count, total_len;
693 
694 	IPW_DEBUG_ORD("ordinal = %i\n", ord);
695 
696 	if (!priv || !val || !len) {
697 		IPW_DEBUG_ORD("Invalid argument\n");
698 		return -EINVAL;
699 	}
700 
701 	/* verify device ordinal tables have been initialized */
702 	if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
703 		IPW_DEBUG_ORD("Access ordinals before initialization\n");
704 		return -EINVAL;
705 	}
706 
707 	switch (IPW_ORD_TABLE_ID_MASK & ord) {
708 	case IPW_ORD_TABLE_0_MASK:
709 		/*
710 		 * TABLE 0: Direct access to a table of 32 bit values
711 		 *
712 		 * This is a very simple table with the data directly
713 		 * read from the table
714 		 */
715 
716 		/* remove the table id from the ordinal */
717 		ord &= IPW_ORD_TABLE_VALUE_MASK;
718 
719 		/* boundary check */
720 		if (ord > priv->table0_len) {
721 			IPW_DEBUG_ORD("ordinal value (%i) longer then "
722 				      "max (%i)\n", ord, priv->table0_len);
723 			return -EINVAL;
724 		}
725 
726 		/* verify we have enough room to store the value */
727 		if (*len < sizeof(u32)) {
728 			IPW_DEBUG_ORD("ordinal buffer length too small, "
729 				      "need %zd\n", sizeof(u32));
730 			return -EINVAL;
731 		}
732 
733 		IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
734 			      ord, priv->table0_addr + (ord << 2));
735 
736 		*len = sizeof(u32);
737 		ord <<= 2;
738 		*((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
739 		break;
740 
741 	case IPW_ORD_TABLE_1_MASK:
742 		/*
743 		 * TABLE 1: Indirect access to a table of 32 bit values
744 		 *
745 		 * This is a fairly large table of u32 values each
746 		 * representing starting addr for the data (which is
747 		 * also a u32)
748 		 */
749 
750 		/* remove the table id from the ordinal */
751 		ord &= IPW_ORD_TABLE_VALUE_MASK;
752 
753 		/* boundary check */
754 		if (ord > priv->table1_len) {
755 			IPW_DEBUG_ORD("ordinal value too long\n");
756 			return -EINVAL;
757 		}
758 
759 		/* verify we have enough room to store the value */
760 		if (*len < sizeof(u32)) {
761 			IPW_DEBUG_ORD("ordinal buffer length too small, "
762 				      "need %zd\n", sizeof(u32));
763 			return -EINVAL;
764 		}
765 
766 		*((u32 *) val) =
767 		    ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
768 		*len = sizeof(u32);
769 		break;
770 
771 	case IPW_ORD_TABLE_2_MASK:
772 		/*
773 		 * TABLE 2: Indirect access to a table of variable sized values
774 		 *
775 		 * This table consist of six values, each containing
776 		 *     - dword containing the starting offset of the data
777 		 *     - dword containing the lengh in the first 16bits
778 		 *       and the count in the second 16bits
779 		 */
780 
781 		/* remove the table id from the ordinal */
782 		ord &= IPW_ORD_TABLE_VALUE_MASK;
783 
784 		/* boundary check */
785 		if (ord > priv->table2_len) {
786 			IPW_DEBUG_ORD("ordinal value too long\n");
787 			return -EINVAL;
788 		}
789 
790 		/* get the address of statistic */
791 		addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
792 
793 		/* get the second DW of statistics ;
794 		 * two 16-bit words - first is length, second is count */
795 		field_info =
796 		    ipw_read_reg32(priv,
797 				   priv->table2_addr + (ord << 3) +
798 				   sizeof(u32));
799 
800 		/* get each entry length */
801 		field_len = *((u16 *) & field_info);
802 
803 		/* get number of entries */
804 		field_count = *(((u16 *) & field_info) + 1);
805 
806 		/* abort if not enough memory */
807 		total_len = field_len * field_count;
808 		if (total_len > *len) {
809 			*len = total_len;
810 			return -EINVAL;
811 		}
812 
813 		*len = total_len;
814 		if (!total_len)
815 			return 0;
816 
817 		IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
818 			      "field_info = 0x%08x\n",
819 			      addr, total_len, field_info);
820 		ipw_read_indirect(priv, addr, val, total_len);
821 		break;
822 
823 	default:
824 		IPW_DEBUG_ORD("Invalid ordinal!\n");
825 		return -EINVAL;
826 
827 	}
828 
829 	return 0;
830 }
831 
832 static void ipw_init_ordinals(struct ipw_priv *priv)
833 {
834 	priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
835 	priv->table0_len = ipw_read32(priv, priv->table0_addr);
836 
837 	IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
838 		      priv->table0_addr, priv->table0_len);
839 
840 	priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
841 	priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
842 
843 	IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
844 		      priv->table1_addr, priv->table1_len);
845 
846 	priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
847 	priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
848 	priv->table2_len &= 0x0000ffff;	/* use first two bytes */
849 
850 	IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
851 		      priv->table2_addr, priv->table2_len);
852 
853 }
854 
855 static u32 ipw_register_toggle(u32 reg)
856 {
857 	reg &= ~IPW_START_STANDBY;
858 	if (reg & IPW_GATE_ODMA)
859 		reg &= ~IPW_GATE_ODMA;
860 	if (reg & IPW_GATE_IDMA)
861 		reg &= ~IPW_GATE_IDMA;
862 	if (reg & IPW_GATE_ADMA)
863 		reg &= ~IPW_GATE_ADMA;
864 	return reg;
865 }
866 
867 /*
868  * LED behavior:
869  * - On radio ON, turn on any LEDs that require to be on during start
870  * - On initialization, start unassociated blink
871  * - On association, disable unassociated blink
872  * - On disassociation, start unassociated blink
873  * - On radio OFF, turn off any LEDs started during radio on
874  *
875  */
876 #define LD_TIME_LINK_ON msecs_to_jiffies(300)
877 #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
878 #define LD_TIME_ACT_ON msecs_to_jiffies(250)
879 
880 static void ipw_led_link_on(struct ipw_priv *priv)
881 {
882 	unsigned long flags;
883 	u32 led;
884 
885 	/* If configured to not use LEDs, or nic_type is 1,
886 	 * then we don't toggle a LINK led */
887 	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
888 		return;
889 
890 	spin_lock_irqsave(&priv->lock, flags);
891 
892 	if (!(priv->status & STATUS_RF_KILL_MASK) &&
893 	    !(priv->status & STATUS_LED_LINK_ON)) {
894 		IPW_DEBUG_LED("Link LED On\n");
895 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
896 		led |= priv->led_association_on;
897 
898 		led = ipw_register_toggle(led);
899 
900 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
901 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
902 
903 		priv->status |= STATUS_LED_LINK_ON;
904 
905 		/* If we aren't associated, schedule turning the LED off */
906 		if (!(priv->status & STATUS_ASSOCIATED))
907 			schedule_delayed_work(&priv->led_link_off,
908 					      LD_TIME_LINK_ON);
909 	}
910 
911 	spin_unlock_irqrestore(&priv->lock, flags);
912 }
913 
914 static void ipw_bg_led_link_on(struct work_struct *work)
915 {
916 	struct ipw_priv *priv =
917 		container_of(work, struct ipw_priv, led_link_on.work);
918 	mutex_lock(&priv->mutex);
919 	ipw_led_link_on(priv);
920 	mutex_unlock(&priv->mutex);
921 }
922 
923 static void ipw_led_link_off(struct ipw_priv *priv)
924 {
925 	unsigned long flags;
926 	u32 led;
927 
928 	/* If configured not to use LEDs, or nic type is 1,
929 	 * then we don't goggle the LINK led. */
930 	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
931 		return;
932 
933 	spin_lock_irqsave(&priv->lock, flags);
934 
935 	if (priv->status & STATUS_LED_LINK_ON) {
936 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
937 		led &= priv->led_association_off;
938 		led = ipw_register_toggle(led);
939 
940 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
941 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
942 
943 		IPW_DEBUG_LED("Link LED Off\n");
944 
945 		priv->status &= ~STATUS_LED_LINK_ON;
946 
947 		/* If we aren't associated and the radio is on, schedule
948 		 * turning the LED on (blink while unassociated) */
949 		if (!(priv->status & STATUS_RF_KILL_MASK) &&
950 		    !(priv->status & STATUS_ASSOCIATED))
951 			schedule_delayed_work(&priv->led_link_on,
952 					      LD_TIME_LINK_OFF);
953 
954 	}
955 
956 	spin_unlock_irqrestore(&priv->lock, flags);
957 }
958 
959 static void ipw_bg_led_link_off(struct work_struct *work)
960 {
961 	struct ipw_priv *priv =
962 		container_of(work, struct ipw_priv, led_link_off.work);
963 	mutex_lock(&priv->mutex);
964 	ipw_led_link_off(priv);
965 	mutex_unlock(&priv->mutex);
966 }
967 
968 static void __ipw_led_activity_on(struct ipw_priv *priv)
969 {
970 	u32 led;
971 
972 	if (priv->config & CFG_NO_LED)
973 		return;
974 
975 	if (priv->status & STATUS_RF_KILL_MASK)
976 		return;
977 
978 	if (!(priv->status & STATUS_LED_ACT_ON)) {
979 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
980 		led |= priv->led_activity_on;
981 
982 		led = ipw_register_toggle(led);
983 
984 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
985 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
986 
987 		IPW_DEBUG_LED("Activity LED On\n");
988 
989 		priv->status |= STATUS_LED_ACT_ON;
990 
991 		cancel_delayed_work(&priv->led_act_off);
992 		schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
993 	} else {
994 		/* Reschedule LED off for full time period */
995 		cancel_delayed_work(&priv->led_act_off);
996 		schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
997 	}
998 }
999 
1000 #if 0
1001 void ipw_led_activity_on(struct ipw_priv *priv)
1002 {
1003 	unsigned long flags;
1004 	spin_lock_irqsave(&priv->lock, flags);
1005 	__ipw_led_activity_on(priv);
1006 	spin_unlock_irqrestore(&priv->lock, flags);
1007 }
1008 #endif  /*  0  */
1009 
1010 static void ipw_led_activity_off(struct ipw_priv *priv)
1011 {
1012 	unsigned long flags;
1013 	u32 led;
1014 
1015 	if (priv->config & CFG_NO_LED)
1016 		return;
1017 
1018 	spin_lock_irqsave(&priv->lock, flags);
1019 
1020 	if (priv->status & STATUS_LED_ACT_ON) {
1021 		led = ipw_read_reg32(priv, IPW_EVENT_REG);
1022 		led &= priv->led_activity_off;
1023 
1024 		led = ipw_register_toggle(led);
1025 
1026 		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1027 		ipw_write_reg32(priv, IPW_EVENT_REG, led);
1028 
1029 		IPW_DEBUG_LED("Activity LED Off\n");
1030 
1031 		priv->status &= ~STATUS_LED_ACT_ON;
1032 	}
1033 
1034 	spin_unlock_irqrestore(&priv->lock, flags);
1035 }
1036 
1037 static void ipw_bg_led_activity_off(struct work_struct *work)
1038 {
1039 	struct ipw_priv *priv =
1040 		container_of(work, struct ipw_priv, led_act_off.work);
1041 	mutex_lock(&priv->mutex);
1042 	ipw_led_activity_off(priv);
1043 	mutex_unlock(&priv->mutex);
1044 }
1045 
1046 static void ipw_led_band_on(struct ipw_priv *priv)
1047 {
1048 	unsigned long flags;
1049 	u32 led;
1050 
1051 	/* Only nic type 1 supports mode LEDs */
1052 	if (priv->config & CFG_NO_LED ||
1053 	    priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1054 		return;
1055 
1056 	spin_lock_irqsave(&priv->lock, flags);
1057 
1058 	led = ipw_read_reg32(priv, IPW_EVENT_REG);
1059 	if (priv->assoc_network->mode == IEEE_A) {
1060 		led |= priv->led_ofdm_on;
1061 		led &= priv->led_association_off;
1062 		IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1063 	} else if (priv->assoc_network->mode == IEEE_G) {
1064 		led |= priv->led_ofdm_on;
1065 		led |= priv->led_association_on;
1066 		IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1067 	} else {
1068 		led &= priv->led_ofdm_off;
1069 		led |= priv->led_association_on;
1070 		IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1071 	}
1072 
1073 	led = ipw_register_toggle(led);
1074 
1075 	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1076 	ipw_write_reg32(priv, IPW_EVENT_REG, led);
1077 
1078 	spin_unlock_irqrestore(&priv->lock, flags);
1079 }
1080 
1081 static void ipw_led_band_off(struct ipw_priv *priv)
1082 {
1083 	unsigned long flags;
1084 	u32 led;
1085 
1086 	/* Only nic type 1 supports mode LEDs */
1087 	if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1088 		return;
1089 
1090 	spin_lock_irqsave(&priv->lock, flags);
1091 
1092 	led = ipw_read_reg32(priv, IPW_EVENT_REG);
1093 	led &= priv->led_ofdm_off;
1094 	led &= priv->led_association_off;
1095 
1096 	led = ipw_register_toggle(led);
1097 
1098 	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1099 	ipw_write_reg32(priv, IPW_EVENT_REG, led);
1100 
1101 	spin_unlock_irqrestore(&priv->lock, flags);
1102 }
1103 
1104 static void ipw_led_radio_on(struct ipw_priv *priv)
1105 {
1106 	ipw_led_link_on(priv);
1107 }
1108 
1109 static void ipw_led_radio_off(struct ipw_priv *priv)
1110 {
1111 	ipw_led_activity_off(priv);
1112 	ipw_led_link_off(priv);
1113 }
1114 
1115 static void ipw_led_link_up(struct ipw_priv *priv)
1116 {
1117 	/* Set the Link Led on for all nic types */
1118 	ipw_led_link_on(priv);
1119 }
1120 
1121 static void ipw_led_link_down(struct ipw_priv *priv)
1122 {
1123 	ipw_led_activity_off(priv);
1124 	ipw_led_link_off(priv);
1125 
1126 	if (priv->status & STATUS_RF_KILL_MASK)
1127 		ipw_led_radio_off(priv);
1128 }
1129 
1130 static void ipw_led_init(struct ipw_priv *priv)
1131 {
1132 	priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1133 
1134 	/* Set the default PINs for the link and activity leds */
1135 	priv->led_activity_on = IPW_ACTIVITY_LED;
1136 	priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1137 
1138 	priv->led_association_on = IPW_ASSOCIATED_LED;
1139 	priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1140 
1141 	/* Set the default PINs for the OFDM leds */
1142 	priv->led_ofdm_on = IPW_OFDM_LED;
1143 	priv->led_ofdm_off = ~(IPW_OFDM_LED);
1144 
1145 	switch (priv->nic_type) {
1146 	case EEPROM_NIC_TYPE_1:
1147 		/* In this NIC type, the LEDs are reversed.... */
1148 		priv->led_activity_on = IPW_ASSOCIATED_LED;
1149 		priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1150 		priv->led_association_on = IPW_ACTIVITY_LED;
1151 		priv->led_association_off = ~(IPW_ACTIVITY_LED);
1152 
1153 		if (!(priv->config & CFG_NO_LED))
1154 			ipw_led_band_on(priv);
1155 
1156 		/* And we don't blink link LEDs for this nic, so
1157 		 * just return here */
1158 		return;
1159 
1160 	case EEPROM_NIC_TYPE_3:
1161 	case EEPROM_NIC_TYPE_2:
1162 	case EEPROM_NIC_TYPE_4:
1163 	case EEPROM_NIC_TYPE_0:
1164 		break;
1165 
1166 	default:
1167 		IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1168 			       priv->nic_type);
1169 		priv->nic_type = EEPROM_NIC_TYPE_0;
1170 		break;
1171 	}
1172 
1173 	if (!(priv->config & CFG_NO_LED)) {
1174 		if (priv->status & STATUS_ASSOCIATED)
1175 			ipw_led_link_on(priv);
1176 		else
1177 			ipw_led_link_off(priv);
1178 	}
1179 }
1180 
1181 static void ipw_led_shutdown(struct ipw_priv *priv)
1182 {
1183 	ipw_led_activity_off(priv);
1184 	ipw_led_link_off(priv);
1185 	ipw_led_band_off(priv);
1186 	cancel_delayed_work(&priv->led_link_on);
1187 	cancel_delayed_work(&priv->led_link_off);
1188 	cancel_delayed_work(&priv->led_act_off);
1189 }
1190 
1191 /*
1192  * The following adds a new attribute to the sysfs representation
1193  * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1194  * used for controlling the debug level.
1195  *
1196  * See the level definitions in ipw for details.
1197  */
1198 static ssize_t show_debug_level(struct device_driver *d, char *buf)
1199 {
1200 	return sprintf(buf, "0x%08X\n", ipw_debug_level);
1201 }
1202 
1203 static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1204 				 size_t count)
1205 {
1206 	char *p = (char *)buf;
1207 	u32 val;
1208 
1209 	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1210 		p++;
1211 		if (p[0] == 'x' || p[0] == 'X')
1212 			p++;
1213 		val = simple_strtoul(p, &p, 16);
1214 	} else
1215 		val = simple_strtoul(p, &p, 10);
1216 	if (p == buf)
1217 		printk(KERN_INFO DRV_NAME
1218 		       ": %s is not in hex or decimal form.\n", buf);
1219 	else
1220 		ipw_debug_level = val;
1221 
1222 	return strnlen(buf, count);
1223 }
1224 
1225 static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1226 		   show_debug_level, store_debug_level);
1227 
1228 static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1229 {
1230 	/* length = 1st dword in log */
1231 	return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1232 }
1233 
1234 static void ipw_capture_event_log(struct ipw_priv *priv,
1235 				  u32 log_len, struct ipw_event *log)
1236 {
1237 	u32 base;
1238 
1239 	if (log_len) {
1240 		base = ipw_read32(priv, IPW_EVENT_LOG);
1241 		ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1242 				  (u8 *) log, sizeof(*log) * log_len);
1243 	}
1244 }
1245 
1246 static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1247 {
1248 	struct ipw_fw_error *error;
1249 	u32 log_len = ipw_get_event_log_len(priv);
1250 	u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1251 	u32 elem_len = ipw_read_reg32(priv, base);
1252 
1253 	error = kmalloc(sizeof(*error) +
1254 			sizeof(*error->elem) * elem_len +
1255 			sizeof(*error->log) * log_len, GFP_ATOMIC);
1256 	if (!error) {
1257 		IPW_ERROR("Memory allocation for firmware error log "
1258 			  "failed.\n");
1259 		return NULL;
1260 	}
1261 	error->jiffies = jiffies;
1262 	error->status = priv->status;
1263 	error->config = priv->config;
1264 	error->elem_len = elem_len;
1265 	error->log_len = log_len;
1266 	error->elem = (struct ipw_error_elem *)error->payload;
1267 	error->log = (struct ipw_event *)(error->elem + elem_len);
1268 
1269 	ipw_capture_event_log(priv, log_len, error->log);
1270 
1271 	if (elem_len)
1272 		ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1273 				  sizeof(*error->elem) * elem_len);
1274 
1275 	return error;
1276 }
1277 
1278 static ssize_t show_event_log(struct device *d,
1279 			      struct device_attribute *attr, char *buf)
1280 {
1281 	struct ipw_priv *priv = dev_get_drvdata(d);
1282 	u32 log_len = ipw_get_event_log_len(priv);
1283 	u32 log_size;
1284 	struct ipw_event *log;
1285 	u32 len = 0, i;
1286 
1287 	/* not using min() because of its strict type checking */
1288 	log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1289 			sizeof(*log) * log_len : PAGE_SIZE;
1290 	log = kzalloc(log_size, GFP_KERNEL);
1291 	if (!log) {
1292 		IPW_ERROR("Unable to allocate memory for log\n");
1293 		return 0;
1294 	}
1295 	log_len = log_size / sizeof(*log);
1296 	ipw_capture_event_log(priv, log_len, log);
1297 
1298 	len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1299 	for (i = 0; i < log_len; i++)
1300 		len += snprintf(buf + len, PAGE_SIZE - len,
1301 				"\n%08X%08X%08X",
1302 				log[i].time, log[i].event, log[i].data);
1303 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1304 	kfree(log);
1305 	return len;
1306 }
1307 
1308 static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1309 
1310 static ssize_t show_error(struct device *d,
1311 			  struct device_attribute *attr, char *buf)
1312 {
1313 	struct ipw_priv *priv = dev_get_drvdata(d);
1314 	u32 len = 0, i;
1315 	if (!priv->error)
1316 		return 0;
1317 	len += snprintf(buf + len, PAGE_SIZE - len,
1318 			"%08lX%08X%08X%08X",
1319 			priv->error->jiffies,
1320 			priv->error->status,
1321 			priv->error->config, priv->error->elem_len);
1322 	for (i = 0; i < priv->error->elem_len; i++)
1323 		len += snprintf(buf + len, PAGE_SIZE - len,
1324 				"\n%08X%08X%08X%08X%08X%08X%08X",
1325 				priv->error->elem[i].time,
1326 				priv->error->elem[i].desc,
1327 				priv->error->elem[i].blink1,
1328 				priv->error->elem[i].blink2,
1329 				priv->error->elem[i].link1,
1330 				priv->error->elem[i].link2,
1331 				priv->error->elem[i].data);
1332 
1333 	len += snprintf(buf + len, PAGE_SIZE - len,
1334 			"\n%08X", priv->error->log_len);
1335 	for (i = 0; i < priv->error->log_len; i++)
1336 		len += snprintf(buf + len, PAGE_SIZE - len,
1337 				"\n%08X%08X%08X",
1338 				priv->error->log[i].time,
1339 				priv->error->log[i].event,
1340 				priv->error->log[i].data);
1341 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1342 	return len;
1343 }
1344 
1345 static ssize_t clear_error(struct device *d,
1346 			   struct device_attribute *attr,
1347 			   const char *buf, size_t count)
1348 {
1349 	struct ipw_priv *priv = dev_get_drvdata(d);
1350 
1351 	kfree(priv->error);
1352 	priv->error = NULL;
1353 	return count;
1354 }
1355 
1356 static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1357 
1358 static ssize_t show_cmd_log(struct device *d,
1359 			    struct device_attribute *attr, char *buf)
1360 {
1361 	struct ipw_priv *priv = dev_get_drvdata(d);
1362 	u32 len = 0, i;
1363 	if (!priv->cmdlog)
1364 		return 0;
1365 	for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1366 	     (i != priv->cmdlog_pos) && (len < PAGE_SIZE);
1367 	     i = (i + 1) % priv->cmdlog_len) {
1368 		len +=
1369 		    snprintf(buf + len, PAGE_SIZE - len,
1370 			     "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1371 			     priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1372 			     priv->cmdlog[i].cmd.len);
1373 		len +=
1374 		    snprintk_buf(buf + len, PAGE_SIZE - len,
1375 				 (u8 *) priv->cmdlog[i].cmd.param,
1376 				 priv->cmdlog[i].cmd.len);
1377 		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1378 	}
1379 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1380 	return len;
1381 }
1382 
1383 static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1384 
1385 #ifdef CONFIG_IPW2200_PROMISCUOUS
1386 static void ipw_prom_free(struct ipw_priv *priv);
1387 static int ipw_prom_alloc(struct ipw_priv *priv);
1388 static ssize_t store_rtap_iface(struct device *d,
1389 			 struct device_attribute *attr,
1390 			 const char *buf, size_t count)
1391 {
1392 	struct ipw_priv *priv = dev_get_drvdata(d);
1393 	int rc = 0;
1394 
1395 	if (count < 1)
1396 		return -EINVAL;
1397 
1398 	switch (buf[0]) {
1399 	case '0':
1400 		if (!rtap_iface)
1401 			return count;
1402 
1403 		if (netif_running(priv->prom_net_dev)) {
1404 			IPW_WARNING("Interface is up.  Cannot unregister.\n");
1405 			return count;
1406 		}
1407 
1408 		ipw_prom_free(priv);
1409 		rtap_iface = 0;
1410 		break;
1411 
1412 	case '1':
1413 		if (rtap_iface)
1414 			return count;
1415 
1416 		rc = ipw_prom_alloc(priv);
1417 		if (!rc)
1418 			rtap_iface = 1;
1419 		break;
1420 
1421 	default:
1422 		return -EINVAL;
1423 	}
1424 
1425 	if (rc) {
1426 		IPW_ERROR("Failed to register promiscuous network "
1427 			  "device (error %d).\n", rc);
1428 	}
1429 
1430 	return count;
1431 }
1432 
1433 static ssize_t show_rtap_iface(struct device *d,
1434 			struct device_attribute *attr,
1435 			char *buf)
1436 {
1437 	struct ipw_priv *priv = dev_get_drvdata(d);
1438 	if (rtap_iface)
1439 		return sprintf(buf, "%s", priv->prom_net_dev->name);
1440 	else {
1441 		buf[0] = '-';
1442 		buf[1] = '1';
1443 		buf[2] = '\0';
1444 		return 3;
1445 	}
1446 }
1447 
1448 static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1449 		   store_rtap_iface);
1450 
1451 static ssize_t store_rtap_filter(struct device *d,
1452 			 struct device_attribute *attr,
1453 			 const char *buf, size_t count)
1454 {
1455 	struct ipw_priv *priv = dev_get_drvdata(d);
1456 
1457 	if (!priv->prom_priv) {
1458 		IPW_ERROR("Attempting to set filter without "
1459 			  "rtap_iface enabled.\n");
1460 		return -EPERM;
1461 	}
1462 
1463 	priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1464 
1465 	IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1466 		       BIT_ARG16(priv->prom_priv->filter));
1467 
1468 	return count;
1469 }
1470 
1471 static ssize_t show_rtap_filter(struct device *d,
1472 			struct device_attribute *attr,
1473 			char *buf)
1474 {
1475 	struct ipw_priv *priv = dev_get_drvdata(d);
1476 	return sprintf(buf, "0x%04X",
1477 		       priv->prom_priv ? priv->prom_priv->filter : 0);
1478 }
1479 
1480 static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1481 		   store_rtap_filter);
1482 #endif
1483 
1484 static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1485 			     char *buf)
1486 {
1487 	struct ipw_priv *priv = dev_get_drvdata(d);
1488 	return sprintf(buf, "%d\n", priv->ieee->scan_age);
1489 }
1490 
1491 static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1492 			      const char *buf, size_t count)
1493 {
1494 	struct ipw_priv *priv = dev_get_drvdata(d);
1495 	struct net_device *dev = priv->net_dev;
1496 	char buffer[] = "00000000";
1497 	unsigned long len =
1498 	    (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1499 	unsigned long val;
1500 	char *p = buffer;
1501 
1502 	IPW_DEBUG_INFO("enter\n");
1503 
1504 	strncpy(buffer, buf, len);
1505 	buffer[len] = 0;
1506 
1507 	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1508 		p++;
1509 		if (p[0] == 'x' || p[0] == 'X')
1510 			p++;
1511 		val = simple_strtoul(p, &p, 16);
1512 	} else
1513 		val = simple_strtoul(p, &p, 10);
1514 	if (p == buffer) {
1515 		IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1516 	} else {
1517 		priv->ieee->scan_age = val;
1518 		IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1519 	}
1520 
1521 	IPW_DEBUG_INFO("exit\n");
1522 	return len;
1523 }
1524 
1525 static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1526 
1527 static ssize_t show_led(struct device *d, struct device_attribute *attr,
1528 			char *buf)
1529 {
1530 	struct ipw_priv *priv = dev_get_drvdata(d);
1531 	return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1532 }
1533 
1534 static ssize_t store_led(struct device *d, struct device_attribute *attr,
1535 			 const char *buf, size_t count)
1536 {
1537 	struct ipw_priv *priv = dev_get_drvdata(d);
1538 
1539 	IPW_DEBUG_INFO("enter\n");
1540 
1541 	if (count == 0)
1542 		return 0;
1543 
1544 	if (*buf == 0) {
1545 		IPW_DEBUG_LED("Disabling LED control.\n");
1546 		priv->config |= CFG_NO_LED;
1547 		ipw_led_shutdown(priv);
1548 	} else {
1549 		IPW_DEBUG_LED("Enabling LED control.\n");
1550 		priv->config &= ~CFG_NO_LED;
1551 		ipw_led_init(priv);
1552 	}
1553 
1554 	IPW_DEBUG_INFO("exit\n");
1555 	return count;
1556 }
1557 
1558 static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1559 
1560 static ssize_t show_status(struct device *d,
1561 			   struct device_attribute *attr, char *buf)
1562 {
1563 	struct ipw_priv *p = dev_get_drvdata(d);
1564 	return sprintf(buf, "0x%08x\n", (int)p->status);
1565 }
1566 
1567 static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1568 
1569 static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1570 			char *buf)
1571 {
1572 	struct ipw_priv *p = dev_get_drvdata(d);
1573 	return sprintf(buf, "0x%08x\n", (int)p->config);
1574 }
1575 
1576 static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1577 
1578 static ssize_t show_nic_type(struct device *d,
1579 			     struct device_attribute *attr, char *buf)
1580 {
1581 	struct ipw_priv *priv = dev_get_drvdata(d);
1582 	return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1583 }
1584 
1585 static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1586 
1587 static ssize_t show_ucode_version(struct device *d,
1588 				  struct device_attribute *attr, char *buf)
1589 {
1590 	u32 len = sizeof(u32), tmp = 0;
1591 	struct ipw_priv *p = dev_get_drvdata(d);
1592 
1593 	if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1594 		return 0;
1595 
1596 	return sprintf(buf, "0x%08x\n", tmp);
1597 }
1598 
1599 static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1600 
1601 static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1602 			char *buf)
1603 {
1604 	u32 len = sizeof(u32), tmp = 0;
1605 	struct ipw_priv *p = dev_get_drvdata(d);
1606 
1607 	if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1608 		return 0;
1609 
1610 	return sprintf(buf, "0x%08x\n", tmp);
1611 }
1612 
1613 static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1614 
1615 /*
1616  * Add a device attribute to view/control the delay between eeprom
1617  * operations.
1618  */
1619 static ssize_t show_eeprom_delay(struct device *d,
1620 				 struct device_attribute *attr, char *buf)
1621 {
1622 	struct ipw_priv *p = dev_get_drvdata(d);
1623 	int n = p->eeprom_delay;
1624 	return sprintf(buf, "%i\n", n);
1625 }
1626 static ssize_t store_eeprom_delay(struct device *d,
1627 				  struct device_attribute *attr,
1628 				  const char *buf, size_t count)
1629 {
1630 	struct ipw_priv *p = dev_get_drvdata(d);
1631 	sscanf(buf, "%i", &p->eeprom_delay);
1632 	return strnlen(buf, count);
1633 }
1634 
1635 static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1636 		   show_eeprom_delay, store_eeprom_delay);
1637 
1638 static ssize_t show_command_event_reg(struct device *d,
1639 				      struct device_attribute *attr, char *buf)
1640 {
1641 	u32 reg = 0;
1642 	struct ipw_priv *p = dev_get_drvdata(d);
1643 
1644 	reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1645 	return sprintf(buf, "0x%08x\n", reg);
1646 }
1647 static ssize_t store_command_event_reg(struct device *d,
1648 				       struct device_attribute *attr,
1649 				       const char *buf, size_t count)
1650 {
1651 	u32 reg;
1652 	struct ipw_priv *p = dev_get_drvdata(d);
1653 
1654 	sscanf(buf, "%x", &reg);
1655 	ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1656 	return strnlen(buf, count);
1657 }
1658 
1659 static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1660 		   show_command_event_reg, store_command_event_reg);
1661 
1662 static ssize_t show_mem_gpio_reg(struct device *d,
1663 				 struct device_attribute *attr, char *buf)
1664 {
1665 	u32 reg = 0;
1666 	struct ipw_priv *p = dev_get_drvdata(d);
1667 
1668 	reg = ipw_read_reg32(p, 0x301100);
1669 	return sprintf(buf, "0x%08x\n", reg);
1670 }
1671 static ssize_t store_mem_gpio_reg(struct device *d,
1672 				  struct device_attribute *attr,
1673 				  const char *buf, size_t count)
1674 {
1675 	u32 reg;
1676 	struct ipw_priv *p = dev_get_drvdata(d);
1677 
1678 	sscanf(buf, "%x", &reg);
1679 	ipw_write_reg32(p, 0x301100, reg);
1680 	return strnlen(buf, count);
1681 }
1682 
1683 static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1684 		   show_mem_gpio_reg, store_mem_gpio_reg);
1685 
1686 static ssize_t show_indirect_dword(struct device *d,
1687 				   struct device_attribute *attr, char *buf)
1688 {
1689 	u32 reg = 0;
1690 	struct ipw_priv *priv = dev_get_drvdata(d);
1691 
1692 	if (priv->status & STATUS_INDIRECT_DWORD)
1693 		reg = ipw_read_reg32(priv, priv->indirect_dword);
1694 	else
1695 		reg = 0;
1696 
1697 	return sprintf(buf, "0x%08x\n", reg);
1698 }
1699 static ssize_t store_indirect_dword(struct device *d,
1700 				    struct device_attribute *attr,
1701 				    const char *buf, size_t count)
1702 {
1703 	struct ipw_priv *priv = dev_get_drvdata(d);
1704 
1705 	sscanf(buf, "%x", &priv->indirect_dword);
1706 	priv->status |= STATUS_INDIRECT_DWORD;
1707 	return strnlen(buf, count);
1708 }
1709 
1710 static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1711 		   show_indirect_dword, store_indirect_dword);
1712 
1713 static ssize_t show_indirect_byte(struct device *d,
1714 				  struct device_attribute *attr, char *buf)
1715 {
1716 	u8 reg = 0;
1717 	struct ipw_priv *priv = dev_get_drvdata(d);
1718 
1719 	if (priv->status & STATUS_INDIRECT_BYTE)
1720 		reg = ipw_read_reg8(priv, priv->indirect_byte);
1721 	else
1722 		reg = 0;
1723 
1724 	return sprintf(buf, "0x%02x\n", reg);
1725 }
1726 static ssize_t store_indirect_byte(struct device *d,
1727 				   struct device_attribute *attr,
1728 				   const char *buf, size_t count)
1729 {
1730 	struct ipw_priv *priv = dev_get_drvdata(d);
1731 
1732 	sscanf(buf, "%x", &priv->indirect_byte);
1733 	priv->status |= STATUS_INDIRECT_BYTE;
1734 	return strnlen(buf, count);
1735 }
1736 
1737 static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1738 		   show_indirect_byte, store_indirect_byte);
1739 
1740 static ssize_t show_direct_dword(struct device *d,
1741 				 struct device_attribute *attr, char *buf)
1742 {
1743 	u32 reg = 0;
1744 	struct ipw_priv *priv = dev_get_drvdata(d);
1745 
1746 	if (priv->status & STATUS_DIRECT_DWORD)
1747 		reg = ipw_read32(priv, priv->direct_dword);
1748 	else
1749 		reg = 0;
1750 
1751 	return sprintf(buf, "0x%08x\n", reg);
1752 }
1753 static ssize_t store_direct_dword(struct device *d,
1754 				  struct device_attribute *attr,
1755 				  const char *buf, size_t count)
1756 {
1757 	struct ipw_priv *priv = dev_get_drvdata(d);
1758 
1759 	sscanf(buf, "%x", &priv->direct_dword);
1760 	priv->status |= STATUS_DIRECT_DWORD;
1761 	return strnlen(buf, count);
1762 }
1763 
1764 static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1765 		   show_direct_dword, store_direct_dword);
1766 
1767 static int rf_kill_active(struct ipw_priv *priv)
1768 {
1769 	if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
1770 		priv->status |= STATUS_RF_KILL_HW;
1771 		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
1772 	} else {
1773 		priv->status &= ~STATUS_RF_KILL_HW;
1774 		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
1775 	}
1776 
1777 	return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1778 }
1779 
1780 static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1781 			    char *buf)
1782 {
1783 	/* 0 - RF kill not enabled
1784 	   1 - SW based RF kill active (sysfs)
1785 	   2 - HW based RF kill active
1786 	   3 - Both HW and SW baed RF kill active */
1787 	struct ipw_priv *priv = dev_get_drvdata(d);
1788 	int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1789 	    (rf_kill_active(priv) ? 0x2 : 0x0);
1790 	return sprintf(buf, "%i\n", val);
1791 }
1792 
1793 static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1794 {
1795 	if ((disable_radio ? 1 : 0) ==
1796 	    ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1797 		return 0;
1798 
1799 	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO  %s\n",
1800 			  disable_radio ? "OFF" : "ON");
1801 
1802 	if (disable_radio) {
1803 		priv->status |= STATUS_RF_KILL_SW;
1804 
1805 		cancel_delayed_work(&priv->request_scan);
1806 		cancel_delayed_work(&priv->request_direct_scan);
1807 		cancel_delayed_work(&priv->request_passive_scan);
1808 		cancel_delayed_work(&priv->scan_event);
1809 		schedule_work(&priv->down);
1810 	} else {
1811 		priv->status &= ~STATUS_RF_KILL_SW;
1812 		if (rf_kill_active(priv)) {
1813 			IPW_DEBUG_RF_KILL("Can not turn radio back on - "
1814 					  "disabled by HW switch\n");
1815 			/* Make sure the RF_KILL check timer is running */
1816 			cancel_delayed_work(&priv->rf_kill);
1817 			schedule_delayed_work(&priv->rf_kill,
1818 					      round_jiffies_relative(2 * HZ));
1819 		} else
1820 			schedule_work(&priv->up);
1821 	}
1822 
1823 	return 1;
1824 }
1825 
1826 static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1827 			     const char *buf, size_t count)
1828 {
1829 	struct ipw_priv *priv = dev_get_drvdata(d);
1830 
1831 	ipw_radio_kill_sw(priv, buf[0] == '1');
1832 
1833 	return count;
1834 }
1835 
1836 static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1837 
1838 static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1839 			       char *buf)
1840 {
1841 	struct ipw_priv *priv = dev_get_drvdata(d);
1842 	int pos = 0, len = 0;
1843 	if (priv->config & CFG_SPEED_SCAN) {
1844 		while (priv->speed_scan[pos] != 0)
1845 			len += sprintf(&buf[len], "%d ",
1846 				       priv->speed_scan[pos++]);
1847 		return len + sprintf(&buf[len], "\n");
1848 	}
1849 
1850 	return sprintf(buf, "0\n");
1851 }
1852 
1853 static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1854 				const char *buf, size_t count)
1855 {
1856 	struct ipw_priv *priv = dev_get_drvdata(d);
1857 	int channel, pos = 0;
1858 	const char *p = buf;
1859 
1860 	/* list of space separated channels to scan, optionally ending with 0 */
1861 	while ((channel = simple_strtol(p, NULL, 0))) {
1862 		if (pos == MAX_SPEED_SCAN - 1) {
1863 			priv->speed_scan[pos] = 0;
1864 			break;
1865 		}
1866 
1867 		if (libipw_is_valid_channel(priv->ieee, channel))
1868 			priv->speed_scan[pos++] = channel;
1869 		else
1870 			IPW_WARNING("Skipping invalid channel request: %d\n",
1871 				    channel);
1872 		p = strchr(p, ' ');
1873 		if (!p)
1874 			break;
1875 		while (*p == ' ' || *p == '\t')
1876 			p++;
1877 	}
1878 
1879 	if (pos == 0)
1880 		priv->config &= ~CFG_SPEED_SCAN;
1881 	else {
1882 		priv->speed_scan_pos = 0;
1883 		priv->config |= CFG_SPEED_SCAN;
1884 	}
1885 
1886 	return count;
1887 }
1888 
1889 static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1890 		   store_speed_scan);
1891 
1892 static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1893 			      char *buf)
1894 {
1895 	struct ipw_priv *priv = dev_get_drvdata(d);
1896 	return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1897 }
1898 
1899 static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1900 			       const char *buf, size_t count)
1901 {
1902 	struct ipw_priv *priv = dev_get_drvdata(d);
1903 	if (buf[0] == '1')
1904 		priv->config |= CFG_NET_STATS;
1905 	else
1906 		priv->config &= ~CFG_NET_STATS;
1907 
1908 	return count;
1909 }
1910 
1911 static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1912 		   show_net_stats, store_net_stats);
1913 
1914 static ssize_t show_channels(struct device *d,
1915 			     struct device_attribute *attr,
1916 			     char *buf)
1917 {
1918 	struct ipw_priv *priv = dev_get_drvdata(d);
1919 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1920 	int len = 0, i;
1921 
1922 	len = sprintf(&buf[len],
1923 		      "Displaying %d channels in 2.4Ghz band "
1924 		      "(802.11bg):\n", geo->bg_channels);
1925 
1926 	for (i = 0; i < geo->bg_channels; i++) {
1927 		len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1928 			       geo->bg[i].channel,
1929 			       geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
1930 			       " (radar spectrum)" : "",
1931 			       ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
1932 				(geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
1933 			       ? "" : ", IBSS",
1934 			       geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1935 			       "passive only" : "active/passive",
1936 			       geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
1937 			       "B" : "B/G");
1938 	}
1939 
1940 	len += sprintf(&buf[len],
1941 		       "Displaying %d channels in 5.2Ghz band "
1942 		       "(802.11a):\n", geo->a_channels);
1943 	for (i = 0; i < geo->a_channels; i++) {
1944 		len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1945 			       geo->a[i].channel,
1946 			       geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
1947 			       " (radar spectrum)" : "",
1948 			       ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
1949 				(geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
1950 			       ? "" : ", IBSS",
1951 			       geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
1952 			       "passive only" : "active/passive");
1953 	}
1954 
1955 	return len;
1956 }
1957 
1958 static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1959 
1960 static void notify_wx_assoc_event(struct ipw_priv *priv)
1961 {
1962 	union iwreq_data wrqu;
1963 	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1964 	if (priv->status & STATUS_ASSOCIATED)
1965 		memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1966 	else
1967 		eth_zero_addr(wrqu.ap_addr.sa_data);
1968 	wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1969 }
1970 
1971 static void ipw_irq_tasklet(struct ipw_priv *priv)
1972 {
1973 	u32 inta, inta_mask, handled = 0;
1974 	unsigned long flags;
1975 	int rc = 0;
1976 
1977 	spin_lock_irqsave(&priv->irq_lock, flags);
1978 
1979 	inta = ipw_read32(priv, IPW_INTA_RW);
1980 	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1981 
1982 	if (inta == 0xFFFFFFFF) {
1983 		/* Hardware disappeared */
1984 		IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
1985 		/* Only handle the cached INTA values */
1986 		inta = 0;
1987 	}
1988 	inta &= (IPW_INTA_MASK_ALL & inta_mask);
1989 
1990 	/* Add any cached INTA values that need to be handled */
1991 	inta |= priv->isr_inta;
1992 
1993 	spin_unlock_irqrestore(&priv->irq_lock, flags);
1994 
1995 	spin_lock_irqsave(&priv->lock, flags);
1996 
1997 	/* handle all the justifications for the interrupt */
1998 	if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1999 		ipw_rx(priv);
2000 		handled |= IPW_INTA_BIT_RX_TRANSFER;
2001 	}
2002 
2003 	if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
2004 		IPW_DEBUG_HC("Command completed.\n");
2005 		rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
2006 		priv->status &= ~STATUS_HCMD_ACTIVE;
2007 		wake_up_interruptible(&priv->wait_command_queue);
2008 		handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
2009 	}
2010 
2011 	if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
2012 		IPW_DEBUG_TX("TX_QUEUE_1\n");
2013 		rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
2014 		handled |= IPW_INTA_BIT_TX_QUEUE_1;
2015 	}
2016 
2017 	if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
2018 		IPW_DEBUG_TX("TX_QUEUE_2\n");
2019 		rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
2020 		handled |= IPW_INTA_BIT_TX_QUEUE_2;
2021 	}
2022 
2023 	if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
2024 		IPW_DEBUG_TX("TX_QUEUE_3\n");
2025 		rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
2026 		handled |= IPW_INTA_BIT_TX_QUEUE_3;
2027 	}
2028 
2029 	if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
2030 		IPW_DEBUG_TX("TX_QUEUE_4\n");
2031 		rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
2032 		handled |= IPW_INTA_BIT_TX_QUEUE_4;
2033 	}
2034 
2035 	if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
2036 		IPW_WARNING("STATUS_CHANGE\n");
2037 		handled |= IPW_INTA_BIT_STATUS_CHANGE;
2038 	}
2039 
2040 	if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
2041 		IPW_WARNING("TX_PERIOD_EXPIRED\n");
2042 		handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
2043 	}
2044 
2045 	if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
2046 		IPW_WARNING("HOST_CMD_DONE\n");
2047 		handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
2048 	}
2049 
2050 	if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
2051 		IPW_WARNING("FW_INITIALIZATION_DONE\n");
2052 		handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
2053 	}
2054 
2055 	if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2056 		IPW_WARNING("PHY_OFF_DONE\n");
2057 		handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2058 	}
2059 
2060 	if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2061 		IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2062 		priv->status |= STATUS_RF_KILL_HW;
2063 		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
2064 		wake_up_interruptible(&priv->wait_command_queue);
2065 		priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2066 		cancel_delayed_work(&priv->request_scan);
2067 		cancel_delayed_work(&priv->request_direct_scan);
2068 		cancel_delayed_work(&priv->request_passive_scan);
2069 		cancel_delayed_work(&priv->scan_event);
2070 		schedule_work(&priv->link_down);
2071 		schedule_delayed_work(&priv->rf_kill, 2 * HZ);
2072 		handled |= IPW_INTA_BIT_RF_KILL_DONE;
2073 	}
2074 
2075 	if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2076 		IPW_WARNING("Firmware error detected.  Restarting.\n");
2077 		if (priv->error) {
2078 			IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2079 			if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2080 				struct ipw_fw_error *error =
2081 				    ipw_alloc_error_log(priv);
2082 				ipw_dump_error_log(priv, error);
2083 				kfree(error);
2084 			}
2085 		} else {
2086 			priv->error = ipw_alloc_error_log(priv);
2087 			if (priv->error)
2088 				IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2089 			else
2090 				IPW_DEBUG_FW("Error allocating sysfs 'error' "
2091 					     "log.\n");
2092 			if (ipw_debug_level & IPW_DL_FW_ERRORS)
2093 				ipw_dump_error_log(priv, priv->error);
2094 		}
2095 
2096 		/* XXX: If hardware encryption is for WPA/WPA2,
2097 		 * we have to notify the supplicant. */
2098 		if (priv->ieee->sec.encrypt) {
2099 			priv->status &= ~STATUS_ASSOCIATED;
2100 			notify_wx_assoc_event(priv);
2101 		}
2102 
2103 		/* Keep the restart process from trying to send host
2104 		 * commands by clearing the INIT status bit */
2105 		priv->status &= ~STATUS_INIT;
2106 
2107 		/* Cancel currently queued command. */
2108 		priv->status &= ~STATUS_HCMD_ACTIVE;
2109 		wake_up_interruptible(&priv->wait_command_queue);
2110 
2111 		schedule_work(&priv->adapter_restart);
2112 		handled |= IPW_INTA_BIT_FATAL_ERROR;
2113 	}
2114 
2115 	if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2116 		IPW_ERROR("Parity error\n");
2117 		handled |= IPW_INTA_BIT_PARITY_ERROR;
2118 	}
2119 
2120 	if (handled != inta) {
2121 		IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2122 	}
2123 
2124 	spin_unlock_irqrestore(&priv->lock, flags);
2125 
2126 	/* enable all interrupts */
2127 	ipw_enable_interrupts(priv);
2128 }
2129 
2130 #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
2131 static char *get_cmd_string(u8 cmd)
2132 {
2133 	switch (cmd) {
2134 		IPW_CMD(HOST_COMPLETE);
2135 		IPW_CMD(POWER_DOWN);
2136 		IPW_CMD(SYSTEM_CONFIG);
2137 		IPW_CMD(MULTICAST_ADDRESS);
2138 		IPW_CMD(SSID);
2139 		IPW_CMD(ADAPTER_ADDRESS);
2140 		IPW_CMD(PORT_TYPE);
2141 		IPW_CMD(RTS_THRESHOLD);
2142 		IPW_CMD(FRAG_THRESHOLD);
2143 		IPW_CMD(POWER_MODE);
2144 		IPW_CMD(WEP_KEY);
2145 		IPW_CMD(TGI_TX_KEY);
2146 		IPW_CMD(SCAN_REQUEST);
2147 		IPW_CMD(SCAN_REQUEST_EXT);
2148 		IPW_CMD(ASSOCIATE);
2149 		IPW_CMD(SUPPORTED_RATES);
2150 		IPW_CMD(SCAN_ABORT);
2151 		IPW_CMD(TX_FLUSH);
2152 		IPW_CMD(QOS_PARAMETERS);
2153 		IPW_CMD(DINO_CONFIG);
2154 		IPW_CMD(RSN_CAPABILITIES);
2155 		IPW_CMD(RX_KEY);
2156 		IPW_CMD(CARD_DISABLE);
2157 		IPW_CMD(SEED_NUMBER);
2158 		IPW_CMD(TX_POWER);
2159 		IPW_CMD(COUNTRY_INFO);
2160 		IPW_CMD(AIRONET_INFO);
2161 		IPW_CMD(AP_TX_POWER);
2162 		IPW_CMD(CCKM_INFO);
2163 		IPW_CMD(CCX_VER_INFO);
2164 		IPW_CMD(SET_CALIBRATION);
2165 		IPW_CMD(SENSITIVITY_CALIB);
2166 		IPW_CMD(RETRY_LIMIT);
2167 		IPW_CMD(IPW_PRE_POWER_DOWN);
2168 		IPW_CMD(VAP_BEACON_TEMPLATE);
2169 		IPW_CMD(VAP_DTIM_PERIOD);
2170 		IPW_CMD(EXT_SUPPORTED_RATES);
2171 		IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2172 		IPW_CMD(VAP_QUIET_INTERVALS);
2173 		IPW_CMD(VAP_CHANNEL_SWITCH);
2174 		IPW_CMD(VAP_MANDATORY_CHANNELS);
2175 		IPW_CMD(VAP_CELL_PWR_LIMIT);
2176 		IPW_CMD(VAP_CF_PARAM_SET);
2177 		IPW_CMD(VAP_SET_BEACONING_STATE);
2178 		IPW_CMD(MEASUREMENT);
2179 		IPW_CMD(POWER_CAPABILITY);
2180 		IPW_CMD(SUPPORTED_CHANNELS);
2181 		IPW_CMD(TPC_REPORT);
2182 		IPW_CMD(WME_INFO);
2183 		IPW_CMD(PRODUCTION_COMMAND);
2184 	default:
2185 		return "UNKNOWN";
2186 	}
2187 }
2188 
2189 #define HOST_COMPLETE_TIMEOUT HZ
2190 
2191 static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2192 {
2193 	int rc = 0;
2194 	unsigned long flags;
2195 	unsigned long now, end;
2196 
2197 	spin_lock_irqsave(&priv->lock, flags);
2198 	if (priv->status & STATUS_HCMD_ACTIVE) {
2199 		IPW_ERROR("Failed to send %s: Already sending a command.\n",
2200 			  get_cmd_string(cmd->cmd));
2201 		spin_unlock_irqrestore(&priv->lock, flags);
2202 		return -EAGAIN;
2203 	}
2204 
2205 	priv->status |= STATUS_HCMD_ACTIVE;
2206 
2207 	if (priv->cmdlog) {
2208 		priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2209 		priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2210 		priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2211 		memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2212 		       cmd->len);
2213 		priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2214 	}
2215 
2216 	IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2217 		     get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2218 		     priv->status);
2219 
2220 #ifndef DEBUG_CMD_WEP_KEY
2221 	if (cmd->cmd == IPW_CMD_WEP_KEY)
2222 		IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
2223 	else
2224 #endif
2225 		printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2226 
2227 	rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2228 	if (rc) {
2229 		priv->status &= ~STATUS_HCMD_ACTIVE;
2230 		IPW_ERROR("Failed to send %s: Reason %d\n",
2231 			  get_cmd_string(cmd->cmd), rc);
2232 		spin_unlock_irqrestore(&priv->lock, flags);
2233 		goto exit;
2234 	}
2235 	spin_unlock_irqrestore(&priv->lock, flags);
2236 
2237 	now = jiffies;
2238 	end = now + HOST_COMPLETE_TIMEOUT;
2239 again:
2240 	rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2241 					      !(priv->
2242 						status & STATUS_HCMD_ACTIVE),
2243 					      end - now);
2244 	if (rc < 0) {
2245 		now = jiffies;
2246 		if (time_before(now, end))
2247 			goto again;
2248 		rc = 0;
2249 	}
2250 
2251 	if (rc == 0) {
2252 		spin_lock_irqsave(&priv->lock, flags);
2253 		if (priv->status & STATUS_HCMD_ACTIVE) {
2254 			IPW_ERROR("Failed to send %s: Command timed out.\n",
2255 				  get_cmd_string(cmd->cmd));
2256 			priv->status &= ~STATUS_HCMD_ACTIVE;
2257 			spin_unlock_irqrestore(&priv->lock, flags);
2258 			rc = -EIO;
2259 			goto exit;
2260 		}
2261 		spin_unlock_irqrestore(&priv->lock, flags);
2262 	} else
2263 		rc = 0;
2264 
2265 	if (priv->status & STATUS_RF_KILL_HW) {
2266 		IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2267 			  get_cmd_string(cmd->cmd));
2268 		rc = -EIO;
2269 		goto exit;
2270 	}
2271 
2272       exit:
2273 	if (priv->cmdlog) {
2274 		priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2275 		priv->cmdlog_pos %= priv->cmdlog_len;
2276 	}
2277 	return rc;
2278 }
2279 
2280 static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2281 {
2282 	struct host_cmd cmd = {
2283 		.cmd = command,
2284 	};
2285 
2286 	return __ipw_send_cmd(priv, &cmd);
2287 }
2288 
2289 static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2290 			    void *data)
2291 {
2292 	struct host_cmd cmd = {
2293 		.cmd = command,
2294 		.len = len,
2295 		.param = data,
2296 	};
2297 
2298 	return __ipw_send_cmd(priv, &cmd);
2299 }
2300 
2301 static int ipw_send_host_complete(struct ipw_priv *priv)
2302 {
2303 	if (!priv) {
2304 		IPW_ERROR("Invalid args\n");
2305 		return -1;
2306 	}
2307 
2308 	return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2309 }
2310 
2311 static int ipw_send_system_config(struct ipw_priv *priv)
2312 {
2313 	return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2314 				sizeof(priv->sys_config),
2315 				&priv->sys_config);
2316 }
2317 
2318 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2319 {
2320 	if (!priv || !ssid) {
2321 		IPW_ERROR("Invalid args\n");
2322 		return -1;
2323 	}
2324 
2325 	return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2326 				ssid);
2327 }
2328 
2329 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2330 {
2331 	if (!priv || !mac) {
2332 		IPW_ERROR("Invalid args\n");
2333 		return -1;
2334 	}
2335 
2336 	IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2337 		       priv->net_dev->name, mac);
2338 
2339 	return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2340 }
2341 
2342 static void ipw_adapter_restart(void *adapter)
2343 {
2344 	struct ipw_priv *priv = adapter;
2345 
2346 	if (priv->status & STATUS_RF_KILL_MASK)
2347 		return;
2348 
2349 	ipw_down(priv);
2350 
2351 	if (priv->assoc_network &&
2352 	    (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2353 		ipw_remove_current_network(priv);
2354 
2355 	if (ipw_up(priv)) {
2356 		IPW_ERROR("Failed to up device\n");
2357 		return;
2358 	}
2359 }
2360 
2361 static void ipw_bg_adapter_restart(struct work_struct *work)
2362 {
2363 	struct ipw_priv *priv =
2364 		container_of(work, struct ipw_priv, adapter_restart);
2365 	mutex_lock(&priv->mutex);
2366 	ipw_adapter_restart(priv);
2367 	mutex_unlock(&priv->mutex);
2368 }
2369 
2370 static void ipw_abort_scan(struct ipw_priv *priv);
2371 
2372 #define IPW_SCAN_CHECK_WATCHDOG	(5 * HZ)
2373 
2374 static void ipw_scan_check(void *data)
2375 {
2376 	struct ipw_priv *priv = data;
2377 
2378 	if (priv->status & STATUS_SCAN_ABORTING) {
2379 		IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2380 			       "adapter after (%dms).\n",
2381 			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2382 		schedule_work(&priv->adapter_restart);
2383 	} else if (priv->status & STATUS_SCANNING) {
2384 		IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
2385 			       "after (%dms).\n",
2386 			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2387 		ipw_abort_scan(priv);
2388 		schedule_delayed_work(&priv->scan_check, HZ);
2389 	}
2390 }
2391 
2392 static void ipw_bg_scan_check(struct work_struct *work)
2393 {
2394 	struct ipw_priv *priv =
2395 		container_of(work, struct ipw_priv, scan_check.work);
2396 	mutex_lock(&priv->mutex);
2397 	ipw_scan_check(priv);
2398 	mutex_unlock(&priv->mutex);
2399 }
2400 
2401 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2402 				     struct ipw_scan_request_ext *request)
2403 {
2404 	return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2405 				sizeof(*request), request);
2406 }
2407 
2408 static int ipw_send_scan_abort(struct ipw_priv *priv)
2409 {
2410 	if (!priv) {
2411 		IPW_ERROR("Invalid args\n");
2412 		return -1;
2413 	}
2414 
2415 	return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2416 }
2417 
2418 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2419 {
2420 	struct ipw_sensitivity_calib calib = {
2421 		.beacon_rssi_raw = cpu_to_le16(sens),
2422 	};
2423 
2424 	return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2425 				&calib);
2426 }
2427 
2428 static int ipw_send_associate(struct ipw_priv *priv,
2429 			      struct ipw_associate *associate)
2430 {
2431 	if (!priv || !associate) {
2432 		IPW_ERROR("Invalid args\n");
2433 		return -1;
2434 	}
2435 
2436 	return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2437 				associate);
2438 }
2439 
2440 static int ipw_send_supported_rates(struct ipw_priv *priv,
2441 				    struct ipw_supported_rates *rates)
2442 {
2443 	if (!priv || !rates) {
2444 		IPW_ERROR("Invalid args\n");
2445 		return -1;
2446 	}
2447 
2448 	return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2449 				rates);
2450 }
2451 
2452 static int ipw_set_random_seed(struct ipw_priv *priv)
2453 {
2454 	u32 val;
2455 
2456 	if (!priv) {
2457 		IPW_ERROR("Invalid args\n");
2458 		return -1;
2459 	}
2460 
2461 	get_random_bytes(&val, sizeof(val));
2462 
2463 	return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2464 }
2465 
2466 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2467 {
2468 	__le32 v = cpu_to_le32(phy_off);
2469 	if (!priv) {
2470 		IPW_ERROR("Invalid args\n");
2471 		return -1;
2472 	}
2473 
2474 	return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2475 }
2476 
2477 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2478 {
2479 	if (!priv || !power) {
2480 		IPW_ERROR("Invalid args\n");
2481 		return -1;
2482 	}
2483 
2484 	return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2485 }
2486 
2487 static int ipw_set_tx_power(struct ipw_priv *priv)
2488 {
2489 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
2490 	struct ipw_tx_power tx_power;
2491 	s8 max_power;
2492 	int i;
2493 
2494 	memset(&tx_power, 0, sizeof(tx_power));
2495 
2496 	/* configure device for 'G' band */
2497 	tx_power.ieee_mode = IPW_G_MODE;
2498 	tx_power.num_channels = geo->bg_channels;
2499 	for (i = 0; i < geo->bg_channels; i++) {
2500 		max_power = geo->bg[i].max_power;
2501 		tx_power.channels_tx_power[i].channel_number =
2502 		    geo->bg[i].channel;
2503 		tx_power.channels_tx_power[i].tx_power = max_power ?
2504 		    min(max_power, priv->tx_power) : priv->tx_power;
2505 	}
2506 	if (ipw_send_tx_power(priv, &tx_power))
2507 		return -EIO;
2508 
2509 	/* configure device to also handle 'B' band */
2510 	tx_power.ieee_mode = IPW_B_MODE;
2511 	if (ipw_send_tx_power(priv, &tx_power))
2512 		return -EIO;
2513 
2514 	/* configure device to also handle 'A' band */
2515 	if (priv->ieee->abg_true) {
2516 		tx_power.ieee_mode = IPW_A_MODE;
2517 		tx_power.num_channels = geo->a_channels;
2518 		for (i = 0; i < tx_power.num_channels; i++) {
2519 			max_power = geo->a[i].max_power;
2520 			tx_power.channels_tx_power[i].channel_number =
2521 			    geo->a[i].channel;
2522 			tx_power.channels_tx_power[i].tx_power = max_power ?
2523 			    min(max_power, priv->tx_power) : priv->tx_power;
2524 		}
2525 		if (ipw_send_tx_power(priv, &tx_power))
2526 			return -EIO;
2527 	}
2528 	return 0;
2529 }
2530 
2531 static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2532 {
2533 	struct ipw_rts_threshold rts_threshold = {
2534 		.rts_threshold = cpu_to_le16(rts),
2535 	};
2536 
2537 	if (!priv) {
2538 		IPW_ERROR("Invalid args\n");
2539 		return -1;
2540 	}
2541 
2542 	return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2543 				sizeof(rts_threshold), &rts_threshold);
2544 }
2545 
2546 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2547 {
2548 	struct ipw_frag_threshold frag_threshold = {
2549 		.frag_threshold = cpu_to_le16(frag),
2550 	};
2551 
2552 	if (!priv) {
2553 		IPW_ERROR("Invalid args\n");
2554 		return -1;
2555 	}
2556 
2557 	return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2558 				sizeof(frag_threshold), &frag_threshold);
2559 }
2560 
2561 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2562 {
2563 	__le32 param;
2564 
2565 	if (!priv) {
2566 		IPW_ERROR("Invalid args\n");
2567 		return -1;
2568 	}
2569 
2570 	/* If on battery, set to 3, if AC set to CAM, else user
2571 	 * level */
2572 	switch (mode) {
2573 	case IPW_POWER_BATTERY:
2574 		param = cpu_to_le32(IPW_POWER_INDEX_3);
2575 		break;
2576 	case IPW_POWER_AC:
2577 		param = cpu_to_le32(IPW_POWER_MODE_CAM);
2578 		break;
2579 	default:
2580 		param = cpu_to_le32(mode);
2581 		break;
2582 	}
2583 
2584 	return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2585 				&param);
2586 }
2587 
2588 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2589 {
2590 	struct ipw_retry_limit retry_limit = {
2591 		.short_retry_limit = slimit,
2592 		.long_retry_limit = llimit
2593 	};
2594 
2595 	if (!priv) {
2596 		IPW_ERROR("Invalid args\n");
2597 		return -1;
2598 	}
2599 
2600 	return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2601 				&retry_limit);
2602 }
2603 
2604 /*
2605  * The IPW device contains a Microwire compatible EEPROM that stores
2606  * various data like the MAC address.  Usually the firmware has exclusive
2607  * access to the eeprom, but during device initialization (before the
2608  * device driver has sent the HostComplete command to the firmware) the
2609  * device driver has read access to the EEPROM by way of indirect addressing
2610  * through a couple of memory mapped registers.
2611  *
2612  * The following is a simplified implementation for pulling data out of the
2613  * the eeprom, along with some helper functions to find information in
2614  * the per device private data's copy of the eeprom.
2615  *
2616  * NOTE: To better understand how these functions work (i.e what is a chip
2617  *       select and why do have to keep driving the eeprom clock?), read
2618  *       just about any data sheet for a Microwire compatible EEPROM.
2619  */
2620 
2621 /* write a 32 bit value into the indirect accessor register */
2622 static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2623 {
2624 	ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2625 
2626 	/* the eeprom requires some time to complete the operation */
2627 	udelay(p->eeprom_delay);
2628 }
2629 
2630 /* perform a chip select operation */
2631 static void eeprom_cs(struct ipw_priv *priv)
2632 {
2633 	eeprom_write_reg(priv, 0);
2634 	eeprom_write_reg(priv, EEPROM_BIT_CS);
2635 	eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2636 	eeprom_write_reg(priv, EEPROM_BIT_CS);
2637 }
2638 
2639 /* perform a chip select operation */
2640 static void eeprom_disable_cs(struct ipw_priv *priv)
2641 {
2642 	eeprom_write_reg(priv, EEPROM_BIT_CS);
2643 	eeprom_write_reg(priv, 0);
2644 	eeprom_write_reg(priv, EEPROM_BIT_SK);
2645 }
2646 
2647 /* push a single bit down to the eeprom */
2648 static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2649 {
2650 	int d = (bit ? EEPROM_BIT_DI : 0);
2651 	eeprom_write_reg(p, EEPROM_BIT_CS | d);
2652 	eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2653 }
2654 
2655 /* push an opcode followed by an address down to the eeprom */
2656 static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2657 {
2658 	int i;
2659 
2660 	eeprom_cs(priv);
2661 	eeprom_write_bit(priv, 1);
2662 	eeprom_write_bit(priv, op & 2);
2663 	eeprom_write_bit(priv, op & 1);
2664 	for (i = 7; i >= 0; i--) {
2665 		eeprom_write_bit(priv, addr & (1 << i));
2666 	}
2667 }
2668 
2669 /* pull 16 bits off the eeprom, one bit at a time */
2670 static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2671 {
2672 	int i;
2673 	u16 r = 0;
2674 
2675 	/* Send READ Opcode */
2676 	eeprom_op(priv, EEPROM_CMD_READ, addr);
2677 
2678 	/* Send dummy bit */
2679 	eeprom_write_reg(priv, EEPROM_BIT_CS);
2680 
2681 	/* Read the byte off the eeprom one bit at a time */
2682 	for (i = 0; i < 16; i++) {
2683 		u32 data = 0;
2684 		eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2685 		eeprom_write_reg(priv, EEPROM_BIT_CS);
2686 		data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2687 		r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2688 	}
2689 
2690 	/* Send another dummy bit */
2691 	eeprom_write_reg(priv, 0);
2692 	eeprom_disable_cs(priv);
2693 
2694 	return r;
2695 }
2696 
2697 /* helper function for pulling the mac address out of the private */
2698 /* data's copy of the eeprom data                                 */
2699 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2700 {
2701 	memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN);
2702 }
2703 
2704 static void ipw_read_eeprom(struct ipw_priv *priv)
2705 {
2706 	int i;
2707 	__le16 *eeprom = (__le16 *) priv->eeprom;
2708 
2709 	IPW_DEBUG_TRACE(">>\n");
2710 
2711 	/* read entire contents of eeprom into private buffer */
2712 	for (i = 0; i < 128; i++)
2713 		eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2714 
2715 	IPW_DEBUG_TRACE("<<\n");
2716 }
2717 
2718 /*
2719  * Either the device driver (i.e. the host) or the firmware can
2720  * load eeprom data into the designated region in SRAM.  If neither
2721  * happens then the FW will shutdown with a fatal error.
2722  *
2723  * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2724  * bit needs region of shared SRAM needs to be non-zero.
2725  */
2726 static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2727 {
2728 	int i;
2729 
2730 	IPW_DEBUG_TRACE(">>\n");
2731 
2732 	/*
2733 	   If the data looks correct, then copy it to our private
2734 	   copy.  Otherwise let the firmware know to perform the operation
2735 	   on its own.
2736 	 */
2737 	if (priv->eeprom[EEPROM_VERSION] != 0) {
2738 		IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2739 
2740 		/* write the eeprom data to sram */
2741 		for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2742 			ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2743 
2744 		/* Do not load eeprom data on fatal error or suspend */
2745 		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2746 	} else {
2747 		IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n");
2748 
2749 		/* Load eeprom data on fatal error or suspend */
2750 		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2751 	}
2752 
2753 	IPW_DEBUG_TRACE("<<\n");
2754 }
2755 
2756 static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2757 {
2758 	count >>= 2;
2759 	if (!count)
2760 		return;
2761 	_ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2762 	while (count--)
2763 		_ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2764 }
2765 
2766 static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2767 {
2768 	ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2769 			CB_NUMBER_OF_ELEMENTS_SMALL *
2770 			sizeof(struct command_block));
2771 }
2772 
2773 static int ipw_fw_dma_enable(struct ipw_priv *priv)
2774 {				/* start dma engine but no transfers yet */
2775 
2776 	IPW_DEBUG_FW(">> :\n");
2777 
2778 	/* Start the dma */
2779 	ipw_fw_dma_reset_command_blocks(priv);
2780 
2781 	/* Write CB base address */
2782 	ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2783 
2784 	IPW_DEBUG_FW("<< :\n");
2785 	return 0;
2786 }
2787 
2788 static void ipw_fw_dma_abort(struct ipw_priv *priv)
2789 {
2790 	u32 control = 0;
2791 
2792 	IPW_DEBUG_FW(">> :\n");
2793 
2794 	/* set the Stop and Abort bit */
2795 	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2796 	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2797 	priv->sram_desc.last_cb_index = 0;
2798 
2799 	IPW_DEBUG_FW("<<\n");
2800 }
2801 
2802 static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2803 					  struct command_block *cb)
2804 {
2805 	u32 address =
2806 	    IPW_SHARED_SRAM_DMA_CONTROL +
2807 	    (sizeof(struct command_block) * index);
2808 	IPW_DEBUG_FW(">> :\n");
2809 
2810 	ipw_write_indirect(priv, address, (u8 *) cb,
2811 			   (int)sizeof(struct command_block));
2812 
2813 	IPW_DEBUG_FW("<< :\n");
2814 	return 0;
2815 
2816 }
2817 
2818 static int ipw_fw_dma_kick(struct ipw_priv *priv)
2819 {
2820 	u32 control = 0;
2821 	u32 index = 0;
2822 
2823 	IPW_DEBUG_FW(">> :\n");
2824 
2825 	for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2826 		ipw_fw_dma_write_command_block(priv, index,
2827 					       &priv->sram_desc.cb_list[index]);
2828 
2829 	/* Enable the DMA in the CSR register */
2830 	ipw_clear_bit(priv, IPW_RESET_REG,
2831 		      IPW_RESET_REG_MASTER_DISABLED |
2832 		      IPW_RESET_REG_STOP_MASTER);
2833 
2834 	/* Set the Start bit. */
2835 	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2836 	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2837 
2838 	IPW_DEBUG_FW("<< :\n");
2839 	return 0;
2840 }
2841 
2842 static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2843 {
2844 	u32 address;
2845 	u32 register_value = 0;
2846 	u32 cb_fields_address = 0;
2847 
2848 	IPW_DEBUG_FW(">> :\n");
2849 	address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2850 	IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
2851 
2852 	/* Read the DMA Controlor register */
2853 	register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2854 	IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
2855 
2856 	/* Print the CB values */
2857 	cb_fields_address = address;
2858 	register_value = ipw_read_reg32(priv, cb_fields_address);
2859 	IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
2860 
2861 	cb_fields_address += sizeof(u32);
2862 	register_value = ipw_read_reg32(priv, cb_fields_address);
2863 	IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
2864 
2865 	cb_fields_address += sizeof(u32);
2866 	register_value = ipw_read_reg32(priv, cb_fields_address);
2867 	IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
2868 			  register_value);
2869 
2870 	cb_fields_address += sizeof(u32);
2871 	register_value = ipw_read_reg32(priv, cb_fields_address);
2872 	IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
2873 
2874 	IPW_DEBUG_FW(">> :\n");
2875 }
2876 
2877 static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2878 {
2879 	u32 current_cb_address = 0;
2880 	u32 current_cb_index = 0;
2881 
2882 	IPW_DEBUG_FW("<< :\n");
2883 	current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2884 
2885 	current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2886 	    sizeof(struct command_block);
2887 
2888 	IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
2889 			  current_cb_index, current_cb_address);
2890 
2891 	IPW_DEBUG_FW(">> :\n");
2892 	return current_cb_index;
2893 
2894 }
2895 
2896 static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2897 					u32 src_address,
2898 					u32 dest_address,
2899 					u32 length,
2900 					int interrupt_enabled, int is_last)
2901 {
2902 
2903 	u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2904 	    CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2905 	    CB_DEST_SIZE_LONG;
2906 	struct command_block *cb;
2907 	u32 last_cb_element = 0;
2908 
2909 	IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2910 			  src_address, dest_address, length);
2911 
2912 	if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2913 		return -1;
2914 
2915 	last_cb_element = priv->sram_desc.last_cb_index;
2916 	cb = &priv->sram_desc.cb_list[last_cb_element];
2917 	priv->sram_desc.last_cb_index++;
2918 
2919 	/* Calculate the new CB control word */
2920 	if (interrupt_enabled)
2921 		control |= CB_INT_ENABLED;
2922 
2923 	if (is_last)
2924 		control |= CB_LAST_VALID;
2925 
2926 	control |= length;
2927 
2928 	/* Calculate the CB Element's checksum value */
2929 	cb->status = control ^ src_address ^ dest_address;
2930 
2931 	/* Copy the Source and Destination addresses */
2932 	cb->dest_addr = dest_address;
2933 	cb->source_addr = src_address;
2934 
2935 	/* Copy the Control Word last */
2936 	cb->control = control;
2937 
2938 	return 0;
2939 }
2940 
2941 static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2942 				 int nr, u32 dest_address, u32 len)
2943 {
2944 	int ret, i;
2945 	u32 size;
2946 
2947 	IPW_DEBUG_FW(">>\n");
2948 	IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2949 			  nr, dest_address, len);
2950 
2951 	for (i = 0; i < nr; i++) {
2952 		size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
2953 		ret = ipw_fw_dma_add_command_block(priv, src_address[i],
2954 						   dest_address +
2955 						   i * CB_MAX_LENGTH, size,
2956 						   0, 0);
2957 		if (ret) {
2958 			IPW_DEBUG_FW_INFO(": Failed\n");
2959 			return -1;
2960 		} else
2961 			IPW_DEBUG_FW_INFO(": Added new cb\n");
2962 	}
2963 
2964 	IPW_DEBUG_FW("<<\n");
2965 	return 0;
2966 }
2967 
2968 static int ipw_fw_dma_wait(struct ipw_priv *priv)
2969 {
2970 	u32 current_index = 0, previous_index;
2971 	u32 watchdog = 0;
2972 
2973 	IPW_DEBUG_FW(">> :\n");
2974 
2975 	current_index = ipw_fw_dma_command_block_index(priv);
2976 	IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2977 			  (int)priv->sram_desc.last_cb_index);
2978 
2979 	while (current_index < priv->sram_desc.last_cb_index) {
2980 		udelay(50);
2981 		previous_index = current_index;
2982 		current_index = ipw_fw_dma_command_block_index(priv);
2983 
2984 		if (previous_index < current_index) {
2985 			watchdog = 0;
2986 			continue;
2987 		}
2988 		if (++watchdog > 400) {
2989 			IPW_DEBUG_FW_INFO("Timeout\n");
2990 			ipw_fw_dma_dump_command_block(priv);
2991 			ipw_fw_dma_abort(priv);
2992 			return -1;
2993 		}
2994 	}
2995 
2996 	ipw_fw_dma_abort(priv);
2997 
2998 	/*Disable the DMA in the CSR register */
2999 	ipw_set_bit(priv, IPW_RESET_REG,
3000 		    IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
3001 
3002 	IPW_DEBUG_FW("<< dmaWaitSync\n");
3003 	return 0;
3004 }
3005 
3006 static void ipw_remove_current_network(struct ipw_priv *priv)
3007 {
3008 	struct list_head *element, *safe;
3009 	struct libipw_network *network = NULL;
3010 	unsigned long flags;
3011 
3012 	spin_lock_irqsave(&priv->ieee->lock, flags);
3013 	list_for_each_safe(element, safe, &priv->ieee->network_list) {
3014 		network = list_entry(element, struct libipw_network, list);
3015 		if (ether_addr_equal(network->bssid, priv->bssid)) {
3016 			list_del(element);
3017 			list_add_tail(&network->list,
3018 				      &priv->ieee->network_free_list);
3019 		}
3020 	}
3021 	spin_unlock_irqrestore(&priv->ieee->lock, flags);
3022 }
3023 
3024 /**
3025  * Check that card is still alive.
3026  * Reads debug register from domain0.
3027  * If card is present, pre-defined value should
3028  * be found there.
3029  *
3030  * @param priv
3031  * @return 1 if card is present, 0 otherwise
3032  */
3033 static inline int ipw_alive(struct ipw_priv *priv)
3034 {
3035 	return ipw_read32(priv, 0x90) == 0xd55555d5;
3036 }
3037 
3038 /* timeout in msec, attempted in 10-msec quanta */
3039 static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
3040 			       int timeout)
3041 {
3042 	int i = 0;
3043 
3044 	do {
3045 		if ((ipw_read32(priv, addr) & mask) == mask)
3046 			return i;
3047 		mdelay(10);
3048 		i += 10;
3049 	} while (i < timeout);
3050 
3051 	return -ETIME;
3052 }
3053 
3054 /* These functions load the firmware and micro code for the operation of
3055  * the ipw hardware.  It assumes the buffer has all the bits for the
3056  * image and the caller is handling the memory allocation and clean up.
3057  */
3058 
3059 static int ipw_stop_master(struct ipw_priv *priv)
3060 {
3061 	int rc;
3062 
3063 	IPW_DEBUG_TRACE(">>\n");
3064 	/* stop master. typical delay - 0 */
3065 	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3066 
3067 	/* timeout is in msec, polled in 10-msec quanta */
3068 	rc = ipw_poll_bit(priv, IPW_RESET_REG,
3069 			  IPW_RESET_REG_MASTER_DISABLED, 100);
3070 	if (rc < 0) {
3071 		IPW_ERROR("wait for stop master failed after 100ms\n");
3072 		return -1;
3073 	}
3074 
3075 	IPW_DEBUG_INFO("stop master %dms\n", rc);
3076 
3077 	return rc;
3078 }
3079 
3080 static void ipw_arc_release(struct ipw_priv *priv)
3081 {
3082 	IPW_DEBUG_TRACE(">>\n");
3083 	mdelay(5);
3084 
3085 	ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3086 
3087 	/* no one knows timing, for safety add some delay */
3088 	mdelay(5);
3089 }
3090 
3091 struct fw_chunk {
3092 	__le32 address;
3093 	__le32 length;
3094 };
3095 
3096 static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3097 {
3098 	int rc = 0, i, addr;
3099 	u8 cr = 0;
3100 	__le16 *image;
3101 
3102 	image = (__le16 *) data;
3103 
3104 	IPW_DEBUG_TRACE(">>\n");
3105 
3106 	rc = ipw_stop_master(priv);
3107 
3108 	if (rc < 0)
3109 		return rc;
3110 
3111 	for (addr = IPW_SHARED_LOWER_BOUND;
3112 	     addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3113 		ipw_write32(priv, addr, 0);
3114 	}
3115 
3116 	/* no ucode (yet) */
3117 	memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3118 	/* destroy DMA queues */
3119 	/* reset sequence */
3120 
3121 	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3122 	ipw_arc_release(priv);
3123 	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3124 	mdelay(1);
3125 
3126 	/* reset PHY */
3127 	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3128 	mdelay(1);
3129 
3130 	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3131 	mdelay(1);
3132 
3133 	/* enable ucode store */
3134 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3135 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3136 	mdelay(1);
3137 
3138 	/* write ucode */
3139 	/**
3140 	 * @bug
3141 	 * Do NOT set indirect address register once and then
3142 	 * store data to indirect data register in the loop.
3143 	 * It seems very reasonable, but in this case DINO do not
3144 	 * accept ucode. It is essential to set address each time.
3145 	 */
3146 	/* load new ipw uCode */
3147 	for (i = 0; i < len / 2; i++)
3148 		ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3149 				le16_to_cpu(image[i]));
3150 
3151 	/* enable DINO */
3152 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3153 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3154 
3155 	/* this is where the igx / win driver deveates from the VAP driver. */
3156 
3157 	/* wait for alive response */
3158 	for (i = 0; i < 100; i++) {
3159 		/* poll for incoming data */
3160 		cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3161 		if (cr & DINO_RXFIFO_DATA)
3162 			break;
3163 		mdelay(1);
3164 	}
3165 
3166 	if (cr & DINO_RXFIFO_DATA) {
3167 		/* alive_command_responce size is NOT multiple of 4 */
3168 		__le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3169 
3170 		for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3171 			response_buffer[i] =
3172 			    cpu_to_le32(ipw_read_reg32(priv,
3173 						       IPW_BASEBAND_RX_FIFO_READ));
3174 		memcpy(&priv->dino_alive, response_buffer,
3175 		       sizeof(priv->dino_alive));
3176 		if (priv->dino_alive.alive_command == 1
3177 		    && priv->dino_alive.ucode_valid == 1) {
3178 			rc = 0;
3179 			IPW_DEBUG_INFO
3180 			    ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3181 			     "of %02d/%02d/%02d %02d:%02d\n",
3182 			     priv->dino_alive.software_revision,
3183 			     priv->dino_alive.software_revision,
3184 			     priv->dino_alive.device_identifier,
3185 			     priv->dino_alive.device_identifier,
3186 			     priv->dino_alive.time_stamp[0],
3187 			     priv->dino_alive.time_stamp[1],
3188 			     priv->dino_alive.time_stamp[2],
3189 			     priv->dino_alive.time_stamp[3],
3190 			     priv->dino_alive.time_stamp[4]);
3191 		} else {
3192 			IPW_DEBUG_INFO("Microcode is not alive\n");
3193 			rc = -EINVAL;
3194 		}
3195 	} else {
3196 		IPW_DEBUG_INFO("No alive response from DINO\n");
3197 		rc = -ETIME;
3198 	}
3199 
3200 	/* disable DINO, otherwise for some reason
3201 	   firmware have problem getting alive resp. */
3202 	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3203 
3204 	return rc;
3205 }
3206 
3207 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3208 {
3209 	int ret = -1;
3210 	int offset = 0;
3211 	struct fw_chunk *chunk;
3212 	int total_nr = 0;
3213 	int i;
3214 	struct pci_pool *pool;
3215 	void **virts;
3216 	dma_addr_t *phys;
3217 
3218 	IPW_DEBUG_TRACE("<< :\n");
3219 
3220 	virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL,
3221 			GFP_KERNEL);
3222 	if (!virts)
3223 		return -ENOMEM;
3224 
3225 	phys = kmalloc(sizeof(dma_addr_t) * CB_NUMBER_OF_ELEMENTS_SMALL,
3226 			GFP_KERNEL);
3227 	if (!phys) {
3228 		kfree(virts);
3229 		return -ENOMEM;
3230 	}
3231 	pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
3232 	if (!pool) {
3233 		IPW_ERROR("pci_pool_create failed\n");
3234 		kfree(phys);
3235 		kfree(virts);
3236 		return -ENOMEM;
3237 	}
3238 
3239 	/* Start the Dma */
3240 	ret = ipw_fw_dma_enable(priv);
3241 
3242 	/* the DMA is already ready this would be a bug. */
3243 	BUG_ON(priv->sram_desc.last_cb_index > 0);
3244 
3245 	do {
3246 		u32 chunk_len;
3247 		u8 *start;
3248 		int size;
3249 		int nr = 0;
3250 
3251 		chunk = (struct fw_chunk *)(data + offset);
3252 		offset += sizeof(struct fw_chunk);
3253 		chunk_len = le32_to_cpu(chunk->length);
3254 		start = data + offset;
3255 
3256 		nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
3257 		for (i = 0; i < nr; i++) {
3258 			virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
3259 							 &phys[total_nr]);
3260 			if (!virts[total_nr]) {
3261 				ret = -ENOMEM;
3262 				goto out;
3263 			}
3264 			size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
3265 				     CB_MAX_LENGTH);
3266 			memcpy(virts[total_nr], start, size);
3267 			start += size;
3268 			total_nr++;
3269 			/* We don't support fw chunk larger than 64*8K */
3270 			BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
3271 		}
3272 
3273 		/* build DMA packet and queue up for sending */
3274 		/* dma to chunk->address, the chunk->length bytes from data +
3275 		 * offeset*/
3276 		/* Dma loading */
3277 		ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
3278 					    nr, le32_to_cpu(chunk->address),
3279 					    chunk_len);
3280 		if (ret) {
3281 			IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3282 			goto out;
3283 		}
3284 
3285 		offset += chunk_len;
3286 	} while (offset < len);
3287 
3288 	/* Run the DMA and wait for the answer */
3289 	ret = ipw_fw_dma_kick(priv);
3290 	if (ret) {
3291 		IPW_ERROR("dmaKick Failed\n");
3292 		goto out;
3293 	}
3294 
3295 	ret = ipw_fw_dma_wait(priv);
3296 	if (ret) {
3297 		IPW_ERROR("dmaWaitSync Failed\n");
3298 		goto out;
3299 	}
3300  out:
3301 	for (i = 0; i < total_nr; i++)
3302 		pci_pool_free(pool, virts[i], phys[i]);
3303 
3304 	pci_pool_destroy(pool);
3305 	kfree(phys);
3306 	kfree(virts);
3307 
3308 	return ret;
3309 }
3310 
3311 /* stop nic */
3312 static int ipw_stop_nic(struct ipw_priv *priv)
3313 {
3314 	int rc = 0;
3315 
3316 	/* stop */
3317 	ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3318 
3319 	rc = ipw_poll_bit(priv, IPW_RESET_REG,
3320 			  IPW_RESET_REG_MASTER_DISABLED, 500);
3321 	if (rc < 0) {
3322 		IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3323 		return rc;
3324 	}
3325 
3326 	ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3327 
3328 	return rc;
3329 }
3330 
3331 static void ipw_start_nic(struct ipw_priv *priv)
3332 {
3333 	IPW_DEBUG_TRACE(">>\n");
3334 
3335 	/* prvHwStartNic  release ARC */
3336 	ipw_clear_bit(priv, IPW_RESET_REG,
3337 		      IPW_RESET_REG_MASTER_DISABLED |
3338 		      IPW_RESET_REG_STOP_MASTER |
3339 		      CBD_RESET_REG_PRINCETON_RESET);
3340 
3341 	/* enable power management */
3342 	ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3343 		    IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3344 
3345 	IPW_DEBUG_TRACE("<<\n");
3346 }
3347 
3348 static int ipw_init_nic(struct ipw_priv *priv)
3349 {
3350 	int rc;
3351 
3352 	IPW_DEBUG_TRACE(">>\n");
3353 	/* reset */
3354 	/*prvHwInitNic */
3355 	/* set "initialization complete" bit to move adapter to D0 state */
3356 	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3357 
3358 	/* low-level PLL activation */
3359 	ipw_write32(priv, IPW_READ_INT_REGISTER,
3360 		    IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3361 
3362 	/* wait for clock stabilization */
3363 	rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3364 			  IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3365 	if (rc < 0)
3366 		IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
3367 
3368 	/* assert SW reset */
3369 	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3370 
3371 	udelay(10);
3372 
3373 	/* set "initialization complete" bit to move adapter to D0 state */
3374 	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3375 
3376 	IPW_DEBUG_TRACE(">>\n");
3377 	return 0;
3378 }
3379 
3380 /* Call this function from process context, it will sleep in request_firmware.
3381  * Probe is an ok place to call this from.
3382  */
3383 static int ipw_reset_nic(struct ipw_priv *priv)
3384 {
3385 	int rc = 0;
3386 	unsigned long flags;
3387 
3388 	IPW_DEBUG_TRACE(">>\n");
3389 
3390 	rc = ipw_init_nic(priv);
3391 
3392 	spin_lock_irqsave(&priv->lock, flags);
3393 	/* Clear the 'host command active' bit... */
3394 	priv->status &= ~STATUS_HCMD_ACTIVE;
3395 	wake_up_interruptible(&priv->wait_command_queue);
3396 	priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3397 	wake_up_interruptible(&priv->wait_state);
3398 	spin_unlock_irqrestore(&priv->lock, flags);
3399 
3400 	IPW_DEBUG_TRACE("<<\n");
3401 	return rc;
3402 }
3403 
3404 
3405 struct ipw_fw {
3406 	__le32 ver;
3407 	__le32 boot_size;
3408 	__le32 ucode_size;
3409 	__le32 fw_size;
3410 	u8 data[0];
3411 };
3412 
3413 static int ipw_get_fw(struct ipw_priv *priv,
3414 		      const struct firmware **raw, const char *name)
3415 {
3416 	struct ipw_fw *fw;
3417 	int rc;
3418 
3419 	/* ask firmware_class module to get the boot firmware off disk */
3420 	rc = request_firmware(raw, name, &priv->pci_dev->dev);
3421 	if (rc < 0) {
3422 		IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3423 		return rc;
3424 	}
3425 
3426 	if ((*raw)->size < sizeof(*fw)) {
3427 		IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3428 		return -EINVAL;
3429 	}
3430 
3431 	fw = (void *)(*raw)->data;
3432 
3433 	if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3434 	    le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3435 		IPW_ERROR("%s is too small or corrupt (%zd)\n",
3436 			  name, (*raw)->size);
3437 		return -EINVAL;
3438 	}
3439 
3440 	IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3441 		       name,
3442 		       le32_to_cpu(fw->ver) >> 16,
3443 		       le32_to_cpu(fw->ver) & 0xff,
3444 		       (*raw)->size - sizeof(*fw));
3445 	return 0;
3446 }
3447 
3448 #define IPW_RX_BUF_SIZE (3000)
3449 
3450 static void ipw_rx_queue_reset(struct ipw_priv *priv,
3451 				      struct ipw_rx_queue *rxq)
3452 {
3453 	unsigned long flags;
3454 	int i;
3455 
3456 	spin_lock_irqsave(&rxq->lock, flags);
3457 
3458 	INIT_LIST_HEAD(&rxq->rx_free);
3459 	INIT_LIST_HEAD(&rxq->rx_used);
3460 
3461 	/* Fill the rx_used queue with _all_ of the Rx buffers */
3462 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3463 		/* In the reset function, these buffers may have been allocated
3464 		 * to an SKB, so we need to unmap and free potential storage */
3465 		if (rxq->pool[i].skb != NULL) {
3466 			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3467 					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3468 			dev_kfree_skb(rxq->pool[i].skb);
3469 			rxq->pool[i].skb = NULL;
3470 		}
3471 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3472 	}
3473 
3474 	/* Set us so that we have processed and used all buffers, but have
3475 	 * not restocked the Rx queue with fresh buffers */
3476 	rxq->read = rxq->write = 0;
3477 	rxq->free_count = 0;
3478 	spin_unlock_irqrestore(&rxq->lock, flags);
3479 }
3480 
3481 #ifdef CONFIG_PM
3482 static int fw_loaded = 0;
3483 static const struct firmware *raw = NULL;
3484 
3485 static void free_firmware(void)
3486 {
3487 	if (fw_loaded) {
3488 		release_firmware(raw);
3489 		raw = NULL;
3490 		fw_loaded = 0;
3491 	}
3492 }
3493 #else
3494 #define free_firmware() do {} while (0)
3495 #endif
3496 
3497 static int ipw_load(struct ipw_priv *priv)
3498 {
3499 #ifndef CONFIG_PM
3500 	const struct firmware *raw = NULL;
3501 #endif
3502 	struct ipw_fw *fw;
3503 	u8 *boot_img, *ucode_img, *fw_img;
3504 	u8 *name = NULL;
3505 	int rc = 0, retries = 3;
3506 
3507 	switch (priv->ieee->iw_mode) {
3508 	case IW_MODE_ADHOC:
3509 		name = "ipw2200-ibss.fw";
3510 		break;
3511 #ifdef CONFIG_IPW2200_MONITOR
3512 	case IW_MODE_MONITOR:
3513 		name = "ipw2200-sniffer.fw";
3514 		break;
3515 #endif
3516 	case IW_MODE_INFRA:
3517 		name = "ipw2200-bss.fw";
3518 		break;
3519 	}
3520 
3521 	if (!name) {
3522 		rc = -EINVAL;
3523 		goto error;
3524 	}
3525 
3526 #ifdef CONFIG_PM
3527 	if (!fw_loaded) {
3528 #endif
3529 		rc = ipw_get_fw(priv, &raw, name);
3530 		if (rc < 0)
3531 			goto error;
3532 #ifdef CONFIG_PM
3533 	}
3534 #endif
3535 
3536 	fw = (void *)raw->data;
3537 	boot_img = &fw->data[0];
3538 	ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3539 	fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3540 			   le32_to_cpu(fw->ucode_size)];
3541 
3542 	if (!priv->rxq)
3543 		priv->rxq = ipw_rx_queue_alloc(priv);
3544 	else
3545 		ipw_rx_queue_reset(priv, priv->rxq);
3546 	if (!priv->rxq) {
3547 		IPW_ERROR("Unable to initialize Rx queue\n");
3548 		rc = -ENOMEM;
3549 		goto error;
3550 	}
3551 
3552       retry:
3553 	/* Ensure interrupts are disabled */
3554 	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3555 	priv->status &= ~STATUS_INT_ENABLED;
3556 
3557 	/* ack pending interrupts */
3558 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3559 
3560 	ipw_stop_nic(priv);
3561 
3562 	rc = ipw_reset_nic(priv);
3563 	if (rc < 0) {
3564 		IPW_ERROR("Unable to reset NIC\n");
3565 		goto error;
3566 	}
3567 
3568 	ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3569 			IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3570 
3571 	/* DMA the initial boot firmware into the device */
3572 	rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3573 	if (rc < 0) {
3574 		IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3575 		goto error;
3576 	}
3577 
3578 	/* kick start the device */
3579 	ipw_start_nic(priv);
3580 
3581 	/* wait for the device to finish its initial startup sequence */
3582 	rc = ipw_poll_bit(priv, IPW_INTA_RW,
3583 			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3584 	if (rc < 0) {
3585 		IPW_ERROR("device failed to boot initial fw image\n");
3586 		goto error;
3587 	}
3588 	IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3589 
3590 	/* ack fw init done interrupt */
3591 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3592 
3593 	/* DMA the ucode into the device */
3594 	rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3595 	if (rc < 0) {
3596 		IPW_ERROR("Unable to load ucode: %d\n", rc);
3597 		goto error;
3598 	}
3599 
3600 	/* stop nic */
3601 	ipw_stop_nic(priv);
3602 
3603 	/* DMA bss firmware into the device */
3604 	rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3605 	if (rc < 0) {
3606 		IPW_ERROR("Unable to load firmware: %d\n", rc);
3607 		goto error;
3608 	}
3609 #ifdef CONFIG_PM
3610 	fw_loaded = 1;
3611 #endif
3612 
3613 	ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3614 
3615 	rc = ipw_queue_reset(priv);
3616 	if (rc < 0) {
3617 		IPW_ERROR("Unable to initialize queues\n");
3618 		goto error;
3619 	}
3620 
3621 	/* Ensure interrupts are disabled */
3622 	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3623 	/* ack pending interrupts */
3624 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3625 
3626 	/* kick start the device */
3627 	ipw_start_nic(priv);
3628 
3629 	if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3630 		if (retries > 0) {
3631 			IPW_WARNING("Parity error.  Retrying init.\n");
3632 			retries--;
3633 			goto retry;
3634 		}
3635 
3636 		IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3637 		rc = -EIO;
3638 		goto error;
3639 	}
3640 
3641 	/* wait for the device */
3642 	rc = ipw_poll_bit(priv, IPW_INTA_RW,
3643 			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3644 	if (rc < 0) {
3645 		IPW_ERROR("device failed to start within 500ms\n");
3646 		goto error;
3647 	}
3648 	IPW_DEBUG_INFO("device response after %dms\n", rc);
3649 
3650 	/* ack fw init done interrupt */
3651 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3652 
3653 	/* read eeprom data */
3654 	priv->eeprom_delay = 1;
3655 	ipw_read_eeprom(priv);
3656 	/* initialize the eeprom region of sram */
3657 	ipw_eeprom_init_sram(priv);
3658 
3659 	/* enable interrupts */
3660 	ipw_enable_interrupts(priv);
3661 
3662 	/* Ensure our queue has valid packets */
3663 	ipw_rx_queue_replenish(priv);
3664 
3665 	ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3666 
3667 	/* ack pending interrupts */
3668 	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3669 
3670 #ifndef CONFIG_PM
3671 	release_firmware(raw);
3672 #endif
3673 	return 0;
3674 
3675       error:
3676 	if (priv->rxq) {
3677 		ipw_rx_queue_free(priv, priv->rxq);
3678 		priv->rxq = NULL;
3679 	}
3680 	ipw_tx_queue_free(priv);
3681 	release_firmware(raw);
3682 #ifdef CONFIG_PM
3683 	fw_loaded = 0;
3684 	raw = NULL;
3685 #endif
3686 
3687 	return rc;
3688 }
3689 
3690 /**
3691  * DMA services
3692  *
3693  * Theory of operation
3694  *
3695  * A queue is a circular buffers with 'Read' and 'Write' pointers.
3696  * 2 empty entries always kept in the buffer to protect from overflow.
3697  *
3698  * For Tx queue, there are low mark and high mark limits. If, after queuing
3699  * the packet for Tx, free space become < low mark, Tx queue stopped. When
3700  * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
3701  * Tx queue resumed.
3702  *
3703  * The IPW operates with six queues, one receive queue in the device's
3704  * sram, one transmit queue for sending commands to the device firmware,
3705  * and four transmit queues for data.
3706  *
3707  * The four transmit queues allow for performing quality of service (qos)
3708  * transmissions as per the 802.11 protocol.  Currently Linux does not
3709  * provide a mechanism to the user for utilizing prioritized queues, so
3710  * we only utilize the first data transmit queue (queue1).
3711  */
3712 
3713 /**
3714  * Driver allocates buffers of this size for Rx
3715  */
3716 
3717 /**
3718  * ipw_rx_queue_space - Return number of free slots available in queue.
3719  */
3720 static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3721 {
3722 	int s = q->read - q->write;
3723 	if (s <= 0)
3724 		s += RX_QUEUE_SIZE;
3725 	/* keep some buffer to not confuse full and empty queue */
3726 	s -= 2;
3727 	if (s < 0)
3728 		s = 0;
3729 	return s;
3730 }
3731 
3732 static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3733 {
3734 	int s = q->last_used - q->first_empty;
3735 	if (s <= 0)
3736 		s += q->n_bd;
3737 	s -= 2;			/* keep some reserve to not confuse empty and full situations */
3738 	if (s < 0)
3739 		s = 0;
3740 	return s;
3741 }
3742 
3743 static inline int ipw_queue_inc_wrap(int index, int n_bd)
3744 {
3745 	return (++index == n_bd) ? 0 : index;
3746 }
3747 
3748 /**
3749  * Initialize common DMA queue structure
3750  *
3751  * @param q                queue to init
3752  * @param count            Number of BD's to allocate. Should be power of 2
3753  * @param read_register    Address for 'read' register
3754  *                         (not offset within BAR, full address)
3755  * @param write_register   Address for 'write' register
3756  *                         (not offset within BAR, full address)
3757  * @param base_register    Address for 'base' register
3758  *                         (not offset within BAR, full address)
3759  * @param size             Address for 'size' register
3760  *                         (not offset within BAR, full address)
3761  */
3762 static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3763 			   int count, u32 read, u32 write, u32 base, u32 size)
3764 {
3765 	q->n_bd = count;
3766 
3767 	q->low_mark = q->n_bd / 4;
3768 	if (q->low_mark < 4)
3769 		q->low_mark = 4;
3770 
3771 	q->high_mark = q->n_bd / 8;
3772 	if (q->high_mark < 2)
3773 		q->high_mark = 2;
3774 
3775 	q->first_empty = q->last_used = 0;
3776 	q->reg_r = read;
3777 	q->reg_w = write;
3778 
3779 	ipw_write32(priv, base, q->dma_addr);
3780 	ipw_write32(priv, size, count);
3781 	ipw_write32(priv, read, 0);
3782 	ipw_write32(priv, write, 0);
3783 
3784 	_ipw_read32(priv, 0x90);
3785 }
3786 
3787 static int ipw_queue_tx_init(struct ipw_priv *priv,
3788 			     struct clx2_tx_queue *q,
3789 			     int count, u32 read, u32 write, u32 base, u32 size)
3790 {
3791 	struct pci_dev *dev = priv->pci_dev;
3792 
3793 	q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3794 	if (!q->txb) {
3795 		IPW_ERROR("vmalloc for auxiliary BD structures failed\n");
3796 		return -ENOMEM;
3797 	}
3798 
3799 	q->bd =
3800 	    pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3801 	if (!q->bd) {
3802 		IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3803 			  sizeof(q->bd[0]) * count);
3804 		kfree(q->txb);
3805 		q->txb = NULL;
3806 		return -ENOMEM;
3807 	}
3808 
3809 	ipw_queue_init(priv, &q->q, count, read, write, base, size);
3810 	return 0;
3811 }
3812 
3813 /**
3814  * Free one TFD, those at index [txq->q.last_used].
3815  * Do NOT advance any indexes
3816  *
3817  * @param dev
3818  * @param txq
3819  */
3820 static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3821 				  struct clx2_tx_queue *txq)
3822 {
3823 	struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3824 	struct pci_dev *dev = priv->pci_dev;
3825 	int i;
3826 
3827 	/* classify bd */
3828 	if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3829 		/* nothing to cleanup after for host commands */
3830 		return;
3831 
3832 	/* sanity check */
3833 	if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3834 		IPW_ERROR("Too many chunks: %i\n",
3835 			  le32_to_cpu(bd->u.data.num_chunks));
3836 		/** @todo issue fatal error, it is quite serious situation */
3837 		return;
3838 	}
3839 
3840 	/* unmap chunks if any */
3841 	for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3842 		pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3843 				 le16_to_cpu(bd->u.data.chunk_len[i]),
3844 				 PCI_DMA_TODEVICE);
3845 		if (txq->txb[txq->q.last_used]) {
3846 			libipw_txb_free(txq->txb[txq->q.last_used]);
3847 			txq->txb[txq->q.last_used] = NULL;
3848 		}
3849 	}
3850 }
3851 
3852 /**
3853  * Deallocate DMA queue.
3854  *
3855  * Empty queue by removing and destroying all BD's.
3856  * Free all buffers.
3857  *
3858  * @param dev
3859  * @param q
3860  */
3861 static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3862 {
3863 	struct clx2_queue *q = &txq->q;
3864 	struct pci_dev *dev = priv->pci_dev;
3865 
3866 	if (q->n_bd == 0)
3867 		return;
3868 
3869 	/* first, empty all BD's */
3870 	for (; q->first_empty != q->last_used;
3871 	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3872 		ipw_queue_tx_free_tfd(priv, txq);
3873 	}
3874 
3875 	/* free buffers belonging to queue itself */
3876 	pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3877 			    q->dma_addr);
3878 	kfree(txq->txb);
3879 
3880 	/* 0 fill whole structure */
3881 	memset(txq, 0, sizeof(*txq));
3882 }
3883 
3884 /**
3885  * Destroy all DMA queues and structures
3886  *
3887  * @param priv
3888  */
3889 static void ipw_tx_queue_free(struct ipw_priv *priv)
3890 {
3891 	/* Tx CMD queue */
3892 	ipw_queue_tx_free(priv, &priv->txq_cmd);
3893 
3894 	/* Tx queues */
3895 	ipw_queue_tx_free(priv, &priv->txq[0]);
3896 	ipw_queue_tx_free(priv, &priv->txq[1]);
3897 	ipw_queue_tx_free(priv, &priv->txq[2]);
3898 	ipw_queue_tx_free(priv, &priv->txq[3]);
3899 }
3900 
3901 static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3902 {
3903 	/* First 3 bytes are manufacturer */
3904 	bssid[0] = priv->mac_addr[0];
3905 	bssid[1] = priv->mac_addr[1];
3906 	bssid[2] = priv->mac_addr[2];
3907 
3908 	/* Last bytes are random */
3909 	get_random_bytes(&bssid[3], ETH_ALEN - 3);
3910 
3911 	bssid[0] &= 0xfe;	/* clear multicast bit */
3912 	bssid[0] |= 0x02;	/* set local assignment bit (IEEE802) */
3913 }
3914 
3915 static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3916 {
3917 	struct ipw_station_entry entry;
3918 	int i;
3919 
3920 	for (i = 0; i < priv->num_stations; i++) {
3921 		if (ether_addr_equal(priv->stations[i], bssid)) {
3922 			/* Another node is active in network */
3923 			priv->missed_adhoc_beacons = 0;
3924 			if (!(priv->config & CFG_STATIC_CHANNEL))
3925 				/* when other nodes drop out, we drop out */
3926 				priv->config &= ~CFG_ADHOC_PERSIST;
3927 
3928 			return i;
3929 		}
3930 	}
3931 
3932 	if (i == MAX_STATIONS)
3933 		return IPW_INVALID_STATION;
3934 
3935 	IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3936 
3937 	entry.reserved = 0;
3938 	entry.support_mode = 0;
3939 	memcpy(entry.mac_addr, bssid, ETH_ALEN);
3940 	memcpy(priv->stations[i], bssid, ETH_ALEN);
3941 	ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3942 			 &entry, sizeof(entry));
3943 	priv->num_stations++;
3944 
3945 	return i;
3946 }
3947 
3948 static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3949 {
3950 	int i;
3951 
3952 	for (i = 0; i < priv->num_stations; i++)
3953 		if (ether_addr_equal(priv->stations[i], bssid))
3954 			return i;
3955 
3956 	return IPW_INVALID_STATION;
3957 }
3958 
3959 static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3960 {
3961 	int err;
3962 
3963 	if (priv->status & STATUS_ASSOCIATING) {
3964 		IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3965 		schedule_work(&priv->disassociate);
3966 		return;
3967 	}
3968 
3969 	if (!(priv->status & STATUS_ASSOCIATED)) {
3970 		IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3971 		return;
3972 	}
3973 
3974 	IPW_DEBUG_ASSOC("Disassociation attempt from %pM "
3975 			"on channel %d.\n",
3976 			priv->assoc_request.bssid,
3977 			priv->assoc_request.channel);
3978 
3979 	priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3980 	priv->status |= STATUS_DISASSOCIATING;
3981 
3982 	if (quiet)
3983 		priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3984 	else
3985 		priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3986 
3987 	err = ipw_send_associate(priv, &priv->assoc_request);
3988 	if (err) {
3989 		IPW_DEBUG_HC("Attempt to send [dis]associate command "
3990 			     "failed.\n");
3991 		return;
3992 	}
3993 
3994 }
3995 
3996 static int ipw_disassociate(void *data)
3997 {
3998 	struct ipw_priv *priv = data;
3999 	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
4000 		return 0;
4001 	ipw_send_disassociate(data, 0);
4002 	netif_carrier_off(priv->net_dev);
4003 	return 1;
4004 }
4005 
4006 static void ipw_bg_disassociate(struct work_struct *work)
4007 {
4008 	struct ipw_priv *priv =
4009 		container_of(work, struct ipw_priv, disassociate);
4010 	mutex_lock(&priv->mutex);
4011 	ipw_disassociate(priv);
4012 	mutex_unlock(&priv->mutex);
4013 }
4014 
4015 static void ipw_system_config(struct work_struct *work)
4016 {
4017 	struct ipw_priv *priv =
4018 		container_of(work, struct ipw_priv, system_config);
4019 
4020 #ifdef CONFIG_IPW2200_PROMISCUOUS
4021 	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
4022 		priv->sys_config.accept_all_data_frames = 1;
4023 		priv->sys_config.accept_non_directed_frames = 1;
4024 		priv->sys_config.accept_all_mgmt_bcpr = 1;
4025 		priv->sys_config.accept_all_mgmt_frames = 1;
4026 	}
4027 #endif
4028 
4029 	ipw_send_system_config(priv);
4030 }
4031 
4032 struct ipw_status_code {
4033 	u16 status;
4034 	const char *reason;
4035 };
4036 
4037 static const struct ipw_status_code ipw_status_codes[] = {
4038 	{0x00, "Successful"},
4039 	{0x01, "Unspecified failure"},
4040 	{0x0A, "Cannot support all requested capabilities in the "
4041 	 "Capability information field"},
4042 	{0x0B, "Reassociation denied due to inability to confirm that "
4043 	 "association exists"},
4044 	{0x0C, "Association denied due to reason outside the scope of this "
4045 	 "standard"},
4046 	{0x0D,
4047 	 "Responding station does not support the specified authentication "
4048 	 "algorithm"},
4049 	{0x0E,
4050 	 "Received an Authentication frame with authentication sequence "
4051 	 "transaction sequence number out of expected sequence"},
4052 	{0x0F, "Authentication rejected because of challenge failure"},
4053 	{0x10, "Authentication rejected due to timeout waiting for next "
4054 	 "frame in sequence"},
4055 	{0x11, "Association denied because AP is unable to handle additional "
4056 	 "associated stations"},
4057 	{0x12,
4058 	 "Association denied due to requesting station not supporting all "
4059 	 "of the datarates in the BSSBasicServiceSet Parameter"},
4060 	{0x13,
4061 	 "Association denied due to requesting station not supporting "
4062 	 "short preamble operation"},
4063 	{0x14,
4064 	 "Association denied due to requesting station not supporting "
4065 	 "PBCC encoding"},
4066 	{0x15,
4067 	 "Association denied due to requesting station not supporting "
4068 	 "channel agility"},
4069 	{0x19,
4070 	 "Association denied due to requesting station not supporting "
4071 	 "short slot operation"},
4072 	{0x1A,
4073 	 "Association denied due to requesting station not supporting "
4074 	 "DSSS-OFDM operation"},
4075 	{0x28, "Invalid Information Element"},
4076 	{0x29, "Group Cipher is not valid"},
4077 	{0x2A, "Pairwise Cipher is not valid"},
4078 	{0x2B, "AKMP is not valid"},
4079 	{0x2C, "Unsupported RSN IE version"},
4080 	{0x2D, "Invalid RSN IE Capabilities"},
4081 	{0x2E, "Cipher suite is rejected per security policy"},
4082 };
4083 
4084 static const char *ipw_get_status_code(u16 status)
4085 {
4086 	int i;
4087 	for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
4088 		if (ipw_status_codes[i].status == (status & 0xff))
4089 			return ipw_status_codes[i].reason;
4090 	return "Unknown status value.";
4091 }
4092 
4093 static inline void average_init(struct average *avg)
4094 {
4095 	memset(avg, 0, sizeof(*avg));
4096 }
4097 
4098 #define DEPTH_RSSI 8
4099 #define DEPTH_NOISE 16
4100 static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
4101 {
4102 	return ((depth-1)*prev_avg +  val)/depth;
4103 }
4104 
4105 static void average_add(struct average *avg, s16 val)
4106 {
4107 	avg->sum -= avg->entries[avg->pos];
4108 	avg->sum += val;
4109 	avg->entries[avg->pos++] = val;
4110 	if (unlikely(avg->pos == AVG_ENTRIES)) {
4111 		avg->init = 1;
4112 		avg->pos = 0;
4113 	}
4114 }
4115 
4116 static s16 average_value(struct average *avg)
4117 {
4118 	if (!unlikely(avg->init)) {
4119 		if (avg->pos)
4120 			return avg->sum / avg->pos;
4121 		return 0;
4122 	}
4123 
4124 	return avg->sum / AVG_ENTRIES;
4125 }
4126 
4127 static void ipw_reset_stats(struct ipw_priv *priv)
4128 {
4129 	u32 len = sizeof(u32);
4130 
4131 	priv->quality = 0;
4132 
4133 	average_init(&priv->average_missed_beacons);
4134 	priv->exp_avg_rssi = -60;
4135 	priv->exp_avg_noise = -85 + 0x100;
4136 
4137 	priv->last_rate = 0;
4138 	priv->last_missed_beacons = 0;
4139 	priv->last_rx_packets = 0;
4140 	priv->last_tx_packets = 0;
4141 	priv->last_tx_failures = 0;
4142 
4143 	/* Firmware managed, reset only when NIC is restarted, so we have to
4144 	 * normalize on the current value */
4145 	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4146 			&priv->last_rx_err, &len);
4147 	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4148 			&priv->last_tx_failures, &len);
4149 
4150 	/* Driver managed, reset with each association */
4151 	priv->missed_adhoc_beacons = 0;
4152 	priv->missed_beacons = 0;
4153 	priv->tx_packets = 0;
4154 	priv->rx_packets = 0;
4155 
4156 }
4157 
4158 static u32 ipw_get_max_rate(struct ipw_priv *priv)
4159 {
4160 	u32 i = 0x80000000;
4161 	u32 mask = priv->rates_mask;
4162 	/* If currently associated in B mode, restrict the maximum
4163 	 * rate match to B rates */
4164 	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4165 		mask &= LIBIPW_CCK_RATES_MASK;
4166 
4167 	/* TODO: Verify that the rate is supported by the current rates
4168 	 * list. */
4169 
4170 	while (i && !(mask & i))
4171 		i >>= 1;
4172 	switch (i) {
4173 	case LIBIPW_CCK_RATE_1MB_MASK:
4174 		return 1000000;
4175 	case LIBIPW_CCK_RATE_2MB_MASK:
4176 		return 2000000;
4177 	case LIBIPW_CCK_RATE_5MB_MASK:
4178 		return 5500000;
4179 	case LIBIPW_OFDM_RATE_6MB_MASK:
4180 		return 6000000;
4181 	case LIBIPW_OFDM_RATE_9MB_MASK:
4182 		return 9000000;
4183 	case LIBIPW_CCK_RATE_11MB_MASK:
4184 		return 11000000;
4185 	case LIBIPW_OFDM_RATE_12MB_MASK:
4186 		return 12000000;
4187 	case LIBIPW_OFDM_RATE_18MB_MASK:
4188 		return 18000000;
4189 	case LIBIPW_OFDM_RATE_24MB_MASK:
4190 		return 24000000;
4191 	case LIBIPW_OFDM_RATE_36MB_MASK:
4192 		return 36000000;
4193 	case LIBIPW_OFDM_RATE_48MB_MASK:
4194 		return 48000000;
4195 	case LIBIPW_OFDM_RATE_54MB_MASK:
4196 		return 54000000;
4197 	}
4198 
4199 	if (priv->ieee->mode == IEEE_B)
4200 		return 11000000;
4201 	else
4202 		return 54000000;
4203 }
4204 
4205 static u32 ipw_get_current_rate(struct ipw_priv *priv)
4206 {
4207 	u32 rate, len = sizeof(rate);
4208 	int err;
4209 
4210 	if (!(priv->status & STATUS_ASSOCIATED))
4211 		return 0;
4212 
4213 	if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4214 		err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4215 				      &len);
4216 		if (err) {
4217 			IPW_DEBUG_INFO("failed querying ordinals.\n");
4218 			return 0;
4219 		}
4220 	} else
4221 		return ipw_get_max_rate(priv);
4222 
4223 	switch (rate) {
4224 	case IPW_TX_RATE_1MB:
4225 		return 1000000;
4226 	case IPW_TX_RATE_2MB:
4227 		return 2000000;
4228 	case IPW_TX_RATE_5MB:
4229 		return 5500000;
4230 	case IPW_TX_RATE_6MB:
4231 		return 6000000;
4232 	case IPW_TX_RATE_9MB:
4233 		return 9000000;
4234 	case IPW_TX_RATE_11MB:
4235 		return 11000000;
4236 	case IPW_TX_RATE_12MB:
4237 		return 12000000;
4238 	case IPW_TX_RATE_18MB:
4239 		return 18000000;
4240 	case IPW_TX_RATE_24MB:
4241 		return 24000000;
4242 	case IPW_TX_RATE_36MB:
4243 		return 36000000;
4244 	case IPW_TX_RATE_48MB:
4245 		return 48000000;
4246 	case IPW_TX_RATE_54MB:
4247 		return 54000000;
4248 	}
4249 
4250 	return 0;
4251 }
4252 
4253 #define IPW_STATS_INTERVAL (2 * HZ)
4254 static void ipw_gather_stats(struct ipw_priv *priv)
4255 {
4256 	u32 rx_err, rx_err_delta, rx_packets_delta;
4257 	u32 tx_failures, tx_failures_delta, tx_packets_delta;
4258 	u32 missed_beacons_percent, missed_beacons_delta;
4259 	u32 quality = 0;
4260 	u32 len = sizeof(u32);
4261 	s16 rssi;
4262 	u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4263 	    rate_quality;
4264 	u32 max_rate;
4265 
4266 	if (!(priv->status & STATUS_ASSOCIATED)) {
4267 		priv->quality = 0;
4268 		return;
4269 	}
4270 
4271 	/* Update the statistics */
4272 	ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4273 			&priv->missed_beacons, &len);
4274 	missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4275 	priv->last_missed_beacons = priv->missed_beacons;
4276 	if (priv->assoc_request.beacon_interval) {
4277 		missed_beacons_percent = missed_beacons_delta *
4278 		    (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4279 		    (IPW_STATS_INTERVAL * 10);
4280 	} else {
4281 		missed_beacons_percent = 0;
4282 	}
4283 	average_add(&priv->average_missed_beacons, missed_beacons_percent);
4284 
4285 	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4286 	rx_err_delta = rx_err - priv->last_rx_err;
4287 	priv->last_rx_err = rx_err;
4288 
4289 	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4290 	tx_failures_delta = tx_failures - priv->last_tx_failures;
4291 	priv->last_tx_failures = tx_failures;
4292 
4293 	rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4294 	priv->last_rx_packets = priv->rx_packets;
4295 
4296 	tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4297 	priv->last_tx_packets = priv->tx_packets;
4298 
4299 	/* Calculate quality based on the following:
4300 	 *
4301 	 * Missed beacon: 100% = 0, 0% = 70% missed
4302 	 * Rate: 60% = 1Mbs, 100% = Max
4303 	 * Rx and Tx errors represent a straight % of total Rx/Tx
4304 	 * RSSI: 100% = > -50,  0% = < -80
4305 	 * Rx errors: 100% = 0, 0% = 50% missed
4306 	 *
4307 	 * The lowest computed quality is used.
4308 	 *
4309 	 */
4310 #define BEACON_THRESHOLD 5
4311 	beacon_quality = 100 - missed_beacons_percent;
4312 	if (beacon_quality < BEACON_THRESHOLD)
4313 		beacon_quality = 0;
4314 	else
4315 		beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4316 		    (100 - BEACON_THRESHOLD);
4317 	IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4318 			beacon_quality, missed_beacons_percent);
4319 
4320 	priv->last_rate = ipw_get_current_rate(priv);
4321 	max_rate = ipw_get_max_rate(priv);
4322 	rate_quality = priv->last_rate * 40 / max_rate + 60;
4323 	IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4324 			rate_quality, priv->last_rate / 1000000);
4325 
4326 	if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4327 		rx_quality = 100 - (rx_err_delta * 100) /
4328 		    (rx_packets_delta + rx_err_delta);
4329 	else
4330 		rx_quality = 100;
4331 	IPW_DEBUG_STATS("Rx quality   : %3d%% (%u errors, %u packets)\n",
4332 			rx_quality, rx_err_delta, rx_packets_delta);
4333 
4334 	if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4335 		tx_quality = 100 - (tx_failures_delta * 100) /
4336 		    (tx_packets_delta + tx_failures_delta);
4337 	else
4338 		tx_quality = 100;
4339 	IPW_DEBUG_STATS("Tx quality   : %3d%% (%u errors, %u packets)\n",
4340 			tx_quality, tx_failures_delta, tx_packets_delta);
4341 
4342 	rssi = priv->exp_avg_rssi;
4343 	signal_quality =
4344 	    (100 *
4345 	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4346 	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4347 	     (priv->ieee->perfect_rssi - rssi) *
4348 	     (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4349 	      62 * (priv->ieee->perfect_rssi - rssi))) /
4350 	    ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4351 	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4352 	if (signal_quality > 100)
4353 		signal_quality = 100;
4354 	else if (signal_quality < 1)
4355 		signal_quality = 0;
4356 
4357 	IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4358 			signal_quality, rssi);
4359 
4360 	quality = min(rx_quality, signal_quality);
4361 	quality = min(tx_quality, quality);
4362 	quality = min(rate_quality, quality);
4363 	quality = min(beacon_quality, quality);
4364 	if (quality == beacon_quality)
4365 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4366 				quality);
4367 	if (quality == rate_quality)
4368 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4369 				quality);
4370 	if (quality == tx_quality)
4371 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4372 				quality);
4373 	if (quality == rx_quality)
4374 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4375 				quality);
4376 	if (quality == signal_quality)
4377 		IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4378 				quality);
4379 
4380 	priv->quality = quality;
4381 
4382 	schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
4383 }
4384 
4385 static void ipw_bg_gather_stats(struct work_struct *work)
4386 {
4387 	struct ipw_priv *priv =
4388 		container_of(work, struct ipw_priv, gather_stats.work);
4389 	mutex_lock(&priv->mutex);
4390 	ipw_gather_stats(priv);
4391 	mutex_unlock(&priv->mutex);
4392 }
4393 
4394 /* Missed beacon behavior:
4395  * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4396  * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4397  * Above disassociate threshold, give up and stop scanning.
4398  * Roaming is disabled if disassociate_threshold <= roaming_threshold  */
4399 static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4400 					    int missed_count)
4401 {
4402 	priv->notif_missed_beacons = missed_count;
4403 
4404 	if (missed_count > priv->disassociate_threshold &&
4405 	    priv->status & STATUS_ASSOCIATED) {
4406 		/* If associated and we've hit the missed
4407 		 * beacon threshold, disassociate, turn
4408 		 * off roaming, and abort any active scans */
4409 		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4410 			  IPW_DL_STATE | IPW_DL_ASSOC,
4411 			  "Missed beacon: %d - disassociate\n", missed_count);
4412 		priv->status &= ~STATUS_ROAMING;
4413 		if (priv->status & STATUS_SCANNING) {
4414 			IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4415 				  IPW_DL_STATE,
4416 				  "Aborting scan with missed beacon.\n");
4417 			schedule_work(&priv->abort_scan);
4418 		}
4419 
4420 		schedule_work(&priv->disassociate);
4421 		return;
4422 	}
4423 
4424 	if (priv->status & STATUS_ROAMING) {
4425 		/* If we are currently roaming, then just
4426 		 * print a debug statement... */
4427 		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4428 			  "Missed beacon: %d - roam in progress\n",
4429 			  missed_count);
4430 		return;
4431 	}
4432 
4433 	if (roaming &&
4434 	    (missed_count > priv->roaming_threshold &&
4435 	     missed_count <= priv->disassociate_threshold)) {
4436 		/* If we are not already roaming, set the ROAM
4437 		 * bit in the status and kick off a scan.
4438 		 * This can happen several times before we reach
4439 		 * disassociate_threshold. */
4440 		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4441 			  "Missed beacon: %d - initiate "
4442 			  "roaming\n", missed_count);
4443 		if (!(priv->status & STATUS_ROAMING)) {
4444 			priv->status |= STATUS_ROAMING;
4445 			if (!(priv->status & STATUS_SCANNING))
4446 				schedule_delayed_work(&priv->request_scan, 0);
4447 		}
4448 		return;
4449 	}
4450 
4451 	if (priv->status & STATUS_SCANNING &&
4452 	    missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
4453 		/* Stop scan to keep fw from getting
4454 		 * stuck (only if we aren't roaming --
4455 		 * otherwise we'll never scan more than 2 or 3
4456 		 * channels..) */
4457 		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4458 			  "Aborting scan with missed beacon.\n");
4459 		schedule_work(&priv->abort_scan);
4460 	}
4461 
4462 	IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4463 }
4464 
4465 static void ipw_scan_event(struct work_struct *work)
4466 {
4467 	union iwreq_data wrqu;
4468 
4469 	struct ipw_priv *priv =
4470 		container_of(work, struct ipw_priv, scan_event.work);
4471 
4472 	wrqu.data.length = 0;
4473 	wrqu.data.flags = 0;
4474 	wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4475 }
4476 
4477 static void handle_scan_event(struct ipw_priv *priv)
4478 {
4479 	/* Only userspace-requested scan completion events go out immediately */
4480 	if (!priv->user_requested_scan) {
4481 		schedule_delayed_work(&priv->scan_event,
4482 				      round_jiffies_relative(msecs_to_jiffies(4000)));
4483 	} else {
4484 		priv->user_requested_scan = 0;
4485 		mod_delayed_work(system_wq, &priv->scan_event, 0);
4486 	}
4487 }
4488 
4489 /**
4490  * Handle host notification packet.
4491  * Called from interrupt routine
4492  */
4493 static void ipw_rx_notification(struct ipw_priv *priv,
4494 				       struct ipw_rx_notification *notif)
4495 {
4496 	u16 size = le16_to_cpu(notif->size);
4497 
4498 	IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4499 
4500 	switch (notif->subtype) {
4501 	case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4502 			struct notif_association *assoc = &notif->u.assoc;
4503 
4504 			switch (assoc->state) {
4505 			case CMAS_ASSOCIATED:{
4506 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4507 						  IPW_DL_ASSOC,
4508 						  "associated: '%*pE' %pM\n",
4509 						  priv->essid_len, priv->essid,
4510 						  priv->bssid);
4511 
4512 					switch (priv->ieee->iw_mode) {
4513 					case IW_MODE_INFRA:
4514 						memcpy(priv->ieee->bssid,
4515 						       priv->bssid, ETH_ALEN);
4516 						break;
4517 
4518 					case IW_MODE_ADHOC:
4519 						memcpy(priv->ieee->bssid,
4520 						       priv->bssid, ETH_ALEN);
4521 
4522 						/* clear out the station table */
4523 						priv->num_stations = 0;
4524 
4525 						IPW_DEBUG_ASSOC
4526 						    ("queueing adhoc check\n");
4527 						schedule_delayed_work(
4528 							&priv->adhoc_check,
4529 							le16_to_cpu(priv->
4530 							assoc_request.
4531 							beacon_interval));
4532 						break;
4533 					}
4534 
4535 					priv->status &= ~STATUS_ASSOCIATING;
4536 					priv->status |= STATUS_ASSOCIATED;
4537 					schedule_work(&priv->system_config);
4538 
4539 #ifdef CONFIG_IPW2200_QOS
4540 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4541 			 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4542 					if ((priv->status & STATUS_AUTH) &&
4543 					    (IPW_GET_PACKET_STYPE(&notif->u.raw)
4544 					     == IEEE80211_STYPE_ASSOC_RESP)) {
4545 						if ((sizeof
4546 						     (struct
4547 						      libipw_assoc_response)
4548 						     <= size)
4549 						    && (size <= 2314)) {
4550 							struct
4551 							libipw_rx_stats
4552 							    stats = {
4553 								.len = size - 1,
4554 							};
4555 
4556 							IPW_DEBUG_QOS
4557 							    ("QoS Associate "
4558 							     "size %d\n", size);
4559 							libipw_rx_mgt(priv->
4560 									 ieee,
4561 									 (struct
4562 									  libipw_hdr_4addr
4563 									  *)
4564 									 &notif->u.raw, &stats);
4565 						}
4566 					}
4567 #endif
4568 
4569 					schedule_work(&priv->link_up);
4570 
4571 					break;
4572 				}
4573 
4574 			case CMAS_AUTHENTICATED:{
4575 					if (priv->
4576 					    status & (STATUS_ASSOCIATED |
4577 						      STATUS_AUTH)) {
4578 						struct notif_authenticate *auth
4579 						    = &notif->u.auth;
4580 						IPW_DEBUG(IPW_DL_NOTIF |
4581 							  IPW_DL_STATE |
4582 							  IPW_DL_ASSOC,
4583 							  "deauthenticated: '%*pE' %pM: (0x%04X) - %s\n",
4584 							  priv->essid_len,
4585 							  priv->essid,
4586 							  priv->bssid,
4587 							  le16_to_cpu(auth->status),
4588 							  ipw_get_status_code
4589 							  (le16_to_cpu
4590 							   (auth->status)));
4591 
4592 						priv->status &=
4593 						    ~(STATUS_ASSOCIATING |
4594 						      STATUS_AUTH |
4595 						      STATUS_ASSOCIATED);
4596 
4597 						schedule_work(&priv->link_down);
4598 						break;
4599 					}
4600 
4601 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4602 						  IPW_DL_ASSOC,
4603 						  "authenticated: '%*pE' %pM\n",
4604 						  priv->essid_len, priv->essid,
4605 						  priv->bssid);
4606 					break;
4607 				}
4608 
4609 			case CMAS_INIT:{
4610 					if (priv->status & STATUS_AUTH) {
4611 						struct
4612 						    libipw_assoc_response
4613 						*resp;
4614 						resp =
4615 						    (struct
4616 						     libipw_assoc_response
4617 						     *)&notif->u.raw;
4618 						IPW_DEBUG(IPW_DL_NOTIF |
4619 							  IPW_DL_STATE |
4620 							  IPW_DL_ASSOC,
4621 							  "association failed (0x%04X): %s\n",
4622 							  le16_to_cpu(resp->status),
4623 							  ipw_get_status_code
4624 							  (le16_to_cpu
4625 							   (resp->status)));
4626 					}
4627 
4628 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4629 						  IPW_DL_ASSOC,
4630 						  "disassociated: '%*pE' %pM\n",
4631 						  priv->essid_len, priv->essid,
4632 						  priv->bssid);
4633 
4634 					priv->status &=
4635 					    ~(STATUS_DISASSOCIATING |
4636 					      STATUS_ASSOCIATING |
4637 					      STATUS_ASSOCIATED | STATUS_AUTH);
4638 					if (priv->assoc_network
4639 					    && (priv->assoc_network->
4640 						capability &
4641 						WLAN_CAPABILITY_IBSS))
4642 						ipw_remove_current_network
4643 						    (priv);
4644 
4645 					schedule_work(&priv->link_down);
4646 
4647 					break;
4648 				}
4649 
4650 			case CMAS_RX_ASSOC_RESP:
4651 				break;
4652 
4653 			default:
4654 				IPW_ERROR("assoc: unknown (%d)\n",
4655 					  assoc->state);
4656 				break;
4657 			}
4658 
4659 			break;
4660 		}
4661 
4662 	case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4663 			struct notif_authenticate *auth = &notif->u.auth;
4664 			switch (auth->state) {
4665 			case CMAS_AUTHENTICATED:
4666 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4667 					  "authenticated: '%*pE' %pM\n",
4668 					  priv->essid_len, priv->essid,
4669 					  priv->bssid);
4670 				priv->status |= STATUS_AUTH;
4671 				break;
4672 
4673 			case CMAS_INIT:
4674 				if (priv->status & STATUS_AUTH) {
4675 					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4676 						  IPW_DL_ASSOC,
4677 						  "authentication failed (0x%04X): %s\n",
4678 						  le16_to_cpu(auth->status),
4679 						  ipw_get_status_code(le16_to_cpu
4680 								      (auth->
4681 								       status)));
4682 				}
4683 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4684 					  IPW_DL_ASSOC,
4685 					  "deauthenticated: '%*pE' %pM\n",
4686 					  priv->essid_len, priv->essid,
4687 					  priv->bssid);
4688 
4689 				priv->status &= ~(STATUS_ASSOCIATING |
4690 						  STATUS_AUTH |
4691 						  STATUS_ASSOCIATED);
4692 
4693 				schedule_work(&priv->link_down);
4694 				break;
4695 
4696 			case CMAS_TX_AUTH_SEQ_1:
4697 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4698 					  IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4699 				break;
4700 			case CMAS_RX_AUTH_SEQ_2:
4701 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4702 					  IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4703 				break;
4704 			case CMAS_AUTH_SEQ_1_PASS:
4705 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4706 					  IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4707 				break;
4708 			case CMAS_AUTH_SEQ_1_FAIL:
4709 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4710 					  IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4711 				break;
4712 			case CMAS_TX_AUTH_SEQ_3:
4713 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4714 					  IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4715 				break;
4716 			case CMAS_RX_AUTH_SEQ_4:
4717 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4718 					  IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4719 				break;
4720 			case CMAS_AUTH_SEQ_2_PASS:
4721 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4722 					  IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4723 				break;
4724 			case CMAS_AUTH_SEQ_2_FAIL:
4725 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4726 					  IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
4727 				break;
4728 			case CMAS_TX_ASSOC:
4729 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4730 					  IPW_DL_ASSOC, "TX_ASSOC\n");
4731 				break;
4732 			case CMAS_RX_ASSOC_RESP:
4733 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4734 					  IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4735 
4736 				break;
4737 			case CMAS_ASSOCIATED:
4738 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4739 					  IPW_DL_ASSOC, "ASSOCIATED\n");
4740 				break;
4741 			default:
4742 				IPW_DEBUG_NOTIF("auth: failure - %d\n",
4743 						auth->state);
4744 				break;
4745 			}
4746 			break;
4747 		}
4748 
4749 	case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4750 			struct notif_channel_result *x =
4751 			    &notif->u.channel_result;
4752 
4753 			if (size == sizeof(*x)) {
4754 				IPW_DEBUG_SCAN("Scan result for channel %d\n",
4755 					       x->channel_num);
4756 			} else {
4757 				IPW_DEBUG_SCAN("Scan result of wrong size %d "
4758 					       "(should be %zd)\n",
4759 					       size, sizeof(*x));
4760 			}
4761 			break;
4762 		}
4763 
4764 	case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4765 			struct notif_scan_complete *x = &notif->u.scan_complete;
4766 			if (size == sizeof(*x)) {
4767 				IPW_DEBUG_SCAN
4768 				    ("Scan completed: type %d, %d channels, "
4769 				     "%d status\n", x->scan_type,
4770 				     x->num_channels, x->status);
4771 			} else {
4772 				IPW_ERROR("Scan completed of wrong size %d "
4773 					  "(should be %zd)\n",
4774 					  size, sizeof(*x));
4775 			}
4776 
4777 			priv->status &=
4778 			    ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4779 
4780 			wake_up_interruptible(&priv->wait_state);
4781 			cancel_delayed_work(&priv->scan_check);
4782 
4783 			if (priv->status & STATUS_EXIT_PENDING)
4784 				break;
4785 
4786 			priv->ieee->scans++;
4787 
4788 #ifdef CONFIG_IPW2200_MONITOR
4789 			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4790 				priv->status |= STATUS_SCAN_FORCED;
4791 				schedule_delayed_work(&priv->request_scan, 0);
4792 				break;
4793 			}
4794 			priv->status &= ~STATUS_SCAN_FORCED;
4795 #endif				/* CONFIG_IPW2200_MONITOR */
4796 
4797 			/* Do queued direct scans first */
4798 			if (priv->status & STATUS_DIRECT_SCAN_PENDING)
4799 				schedule_delayed_work(&priv->request_direct_scan, 0);
4800 
4801 			if (!(priv->status & (STATUS_ASSOCIATED |
4802 					      STATUS_ASSOCIATING |
4803 					      STATUS_ROAMING |
4804 					      STATUS_DISASSOCIATING)))
4805 				schedule_work(&priv->associate);
4806 			else if (priv->status & STATUS_ROAMING) {
4807 				if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4808 					/* If a scan completed and we are in roam mode, then
4809 					 * the scan that completed was the one requested as a
4810 					 * result of entering roam... so, schedule the
4811 					 * roam work */
4812 					schedule_work(&priv->roam);
4813 				else
4814 					/* Don't schedule if we aborted the scan */
4815 					priv->status &= ~STATUS_ROAMING;
4816 			} else if (priv->status & STATUS_SCAN_PENDING)
4817 				schedule_delayed_work(&priv->request_scan, 0);
4818 			else if (priv->config & CFG_BACKGROUND_SCAN
4819 				 && priv->status & STATUS_ASSOCIATED)
4820 				schedule_delayed_work(&priv->request_scan,
4821 						      round_jiffies_relative(HZ));
4822 
4823 			/* Send an empty event to user space.
4824 			 * We don't send the received data on the event because
4825 			 * it would require us to do complex transcoding, and
4826 			 * we want to minimise the work done in the irq handler
4827 			 * Use a request to extract the data.
4828 			 * Also, we generate this even for any scan, regardless
4829 			 * on how the scan was initiated. User space can just
4830 			 * sync on periodic scan to get fresh data...
4831 			 * Jean II */
4832 			if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4833 				handle_scan_event(priv);
4834 			break;
4835 		}
4836 
4837 	case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4838 			struct notif_frag_length *x = &notif->u.frag_len;
4839 
4840 			if (size == sizeof(*x))
4841 				IPW_ERROR("Frag length: %d\n",
4842 					  le16_to_cpu(x->frag_length));
4843 			else
4844 				IPW_ERROR("Frag length of wrong size %d "
4845 					  "(should be %zd)\n",
4846 					  size, sizeof(*x));
4847 			break;
4848 		}
4849 
4850 	case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4851 			struct notif_link_deterioration *x =
4852 			    &notif->u.link_deterioration;
4853 
4854 			if (size == sizeof(*x)) {
4855 				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4856 					"link deterioration: type %d, cnt %d\n",
4857 					x->silence_notification_type,
4858 					x->silence_count);
4859 				memcpy(&priv->last_link_deterioration, x,
4860 				       sizeof(*x));
4861 			} else {
4862 				IPW_ERROR("Link Deterioration of wrong size %d "
4863 					  "(should be %zd)\n",
4864 					  size, sizeof(*x));
4865 			}
4866 			break;
4867 		}
4868 
4869 	case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4870 			IPW_ERROR("Dino config\n");
4871 			if (priv->hcmd
4872 			    && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4873 				IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4874 
4875 			break;
4876 		}
4877 
4878 	case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4879 			struct notif_beacon_state *x = &notif->u.beacon_state;
4880 			if (size != sizeof(*x)) {
4881 				IPW_ERROR
4882 				    ("Beacon state of wrong size %d (should "
4883 				     "be %zd)\n", size, sizeof(*x));
4884 				break;
4885 			}
4886 
4887 			if (le32_to_cpu(x->state) ==
4888 			    HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4889 				ipw_handle_missed_beacon(priv,
4890 							 le32_to_cpu(x->
4891 								     number));
4892 
4893 			break;
4894 		}
4895 
4896 	case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4897 			struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4898 			if (size == sizeof(*x)) {
4899 				IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4900 					  "0x%02x station %d\n",
4901 					  x->key_state, x->security_type,
4902 					  x->station_index);
4903 				break;
4904 			}
4905 
4906 			IPW_ERROR
4907 			    ("TGi Tx Key of wrong size %d (should be %zd)\n",
4908 			     size, sizeof(*x));
4909 			break;
4910 		}
4911 
4912 	case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4913 			struct notif_calibration *x = &notif->u.calibration;
4914 
4915 			if (size == sizeof(*x)) {
4916 				memcpy(&priv->calib, x, sizeof(*x));
4917 				IPW_DEBUG_INFO("TODO: Calibration\n");
4918 				break;
4919 			}
4920 
4921 			IPW_ERROR
4922 			    ("Calibration of wrong size %d (should be %zd)\n",
4923 			     size, sizeof(*x));
4924 			break;
4925 		}
4926 
4927 	case HOST_NOTIFICATION_NOISE_STATS:{
4928 			if (size == sizeof(u32)) {
4929 				priv->exp_avg_noise =
4930 				    exponential_average(priv->exp_avg_noise,
4931 				    (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4932 				    DEPTH_NOISE);
4933 				break;
4934 			}
4935 
4936 			IPW_ERROR
4937 			    ("Noise stat is wrong size %d (should be %zd)\n",
4938 			     size, sizeof(u32));
4939 			break;
4940 		}
4941 
4942 	default:
4943 		IPW_DEBUG_NOTIF("Unknown notification: "
4944 				"subtype=%d,flags=0x%2x,size=%d\n",
4945 				notif->subtype, notif->flags, size);
4946 	}
4947 }
4948 
4949 /**
4950  * Destroys all DMA structures and initialise them again
4951  *
4952  * @param priv
4953  * @return error code
4954  */
4955 static int ipw_queue_reset(struct ipw_priv *priv)
4956 {
4957 	int rc = 0;
4958 	/** @todo customize queue sizes */
4959 	int nTx = 64, nTxCmd = 8;
4960 	ipw_tx_queue_free(priv);
4961 	/* Tx CMD queue */
4962 	rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4963 			       IPW_TX_CMD_QUEUE_READ_INDEX,
4964 			       IPW_TX_CMD_QUEUE_WRITE_INDEX,
4965 			       IPW_TX_CMD_QUEUE_BD_BASE,
4966 			       IPW_TX_CMD_QUEUE_BD_SIZE);
4967 	if (rc) {
4968 		IPW_ERROR("Tx Cmd queue init failed\n");
4969 		goto error;
4970 	}
4971 	/* Tx queue(s) */
4972 	rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4973 			       IPW_TX_QUEUE_0_READ_INDEX,
4974 			       IPW_TX_QUEUE_0_WRITE_INDEX,
4975 			       IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4976 	if (rc) {
4977 		IPW_ERROR("Tx 0 queue init failed\n");
4978 		goto error;
4979 	}
4980 	rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4981 			       IPW_TX_QUEUE_1_READ_INDEX,
4982 			       IPW_TX_QUEUE_1_WRITE_INDEX,
4983 			       IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4984 	if (rc) {
4985 		IPW_ERROR("Tx 1 queue init failed\n");
4986 		goto error;
4987 	}
4988 	rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4989 			       IPW_TX_QUEUE_2_READ_INDEX,
4990 			       IPW_TX_QUEUE_2_WRITE_INDEX,
4991 			       IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4992 	if (rc) {
4993 		IPW_ERROR("Tx 2 queue init failed\n");
4994 		goto error;
4995 	}
4996 	rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4997 			       IPW_TX_QUEUE_3_READ_INDEX,
4998 			       IPW_TX_QUEUE_3_WRITE_INDEX,
4999 			       IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
5000 	if (rc) {
5001 		IPW_ERROR("Tx 3 queue init failed\n");
5002 		goto error;
5003 	}
5004 	/* statistics */
5005 	priv->rx_bufs_min = 0;
5006 	priv->rx_pend_max = 0;
5007 	return rc;
5008 
5009       error:
5010 	ipw_tx_queue_free(priv);
5011 	return rc;
5012 }
5013 
5014 /**
5015  * Reclaim Tx queue entries no more used by NIC.
5016  *
5017  * When FW advances 'R' index, all entries between old and
5018  * new 'R' index need to be reclaimed. As result, some free space
5019  * forms. If there is enough free space (> low mark), wake Tx queue.
5020  *
5021  * @note Need to protect against garbage in 'R' index
5022  * @param priv
5023  * @param txq
5024  * @param qindex
5025  * @return Number of used entries remains in the queue
5026  */
5027 static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
5028 				struct clx2_tx_queue *txq, int qindex)
5029 {
5030 	u32 hw_tail;
5031 	int used;
5032 	struct clx2_queue *q = &txq->q;
5033 
5034 	hw_tail = ipw_read32(priv, q->reg_r);
5035 	if (hw_tail >= q->n_bd) {
5036 		IPW_ERROR
5037 		    ("Read index for DMA queue (%d) is out of range [0-%d)\n",
5038 		     hw_tail, q->n_bd);
5039 		goto done;
5040 	}
5041 	for (; q->last_used != hw_tail;
5042 	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
5043 		ipw_queue_tx_free_tfd(priv, txq);
5044 		priv->tx_packets++;
5045 	}
5046       done:
5047 	if ((ipw_tx_queue_space(q) > q->low_mark) &&
5048 	    (qindex >= 0))
5049 		netif_wake_queue(priv->net_dev);
5050 	used = q->first_empty - q->last_used;
5051 	if (used < 0)
5052 		used += q->n_bd;
5053 
5054 	return used;
5055 }
5056 
5057 static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
5058 			     int len, int sync)
5059 {
5060 	struct clx2_tx_queue *txq = &priv->txq_cmd;
5061 	struct clx2_queue *q = &txq->q;
5062 	struct tfd_frame *tfd;
5063 
5064 	if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
5065 		IPW_ERROR("No space for Tx\n");
5066 		return -EBUSY;
5067 	}
5068 
5069 	tfd = &txq->bd[q->first_empty];
5070 	txq->txb[q->first_empty] = NULL;
5071 
5072 	memset(tfd, 0, sizeof(*tfd));
5073 	tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
5074 	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
5075 	priv->hcmd_seq++;
5076 	tfd->u.cmd.index = hcmd;
5077 	tfd->u.cmd.length = len;
5078 	memcpy(tfd->u.cmd.payload, buf, len);
5079 	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5080 	ipw_write32(priv, q->reg_w, q->first_empty);
5081 	_ipw_read32(priv, 0x90);
5082 
5083 	return 0;
5084 }
5085 
5086 /*
5087  * Rx theory of operation
5088  *
5089  * The host allocates 32 DMA target addresses and passes the host address
5090  * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5091  * 0 to 31
5092  *
5093  * Rx Queue Indexes
5094  * The host/firmware share two index registers for managing the Rx buffers.
5095  *
5096  * The READ index maps to the first position that the firmware may be writing
5097  * to -- the driver can read up to (but not including) this position and get
5098  * good data.
5099  * The READ index is managed by the firmware once the card is enabled.
5100  *
5101  * The WRITE index maps to the last position the driver has read from -- the
5102  * position preceding WRITE is the last slot the firmware can place a packet.
5103  *
5104  * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5105  * WRITE = READ.
5106  *
5107  * During initialization the host sets up the READ queue position to the first
5108  * INDEX position, and WRITE to the last (READ - 1 wrapped)
5109  *
5110  * When the firmware places a packet in a buffer it will advance the READ index
5111  * and fire the RX interrupt.  The driver can then query the READ index and
5112  * process as many packets as possible, moving the WRITE index forward as it
5113  * resets the Rx queue buffers with new memory.
5114  *
5115  * The management in the driver is as follows:
5116  * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free.  When
5117  *   ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5118  *   to replensish the ipw->rxq->rx_free.
5119  * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5120  *   ipw->rxq is replenished and the READ INDEX is updated (updating the
5121  *   'processed' and 'read' driver indexes as well)
5122  * + A received packet is processed and handed to the kernel network stack,
5123  *   detached from the ipw->rxq.  The driver 'processed' index is updated.
5124  * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5125  *   list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5126  *   INDEX is not incremented and ipw->status(RX_STALLED) is set.  If there
5127  *   were enough free buffers and RX_STALLED is set it is cleared.
5128  *
5129  *
5130  * Driver sequence:
5131  *
5132  * ipw_rx_queue_alloc()       Allocates rx_free
5133  * ipw_rx_queue_replenish()   Replenishes rx_free list from rx_used, and calls
5134  *                            ipw_rx_queue_restock
5135  * ipw_rx_queue_restock()     Moves available buffers from rx_free into Rx
5136  *                            queue, updates firmware pointers, and updates
5137  *                            the WRITE index.  If insufficient rx_free buffers
5138  *                            are available, schedules ipw_rx_queue_replenish
5139  *
5140  * -- enable interrupts --
5141  * ISR - ipw_rx()             Detach ipw_rx_mem_buffers from pool up to the
5142  *                            READ INDEX, detaching the SKB from the pool.
5143  *                            Moves the packet buffer from queue to rx_used.
5144  *                            Calls ipw_rx_queue_restock to refill any empty
5145  *                            slots.
5146  * ...
5147  *
5148  */
5149 
5150 /*
5151  * If there are slots in the RX queue that  need to be restocked,
5152  * and we have free pre-allocated buffers, fill the ranks as much
5153  * as we can pulling from rx_free.
5154  *
5155  * This moves the 'write' index forward to catch up with 'processed', and
5156  * also updates the memory address in the firmware to reference the new
5157  * target buffer.
5158  */
5159 static void ipw_rx_queue_restock(struct ipw_priv *priv)
5160 {
5161 	struct ipw_rx_queue *rxq = priv->rxq;
5162 	struct list_head *element;
5163 	struct ipw_rx_mem_buffer *rxb;
5164 	unsigned long flags;
5165 	int write;
5166 
5167 	spin_lock_irqsave(&rxq->lock, flags);
5168 	write = rxq->write;
5169 	while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5170 		element = rxq->rx_free.next;
5171 		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5172 		list_del(element);
5173 
5174 		ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5175 			    rxb->dma_addr);
5176 		rxq->queue[rxq->write] = rxb;
5177 		rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5178 		rxq->free_count--;
5179 	}
5180 	spin_unlock_irqrestore(&rxq->lock, flags);
5181 
5182 	/* If the pre-allocated buffer pool is dropping low, schedule to
5183 	 * refill it */
5184 	if (rxq->free_count <= RX_LOW_WATERMARK)
5185 		schedule_work(&priv->rx_replenish);
5186 
5187 	/* If we've added more space for the firmware to place data, tell it */
5188 	if (write != rxq->write)
5189 		ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5190 }
5191 
5192 /*
5193  * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
5194  * Also restock the Rx queue via ipw_rx_queue_restock.
5195  *
5196  * This is called as a scheduled work item (except for during initialization)
5197  */
5198 static void ipw_rx_queue_replenish(void *data)
5199 {
5200 	struct ipw_priv *priv = data;
5201 	struct ipw_rx_queue *rxq = priv->rxq;
5202 	struct list_head *element;
5203 	struct ipw_rx_mem_buffer *rxb;
5204 	unsigned long flags;
5205 
5206 	spin_lock_irqsave(&rxq->lock, flags);
5207 	while (!list_empty(&rxq->rx_used)) {
5208 		element = rxq->rx_used.next;
5209 		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5210 		rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5211 		if (!rxb->skb) {
5212 			printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5213 			       priv->net_dev->name);
5214 			/* We don't reschedule replenish work here -- we will
5215 			 * call the restock method and if it still needs
5216 			 * more buffers it will schedule replenish */
5217 			break;
5218 		}
5219 		list_del(element);
5220 
5221 		rxb->dma_addr =
5222 		    pci_map_single(priv->pci_dev, rxb->skb->data,
5223 				   IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5224 
5225 		list_add_tail(&rxb->list, &rxq->rx_free);
5226 		rxq->free_count++;
5227 	}
5228 	spin_unlock_irqrestore(&rxq->lock, flags);
5229 
5230 	ipw_rx_queue_restock(priv);
5231 }
5232 
5233 static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5234 {
5235 	struct ipw_priv *priv =
5236 		container_of(work, struct ipw_priv, rx_replenish);
5237 	mutex_lock(&priv->mutex);
5238 	ipw_rx_queue_replenish(priv);
5239 	mutex_unlock(&priv->mutex);
5240 }
5241 
5242 /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5243  * If an SKB has been detached, the POOL needs to have its SKB set to NULL
5244  * This free routine walks the list of POOL entries and if SKB is set to
5245  * non NULL it is unmapped and freed
5246  */
5247 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5248 {
5249 	int i;
5250 
5251 	if (!rxq)
5252 		return;
5253 
5254 	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5255 		if (rxq->pool[i].skb != NULL) {
5256 			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5257 					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5258 			dev_kfree_skb(rxq->pool[i].skb);
5259 		}
5260 	}
5261 
5262 	kfree(rxq);
5263 }
5264 
5265 static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5266 {
5267 	struct ipw_rx_queue *rxq;
5268 	int i;
5269 
5270 	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5271 	if (unlikely(!rxq)) {
5272 		IPW_ERROR("memory allocation failed\n");
5273 		return NULL;
5274 	}
5275 	spin_lock_init(&rxq->lock);
5276 	INIT_LIST_HEAD(&rxq->rx_free);
5277 	INIT_LIST_HEAD(&rxq->rx_used);
5278 
5279 	/* Fill the rx_used queue with _all_ of the Rx buffers */
5280 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5281 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5282 
5283 	/* Set us so that we have processed and used all buffers, but have
5284 	 * not restocked the Rx queue with fresh buffers */
5285 	rxq->read = rxq->write = 0;
5286 	rxq->free_count = 0;
5287 
5288 	return rxq;
5289 }
5290 
5291 static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5292 {
5293 	rate &= ~LIBIPW_BASIC_RATE_MASK;
5294 	if (ieee_mode == IEEE_A) {
5295 		switch (rate) {
5296 		case LIBIPW_OFDM_RATE_6MB:
5297 			return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
5298 			    1 : 0;
5299 		case LIBIPW_OFDM_RATE_9MB:
5300 			return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
5301 			    1 : 0;
5302 		case LIBIPW_OFDM_RATE_12MB:
5303 			return priv->
5304 			    rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5305 		case LIBIPW_OFDM_RATE_18MB:
5306 			return priv->
5307 			    rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5308 		case LIBIPW_OFDM_RATE_24MB:
5309 			return priv->
5310 			    rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5311 		case LIBIPW_OFDM_RATE_36MB:
5312 			return priv->
5313 			    rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5314 		case LIBIPW_OFDM_RATE_48MB:
5315 			return priv->
5316 			    rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5317 		case LIBIPW_OFDM_RATE_54MB:
5318 			return priv->
5319 			    rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5320 		default:
5321 			return 0;
5322 		}
5323 	}
5324 
5325 	/* B and G mixed */
5326 	switch (rate) {
5327 	case LIBIPW_CCK_RATE_1MB:
5328 		return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
5329 	case LIBIPW_CCK_RATE_2MB:
5330 		return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
5331 	case LIBIPW_CCK_RATE_5MB:
5332 		return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
5333 	case LIBIPW_CCK_RATE_11MB:
5334 		return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
5335 	}
5336 
5337 	/* If we are limited to B modulations, bail at this point */
5338 	if (ieee_mode == IEEE_B)
5339 		return 0;
5340 
5341 	/* G */
5342 	switch (rate) {
5343 	case LIBIPW_OFDM_RATE_6MB:
5344 		return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
5345 	case LIBIPW_OFDM_RATE_9MB:
5346 		return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
5347 	case LIBIPW_OFDM_RATE_12MB:
5348 		return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
5349 	case LIBIPW_OFDM_RATE_18MB:
5350 		return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
5351 	case LIBIPW_OFDM_RATE_24MB:
5352 		return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
5353 	case LIBIPW_OFDM_RATE_36MB:
5354 		return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
5355 	case LIBIPW_OFDM_RATE_48MB:
5356 		return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
5357 	case LIBIPW_OFDM_RATE_54MB:
5358 		return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
5359 	}
5360 
5361 	return 0;
5362 }
5363 
5364 static int ipw_compatible_rates(struct ipw_priv *priv,
5365 				const struct libipw_network *network,
5366 				struct ipw_supported_rates *rates)
5367 {
5368 	int num_rates, i;
5369 
5370 	memset(rates, 0, sizeof(*rates));
5371 	num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5372 	rates->num_rates = 0;
5373 	for (i = 0; i < num_rates; i++) {
5374 		if (!ipw_is_rate_in_mask(priv, network->mode,
5375 					 network->rates[i])) {
5376 
5377 			if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
5378 				IPW_DEBUG_SCAN("Adding masked mandatory "
5379 					       "rate %02X\n",
5380 					       network->rates[i]);
5381 				rates->supported_rates[rates->num_rates++] =
5382 				    network->rates[i];
5383 				continue;
5384 			}
5385 
5386 			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5387 				       network->rates[i], priv->rates_mask);
5388 			continue;
5389 		}
5390 
5391 		rates->supported_rates[rates->num_rates++] = network->rates[i];
5392 	}
5393 
5394 	num_rates = min(network->rates_ex_len,
5395 			(u8) (IPW_MAX_RATES - num_rates));
5396 	for (i = 0; i < num_rates; i++) {
5397 		if (!ipw_is_rate_in_mask(priv, network->mode,
5398 					 network->rates_ex[i])) {
5399 			if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
5400 				IPW_DEBUG_SCAN("Adding masked mandatory "
5401 					       "rate %02X\n",
5402 					       network->rates_ex[i]);
5403 				rates->supported_rates[rates->num_rates++] =
5404 				    network->rates[i];
5405 				continue;
5406 			}
5407 
5408 			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5409 				       network->rates_ex[i], priv->rates_mask);
5410 			continue;
5411 		}
5412 
5413 		rates->supported_rates[rates->num_rates++] =
5414 		    network->rates_ex[i];
5415 	}
5416 
5417 	return 1;
5418 }
5419 
5420 static void ipw_copy_rates(struct ipw_supported_rates *dest,
5421 				  const struct ipw_supported_rates *src)
5422 {
5423 	u8 i;
5424 	for (i = 0; i < src->num_rates; i++)
5425 		dest->supported_rates[i] = src->supported_rates[i];
5426 	dest->num_rates = src->num_rates;
5427 }
5428 
5429 /* TODO: Look at sniffed packets in the air to determine if the basic rate
5430  * mask should ever be used -- right now all callers to add the scan rates are
5431  * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5432 static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5433 				   u8 modulation, u32 rate_mask)
5434 {
5435 	u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5436 	    LIBIPW_BASIC_RATE_MASK : 0;
5437 
5438 	if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
5439 		rates->supported_rates[rates->num_rates++] =
5440 		    LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
5441 
5442 	if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
5443 		rates->supported_rates[rates->num_rates++] =
5444 		    LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
5445 
5446 	if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
5447 		rates->supported_rates[rates->num_rates++] = basic_mask |
5448 		    LIBIPW_CCK_RATE_5MB;
5449 
5450 	if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
5451 		rates->supported_rates[rates->num_rates++] = basic_mask |
5452 		    LIBIPW_CCK_RATE_11MB;
5453 }
5454 
5455 static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5456 				    u8 modulation, u32 rate_mask)
5457 {
5458 	u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
5459 	    LIBIPW_BASIC_RATE_MASK : 0;
5460 
5461 	if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
5462 		rates->supported_rates[rates->num_rates++] = basic_mask |
5463 		    LIBIPW_OFDM_RATE_6MB;
5464 
5465 	if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
5466 		rates->supported_rates[rates->num_rates++] =
5467 		    LIBIPW_OFDM_RATE_9MB;
5468 
5469 	if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
5470 		rates->supported_rates[rates->num_rates++] = basic_mask |
5471 		    LIBIPW_OFDM_RATE_12MB;
5472 
5473 	if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
5474 		rates->supported_rates[rates->num_rates++] =
5475 		    LIBIPW_OFDM_RATE_18MB;
5476 
5477 	if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
5478 		rates->supported_rates[rates->num_rates++] = basic_mask |
5479 		    LIBIPW_OFDM_RATE_24MB;
5480 
5481 	if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
5482 		rates->supported_rates[rates->num_rates++] =
5483 		    LIBIPW_OFDM_RATE_36MB;
5484 
5485 	if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
5486 		rates->supported_rates[rates->num_rates++] =
5487 		    LIBIPW_OFDM_RATE_48MB;
5488 
5489 	if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
5490 		rates->supported_rates[rates->num_rates++] =
5491 		    LIBIPW_OFDM_RATE_54MB;
5492 }
5493 
5494 struct ipw_network_match {
5495 	struct libipw_network *network;
5496 	struct ipw_supported_rates rates;
5497 };
5498 
5499 static int ipw_find_adhoc_network(struct ipw_priv *priv,
5500 				  struct ipw_network_match *match,
5501 				  struct libipw_network *network,
5502 				  int roaming)
5503 {
5504 	struct ipw_supported_rates rates;
5505 
5506 	/* Verify that this network's capability is compatible with the
5507 	 * current mode (AdHoc or Infrastructure) */
5508 	if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5509 	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
5510 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded due to capability mismatch.\n",
5511 				network->ssid_len, network->ssid,
5512 				network->bssid);
5513 		return 0;
5514 	}
5515 
5516 	if (unlikely(roaming)) {
5517 		/* If we are roaming, then ensure check if this is a valid
5518 		 * network to try and roam to */
5519 		if ((network->ssid_len != match->network->ssid_len) ||
5520 		    memcmp(network->ssid, match->network->ssid,
5521 			   network->ssid_len)) {
5522 			IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of non-network ESSID.\n",
5523 					network->ssid_len, network->ssid,
5524 					network->bssid);
5525 			return 0;
5526 		}
5527 	} else {
5528 		/* If an ESSID has been configured then compare the broadcast
5529 		 * ESSID to ours */
5530 		if ((priv->config & CFG_STATIC_ESSID) &&
5531 		    ((network->ssid_len != priv->essid_len) ||
5532 		     memcmp(network->ssid, priv->essid,
5533 			    min(network->ssid_len, priv->essid_len)))) {
5534 			IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n",
5535 					network->ssid_len, network->ssid,
5536 					network->bssid, priv->essid_len,
5537 					priv->essid);
5538 			return 0;
5539 		}
5540 	}
5541 
5542 	/* If the old network rate is better than this one, don't bother
5543 	 * testing everything else. */
5544 
5545 	if (network->time_stamp[0] < match->network->time_stamp[0]) {
5546 		IPW_DEBUG_MERGE("Network '%*pE excluded because newer than current network.\n",
5547 				match->network->ssid_len, match->network->ssid);
5548 		return 0;
5549 	} else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5550 		IPW_DEBUG_MERGE("Network '%*pE excluded because newer than current network.\n",
5551 				match->network->ssid_len, match->network->ssid);
5552 		return 0;
5553 	}
5554 
5555 	/* Now go through and see if the requested network is valid... */
5556 	if (priv->ieee->scan_age != 0 &&
5557 	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5558 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of age: %ums.\n",
5559 				network->ssid_len, network->ssid,
5560 				network->bssid,
5561 				jiffies_to_msecs(jiffies -
5562 						 network->last_scanned));
5563 		return 0;
5564 	}
5565 
5566 	if ((priv->config & CFG_STATIC_CHANNEL) &&
5567 	    (network->channel != priv->channel)) {
5568 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n",
5569 				network->ssid_len, network->ssid,
5570 				network->bssid,
5571 				network->channel, priv->channel);
5572 		return 0;
5573 	}
5574 
5575 	/* Verify privacy compatibility */
5576 	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5577 	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5578 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n",
5579 				network->ssid_len, network->ssid,
5580 				network->bssid,
5581 				priv->
5582 				capability & CAP_PRIVACY_ON ? "on" : "off",
5583 				network->
5584 				capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5585 				"off");
5586 		return 0;
5587 	}
5588 
5589 	if (ether_addr_equal(network->bssid, priv->bssid)) {
5590 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of the same BSSID match: %pM.\n",
5591 				network->ssid_len, network->ssid,
5592 				network->bssid, priv->bssid);
5593 		return 0;
5594 	}
5595 
5596 	/* Filter out any incompatible freq / mode combinations */
5597 	if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5598 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n",
5599 				network->ssid_len, network->ssid,
5600 				network->bssid);
5601 		return 0;
5602 	}
5603 
5604 	/* Ensure that the rates supported by the driver are compatible with
5605 	 * this AP, including verification of basic rates (mandatory) */
5606 	if (!ipw_compatible_rates(priv, network, &rates)) {
5607 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n",
5608 				network->ssid_len, network->ssid,
5609 				network->bssid);
5610 		return 0;
5611 	}
5612 
5613 	if (rates.num_rates == 0) {
5614 		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of no compatible rates.\n",
5615 				network->ssid_len, network->ssid,
5616 				network->bssid);
5617 		return 0;
5618 	}
5619 
5620 	/* TODO: Perform any further minimal comparititive tests.  We do not
5621 	 * want to put too much policy logic here; intelligent scan selection
5622 	 * should occur within a generic IEEE 802.11 user space tool.  */
5623 
5624 	/* Set up 'new' AP to this network */
5625 	ipw_copy_rates(&match->rates, &rates);
5626 	match->network = network;
5627 	IPW_DEBUG_MERGE("Network '%*pE (%pM)' is a viable match.\n",
5628 			network->ssid_len, network->ssid, network->bssid);
5629 
5630 	return 1;
5631 }
5632 
5633 static void ipw_merge_adhoc_network(struct work_struct *work)
5634 {
5635 	struct ipw_priv *priv =
5636 		container_of(work, struct ipw_priv, merge_networks);
5637 	struct libipw_network *network = NULL;
5638 	struct ipw_network_match match = {
5639 		.network = priv->assoc_network
5640 	};
5641 
5642 	if ((priv->status & STATUS_ASSOCIATED) &&
5643 	    (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5644 		/* First pass through ROAM process -- look for a better
5645 		 * network */
5646 		unsigned long flags;
5647 
5648 		spin_lock_irqsave(&priv->ieee->lock, flags);
5649 		list_for_each_entry(network, &priv->ieee->network_list, list) {
5650 			if (network != priv->assoc_network)
5651 				ipw_find_adhoc_network(priv, &match, network,
5652 						       1);
5653 		}
5654 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
5655 
5656 		if (match.network == priv->assoc_network) {
5657 			IPW_DEBUG_MERGE("No better ADHOC in this network to "
5658 					"merge to.\n");
5659 			return;
5660 		}
5661 
5662 		mutex_lock(&priv->mutex);
5663 		if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5664 			IPW_DEBUG_MERGE("remove network %*pE\n",
5665 					priv->essid_len, priv->essid);
5666 			ipw_remove_current_network(priv);
5667 		}
5668 
5669 		ipw_disassociate(priv);
5670 		priv->assoc_network = match.network;
5671 		mutex_unlock(&priv->mutex);
5672 		return;
5673 	}
5674 }
5675 
5676 static int ipw_best_network(struct ipw_priv *priv,
5677 			    struct ipw_network_match *match,
5678 			    struct libipw_network *network, int roaming)
5679 {
5680 	struct ipw_supported_rates rates;
5681 
5682 	/* Verify that this network's capability is compatible with the
5683 	 * current mode (AdHoc or Infrastructure) */
5684 	if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5685 	     !(network->capability & WLAN_CAPABILITY_ESS)) ||
5686 	    (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5687 	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
5688 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded due to capability mismatch.\n",
5689 				network->ssid_len, network->ssid,
5690 				network->bssid);
5691 		return 0;
5692 	}
5693 
5694 	if (unlikely(roaming)) {
5695 		/* If we are roaming, then ensure check if this is a valid
5696 		 * network to try and roam to */
5697 		if ((network->ssid_len != match->network->ssid_len) ||
5698 		    memcmp(network->ssid, match->network->ssid,
5699 			   network->ssid_len)) {
5700 			IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of non-network ESSID.\n",
5701 					network->ssid_len, network->ssid,
5702 					network->bssid);
5703 			return 0;
5704 		}
5705 	} else {
5706 		/* If an ESSID has been configured then compare the broadcast
5707 		 * ESSID to ours */
5708 		if ((priv->config & CFG_STATIC_ESSID) &&
5709 		    ((network->ssid_len != priv->essid_len) ||
5710 		     memcmp(network->ssid, priv->essid,
5711 			    min(network->ssid_len, priv->essid_len)))) {
5712 			IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n",
5713 					network->ssid_len, network->ssid,
5714 					network->bssid, priv->essid_len,
5715 					priv->essid);
5716 			return 0;
5717 		}
5718 	}
5719 
5720 	/* If the old network rate is better than this one, don't bother
5721 	 * testing everything else. */
5722 	if (match->network && match->network->stats.rssi > network->stats.rssi) {
5723 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because '%*pE (%pM)' has a stronger signal.\n",
5724 				network->ssid_len, network->ssid,
5725 				network->bssid, match->network->ssid_len,
5726 				match->network->ssid, match->network->bssid);
5727 		return 0;
5728 	}
5729 
5730 	/* If this network has already had an association attempt within the
5731 	 * last 3 seconds, do not try and associate again... */
5732 	if (network->last_associate &&
5733 	    time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5734 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of storming (%ums since last assoc attempt).\n",
5735 				network->ssid_len, network->ssid,
5736 				network->bssid,
5737 				jiffies_to_msecs(jiffies -
5738 						 network->last_associate));
5739 		return 0;
5740 	}
5741 
5742 	/* Now go through and see if the requested network is valid... */
5743 	if (priv->ieee->scan_age != 0 &&
5744 	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5745 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of age: %ums.\n",
5746 				network->ssid_len, network->ssid,
5747 				network->bssid,
5748 				jiffies_to_msecs(jiffies -
5749 						 network->last_scanned));
5750 		return 0;
5751 	}
5752 
5753 	if ((priv->config & CFG_STATIC_CHANNEL) &&
5754 	    (network->channel != priv->channel)) {
5755 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n",
5756 				network->ssid_len, network->ssid,
5757 				network->bssid,
5758 				network->channel, priv->channel);
5759 		return 0;
5760 	}
5761 
5762 	/* Verify privacy compatibility */
5763 	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5764 	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5765 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n",
5766 				network->ssid_len, network->ssid,
5767 				network->bssid,
5768 				priv->capability & CAP_PRIVACY_ON ? "on" :
5769 				"off",
5770 				network->capability &
5771 				WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5772 		return 0;
5773 	}
5774 
5775 	if ((priv->config & CFG_STATIC_BSSID) &&
5776 	    !ether_addr_equal(network->bssid, priv->bssid)) {
5777 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of BSSID mismatch: %pM.\n",
5778 				network->ssid_len, network->ssid,
5779 				network->bssid, priv->bssid);
5780 		return 0;
5781 	}
5782 
5783 	/* Filter out any incompatible freq / mode combinations */
5784 	if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
5785 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n",
5786 				network->ssid_len, network->ssid,
5787 				network->bssid);
5788 		return 0;
5789 	}
5790 
5791 	/* Filter out invalid channel in current GEO */
5792 	if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
5793 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid channel in current GEO\n",
5794 				network->ssid_len, network->ssid,
5795 				network->bssid);
5796 		return 0;
5797 	}
5798 
5799 	/* Ensure that the rates supported by the driver are compatible with
5800 	 * this AP, including verification of basic rates (mandatory) */
5801 	if (!ipw_compatible_rates(priv, network, &rates)) {
5802 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n",
5803 				network->ssid_len, network->ssid,
5804 				network->bssid);
5805 		return 0;
5806 	}
5807 
5808 	if (rates.num_rates == 0) {
5809 		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of no compatible rates.\n",
5810 				network->ssid_len, network->ssid,
5811 				network->bssid);
5812 		return 0;
5813 	}
5814 
5815 	/* TODO: Perform any further minimal comparititive tests.  We do not
5816 	 * want to put too much policy logic here; intelligent scan selection
5817 	 * should occur within a generic IEEE 802.11 user space tool.  */
5818 
5819 	/* Set up 'new' AP to this network */
5820 	ipw_copy_rates(&match->rates, &rates);
5821 	match->network = network;
5822 
5823 	IPW_DEBUG_ASSOC("Network '%*pE (%pM)' is a viable match.\n",
5824 			network->ssid_len, network->ssid, network->bssid);
5825 
5826 	return 1;
5827 }
5828 
5829 static void ipw_adhoc_create(struct ipw_priv *priv,
5830 			     struct libipw_network *network)
5831 {
5832 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
5833 	int i;
5834 
5835 	/*
5836 	 * For the purposes of scanning, we can set our wireless mode
5837 	 * to trigger scans across combinations of bands, but when it
5838 	 * comes to creating a new ad-hoc network, we have tell the FW
5839 	 * exactly which band to use.
5840 	 *
5841 	 * We also have the possibility of an invalid channel for the
5842 	 * chossen band.  Attempting to create a new ad-hoc network
5843 	 * with an invalid channel for wireless mode will trigger a
5844 	 * FW fatal error.
5845 	 *
5846 	 */
5847 	switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
5848 	case LIBIPW_52GHZ_BAND:
5849 		network->mode = IEEE_A;
5850 		i = libipw_channel_to_index(priv->ieee, priv->channel);
5851 		BUG_ON(i == -1);
5852 		if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5853 			IPW_WARNING("Overriding invalid channel\n");
5854 			priv->channel = geo->a[0].channel;
5855 		}
5856 		break;
5857 
5858 	case LIBIPW_24GHZ_BAND:
5859 		if (priv->ieee->mode & IEEE_G)
5860 			network->mode = IEEE_G;
5861 		else
5862 			network->mode = IEEE_B;
5863 		i = libipw_channel_to_index(priv->ieee, priv->channel);
5864 		BUG_ON(i == -1);
5865 		if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
5866 			IPW_WARNING("Overriding invalid channel\n");
5867 			priv->channel = geo->bg[0].channel;
5868 		}
5869 		break;
5870 
5871 	default:
5872 		IPW_WARNING("Overriding invalid channel\n");
5873 		if (priv->ieee->mode & IEEE_A) {
5874 			network->mode = IEEE_A;
5875 			priv->channel = geo->a[0].channel;
5876 		} else if (priv->ieee->mode & IEEE_G) {
5877 			network->mode = IEEE_G;
5878 			priv->channel = geo->bg[0].channel;
5879 		} else {
5880 			network->mode = IEEE_B;
5881 			priv->channel = geo->bg[0].channel;
5882 		}
5883 		break;
5884 	}
5885 
5886 	network->channel = priv->channel;
5887 	priv->config |= CFG_ADHOC_PERSIST;
5888 	ipw_create_bssid(priv, network->bssid);
5889 	network->ssid_len = priv->essid_len;
5890 	memcpy(network->ssid, priv->essid, priv->essid_len);
5891 	memset(&network->stats, 0, sizeof(network->stats));
5892 	network->capability = WLAN_CAPABILITY_IBSS;
5893 	if (!(priv->config & CFG_PREAMBLE_LONG))
5894 		network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5895 	if (priv->capability & CAP_PRIVACY_ON)
5896 		network->capability |= WLAN_CAPABILITY_PRIVACY;
5897 	network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5898 	memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5899 	network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5900 	memcpy(network->rates_ex,
5901 	       &priv->rates.supported_rates[network->rates_len],
5902 	       network->rates_ex_len);
5903 	network->last_scanned = 0;
5904 	network->flags = 0;
5905 	network->last_associate = 0;
5906 	network->time_stamp[0] = 0;
5907 	network->time_stamp[1] = 0;
5908 	network->beacon_interval = 100;	/* Default */
5909 	network->listen_interval = 10;	/* Default */
5910 	network->atim_window = 0;	/* Default */
5911 	network->wpa_ie_len = 0;
5912 	network->rsn_ie_len = 0;
5913 }
5914 
5915 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5916 {
5917 	struct ipw_tgi_tx_key key;
5918 
5919 	if (!(priv->ieee->sec.flags & (1 << index)))
5920 		return;
5921 
5922 	key.key_id = index;
5923 	memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5924 	key.security_type = type;
5925 	key.station_index = 0;	/* always 0 for BSS */
5926 	key.flags = 0;
5927 	/* 0 for new key; previous value of counter (after fatal error) */
5928 	key.tx_counter[0] = cpu_to_le32(0);
5929 	key.tx_counter[1] = cpu_to_le32(0);
5930 
5931 	ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5932 }
5933 
5934 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5935 {
5936 	struct ipw_wep_key key;
5937 	int i;
5938 
5939 	key.cmd_id = DINO_CMD_WEP_KEY;
5940 	key.seq_num = 0;
5941 
5942 	/* Note: AES keys cannot be set for multiple times.
5943 	 * Only set it at the first time. */
5944 	for (i = 0; i < 4; i++) {
5945 		key.key_index = i | type;
5946 		if (!(priv->ieee->sec.flags & (1 << i))) {
5947 			key.key_size = 0;
5948 			continue;
5949 		}
5950 
5951 		key.key_size = priv->ieee->sec.key_sizes[i];
5952 		memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5953 
5954 		ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5955 	}
5956 }
5957 
5958 static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5959 {
5960 	if (priv->ieee->host_encrypt)
5961 		return;
5962 
5963 	switch (level) {
5964 	case SEC_LEVEL_3:
5965 		priv->sys_config.disable_unicast_decryption = 0;
5966 		priv->ieee->host_decrypt = 0;
5967 		break;
5968 	case SEC_LEVEL_2:
5969 		priv->sys_config.disable_unicast_decryption = 1;
5970 		priv->ieee->host_decrypt = 1;
5971 		break;
5972 	case SEC_LEVEL_1:
5973 		priv->sys_config.disable_unicast_decryption = 0;
5974 		priv->ieee->host_decrypt = 0;
5975 		break;
5976 	case SEC_LEVEL_0:
5977 		priv->sys_config.disable_unicast_decryption = 1;
5978 		break;
5979 	default:
5980 		break;
5981 	}
5982 }
5983 
5984 static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5985 {
5986 	if (priv->ieee->host_encrypt)
5987 		return;
5988 
5989 	switch (level) {
5990 	case SEC_LEVEL_3:
5991 		priv->sys_config.disable_multicast_decryption = 0;
5992 		break;
5993 	case SEC_LEVEL_2:
5994 		priv->sys_config.disable_multicast_decryption = 1;
5995 		break;
5996 	case SEC_LEVEL_1:
5997 		priv->sys_config.disable_multicast_decryption = 0;
5998 		break;
5999 	case SEC_LEVEL_0:
6000 		priv->sys_config.disable_multicast_decryption = 1;
6001 		break;
6002 	default:
6003 		break;
6004 	}
6005 }
6006 
6007 static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
6008 {
6009 	switch (priv->ieee->sec.level) {
6010 	case SEC_LEVEL_3:
6011 		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6012 			ipw_send_tgi_tx_key(priv,
6013 					    DCT_FLAG_EXT_SECURITY_CCM,
6014 					    priv->ieee->sec.active_key);
6015 
6016 		if (!priv->ieee->host_mc_decrypt)
6017 			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6018 		break;
6019 	case SEC_LEVEL_2:
6020 		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6021 			ipw_send_tgi_tx_key(priv,
6022 					    DCT_FLAG_EXT_SECURITY_TKIP,
6023 					    priv->ieee->sec.active_key);
6024 		break;
6025 	case SEC_LEVEL_1:
6026 		ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6027 		ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6028 		ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6029 		break;
6030 	case SEC_LEVEL_0:
6031 	default:
6032 		break;
6033 	}
6034 }
6035 
6036 static void ipw_adhoc_check(void *data)
6037 {
6038 	struct ipw_priv *priv = data;
6039 
6040 	if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6041 	    !(priv->config & CFG_ADHOC_PERSIST)) {
6042 		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6043 			  IPW_DL_STATE | IPW_DL_ASSOC,
6044 			  "Missed beacon: %d - disassociate\n",
6045 			  priv->missed_adhoc_beacons);
6046 		ipw_remove_current_network(priv);
6047 		ipw_disassociate(priv);
6048 		return;
6049 	}
6050 
6051 	schedule_delayed_work(&priv->adhoc_check,
6052 			      le16_to_cpu(priv->assoc_request.beacon_interval));
6053 }
6054 
6055 static void ipw_bg_adhoc_check(struct work_struct *work)
6056 {
6057 	struct ipw_priv *priv =
6058 		container_of(work, struct ipw_priv, adhoc_check.work);
6059 	mutex_lock(&priv->mutex);
6060 	ipw_adhoc_check(priv);
6061 	mutex_unlock(&priv->mutex);
6062 }
6063 
6064 static void ipw_debug_config(struct ipw_priv *priv)
6065 {
6066 	IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6067 		       "[CFG 0x%08X]\n", priv->config);
6068 	if (priv->config & CFG_STATIC_CHANNEL)
6069 		IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6070 	else
6071 		IPW_DEBUG_INFO("Channel unlocked.\n");
6072 	if (priv->config & CFG_STATIC_ESSID)
6073 		IPW_DEBUG_INFO("ESSID locked to '%*pE'\n",
6074 			       priv->essid_len, priv->essid);
6075 	else
6076 		IPW_DEBUG_INFO("ESSID unlocked.\n");
6077 	if (priv->config & CFG_STATIC_BSSID)
6078 		IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6079 	else
6080 		IPW_DEBUG_INFO("BSSID unlocked.\n");
6081 	if (priv->capability & CAP_PRIVACY_ON)
6082 		IPW_DEBUG_INFO("PRIVACY on\n");
6083 	else
6084 		IPW_DEBUG_INFO("PRIVACY off\n");
6085 	IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6086 }
6087 
6088 static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6089 {
6090 	/* TODO: Verify that this works... */
6091 	struct ipw_fixed_rate fr;
6092 	u32 reg;
6093 	u16 mask = 0;
6094 	u16 new_tx_rates = priv->rates_mask;
6095 
6096 	/* Identify 'current FW band' and match it with the fixed
6097 	 * Tx rates */
6098 
6099 	switch (priv->ieee->freq_band) {
6100 	case LIBIPW_52GHZ_BAND:	/* A only */
6101 		/* IEEE_A */
6102 		if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
6103 			/* Invalid fixed rate mask */
6104 			IPW_DEBUG_WX
6105 			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6106 			new_tx_rates = 0;
6107 			break;
6108 		}
6109 
6110 		new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
6111 		break;
6112 
6113 	default:		/* 2.4Ghz or Mixed */
6114 		/* IEEE_B */
6115 		if (mode == IEEE_B) {
6116 			if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
6117 				/* Invalid fixed rate mask */
6118 				IPW_DEBUG_WX
6119 				    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6120 				new_tx_rates = 0;
6121 			}
6122 			break;
6123 		}
6124 
6125 		/* IEEE_G */
6126 		if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
6127 				    LIBIPW_OFDM_RATES_MASK)) {
6128 			/* Invalid fixed rate mask */
6129 			IPW_DEBUG_WX
6130 			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6131 			new_tx_rates = 0;
6132 			break;
6133 		}
6134 
6135 		if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
6136 			mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
6137 			new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
6138 		}
6139 
6140 		if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
6141 			mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
6142 			new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
6143 		}
6144 
6145 		if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
6146 			mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
6147 			new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
6148 		}
6149 
6150 		new_tx_rates |= mask;
6151 		break;
6152 	}
6153 
6154 	fr.tx_rates = cpu_to_le16(new_tx_rates);
6155 
6156 	reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6157 	ipw_write_reg32(priv, reg, *(u32 *) & fr);
6158 }
6159 
6160 static void ipw_abort_scan(struct ipw_priv *priv)
6161 {
6162 	int err;
6163 
6164 	if (priv->status & STATUS_SCAN_ABORTING) {
6165 		IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6166 		return;
6167 	}
6168 	priv->status |= STATUS_SCAN_ABORTING;
6169 
6170 	err = ipw_send_scan_abort(priv);
6171 	if (err)
6172 		IPW_DEBUG_HC("Request to abort scan failed.\n");
6173 }
6174 
6175 static void ipw_add_scan_channels(struct ipw_priv *priv,
6176 				  struct ipw_scan_request_ext *scan,
6177 				  int scan_type)
6178 {
6179 	int channel_index = 0;
6180 	const struct libipw_geo *geo;
6181 	int i;
6182 
6183 	geo = libipw_get_geo(priv->ieee);
6184 
6185 	if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
6186 		int start = channel_index;
6187 		for (i = 0; i < geo->a_channels; i++) {
6188 			if ((priv->status & STATUS_ASSOCIATED) &&
6189 			    geo->a[i].channel == priv->channel)
6190 				continue;
6191 			channel_index++;
6192 			scan->channels_list[channel_index] = geo->a[i].channel;
6193 			ipw_set_scan_type(scan, channel_index,
6194 					  geo->a[i].
6195 					  flags & LIBIPW_CH_PASSIVE_ONLY ?
6196 					  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6197 					  scan_type);
6198 		}
6199 
6200 		if (start != channel_index) {
6201 			scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6202 			    (channel_index - start);
6203 			channel_index++;
6204 		}
6205 	}
6206 
6207 	if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
6208 		int start = channel_index;
6209 		if (priv->config & CFG_SPEED_SCAN) {
6210 			int index;
6211 			u8 channels[LIBIPW_24GHZ_CHANNELS] = {
6212 				/* nop out the list */
6213 				[0] = 0
6214 			};
6215 
6216 			u8 channel;
6217 			while (channel_index < IPW_SCAN_CHANNELS - 1) {
6218 				channel =
6219 				    priv->speed_scan[priv->speed_scan_pos];
6220 				if (channel == 0) {
6221 					priv->speed_scan_pos = 0;
6222 					channel = priv->speed_scan[0];
6223 				}
6224 				if ((priv->status & STATUS_ASSOCIATED) &&
6225 				    channel == priv->channel) {
6226 					priv->speed_scan_pos++;
6227 					continue;
6228 				}
6229 
6230 				/* If this channel has already been
6231 				 * added in scan, break from loop
6232 				 * and this will be the first channel
6233 				 * in the next scan.
6234 				 */
6235 				if (channels[channel - 1] != 0)
6236 					break;
6237 
6238 				channels[channel - 1] = 1;
6239 				priv->speed_scan_pos++;
6240 				channel_index++;
6241 				scan->channels_list[channel_index] = channel;
6242 				index =
6243 				    libipw_channel_to_index(priv->ieee, channel);
6244 				ipw_set_scan_type(scan, channel_index,
6245 						  geo->bg[index].
6246 						  flags &
6247 						  LIBIPW_CH_PASSIVE_ONLY ?
6248 						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6249 						  : scan_type);
6250 			}
6251 		} else {
6252 			for (i = 0; i < geo->bg_channels; i++) {
6253 				if ((priv->status & STATUS_ASSOCIATED) &&
6254 				    geo->bg[i].channel == priv->channel)
6255 					continue;
6256 				channel_index++;
6257 				scan->channels_list[channel_index] =
6258 				    geo->bg[i].channel;
6259 				ipw_set_scan_type(scan, channel_index,
6260 						  geo->bg[i].
6261 						  flags &
6262 						  LIBIPW_CH_PASSIVE_ONLY ?
6263 						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6264 						  : scan_type);
6265 			}
6266 		}
6267 
6268 		if (start != channel_index) {
6269 			scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6270 			    (channel_index - start);
6271 		}
6272 	}
6273 }
6274 
6275 static int ipw_passive_dwell_time(struct ipw_priv *priv)
6276 {
6277 	/* staying on passive channels longer than the DTIM interval during a
6278 	 * scan, while associated, causes the firmware to cancel the scan
6279 	 * without notification. Hence, don't stay on passive channels longer
6280 	 * than the beacon interval.
6281 	 */
6282 	if (priv->status & STATUS_ASSOCIATED
6283 	    && priv->assoc_network->beacon_interval > 10)
6284 		return priv->assoc_network->beacon_interval - 10;
6285 	else
6286 		return 120;
6287 }
6288 
6289 static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6290 {
6291 	struct ipw_scan_request_ext scan;
6292 	int err = 0, scan_type;
6293 
6294 	if (!(priv->status & STATUS_INIT) ||
6295 	    (priv->status & STATUS_EXIT_PENDING))
6296 		return 0;
6297 
6298 	mutex_lock(&priv->mutex);
6299 
6300 	if (direct && (priv->direct_scan_ssid_len == 0)) {
6301 		IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6302 		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6303 		goto done;
6304 	}
6305 
6306 	if (priv->status & STATUS_SCANNING) {
6307 		IPW_DEBUG_HC("Concurrent scan requested.  Queuing.\n");
6308 		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6309 					STATUS_SCAN_PENDING;
6310 		goto done;
6311 	}
6312 
6313 	if (!(priv->status & STATUS_SCAN_FORCED) &&
6314 	    priv->status & STATUS_SCAN_ABORTING) {
6315 		IPW_DEBUG_HC("Scan request while abort pending.  Queuing.\n");
6316 		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6317 					STATUS_SCAN_PENDING;
6318 		goto done;
6319 	}
6320 
6321 	if (priv->status & STATUS_RF_KILL_MASK) {
6322 		IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6323 		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6324 					STATUS_SCAN_PENDING;
6325 		goto done;
6326 	}
6327 
6328 	memset(&scan, 0, sizeof(scan));
6329 	scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));
6330 
6331 	if (type == IW_SCAN_TYPE_PASSIVE) {
6332 		IPW_DEBUG_WX("use passive scanning\n");
6333 		scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6334 		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6335 			cpu_to_le16(ipw_passive_dwell_time(priv));
6336 		ipw_add_scan_channels(priv, &scan, scan_type);
6337 		goto send_request;
6338 	}
6339 
6340 	/* Use active scan by default. */
6341 	if (priv->config & CFG_SPEED_SCAN)
6342 		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6343 			cpu_to_le16(30);
6344 	else
6345 		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6346 			cpu_to_le16(20);
6347 
6348 	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6349 		cpu_to_le16(20);
6350 
6351 	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6352 		cpu_to_le16(ipw_passive_dwell_time(priv));
6353 	scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6354 
6355 #ifdef CONFIG_IPW2200_MONITOR
6356 	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6357 		u8 channel;
6358 		u8 band = 0;
6359 
6360 		switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
6361 		case LIBIPW_52GHZ_BAND:
6362 			band = (u8) (IPW_A_MODE << 6) | 1;
6363 			channel = priv->channel;
6364 			break;
6365 
6366 		case LIBIPW_24GHZ_BAND:
6367 			band = (u8) (IPW_B_MODE << 6) | 1;
6368 			channel = priv->channel;
6369 			break;
6370 
6371 		default:
6372 			band = (u8) (IPW_B_MODE << 6) | 1;
6373 			channel = 9;
6374 			break;
6375 		}
6376 
6377 		scan.channels_list[0] = band;
6378 		scan.channels_list[1] = channel;
6379 		ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6380 
6381 		/* NOTE:  The card will sit on this channel for this time
6382 		 * period.  Scan aborts are timing sensitive and frequently
6383 		 * result in firmware restarts.  As such, it is best to
6384 		 * set a small dwell_time here and just keep re-issuing
6385 		 * scans.  Otherwise fast channel hopping will not actually
6386 		 * hop channels.
6387 		 *
6388 		 * TODO: Move SPEED SCAN support to all modes and bands */
6389 		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6390 			cpu_to_le16(2000);
6391 	} else {
6392 #endif				/* CONFIG_IPW2200_MONITOR */
6393 		/* Honor direct scans first, otherwise if we are roaming make
6394 		 * this a direct scan for the current network.  Finally,
6395 		 * ensure that every other scan is a fast channel hop scan */
6396 		if (direct) {
6397 			err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6398 			                    priv->direct_scan_ssid_len);
6399 			if (err) {
6400 				IPW_DEBUG_HC("Attempt to send SSID command  "
6401 					     "failed\n");
6402 				goto done;
6403 			}
6404 
6405 			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6406 		} else if ((priv->status & STATUS_ROAMING)
6407 			   || (!(priv->status & STATUS_ASSOCIATED)
6408 			       && (priv->config & CFG_STATIC_ESSID)
6409 			       && (le32_to_cpu(scan.full_scan_index) % 2))) {
6410 			err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6411 			if (err) {
6412 				IPW_DEBUG_HC("Attempt to send SSID command "
6413 					     "failed.\n");
6414 				goto done;
6415 			}
6416 
6417 			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6418 		} else
6419 			scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6420 
6421 		ipw_add_scan_channels(priv, &scan, scan_type);
6422 #ifdef CONFIG_IPW2200_MONITOR
6423 	}
6424 #endif
6425 
6426 send_request:
6427 	err = ipw_send_scan_request_ext(priv, &scan);
6428 	if (err) {
6429 		IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6430 		goto done;
6431 	}
6432 
6433 	priv->status |= STATUS_SCANNING;
6434 	if (direct) {
6435 		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6436 		priv->direct_scan_ssid_len = 0;
6437 	} else
6438 		priv->status &= ~STATUS_SCAN_PENDING;
6439 
6440 	schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
6441 done:
6442 	mutex_unlock(&priv->mutex);
6443 	return err;
6444 }
6445 
6446 static void ipw_request_passive_scan(struct work_struct *work)
6447 {
6448 	struct ipw_priv *priv =
6449 		container_of(work, struct ipw_priv, request_passive_scan.work);
6450 	ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6451 }
6452 
6453 static void ipw_request_scan(struct work_struct *work)
6454 {
6455 	struct ipw_priv *priv =
6456 		container_of(work, struct ipw_priv, request_scan.work);
6457 	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6458 }
6459 
6460 static void ipw_request_direct_scan(struct work_struct *work)
6461 {
6462 	struct ipw_priv *priv =
6463 		container_of(work, struct ipw_priv, request_direct_scan.work);
6464 	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6465 }
6466 
6467 static void ipw_bg_abort_scan(struct work_struct *work)
6468 {
6469 	struct ipw_priv *priv =
6470 		container_of(work, struct ipw_priv, abort_scan);
6471 	mutex_lock(&priv->mutex);
6472 	ipw_abort_scan(priv);
6473 	mutex_unlock(&priv->mutex);
6474 }
6475 
6476 static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6477 {
6478 	/* This is called when wpa_supplicant loads and closes the driver
6479 	 * interface. */
6480 	priv->ieee->wpa_enabled = value;
6481 	return 0;
6482 }
6483 
6484 static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6485 {
6486 	struct libipw_device *ieee = priv->ieee;
6487 	struct libipw_security sec = {
6488 		.flags = SEC_AUTH_MODE,
6489 	};
6490 	int ret = 0;
6491 
6492 	if (value & IW_AUTH_ALG_SHARED_KEY) {
6493 		sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6494 		ieee->open_wep = 0;
6495 	} else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6496 		sec.auth_mode = WLAN_AUTH_OPEN;
6497 		ieee->open_wep = 1;
6498 	} else if (value & IW_AUTH_ALG_LEAP) {
6499 		sec.auth_mode = WLAN_AUTH_LEAP;
6500 		ieee->open_wep = 1;
6501 	} else
6502 		return -EINVAL;
6503 
6504 	if (ieee->set_security)
6505 		ieee->set_security(ieee->dev, &sec);
6506 	else
6507 		ret = -EOPNOTSUPP;
6508 
6509 	return ret;
6510 }
6511 
6512 static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6513 				int wpa_ie_len)
6514 {
6515 	/* make sure WPA is enabled */
6516 	ipw_wpa_enable(priv, 1);
6517 }
6518 
6519 static int ipw_set_rsn_capa(struct ipw_priv *priv,
6520 			    char *capabilities, int length)
6521 {
6522 	IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6523 
6524 	return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6525 				capabilities);
6526 }
6527 
6528 /*
6529  * WE-18 support
6530  */
6531 
6532 /* SIOCSIWGENIE */
6533 static int ipw_wx_set_genie(struct net_device *dev,
6534 			    struct iw_request_info *info,
6535 			    union iwreq_data *wrqu, char *extra)
6536 {
6537 	struct ipw_priv *priv = libipw_priv(dev);
6538 	struct libipw_device *ieee = priv->ieee;
6539 	u8 *buf;
6540 	int err = 0;
6541 
6542 	if (wrqu->data.length > MAX_WPA_IE_LEN ||
6543 	    (wrqu->data.length && extra == NULL))
6544 		return -EINVAL;
6545 
6546 	if (wrqu->data.length) {
6547 		buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
6548 		if (buf == NULL) {
6549 			err = -ENOMEM;
6550 			goto out;
6551 		}
6552 
6553 		kfree(ieee->wpa_ie);
6554 		ieee->wpa_ie = buf;
6555 		ieee->wpa_ie_len = wrqu->data.length;
6556 	} else {
6557 		kfree(ieee->wpa_ie);
6558 		ieee->wpa_ie = NULL;
6559 		ieee->wpa_ie_len = 0;
6560 	}
6561 
6562 	ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6563       out:
6564 	return err;
6565 }
6566 
6567 /* SIOCGIWGENIE */
6568 static int ipw_wx_get_genie(struct net_device *dev,
6569 			    struct iw_request_info *info,
6570 			    union iwreq_data *wrqu, char *extra)
6571 {
6572 	struct ipw_priv *priv = libipw_priv(dev);
6573 	struct libipw_device *ieee = priv->ieee;
6574 	int err = 0;
6575 
6576 	if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6577 		wrqu->data.length = 0;
6578 		goto out;
6579 	}
6580 
6581 	if (wrqu->data.length < ieee->wpa_ie_len) {
6582 		err = -E2BIG;
6583 		goto out;
6584 	}
6585 
6586 	wrqu->data.length = ieee->wpa_ie_len;
6587 	memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6588 
6589       out:
6590 	return err;
6591 }
6592 
6593 static int wext_cipher2level(int cipher)
6594 {
6595 	switch (cipher) {
6596 	case IW_AUTH_CIPHER_NONE:
6597 		return SEC_LEVEL_0;
6598 	case IW_AUTH_CIPHER_WEP40:
6599 	case IW_AUTH_CIPHER_WEP104:
6600 		return SEC_LEVEL_1;
6601 	case IW_AUTH_CIPHER_TKIP:
6602 		return SEC_LEVEL_2;
6603 	case IW_AUTH_CIPHER_CCMP:
6604 		return SEC_LEVEL_3;
6605 	default:
6606 		return -1;
6607 	}
6608 }
6609 
6610 /* SIOCSIWAUTH */
6611 static int ipw_wx_set_auth(struct net_device *dev,
6612 			   struct iw_request_info *info,
6613 			   union iwreq_data *wrqu, char *extra)
6614 {
6615 	struct ipw_priv *priv = libipw_priv(dev);
6616 	struct libipw_device *ieee = priv->ieee;
6617 	struct iw_param *param = &wrqu->param;
6618 	struct lib80211_crypt_data *crypt;
6619 	unsigned long flags;
6620 	int ret = 0;
6621 
6622 	switch (param->flags & IW_AUTH_INDEX) {
6623 	case IW_AUTH_WPA_VERSION:
6624 		break;
6625 	case IW_AUTH_CIPHER_PAIRWISE:
6626 		ipw_set_hw_decrypt_unicast(priv,
6627 					   wext_cipher2level(param->value));
6628 		break;
6629 	case IW_AUTH_CIPHER_GROUP:
6630 		ipw_set_hw_decrypt_multicast(priv,
6631 					     wext_cipher2level(param->value));
6632 		break;
6633 	case IW_AUTH_KEY_MGMT:
6634 		/*
6635 		 * ipw2200 does not use these parameters
6636 		 */
6637 		break;
6638 
6639 	case IW_AUTH_TKIP_COUNTERMEASURES:
6640 		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6641 		if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6642 			break;
6643 
6644 		flags = crypt->ops->get_flags(crypt->priv);
6645 
6646 		if (param->value)
6647 			flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6648 		else
6649 			flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6650 
6651 		crypt->ops->set_flags(flags, crypt->priv);
6652 
6653 		break;
6654 
6655 	case IW_AUTH_DROP_UNENCRYPTED:{
6656 			/* HACK:
6657 			 *
6658 			 * wpa_supplicant calls set_wpa_enabled when the driver
6659 			 * is loaded and unloaded, regardless of if WPA is being
6660 			 * used.  No other calls are made which can be used to
6661 			 * determine if encryption will be used or not prior to
6662 			 * association being expected.  If encryption is not being
6663 			 * used, drop_unencrypted is set to false, else true -- we
6664 			 * can use this to determine if the CAP_PRIVACY_ON bit should
6665 			 * be set.
6666 			 */
6667 			struct libipw_security sec = {
6668 				.flags = SEC_ENABLED,
6669 				.enabled = param->value,
6670 			};
6671 			priv->ieee->drop_unencrypted = param->value;
6672 			/* We only change SEC_LEVEL for open mode. Others
6673 			 * are set by ipw_wpa_set_encryption.
6674 			 */
6675 			if (!param->value) {
6676 				sec.flags |= SEC_LEVEL;
6677 				sec.level = SEC_LEVEL_0;
6678 			} else {
6679 				sec.flags |= SEC_LEVEL;
6680 				sec.level = SEC_LEVEL_1;
6681 			}
6682 			if (priv->ieee->set_security)
6683 				priv->ieee->set_security(priv->ieee->dev, &sec);
6684 			break;
6685 		}
6686 
6687 	case IW_AUTH_80211_AUTH_ALG:
6688 		ret = ipw_wpa_set_auth_algs(priv, param->value);
6689 		break;
6690 
6691 	case IW_AUTH_WPA_ENABLED:
6692 		ret = ipw_wpa_enable(priv, param->value);
6693 		ipw_disassociate(priv);
6694 		break;
6695 
6696 	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6697 		ieee->ieee802_1x = param->value;
6698 		break;
6699 
6700 	case IW_AUTH_PRIVACY_INVOKED:
6701 		ieee->privacy_invoked = param->value;
6702 		break;
6703 
6704 	default:
6705 		return -EOPNOTSUPP;
6706 	}
6707 	return ret;
6708 }
6709 
6710 /* SIOCGIWAUTH */
6711 static int ipw_wx_get_auth(struct net_device *dev,
6712 			   struct iw_request_info *info,
6713 			   union iwreq_data *wrqu, char *extra)
6714 {
6715 	struct ipw_priv *priv = libipw_priv(dev);
6716 	struct libipw_device *ieee = priv->ieee;
6717 	struct lib80211_crypt_data *crypt;
6718 	struct iw_param *param = &wrqu->param;
6719 
6720 	switch (param->flags & IW_AUTH_INDEX) {
6721 	case IW_AUTH_WPA_VERSION:
6722 	case IW_AUTH_CIPHER_PAIRWISE:
6723 	case IW_AUTH_CIPHER_GROUP:
6724 	case IW_AUTH_KEY_MGMT:
6725 		/*
6726 		 * wpa_supplicant will control these internally
6727 		 */
6728 		return -EOPNOTSUPP;
6729 
6730 	case IW_AUTH_TKIP_COUNTERMEASURES:
6731 		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6732 		if (!crypt || !crypt->ops->get_flags)
6733 			break;
6734 
6735 		param->value = (crypt->ops->get_flags(crypt->priv) &
6736 				IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6737 
6738 		break;
6739 
6740 	case IW_AUTH_DROP_UNENCRYPTED:
6741 		param->value = ieee->drop_unencrypted;
6742 		break;
6743 
6744 	case IW_AUTH_80211_AUTH_ALG:
6745 		param->value = ieee->sec.auth_mode;
6746 		break;
6747 
6748 	case IW_AUTH_WPA_ENABLED:
6749 		param->value = ieee->wpa_enabled;
6750 		break;
6751 
6752 	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6753 		param->value = ieee->ieee802_1x;
6754 		break;
6755 
6756 	case IW_AUTH_ROAMING_CONTROL:
6757 	case IW_AUTH_PRIVACY_INVOKED:
6758 		param->value = ieee->privacy_invoked;
6759 		break;
6760 
6761 	default:
6762 		return -EOPNOTSUPP;
6763 	}
6764 	return 0;
6765 }
6766 
6767 /* SIOCSIWENCODEEXT */
6768 static int ipw_wx_set_encodeext(struct net_device *dev,
6769 				struct iw_request_info *info,
6770 				union iwreq_data *wrqu, char *extra)
6771 {
6772 	struct ipw_priv *priv = libipw_priv(dev);
6773 	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6774 
6775 	if (hwcrypto) {
6776 		if (ext->alg == IW_ENCODE_ALG_TKIP) {
6777 			/* IPW HW can't build TKIP MIC,
6778 			   host decryption still needed */
6779 			if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6780 				priv->ieee->host_mc_decrypt = 1;
6781 			else {
6782 				priv->ieee->host_encrypt = 0;
6783 				priv->ieee->host_encrypt_msdu = 1;
6784 				priv->ieee->host_decrypt = 1;
6785 			}
6786 		} else {
6787 			priv->ieee->host_encrypt = 0;
6788 			priv->ieee->host_encrypt_msdu = 0;
6789 			priv->ieee->host_decrypt = 0;
6790 			priv->ieee->host_mc_decrypt = 0;
6791 		}
6792 	}
6793 
6794 	return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6795 }
6796 
6797 /* SIOCGIWENCODEEXT */
6798 static int ipw_wx_get_encodeext(struct net_device *dev,
6799 				struct iw_request_info *info,
6800 				union iwreq_data *wrqu, char *extra)
6801 {
6802 	struct ipw_priv *priv = libipw_priv(dev);
6803 	return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6804 }
6805 
6806 /* SIOCSIWMLME */
6807 static int ipw_wx_set_mlme(struct net_device *dev,
6808 			   struct iw_request_info *info,
6809 			   union iwreq_data *wrqu, char *extra)
6810 {
6811 	struct ipw_priv *priv = libipw_priv(dev);
6812 	struct iw_mlme *mlme = (struct iw_mlme *)extra;
6813 	__le16 reason;
6814 
6815 	reason = cpu_to_le16(mlme->reason_code);
6816 
6817 	switch (mlme->cmd) {
6818 	case IW_MLME_DEAUTH:
6819 		/* silently ignore */
6820 		break;
6821 
6822 	case IW_MLME_DISASSOC:
6823 		ipw_disassociate(priv);
6824 		break;
6825 
6826 	default:
6827 		return -EOPNOTSUPP;
6828 	}
6829 	return 0;
6830 }
6831 
6832 #ifdef CONFIG_IPW2200_QOS
6833 
6834 /* QoS */
6835 /*
6836 * get the modulation type of the current network or
6837 * the card current mode
6838 */
6839 static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6840 {
6841 	u8 mode = 0;
6842 
6843 	if (priv->status & STATUS_ASSOCIATED) {
6844 		unsigned long flags;
6845 
6846 		spin_lock_irqsave(&priv->ieee->lock, flags);
6847 		mode = priv->assoc_network->mode;
6848 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
6849 	} else {
6850 		mode = priv->ieee->mode;
6851 	}
6852 	IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6853 	return mode;
6854 }
6855 
6856 /*
6857 * Handle management frame beacon and probe response
6858 */
6859 static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6860 					 int active_network,
6861 					 struct libipw_network *network)
6862 {
6863 	u32 size = sizeof(struct libipw_qos_parameters);
6864 
6865 	if (network->capability & WLAN_CAPABILITY_IBSS)
6866 		network->qos_data.active = network->qos_data.supported;
6867 
6868 	if (network->flags & NETWORK_HAS_QOS_MASK) {
6869 		if (active_network &&
6870 		    (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6871 			network->qos_data.active = network->qos_data.supported;
6872 
6873 		if ((network->qos_data.active == 1) && (active_network == 1) &&
6874 		    (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6875 		    (network->qos_data.old_param_count !=
6876 		     network->qos_data.param_count)) {
6877 			network->qos_data.old_param_count =
6878 			    network->qos_data.param_count;
6879 			schedule_work(&priv->qos_activate);
6880 			IPW_DEBUG_QOS("QoS parameters change call "
6881 				      "qos_activate\n");
6882 		}
6883 	} else {
6884 		if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6885 			memcpy(&network->qos_data.parameters,
6886 			       &def_parameters_CCK, size);
6887 		else
6888 			memcpy(&network->qos_data.parameters,
6889 			       &def_parameters_OFDM, size);
6890 
6891 		if ((network->qos_data.active == 1) && (active_network == 1)) {
6892 			IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
6893 			schedule_work(&priv->qos_activate);
6894 		}
6895 
6896 		network->qos_data.active = 0;
6897 		network->qos_data.supported = 0;
6898 	}
6899 	if ((priv->status & STATUS_ASSOCIATED) &&
6900 	    (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6901 		if (!ether_addr_equal(network->bssid, priv->bssid))
6902 			if (network->capability & WLAN_CAPABILITY_IBSS)
6903 				if ((network->ssid_len ==
6904 				     priv->assoc_network->ssid_len) &&
6905 				    !memcmp(network->ssid,
6906 					    priv->assoc_network->ssid,
6907 					    network->ssid_len)) {
6908 					schedule_work(&priv->merge_networks);
6909 				}
6910 	}
6911 
6912 	return 0;
6913 }
6914 
6915 /*
6916 * This function set up the firmware to support QoS. It sends
6917 * IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6918 */
6919 static int ipw_qos_activate(struct ipw_priv *priv,
6920 			    struct libipw_qos_data *qos_network_data)
6921 {
6922 	int err;
6923 	struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
6924 	struct libipw_qos_parameters *active_one = NULL;
6925 	u32 size = sizeof(struct libipw_qos_parameters);
6926 	u32 burst_duration;
6927 	int i;
6928 	u8 type;
6929 
6930 	type = ipw_qos_current_mode(priv);
6931 
6932 	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6933 	memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6934 	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6935 	memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6936 
6937 	if (qos_network_data == NULL) {
6938 		if (type == IEEE_B) {
6939 			IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6940 			active_one = &def_parameters_CCK;
6941 		} else
6942 			active_one = &def_parameters_OFDM;
6943 
6944 		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6945 		burst_duration = ipw_qos_get_burst_duration(priv);
6946 		for (i = 0; i < QOS_QUEUE_NUM; i++)
6947 			qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6948 			    cpu_to_le16(burst_duration);
6949 	} else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6950 		if (type == IEEE_B) {
6951 			IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6952 				      type);
6953 			if (priv->qos_data.qos_enable == 0)
6954 				active_one = &def_parameters_CCK;
6955 			else
6956 				active_one = priv->qos_data.def_qos_parm_CCK;
6957 		} else {
6958 			if (priv->qos_data.qos_enable == 0)
6959 				active_one = &def_parameters_OFDM;
6960 			else
6961 				active_one = priv->qos_data.def_qos_parm_OFDM;
6962 		}
6963 		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6964 	} else {
6965 		unsigned long flags;
6966 		int active;
6967 
6968 		spin_lock_irqsave(&priv->ieee->lock, flags);
6969 		active_one = &(qos_network_data->parameters);
6970 		qos_network_data->old_param_count =
6971 		    qos_network_data->param_count;
6972 		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6973 		active = qos_network_data->supported;
6974 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
6975 
6976 		if (active == 0) {
6977 			burst_duration = ipw_qos_get_burst_duration(priv);
6978 			for (i = 0; i < QOS_QUEUE_NUM; i++)
6979 				qos_parameters[QOS_PARAM_SET_ACTIVE].
6980 				    tx_op_limit[i] = cpu_to_le16(burst_duration);
6981 		}
6982 	}
6983 
6984 	IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6985 	err = ipw_send_qos_params_command(priv, &qos_parameters[0]);
6986 	if (err)
6987 		IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6988 
6989 	return err;
6990 }
6991 
6992 /*
6993 * send IPW_CMD_WME_INFO to the firmware
6994 */
6995 static int ipw_qos_set_info_element(struct ipw_priv *priv)
6996 {
6997 	int ret = 0;
6998 	struct libipw_qos_information_element qos_info;
6999 
7000 	if (priv == NULL)
7001 		return -1;
7002 
7003 	qos_info.elementID = QOS_ELEMENT_ID;
7004 	qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
7005 
7006 	qos_info.version = QOS_VERSION_1;
7007 	qos_info.ac_info = 0;
7008 
7009 	memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7010 	qos_info.qui_type = QOS_OUI_TYPE;
7011 	qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7012 
7013 	ret = ipw_send_qos_info_command(priv, &qos_info);
7014 	if (ret != 0) {
7015 		IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7016 	}
7017 	return ret;
7018 }
7019 
7020 /*
7021 * Set the QoS parameter with the association request structure
7022 */
7023 static int ipw_qos_association(struct ipw_priv *priv,
7024 			       struct libipw_network *network)
7025 {
7026 	int err = 0;
7027 	struct libipw_qos_data *qos_data = NULL;
7028 	struct libipw_qos_data ibss_data = {
7029 		.supported = 1,
7030 		.active = 1,
7031 	};
7032 
7033 	switch (priv->ieee->iw_mode) {
7034 	case IW_MODE_ADHOC:
7035 		BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7036 
7037 		qos_data = &ibss_data;
7038 		break;
7039 
7040 	case IW_MODE_INFRA:
7041 		qos_data = &network->qos_data;
7042 		break;
7043 
7044 	default:
7045 		BUG();
7046 		break;
7047 	}
7048 
7049 	err = ipw_qos_activate(priv, qos_data);
7050 	if (err) {
7051 		priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7052 		return err;
7053 	}
7054 
7055 	if (priv->qos_data.qos_enable && qos_data->supported) {
7056 		IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7057 		priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7058 		return ipw_qos_set_info_element(priv);
7059 	}
7060 
7061 	return 0;
7062 }
7063 
7064 /*
7065 * handling the beaconing responses. if we get different QoS setting
7066 * off the network from the associated setting, adjust the QoS
7067 * setting
7068 */
7069 static int ipw_qos_association_resp(struct ipw_priv *priv,
7070 				    struct libipw_network *network)
7071 {
7072 	int ret = 0;
7073 	unsigned long flags;
7074 	u32 size = sizeof(struct libipw_qos_parameters);
7075 	int set_qos_param = 0;
7076 
7077 	if ((priv == NULL) || (network == NULL) ||
7078 	    (priv->assoc_network == NULL))
7079 		return ret;
7080 
7081 	if (!(priv->status & STATUS_ASSOCIATED))
7082 		return ret;
7083 
7084 	if ((priv->ieee->iw_mode != IW_MODE_INFRA))
7085 		return ret;
7086 
7087 	spin_lock_irqsave(&priv->ieee->lock, flags);
7088 	if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7089 		memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7090 		       sizeof(struct libipw_qos_data));
7091 		priv->assoc_network->qos_data.active = 1;
7092 		if ((network->qos_data.old_param_count !=
7093 		     network->qos_data.param_count)) {
7094 			set_qos_param = 1;
7095 			network->qos_data.old_param_count =
7096 			    network->qos_data.param_count;
7097 		}
7098 
7099 	} else {
7100 		if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7101 			memcpy(&priv->assoc_network->qos_data.parameters,
7102 			       &def_parameters_CCK, size);
7103 		else
7104 			memcpy(&priv->assoc_network->qos_data.parameters,
7105 			       &def_parameters_OFDM, size);
7106 		priv->assoc_network->qos_data.active = 0;
7107 		priv->assoc_network->qos_data.supported = 0;
7108 		set_qos_param = 1;
7109 	}
7110 
7111 	spin_unlock_irqrestore(&priv->ieee->lock, flags);
7112 
7113 	if (set_qos_param == 1)
7114 		schedule_work(&priv->qos_activate);
7115 
7116 	return ret;
7117 }
7118 
7119 static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7120 {
7121 	u32 ret = 0;
7122 
7123 	if ((priv == NULL))
7124 		return 0;
7125 
7126 	if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
7127 		ret = priv->qos_data.burst_duration_CCK;
7128 	else
7129 		ret = priv->qos_data.burst_duration_OFDM;
7130 
7131 	return ret;
7132 }
7133 
7134 /*
7135 * Initialize the setting of QoS global
7136 */
7137 static void ipw_qos_init(struct ipw_priv *priv, int enable,
7138 			 int burst_enable, u32 burst_duration_CCK,
7139 			 u32 burst_duration_OFDM)
7140 {
7141 	priv->qos_data.qos_enable = enable;
7142 
7143 	if (priv->qos_data.qos_enable) {
7144 		priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7145 		priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7146 		IPW_DEBUG_QOS("QoS is enabled\n");
7147 	} else {
7148 		priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7149 		priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7150 		IPW_DEBUG_QOS("QoS is not enabled\n");
7151 	}
7152 
7153 	priv->qos_data.burst_enable = burst_enable;
7154 
7155 	if (burst_enable) {
7156 		priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7157 		priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7158 	} else {
7159 		priv->qos_data.burst_duration_CCK = 0;
7160 		priv->qos_data.burst_duration_OFDM = 0;
7161 	}
7162 }
7163 
7164 /*
7165 * map the packet priority to the right TX Queue
7166 */
7167 static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7168 {
7169 	if (priority > 7 || !priv->qos_data.qos_enable)
7170 		priority = 0;
7171 
7172 	return from_priority_to_tx_queue[priority] - 1;
7173 }
7174 
7175 static int ipw_is_qos_active(struct net_device *dev,
7176 			     struct sk_buff *skb)
7177 {
7178 	struct ipw_priv *priv = libipw_priv(dev);
7179 	struct libipw_qos_data *qos_data = NULL;
7180 	int active, supported;
7181 	u8 *daddr = skb->data + ETH_ALEN;
7182 	int unicast = !is_multicast_ether_addr(daddr);
7183 
7184 	if (!(priv->status & STATUS_ASSOCIATED))
7185 		return 0;
7186 
7187 	qos_data = &priv->assoc_network->qos_data;
7188 
7189 	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7190 		if (unicast == 0)
7191 			qos_data->active = 0;
7192 		else
7193 			qos_data->active = qos_data->supported;
7194 	}
7195 	active = qos_data->active;
7196 	supported = qos_data->supported;
7197 	IPW_DEBUG_QOS("QoS  %d network is QoS active %d  supported %d  "
7198 		      "unicast %d\n",
7199 		      priv->qos_data.qos_enable, active, supported, unicast);
7200 	if (active && priv->qos_data.qos_enable)
7201 		return 1;
7202 
7203 	return 0;
7204 
7205 }
7206 /*
7207 * add QoS parameter to the TX command
7208 */
7209 static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7210 					u16 priority,
7211 					struct tfd_data *tfd)
7212 {
7213 	int tx_queue_id = 0;
7214 
7215 
7216 	tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7217 	tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7218 
7219 	if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7220 		tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7221 		tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7222 	}
7223 	return 0;
7224 }
7225 
7226 /*
7227 * background support to run QoS activate functionality
7228 */
7229 static void ipw_bg_qos_activate(struct work_struct *work)
7230 {
7231 	struct ipw_priv *priv =
7232 		container_of(work, struct ipw_priv, qos_activate);
7233 
7234 	mutex_lock(&priv->mutex);
7235 
7236 	if (priv->status & STATUS_ASSOCIATED)
7237 		ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7238 
7239 	mutex_unlock(&priv->mutex);
7240 }
7241 
7242 static int ipw_handle_probe_response(struct net_device *dev,
7243 				     struct libipw_probe_response *resp,
7244 				     struct libipw_network *network)
7245 {
7246 	struct ipw_priv *priv = libipw_priv(dev);
7247 	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7248 			      (network == priv->assoc_network));
7249 
7250 	ipw_qos_handle_probe_response(priv, active_network, network);
7251 
7252 	return 0;
7253 }
7254 
7255 static int ipw_handle_beacon(struct net_device *dev,
7256 			     struct libipw_beacon *resp,
7257 			     struct libipw_network *network)
7258 {
7259 	struct ipw_priv *priv = libipw_priv(dev);
7260 	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7261 			      (network == priv->assoc_network));
7262 
7263 	ipw_qos_handle_probe_response(priv, active_network, network);
7264 
7265 	return 0;
7266 }
7267 
7268 static int ipw_handle_assoc_response(struct net_device *dev,
7269 				     struct libipw_assoc_response *resp,
7270 				     struct libipw_network *network)
7271 {
7272 	struct ipw_priv *priv = libipw_priv(dev);
7273 	ipw_qos_association_resp(priv, network);
7274 	return 0;
7275 }
7276 
7277 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
7278 				       *qos_param)
7279 {
7280 	return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7281 				sizeof(*qos_param) * 3, qos_param);
7282 }
7283 
7284 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
7285 				     *qos_param)
7286 {
7287 	return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7288 				qos_param);
7289 }
7290 
7291 #endif				/* CONFIG_IPW2200_QOS */
7292 
7293 static int ipw_associate_network(struct ipw_priv *priv,
7294 				 struct libipw_network *network,
7295 				 struct ipw_supported_rates *rates, int roaming)
7296 {
7297 	int err;
7298 
7299 	if (priv->config & CFG_FIXED_RATE)
7300 		ipw_set_fixed_rate(priv, network->mode);
7301 
7302 	if (!(priv->config & CFG_STATIC_ESSID)) {
7303 		priv->essid_len = min(network->ssid_len,
7304 				      (u8) IW_ESSID_MAX_SIZE);
7305 		memcpy(priv->essid, network->ssid, priv->essid_len);
7306 	}
7307 
7308 	network->last_associate = jiffies;
7309 
7310 	memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7311 	priv->assoc_request.channel = network->channel;
7312 	priv->assoc_request.auth_key = 0;
7313 
7314 	if ((priv->capability & CAP_PRIVACY_ON) &&
7315 	    (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7316 		priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7317 		priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7318 
7319 		if (priv->ieee->sec.level == SEC_LEVEL_1)
7320 			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7321 
7322 	} else if ((priv->capability & CAP_PRIVACY_ON) &&
7323 		   (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7324 		priv->assoc_request.auth_type = AUTH_LEAP;
7325 	else
7326 		priv->assoc_request.auth_type = AUTH_OPEN;
7327 
7328 	if (priv->ieee->wpa_ie_len) {
7329 		priv->assoc_request.policy_support = cpu_to_le16(0x02);	/* RSN active */
7330 		ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7331 				 priv->ieee->wpa_ie_len);
7332 	}
7333 
7334 	/*
7335 	 * It is valid for our ieee device to support multiple modes, but
7336 	 * when it comes to associating to a given network we have to choose
7337 	 * just one mode.
7338 	 */
7339 	if (network->mode & priv->ieee->mode & IEEE_A)
7340 		priv->assoc_request.ieee_mode = IPW_A_MODE;
7341 	else if (network->mode & priv->ieee->mode & IEEE_G)
7342 		priv->assoc_request.ieee_mode = IPW_G_MODE;
7343 	else if (network->mode & priv->ieee->mode & IEEE_B)
7344 		priv->assoc_request.ieee_mode = IPW_B_MODE;
7345 
7346 	priv->assoc_request.capability = cpu_to_le16(network->capability);
7347 	if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7348 	    && !(priv->config & CFG_PREAMBLE_LONG)) {
7349 		priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7350 	} else {
7351 		priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7352 
7353 		/* Clear the short preamble if we won't be supporting it */
7354 		priv->assoc_request.capability &=
7355 		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7356 	}
7357 
7358 	/* Clear capability bits that aren't used in Ad Hoc */
7359 	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7360 		priv->assoc_request.capability &=
7361 		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7362 
7363 	IPW_DEBUG_ASSOC("%ssociation attempt: '%*pE', channel %d, 802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7364 			roaming ? "Rea" : "A",
7365 			priv->essid_len, priv->essid,
7366 			network->channel,
7367 			ipw_modes[priv->assoc_request.ieee_mode],
7368 			rates->num_rates,
7369 			(priv->assoc_request.preamble_length ==
7370 			 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7371 			network->capability &
7372 			WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7373 			priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7374 			priv->capability & CAP_PRIVACY_ON ?
7375 			(priv->capability & CAP_SHARED_KEY ? "(shared)" :
7376 			 "(open)") : "",
7377 			priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7378 			priv->capability & CAP_PRIVACY_ON ?
7379 			'1' + priv->ieee->sec.active_key : '.',
7380 			priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7381 
7382 	priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
7383 	if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7384 	    (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7385 		priv->assoc_request.assoc_type = HC_IBSS_START;
7386 		priv->assoc_request.assoc_tsf_msw = 0;
7387 		priv->assoc_request.assoc_tsf_lsw = 0;
7388 	} else {
7389 		if (unlikely(roaming))
7390 			priv->assoc_request.assoc_type = HC_REASSOCIATE;
7391 		else
7392 			priv->assoc_request.assoc_type = HC_ASSOCIATE;
7393 		priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7394 		priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7395 	}
7396 
7397 	memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7398 
7399 	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7400 		eth_broadcast_addr(priv->assoc_request.dest);
7401 		priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7402 	} else {
7403 		memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7404 		priv->assoc_request.atim_window = 0;
7405 	}
7406 
7407 	priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7408 
7409 	err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7410 	if (err) {
7411 		IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7412 		return err;
7413 	}
7414 
7415 	rates->ieee_mode = priv->assoc_request.ieee_mode;
7416 	rates->purpose = IPW_RATE_CONNECT;
7417 	ipw_send_supported_rates(priv, rates);
7418 
7419 	if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7420 		priv->sys_config.dot11g_auto_detection = 1;
7421 	else
7422 		priv->sys_config.dot11g_auto_detection = 0;
7423 
7424 	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7425 		priv->sys_config.answer_broadcast_ssid_probe = 1;
7426 	else
7427 		priv->sys_config.answer_broadcast_ssid_probe = 0;
7428 
7429 	err = ipw_send_system_config(priv);
7430 	if (err) {
7431 		IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7432 		return err;
7433 	}
7434 
7435 	IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7436 	err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7437 	if (err) {
7438 		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7439 		return err;
7440 	}
7441 
7442 	/*
7443 	 * If preemption is enabled, it is possible for the association
7444 	 * to complete before we return from ipw_send_associate.  Therefore
7445 	 * we have to be sure and update our priviate data first.
7446 	 */
7447 	priv->channel = network->channel;
7448 	memcpy(priv->bssid, network->bssid, ETH_ALEN);
7449 	priv->status |= STATUS_ASSOCIATING;
7450 	priv->status &= ~STATUS_SECURITY_UPDATED;
7451 
7452 	priv->assoc_network = network;
7453 
7454 #ifdef CONFIG_IPW2200_QOS
7455 	ipw_qos_association(priv, network);
7456 #endif
7457 
7458 	err = ipw_send_associate(priv, &priv->assoc_request);
7459 	if (err) {
7460 		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7461 		return err;
7462 	}
7463 
7464 	IPW_DEBUG(IPW_DL_STATE, "associating: '%*pE' %pM\n",
7465 		  priv->essid_len, priv->essid, priv->bssid);
7466 
7467 	return 0;
7468 }
7469 
7470 static void ipw_roam(void *data)
7471 {
7472 	struct ipw_priv *priv = data;
7473 	struct libipw_network *network = NULL;
7474 	struct ipw_network_match match = {
7475 		.network = priv->assoc_network
7476 	};
7477 
7478 	/* The roaming process is as follows:
7479 	 *
7480 	 * 1.  Missed beacon threshold triggers the roaming process by
7481 	 *     setting the status ROAM bit and requesting a scan.
7482 	 * 2.  When the scan completes, it schedules the ROAM work
7483 	 * 3.  The ROAM work looks at all of the known networks for one that
7484 	 *     is a better network than the currently associated.  If none
7485 	 *     found, the ROAM process is over (ROAM bit cleared)
7486 	 * 4.  If a better network is found, a disassociation request is
7487 	 *     sent.
7488 	 * 5.  When the disassociation completes, the roam work is again
7489 	 *     scheduled.  The second time through, the driver is no longer
7490 	 *     associated, and the newly selected network is sent an
7491 	 *     association request.
7492 	 * 6.  At this point ,the roaming process is complete and the ROAM
7493 	 *     status bit is cleared.
7494 	 */
7495 
7496 	/* If we are no longer associated, and the roaming bit is no longer
7497 	 * set, then we are not actively roaming, so just return */
7498 	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7499 		return;
7500 
7501 	if (priv->status & STATUS_ASSOCIATED) {
7502 		/* First pass through ROAM process -- look for a better
7503 		 * network */
7504 		unsigned long flags;
7505 		u8 rssi = priv->assoc_network->stats.rssi;
7506 		priv->assoc_network->stats.rssi = -128;
7507 		spin_lock_irqsave(&priv->ieee->lock, flags);
7508 		list_for_each_entry(network, &priv->ieee->network_list, list) {
7509 			if (network != priv->assoc_network)
7510 				ipw_best_network(priv, &match, network, 1);
7511 		}
7512 		spin_unlock_irqrestore(&priv->ieee->lock, flags);
7513 		priv->assoc_network->stats.rssi = rssi;
7514 
7515 		if (match.network == priv->assoc_network) {
7516 			IPW_DEBUG_ASSOC("No better APs in this network to "
7517 					"roam to.\n");
7518 			priv->status &= ~STATUS_ROAMING;
7519 			ipw_debug_config(priv);
7520 			return;
7521 		}
7522 
7523 		ipw_send_disassociate(priv, 1);
7524 		priv->assoc_network = match.network;
7525 
7526 		return;
7527 	}
7528 
7529 	/* Second pass through ROAM process -- request association */
7530 	ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7531 	ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7532 	priv->status &= ~STATUS_ROAMING;
7533 }
7534 
7535 static void ipw_bg_roam(struct work_struct *work)
7536 {
7537 	struct ipw_priv *priv =
7538 		container_of(work, struct ipw_priv, roam);
7539 	mutex_lock(&priv->mutex);
7540 	ipw_roam(priv);
7541 	mutex_unlock(&priv->mutex);
7542 }
7543 
7544 static int ipw_associate(void *data)
7545 {
7546 	struct ipw_priv *priv = data;
7547 
7548 	struct libipw_network *network = NULL;
7549 	struct ipw_network_match match = {
7550 		.network = NULL
7551 	};
7552 	struct ipw_supported_rates *rates;
7553 	struct list_head *element;
7554 	unsigned long flags;
7555 
7556 	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7557 		IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7558 		return 0;
7559 	}
7560 
7561 	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7562 		IPW_DEBUG_ASSOC("Not attempting association (already in "
7563 				"progress)\n");
7564 		return 0;
7565 	}
7566 
7567 	if (priv->status & STATUS_DISASSOCIATING) {
7568 		IPW_DEBUG_ASSOC("Not attempting association (in "
7569 				"disassociating)\n ");
7570 		schedule_work(&priv->associate);
7571 		return 0;
7572 	}
7573 
7574 	if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7575 		IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7576 				"initialized)\n");
7577 		return 0;
7578 	}
7579 
7580 	if (!(priv->config & CFG_ASSOCIATE) &&
7581 	    !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7582 		IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7583 		return 0;
7584 	}
7585 
7586 	/* Protect our use of the network_list */
7587 	spin_lock_irqsave(&priv->ieee->lock, flags);
7588 	list_for_each_entry(network, &priv->ieee->network_list, list)
7589 	    ipw_best_network(priv, &match, network, 0);
7590 
7591 	network = match.network;
7592 	rates = &match.rates;
7593 
7594 	if (network == NULL &&
7595 	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
7596 	    priv->config & CFG_ADHOC_CREATE &&
7597 	    priv->config & CFG_STATIC_ESSID &&
7598 	    priv->config & CFG_STATIC_CHANNEL) {
7599 		/* Use oldest network if the free list is empty */
7600 		if (list_empty(&priv->ieee->network_free_list)) {
7601 			struct libipw_network *oldest = NULL;
7602 			struct libipw_network *target;
7603 
7604 			list_for_each_entry(target, &priv->ieee->network_list, list) {
7605 				if ((oldest == NULL) ||
7606 				    (target->last_scanned < oldest->last_scanned))
7607 					oldest = target;
7608 			}
7609 
7610 			/* If there are no more slots, expire the oldest */
7611 			list_del(&oldest->list);
7612 			target = oldest;
7613 			IPW_DEBUG_ASSOC("Expired '%*pE' (%pM) from network list.\n",
7614 					target->ssid_len, target->ssid,
7615 					target->bssid);
7616 			list_add_tail(&target->list,
7617 				      &priv->ieee->network_free_list);
7618 		}
7619 
7620 		element = priv->ieee->network_free_list.next;
7621 		network = list_entry(element, struct libipw_network, list);
7622 		ipw_adhoc_create(priv, network);
7623 		rates = &priv->rates;
7624 		list_del(element);
7625 		list_add_tail(&network->list, &priv->ieee->network_list);
7626 	}
7627 	spin_unlock_irqrestore(&priv->ieee->lock, flags);
7628 
7629 	/* If we reached the end of the list, then we don't have any valid
7630 	 * matching APs */
7631 	if (!network) {
7632 		ipw_debug_config(priv);
7633 
7634 		if (!(priv->status & STATUS_SCANNING)) {
7635 			if (!(priv->config & CFG_SPEED_SCAN))
7636 				schedule_delayed_work(&priv->request_scan,
7637 						      SCAN_INTERVAL);
7638 			else
7639 				schedule_delayed_work(&priv->request_scan, 0);
7640 		}
7641 
7642 		return 0;
7643 	}
7644 
7645 	ipw_associate_network(priv, network, rates, 0);
7646 
7647 	return 1;
7648 }
7649 
7650 static void ipw_bg_associate(struct work_struct *work)
7651 {
7652 	struct ipw_priv *priv =
7653 		container_of(work, struct ipw_priv, associate);
7654 	mutex_lock(&priv->mutex);
7655 	ipw_associate(priv);
7656 	mutex_unlock(&priv->mutex);
7657 }
7658 
7659 static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7660 				      struct sk_buff *skb)
7661 {
7662 	struct ieee80211_hdr *hdr;
7663 	u16 fc;
7664 
7665 	hdr = (struct ieee80211_hdr *)skb->data;
7666 	fc = le16_to_cpu(hdr->frame_control);
7667 	if (!(fc & IEEE80211_FCTL_PROTECTED))
7668 		return;
7669 
7670 	fc &= ~IEEE80211_FCTL_PROTECTED;
7671 	hdr->frame_control = cpu_to_le16(fc);
7672 	switch (priv->ieee->sec.level) {
7673 	case SEC_LEVEL_3:
7674 		/* Remove CCMP HDR */
7675 		memmove(skb->data + LIBIPW_3ADDR_LEN,
7676 			skb->data + LIBIPW_3ADDR_LEN + 8,
7677 			skb->len - LIBIPW_3ADDR_LEN - 8);
7678 		skb_trim(skb, skb->len - 16);	/* CCMP_HDR_LEN + CCMP_MIC_LEN */
7679 		break;
7680 	case SEC_LEVEL_2:
7681 		break;
7682 	case SEC_LEVEL_1:
7683 		/* Remove IV */
7684 		memmove(skb->data + LIBIPW_3ADDR_LEN,
7685 			skb->data + LIBIPW_3ADDR_LEN + 4,
7686 			skb->len - LIBIPW_3ADDR_LEN - 4);
7687 		skb_trim(skb, skb->len - 8);	/* IV + ICV */
7688 		break;
7689 	case SEC_LEVEL_0:
7690 		break;
7691 	default:
7692 		printk(KERN_ERR "Unknown security level %d\n",
7693 		       priv->ieee->sec.level);
7694 		break;
7695 	}
7696 }
7697 
7698 static void ipw_handle_data_packet(struct ipw_priv *priv,
7699 				   struct ipw_rx_mem_buffer *rxb,
7700 				   struct libipw_rx_stats *stats)
7701 {
7702 	struct net_device *dev = priv->net_dev;
7703 	struct libipw_hdr_4addr *hdr;
7704 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7705 
7706 	/* We received data from the HW, so stop the watchdog */
7707 	netif_trans_update(dev);
7708 
7709 	/* We only process data packets if the
7710 	 * interface is open */
7711 	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7712 		     skb_tailroom(rxb->skb))) {
7713 		dev->stats.rx_errors++;
7714 		priv->wstats.discard.misc++;
7715 		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7716 		return;
7717 	} else if (unlikely(!netif_running(priv->net_dev))) {
7718 		dev->stats.rx_dropped++;
7719 		priv->wstats.discard.misc++;
7720 		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7721 		return;
7722 	}
7723 
7724 	/* Advance skb->data to the start of the actual payload */
7725 	skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7726 
7727 	/* Set the size of the skb to the size of the frame */
7728 	skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7729 
7730 	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7731 
7732 	/* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7733 	hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
7734 	if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7735 	    (is_multicast_ether_addr(hdr->addr1) ?
7736 	     !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7737 		ipw_rebuild_decrypted_skb(priv, rxb->skb);
7738 
7739 	if (!libipw_rx(priv->ieee, rxb->skb, stats))
7740 		dev->stats.rx_errors++;
7741 	else {			/* libipw_rx succeeded, so it now owns the SKB */
7742 		rxb->skb = NULL;
7743 		__ipw_led_activity_on(priv);
7744 	}
7745 }
7746 
7747 #ifdef CONFIG_IPW2200_RADIOTAP
7748 static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7749 					   struct ipw_rx_mem_buffer *rxb,
7750 					   struct libipw_rx_stats *stats)
7751 {
7752 	struct net_device *dev = priv->net_dev;
7753 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7754 	struct ipw_rx_frame *frame = &pkt->u.frame;
7755 
7756 	/* initial pull of some data */
7757 	u16 received_channel = frame->received_channel;
7758 	u8 antennaAndPhy = frame->antennaAndPhy;
7759 	s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM;	/* call it signed anyhow */
7760 	u16 pktrate = frame->rate;
7761 
7762 	/* Magic struct that slots into the radiotap header -- no reason
7763 	 * to build this manually element by element, we can write it much
7764 	 * more efficiently than we can parse it. ORDER MATTERS HERE */
7765 	struct ipw_rt_hdr *ipw_rt;
7766 
7767 	unsigned short len = le16_to_cpu(pkt->u.frame.length);
7768 
7769 	/* We received data from the HW, so stop the watchdog */
7770 	netif_trans_update(dev);
7771 
7772 	/* We only process data packets if the
7773 	 * interface is open */
7774 	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7775 		     skb_tailroom(rxb->skb))) {
7776 		dev->stats.rx_errors++;
7777 		priv->wstats.discard.misc++;
7778 		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7779 		return;
7780 	} else if (unlikely(!netif_running(priv->net_dev))) {
7781 		dev->stats.rx_dropped++;
7782 		priv->wstats.discard.misc++;
7783 		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7784 		return;
7785 	}
7786 
7787 	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7788 	 * that now */
7789 	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7790 		/* FIXME: Should alloc bigger skb instead */
7791 		dev->stats.rx_dropped++;
7792 		priv->wstats.discard.misc++;
7793 		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7794 		return;
7795 	}
7796 
7797 	/* copy the frame itself */
7798 	memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7799 		rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7800 
7801 	ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7802 
7803 	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7804 	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
7805 	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr));	/* total header+data */
7806 
7807 	/* Big bitfield of all the fields we provide in radiotap */
7808 	ipw_rt->rt_hdr.it_present = cpu_to_le32(
7809 	     (1 << IEEE80211_RADIOTAP_TSFT) |
7810 	     (1 << IEEE80211_RADIOTAP_FLAGS) |
7811 	     (1 << IEEE80211_RADIOTAP_RATE) |
7812 	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
7813 	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7814 	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7815 	     (1 << IEEE80211_RADIOTAP_ANTENNA));
7816 
7817 	/* Zero the flags, we'll add to them as we go */
7818 	ipw_rt->rt_flags = 0;
7819 	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7820 			       frame->parent_tsf[2] << 16 |
7821 			       frame->parent_tsf[1] << 8  |
7822 			       frame->parent_tsf[0]);
7823 
7824 	/* Convert signal to DBM */
7825 	ipw_rt->rt_dbmsignal = antsignal;
7826 	ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);
7827 
7828 	/* Convert the channel data and set the flags */
7829 	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7830 	if (received_channel > 14) {	/* 802.11a */
7831 		ipw_rt->rt_chbitmask =
7832 		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7833 	} else if (antennaAndPhy & 32) {	/* 802.11b */
7834 		ipw_rt->rt_chbitmask =
7835 		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7836 	} else {		/* 802.11g */
7837 		ipw_rt->rt_chbitmask =
7838 		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7839 	}
7840 
7841 	/* set the rate in multiples of 500k/s */
7842 	switch (pktrate) {
7843 	case IPW_TX_RATE_1MB:
7844 		ipw_rt->rt_rate = 2;
7845 		break;
7846 	case IPW_TX_RATE_2MB:
7847 		ipw_rt->rt_rate = 4;
7848 		break;
7849 	case IPW_TX_RATE_5MB:
7850 		ipw_rt->rt_rate = 10;
7851 		break;
7852 	case IPW_TX_RATE_6MB:
7853 		ipw_rt->rt_rate = 12;
7854 		break;
7855 	case IPW_TX_RATE_9MB:
7856 		ipw_rt->rt_rate = 18;
7857 		break;
7858 	case IPW_TX_RATE_11MB:
7859 		ipw_rt->rt_rate = 22;
7860 		break;
7861 	case IPW_TX_RATE_12MB:
7862 		ipw_rt->rt_rate = 24;
7863 		break;
7864 	case IPW_TX_RATE_18MB:
7865 		ipw_rt->rt_rate = 36;
7866 		break;
7867 	case IPW_TX_RATE_24MB:
7868 		ipw_rt->rt_rate = 48;
7869 		break;
7870 	case IPW_TX_RATE_36MB:
7871 		ipw_rt->rt_rate = 72;
7872 		break;
7873 	case IPW_TX_RATE_48MB:
7874 		ipw_rt->rt_rate = 96;
7875 		break;
7876 	case IPW_TX_RATE_54MB:
7877 		ipw_rt->rt_rate = 108;
7878 		break;
7879 	default:
7880 		ipw_rt->rt_rate = 0;
7881 		break;
7882 	}
7883 
7884 	/* antenna number */
7885 	ipw_rt->rt_antenna = (antennaAndPhy & 3);	/* Is this right? */
7886 
7887 	/* set the preamble flag if we have it */
7888 	if ((antennaAndPhy & 64))
7889 		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7890 
7891 	/* Set the size of the skb to the size of the frame */
7892 	skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7893 
7894 	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7895 
7896 	if (!libipw_rx(priv->ieee, rxb->skb, stats))
7897 		dev->stats.rx_errors++;
7898 	else {			/* libipw_rx succeeded, so it now owns the SKB */
7899 		rxb->skb = NULL;
7900 		/* no LED during capture */
7901 	}
7902 }
7903 #endif
7904 
7905 #ifdef CONFIG_IPW2200_PROMISCUOUS
7906 #define libipw_is_probe_response(fc) \
7907    ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7908     (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7909 
7910 #define libipw_is_management(fc) \
7911    ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7912 
7913 #define libipw_is_control(fc) \
7914    ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7915 
7916 #define libipw_is_data(fc) \
7917    ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7918 
7919 #define libipw_is_assoc_request(fc) \
7920    ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7921 
7922 #define libipw_is_reassoc_request(fc) \
7923    ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7924 
7925 static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7926 				      struct ipw_rx_mem_buffer *rxb,
7927 				      struct libipw_rx_stats *stats)
7928 {
7929 	struct net_device *dev = priv->prom_net_dev;
7930 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7931 	struct ipw_rx_frame *frame = &pkt->u.frame;
7932 	struct ipw_rt_hdr *ipw_rt;
7933 
7934 	/* First cache any information we need before we overwrite
7935 	 * the information provided in the skb from the hardware */
7936 	struct ieee80211_hdr *hdr;
7937 	u16 channel = frame->received_channel;
7938 	u8 phy_flags = frame->antennaAndPhy;
7939 	s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7940 	s8 noise = (s8) le16_to_cpu(frame->noise);
7941 	u8 rate = frame->rate;
7942 	unsigned short len = le16_to_cpu(pkt->u.frame.length);
7943 	struct sk_buff *skb;
7944 	int hdr_only = 0;
7945 	u16 filter = priv->prom_priv->filter;
7946 
7947 	/* If the filter is set to not include Rx frames then return */
7948 	if (filter & IPW_PROM_NO_RX)
7949 		return;
7950 
7951 	/* We received data from the HW, so stop the watchdog */
7952 	netif_trans_update(dev);
7953 
7954 	if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7955 		dev->stats.rx_errors++;
7956 		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7957 		return;
7958 	}
7959 
7960 	/* We only process data packets if the interface is open */
7961 	if (unlikely(!netif_running(dev))) {
7962 		dev->stats.rx_dropped++;
7963 		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7964 		return;
7965 	}
7966 
7967 	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7968 	 * that now */
7969 	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7970 		/* FIXME: Should alloc bigger skb instead */
7971 		dev->stats.rx_dropped++;
7972 		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7973 		return;
7974 	}
7975 
7976 	hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7977 	if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
7978 		if (filter & IPW_PROM_NO_MGMT)
7979 			return;
7980 		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7981 			hdr_only = 1;
7982 	} else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
7983 		if (filter & IPW_PROM_NO_CTL)
7984 			return;
7985 		if (filter & IPW_PROM_CTL_HEADER_ONLY)
7986 			hdr_only = 1;
7987 	} else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
7988 		if (filter & IPW_PROM_NO_DATA)
7989 			return;
7990 		if (filter & IPW_PROM_DATA_HEADER_ONLY)
7991 			hdr_only = 1;
7992 	}
7993 
7994 	/* Copy the SKB since this is for the promiscuous side */
7995 	skb = skb_copy(rxb->skb, GFP_ATOMIC);
7996 	if (skb == NULL) {
7997 		IPW_ERROR("skb_clone failed for promiscuous copy.\n");
7998 		return;
7999 	}
8000 
8001 	/* copy the frame data to write after where the radiotap header goes */
8002 	ipw_rt = (void *)skb->data;
8003 
8004 	if (hdr_only)
8005 		len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
8006 
8007 	memcpy(ipw_rt->payload, hdr, len);
8008 
8009 	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8010 	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
8011 	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt));	/* total header+data */
8012 
8013 	/* Set the size of the skb to the size of the frame */
8014 	skb_put(skb, sizeof(*ipw_rt) + len);
8015 
8016 	/* Big bitfield of all the fields we provide in radiotap */
8017 	ipw_rt->rt_hdr.it_present = cpu_to_le32(
8018 	     (1 << IEEE80211_RADIOTAP_TSFT) |
8019 	     (1 << IEEE80211_RADIOTAP_FLAGS) |
8020 	     (1 << IEEE80211_RADIOTAP_RATE) |
8021 	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
8022 	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8023 	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8024 	     (1 << IEEE80211_RADIOTAP_ANTENNA));
8025 
8026 	/* Zero the flags, we'll add to them as we go */
8027 	ipw_rt->rt_flags = 0;
8028 	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8029 			       frame->parent_tsf[2] << 16 |
8030 			       frame->parent_tsf[1] << 8  |
8031 			       frame->parent_tsf[0]);
8032 
8033 	/* Convert to DBM */
8034 	ipw_rt->rt_dbmsignal = signal;
8035 	ipw_rt->rt_dbmnoise = noise;
8036 
8037 	/* Convert the channel data and set the flags */
8038 	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8039 	if (channel > 14) {	/* 802.11a */
8040 		ipw_rt->rt_chbitmask =
8041 		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8042 	} else if (phy_flags & (1 << 5)) {	/* 802.11b */
8043 		ipw_rt->rt_chbitmask =
8044 		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8045 	} else {		/* 802.11g */
8046 		ipw_rt->rt_chbitmask =
8047 		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8048 	}
8049 
8050 	/* set the rate in multiples of 500k/s */
8051 	switch (rate) {
8052 	case IPW_TX_RATE_1MB:
8053 		ipw_rt->rt_rate = 2;
8054 		break;
8055 	case IPW_TX_RATE_2MB:
8056 		ipw_rt->rt_rate = 4;
8057 		break;
8058 	case IPW_TX_RATE_5MB:
8059 		ipw_rt->rt_rate = 10;
8060 		break;
8061 	case IPW_TX_RATE_6MB:
8062 		ipw_rt->rt_rate = 12;
8063 		break;
8064 	case IPW_TX_RATE_9MB:
8065 		ipw_rt->rt_rate = 18;
8066 		break;
8067 	case IPW_TX_RATE_11MB:
8068 		ipw_rt->rt_rate = 22;
8069 		break;
8070 	case IPW_TX_RATE_12MB:
8071 		ipw_rt->rt_rate = 24;
8072 		break;
8073 	case IPW_TX_RATE_18MB:
8074 		ipw_rt->rt_rate = 36;
8075 		break;
8076 	case IPW_TX_RATE_24MB:
8077 		ipw_rt->rt_rate = 48;
8078 		break;
8079 	case IPW_TX_RATE_36MB:
8080 		ipw_rt->rt_rate = 72;
8081 		break;
8082 	case IPW_TX_RATE_48MB:
8083 		ipw_rt->rt_rate = 96;
8084 		break;
8085 	case IPW_TX_RATE_54MB:
8086 		ipw_rt->rt_rate = 108;
8087 		break;
8088 	default:
8089 		ipw_rt->rt_rate = 0;
8090 		break;
8091 	}
8092 
8093 	/* antenna number */
8094 	ipw_rt->rt_antenna = (phy_flags & 3);
8095 
8096 	/* set the preamble flag if we have it */
8097 	if (phy_flags & (1 << 6))
8098 		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8099 
8100 	IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8101 
8102 	if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
8103 		dev->stats.rx_errors++;
8104 		dev_kfree_skb_any(skb);
8105 	}
8106 }
8107 #endif
8108 
8109 static int is_network_packet(struct ipw_priv *priv,
8110 				    struct libipw_hdr_4addr *header)
8111 {
8112 	/* Filter incoming packets to determine if they are targeted toward
8113 	 * this network, discarding packets coming from ourselves */
8114 	switch (priv->ieee->iw_mode) {
8115 	case IW_MODE_ADHOC:	/* Header: Dest. | Source    | BSSID */
8116 		/* packets from our adapter are dropped (echo) */
8117 		if (ether_addr_equal(header->addr2, priv->net_dev->dev_addr))
8118 			return 0;
8119 
8120 		/* {broad,multi}cast packets to our BSSID go through */
8121 		if (is_multicast_ether_addr(header->addr1))
8122 			return ether_addr_equal(header->addr3, priv->bssid);
8123 
8124 		/* packets to our adapter go through */
8125 		return ether_addr_equal(header->addr1,
8126 					priv->net_dev->dev_addr);
8127 
8128 	case IW_MODE_INFRA:	/* Header: Dest. | BSSID | Source */
8129 		/* packets from our adapter are dropped (echo) */
8130 		if (ether_addr_equal(header->addr3, priv->net_dev->dev_addr))
8131 			return 0;
8132 
8133 		/* {broad,multi}cast packets to our BSS go through */
8134 		if (is_multicast_ether_addr(header->addr1))
8135 			return ether_addr_equal(header->addr2, priv->bssid);
8136 
8137 		/* packets to our adapter go through */
8138 		return ether_addr_equal(header->addr1,
8139 					priv->net_dev->dev_addr);
8140 	}
8141 
8142 	return 1;
8143 }
8144 
8145 #define IPW_PACKET_RETRY_TIME HZ
8146 
8147 static  int is_duplicate_packet(struct ipw_priv *priv,
8148 				      struct libipw_hdr_4addr *header)
8149 {
8150 	u16 sc = le16_to_cpu(header->seq_ctl);
8151 	u16 seq = WLAN_GET_SEQ_SEQ(sc);
8152 	u16 frag = WLAN_GET_SEQ_FRAG(sc);
8153 	u16 *last_seq, *last_frag;
8154 	unsigned long *last_time;
8155 
8156 	switch (priv->ieee->iw_mode) {
8157 	case IW_MODE_ADHOC:
8158 		{
8159 			struct list_head *p;
8160 			struct ipw_ibss_seq *entry = NULL;
8161 			u8 *mac = header->addr2;
8162 			int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8163 
8164 			list_for_each(p, &priv->ibss_mac_hash[index]) {
8165 				entry =
8166 				    list_entry(p, struct ipw_ibss_seq, list);
8167 				if (ether_addr_equal(entry->mac, mac))
8168 					break;
8169 			}
8170 			if (p == &priv->ibss_mac_hash[index]) {
8171 				entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8172 				if (!entry) {
8173 					IPW_ERROR
8174 					    ("Cannot malloc new mac entry\n");
8175 					return 0;
8176 				}
8177 				memcpy(entry->mac, mac, ETH_ALEN);
8178 				entry->seq_num = seq;
8179 				entry->frag_num = frag;
8180 				entry->packet_time = jiffies;
8181 				list_add(&entry->list,
8182 					 &priv->ibss_mac_hash[index]);
8183 				return 0;
8184 			}
8185 			last_seq = &entry->seq_num;
8186 			last_frag = &entry->frag_num;
8187 			last_time = &entry->packet_time;
8188 			break;
8189 		}
8190 	case IW_MODE_INFRA:
8191 		last_seq = &priv->last_seq_num;
8192 		last_frag = &priv->last_frag_num;
8193 		last_time = &priv->last_packet_time;
8194 		break;
8195 	default:
8196 		return 0;
8197 	}
8198 	if ((*last_seq == seq) &&
8199 	    time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8200 		if (*last_frag == frag)
8201 			goto drop;
8202 		if (*last_frag + 1 != frag)
8203 			/* out-of-order fragment */
8204 			goto drop;
8205 	} else
8206 		*last_seq = seq;
8207 
8208 	*last_frag = frag;
8209 	*last_time = jiffies;
8210 	return 0;
8211 
8212       drop:
8213 	/* Comment this line now since we observed the card receives
8214 	 * duplicate packets but the FCTL_RETRY bit is not set in the
8215 	 * IBSS mode with fragmentation enabled.
8216 	 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8217 	return 1;
8218 }
8219 
8220 static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8221 				   struct ipw_rx_mem_buffer *rxb,
8222 				   struct libipw_rx_stats *stats)
8223 {
8224 	struct sk_buff *skb = rxb->skb;
8225 	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8226 	struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
8227 	    (skb->data + IPW_RX_FRAME_SIZE);
8228 
8229 	libipw_rx_mgt(priv->ieee, header, stats);
8230 
8231 	if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8232 	    ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8233 	      IEEE80211_STYPE_PROBE_RESP) ||
8234 	     (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8235 	      IEEE80211_STYPE_BEACON))) {
8236 		if (ether_addr_equal(header->addr3, priv->bssid))
8237 			ipw_add_station(priv, header->addr2);
8238 	}
8239 
8240 	if (priv->config & CFG_NET_STATS) {
8241 		IPW_DEBUG_HC("sending stat packet\n");
8242 
8243 		/* Set the size of the skb to the size of the full
8244 		 * ipw header and 802.11 frame */
8245 		skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8246 			IPW_RX_FRAME_SIZE);
8247 
8248 		/* Advance past the ipw packet header to the 802.11 frame */
8249 		skb_pull(skb, IPW_RX_FRAME_SIZE);
8250 
8251 		/* Push the libipw_rx_stats before the 802.11 frame */
8252 		memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8253 
8254 		skb->dev = priv->ieee->dev;
8255 
8256 		/* Point raw at the libipw_stats */
8257 		skb_reset_mac_header(skb);
8258 
8259 		skb->pkt_type = PACKET_OTHERHOST;
8260 		skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
8261 		memset(skb->cb, 0, sizeof(rxb->skb->cb));
8262 		netif_rx(skb);
8263 		rxb->skb = NULL;
8264 	}
8265 }
8266 
8267 /*
8268  * Main entry function for receiving a packet with 80211 headers.  This
8269  * should be called when ever the FW has notified us that there is a new
8270  * skb in the receive queue.
8271  */
8272 static void ipw_rx(struct ipw_priv *priv)
8273 {
8274 	struct ipw_rx_mem_buffer *rxb;
8275 	struct ipw_rx_packet *pkt;
8276 	struct libipw_hdr_4addr *header;
8277 	u32 r, w, i;
8278 	u8 network_packet;
8279 	u8 fill_rx = 0;
8280 
8281 	r = ipw_read32(priv, IPW_RX_READ_INDEX);
8282 	w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8283 	i = priv->rxq->read;
8284 
8285 	if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8286 		fill_rx = 1;
8287 
8288 	while (i != r) {
8289 		rxb = priv->rxq->queue[i];
8290 		if (unlikely(rxb == NULL)) {
8291 			printk(KERN_CRIT "Queue not allocated!\n");
8292 			break;
8293 		}
8294 		priv->rxq->queue[i] = NULL;
8295 
8296 		pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8297 					    IPW_RX_BUF_SIZE,
8298 					    PCI_DMA_FROMDEVICE);
8299 
8300 		pkt = (struct ipw_rx_packet *)rxb->skb->data;
8301 		IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8302 			     pkt->header.message_type,
8303 			     pkt->header.rx_seq_num, pkt->header.control_bits);
8304 
8305 		switch (pkt->header.message_type) {
8306 		case RX_FRAME_TYPE:	/* 802.11 frame */  {
8307 				struct libipw_rx_stats stats = {
8308 					.rssi = pkt->u.frame.rssi_dbm -
8309 					    IPW_RSSI_TO_DBM,
8310 					.signal =
8311 					    pkt->u.frame.rssi_dbm -
8312 					    IPW_RSSI_TO_DBM + 0x100,
8313 					.noise =
8314 					    le16_to_cpu(pkt->u.frame.noise),
8315 					.rate = pkt->u.frame.rate,
8316 					.mac_time = jiffies,
8317 					.received_channel =
8318 					    pkt->u.frame.received_channel,
8319 					.freq =
8320 					    (pkt->u.frame.
8321 					     control & (1 << 0)) ?
8322 					    LIBIPW_24GHZ_BAND :
8323 					    LIBIPW_52GHZ_BAND,
8324 					.len = le16_to_cpu(pkt->u.frame.length),
8325 				};
8326 
8327 				if (stats.rssi != 0)
8328 					stats.mask |= LIBIPW_STATMASK_RSSI;
8329 				if (stats.signal != 0)
8330 					stats.mask |= LIBIPW_STATMASK_SIGNAL;
8331 				if (stats.noise != 0)
8332 					stats.mask |= LIBIPW_STATMASK_NOISE;
8333 				if (stats.rate != 0)
8334 					stats.mask |= LIBIPW_STATMASK_RATE;
8335 
8336 				priv->rx_packets++;
8337 
8338 #ifdef CONFIG_IPW2200_PROMISCUOUS
8339 	if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8340 		ipw_handle_promiscuous_rx(priv, rxb, &stats);
8341 #endif
8342 
8343 #ifdef CONFIG_IPW2200_MONITOR
8344 				if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8345 #ifdef CONFIG_IPW2200_RADIOTAP
8346 
8347                 ipw_handle_data_packet_monitor(priv,
8348 					       rxb,
8349 					       &stats);
8350 #else
8351 		ipw_handle_data_packet(priv, rxb,
8352 				       &stats);
8353 #endif
8354 					break;
8355 				}
8356 #endif
8357 
8358 				header =
8359 				    (struct libipw_hdr_4addr *)(rxb->skb->
8360 								   data +
8361 								   IPW_RX_FRAME_SIZE);
8362 				/* TODO: Check Ad-Hoc dest/source and make sure
8363 				 * that we are actually parsing these packets
8364 				 * correctly -- we should probably use the
8365 				 * frame control of the packet and disregard
8366 				 * the current iw_mode */
8367 
8368 				network_packet =
8369 				    is_network_packet(priv, header);
8370 				if (network_packet && priv->assoc_network) {
8371 					priv->assoc_network->stats.rssi =
8372 					    stats.rssi;
8373 					priv->exp_avg_rssi =
8374 					    exponential_average(priv->exp_avg_rssi,
8375 					    stats.rssi, DEPTH_RSSI);
8376 				}
8377 
8378 				IPW_DEBUG_RX("Frame: len=%u\n",
8379 					     le16_to_cpu(pkt->u.frame.length));
8380 
8381 				if (le16_to_cpu(pkt->u.frame.length) <
8382 				    libipw_get_hdrlen(le16_to_cpu(
8383 						    header->frame_ctl))) {
8384 					IPW_DEBUG_DROP
8385 					    ("Received packet is too small. "
8386 					     "Dropping.\n");
8387 					priv->net_dev->stats.rx_errors++;
8388 					priv->wstats.discard.misc++;
8389 					break;
8390 				}
8391 
8392 				switch (WLAN_FC_GET_TYPE
8393 					(le16_to_cpu(header->frame_ctl))) {
8394 
8395 				case IEEE80211_FTYPE_MGMT:
8396 					ipw_handle_mgmt_packet(priv, rxb,
8397 							       &stats);
8398 					break;
8399 
8400 				case IEEE80211_FTYPE_CTL:
8401 					break;
8402 
8403 				case IEEE80211_FTYPE_DATA:
8404 					if (unlikely(!network_packet ||
8405 						     is_duplicate_packet(priv,
8406 									 header)))
8407 					{
8408 						IPW_DEBUG_DROP("Dropping: "
8409 							       "%pM, "
8410 							       "%pM, "
8411 							       "%pM\n",
8412 							       header->addr1,
8413 							       header->addr2,
8414 							       header->addr3);
8415 						break;
8416 					}
8417 
8418 					ipw_handle_data_packet(priv, rxb,
8419 							       &stats);
8420 
8421 					break;
8422 				}
8423 				break;
8424 			}
8425 
8426 		case RX_HOST_NOTIFICATION_TYPE:{
8427 				IPW_DEBUG_RX
8428 				    ("Notification: subtype=%02X flags=%02X size=%d\n",
8429 				     pkt->u.notification.subtype,
8430 				     pkt->u.notification.flags,
8431 				     le16_to_cpu(pkt->u.notification.size));
8432 				ipw_rx_notification(priv, &pkt->u.notification);
8433 				break;
8434 			}
8435 
8436 		default:
8437 			IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8438 				     pkt->header.message_type);
8439 			break;
8440 		}
8441 
8442 		/* For now we just don't re-use anything.  We can tweak this
8443 		 * later to try and re-use notification packets and SKBs that
8444 		 * fail to Rx correctly */
8445 		if (rxb->skb != NULL) {
8446 			dev_kfree_skb_any(rxb->skb);
8447 			rxb->skb = NULL;
8448 		}
8449 
8450 		pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8451 				 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8452 		list_add_tail(&rxb->list, &priv->rxq->rx_used);
8453 
8454 		i = (i + 1) % RX_QUEUE_SIZE;
8455 
8456 		/* If there are a lot of unsued frames, restock the Rx queue
8457 		 * so the ucode won't assert */
8458 		if (fill_rx) {
8459 			priv->rxq->read = i;
8460 			ipw_rx_queue_replenish(priv);
8461 		}
8462 	}
8463 
8464 	/* Backtrack one entry */
8465 	priv->rxq->read = i;
8466 	ipw_rx_queue_restock(priv);
8467 }
8468 
8469 #define DEFAULT_RTS_THRESHOLD     2304U
8470 #define MIN_RTS_THRESHOLD         1U
8471 #define MAX_RTS_THRESHOLD         2304U
8472 #define DEFAULT_BEACON_INTERVAL   100U
8473 #define	DEFAULT_SHORT_RETRY_LIMIT 7U
8474 #define	DEFAULT_LONG_RETRY_LIMIT  4U
8475 
8476 /**
8477  * ipw_sw_reset
8478  * @option: options to control different reset behaviour
8479  * 	    0 = reset everything except the 'disable' module_param
8480  * 	    1 = reset everything and print out driver info (for probe only)
8481  * 	    2 = reset everything
8482  */
8483 static int ipw_sw_reset(struct ipw_priv *priv, int option)
8484 {
8485 	int band, modulation;
8486 	int old_mode = priv->ieee->iw_mode;
8487 
8488 	/* Initialize module parameter values here */
8489 	priv->config = 0;
8490 
8491 	/* We default to disabling the LED code as right now it causes
8492 	 * too many systems to lock up... */
8493 	if (!led_support)
8494 		priv->config |= CFG_NO_LED;
8495 
8496 	if (associate)
8497 		priv->config |= CFG_ASSOCIATE;
8498 	else
8499 		IPW_DEBUG_INFO("Auto associate disabled.\n");
8500 
8501 	if (auto_create)
8502 		priv->config |= CFG_ADHOC_CREATE;
8503 	else
8504 		IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8505 
8506 	priv->config &= ~CFG_STATIC_ESSID;
8507 	priv->essid_len = 0;
8508 	memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8509 
8510 	if (disable && option) {
8511 		priv->status |= STATUS_RF_KILL_SW;
8512 		IPW_DEBUG_INFO("Radio disabled.\n");
8513 	}
8514 
8515 	if (default_channel != 0) {
8516 		priv->config |= CFG_STATIC_CHANNEL;
8517 		priv->channel = default_channel;
8518 		IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
8519 		/* TODO: Validate that provided channel is in range */
8520 	}
8521 #ifdef CONFIG_IPW2200_QOS
8522 	ipw_qos_init(priv, qos_enable, qos_burst_enable,
8523 		     burst_duration_CCK, burst_duration_OFDM);
8524 #endif				/* CONFIG_IPW2200_QOS */
8525 
8526 	switch (network_mode) {
8527 	case 1:
8528 		priv->ieee->iw_mode = IW_MODE_ADHOC;
8529 		priv->net_dev->type = ARPHRD_ETHER;
8530 
8531 		break;
8532 #ifdef CONFIG_IPW2200_MONITOR
8533 	case 2:
8534 		priv->ieee->iw_mode = IW_MODE_MONITOR;
8535 #ifdef CONFIG_IPW2200_RADIOTAP
8536 		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8537 #else
8538 		priv->net_dev->type = ARPHRD_IEEE80211;
8539 #endif
8540 		break;
8541 #endif
8542 	default:
8543 	case 0:
8544 		priv->net_dev->type = ARPHRD_ETHER;
8545 		priv->ieee->iw_mode = IW_MODE_INFRA;
8546 		break;
8547 	}
8548 
8549 	if (hwcrypto) {
8550 		priv->ieee->host_encrypt = 0;
8551 		priv->ieee->host_encrypt_msdu = 0;
8552 		priv->ieee->host_decrypt = 0;
8553 		priv->ieee->host_mc_decrypt = 0;
8554 	}
8555 	IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8556 
8557 	/* IPW2200/2915 is abled to do hardware fragmentation. */
8558 	priv->ieee->host_open_frag = 0;
8559 
8560 	if ((priv->pci_dev->device == 0x4223) ||
8561 	    (priv->pci_dev->device == 0x4224)) {
8562 		if (option == 1)
8563 			printk(KERN_INFO DRV_NAME
8564 			       ": Detected Intel PRO/Wireless 2915ABG Network "
8565 			       "Connection\n");
8566 		priv->ieee->abg_true = 1;
8567 		band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
8568 		modulation = LIBIPW_OFDM_MODULATION |
8569 		    LIBIPW_CCK_MODULATION;
8570 		priv->adapter = IPW_2915ABG;
8571 		priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8572 	} else {
8573 		if (option == 1)
8574 			printk(KERN_INFO DRV_NAME
8575 			       ": Detected Intel PRO/Wireless 2200BG Network "
8576 			       "Connection\n");
8577 
8578 		priv->ieee->abg_true = 0;
8579 		band = LIBIPW_24GHZ_BAND;
8580 		modulation = LIBIPW_OFDM_MODULATION |
8581 		    LIBIPW_CCK_MODULATION;
8582 		priv->adapter = IPW_2200BG;
8583 		priv->ieee->mode = IEEE_G | IEEE_B;
8584 	}
8585 
8586 	priv->ieee->freq_band = band;
8587 	priv->ieee->modulation = modulation;
8588 
8589 	priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;
8590 
8591 	priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8592 	priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8593 
8594 	priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8595 	priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8596 	priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8597 
8598 	/* If power management is turned on, default to AC mode */
8599 	priv->power_mode = IPW_POWER_AC;
8600 	priv->tx_power = IPW_TX_POWER_DEFAULT;
8601 
8602 	return old_mode == priv->ieee->iw_mode;
8603 }
8604 
8605 /*
8606  * This file defines the Wireless Extension handlers.  It does not
8607  * define any methods of hardware manipulation and relies on the
8608  * functions defined in ipw_main to provide the HW interaction.
8609  *
8610  * The exception to this is the use of the ipw_get_ordinal()
8611  * function used to poll the hardware vs. making unnecessary calls.
8612  *
8613  */
8614 
8615 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8616 {
8617 	if (channel == 0) {
8618 		IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8619 		priv->config &= ~CFG_STATIC_CHANNEL;
8620 		IPW_DEBUG_ASSOC("Attempting to associate with new "
8621 				"parameters.\n");
8622 		ipw_associate(priv);
8623 		return 0;
8624 	}
8625 
8626 	priv->config |= CFG_STATIC_CHANNEL;
8627 
8628 	if (priv->channel == channel) {
8629 		IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8630 			       channel);
8631 		return 0;
8632 	}
8633 
8634 	IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8635 	priv->channel = channel;
8636 
8637 #ifdef CONFIG_IPW2200_MONITOR
8638 	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8639 		int i;
8640 		if (priv->status & STATUS_SCANNING) {
8641 			IPW_DEBUG_SCAN("Scan abort triggered due to "
8642 				       "channel change.\n");
8643 			ipw_abort_scan(priv);
8644 		}
8645 
8646 		for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8647 			udelay(10);
8648 
8649 		if (priv->status & STATUS_SCANNING)
8650 			IPW_DEBUG_SCAN("Still scanning...\n");
8651 		else
8652 			IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8653 				       1000 - i);
8654 
8655 		return 0;
8656 	}
8657 #endif				/* CONFIG_IPW2200_MONITOR */
8658 
8659 	/* Network configuration changed -- force [re]association */
8660 	IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8661 	if (!ipw_disassociate(priv))
8662 		ipw_associate(priv);
8663 
8664 	return 0;
8665 }
8666 
8667 static int ipw_wx_set_freq(struct net_device *dev,
8668 			   struct iw_request_info *info,
8669 			   union iwreq_data *wrqu, char *extra)
8670 {
8671 	struct ipw_priv *priv = libipw_priv(dev);
8672 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8673 	struct iw_freq *fwrq = &wrqu->freq;
8674 	int ret = 0, i;
8675 	u8 channel, flags;
8676 	int band;
8677 
8678 	if (fwrq->m == 0) {
8679 		IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8680 		mutex_lock(&priv->mutex);
8681 		ret = ipw_set_channel(priv, 0);
8682 		mutex_unlock(&priv->mutex);
8683 		return ret;
8684 	}
8685 	/* if setting by freq convert to channel */
8686 	if (fwrq->e == 1) {
8687 		channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
8688 		if (channel == 0)
8689 			return -EINVAL;
8690 	} else
8691 		channel = fwrq->m;
8692 
8693 	if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
8694 		return -EINVAL;
8695 
8696 	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8697 		i = libipw_channel_to_index(priv->ieee, channel);
8698 		if (i == -1)
8699 			return -EINVAL;
8700 
8701 		flags = (band == LIBIPW_24GHZ_BAND) ?
8702 		    geo->bg[i].flags : geo->a[i].flags;
8703 		if (flags & LIBIPW_CH_PASSIVE_ONLY) {
8704 			IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8705 			return -EINVAL;
8706 		}
8707 	}
8708 
8709 	IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
8710 	mutex_lock(&priv->mutex);
8711 	ret = ipw_set_channel(priv, channel);
8712 	mutex_unlock(&priv->mutex);
8713 	return ret;
8714 }
8715 
8716 static int ipw_wx_get_freq(struct net_device *dev,
8717 			   struct iw_request_info *info,
8718 			   union iwreq_data *wrqu, char *extra)
8719 {
8720 	struct ipw_priv *priv = libipw_priv(dev);
8721 
8722 	wrqu->freq.e = 0;
8723 
8724 	/* If we are associated, trying to associate, or have a statically
8725 	 * configured CHANNEL then return that; otherwise return ANY */
8726 	mutex_lock(&priv->mutex);
8727 	if (priv->config & CFG_STATIC_CHANNEL ||
8728 	    priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8729 		int i;
8730 
8731 		i = libipw_channel_to_index(priv->ieee, priv->channel);
8732 		BUG_ON(i == -1);
8733 		wrqu->freq.e = 1;
8734 
8735 		switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
8736 		case LIBIPW_52GHZ_BAND:
8737 			wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8738 			break;
8739 
8740 		case LIBIPW_24GHZ_BAND:
8741 			wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8742 			break;
8743 
8744 		default:
8745 			BUG();
8746 		}
8747 	} else
8748 		wrqu->freq.m = 0;
8749 
8750 	mutex_unlock(&priv->mutex);
8751 	IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
8752 	return 0;
8753 }
8754 
8755 static int ipw_wx_set_mode(struct net_device *dev,
8756 			   struct iw_request_info *info,
8757 			   union iwreq_data *wrqu, char *extra)
8758 {
8759 	struct ipw_priv *priv = libipw_priv(dev);
8760 	int err = 0;
8761 
8762 	IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8763 
8764 	switch (wrqu->mode) {
8765 #ifdef CONFIG_IPW2200_MONITOR
8766 	case IW_MODE_MONITOR:
8767 #endif
8768 	case IW_MODE_ADHOC:
8769 	case IW_MODE_INFRA:
8770 		break;
8771 	case IW_MODE_AUTO:
8772 		wrqu->mode = IW_MODE_INFRA;
8773 		break;
8774 	default:
8775 		return -EINVAL;
8776 	}
8777 	if (wrqu->mode == priv->ieee->iw_mode)
8778 		return 0;
8779 
8780 	mutex_lock(&priv->mutex);
8781 
8782 	ipw_sw_reset(priv, 0);
8783 
8784 #ifdef CONFIG_IPW2200_MONITOR
8785 	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8786 		priv->net_dev->type = ARPHRD_ETHER;
8787 
8788 	if (wrqu->mode == IW_MODE_MONITOR)
8789 #ifdef CONFIG_IPW2200_RADIOTAP
8790 		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8791 #else
8792 		priv->net_dev->type = ARPHRD_IEEE80211;
8793 #endif
8794 #endif				/* CONFIG_IPW2200_MONITOR */
8795 
8796 	/* Free the existing firmware and reset the fw_loaded
8797 	 * flag so ipw_load() will bring in the new firmware */
8798 	free_firmware();
8799 
8800 	priv->ieee->iw_mode = wrqu->mode;
8801 
8802 	schedule_work(&priv->adapter_restart);
8803 	mutex_unlock(&priv->mutex);
8804 	return err;
8805 }
8806 
8807 static int ipw_wx_get_mode(struct net_device *dev,
8808 			   struct iw_request_info *info,
8809 			   union iwreq_data *wrqu, char *extra)
8810 {
8811 	struct ipw_priv *priv = libipw_priv(dev);
8812 	mutex_lock(&priv->mutex);
8813 	wrqu->mode = priv->ieee->iw_mode;
8814 	IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8815 	mutex_unlock(&priv->mutex);
8816 	return 0;
8817 }
8818 
8819 /* Values are in microsecond */
8820 static const s32 timeout_duration[] = {
8821 	350000,
8822 	250000,
8823 	75000,
8824 	37000,
8825 	25000,
8826 };
8827 
8828 static const s32 period_duration[] = {
8829 	400000,
8830 	700000,
8831 	1000000,
8832 	1000000,
8833 	1000000
8834 };
8835 
8836 static int ipw_wx_get_range(struct net_device *dev,
8837 			    struct iw_request_info *info,
8838 			    union iwreq_data *wrqu, char *extra)
8839 {
8840 	struct ipw_priv *priv = libipw_priv(dev);
8841 	struct iw_range *range = (struct iw_range *)extra;
8842 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
8843 	int i = 0, j;
8844 
8845 	wrqu->data.length = sizeof(*range);
8846 	memset(range, 0, sizeof(*range));
8847 
8848 	/* 54Mbs == ~27 Mb/s real (802.11g) */
8849 	range->throughput = 27 * 1000 * 1000;
8850 
8851 	range->max_qual.qual = 100;
8852 	/* TODO: Find real max RSSI and stick here */
8853 	range->max_qual.level = 0;
8854 	range->max_qual.noise = 0;
8855 	range->max_qual.updated = 7;	/* Updated all three */
8856 
8857 	range->avg_qual.qual = 70;
8858 	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8859 	range->avg_qual.level = 0;	/* FIXME to real average level */
8860 	range->avg_qual.noise = 0;
8861 	range->avg_qual.updated = 7;	/* Updated all three */
8862 	mutex_lock(&priv->mutex);
8863 	range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8864 
8865 	for (i = 0; i < range->num_bitrates; i++)
8866 		range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8867 		    500000;
8868 
8869 	range->max_rts = DEFAULT_RTS_THRESHOLD;
8870 	range->min_frag = MIN_FRAG_THRESHOLD;
8871 	range->max_frag = MAX_FRAG_THRESHOLD;
8872 
8873 	range->encoding_size[0] = 5;
8874 	range->encoding_size[1] = 13;
8875 	range->num_encoding_sizes = 2;
8876 	range->max_encoding_tokens = WEP_KEYS;
8877 
8878 	/* Set the Wireless Extension versions */
8879 	range->we_version_compiled = WIRELESS_EXT;
8880 	range->we_version_source = 18;
8881 
8882 	i = 0;
8883 	if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8884 		for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8885 			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8886 			    (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8887 				continue;
8888 
8889 			range->freq[i].i = geo->bg[j].channel;
8890 			range->freq[i].m = geo->bg[j].freq * 100000;
8891 			range->freq[i].e = 1;
8892 			i++;
8893 		}
8894 	}
8895 
8896 	if (priv->ieee->mode & IEEE_A) {
8897 		for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8898 			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8899 			    (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
8900 				continue;
8901 
8902 			range->freq[i].i = geo->a[j].channel;
8903 			range->freq[i].m = geo->a[j].freq * 100000;
8904 			range->freq[i].e = 1;
8905 			i++;
8906 		}
8907 	}
8908 
8909 	range->num_channels = i;
8910 	range->num_frequency = i;
8911 
8912 	mutex_unlock(&priv->mutex);
8913 
8914 	/* Event capability (kernel + driver) */
8915 	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8916 				IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8917 				IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8918 				IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8919 	range->event_capa[1] = IW_EVENT_CAPA_K_1;
8920 
8921 	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8922 		IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8923 
8924 	range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
8925 
8926 	IPW_DEBUG_WX("GET Range\n");
8927 	return 0;
8928 }
8929 
8930 static int ipw_wx_set_wap(struct net_device *dev,
8931 			  struct iw_request_info *info,
8932 			  union iwreq_data *wrqu, char *extra)
8933 {
8934 	struct ipw_priv *priv = libipw_priv(dev);
8935 
8936 	if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8937 		return -EINVAL;
8938 	mutex_lock(&priv->mutex);
8939 	if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
8940 	    is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
8941 		/* we disable mandatory BSSID association */
8942 		IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8943 		priv->config &= ~CFG_STATIC_BSSID;
8944 		IPW_DEBUG_ASSOC("Attempting to associate with new "
8945 				"parameters.\n");
8946 		ipw_associate(priv);
8947 		mutex_unlock(&priv->mutex);
8948 		return 0;
8949 	}
8950 
8951 	priv->config |= CFG_STATIC_BSSID;
8952 	if (ether_addr_equal(priv->bssid, wrqu->ap_addr.sa_data)) {
8953 		IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8954 		mutex_unlock(&priv->mutex);
8955 		return 0;
8956 	}
8957 
8958 	IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
8959 		     wrqu->ap_addr.sa_data);
8960 
8961 	memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
8962 
8963 	/* Network configuration changed -- force [re]association */
8964 	IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
8965 	if (!ipw_disassociate(priv))
8966 		ipw_associate(priv);
8967 
8968 	mutex_unlock(&priv->mutex);
8969 	return 0;
8970 }
8971 
8972 static int ipw_wx_get_wap(struct net_device *dev,
8973 			  struct iw_request_info *info,
8974 			  union iwreq_data *wrqu, char *extra)
8975 {
8976 	struct ipw_priv *priv = libipw_priv(dev);
8977 
8978 	/* If we are associated, trying to associate, or have a statically
8979 	 * configured BSSID then return that; otherwise return ANY */
8980 	mutex_lock(&priv->mutex);
8981 	if (priv->config & CFG_STATIC_BSSID ||
8982 	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
8983 		wrqu->ap_addr.sa_family = ARPHRD_ETHER;
8984 		memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
8985 	} else
8986 		eth_zero_addr(wrqu->ap_addr.sa_data);
8987 
8988 	IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
8989 		     wrqu->ap_addr.sa_data);
8990 	mutex_unlock(&priv->mutex);
8991 	return 0;
8992 }
8993 
8994 static int ipw_wx_set_essid(struct net_device *dev,
8995 			    struct iw_request_info *info,
8996 			    union iwreq_data *wrqu, char *extra)
8997 {
8998 	struct ipw_priv *priv = libipw_priv(dev);
8999         int length;
9000 
9001         mutex_lock(&priv->mutex);
9002 
9003         if (!wrqu->essid.flags)
9004         {
9005                 IPW_DEBUG_WX("Setting ESSID to ANY\n");
9006                 ipw_disassociate(priv);
9007                 priv->config &= ~CFG_STATIC_ESSID;
9008                 ipw_associate(priv);
9009                 mutex_unlock(&priv->mutex);
9010                 return 0;
9011         }
9012 
9013 	length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9014 
9015 	priv->config |= CFG_STATIC_ESSID;
9016 
9017 	if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9018 	    && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9019 		IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9020 		mutex_unlock(&priv->mutex);
9021 		return 0;
9022 	}
9023 
9024 	IPW_DEBUG_WX("Setting ESSID: '%*pE' (%d)\n", length, extra, length);
9025 
9026 	priv->essid_len = length;
9027 	memcpy(priv->essid, extra, priv->essid_len);
9028 
9029 	/* Network configuration changed -- force [re]association */
9030 	IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9031 	if (!ipw_disassociate(priv))
9032 		ipw_associate(priv);
9033 
9034 	mutex_unlock(&priv->mutex);
9035 	return 0;
9036 }
9037 
9038 static int ipw_wx_get_essid(struct net_device *dev,
9039 			    struct iw_request_info *info,
9040 			    union iwreq_data *wrqu, char *extra)
9041 {
9042 	struct ipw_priv *priv = libipw_priv(dev);
9043 
9044 	/* If we are associated, trying to associate, or have a statically
9045 	 * configured ESSID then return that; otherwise return ANY */
9046 	mutex_lock(&priv->mutex);
9047 	if (priv->config & CFG_STATIC_ESSID ||
9048 	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9049 		IPW_DEBUG_WX("Getting essid: '%*pE'\n",
9050 			     priv->essid_len, priv->essid);
9051 		memcpy(extra, priv->essid, priv->essid_len);
9052 		wrqu->essid.length = priv->essid_len;
9053 		wrqu->essid.flags = 1;	/* active */
9054 	} else {
9055 		IPW_DEBUG_WX("Getting essid: ANY\n");
9056 		wrqu->essid.length = 0;
9057 		wrqu->essid.flags = 0;	/* active */
9058 	}
9059 	mutex_unlock(&priv->mutex);
9060 	return 0;
9061 }
9062 
9063 static int ipw_wx_set_nick(struct net_device *dev,
9064 			   struct iw_request_info *info,
9065 			   union iwreq_data *wrqu, char *extra)
9066 {
9067 	struct ipw_priv *priv = libipw_priv(dev);
9068 
9069 	IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9070 	if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9071 		return -E2BIG;
9072 	mutex_lock(&priv->mutex);
9073 	wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
9074 	memset(priv->nick, 0, sizeof(priv->nick));
9075 	memcpy(priv->nick, extra, wrqu->data.length);
9076 	IPW_DEBUG_TRACE("<<\n");
9077 	mutex_unlock(&priv->mutex);
9078 	return 0;
9079 
9080 }
9081 
9082 static int ipw_wx_get_nick(struct net_device *dev,
9083 			   struct iw_request_info *info,
9084 			   union iwreq_data *wrqu, char *extra)
9085 {
9086 	struct ipw_priv *priv = libipw_priv(dev);
9087 	IPW_DEBUG_WX("Getting nick\n");
9088 	mutex_lock(&priv->mutex);
9089 	wrqu->data.length = strlen(priv->nick);
9090 	memcpy(extra, priv->nick, wrqu->data.length);
9091 	wrqu->data.flags = 1;	/* active */
9092 	mutex_unlock(&priv->mutex);
9093 	return 0;
9094 }
9095 
9096 static int ipw_wx_set_sens(struct net_device *dev,
9097 			    struct iw_request_info *info,
9098 			    union iwreq_data *wrqu, char *extra)
9099 {
9100 	struct ipw_priv *priv = libipw_priv(dev);
9101 	int err = 0;
9102 
9103 	IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9104 	IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9105 	mutex_lock(&priv->mutex);
9106 
9107 	if (wrqu->sens.fixed == 0)
9108 	{
9109 		priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9110 		priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9111 		goto out;
9112 	}
9113 	if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9114 	    (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9115 		err = -EINVAL;
9116 		goto out;
9117 	}
9118 
9119 	priv->roaming_threshold = wrqu->sens.value;
9120 	priv->disassociate_threshold = 3*wrqu->sens.value;
9121       out:
9122 	mutex_unlock(&priv->mutex);
9123 	return err;
9124 }
9125 
9126 static int ipw_wx_get_sens(struct net_device *dev,
9127 			    struct iw_request_info *info,
9128 			    union iwreq_data *wrqu, char *extra)
9129 {
9130 	struct ipw_priv *priv = libipw_priv(dev);
9131 	mutex_lock(&priv->mutex);
9132 	wrqu->sens.fixed = 1;
9133 	wrqu->sens.value = priv->roaming_threshold;
9134 	mutex_unlock(&priv->mutex);
9135 
9136 	IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
9137 		     wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9138 
9139 	return 0;
9140 }
9141 
9142 static int ipw_wx_set_rate(struct net_device *dev,
9143 			   struct iw_request_info *info,
9144 			   union iwreq_data *wrqu, char *extra)
9145 {
9146 	/* TODO: We should use semaphores or locks for access to priv */
9147 	struct ipw_priv *priv = libipw_priv(dev);
9148 	u32 target_rate = wrqu->bitrate.value;
9149 	u32 fixed, mask;
9150 
9151 	/* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9152 	/* value = X, fixed = 1 means only rate X */
9153 	/* value = X, fixed = 0 means all rates lower equal X */
9154 
9155 	if (target_rate == -1) {
9156 		fixed = 0;
9157 		mask = LIBIPW_DEFAULT_RATES_MASK;
9158 		/* Now we should reassociate */
9159 		goto apply;
9160 	}
9161 
9162 	mask = 0;
9163 	fixed = wrqu->bitrate.fixed;
9164 
9165 	if (target_rate == 1000000 || !fixed)
9166 		mask |= LIBIPW_CCK_RATE_1MB_MASK;
9167 	if (target_rate == 1000000)
9168 		goto apply;
9169 
9170 	if (target_rate == 2000000 || !fixed)
9171 		mask |= LIBIPW_CCK_RATE_2MB_MASK;
9172 	if (target_rate == 2000000)
9173 		goto apply;
9174 
9175 	if (target_rate == 5500000 || !fixed)
9176 		mask |= LIBIPW_CCK_RATE_5MB_MASK;
9177 	if (target_rate == 5500000)
9178 		goto apply;
9179 
9180 	if (target_rate == 6000000 || !fixed)
9181 		mask |= LIBIPW_OFDM_RATE_6MB_MASK;
9182 	if (target_rate == 6000000)
9183 		goto apply;
9184 
9185 	if (target_rate == 9000000 || !fixed)
9186 		mask |= LIBIPW_OFDM_RATE_9MB_MASK;
9187 	if (target_rate == 9000000)
9188 		goto apply;
9189 
9190 	if (target_rate == 11000000 || !fixed)
9191 		mask |= LIBIPW_CCK_RATE_11MB_MASK;
9192 	if (target_rate == 11000000)
9193 		goto apply;
9194 
9195 	if (target_rate == 12000000 || !fixed)
9196 		mask |= LIBIPW_OFDM_RATE_12MB_MASK;
9197 	if (target_rate == 12000000)
9198 		goto apply;
9199 
9200 	if (target_rate == 18000000 || !fixed)
9201 		mask |= LIBIPW_OFDM_RATE_18MB_MASK;
9202 	if (target_rate == 18000000)
9203 		goto apply;
9204 
9205 	if (target_rate == 24000000 || !fixed)
9206 		mask |= LIBIPW_OFDM_RATE_24MB_MASK;
9207 	if (target_rate == 24000000)
9208 		goto apply;
9209 
9210 	if (target_rate == 36000000 || !fixed)
9211 		mask |= LIBIPW_OFDM_RATE_36MB_MASK;
9212 	if (target_rate == 36000000)
9213 		goto apply;
9214 
9215 	if (target_rate == 48000000 || !fixed)
9216 		mask |= LIBIPW_OFDM_RATE_48MB_MASK;
9217 	if (target_rate == 48000000)
9218 		goto apply;
9219 
9220 	if (target_rate == 54000000 || !fixed)
9221 		mask |= LIBIPW_OFDM_RATE_54MB_MASK;
9222 	if (target_rate == 54000000)
9223 		goto apply;
9224 
9225 	IPW_DEBUG_WX("invalid rate specified, returning error\n");
9226 	return -EINVAL;
9227 
9228       apply:
9229 	IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9230 		     mask, fixed ? "fixed" : "sub-rates");
9231 	mutex_lock(&priv->mutex);
9232 	if (mask == LIBIPW_DEFAULT_RATES_MASK) {
9233 		priv->config &= ~CFG_FIXED_RATE;
9234 		ipw_set_fixed_rate(priv, priv->ieee->mode);
9235 	} else
9236 		priv->config |= CFG_FIXED_RATE;
9237 
9238 	if (priv->rates_mask == mask) {
9239 		IPW_DEBUG_WX("Mask set to current mask.\n");
9240 		mutex_unlock(&priv->mutex);
9241 		return 0;
9242 	}
9243 
9244 	priv->rates_mask = mask;
9245 
9246 	/* Network configuration changed -- force [re]association */
9247 	IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9248 	if (!ipw_disassociate(priv))
9249 		ipw_associate(priv);
9250 
9251 	mutex_unlock(&priv->mutex);
9252 	return 0;
9253 }
9254 
9255 static int ipw_wx_get_rate(struct net_device *dev,
9256 			   struct iw_request_info *info,
9257 			   union iwreq_data *wrqu, char *extra)
9258 {
9259 	struct ipw_priv *priv = libipw_priv(dev);
9260 	mutex_lock(&priv->mutex);
9261 	wrqu->bitrate.value = priv->last_rate;
9262 	wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9263 	mutex_unlock(&priv->mutex);
9264 	IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
9265 	return 0;
9266 }
9267 
9268 static int ipw_wx_set_rts(struct net_device *dev,
9269 			  struct iw_request_info *info,
9270 			  union iwreq_data *wrqu, char *extra)
9271 {
9272 	struct ipw_priv *priv = libipw_priv(dev);
9273 	mutex_lock(&priv->mutex);
9274 	if (wrqu->rts.disabled || !wrqu->rts.fixed)
9275 		priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9276 	else {
9277 		if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9278 		    wrqu->rts.value > MAX_RTS_THRESHOLD) {
9279 			mutex_unlock(&priv->mutex);
9280 			return -EINVAL;
9281 		}
9282 		priv->rts_threshold = wrqu->rts.value;
9283 	}
9284 
9285 	ipw_send_rts_threshold(priv, priv->rts_threshold);
9286 	mutex_unlock(&priv->mutex);
9287 	IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
9288 	return 0;
9289 }
9290 
9291 static int ipw_wx_get_rts(struct net_device *dev,
9292 			  struct iw_request_info *info,
9293 			  union iwreq_data *wrqu, char *extra)
9294 {
9295 	struct ipw_priv *priv = libipw_priv(dev);
9296 	mutex_lock(&priv->mutex);
9297 	wrqu->rts.value = priv->rts_threshold;
9298 	wrqu->rts.fixed = 0;	/* no auto select */
9299 	wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9300 	mutex_unlock(&priv->mutex);
9301 	IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
9302 	return 0;
9303 }
9304 
9305 static int ipw_wx_set_txpow(struct net_device *dev,
9306 			    struct iw_request_info *info,
9307 			    union iwreq_data *wrqu, char *extra)
9308 {
9309 	struct ipw_priv *priv = libipw_priv(dev);
9310 	int err = 0;
9311 
9312 	mutex_lock(&priv->mutex);
9313 	if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9314 		err = -EINPROGRESS;
9315 		goto out;
9316 	}
9317 
9318 	if (!wrqu->power.fixed)
9319 		wrqu->power.value = IPW_TX_POWER_DEFAULT;
9320 
9321 	if (wrqu->power.flags != IW_TXPOW_DBM) {
9322 		err = -EINVAL;
9323 		goto out;
9324 	}
9325 
9326 	if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9327 	    (wrqu->power.value < IPW_TX_POWER_MIN)) {
9328 		err = -EINVAL;
9329 		goto out;
9330 	}
9331 
9332 	priv->tx_power = wrqu->power.value;
9333 	err = ipw_set_tx_power(priv);
9334       out:
9335 	mutex_unlock(&priv->mutex);
9336 	return err;
9337 }
9338 
9339 static int ipw_wx_get_txpow(struct net_device *dev,
9340 			    struct iw_request_info *info,
9341 			    union iwreq_data *wrqu, char *extra)
9342 {
9343 	struct ipw_priv *priv = libipw_priv(dev);
9344 	mutex_lock(&priv->mutex);
9345 	wrqu->power.value = priv->tx_power;
9346 	wrqu->power.fixed = 1;
9347 	wrqu->power.flags = IW_TXPOW_DBM;
9348 	wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9349 	mutex_unlock(&priv->mutex);
9350 
9351 	IPW_DEBUG_WX("GET TX Power -> %s %d\n",
9352 		     wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9353 
9354 	return 0;
9355 }
9356 
9357 static int ipw_wx_set_frag(struct net_device *dev,
9358 			   struct iw_request_info *info,
9359 			   union iwreq_data *wrqu, char *extra)
9360 {
9361 	struct ipw_priv *priv = libipw_priv(dev);
9362 	mutex_lock(&priv->mutex);
9363 	if (wrqu->frag.disabled || !wrqu->frag.fixed)
9364 		priv->ieee->fts = DEFAULT_FTS;
9365 	else {
9366 		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9367 		    wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9368 			mutex_unlock(&priv->mutex);
9369 			return -EINVAL;
9370 		}
9371 
9372 		priv->ieee->fts = wrqu->frag.value & ~0x1;
9373 	}
9374 
9375 	ipw_send_frag_threshold(priv, wrqu->frag.value);
9376 	mutex_unlock(&priv->mutex);
9377 	IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
9378 	return 0;
9379 }
9380 
9381 static int ipw_wx_get_frag(struct net_device *dev,
9382 			   struct iw_request_info *info,
9383 			   union iwreq_data *wrqu, char *extra)
9384 {
9385 	struct ipw_priv *priv = libipw_priv(dev);
9386 	mutex_lock(&priv->mutex);
9387 	wrqu->frag.value = priv->ieee->fts;
9388 	wrqu->frag.fixed = 0;	/* no auto select */
9389 	wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9390 	mutex_unlock(&priv->mutex);
9391 	IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
9392 
9393 	return 0;
9394 }
9395 
9396 static int ipw_wx_set_retry(struct net_device *dev,
9397 			    struct iw_request_info *info,
9398 			    union iwreq_data *wrqu, char *extra)
9399 {
9400 	struct ipw_priv *priv = libipw_priv(dev);
9401 
9402 	if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9403 		return -EINVAL;
9404 
9405 	if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9406 		return 0;
9407 
9408 	if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9409 		return -EINVAL;
9410 
9411 	mutex_lock(&priv->mutex);
9412 	if (wrqu->retry.flags & IW_RETRY_SHORT)
9413 		priv->short_retry_limit = (u8) wrqu->retry.value;
9414 	else if (wrqu->retry.flags & IW_RETRY_LONG)
9415 		priv->long_retry_limit = (u8) wrqu->retry.value;
9416 	else {
9417 		priv->short_retry_limit = (u8) wrqu->retry.value;
9418 		priv->long_retry_limit = (u8) wrqu->retry.value;
9419 	}
9420 
9421 	ipw_send_retry_limit(priv, priv->short_retry_limit,
9422 			     priv->long_retry_limit);
9423 	mutex_unlock(&priv->mutex);
9424 	IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9425 		     priv->short_retry_limit, priv->long_retry_limit);
9426 	return 0;
9427 }
9428 
9429 static int ipw_wx_get_retry(struct net_device *dev,
9430 			    struct iw_request_info *info,
9431 			    union iwreq_data *wrqu, char *extra)
9432 {
9433 	struct ipw_priv *priv = libipw_priv(dev);
9434 
9435 	mutex_lock(&priv->mutex);
9436 	wrqu->retry.disabled = 0;
9437 
9438 	if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9439 		mutex_unlock(&priv->mutex);
9440 		return -EINVAL;
9441 	}
9442 
9443 	if (wrqu->retry.flags & IW_RETRY_LONG) {
9444 		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9445 		wrqu->retry.value = priv->long_retry_limit;
9446 	} else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9447 		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9448 		wrqu->retry.value = priv->short_retry_limit;
9449 	} else {
9450 		wrqu->retry.flags = IW_RETRY_LIMIT;
9451 		wrqu->retry.value = priv->short_retry_limit;
9452 	}
9453 	mutex_unlock(&priv->mutex);
9454 
9455 	IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
9456 
9457 	return 0;
9458 }
9459 
9460 static int ipw_wx_set_scan(struct net_device *dev,
9461 			   struct iw_request_info *info,
9462 			   union iwreq_data *wrqu, char *extra)
9463 {
9464 	struct ipw_priv *priv = libipw_priv(dev);
9465 	struct iw_scan_req *req = (struct iw_scan_req *)extra;
9466 	struct delayed_work *work = NULL;
9467 
9468 	mutex_lock(&priv->mutex);
9469 
9470 	priv->user_requested_scan = 1;
9471 
9472 	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9473 		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9474 			int len = min((int)req->essid_len,
9475 			              (int)sizeof(priv->direct_scan_ssid));
9476 			memcpy(priv->direct_scan_ssid, req->essid, len);
9477 			priv->direct_scan_ssid_len = len;
9478 			work = &priv->request_direct_scan;
9479 		} else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9480 			work = &priv->request_passive_scan;
9481 		}
9482 	} else {
9483 		/* Normal active broadcast scan */
9484 		work = &priv->request_scan;
9485 	}
9486 
9487 	mutex_unlock(&priv->mutex);
9488 
9489 	IPW_DEBUG_WX("Start scan\n");
9490 
9491 	schedule_delayed_work(work, 0);
9492 
9493 	return 0;
9494 }
9495 
9496 static int ipw_wx_get_scan(struct net_device *dev,
9497 			   struct iw_request_info *info,
9498 			   union iwreq_data *wrqu, char *extra)
9499 {
9500 	struct ipw_priv *priv = libipw_priv(dev);
9501 	return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
9502 }
9503 
9504 static int ipw_wx_set_encode(struct net_device *dev,
9505 			     struct iw_request_info *info,
9506 			     union iwreq_data *wrqu, char *key)
9507 {
9508 	struct ipw_priv *priv = libipw_priv(dev);
9509 	int ret;
9510 	u32 cap = priv->capability;
9511 
9512 	mutex_lock(&priv->mutex);
9513 	ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
9514 
9515 	/* In IBSS mode, we need to notify the firmware to update
9516 	 * the beacon info after we changed the capability. */
9517 	if (cap != priv->capability &&
9518 	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
9519 	    priv->status & STATUS_ASSOCIATED)
9520 		ipw_disassociate(priv);
9521 
9522 	mutex_unlock(&priv->mutex);
9523 	return ret;
9524 }
9525 
9526 static int ipw_wx_get_encode(struct net_device *dev,
9527 			     struct iw_request_info *info,
9528 			     union iwreq_data *wrqu, char *key)
9529 {
9530 	struct ipw_priv *priv = libipw_priv(dev);
9531 	return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
9532 }
9533 
9534 static int ipw_wx_set_power(struct net_device *dev,
9535 			    struct iw_request_info *info,
9536 			    union iwreq_data *wrqu, char *extra)
9537 {
9538 	struct ipw_priv *priv = libipw_priv(dev);
9539 	int err;
9540 	mutex_lock(&priv->mutex);
9541 	if (wrqu->power.disabled) {
9542 		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9543 		err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9544 		if (err) {
9545 			IPW_DEBUG_WX("failed setting power mode.\n");
9546 			mutex_unlock(&priv->mutex);
9547 			return err;
9548 		}
9549 		IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9550 		mutex_unlock(&priv->mutex);
9551 		return 0;
9552 	}
9553 
9554 	switch (wrqu->power.flags & IW_POWER_MODE) {
9555 	case IW_POWER_ON:	/* If not specified */
9556 	case IW_POWER_MODE:	/* If set all mask */
9557 	case IW_POWER_ALL_R:	/* If explicitly state all */
9558 		break;
9559 	default:		/* Otherwise we don't support it */
9560 		IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9561 			     wrqu->power.flags);
9562 		mutex_unlock(&priv->mutex);
9563 		return -EOPNOTSUPP;
9564 	}
9565 
9566 	/* If the user hasn't specified a power management mode yet, default
9567 	 * to BATTERY */
9568 	if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9569 		priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9570 	else
9571 		priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9572 
9573 	err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9574 	if (err) {
9575 		IPW_DEBUG_WX("failed setting power mode.\n");
9576 		mutex_unlock(&priv->mutex);
9577 		return err;
9578 	}
9579 
9580 	IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9581 	mutex_unlock(&priv->mutex);
9582 	return 0;
9583 }
9584 
9585 static int ipw_wx_get_power(struct net_device *dev,
9586 			    struct iw_request_info *info,
9587 			    union iwreq_data *wrqu, char *extra)
9588 {
9589 	struct ipw_priv *priv = libipw_priv(dev);
9590 	mutex_lock(&priv->mutex);
9591 	if (!(priv->power_mode & IPW_POWER_ENABLED))
9592 		wrqu->power.disabled = 1;
9593 	else
9594 		wrqu->power.disabled = 0;
9595 
9596 	mutex_unlock(&priv->mutex);
9597 	IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9598 
9599 	return 0;
9600 }
9601 
9602 static int ipw_wx_set_powermode(struct net_device *dev,
9603 				struct iw_request_info *info,
9604 				union iwreq_data *wrqu, char *extra)
9605 {
9606 	struct ipw_priv *priv = libipw_priv(dev);
9607 	int mode = *(int *)extra;
9608 	int err;
9609 
9610 	mutex_lock(&priv->mutex);
9611 	if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9612 		mode = IPW_POWER_AC;
9613 
9614 	if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9615 		err = ipw_send_power_mode(priv, mode);
9616 		if (err) {
9617 			IPW_DEBUG_WX("failed setting power mode.\n");
9618 			mutex_unlock(&priv->mutex);
9619 			return err;
9620 		}
9621 		priv->power_mode = IPW_POWER_ENABLED | mode;
9622 	}
9623 	mutex_unlock(&priv->mutex);
9624 	return 0;
9625 }
9626 
9627 #define MAX_WX_STRING 80
9628 static int ipw_wx_get_powermode(struct net_device *dev,
9629 				struct iw_request_info *info,
9630 				union iwreq_data *wrqu, char *extra)
9631 {
9632 	struct ipw_priv *priv = libipw_priv(dev);
9633 	int level = IPW_POWER_LEVEL(priv->power_mode);
9634 	char *p = extra;
9635 
9636 	p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9637 
9638 	switch (level) {
9639 	case IPW_POWER_AC:
9640 		p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9641 		break;
9642 	case IPW_POWER_BATTERY:
9643 		p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9644 		break;
9645 	default:
9646 		p += snprintf(p, MAX_WX_STRING - (p - extra),
9647 			      "(Timeout %dms, Period %dms)",
9648 			      timeout_duration[level - 1] / 1000,
9649 			      period_duration[level - 1] / 1000);
9650 	}
9651 
9652 	if (!(priv->power_mode & IPW_POWER_ENABLED))
9653 		p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9654 
9655 	wrqu->data.length = p - extra + 1;
9656 
9657 	return 0;
9658 }
9659 
9660 static int ipw_wx_set_wireless_mode(struct net_device *dev,
9661 				    struct iw_request_info *info,
9662 				    union iwreq_data *wrqu, char *extra)
9663 {
9664 	struct ipw_priv *priv = libipw_priv(dev);
9665 	int mode = *(int *)extra;
9666 	u8 band = 0, modulation = 0;
9667 
9668 	if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9669 		IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9670 		return -EINVAL;
9671 	}
9672 	mutex_lock(&priv->mutex);
9673 	if (priv->adapter == IPW_2915ABG) {
9674 		priv->ieee->abg_true = 1;
9675 		if (mode & IEEE_A) {
9676 			band |= LIBIPW_52GHZ_BAND;
9677 			modulation |= LIBIPW_OFDM_MODULATION;
9678 		} else
9679 			priv->ieee->abg_true = 0;
9680 	} else {
9681 		if (mode & IEEE_A) {
9682 			IPW_WARNING("Attempt to set 2200BG into "
9683 				    "802.11a mode\n");
9684 			mutex_unlock(&priv->mutex);
9685 			return -EINVAL;
9686 		}
9687 
9688 		priv->ieee->abg_true = 0;
9689 	}
9690 
9691 	if (mode & IEEE_B) {
9692 		band |= LIBIPW_24GHZ_BAND;
9693 		modulation |= LIBIPW_CCK_MODULATION;
9694 	} else
9695 		priv->ieee->abg_true = 0;
9696 
9697 	if (mode & IEEE_G) {
9698 		band |= LIBIPW_24GHZ_BAND;
9699 		modulation |= LIBIPW_OFDM_MODULATION;
9700 	} else
9701 		priv->ieee->abg_true = 0;
9702 
9703 	priv->ieee->mode = mode;
9704 	priv->ieee->freq_band = band;
9705 	priv->ieee->modulation = modulation;
9706 	init_supported_rates(priv, &priv->rates);
9707 
9708 	/* Network configuration changed -- force [re]association */
9709 	IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9710 	if (!ipw_disassociate(priv)) {
9711 		ipw_send_supported_rates(priv, &priv->rates);
9712 		ipw_associate(priv);
9713 	}
9714 
9715 	/* Update the band LEDs */
9716 	ipw_led_band_on(priv);
9717 
9718 	IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9719 		     mode & IEEE_A ? 'a' : '.',
9720 		     mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9721 	mutex_unlock(&priv->mutex);
9722 	return 0;
9723 }
9724 
9725 static int ipw_wx_get_wireless_mode(struct net_device *dev,
9726 				    struct iw_request_info *info,
9727 				    union iwreq_data *wrqu, char *extra)
9728 {
9729 	struct ipw_priv *priv = libipw_priv(dev);
9730 	mutex_lock(&priv->mutex);
9731 	switch (priv->ieee->mode) {
9732 	case IEEE_A:
9733 		strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9734 		break;
9735 	case IEEE_B:
9736 		strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9737 		break;
9738 	case IEEE_A | IEEE_B:
9739 		strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9740 		break;
9741 	case IEEE_G:
9742 		strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9743 		break;
9744 	case IEEE_A | IEEE_G:
9745 		strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9746 		break;
9747 	case IEEE_B | IEEE_G:
9748 		strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9749 		break;
9750 	case IEEE_A | IEEE_B | IEEE_G:
9751 		strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9752 		break;
9753 	default:
9754 		strncpy(extra, "unknown", MAX_WX_STRING);
9755 		break;
9756 	}
9757 	extra[MAX_WX_STRING - 1] = '\0';
9758 
9759 	IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9760 
9761 	wrqu->data.length = strlen(extra) + 1;
9762 	mutex_unlock(&priv->mutex);
9763 
9764 	return 0;
9765 }
9766 
9767 static int ipw_wx_set_preamble(struct net_device *dev,
9768 			       struct iw_request_info *info,
9769 			       union iwreq_data *wrqu, char *extra)
9770 {
9771 	struct ipw_priv *priv = libipw_priv(dev);
9772 	int mode = *(int *)extra;
9773 	mutex_lock(&priv->mutex);
9774 	/* Switching from SHORT -> LONG requires a disassociation */
9775 	if (mode == 1) {
9776 		if (!(priv->config & CFG_PREAMBLE_LONG)) {
9777 			priv->config |= CFG_PREAMBLE_LONG;
9778 
9779 			/* Network configuration changed -- force [re]association */
9780 			IPW_DEBUG_ASSOC
9781 			    ("[re]association triggered due to preamble change.\n");
9782 			if (!ipw_disassociate(priv))
9783 				ipw_associate(priv);
9784 		}
9785 		goto done;
9786 	}
9787 
9788 	if (mode == 0) {
9789 		priv->config &= ~CFG_PREAMBLE_LONG;
9790 		goto done;
9791 	}
9792 	mutex_unlock(&priv->mutex);
9793 	return -EINVAL;
9794 
9795       done:
9796 	mutex_unlock(&priv->mutex);
9797 	return 0;
9798 }
9799 
9800 static int ipw_wx_get_preamble(struct net_device *dev,
9801 			       struct iw_request_info *info,
9802 			       union iwreq_data *wrqu, char *extra)
9803 {
9804 	struct ipw_priv *priv = libipw_priv(dev);
9805 	mutex_lock(&priv->mutex);
9806 	if (priv->config & CFG_PREAMBLE_LONG)
9807 		snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9808 	else
9809 		snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9810 	mutex_unlock(&priv->mutex);
9811 	return 0;
9812 }
9813 
9814 #ifdef CONFIG_IPW2200_MONITOR
9815 static int ipw_wx_set_monitor(struct net_device *dev,
9816 			      struct iw_request_info *info,
9817 			      union iwreq_data *wrqu, char *extra)
9818 {
9819 	struct ipw_priv *priv = libipw_priv(dev);
9820 	int *parms = (int *)extra;
9821 	int enable = (parms[0] > 0);
9822 	mutex_lock(&priv->mutex);
9823 	IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9824 	if (enable) {
9825 		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9826 #ifdef CONFIG_IPW2200_RADIOTAP
9827 			priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9828 #else
9829 			priv->net_dev->type = ARPHRD_IEEE80211;
9830 #endif
9831 			schedule_work(&priv->adapter_restart);
9832 		}
9833 
9834 		ipw_set_channel(priv, parms[1]);
9835 	} else {
9836 		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9837 			mutex_unlock(&priv->mutex);
9838 			return 0;
9839 		}
9840 		priv->net_dev->type = ARPHRD_ETHER;
9841 		schedule_work(&priv->adapter_restart);
9842 	}
9843 	mutex_unlock(&priv->mutex);
9844 	return 0;
9845 }
9846 
9847 #endif				/* CONFIG_IPW2200_MONITOR */
9848 
9849 static int ipw_wx_reset(struct net_device *dev,
9850 			struct iw_request_info *info,
9851 			union iwreq_data *wrqu, char *extra)
9852 {
9853 	struct ipw_priv *priv = libipw_priv(dev);
9854 	IPW_DEBUG_WX("RESET\n");
9855 	schedule_work(&priv->adapter_restart);
9856 	return 0;
9857 }
9858 
9859 static int ipw_wx_sw_reset(struct net_device *dev,
9860 			   struct iw_request_info *info,
9861 			   union iwreq_data *wrqu, char *extra)
9862 {
9863 	struct ipw_priv *priv = libipw_priv(dev);
9864 	union iwreq_data wrqu_sec = {
9865 		.encoding = {
9866 			     .flags = IW_ENCODE_DISABLED,
9867 			     },
9868 	};
9869 	int ret;
9870 
9871 	IPW_DEBUG_WX("SW_RESET\n");
9872 
9873 	mutex_lock(&priv->mutex);
9874 
9875 	ret = ipw_sw_reset(priv, 2);
9876 	if (!ret) {
9877 		free_firmware();
9878 		ipw_adapter_restart(priv);
9879 	}
9880 
9881 	/* The SW reset bit might have been toggled on by the 'disable'
9882 	 * module parameter, so take appropriate action */
9883 	ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9884 
9885 	mutex_unlock(&priv->mutex);
9886 	libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9887 	mutex_lock(&priv->mutex);
9888 
9889 	if (!(priv->status & STATUS_RF_KILL_MASK)) {
9890 		/* Configuration likely changed -- force [re]association */
9891 		IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9892 				"reset.\n");
9893 		if (!ipw_disassociate(priv))
9894 			ipw_associate(priv);
9895 	}
9896 
9897 	mutex_unlock(&priv->mutex);
9898 
9899 	return 0;
9900 }
9901 
9902 /* Rebase the WE IOCTLs to zero for the handler array */
9903 static iw_handler ipw_wx_handlers[] = {
9904 	IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
9905 	IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
9906 	IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
9907 	IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
9908 	IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
9909 	IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
9910 	IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
9911 	IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
9912 	IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
9913 	IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
9914 	IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
9915 	IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
9916 	IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
9917 	IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
9918 	IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
9919 	IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
9920 	IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
9921 	IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
9922 	IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
9923 	IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
9924 	IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
9925 	IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
9926 	IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
9927 	IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
9928 	IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
9929 	IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
9930 	IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
9931 	IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
9932 	IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
9933 	IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
9934 	IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
9935 	IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
9936 	IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
9937 	IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
9938 	IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
9939 	IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
9940 	IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
9941 	IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
9942 	IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
9943 	IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
9944 	IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
9945 };
9946 
9947 enum {
9948 	IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9949 	IPW_PRIV_GET_POWER,
9950 	IPW_PRIV_SET_MODE,
9951 	IPW_PRIV_GET_MODE,
9952 	IPW_PRIV_SET_PREAMBLE,
9953 	IPW_PRIV_GET_PREAMBLE,
9954 	IPW_PRIV_RESET,
9955 	IPW_PRIV_SW_RESET,
9956 #ifdef CONFIG_IPW2200_MONITOR
9957 	IPW_PRIV_SET_MONITOR,
9958 #endif
9959 };
9960 
9961 static struct iw_priv_args ipw_priv_args[] = {
9962 	{
9963 	 .cmd = IPW_PRIV_SET_POWER,
9964 	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9965 	 .name = "set_power"},
9966 	{
9967 	 .cmd = IPW_PRIV_GET_POWER,
9968 	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9969 	 .name = "get_power"},
9970 	{
9971 	 .cmd = IPW_PRIV_SET_MODE,
9972 	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9973 	 .name = "set_mode"},
9974 	{
9975 	 .cmd = IPW_PRIV_GET_MODE,
9976 	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
9977 	 .name = "get_mode"},
9978 	{
9979 	 .cmd = IPW_PRIV_SET_PREAMBLE,
9980 	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
9981 	 .name = "set_preamble"},
9982 	{
9983 	 .cmd = IPW_PRIV_GET_PREAMBLE,
9984 	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
9985 	 .name = "get_preamble"},
9986 	{
9987 	 IPW_PRIV_RESET,
9988 	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
9989 	{
9990 	 IPW_PRIV_SW_RESET,
9991 	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
9992 #ifdef CONFIG_IPW2200_MONITOR
9993 	{
9994 	 IPW_PRIV_SET_MONITOR,
9995 	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
9996 #endif				/* CONFIG_IPW2200_MONITOR */
9997 };
9998 
9999 static iw_handler ipw_priv_handler[] = {
10000 	ipw_wx_set_powermode,
10001 	ipw_wx_get_powermode,
10002 	ipw_wx_set_wireless_mode,
10003 	ipw_wx_get_wireless_mode,
10004 	ipw_wx_set_preamble,
10005 	ipw_wx_get_preamble,
10006 	ipw_wx_reset,
10007 	ipw_wx_sw_reset,
10008 #ifdef CONFIG_IPW2200_MONITOR
10009 	ipw_wx_set_monitor,
10010 #endif
10011 };
10012 
10013 static struct iw_handler_def ipw_wx_handler_def = {
10014 	.standard = ipw_wx_handlers,
10015 	.num_standard = ARRAY_SIZE(ipw_wx_handlers),
10016 	.num_private = ARRAY_SIZE(ipw_priv_handler),
10017 	.num_private_args = ARRAY_SIZE(ipw_priv_args),
10018 	.private = ipw_priv_handler,
10019 	.private_args = ipw_priv_args,
10020 	.get_wireless_stats = ipw_get_wireless_stats,
10021 };
10022 
10023 /*
10024  * Get wireless statistics.
10025  * Called by /proc/net/wireless
10026  * Also called by SIOCGIWSTATS
10027  */
10028 static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10029 {
10030 	struct ipw_priv *priv = libipw_priv(dev);
10031 	struct iw_statistics *wstats;
10032 
10033 	wstats = &priv->wstats;
10034 
10035 	/* if hw is disabled, then ipw_get_ordinal() can't be called.
10036 	 * netdev->get_wireless_stats seems to be called before fw is
10037 	 * initialized.  STATUS_ASSOCIATED will only be set if the hw is up
10038 	 * and associated; if not associcated, the values are all meaningless
10039 	 * anyway, so set them all to NULL and INVALID */
10040 	if (!(priv->status & STATUS_ASSOCIATED)) {
10041 		wstats->miss.beacon = 0;
10042 		wstats->discard.retries = 0;
10043 		wstats->qual.qual = 0;
10044 		wstats->qual.level = 0;
10045 		wstats->qual.noise = 0;
10046 		wstats->qual.updated = 7;
10047 		wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10048 		    IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10049 		return wstats;
10050 	}
10051 
10052 	wstats->qual.qual = priv->quality;
10053 	wstats->qual.level = priv->exp_avg_rssi;
10054 	wstats->qual.noise = priv->exp_avg_noise;
10055 	wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10056 	    IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10057 
10058 	wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10059 	wstats->discard.retries = priv->last_tx_failures;
10060 	wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10061 
10062 /*	if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10063 	goto fail_get_ordinal;
10064 	wstats->discard.retries += tx_retry; */
10065 
10066 	return wstats;
10067 }
10068 
10069 /* net device stuff */
10070 
10071 static  void init_sys_config(struct ipw_sys_config *sys_config)
10072 {
10073 	memset(sys_config, 0, sizeof(struct ipw_sys_config));
10074 	sys_config->bt_coexistence = 0;
10075 	sys_config->answer_broadcast_ssid_probe = 0;
10076 	sys_config->accept_all_data_frames = 0;
10077 	sys_config->accept_non_directed_frames = 1;
10078 	sys_config->exclude_unicast_unencrypted = 0;
10079 	sys_config->disable_unicast_decryption = 1;
10080 	sys_config->exclude_multicast_unencrypted = 0;
10081 	sys_config->disable_multicast_decryption = 1;
10082 	if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10083 		antenna = CFG_SYS_ANTENNA_BOTH;
10084 	sys_config->antenna_diversity = antenna;
10085 	sys_config->pass_crc_to_host = 0;	/* TODO: See if 1 gives us FCS */
10086 	sys_config->dot11g_auto_detection = 0;
10087 	sys_config->enable_cts_to_self = 0;
10088 	sys_config->bt_coexist_collision_thr = 0;
10089 	sys_config->pass_noise_stats_to_host = 1;	/* 1 -- fix for 256 */
10090 	sys_config->silence_threshold = 0x1e;
10091 }
10092 
10093 static int ipw_net_open(struct net_device *dev)
10094 {
10095 	IPW_DEBUG_INFO("dev->open\n");
10096 	netif_start_queue(dev);
10097 	return 0;
10098 }
10099 
10100 static int ipw_net_stop(struct net_device *dev)
10101 {
10102 	IPW_DEBUG_INFO("dev->close\n");
10103 	netif_stop_queue(dev);
10104 	return 0;
10105 }
10106 
10107 /*
10108 todo:
10109 
10110 modify to send one tfd per fragment instead of using chunking.  otherwise
10111 we need to heavily modify the libipw_skb_to_txb.
10112 */
10113 
10114 static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
10115 			     int pri)
10116 {
10117 	struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
10118 	    txb->fragments[0]->data;
10119 	int i = 0;
10120 	struct tfd_frame *tfd;
10121 #ifdef CONFIG_IPW2200_QOS
10122 	int tx_id = ipw_get_tx_queue_number(priv, pri);
10123 	struct clx2_tx_queue *txq = &priv->txq[tx_id];
10124 #else
10125 	struct clx2_tx_queue *txq = &priv->txq[0];
10126 #endif
10127 	struct clx2_queue *q = &txq->q;
10128 	u8 id, hdr_len, unicast;
10129 	int fc;
10130 
10131 	if (!(priv->status & STATUS_ASSOCIATED))
10132 		goto drop;
10133 
10134 	hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10135 	switch (priv->ieee->iw_mode) {
10136 	case IW_MODE_ADHOC:
10137 		unicast = !is_multicast_ether_addr(hdr->addr1);
10138 		id = ipw_find_station(priv, hdr->addr1);
10139 		if (id == IPW_INVALID_STATION) {
10140 			id = ipw_add_station(priv, hdr->addr1);
10141 			if (id == IPW_INVALID_STATION) {
10142 				IPW_WARNING("Attempt to send data to "
10143 					    "invalid cell: %pM\n",
10144 					    hdr->addr1);
10145 				goto drop;
10146 			}
10147 		}
10148 		break;
10149 
10150 	case IW_MODE_INFRA:
10151 	default:
10152 		unicast = !is_multicast_ether_addr(hdr->addr3);
10153 		id = 0;
10154 		break;
10155 	}
10156 
10157 	tfd = &txq->bd[q->first_empty];
10158 	txq->txb[q->first_empty] = txb;
10159 	memset(tfd, 0, sizeof(*tfd));
10160 	tfd->u.data.station_number = id;
10161 
10162 	tfd->control_flags.message_type = TX_FRAME_TYPE;
10163 	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10164 
10165 	tfd->u.data.cmd_id = DINO_CMD_TX;
10166 	tfd->u.data.len = cpu_to_le16(txb->payload_size);
10167 
10168 	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10169 		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10170 	else
10171 		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10172 
10173 	if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10174 		tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10175 
10176 	fc = le16_to_cpu(hdr->frame_ctl);
10177 	hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10178 
10179 	memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10180 
10181 	if (likely(unicast))
10182 		tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10183 
10184 	if (txb->encrypted && !priv->ieee->host_encrypt) {
10185 		switch (priv->ieee->sec.level) {
10186 		case SEC_LEVEL_3:
10187 			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10188 			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10189 			/* XXX: ACK flag must be set for CCMP even if it
10190 			 * is a multicast/broadcast packet, because CCMP
10191 			 * group communication encrypted by GTK is
10192 			 * actually done by the AP. */
10193 			if (!unicast)
10194 				tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10195 
10196 			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10197 			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10198 			tfd->u.data.key_index = 0;
10199 			tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10200 			break;
10201 		case SEC_LEVEL_2:
10202 			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10203 			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10204 			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10205 			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10206 			tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10207 			break;
10208 		case SEC_LEVEL_1:
10209 			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10210 			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10211 			tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10212 			if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10213 			    40)
10214 				tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10215 			else
10216 				tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10217 			break;
10218 		case SEC_LEVEL_0:
10219 			break;
10220 		default:
10221 			printk(KERN_ERR "Unknown security level %d\n",
10222 			       priv->ieee->sec.level);
10223 			break;
10224 		}
10225 	} else
10226 		/* No hardware encryption */
10227 		tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10228 
10229 #ifdef CONFIG_IPW2200_QOS
10230 	if (fc & IEEE80211_STYPE_QOS_DATA)
10231 		ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10232 #endif				/* CONFIG_IPW2200_QOS */
10233 
10234 	/* payload */
10235 	tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10236 						 txb->nr_frags));
10237 	IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10238 		       txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10239 	for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10240 		IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10241 			       i, le32_to_cpu(tfd->u.data.num_chunks),
10242 			       txb->fragments[i]->len - hdr_len);
10243 		IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10244 			     i, tfd->u.data.num_chunks,
10245 			     txb->fragments[i]->len - hdr_len);
10246 		printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10247 			   txb->fragments[i]->len - hdr_len);
10248 
10249 		tfd->u.data.chunk_ptr[i] =
10250 		    cpu_to_le32(pci_map_single
10251 				(priv->pci_dev,
10252 				 txb->fragments[i]->data + hdr_len,
10253 				 txb->fragments[i]->len - hdr_len,
10254 				 PCI_DMA_TODEVICE));
10255 		tfd->u.data.chunk_len[i] =
10256 		    cpu_to_le16(txb->fragments[i]->len - hdr_len);
10257 	}
10258 
10259 	if (i != txb->nr_frags) {
10260 		struct sk_buff *skb;
10261 		u16 remaining_bytes = 0;
10262 		int j;
10263 
10264 		for (j = i; j < txb->nr_frags; j++)
10265 			remaining_bytes += txb->fragments[j]->len - hdr_len;
10266 
10267 		printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10268 		       remaining_bytes);
10269 		skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10270 		if (skb != NULL) {
10271 			tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10272 			for (j = i; j < txb->nr_frags; j++) {
10273 				int size = txb->fragments[j]->len - hdr_len;
10274 
10275 				printk(KERN_INFO "Adding frag %d %d...\n",
10276 				       j, size);
10277 				memcpy(skb_put(skb, size),
10278 				       txb->fragments[j]->data + hdr_len, size);
10279 			}
10280 			dev_kfree_skb_any(txb->fragments[i]);
10281 			txb->fragments[i] = skb;
10282 			tfd->u.data.chunk_ptr[i] =
10283 			    cpu_to_le32(pci_map_single
10284 					(priv->pci_dev, skb->data,
10285 					 remaining_bytes,
10286 					 PCI_DMA_TODEVICE));
10287 
10288 			le32_add_cpu(&tfd->u.data.num_chunks, 1);
10289 		}
10290 	}
10291 
10292 	/* kick DMA */
10293 	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10294 	ipw_write32(priv, q->reg_w, q->first_empty);
10295 
10296 	if (ipw_tx_queue_space(q) < q->high_mark)
10297 		netif_stop_queue(priv->net_dev);
10298 
10299 	return NETDEV_TX_OK;
10300 
10301       drop:
10302 	IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10303 	libipw_txb_free(txb);
10304 	return NETDEV_TX_OK;
10305 }
10306 
10307 static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10308 {
10309 	struct ipw_priv *priv = libipw_priv(dev);
10310 #ifdef CONFIG_IPW2200_QOS
10311 	int tx_id = ipw_get_tx_queue_number(priv, pri);
10312 	struct clx2_tx_queue *txq = &priv->txq[tx_id];
10313 #else
10314 	struct clx2_tx_queue *txq = &priv->txq[0];
10315 #endif				/* CONFIG_IPW2200_QOS */
10316 
10317 	if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10318 		return 1;
10319 
10320 	return 0;
10321 }
10322 
10323 #ifdef CONFIG_IPW2200_PROMISCUOUS
10324 static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10325 				      struct libipw_txb *txb)
10326 {
10327 	struct libipw_rx_stats dummystats;
10328 	struct ieee80211_hdr *hdr;
10329 	u8 n;
10330 	u16 filter = priv->prom_priv->filter;
10331 	int hdr_only = 0;
10332 
10333 	if (filter & IPW_PROM_NO_TX)
10334 		return;
10335 
10336 	memset(&dummystats, 0, sizeof(dummystats));
10337 
10338 	/* Filtering of fragment chains is done against the first fragment */
10339 	hdr = (void *)txb->fragments[0]->data;
10340 	if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
10341 		if (filter & IPW_PROM_NO_MGMT)
10342 			return;
10343 		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10344 			hdr_only = 1;
10345 	} else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
10346 		if (filter & IPW_PROM_NO_CTL)
10347 			return;
10348 		if (filter & IPW_PROM_CTL_HEADER_ONLY)
10349 			hdr_only = 1;
10350 	} else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
10351 		if (filter & IPW_PROM_NO_DATA)
10352 			return;
10353 		if (filter & IPW_PROM_DATA_HEADER_ONLY)
10354 			hdr_only = 1;
10355 	}
10356 
10357 	for(n=0; n<txb->nr_frags; ++n) {
10358 		struct sk_buff *src = txb->fragments[n];
10359 		struct sk_buff *dst;
10360 		struct ieee80211_radiotap_header *rt_hdr;
10361 		int len;
10362 
10363 		if (hdr_only) {
10364 			hdr = (void *)src->data;
10365 			len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
10366 		} else
10367 			len = src->len;
10368 
10369 		dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC);
10370 		if (!dst)
10371 			continue;
10372 
10373 		rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10374 
10375 		rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10376 		rt_hdr->it_pad = 0;
10377 		rt_hdr->it_present = 0; /* after all, it's just an idea */
10378 		rt_hdr->it_present |=  cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
10379 
10380 		*(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10381 			ieee80211chan2mhz(priv->channel));
10382 		if (priv->channel > 14) 	/* 802.11a */
10383 			*(__le16*)skb_put(dst, sizeof(u16)) =
10384 				cpu_to_le16(IEEE80211_CHAN_OFDM |
10385 					     IEEE80211_CHAN_5GHZ);
10386 		else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10387 			*(__le16*)skb_put(dst, sizeof(u16)) =
10388 				cpu_to_le16(IEEE80211_CHAN_CCK |
10389 					     IEEE80211_CHAN_2GHZ);
10390 		else 		/* 802.11g */
10391 			*(__le16*)skb_put(dst, sizeof(u16)) =
10392 				cpu_to_le16(IEEE80211_CHAN_OFDM |
10393 				 IEEE80211_CHAN_2GHZ);
10394 
10395 		rt_hdr->it_len = cpu_to_le16(dst->len);
10396 
10397 		skb_copy_from_linear_data(src, skb_put(dst, len), len);
10398 
10399 		if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
10400 			dev_kfree_skb_any(dst);
10401 	}
10402 }
10403 #endif
10404 
10405 static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
10406 					   struct net_device *dev, int pri)
10407 {
10408 	struct ipw_priv *priv = libipw_priv(dev);
10409 	unsigned long flags;
10410 	netdev_tx_t ret;
10411 
10412 	IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10413 	spin_lock_irqsave(&priv->lock, flags);
10414 
10415 #ifdef CONFIG_IPW2200_PROMISCUOUS
10416 	if (rtap_iface && netif_running(priv->prom_net_dev))
10417 		ipw_handle_promiscuous_tx(priv, txb);
10418 #endif
10419 
10420 	ret = ipw_tx_skb(priv, txb, pri);
10421 	if (ret == NETDEV_TX_OK)
10422 		__ipw_led_activity_on(priv);
10423 	spin_unlock_irqrestore(&priv->lock, flags);
10424 
10425 	return ret;
10426 }
10427 
10428 static void ipw_net_set_multicast_list(struct net_device *dev)
10429 {
10430 
10431 }
10432 
10433 static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10434 {
10435 	struct ipw_priv *priv = libipw_priv(dev);
10436 	struct sockaddr *addr = p;
10437 
10438 	if (!is_valid_ether_addr(addr->sa_data))
10439 		return -EADDRNOTAVAIL;
10440 	mutex_lock(&priv->mutex);
10441 	priv->config |= CFG_CUSTOM_MAC;
10442 	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10443 	printk(KERN_INFO "%s: Setting MAC to %pM\n",
10444 	       priv->net_dev->name, priv->mac_addr);
10445 	schedule_work(&priv->adapter_restart);
10446 	mutex_unlock(&priv->mutex);
10447 	return 0;
10448 }
10449 
10450 static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10451 				    struct ethtool_drvinfo *info)
10452 {
10453 	struct ipw_priv *p = libipw_priv(dev);
10454 	char vers[64];
10455 	char date[32];
10456 	u32 len;
10457 
10458 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
10459 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
10460 
10461 	len = sizeof(vers);
10462 	ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10463 	len = sizeof(date);
10464 	ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10465 
10466 	snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10467 		 vers, date);
10468 	strlcpy(info->bus_info, pci_name(p->pci_dev),
10469 		sizeof(info->bus_info));
10470 }
10471 
10472 static u32 ipw_ethtool_get_link(struct net_device *dev)
10473 {
10474 	struct ipw_priv *priv = libipw_priv(dev);
10475 	return (priv->status & STATUS_ASSOCIATED) != 0;
10476 }
10477 
10478 static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10479 {
10480 	return IPW_EEPROM_IMAGE_SIZE;
10481 }
10482 
10483 static int ipw_ethtool_get_eeprom(struct net_device *dev,
10484 				  struct ethtool_eeprom *eeprom, u8 * bytes)
10485 {
10486 	struct ipw_priv *p = libipw_priv(dev);
10487 
10488 	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10489 		return -EINVAL;
10490 	mutex_lock(&p->mutex);
10491 	memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10492 	mutex_unlock(&p->mutex);
10493 	return 0;
10494 }
10495 
10496 static int ipw_ethtool_set_eeprom(struct net_device *dev,
10497 				  struct ethtool_eeprom *eeprom, u8 * bytes)
10498 {
10499 	struct ipw_priv *p = libipw_priv(dev);
10500 	int i;
10501 
10502 	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10503 		return -EINVAL;
10504 	mutex_lock(&p->mutex);
10505 	memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10506 	for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10507 		ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10508 	mutex_unlock(&p->mutex);
10509 	return 0;
10510 }
10511 
10512 static const struct ethtool_ops ipw_ethtool_ops = {
10513 	.get_link = ipw_ethtool_get_link,
10514 	.get_drvinfo = ipw_ethtool_get_drvinfo,
10515 	.get_eeprom_len = ipw_ethtool_get_eeprom_len,
10516 	.get_eeprom = ipw_ethtool_get_eeprom,
10517 	.set_eeprom = ipw_ethtool_set_eeprom,
10518 };
10519 
10520 static irqreturn_t ipw_isr(int irq, void *data)
10521 {
10522 	struct ipw_priv *priv = data;
10523 	u32 inta, inta_mask;
10524 
10525 	if (!priv)
10526 		return IRQ_NONE;
10527 
10528 	spin_lock(&priv->irq_lock);
10529 
10530 	if (!(priv->status & STATUS_INT_ENABLED)) {
10531 		/* IRQ is disabled */
10532 		goto none;
10533 	}
10534 
10535 	inta = ipw_read32(priv, IPW_INTA_RW);
10536 	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10537 
10538 	if (inta == 0xFFFFFFFF) {
10539 		/* Hardware disappeared */
10540 		IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10541 		goto none;
10542 	}
10543 
10544 	if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10545 		/* Shared interrupt */
10546 		goto none;
10547 	}
10548 
10549 	/* tell the device to stop sending interrupts */
10550 	__ipw_disable_interrupts(priv);
10551 
10552 	/* ack current interrupts */
10553 	inta &= (IPW_INTA_MASK_ALL & inta_mask);
10554 	ipw_write32(priv, IPW_INTA_RW, inta);
10555 
10556 	/* Cache INTA value for our tasklet */
10557 	priv->isr_inta = inta;
10558 
10559 	tasklet_schedule(&priv->irq_tasklet);
10560 
10561 	spin_unlock(&priv->irq_lock);
10562 
10563 	return IRQ_HANDLED;
10564       none:
10565 	spin_unlock(&priv->irq_lock);
10566 	return IRQ_NONE;
10567 }
10568 
10569 static void ipw_rf_kill(void *adapter)
10570 {
10571 	struct ipw_priv *priv = adapter;
10572 	unsigned long flags;
10573 
10574 	spin_lock_irqsave(&priv->lock, flags);
10575 
10576 	if (rf_kill_active(priv)) {
10577 		IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10578 		schedule_delayed_work(&priv->rf_kill, 2 * HZ);
10579 		goto exit_unlock;
10580 	}
10581 
10582 	/* RF Kill is now disabled, so bring the device back up */
10583 
10584 	if (!(priv->status & STATUS_RF_KILL_MASK)) {
10585 		IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10586 				  "device\n");
10587 
10588 		/* we can not do an adapter restart while inside an irq lock */
10589 		schedule_work(&priv->adapter_restart);
10590 	} else
10591 		IPW_DEBUG_RF_KILL("HW RF Kill deactivated.  SW RF Kill still "
10592 				  "enabled\n");
10593 
10594       exit_unlock:
10595 	spin_unlock_irqrestore(&priv->lock, flags);
10596 }
10597 
10598 static void ipw_bg_rf_kill(struct work_struct *work)
10599 {
10600 	struct ipw_priv *priv =
10601 		container_of(work, struct ipw_priv, rf_kill.work);
10602 	mutex_lock(&priv->mutex);
10603 	ipw_rf_kill(priv);
10604 	mutex_unlock(&priv->mutex);
10605 }
10606 
10607 static void ipw_link_up(struct ipw_priv *priv)
10608 {
10609 	priv->last_seq_num = -1;
10610 	priv->last_frag_num = -1;
10611 	priv->last_packet_time = 0;
10612 
10613 	netif_carrier_on(priv->net_dev);
10614 
10615 	cancel_delayed_work(&priv->request_scan);
10616 	cancel_delayed_work(&priv->request_direct_scan);
10617 	cancel_delayed_work(&priv->request_passive_scan);
10618 	cancel_delayed_work(&priv->scan_event);
10619 	ipw_reset_stats(priv);
10620 	/* Ensure the rate is updated immediately */
10621 	priv->last_rate = ipw_get_current_rate(priv);
10622 	ipw_gather_stats(priv);
10623 	ipw_led_link_up(priv);
10624 	notify_wx_assoc_event(priv);
10625 
10626 	if (priv->config & CFG_BACKGROUND_SCAN)
10627 		schedule_delayed_work(&priv->request_scan, HZ);
10628 }
10629 
10630 static void ipw_bg_link_up(struct work_struct *work)
10631 {
10632 	struct ipw_priv *priv =
10633 		container_of(work, struct ipw_priv, link_up);
10634 	mutex_lock(&priv->mutex);
10635 	ipw_link_up(priv);
10636 	mutex_unlock(&priv->mutex);
10637 }
10638 
10639 static void ipw_link_down(struct ipw_priv *priv)
10640 {
10641 	ipw_led_link_down(priv);
10642 	netif_carrier_off(priv->net_dev);
10643 	notify_wx_assoc_event(priv);
10644 
10645 	/* Cancel any queued work ... */
10646 	cancel_delayed_work(&priv->request_scan);
10647 	cancel_delayed_work(&priv->request_direct_scan);
10648 	cancel_delayed_work(&priv->request_passive_scan);
10649 	cancel_delayed_work(&priv->adhoc_check);
10650 	cancel_delayed_work(&priv->gather_stats);
10651 
10652 	ipw_reset_stats(priv);
10653 
10654 	if (!(priv->status & STATUS_EXIT_PENDING)) {
10655 		/* Queue up another scan... */
10656 		schedule_delayed_work(&priv->request_scan, 0);
10657 	} else
10658 		cancel_delayed_work(&priv->scan_event);
10659 }
10660 
10661 static void ipw_bg_link_down(struct work_struct *work)
10662 {
10663 	struct ipw_priv *priv =
10664 		container_of(work, struct ipw_priv, link_down);
10665 	mutex_lock(&priv->mutex);
10666 	ipw_link_down(priv);
10667 	mutex_unlock(&priv->mutex);
10668 }
10669 
10670 static int ipw_setup_deferred_work(struct ipw_priv *priv)
10671 {
10672 	int ret = 0;
10673 
10674 	init_waitqueue_head(&priv->wait_command_queue);
10675 	init_waitqueue_head(&priv->wait_state);
10676 
10677 	INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10678 	INIT_WORK(&priv->associate, ipw_bg_associate);
10679 	INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10680 	INIT_WORK(&priv->system_config, ipw_system_config);
10681 	INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10682 	INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10683 	INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10684 	INIT_WORK(&priv->up, ipw_bg_up);
10685 	INIT_WORK(&priv->down, ipw_bg_down);
10686 	INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10687 	INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10688 	INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10689 	INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10690 	INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10691 	INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10692 	INIT_WORK(&priv->roam, ipw_bg_roam);
10693 	INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10694 	INIT_WORK(&priv->link_up, ipw_bg_link_up);
10695 	INIT_WORK(&priv->link_down, ipw_bg_link_down);
10696 	INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10697 	INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10698 	INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10699 	INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10700 
10701 #ifdef CONFIG_IPW2200_QOS
10702 	INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10703 #endif				/* CONFIG_IPW2200_QOS */
10704 
10705 	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10706 		     ipw_irq_tasklet, (unsigned long)priv);
10707 
10708 	return ret;
10709 }
10710 
10711 static void shim__set_security(struct net_device *dev,
10712 			       struct libipw_security *sec)
10713 {
10714 	struct ipw_priv *priv = libipw_priv(dev);
10715 	int i;
10716 	for (i = 0; i < 4; i++) {
10717 		if (sec->flags & (1 << i)) {
10718 			priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10719 			priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10720 			if (sec->key_sizes[i] == 0)
10721 				priv->ieee->sec.flags &= ~(1 << i);
10722 			else {
10723 				memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10724 				       sec->key_sizes[i]);
10725 				priv->ieee->sec.flags |= (1 << i);
10726 			}
10727 			priv->status |= STATUS_SECURITY_UPDATED;
10728 		} else if (sec->level != SEC_LEVEL_1)
10729 			priv->ieee->sec.flags &= ~(1 << i);
10730 	}
10731 
10732 	if (sec->flags & SEC_ACTIVE_KEY) {
10733 		if (sec->active_key <= 3) {
10734 			priv->ieee->sec.active_key = sec->active_key;
10735 			priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10736 		} else
10737 			priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10738 		priv->status |= STATUS_SECURITY_UPDATED;
10739 	} else
10740 		priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10741 
10742 	if ((sec->flags & SEC_AUTH_MODE) &&
10743 	    (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10744 		priv->ieee->sec.auth_mode = sec->auth_mode;
10745 		priv->ieee->sec.flags |= SEC_AUTH_MODE;
10746 		if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10747 			priv->capability |= CAP_SHARED_KEY;
10748 		else
10749 			priv->capability &= ~CAP_SHARED_KEY;
10750 		priv->status |= STATUS_SECURITY_UPDATED;
10751 	}
10752 
10753 	if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10754 		priv->ieee->sec.flags |= SEC_ENABLED;
10755 		priv->ieee->sec.enabled = sec->enabled;
10756 		priv->status |= STATUS_SECURITY_UPDATED;
10757 		if (sec->enabled)
10758 			priv->capability |= CAP_PRIVACY_ON;
10759 		else
10760 			priv->capability &= ~CAP_PRIVACY_ON;
10761 	}
10762 
10763 	if (sec->flags & SEC_ENCRYPT)
10764 		priv->ieee->sec.encrypt = sec->encrypt;
10765 
10766 	if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10767 		priv->ieee->sec.level = sec->level;
10768 		priv->ieee->sec.flags |= SEC_LEVEL;
10769 		priv->status |= STATUS_SECURITY_UPDATED;
10770 	}
10771 
10772 	if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10773 		ipw_set_hwcrypto_keys(priv);
10774 
10775 	/* To match current functionality of ipw2100 (which works well w/
10776 	 * various supplicants, we don't force a disassociate if the
10777 	 * privacy capability changes ... */
10778 #if 0
10779 	if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10780 	    (((priv->assoc_request.capability &
10781 	       cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10782 	     (!(priv->assoc_request.capability &
10783 		cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10784 		IPW_DEBUG_ASSOC("Disassociating due to capability "
10785 				"change.\n");
10786 		ipw_disassociate(priv);
10787 	}
10788 #endif
10789 }
10790 
10791 static int init_supported_rates(struct ipw_priv *priv,
10792 				struct ipw_supported_rates *rates)
10793 {
10794 	/* TODO: Mask out rates based on priv->rates_mask */
10795 
10796 	memset(rates, 0, sizeof(*rates));
10797 	/* configure supported rates */
10798 	switch (priv->ieee->freq_band) {
10799 	case LIBIPW_52GHZ_BAND:
10800 		rates->ieee_mode = IPW_A_MODE;
10801 		rates->purpose = IPW_RATE_CAPABILITIES;
10802 		ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10803 					LIBIPW_OFDM_DEFAULT_RATES_MASK);
10804 		break;
10805 
10806 	default:		/* Mixed or 2.4Ghz */
10807 		rates->ieee_mode = IPW_G_MODE;
10808 		rates->purpose = IPW_RATE_CAPABILITIES;
10809 		ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
10810 				       LIBIPW_CCK_DEFAULT_RATES_MASK);
10811 		if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
10812 			ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
10813 						LIBIPW_OFDM_DEFAULT_RATES_MASK);
10814 		}
10815 		break;
10816 	}
10817 
10818 	return 0;
10819 }
10820 
10821 static int ipw_config(struct ipw_priv *priv)
10822 {
10823 	/* This is only called from ipw_up, which resets/reloads the firmware
10824 	   so, we don't need to first disable the card before we configure
10825 	   it */
10826 	if (ipw_set_tx_power(priv))
10827 		goto error;
10828 
10829 	/* initialize adapter address */
10830 	if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10831 		goto error;
10832 
10833 	/* set basic system config settings */
10834 	init_sys_config(&priv->sys_config);
10835 
10836 	/* Support Bluetooth if we have BT h/w on board, and user wants to.
10837 	 * Does not support BT priority yet (don't abort or defer our Tx) */
10838 	if (bt_coexist) {
10839 		unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10840 
10841 		if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10842 			priv->sys_config.bt_coexistence
10843 			    |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10844 		if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10845 			priv->sys_config.bt_coexistence
10846 			    |= CFG_BT_COEXISTENCE_OOB;
10847 	}
10848 
10849 #ifdef CONFIG_IPW2200_PROMISCUOUS
10850 	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10851 		priv->sys_config.accept_all_data_frames = 1;
10852 		priv->sys_config.accept_non_directed_frames = 1;
10853 		priv->sys_config.accept_all_mgmt_bcpr = 1;
10854 		priv->sys_config.accept_all_mgmt_frames = 1;
10855 	}
10856 #endif
10857 
10858 	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10859 		priv->sys_config.answer_broadcast_ssid_probe = 1;
10860 	else
10861 		priv->sys_config.answer_broadcast_ssid_probe = 0;
10862 
10863 	if (ipw_send_system_config(priv))
10864 		goto error;
10865 
10866 	init_supported_rates(priv, &priv->rates);
10867 	if (ipw_send_supported_rates(priv, &priv->rates))
10868 		goto error;
10869 
10870 	/* Set request-to-send threshold */
10871 	if (priv->rts_threshold) {
10872 		if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10873 			goto error;
10874 	}
10875 #ifdef CONFIG_IPW2200_QOS
10876 	IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10877 	ipw_qos_activate(priv, NULL);
10878 #endif				/* CONFIG_IPW2200_QOS */
10879 
10880 	if (ipw_set_random_seed(priv))
10881 		goto error;
10882 
10883 	/* final state transition to the RUN state */
10884 	if (ipw_send_host_complete(priv))
10885 		goto error;
10886 
10887 	priv->status |= STATUS_INIT;
10888 
10889 	ipw_led_init(priv);
10890 	ipw_led_radio_on(priv);
10891 	priv->notif_missed_beacons = 0;
10892 
10893 	/* Set hardware WEP key if it is configured. */
10894 	if ((priv->capability & CAP_PRIVACY_ON) &&
10895 	    (priv->ieee->sec.level == SEC_LEVEL_1) &&
10896 	    !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10897 		ipw_set_hwcrypto_keys(priv);
10898 
10899 	return 0;
10900 
10901       error:
10902 	return -EIO;
10903 }
10904 
10905 /*
10906  * NOTE:
10907  *
10908  * These tables have been tested in conjunction with the
10909  * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10910  *
10911  * Altering this values, using it on other hardware, or in geographies
10912  * not intended for resale of the above mentioned Intel adapters has
10913  * not been tested.
10914  *
10915  * Remember to update the table in README.ipw2200 when changing this
10916  * table.
10917  *
10918  */
10919 static const struct libipw_geo ipw_geos[] = {
10920 	{			/* Restricted */
10921 	 "---",
10922 	 .bg_channels = 11,
10923 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10924 		{2427, 4}, {2432, 5}, {2437, 6},
10925 		{2442, 7}, {2447, 8}, {2452, 9},
10926 		{2457, 10}, {2462, 11}},
10927 	 },
10928 
10929 	{			/* Custom US/Canada */
10930 	 "ZZF",
10931 	 .bg_channels = 11,
10932 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10933 		{2427, 4}, {2432, 5}, {2437, 6},
10934 		{2442, 7}, {2447, 8}, {2452, 9},
10935 		{2457, 10}, {2462, 11}},
10936 	 .a_channels = 8,
10937 	 .a = {{5180, 36},
10938 	       {5200, 40},
10939 	       {5220, 44},
10940 	       {5240, 48},
10941 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10942 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10943 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10944 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
10945 	 },
10946 
10947 	{			/* Rest of World */
10948 	 "ZZD",
10949 	 .bg_channels = 13,
10950 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10951 		{2427, 4}, {2432, 5}, {2437, 6},
10952 		{2442, 7}, {2447, 8}, {2452, 9},
10953 		{2457, 10}, {2462, 11}, {2467, 12},
10954 		{2472, 13}},
10955 	 },
10956 
10957 	{			/* Custom USA & Europe & High */
10958 	 "ZZA",
10959 	 .bg_channels = 11,
10960 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10961 		{2427, 4}, {2432, 5}, {2437, 6},
10962 		{2442, 7}, {2447, 8}, {2452, 9},
10963 		{2457, 10}, {2462, 11}},
10964 	 .a_channels = 13,
10965 	 .a = {{5180, 36},
10966 	       {5200, 40},
10967 	       {5220, 44},
10968 	       {5240, 48},
10969 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10970 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10971 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10972 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
10973 	       {5745, 149},
10974 	       {5765, 153},
10975 	       {5785, 157},
10976 	       {5805, 161},
10977 	       {5825, 165}},
10978 	 },
10979 
10980 	{			/* Custom NA & Europe */
10981 	 "ZZB",
10982 	 .bg_channels = 11,
10983 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10984 		{2427, 4}, {2432, 5}, {2437, 6},
10985 		{2442, 7}, {2447, 8}, {2452, 9},
10986 		{2457, 10}, {2462, 11}},
10987 	 .a_channels = 13,
10988 	 .a = {{5180, 36},
10989 	       {5200, 40},
10990 	       {5220, 44},
10991 	       {5240, 48},
10992 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
10993 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
10994 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
10995 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
10996 	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
10997 	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
10998 	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
10999 	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11000 	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11001 	 },
11002 
11003 	{			/* Custom Japan */
11004 	 "ZZC",
11005 	 .bg_channels = 11,
11006 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11007 		{2427, 4}, {2432, 5}, {2437, 6},
11008 		{2442, 7}, {2447, 8}, {2452, 9},
11009 		{2457, 10}, {2462, 11}},
11010 	 .a_channels = 4,
11011 	 .a = {{5170, 34}, {5190, 38},
11012 	       {5210, 42}, {5230, 46}},
11013 	 },
11014 
11015 	{			/* Custom */
11016 	 "ZZM",
11017 	 .bg_channels = 11,
11018 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11019 		{2427, 4}, {2432, 5}, {2437, 6},
11020 		{2442, 7}, {2447, 8}, {2452, 9},
11021 		{2457, 10}, {2462, 11}},
11022 	 },
11023 
11024 	{			/* Europe */
11025 	 "ZZE",
11026 	 .bg_channels = 13,
11027 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11028 		{2427, 4}, {2432, 5}, {2437, 6},
11029 		{2442, 7}, {2447, 8}, {2452, 9},
11030 		{2457, 10}, {2462, 11}, {2467, 12},
11031 		{2472, 13}},
11032 	 .a_channels = 19,
11033 	 .a = {{5180, 36},
11034 	       {5200, 40},
11035 	       {5220, 44},
11036 	       {5240, 48},
11037 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11038 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11039 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11040 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11041 	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11042 	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11043 	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11044 	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11045 	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11046 	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11047 	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11048 	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11049 	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11050 	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11051 	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
11052 	 },
11053 
11054 	{			/* Custom Japan */
11055 	 "ZZJ",
11056 	 .bg_channels = 14,
11057 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11058 		{2427, 4}, {2432, 5}, {2437, 6},
11059 		{2442, 7}, {2447, 8}, {2452, 9},
11060 		{2457, 10}, {2462, 11}, {2467, 12},
11061 		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
11062 	 .a_channels = 4,
11063 	 .a = {{5170, 34}, {5190, 38},
11064 	       {5210, 42}, {5230, 46}},
11065 	 },
11066 
11067 	{			/* Rest of World */
11068 	 "ZZR",
11069 	 .bg_channels = 14,
11070 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11071 		{2427, 4}, {2432, 5}, {2437, 6},
11072 		{2442, 7}, {2447, 8}, {2452, 9},
11073 		{2457, 10}, {2462, 11}, {2467, 12},
11074 		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
11075 			     LIBIPW_CH_PASSIVE_ONLY}},
11076 	 },
11077 
11078 	{			/* High Band */
11079 	 "ZZH",
11080 	 .bg_channels = 13,
11081 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11082 		{2427, 4}, {2432, 5}, {2437, 6},
11083 		{2442, 7}, {2447, 8}, {2452, 9},
11084 		{2457, 10}, {2462, 11},
11085 		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11086 		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11087 	 .a_channels = 4,
11088 	 .a = {{5745, 149}, {5765, 153},
11089 	       {5785, 157}, {5805, 161}},
11090 	 },
11091 
11092 	{			/* Custom Europe */
11093 	 "ZZG",
11094 	 .bg_channels = 13,
11095 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11096 		{2427, 4}, {2432, 5}, {2437, 6},
11097 		{2442, 7}, {2447, 8}, {2452, 9},
11098 		{2457, 10}, {2462, 11},
11099 		{2467, 12}, {2472, 13}},
11100 	 .a_channels = 4,
11101 	 .a = {{5180, 36}, {5200, 40},
11102 	       {5220, 44}, {5240, 48}},
11103 	 },
11104 
11105 	{			/* Europe */
11106 	 "ZZK",
11107 	 .bg_channels = 13,
11108 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11109 		{2427, 4}, {2432, 5}, {2437, 6},
11110 		{2442, 7}, {2447, 8}, {2452, 9},
11111 		{2457, 10}, {2462, 11},
11112 		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
11113 		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
11114 	 .a_channels = 24,
11115 	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11116 	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11117 	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11118 	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11119 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11120 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11121 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11122 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11123 	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
11124 	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
11125 	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
11126 	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
11127 	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
11128 	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
11129 	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
11130 	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
11131 	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
11132 	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
11133 	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
11134 	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11135 	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11136 	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11137 	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11138 	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11139 	 },
11140 
11141 	{			/* Europe */
11142 	 "ZZL",
11143 	 .bg_channels = 11,
11144 	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11145 		{2427, 4}, {2432, 5}, {2437, 6},
11146 		{2442, 7}, {2447, 8}, {2452, 9},
11147 		{2457, 10}, {2462, 11}},
11148 	 .a_channels = 13,
11149 	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
11150 	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
11151 	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
11152 	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
11153 	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
11154 	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
11155 	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
11156 	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
11157 	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
11158 	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
11159 	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
11160 	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
11161 	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
11162 	 }
11163 };
11164 
11165 static void ipw_set_geo(struct ipw_priv *priv)
11166 {
11167 	int j;
11168 
11169 	for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11170 		if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11171 			    ipw_geos[j].name, 3))
11172 			break;
11173 	}
11174 
11175 	if (j == ARRAY_SIZE(ipw_geos)) {
11176 		IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11177 			    priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11178 			    priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11179 			    priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11180 		j = 0;
11181 	}
11182 
11183 	libipw_set_geo(priv->ieee, &ipw_geos[j]);
11184 }
11185 
11186 #define MAX_HW_RESTARTS 5
11187 static int ipw_up(struct ipw_priv *priv)
11188 {
11189 	int rc, i;
11190 
11191 	/* Age scan list entries found before suspend */
11192 	if (priv->suspend_time) {
11193 		libipw_networks_age(priv->ieee, priv->suspend_time);
11194 		priv->suspend_time = 0;
11195 	}
11196 
11197 	if (priv->status & STATUS_EXIT_PENDING)
11198 		return -EIO;
11199 
11200 	if (cmdlog && !priv->cmdlog) {
11201 		priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11202 				       GFP_KERNEL);
11203 		if (priv->cmdlog == NULL) {
11204 			IPW_ERROR("Error allocating %d command log entries.\n",
11205 				  cmdlog);
11206 			return -ENOMEM;
11207 		} else {
11208 			priv->cmdlog_len = cmdlog;
11209 		}
11210 	}
11211 
11212 	for (i = 0; i < MAX_HW_RESTARTS; i++) {
11213 		/* Load the microcode, firmware, and eeprom.
11214 		 * Also start the clocks. */
11215 		rc = ipw_load(priv);
11216 		if (rc) {
11217 			IPW_ERROR("Unable to load firmware: %d\n", rc);
11218 			return rc;
11219 		}
11220 
11221 		ipw_init_ordinals(priv);
11222 		if (!(priv->config & CFG_CUSTOM_MAC))
11223 			eeprom_parse_mac(priv, priv->mac_addr);
11224 		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11225 
11226 		ipw_set_geo(priv);
11227 
11228 		if (priv->status & STATUS_RF_KILL_SW) {
11229 			IPW_WARNING("Radio disabled by module parameter.\n");
11230 			return 0;
11231 		} else if (rf_kill_active(priv)) {
11232 			IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11233 				    "Kill switch must be turned off for "
11234 				    "wireless networking to work.\n");
11235 			schedule_delayed_work(&priv->rf_kill, 2 * HZ);
11236 			return 0;
11237 		}
11238 
11239 		rc = ipw_config(priv);
11240 		if (!rc) {
11241 			IPW_DEBUG_INFO("Configured device on count %i\n", i);
11242 
11243 			/* If configure to try and auto-associate, kick
11244 			 * off a scan. */
11245 			schedule_delayed_work(&priv->request_scan, 0);
11246 
11247 			return 0;
11248 		}
11249 
11250 		IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11251 		IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11252 			       i, MAX_HW_RESTARTS);
11253 
11254 		/* We had an error bringing up the hardware, so take it
11255 		 * all the way back down so we can try again */
11256 		ipw_down(priv);
11257 	}
11258 
11259 	/* tried to restart and config the device for as long as our
11260 	 * patience could withstand */
11261 	IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11262 
11263 	return -EIO;
11264 }
11265 
11266 static void ipw_bg_up(struct work_struct *work)
11267 {
11268 	struct ipw_priv *priv =
11269 		container_of(work, struct ipw_priv, up);
11270 	mutex_lock(&priv->mutex);
11271 	ipw_up(priv);
11272 	mutex_unlock(&priv->mutex);
11273 }
11274 
11275 static void ipw_deinit(struct ipw_priv *priv)
11276 {
11277 	int i;
11278 
11279 	if (priv->status & STATUS_SCANNING) {
11280 		IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11281 		ipw_abort_scan(priv);
11282 	}
11283 
11284 	if (priv->status & STATUS_ASSOCIATED) {
11285 		IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11286 		ipw_disassociate(priv);
11287 	}
11288 
11289 	ipw_led_shutdown(priv);
11290 
11291 	/* Wait up to 1s for status to change to not scanning and not
11292 	 * associated (disassociation can take a while for a ful 802.11
11293 	 * exchange */
11294 	for (i = 1000; i && (priv->status &
11295 			     (STATUS_DISASSOCIATING |
11296 			      STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11297 		udelay(10);
11298 
11299 	if (priv->status & (STATUS_DISASSOCIATING |
11300 			    STATUS_ASSOCIATED | STATUS_SCANNING))
11301 		IPW_DEBUG_INFO("Still associated or scanning...\n");
11302 	else
11303 		IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11304 
11305 	/* Attempt to disable the card */
11306 	ipw_send_card_disable(priv, 0);
11307 
11308 	priv->status &= ~STATUS_INIT;
11309 }
11310 
11311 static void ipw_down(struct ipw_priv *priv)
11312 {
11313 	int exit_pending = priv->status & STATUS_EXIT_PENDING;
11314 
11315 	priv->status |= STATUS_EXIT_PENDING;
11316 
11317 	if (ipw_is_init(priv))
11318 		ipw_deinit(priv);
11319 
11320 	/* Wipe out the EXIT_PENDING status bit if we are not actually
11321 	 * exiting the module */
11322 	if (!exit_pending)
11323 		priv->status &= ~STATUS_EXIT_PENDING;
11324 
11325 	/* tell the device to stop sending interrupts */
11326 	ipw_disable_interrupts(priv);
11327 
11328 	/* Clear all bits but the RF Kill */
11329 	priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11330 	netif_carrier_off(priv->net_dev);
11331 
11332 	ipw_stop_nic(priv);
11333 
11334 	ipw_led_radio_off(priv);
11335 }
11336 
11337 static void ipw_bg_down(struct work_struct *work)
11338 {
11339 	struct ipw_priv *priv =
11340 		container_of(work, struct ipw_priv, down);
11341 	mutex_lock(&priv->mutex);
11342 	ipw_down(priv);
11343 	mutex_unlock(&priv->mutex);
11344 }
11345 
11346 static int ipw_wdev_init(struct net_device *dev)
11347 {
11348 	int i, rc = 0;
11349 	struct ipw_priv *priv = libipw_priv(dev);
11350 	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11351 	struct wireless_dev *wdev = &priv->ieee->wdev;
11352 
11353 	memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11354 
11355 	/* fill-out priv->ieee->bg_band */
11356 	if (geo->bg_channels) {
11357 		struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11358 
11359 		bg_band->band = NL80211_BAND_2GHZ;
11360 		bg_band->n_channels = geo->bg_channels;
11361 		bg_band->channels = kcalloc(geo->bg_channels,
11362 					    sizeof(struct ieee80211_channel),
11363 					    GFP_KERNEL);
11364 		if (!bg_band->channels) {
11365 			rc = -ENOMEM;
11366 			goto out;
11367 		}
11368 		/* translate geo->bg to bg_band.channels */
11369 		for (i = 0; i < geo->bg_channels; i++) {
11370 			bg_band->channels[i].band = NL80211_BAND_2GHZ;
11371 			bg_band->channels[i].center_freq = geo->bg[i].freq;
11372 			bg_band->channels[i].hw_value = geo->bg[i].channel;
11373 			bg_band->channels[i].max_power = geo->bg[i].max_power;
11374 			if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11375 				bg_band->channels[i].flags |=
11376 					IEEE80211_CHAN_NO_IR;
11377 			if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
11378 				bg_band->channels[i].flags |=
11379 					IEEE80211_CHAN_NO_IR;
11380 			if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
11381 				bg_band->channels[i].flags |=
11382 					IEEE80211_CHAN_RADAR;
11383 			/* No equivalent for LIBIPW_CH_80211H_RULES,
11384 			   LIBIPW_CH_UNIFORM_SPREADING, or
11385 			   LIBIPW_CH_B_ONLY... */
11386 		}
11387 		/* point at bitrate info */
11388 		bg_band->bitrates = ipw2200_bg_rates;
11389 		bg_band->n_bitrates = ipw2200_num_bg_rates;
11390 
11391 		wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band;
11392 	}
11393 
11394 	/* fill-out priv->ieee->a_band */
11395 	if (geo->a_channels) {
11396 		struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11397 
11398 		a_band->band = NL80211_BAND_5GHZ;
11399 		a_band->n_channels = geo->a_channels;
11400 		a_band->channels = kcalloc(geo->a_channels,
11401 					   sizeof(struct ieee80211_channel),
11402 					   GFP_KERNEL);
11403 		if (!a_band->channels) {
11404 			rc = -ENOMEM;
11405 			goto out;
11406 		}
11407 		/* translate geo->a to a_band.channels */
11408 		for (i = 0; i < geo->a_channels; i++) {
11409 			a_band->channels[i].band = NL80211_BAND_5GHZ;
11410 			a_band->channels[i].center_freq = geo->a[i].freq;
11411 			a_band->channels[i].hw_value = geo->a[i].channel;
11412 			a_band->channels[i].max_power = geo->a[i].max_power;
11413 			if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11414 				a_band->channels[i].flags |=
11415 					IEEE80211_CHAN_NO_IR;
11416 			if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
11417 				a_band->channels[i].flags |=
11418 					IEEE80211_CHAN_NO_IR;
11419 			if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
11420 				a_band->channels[i].flags |=
11421 					IEEE80211_CHAN_RADAR;
11422 			/* No equivalent for LIBIPW_CH_80211H_RULES,
11423 			   LIBIPW_CH_UNIFORM_SPREADING, or
11424 			   LIBIPW_CH_B_ONLY... */
11425 		}
11426 		/* point at bitrate info */
11427 		a_band->bitrates = ipw2200_a_rates;
11428 		a_band->n_bitrates = ipw2200_num_a_rates;
11429 
11430 		wdev->wiphy->bands[NL80211_BAND_5GHZ] = a_band;
11431 	}
11432 
11433 	wdev->wiphy->cipher_suites = ipw_cipher_suites;
11434 	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
11435 
11436 	set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11437 
11438 	/* With that information in place, we can now register the wiphy... */
11439 	if (wiphy_register(wdev->wiphy))
11440 		rc = -EIO;
11441 out:
11442 	return rc;
11443 }
11444 
11445 /* PCI driver stuff */
11446 static const struct pci_device_id card_ids[] = {
11447 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11448 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11449 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11450 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11451 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11452 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11453 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11454 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11455 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11456 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11457 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11458 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11459 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11460 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11461 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11462 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11463 	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11464 	{PCI_VDEVICE(INTEL, 0x104f), 0},
11465 	{PCI_VDEVICE(INTEL, 0x4220), 0},	/* BG */
11466 	{PCI_VDEVICE(INTEL, 0x4221), 0},	/* BG */
11467 	{PCI_VDEVICE(INTEL, 0x4223), 0},	/* ABG */
11468 	{PCI_VDEVICE(INTEL, 0x4224), 0},	/* ABG */
11469 
11470 	/* required last entry */
11471 	{0,}
11472 };
11473 
11474 MODULE_DEVICE_TABLE(pci, card_ids);
11475 
11476 static struct attribute *ipw_sysfs_entries[] = {
11477 	&dev_attr_rf_kill.attr,
11478 	&dev_attr_direct_dword.attr,
11479 	&dev_attr_indirect_byte.attr,
11480 	&dev_attr_indirect_dword.attr,
11481 	&dev_attr_mem_gpio_reg.attr,
11482 	&dev_attr_command_event_reg.attr,
11483 	&dev_attr_nic_type.attr,
11484 	&dev_attr_status.attr,
11485 	&dev_attr_cfg.attr,
11486 	&dev_attr_error.attr,
11487 	&dev_attr_event_log.attr,
11488 	&dev_attr_cmd_log.attr,
11489 	&dev_attr_eeprom_delay.attr,
11490 	&dev_attr_ucode_version.attr,
11491 	&dev_attr_rtc.attr,
11492 	&dev_attr_scan_age.attr,
11493 	&dev_attr_led.attr,
11494 	&dev_attr_speed_scan.attr,
11495 	&dev_attr_net_stats.attr,
11496 	&dev_attr_channels.attr,
11497 #ifdef CONFIG_IPW2200_PROMISCUOUS
11498 	&dev_attr_rtap_iface.attr,
11499 	&dev_attr_rtap_filter.attr,
11500 #endif
11501 	NULL
11502 };
11503 
11504 static struct attribute_group ipw_attribute_group = {
11505 	.name = NULL,		/* put in device directory */
11506 	.attrs = ipw_sysfs_entries,
11507 };
11508 
11509 #ifdef CONFIG_IPW2200_PROMISCUOUS
11510 static int ipw_prom_open(struct net_device *dev)
11511 {
11512 	struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11513 	struct ipw_priv *priv = prom_priv->priv;
11514 
11515 	IPW_DEBUG_INFO("prom dev->open\n");
11516 	netif_carrier_off(dev);
11517 
11518 	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11519 		priv->sys_config.accept_all_data_frames = 1;
11520 		priv->sys_config.accept_non_directed_frames = 1;
11521 		priv->sys_config.accept_all_mgmt_bcpr = 1;
11522 		priv->sys_config.accept_all_mgmt_frames = 1;
11523 
11524 		ipw_send_system_config(priv);
11525 	}
11526 
11527 	return 0;
11528 }
11529 
11530 static int ipw_prom_stop(struct net_device *dev)
11531 {
11532 	struct ipw_prom_priv *prom_priv = libipw_priv(dev);
11533 	struct ipw_priv *priv = prom_priv->priv;
11534 
11535 	IPW_DEBUG_INFO("prom dev->stop\n");
11536 
11537 	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11538 		priv->sys_config.accept_all_data_frames = 0;
11539 		priv->sys_config.accept_non_directed_frames = 0;
11540 		priv->sys_config.accept_all_mgmt_bcpr = 0;
11541 		priv->sys_config.accept_all_mgmt_frames = 0;
11542 
11543 		ipw_send_system_config(priv);
11544 	}
11545 
11546 	return 0;
11547 }
11548 
11549 static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
11550 					    struct net_device *dev)
11551 {
11552 	IPW_DEBUG_INFO("prom dev->xmit\n");
11553 	dev_kfree_skb(skb);
11554 	return NETDEV_TX_OK;
11555 }
11556 
11557 static const struct net_device_ops ipw_prom_netdev_ops = {
11558 	.ndo_open 		= ipw_prom_open,
11559 	.ndo_stop		= ipw_prom_stop,
11560 	.ndo_start_xmit		= ipw_prom_hard_start_xmit,
11561 	.ndo_set_mac_address 	= eth_mac_addr,
11562 	.ndo_validate_addr	= eth_validate_addr,
11563 };
11564 
11565 static int ipw_prom_alloc(struct ipw_priv *priv)
11566 {
11567 	int rc = 0;
11568 
11569 	if (priv->prom_net_dev)
11570 		return -EPERM;
11571 
11572 	priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
11573 	if (priv->prom_net_dev == NULL)
11574 		return -ENOMEM;
11575 
11576 	priv->prom_priv = libipw_priv(priv->prom_net_dev);
11577 	priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11578 	priv->prom_priv->priv = priv;
11579 
11580 	strcpy(priv->prom_net_dev->name, "rtap%d");
11581 	memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11582 
11583 	priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11584 	priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
11585 
11586 	priv->prom_net_dev->min_mtu = 68;
11587 	priv->prom_net_dev->max_mtu = LIBIPW_DATA_LEN;
11588 
11589 	priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11590 	SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11591 
11592 	rc = register_netdev(priv->prom_net_dev);
11593 	if (rc) {
11594 		free_libipw(priv->prom_net_dev, 1);
11595 		priv->prom_net_dev = NULL;
11596 		return rc;
11597 	}
11598 
11599 	return 0;
11600 }
11601 
11602 static void ipw_prom_free(struct ipw_priv *priv)
11603 {
11604 	if (!priv->prom_net_dev)
11605 		return;
11606 
11607 	unregister_netdev(priv->prom_net_dev);
11608 	free_libipw(priv->prom_net_dev, 1);
11609 
11610 	priv->prom_net_dev = NULL;
11611 }
11612 
11613 #endif
11614 
11615 static const struct net_device_ops ipw_netdev_ops = {
11616 	.ndo_open		= ipw_net_open,
11617 	.ndo_stop		= ipw_net_stop,
11618 	.ndo_set_rx_mode	= ipw_net_set_multicast_list,
11619 	.ndo_set_mac_address	= ipw_net_set_mac_address,
11620 	.ndo_start_xmit		= libipw_xmit,
11621 	.ndo_validate_addr	= eth_validate_addr,
11622 };
11623 
11624 static int ipw_pci_probe(struct pci_dev *pdev,
11625 				   const struct pci_device_id *ent)
11626 {
11627 	int err = 0;
11628 	struct net_device *net_dev;
11629 	void __iomem *base;
11630 	u32 length, val;
11631 	struct ipw_priv *priv;
11632 	int i;
11633 
11634 	net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
11635 	if (net_dev == NULL) {
11636 		err = -ENOMEM;
11637 		goto out;
11638 	}
11639 
11640 	priv = libipw_priv(net_dev);
11641 	priv->ieee = netdev_priv(net_dev);
11642 
11643 	priv->net_dev = net_dev;
11644 	priv->pci_dev = pdev;
11645 	ipw_debug_level = debug;
11646 	spin_lock_init(&priv->irq_lock);
11647 	spin_lock_init(&priv->lock);
11648 	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11649 		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11650 
11651 	mutex_init(&priv->mutex);
11652 	if (pci_enable_device(pdev)) {
11653 		err = -ENODEV;
11654 		goto out_free_libipw;
11655 	}
11656 
11657 	pci_set_master(pdev);
11658 
11659 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
11660 	if (!err)
11661 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
11662 	if (err) {
11663 		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11664 		goto out_pci_disable_device;
11665 	}
11666 
11667 	pci_set_drvdata(pdev, priv);
11668 
11669 	err = pci_request_regions(pdev, DRV_NAME);
11670 	if (err)
11671 		goto out_pci_disable_device;
11672 
11673 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
11674 	 * PCI Tx retries from interfering with C3 CPU state */
11675 	pci_read_config_dword(pdev, 0x40, &val);
11676 	if ((val & 0x0000ff00) != 0)
11677 		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11678 
11679 	length = pci_resource_len(pdev, 0);
11680 	priv->hw_len = length;
11681 
11682 	base = pci_ioremap_bar(pdev, 0);
11683 	if (!base) {
11684 		err = -ENODEV;
11685 		goto out_pci_release_regions;
11686 	}
11687 
11688 	priv->hw_base = base;
11689 	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11690 	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11691 
11692 	err = ipw_setup_deferred_work(priv);
11693 	if (err) {
11694 		IPW_ERROR("Unable to setup deferred work\n");
11695 		goto out_iounmap;
11696 	}
11697 
11698 	ipw_sw_reset(priv, 1);
11699 
11700 	err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11701 	if (err) {
11702 		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11703 		goto out_iounmap;
11704 	}
11705 
11706 	SET_NETDEV_DEV(net_dev, &pdev->dev);
11707 
11708 	mutex_lock(&priv->mutex);
11709 
11710 	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11711 	priv->ieee->set_security = shim__set_security;
11712 	priv->ieee->is_queue_full = ipw_net_is_queue_full;
11713 
11714 #ifdef CONFIG_IPW2200_QOS
11715 	priv->ieee->is_qos_active = ipw_is_qos_active;
11716 	priv->ieee->handle_probe_response = ipw_handle_beacon;
11717 	priv->ieee->handle_beacon = ipw_handle_probe_response;
11718 	priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11719 #endif				/* CONFIG_IPW2200_QOS */
11720 
11721 	priv->ieee->perfect_rssi = -20;
11722 	priv->ieee->worst_rssi = -85;
11723 
11724 	net_dev->netdev_ops = &ipw_netdev_ops;
11725 	priv->wireless_data.spy_data = &priv->ieee->spy_data;
11726 	net_dev->wireless_data = &priv->wireless_data;
11727 	net_dev->wireless_handlers = &ipw_wx_handler_def;
11728 	net_dev->ethtool_ops = &ipw_ethtool_ops;
11729 
11730 	net_dev->min_mtu = 68;
11731 	net_dev->max_mtu = LIBIPW_DATA_LEN;
11732 
11733 	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11734 	if (err) {
11735 		IPW_ERROR("failed to create sysfs device attributes\n");
11736 		mutex_unlock(&priv->mutex);
11737 		goto out_release_irq;
11738 	}
11739 
11740 	if (ipw_up(priv)) {
11741 		mutex_unlock(&priv->mutex);
11742 		err = -EIO;
11743 		goto out_remove_sysfs;
11744 	}
11745 
11746 	mutex_unlock(&priv->mutex);
11747 
11748 	err = ipw_wdev_init(net_dev);
11749 	if (err) {
11750 		IPW_ERROR("failed to register wireless device\n");
11751 		goto out_remove_sysfs;
11752 	}
11753 
11754 	err = register_netdev(net_dev);
11755 	if (err) {
11756 		IPW_ERROR("failed to register network device\n");
11757 		goto out_unregister_wiphy;
11758 	}
11759 
11760 #ifdef CONFIG_IPW2200_PROMISCUOUS
11761 	if (rtap_iface) {
11762 	        err = ipw_prom_alloc(priv);
11763 		if (err) {
11764 			IPW_ERROR("Failed to register promiscuous network "
11765 				  "device (error %d).\n", err);
11766 			unregister_netdev(priv->net_dev);
11767 			goto out_unregister_wiphy;
11768 		}
11769 	}
11770 #endif
11771 
11772 	printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11773 	       "channels, %d 802.11a channels)\n",
11774 	       priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11775 	       priv->ieee->geo.a_channels);
11776 
11777 	return 0;
11778 
11779       out_unregister_wiphy:
11780 	wiphy_unregister(priv->ieee->wdev.wiphy);
11781 	kfree(priv->ieee->a_band.channels);
11782 	kfree(priv->ieee->bg_band.channels);
11783       out_remove_sysfs:
11784 	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11785       out_release_irq:
11786 	free_irq(pdev->irq, priv);
11787       out_iounmap:
11788 	iounmap(priv->hw_base);
11789       out_pci_release_regions:
11790 	pci_release_regions(pdev);
11791       out_pci_disable_device:
11792 	pci_disable_device(pdev);
11793       out_free_libipw:
11794 	free_libipw(priv->net_dev, 0);
11795       out:
11796 	return err;
11797 }
11798 
11799 static void ipw_pci_remove(struct pci_dev *pdev)
11800 {
11801 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11802 	struct list_head *p, *q;
11803 	int i;
11804 
11805 	if (!priv)
11806 		return;
11807 
11808 	mutex_lock(&priv->mutex);
11809 
11810 	priv->status |= STATUS_EXIT_PENDING;
11811 	ipw_down(priv);
11812 	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11813 
11814 	mutex_unlock(&priv->mutex);
11815 
11816 	unregister_netdev(priv->net_dev);
11817 
11818 	if (priv->rxq) {
11819 		ipw_rx_queue_free(priv, priv->rxq);
11820 		priv->rxq = NULL;
11821 	}
11822 	ipw_tx_queue_free(priv);
11823 
11824 	if (priv->cmdlog) {
11825 		kfree(priv->cmdlog);
11826 		priv->cmdlog = NULL;
11827 	}
11828 
11829 	/* make sure all works are inactive */
11830 	cancel_delayed_work_sync(&priv->adhoc_check);
11831 	cancel_work_sync(&priv->associate);
11832 	cancel_work_sync(&priv->disassociate);
11833 	cancel_work_sync(&priv->system_config);
11834 	cancel_work_sync(&priv->rx_replenish);
11835 	cancel_work_sync(&priv->adapter_restart);
11836 	cancel_delayed_work_sync(&priv->rf_kill);
11837 	cancel_work_sync(&priv->up);
11838 	cancel_work_sync(&priv->down);
11839 	cancel_delayed_work_sync(&priv->request_scan);
11840 	cancel_delayed_work_sync(&priv->request_direct_scan);
11841 	cancel_delayed_work_sync(&priv->request_passive_scan);
11842 	cancel_delayed_work_sync(&priv->scan_event);
11843 	cancel_delayed_work_sync(&priv->gather_stats);
11844 	cancel_work_sync(&priv->abort_scan);
11845 	cancel_work_sync(&priv->roam);
11846 	cancel_delayed_work_sync(&priv->scan_check);
11847 	cancel_work_sync(&priv->link_up);
11848 	cancel_work_sync(&priv->link_down);
11849 	cancel_delayed_work_sync(&priv->led_link_on);
11850 	cancel_delayed_work_sync(&priv->led_link_off);
11851 	cancel_delayed_work_sync(&priv->led_act_off);
11852 	cancel_work_sync(&priv->merge_networks);
11853 
11854 	/* Free MAC hash list for ADHOC */
11855 	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11856 		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11857 			list_del(p);
11858 			kfree(list_entry(p, struct ipw_ibss_seq, list));
11859 		}
11860 	}
11861 
11862 	kfree(priv->error);
11863 	priv->error = NULL;
11864 
11865 #ifdef CONFIG_IPW2200_PROMISCUOUS
11866 	ipw_prom_free(priv);
11867 #endif
11868 
11869 	free_irq(pdev->irq, priv);
11870 	iounmap(priv->hw_base);
11871 	pci_release_regions(pdev);
11872 	pci_disable_device(pdev);
11873 	/* wiphy_unregister needs to be here, before free_libipw */
11874 	wiphy_unregister(priv->ieee->wdev.wiphy);
11875 	kfree(priv->ieee->a_band.channels);
11876 	kfree(priv->ieee->bg_band.channels);
11877 	free_libipw(priv->net_dev, 0);
11878 	free_firmware();
11879 }
11880 
11881 #ifdef CONFIG_PM
11882 static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11883 {
11884 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11885 	struct net_device *dev = priv->net_dev;
11886 
11887 	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11888 
11889 	/* Take down the device; powers it off, etc. */
11890 	ipw_down(priv);
11891 
11892 	/* Remove the PRESENT state of the device */
11893 	netif_device_detach(dev);
11894 
11895 	pci_save_state(pdev);
11896 	pci_disable_device(pdev);
11897 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
11898 
11899 	priv->suspend_at = get_seconds();
11900 
11901 	return 0;
11902 }
11903 
11904 static int ipw_pci_resume(struct pci_dev *pdev)
11905 {
11906 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11907 	struct net_device *dev = priv->net_dev;
11908 	int err;
11909 	u32 val;
11910 
11911 	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11912 
11913 	pci_set_power_state(pdev, PCI_D0);
11914 	err = pci_enable_device(pdev);
11915 	if (err) {
11916 		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
11917 		       dev->name);
11918 		return err;
11919 	}
11920 	pci_restore_state(pdev);
11921 
11922 	/*
11923 	 * Suspend/Resume resets the PCI configuration space, so we have to
11924 	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11925 	 * from interfering with C3 CPU state. pci_restore_state won't help
11926 	 * here since it only restores the first 64 bytes pci config header.
11927 	 */
11928 	pci_read_config_dword(pdev, 0x40, &val);
11929 	if ((val & 0x0000ff00) != 0)
11930 		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11931 
11932 	/* Set the device back into the PRESENT state; this will also wake
11933 	 * the queue of needed */
11934 	netif_device_attach(dev);
11935 
11936 	priv->suspend_time = get_seconds() - priv->suspend_at;
11937 
11938 	/* Bring the device back up */
11939 	schedule_work(&priv->up);
11940 
11941 	return 0;
11942 }
11943 #endif
11944 
11945 static void ipw_pci_shutdown(struct pci_dev *pdev)
11946 {
11947 	struct ipw_priv *priv = pci_get_drvdata(pdev);
11948 
11949 	/* Take down the device; powers it off, etc. */
11950 	ipw_down(priv);
11951 
11952 	pci_disable_device(pdev);
11953 }
11954 
11955 /* driver initialization stuff */
11956 static struct pci_driver ipw_driver = {
11957 	.name = DRV_NAME,
11958 	.id_table = card_ids,
11959 	.probe = ipw_pci_probe,
11960 	.remove = ipw_pci_remove,
11961 #ifdef CONFIG_PM
11962 	.suspend = ipw_pci_suspend,
11963 	.resume = ipw_pci_resume,
11964 #endif
11965 	.shutdown = ipw_pci_shutdown,
11966 };
11967 
11968 static int __init ipw_init(void)
11969 {
11970 	int ret;
11971 
11972 	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11973 	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11974 
11975 	ret = pci_register_driver(&ipw_driver);
11976 	if (ret) {
11977 		IPW_ERROR("Unable to initialize PCI module\n");
11978 		return ret;
11979 	}
11980 
11981 	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11982 	if (ret) {
11983 		IPW_ERROR("Unable to create driver sysfs file\n");
11984 		pci_unregister_driver(&ipw_driver);
11985 		return ret;
11986 	}
11987 
11988 	return ret;
11989 }
11990 
11991 static void __exit ipw_exit(void)
11992 {
11993 	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11994 	pci_unregister_driver(&ipw_driver);
11995 }
11996 
11997 module_param(disable, int, 0444);
11998 MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11999 
12000 module_param(associate, int, 0444);
12001 MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
12002 
12003 module_param(auto_create, int, 0444);
12004 MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
12005 
12006 module_param_named(led, led_support, int, 0444);
12007 MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)");
12008 
12009 module_param(debug, int, 0444);
12010 MODULE_PARM_DESC(debug, "debug output mask");
12011 
12012 module_param_named(channel, default_channel, int, 0444);
12013 MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
12014 
12015 #ifdef CONFIG_IPW2200_PROMISCUOUS
12016 module_param(rtap_iface, int, 0444);
12017 MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
12018 #endif
12019 
12020 #ifdef CONFIG_IPW2200_QOS
12021 module_param(qos_enable, int, 0444);
12022 MODULE_PARM_DESC(qos_enable, "enable all QoS functionalitis");
12023 
12024 module_param(qos_burst_enable, int, 0444);
12025 MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
12026 
12027 module_param(qos_no_ack_mask, int, 0444);
12028 MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
12029 
12030 module_param(burst_duration_CCK, int, 0444);
12031 MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
12032 
12033 module_param(burst_duration_OFDM, int, 0444);
12034 MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
12035 #endif				/* CONFIG_IPW2200_QOS */
12036 
12037 #ifdef CONFIG_IPW2200_MONITOR
12038 module_param_named(mode, network_mode, int, 0444);
12039 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
12040 #else
12041 module_param_named(mode, network_mode, int, 0444);
12042 MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
12043 #endif
12044 
12045 module_param(bt_coexist, int, 0444);
12046 MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
12047 
12048 module_param(hwcrypto, int, 0444);
12049 MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
12050 
12051 module_param(cmdlog, int, 0444);
12052 MODULE_PARM_DESC(cmdlog,
12053 		 "allocate a ring buffer for logging firmware commands");
12054 
12055 module_param(roaming, int, 0444);
12056 MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
12057 
12058 module_param(antenna, int, 0444);
12059 MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");
12060 
12061 module_exit(ipw_exit);
12062 module_init(ipw_init);
12063